Diffstat (limited to 'include')
182 files changed, 6669 insertions, 2985 deletions
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h index eda04546cdf..473d584b1d3 100644 --- a/include/acpi/acexcep.h +++ b/include/acpi/acexcep.h @@ -103,8 +103,9 @@ #define AE_BAD_OCTAL_CONSTANT (acpi_status) (0x0006 | AE_CODE_PROGRAMMER) #define AE_BAD_DECIMAL_CONSTANT (acpi_status) (0x0007 | AE_CODE_PROGRAMMER) #define AE_MISSING_ARGUMENTS (acpi_status) (0x0008 | AE_CODE_PROGRAMMER) +#define AE_BAD_ADDRESS (acpi_status) (0x0009 | AE_CODE_PROGRAMMER) -#define AE_CODE_PGM_MAX 0x0008 +#define AE_CODE_PGM_MAX 0x0009 /* * Acpi table exceptions @@ -224,7 +225,8 @@ char const *acpi_gbl_exception_names_pgm[] = { "AE_BAD_HEX_CONSTANT", "AE_BAD_OCTAL_CONSTANT", "AE_BAD_DECIMAL_CONSTANT", - "AE_MISSING_ARGUMENTS" + "AE_MISSING_ARGUMENTS", + "AE_BAD_ADDRESS" }; char const *acpi_gbl_exception_names_tbl[] = { diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index e9f6574930e..c34b1102290 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -88,44 +88,30 @@ struct acpi_device; typedef int (*acpi_op_add) (struct acpi_device * device); typedef int (*acpi_op_remove) (struct acpi_device * device, int type); -typedef int (*acpi_op_lock) (struct acpi_device * device, int type); typedef int (*acpi_op_start) (struct acpi_device * device); typedef int (*acpi_op_stop) (struct acpi_device * device, int type); typedef int (*acpi_op_suspend) (struct acpi_device * device, pm_message_t state); typedef int (*acpi_op_resume) (struct acpi_device * device); -typedef int (*acpi_op_scan) (struct acpi_device * device); typedef int (*acpi_op_bind) (struct acpi_device * device); typedef int (*acpi_op_unbind) (struct acpi_device * device); -typedef int (*acpi_op_shutdown) (struct acpi_device * device); +typedef void (*acpi_op_notify) (struct acpi_device * device, u32 event); struct acpi_bus_ops { u32 acpi_op_add:1; - u32 acpi_op_remove:1; - u32 acpi_op_lock:1; u32 acpi_op_start:1; - u32 acpi_op_stop:1; - u32 acpi_op_suspend:1; - u32 acpi_op_resume:1; - u32 acpi_op_scan:1; - u32 acpi_op_bind:1; - u32 acpi_op_unbind:1; - u32 acpi_op_shutdown:1; - u32 reserved:21; }; struct acpi_device_ops { acpi_op_add add; acpi_op_remove remove; - acpi_op_lock lock; acpi_op_start start; acpi_op_stop stop; acpi_op_suspend suspend; acpi_op_resume resume; - acpi_op_scan scan; acpi_op_bind bind; acpi_op_unbind unbind; - acpi_op_shutdown shutdown; + acpi_op_notify notify; }; struct acpi_driver { @@ -284,7 +270,6 @@ struct acpi_device { struct list_head children; struct list_head node; struct list_head wakeup_list; - struct list_head g_list; struct acpi_device_status status; struct acpi_device_flags flags; struct acpi_device_pnp pnp; diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h index 5fc1bb0f4a9..0352c8f0b05 100644 --- a/include/acpi/acpi_drivers.h +++ b/include/acpi/acpi_drivers.h @@ -67,6 +67,16 @@ #define ACPI_BAY_HID "LNXIOBAY" #define ACPI_DOCK_HID "LNXDOCK" +/* + * For fixed hardware buttons, we fabricate acpi_devices with HID + * ACPI_BUTTON_HID_POWERF or ACPI_BUTTON_HID_SLEEPF. Fixed hardware + * signals only an event; it doesn't supply a notification value. + * To allow drivers to treat notifications from fixed hardware the + * same as those from real devices, we turn the events into this + * notification value. 
+ */ +#define ACPI_FIXED_HARDWARE_EVENT 0x100 + /* -------------------------------------------------------------------------- PCI -------------------------------------------------------------------------- */ @@ -99,24 +109,6 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_device *device, int domain, int bus); /* -------------------------------------------------------------------------- - Power Resource - -------------------------------------------------------------------------- */ - -int acpi_device_sleep_wake(struct acpi_device *dev, - int enable, int sleep_state, int dev_state); -int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state); -int acpi_disable_wakeup_device_power(struct acpi_device *dev); -int acpi_power_get_inferred_state(struct acpi_device *device); -int acpi_power_transition(struct acpi_device *device, int state); -extern int acpi_power_nocheck; - -/* -------------------------------------------------------------------------- - Embedded Controller - -------------------------------------------------------------------------- */ -int acpi_ec_ecdt_probe(void); -int acpi_boot_ec_enable(void); - -/* -------------------------------------------------------------------------- Processor -------------------------------------------------------------------------- */ @@ -165,9 +157,4 @@ static inline void unregister_hotplug_dock_device(acpi_handle handle) } #endif -/*-------------------------------------------------------------------------- - Suspend/Resume - -------------------------------------------------------------------------- */ -extern int acpi_sleep_init(void); - #endif /*__ACPI_DRIVERS_H__*/ diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h index ab0b85cf21f..3e798593b17 100644 --- a/include/acpi/acpiosxf.h +++ b/include/acpi/acpiosxf.h @@ -242,10 +242,6 @@ acpi_os_derive_pci_id(acpi_handle rhandle, acpi_status acpi_os_validate_interface(char *interface); acpi_status acpi_osi_invalidate(char* interface); -acpi_status -acpi_os_validate_address(u8 space_id, acpi_physical_address address, - acpi_size length, char *name); - u64 acpi_os_get_timer(void); acpi_status acpi_os_signal(u32 function, void *info); diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index cc40102fe2f..4db89e98535 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -47,7 +47,7 @@ /* Current ACPICA subsystem version in YYYYMMDD format */ -#define ACPI_CA_VERSION 0x20081204 +#define ACPI_CA_VERSION 0x20090320 #include "actypes.h" #include "actbl.h" @@ -191,14 +191,12 @@ acpi_evaluate_object(acpi_handle object, struct acpi_object_list *parameter_objects, struct acpi_buffer *return_object_buffer); -#ifdef ACPI_FUTURE_USAGE acpi_status acpi_evaluate_object_typed(acpi_handle object, acpi_string pathname, struct acpi_object_list *external_params, struct acpi_buffer *return_buffer, acpi_object_type return_type); -#endif acpi_status acpi_get_object_info(acpi_handle handle, struct acpi_buffer *return_buffer); @@ -349,17 +347,15 @@ acpi_resource_to_address64(struct acpi_resource *resource, */ acpi_status acpi_reset(void); -acpi_status acpi_get_register(u32 register_id, u32 * return_value); - -acpi_status acpi_get_register_unlocked(u32 register_id, u32 *return_value); +acpi_status acpi_read_bit_register(u32 register_id, u32 *return_value); -acpi_status acpi_set_register(u32 register_id, u32 value); +acpi_status acpi_write_bit_register(u32 register_id, u32 value); -acpi_status -acpi_set_firmware_waking_vector(u32 physical_address); +acpi_status 
acpi_set_firmware_waking_vector(u32 physical_address); -acpi_status -acpi_set_firmware_waking_vector64(u64 physical_address); +#if ACPI_MACHINE_WIDTH == 64 +acpi_status acpi_set_firmware_waking_vector64(u64 physical_address); +#endif acpi_status acpi_read(u32 *value, struct acpi_generic_address *reg); diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h index bf8d4cfd8cf..222733d01f3 100644 --- a/include/acpi/actbl.h +++ b/include/acpi/actbl.h @@ -214,11 +214,11 @@ struct acpi_table_fadt { u16 flush_size; /* Processor's memory cache line width, in bytes */ u16 flush_stride; /* Number of flush strides that need to be read */ u8 duty_offset; /* Processor duty cycle index in processor's P_CNT reg */ - u8 duty_width; /* Processor duty cycle value bit width in P_CNT register. */ + u8 duty_width; /* Processor duty cycle value bit width in P_CNT register */ u8 day_alarm; /* Index to day-of-month alarm in RTC CMOS RAM */ u8 month_alarm; /* Index to month-of-year alarm in RTC CMOS RAM */ u8 century; /* Index to century in RTC CMOS RAM */ - u16 boot_flags; /* IA-PC Boot Architecture Flags. See Table 5-10 for description */ + u16 boot_flags; /* IA-PC Boot Architecture Flags (see below for individual flags) */ u8 reserved; /* Reserved, must be zero */ u32 flags; /* Miscellaneous flag bits (see below for individual flags) */ struct acpi_generic_address reset_register; /* 64-bit address of the Reset register */ @@ -236,32 +236,41 @@ struct acpi_table_fadt { struct acpi_generic_address xgpe1_block; /* 64-bit Extended General Purpose Event 1 Reg Blk address */ }; +/* FADT Boot Architecture Flags (boot_flags) */ + +#define ACPI_FADT_LEGACY_DEVICES (1) /* 00: [V2] System has LPC or ISA bus devices */ +#define ACPI_FADT_8042 (1<<1) /* 01: [V3] System has an 8042 controller on port 60/64 */ +#define ACPI_FADT_NO_VGA (1<<2) /* 02: [V4] It is not safe to probe for VGA hardware */ +#define ACPI_FADT_NO_MSI (1<<3) /* 03: [V4] Message Signaled Interrupts (MSI) must not be enabled */ +#define ACPI_FADT_NO_ASPM (1<<4) /* 04: [V4] PCIe ASPM control must not be enabled */ + +#define FADT2_REVISION_ID 3 + /* FADT flags */ -#define ACPI_FADT_WBINVD (1) /* 00: The wbinvd instruction works properly */ -#define ACPI_FADT_WBINVD_FLUSH (1<<1) /* 01: The wbinvd flushes but does not invalidate */ -#define ACPI_FADT_C1_SUPPORTED (1<<2) /* 02: All processors support C1 state */ -#define ACPI_FADT_C2_MP_SUPPORTED (1<<3) /* 03: C2 state works on MP system */ -#define ACPI_FADT_POWER_BUTTON (1<<4) /* 04: Power button is handled as a generic feature */ -#define ACPI_FADT_SLEEP_BUTTON (1<<5) /* 05: Sleep button is handled as a generic feature, or not present */ -#define ACPI_FADT_FIXED_RTC (1<<6) /* 06: RTC wakeup stat not in fixed register space */ -#define ACPI_FADT_S4_RTC_WAKE (1<<7) /* 07: RTC wakeup possible from S4 */ -#define ACPI_FADT_32BIT_TIMER (1<<8) /* 08: tmr_val is 32 bits 0=24-bits */ -#define ACPI_FADT_DOCKING_SUPPORTED (1<<9) /* 09: Docking supported */ -#define ACPI_FADT_RESET_REGISTER (1<<10) /* 10: System reset via the FADT RESET_REG supported */ -#define ACPI_FADT_SEALED_CASE (1<<11) /* 11: No internal expansion capabilities and case is sealed */ -#define ACPI_FADT_HEADLESS (1<<12) /* 12: No local video capabilities or local input devices */ -#define ACPI_FADT_SLEEP_TYPE (1<<13) /* 13: Must execute native instruction after writing SLP_TYPx register */ -#define ACPI_FADT_PCI_EXPRESS_WAKE (1<<14) /* 14: System supports PCIEXP_WAKE (STS/EN) bits (ACPI 3.0) */ -#define ACPI_FADT_PLATFORM_CLOCK (1<<15) /* 15: OSPM 
should use platform-provided timer (ACPI 3.0) */ -#define ACPI_FADT_S4_RTC_VALID (1<<16) /* 16: Contents of RTC_STS valid after S4 wake (ACPI 3.0) */ -#define ACPI_FADT_REMOTE_POWER_ON (1<<17) /* 17: System is compatible with remote power on (ACPI 3.0) */ -#define ACPI_FADT_APIC_CLUSTER (1<<18) /* 18: All local APICs must use cluster model (ACPI 3.0) */ -#define ACPI_FADT_APIC_PHYSICAL (1<<19) /* 19: All local x_aPICs must use physical dest mode (ACPI 3.0) */ +#define ACPI_FADT_WBINVD (1) /* 00: [V1] The wbinvd instruction works properly */ +#define ACPI_FADT_WBINVD_FLUSH (1<<1) /* 01: [V1] wbinvd flushes but does not invalidate caches */ +#define ACPI_FADT_C1_SUPPORTED (1<<2) /* 02: [V1] All processors support C1 state */ +#define ACPI_FADT_C2_MP_SUPPORTED (1<<3) /* 03: [V1] C2 state works on MP system */ +#define ACPI_FADT_POWER_BUTTON (1<<4) /* 04: [V1] Power button is handled as a control method device */ +#define ACPI_FADT_SLEEP_BUTTON (1<<5) /* 05: [V1] Sleep button is handled as a control method device */ +#define ACPI_FADT_FIXED_RTC (1<<6) /* 06: [V1] RTC wakeup status not in fixed register space */ +#define ACPI_FADT_S4_RTC_WAKE (1<<7) /* 07: [V1] RTC alarm can wake system from S4 */ +#define ACPI_FADT_32BIT_TIMER (1<<8) /* 08: [V1] ACPI timer width is 32-bit (0=24-bit) */ +#define ACPI_FADT_DOCKING_SUPPORTED (1<<9) /* 09: [V1] Docking supported */ +#define ACPI_FADT_RESET_REGISTER (1<<10) /* 10: [V2] System reset via the FADT RESET_REG supported */ +#define ACPI_FADT_SEALED_CASE (1<<11) /* 11: [V3] No internal expansion capabilities and case is sealed */ +#define ACPI_FADT_HEADLESS (1<<12) /* 12: [V3] No local video capabilities or local input devices */ +#define ACPI_FADT_SLEEP_TYPE (1<<13) /* 13: [V3] Must execute native instruction after writing SLP_TYPx register */ +#define ACPI_FADT_PCI_EXPRESS_WAKE (1<<14) /* 14: [V4] System supports PCIEXP_WAKE (STS/EN) bits (ACPI 3.0) */ +#define ACPI_FADT_PLATFORM_CLOCK (1<<15) /* 15: [V4] OSPM should use platform-provided timer (ACPI 3.0) */ +#define ACPI_FADT_S4_RTC_VALID (1<<16) /* 16: [V4] Contents of RTC_STS valid after S4 wake (ACPI 3.0) */ +#define ACPI_FADT_REMOTE_POWER_ON (1<<17) /* 17: [V4] System is compatible with remote power on (ACPI 3.0) */ +#define ACPI_FADT_APIC_CLUSTER (1<<18) /* 18: [V4] All local APICs must use cluster model (ACPI 3.0) */ +#define ACPI_FADT_APIC_PHYSICAL (1<<19) /* 19: [V4] All local x_aPICs must use physical dest mode (ACPI 3.0) */ + +/* FADT Prefered Power Management Profiles */ -/* - * FADT Prefered Power Management Profiles - */ enum acpi_prefered_pm_profiles { PM_UNSPECIFIED = 0, PM_DESKTOP = 1, @@ -272,16 +281,6 @@ enum acpi_prefered_pm_profiles { PM_APPLIANCE_PC = 6 }; -/* FADT Boot Arch Flags */ - -#define BAF_LEGACY_DEVICES 0x0001 -#define BAF_8042_KEYBOARD_CONTROLLER 0x0002 -#define BAF_MSI_NOT_SUPPORTED 0x0008 -#define BAF_PCIE_ASPM_CONTROL 0x0010 - -#define FADT2_REVISION_ID 3 -#define FADT2_MINUS_REVISION_ID 2 - /* Reset to default packing */ #pragma pack() @@ -310,8 +309,9 @@ struct acpi_table_desc { #define ACPI_TABLE_ORIGIN_UNKNOWN (0) #define ACPI_TABLE_ORIGIN_MAPPED (1) #define ACPI_TABLE_ORIGIN_ALLOCATED (2) -#define ACPI_TABLE_ORIGIN_MASK (3) -#define ACPI_TABLE_IS_LOADED (4) +#define ACPI_TABLE_ORIGIN_OVERRIDE (4) +#define ACPI_TABLE_ORIGIN_MASK (7) +#define ACPI_TABLE_IS_LOADED (8) /* * Get the remaining ACPI tables diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h index 18963b96811..59ade075247 100644 --- a/include/acpi/actbl1.h +++ b/include/acpi/actbl1.h @@ -1016,9 
+1016,9 @@ struct acpi_madt_interrupt_source { struct acpi_madt_local_x2apic { struct acpi_subtable_header header; u16 reserved; /* Reserved - must be zero */ - u32 local_apic_id; /* Processor X2_APIC ID */ + u32 local_apic_id; /* Processor x2APIC ID */ u32 lapic_flags; - u32 uid; /* Extended X2_APIC processor ID */ + u32 uid; /* ACPI processor UID */ }; /* 10: Local X2APIC NMI (07/2008) */ @@ -1026,7 +1026,7 @@ struct acpi_madt_local_x2apic { struct acpi_madt_local_x2apic_nmi { struct acpi_subtable_header header; u16 inti_flags; - u32 uid; /* Processor X2_APIC ID */ + u32 uid; /* ACPI processor UID */ u8 lint; /* LINTn to which NMI is connected */ u8 reserved[3]; }; diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index a20aab51017..f555d927f7c 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h @@ -777,17 +777,25 @@ typedef u8 acpi_adr_space_type; #define ACPI_BITREG_SCI_ENABLE 0x0E #define ACPI_BITREG_BUS_MASTER_RLD 0x0F #define ACPI_BITREG_GLOBAL_LOCK_RELEASE 0x10 -#define ACPI_BITREG_SLEEP_TYPE_A 0x11 -#define ACPI_BITREG_SLEEP_TYPE_B 0x12 -#define ACPI_BITREG_SLEEP_ENABLE 0x13 +#define ACPI_BITREG_SLEEP_TYPE 0x11 +#define ACPI_BITREG_SLEEP_ENABLE 0x12 /* PM2 Control register */ -#define ACPI_BITREG_ARB_DISABLE 0x14 +#define ACPI_BITREG_ARB_DISABLE 0x13 -#define ACPI_BITREG_MAX 0x14 +#define ACPI_BITREG_MAX 0x13 #define ACPI_NUM_BITREG ACPI_BITREG_MAX + 1 +/* Status register values. A 1 clears a status bit. 0 = no effect */ + +#define ACPI_CLEAR_STATUS 1 + +/* Enable and Control register values */ + +#define ACPI_ENABLE_EVENT 1 +#define ACPI_DISABLE_EVENT 0 + /* * External ACPI object definition */ diff --git a/include/acpi/processor.h b/include/acpi/processor.h index 0574add2a1e..b09c4fde972 100644 --- a/include/acpi/processor.h +++ b/include/acpi/processor.h @@ -322,7 +322,7 @@ static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr) int acpi_processor_tstate_has_changed(struct acpi_processor *pr); int acpi_processor_get_throttling_info(struct acpi_processor *pr); extern int acpi_processor_set_throttling(struct acpi_processor *pr, int state); -extern struct file_operations acpi_processor_throttling_fops; +extern const struct file_operations acpi_processor_throttling_fops; extern void acpi_processor_throttling_init(void); /* in processor_idle.c */ int acpi_processor_power_init(struct acpi_processor *pr, @@ -336,7 +336,7 @@ extern struct cpuidle_driver acpi_idle_driver; /* in processor_thermal.c */ int acpi_processor_get_limit_info(struct acpi_processor *pr); -extern struct file_operations acpi_processor_limit_fops; +extern const struct file_operations acpi_processor_limit_fops; extern struct thermal_cooling_device_ops processor_cooling_ops; #ifdef CONFIG_CPU_FREQ void acpi_thermal_cpufreq_init(void); diff --git a/include/acpi/video.h b/include/acpi/video.h new file mode 100644 index 00000000000..f0275bb79ce --- /dev/null +++ b/include/acpi/video.h @@ -0,0 +1,11 @@ +#ifndef __ACPI_VIDEO_H +#define __ACPI_VIDEO_H + +#if (defined CONFIG_ACPI_VIDEO || defined CONFIG_ACPI_VIDEO_MODULE) +extern int acpi_video_register(void); +#else +static inline int acpi_video_register(void) { return 0; } +#endif + +#endif + diff --git a/include/asm-frv/ftrace.h b/include/asm-frv/ftrace.h new file mode 100644 index 00000000000..40a8c178f10 --- /dev/null +++ b/include/asm-frv/ftrace.h @@ -0,0 +1 @@ +/* empty */ diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index a654d724d3b..7fa660fd449 100644 --- 
a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -61,6 +61,30 @@ #define BRANCH_PROFILE() #endif +#ifdef CONFIG_EVENT_TRACER +#define FTRACE_EVENTS() VMLINUX_SYMBOL(__start_ftrace_events) = .; \ + *(_ftrace_events) \ + VMLINUX_SYMBOL(__stop_ftrace_events) = .; +#else +#define FTRACE_EVENTS() +#endif + +#ifdef CONFIG_TRACING +#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \ + *(__trace_printk_fmt) /* Trace_printk fmt' pointer */ \ + VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .; +#else +#define TRACE_PRINTKS() +#endif + +#ifdef CONFIG_FTRACE_SYSCALLS +#define TRACE_SYSCALLS() VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \ + *(__syscalls_metadata) \ + VMLINUX_SYMBOL(__stop_syscalls_metadata) = .; +#else +#define TRACE_SYSCALLS() +#endif + /* .data section */ #define DATA_DATA \ *(.data) \ @@ -86,7 +110,10 @@ *(__verbose) \ VMLINUX_SYMBOL(__stop___verbose) = .; \ LIKELY_PROFILE() \ - BRANCH_PROFILE() + BRANCH_PROFILE() \ + TRACE_PRINTKS() \ + FTRACE_EVENTS() \ + TRACE_SYSCALLS() #define RO_DATA(align) \ . = ALIGN((align)); \ diff --git a/include/asm-m32r/ftrace.h b/include/asm-m32r/ftrace.h new file mode 100644 index 00000000000..40a8c178f10 --- /dev/null +++ b/include/asm-m32r/ftrace.h @@ -0,0 +1 @@ +/* empty */ diff --git a/include/asm-mn10300/ftrace.h b/include/asm-mn10300/ftrace.h new file mode 100644 index 00000000000..40a8c178f10 --- /dev/null +++ b/include/asm-mn10300/ftrace.h @@ -0,0 +1 @@ +/* empty */ diff --git a/include/linux/acpi.h b/include/linux/acpi.h index d047f846c3e..6586cbd0d4a 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -97,6 +97,7 @@ void acpi_table_print_madt_entry (struct acpi_subtable_header *madt); /* the following four functions are architecture-dependent */ void acpi_numa_slit_init (struct acpi_table_slit *slit); void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa); +void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa); void acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma); void acpi_numa_arch_fixup(void); diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h index 45f6297821b..5fc2ef8d97f 100644 --- a/include/linux/async_tx.h +++ b/include/linux/async_tx.h @@ -21,6 +21,15 @@ #include <linux/spinlock.h> #include <linux/interrupt.h> +/* on architectures without dma-mapping capabilities we need to ensure + * that the asynchronous path compiles away + */ +#ifdef CONFIG_HAS_DMA +#define __async_inline +#else +#define __async_inline __always_inline +#endif + /** * dma_chan_ref - object used to manage dma channels received from the * dmaengine core. 
diff --git a/include/linux/ata.h b/include/linux/ata.h index 6617c9f8f2c..cb79b7a208e 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -29,6 +29,8 @@ #ifndef __LINUX_ATA_H__ #define __LINUX_ATA_H__ +#include <linux/kernel.h> +#include <linux/string.h> #include <linux/types.h> #include <asm/byteorder.h> @@ -91,6 +93,7 @@ enum { ATA_ID_CFA_POWER = 160, ATA_ID_CFA_KEY_MGMT = 162, ATA_ID_CFA_MODES = 163, + ATA_ID_DATA_SET_MGMT = 169, ATA_ID_ROT_SPEED = 217, ATA_ID_PIO4 = (1 << 1), @@ -248,6 +251,7 @@ enum { ATA_CMD_SMART = 0xB0, ATA_CMD_MEDIA_LOCK = 0xDE, ATA_CMD_MEDIA_UNLOCK = 0xDF, + ATA_CMD_DSM = 0x06, /* marked obsolete in the ATA/ATAPI-7 spec */ ATA_CMD_RESTORE = 0x10, @@ -321,6 +325,9 @@ enum { ATA_SMART_READ_VALUES = 0xD0, ATA_SMART_READ_THRESHOLDS = 0xD1, + /* feature values for Data Set Management */ + ATA_DSM_TRIM = 0x01, + /* password used in LBA Mid / LBA High for executing SMART commands */ ATA_SMART_LBAM_PASS = 0x4F, ATA_SMART_LBAH_PASS = 0xC2, @@ -723,6 +730,14 @@ static inline int ata_id_has_unload(const u16 *id) return 0; } +static inline int ata_id_has_trim(const u16 *id) +{ + if (ata_id_major_version(id) >= 7 && + (id[ATA_ID_DATA_SET_MGMT] & 1)) + return 1; + return 0; +} + static inline int ata_id_current_chs_valid(const u16 *id) { /* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command @@ -863,6 +878,32 @@ static inline void ata_id_to_hd_driveid(u16 *id) #endif } +/* + * Write up to 'max' LBA Range Entries to the buffer that will cover the + * extent from sector to sector + count. This is used for TRIM and for + * ADD LBA(S) TO NV CACHE PINNED SET. + */ +static inline unsigned ata_set_lba_range_entries(void *_buffer, unsigned max, + u64 sector, unsigned long count) +{ + __le64 *buffer = _buffer; + unsigned i = 0; + + while (i < max) { + u64 entry = sector | + ((u64)(count > 0xffff ? 
0xffff : count) << 48); + buffer[i++] = __cpu_to_le64(entry); + if (count <= 0xffff) + break; + count -= 0xffff; + sector += 0xffff; + } + + max = ALIGN(i * 8, 512); + memset(buffer + i, 0, max - i * 8); + return max; +} + static inline int is_multi_taskfile(struct ata_taskfile *tf) { return (tf->command == ATA_CMD_READ_MULTI) || diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index bee52abb8a4..0ec2c594868 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -24,8 +24,8 @@ struct dentry; */ enum bdi_state { BDI_pdflush, /* A pdflush thread is working this device */ - BDI_write_congested, /* The write queue is getting full */ - BDI_read_congested, /* The read queue is getting full */ + BDI_async_congested, /* The async (write) queue is getting full */ + BDI_sync_congested, /* The sync queue is getting full */ BDI_unused, /* Available bits start here */ }; @@ -215,18 +215,18 @@ static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits) static inline int bdi_read_congested(struct backing_dev_info *bdi) { - return bdi_congested(bdi, 1 << BDI_read_congested); + return bdi_congested(bdi, 1 << BDI_sync_congested); } static inline int bdi_write_congested(struct backing_dev_info *bdi) { - return bdi_congested(bdi, 1 << BDI_write_congested); + return bdi_congested(bdi, 1 << BDI_async_congested); } static inline int bdi_rw_congested(struct backing_dev_info *bdi) { - return bdi_congested(bdi, (1 << BDI_read_congested)| - (1 << BDI_write_congested)); + return bdi_congested(bdi, (1 << BDI_sync_congested) | + (1 << BDI_async_congested)); } void clear_bdi_congested(struct backing_dev_info *bdi, int rw); diff --git a/include/linux/bio.h b/include/linux/bio.h index b05b1d4d17d..b900d2c67d2 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -145,20 +145,21 @@ struct bio { * bit 2 -- barrier * Insert a serialization point in the IO queue, forcing previously * submitted IO to be completed before this one is issued. - * bit 3 -- synchronous I/O hint: the block layer will unplug immediately - * Note that this does NOT indicate that the IO itself is sync, just - * that the block layer will not postpone issue of this IO by plugging. - * bit 4 -- metadata request + * bit 3 -- synchronous I/O hint. + * bit 4 -- Unplug the device immediately after submitting this bio. + * bit 5 -- metadata request * Used for tracing to differentiate metadata and data IO. May also * get some preferential treatment in the IO scheduler - * bit 5 -- discard sectors + * bit 6 -- discard sectors * Informs the lower level device that this range of sectors is no longer * used by the file system and may thus be freed by the device. Used * for flash based storage. - * bit 6 -- fail fast device errors - * bit 7 -- fail fast transport errors - * bit 8 -- fail fast driver errors + * bit 7 -- fail fast device errors + * bit 8 -- fail fast transport errors + * bit 9 -- fail fast driver errors * Don't want driver retries for any fast fail whatever the reason. + * bit 10 -- Tell the IO scheduler not to wait for more requests after this + one has been submitted, even if it is a SYNC request. 
*/ #define BIO_RW 0 /* Must match RW in req flags (blkdev.h) */ #define BIO_RW_AHEAD 1 /* Must match FAILFAST in req flags */ @@ -170,6 +171,7 @@ struct bio { #define BIO_RW_FAILFAST_DEV 7 #define BIO_RW_FAILFAST_TRANSPORT 8 #define BIO_RW_FAILFAST_DRIVER 9 +#define BIO_RW_NOIDLE 10 #define bio_rw_flagged(bio, flag) ((bio)->bi_rw & (1 << (flag))) @@ -188,6 +190,7 @@ struct bio { #define bio_rw_ahead(bio) bio_rw_flagged(bio, BIO_RW_AHEAD) #define bio_rw_meta(bio) bio_rw_flagged(bio, BIO_RW_META) #define bio_discard(bio) bio_rw_flagged(bio, BIO_RW_DISCARD) +#define bio_noidle(bio) bio_rw_flagged(bio, BIO_RW_NOIDLE) /* * upper 16 bits of bi_rw define the io priority of this bio diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 465d6babc84..ba54c834a59 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -38,6 +38,10 @@ struct request; typedef void (rq_end_io_fn)(struct request *, int); struct request_list { + /* + * count[], starved[], and wait[] are indexed by + * BLK_RW_SYNC/BLK_RW_ASYNC + */ int count[2]; int starved[2]; int elvpriv; @@ -66,6 +70,11 @@ enum rq_cmd_type_bits { REQ_TYPE_ATA_PC, }; +enum { + BLK_RW_ASYNC = 0, + BLK_RW_SYNC = 1, +}; + /* * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a @@ -103,12 +112,12 @@ enum rq_flag_bits { __REQ_QUIET, /* don't worry about errors */ __REQ_PREEMPT, /* set for "ide_preempt" requests */ __REQ_ORDERED_COLOR, /* is before or after barrier */ - __REQ_RW_SYNC, /* request is sync (O_DIRECT) */ + __REQ_RW_SYNC, /* request is sync (sync write or read) */ __REQ_ALLOCED, /* request came from our alloc pool */ __REQ_RW_META, /* metadata io request */ __REQ_COPY_USER, /* contains copies of user pages */ __REQ_INTEGRITY, /* integrity metadata has been remapped */ - __REQ_UNPLUG, /* unplug queue on submission */ + __REQ_NOIDLE, /* Don't anticipate more IO after this one */ __REQ_NR_BITS, /* stops here */ }; @@ -135,7 +144,7 @@ enum rq_flag_bits { #define REQ_RW_META (1 << __REQ_RW_META) #define REQ_COPY_USER (1 << __REQ_COPY_USER) #define REQ_INTEGRITY (1 << __REQ_INTEGRITY) -#define REQ_UNPLUG (1 << __REQ_UNPLUG) +#define REQ_NOIDLE (1 << __REQ_NOIDLE) #define BLK_MAX_CDB 16 @@ -438,8 +447,8 @@ struct request_queue #define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */ #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ #define QUEUE_FLAG_STOPPED 2 /* queue is stopped */ -#define QUEUE_FLAG_READFULL 3 /* read queue has been filled */ -#define QUEUE_FLAG_WRITEFULL 4 /* write queue has been filled */ +#define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */ +#define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */ #define QUEUE_FLAG_DEAD 5 /* queue being torn down */ #define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */ #define QUEUE_FLAG_PLUGGED 7 /* queue is plugged */ @@ -611,32 +620,42 @@ enum { #define rq_data_dir(rq) ((rq)->cmd_flags & 1) /* - * We regard a request as sync, if it's a READ or a SYNC write. 
+ * We regard a request as sync, if either a read or a sync write */ -#define rq_is_sync(rq) (rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC) +static inline bool rw_is_sync(unsigned int rw_flags) +{ + return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC); +} + +static inline bool rq_is_sync(struct request *rq) +{ + return rw_is_sync(rq->cmd_flags); +} + #define rq_is_meta(rq) ((rq)->cmd_flags & REQ_RW_META) +#define rq_noidle(rq) ((rq)->cmd_flags & REQ_NOIDLE) -static inline int blk_queue_full(struct request_queue *q, int rw) +static inline int blk_queue_full(struct request_queue *q, int sync) { - if (rw == READ) - return test_bit(QUEUE_FLAG_READFULL, &q->queue_flags); - return test_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags); + if (sync) + return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags); + return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags); } -static inline void blk_set_queue_full(struct request_queue *q, int rw) +static inline void blk_set_queue_full(struct request_queue *q, int sync) { - if (rw == READ) - queue_flag_set(QUEUE_FLAG_READFULL, q); + if (sync) + queue_flag_set(QUEUE_FLAG_SYNCFULL, q); else - queue_flag_set(QUEUE_FLAG_WRITEFULL, q); + queue_flag_set(QUEUE_FLAG_ASYNCFULL, q); } -static inline void blk_clear_queue_full(struct request_queue *q, int rw) +static inline void blk_clear_queue_full(struct request_queue *q, int sync) { - if (rw == READ) - queue_flag_clear(QUEUE_FLAG_READFULL, q); + if (sync) + queue_flag_clear(QUEUE_FLAG_SYNCFULL, q); else - queue_flag_clear(QUEUE_FLAG_WRITEFULL, q); + queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q); } diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index 6e915878e88..d960889e92e 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h @@ -144,6 +144,9 @@ struct blk_user_trace_setup { #ifdef __KERNEL__ #if defined(CONFIG_BLK_DEV_IO_TRACE) + +#include <linux/sysfs.h> + struct blk_trace { int trace_state; struct rchan *rchan; @@ -194,6 +197,8 @@ extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, extern int blk_trace_startstop(struct request_queue *q, int start); extern int blk_trace_remove(struct request_queue *q); +extern struct attribute_group blk_trace_attr_group; + #else /* !CONFIG_BLK_DEV_IO_TRACE */ #define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY) #define blk_trace_shutdown(q) do { } while (0) diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 4316a546beb..665fa70e409 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -365,7 +365,10 @@ int cgroup_task_count(const struct cgroup *cgrp); /* Return true if cgrp is a descendant of the task's cgroup */ int cgroup_is_descendant(const struct cgroup *cgrp, struct task_struct *task); -/* Control Group subsystem type. See Documentation/cgroups.txt for details */ +/* + * Control Group subsystem type. 
+ * See Documentation/cgroups/cgroups.txt for details + */ struct cgroup_subsys { struct cgroup_subsys_state *(*create)(struct cgroup_subsys *ss, diff --git a/include/linux/compat.h b/include/linux/compat.h index 9723edd6455..f2ded21f9a3 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -193,10 +193,10 @@ asmlinkage ssize_t compat_sys_writev(unsigned long fd, const struct compat_iovec __user *vec, unsigned long vlen); asmlinkage ssize_t compat_sys_preadv(unsigned long fd, const struct compat_iovec __user *vec, - unsigned long vlen, u32 pos_high, u32 pos_low); + unsigned long vlen, u32 pos_low, u32 pos_high); asmlinkage ssize_t compat_sys_pwritev(unsigned long fd, const struct compat_iovec __user *vec, - unsigned long vlen, u32 pos_high, u32 pos_low); + unsigned long vlen, u32 pos_low, u32 pos_high); int compat_do_execve(char * filename, compat_uptr_t __user *argv, compat_uptr_t __user *envp, struct pt_regs * regs); diff --git a/include/linux/compiler.h b/include/linux/compiler.h index d95da1020f1..37bcb50a4d7 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -68,6 +68,7 @@ struct ftrace_branch_data { unsigned long miss; unsigned long hit; }; + unsigned long miss_hit[2]; }; }; @@ -75,7 +76,8 @@ struct ftrace_branch_data { * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code * to disable branch tracing on a per file basis. */ -#if defined(CONFIG_TRACE_BRANCH_PROFILING) && !defined(DISABLE_BRANCH_PROFILING) +#if defined(CONFIG_TRACE_BRANCH_PROFILING) \ + && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__) void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); #define likely_notrace(x) __builtin_expect(!!(x), 1) @@ -113,7 +115,9 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); * "Define 'is'", Bill Clinton * "Define 'if'", Steven Rostedt */ -#define if(cond) if (__builtin_constant_p((cond)) ? !!(cond) : \ +#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) ) +#define __trace_if(cond) \ + if (__builtin_constant_p((cond)) ? !!(cond) : \ ({ \ int ______r; \ static struct ftrace_branch_data \ @@ -125,10 +129,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); .line = __LINE__, \ }; \ ______r = !!(cond); \ - if (______r) \ - ______f.hit++; \ - else \ - ______f.miss++; \ + ______f.miss_hit[______r]++; \ ______r; \ })) #endif /* CONFIG_PROFILE_ALL_BRANCHES */ diff --git a/include/linux/connector.h b/include/linux/connector.h index fc65d219d88..b9966e64604 100644 --- a/include/linux/connector.h +++ b/include/linux/connector.h @@ -39,8 +39,10 @@ #define CN_IDX_V86D 0x4 #define CN_VAL_V86D_UVESAFB 0x1 #define CN_IDX_BB 0x5 /* BlackBoard, from the TSP GPL sampling framework */ +#define CN_DST_IDX 0x6 +#define CN_DST_VAL 0x1 -#define CN_NETLINK_USERS 6 +#define CN_NETLINK_USERS 7 /* * Maximum connector's message size. 
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index af0e01d4c66..eb5c2ba2f81 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -71,6 +71,9 @@ struct dentry *debugfs_create_bool(const char *name, mode_t mode, struct dentry *debugfs_create_blob(const char *name, mode_t mode, struct dentry *parent, struct debugfs_blob_wrapper *blob); + +bool debugfs_initialized(void); + #else #include <linux/err.h> @@ -183,6 +186,11 @@ static inline struct dentry *debugfs_create_blob(const char *name, mode_t mode, return ERR_PTR(-ENODEV); } +static inline bool debugfs_initialized(void) +{ + return false; +} + #endif #endif diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 8209e08969f..ded2d7c4266 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -116,7 +116,6 @@ void dm_put_device(struct dm_target *ti, struct dm_dev *d); /* * Target features */ -#define DM_TARGET_SUPPORTS_BARRIERS 0x00000001 struct target_type { uint64_t features; @@ -139,6 +138,9 @@ struct target_type { dm_ioctl_fn ioctl; dm_merge_fn merge; dm_busy_fn busy; + + /* For internal device-mapper use. */ + struct list_head list; }; struct io_restrictions { diff --git a/include/linux/dm-dirty-log.h b/include/linux/dm-dirty-log.h index 600c5fb2daa..5e8b11d88f6 100644 --- a/include/linux/dm-dirty-log.h +++ b/include/linux/dm-dirty-log.h @@ -28,6 +28,9 @@ struct dm_dirty_log_type { const char *name; struct module *module; + /* For internal device-mapper use */ + struct list_head list; + int (*ctr)(struct dm_dirty_log *log, struct dm_target *ti, unsigned argc, char **argv); void (*dtr)(struct dm_dirty_log *log); @@ -113,6 +116,16 @@ struct dm_dirty_log_type { */ int (*status)(struct dm_dirty_log *log, status_type_t status_type, char *result, unsigned maxlen); + + /* + * is_remote_recovering is necessary for cluster mirroring. It provides + * a way to detect recovery on another node, so we aren't writing + * concurrently. This function is likely to block (when a cluster log + * is used). 
+ * + * Returns: 0, 1 + */ + int (*is_remote_recovering)(struct dm_dirty_log *log, region_t region); }; int dm_dirty_log_type_register(struct dm_dirty_log_type *type); diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index d7d090d2103..8083b6a36a3 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -115,7 +115,7 @@ static inline u64 dma_get_mask(struct device *dev) { if (dev && dev->dma_mask && *dev->dma_mask) return *dev->dma_mask; - return DMA_32BIT_MASK; + return DMA_BIT_MASK(32); } extern u64 dma_get_required_mask(struct device *dev); diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h index af1dab41674..1a455f1f86d 100644 --- a/include/linux/dma_remapping.h +++ b/include/linux/dma_remapping.h @@ -11,6 +11,7 @@ #define DMA_PTE_READ (1) #define DMA_PTE_WRITE (2) +#define DMA_PTE_SNP (1 << 11) struct intel_iommu; struct dmar_domain; diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 1956c8d46d3..2e2aa3df170 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -23,9 +23,6 @@ #include <linux/device.h> #include <linux/uio.h> -#include <linux/kref.h> -#include <linux/completion.h> -#include <linux/rcupdate.h> #include <linux/dma-mapping.h> /** @@ -205,6 +202,7 @@ struct dma_async_tx_descriptor { /** * struct dma_device - info on the entity supplying DMA services * @chancnt: how many DMA channels are supported + * @privatecnt: how many DMA channels are requested by dma_request_channel * @channels: the list of struct dma_chan * @global_node: list_head for global dma_device_list * @cap_mask: one or more dma_capability flags @@ -227,6 +225,7 @@ struct dma_async_tx_descriptor { struct dma_device { unsigned int chancnt; + unsigned int privatecnt; struct list_head channels; struct list_head global_node; dma_cap_mask_t cap_mask; @@ -291,6 +290,24 @@ static inline void net_dmaengine_put(void) } #endif +#ifdef CONFIG_ASYNC_TX_DMA +#define async_dmaengine_get() dmaengine_get() +#define async_dmaengine_put() dmaengine_put() +#define async_dma_find_channel(type) dma_find_channel(type) +#else +static inline void async_dmaengine_get(void) +{ +} +static inline void async_dmaengine_put(void) +{ +} +static inline struct dma_chan * +async_dma_find_channel(enum dma_transaction_type type) +{ + return NULL; +} +#endif + dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest, void *src, size_t len); dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan, @@ -337,6 +354,13 @@ __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp) set_bit(tx_type, dstp->bits); } +#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask)) +static inline void +__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp) +{ + clear_bit(tx_type, dstp->bits); +} + #define dma_cap_zero(mask) __dma_cap_zero(&(mask)) static inline void __dma_cap_zero(dma_cap_mask_t *dstp) { diff --git a/include/linux/dmar.h b/include/linux/dmar.h index 2f342746895..e397dc342cd 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -34,6 +34,7 @@ struct dmar_drhd_unit { u64 reg_base_addr; /* register base address*/ struct pci_dev **devices; /* target device array */ int devices_cnt; /* target device count */ + u16 segment; /* PCI domain */ u8 ignored:1; /* ignore drhd */ u8 include_all:1; struct intel_iommu *iommu; @@ -44,6 +45,14 @@ extern struct list_head dmar_drhd_units; #define for_each_drhd_unit(drhd) \ list_for_each_entry(drhd, &dmar_drhd_units, list) +#define 
for_each_active_iommu(i, drhd) \ + list_for_each_entry(drhd, &dmar_drhd_units, list) \ + if (i=drhd->iommu, drhd->ignored) {} else + +#define for_each_iommu(i, drhd) \ + list_for_each_entry(drhd, &dmar_drhd_units, list) \ + if (i=drhd->iommu, 0) {} else + extern int dmar_table_init(void); extern int dmar_dev_scope_init(void); @@ -100,6 +109,8 @@ struct irte { #ifdef CONFIG_INTR_REMAP extern int intr_remapping_enabled; extern int enable_intr_remapping(int); +extern void disable_intr_remapping(void); +extern int reenable_intr_remapping(int); extern int get_irte(int irq, struct irte *entry); extern int modify_irte(int irq, struct irte *irte_modified); diff --git a/include/linux/ds1wm.h b/include/linux/ds1wm.h deleted file mode 100644 index d3c65e48a2e..00000000000 --- a/include/linux/ds1wm.h +++ /dev/null @@ -1,12 +0,0 @@ -/* platform data for the DS1WM driver */ - -struct ds1wm_platform_data { - int bus_shift; /* number of shifts needed to calculate the - * offset between DS1WM registers; - * e.g. on h5xxx and h2200 this is 2 - * (registers aligned to 4-byte boundaries), - * while on hx4700 this is 1 */ - int active_high; - void (*enable)(struct platform_device *pdev); - void (*disable)(struct platform_device *pdev); -}; diff --git a/include/linux/dst.h b/include/linux/dst.h new file mode 100644 index 00000000000..e26fed84b1a --- /dev/null +++ b/include/linux/dst.h @@ -0,0 +1,587 @@ +/* + * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru> + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __DST_H +#define __DST_H + +#include <linux/types.h> +#include <linux/connector.h> + +#define DST_NAMELEN 32 +#define DST_NAME "dst" + +enum { + /* Remove node with given id from storage */ + DST_DEL_NODE = 0, + /* Add remote node with given id to the storage */ + DST_ADD_REMOTE, + /* Add local node with given id to the storage to be exported and used by remote peers */ + DST_ADD_EXPORT, + /* Crypto initialization command (hash/cipher used to protect the connection) */ + DST_CRYPTO, + /* Security attributes for given connection (permissions for example) */ + DST_SECURITY, + /* Register given node in the block layer subsystem */ + DST_START, + DST_CMD_MAX +}; + +struct dst_ctl +{ + /* Storage name */ + char name[DST_NAMELEN]; + /* Command flags */ + __u32 flags; + /* Command itself (see above) */ + __u32 cmd; + /* Maximum number of pages per single request in this device */ + __u32 max_pages; + /* Stale/error transaction scanning timeout in milliseconds */ + __u32 trans_scan_timeout; + /* Maximum number of retry sends before completing transaction as broken */ + __u32 trans_max_retries; + /* Storage size */ + __u64 size; +}; + +/* Reply command carries completion status */ +struct dst_ctl_ack +{ + struct cn_msg msg; + int error; + int unused[3]; +}; + +/* + * Unfortunaltely socket address structure is not exported to userspace + * and is redefined there. 
+ */ +#define SADDR_MAX_DATA 128 + +struct saddr { + /* address family, AF_xxx */ + unsigned short sa_family; + /* 14 bytes of protocol address */ + char sa_data[SADDR_MAX_DATA]; + /* Number of bytes used in sa_data */ + unsigned short sa_data_len; +}; + +/* Address structure */ +struct dst_network_ctl +{ + /* Socket type: datagram, stream...*/ + unsigned int type; + /* Let me guess, is it a Jupiter diameter? */ + unsigned int proto; + /* Peer's address */ + struct saddr addr; +}; + +struct dst_crypto_ctl +{ + /* Cipher and hash names */ + char cipher_algo[DST_NAMELEN]; + char hash_algo[DST_NAMELEN]; + + /* Key sizes. Can be zero for digest for example */ + unsigned int cipher_keysize, hash_keysize; + /* Alignment. Calculated by the DST itself. */ + unsigned int crypto_attached_size; + /* Number of threads to perform crypto operations */ + int thread_num; +}; + +/* Export security attributes have this bits checked in when client connects */ +#define DST_PERM_READ (1<<0) +#define DST_PERM_WRITE (1<<1) + +/* + * Right now it is simple model, where each remote address + * is assigned to set of permissions it is allowed to perform. + * In real world block device does not know anything but + * reading and writing, so it should be more than enough. + */ +struct dst_secure_user +{ + unsigned int permissions; + struct saddr addr; +}; + +/* + * Export control command: device to export and network address to accept + * clients to work with given device + */ +struct dst_export_ctl +{ + char device[DST_NAMELEN]; + struct dst_network_ctl ctl; +}; + +enum { + DST_CFG = 1, /* Request remote configuration */ + DST_IO, /* IO command */ + DST_IO_RESPONSE, /* IO response */ + DST_PING, /* Keepalive message */ + DST_NCMD_MAX, +}; + +struct dst_cmd +{ + /* Network command itself, see above */ + __u32 cmd; + /* + * Size of the attached data + * (in most cases, for READ command it means how many bytes were requested) + */ + __u32 size; + /* Crypto size: number of attached bytes with digest/hmac */ + __u32 csize; + /* Here we can carry secret data */ + __u32 reserved; + /* Read/write bits, see how they are encoded in bio structure */ + __u64 rw; + /* BIO flags */ + __u64 flags; + /* Unique command id (like transaction ID) */ + __u64 id; + /* Sector to start IO from */ + __u64 sector; + /* Hash data is placed after this header */ + __u8 hash[0]; +}; + +/* + * Convert command to/from network byte order. + * We do not use hton*() functions, since there is + * no 64-bit implementation. + */ +static inline void dst_convert_cmd(struct dst_cmd *c) +{ + c->cmd = __cpu_to_be32(c->cmd); + c->csize = __cpu_to_be32(c->csize); + c->size = __cpu_to_be32(c->size); + c->sector = __cpu_to_be64(c->sector); + c->id = __cpu_to_be64(c->id); + c->flags = __cpu_to_be64(c->flags); + c->rw = __cpu_to_be64(c->rw); +} + +/* Transaction id */ +typedef __u64 dst_gen_t; + +#ifdef __KERNEL__ + +#include <linux/blkdev.h> +#include <linux/bio.h> +#include <linux/device.h> +#include <linux/mempool.h> +#include <linux/net.h> +#include <linux/poll.h> +#include <linux/rbtree.h> + +#ifdef CONFIG_DST_DEBUG +#define dprintk(f, a...) printk(KERN_NOTICE f, ##a) +#else +static inline void __attribute__ ((format (printf, 1, 2))) + dprintk(const char *fmt, ...) 
{} +#endif + +struct dst_node; + +struct dst_trans +{ + /* DST node we are working with */ + struct dst_node *n; + + /* Entry inside transaction tree */ + struct rb_node trans_entry; + + /* Merlin kills this transaction when this memory cell equals zero */ + atomic_t refcnt; + + /* How this transaction should be processed by crypto engine */ + short enc; + /* How many times this transaction was resent */ + short retries; + /* Completion status */ + int error; + + /* When did we send it to the remote peer */ + long send_time; + + /* My name is... + * Well, computers does not speak, they have unique id instead */ + dst_gen_t gen; + + /* Block IO we are working with */ + struct bio *bio; + + /* Network command for above block IO request */ + struct dst_cmd cmd; +}; + +struct dst_crypto_engine +{ + /* What should we do with all block requests */ + struct crypto_hash *hash; + struct crypto_ablkcipher *cipher; + + /* Pool of pages used to encrypt data into before sending */ + int page_num; + struct page **pages; + + /* What to do with current request */ + int enc; + /* Who we are and where do we go */ + struct scatterlist *src, *dst; + + /* Maximum timeout waiting for encryption to be completed */ + long timeout; + /* IV is a 64-bit sequential counter */ + u64 iv; + + /* Secret data */ + void *private; + + /* Cached temporary data lives here */ + int size; + void *data; +}; + +struct dst_state +{ + /* The main state protection */ + struct mutex state_lock; + + /* Polling machinery for sockets */ + wait_queue_t wait; + wait_queue_head_t *whead; + /* Most of events are being waited here */ + wait_queue_head_t thread_wait; + + /* Who owns this? */ + struct dst_node *node; + + /* Network address for this state */ + struct dst_network_ctl ctl; + + /* Permissions to work with: read-only or rw connection */ + u32 permissions; + + /* Called when we need to clean private data */ + void (* cleanup)(struct dst_state *st); + + /* Used by the server: BIO completion queues BIOs here */ + struct list_head request_list; + spinlock_t request_lock; + + /* Guess what? No, it is not number of planets */ + atomic_t refcnt; + + /* This flags is set when connection should be dropped */ + int need_exit; + + /* + * Socket to work with. Second pointer is used for + * lockless check if socket was changed before performing + * next action (like working with cached polling result) + */ + struct socket *socket, *read_socket; + + /* Cached preallocated data */ + void *data; + unsigned int size; + + /* Currently processed command */ + struct dst_cmd cmd; +}; + +struct dst_info +{ + /* Device size */ + u64 size; + + /* Local device name for export devices */ + char local[DST_NAMELEN]; + + /* Network setup */ + struct dst_network_ctl net; + + /* Sysfs bits use this */ + struct device device; +}; + +struct dst_node +{ + struct list_head node_entry; + + /* Hi, my name is stored here */ + char name[DST_NAMELEN]; + /* My cache name is stored here */ + char cache_name[DST_NAMELEN]; + + /* Block device attached to given node. + * Only valid for exporting nodes */ + struct block_device *bdev; + /* Network state machine for given peer */ + struct dst_state *state; + + /* Block IO machinery */ + struct request_queue *queue; + struct gendisk *disk; + + /* Number of threads in processing pool */ + int thread_num; + /* Maximum number of pages in single IO */ + int max_pages; + + /* I'm that big in bytes */ + loff_t size; + + /* Exported to userspace node information */ + struct dst_info *info; + + /* + * Security attribute list. 
+ * Used only by exporting node currently. + */ + struct list_head security_list; + struct mutex security_lock; + + /* + * When this unerflows below zero, university collapses. + * But this will not happen, since node will be freed, + * when reference counter reaches zero. + */ + atomic_t refcnt; + + /* How precisely should I be started? */ + int (*start)(struct dst_node *); + + /* Crypto capabilities */ + struct dst_crypto_ctl crypto; + u8 *hash_key; + u8 *cipher_key; + + /* Pool of processing thread */ + struct thread_pool *pool; + + /* Transaction IDs live here */ + atomic_long_t gen; + + /* + * How frequently and how many times transaction + * tree should be scanned to drop stale objects. + */ + long trans_scan_timeout; + int trans_max_retries; + + /* Small gnomes live here */ + struct rb_root trans_root; + struct mutex trans_lock; + + /* + * Transaction cache/memory pool. + * It is big enough to contain not only transaction + * itself, but additional crypto data (digest/hmac). + */ + struct kmem_cache *trans_cache; + mempool_t *trans_pool; + + /* This entity scans transaction tree */ + struct delayed_work trans_work; + + wait_queue_head_t wait; +}; + +/* Kernel representation of the security attribute */ +struct dst_secure +{ + struct list_head sec_entry; + struct dst_secure_user sec; +}; + +int dst_process_bio(struct dst_node *n, struct bio *bio); + +int dst_node_init_connected(struct dst_node *n, struct dst_network_ctl *r); +int dst_node_init_listened(struct dst_node *n, struct dst_export_ctl *le); + +static inline struct dst_state *dst_state_get(struct dst_state *st) +{ + BUG_ON(atomic_read(&st->refcnt) == 0); + atomic_inc(&st->refcnt); + return st; +} + +void dst_state_put(struct dst_state *st); + +struct dst_state *dst_state_alloc(struct dst_node *n); +int dst_state_socket_create(struct dst_state *st); +void dst_state_socket_release(struct dst_state *st); + +void dst_state_exit_connected(struct dst_state *st); + +int dst_state_schedule_receiver(struct dst_state *st); + +void dst_dump_addr(struct socket *sk, struct sockaddr *sa, char *str); + +static inline void dst_state_lock(struct dst_state *st) +{ + mutex_lock(&st->state_lock); +} + +static inline void dst_state_unlock(struct dst_state *st) +{ + mutex_unlock(&st->state_lock); +} + +void dst_poll_exit(struct dst_state *st); +int dst_poll_init(struct dst_state *st); + +static inline unsigned int dst_state_poll(struct dst_state *st) +{ + unsigned int revents = POLLHUP | POLLERR; + + dst_state_lock(st); + if (st->socket) + revents = st->socket->ops->poll(NULL, st->socket, NULL); + dst_state_unlock(st); + + return revents; +} + +static inline int dst_thread_setup(void *private, void *data) +{ + return 0; +} + +void dst_node_put(struct dst_node *n); + +static inline struct dst_node *dst_node_get(struct dst_node *n) +{ + atomic_inc(&n->refcnt); + return n; +} + +int dst_data_recv(struct dst_state *st, void *data, unsigned int size); +int dst_recv_cdata(struct dst_state *st, void *cdata); +int dst_data_send_header(struct socket *sock, + void *data, unsigned int size, int more); + +int dst_send_bio(struct dst_state *st, struct dst_cmd *cmd, struct bio *bio); + +int dst_process_io(struct dst_state *st); +int dst_export_crypto(struct dst_node *n, struct bio *bio); +int dst_export_send_bio(struct bio *bio); +int dst_start_export(struct dst_node *n); + +int __init dst_export_init(void); +void dst_export_exit(void); + +/* Private structure for export block IO requests */ +struct dst_export_priv +{ + struct list_head request_entry; + struct 
dst_state *state; + struct bio *bio; + struct dst_cmd cmd; +}; + +static inline void dst_trans_get(struct dst_trans *t) +{ + atomic_inc(&t->refcnt); +} + +struct dst_trans *dst_trans_search(struct dst_node *node, dst_gen_t gen); +int dst_trans_remove(struct dst_trans *t); +int dst_trans_remove_nolock(struct dst_trans *t); +void dst_trans_put(struct dst_trans *t); + +/* + * Convert bio into network command. + */ +static inline void dst_bio_to_cmd(struct bio *bio, struct dst_cmd *cmd, + u32 command, u64 id) +{ + cmd->cmd = command; + cmd->flags = (bio->bi_flags << BIO_POOL_BITS) >> BIO_POOL_BITS; + cmd->rw = bio->bi_rw; + cmd->size = bio->bi_size; + cmd->csize = 0; + cmd->id = id; + cmd->sector = bio->bi_sector; +}; + +int dst_trans_send(struct dst_trans *t); +int dst_trans_crypto(struct dst_trans *t); + +int dst_node_crypto_init(struct dst_node *n, struct dst_crypto_ctl *ctl); +void dst_node_crypto_exit(struct dst_node *n); + +static inline int dst_need_crypto(struct dst_node *n) +{ + struct dst_crypto_ctl *c = &n->crypto; + /* + * Logical OR is appropriate here, but boolean one produces + * more optimal code, so it is used instead. + */ + return (c->hash_algo[0] | c->cipher_algo[0]); +} + +int dst_node_trans_init(struct dst_node *n, unsigned int size); +void dst_node_trans_exit(struct dst_node *n); + +/* + * Pool of threads. + * Ready list contains threads currently free to be used, + * active one contains threads with some work scheduled for them. + * Caller can wait in given queue when thread is ready. + */ +struct thread_pool +{ + int thread_num; + struct mutex thread_lock; + struct list_head ready_list, active_list; + + wait_queue_head_t wait; +}; + +void thread_pool_del_worker(struct thread_pool *p); +void thread_pool_del_worker_id(struct thread_pool *p, unsigned int id); +int thread_pool_add_worker(struct thread_pool *p, + char *name, + unsigned int id, + void *(* init)(void *data), + void (* cleanup)(void *data), + void *data); + +void thread_pool_destroy(struct thread_pool *p); +struct thread_pool *thread_pool_create(int num, char *name, + void *(* init)(void *data), + void (* cleanup)(void *data), + void *data); + +int thread_pool_schedule(struct thread_pool *p, + int (* setup)(void *stored_private, void *setup_data), + int (* action)(void *stored_private, void *setup_data), + void *setup_data, long timeout); +int thread_pool_schedule_private(struct thread_pool *p, + int (* setup)(void *private, void *data), + int (* action)(void *private, void *data), + void *data, long timeout, void *id); + +#endif /* __KERNEL__ */ +#endif /* __DST_H */ diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h index d797dde247f..c8aad713a04 100644 --- a/include/linux/dw_dmac.h +++ b/include/linux/dw_dmac.h @@ -74,4 +74,23 @@ struct dw_dma_slave { #define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */ #define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */ +/* DMA API extensions */ +struct dw_cyclic_desc { + struct dw_desc **desc; + unsigned long periods; + void (*period_callback)(void *param); + void *period_callback_param; +}; + +struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, + dma_addr_t buf_addr, size_t buf_len, size_t period_len, + enum dma_data_direction direction); +void dw_dma_cyclic_free(struct dma_chan *chan); +int dw_dma_cyclic_start(struct dma_chan *chan); +void dw_dma_cyclic_stop(struct dma_chan *chan); + +dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan); + +dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan); + #endif /* 
DW_DMAC_H */ diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 7a204256b15..c59b769f62b 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -116,6 +116,7 @@ extern void elv_abort_queue(struct request_queue *); extern void elv_completed_request(struct request_queue *, struct request *); extern int elv_set_request(struct request_queue *, struct request *, gfp_t); extern void elv_put_request(struct request_queue *, struct request *); +extern void elv_drain_elevator(struct request_queue *); /* * io scheduler registration diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h index e263acaa405..634a5e5aba3 100644 --- a/include/linux/ext3_fs.h +++ b/include/linux/ext3_fs.h @@ -208,6 +208,7 @@ static inline __u32 ext3_mask_flags(umode_t mode, __u32 flags) #define EXT3_STATE_JDATA 0x00000001 /* journaled data exists */ #define EXT3_STATE_NEW 0x00000002 /* inode is newly created */ #define EXT3_STATE_XATTR 0x00000004 /* has in-inode xattrs */ +#define EXT3_STATE_FLUSH_ON_CLOSE 0x00000008 /* Used to pass group descriptor data when online resize is done */ struct ext3_new_group_input { diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h index 09d6c5bbddd..a2ec74bc481 100644 --- a/include/linux/fdtable.h +++ b/include/linux/fdtable.h @@ -5,12 +5,14 @@ #ifndef __LINUX_FDTABLE_H #define __LINUX_FDTABLE_H -#include <asm/atomic.h> #include <linux/posix_types.h> #include <linux/compiler.h> #include <linux/spinlock.h> #include <linux/rcupdate.h> #include <linux/types.h> +#include <linux/init.h> + +#include <asm/atomic.h> /* * The default fd array needs to be at least BITS_PER_LONG, diff --git a/include/linux/fs.h b/include/linux/fs.h index a09e17c8f5f..562d2855cf3 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -95,8 +95,12 @@ struct inodes_stat_t { #define SWRITE 3 /* for ll_rw_block() - wait for buffer lock */ #define READ_SYNC (READ | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG)) #define READ_META (READ | (1 << BIO_RW_META)) -#define WRITE_SYNC (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG)) -#define SWRITE_SYNC (SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG)) +#define WRITE_SYNC_PLUG (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE)) +#define WRITE_SYNC (WRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG)) +#define WRITE_ODIRECT (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG)) +#define SWRITE_SYNC_PLUG \ + (SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE)) +#define SWRITE_SYNC (SWRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG)) #define WRITE_BARRIER (WRITE | (1 << BIO_RW_BARRIER)) #define DISCARD_NOBARRIER (1 << BIO_RW_DISCARD) #define DISCARD_BARRIER ((1 << BIO_RW_DISCARD) | (1 << BIO_RW_BARRIER)) @@ -1695,6 +1699,9 @@ struct file_system_type { struct lock_class_key i_alloc_sem_key; }; +extern int get_sb_ns(struct file_system_type *fs_type, int flags, void *data, + int (*fill_super)(struct super_block *, void *, int), + struct vfsmount *mnt); extern int get_sb_bdev(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, int (*fill_super)(struct super_block *, void *, int), @@ -2337,19 +2344,7 @@ ssize_t simple_transaction_read(struct file *file, char __user *buf, size_t size, loff_t *pos); int simple_transaction_release(struct inode *inode, struct file *file); -static inline void simple_transaction_set(struct file *file, size_t n) -{ - struct simple_transaction_argresp *ar = file->private_data; - - BUG_ON(n > SIMPLE_TRANSACTION_LIMIT); - - /* - * The barrier ensures that ar->size will really remain zero until 
- * ar->data is ready for reading. - */ - smp_mb(); - ar->size = n; -} +void simple_transaction_set(struct file *file, size_t n); /* * simple attribute files @@ -2396,27 +2391,6 @@ ssize_t simple_attr_read(struct file *file, char __user *buf, ssize_t simple_attr_write(struct file *file, const char __user *buf, size_t len, loff_t *ppos); - -#ifdef CONFIG_SECURITY -static inline char *alloc_secdata(void) -{ - return (char *)get_zeroed_page(GFP_KERNEL); -} - -static inline void free_secdata(void *secdata) -{ - free_page((unsigned long)secdata); -} -#else -static inline char *alloc_secdata(void) -{ - return (char *)1; -} - -static inline void free_secdata(void *secdata) -{ } -#endif /* CONFIG_SECURITY */ - struct ctl_table; int proc_nr_files(struct ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos); diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h new file mode 100644 index 00000000000..84d3532dd3e --- /dev/null +++ b/include/linux/fscache-cache.h @@ -0,0 +1,505 @@ +/* General filesystem caching backing cache interface + * + * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * NOTE!!! See: + * + * Documentation/filesystems/caching/backend-api.txt + * + * for a description of the cache backend interface declared here. + */ + +#ifndef _LINUX_FSCACHE_CACHE_H +#define _LINUX_FSCACHE_CACHE_H + +#include <linux/fscache.h> +#include <linux/sched.h> +#include <linux/slow-work.h> + +#define NR_MAXCACHES BITS_PER_LONG + +struct fscache_cache; +struct fscache_cache_ops; +struct fscache_object; +struct fscache_operation; + +/* + * cache tag definition + */ +struct fscache_cache_tag { + struct list_head link; + struct fscache_cache *cache; /* cache referred to by this tag */ + unsigned long flags; +#define FSCACHE_TAG_RESERVED 0 /* T if tag is reserved for a cache */ + atomic_t usage; + char name[0]; /* tag name */ +}; + +/* + * cache definition + */ +struct fscache_cache { + const struct fscache_cache_ops *ops; + struct fscache_cache_tag *tag; /* tag representing this cache */ + struct kobject *kobj; /* system representation of this cache */ + struct list_head link; /* link in list of caches */ + size_t max_index_size; /* maximum size of index data */ + char identifier[36]; /* cache label */ + + /* node management */ + struct work_struct op_gc; /* operation garbage collector */ + struct list_head object_list; /* list of data/index objects */ + struct list_head op_gc_list; /* list of ops to be deleted */ + spinlock_t object_list_lock; + spinlock_t op_gc_list_lock; + atomic_t object_count; /* no. 
of live objects in this cache */ + struct fscache_object *fsdef; /* object for the fsdef index */ + unsigned long flags; +#define FSCACHE_IOERROR 0 /* cache stopped on I/O error */ +#define FSCACHE_CACHE_WITHDRAWN 1 /* cache has been withdrawn */ +}; + +extern wait_queue_head_t fscache_cache_cleared_wq; + +/* + * operation to be applied to a cache object + * - retrieval initiation operations are done in the context of the process + * that issued them, and not in an async thread pool + */ +typedef void (*fscache_operation_release_t)(struct fscache_operation *op); +typedef void (*fscache_operation_processor_t)(struct fscache_operation *op); + +struct fscache_operation { + union { + struct work_struct fast_work; /* record for fast ops */ + struct slow_work slow_work; /* record for (very) slow ops */ + }; + struct list_head pend_link; /* link in object->pending_ops */ + struct fscache_object *object; /* object to be operated upon */ + + unsigned long flags; +#define FSCACHE_OP_TYPE 0x000f /* operation type */ +#define FSCACHE_OP_FAST 0x0001 /* - fast op, processor may not sleep for disk */ +#define FSCACHE_OP_SLOW 0x0002 /* - (very) slow op, processor may sleep for disk */ +#define FSCACHE_OP_MYTHREAD 0x0003 /* - processing is done be issuing thread, not pool */ +#define FSCACHE_OP_WAITING 4 /* cleared when op is woken */ +#define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */ +#define FSCACHE_OP_DEAD 6 /* op is now dead */ + + atomic_t usage; + unsigned debug_id; /* debugging ID */ + + /* operation processor callback + * - can be NULL if FSCACHE_OP_WAITING is going to be used to perform + * the op in a non-pool thread */ + fscache_operation_processor_t processor; + + /* operation releaser */ + fscache_operation_release_t release; +}; + +extern atomic_t fscache_op_debug_id; +extern const struct slow_work_ops fscache_op_slow_work_ops; + +extern void fscache_enqueue_operation(struct fscache_operation *); +extern void fscache_put_operation(struct fscache_operation *); + +/** + * fscache_operation_init - Do basic initialisation of an operation + * @op: The operation to initialise + * @release: The release function to assign + * + * Do basic initialisation of an operation. The caller must still set flags, + * object, either fast_work or slow_work if necessary, and processor if needed. + */ +static inline void fscache_operation_init(struct fscache_operation *op, + fscache_operation_release_t release) +{ + atomic_set(&op->usage, 1); + op->debug_id = atomic_inc_return(&fscache_op_debug_id); + op->release = release; + INIT_LIST_HEAD(&op->pend_link); +} + +/** + * fscache_operation_init_slow - Do additional initialisation of a slow op + * @op: The operation to initialise + * @processor: The processor function to assign + * + * Do additional initialisation of an operation as required for slow work. 
+ */ +static inline +void fscache_operation_init_slow(struct fscache_operation *op, + fscache_operation_processor_t processor) +{ + op->processor = processor; + slow_work_init(&op->slow_work, &fscache_op_slow_work_ops); +} + +/* + * data read operation + */ +struct fscache_retrieval { + struct fscache_operation op; + struct address_space *mapping; /* netfs pages */ + fscache_rw_complete_t end_io_func; /* function to call on I/O completion */ + void *context; /* netfs read context (pinned) */ + struct list_head to_do; /* list of things to be done by the backend */ + unsigned long start_time; /* time at which retrieval started */ +}; + +typedef int (*fscache_page_retrieval_func_t)(struct fscache_retrieval *op, + struct page *page, + gfp_t gfp); + +typedef int (*fscache_pages_retrieval_func_t)(struct fscache_retrieval *op, + struct list_head *pages, + unsigned *nr_pages, + gfp_t gfp); + +/** + * fscache_get_retrieval - Get an extra reference on a retrieval operation + * @op: The retrieval operation to get a reference on + * + * Get an extra reference on a retrieval operation. + */ +static inline +struct fscache_retrieval *fscache_get_retrieval(struct fscache_retrieval *op) +{ + atomic_inc(&op->op.usage); + return op; +} + +/** + * fscache_enqueue_retrieval - Enqueue a retrieval operation for processing + * @op: The retrieval operation affected + * + * Enqueue a retrieval operation for processing by the FS-Cache thread pool. + */ +static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op) +{ + fscache_enqueue_operation(&op->op); +} + +/** + * fscache_put_retrieval - Drop a reference to a retrieval operation + * @op: The retrieval operation affected + * + * Drop a reference to a retrieval operation. + */ +static inline void fscache_put_retrieval(struct fscache_retrieval *op) +{ + fscache_put_operation(&op->op); +} + +/* + * cached page storage work item + * - used to do three things: + * - batch writes to the cache + * - do cache writes asynchronously + * - defer writes until cache object lookup completion + */ +struct fscache_storage { + struct fscache_operation op; + pgoff_t store_limit; /* don't write more than this */ +}; + +/* + * cache operations + */ +struct fscache_cache_ops { + /* name of cache provider */ + const char *name; + + /* allocate an object record for a cookie */ + struct fscache_object *(*alloc_object)(struct fscache_cache *cache, + struct fscache_cookie *cookie); + + /* look up the object for a cookie */ + void (*lookup_object)(struct fscache_object *object); + + /* finished looking up */ + void (*lookup_complete)(struct fscache_object *object); + + /* increment the usage count on this object (may fail if unmounting) */ + struct fscache_object *(*grab_object)(struct fscache_object *object); + + /* pin an object in the cache */ + int (*pin_object)(struct fscache_object *object); + + /* unpin an object in the cache */ + void (*unpin_object)(struct fscache_object *object); + + /* store the updated auxilliary data on an object */ + void (*update_object)(struct fscache_object *object); + + /* discard the resources pinned by an object and effect retirement if + * necessary */ + void (*drop_object)(struct fscache_object *object); + + /* dispose of a reference to an object */ + void (*put_object)(struct fscache_object *object); + + /* sync a cache */ + void (*sync_cache)(struct fscache_cache *cache); + + /* notification that the attributes of a non-index object (such as + * i_size) have changed */ + int (*attr_changed)(struct fscache_object *object); + + /* reserve 
space for an object's data and associated metadata */ + int (*reserve_space)(struct fscache_object *object, loff_t i_size); + + /* request a backing block for a page be read or allocated in the + * cache */ + fscache_page_retrieval_func_t read_or_alloc_page; + + /* request backing blocks for a list of pages be read or allocated in + * the cache */ + fscache_pages_retrieval_func_t read_or_alloc_pages; + + /* request a backing block for a page be allocated in the cache so that + * it can be written directly */ + fscache_page_retrieval_func_t allocate_page; + + /* request backing blocks for pages be allocated in the cache so that + * they can be written directly */ + fscache_pages_retrieval_func_t allocate_pages; + + /* write a page to its backing block in the cache */ + int (*write_page)(struct fscache_storage *op, struct page *page); + + /* detach backing block from a page (optional) + * - must release the cookie lock before returning + * - may sleep + */ + void (*uncache_page)(struct fscache_object *object, + struct page *page); + + /* dissociate a cache from all the pages it was backing */ + void (*dissociate_pages)(struct fscache_cache *cache); +}; + +/* + * data file or index object cookie + * - a file will only appear in one cache + * - a request to cache a file may or may not be honoured, subject to + * constraints such as disk space + * - indices are created on disk just-in-time + */ +struct fscache_cookie { + atomic_t usage; /* number of users of this cookie */ + atomic_t n_children; /* number of children of this cookie */ + spinlock_t lock; + struct hlist_head backing_objects; /* object(s) backing this file/index */ + const struct fscache_cookie_def *def; /* definition */ + struct fscache_cookie *parent; /* parent of this entry */ + void *netfs_data; /* back pointer to netfs */ + struct radix_tree_root stores; /* pages to be stored on this cookie */ +#define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */ + + unsigned long flags; +#define FSCACHE_COOKIE_LOOKING_UP 0 /* T if non-index cookie being looked up still */ +#define FSCACHE_COOKIE_CREATING 1 /* T if non-index object being created still */ +#define FSCACHE_COOKIE_NO_DATA_YET 2 /* T if new object with no cached data yet */ +#define FSCACHE_COOKIE_PENDING_FILL 3 /* T if pending initial fill on object */ +#define FSCACHE_COOKIE_FILLING 4 /* T if filling object incrementally */ +#define FSCACHE_COOKIE_UNAVAILABLE 5 /* T if cookie is unavailable (error, etc) */ +}; + +extern struct fscache_cookie fscache_fsdef_index; + +/* + * on-disk cache file or index handle + */ +struct fscache_object { + enum fscache_object_state { + FSCACHE_OBJECT_INIT, /* object in initial unbound state */ + FSCACHE_OBJECT_LOOKING_UP, /* looking up object */ + FSCACHE_OBJECT_CREATING, /* creating object */ + + /* active states */ + FSCACHE_OBJECT_AVAILABLE, /* cleaning up object after creation */ + FSCACHE_OBJECT_ACTIVE, /* object is usable */ + FSCACHE_OBJECT_UPDATING, /* object is updating */ + + /* terminal states */ + FSCACHE_OBJECT_DYING, /* object waiting for accessors to finish */ + FSCACHE_OBJECT_LC_DYING, /* object cleaning up after lookup/create */ + FSCACHE_OBJECT_ABORT_INIT, /* abort the init state */ + FSCACHE_OBJECT_RELEASING, /* releasing object */ + FSCACHE_OBJECT_RECYCLING, /* retiring object */ + FSCACHE_OBJECT_WITHDRAWING, /* withdrawing object */ + FSCACHE_OBJECT_DEAD, /* object is now dead */ + } state; + + int debug_id; /* debugging ID */ + int n_children; /* number of child objects */ + int n_ops; /* number of 
ops outstanding on object */ + int n_obj_ops; /* number of object ops outstanding on object */ + int n_in_progress; /* number of ops in progress */ + int n_exclusive; /* number of exclusive ops queued */ + spinlock_t lock; /* state and operations lock */ + + unsigned long lookup_jif; /* time at which lookup started */ + unsigned long event_mask; /* events this object is interested in */ + unsigned long events; /* events to be processed by this object + * (order is important - using fls) */ +#define FSCACHE_OBJECT_EV_REQUEUE 0 /* T if object should be requeued */ +#define FSCACHE_OBJECT_EV_UPDATE 1 /* T if object should be updated */ +#define FSCACHE_OBJECT_EV_CLEARED 2 /* T if accessors all gone */ +#define FSCACHE_OBJECT_EV_ERROR 3 /* T if fatal error occurred during processing */ +#define FSCACHE_OBJECT_EV_RELEASE 4 /* T if netfs requested object release */ +#define FSCACHE_OBJECT_EV_RETIRE 5 /* T if netfs requested object retirement */ +#define FSCACHE_OBJECT_EV_WITHDRAW 6 /* T if cache requested object withdrawal */ + + unsigned long flags; +#define FSCACHE_OBJECT_LOCK 0 /* T if object is busy being processed */ +#define FSCACHE_OBJECT_PENDING_WRITE 1 /* T if object has pending write */ +#define FSCACHE_OBJECT_WAITING 2 /* T if object is waiting on its parent */ + + struct list_head cache_link; /* link in cache->object_list */ + struct hlist_node cookie_link; /* link in cookie->backing_objects */ + struct fscache_cache *cache; /* cache that supplied this object */ + struct fscache_cookie *cookie; /* netfs's file/index object */ + struct fscache_object *parent; /* parent object */ + struct slow_work work; /* attention scheduling record */ + struct list_head dependents; /* FIFO of dependent objects */ + struct list_head dep_link; /* link in parent's dependents list */ + struct list_head pending_ops; /* unstarted operations on this object */ + pgoff_t store_limit; /* current storage limit */ +}; + +extern const char *fscache_object_states[]; + +#define fscache_object_is_active(obj) \ + (!test_bit(FSCACHE_IOERROR, &(obj)->cache->flags) && \ + (obj)->state >= FSCACHE_OBJECT_AVAILABLE && \ + (obj)->state < FSCACHE_OBJECT_DYING) + +extern const struct slow_work_ops fscache_object_slow_work_ops; + +/** + * fscache_object_init - Initialise a cache object description + * @object: Object description + * + * Initialise a cache object description to its basic values. + * + * See Documentation/filesystems/caching/backend-api.txt for a complete + * description. 
+ */ +static inline +void fscache_object_init(struct fscache_object *object, + struct fscache_cookie *cookie, + struct fscache_cache *cache) +{ + atomic_inc(&cache->object_count); + + object->state = FSCACHE_OBJECT_INIT; + spin_lock_init(&object->lock); + INIT_LIST_HEAD(&object->cache_link); + INIT_HLIST_NODE(&object->cookie_link); + vslow_work_init(&object->work, &fscache_object_slow_work_ops); + INIT_LIST_HEAD(&object->dependents); + INIT_LIST_HEAD(&object->dep_link); + INIT_LIST_HEAD(&object->pending_ops); + object->n_children = 0; + object->n_ops = object->n_in_progress = object->n_exclusive = 0; + object->events = object->event_mask = 0; + object->flags = 0; + object->store_limit = 0; + object->cache = cache; + object->cookie = cookie; + object->parent = NULL; +} + +extern void fscache_object_lookup_negative(struct fscache_object *object); +extern void fscache_obtained_object(struct fscache_object *object); + +/** + * fscache_object_destroyed - Note destruction of an object in a cache + * @cache: The cache from which the object came + * + * Note the destruction and deallocation of an object record in a cache. + */ +static inline void fscache_object_destroyed(struct fscache_cache *cache) +{ + if (atomic_dec_and_test(&cache->object_count)) + wake_up_all(&fscache_cache_cleared_wq); +} + +/** + * fscache_object_lookup_error - Note an object encountered an error + * @object: The object on which the error was encountered + * + * Note that an object encountered a fatal error (usually an I/O error) and + * that it should be withdrawn as soon as possible. + */ +static inline void fscache_object_lookup_error(struct fscache_object *object) +{ + set_bit(FSCACHE_OBJECT_EV_ERROR, &object->events); +} + +/** + * fscache_set_store_limit - Set the maximum size to be stored in an object + * @object: The object to set the maximum on + * @i_size: The limit to set in bytes + * + * Set the maximum size an object is permitted to reach, implying the highest + * byte that may be written. Intended to be called by the attr_changed() op. + * + * See Documentation/filesystems/caching/backend-api.txt for a complete + * description. + */ +static inline +void fscache_set_store_limit(struct fscache_object *object, loff_t i_size) +{ + object->store_limit = i_size >> PAGE_SHIFT; + if (i_size & ~PAGE_MASK) + object->store_limit++; +} + +/** + * fscache_end_io - End a retrieval operation on a page + * @op: The FS-Cache operation covering the retrieval + * @page: The page that was to be fetched + * @error: The error code (0 if successful) + * + * Note the end of an operation to retrieve a page, as covered by a particular + * operation record. + */ +static inline void fscache_end_io(struct fscache_retrieval *op, + struct page *page, int error) +{ + op->end_io_func(page, op->context, error); +} + +/* + * out-of-line cache backend functions + */ +extern void fscache_init_cache(struct fscache_cache *cache, + const struct fscache_cache_ops *ops, + const char *idfmt, + ...) 
__attribute__ ((format (printf, 3, 4))); + +extern int fscache_add_cache(struct fscache_cache *cache, + struct fscache_object *fsdef, + const char *tagname); +extern void fscache_withdraw_cache(struct fscache_cache *cache); + +extern void fscache_io_error(struct fscache_cache *cache); + +extern void fscache_mark_pages_cached(struct fscache_retrieval *op, + struct pagevec *pagevec); + +extern enum fscache_checkaux fscache_check_aux(struct fscache_object *object, + const void *data, + uint16_t datalen); + +#endif /* _LINUX_FSCACHE_CACHE_H */ diff --git a/include/linux/fscache.h b/include/linux/fscache.h new file mode 100644 index 00000000000..6d8ee466e0a --- /dev/null +++ b/include/linux/fscache.h @@ -0,0 +1,618 @@ +/* General filesystem caching interface + * + * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * NOTE!!! See: + * + * Documentation/filesystems/caching/netfs-api.txt + * + * for a description of the network filesystem interface declared here. + */ + +#ifndef _LINUX_FSCACHE_H +#define _LINUX_FSCACHE_H + +#include <linux/fs.h> +#include <linux/list.h> +#include <linux/pagemap.h> +#include <linux/pagevec.h> + +#if defined(CONFIG_FSCACHE) || defined(CONFIG_FSCACHE_MODULE) +#define fscache_available() (1) +#define fscache_cookie_valid(cookie) (cookie) +#else +#define fscache_available() (0) +#define fscache_cookie_valid(cookie) (0) +#endif + + +/* + * overload PG_private_2 to give us PG_fscache - this is used to indicate that + * a page is currently backed by a local disk cache + */ +#define PageFsCache(page) PagePrivate2((page)) +#define SetPageFsCache(page) SetPagePrivate2((page)) +#define ClearPageFsCache(page) ClearPagePrivate2((page)) +#define TestSetPageFsCache(page) TestSetPagePrivate2((page)) +#define TestClearPageFsCache(page) TestClearPagePrivate2((page)) + +/* pattern used to fill dead space in an index entry */ +#define FSCACHE_INDEX_DEADFILL_PATTERN 0x79 + +struct pagevec; +struct fscache_cache_tag; +struct fscache_cookie; +struct fscache_netfs; + +typedef void (*fscache_rw_complete_t)(struct page *page, + void *context, + int error); + +/* result of index entry consultation */ +enum fscache_checkaux { + FSCACHE_CHECKAUX_OKAY, /* entry okay as is */ + FSCACHE_CHECKAUX_NEEDS_UPDATE, /* entry requires update */ + FSCACHE_CHECKAUX_OBSOLETE, /* entry requires deletion */ +}; + +/* + * fscache cookie definition + */ +struct fscache_cookie_def { + /* name of cookie type */ + char name[16]; + + /* cookie type */ + uint8_t type; +#define FSCACHE_COOKIE_TYPE_INDEX 0 +#define FSCACHE_COOKIE_TYPE_DATAFILE 1 + + /* select the cache into which to insert an entry in this index + * - optional + * - should return a cache identifier or NULL to cause the cache to be + * inherited from the parent if possible or the first cache picked + * for a non-index file if not + */ + struct fscache_cache_tag *(*select_cache)( + const void *parent_netfs_data, + const void *cookie_netfs_data); + + /* get an index key + * - should store the key data in the buffer + * - should return the amount of amount stored + * - not permitted to return an error + * - the netfs data from the cookie being used as the source is + * presented + */ + uint16_t (*get_key)(const void *cookie_netfs_data, 
+ void *buffer, + uint16_t bufmax); + + /* get certain file attributes from the netfs data + * - this function can be absent for an index + * - not permitted to return an error + * - the netfs data from the cookie being used as the source is + * presented + */ + void (*get_attr)(const void *cookie_netfs_data, uint64_t *size); + + /* get the auxilliary data from netfs data + * - this function can be absent if the index carries no state data + * - should store the auxilliary data in the buffer + * - should return the amount of amount stored + * - not permitted to return an error + * - the netfs data from the cookie being used as the source is + * presented + */ + uint16_t (*get_aux)(const void *cookie_netfs_data, + void *buffer, + uint16_t bufmax); + + /* consult the netfs about the state of an object + * - this function can be absent if the index carries no state data + * - the netfs data from the cookie being used as the target is + * presented, as is the auxilliary data + */ + enum fscache_checkaux (*check_aux)(void *cookie_netfs_data, + const void *data, + uint16_t datalen); + + /* get an extra reference on a read context + * - this function can be absent if the completion function doesn't + * require a context + */ + void (*get_context)(void *cookie_netfs_data, void *context); + + /* release an extra reference on a read context + * - this function can be absent if the completion function doesn't + * require a context + */ + void (*put_context)(void *cookie_netfs_data, void *context); + + /* indicate pages that now have cache metadata retained + * - this function should mark the specified pages as now being cached + * - the pages will have been marked with PG_fscache before this is + * called, so this is optional + */ + void (*mark_pages_cached)(void *cookie_netfs_data, + struct address_space *mapping, + struct pagevec *cached_pvec); + + /* indicate the cookie is no longer cached + * - this function is called when the backing store currently caching + * a cookie is removed + * - the netfs should use this to clean up any markers indicating + * cached pages + * - this is mandatory for any object that may have data + */ + void (*now_uncached)(void *cookie_netfs_data); +}; + +/* + * fscache cached network filesystem type + * - name, version and ops must be filled in before registration + * - all other fields will be set during registration + */ +struct fscache_netfs { + uint32_t version; /* indexing version */ + const char *name; /* filesystem name */ + struct fscache_cookie *primary_index; + struct list_head link; /* internal link */ +}; + +/* + * slow-path functions for when there is actually caching available, and the + * netfs does actually have a valid token + * - these are not to be called directly + * - these are undefined symbols when FS-Cache is not configured and the + * optimiser takes care of not using them + */ +extern int __fscache_register_netfs(struct fscache_netfs *); +extern void __fscache_unregister_netfs(struct fscache_netfs *); +extern struct fscache_cache_tag *__fscache_lookup_cache_tag(const char *); +extern void __fscache_release_cache_tag(struct fscache_cache_tag *); + +extern struct fscache_cookie *__fscache_acquire_cookie( + struct fscache_cookie *, + const struct fscache_cookie_def *, + void *); +extern void __fscache_relinquish_cookie(struct fscache_cookie *, int); +extern void __fscache_update_cookie(struct fscache_cookie *); +extern int __fscache_attr_changed(struct fscache_cookie *); +extern int __fscache_read_or_alloc_page(struct fscache_cookie *, + struct 
page *, + fscache_rw_complete_t, + void *, + gfp_t); +extern int __fscache_read_or_alloc_pages(struct fscache_cookie *, + struct address_space *, + struct list_head *, + unsigned *, + fscache_rw_complete_t, + void *, + gfp_t); +extern int __fscache_alloc_page(struct fscache_cookie *, struct page *, gfp_t); +extern int __fscache_write_page(struct fscache_cookie *, struct page *, gfp_t); +extern void __fscache_uncache_page(struct fscache_cookie *, struct page *); +extern bool __fscache_check_page_write(struct fscache_cookie *, struct page *); +extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *); + +/** + * fscache_register_netfs - Register a filesystem as desiring caching services + * @netfs: The description of the filesystem + * + * Register a filesystem as desiring caching services if they're available. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +int fscache_register_netfs(struct fscache_netfs *netfs) +{ + if (fscache_available()) + return __fscache_register_netfs(netfs); + else + return 0; +} + +/** + * fscache_unregister_netfs - Indicate that a filesystem no longer desires + * caching services + * @netfs: The description of the filesystem + * + * Indicate that a filesystem no longer desires caching services for the + * moment. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +void fscache_unregister_netfs(struct fscache_netfs *netfs) +{ + if (fscache_available()) + __fscache_unregister_netfs(netfs); +} + +/** + * fscache_lookup_cache_tag - Look up a cache tag + * @name: The name of the tag to search for + * + * Acquire a specific cache referral tag that can be used to select a specific + * cache in which to cache an index. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +struct fscache_cache_tag *fscache_lookup_cache_tag(const char *name) +{ + if (fscache_available()) + return __fscache_lookup_cache_tag(name); + else + return NULL; +} + +/** + * fscache_release_cache_tag - Release a cache tag + * @tag: The tag to release + * + * Release a reference to a cache referral tag previously looked up. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +void fscache_release_cache_tag(struct fscache_cache_tag *tag) +{ + if (fscache_available()) + __fscache_release_cache_tag(tag); +} + +/** + * fscache_acquire_cookie - Acquire a cookie to represent a cache object + * @parent: The cookie that's to be the parent of this one + * @def: A description of the cache object, including callback operations + * @netfs_data: An arbitrary piece of data to be kept in the cookie to + * represent the cache object to the netfs + * + * This function is used to inform FS-Cache about part of an index hierarchy + * that can be used to locate files. This is done by requesting a cookie for + * each index in the path to the file. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. 
+ */ +static inline +struct fscache_cookie *fscache_acquire_cookie( + struct fscache_cookie *parent, + const struct fscache_cookie_def *def, + void *netfs_data) +{ + if (fscache_cookie_valid(parent)) + return __fscache_acquire_cookie(parent, def, netfs_data); + else + return NULL; +} + +/** + * fscache_relinquish_cookie - Return the cookie to the cache, maybe discarding + * it + * @cookie: The cookie being returned + * @retire: True if the cache object the cookie represents is to be discarded + * + * This function returns a cookie to the cache, forcibly discarding the + * associated cache object if retire is set to true. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +void fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire) +{ + if (fscache_cookie_valid(cookie)) + __fscache_relinquish_cookie(cookie, retire); +} + +/** + * fscache_update_cookie - Request that a cache object be updated + * @cookie: The cookie representing the cache object + * + * Request an update of the index data for the cache object associated with the + * cookie. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +void fscache_update_cookie(struct fscache_cookie *cookie) +{ + if (fscache_cookie_valid(cookie)) + __fscache_update_cookie(cookie); +} + +/** + * fscache_pin_cookie - Pin a data-storage cache object in its cache + * @cookie: The cookie representing the cache object + * + * Permit data-storage cache objects to be pinned in the cache. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +int fscache_pin_cookie(struct fscache_cookie *cookie) +{ + return -ENOBUFS; +} + +/** + * fscache_pin_cookie - Unpin a data-storage cache object in its cache + * @cookie: The cookie representing the cache object + * + * Permit data-storage cache objects to be unpinned from the cache. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +void fscache_unpin_cookie(struct fscache_cookie *cookie) +{ +} + +/** + * fscache_attr_changed - Notify cache that an object's attributes changed + * @cookie: The cookie representing the cache object + * + * Send a notification to the cache indicating that an object's attributes have + * changed. This includes the data size. These attributes will be obtained + * through the get_attr() cookie definition op. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +int fscache_attr_changed(struct fscache_cookie *cookie) +{ + if (fscache_cookie_valid(cookie)) + return __fscache_attr_changed(cookie); + else + return -ENOBUFS; +} + +/** + * fscache_reserve_space - Reserve data space for a cached object + * @cookie: The cookie representing the cache object + * @i_size: The amount of space to be reserved + * + * Reserve an amount of space in the cache for the cache object attached to a + * cookie so that a write to that object within the space can always be + * honoured. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. 
+ */ +static inline +int fscache_reserve_space(struct fscache_cookie *cookie, loff_t size) +{ + return -ENOBUFS; +} + +/** + * fscache_read_or_alloc_page - Read a page from the cache or allocate a block + * in which to store it + * @cookie: The cookie representing the cache object + * @page: The netfs page to fill if possible + * @end_io_func: The callback to invoke when and if the page is filled + * @context: An arbitrary piece of data to pass on to end_io_func() + * @gfp: The conditions under which memory allocation should be made + * + * Read a page from the cache, or if that's not possible make a potential + * one-block reservation in the cache into which the page may be stored once + * fetched from the server. + * + * If the page is not backed by the cache object, or if it there's some reason + * it can't be, -ENOBUFS will be returned and nothing more will be done for + * that page. + * + * Else, if that page is backed by the cache, a read will be initiated directly + * to the netfs's page and 0 will be returned by this function. The + * end_io_func() callback will be invoked when the operation terminates on a + * completion or failure. Note that the callback may be invoked before the + * return. + * + * Else, if the page is unbacked, -ENODATA is returned and a block may have + * been allocated in the cache. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +int fscache_read_or_alloc_page(struct fscache_cookie *cookie, + struct page *page, + fscache_rw_complete_t end_io_func, + void *context, + gfp_t gfp) +{ + if (fscache_cookie_valid(cookie)) + return __fscache_read_or_alloc_page(cookie, page, end_io_func, + context, gfp); + else + return -ENOBUFS; +} + +/** + * fscache_read_or_alloc_pages - Read pages from the cache and/or allocate + * blocks in which to store them + * @cookie: The cookie representing the cache object + * @mapping: The netfs inode mapping to which the pages will be attached + * @pages: A list of potential netfs pages to be filled + * @end_io_func: The callback to invoke when and if each page is filled + * @context: An arbitrary piece of data to pass on to end_io_func() + * @gfp: The conditions under which memory allocation should be made + * + * Read a set of pages from the cache, or if that's not possible, attempt to + * make a potential one-block reservation for each page in the cache into which + * that page may be stored once fetched from the server. + * + * If some pages are not backed by the cache object, or if it there's some + * reason they can't be, -ENOBUFS will be returned and nothing more will be + * done for that pages. + * + * Else, if some of the pages are backed by the cache, a read will be initiated + * directly to the netfs's page and 0 will be returned by this function. The + * end_io_func() callback will be invoked when the operation terminates on a + * completion or failure. Note that the callback may be invoked before the + * return. + * + * Else, if a page is unbacked, -ENODATA is returned and a block may have + * been allocated in the cache. + * + * Because the function may want to return all of -ENOBUFS, -ENODATA and 0 in + * regard to different pages, the return values are prioritised in that order. + * Any pages submitted for reading are removed from the pages list. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. 
+ */ +static inline +int fscache_read_or_alloc_pages(struct fscache_cookie *cookie, + struct address_space *mapping, + struct list_head *pages, + unsigned *nr_pages, + fscache_rw_complete_t end_io_func, + void *context, + gfp_t gfp) +{ + if (fscache_cookie_valid(cookie)) + return __fscache_read_or_alloc_pages(cookie, mapping, pages, + nr_pages, end_io_func, + context, gfp); + else + return -ENOBUFS; +} + +/** + * fscache_alloc_page - Allocate a block in which to store a page + * @cookie: The cookie representing the cache object + * @page: The netfs page to allocate a page for + * @gfp: The conditions under which memory allocation should be made + * + * Request Allocation a block in the cache in which to store a netfs page + * without retrieving any contents from the cache. + * + * If the page is not backed by a file then -ENOBUFS will be returned and + * nothing more will be done, and no reservation will be made. + * + * Else, a block will be allocated if one wasn't already, and 0 will be + * returned + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +int fscache_alloc_page(struct fscache_cookie *cookie, + struct page *page, + gfp_t gfp) +{ + if (fscache_cookie_valid(cookie)) + return __fscache_alloc_page(cookie, page, gfp); + else + return -ENOBUFS; +} + +/** + * fscache_write_page - Request storage of a page in the cache + * @cookie: The cookie representing the cache object + * @page: The netfs page to store + * @gfp: The conditions under which memory allocation should be made + * + * Request the contents of the netfs page be written into the cache. This + * request may be ignored if no cache block is currently allocated, in which + * case it will return -ENOBUFS. + * + * If a cache block was already allocated, a write will be initiated and 0 will + * be returned. The PG_fscache_write page bit is set immediately and will then + * be cleared at the completion of the write to indicate the success or failure + * of the operation. Note that the completion may happen before the return. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +int fscache_write_page(struct fscache_cookie *cookie, + struct page *page, + gfp_t gfp) +{ + if (fscache_cookie_valid(cookie)) + return __fscache_write_page(cookie, page, gfp); + else + return -ENOBUFS; +} + +/** + * fscache_uncache_page - Indicate that caching is no longer required on a page + * @cookie: The cookie representing the cache object + * @page: The netfs page that was being cached. + * + * Tell the cache that we no longer want a page to be cached and that it should + * remove any knowledge of the netfs page it may have. + * + * Note that this cannot cancel any outstanding I/O operations between this + * page and the cache. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +void fscache_uncache_page(struct fscache_cookie *cookie, + struct page *page) +{ + if (fscache_cookie_valid(cookie)) + __fscache_uncache_page(cookie, page); +} + +/** + * fscache_check_page_write - Ask if a page is being writing to the cache + * @cookie: The cookie representing the cache object + * @page: The netfs page that is being cached. + * + * Ask the cache if a page is being written to the cache. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. 
+ */ +static inline +bool fscache_check_page_write(struct fscache_cookie *cookie, + struct page *page) +{ + if (fscache_cookie_valid(cookie)) + return __fscache_check_page_write(cookie, page); + return false; +} + +/** + * fscache_wait_on_page_write - Wait for a page to complete writing to the cache + * @cookie: The cookie representing the cache object + * @page: The netfs page that is being cached. + * + * Ask the cache to wake us up when a page is no longer being written to the + * cache. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +void fscache_wait_on_page_write(struct fscache_cookie *cookie, + struct page *page) +{ + if (fscache_cookie_valid(cookie)) + __fscache_wait_on_page_write(cookie, page); +} + +#endif /* _LINUX_FSCACHE_H */ diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h index 7ef1caf5026..f2a78b5e8b5 100644 --- a/include/linux/fsl_devices.h +++ b/include/linux/fsl_devices.h @@ -18,7 +18,6 @@ #define _FSL_DEVICE_H_ #include <linux/types.h> -#include <linux/phy.h> /* * Some conventions on how we handle peripherals on Freescale chips @@ -44,27 +43,6 @@ * */ -struct gianfar_platform_data { - /* device specific information */ - u32 device_flags; - char bus_id[BUS_ID_SIZE]; - phy_interface_t interface; -}; - -struct gianfar_mdio_data { - /* board specific information */ - int irq[32]; -}; - -/* Flags in gianfar_platform_data */ -#define FSL_GIANFAR_BRD_HAS_PHY_INTR 0x00000001 /* set or use a timer */ -#define FSL_GIANFAR_BRD_IS_REDUCED 0x00000002 /* Set if RGMII, RMII */ - -struct fsl_i2c_platform_data { - /* device specific information */ - u32 device_flags; -}; - /* Flags related to I2C device features */ #define FSL_I2C_DEV_SEPARATE_DFSRR 0x00000001 #define FSL_I2C_DEV_CLOCK_5200 0x00000002 diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index a7f8134c594..da5405dce34 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -1,15 +1,18 @@ #ifndef _LINUX_FTRACE_H #define _LINUX_FTRACE_H -#include <linux/linkage.h> -#include <linux/fs.h> -#include <linux/ktime.h> -#include <linux/init.h> -#include <linux/types.h> -#include <linux/module.h> +#include <linux/trace_clock.h> #include <linux/kallsyms.h> +#include <linux/linkage.h> #include <linux/bitops.h> +#include <linux/module.h> +#include <linux/ktime.h> #include <linux/sched.h> +#include <linux/types.h> +#include <linux/init.h> +#include <linux/fs.h> + +#include <asm/ftrace.h> #ifdef CONFIG_FUNCTION_TRACER @@ -95,9 +98,41 @@ stack_trace_sysctl(struct ctl_table *table, int write, loff_t *ppos); #endif +struct ftrace_func_command { + struct list_head list; + char *name; + int (*func)(char *func, char *cmd, + char *params, int enable); +}; + #ifdef CONFIG_DYNAMIC_FTRACE -/* asm/ftrace.h must be defined for archs supporting dynamic ftrace */ -#include <asm/ftrace.h> + +int ftrace_arch_code_modify_prepare(void); +int ftrace_arch_code_modify_post_process(void); + +struct seq_file; + +struct ftrace_probe_ops { + void (*func)(unsigned long ip, + unsigned long parent_ip, + void **data); + int (*callback)(unsigned long ip, void **data); + void (*free)(void **data); + int (*print)(struct seq_file *m, + unsigned long ip, + struct ftrace_probe_ops *ops, + void *data); +}; + +extern int +register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, + void *data); +extern void +unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, + void *data); +extern void +unregister_ftrace_function_probe_func(char 
*glob, struct ftrace_probe_ops *ops); +extern void unregister_ftrace_function_probe_all(char *glob); enum { FTRACE_FL_FREE = (1 << 0), @@ -110,15 +145,23 @@ enum { }; struct dyn_ftrace { - struct list_head list; - unsigned long ip; /* address of mcount call-site */ - unsigned long flags; - struct dyn_arch_ftrace arch; + union { + unsigned long ip; /* address of mcount call-site */ + struct dyn_ftrace *freelist; + }; + union { + unsigned long flags; + struct dyn_ftrace *newlist; + }; + struct dyn_arch_ftrace arch; }; int ftrace_force_update(void); void ftrace_set_filter(unsigned char *buf, int len, int reset); +int register_ftrace_command(struct ftrace_func_command *cmd); +int unregister_ftrace_command(struct ftrace_func_command *cmd); + /* defined in arch */ extern int ftrace_ip_converted(unsigned long ip); extern int ftrace_dyn_arch_init(void *data); @@ -126,6 +169,10 @@ extern int ftrace_update_ftrace_func(ftrace_func_t func); extern void ftrace_caller(void); extern void ftrace_call(void); extern void mcount_call(void); + +#ifndef FTRACE_ADDR +#define FTRACE_ADDR ((unsigned long)ftrace_caller) +#endif #ifdef CONFIG_FUNCTION_GRAPH_TRACER extern void ftrace_graph_caller(void); extern int ftrace_enable_ftrace_graph_caller(void); @@ -136,7 +183,7 @@ static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; } #endif /** - * ftrace_make_nop - convert code into top + * ftrace_make_nop - convert code into nop * @mod: module structure if called by module load initialization * @rec: the mcount call site record * @addr: the address that the call site should be calling @@ -181,7 +228,6 @@ extern int ftrace_make_nop(struct module *mod, */ extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr); - /* May be defined in arch */ extern int ftrace_arch_read_dyn_info(char *buf, int size); @@ -198,6 +244,14 @@ extern void ftrace_enable_daemon(void); # define ftrace_disable_daemon() do { } while (0) # define ftrace_enable_daemon() do { } while (0) static inline void ftrace_release(void *start, unsigned long size) { } +static inline int register_ftrace_command(struct ftrace_func_command *cmd) +{ + return -EINVAL; +} +static inline int unregister_ftrace_command(char *cmd_name) +{ + return -EINVAL; +} #endif /* CONFIG_DYNAMIC_FTRACE */ /* totally disable ftrace - can not re-enable after this */ @@ -233,24 +287,25 @@ static inline void __ftrace_enabled_restore(int enabled) #endif } -#ifdef CONFIG_FRAME_POINTER -/* TODO: need to fix this for ARM */ -# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) -# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1)) -# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2)) -# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3)) -# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4)) -# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5)) -# define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6)) -#else -# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) -# define CALLER_ADDR1 0UL -# define CALLER_ADDR2 0UL -# define CALLER_ADDR3 0UL -# define CALLER_ADDR4 0UL -# define CALLER_ADDR5 0UL -# define CALLER_ADDR6 0UL -#endif +#ifndef HAVE_ARCH_CALLER_ADDR +# ifdef CONFIG_FRAME_POINTER +# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) +# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1)) +# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2)) +# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3)) +# 
define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4)) +# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5)) +# define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6)) +# else +# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) +# define CALLER_ADDR1 0UL +# define CALLER_ADDR2 0UL +# define CALLER_ADDR3 0UL +# define CALLER_ADDR4 0UL +# define CALLER_ADDR5 0UL +# define CALLER_ADDR6 0UL +# endif +#endif /* ifndef HAVE_ARCH_CALLER_ADDR */ #ifdef CONFIG_IRQSOFF_TRACER extern void time_hardirqs_on(unsigned long a0, unsigned long a1); @@ -268,54 +323,6 @@ static inline void __ftrace_enabled_restore(int enabled) # define trace_preempt_off(a0, a1) do { } while (0) #endif -#ifdef CONFIG_TRACING -extern int ftrace_dump_on_oops; - -extern void tracing_start(void); -extern void tracing_stop(void); -extern void ftrace_off_permanent(void); - -extern void -ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3); - -/** - * ftrace_printk - printf formatting in the ftrace buffer - * @fmt: the printf format for printing - * - * Note: __ftrace_printk is an internal function for ftrace_printk and - * the @ip is passed in via the ftrace_printk macro. - * - * This function allows a kernel developer to debug fast path sections - * that printk is not appropriate for. By scattering in various - * printk like tracing in the code, a developer can quickly see - * where problems are occurring. - * - * This is intended as a debugging tool for the developer only. - * Please refrain from leaving ftrace_printks scattered around in - * your code. - */ -# define ftrace_printk(fmt...) __ftrace_printk(_THIS_IP_, fmt) -extern int -__ftrace_printk(unsigned long ip, const char *fmt, ...) - __attribute__ ((format (printf, 2, 3))); -extern void ftrace_dump(void); -#else -static inline void -ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { } -static inline int -ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2))); - -static inline void tracing_start(void) { } -static inline void tracing_stop(void) { } -static inline void ftrace_off_permanent(void) { } -static inline int -ftrace_printk(const char *fmt, ...) -{ - return 0; -} -static inline void ftrace_dump(void) { } -#endif - #ifdef CONFIG_FTRACE_MCOUNT_RECORD extern void ftrace_init(void); extern void ftrace_init_module(struct module *mod, @@ -327,36 +334,6 @@ ftrace_init_module(struct module *mod, unsigned long *start, unsigned long *end) { } #endif -enum { - POWER_NONE = 0, - POWER_CSTATE = 1, - POWER_PSTATE = 2, -}; - -struct power_trace { -#ifdef CONFIG_POWER_TRACER - ktime_t stamp; - ktime_t end; - int type; - int state; -#endif -}; - -#ifdef CONFIG_POWER_TRACER -extern void trace_power_start(struct power_trace *it, unsigned int type, - unsigned int state); -extern void trace_power_mark(struct power_trace *it, unsigned int type, - unsigned int state); -extern void trace_power_end(struct power_trace *it); -#else -static inline void trace_power_start(struct power_trace *it, unsigned int type, - unsigned int state) { } -static inline void trace_power_mark(struct power_trace *it, unsigned int type, - unsigned int state) { } -static inline void trace_power_end(struct power_trace *it) { } -#endif - - /* * Structure that defines an entry function trace. */ @@ -379,6 +356,9 @@ struct ftrace_graph_ret { #ifdef CONFIG_FUNCTION_GRAPH_TRACER +/* for init task */ +#define INIT_FTRACE_GRAPH .ret_stack = NULL + /* * Stack of return addresses for functions * of a thread. 
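The ftrace.h hunks above add struct ftrace_func_command together with register_ftrace_command() and unregister_ftrace_command(), which let code hook extra commands into the dynamic-ftrace filter parser. The sketch below is not part of this patch; it only illustrates, assuming CONFIG_DYNAMIC_FTRACE=y, how a module might register such a command. The command name "myfilter", the handler body and the module boilerplate are hypothetical.

#include <linux/ftrace.h>
#include <linux/module.h>

/* Handler expected to run when "<glob>:myfilter[:params]" is written to
 * the function filter file; 'func' is the glob, 'params' the optional
 * argument after the command name. */
static int myfilter_cmd_func(char *func, char *cmd, char *params, int enable)
{
	pr_info("myfilter: func=%s cmd=%s params=%s enable=%d\n",
		func, cmd, params ? params : "", enable);
	return 0;
}

static struct ftrace_func_command myfilter_cmd = {
	.name	= "myfilter",
	.func	= myfilter_cmd_func,
};

static int __init myfilter_init(void)
{
	/* The stub above returns -EINVAL when CONFIG_DYNAMIC_FTRACE is unset. */
	return register_ftrace_command(&myfilter_cmd);
}

static void __exit myfilter_exit(void)
{
	unregister_ftrace_command(&myfilter_cmd);
}

module_init(myfilter_init);
module_exit(myfilter_exit);
MODULE_LICENSE("GPL");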
@@ -398,8 +378,7 @@ struct ftrace_ret_stack { extern void return_to_handler(void); extern int -ftrace_push_return_trace(unsigned long ret, unsigned long long time, - unsigned long func, int *depth); +ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth); extern void ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret); @@ -454,10 +433,11 @@ static inline void unpause_graph_tracing(void) { atomic_dec(¤t->tracing_graph_pause); } -#else +#else /* !CONFIG_FUNCTION_GRAPH_TRACER */ #define __notrace_funcgraph #define __irq_entry +#define INIT_FTRACE_GRAPH static inline void ftrace_graph_init_task(struct task_struct *t) { } static inline void ftrace_graph_exit_task(struct task_struct *t) { } @@ -469,7 +449,7 @@ static inline int task_curr_ret_stack(struct task_struct *tsk) static inline void pause_graph_tracing(void) { } static inline void unpause_graph_tracing(void) { } -#endif +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ #ifdef CONFIG_TRACING #include <linux/sched.h> @@ -514,6 +494,50 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk) return tsk->trace & TSK_TRACE_FL_GRAPH; } +extern int ftrace_dump_on_oops; + #endif /* CONFIG_TRACING */ + +#ifdef CONFIG_HW_BRANCH_TRACER + +void trace_hw_branch(u64 from, u64 to); +void trace_hw_branch_oops(void); + +#else /* CONFIG_HW_BRANCH_TRACER */ + +static inline void trace_hw_branch(u64 from, u64 to) {} +static inline void trace_hw_branch_oops(void) {} + +#endif /* CONFIG_HW_BRANCH_TRACER */ + +/* + * A syscall entry in the ftrace syscalls array. + * + * @name: name of the syscall + * @nb_args: number of parameters it takes + * @types: list of types as strings + * @args: list of args as strings (args[i] matches types[i]) + */ +struct syscall_metadata { + const char *name; + int nb_args; + const char **types; + const char **args; +}; + +#ifdef CONFIG_FTRACE_SYSCALLS +extern void arch_init_ftrace_syscalls(void); +extern struct syscall_metadata *syscall_nr_to_meta(int nr); +extern void start_ftrace_syscalls(void); +extern void stop_ftrace_syscalls(void); +extern void ftrace_syscall_enter(struct pt_regs *regs); +extern void ftrace_syscall_exit(struct pt_regs *regs); +#else +static inline void start_ftrace_syscalls(void) { } +static inline void stop_ftrace_syscalls(void) { } +static inline void ftrace_syscall_enter(struct pt_regs *regs) { } +static inline void ftrace_syscall_exit(struct pt_regs *regs) { } +#endif + #endif /* _LINUX_FTRACE_H */ diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h index 366a054d0b0..dca7bf8cffe 100644 --- a/include/linux/ftrace_irq.h +++ b/include/linux/ftrace_irq.h @@ -2,7 +2,7 @@ #define _LINUX_FTRACE_IRQ_H -#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_FUNCTION_GRAPH_TRACER) +#ifdef CONFIG_FTRACE_NMI_ENTER extern void ftrace_nmi_enter(void); extern void ftrace_nmi_exit(void); #else diff --git a/include/linux/gfp.h b/include/linux/gfp.h index dd20cd78faa..0bbc15f5453 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -4,6 +4,7 @@ #include <linux/mmzone.h> #include <linux/stddef.h> #include <linux/linkage.h> +#include <linux/topology.h> struct vm_area_struct; diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index f83288347dd..45257475623 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -15,55 +15,61 @@ * - bits 0-7 are the preemption count (max preemption depth: 256) * - bits 8-15 are the softirq count (max # of softirqs: 256) * - * The hardirq count can be overridden per architecture, the default is: 
+ * The hardirq count can in theory reach the same as NR_IRQS. + * In reality, the number of nested IRQS is limited to the stack + * size as well. For archs with over 1000 IRQS it is not practical + * to expect that they will all nest. We give a max of 10 bits for + * hardirq nesting. An arch may choose to give less than 10 bits. + * m68k expects it to be 8. * - * - bits 16-27 are the hardirq count (max # of hardirqs: 4096) - * - ( bit 28 is the PREEMPT_ACTIVE flag. ) + * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024) + * - bit 26 is the NMI_MASK + * - bit 28 is the PREEMPT_ACTIVE flag * * PREEMPT_MASK: 0x000000ff * SOFTIRQ_MASK: 0x0000ff00 - * HARDIRQ_MASK: 0x0fff0000 + * HARDIRQ_MASK: 0x03ff0000 + * NMI_MASK: 0x04000000 */ #define PREEMPT_BITS 8 #define SOFTIRQ_BITS 8 +#define NMI_BITS 1 -#ifndef HARDIRQ_BITS -#define HARDIRQ_BITS 12 +#define MAX_HARDIRQ_BITS 10 -#ifndef MAX_HARDIRQS_PER_CPU -#define MAX_HARDIRQS_PER_CPU NR_IRQS +#ifndef HARDIRQ_BITS +# define HARDIRQ_BITS MAX_HARDIRQ_BITS #endif -/* - * The hardirq mask has to be large enough to have space for potentially - * all IRQ sources in the system nesting on a single CPU. - */ -#if (1 << HARDIRQ_BITS) < MAX_HARDIRQS_PER_CPU -# error HARDIRQ_BITS is too low! -#endif +#if HARDIRQ_BITS > MAX_HARDIRQ_BITS +#error HARDIRQ_BITS too high! #endif #define PREEMPT_SHIFT 0 #define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS) #define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS) +#define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS) #define __IRQ_MASK(x) ((1UL << (x))-1) #define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT) #define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) #define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT) +#define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT) #define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT) #define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT) #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) +#define NMI_OFFSET (1UL << NMI_SHIFT) -#if PREEMPT_ACTIVE < (1 << (HARDIRQ_SHIFT + HARDIRQ_BITS)) +#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS)) #error PREEMPT_ACTIVE is too low! #endif #define hardirq_count() (preempt_count() & HARDIRQ_MASK) #define softirq_count() (preempt_count() & SOFTIRQ_MASK) -#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK)) +#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \ + | NMI_MASK)) /* * Are we doing bottom half or hardware interrupt processing? @@ -73,6 +79,11 @@ #define in_softirq() (softirq_count()) #define in_interrupt() (irq_count()) +/* + * Are we in NMI context? 
+ */ +#define in_nmi() (preempt_count() & NMI_MASK) + #if defined(CONFIG_PREEMPT) # define PREEMPT_INATOMIC_BASE kernel_locked() # define PREEMPT_CHECK_OFFSET 1 @@ -105,7 +116,7 @@ # define IRQ_EXIT_OFFSET HARDIRQ_OFFSET #endif -#ifdef CONFIG_SMP +#if defined(CONFIG_SMP) || defined(CONFIG_GENERIC_HARDIRQS) extern void synchronize_irq(unsigned int irq); #else # define synchronize_irq(irq) barrier() @@ -164,20 +175,24 @@ extern void irq_enter(void); */ extern void irq_exit(void); -#define nmi_enter() \ - do { \ - ftrace_nmi_enter(); \ - lockdep_off(); \ - rcu_nmi_enter(); \ - __irq_enter(); \ +#define nmi_enter() \ + do { \ + ftrace_nmi_enter(); \ + BUG_ON(in_nmi()); \ + add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \ + lockdep_off(); \ + rcu_nmi_enter(); \ + trace_hardirq_enter(); \ } while (0) -#define nmi_exit() \ - do { \ - __irq_exit(); \ - rcu_nmi_exit(); \ - lockdep_on(); \ - ftrace_nmi_exit(); \ +#define nmi_exit() \ + do { \ + trace_hardirq_exit(); \ + rcu_nmi_exit(); \ + lockdep_on(); \ + BUG_ON(!in_nmi()); \ + sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \ + ftrace_nmi_exit(); \ } while (0) #endif /* LINUX_HARDIRQ_H */ diff --git a/include/linux/hdreg.h b/include/linux/hdreg.h index ed21bd3dbd2..29ee2873f4a 100644 --- a/include/linux/hdreg.h +++ b/include/linux/hdreg.h @@ -1,68 +1,6 @@ #ifndef _LINUX_HDREG_H #define _LINUX_HDREG_H -#ifdef __KERNEL__ -#include <linux/ata.h> - -/* - * This file contains some defines for the AT-hd-controller. - * Various sources. - */ - -/* ide.c has its own port definitions in "ide.h" */ - -#define HD_IRQ 14 - -/* Hd controller regs. Ref: IBM AT Bios-listing */ -#define HD_DATA 0x1f0 /* _CTL when writing */ -#define HD_ERROR 0x1f1 /* see err-bits */ -#define HD_NSECTOR 0x1f2 /* nr of sectors to read/write */ -#define HD_SECTOR 0x1f3 /* starting sector */ -#define HD_LCYL 0x1f4 /* starting cylinder */ -#define HD_HCYL 0x1f5 /* high byte of starting cyl */ -#define HD_CURRENT 0x1f6 /* 101dhhhh , d=drive, hhhh=head */ -#define HD_STATUS 0x1f7 /* see status-bits */ -#define HD_FEATURE HD_ERROR /* same io address, read=error, write=feature */ -#define HD_PRECOMP HD_FEATURE /* obsolete use of this port - predates IDE */ -#define HD_COMMAND HD_STATUS /* same io address, read=status, write=cmd */ - -#define HD_CMD 0x3f6 /* used for resets */ -#define HD_ALTSTATUS 0x3f6 /* same as HD_STATUS but doesn't clear irq */ - -/* remainder is shared between hd.c, ide.c, ide-cd.c, and the hdparm utility */ - -/* Bits of HD_STATUS */ -#define ERR_STAT 0x01 -#define INDEX_STAT 0x02 -#define ECC_STAT 0x04 /* Corrected error */ -#define DRQ_STAT 0x08 -#define SEEK_STAT 0x10 -#define SRV_STAT 0x10 -#define WRERR_STAT 0x20 -#define READY_STAT 0x40 -#define BUSY_STAT 0x80 - -/* Bits for HD_ERROR */ -#define MARK_ERR 0x01 /* Bad address mark */ -#define ILI_ERR 0x01 /* Illegal Length Indication (ATAPI) */ -#define TRK0_ERR 0x02 /* couldn't find track 0 */ -#define EOM_ERR 0x02 /* End Of Media (ATAPI) */ -#define ABRT_ERR 0x04 /* Command aborted */ -#define MCR_ERR 0x08 /* media change request */ -#define ID_ERR 0x10 /* ID field not found */ -#define MC_ERR 0x20 /* media changed */ -#define ECC_ERR 0x40 /* Uncorrectable ECC error */ -#define BBD_ERR 0x80 /* pre-EIDE meaning: block marked bad */ -#define ICRC_ERR 0x80 /* new meaning: CRC error during transfer */ -#define LFS_ERR 0xf0 /* Last Failed Sense (ATAPI) */ - -/* Bits of HD_NSECTOR */ -#define CD 0x01 -#define IO 0x02 -#define REL 0x04 -#define TAG_MASK 0xf8 -#endif /* __KERNEL__ */ - #include <linux/types.h> /* @@ 
-191,6 +129,7 @@ typedef struct hd_drive_hob_hdr { #define TASKFILE_INVALID 0x7fff #endif +#ifndef __KERNEL__ /* ATA/ATAPI Commands pre T13 Spec */ #define WIN_NOP 0x00 /* @@ -379,6 +318,7 @@ typedef struct hd_drive_hob_hdr { #define SECURITY_ERASE_UNIT 0xBD #define SECURITY_FREEZE_LOCK 0xBE #define SECURITY_DISABLE_PASSWORD 0xBF +#endif /* __KERNEL__ */ struct hd_geometry { unsigned char heads; @@ -448,6 +388,7 @@ enum { #define __NEW_HD_DRIVE_ID +#ifndef __KERNEL__ /* * Structure returned by HDIO_GET_IDENTITY, as per ANSI NCITS ATA6 rev.1b spec. * @@ -699,6 +640,7 @@ struct hd_driveid { * 7:0 Signature */ }; +#endif /* __KERNEL__ */ /* * IDE "nice" flags. These are used on a per drive basis to determine diff --git a/include/linux/hid.h b/include/linux/hid.h index fa8ee9cef7b..a72876e4358 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -270,6 +270,7 @@ struct hid_item { #define HID_QUIRK_INVERT 0x00000001 #define HID_QUIRK_NOTOUCH 0x00000002 +#define HID_QUIRK_IGNORE 0x00000004 #define HID_QUIRK_NOGET 0x00000008 #define HID_QUIRK_BADPAD 0x00000020 #define HID_QUIRK_MULTI_INPUT 0x00000040 @@ -603,12 +604,17 @@ struct hid_ll_driver { int (*open)(struct hid_device *hdev); void (*close)(struct hid_device *hdev); + int (*power)(struct hid_device *hdev, int level); + int (*hidinput_input_event) (struct input_dev *idev, unsigned int type, unsigned int code, int value); int (*parse)(struct hid_device *hdev); }; +#define PM_HINT_FULLON 1<<5 +#define PM_HINT_NORMAL 1<<1 + /* Applications from HID Usage Tables 4/8/99 Version 1.1 */ /* We ignore a few input applications that are not widely used */ #define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001) || (a == 0x000d0002)) @@ -641,6 +647,7 @@ int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int void hid_output_report(struct hid_report *report, __u8 *data); struct hid_device *hid_allocate_device(void); int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size); +int hid_check_keys_pressed(struct hid_device *hid); int hid_connect(struct hid_device *hid, unsigned int connect_mask); /** @@ -791,21 +798,5 @@ dbg_hid(const char *fmt, ...) 
__FILE__ , ## arg) #endif /* HID_FF */ -#ifdef __KERNEL__ -#ifdef CONFIG_HID_COMPAT -#define HID_COMPAT_LOAD_DRIVER(name) \ -/* prototype to avoid sparse warning */ \ -extern void hid_compat_##name(void); \ -void hid_compat_##name(void) { } \ -EXPORT_SYMBOL(hid_compat_##name) -#else -#define HID_COMPAT_LOAD_DRIVER(name) -#endif /* HID_COMPAT */ -#define HID_COMPAT_CALL_DRIVER(name) do { \ - extern void hid_compat_##name(void); \ - hid_compat_##name(); \ -} while (0) -#endif /* __KERNEL__ */ - #endif diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 7ff5c55f9b5..1fcb7126a01 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -19,8 +19,21 @@ static inline void flush_kernel_dcache_page(struct page *page) } #endif -#ifdef CONFIG_HIGHMEM +#include <asm/kmap_types.h> + +#if defined(CONFIG_DEBUG_HIGHMEM) && defined(CONFIG_TRACE_IRQFLAGS_SUPPORT) + +void debug_kmap_atomic(enum km_type type); + +#else +static inline void debug_kmap_atomic(enum km_type type) +{ +} + +#endif + +#ifdef CONFIG_HIGHMEM #include <asm/highmem.h> /* declarations for linux/mm/highmem.c */ @@ -44,8 +57,6 @@ static inline void *kmap(struct page *page) #define kunmap(page) do { (void) (page); } while (0) -#include <asm/kmap_types.h> - static inline void *kmap_atomic(struct page *page, enum km_type idx) { pagefault_disable(); @@ -187,16 +198,4 @@ static inline void copy_highpage(struct page *to, struct page *from) kunmap_atomic(vto, KM_USER1); } -#if defined(CONFIG_DEBUG_HIGHMEM) && defined(CONFIG_TRACE_IRQFLAGS_SUPPORT) - -void debug_kmap_atomic(enum km_type type); - -#else - -static inline void debug_kmap_atomic(enum km_type type) -{ -} - -#endif - #endif /* _LINUX_HIGHMEM_H */ diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index bd37078c2d7..0d2f7c8a33d 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -336,6 +336,11 @@ extern int hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode); extern int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long range_ns, const enum hrtimer_mode mode); +extern int +__hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, + unsigned long delta_ns, + const enum hrtimer_mode mode, int wakeup); + extern int hrtimer_cancel(struct hrtimer *timer); extern int hrtimer_try_to_cancel(struct hrtimer *timer); diff --git a/include/linux/i2c-algo-sgi.h b/include/linux/i2c-algo-sgi.h deleted file mode 100644 index 3b7715024e6..00000000000 --- a/include/linux/i2c-algo-sgi.h +++ /dev/null @@ -1,26 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License version 2 as published by the Free Software Foundation. 
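[Editor's note: the highmem.h hunk above moves debug_kmap_atomic() ahead of the CONFIG_HIGHMEM block; at this point kmap_atomic() still takes an explicit km_type slot, as its non-highmem stub shows. A hypothetical caller, purely for orientation (function and buffer names are made up):

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical: copy one page out through the km_type-based atomic map. */
static void copy_page_to_buf(struct page *page, void *buf)
{
	void *vaddr = kmap_atomic(page, KM_USER0);	/* disables pagefaults */

	memcpy(buf, vaddr, PAGE_SIZE);
	kunmap_atomic(vaddr, KM_USER0);			/* must use the same slot */
}
]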
- * - * Copyright (C) 2003 Ladislav Michl <ladis@linux-mips.org> - */ - -#ifndef I2C_ALGO_SGI_H -#define I2C_ALGO_SGI_H 1 - -#include <linux/i2c.h> - -struct i2c_algo_sgi_data { - void *data; /* private data for lowlevel routines */ - unsigned (*getctrl)(void *data); - void (*setctrl)(void *data, unsigned val); - unsigned (*rdata)(void *data); - void (*wdata)(void *data, unsigned val); - - int xfer_timeout; - int ack_timeout; -}; - -int i2c_sgi_add_bus(struct i2c_adapter *); - -#endif /* I2C_ALGO_SGI_H */ diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h index f27604af837..c9087de5c6c 100644 --- a/include/linux/i2c-id.h +++ b/include/linux/i2c-id.h @@ -33,47 +33,10 @@ #define I2C_DRIVERID_MSP3400 1 #define I2C_DRIVERID_TUNER 2 -#define I2C_DRIVERID_TEA6420 5 /* audio matrix switch */ -#define I2C_DRIVERID_TEA6415C 6 /* video matrix switch */ -#define I2C_DRIVERID_TDA9840 7 /* stereo sound processor */ -#define I2C_DRIVERID_SAA7111A 8 /* video input processor */ -#define I2C_DRIVERID_SAA7185B 13 /* video encoder */ -#define I2C_DRIVERID_SAA7110 22 /* video decoder */ -#define I2C_DRIVERID_SAA5249 24 /* SAA5249 and compatibles */ #define I2C_DRIVERID_TDA7432 27 /* Stereo sound processor */ #define I2C_DRIVERID_TVAUDIO 29 /* Generic TV sound driver */ -#define I2C_DRIVERID_TDA9875 32 /* TV sound decoder chip */ -#define I2C_DRIVERID_BT819 40 /* video decoder */ -#define I2C_DRIVERID_BT856 41 /* video encoder */ -#define I2C_DRIVERID_VPX3220 42 /* video decoder+vbi/vtxt */ -#define I2C_DRIVERID_ADV7175 48 /* ADV 7175/7176 video encoder */ -#define I2C_DRIVERID_SAA7114 49 /* video decoder */ -#define I2C_DRIVERID_ADV7170 54 /* video encoder */ -#define I2C_DRIVERID_SAA7191 57 /* video decoder */ -#define I2C_DRIVERID_INDYCAM 58 /* SGI IndyCam */ -#define I2C_DRIVERID_OVCAMCHIP 61 /* OmniVision CMOS image sens. 
*/ -#define I2C_DRIVERID_SAA6752HS 67 /* MPEG2 encoder */ -#define I2C_DRIVERID_TVEEPROM 68 /* TV EEPROM */ -#define I2C_DRIVERID_WM8775 69 /* wm8775 audio processor */ -#define I2C_DRIVERID_CS53L32A 70 /* cs53l32a audio processor */ -#define I2C_DRIVERID_CX25840 71 /* cx2584x video encoder */ -#define I2C_DRIVERID_SAA7127 72 /* saa7127 video encoder */ #define I2C_DRIVERID_SAA711X 73 /* saa711x video encoders */ #define I2C_DRIVERID_INFRARED 75 /* I2C InfraRed on Video boards */ -#define I2C_DRIVERID_TVP5150 76 /* TVP5150 video decoder */ -#define I2C_DRIVERID_WM8739 77 /* wm8739 audio processor */ -#define I2C_DRIVERID_UPD64083 78 /* upd64083 video processor */ -#define I2C_DRIVERID_UPD64031A 79 /* upd64031a video processor */ -#define I2C_DRIVERID_SAA717X 80 /* saa717x video encoder */ -#define I2C_DRIVERID_BT866 85 /* Conexant bt866 video encoder */ -#define I2C_DRIVERID_KS0127 86 /* Samsung ks0127 video decoder */ -#define I2C_DRIVERID_TLV320AIC23B 87 /* TI TLV320AIC23B audio codec */ -#define I2C_DRIVERID_VP27SMPX 93 /* Panasonic VP27s tuner internal MPX */ -#define I2C_DRIVERID_M52790 95 /* Mitsubishi M52790SP/FP AV switch */ -#define I2C_DRIVERID_CS5345 96 /* cs5345 audio processor */ -#define I2C_DRIVERID_AU8522 97 /* Auvitek au8522 */ - -#define I2C_DRIVERID_OV7670 1048 /* Omnivision 7670 camera */ /* * ---- Adapter types ---------------------------------------------------- @@ -88,6 +51,7 @@ #define I2C_HW_B_CX2341X 0x010020 /* Conexant CX2341X MPEG encoder cards */ #define I2C_HW_B_CX23885 0x010022 /* conexant 23885 based tv cards (bus1) */ #define I2C_HW_B_AU0828 0x010023 /* auvitek au0828 usb bridge */ +#define I2C_HW_B_CX231XX 0x010024 /* Conexant CX231XX USB based cards */ #define I2C_HW_B_HDPVR 0x010025 /* Hauppauge HD PVR */ /* --- SGI adapters */ diff --git a/include/linux/i2c.h b/include/linux/i2c.h index c86c3b07604..00ee11eb909 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -353,8 +353,8 @@ struct i2c_adapter { void *algo_data; /* --- administration stuff. 
*/ - int (*client_register)(struct i2c_client *); - int (*client_unregister)(struct i2c_client *); + int (*client_register)(struct i2c_client *) __deprecated; + int (*client_unregister)(struct i2c_client *) __deprecated; /* data fields that are valid for all devices */ u8 level; /* nesting level for lockdep */ diff --git a/include/linux/i2c/s6000.h b/include/linux/i2c/s6000.h new file mode 100644 index 00000000000..d9b34bfdae7 --- /dev/null +++ b/include/linux/i2c/s6000.h @@ -0,0 +1,10 @@ +#ifndef __LINUX_I2C_S6000_H +#define __LINUX_I2C_S6000_H + +struct s6_i2c_platform_data { + const char *clock; /* the clock to use */ + int bus_num; /* the bus number to register */ +}; + +#endif + diff --git a/include/linux/i2c/twl4030.h b/include/linux/i2c/twl4030.h index 8137f660a5c..0dc80ef2497 100644 --- a/include/linux/i2c/twl4030.h +++ b/include/linux/i2c/twl4030.h @@ -218,6 +218,53 @@ int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes); /*----------------------------------------------------------------------*/ +/* Power bus message definitions */ + +#define DEV_GRP_NULL 0x0 +#define DEV_GRP_P1 0x1 +#define DEV_GRP_P2 0x2 +#define DEV_GRP_P3 0x4 + +#define RES_GRP_RES 0x0 +#define RES_GRP_PP 0x1 +#define RES_GRP_RC 0x2 +#define RES_GRP_PP_RC 0x3 +#define RES_GRP_PR 0x4 +#define RES_GRP_PP_PR 0x5 +#define RES_GRP_RC_PR 0x6 +#define RES_GRP_ALL 0x7 + +#define RES_TYPE2_R0 0x0 + +#define RES_TYPE_ALL 0x7 + +#define RES_STATE_WRST 0xF +#define RES_STATE_ACTIVE 0xE +#define RES_STATE_SLEEP 0x8 +#define RES_STATE_OFF 0x0 + +/* + * Power Bus Message Format ... these can be sent individually by Linux, + * but are usually part of downloaded scripts that are run when various + * power events are triggered. + * + * Broadcast Message (16 Bits): + * DEV_GRP[15:13] MT[12] RES_GRP[11:9] RES_TYPE2[8:7] RES_TYPE[6:4] + * RES_STATE[3:0] + * + * Singular Message (16 Bits): + * DEV_GRP[15:13] MT[12] RES_ID[11:4] RES_STATE[3:0] + */ + +#define MSG_BROADCAST(devgrp, grp, type, type2, state) \ + ( (devgrp) << 13 | 1 << 12 | (grp) << 9 | (type2) << 7 \ + | (type) << 4 | (state)) + +#define MSG_SINGULAR(devgrp, id, state) \ + ((devgrp) << 13 | 0 << 12 | (id) << 4 | (state)) + +/*----------------------------------------------------------------------*/ + struct twl4030_bci_platform_data { int *battery_tmp_tbl; unsigned int tblsize; diff --git a/include/linux/init_task.h b/include/linux/init_task.h index af1de95e711..dcfb93337e9 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -5,6 +5,7 @@ #include <linux/irqflags.h> #include <linux/utsname.h> #include <linux/lockdep.h> +#include <linux/ftrace.h> #include <linux/ipc.h> #include <linux/pid_namespace.h> #include <linux/user_namespace.h> @@ -185,6 +186,7 @@ extern struct cred init_cred; INIT_IDS \ INIT_TRACE_IRQFLAGS \ INIT_LOCKDEP \ + INIT_FTRACE_GRAPH \ } diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 1d6c71d96ed..aa8c5317123 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -123,7 +123,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) #define ecap_eim_support(e) ((e >> 4) & 0x1) #define ecap_ir_support(e) ((e >> 3) & 0x1) #define ecap_max_handle_mask(e) ((e >> 20) & 0xf) - +#define ecap_sc_support(e) ((e >> 7) & 0x1) /* Snooping Control */ /* IOTLB_REG */ #define DMA_TLB_FLUSH_GRANU_OFFSET 60 @@ -164,6 +164,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) #define DMA_GCMD_QIE (((u32)1) << 26) #define DMA_GCMD_SIRTP (((u32)1) << 24) #define 
DMA_GCMD_IRE (((u32) 1) << 25) +#define DMA_GCMD_CFI (((u32) 1) << 23) /* GSTS_REG */ #define DMA_GSTS_TES (((u32)1) << 31) @@ -174,6 +175,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) #define DMA_GSTS_QIES (((u32)1) << 26) #define DMA_GSTS_IRTPS (((u32)1) << 24) #define DMA_GSTS_IRES (((u32)1) << 25) +#define DMA_GSTS_CFIS (((u32)1) << 23) /* CCMD_REG */ #define DMA_CCMD_ICC (((u64)1) << 63) @@ -284,6 +286,14 @@ struct iommu_flush { unsigned int size_order, u64 type, int non_present_entry_flush); }; +enum { + SR_DMAR_FECTL_REG, + SR_DMAR_FEDATA_REG, + SR_DMAR_FEADDR_REG, + SR_DMAR_FEUADDR_REG, + MAX_SR_DMAR_REGS +}; + struct intel_iommu { void __iomem *reg; /* Pointer to hardware regs, virtual addr */ u64 cap; @@ -304,6 +314,8 @@ struct intel_iommu { struct iommu_flush flush; #endif struct q_inval *qi; /* Queued invalidation info */ + u32 *iommu_state; /* Store iommu states between suspend and resume.*/ + #ifdef CONFIG_INTR_REMAP struct ir_table *ir_table; /* Interrupt remapping info */ #endif @@ -322,6 +334,7 @@ extern int alloc_iommu(struct dmar_drhd_unit *drhd); extern void free_iommu(struct intel_iommu *iommu); extern int dmar_enable_qi(struct intel_iommu *iommu); extern void dmar_disable_qi(struct intel_iommu *iommu); +extern int dmar_reenable_qi(struct intel_iommu *iommu); extern void qi_global_iec(struct intel_iommu *iommu); extern int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index c68bffd182b..91bb76f44f1 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -59,6 +59,18 @@ #define IRQF_NOBALANCING 0x00000800 #define IRQF_IRQPOLL 0x00001000 +/* + * Bits used by threaded handlers: + * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run + * IRQTF_DIED - handler thread died + * IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed + */ +enum { + IRQTF_RUNTHREAD, + IRQTF_DIED, + IRQTF_WARNED, +}; + typedef irqreturn_t (*irq_handler_t)(int, void *); /** @@ -71,6 +83,9 @@ typedef irqreturn_t (*irq_handler_t)(int, void *); * @next: pointer to the next irqaction for shared interrupts * @irq: interrupt number * @dir: pointer to the proc/irq/NN/name entry + * @thread_fn: interupt handler function for threaded interrupts + * @thread: thread pointer for threaded interrupts + * @thread_flags: flags related to @thread */ struct irqaction { irq_handler_t handler; @@ -81,18 +96,68 @@ struct irqaction { struct irqaction *next; int irq; struct proc_dir_entry *dir; + irq_handler_t thread_fn; + struct task_struct *thread; + unsigned long thread_flags; }; extern irqreturn_t no_action(int cpl, void *dev_id); -extern int __must_check request_irq(unsigned int, irq_handler_t handler, - unsigned long, const char *, void *); + +#ifdef CONFIG_GENERIC_HARDIRQS +extern int __must_check +request_threaded_irq(unsigned int irq, irq_handler_t handler, + irq_handler_t thread_fn, + unsigned long flags, const char *name, void *dev); + +static inline int __must_check +request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, + const char *name, void *dev) +{ + return request_threaded_irq(irq, handler, NULL, flags, name, dev); +} + +extern void exit_irq_thread(void); +#else + +extern int __must_check +request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, + const char *name, void *dev); + +/* + * Special function to avoid ifdeffery in kernel/irq/devres.c which + * gets magically built by GENERIC_HARDIRQS=n architectures 
(sparc, + * m68k). I really love these $@%#!* obvious Makefile references: + * ../../../kernel/irq/devres.o + */ +static inline int __must_check +request_threaded_irq(unsigned int irq, irq_handler_t handler, + irq_handler_t thread_fn, + unsigned long flags, const char *name, void *dev) +{ + return request_irq(irq, handler, flags, name, dev); +} + +static inline void exit_irq_thread(void) { } +#endif + extern void free_irq(unsigned int, void *); struct device; -extern int __must_check devm_request_irq(struct device *dev, unsigned int irq, - irq_handler_t handler, unsigned long irqflags, - const char *devname, void *dev_id); +extern int __must_check +devm_request_threaded_irq(struct device *dev, unsigned int irq, + irq_handler_t handler, irq_handler_t thread_fn, + unsigned long irqflags, const char *devname, + void *dev_id); + +static inline int __must_check +devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler, + unsigned long irqflags, const char *devname, void *dev_id) +{ + return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags, + devname, dev_id); +} + extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id); /* @@ -278,6 +343,11 @@ enum NR_SOFTIRQS }; +/* map softirq index to softirq name. update 'softirq_to_name' in + * kernel/softirq.c when adding a new softirq. + */ +extern char *softirq_to_name[NR_SOFTIRQS]; + /* softirq mask and active fields moved to irq_cpustat_t in * asm/hardirq.h to get better cache usage. KAO */ @@ -294,6 +364,7 @@ extern void softirq_init(void); #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0) extern void raise_softirq_irqoff(unsigned int nr); extern void raise_softirq(unsigned int nr); +extern void wakeup_softirqd(void); /* This is the worklist that queues up per-cpu softirq work. 
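[Editor's note: the interrupt.h hunk above introduces threaded interrupt handlers: request_irq() becomes a wrapper around request_threaded_irq(), and a hard handler can return IRQ_WAKE_THREAD to defer work to a kernel thread. A hedged sketch of a driver using it; foo_device and its helpers are placeholders, only the request_threaded_irq() signature is taken from the patch:

#include <linux/interrupt.h>

struct foo_device {
	int irq;
	void __iomem *regs;
};

static irqreturn_t foo_quick_check(int irq, void *dev_id)
{
	struct foo_device *foo = dev_id;

	if (!foo_irq_pending(foo))	/* placeholder register check */
		return IRQ_NONE;	/* not ours (line may be shared) */

	foo_mask_irq(foo);		/* placeholder: quiesce the source */
	return IRQ_WAKE_THREAD;		/* defer the real work to foo_irq_thread() */
}

static irqreturn_t foo_irq_thread(int irq, void *dev_id)
{
	struct foo_device *foo = dev_id;

	foo_handle_data(foo);		/* runs in a kthread, may sleep */
	foo_unmask_irq(foo);
	return IRQ_HANDLED;
}

static int foo_setup_irq(struct foo_device *foo)
{
	return request_threaded_irq(foo->irq, foo_quick_check, foo_irq_thread,
				    IRQF_SHARED, "foo", foo);
}
]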
* diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 8a7bfb1b6ca..3af4ffd591b 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -21,6 +21,7 @@ #define IOMMU_READ (1) #define IOMMU_WRITE (2) +#define IOMMU_CACHE (4) /* DMA cache coherency */ struct device; @@ -28,6 +29,8 @@ struct iommu_domain { void *priv; }; +#define IOMMU_CAP_CACHE_COHERENCY 0x1 + struct iommu_ops { int (*domain_init)(struct iommu_domain *domain); void (*domain_destroy)(struct iommu_domain *domain); @@ -39,6 +42,8 @@ struct iommu_ops { size_t size); phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, unsigned long iova); + int (*domain_has_cap)(struct iommu_domain *domain, + unsigned long cap); }; #ifdef CONFIG_IOMMU_API @@ -57,6 +62,8 @@ extern void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova, size_t size); extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, unsigned long iova); +extern int iommu_domain_has_cap(struct iommu_domain *domain, + unsigned long cap); #else /* CONFIG_IOMMU_API */ @@ -107,6 +114,12 @@ static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, return 0; } +static inline int domain_has_cap(struct iommu_domain *domain, + unsigned long cap) +{ + return 0; +} + #endif /* CONFIG_IOMMU_API */ #endif /* __LINUX_IOMMU_H */ diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h index ea330f9e710..3bf40e246a8 100644 --- a/include/linux/ipc_namespace.h +++ b/include/linux/ipc_namespace.h @@ -25,7 +25,7 @@ struct ipc_ids { }; struct ipc_namespace { - struct kref kref; + atomic_t count; struct ipc_ids ids[3]; int sem_ctls[4]; @@ -44,25 +44,57 @@ struct ipc_namespace { int shm_tot; struct notifier_block ipcns_nb; + + /* The kern_mount of the mqueuefs sb. We take a ref on it */ + struct vfsmount *mq_mnt; + + /* # queues in this ns, protected by mq_lock */ + unsigned int mq_queues_count; + + /* next fields are set through sysctl */ + unsigned int mq_queues_max; /* initialized to DFLT_QUEUESMAX */ + unsigned int mq_msg_max; /* initialized to DFLT_MSGMAX */ + unsigned int mq_msgsize_max; /* initialized to DFLT_MSGSIZEMAX */ + }; extern struct ipc_namespace init_ipc_ns; extern atomic_t nr_ipc_ns; -#ifdef CONFIG_SYSVIPC +extern spinlock_t mq_lock; +#if defined(CONFIG_POSIX_MQUEUE) || defined(CONFIG_SYSVIPC) #define INIT_IPC_NS(ns) .ns = &init_ipc_ns, +#else +#define INIT_IPC_NS(ns) +#endif +#ifdef CONFIG_SYSVIPC extern int register_ipcns_notifier(struct ipc_namespace *); extern int cond_register_ipcns_notifier(struct ipc_namespace *); extern void unregister_ipcns_notifier(struct ipc_namespace *); extern int ipcns_notify(unsigned long); - #else /* CONFIG_SYSVIPC */ -#define INIT_IPC_NS(ns) +static inline int register_ipcns_notifier(struct ipc_namespace *ns) +{ return 0; } +static inline int cond_register_ipcns_notifier(struct ipc_namespace *ns) +{ return 0; } +static inline void unregister_ipcns_notifier(struct ipc_namespace *ns) { } +static inline int ipcns_notify(unsigned long l) { return 0; } #endif /* CONFIG_SYSVIPC */ -#if defined(CONFIG_SYSVIPC) && defined(CONFIG_IPC_NS) -extern void free_ipc_ns(struct kref *kref); +#ifdef CONFIG_POSIX_MQUEUE +extern int mq_init_ns(struct ipc_namespace *ns); +/* default values */ +#define DFLT_QUEUESMAX 256 /* max number of message queues */ +#define DFLT_MSGMAX 10 /* max number of messages in each queue */ +#define HARD_MSGMAX (131072/sizeof(void *)) +#define DFLT_MSGSIZEMAX 8192 /* max message size */ +#else +static inline int mq_init_ns(struct ipc_namespace *ns) { 
return 0; } +#endif + +#if defined(CONFIG_IPC_NS) +extern void free_ipc_ns(struct ipc_namespace *ns); extern struct ipc_namespace *copy_ipcs(unsigned long flags, struct ipc_namespace *ns); extern void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids, @@ -72,14 +104,11 @@ extern void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids, static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns) { if (ns) - kref_get(&ns->kref); + atomic_inc(&ns->count); return ns; } -static inline void put_ipc_ns(struct ipc_namespace *ns) -{ - kref_put(&ns->kref, free_ipc_ns); -} +extern void put_ipc_ns(struct ipc_namespace *ns); #else static inline struct ipc_namespace *copy_ipcs(unsigned long flags, struct ipc_namespace *ns) @@ -99,4 +128,18 @@ static inline void put_ipc_ns(struct ipc_namespace *ns) { } #endif + +#ifdef CONFIG_POSIX_MQUEUE_SYSCTL + +struct ctl_table_header; +extern struct ctl_table_header *mq_register_sysctl_table(void); + +#else /* CONFIG_POSIX_MQUEUE_SYSCTL */ + +static inline struct ctl_table_header *mq_register_sysctl_table(void) +{ + return NULL; +} + +#endif /* CONFIG_POSIX_MQUEUE_SYSCTL */ #endif diff --git a/include/linux/irq.h b/include/linux/irq.h index 99d147efe39..b7cbeed972e 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -22,6 +22,7 @@ #include <linux/irqnr.h> #include <linux/errno.h> #include <linux/topology.h> +#include <linux/wait.h> #include <asm/irq.h> #include <asm/ptrace.h> @@ -158,6 +159,8 @@ struct irq_2_iommu; * @affinity: IRQ affinity on SMP * @cpu: cpu index useful for balancing * @pending_mask: pending rebalanced interrupts + * @threads_active: number of irqaction threads currently running + * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers * @dir: /proc/irq/ procfs entry * @name: flow handler name for /proc/interrupts output */ @@ -189,6 +192,8 @@ struct irq_desc { cpumask_var_t pending_mask; #endif #endif + atomic_t threads_active; + wait_queue_head_t wait_for_threads; #ifdef CONFIG_PROC_FS struct proc_dir_entry *dir; #endif diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h index 74bde13224c..b02a3f1d46a 100644 --- a/include/linux/irqflags.h +++ b/include/linux/irqflags.h @@ -24,8 +24,8 @@ # define trace_softirqs_enabled(p) ((p)->softirqs_enabled) # define trace_hardirq_enter() do { current->hardirq_context++; } while (0) # define trace_hardirq_exit() do { current->hardirq_context--; } while (0) -# define trace_softirq_enter() do { current->softirq_context++; } while (0) -# define trace_softirq_exit() do { current->softirq_context--; } while (0) +# define lockdep_softirq_enter() do { current->softirq_context++; } while (0) +# define lockdep_softirq_exit() do { current->softirq_context--; } while (0) # define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1, #else # define trace_hardirqs_on() do { } while (0) @@ -38,8 +38,8 @@ # define trace_softirqs_enabled(p) 0 # define trace_hardirq_enter() do { } while (0) # define trace_hardirq_exit() do { } while (0) -# define trace_softirq_enter() do { } while (0) -# define trace_softirq_exit() do { } while (0) +# define lockdep_softirq_enter() do { } while (0) +# define lockdep_softirq_exit() do { } while (0) # define INIT_TRACE_IRQFLAGS #endif diff --git a/include/linux/irqreturn.h b/include/linux/irqreturn.h index c5584ca5b8c..819acaaac3f 100644 --- a/include/linux/irqreturn.h +++ b/include/linux/irqreturn.h @@ -5,10 +5,12 @@ * enum irqreturn * @IRQ_NONE interrupt was not from this device * @IRQ_HANDLED interrupt was handled by this device + * 
@IRQ_WAKE_THREAD handler requests to wake the handler thread */ enum irqreturn { IRQ_NONE, IRQ_HANDLED, + IRQ_WAKE_THREAD, }; typedef enum irqreturn irqreturn_t; diff --git a/include/linux/jbd.h b/include/linux/jbd.h index 64246dce566..53ae4399da2 100644 --- a/include/linux/jbd.h +++ b/include/linux/jbd.h @@ -35,7 +35,7 @@ #define journal_oom_retry 1 /* - * Define JBD_PARANIOD_IOFAIL to cause a kernel BUG() if ext3 finds + * Define JBD_PARANOID_IOFAIL to cause a kernel BUG() if ext3 finds * certain classes of error which can occur due to failed IOs. Under * normal use we want ext3 to continue after such errors, because * hardware _can_ fail, but for debugging purposes when running tests on @@ -552,6 +552,11 @@ struct transaction_s */ int t_handle_count; + /* + * This transaction is being forced and some process is + * waiting for it to finish. + */ + int t_synchronous_commit:1; }; /** diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index f3fe34391d8..792274269f2 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h @@ -13,10 +13,17 @@ #define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \ 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1) +struct module; + #ifdef CONFIG_KALLSYMS /* Lookup the address for a symbol. Returns 0 if not found. */ unsigned long kallsyms_lookup_name(const char *name); +/* Call a function on each kallsyms symbol in the core kernel */ +int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *, + unsigned long), + void *data); + extern int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize, unsigned long *offset); @@ -43,6 +50,14 @@ static inline unsigned long kallsyms_lookup_name(const char *name) return 0; } +static inline int kallsyms_on_each_symbol(int (*fn)(void *, const char *, + struct module *, + unsigned long), + void *data) +{ + return 0; +} + static inline int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize, unsigned long *offset) diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 556d781e69f..d9e75ec7def 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -242,6 +242,19 @@ extern struct ratelimit_state printk_ratelimit_state; extern int printk_ratelimit(void); extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, unsigned int interval_msec); + +/* + * Print a one-time message (analogous to WARN_ONCE() et al): + */ +#define printk_once(x...) ({ \ + static int __print_once = 1; \ + \ + if (__print_once) { \ + __print_once = 0; \ + printk(x); \ + } \ +}) + void log_buf_kexec_setup(void); #else static inline int vprintk(const char *s, va_list args) @@ -254,6 +267,10 @@ static inline int printk_ratelimit(void) { return 0; } static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, \ unsigned int interval_msec) \ { return false; } + +/* No effect, but we still get type checking even in the !PRINTK case: */ +#define printk_once(x...) printk(x) + static inline void log_buf_kexec_setup(void) { } @@ -375,6 +392,139 @@ static inline char *pack_hex_byte(char *buf, u8 byte) #endif /* + * General tracing related utility functions - trace_printk(), + * tracing_on/tracing_off and tracing_start()/tracing_stop + * + * Use tracing_on/tracing_off when you want to quickly turn on or off + * tracing. It simply enables or disables the recording of the trace events. 
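[Editor's note: printk_once(), added to kernel.h above, latches a static flag so the message is emitted only on the first call. A small hypothetical use; FOO_OLD_IOCTL and foo_old_ioctl() are made-up names:

#include <linux/kernel.h>
#include <linux/fs.h>

/* Hypothetical: warn about a deprecated ioctl exactly once per boot. */
static long foo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	if (cmd == FOO_OLD_IOCTL) {
		printk_once(KERN_WARNING
			    "foo: FOO_OLD_IOCTL is deprecated, use FOO_NEW_IOCTL\n");
		return foo_old_ioctl(file, arg);	/* placeholder */
	}
	return -ENOTTY;
}
]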
+ * This also corresponds to the user space debugfs/tracing/tracing_on + * file, which gives a means for the kernel and userspace to interact. + * Place a tracing_off() in the kernel where you want tracing to end. + * From user space, examine the trace, and then echo 1 > tracing_on + * to continue tracing. + * + * tracing_stop/tracing_start has slightly more overhead. It is used + * by things like suspend to ram where disabling the recording of the + * trace is not enough, but tracing must actually stop because things + * like calling smp_processor_id() may crash the system. + * + * Most likely, you want to use tracing_on/tracing_off. + */ +#ifdef CONFIG_RING_BUFFER +void tracing_on(void); +void tracing_off(void); +/* trace_off_permanent stops recording with no way to bring it back */ +void tracing_off_permanent(void); +int tracing_is_on(void); +#else +static inline void tracing_on(void) { } +static inline void tracing_off(void) { } +static inline void tracing_off_permanent(void) { } +static inline int tracing_is_on(void) { return 0; } +#endif +#ifdef CONFIG_TRACING +extern void tracing_start(void); +extern void tracing_stop(void); +extern void ftrace_off_permanent(void); + +extern void +ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3); + +static inline void __attribute__ ((format (printf, 1, 2))) +____trace_printk_check_format(const char *fmt, ...) +{ +} +#define __trace_printk_check_format(fmt, args...) \ +do { \ + if (0) \ + ____trace_printk_check_format(fmt, ##args); \ +} while (0) + +/** + * trace_printk - printf formatting in the ftrace buffer + * @fmt: the printf format for printing + * + * Note: __trace_printk is an internal function for trace_printk and + * the @ip is passed in via the trace_printk macro. + * + * This function allows a kernel developer to debug fast path sections + * that printk is not appropriate for. By scattering in various + * printk like tracing in the code, a developer can quickly see + * where problems are occurring. + * + * This is intended as a debugging tool for the developer only. + * Please refrain from leaving trace_printks scattered around in + * your code. + */ + +#define trace_printk(fmt, args...) \ +do { \ + __trace_printk_check_format(fmt, ##args); \ + if (__builtin_constant_p(fmt)) { \ + static const char *trace_printk_fmt \ + __attribute__((section("__trace_printk_fmt"))) = \ + __builtin_constant_p(fmt) ? fmt : NULL; \ + \ + __trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args); \ + } else \ + __trace_printk(_THIS_IP_, fmt, ##args); \ +} while (0) + +extern int +__trace_bprintk(unsigned long ip, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); + +extern int +__trace_printk(unsigned long ip, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); + +/* + * The double __builtin_constant_p is because gcc will give us an error + * if we try to allocate the static variable to fmt if it is not a + * constant. Even with the outer if statement. + */ +#define ftrace_vprintk(fmt, vargs) \ +do { \ + if (__builtin_constant_p(fmt)) { \ + static const char *trace_printk_fmt \ + __attribute__((section("__trace_printk_fmt"))) = \ + __builtin_constant_p(fmt) ? 
fmt : NULL; \ + \ + __ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \ + } else \ + __ftrace_vprintk(_THIS_IP_, fmt, vargs); \ +} while (0) + +extern int +__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap); + +extern int +__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap); + +extern void ftrace_dump(void); +#else +static inline void +ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { } +static inline int +trace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2))); + +static inline void tracing_start(void) { } +static inline void tracing_stop(void) { } +static inline void ftrace_off_permanent(void) { } +static inline int +trace_printk(const char *fmt, ...) +{ + return 0; +} +static inline int +ftrace_vprintk(const char *fmt, va_list ap) +{ + return 0; +} +static inline void ftrace_dump(void) { } +#endif /* CONFIG_TRACING */ + +/* * Display an IP address in readable format. */ diff --git a/include/linux/key.h b/include/linux/key.h index 21d32a142c0..e544f466d69 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -20,6 +20,7 @@ #include <linux/rbtree.h> #include <linux/rcupdate.h> #include <linux/sysctl.h> +#include <linux/rwsem.h> #include <asm/atomic.h> #ifdef __KERNEL__ diff --git a/include/linux/kmod.h b/include/linux/kmod.h index 92213a9194e..384ca8bbf1a 100644 --- a/include/linux/kmod.h +++ b/include/linux/kmod.h @@ -29,10 +29,15 @@ #ifdef CONFIG_MODULES /* modprobe exit status on success, -ve on error. Return value * usually useless though. */ -extern int request_module(const char * name, ...) __attribute__ ((format (printf, 1, 2))); -#define try_then_request_module(x, mod...) ((x) ?: (request_module(mod), (x))) +extern int __request_module(bool wait, const char *name, ...) \ + __attribute__((format(printf, 2, 3))); +#define request_module(mod...) __request_module(true, mod) +#define request_module_nowait(mod...) __request_module(false, mod) +#define try_then_request_module(x, mod...) \ + ((x) ?: (__request_module(true, mod), (x))) #else -static inline int request_module(const char * name, ...) { return -ENOSYS; } +static inline int request_module(const char *name, ...) { return -ENOSYS; } +static inline int request_module_nowait(const char *name, ...) { return -ENOSYS; } #define try_then_request_module(x, mod...) (x) #endif diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 2ec6cc14a11..bcd9c07848b 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -94,12 +94,16 @@ struct kprobe { /* Called after addr is executed, unless... */ kprobe_post_handler_t post_handler; - /* ... called if executing addr causes a fault (eg. page fault). - * Return 1 if it handled fault, otherwise kernel will see it. */ + /* + * ... called if executing addr causes a fault (eg. page fault). + * Return 1 if it handled fault, otherwise kernel will see it. + */ kprobe_fault_handler_t fault_handler; - /* ... called if breakpoint trap occurs in probe handler. - * Return 1 if it handled break, otherwise kernel will see it. */ + /* + * ... called if breakpoint trap occurs in probe handler. + * Return 1 if it handled break, otherwise kernel will see it. + */ kprobe_break_handler_t break_handler; /* Saved opcode (which has been replaced with breakpoint) */ @@ -108,18 +112,28 @@ struct kprobe { /* copy of the original instruction */ struct arch_specific_insn ainsn; - /* Indicates various status flags. Protected by kprobe_mutex. */ + /* + * Indicates various status flags. 
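[Editor's note: trace_printk() and ftrace_vprintk() above write into the ftrace ring buffer rather than the console, so they are cheap enough for fast paths, with ftrace_dump() as the panic-time escape hatch. An illustrative sketch under assumed names (foo_queue and foo_request are placeholders):

#include <linux/kernel.h>

/* Illustrative only: record queue depth in the trace buffer on every
 * submission instead of flooding dmesg with printk(). */
static void foo_submit(struct foo_queue *q, struct foo_request *rq)
{
	trace_printk("submit tag=%d depth=%d\n", rq->tag, q->depth);
	/* ... real submission work would go here ... */
}

/* On a fatal error, dump whatever the ring buffer captured so far. */
static void foo_fatal_error(struct foo_queue *q)
{
	trace_printk("fatal error, last tag=%d\n", q->last_tag);
	ftrace_dump();
}
]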
+ * Protected by kprobe_mutex after this kprobe is registered. + */ u32 flags; }; /* Kprobe status flags */ #define KPROBE_FLAG_GONE 1 /* breakpoint has already gone */ +#define KPROBE_FLAG_DISABLED 2 /* probe is temporarily disabled */ +/* Has this kprobe gone ? */ static inline int kprobe_gone(struct kprobe *p) { return p->flags & KPROBE_FLAG_GONE; } +/* Is this kprobe disabled ? */ +static inline int kprobe_disabled(struct kprobe *p) +{ + return p->flags & (KPROBE_FLAG_DISABLED | KPROBE_FLAG_GONE); +} /* * Special probe type that uses setjmp-longjmp type tricks to resume * execution at a specified entry with a matching prototype corresponding @@ -279,6 +293,9 @@ void unregister_kretprobes(struct kretprobe **rps, int num); void kprobe_flush_task(struct task_struct *tk); void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head); +int disable_kprobe(struct kprobe *kp); +int enable_kprobe(struct kprobe *kp); + #else /* !CONFIG_KPROBES: */ static inline int kprobes_built_in(void) @@ -345,5 +362,30 @@ static inline void unregister_kretprobes(struct kretprobe **rps, int num) static inline void kprobe_flush_task(struct task_struct *tk) { } +static inline int disable_kprobe(struct kprobe *kp) +{ + return -ENOSYS; +} +static inline int enable_kprobe(struct kprobe *kp) +{ + return -ENOSYS; +} #endif /* CONFIG_KPROBES */ +static inline int disable_kretprobe(struct kretprobe *rp) +{ + return disable_kprobe(&rp->kp); +} +static inline int enable_kretprobe(struct kretprobe *rp) +{ + return enable_kprobe(&rp->kp); +} +static inline int disable_jprobe(struct jprobe *jp) +{ + return disable_kprobe(&jp->kp); +} +static inline int enable_jprobe(struct jprobe *jp) +{ + return enable_kprobe(&jp->kp); +} + #endif /* _LINUX_KPROBES_H */ diff --git a/include/linux/leds-bd2802.h b/include/linux/leds-bd2802.h new file mode 100644 index 00000000000..42f854a1a19 --- /dev/null +++ b/include/linux/leds-bd2802.h @@ -0,0 +1,26 @@ +/* + * leds-bd2802.h - RGB LED Driver + * + * Copyright (C) 2009 Samsung Electronics + * Kim Kyuwon <q1.kim@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
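[Editor's note: disable_kprobe()/enable_kprobe() above let a registered probe be parked without unregistering it, and the kretprobe/jprobe wrappers simply forward to them. A hedged sketch; the probed symbol and handler are arbitrary examples, not taken from the patch:

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/kprobes.h>

static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("hit %s\n", p->symbol_name);
	return 0;
}

static struct kprobe kp = {
	.symbol_name = "do_fork",	/* any traceable symbol would do */
	.pre_handler = handler_pre,
};

static int __init probe_init(void)
{
	int ret = register_kprobe(&kp);

	if (ret)
		return ret;
	disable_kprobe(&kp);	/* parked: stays registered but no longer fires */
	/* ... later, enable_kprobe(&kp) re-arms it without re-registering ... */
	return 0;
}
]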
+ * + * Datasheet: http://www.rohm.com/products/databook/driver/pdf/bd2802gu-e.pdf + * + */ +#ifndef _LEDS_BD2802_H_ +#define _LEDS_BD2802_H_ + +struct bd2802_led_platform_data{ + int reset_gpio; + u8 rgb_time; +}; + +#define RGB_TIME(slopedown, slopeup, waveform) \ + ((slopedown) << 6 | (slopeup) << 4 | (waveform)) + +#endif /* _LEDS_BD2802_H_ */ + diff --git a/include/linux/leds.h b/include/linux/leds.h index 24489da701e..376fe07732e 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -30,6 +30,7 @@ enum led_brightness { struct led_classdev { const char *name; int brightness; + int max_brightness; int flags; /* Lower 16 bits reflect status */ @@ -140,7 +141,8 @@ struct gpio_led { const char *name; const char *default_trigger; unsigned gpio; - u8 active_low; + u8 active_low : 1; + u8 retain_state_suspended : 1; }; struct gpio_led_platform_data { diff --git a/include/linux/leds_pwm.h b/include/linux/leds_pwm.h new file mode 100644 index 00000000000..33a07116748 --- /dev/null +++ b/include/linux/leds_pwm.h @@ -0,0 +1,21 @@ +/* + * PWM LED driver data - see drivers/leds/leds-pwm.c + */ +#ifndef __LINUX_LEDS_PWM_H +#define __LINUX_LEDS_PWM_H + +struct led_pwm { + const char *name; + const char *default_trigger; + unsigned pwm_id; + u8 active_low; + unsigned max_brightness; + unsigned pwm_period_ns; +}; + +struct led_pwm_platform_data { + int num_leds; + struct led_pwm *leds; +}; + +#endif diff --git a/include/linux/libata.h b/include/linux/libata.h index 76262d83656..b450a262885 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -379,7 +379,7 @@ enum { ATA_HORKAGE_BRIDGE_OK = (1 << 10), /* no bridge limits */ ATA_HORKAGE_ATAPI_MOD16_DMA = (1 << 11), /* use ATAPI DMA for commands not multiple of 16 bytes */ - ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firwmare update warning */ + ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firmware update warning */ ATA_HORKAGE_1_5_GBPS = (1 << 13), /* force 1.5 Gbps */ /* DMA mask for user DMA control: User visible values; DO NOT diff --git a/include/linux/lockd/xdr.h b/include/linux/lockd/xdr.h index 7dc5b6cb44c..d39ed1cc5fb 100644 --- a/include/linux/lockd/xdr.h +++ b/include/linux/lockd/xdr.h @@ -25,13 +25,13 @@ struct svc_rqst; #define NLM_MAXCOOKIELEN 32 #define NLM_MAXSTRLEN 1024 -#define nlm_granted __constant_htonl(NLM_LCK_GRANTED) -#define nlm_lck_denied __constant_htonl(NLM_LCK_DENIED) -#define nlm_lck_denied_nolocks __constant_htonl(NLM_LCK_DENIED_NOLOCKS) -#define nlm_lck_blocked __constant_htonl(NLM_LCK_BLOCKED) -#define nlm_lck_denied_grace_period __constant_htonl(NLM_LCK_DENIED_GRACE_PERIOD) +#define nlm_granted cpu_to_be32(NLM_LCK_GRANTED) +#define nlm_lck_denied cpu_to_be32(NLM_LCK_DENIED) +#define nlm_lck_denied_nolocks cpu_to_be32(NLM_LCK_DENIED_NOLOCKS) +#define nlm_lck_blocked cpu_to_be32(NLM_LCK_BLOCKED) +#define nlm_lck_denied_grace_period cpu_to_be32(NLM_LCK_DENIED_GRACE_PERIOD) -#define nlm_drop_reply __constant_htonl(30000) +#define nlm_drop_reply cpu_to_be32(30000) /* Lock info passed via NLM */ struct nlm_lock { diff --git a/include/linux/lockd/xdr4.h b/include/linux/lockd/xdr4.h index 12bfe09de2b..7353821341e 100644 --- a/include/linux/lockd/xdr4.h +++ b/include/linux/lockd/xdr4.h @@ -15,11 +15,11 @@ #include <linux/lockd/xdr.h> /* error codes new to NLMv4 */ -#define nlm4_deadlock __constant_htonl(NLM_DEADLCK) -#define nlm4_rofs __constant_htonl(NLM_ROFS) -#define nlm4_stale_fh __constant_htonl(NLM_STALE_FH) -#define nlm4_fbig __constant_htonl(NLM_FBIG) -#define nlm4_failed __constant_htonl(NLM_FAILED) 
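[Editor's note: the new leds_pwm.h above describes each PWM-backed LED for the generic driver in drivers/leds/leds-pwm.c. A hypothetical board-file wiring; the PWM channel, period and the "leds_pwm" platform-device name are assumptions, only the structure fields come from the header:

#include <linux/leds_pwm.h>
#include <linux/platform_device.h>

static struct led_pwm board_pwm_leds[] = {
	{
		.name		 = "board:green:status",
		.default_trigger = "heartbeat",
		.pwm_id		 = 0,		/* assumed PWM channel */
		.max_brightness	 = 255,
		.pwm_period_ns	 = 7812500,	/* ~128 Hz, arbitrary */
	},
};

static struct led_pwm_platform_data board_pwm_led_data = {
	.num_leds = ARRAY_SIZE(board_pwm_leds),
	.leds	  = board_pwm_leds,
};

static struct platform_device board_pwm_led_dev = {
	.name		   = "leds_pwm",	/* assumed driver name */
	.id		   = -1,
	.dev.platform_data = &board_pwm_led_data,
};
]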
+#define nlm4_deadlock cpu_to_be32(NLM_DEADLCK) +#define nlm4_rofs cpu_to_be32(NLM_ROFS) +#define nlm4_stale_fh cpu_to_be32(NLM_STALE_FH) +#define nlm4_fbig cpu_to_be32(NLM_FBIG) +#define nlm4_failed cpu_to_be32(NLM_FAILED) diff --git a/include/linux/memory.h b/include/linux/memory.h index 42767d1a62e..37fa19b34ef 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h @@ -110,4 +110,10 @@ struct memory_accessor { off_t offset, size_t count); }; +/* + * Kernel text modification mutex, used for code patching. Users of this lock + * can sleep. + */ +extern struct mutex text_mutex; + #endif /* _LINUX_MEMORY_H_ */ diff --git a/include/linux/mfd/ds1wm.h b/include/linux/mfd/ds1wm.h new file mode 100644 index 00000000000..be469a357cb --- /dev/null +++ b/include/linux/mfd/ds1wm.h @@ -0,0 +1,6 @@ +/* MFD cell driver data for the DS1WM driver */ + +struct ds1wm_driver_data { + int active_high; + int clock_rate; +}; diff --git a/include/linux/mfd/htc-pasic3.h b/include/linux/mfd/htc-pasic3.h index b4294f12c4f..3d3ed67bd96 100644 --- a/include/linux/mfd/htc-pasic3.h +++ b/include/linux/mfd/htc-pasic3.h @@ -48,7 +48,6 @@ struct pasic3_leds_machinfo { struct pasic3_platform_data { struct pasic3_leds_machinfo *led_pdata; - unsigned int bus_shift; unsigned int clock_rate; }; diff --git a/include/linux/mfd/pcf50633/core.h b/include/linux/mfd/pcf50633/core.h index 4455b212d75..c8f51c3c0a7 100644 --- a/include/linux/mfd/pcf50633/core.h +++ b/include/linux/mfd/pcf50633/core.h @@ -29,6 +29,8 @@ struct pcf50633_platform_data { char **batteries; int num_batteries; + int charging_restart_interval; + /* Callbacks */ void (*probe_done)(struct pcf50633 *); void (*mbc_event_callback)(struct pcf50633 *, int); diff --git a/include/linux/mfd/pcf50633/mbc.h b/include/linux/mfd/pcf50633/mbc.h index 6e17619b773..4119579acf2 100644 --- a/include/linux/mfd/pcf50633/mbc.h +++ b/include/linux/mfd/pcf50633/mbc.h @@ -128,7 +128,6 @@ enum pcf50633_reg_mbcs3 { int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma); int pcf50633_mbc_get_status(struct pcf50633 *); -void pcf50633_mbc_set_status(struct pcf50633 *, int what, int status); #endif diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h index 980669d50dc..42cca672f34 100644 --- a/include/linux/mfd/wm8350/core.h +++ b/include/linux/mfd/wm8350/core.h @@ -640,9 +640,11 @@ struct wm8350 { * * @init: Function called during driver initialisation. Should be * used by the platform to configure GPIO functions and similar. + * @irq_high: Set if WM8350 IRQ is active high. */ struct wm8350_platform_data { int (*init)(struct wm8350 *wm8350); + int irq_high; }; diff --git a/include/linux/mg_disk.h b/include/linux/mg_disk.h new file mode 100644 index 00000000000..1f76b1ebf62 --- /dev/null +++ b/include/linux/mg_disk.h @@ -0,0 +1,206 @@ +/* + * include/linux/mg_disk.c + * + * Support for the mGine m[g]flash IO mode. + * Based on legacy hd.c + * + * (c) 2008 mGine Co.,LTD + * (c) 2008 unsik Kim <donari75@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __MG_DISK_H__ +#define __MG_DISK_H__ + +#include <linux/blkdev.h> +#include <linux/ata.h> + +/* name for block device */ +#define MG_DISK_NAME "mgd" +/* name for platform device */ +#define MG_DEV_NAME "mg_disk" + +#define MG_DISK_MAJ 0 +#define MG_DISK_MAX_PART 16 +#define MG_SECTOR_SIZE 512 +#define MG_MAX_SECTS 256 + +/* Register offsets */ +#define MG_BUFF_OFFSET 0x8000 +#define MG_STORAGE_BUFFER_SIZE 0x200 +#define MG_REG_OFFSET 0xC000 +#define MG_REG_FEATURE (MG_REG_OFFSET + 2) /* write case */ +#define MG_REG_ERROR (MG_REG_OFFSET + 2) /* read case */ +#define MG_REG_SECT_CNT (MG_REG_OFFSET + 4) +#define MG_REG_SECT_NUM (MG_REG_OFFSET + 6) +#define MG_REG_CYL_LOW (MG_REG_OFFSET + 8) +#define MG_REG_CYL_HIGH (MG_REG_OFFSET + 0xA) +#define MG_REG_DRV_HEAD (MG_REG_OFFSET + 0xC) +#define MG_REG_COMMAND (MG_REG_OFFSET + 0xE) /* write case */ +#define MG_REG_STATUS (MG_REG_OFFSET + 0xE) /* read case */ +#define MG_REG_DRV_CTRL (MG_REG_OFFSET + 0x10) +#define MG_REG_BURST_CTRL (MG_REG_OFFSET + 0x12) + +/* "Drive Select/Head Register" bit values */ +#define MG_REG_HEAD_MUST_BE_ON 0xA0 /* These 2 bits are always on */ +#define MG_REG_HEAD_DRIVE_MASTER (0x00 | MG_REG_HEAD_MUST_BE_ON) +#define MG_REG_HEAD_DRIVE_SLAVE (0x10 | MG_REG_HEAD_MUST_BE_ON) +#define MG_REG_HEAD_LBA_MODE (0x40 | MG_REG_HEAD_MUST_BE_ON) + + +/* "Device Control Register" bit values */ +#define MG_REG_CTRL_INTR_ENABLE 0x0 +#define MG_REG_CTRL_INTR_DISABLE (0x1<<1) +#define MG_REG_CTRL_RESET (0x1<<2) +#define MG_REG_CTRL_INTR_POLA_ACTIVE_HIGH 0x0 +#define MG_REG_CTRL_INTR_POLA_ACTIVE_LOW (0x1<<4) +#define MG_REG_CTRL_DPD_POLA_ACTIVE_LOW 0x0 +#define MG_REG_CTRL_DPD_POLA_ACTIVE_HIGH (0x1<<5) +#define MG_REG_CTRL_DPD_DISABLE 0x0 +#define MG_REG_CTRL_DPD_ENABLE (0x1<<6) + +/* Status register bit */ +/* error bit in status register */ +#define MG_REG_STATUS_BIT_ERROR 0x01 +/* corrected error in status register */ +#define MG_REG_STATUS_BIT_CORRECTED_ERROR 0x04 +/* data request bit in status register */ +#define MG_REG_STATUS_BIT_DATA_REQ 0x08 +/* DSC - Drive Seek Complete */ +#define MG_REG_STATUS_BIT_SEEK_DONE 0x10 +/* DWF - Drive Write Fault */ +#define MG_REG_STATUS_BIT_WRITE_FAULT 0x20 +#define MG_REG_STATUS_BIT_READY 0x40 +#define MG_REG_STATUS_BIT_BUSY 0x80 + +/* handy status */ +#define MG_STAT_READY (MG_REG_STATUS_BIT_READY | MG_REG_STATUS_BIT_SEEK_DONE) +#define MG_READY_OK(s) (((s) & (MG_STAT_READY | \ + (MG_REG_STATUS_BIT_BUSY | \ + MG_REG_STATUS_BIT_WRITE_FAULT | \ + MG_REG_STATUS_BIT_ERROR))) == MG_STAT_READY) + +/* Error register */ +#define MG_REG_ERR_AMNF 0x01 +#define MG_REG_ERR_ABRT 0x04 +#define MG_REG_ERR_IDNF 0x10 +#define MG_REG_ERR_UNC 0x40 +#define MG_REG_ERR_BBK 0x80 + +/* error code for others */ +#define MG_ERR_NONE 0 +#define MG_ERR_TIMEOUT 0x100 +#define MG_ERR_INIT_STAT 0x101 +#define MG_ERR_TRANSLATION 0x102 +#define MG_ERR_CTRL_RST 0x103 +#define MG_ERR_INV_STAT 0x104 +#define MG_ERR_RSTOUT 0x105 + +#define MG_MAX_ERRORS 6 /* Max read/write errors */ + +/* command */ +#define MG_CMD_RD 0x20 +#define MG_CMD_WR 0x30 +#define MG_CMD_SLEEP 0x99 +#define MG_CMD_WAKEUP 0xC3 +#define MG_CMD_ID 0xEC +#define MG_CMD_WR_CONF 0x3C +#define MG_CMD_RD_CONF 0x40 + +/* operation mode */ +#define MG_OP_CASCADE (1 << 0) +#define MG_OP_CASCADE_SYNC_RD (1 << 1) +#define MG_OP_CASCADE_SYNC_WR (1 << 2) +#define MG_OP_INTERLEAVE (1 << 3) + +/* synchronous */ +#define MG_BURST_LAT_4 (3 << 4) +#define MG_BURST_LAT_5 (4 << 4) +#define MG_BURST_LAT_6 (5 << 4) +#define MG_BURST_LAT_7 (6 << 4) +#define 
MG_BURST_LAT_8 (7 << 4) +#define MG_BURST_LEN_4 (1 << 1) +#define MG_BURST_LEN_8 (2 << 1) +#define MG_BURST_LEN_16 (3 << 1) +#define MG_BURST_LEN_32 (4 << 1) +#define MG_BURST_LEN_CONT (0 << 1) + +/* timeout value (unit: ms) */ +#define MG_TMAX_CONF_TO_CMD 1 +#define MG_TMAX_WAIT_RD_DRQ 10 +#define MG_TMAX_WAIT_WR_DRQ 500 +#define MG_TMAX_RST_TO_BUSY 10 +#define MG_TMAX_HDRST_TO_RDY 500 +#define MG_TMAX_SWRST_TO_RDY 500 +#define MG_TMAX_RSTOUT 3000 + +/* device attribution */ +/* use mflash as boot device */ +#define MG_BOOT_DEV (1 << 0) +/* use mflash as storage device */ +#define MG_STORAGE_DEV (1 << 1) +/* same as MG_STORAGE_DEV, but bootloader already done reset sequence */ +#define MG_STORAGE_DEV_SKIP_RST (1 << 2) + +#define MG_DEV_MASK (MG_BOOT_DEV | MG_STORAGE_DEV | MG_STORAGE_DEV_SKIP_RST) + +/* names of GPIO resource */ +#define MG_RST_PIN "mg_rst" +/* except MG_BOOT_DEV, reset-out pin should be assigned */ +#define MG_RSTOUT_PIN "mg_rstout" + +/* private driver data */ +struct mg_drv_data { + /* disk resource */ + u32 use_polling; + + /* device attribution */ + u32 dev_attr; + + /* internally used */ + struct mg_host *host; +}; + +/* main structure for mflash driver */ +struct mg_host { + struct device *dev; + + struct request_queue *breq; + spinlock_t lock; + struct gendisk *gd; + + struct timer_list timer; + void (*mg_do_intr) (struct mg_host *); + + u16 id[ATA_ID_WORDS]; + + u16 cyls; + u16 heads; + u16 sectors; + u32 n_sectors; + u32 nres_sectors; + + void __iomem *dev_base; + unsigned int irq; + unsigned int rst; + unsigned int rstout; + + u32 major; + u32 error; +}; + +/* + * Debugging macro and defines + */ +#undef DO_MG_DEBUG +#ifdef DO_MG_DEBUG +# define MG_DBG(fmt, args...) \ + printk(KERN_DEBUG "%s:%d "fmt, __func__, __LINE__, ##args) +#else /* CONFIG_MG_DEBUG */ +# define MG_DBG(fmt, args...) do { } while (0) +#endif /* CONFIG_MG_DEBUG */ + +#endif diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 4e457256bd3..3e7615e9087 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -192,5 +192,10 @@ static inline void mmc_signal_sdio_irq(struct mmc_host *host) wake_up_process(host->sdio_irq_thread); } +struct regulator; + +int mmc_regulator_get_ocrmask(struct regulator *supply); +int mmc_regulator_set_ocr(struct regulator *supply, unsigned short vdd_bit); + #endif diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 26ef24076b7..186ec6ab334 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -764,12 +764,6 @@ extern int numa_zonelist_order_handler(struct ctl_table *, int, extern char numa_zonelist_order[]; #define NUMA_ZONELIST_ORDER_LEN 16 /* string buffer size */ -#include <linux/topology.h> -/* Returns the number of the current Node. */ -#ifndef numa_node_id -#define numa_node_id() (cpu_to_node(raw_smp_processor_id())) -#endif - #ifndef CONFIG_NEED_MULTIPLE_NODES extern struct pglist_data contig_page_data; diff --git a/include/linux/module.h b/include/linux/module.h index 145a75528cc..627ac082e2a 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -248,6 +248,10 @@ struct module const unsigned long *crcs; unsigned int num_syms; + /* Kernel parameters. */ + struct kernel_param *kp; + unsigned int num_kp; + /* GPL-only exported symbols. 
*/ unsigned int num_gpl_syms; const struct kernel_symbol *gpl_syms; @@ -329,6 +333,11 @@ struct module unsigned int num_tracepoints; #endif +#ifdef CONFIG_TRACING + const char **trace_bprintk_fmt_start; + unsigned int num_trace_bprintk_fmt; +#endif + #ifdef CONFIG_MODULE_UNLOAD /* What modules depend on me? */ struct list_head modules_which_use_me; @@ -350,6 +359,8 @@ struct module #define MODULE_ARCH_INIT {} #endif +extern struct mutex module_mutex; + /* FIXME: It'd be nice to isolate modules during init, too, so they aren't used before they (may) fail. But presently too much code (IDE & SCSI) require entry into the module during init.*/ @@ -358,10 +369,10 @@ static inline int module_is_live(struct module *mod) return mod->state != MODULE_STATE_GOING; } -/* Is this address in a module? (second is with no locks, for oops) */ -struct module *module_text_address(unsigned long addr); struct module *__module_text_address(unsigned long addr); -int is_module_address(unsigned long addr); +struct module *__module_address(unsigned long addr); +bool is_module_address(unsigned long addr); +bool is_module_text_address(unsigned long addr); static inline int within_module_core(unsigned long addr, struct module *mod) { @@ -375,6 +386,31 @@ static inline int within_module_init(unsigned long addr, struct module *mod) addr < (unsigned long)mod->module_init + mod->init_size; } +/* Search for module by name: must hold module_mutex. */ +struct module *find_module(const char *name); + +struct symsearch { + const struct kernel_symbol *start, *stop; + const unsigned long *crcs; + enum { + NOT_GPL_ONLY, + GPL_ONLY, + WILL_BE_GPL_ONLY, + } licence; + bool unused; +}; + +/* Search for an exported symbol by name. */ +const struct kernel_symbol *find_symbol(const char *name, + struct module **owner, + const unsigned long **crc, + bool gplok, + bool warn); + +/* Walk the exported symbol table */ +bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner, + unsigned int symnum, void *data), void *data); + /* Returns 0 and fills in value, defined and namebuf, or -ERANGE if symnum out of range. */ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, @@ -383,6 +419,10 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, /* Look for this name: can be of form module:name. */ unsigned long module_kallsyms_lookup_name(const char *name); +int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, + struct module *, unsigned long), + void *data); + extern void __module_put_and_exit(struct module *mod, long code) __attribute__((noreturn)); #define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code); @@ -444,6 +484,7 @@ static inline void __module_get(struct module *module) #define symbol_put_addr(p) do { } while(0) #endif /* CONFIG_MODULE_UNLOAD */ +int use_module(struct module *a, struct module *b); /* This is a #define so the string doesn't get put in every .o file */ #define module_name(mod) \ @@ -490,21 +531,24 @@ search_module_extables(unsigned long addr) return NULL; } -/* Is this address in a module? */ -static inline struct module *module_text_address(unsigned long addr) +static inline struct module *__module_address(unsigned long addr) { return NULL; } -/* Is this address in a module? 
(don't take a lock, we're oopsing) */ static inline struct module *__module_text_address(unsigned long addr) { return NULL; } -static inline int is_module_address(unsigned long addr) +static inline bool is_module_address(unsigned long addr) { - return 0; + return false; +} + +static inline bool is_module_text_address(unsigned long addr) +{ + return false; } /* Get/put a kernel symbol (calls should be symmetric) */ @@ -559,6 +603,14 @@ static inline unsigned long module_kallsyms_lookup_name(const char *name) return 0; } +static inline int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, + struct module *, + unsigned long), + void *data) +{ + return 0; +} + static inline int register_module_notifier(struct notifier_block * nb) { /* no events will happen anyway, so this can always succeed */ diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index e4af3399ef4..a4f0b931846 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h @@ -138,6 +138,16 @@ extern int parse_args(const char *name, unsigned num, int (*unknown)(char *param, char *val)); +/* Called by module remove. */ +#ifdef CONFIG_SYSFS +extern void destroy_params(const struct kernel_param *params, unsigned num); +#else +static inline void destroy_params(const struct kernel_param *params, + unsigned num) +{ +} +#endif /* !CONFIG_SYSFS */ + /* All the helper functions */ /* The macros to do compile-time type checking stolen from Jakub Jelinek, who IIRC came up with this idea for the 2.4 module init code. */ diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 3aa5d77c2cd..5675b63a063 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -11,6 +11,7 @@ #include <linux/module.h> #include <linux/uio.h> #include <linux/notifier.h> +#include <linux/device.h> #include <linux/mtd/compatmac.h> #include <mtd/mtd-abi.h> @@ -162,6 +163,20 @@ struct mtd_info { /* We probably shouldn't allow XIP if the unpoint isn't a NULL */ void (*unpoint) (struct mtd_info *mtd, loff_t from, size_t len); + /* Allow NOMMU mmap() to directly map the device (if not NULL) + * - return the address to which the offset maps + * - return -ENOSYS to indicate refusal to do the mapping + */ + unsigned long (*get_unmapped_area) (struct mtd_info *mtd, + unsigned long len, + unsigned long offset, + unsigned long flags); + + /* Backing device capabilities for this device + * - provides mmap capabilities + */ + struct backing_dev_info *backing_dev_info; + int (*read) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf); int (*write) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf); @@ -223,6 +238,7 @@ struct mtd_info { void *priv; struct module *owner; + struct device dev; int usecount; /* If the driver is something smart, like UBI, it may need to maintain @@ -233,6 +249,11 @@ struct mtd_info { void (*put_device) (struct mtd_info *mtd); }; +static inline struct mtd_info *dev_to_mtd(struct device *dev) +{ + return dev ? container_of(dev, struct mtd_info, dev) : NULL; +} + static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd) { if (mtd->erasesize_shift) diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index db5b63da2a7..7efb9be3466 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -43,8 +43,8 @@ extern void nand_wait_ready(struct mtd_info *mtd); * is supported now. If you add a chip with bigger oobsize/page * adjust this accordingly. 
*/ -#define NAND_MAX_OOBSIZE 64 -#define NAND_MAX_PAGESIZE 2048 +#define NAND_MAX_OOBSIZE 128 +#define NAND_MAX_PAGESIZE 4096 /* * Constants for hardware specific CLE/ALE/NCE function diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h index a45dd831b3f..7535a74083b 100644 --- a/include/linux/mtd/partitions.h +++ b/include/linux/mtd/partitions.h @@ -76,4 +76,16 @@ int __devinit of_mtd_parse_partitions(struct device *dev, struct device_node *node, struct mtd_partition **pparts); +#ifdef CONFIG_MTD_PARTITIONS +static inline int mtd_has_partitions(void) { return 1; } +#else +static inline int mtd_has_partitions(void) { return 0; } +#endif + +#ifdef CONFIG_MTD_CMDLINE_PARTS +static inline int mtd_has_cmdlinepart(void) { return 1; } +#else +static inline int mtd_has_cmdlinepart(void) { return 0; } +#endif + #endif diff --git a/include/linux/nfs.h b/include/linux/nfs.h index 54af92c1c70..214d499718f 100644 --- a/include/linux/nfs.h +++ b/include/linux/nfs.h @@ -109,7 +109,6 @@ NFSERR_FILE_OPEN = 10046, /* v4 */ NFSERR_ADMIN_REVOKED = 10047, /* v4 */ NFSERR_CB_PATH_DOWN = 10048, /* v4 */ - NFSERR_REPLAY_ME = 10049 /* v4 */ }; /* NFSv2 file types - beware, these are not the same in NFSv3 */ diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index b912311a56b..e3f0cbcbd0d 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -21,6 +21,7 @@ #define NFS4_FHSIZE 128 #define NFS4_MAXPATHLEN PATH_MAX #define NFS4_MAXNAMLEN NAME_MAX +#define NFS4_MAX_SESSIONID_LEN 16 #define NFS4_ACCESS_READ 0x0001 #define NFS4_ACCESS_LOOKUP 0x0002 @@ -38,6 +39,7 @@ #define NFS4_OPEN_RESULT_CONFIRM 0x0002 #define NFS4_OPEN_RESULT_LOCKTYPE_POSIX 0x0004 +#define NFS4_SHARE_ACCESS_MASK 0x000F #define NFS4_SHARE_ACCESS_READ 0x0001 #define NFS4_SHARE_ACCESS_WRITE 0x0002 #define NFS4_SHARE_ACCESS_BOTH 0x0003 @@ -45,6 +47,19 @@ #define NFS4_SHARE_DENY_WRITE 0x0002 #define NFS4_SHARE_DENY_BOTH 0x0003 +/* nfs41 */ +#define NFS4_SHARE_WANT_MASK 0xFF00 +#define NFS4_SHARE_WANT_NO_PREFERENCE 0x0000 +#define NFS4_SHARE_WANT_READ_DELEG 0x0100 +#define NFS4_SHARE_WANT_WRITE_DELEG 0x0200 +#define NFS4_SHARE_WANT_ANY_DELEG 0x0300 +#define NFS4_SHARE_WANT_NO_DELEG 0x0400 +#define NFS4_SHARE_WANT_CANCEL 0x0500 + +#define NFS4_SHARE_WHEN_MASK 0xF0000 +#define NFS4_SHARE_SIGNAL_DELEG_WHEN_RESRC_AVAIL 0x10000 +#define NFS4_SHARE_PUSH_DELEG_WHEN_UNCONTENDED 0x20000 + #define NFS4_SET_TO_SERVER_TIME 0 #define NFS4_SET_TO_CLIENT_TIME 1 @@ -88,6 +103,31 @@ #define NFS4_ACE_GENERIC_EXECUTE 0x001200A0 #define NFS4_ACE_MASK_ALL 0x001F01FF +#define EXCHGID4_FLAG_SUPP_MOVED_REFER 0x00000001 +#define EXCHGID4_FLAG_SUPP_MOVED_MIGR 0x00000002 +#define EXCHGID4_FLAG_USE_NON_PNFS 0x00010000 +#define EXCHGID4_FLAG_USE_PNFS_MDS 0x00020000 +#define EXCHGID4_FLAG_USE_PNFS_DS 0x00040000 +#define EXCHGID4_FLAG_UPD_CONFIRMED_REC_A 0x40000000 +#define EXCHGID4_FLAG_CONFIRMED_R 0x80000000 +/* + * Since the validity of these bits depends on whether + * they're set in the argument or response, have separate + * invalid flag masks for arg (_A) and resp (_R). 
+ */ +#define EXCHGID4_FLAG_MASK_A 0x40070003 +#define EXCHGID4_FLAG_MASK_R 0x80070003 + +#define SEQ4_STATUS_CB_PATH_DOWN 0x00000001 +#define SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRING 0x00000002 +#define SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRED 0x00000004 +#define SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED 0x00000008 +#define SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED 0x00000010 +#define SEQ4_STATUS_ADMIN_STATE_REVOKED 0x00000020 +#define SEQ4_STATUS_RECALLABLE_STATE_REVOKED 0x00000040 +#define SEQ4_STATUS_LEASE_MOVED 0x00000080 +#define SEQ4_STATUS_RESTART_RECLAIM_NEEDED 0x00000100 + #define NFS4_MAX_UINT64 (~(u64)0) enum nfs4_acl_whotype { @@ -154,6 +194,28 @@ enum nfs_opnum4 { OP_VERIFY = 37, OP_WRITE = 38, OP_RELEASE_LOCKOWNER = 39, + + /* nfs41 */ + OP_BACKCHANNEL_CTL = 40, + OP_BIND_CONN_TO_SESSION = 41, + OP_EXCHANGE_ID = 42, + OP_CREATE_SESSION = 43, + OP_DESTROY_SESSION = 44, + OP_FREE_STATEID = 45, + OP_GET_DIR_DELEGATION = 46, + OP_GETDEVICEINFO = 47, + OP_GETDEVICELIST = 48, + OP_LAYOUTCOMMIT = 49, + OP_LAYOUTGET = 50, + OP_LAYOUTRETURN = 51, + OP_SECINFO_NO_NAME = 52, + OP_SEQUENCE = 53, + OP_SET_SSV = 54, + OP_TEST_STATEID = 55, + OP_WANT_DELEGATION = 56, + OP_DESTROY_CLIENTID = 57, + OP_RECLAIM_COMPLETE = 58, + OP_ILLEGAL = 10044, }; @@ -230,7 +292,48 @@ enum nfsstat4 { NFS4ERR_DEADLOCK = 10045, NFS4ERR_FILE_OPEN = 10046, NFS4ERR_ADMIN_REVOKED = 10047, - NFS4ERR_CB_PATH_DOWN = 10048 + NFS4ERR_CB_PATH_DOWN = 10048, + + /* nfs41 */ + NFS4ERR_BADIOMODE = 10049, + NFS4ERR_BADLAYOUT = 10050, + NFS4ERR_BAD_SESSION_DIGEST = 10051, + NFS4ERR_BADSESSION = 10052, + NFS4ERR_BADSLOT = 10053, + NFS4ERR_COMPLETE_ALREADY = 10054, + NFS4ERR_CONN_NOT_BOUND_TO_SESSION = 10055, + NFS4ERR_DELEG_ALREADY_WANTED = 10056, + NFS4ERR_BACK_CHAN_BUSY = 10057, /* backchan reqs outstanding */ + NFS4ERR_LAYOUTTRYLATER = 10058, + NFS4ERR_LAYOUTUNAVAILABLE = 10059, + NFS4ERR_NOMATCHING_LAYOUT = 10060, + NFS4ERR_RECALLCONFLICT = 10061, + NFS4ERR_UNKNOWN_LAYOUTTYPE = 10062, + NFS4ERR_SEQ_MISORDERED = 10063, /* unexpected seq.id in req */ + NFS4ERR_SEQUENCE_POS = 10064, /* [CB_]SEQ. op not 1st op */ + NFS4ERR_REQ_TOO_BIG = 10065, /* request too big */ + NFS4ERR_REP_TOO_BIG = 10066, /* reply too big */ + NFS4ERR_REP_TOO_BIG_TO_CACHE = 10067, /* rep. not all cached */ + NFS4ERR_RETRY_UNCACHED_REP = 10068, /* retry & rep. uncached */ + NFS4ERR_UNSAFE_COMPOUND = 10069, /* retry/recovery too hard */ + NFS4ERR_TOO_MANY_OPS = 10070, /* too many ops in [CB_]COMP */ + NFS4ERR_OP_NOT_IN_SESSION = 10071, /* op needs [CB_]SEQ. op */ + NFS4ERR_HASH_ALG_UNSUPP = 10072, /* hash alg. not supp. */ + /* Error 10073 is unused. 
*/ + NFS4ERR_CLIENTID_BUSY = 10074, /* clientid has state */ + NFS4ERR_PNFS_IO_HOLE = 10075, /* IO to _SPARSE file hole */ + NFS4ERR_SEQ_FALSE_RETRY = 10076, /* retry not origional */ + NFS4ERR_BAD_HIGH_SLOT = 10077, /* sequence arg bad */ + NFS4ERR_DEADSESSION = 10078, /* persistent session dead */ + NFS4ERR_ENCR_ALG_UNSUPP = 10079, /* SSV alg mismatch */ + NFS4ERR_PNFS_NO_LAYOUT = 10080, /* direct I/O with no layout */ + NFS4ERR_NOT_ONLY_OP = 10081, /* bad compound */ + NFS4ERR_WRONG_CRED = 10082, /* permissions:state change */ + NFS4ERR_WRONG_TYPE = 10083, /* current operation mismatch */ + NFS4ERR_DIRDELEG_UNAVAIL = 10084, /* no directory delegation */ + NFS4ERR_REJECT_DELEG = 10085, /* on callback */ + NFS4ERR_RETURNCONFLICT = 10086, /* outstanding layoutreturn */ + NFS4ERR_DELEG_REVOKED = 10087, /* deleg./layout revoked */ }; /* @@ -265,7 +368,13 @@ enum opentype4 { enum createmode4 { NFS4_CREATE_UNCHECKED = 0, NFS4_CREATE_GUARDED = 1, - NFS4_CREATE_EXCLUSIVE = 2 + NFS4_CREATE_EXCLUSIVE = 2, + /* + * New to NFSv4.1. If session is persistent, + * GUARDED4 MUST be used. Otherwise, use + * EXCLUSIVE4_1 instead of EXCLUSIVE4. + */ + NFS4_CREATE_EXCLUSIVE4_1 = 3 }; enum limit_by4 { @@ -301,6 +410,8 @@ enum lock_type4 { #define FATTR4_WORD0_UNIQUE_HANDLES (1UL << 9) #define FATTR4_WORD0_LEASE_TIME (1UL << 10) #define FATTR4_WORD0_RDATTR_ERROR (1UL << 11) +/* Mandatory in NFSv4.1 */ +#define FATTR4_WORD2_SUPPATTR_EXCLCREAT (1UL << 11) /* Recommended Attributes */ #define FATTR4_WORD0_ACL (1UL << 12) @@ -391,6 +502,29 @@ enum { NFSPROC4_CLNT_GETACL, NFSPROC4_CLNT_SETACL, NFSPROC4_CLNT_FS_LOCATIONS, + + /* nfs41 */ + NFSPROC4_CLNT_EXCHANGE_ID, + NFSPROC4_CLNT_CREATE_SESSION, + NFSPROC4_CLNT_DESTROY_SESSION, + NFSPROC4_CLNT_SEQUENCE, + NFSPROC4_CLNT_GET_LEASE_TIME, +}; + +/* nfs41 types */ +struct nfs4_sessionid { + unsigned char data[NFS4_MAX_SESSIONID_LEN]; +}; + +/* Create Session Flags */ +#define SESSION4_PERSIST 0x001 +#define SESSION4_BACK_CHAN 0x002 +#define SESSION4_RDMA 0x004 + +enum state_protect_how4 { + SP4_NONE = 0, + SP4_MACH_CRED = 1, + SP4_SSV = 2 }; #endif diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index bde2557c2a9..fdffb413b19 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -185,6 +185,9 @@ struct nfs_inode { fmode_t delegation_state; struct rw_semaphore rwsem; #endif /* CONFIG_NFS_V4*/ +#ifdef CONFIG_NFS_FSCACHE + struct fscache_cookie *fscache; +#endif struct inode vfs_inode; }; @@ -207,6 +210,8 @@ struct nfs_inode { #define NFS_INO_ACL_LRU_SET (2) /* Inode is on the LRU list */ #define NFS_INO_MOUNTPOINT (3) /* inode is remote mountpoint */ #define NFS_INO_FLUSHING (4) /* inode is flushing out data */ +#define NFS_INO_FSCACHE (5) /* inode can be cached by FS-Cache */ +#define NFS_INO_FSCACHE_LOCK (6) /* FS-Cache cookie management lock */ static inline struct nfs_inode *NFS_I(const struct inode *inode) { @@ -260,6 +265,11 @@ static inline int NFS_STALE(const struct inode *inode) return test_bit(NFS_INO_STALE, &NFS_I(inode)->flags); } +static inline int NFS_FSCACHE(const struct inode *inode) +{ + return test_bit(NFS_INO_FSCACHE, &NFS_I(inode)->flags); +} + static inline __u64 NFS_FILEID(const struct inode *inode) { return NFS_I(inode)->fileid; @@ -506,6 +516,8 @@ extern int nfs_readpages(struct file *, struct address_space *, struct list_head *, unsigned); extern int nfs_readpage_result(struct rpc_task *, struct nfs_read_data *); extern void nfs_readdata_release(void *data); +extern int nfs_readpage_async(struct nfs_open_context *, 
struct inode *, + struct page *); /* * Allocate nfs_read_data structures @@ -583,6 +595,7 @@ extern void * nfs_root_data(void); #define NFSDBG_CALLBACK 0x0100 #define NFSDBG_CLIENT 0x0200 #define NFSDBG_MOUNT 0x0400 +#define NFSDBG_FSCACHE 0x0800 #define NFSDBG_ALL 0xFFFF #ifdef __KERNEL__ diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 29b1e40dce9..6ad75948cbf 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -64,6 +64,10 @@ struct nfs_client { char cl_ipaddr[48]; unsigned char cl_id_uniquifier; #endif + +#ifdef CONFIG_NFS_FSCACHE + struct fscache_cookie *fscache; /* client index cache cookie */ +#endif }; /* @@ -96,12 +100,19 @@ struct nfs_server { unsigned int acdirmin; unsigned int acdirmax; unsigned int namelen; + unsigned int options; /* extra options enabled by mount */ +#define NFS_OPTION_FSCACHE 0x00000001 /* - local caching enabled */ struct nfs_fsid fsid; __u64 maxfilesize; /* maximum file size */ unsigned long mount_time; /* when this fs was mounted */ dev_t s_dev; /* superblock dev numbers */ +#ifdef CONFIG_NFS_FSCACHE + struct nfs_fscache_key *fscache_key; /* unique key for superblock */ + struct fscache_cookie *fscache; /* superblock cookie */ +#endif + #ifdef CONFIG_NFS_V4 u32 attr_bitmask[2];/* V4 bitmask representing the set of attributes supported on this diff --git a/include/linux/nfs_iostat.h b/include/linux/nfs_iostat.h index 1cb9a3fed2b..68b10f5f890 100644 --- a/include/linux/nfs_iostat.h +++ b/include/linux/nfs_iostat.h @@ -116,4 +116,16 @@ enum nfs_stat_eventcounters { __NFSIOS_COUNTSMAX, }; +/* + * NFS local caching servicing counters + */ +enum nfs_stat_fscachecounters { + NFSIOS_FSCACHE_PAGES_READ_OK, + NFSIOS_FSCACHE_PAGES_READ_FAIL, + NFSIOS_FSCACHE_PAGES_WRITTEN_OK, + NFSIOS_FSCACHE_PAGES_WRITTEN_FAIL, + NFSIOS_FSCACHE_PAGES_UNCACHED, + __NFSIOS_FSCACHEMAX, +}; + #endif /* _LINUX_NFS_IOSTAT */ diff --git a/include/linux/nfsd/cache.h b/include/linux/nfsd/cache.h index 04b355c801d..5bccaab8105 100644 --- a/include/linux/nfsd/cache.h +++ b/include/linux/nfsd/cache.h @@ -76,4 +76,12 @@ void nfsd_reply_cache_shutdown(void); int nfsd_cache_lookup(struct svc_rqst *, int); void nfsd_cache_update(struct svc_rqst *, int, __be32 *); +#ifdef CONFIG_NFSD_V4 +void nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp); +#else /* CONFIG_NFSD_V4 */ +static inline void nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp) +{ +} +#endif /* CONFIG_NFSD_V4 */ + #endif /* NFSCACHE_H */ diff --git a/include/linux/nfsd/nfsd.h b/include/linux/nfsd/nfsd.h index e19f45991b2..2b49d676d0c 100644 --- a/include/linux/nfsd/nfsd.h +++ b/include/linux/nfsd/nfsd.h @@ -23,7 +23,7 @@ /* * nfsd version */ -#define NFSD_SUPPORTED_MINOR_VERSION 0 +#define NFSD_SUPPORTED_MINOR_VERSION 1 /* * Flags for nfsd_permission @@ -53,6 +53,7 @@ typedef int (*nfsd_dirop_t)(struct inode *, struct dentry *, int, int); extern struct svc_program nfsd_program; extern struct svc_version nfsd_version2, nfsd_version3, nfsd_version4; +extern u32 nfsd_supported_minorversion; extern struct mutex nfsd_mutex; extern struct svc_serv *nfsd_serv; @@ -105,7 +106,7 @@ void nfsd_close(struct file *); __be32 nfsd_read(struct svc_rqst *, struct svc_fh *, struct file *, loff_t, struct kvec *, int, unsigned long *); __be32 nfsd_write(struct svc_rqst *, struct svc_fh *,struct file *, - loff_t, struct kvec *,int, unsigned long, int *); + loff_t, struct kvec *,int, unsigned long *, int *); __be32 nfsd_readlink(struct svc_rqst *, struct svc_fh *, char *, int *); __be32 
nfsd_symlink(struct svc_rqst *, struct svc_fh *, @@ -149,6 +150,7 @@ int nfsd_set_posix_acl(struct svc_fh *, int, struct posix_acl *); enum vers_op {NFSD_SET, NFSD_CLEAR, NFSD_TEST, NFSD_AVAIL }; int nfsd_vers(int vers, enum vers_op change); +int nfsd_minorversion(u32 minorversion, enum vers_op change); void nfsd_reset_versions(void); int nfsd_create_serv(void); @@ -186,78 +188,119 @@ void nfsd_lockd_shutdown(void); /* * These macros provide pre-xdr'ed values for faster operation. */ -#define nfs_ok __constant_htonl(NFS_OK) -#define nfserr_perm __constant_htonl(NFSERR_PERM) -#define nfserr_noent __constant_htonl(NFSERR_NOENT) -#define nfserr_io __constant_htonl(NFSERR_IO) -#define nfserr_nxio __constant_htonl(NFSERR_NXIO) -#define nfserr_eagain __constant_htonl(NFSERR_EAGAIN) -#define nfserr_acces __constant_htonl(NFSERR_ACCES) -#define nfserr_exist __constant_htonl(NFSERR_EXIST) -#define nfserr_xdev __constant_htonl(NFSERR_XDEV) -#define nfserr_nodev __constant_htonl(NFSERR_NODEV) -#define nfserr_notdir __constant_htonl(NFSERR_NOTDIR) -#define nfserr_isdir __constant_htonl(NFSERR_ISDIR) -#define nfserr_inval __constant_htonl(NFSERR_INVAL) -#define nfserr_fbig __constant_htonl(NFSERR_FBIG) -#define nfserr_nospc __constant_htonl(NFSERR_NOSPC) -#define nfserr_rofs __constant_htonl(NFSERR_ROFS) -#define nfserr_mlink __constant_htonl(NFSERR_MLINK) -#define nfserr_opnotsupp __constant_htonl(NFSERR_OPNOTSUPP) -#define nfserr_nametoolong __constant_htonl(NFSERR_NAMETOOLONG) -#define nfserr_notempty __constant_htonl(NFSERR_NOTEMPTY) -#define nfserr_dquot __constant_htonl(NFSERR_DQUOT) -#define nfserr_stale __constant_htonl(NFSERR_STALE) -#define nfserr_remote __constant_htonl(NFSERR_REMOTE) -#define nfserr_wflush __constant_htonl(NFSERR_WFLUSH) -#define nfserr_badhandle __constant_htonl(NFSERR_BADHANDLE) -#define nfserr_notsync __constant_htonl(NFSERR_NOT_SYNC) -#define nfserr_badcookie __constant_htonl(NFSERR_BAD_COOKIE) -#define nfserr_notsupp __constant_htonl(NFSERR_NOTSUPP) -#define nfserr_toosmall __constant_htonl(NFSERR_TOOSMALL) -#define nfserr_serverfault __constant_htonl(NFSERR_SERVERFAULT) -#define nfserr_badtype __constant_htonl(NFSERR_BADTYPE) -#define nfserr_jukebox __constant_htonl(NFSERR_JUKEBOX) -#define nfserr_denied __constant_htonl(NFSERR_DENIED) -#define nfserr_deadlock __constant_htonl(NFSERR_DEADLOCK) -#define nfserr_expired __constant_htonl(NFSERR_EXPIRED) -#define nfserr_bad_cookie __constant_htonl(NFSERR_BAD_COOKIE) -#define nfserr_same __constant_htonl(NFSERR_SAME) -#define nfserr_clid_inuse __constant_htonl(NFSERR_CLID_INUSE) -#define nfserr_stale_clientid __constant_htonl(NFSERR_STALE_CLIENTID) -#define nfserr_resource __constant_htonl(NFSERR_RESOURCE) -#define nfserr_moved __constant_htonl(NFSERR_MOVED) -#define nfserr_nofilehandle __constant_htonl(NFSERR_NOFILEHANDLE) -#define nfserr_minor_vers_mismatch __constant_htonl(NFSERR_MINOR_VERS_MISMATCH) -#define nfserr_share_denied __constant_htonl(NFSERR_SHARE_DENIED) -#define nfserr_stale_stateid __constant_htonl(NFSERR_STALE_STATEID) -#define nfserr_old_stateid __constant_htonl(NFSERR_OLD_STATEID) -#define nfserr_bad_stateid __constant_htonl(NFSERR_BAD_STATEID) -#define nfserr_bad_seqid __constant_htonl(NFSERR_BAD_SEQID) -#define nfserr_symlink __constant_htonl(NFSERR_SYMLINK) -#define nfserr_not_same __constant_htonl(NFSERR_NOT_SAME) -#define nfserr_restorefh __constant_htonl(NFSERR_RESTOREFH) -#define nfserr_attrnotsupp __constant_htonl(NFSERR_ATTRNOTSUPP) -#define nfserr_bad_xdr __constant_htonl(NFSERR_BAD_XDR) 
-#define nfserr_openmode __constant_htonl(NFSERR_OPENMODE) -#define nfserr_locks_held __constant_htonl(NFSERR_LOCKS_HELD) -#define nfserr_op_illegal __constant_htonl(NFSERR_OP_ILLEGAL) -#define nfserr_grace __constant_htonl(NFSERR_GRACE) -#define nfserr_no_grace __constant_htonl(NFSERR_NO_GRACE) -#define nfserr_reclaim_bad __constant_htonl(NFSERR_RECLAIM_BAD) -#define nfserr_badname __constant_htonl(NFSERR_BADNAME) -#define nfserr_cb_path_down __constant_htonl(NFSERR_CB_PATH_DOWN) -#define nfserr_locked __constant_htonl(NFSERR_LOCKED) -#define nfserr_wrongsec __constant_htonl(NFSERR_WRONGSEC) -#define nfserr_replay_me __constant_htonl(NFSERR_REPLAY_ME) +#define nfs_ok cpu_to_be32(NFS_OK) +#define nfserr_perm cpu_to_be32(NFSERR_PERM) +#define nfserr_noent cpu_to_be32(NFSERR_NOENT) +#define nfserr_io cpu_to_be32(NFSERR_IO) +#define nfserr_nxio cpu_to_be32(NFSERR_NXIO) +#define nfserr_eagain cpu_to_be32(NFSERR_EAGAIN) +#define nfserr_acces cpu_to_be32(NFSERR_ACCES) +#define nfserr_exist cpu_to_be32(NFSERR_EXIST) +#define nfserr_xdev cpu_to_be32(NFSERR_XDEV) +#define nfserr_nodev cpu_to_be32(NFSERR_NODEV) +#define nfserr_notdir cpu_to_be32(NFSERR_NOTDIR) +#define nfserr_isdir cpu_to_be32(NFSERR_ISDIR) +#define nfserr_inval cpu_to_be32(NFSERR_INVAL) +#define nfserr_fbig cpu_to_be32(NFSERR_FBIG) +#define nfserr_nospc cpu_to_be32(NFSERR_NOSPC) +#define nfserr_rofs cpu_to_be32(NFSERR_ROFS) +#define nfserr_mlink cpu_to_be32(NFSERR_MLINK) +#define nfserr_opnotsupp cpu_to_be32(NFSERR_OPNOTSUPP) +#define nfserr_nametoolong cpu_to_be32(NFSERR_NAMETOOLONG) +#define nfserr_notempty cpu_to_be32(NFSERR_NOTEMPTY) +#define nfserr_dquot cpu_to_be32(NFSERR_DQUOT) +#define nfserr_stale cpu_to_be32(NFSERR_STALE) +#define nfserr_remote cpu_to_be32(NFSERR_REMOTE) +#define nfserr_wflush cpu_to_be32(NFSERR_WFLUSH) +#define nfserr_badhandle cpu_to_be32(NFSERR_BADHANDLE) +#define nfserr_notsync cpu_to_be32(NFSERR_NOT_SYNC) +#define nfserr_badcookie cpu_to_be32(NFSERR_BAD_COOKIE) +#define nfserr_notsupp cpu_to_be32(NFSERR_NOTSUPP) +#define nfserr_toosmall cpu_to_be32(NFSERR_TOOSMALL) +#define nfserr_serverfault cpu_to_be32(NFSERR_SERVERFAULT) +#define nfserr_badtype cpu_to_be32(NFSERR_BADTYPE) +#define nfserr_jukebox cpu_to_be32(NFSERR_JUKEBOX) +#define nfserr_denied cpu_to_be32(NFSERR_DENIED) +#define nfserr_deadlock cpu_to_be32(NFSERR_DEADLOCK) +#define nfserr_expired cpu_to_be32(NFSERR_EXPIRED) +#define nfserr_bad_cookie cpu_to_be32(NFSERR_BAD_COOKIE) +#define nfserr_same cpu_to_be32(NFSERR_SAME) +#define nfserr_clid_inuse cpu_to_be32(NFSERR_CLID_INUSE) +#define nfserr_stale_clientid cpu_to_be32(NFSERR_STALE_CLIENTID) +#define nfserr_resource cpu_to_be32(NFSERR_RESOURCE) +#define nfserr_moved cpu_to_be32(NFSERR_MOVED) +#define nfserr_nofilehandle cpu_to_be32(NFSERR_NOFILEHANDLE) +#define nfserr_minor_vers_mismatch cpu_to_be32(NFSERR_MINOR_VERS_MISMATCH) +#define nfserr_share_denied cpu_to_be32(NFSERR_SHARE_DENIED) +#define nfserr_stale_stateid cpu_to_be32(NFSERR_STALE_STATEID) +#define nfserr_old_stateid cpu_to_be32(NFSERR_OLD_STATEID) +#define nfserr_bad_stateid cpu_to_be32(NFSERR_BAD_STATEID) +#define nfserr_bad_seqid cpu_to_be32(NFSERR_BAD_SEQID) +#define nfserr_symlink cpu_to_be32(NFSERR_SYMLINK) +#define nfserr_not_same cpu_to_be32(NFSERR_NOT_SAME) +#define nfserr_restorefh cpu_to_be32(NFSERR_RESTOREFH) +#define nfserr_attrnotsupp cpu_to_be32(NFSERR_ATTRNOTSUPP) +#define nfserr_bad_xdr cpu_to_be32(NFSERR_BAD_XDR) +#define nfserr_openmode cpu_to_be32(NFSERR_OPENMODE) +#define nfserr_locks_held 
cpu_to_be32(NFSERR_LOCKS_HELD) +#define nfserr_op_illegal cpu_to_be32(NFSERR_OP_ILLEGAL) +#define nfserr_grace cpu_to_be32(NFSERR_GRACE) +#define nfserr_no_grace cpu_to_be32(NFSERR_NO_GRACE) +#define nfserr_reclaim_bad cpu_to_be32(NFSERR_RECLAIM_BAD) +#define nfserr_badname cpu_to_be32(NFSERR_BADNAME) +#define nfserr_cb_path_down cpu_to_be32(NFSERR_CB_PATH_DOWN) +#define nfserr_locked cpu_to_be32(NFSERR_LOCKED) +#define nfserr_wrongsec cpu_to_be32(NFSERR_WRONGSEC) +#define nfserr_badiomode cpu_to_be32(NFS4ERR_BADIOMODE) +#define nfserr_badlayout cpu_to_be32(NFS4ERR_BADLAYOUT) +#define nfserr_bad_session_digest cpu_to_be32(NFS4ERR_BAD_SESSION_DIGEST) +#define nfserr_badsession cpu_to_be32(NFS4ERR_BADSESSION) +#define nfserr_badslot cpu_to_be32(NFS4ERR_BADSLOT) +#define nfserr_complete_already cpu_to_be32(NFS4ERR_COMPLETE_ALREADY) +#define nfserr_conn_not_bound_to_session cpu_to_be32(NFS4ERR_CONN_NOT_BOUND_TO_SESSION) +#define nfserr_deleg_already_wanted cpu_to_be32(NFS4ERR_DELEG_ALREADY_WANTED) +#define nfserr_back_chan_busy cpu_to_be32(NFS4ERR_BACK_CHAN_BUSY) +#define nfserr_layouttrylater cpu_to_be32(NFS4ERR_LAYOUTTRYLATER) +#define nfserr_layoutunavailable cpu_to_be32(NFS4ERR_LAYOUTUNAVAILABLE) +#define nfserr_nomatching_layout cpu_to_be32(NFS4ERR_NOMATCHING_LAYOUT) +#define nfserr_recallconflict cpu_to_be32(NFS4ERR_RECALLCONFLICT) +#define nfserr_unknown_layouttype cpu_to_be32(NFS4ERR_UNKNOWN_LAYOUTTYPE) +#define nfserr_seq_misordered cpu_to_be32(NFS4ERR_SEQ_MISORDERED) +#define nfserr_sequence_pos cpu_to_be32(NFS4ERR_SEQUENCE_POS) +#define nfserr_req_too_big cpu_to_be32(NFS4ERR_REQ_TOO_BIG) +#define nfserr_rep_too_big cpu_to_be32(NFS4ERR_REP_TOO_BIG) +#define nfserr_rep_too_big_to_cache cpu_to_be32(NFS4ERR_REP_TOO_BIG_TO_CACHE) +#define nfserr_retry_uncached_rep cpu_to_be32(NFS4ERR_RETRY_UNCACHED_REP) +#define nfserr_unsafe_compound cpu_to_be32(NFS4ERR_UNSAFE_COMPOUND) +#define nfserr_too_many_ops cpu_to_be32(NFS4ERR_TOO_MANY_OPS) +#define nfserr_op_not_in_session cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION) +#define nfserr_hash_alg_unsupp cpu_to_be32(NFS4ERR_HASH_ALG_UNSUPP) +#define nfserr_clientid_busy cpu_to_be32(NFS4ERR_CLIENTID_BUSY) +#define nfserr_pnfs_io_hole cpu_to_be32(NFS4ERR_PNFS_IO_HOLE) +#define nfserr_seq_false_retry cpu_to_be32(NFS4ERR_SEQ_FALSE_RETRY) +#define nfserr_bad_high_slot cpu_to_be32(NFS4ERR_BAD_HIGH_SLOT) +#define nfserr_deadsession cpu_to_be32(NFS4ERR_DEADSESSION) +#define nfserr_encr_alg_unsupp cpu_to_be32(NFS4ERR_ENCR_ALG_UNSUPP) +#define nfserr_pnfs_no_layout cpu_to_be32(NFS4ERR_PNFS_NO_LAYOUT) +#define nfserr_not_only_op cpu_to_be32(NFS4ERR_NOT_ONLY_OP) +#define nfserr_wrong_cred cpu_to_be32(NFS4ERR_WRONG_CRED) +#define nfserr_wrong_type cpu_to_be32(NFS4ERR_WRONG_TYPE) +#define nfserr_dirdeleg_unavail cpu_to_be32(NFS4ERR_DIRDELEG_UNAVAIL) +#define nfserr_reject_deleg cpu_to_be32(NFS4ERR_REJECT_DELEG) +#define nfserr_returnconflict cpu_to_be32(NFS4ERR_RETURNCONFLICT) +#define nfserr_deleg_revoked cpu_to_be32(NFS4ERR_DELEG_REVOKED) /* error codes for internal use */ /* if a request fails due to kmalloc failure, it gets dropped. * Client should resend eventually */ -#define nfserr_dropit __constant_htonl(30000) +#define nfserr_dropit cpu_to_be32(30000) /* end-of-file indicator in readdir */ -#define nfserr_eof __constant_htonl(30001) +#define nfserr_eof cpu_to_be32(30001) +/* replay detected */ +#define nfserr_replay_me cpu_to_be32(11001) +/* nfs41 replay detected */ +#define nfserr_replay_cache cpu_to_be32(11002) /* Check for dir entries '.' and '..' 
*/ #define isdotent(n, l) (l < 3 && n[0] == '.' && (l == 1 || n[1] == '.')) @@ -300,7 +343,7 @@ extern struct timeval nfssvc_boot; * TIME_BACKUP (unlikely to be supported any time soon) * TIME_CREATE (unlikely to be supported any time soon) */ -#define NFSD_SUPPORTED_ATTRS_WORD0 \ +#define NFSD4_SUPPORTED_ATTRS_WORD0 \ (FATTR4_WORD0_SUPPORTED_ATTRS | FATTR4_WORD0_TYPE | FATTR4_WORD0_FH_EXPIRE_TYPE \ | FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE | FATTR4_WORD0_LINK_SUPPORT \ | FATTR4_WORD0_SYMLINK_SUPPORT | FATTR4_WORD0_NAMED_ATTR | FATTR4_WORD0_FSID \ @@ -312,7 +355,7 @@ extern struct timeval nfssvc_boot; | FATTR4_WORD0_MAXFILESIZE | FATTR4_WORD0_MAXLINK | FATTR4_WORD0_MAXNAME \ | FATTR4_WORD0_MAXREAD | FATTR4_WORD0_MAXWRITE | FATTR4_WORD0_ACL) -#define NFSD_SUPPORTED_ATTRS_WORD1 \ +#define NFSD4_SUPPORTED_ATTRS_WORD1 \ (FATTR4_WORD1_MODE | FATTR4_WORD1_NO_TRUNC | FATTR4_WORD1_NUMLINKS \ | FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP | FATTR4_WORD1_RAWDEV \ | FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE | FATTR4_WORD1_SPACE_TOTAL \ @@ -320,6 +363,35 @@ extern struct timeval nfssvc_boot; | FATTR4_WORD1_TIME_DELTA | FATTR4_WORD1_TIME_METADATA \ | FATTR4_WORD1_TIME_MODIFY | FATTR4_WORD1_TIME_MODIFY_SET | FATTR4_WORD1_MOUNTED_ON_FILEID) +#define NFSD4_SUPPORTED_ATTRS_WORD2 0 + +#define NFSD4_1_SUPPORTED_ATTRS_WORD0 \ + NFSD4_SUPPORTED_ATTRS_WORD0 + +#define NFSD4_1_SUPPORTED_ATTRS_WORD1 \ + NFSD4_SUPPORTED_ATTRS_WORD1 + +#define NFSD4_1_SUPPORTED_ATTRS_WORD2 \ + (NFSD4_SUPPORTED_ATTRS_WORD2 | FATTR4_WORD2_SUPPATTR_EXCLCREAT) + +static inline u32 nfsd_suppattrs0(u32 minorversion) +{ + return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD0 + : NFSD4_SUPPORTED_ATTRS_WORD0; +} + +static inline u32 nfsd_suppattrs1(u32 minorversion) +{ + return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD1 + : NFSD4_SUPPORTED_ATTRS_WORD1; +} + +static inline u32 nfsd_suppattrs2(u32 minorversion) +{ + return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD2 + : NFSD4_SUPPORTED_ATTRS_WORD2; +} + /* These will return ERR_INVAL if specified in GETATTR or READDIR. 
*/ #define NFSD_WRITEONLY_ATTRS_WORD1 \ (FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET) @@ -330,6 +402,19 @@ extern struct timeval nfssvc_boot; #define NFSD_WRITEABLE_ATTRS_WORD1 \ (FATTR4_WORD1_MODE | FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP \ | FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET) +#define NFSD_WRITEABLE_ATTRS_WORD2 0 + +#define NFSD_SUPPATTR_EXCLCREAT_WORD0 \ + NFSD_WRITEABLE_ATTRS_WORD0 +/* + * we currently store the exclusive create verifier in the v_{a,m}time + * attributes so the client can't set these at create time using EXCLUSIVE4_1 + */ +#define NFSD_SUPPATTR_EXCLCREAT_WORD1 \ + (NFSD_WRITEABLE_ATTRS_WORD1 & \ + ~(FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET)) +#define NFSD_SUPPATTR_EXCLCREAT_WORD2 \ + NFSD_WRITEABLE_ATTRS_WORD2 #endif /* CONFIG_NFSD_V4 */ diff --git a/include/linux/nfsd/nfsfh.h b/include/linux/nfsd/nfsfh.h index fa317f6c154..afa19016c4a 100644 --- a/include/linux/nfsd/nfsfh.h +++ b/include/linux/nfsd/nfsfh.h @@ -269,6 +269,13 @@ fh_copy(struct svc_fh *dst, struct svc_fh *src) return dst; } +static inline void +fh_copy_shallow(struct knfsd_fh *dst, struct knfsd_fh *src) +{ + dst->fh_size = src->fh_size; + memcpy(&dst->fh_base, &src->fh_base, src->fh_size); +} + static __inline__ struct svc_fh * fh_init(struct svc_fh *fhp, int maxsize) { diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h index 128298c0362..4d61c873fee 100644 --- a/include/linux/nfsd/state.h +++ b/include/linux/nfsd/state.h @@ -66,8 +66,7 @@ struct nfs4_cb_recall { u32 cbr_ident; int cbr_trunc; stateid_t cbr_stateid; - u32 cbr_fhlen; - char cbr_fhval[NFS4_FHSIZE]; + struct knfsd_fh cbr_fh; struct nfs4_delegation *cbr_dp; }; @@ -86,8 +85,7 @@ struct nfs4_delegation { }; #define dl_stateid dl_recall.cbr_stateid -#define dl_fhlen dl_recall.cbr_fhlen -#define dl_fhval dl_recall.cbr_fhval +#define dl_fh dl_recall.cbr_fh /* client delegation callback info */ struct nfs4_callback { @@ -101,6 +99,64 @@ struct nfs4_callback { struct rpc_clnt * cb_client; }; +/* Maximum number of slots per session. 
128 is useful for long haul TCP */ +#define NFSD_MAX_SLOTS_PER_SESSION 128 +/* Maximum number of pages per slot cache entry */ +#define NFSD_PAGES_PER_SLOT 1 +/* Maximum number of operations per session compound */ +#define NFSD_MAX_OPS_PER_COMPOUND 16 + +struct nfsd4_cache_entry { + __be32 ce_status; + struct kvec ce_datav; /* encoded NFSv4.1 data in rq_res.head[0] */ + struct page *ce_respages[NFSD_PAGES_PER_SLOT + 1]; + int ce_cachethis; + short ce_resused; + int ce_opcnt; + int ce_rpchdrlen; +}; + +struct nfsd4_slot { + bool sl_inuse; + u32 sl_seqid; + struct nfsd4_cache_entry sl_cache_entry; +}; + +struct nfsd4_session { + struct kref se_ref; + struct list_head se_hash; /* hash by sessionid */ + struct list_head se_perclnt; + u32 se_flags; + struct nfs4_client *se_client; /* for expire_client */ + struct nfs4_sessionid se_sessionid; + u32 se_fmaxreq_sz; + u32 se_fmaxresp_sz; + u32 se_fmaxresp_cached; + u32 se_fmaxops; + u32 se_fnumslots; + struct nfsd4_slot se_slots[]; /* forward channel slots */ +}; + +static inline void +nfsd4_put_session(struct nfsd4_session *ses) +{ + extern void free_session(struct kref *kref); + kref_put(&ses->se_ref, free_session); +} + +static inline void +nfsd4_get_session(struct nfsd4_session *ses) +{ + kref_get(&ses->se_ref); +} + +/* formatted contents of nfs4_sessionid */ +struct nfsd4_sessionid { + clientid_t clientid; + u32 sequence; + u32 reserved; +}; + #define HEXDIR_LEN 33 /* hex version of 16 byte md5 of cl_name plus '\0' */ /* @@ -132,6 +188,12 @@ struct nfs4_client { struct nfs4_callback cl_callback; /* callback info */ atomic_t cl_count; /* ref count */ u32 cl_firststate; /* recovery dir creation */ + + /* for nfs41 */ + struct list_head cl_sessions; + struct nfsd4_slot cl_slot; /* create_session slot */ + u32 cl_exchange_flags; + struct nfs4_sessionid cl_sessionid; }; /* struct nfs4_client_reset @@ -168,8 +230,7 @@ struct nfs4_replay { unsigned int rp_buflen; char *rp_buf; unsigned intrp_allocated; - int rp_openfh_len; - char rp_openfh[NFS4_FHSIZE]; + struct knfsd_fh rp_openfh; char rp_ibuf[NFSD4_REPLAY_ISIZE]; }; @@ -217,7 +278,7 @@ struct nfs4_stateowner { * share_acces, share_deny on the file. 
*/ struct nfs4_file { - struct kref fi_ref; + atomic_t fi_ref; struct list_head fi_hash; /* hash by "struct inode *" */ struct list_head fi_stateids; struct list_head fi_delegations; @@ -259,14 +320,13 @@ struct nfs4_stateid { }; /* flags for preprocess_seqid_op() */ -#define CHECK_FH 0x00000001 +#define HAS_SESSION 0x00000001 #define CONFIRM 0x00000002 #define OPEN_STATE 0x00000004 #define LOCK_STATE 0x00000008 #define RD_STATE 0x00000010 #define WR_STATE 0x00000020 #define CLOSE_STATE 0x00000040 -#define DELEG_RET 0x00000080 #define seqid_mutating_err(err) \ (((err) != nfserr_stale_clientid) && \ @@ -274,7 +334,9 @@ struct nfs4_stateid { ((err) != nfserr_stale_stateid) && \ ((err) != nfserr_bad_stateid)) -extern __be32 nfs4_preprocess_stateid_op(struct svc_fh *current_fh, +struct nfsd4_compound_state; + +extern __be32 nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate, stateid_t *stateid, int flags, struct file **filp); extern void nfs4_lock_state(void); extern void nfs4_unlock_state(void); @@ -290,7 +352,7 @@ extern void nfsd4_init_recdir(char *recdir_name); extern int nfsd4_recdir_load(void); extern void nfsd4_shutdown_recdir(void); extern int nfs4_client_to_reclaim(const char *name); -extern int nfs4_has_reclaimed_state(const char *name); +extern int nfs4_has_reclaimed_state(const char *name, bool use_exchange_id); extern void nfsd4_recdir_purge_old(void); extern int nfsd4_create_clid_dir(struct nfs4_client *clp); extern void nfsd4_remove_clid_dir(struct nfs4_client *clp); diff --git a/include/linux/nfsd/stats.h b/include/linux/nfsd/stats.h index 7678cfbe996..2693ef647df 100644 --- a/include/linux/nfsd/stats.h +++ b/include/linux/nfsd/stats.h @@ -11,6 +11,11 @@ #include <linux/nfs4.h> +/* thread usage wraps very million seconds (approx one fortnight) */ +#define NFSD_USAGE_WRAP (HZ*1000000) + +#ifdef __KERNEL__ + struct nfsd_stats { unsigned int rchits; /* repcache hits */ unsigned int rcmisses; /* repcache hits */ @@ -35,10 +40,6 @@ struct nfsd_stats { }; -/* thread usage wraps very million seconds (approx one fortnight) */ -#define NFSD_USAGE_WRAP (HZ*1000000) - -#ifdef __KERNEL__ extern struct nfsd_stats nfsdstats; extern struct svc_stat nfsd_svcstats; diff --git a/include/linux/nfsd/xdr4.h b/include/linux/nfsd/xdr4.h index 27bd3e38ec5..f80d6013fdc 100644 --- a/include/linux/nfsd/xdr4.h +++ b/include/linux/nfsd/xdr4.h @@ -45,10 +45,22 @@ #define XDR_LEN(n) (((n) + 3) & ~3) struct nfsd4_compound_state { - struct svc_fh current_fh; - struct svc_fh save_fh; - struct nfs4_stateowner *replay_owner; -}; + struct svc_fh current_fh; + struct svc_fh save_fh; + struct nfs4_stateowner *replay_owner; + /* For sessions DRC */ + struct nfsd4_session *session; + struct nfsd4_slot *slot; + __be32 *statp; + size_t iovlen; + u32 minorversion; + u32 status; +}; + +static inline bool nfsd4_has_session(struct nfsd4_compound_state *cs) +{ + return cs->slot != NULL; +} struct nfsd4_change_info { u32 atomic; @@ -90,7 +102,7 @@ struct nfsd4_create { u32 specdata2; } dev; /* NF4BLK, NF4CHR */ } u; - u32 cr_bmval[2]; /* request */ + u32 cr_bmval[3]; /* request */ struct iattr cr_iattr; /* request */ struct nfsd4_change_info cr_cinfo; /* response */ struct nfs4_acl *cr_acl; @@ -105,7 +117,7 @@ struct nfsd4_delegreturn { }; struct nfsd4_getattr { - u32 ga_bmval[2]; /* request */ + u32 ga_bmval[3]; /* request */ struct svc_fh *ga_fhp; /* response */ }; @@ -206,11 +218,9 @@ struct nfsd4_open { stateid_t op_delegate_stateid; /* request - response */ u32 op_create; /* request */ u32 op_createmode; /* 
request */ - u32 op_bmval[2]; /* request */ - union { /* request */ - struct iattr iattr; /* UNCHECKED4,GUARDED4 */ - nfs4_verifier verf; /* EXCLUSIVE4 */ - } u; + u32 op_bmval[3]; /* request */ + struct iattr iattr; /* UNCHECKED4, GUARDED4, EXCLUSIVE4_1 */ + nfs4_verifier verf; /* EXCLUSIVE4 */ clientid_t op_clientid; /* request */ struct xdr_netobj op_owner; /* request */ u32 op_seqid; /* request */ @@ -224,8 +234,8 @@ struct nfsd4_open { struct nfs4_stateowner *op_stateowner; /* used during processing */ struct nfs4_acl *op_acl; }; -#define op_iattr u.iattr -#define op_verf u.verf +#define op_iattr iattr +#define op_verf verf struct nfsd4_open_confirm { stateid_t oc_req_stateid /* request */; @@ -259,7 +269,7 @@ struct nfsd4_readdir { nfs4_verifier rd_verf; /* request */ u32 rd_dircount; /* request */ u32 rd_maxcount; /* request */ - u32 rd_bmval[2]; /* request */ + u32 rd_bmval[3]; /* request */ struct svc_rqst *rd_rqstp; /* response */ struct svc_fh * rd_fhp; /* response */ @@ -301,7 +311,7 @@ struct nfsd4_secinfo { struct nfsd4_setattr { stateid_t sa_stateid; /* request */ - u32 sa_bmval[2]; /* request */ + u32 sa_bmval[3]; /* request */ struct iattr sa_iattr; /* request */ struct nfs4_acl *sa_acl; }; @@ -327,7 +337,7 @@ struct nfsd4_setclientid_confirm { /* also used for NVERIFY */ struct nfsd4_verify { - u32 ve_bmval[2]; /* request */ + u32 ve_bmval[3]; /* request */ u32 ve_attrlen; /* request */ char * ve_attrval; /* request */ }; @@ -344,6 +354,54 @@ struct nfsd4_write { nfs4_verifier wr_verifier; /* response */ }; +struct nfsd4_exchange_id { + nfs4_verifier verifier; + struct xdr_netobj clname; + u32 flags; + clientid_t clientid; + u32 seqid; + int spa_how; +}; + +struct nfsd4_channel_attrs { + u32 headerpadsz; + u32 maxreq_sz; + u32 maxresp_sz; + u32 maxresp_cached; + u32 maxops; + u32 maxreqs; + u32 nr_rdma_attrs; + u32 rdma_attrs; +}; + +struct nfsd4_create_session { + clientid_t clientid; + struct nfs4_sessionid sessionid; + u32 seqid; + u32 flags; + struct nfsd4_channel_attrs fore_channel; + struct nfsd4_channel_attrs back_channel; + u32 callback_prog; + u32 uid; + u32 gid; +}; + +struct nfsd4_sequence { + struct nfs4_sessionid sessionid; /* request/response */ + u32 seqid; /* request/response */ + u32 slotid; /* request/response */ + u32 maxslots; /* request/response */ + u32 cachethis; /* request */ +#if 0 + u32 target_maxslots; /* response */ + u32 status_flags; /* response */ +#endif /* not yet */ +}; + +struct nfsd4_destroy_session { + struct nfs4_sessionid sessionid; +}; + struct nfsd4_op { int opnum; __be32 status; @@ -378,6 +436,12 @@ struct nfsd4_op { struct nfsd4_verify verify; struct nfsd4_write write; struct nfsd4_release_lockowner release_lockowner; + + /* NFSv4.1 */ + struct nfsd4_exchange_id exchange_id; + struct nfsd4_create_session create_session; + struct nfsd4_destroy_session destroy_session; + struct nfsd4_sequence sequence; } u; struct nfs4_replay * replay; }; @@ -416,9 +480,22 @@ struct nfsd4_compoundres { u32 taglen; char * tag; u32 opcnt; - __be32 * tagp; /* where to encode tag and opcount */ + __be32 * tagp; /* tag, opcount encode location */ + struct nfsd4_compound_state cstate; }; +static inline bool nfsd4_is_solo_sequence(struct nfsd4_compoundres *resp) +{ + struct nfsd4_compoundargs *args = resp->rqstp->rq_argp; + return args->opcnt == 1; +} + +static inline bool nfsd4_not_cached(struct nfsd4_compoundres *resp) +{ + return !resp->cstate.slot->sl_cache_entry.ce_cachethis || + nfsd4_is_solo_sequence(resp); +} + #define NFS4_SVC_XDRSIZE 
sizeof(struct nfsd4_compoundargs) static inline void @@ -448,7 +525,23 @@ extern __be32 nfsd4_setclientid(struct svc_rqst *rqstp, extern __be32 nfsd4_setclientid_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *, struct nfsd4_setclientid_confirm *setclientid_confirm); -extern __be32 nfsd4_process_open1(struct nfsd4_open *open); +extern void nfsd4_store_cache_entry(struct nfsd4_compoundres *resp); +extern __be32 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp, + struct nfsd4_sequence *seq); +extern __be32 nfsd4_exchange_id(struct svc_rqst *rqstp, + struct nfsd4_compound_state *, +struct nfsd4_exchange_id *); + extern __be32 nfsd4_create_session(struct svc_rqst *, + struct nfsd4_compound_state *, + struct nfsd4_create_session *); +extern __be32 nfsd4_sequence(struct svc_rqst *, + struct nfsd4_compound_state *, + struct nfsd4_sequence *); +extern __be32 nfsd4_destroy_session(struct svc_rqst *, + struct nfsd4_compound_state *, + struct nfsd4_destroy_session *); +extern __be32 nfsd4_process_open1(struct nfsd4_compound_state *, + struct nfsd4_open *open); extern __be32 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open); extern __be32 nfsd4_open_confirm(struct svc_rqst *rqstp, diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h new file mode 100644 index 00000000000..79fec6af3f9 --- /dev/null +++ b/include/linux/nilfs2_fs.h @@ -0,0 +1,801 @@ +/* + * nilfs2_fs.h - NILFS2 on-disk structures and common declarations. + * + * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + * Written by Koji Sato <koji@osrg.net> + * Ryusuke Konishi <ryusuke@osrg.net> + */ +/* + * linux/include/linux/ext2_fs.h + * + * Copyright (C) 1992, 1993, 1994, 1995 + * Remy Card (card@masi.ibp.fr) + * Laboratoire MASI - Institut Blaise Pascal + * Universite Pierre et Marie Curie (Paris VI) + * + * from + * + * linux/include/linux/minix_fs.h + * + * Copyright (C) 1991, 1992 Linus Torvalds + */ + +#ifndef _LINUX_NILFS_FS_H +#define _LINUX_NILFS_FS_H + +#include <linux/types.h> +#include <linux/ioctl.h> + +/* + * Inode flags stored in nilfs_inode and on-memory nilfs inode + * + * We define these flags based on ext2-fs because of the + * compatibility reason; to avoid problems in chattr(1) + */ +#define NILFS_SECRM_FL 0x00000001 /* Secure deletion */ +#define NILFS_UNRM_FL 0x00000002 /* Undelete */ +#define NILFS_SYNC_FL 0x00000008 /* Synchronous updates */ +#define NILFS_IMMUTABLE_FL 0x00000010 /* Immutable file */ +#define NILFS_APPEND_FL 0x00000020 /* writes to file may only append */ +#define NILFS_NODUMP_FL 0x00000040 /* do not dump file */ +#define NILFS_NOATIME_FL 0x00000080 /* do not update atime */ +/* Reserved for compression usage... 
*/ +#define NILFS_NOTAIL_FL 0x00008000 /* file tail should not be merged */ +#define NILFS_DIRSYNC_FL 0x00010000 /* dirsync behaviour */ + +#define NILFS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */ +#define NILFS_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */ + + +#define NILFS_INODE_BMAP_SIZE 7 +/** + * struct nilfs_inode - structure of an inode on disk + * @i_blocks: blocks count + * @i_size: size in bytes + * @i_ctime: creation time (seconds) + * @i_mtime: modification time (seconds) + * @i_ctime_nsec: creation time (nano seconds) + * @i_mtime_nsec: modification time (nano seconds) + * @i_uid: user id + * @i_gid: group id + * @i_mode: file mode + * @i_links_count: links count + * @i_flags: file flags + * @i_bmap: block mapping + * @i_xattr: extended attributes + * @i_generation: file generation (for NFS) + * @i_pad: padding + */ +struct nilfs_inode { + __le64 i_blocks; + __le64 i_size; + __le64 i_ctime; + __le64 i_mtime; + __le32 i_ctime_nsec; + __le32 i_mtime_nsec; + __le32 i_uid; + __le32 i_gid; + __le16 i_mode; + __le16 i_links_count; + __le32 i_flags; + __le64 i_bmap[NILFS_INODE_BMAP_SIZE]; +#define i_device_code i_bmap[0] + __le64 i_xattr; + __le32 i_generation; + __le32 i_pad; +}; + +/** + * struct nilfs_super_root - structure of super root + * @sr_sum: check sum + * @sr_bytes: byte count of the structure + * @sr_flags: flags (reserved) + * @sr_nongc_ctime: write time of the last segment not for cleaner operation + * @sr_dat: DAT file inode + * @sr_cpfile: checkpoint file inode + * @sr_sufile: segment usage file inode + */ +struct nilfs_super_root { + __le32 sr_sum; + __le16 sr_bytes; + __le16 sr_flags; + __le64 sr_nongc_ctime; + struct nilfs_inode sr_dat; + struct nilfs_inode sr_cpfile; + struct nilfs_inode sr_sufile; +}; + +#define NILFS_SR_MDT_OFFSET(inode_size, i) \ + ((unsigned long)&((struct nilfs_super_root *)0)->sr_dat + \ + (inode_size) * (i)) +#define NILFS_SR_DAT_OFFSET(inode_size) NILFS_SR_MDT_OFFSET(inode_size, 0) +#define NILFS_SR_CPFILE_OFFSET(inode_size) NILFS_SR_MDT_OFFSET(inode_size, 1) +#define NILFS_SR_SUFILE_OFFSET(inode_size) NILFS_SR_MDT_OFFSET(inode_size, 2) +#define NILFS_SR_BYTES (sizeof(struct nilfs_super_root)) + +/* + * Maximal mount counts + */ +#define NILFS_DFL_MAX_MNT_COUNT 50 /* 50 mounts */ + +/* + * File system states (sbp->s_state, nilfs->ns_mount_state) + */ +#define NILFS_VALID_FS 0x0001 /* Unmounted cleanly */ +#define NILFS_ERROR_FS 0x0002 /* Errors detected */ +#define NILFS_RESIZE_FS 0x0004 /* Resize required */ + +/* + * Mount flags (sbi->s_mount_opt) + */ +#define NILFS_MOUNT_ERROR_MODE 0x0070 /* Error mode mask */ +#define NILFS_MOUNT_ERRORS_CONT 0x0010 /* Continue on errors */ +#define NILFS_MOUNT_ERRORS_RO 0x0020 /* Remount fs ro on errors */ +#define NILFS_MOUNT_ERRORS_PANIC 0x0040 /* Panic on errors */ +#define NILFS_MOUNT_SNAPSHOT 0x0080 /* Snapshot flag */ +#define NILFS_MOUNT_BARRIER 0x1000 /* Use block barriers */ +#define NILFS_MOUNT_STRICT_ORDER 0x2000 /* Apply strict in-order + semantics also for data */ + + +/** + * struct nilfs_super_block - structure of super block on disk + */ +struct nilfs_super_block { + __le32 s_rev_level; /* Revision level */ + __le16 s_minor_rev_level; /* minor revision level */ + __le16 s_magic; /* Magic signature */ + + __le16 s_bytes; /* Bytes count of CRC calculation + for this structure. s_reserved + is excluded. 
*/ + __le16 s_flags; /* flags */ + __le32 s_crc_seed; /* Seed value of CRC calculation */ + __le32 s_sum; /* Check sum of super block */ + + __le32 s_log_block_size; /* Block size represented as follows + blocksize = + 1 << (s_log_block_size + 10) */ + __le64 s_nsegments; /* Number of segments in filesystem */ + __le64 s_dev_size; /* block device size in bytes */ + __le64 s_first_data_block; /* 1st seg disk block number */ + __le32 s_blocks_per_segment; /* number of blocks per full segment */ + __le32 s_r_segments_percentage; /* Reserved segments percentage */ + + __le64 s_last_cno; /* Last checkpoint number */ + __le64 s_last_pseg; /* disk block addr pseg written last */ + __le64 s_last_seq; /* seq. number of seg written last */ + __le64 s_free_blocks_count; /* Free blocks count */ + + __le64 s_ctime; /* Creation time (execution time of + newfs) */ + __le64 s_mtime; /* Mount time */ + __le64 s_wtime; /* Write time */ + __le16 s_mnt_count; /* Mount count */ + __le16 s_max_mnt_count; /* Maximal mount count */ + __le16 s_state; /* File system state */ + __le16 s_errors; /* Behaviour when detecting errors */ + __le64 s_lastcheck; /* time of last check */ + + __le32 s_checkinterval; /* max. time between checks */ + __le32 s_creator_os; /* OS */ + __le16 s_def_resuid; /* Default uid for reserved blocks */ + __le16 s_def_resgid; /* Default gid for reserved blocks */ + __le32 s_first_ino; /* First non-reserved inode */ + + __le16 s_inode_size; /* Size of an inode */ + __le16 s_dat_entry_size; /* Size of a dat entry */ + __le16 s_checkpoint_size; /* Size of a checkpoint */ + __le16 s_segment_usage_size; /* Size of a segment usage */ + + __u8 s_uuid[16]; /* 128-bit uuid for volume */ + char s_volume_name[16]; /* volume name */ + char s_last_mounted[64]; /* directory where last mounted */ + + __le32 s_c_interval; /* Commit interval of segment */ + __le32 s_c_block_max; /* Threshold of data amount for + the segment construction */ + __u32 s_reserved[192]; /* padding to the end of the block */ +}; + +/* + * Codes for operating systems + */ +#define NILFS_OS_LINUX 0 +/* Codes from 1 to 4 are reserved to keep compatibility with ext2 creator-OS */ + +/* + * Revision levels + */ +#define NILFS_CURRENT_REV 2 /* current major revision */ +#define NILFS_MINOR_REV 0 /* minor revision */ + +/* + * Bytes count of super_block for CRC-calculation + */ +#define NILFS_SB_BYTES \ + ((long)&((struct nilfs_super_block *)0)->s_reserved) + +/* + * Special inode number + */ +#define NILFS_ROOT_INO 2 /* Root file inode */ +#define NILFS_DAT_INO 3 /* DAT file */ +#define NILFS_CPFILE_INO 4 /* checkpoint file */ +#define NILFS_SUFILE_INO 5 /* segment usage file */ +#define NILFS_IFILE_INO 6 /* ifile */ +#define NILFS_ATIME_INO 7 /* Atime file (reserved) */ +#define NILFS_XATTR_INO 8 /* Xattribute file (reserved) */ +#define NILFS_SKETCH_INO 10 /* Sketch file */ +#define NILFS_USER_INO 11 /* Fisrt user's file inode number */ + +#define NILFS_SB_OFFSET_BYTES 1024 /* byte offset of nilfs superblock */ +#define NILFS_SUPER_MAGIC 0x3434 /* NILFS filesystem magic number */ + +#define NILFS_SEG_MIN_BLOCKS 16 /* Minimum number of blocks in + a full segment */ +#define NILFS_PSEG_MIN_BLOCKS 2 /* Minimum number of blocks in + a partial segment */ +#define NILFS_MIN_NRSVSEGS 8 /* Minimum number of reserved + segments */ + +/* + * bytes offset of secondary super block + */ +#define NILFS_SB2_OFFSET_BYTES(devsize) ((((devsize) >> 12) - 1) << 12) + +/* + * Maximal count of links to a file + */ +#define NILFS_LINK_MAX 32000 + +/* + * 
Structure of a directory entry + * (Same as ext2) + */ + +#define NILFS_NAME_LEN 255 + +/* + * The new version of the directory entry. Since V0 structures are + * stored in intel byte order, and the name_len field could never be + * bigger than 255 chars, it's safe to reclaim the extra byte for the + * file_type field. + */ +struct nilfs_dir_entry { + __le64 inode; /* Inode number */ + __le16 rec_len; /* Directory entry length */ + __u8 name_len; /* Name length */ + __u8 file_type; + char name[NILFS_NAME_LEN]; /* File name */ + char pad; +}; + +/* + * NILFS directory file types. Only the low 3 bits are used. The + * other bits are reserved for now. + */ +enum { + NILFS_FT_UNKNOWN, + NILFS_FT_REG_FILE, + NILFS_FT_DIR, + NILFS_FT_CHRDEV, + NILFS_FT_BLKDEV, + NILFS_FT_FIFO, + NILFS_FT_SOCK, + NILFS_FT_SYMLINK, + NILFS_FT_MAX +}; + +/* + * NILFS_DIR_PAD defines the directory entries boundaries + * + * NOTE: It must be a multiple of 8 + */ +#define NILFS_DIR_PAD 8 +#define NILFS_DIR_ROUND (NILFS_DIR_PAD - 1) +#define NILFS_DIR_REC_LEN(name_len) (((name_len) + 12 + NILFS_DIR_ROUND) & \ + ~NILFS_DIR_ROUND) + + +/** + * struct nilfs_finfo - file information + * @fi_ino: inode number + * @fi_cno: checkpoint number + * @fi_nblocks: number of blocks (including intermediate blocks) + * @fi_ndatablk: number of file data blocks + */ +struct nilfs_finfo { + __le64 fi_ino; + __le64 fi_cno; + __le32 fi_nblocks; + __le32 fi_ndatablk; + /* array of virtual block numbers */ +}; + +/** + * struct nilfs_binfo_v - information for the block to which a virtual block number is assigned + * @bi_vblocknr: virtual block number + * @bi_blkoff: block offset + */ +struct nilfs_binfo_v { + __le64 bi_vblocknr; + __le64 bi_blkoff; +}; + +/** + * struct nilfs_binfo_dat - information for the block which belongs to the DAT file + * @bi_blkoff: block offset + * @bi_level: level + * @bi_pad: padding + */ +struct nilfs_binfo_dat { + __le64 bi_blkoff; + __u8 bi_level; + __u8 bi_pad[7]; +}; + +/** + * union nilfs_binfo: block information + * @bi_v: nilfs_binfo_v structure + * @bi_dat: nilfs_binfo_dat structure + */ +union nilfs_binfo { + struct nilfs_binfo_v bi_v; + struct nilfs_binfo_dat bi_dat; +}; + +/** + * struct nilfs_segment_summary - segment summary + * @ss_datasum: checksum of data + * @ss_sumsum: checksum of segment summary + * @ss_magic: magic number + * @ss_bytes: size of this structure in bytes + * @ss_flags: flags + * @ss_seq: sequence number + * @ss_create: creation timestamp + * @ss_next: next segment + * @ss_nblocks: number of blocks + * @ss_nfinfo: number of finfo structures + * @ss_sumbytes: total size of segment summary in bytes + * @ss_pad: padding + */ +struct nilfs_segment_summary { + __le32 ss_datasum; + __le32 ss_sumsum; + __le32 ss_magic; + __le16 ss_bytes; + __le16 ss_flags; + __le64 ss_seq; + __le64 ss_create; + __le64 ss_next; + __le32 ss_nblocks; + __le32 ss_nfinfo; + __le32 ss_sumbytes; + __le32 ss_pad; + /* array of finfo structures */ +}; + +#define NILFS_SEGSUM_MAGIC 0x1eaffa11 /* segment summary magic number */ + +/* + * Segment summary flags + */ +#define NILFS_SS_LOGBGN 0x0001 /* begins a logical segment */ +#define NILFS_SS_LOGEND 0x0002 /* ends a logical segment */ +#define NILFS_SS_SR 0x0004 /* has super root */ +#define NILFS_SS_SYNDT 0x0008 /* includes data only updates */ +#define NILFS_SS_GC 0x0010 /* segment written for cleaner operation */ + +/** + * struct nilfs_palloc_group_desc - block group descriptor + * @pg_nfrees: number of free entries in block group + */ +struct 
nilfs_palloc_group_desc { + __le32 pg_nfrees; +}; + +/** + * struct nilfs_dat_entry - disk address translation entry + * @dt_blocknr: block number + * @dt_start: start checkpoint number + * @dt_end: end checkpoint number + * @dt_rsv: reserved for future use + */ +struct nilfs_dat_entry { + __le64 de_blocknr; + __le64 de_start; + __le64 de_end; + __le64 de_rsv; +}; + +/** + * struct nilfs_dat_group_desc - block group descriptor + * @dg_nfrees: number of free virtual block numbers in block group + */ +struct nilfs_dat_group_desc { + __le32 dg_nfrees; +}; + + +/** + * struct nilfs_snapshot_list - snapshot list + * @ssl_next: next checkpoint number on snapshot list + * @ssl_prev: previous checkpoint number on snapshot list + */ +struct nilfs_snapshot_list { + __le64 ssl_next; + __le64 ssl_prev; +}; + +/** + * struct nilfs_checkpoint - checkpoint structure + * @cp_flags: flags + * @cp_checkpoints_count: checkpoints count in a block + * @cp_snapshot_list: snapshot list + * @cp_cno: checkpoint number + * @cp_create: creation timestamp + * @cp_nblk_inc: number of blocks incremented by this checkpoint + * @cp_inodes_count: inodes count + * @cp_blocks_count: blocks count + * @cp_ifile_inode: inode of ifile + */ +struct nilfs_checkpoint { + __le32 cp_flags; + __le32 cp_checkpoints_count; + struct nilfs_snapshot_list cp_snapshot_list; + __le64 cp_cno; + __le64 cp_create; + __le64 cp_nblk_inc; + __le64 cp_inodes_count; + __le64 cp_blocks_count; /* Reserved (might be deleted) */ + + /* Do not change the byte offset of ifile inode. + To keep the compatibility of the disk format, + additional fields should be added behind cp_ifile_inode. */ + struct nilfs_inode cp_ifile_inode; +}; + +/* checkpoint flags */ +enum { + NILFS_CHECKPOINT_SNAPSHOT, + NILFS_CHECKPOINT_INVALID, + NILFS_CHECKPOINT_SKETCH, + NILFS_CHECKPOINT_MINOR, +}; + +#define NILFS_CHECKPOINT_FNS(flag, name) \ +static inline void \ +nilfs_checkpoint_set_##name(struct nilfs_checkpoint *cp) \ +{ \ + cp->cp_flags = cpu_to_le32(le32_to_cpu(cp->cp_flags) | \ + (1UL << NILFS_CHECKPOINT_##flag)); \ +} \ +static inline void \ +nilfs_checkpoint_clear_##name(struct nilfs_checkpoint *cp) \ +{ \ + cp->cp_flags = cpu_to_le32(le32_to_cpu(cp->cp_flags) & \ + ~(1UL << NILFS_CHECKPOINT_##flag)); \ +} \ +static inline int \ +nilfs_checkpoint_##name(const struct nilfs_checkpoint *cp) \ +{ \ + return !!(le32_to_cpu(cp->cp_flags) & \ + (1UL << NILFS_CHECKPOINT_##flag)); \ +} + +NILFS_CHECKPOINT_FNS(SNAPSHOT, snapshot) +NILFS_CHECKPOINT_FNS(INVALID, invalid) +NILFS_CHECKPOINT_FNS(MINOR, minor) + +/** + * struct nilfs_cpinfo - checkpoint information + * @ci_flags: flags + * @ci_pad: padding + * @ci_cno: checkpoint number + * @ci_create: creation timestamp + * @ci_nblk_inc: number of blocks incremented by this checkpoint + * @ci_inodes_count: inodes count + * @ci_blocks_count: blocks count + * @ci_next: next checkpoint number in snapshot list + */ +struct nilfs_cpinfo { + __u32 ci_flags; + __u32 ci_pad; + __u64 ci_cno; + __u64 ci_create; + __u64 ci_nblk_inc; + __u64 ci_inodes_count; + __u64 ci_blocks_count; + __u64 ci_next; +}; + +#define NILFS_CPINFO_FNS(flag, name) \ +static inline int \ +nilfs_cpinfo_##name(const struct nilfs_cpinfo *cpinfo) \ +{ \ + return !!(cpinfo->ci_flags & (1UL << NILFS_CHECKPOINT_##flag)); \ +} + +NILFS_CPINFO_FNS(SNAPSHOT, snapshot) +NILFS_CPINFO_FNS(INVALID, invalid) +NILFS_CPINFO_FNS(MINOR, minor) + + +/** + * struct nilfs_cpfile_header - checkpoint file header + * @ch_ncheckpoints: number of checkpoints + * @ch_nsnapshots: number of 
snapshots + * @ch_snapshot_list: snapshot list + */ +struct nilfs_cpfile_header { + __le64 ch_ncheckpoints; + __le64 ch_nsnapshots; + struct nilfs_snapshot_list ch_snapshot_list; +}; + +#define NILFS_CPFILE_FIRST_CHECKPOINT_OFFSET \ + ((sizeof(struct nilfs_cpfile_header) + \ + sizeof(struct nilfs_checkpoint) - 1) / \ + sizeof(struct nilfs_checkpoint)) + +/** + * struct nilfs_segment_usage - segment usage + * @su_lastmod: last modified timestamp + * @su_nblocks: number of blocks in segment + * @su_flags: flags + */ +struct nilfs_segment_usage { + __le64 su_lastmod; + __le32 su_nblocks; + __le32 su_flags; +}; + +/* segment usage flag */ +enum { + NILFS_SEGMENT_USAGE_ACTIVE, + NILFS_SEGMENT_USAGE_DIRTY, + NILFS_SEGMENT_USAGE_ERROR, + + /* ... */ +}; + +#define NILFS_SEGMENT_USAGE_FNS(flag, name) \ +static inline void \ +nilfs_segment_usage_set_##name(struct nilfs_segment_usage *su) \ +{ \ + su->su_flags = cpu_to_le32(le32_to_cpu(su->su_flags) | \ + (1UL << NILFS_SEGMENT_USAGE_##flag));\ +} \ +static inline void \ +nilfs_segment_usage_clear_##name(struct nilfs_segment_usage *su) \ +{ \ + su->su_flags = \ + cpu_to_le32(le32_to_cpu(su->su_flags) & \ + ~(1UL << NILFS_SEGMENT_USAGE_##flag)); \ +} \ +static inline int \ +nilfs_segment_usage_##name(const struct nilfs_segment_usage *su) \ +{ \ + return !!(le32_to_cpu(su->su_flags) & \ + (1UL << NILFS_SEGMENT_USAGE_##flag)); \ +} + +NILFS_SEGMENT_USAGE_FNS(ACTIVE, active) +NILFS_SEGMENT_USAGE_FNS(DIRTY, dirty) +NILFS_SEGMENT_USAGE_FNS(ERROR, error) + +static inline void +nilfs_segment_usage_set_clean(struct nilfs_segment_usage *su) +{ + su->su_lastmod = cpu_to_le64(0); + su->su_nblocks = cpu_to_le32(0); + su->su_flags = cpu_to_le32(0); +} + +static inline int +nilfs_segment_usage_clean(const struct nilfs_segment_usage *su) +{ + return !le32_to_cpu(su->su_flags); +} + +/** + * struct nilfs_sufile_header - segment usage file header + * @sh_ncleansegs: number of clean segments + * @sh_ndirtysegs: number of dirty segments + * @sh_last_alloc: last allocated segment number + */ +struct nilfs_sufile_header { + __le64 sh_ncleansegs; + __le64 sh_ndirtysegs; + __le64 sh_last_alloc; + /* ... 
*/
+};
+
+#define NILFS_SUFILE_FIRST_SEGMENT_USAGE_OFFSET	\
+	((sizeof(struct nilfs_sufile_header) +		\
+	  sizeof(struct nilfs_segment_usage) - 1) /	\
+	 sizeof(struct nilfs_segment_usage))
+
+/**
+ * struct nilfs_suinfo - segment usage information
+ * @sui_lastmod: timestamp of last modification
+ * @sui_nblocks: number of blocks in segment
+ * @sui_flags: segment usage flags
+ */
+struct nilfs_suinfo {
+	__u64 sui_lastmod;
+	__u32 sui_nblocks;
+	__u32 sui_flags;
+};
+
+#define NILFS_SUINFO_FNS(flag, name)					\
+static inline int							\
+nilfs_suinfo_##name(const struct nilfs_suinfo *si)			\
+{									\
+	return si->sui_flags & (1UL << NILFS_SEGMENT_USAGE_##flag);	\
+}
+
+NILFS_SUINFO_FNS(ACTIVE, active)
+NILFS_SUINFO_FNS(DIRTY, dirty)
+NILFS_SUINFO_FNS(ERROR, error)
+
+static inline int nilfs_suinfo_clean(const struct nilfs_suinfo *si)
+{
+	return !si->sui_flags;
+}
+
+/* ioctl */
+enum {
+	NILFS_CHECKPOINT,
+	NILFS_SNAPSHOT,
+};
+
+/**
+ * struct nilfs_cpmode - change checkpoint mode structure
+ * @cm_cno: checkpoint number
+ * @cm_mode: mode of checkpoint (NILFS_CHECKPOINT or NILFS_SNAPSHOT)
+ * @cm_pad: padding
+ */
+struct nilfs_cpmode {
+	__u64 cm_cno;
+	__u32 cm_mode;
+	__u32 cm_pad;
+};
+
+/**
+ * struct nilfs_argv - argument vector
+ * @v_base: pointer (user address) to the member array
+ * @v_nmembs: number of members
+ * @v_size: size of each member
+ * @v_flags: flags
+ * @v_index: start number of target data items
+ */
+struct nilfs_argv {
+	__u64 v_base;
+	__u32 v_nmembs;	/* number of members */
+	__u16 v_size;	/* size of members */
+	__u16 v_flags;
+	__u64 v_index;
+};
+
+/**
+ * struct nilfs_period - period of checkpoint numbers
+ * @p_start: start checkpoint number (inclusive)
+ * @p_end: end checkpoint number (exclusive)
+ */
+struct nilfs_period {
+	__u64 p_start;
+	__u64 p_end;
+};
+
+/**
+ * struct nilfs_cpstat - checkpoint statistics
+ * @cs_cno: checkpoint number
+ * @cs_ncps: number of checkpoints
+ * @cs_nsss: number of snapshots
+ */
+struct nilfs_cpstat {
+	__u64 cs_cno;
+	__u64 cs_ncps;
+	__u64 cs_nsss;
+};
+
+/**
+ * struct nilfs_sustat - segment usage statistics
+ * @ss_nsegs: number of segments
+ * @ss_ncleansegs: number of clean segments
+ * @ss_ndirtysegs: number of dirty segments
+ * @ss_ctime: creation time of the last segment
+ * @ss_nongc_ctime: creation time of the last segment not for GC
+ * @ss_prot_seq: least sequence number of segments which must not be reclaimed
+ */
+struct nilfs_sustat {
+	__u64 ss_nsegs;
+	__u64 ss_ncleansegs;
+	__u64 ss_ndirtysegs;
+	__u64 ss_ctime;
+	__u64 ss_nongc_ctime;
+	__u64 ss_prot_seq;
+};
+
+/**
+ * struct nilfs_vinfo - virtual block number information
+ * @vi_vblocknr: virtual block number
+ * @vi_start: start checkpoint number (inclusive)
+ * @vi_end: end checkpoint number (exclusive)
+ * @vi_blocknr: disk block number
+ */
+struct nilfs_vinfo {
+	__u64 vi_vblocknr;
+	__u64 vi_start;
+	__u64 vi_end;
+	__u64 vi_blocknr;
+};
+
+/**
+ * struct nilfs_vdesc - descriptor of virtual block number
+ * @vd_ino: inode number
+ * @vd_cno: checkpoint number
+ * @vd_vblocknr: virtual block number
+ * @vd_period: period of checkpoint numbers in which the block is valid
+ * @vd_blocknr: disk block number
+ * @vd_offset: logical block offset within the file
+ * @vd_flags: flags
+ * @vd_pad: padding
+ */
+struct nilfs_vdesc {
+	__u64 vd_ino;
+	__u64 vd_cno;
+	__u64 vd_vblocknr;
+	struct nilfs_period vd_period;
+	__u64 vd_blocknr;
+	__u64 vd_offset;
+	__u32 vd_flags;
+	__u32 vd_pad;
+};
+
+/**
+ * struct nilfs_bdesc - descriptor of disk block number
+ * @bd_ino: inode number
+ * @bd_oblocknr: original disk block number
+ * @bd_blocknr: disk block number
+ * @bd_offset: logical block offset within the file
+ * @bd_level: level in the b-tree organization
+ * @bd_pad: padding
+ */
+struct nilfs_bdesc {
+	__u64 bd_ino;
+	__u64 bd_oblocknr;
+	__u64 bd_blocknr;
+	__u64 bd_offset;
+	__u32 bd_level;
+	__u32 bd_pad;
+};
+
+#define NILFS_IOCTL_IDENT		'n'
+
+#define NILFS_IOCTL_CHANGE_CPMODE  \
+	_IOW(NILFS_IOCTL_IDENT, 0x80, struct nilfs_cpmode)
+#define NILFS_IOCTL_DELETE_CHECKPOINT  \
+	_IOW(NILFS_IOCTL_IDENT, 0x81, __u64)
+#define NILFS_IOCTL_GET_CPINFO  \
+	_IOR(NILFS_IOCTL_IDENT, 0x82, struct nilfs_argv)
+#define NILFS_IOCTL_GET_CPSTAT  \
+	_IOR(NILFS_IOCTL_IDENT, 0x83, struct nilfs_cpstat)
+#define NILFS_IOCTL_GET_SUINFO  \
+	_IOR(NILFS_IOCTL_IDENT, 0x84, struct nilfs_argv)
+#define NILFS_IOCTL_GET_SUSTAT  \
+	_IOR(NILFS_IOCTL_IDENT, 0x85, struct nilfs_sustat)
+#define NILFS_IOCTL_GET_VINFO  \
+	_IOWR(NILFS_IOCTL_IDENT, 0x86, struct nilfs_argv)
+#define NILFS_IOCTL_GET_BDESCS  \
+	_IOWR(NILFS_IOCTL_IDENT, 0x87, struct nilfs_argv)
+#define NILFS_IOCTL_CLEAN_SEGMENTS  \
+	_IOW(NILFS_IOCTL_IDENT, 0x88, struct nilfs_argv[5])
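The argument-vector ioctls above are driven from user space by pointing a struct nilfs_argv at a caller-supplied buffer. The sketch below is illustrative only and is not part of the patch: the buffer size, the open file descriptor, the use of v_flags to select checkpoints versus snapshots, and the way the count is reported back through v_nmembs are assumptions about how NILFS_IOCTL_GET_CPINFO might be exercised.

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/nilfs_fs.h>

/* Hypothetical helper: list up to 64 checkpoints starting at checkpoint 1.
 * 'fd' is assumed to be an open descriptor on a mounted nilfs2 volume. */
static int dump_checkpoints(int fd)
{
	struct nilfs_cpinfo info[64];
	struct nilfs_argv argv = {
		.v_base   = (unsigned long)info,          /* user buffer */
		.v_nmembs = 64,                           /* capacity of the buffer */
		.v_size   = sizeof(struct nilfs_cpinfo),  /* size of each member */
		.v_index  = 1,                            /* first checkpoint number to report */
		.v_flags  = NILFS_CHECKPOINT,             /* plain checkpoints, not snapshots */
	};

	if (ioctl(fd, NILFS_IOCTL_GET_CPINFO, &argv) < 0)
		return -1;
	/* v_nmembs is assumed to come back holding the number of entries filled in */
	printf("got %u checkpoints\n", (unsigned int)argv.v_nmembs);
	return 0;
}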
+#define NILFS_IOCTL_SYNC \ + _IOR(NILFS_IOCTL_IDENT, 0x8A, __u64) +#define NILFS_IOCTL_RESIZE \ + _IOW(NILFS_IOCTL_IDENT, 0x8B, __u64) + +#endif /* _LINUX_NILFS_FS_H */ diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 61df1779b2a..62214c7d2d9 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -82,6 +82,7 @@ enum pageflags { PG_arch_1, PG_reserved, PG_private, /* If pagecache, has fs-private data */ + PG_private_2, /* If pagecache, has fs aux data */ PG_writeback, /* Page is under writeback */ #ifdef CONFIG_PAGEFLAGS_EXTENDED PG_head, /* A head page */ @@ -108,6 +109,12 @@ enum pageflags { /* Filesystems */ PG_checked = PG_owner_priv_1, + /* Two page bits are conscripted by FS-Cache to maintain local caching + * state. These bits are set on pages belonging to the netfs's inodes + * when those inodes are being locally cached. + */ + PG_fscache = PG_private_2, /* page backed by cache */ + /* XEN */ PG_pinned = PG_owner_priv_1, PG_savepinned = PG_dirty, @@ -182,7 +189,7 @@ static inline int TestClearPage##uname(struct page *page) { return 0; } struct page; /* forward declaration */ -TESTPAGEFLAG(Locked, locked) +TESTPAGEFLAG(Locked, locked) TESTSETFLAG(Locked, locked) PAGEFLAG(Error, error) PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced) PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty) @@ -194,8 +201,6 @@ PAGEFLAG(Checked, checked) /* Used by some filesystems */ PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */ PAGEFLAG(SavePinned, savepinned); /* Xen */ PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved) -PAGEFLAG(Private, private) __CLEARPAGEFLAG(Private, private) - __SETPAGEFLAG(Private, private) PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked) __PAGEFLAG(SlobPage, slob_page) @@ -205,6 +210,16 @@ __PAGEFLAG(SlubFrozen, slub_frozen) __PAGEFLAG(SlubDebug, slub_debug) /* + * Private page markings that may be used by the filesystem that owns the page + * for its own purposes. + * - PG_private and PG_private_2 cause releasepage() and co to be invoked + */ +PAGEFLAG(Private, private) __SETPAGEFLAG(Private, private) + __CLEARPAGEFLAG(Private, private) +PAGEFLAG(Private2, private_2) TESTSCFLAG(Private2, private_2) +PAGEFLAG(OwnerPriv1, owner_priv_1) TESTCLEARFLAG(OwnerPriv1, owner_priv_1) + +/* * Only test-and-set exist for PG_writeback. The unconditional operators are * risky: they bypass page accounting. */ @@ -384,9 +399,10 @@ static inline void __ClearPageTail(struct page *page) * these flags set. It they are, there is a problem. */ #define PAGE_FLAGS_CHECK_AT_FREE \ - (1 << PG_lru | 1 << PG_private | 1 << PG_locked | \ - 1 << PG_buddy | 1 << PG_writeback | 1 << PG_reserved | \ - 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \ + (1 << PG_lru | 1 << PG_locked | \ + 1 << PG_private | 1 << PG_private_2 | \ + 1 << PG_buddy | 1 << PG_writeback | 1 << PG_reserved | \ + 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \ __PG_UNEVICTABLE | __PG_MLOCKED) /* @@ -397,4 +413,16 @@ static inline void __ClearPageTail(struct page *page) #define PAGE_FLAGS_CHECK_AT_PREP ((1 << NR_PAGEFLAGS) - 1) #endif /* !__GENERATING_BOUNDS_H */ + +/** + * page_has_private - Determine if page has private stuff + * @page: The page to be checked + * + * Determine if a page has private stuff, indicating that release routines + * should be invoked upon it. 
+ */ +#define page_has_private(page) \ + ((page)->flags & ((1 << PG_private) | \ + (1 << PG_private_2))) + #endif /* PAGE_FLAGS_H */ diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 076a7dc67c2..34da5230faa 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -384,6 +384,11 @@ static inline void wait_on_page_writeback(struct page *page) extern void end_page_writeback(struct page *page); /* + * Add an arbitrary waiter to a page's wait queue + */ +extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter); + +/* * Fault a userspace page into pagetables. Return non-zero on a fault. * * This assumes that two userspace pages are always sufficient. That's diff --git a/include/linux/parport_pc.h b/include/linux/parport_pc.h index ea8c6d84996..cc1767f5cca 100644 --- a/include/linux/parport_pc.h +++ b/include/linux/parport_pc.h @@ -228,10 +228,11 @@ extern void parport_pc_release_resources(struct parport *p); extern int parport_pc_claim_resources(struct parport *p); /* PCMCIA code will want to get us to look at a port. Provide a mechanism. */ -extern struct parport *parport_pc_probe_port (unsigned long base, - unsigned long base_hi, - int irq, int dma, - struct device *dev); -extern void parport_pc_unregister_port (struct parport *p); +extern struct parport *parport_pc_probe_port(unsigned long base, + unsigned long base_hi, + int irq, int dma, + struct device *dev, + int irqflags); +extern void parport_pc_unregister_port(struct parport *p); #endif diff --git a/include/linux/pci.h b/include/linux/pci.h index a7fe4bbd7ff..72698d89e76 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -674,6 +674,11 @@ int __must_check pci_reenable_device(struct pci_dev *); int __must_check pcim_enable_device(struct pci_dev *pdev); void pcim_pin_device(struct pci_dev *pdev); +static inline int pci_is_enabled(struct pci_dev *pdev) +{ + return (atomic_read(&pdev->enable_cnt) > 0); +} + static inline int pci_is_managed(struct pci_dev *pdev) { return pdev->is_managed; diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 170f8b1f22d..ee98cd57088 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -944,6 +944,32 @@ #define PCI_DEVICE_ID_SUN_TOMATILLO 0xa801 #define PCI_DEVICE_ID_SUN_CASSINI 0xabba +#define PCI_VENDOR_ID_NI 0x1093 +#define PCI_DEVICE_ID_NI_PCI2322 0xd130 +#define PCI_DEVICE_ID_NI_PCI2324 0xd140 +#define PCI_DEVICE_ID_NI_PCI2328 0xd150 +#define PCI_DEVICE_ID_NI_PXI8422_2322 0xd190 +#define PCI_DEVICE_ID_NI_PXI8422_2324 0xd1a0 +#define PCI_DEVICE_ID_NI_PXI8420_2322 0xd1d0 +#define PCI_DEVICE_ID_NI_PXI8420_2324 0xd1e0 +#define PCI_DEVICE_ID_NI_PXI8420_2328 0xd1f0 +#define PCI_DEVICE_ID_NI_PXI8420_23216 0xd1f1 +#define PCI_DEVICE_ID_NI_PCI2322I 0xd250 +#define PCI_DEVICE_ID_NI_PCI2324I 0xd270 +#define PCI_DEVICE_ID_NI_PCI23216 0xd2b0 +#define PCI_DEVICE_ID_NI_PXI8430_2322 0x7080 +#define PCI_DEVICE_ID_NI_PCI8430_2322 0x70db +#define PCI_DEVICE_ID_NI_PXI8430_2324 0x70dd +#define PCI_DEVICE_ID_NI_PCI8430_2324 0x70df +#define PCI_DEVICE_ID_NI_PXI8430_2328 0x70e2 +#define PCI_DEVICE_ID_NI_PCI8430_2328 0x70e4 +#define PCI_DEVICE_ID_NI_PXI8430_23216 0x70e6 +#define PCI_DEVICE_ID_NI_PCI8430_23216 0x70e7 +#define PCI_DEVICE_ID_NI_PXI8432_2322 0x70e8 +#define PCI_DEVICE_ID_NI_PCI8432_2322 0x70ea +#define PCI_DEVICE_ID_NI_PXI8432_2324 0x70ec +#define PCI_DEVICE_ID_NI_PCI8432_2324 0x70ee + #define PCI_VENDOR_ID_CMD 0x1095 #define PCI_DEVICE_ID_CMD_643 0x0643 #define PCI_DEVICE_ID_CMD_646 0x0646 diff --git 
a/include/linux/pda_power.h b/include/linux/pda_power.h index cb7d10f3076..d4cf7a2ceb3 100644 --- a/include/linux/pda_power.h +++ b/include/linux/pda_power.h @@ -31,6 +31,8 @@ struct pda_power_pdata { unsigned int wait_for_status; /* msecs, default is 500 */ unsigned int wait_for_charger; /* msecs, default is 500 */ unsigned int polling_interval; /* msecs, default is 2000 */ + + unsigned long ac_max_uA; /* current to draw when on AC */ }; #endif /* __PDA_POWER_H__ */ diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 8ff25e0e7f7..594c494ac3f 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -73,6 +73,8 @@ enum power_supply_property { POWER_SUPPLY_PROP_VOLTAGE_AVG, POWER_SUPPLY_PROP_CURRENT_NOW, POWER_SUPPLY_PROP_CURRENT_AVG, + POWER_SUPPLY_PROP_POWER_NOW, + POWER_SUPPLY_PROP_POWER_AVG, POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN, POWER_SUPPLY_PROP_CHARGE_FULL, diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 3945f803d51..7c775751392 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -28,4 +28,4 @@ int pwm_enable(struct pwm_device *pwm); */ void pwm_disable(struct pwm_device *pwm); -#endif /* __ASM_ARCH_PWM_H */ +#endif /* __LINUX_PWM_H */ diff --git a/include/linux/raid/bitmap.h b/include/linux/raid/bitmap.h deleted file mode 100644 index e98900671ca..00000000000 --- a/include/linux/raid/bitmap.h +++ /dev/null @@ -1,288 +0,0 @@ -/* - * bitmap.h: Copyright (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003 - * - * additions: Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc. - */ -#ifndef BITMAP_H -#define BITMAP_H 1 - -#define BITMAP_MAJOR_LO 3 -/* version 4 insists the bitmap is in little-endian order - * with version 3, it is host-endian which is non-portable - */ -#define BITMAP_MAJOR_HI 4 -#define BITMAP_MAJOR_HOSTENDIAN 3 - -#define BITMAP_MINOR 39 - -/* - * in-memory bitmap: - * - * Use 16 bit block counters to track pending writes to each "chunk". - * The 2 high order bits are special-purpose, the first is a flag indicating - * whether a resync is needed. The second is a flag indicating whether a - * resync is active. - * This means that the counter is actually 14 bits: - * - * +--------+--------+------------------------------------------------+ - * | resync | resync | counter | - * | needed | active | | - * | (0-1) | (0-1) | (0-16383) | - * +--------+--------+------------------------------------------------+ - * - * The "resync needed" bit is set when: - * a '1' bit is read from storage at startup. - * a write request fails on some drives - * a resync is aborted on a chunk with 'resync active' set - * It is cleared (and resync-active set) when a resync starts across all drives - * of the chunk. - * - * - * The "resync active" bit is set when: - * a resync is started on all drives, and resync_needed is set. - * resync_needed will be cleared (as long as resync_active wasn't already set). - * It is cleared when a resync completes. - * - * The counter counts pending write requests, plus the on-disk bit. - * When the counter is '1' and the resync bits are clear, the on-disk - * bit can be cleared aswell, thus setting the counter to 0. - * When we set a bit, or in the counter (to start a write), if the fields is - * 0, we first set the disk bit and set the counter to 1. - * - * If the counter is 0, the on-disk bit is clear and the stipe is clean - * Anything that dirties the stipe pushes the counter to 2 (at least) - * and sets the on-disk bit (lazily). 
- * If a periodic sweep find the counter at 2, it is decremented to 1. - * If the sweep find the counter at 1, the on-disk bit is cleared and the - * counter goes to zero. - * - * Also, we'll hijack the "map" pointer itself and use it as two 16 bit block - * counters as a fallback when "page" memory cannot be allocated: - * - * Normal case (page memory allocated): - * - * page pointer (32-bit) - * - * [ ] ------+ - * | - * +-------> [ ][ ]..[ ] (4096 byte page == 2048 counters) - * c1 c2 c2048 - * - * Hijacked case (page memory allocation failed): - * - * hijacked page pointer (32-bit) - * - * [ ][ ] (no page memory allocated) - * counter #1 (16-bit) counter #2 (16-bit) - * - */ - -#ifdef __KERNEL__ - -#define PAGE_BITS (PAGE_SIZE << 3) -#define PAGE_BIT_SHIFT (PAGE_SHIFT + 3) - -typedef __u16 bitmap_counter_t; -#define COUNTER_BITS 16 -#define COUNTER_BIT_SHIFT 4 -#define COUNTER_BYTE_RATIO (COUNTER_BITS / 8) -#define COUNTER_BYTE_SHIFT (COUNTER_BIT_SHIFT - 3) - -#define NEEDED_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 1))) -#define RESYNC_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 2))) -#define COUNTER_MAX ((bitmap_counter_t) RESYNC_MASK - 1) -#define NEEDED(x) (((bitmap_counter_t) x) & NEEDED_MASK) -#define RESYNC(x) (((bitmap_counter_t) x) & RESYNC_MASK) -#define COUNTER(x) (((bitmap_counter_t) x) & COUNTER_MAX) - -/* how many counters per page? */ -#define PAGE_COUNTER_RATIO (PAGE_BITS / COUNTER_BITS) -/* same, except a shift value for more efficient bitops */ -#define PAGE_COUNTER_SHIFT (PAGE_BIT_SHIFT - COUNTER_BIT_SHIFT) -/* same, except a mask value for more efficient bitops */ -#define PAGE_COUNTER_MASK (PAGE_COUNTER_RATIO - 1) - -#define BITMAP_BLOCK_SIZE 512 -#define BITMAP_BLOCK_SHIFT 9 - -/* how many blocks per chunk? (this is variable) */ -#define CHUNK_BLOCK_RATIO(bitmap) ((bitmap)->chunksize >> BITMAP_BLOCK_SHIFT) -#define CHUNK_BLOCK_SHIFT(bitmap) ((bitmap)->chunkshift - BITMAP_BLOCK_SHIFT) -#define CHUNK_BLOCK_MASK(bitmap) (CHUNK_BLOCK_RATIO(bitmap) - 1) - -/* when hijacked, the counters and bits represent even larger "chunks" */ -/* there will be 1024 chunks represented by each counter in the page pointers */ -#define PAGEPTR_BLOCK_RATIO(bitmap) \ - (CHUNK_BLOCK_RATIO(bitmap) << PAGE_COUNTER_SHIFT >> 1) -#define PAGEPTR_BLOCK_SHIFT(bitmap) \ - (CHUNK_BLOCK_SHIFT(bitmap) + PAGE_COUNTER_SHIFT - 1) -#define PAGEPTR_BLOCK_MASK(bitmap) (PAGEPTR_BLOCK_RATIO(bitmap) - 1) - -/* - * on-disk bitmap: - * - * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap - * file a page at a time. There's a superblock at the start of the file. - */ - -/* map chunks (bits) to file pages - offset by the size of the superblock */ -#define CHUNK_BIT_OFFSET(chunk) ((chunk) + (sizeof(bitmap_super_t) << 3)) - -#endif - -/* - * bitmap structures: - */ - -#define BITMAP_MAGIC 0x6d746962 - -/* use these for bitmap->flags and bitmap->sb->state bit-fields */ -enum bitmap_state { - BITMAP_STALE = 0x002, /* the bitmap file is out of date or had -EIO */ - BITMAP_WRITE_ERROR = 0x004, /* A write error has occurred */ - BITMAP_HOSTENDIAN = 0x8000, -}; - -/* the superblock at the front of the bitmap file -- little endian */ -typedef struct bitmap_super_s { - __le32 magic; /* 0 BITMAP_MAGIC */ - __le32 version; /* 4 the bitmap major for now, could change... 
*/ - __u8 uuid[16]; /* 8 128 bit uuid - must match md device uuid */ - __le64 events; /* 24 event counter for the bitmap (1)*/ - __le64 events_cleared;/*32 event counter when last bit cleared (2) */ - __le64 sync_size; /* 40 the size of the md device's sync range(3) */ - __le32 state; /* 48 bitmap state information */ - __le32 chunksize; /* 52 the bitmap chunk size in bytes */ - __le32 daemon_sleep; /* 56 seconds between disk flushes */ - __le32 write_behind; /* 60 number of outstanding write-behind writes */ - - __u8 pad[256 - 64]; /* set to zero */ -} bitmap_super_t; - -/* notes: - * (1) This event counter is updated before the eventcounter in the md superblock - * When a bitmap is loaded, it is only accepted if this event counter is equal - * to, or one greater than, the event counter in the superblock. - * (2) This event counter is updated when the other one is *if*and*only*if* the - * array is not degraded. As bits are not cleared when the array is degraded, - * this represents the last time that any bits were cleared. - * If a device is being added that has an event count with this value or - * higher, it is accepted as conforming to the bitmap. - * (3)This is the number of sectors represented by the bitmap, and is the range that - * resync happens across. For raid1 and raid5/6 it is the size of individual - * devices. For raid10 it is the size of the array. - */ - -#ifdef __KERNEL__ - -/* the in-memory bitmap is represented by bitmap_pages */ -struct bitmap_page { - /* - * map points to the actual memory page - */ - char *map; - /* - * in emergencies (when map cannot be alloced), hijack the map - * pointer and use it as two counters itself - */ - unsigned int hijacked:1; - /* - * count of dirty bits on the page - */ - unsigned int count:31; -}; - -/* keep track of bitmap file pages that have pending writes on them */ -struct page_list { - struct list_head list; - struct page *page; -}; - -/* the main bitmap structure - one per mddev */ -struct bitmap { - struct bitmap_page *bp; - unsigned long pages; /* total number of pages in the bitmap */ - unsigned long missing_pages; /* number of pages not yet allocated */ - - mddev_t *mddev; /* the md device that the bitmap is for */ - - int counter_bits; /* how many bits per block counter */ - - /* bitmap chunksize -- how much data does each bit represent? */ - unsigned long chunksize; - unsigned long chunkshift; /* chunksize = 2^chunkshift (for bitops) */ - unsigned long chunks; /* total number of data chunks for the array */ - - /* We hold a count on the chunk currently being synced, and drop - * it when the last block is started. If the resync is aborted - * midway, we need to be able to drop that count, so we remember - * the counted chunk.. 
- */ - unsigned long syncchunk; - - __u64 events_cleared; - int need_sync; - - /* bitmap spinlock */ - spinlock_t lock; - - long offset; /* offset from superblock if file is NULL */ - struct file *file; /* backing disk file */ - struct page *sb_page; /* cached copy of the bitmap file superblock */ - struct page **filemap; /* list of cache pages for the file */ - unsigned long *filemap_attr; /* attributes associated w/ filemap pages */ - unsigned long file_pages; /* number of pages in the file */ - int last_page_size; /* bytes in the last page */ - - unsigned long flags; - - int allclean; - - unsigned long max_write_behind; /* write-behind mode */ - atomic_t behind_writes; - - /* - * the bitmap daemon - periodically wakes up and sweeps the bitmap - * file, cleaning up bits and flushing out pages to disk as necessary - */ - unsigned long daemon_lastrun; /* jiffies of last run */ - unsigned long daemon_sleep; /* how many seconds between updates? */ - unsigned long last_end_sync; /* when we lasted called end_sync to - * update bitmap with resync progress */ - - atomic_t pending_writes; /* pending writes to the bitmap file */ - wait_queue_head_t write_wait; - wait_queue_head_t overflow_wait; - -}; - -/* the bitmap API */ - -/* these are used only by md/bitmap */ -int bitmap_create(mddev_t *mddev); -void bitmap_flush(mddev_t *mddev); -void bitmap_destroy(mddev_t *mddev); - -void bitmap_print_sb(struct bitmap *bitmap); -void bitmap_update_sb(struct bitmap *bitmap); - -int bitmap_setallbits(struct bitmap *bitmap); -void bitmap_write_all(struct bitmap *bitmap); - -void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e); - -/* these are exported */ -int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, - unsigned long sectors, int behind); -void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, - unsigned long sectors, int success, int behind); -int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int degraded); -void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int aborted); -void bitmap_close_sync(struct bitmap *bitmap); -void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector); - -void bitmap_unplug(struct bitmap *bitmap); -void bitmap_daemon_work(struct bitmap *bitmap); -#endif - -#endif diff --git a/include/linux/raid/linear.h b/include/linux/raid/linear.h deleted file mode 100644 index f38b9c586af..00000000000 --- a/include/linux/raid/linear.h +++ /dev/null @@ -1,31 +0,0 @@ -#ifndef _LINEAR_H -#define _LINEAR_H - -#include <linux/raid/md.h> - -struct dev_info { - mdk_rdev_t *rdev; - sector_t num_sectors; - sector_t start_sector; -}; - -typedef struct dev_info dev_info_t; - -struct linear_private_data -{ - struct linear_private_data *prev; /* earlier version */ - dev_info_t **hash_table; - sector_t spacing; - sector_t array_sectors; - int sector_shift; /* shift before dividing - * by spacing - */ - dev_info_t disks[0]; -}; - - -typedef struct linear_private_data linear_conf_t; - -#define mddev_to_conf(mddev) ((linear_conf_t *) mddev->private) - -#endif diff --git a/include/linux/raid/md.h b/include/linux/raid/md.h deleted file mode 100644 index 82bea14cae1..00000000000 --- a/include/linux/raid/md.h +++ /dev/null @@ -1,81 +0,0 @@ -/* - md.h : Multiple Devices driver for Linux - Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman - Copyright (C) 1994-96 Marc ZYNGIER - <zyngier@ufr-info-p7.ibp.fr> or - <maz@gloups.fdn.fr> - - This program is free software; you can redistribute it and/or modify - it under the 
terms of the GNU General Public License as published by - the Free Software Foundation; either version 2, or (at your option) - any later version. - - You should have received a copy of the GNU General Public License - (for example /usr/src/linux/COPYING); if not, write to the Free - Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. -*/ - -#ifndef _MD_H -#define _MD_H - -#include <linux/blkdev.h> -#include <linux/seq_file.h> - -/* - * 'md_p.h' holds the 'physical' layout of RAID devices - * 'md_u.h' holds the user <=> kernel API - * - * 'md_k.h' holds kernel internal definitions - */ - -#include <linux/raid/md_p.h> -#include <linux/raid/md_u.h> -#include <linux/raid/md_k.h> - -#ifdef CONFIG_MD - -/* - * Different major versions are not compatible. - * Different minor versions are only downward compatible. - * Different patchlevel versions are downward and upward compatible. - */ -#define MD_MAJOR_VERSION 0 -#define MD_MINOR_VERSION 90 -/* - * MD_PATCHLEVEL_VERSION indicates kernel functionality. - * >=1 means different superblock formats are selectable using SET_ARRAY_INFO - * and major_version/minor_version accordingly - * >=2 means that Internal bitmaps are supported by setting MD_SB_BITMAP_PRESENT - * in the super status byte - * >=3 means that bitmap superblock version 4 is supported, which uses - * little-ending representation rather than host-endian - */ -#define MD_PATCHLEVEL_VERSION 3 - -extern int mdp_major; - -extern int register_md_personality(struct mdk_personality *p); -extern int unregister_md_personality(struct mdk_personality *p); -extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev), - mddev_t *mddev, const char *name); -extern void md_unregister_thread(mdk_thread_t *thread); -extern void md_wakeup_thread(mdk_thread_t *thread); -extern void md_check_recovery(mddev_t *mddev); -extern void md_write_start(mddev_t *mddev, struct bio *bi); -extern void md_write_end(mddev_t *mddev); -extern void md_done_sync(mddev_t *mddev, int blocks, int ok); -extern void md_error(mddev_t *mddev, mdk_rdev_t *rdev); - -extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, - sector_t sector, int size, struct page *page); -extern void md_super_wait(mddev_t *mddev); -extern int sync_page_io(struct block_device *bdev, sector_t sector, int size, - struct page *page, int rw); -extern void md_do_sync(mddev_t *mddev); -extern void md_new_event(mddev_t *mddev); -extern int md_allow_write(mddev_t *mddev); -extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev); - -#endif /* CONFIG_MD */ -#endif - diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h deleted file mode 100644 index 9743e4dbc91..00000000000 --- a/include/linux/raid/md_k.h +++ /dev/null @@ -1,402 +0,0 @@ -/* - md_k.h : kernel internal structure of the Linux MD driver - Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2, or (at your option) - any later version. - - You should have received a copy of the GNU General Public License - (for example /usr/src/linux/COPYING); if not, write to the Free - Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. -*/ - -#ifndef _MD_K_H -#define _MD_K_H - -/* and dm-bio-list.h is not under include/linux because.... ??? 
*/ -#include "../../../drivers/md/dm-bio-list.h" - -#ifdef CONFIG_BLOCK - -#define LEVEL_MULTIPATH (-4) -#define LEVEL_LINEAR (-1) -#define LEVEL_FAULTY (-5) - -/* we need a value for 'no level specified' and 0 - * means 'raid0', so we need something else. This is - * for internal use only - */ -#define LEVEL_NONE (-1000000) - -#define MaxSector (~(sector_t)0) - -typedef struct mddev_s mddev_t; -typedef struct mdk_rdev_s mdk_rdev_t; - -/* - * options passed in raidrun: - */ - -/* Currently this must fit in an 'int' */ -#define MAX_CHUNK_SIZE (1<<30) - -/* - * MD's 'extended' device - */ -struct mdk_rdev_s -{ - struct list_head same_set; /* RAID devices within the same set */ - - sector_t size; /* Device size (in blocks) */ - mddev_t *mddev; /* RAID array if running */ - long last_events; /* IO event timestamp */ - - struct block_device *bdev; /* block device handle */ - - struct page *sb_page; - int sb_loaded; - __u64 sb_events; - sector_t data_offset; /* start of data in array */ - sector_t sb_start; /* offset of the super block (in 512byte sectors) */ - int sb_size; /* bytes in the superblock */ - int preferred_minor; /* autorun support */ - - struct kobject kobj; - - /* A device can be in one of three states based on two flags: - * Not working: faulty==1 in_sync==0 - * Fully working: faulty==0 in_sync==1 - * Working, but not - * in sync with array - * faulty==0 in_sync==0 - * - * It can never have faulty==1, in_sync==1 - * This reduces the burden of testing multiple flags in many cases - */ - - unsigned long flags; -#define Faulty 1 /* device is known to have a fault */ -#define In_sync 2 /* device is in_sync with rest of array */ -#define WriteMostly 4 /* Avoid reading if at all possible */ -#define BarriersNotsupp 5 /* BIO_RW_BARRIER is not supported */ -#define AllReserved 6 /* If whole device is reserved for - * one array */ -#define AutoDetected 7 /* added by auto-detect */ -#define Blocked 8 /* An error occured on an externally - * managed array, don't allow writes - * until it is cleared */ -#define StateChanged 9 /* Faulty or Blocked has changed during - * interrupt, so it needs to be - * notified by the thread */ - wait_queue_head_t blocked_wait; - - int desc_nr; /* descriptor index in the superblock */ - int raid_disk; /* role of device in array */ - int saved_raid_disk; /* role that device used to have in the - * array and could again if we did a partial - * resync from the bitmap - */ - sector_t recovery_offset;/* If this device has been partially - * recovered, this is where we were - * up to. - */ - - atomic_t nr_pending; /* number of pending requests. - * only maintained for arrays that - * support hot removal - */ - atomic_t read_errors; /* number of consecutive read errors that - * we have tried to ignore. - */ - atomic_t corrected_errors; /* number of corrected read errors, - * for reporting to userspace and storing - * in superblock. 
- */ - struct work_struct del_work; /* used for delayed sysfs removal */ - - struct sysfs_dirent *sysfs_state; /* handle for 'state' - * sysfs entry */ -}; - -struct mddev_s -{ - void *private; - struct mdk_personality *pers; - dev_t unit; - int md_minor; - struct list_head disks; - unsigned long flags; -#define MD_CHANGE_DEVS 0 /* Some device status has changed */ -#define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */ -#define MD_CHANGE_PENDING 2 /* superblock update in progress */ - - int ro; - - struct gendisk *gendisk; - - struct kobject kobj; - int hold_active; -#define UNTIL_IOCTL 1 -#define UNTIL_STOP 2 - - /* Superblock information */ - int major_version, - minor_version, - patch_version; - int persistent; - int external; /* metadata is - * managed externally */ - char metadata_type[17]; /* externally set*/ - int chunk_size; - time_t ctime, utime; - int level, layout; - char clevel[16]; - int raid_disks; - int max_disks; - sector_t size; /* used size of component devices */ - sector_t array_sectors; /* exported array size */ - __u64 events; - - char uuid[16]; - - /* If the array is being reshaped, we need to record the - * new shape and an indication of where we are up to. - * This is written to the superblock. - * If reshape_position is MaxSector, then no reshape is happening (yet). - */ - sector_t reshape_position; - int delta_disks, new_level, new_layout, new_chunk; - - struct mdk_thread_s *thread; /* management thread */ - struct mdk_thread_s *sync_thread; /* doing resync or reconstruct */ - sector_t curr_resync; /* last block scheduled */ - unsigned long resync_mark; /* a recent timestamp */ - sector_t resync_mark_cnt;/* blocks written at resync_mark */ - sector_t curr_mark_cnt; /* blocks scheduled now */ - - sector_t resync_max_sectors; /* may be set by personality */ - - sector_t resync_mismatches; /* count of sectors where - * parity/replica mismatch found - */ - - /* allow user-space to request suspension of IO to regions of the array */ - sector_t suspend_lo; - sector_t suspend_hi; - /* if zero, use the system-wide default */ - int sync_speed_min; - int sync_speed_max; - - /* resync even though the same disks are shared among md-devices */ - int parallel_resync; - - int ok_start_degraded; - /* recovery/resync flags - * NEEDED: we might need to start a resync/recover - * RUNNING: a thread is running, or about to be started - * SYNC: actually doing a resync, not a recovery - * RECOVER: doing recovery, or need to try it. - * INTR: resync needs to be aborted for some reason - * DONE: thread is done and is waiting to be reaped - * REQUEST: user-space has requested a sync (used with SYNC) - * CHECK: user-space request for for check-only, no repair - * RESHAPE: A reshape is happening - * - * If neither SYNC or RESHAPE are set, then it is a recovery. 
- */ -#define MD_RECOVERY_RUNNING 0 -#define MD_RECOVERY_SYNC 1 -#define MD_RECOVERY_RECOVER 2 -#define MD_RECOVERY_INTR 3 -#define MD_RECOVERY_DONE 4 -#define MD_RECOVERY_NEEDED 5 -#define MD_RECOVERY_REQUESTED 6 -#define MD_RECOVERY_CHECK 7 -#define MD_RECOVERY_RESHAPE 8 -#define MD_RECOVERY_FROZEN 9 - - unsigned long recovery; - int recovery_disabled; /* if we detect that recovery - * will always fail, set this - * so we don't loop trying */ - - int in_sync; /* know to not need resync */ - struct mutex reconfig_mutex; - atomic_t active; /* general refcount */ - atomic_t openers; /* number of active opens */ - - int changed; /* true if we might need to reread partition info */ - int degraded; /* whether md should consider - * adding a spare - */ - int barriers_work; /* initialised to true, cleared as soon - * as a barrier request to slave - * fails. Only supported - */ - struct bio *biolist; /* bios that need to be retried - * because BIO_RW_BARRIER is not supported - */ - - atomic_t recovery_active; /* blocks scheduled, but not written */ - wait_queue_head_t recovery_wait; - sector_t recovery_cp; - sector_t resync_min; /* user requested sync - * starts here */ - sector_t resync_max; /* resync should pause - * when it gets here */ - - struct sysfs_dirent *sysfs_state; /* handle for 'array_state' - * file in sysfs. - */ - struct sysfs_dirent *sysfs_action; /* handle for 'sync_action' */ - - struct work_struct del_work; /* used for delayed sysfs removal */ - - spinlock_t write_lock; - wait_queue_head_t sb_wait; /* for waiting on superblock updates */ - atomic_t pending_writes; /* number of active superblock writes */ - - unsigned int safemode; /* if set, update "clean" superblock - * when no writes pending. - */ - unsigned int safemode_delay; - struct timer_list safemode_timer; - atomic_t writes_pending; - struct request_queue *queue; /* for plugging ... */ - - atomic_t write_behind; /* outstanding async IO */ - unsigned int max_write_behind; /* 0 = sync */ - - struct bitmap *bitmap; /* the bitmap for the device */ - struct file *bitmap_file; /* the bitmap file */ - long bitmap_offset; /* offset from superblock of - * start of bitmap. May be - * negative, but not '0' - */ - long default_bitmap_offset; /* this is the offset to use when - * hot-adding a bitmap. It should - * eventually be settable by sysfs. 
- */ - - struct list_head all_mddevs; -}; - - -static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev) -{ - int faulty = test_bit(Faulty, &rdev->flags); - if (atomic_dec_and_test(&rdev->nr_pending) && faulty) - set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); -} - -static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors) -{ - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io); -} - -struct mdk_personality -{ - char *name; - int level; - struct list_head list; - struct module *owner; - int (*make_request)(struct request_queue *q, struct bio *bio); - int (*run)(mddev_t *mddev); - int (*stop)(mddev_t *mddev); - void (*status)(struct seq_file *seq, mddev_t *mddev); - /* error_handler must set ->faulty and clear ->in_sync - * if appropriate, and should abort recovery if needed - */ - void (*error_handler)(mddev_t *mddev, mdk_rdev_t *rdev); - int (*hot_add_disk) (mddev_t *mddev, mdk_rdev_t *rdev); - int (*hot_remove_disk) (mddev_t *mddev, int number); - int (*spare_active) (mddev_t *mddev); - sector_t (*sync_request)(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster); - int (*resize) (mddev_t *mddev, sector_t sectors); - int (*check_reshape) (mddev_t *mddev); - int (*start_reshape) (mddev_t *mddev); - int (*reconfig) (mddev_t *mddev, int layout, int chunk_size); - /* quiesce moves between quiescence states - * 0 - fully active - * 1 - no new requests allowed - * others - reserved - */ - void (*quiesce) (mddev_t *mddev, int state); -}; - - -struct md_sysfs_entry { - struct attribute attr; - ssize_t (*show)(mddev_t *, char *); - ssize_t (*store)(mddev_t *, const char *, size_t); -}; - - -static inline char * mdname (mddev_t * mddev) -{ - return mddev->gendisk ? mddev->gendisk->disk_name : "mdX"; -} - -/* - * iterates through some rdev ringlist. It's safe to remove the - * current 'rdev'. Dont touch 'tmp' though. - */ -#define rdev_for_each_list(rdev, tmp, head) \ - list_for_each_entry_safe(rdev, tmp, head, same_set) - -/* - * iterates through the 'same array disks' ringlist - */ -#define rdev_for_each(rdev, tmp, mddev) \ - list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set) - -#define rdev_for_each_rcu(rdev, mddev) \ - list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set) - -typedef struct mdk_thread_s { - void (*run) (mddev_t *mddev); - mddev_t *mddev; - wait_queue_head_t wqueue; - unsigned long flags; - struct task_struct *tsk; - unsigned long timeout; -} mdk_thread_t; - -#define THREAD_WAKEUP 0 - -#define __wait_event_lock_irq(wq, condition, lock, cmd) \ -do { \ - wait_queue_t __wait; \ - init_waitqueue_entry(&__wait, current); \ - \ - add_wait_queue(&wq, &__wait); \ - for (;;) { \ - set_current_state(TASK_UNINTERRUPTIBLE); \ - if (condition) \ - break; \ - spin_unlock_irq(&lock); \ - cmd; \ - schedule(); \ - spin_lock_irq(&lock); \ - } \ - current->state = TASK_RUNNING; \ - remove_wait_queue(&wq, &__wait); \ -} while (0) - -#define wait_event_lock_irq(wq, condition, lock, cmd) \ -do { \ - if (condition) \ - break; \ - __wait_event_lock_irq(wq, condition, lock, cmd); \ -} while (0) - -static inline void safe_put_page(struct page *p) -{ - if (p) put_page(p); -} - -#endif /* CONFIG_BLOCK */ -#endif - diff --git a/include/linux/raid/md_u.h b/include/linux/raid/md_u.h index 7192035fc4b..fb1abb3367e 100644 --- a/include/linux/raid/md_u.h +++ b/include/linux/raid/md_u.h @@ -15,6 +15,24 @@ #ifndef _MD_U_H #define _MD_U_H +/* + * Different major versions are not compatible. 
+ * Different minor versions are only downward compatible. + * Different patchlevel versions are downward and upward compatible. + */ +#define MD_MAJOR_VERSION 0 +#define MD_MINOR_VERSION 90 +/* + * MD_PATCHLEVEL_VERSION indicates kernel functionality. + * >=1 means different superblock formats are selectable using SET_ARRAY_INFO + * and major_version/minor_version accordingly + * >=2 means that Internal bitmaps are supported by setting MD_SB_BITMAP_PRESENT + * in the super status byte + * >=3 means that bitmap superblock version 4 is supported, which uses + * little-ending representation rather than host-endian + */ +#define MD_PATCHLEVEL_VERSION 3 + /* ioctls */ /* status */ @@ -46,6 +64,12 @@ #define STOP_ARRAY_RO _IO (MD_MAJOR, 0x33) #define RESTART_ARRAY_RW _IO (MD_MAJOR, 0x34) +/* 63 partitions with the alternate major number (mdp) */ +#define MdpMinorShift 6 +#ifdef __KERNEL__ +extern int mdp_major; +#endif + typedef struct mdu_version_s { int major; int minor; @@ -85,6 +109,17 @@ typedef struct mdu_array_info_s { } mdu_array_info_t; +/* non-obvious values for 'level' */ +#define LEVEL_MULTIPATH (-4) +#define LEVEL_LINEAR (-1) +#define LEVEL_FAULTY (-5) + +/* we need a value for 'no level specified' and 0 + * means 'raid0', so we need something else. This is + * for internal use only + */ +#define LEVEL_NONE (-1000000) + typedef struct mdu_disk_info_s { /* * configuration/status of one particular disk diff --git a/include/linux/raid/multipath.h b/include/linux/raid/multipath.h deleted file mode 100644 index 6f53fc177a4..00000000000 --- a/include/linux/raid/multipath.h +++ /dev/null @@ -1,42 +0,0 @@ -#ifndef _MULTIPATH_H -#define _MULTIPATH_H - -#include <linux/raid/md.h> - -struct multipath_info { - mdk_rdev_t *rdev; -}; - -struct multipath_private_data { - mddev_t *mddev; - struct multipath_info *multipaths; - int raid_disks; - int working_disks; - spinlock_t device_lock; - struct list_head retry_list; - - mempool_t *pool; -}; - -typedef struct multipath_private_data multipath_conf_t; - -/* - * this is the only point in the RAID code where we violate - * C type safety. mddev->private is an 'opaque' pointer. - */ -#define mddev_to_conf(mddev) ((multipath_conf_t *) mddev->private) - -/* - * this is our 'private' 'collective' MULTIPATH buffer head. - * it contains information about what kind of IO operations were started - * for this MULTIPATH operation, and about their status: - */ - -struct multipath_bh { - mddev_t *mddev; - struct bio *master_bio; - struct bio bio; - int path; - struct list_head retry_list; -}; -#endif diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h new file mode 100644 index 00000000000..d92480f8285 --- /dev/null +++ b/include/linux/raid/pq.h @@ -0,0 +1,132 @@ +/* -*- linux-c -*- ------------------------------------------------------- * + * + * Copyright 2003 H. Peter Anvin - All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, Inc., 53 Temple Place Ste 330, + * Boston MA 02111-1307, USA; either version 2 of the License, or + * (at your option) any later version; incorporated herein by reference. 
+ * + * ----------------------------------------------------------------------- */ + +#ifndef LINUX_RAID_RAID6_H +#define LINUX_RAID_RAID6_H + +#ifdef __KERNEL__ + +/* Set to 1 to use kernel-wide empty_zero_page */ +#define RAID6_USE_EMPTY_ZERO_PAGE 0 +#include <linux/blkdev.h> + +/* We need a pre-zeroed page... if we don't want to use the kernel-provided + one define it here */ +#if RAID6_USE_EMPTY_ZERO_PAGE +# define raid6_empty_zero_page empty_zero_page +#else +extern const char raid6_empty_zero_page[PAGE_SIZE]; +#endif + +#else /* ! __KERNEL__ */ +/* Used for testing in user space */ + +#include <errno.h> +#include <inttypes.h> +#include <limits.h> +#include <stddef.h> +#include <sys/mman.h> +#include <sys/types.h> + +/* Not standard, but glibc defines it */ +#define BITS_PER_LONG __WORDSIZE + +typedef uint8_t u8; +typedef uint16_t u16; +typedef uint32_t u32; +typedef uint64_t u64; + +#ifndef PAGE_SIZE +# define PAGE_SIZE 4096 +#endif +extern const char raid6_empty_zero_page[PAGE_SIZE]; + +#define __init +#define __exit +#define __attribute_const__ __attribute__((const)) +#define noinline __attribute__((noinline)) + +#define preempt_enable() +#define preempt_disable() +#define cpu_has_feature(x) 1 +#define enable_kernel_altivec() +#define disable_kernel_altivec() + +#define EXPORT_SYMBOL(sym) +#define MODULE_LICENSE(licence) +#define subsys_initcall(x) +#define module_exit(x) +#endif /* __KERNEL__ */ + +/* Routine choices */ +struct raid6_calls { + void (*gen_syndrome)(int, size_t, void **); + int (*valid)(void); /* Returns 1 if this routine set is usable */ + const char *name; /* Name of this routine set */ + int prefer; /* Has special performance attribute */ +}; + +/* Selected algorithm */ +extern struct raid6_calls raid6_call; + +/* Algorithm list */ +extern const struct raid6_calls * const raid6_algos[]; +int raid6_select_algo(void); + +/* Return values from chk_syndrome */ +#define RAID6_OK 0 +#define RAID6_P_BAD 1 +#define RAID6_Q_BAD 2 +#define RAID6_PQ_BAD 3 + +/* Galois field tables */ +extern const u8 raid6_gfmul[256][256] __attribute__((aligned(256))); +extern const u8 raid6_gfexp[256] __attribute__((aligned(256))); +extern const u8 raid6_gfinv[256] __attribute__((aligned(256))); +extern const u8 raid6_gfexi[256] __attribute__((aligned(256))); + +/* Recovery routines */ +void raid6_2data_recov(int disks, size_t bytes, int faila, int failb, + void **ptrs); +void raid6_datap_recov(int disks, size_t bytes, int faila, void **ptrs); +void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, + void **ptrs); + +/* Some definitions to allow code to be compiled for testing in userspace */ +#ifndef __KERNEL__ + +# define jiffies raid6_jiffies() +# define printk printf +# define GFP_KERNEL 0 +# define __get_free_pages(x, y) ((unsigned long)mmap(NULL, PAGE_SIZE << (y), \ + PROT_READ|PROT_WRITE, \ + MAP_PRIVATE|MAP_ANONYMOUS,\ + 0, 0)) +# define free_pages(x, y) munmap((void *)(x), (y)*PAGE_SIZE) + +static inline void cpu_relax(void) +{ + /* Nothing */ +} + +#undef HZ +#define HZ 1000 +static inline uint32_t raid6_jiffies(void) +{ + struct timeval tv; + gettimeofday(&tv, NULL); + return tv.tv_sec*1000 + tv.tv_usec/1000; +} + +#endif /* ! 
__KERNEL__ */ + +#endif /* LINUX_RAID_RAID6_H */ diff --git a/include/linux/raid/raid0.h b/include/linux/raid/raid0.h deleted file mode 100644 index fd42aa87c39..00000000000 --- a/include/linux/raid/raid0.h +++ /dev/null @@ -1,30 +0,0 @@ -#ifndef _RAID0_H -#define _RAID0_H - -#include <linux/raid/md.h> - -struct strip_zone -{ - sector_t zone_start; /* Zone offset in md_dev (in sectors) */ - sector_t dev_start; /* Zone offset in real dev (in sectors) */ - sector_t sectors; /* Zone size in sectors */ - int nb_dev; /* # of devices attached to the zone */ - mdk_rdev_t **dev; /* Devices attached to the zone */ -}; - -struct raid0_private_data -{ - struct strip_zone **hash_table; /* Table of indexes into strip_zone */ - struct strip_zone *strip_zone; - mdk_rdev_t **devlist; /* lists of rdevs, pointed to by strip_zone->dev */ - int nr_strip_zones; - - sector_t spacing; - int sector_shift; /* shift this before divide by spacing */ -}; - -typedef struct raid0_private_data raid0_conf_t; - -#define mddev_to_conf(mddev) ((raid0_conf_t *) mddev->private) - -#endif diff --git a/include/linux/raid/raid1.h b/include/linux/raid/raid1.h deleted file mode 100644 index 0a9ba7c3302..00000000000 --- a/include/linux/raid/raid1.h +++ /dev/null @@ -1,134 +0,0 @@ -#ifndef _RAID1_H -#define _RAID1_H - -#include <linux/raid/md.h> - -typedef struct mirror_info mirror_info_t; - -struct mirror_info { - mdk_rdev_t *rdev; - sector_t head_position; -}; - -/* - * memory pools need a pointer to the mddev, so they can force an unplug - * when memory is tight, and a count of the number of drives that the - * pool was allocated for, so they know how much to allocate and free. - * mddev->raid_disks cannot be used, as it can change while a pool is active - * These two datums are stored in a kmalloced struct. - */ - -struct pool_info { - mddev_t *mddev; - int raid_disks; -}; - - -typedef struct r1bio_s r1bio_t; - -struct r1_private_data_s { - mddev_t *mddev; - mirror_info_t *mirrors; - int raid_disks; - int last_used; - sector_t next_seq_sect; - spinlock_t device_lock; - - struct list_head retry_list; - /* queue pending writes and submit them on unplug */ - struct bio_list pending_bio_list; - /* queue of writes that have been unplugged */ - struct bio_list flushing_bio_list; - - /* for use when syncing mirrors: */ - - spinlock_t resync_lock; - int nr_pending; - int nr_waiting; - int nr_queued; - int barrier; - sector_t next_resync; - int fullsync; /* set to 1 if a full sync is needed, - * (fresh device added). - * Cleared when a sync completes. - */ - - wait_queue_head_t wait_barrier; - - struct pool_info *poolinfo; - - struct page *tmppage; - - mempool_t *r1bio_pool; - mempool_t *r1buf_pool; -}; - -typedef struct r1_private_data_s conf_t; - -/* - * this is the only point in the RAID code where we violate - * C type safety. mddev->private is an 'opaque' pointer. - */ -#define mddev_to_conf(mddev) ((conf_t *) mddev->private) - -/* - * this is our 'private' RAID1 bio. 
- * - * it contains information about what kind of IO operations were started - * for this RAID1 operation, and about their status: - */ - -struct r1bio_s { - atomic_t remaining; /* 'have we finished' count, - * used from IRQ handlers - */ - atomic_t behind_remaining; /* number of write-behind ios remaining - * in this BehindIO request - */ - sector_t sector; - int sectors; - unsigned long state; - mddev_t *mddev; - /* - * original bio going to /dev/mdx - */ - struct bio *master_bio; - /* - * if the IO is in READ direction, then this is where we read - */ - int read_disk; - - struct list_head retry_list; - struct bitmap_update *bitmap_update; - /* - * if the IO is in WRITE direction, then multiple bios are used. - * We choose the number when they are allocated. - */ - struct bio *bios[0]; - /* DO NOT PUT ANY NEW FIELDS HERE - bios array is contiguously alloced*/ -}; - -/* when we get a read error on a read-only array, we redirect to another - * device without failing the first device, or trying to over-write to - * correct the read error. To keep track of bad blocks on a per-bio - * level, we store IO_BLOCKED in the appropriate 'bios' pointer - */ -#define IO_BLOCKED ((struct bio*)1) - -/* bits for r1bio.state */ -#define R1BIO_Uptodate 0 -#define R1BIO_IsSync 1 -#define R1BIO_Degraded 2 -#define R1BIO_BehindIO 3 -#define R1BIO_Barrier 4 -#define R1BIO_BarrierRetry 5 -/* For write-behind requests, we call bi_end_io when - * the last non-write-behind device completes, providing - * any write was successful. Otherwise we call when - * any write-behind write succeeds, otherwise we call - * with failure when last write completes (and all failed). - * Record that bi_end_io was called with this flag... - */ -#define R1BIO_Returned 6 - -#endif diff --git a/include/linux/raid/raid10.h b/include/linux/raid/raid10.h deleted file mode 100644 index e9091cfeb28..00000000000 --- a/include/linux/raid/raid10.h +++ /dev/null @@ -1,123 +0,0 @@ -#ifndef _RAID10_H -#define _RAID10_H - -#include <linux/raid/md.h> - -typedef struct mirror_info mirror_info_t; - -struct mirror_info { - mdk_rdev_t *rdev; - sector_t head_position; -}; - -typedef struct r10bio_s r10bio_t; - -struct r10_private_data_s { - mddev_t *mddev; - mirror_info_t *mirrors; - int raid_disks; - spinlock_t device_lock; - - /* geometry */ - int near_copies; /* number of copies layed out raid0 style */ - int far_copies; /* number of copies layed out - * at large strides across drives - */ - int far_offset; /* far_copies are offset by 1 stripe - * instead of many - */ - int copies; /* near_copies * far_copies. - * must be <= raid_disks - */ - sector_t stride; /* distance between far copies. - * This is size / far_copies unless - * far_offset, in which case it is - * 1 stripe. - */ - - int chunk_shift; /* shift from chunks to sectors */ - sector_t chunk_mask; - - struct list_head retry_list; - /* queue pending writes and submit them on unplug */ - struct bio_list pending_bio_list; - - - spinlock_t resync_lock; - int nr_pending; - int nr_waiting; - int nr_queued; - int barrier; - sector_t next_resync; - int fullsync; /* set to 1 if a full sync is needed, - * (fresh device added). - * Cleared when a sync completes. - */ - - wait_queue_head_t wait_barrier; - - mempool_t *r10bio_pool; - mempool_t *r10buf_pool; - struct page *tmppage; -}; - -typedef struct r10_private_data_s conf_t; - -/* - * this is the only point in the RAID code where we violate - * C type safety. mddev->private is an 'opaque' pointer. 
- */ -#define mddev_to_conf(mddev) ((conf_t *) mddev->private) - -/* - * this is our 'private' RAID10 bio. - * - * it contains information about what kind of IO operations were started - * for this RAID10 operation, and about their status: - */ - -struct r10bio_s { - atomic_t remaining; /* 'have we finished' count, - * used from IRQ handlers - */ - sector_t sector; /* virtual sector number */ - int sectors; - unsigned long state; - mddev_t *mddev; - /* - * original bio going to /dev/mdx - */ - struct bio *master_bio; - /* - * if the IO is in READ direction, then this is where we read - */ - int read_slot; - - struct list_head retry_list; - /* - * if the IO is in WRITE direction, then multiple bios are used, - * one for each copy. - * When resyncing we also use one for each copy. - * When reconstructing, we use 2 bios, one for read, one for write. - * We choose the number when they are allocated. - */ - struct { - struct bio *bio; - sector_t addr; - int devnum; - } devs[0]; -}; - -/* when we get a read error on a read-only array, we redirect to another - * device without failing the first device, or trying to over-write to - * correct the read error. To keep track of bad blocks on a per-bio - * level, we store IO_BLOCKED in the appropriate 'bios' pointer - */ -#define IO_BLOCKED ((struct bio*)1) - -/* bits for r10bio.state */ -#define R10BIO_Uptodate 0 -#define R10BIO_IsSync 1 -#define R10BIO_IsRecover 2 -#define R10BIO_Degraded 3 -#endif diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h deleted file mode 100644 index 3b267279245..00000000000 --- a/include/linux/raid/raid5.h +++ /dev/null @@ -1,402 +0,0 @@ -#ifndef _RAID5_H -#define _RAID5_H - -#include <linux/raid/md.h> -#include <linux/raid/xor.h> - -/* - * - * Each stripe contains one buffer per disc. Each buffer can be in - * one of a number of states stored in "flags". Changes between - * these states happen *almost* exclusively under a per-stripe - * spinlock. Some very specific changes can happen in bi_end_io, and - * these are not protected by the spin lock. - * - * The flag bits that are used to represent these states are: - * R5_UPTODATE and R5_LOCKED - * - * State Empty == !UPTODATE, !LOCK - * We have no data, and there is no active request - * State Want == !UPTODATE, LOCK - * A read request is being submitted for this block - * State Dirty == UPTODATE, LOCK - * Some new data is in this buffer, and it is being written out - * State Clean == UPTODATE, !LOCK - * We have valid data which is the same as on disc - * - * The possible state transitions are: - * - * Empty -> Want - on read or write to get old data for parity calc - * Empty -> Dirty - on compute_parity to satisfy write/sync request.(RECONSTRUCT_WRITE) - * Empty -> Clean - on compute_block when computing a block for failed drive - * Want -> Empty - on failed read - * Want -> Clean - on successful completion of read request - * Dirty -> Clean - on successful completion of write request - * Dirty -> Clean - on failed write - * Clean -> Dirty - on compute_parity to satisfy write/sync (RECONSTRUCT or RMW) - * - * The Want->Empty, Want->Clean, Dirty->Clean, transitions - * all happen in b_end_io at interrupt time. - * Each sets the Uptodate bit before releasing the Lock bit. - * This leaves one multi-stage transition: - * Want->Dirty->Clean - * This is safe because thinking that a Clean buffer is actually dirty - * will at worst delay some action, and the stripe will be scheduled - * for attention after the transition is complete. 
- * - * There is one possibility that is not covered by these states. That - * is if one drive has failed and there is a spare being rebuilt. We - * can't distinguish between a clean block that has been generated - * from parity calculations, and a clean block that has been - * successfully written to the spare ( or to parity when resyncing). - * To distingush these states we have a stripe bit STRIPE_INSYNC that - * is set whenever a write is scheduled to the spare, or to the parity - * disc if there is no spare. A sync request clears this bit, and - * when we find it set with no buffers locked, we know the sync is - * complete. - * - * Buffers for the md device that arrive via make_request are attached - * to the appropriate stripe in one of two lists linked on b_reqnext. - * One list (bh_read) for read requests, one (bh_write) for write. - * There should never be more than one buffer on the two lists - * together, but we are not guaranteed of that so we allow for more. - * - * If a buffer is on the read list when the associated cache buffer is - * Uptodate, the data is copied into the read buffer and it's b_end_io - * routine is called. This may happen in the end_request routine only - * if the buffer has just successfully been read. end_request should - * remove the buffers from the list and then set the Uptodate bit on - * the buffer. Other threads may do this only if they first check - * that the Uptodate bit is set. Once they have checked that they may - * take buffers off the read queue. - * - * When a buffer on the write list is committed for write it is copied - * into the cache buffer, which is then marked dirty, and moved onto a - * third list, the written list (bh_written). Once both the parity - * block and the cached buffer are successfully written, any buffer on - * a written list can be returned with b_end_io. - * - * The write list and read list both act as fifos. The read list is - * protected by the device_lock. The write and written lists are - * protected by the stripe lock. The device_lock, which can be - * claimed while the stipe lock is held, is only for list - * manipulations and will only be held for a very short time. It can - * be claimed from interrupts. - * - * - * Stripes in the stripe cache can be on one of two lists (or on - * neither). The "inactive_list" contains stripes which are not - * currently being used for any request. They can freely be reused - * for another stripe. The "handle_list" contains stripes that need - * to be handled in some way. Both of these are fifo queues. Each - * stripe is also (potentially) linked to a hash bucket in the hash - * table so that it can be found by sector number. Stripes that are - * not hashed must be on the inactive_list, and will normally be at - * the front. All stripes start life this way. - * - * The inactive_list, handle_list and hash bucket lists are all protected by the - * device_lock. - * - stripes on the inactive_list never have their stripe_lock held. - * - stripes have a reference counter. If count==0, they are on a list. - * - If a stripe might need handling, STRIPE_HANDLE is set. - * - When refcount reaches zero, then if STRIPE_HANDLE it is put on - * handle_list else inactive_list - * - * This, combined with the fact that STRIPE_HANDLE is only ever - * cleared while a stripe has a non-zero count means that if the - * refcount is 0 and STRIPE_HANDLE is set, then it is on the - * handle_list and if recount is 0 and STRIPE_HANDLE is not set, then - * the stripe is on inactive_list. 
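The list rules above reduce to one invariant: when a stripe's refcount drops to zero it must end up on handle_list if STRIPE_HANDLE is set and on inactive_list otherwise, and the lists are only touched under device_lock. A deliberately simplified sketch of a release path that preserves that invariant; it is not the kernel's actual release_stripe() (which also wakes waiters, deals with bitmap flushes and so on) and uses only fields declared elsewhere in this header:

/* Simplified: put a stripe back on the correct list when its count hits 0. */
static void example_release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
{
        spin_lock_irq(&conf->device_lock);
        if (atomic_dec_and_test(&sh->count)) {
                if (test_bit(STRIPE_HANDLE, &sh->state))
                        list_add_tail(&sh->lru, &conf->handle_list);
                else
                        list_add_tail(&sh->lru, &conf->inactive_list);
        }
        spin_unlock_irq(&conf->device_lock);
}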
- * - * The possible transitions are: - * activate an unhashed/inactive stripe (get_active_stripe()) - * lockdev check-hash unlink-stripe cnt++ clean-stripe hash-stripe unlockdev - * activate a hashed, possibly active stripe (get_active_stripe()) - * lockdev check-hash if(!cnt++)unlink-stripe unlockdev - * attach a request to an active stripe (add_stripe_bh()) - * lockdev attach-buffer unlockdev - * handle a stripe (handle_stripe()) - * lockstripe clrSTRIPE_HANDLE ... - * (lockdev check-buffers unlockdev) .. - * change-state .. - * record io/ops needed unlockstripe schedule io/ops - * release an active stripe (release_stripe()) - * lockdev if (!--cnt) { if STRIPE_HANDLE, add to handle_list else add to inactive-list } unlockdev - * - * The refcount counts each thread that have activated the stripe, - * plus raid5d if it is handling it, plus one for each active request - * on a cached buffer, and plus one if the stripe is undergoing stripe - * operations. - * - * Stripe operations are performed outside the stripe lock, - * the stripe operations are: - * -copying data between the stripe cache and user application buffers - * -computing blocks to save a disk access, or to recover a missing block - * -updating the parity on a write operation (reconstruct write and - * read-modify-write) - * -checking parity correctness - * -running i/o to disk - * These operations are carried out by raid5_run_ops which uses the async_tx - * api to (optionally) offload operations to dedicated hardware engines. - * When requesting an operation handle_stripe sets the pending bit for the - * operation and increments the count. raid5_run_ops is then run whenever - * the count is non-zero. - * There are some critical dependencies between the operations that prevent some - * from being requested while another is in flight. - * 1/ Parity check operations destroy the in cache version of the parity block, - * so we prevent parity dependent operations like writes and compute_blocks - * from starting while a check is in progress. Some dma engines can perform - * the check without damaging the parity block, in these cases the parity - * block is re-marked up to date (assuming the check was successful) and is - * not re-read from disk. - * 2/ When a write operation is requested we immediately lock the affected - * blocks, and mark them as not up to date. This causes new read requests - * to be held off, as well as parity checks and compute block operations. - * 3/ Once a compute block operation has been requested handle_stripe treats - * that block as if it is up to date. raid5_run_ops guaruntees that any - * operation that is dependent on the compute block result is initiated after - * the compute block completes. - */ - -/* - * Operations state - intermediate states that are visible outside of sh->lock - * In general _idle indicates nothing is running, _run indicates a data - * processing operation is active, and _result means the data processing result - * is stable and can be acted upon. 
For simple operations like biofill and - * compute that only have an _idle and _run state they are indicated with - * sh->state flags (STRIPE_BIOFILL_RUN and STRIPE_COMPUTE_RUN) - */ -/** - * enum check_states - handles syncing / repairing a stripe - * @check_state_idle - check operations are quiesced - * @check_state_run - check operation is running - * @check_state_result - set outside lock when check result is valid - * @check_state_compute_run - check failed and we are repairing - * @check_state_compute_result - set outside lock when compute result is valid - */ -enum check_states { - check_state_idle = 0, - check_state_run, /* parity check */ - check_state_check_result, - check_state_compute_run, /* parity repair */ - check_state_compute_result, -}; - -/** - * enum reconstruct_states - handles writing or expanding a stripe - */ -enum reconstruct_states { - reconstruct_state_idle = 0, - reconstruct_state_prexor_drain_run, /* prexor-write */ - reconstruct_state_drain_run, /* write */ - reconstruct_state_run, /* expand */ - reconstruct_state_prexor_drain_result, - reconstruct_state_drain_result, - reconstruct_state_result, -}; - -struct stripe_head { - struct hlist_node hash; - struct list_head lru; /* inactive_list or handle_list */ - struct raid5_private_data *raid_conf; - sector_t sector; /* sector of this row */ - int pd_idx; /* parity disk index */ - unsigned long state; /* state flags */ - atomic_t count; /* nr of active thread/requests */ - spinlock_t lock; - int bm_seq; /* sequence number for bitmap flushes */ - int disks; /* disks in stripe */ - enum check_states check_state; - enum reconstruct_states reconstruct_state; - /* stripe_operations - * @target - STRIPE_OP_COMPUTE_BLK target - */ - struct stripe_operations { - int target; - u32 zero_sum_result; - } ops; - struct r5dev { - struct bio req; - struct bio_vec vec; - struct page *page; - struct bio *toread, *read, *towrite, *written; - sector_t sector; /* sector of this page */ - unsigned long flags; - } dev[1]; /* allocated with extra space depending of RAID geometry */ -}; - -/* stripe_head_state - collects and tracks the dynamic state of a stripe_head - * for handle_stripe. 
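The check_states values above are the externally visible phases of the sync/repair machinery: the *_run states mean an asynchronous operation is in flight, the *_result states mean its outcome may be acted on outside sh->lock. A trivial illustrative mapping from state to meaning (not part of the header):

/* Illustrative only: human-readable meaning of each check_state phase. */
static const char *check_state_name(enum check_states state)
{
        switch (state) {
        case check_state_idle:           return "idle";
        case check_state_run:            return "parity check in flight";
        case check_state_check_result:   return "check result ready";
        case check_state_compute_run:    return "repair (recompute) in flight";
        case check_state_compute_result: return "repair result ready";
        }
        return "unknown";
}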
It is only valid under spin_lock(sh->lock); - */ -struct stripe_head_state { - int syncing, expanding, expanded; - int locked, uptodate, to_read, to_write, failed, written; - int to_fill, compute, req_compute, non_overwrite; - int failed_num; - unsigned long ops_request; -}; - -/* r6_state - extra state data only relevant to r6 */ -struct r6_state { - int p_failed, q_failed, qd_idx, failed_num[2]; -}; - -/* Flags */ -#define R5_UPTODATE 0 /* page contains current data */ -#define R5_LOCKED 1 /* IO has been submitted on "req" */ -#define R5_OVERWRITE 2 /* towrite covers whole page */ -/* and some that are internal to handle_stripe */ -#define R5_Insync 3 /* rdev && rdev->in_sync at start */ -#define R5_Wantread 4 /* want to schedule a read */ -#define R5_Wantwrite 5 -#define R5_Overlap 7 /* There is a pending overlapping request on this block */ -#define R5_ReadError 8 /* seen a read error here recently */ -#define R5_ReWrite 9 /* have tried to over-write the readerror */ - -#define R5_Expanded 10 /* This block now has post-expand data */ -#define R5_Wantcompute 11 /* compute_block in progress treat as - * uptodate - */ -#define R5_Wantfill 12 /* dev->toread contains a bio that needs - * filling - */ -#define R5_Wantdrain 13 /* dev->towrite needs to be drained */ -/* - * Write method - */ -#define RECONSTRUCT_WRITE 1 -#define READ_MODIFY_WRITE 2 -/* not a write method, but a compute_parity mode */ -#define CHECK_PARITY 3 - -/* - * Stripe state - */ -#define STRIPE_HANDLE 2 -#define STRIPE_SYNCING 3 -#define STRIPE_INSYNC 4 -#define STRIPE_PREREAD_ACTIVE 5 -#define STRIPE_DELAYED 6 -#define STRIPE_DEGRADED 7 -#define STRIPE_BIT_DELAY 8 -#define STRIPE_EXPANDING 9 -#define STRIPE_EXPAND_SOURCE 10 -#define STRIPE_EXPAND_READY 11 -#define STRIPE_IO_STARTED 12 /* do not count towards 'bypass_count' */ -#define STRIPE_FULL_WRITE 13 /* all blocks are set to be overwritten */ -#define STRIPE_BIOFILL_RUN 14 -#define STRIPE_COMPUTE_RUN 15 -/* - * Operation request flags - */ -#define STRIPE_OP_BIOFILL 0 -#define STRIPE_OP_COMPUTE_BLK 1 -#define STRIPE_OP_PREXOR 2 -#define STRIPE_OP_BIODRAIN 3 -#define STRIPE_OP_POSTXOR 4 -#define STRIPE_OP_CHECK 5 - -/* - * Plugging: - * - * To improve write throughput, we need to delay the handling of some - * stripes until there has been a chance that several write requests - * for the one stripe have all been collected. - * In particular, any write request that would require pre-reading - * is put on a "delayed" queue until there are no stripes currently - * in a pre-read phase. Further, if the "delayed" queue is empty when - * a stripe is put on it then we "plug" the queue and do not process it - * until an unplug call is made. (the unplug_io_fn() is called). - * - * When preread is initiated on a stripe, we set PREREAD_ACTIVE and add - * it to the count of prereading stripes. - * When write is initiated, or the stripe refcnt == 0 (just in case) we - * clear the PREREAD_ACTIVE flag and decrement the count - * Whenever the 'handle' queue is empty and the device is not plugged, we - * move any strips from delayed to handle and clear the DELAYED flag and set - * PREREAD_ACTIVE. - * In stripe_handle, if we find pre-reading is necessary, we do it if - * PREREAD_ACTIVE is set, else we set DELAYED which will send it to the delayed queue. - * HANDLE gets cleared if stripe_handle leave nothing locked. 
- */ - - -struct disk_info { - mdk_rdev_t *rdev; -}; - -struct raid5_private_data { - struct hlist_head *stripe_hashtbl; - mddev_t *mddev; - struct disk_info *spare; - int chunk_size, level, algorithm; - int max_degraded; - int raid_disks; - int max_nr_stripes; - - /* used during an expand */ - sector_t expand_progress; /* MaxSector when no expand happening */ - sector_t expand_lo; /* from here up to expand_progress it out-of-bounds - * as we haven't flushed the metadata yet - */ - int previous_raid_disks; - - struct list_head handle_list; /* stripes needing handling */ - struct list_head hold_list; /* preread ready stripes */ - struct list_head delayed_list; /* stripes that have plugged requests */ - struct list_head bitmap_list; /* stripes delaying awaiting bitmap update */ - struct bio *retry_read_aligned; /* currently retrying aligned bios */ - struct bio *retry_read_aligned_list; /* aligned bios retry list */ - atomic_t preread_active_stripes; /* stripes with scheduled io */ - atomic_t active_aligned_reads; - atomic_t pending_full_writes; /* full write backlog */ - int bypass_count; /* bypassed prereads */ - int bypass_threshold; /* preread nice */ - struct list_head *last_hold; /* detect hold_list promotions */ - - atomic_t reshape_stripes; /* stripes with pending writes for reshape */ - /* unfortunately we need two cache names as we temporarily have - * two caches. - */ - int active_name; - char cache_name[2][20]; - struct kmem_cache *slab_cache; /* for allocating stripes */ - - int seq_flush, seq_write; - int quiesce; - - int fullsync; /* set to 1 if a full sync is needed, - * (fresh device added). - * Cleared when a sync completes. - */ - - struct page *spare_page; /* Used when checking P/Q in raid6 */ - - /* - * Free stripes pool - */ - atomic_t active_stripes; - struct list_head inactive_list; - wait_queue_head_t wait_for_stripe; - wait_queue_head_t wait_for_overlap; - int inactive_blocked; /* release of inactive stripes blocked, - * waiting for 25% to be free - */ - int pool_size; /* number of disks in stripeheads in pool */ - spinlock_t device_lock; - struct disk_info *disks; -}; - -typedef struct raid5_private_data raid5_conf_t; - -#define mddev_to_conf(mddev) ((raid5_conf_t *) mddev->private) - -/* - * Our supported algorithms - */ -#define ALGORITHM_LEFT_ASYMMETRIC 0 -#define ALGORITHM_RIGHT_ASYMMETRIC 1 -#define ALGORITHM_LEFT_SYMMETRIC 2 -#define ALGORITHM_RIGHT_SYMMETRIC 3 - -#endif diff --git a/include/linux/raid/xor.h b/include/linux/raid/xor.h index 3e120587ead..5a210959e3f 100644 --- a/include/linux/raid/xor.h +++ b/include/linux/raid/xor.h @@ -1,8 +1,6 @@ #ifndef _XOR_H #define _XOR_H -#include <linux/raid/md.h> - #define MAX_XOR_BLOCKS 4 extern void xor_blocks(unsigned int count, unsigned int bytes, diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h index 80044a4f3ab..bfd92e1e5d2 100644 --- a/include/linux/rcuclassic.h +++ b/include/linux/rcuclassic.h @@ -36,7 +36,6 @@ #include <linux/cache.h> #include <linux/spinlock.h> #include <linux/threads.h> -#include <linux/percpu.h> #include <linux/cpumask.h> #include <linux/seqlock.h> @@ -108,25 +107,14 @@ struct rcu_data { struct rcu_head barrier; }; -DECLARE_PER_CPU(struct rcu_data, rcu_data); -DECLARE_PER_CPU(struct rcu_data, rcu_bh_data); - /* * Increment the quiescent state counter. * The counter is a bit degenerated: We do not need to know * how many quiescent states passed, just if there was at least * one since the start of the grace period. Thus just a flag. 
*/ -static inline void rcu_qsctr_inc(int cpu) -{ - struct rcu_data *rdp = &per_cpu(rcu_data, cpu); - rdp->passed_quiesc = 1; -} -static inline void rcu_bh_qsctr_inc(int cpu) -{ - struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu); - rdp->passed_quiesc = 1; -} +extern void rcu_qsctr_inc(int cpu); +extern void rcu_bh_qsctr_inc(int cpu); extern int rcu_pending(int cpu); extern int rcu_needs_cpu(int cpu); diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 528343e6da5..15fbb3ca634 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -36,7 +36,6 @@ #include <linux/cache.h> #include <linux/spinlock.h> #include <linux/threads.h> -#include <linux/percpu.h> #include <linux/cpumask.h> #include <linux/seqlock.h> #include <linux/lockdep.h> diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h index 74304b4538d..fce522782ff 100644 --- a/include/linux/rcupreempt.h +++ b/include/linux/rcupreempt.h @@ -36,34 +36,19 @@ #include <linux/cache.h> #include <linux/spinlock.h> #include <linux/threads.h> -#include <linux/percpu.h> +#include <linux/smp.h> #include <linux/cpumask.h> #include <linux/seqlock.h> -struct rcu_dyntick_sched { - int dynticks; - int dynticks_snap; - int sched_qs; - int sched_qs_snap; - int sched_dynticks_snap; -}; - -DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched); - -static inline void rcu_qsctr_inc(int cpu) -{ - struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu); - - rdssp->sched_qs++; -} -#define rcu_bh_qsctr_inc(cpu) +extern void rcu_qsctr_inc(int cpu); +static inline void rcu_bh_qsctr_inc(int cpu) { } /* * Someone might want to pass call_rcu_bh as a function pointer. * So this needs to just be a rename and not a macro function. * (no parentheses) */ -#define call_rcu_bh call_rcu +#define call_rcu_bh call_rcu /** * call_rcu_sched - Queue RCU callback for invocation after sched grace period. 
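Because call_rcu_bh is a plain object-like rename of call_rcu, with no parameter list as the comment above stresses, taking its address still yields a valid function pointer after macro expansion. A short illustration, assuming the usual call_rcu() prototype from rcupdate.h (not shown in this hunk); the typedef and variable are invented for the example:

/* Hypothetical: store the callback-queueing function in a pointer. */
typedef void (*rcu_queue_fn)(struct rcu_head *head,
                             void (*func)(struct rcu_head *head));

static rcu_queue_fn example_queue = call_rcu_bh;        /* expands to call_rcu */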
@@ -117,30 +102,12 @@ extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu); struct softirq_action; #ifdef CONFIG_NO_HZ - -static inline void rcu_enter_nohz(void) -{ - static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1); - - smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */ - __get_cpu_var(rcu_dyntick_sched).dynticks++; - WARN_ON_RATELIMIT(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1, &rs); -} - -static inline void rcu_exit_nohz(void) -{ - static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1); - - __get_cpu_var(rcu_dyntick_sched).dynticks++; - smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */ - WARN_ON_RATELIMIT(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1), - &rs); -} - -#else /* CONFIG_NO_HZ */ -#define rcu_enter_nohz() do { } while (0) -#define rcu_exit_nohz() do { } while (0) -#endif /* CONFIG_NO_HZ */ +extern void rcu_enter_nohz(void); +extern void rcu_exit_nohz(void); +#else +# define rcu_enter_nohz() do { } while (0) +# define rcu_exit_nohz() do { } while (0) +#endif /* * A context switch is a grace period for rcupreempt synchronize_rcu() diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index a722fb67bb2..0cdda00f2b2 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -33,7 +33,6 @@ #include <linux/cache.h> #include <linux/spinlock.h> #include <linux/threads.h> -#include <linux/percpu.h> #include <linux/cpumask.h> #include <linux/seqlock.h> @@ -236,30 +235,8 @@ struct rcu_state { #endif /* #ifdef CONFIG_NO_HZ */ }; -extern struct rcu_state rcu_state; -DECLARE_PER_CPU(struct rcu_data, rcu_data); - -extern struct rcu_state rcu_bh_state; -DECLARE_PER_CPU(struct rcu_data, rcu_bh_data); - -/* - * Increment the quiescent state counter. - * The counter is a bit degenerated: We do not need to know - * how many quiescent states passed, just if there was at least - * one since the start of the grace period. Thus just a flag. - */ -static inline void rcu_qsctr_inc(int cpu) -{ - struct rcu_data *rdp = &per_cpu(rcu_data, cpu); - rdp->passed_quiesc = 1; - rdp->passed_quiesc_completed = rdp->completed; -} -static inline void rcu_bh_qsctr_inc(int cpu) -{ - struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu); - rdp->passed_quiesc = 1; - rdp->passed_quiesc_completed = rdp->completed; -} +extern void rcu_qsctr_inc(int cpu); +extern void rcu_bh_qsctr_inc(int cpu); extern int rcu_pending(int cpu); extern int rcu_needs_cpu(int cpu); diff --git a/include/linux/regulator/bq24022.h b/include/linux/regulator/bq24022.h index e84b0a9feda..a6d014005d4 100644 --- a/include/linux/regulator/bq24022.h +++ b/include/linux/regulator/bq24022.h @@ -10,6 +10,8 @@ * */ +struct regulator_init_data; + /** * bq24022_mach_info - platform data for bq24022 * @gpio_nce: GPIO line connected to the nCE pin, used to enable / disable charging @@ -18,4 +20,5 @@ struct bq24022_mach_info { int gpio_nce; int gpio_iset2; + struct regulator_init_data *init_data; }; diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 801bf77ff4e..277f4b964df 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -3,7 +3,7 @@ * * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. * - * Author: Liam Girdwood <lg@opensource.wolfsonmicro.com> + * Author: Liam Girdwood <lrg@slimlogic.co.uk> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -88,6 +88,7 @@ * FAIL Regulator output has failed. 
* OVER_TEMP Regulator over temp. * FORCE_DISABLE Regulator shut down by software. + * VOLTAGE_CHANGE Regulator voltage changed. * * NOTE: These events can be OR'ed together when passed into handler. */ @@ -98,6 +99,7 @@ #define REGULATOR_EVENT_FAIL 0x08 #define REGULATOR_EVENT_OVER_TEMP 0x10 #define REGULATOR_EVENT_FORCE_DISABLE 0x20 +#define REGULATOR_EVENT_VOLTAGE_CHANGE 0x40 struct regulator; @@ -140,6 +142,8 @@ int regulator_bulk_disable(int num_consumers, void regulator_bulk_free(int num_consumers, struct regulator_bulk_data *consumers); +int regulator_count_voltages(struct regulator *regulator); +int regulator_list_voltage(struct regulator *regulator, unsigned selector); int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV); int regulator_get_voltage(struct regulator *regulator); int regulator_set_current_limit(struct regulator *regulator, diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 2dae05705f1..4848d8dacd9 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -3,7 +3,7 @@ * * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. * - * Author: Liam Girdwood <lg@opensource.wolfsonmicro.com> + * Author: Liam Girdwood <lrg@slimlogic.co.uk> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -21,25 +21,38 @@ struct regulator_dev; struct regulator_init_data; +enum regulator_status { + REGULATOR_STATUS_OFF, + REGULATOR_STATUS_ON, + REGULATOR_STATUS_ERROR, + /* fast/normal/idle/standby are flavors of "on" */ + REGULATOR_STATUS_FAST, + REGULATOR_STATUS_NORMAL, + REGULATOR_STATUS_IDLE, + REGULATOR_STATUS_STANDBY, +}; + /** * struct regulator_ops - regulator operations. * - * This struct describes regulator operations which can be implemented by - * regulator chip drivers. - * - * @enable: Enable the regulator. - * @disable: Disable the regulator. + * @enable: Configure the regulator as enabled. + * @disable: Configure the regulator as disabled. * @is_enabled: Return 1 if the regulator is enabled, 0 otherwise. * * @set_voltage: Set the voltage for the regulator within the range specified. * The driver should select the voltage closest to min_uV. * @get_voltage: Return the currently configured voltage for the regulator. + * @list_voltage: Return one of the supported voltages, in microvolts; zero + * if the selector indicates a voltage that is unusable on this system; + * or negative errno. Selectors range from zero to one less than + * regulator_desc.n_voltages. Voltages may be reported in any order. * * @set_current_limit: Configure a limit for a current-limited regulator. - * @get_current_limit: Get the limit for a current-limited regulator. + * @get_current_limit: Get the configured limit for a current-limited regulator. * - * @set_mode: Set the operating mode for the regulator. - * @get_mode: Get the current operating mode for the regulator. + * @get_mode: Get the configured operating mode for the regulator. + * @get_status: Return actual (not as-configured) status of regulator, as a + * REGULATOR_STATUS value (or negative errno) * @get_optimum_mode: Get the most efficient operating mode for the regulator * when running with the specified parameters. * @@ -51,9 +64,15 @@ struct regulator_init_data; * suspended. * @set_suspend_mode: Set the operating mode for the regulator when the * system is suspended. 
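The two consumer calls added above, regulator_count_voltages() and regulator_list_voltage(), let a consumer discover what a supply can actually produce before requesting a range. A minimal sketch of a consumer scanning for a usable voltage; the helper and the target window are hypothetical, the API calls are the ones declared above (selectors that return 0 are unusable, negative values are errors):

/* Hypothetical: 0 if the supply can produce something in [min_uV, max_uV]. */
static int example_supply_can_do(struct regulator *reg, int min_uV, int max_uV)
{
        int i, count = regulator_count_voltages(reg);

        if (count < 0)
                return count;

        for (i = 0; i < count; i++) {
                int uV = regulator_list_voltage(reg, i);

                if (uV >= min_uV && uV <= max_uV)
                        return 0;       /* found a selector in range */
        }
        return -EINVAL;
}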
+ * + * This struct describes regulator operations which can be implemented by + * regulator chip drivers. */ struct regulator_ops { + /* enumerate supported voltages */ + int (*list_voltage) (struct regulator_dev *, unsigned selector); + /* get/set regulator voltage */ int (*set_voltage) (struct regulator_dev *, int min_uV, int max_uV); int (*get_voltage) (struct regulator_dev *); @@ -72,6 +91,13 @@ struct regulator_ops { int (*set_mode) (struct regulator_dev *, unsigned int mode); unsigned int (*get_mode) (struct regulator_dev *); + /* report regulator status ... most other accessors report + * control inputs, this reports results of combining inputs + * from Linux (and other sources) with the actual load. + * returns REGULATOR_STATUS_* or negative errno. + */ + int (*get_status)(struct regulator_dev *); + /* get most efficient regulator operating mode for load */ unsigned int (*get_optimum_mode) (struct regulator_dev *, int input_uV, int output_uV, int load_uA); @@ -106,6 +132,7 @@ enum regulator_type { * * @name: Identifying name for the regulator. * @id: Numerical identifier for the regulator. + * @n_voltages: Number of selectors available for ops.list_voltage(). * @ops: Regulator operations table. * @irq: Interrupt number for the regulator. * @type: Indicates if the regulator is a voltage or current regulator. @@ -114,14 +141,48 @@ enum regulator_type { struct regulator_desc { const char *name; int id; + unsigned n_voltages; struct regulator_ops *ops; int irq; enum regulator_type type; struct module *owner; }; +/* + * struct regulator_dev + * + * Voltage / Current regulator class device. One for each + * regulator. + * + * This should *not* be used directly by anything except the regulator + * core and notification injection (which should take the mutex and do + * no other direct access). + */ +struct regulator_dev { + struct regulator_desc *desc; + int use_count; + + /* lists we belong to */ + struct list_head list; /* list of all regulators */ + struct list_head slist; /* list of supplied regulators */ + + /* lists we own */ + struct list_head consumer_list; /* consumers we supply */ + struct list_head supply_list; /* regulators we supply */ + + struct blocking_notifier_head notifier; + struct mutex mutex; /* consumer lock */ + struct module *owner; + struct device dev; + struct regulation_constraints *constraints; + struct regulator_dev *supply; /* for tree */ + + void *reg_data; /* regulator_dev data */ +}; + struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc, - struct device *dev, void *driver_data); + struct device *dev, struct regulator_init_data *init_data, + void *driver_data); void regulator_unregister(struct regulator_dev *rdev); int regulator_notifier_call_chain(struct regulator_dev *rdev, diff --git a/include/linux/regulator/fixed.h b/include/linux/regulator/fixed.h index 1387a5d2190..91b4da31f1b 100644 --- a/include/linux/regulator/fixed.h +++ b/include/linux/regulator/fixed.h @@ -14,9 +14,12 @@ #ifndef __REGULATOR_FIXED_H #define __REGULATOR_FIXED_H +struct regulator_init_data; + struct fixed_voltage_config { const char *supply_name; int microvolts; + struct regulator_init_data *init_data; }; #endif diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index 3794773b23d..bac64fa390f 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h @@ -3,7 +3,7 @@ * * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. 
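On the driver side the same information is exported by filling in regulator_desc.n_voltages and a .list_voltage op, and the new regulator_register() prototype takes the board's regulator_init_data directly. A condensed sketch of a fixed-table driver; the names, the voltage table and the probe wiring are invented for illustration, only the structures and the register call come from this header:

/* Hypothetical two-step LDO exposing its table through list_voltage(). */
static const int example_uV[] = { 1800000, 3300000 };

static int example_list_voltage(struct regulator_dev *rdev, unsigned selector)
{
        if (selector >= ARRAY_SIZE(example_uV))
                return -EINVAL;
        return example_uV[selector];
}

static struct regulator_ops example_ops = {
        .list_voltage = example_list_voltage,
        /* .get_voltage / .set_voltage etc. as the hardware allows */
};

static struct regulator_desc example_desc = {
        .name       = "example-ldo",
        .id         = 0,
        .n_voltages = ARRAY_SIZE(example_uV),
        .ops        = &example_ops,
        .type       = REGULATOR_VOLTAGE,
        .owner      = THIS_MODULE,
};

/* In probe(), with init_data supplied by the board code:
 *      rdev = regulator_register(&example_desc, dev, init_data, driver_priv);
 */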
* - * Author: Liam Girdwood <lg@opensource.wolfsonmicro.com> + * Author: Liam Girdwood <lrg@slimlogic.co.uk> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -73,7 +73,9 @@ struct regulator_state { * * @always_on: Set if the regulator should never be disabled. * @boot_on: Set if the regulator is enabled when the system is initially - * started. + * started. If the regulator is not enabled by the hardware or + * bootloader then it will be enabled when the constraints are + * applied. * @apply_uV: Apply the voltage constraint when initialising. * * @input_uV: Input voltage for regulator when supplied by another regulator. @@ -83,6 +85,7 @@ struct regulator_state { * @state_standby: State for regulator when system is suspended in standby * mode. * @initial_state: Suspend state to set by default. + * @initial_mode: Mode to set at startup. */ struct regulation_constraints { @@ -111,6 +114,9 @@ struct regulation_constraints { struct regulator_state state_standby; suspend_state_t initial_state; /* suspend state to set at init */ + /* mode to set on startup */ + unsigned int initial_mode; + /* constriant flags */ unsigned always_on:1; /* regulator never off when system is on */ unsigned boot_on:1; /* bootloader/firmware enabled regulator */ @@ -160,4 +166,6 @@ struct regulator_init_data { int regulator_suspend_prepare(suspend_state_t state); +void regulator_has_full_constraints(void); + #endif diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index b3b35966008..e1b7b217388 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -8,7 +8,7 @@ struct ring_buffer; struct ring_buffer_iter; /* - * Don't reference this struct directly, use functions below. + * Don't refer to this struct directly, use functions below. */ struct ring_buffer_event { u32 type:2, len:3, time_delta:27; @@ -18,10 +18,13 @@ struct ring_buffer_event { /** * enum ring_buffer_type - internal ring buffer types * - * @RINGBUF_TYPE_PADDING: Left over page padding - * array is ignored - * size is variable depending on how much + * @RINGBUF_TYPE_PADDING: Left over page padding or discarded event + * If time_delta is 0: + * array is ignored + * size is variable depending on how much * padding is needed + * If time_delta is non zero: + * everything else same as RINGBUF_TYPE_DATA * * @RINGBUF_TYPE_TIME_EXTEND: Extend the time delta * array[0] = time delta (28 .. 59) @@ -65,6 +68,8 @@ ring_buffer_event_time_delta(struct ring_buffer_event *event) return event->time_delta; } +void ring_buffer_event_discard(struct ring_buffer_event *event); + /* * size is in bytes for each per CPU buffer. 
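The constraint fields documented above are normally filled in by board code. A hypothetical board-file fragment using the newly documented boot_on behaviour and the new initial_mode field, assuming regulator_init_data embeds a regulation_constraints member named constraints as machine.h lays it out; the values are invented:

/* Hypothetical board data: the bootloader left this rail on; keep it enabled
 * and start it in normal mode when the constraints are applied. */
static struct regulator_init_data example_ldo_init = {
        .constraints = {
                .boot_on      = 1,
                .initial_mode = REGULATOR_MODE_NORMAL,
        },
};

/* Once every regulator the board has is registered:
 *      regulator_has_full_constraints();
 */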
*/ @@ -74,13 +79,10 @@ void ring_buffer_free(struct ring_buffer *buffer); int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size); -struct ring_buffer_event * -ring_buffer_lock_reserve(struct ring_buffer *buffer, - unsigned long length, - unsigned long *flags); +struct ring_buffer_event *ring_buffer_lock_reserve(struct ring_buffer *buffer, + unsigned long length); int ring_buffer_unlock_commit(struct ring_buffer *buffer, - struct ring_buffer_event *event, - unsigned long flags); + struct ring_buffer_event *event); int ring_buffer_write(struct ring_buffer *buffer, unsigned long length, void *data); @@ -121,17 +123,19 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer); unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu); unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu); -u64 ring_buffer_time_stamp(int cpu); -void ring_buffer_normalize_time_stamp(int cpu, u64 *ts); +u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu); +void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer, + int cpu, u64 *ts); +void ring_buffer_set_clock(struct ring_buffer *buffer, + u64 (*clock)(void)); + +size_t ring_buffer_page_len(void *page); -void tracing_on(void); -void tracing_off(void); -void tracing_off_permanent(void); void *ring_buffer_alloc_read_page(struct ring_buffer *buffer); void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data); -int ring_buffer_read_page(struct ring_buffer *buffer, - void **data_page, int cpu, int full); +int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page, + size_t len, int cpu, int full); enum ring_buffer_flags { RB_FL_OVERWRITE = 1 << 0, diff --git a/include/linux/sched.h b/include/linux/sched.h index 9da5aa0771e..98e1fe51601 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -138,6 +138,8 @@ extern unsigned long nr_uninterruptible(void); extern unsigned long nr_active(void); extern unsigned long nr_iowait(void); +extern unsigned long get_parent_ip(unsigned long addr); + struct seq_file; struct cfs_rq; struct task_group; @@ -298,17 +300,11 @@ extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos); extern unsigned int softlockup_panic; -extern unsigned long sysctl_hung_task_check_count; -extern unsigned long sysctl_hung_task_timeout_secs; -extern unsigned long sysctl_hung_task_warnings; extern int softlockup_thresh; #else static inline void softlockup_tick(void) { } -static inline void spawn_softlockup_task(void) -{ -} static inline void touch_softlockup_watchdog(void) { } @@ -317,6 +313,15 @@ static inline void touch_all_softlockup_watchdogs(void) } #endif +#ifdef CONFIG_DETECT_HUNG_TASK +extern unsigned int sysctl_hung_task_panic; +extern unsigned long sysctl_hung_task_check_count; +extern unsigned long sysctl_hung_task_timeout_secs; +extern unsigned long sysctl_hung_task_warnings; +extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, + struct file *filp, void __user *buffer, + size_t *lenp, loff_t *ppos); +#endif /* Attach to any functions which should be ignored in wchan output. 
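With the signature change above, writers no longer thread an irq-flags word through reserve/commit; the ring buffer manages its own locking context. A minimal sketch of the new write path, assuming ring_buffer_event_data() from the same header (not shown in this hunk) to reach the payload; the sample layout is invented for illustration:

/* Hypothetical: write one u64 sample into the ring buffer. */
static int example_write_sample(struct ring_buffer *buffer, u64 sample)
{
        struct ring_buffer_event *event;
        u64 *payload;

        event = ring_buffer_lock_reserve(buffer, sizeof(sample));
        if (!event)
                return -EBUSY;

        payload = ring_buffer_event_data(event);
        *payload = sample;

        return ring_buffer_unlock_commit(buffer, event);
}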
*/ #define __sched __attribute__((__section__(".sched.text"))) @@ -1253,9 +1258,8 @@ struct task_struct { /* ipc stuff */ struct sysv_sem sysvsem; #endif -#ifdef CONFIG_DETECT_SOFTLOCKUP +#ifdef CONFIG_DETECT_HUNG_TASK /* hung task detection */ - unsigned long last_switch_timestamp; unsigned long last_switch_count; #endif /* CPU-specific state of this task */ @@ -1292,6 +1296,11 @@ struct task_struct { /* Protection of (de-)allocation: mm, files, fs, tty, keyrings */ spinlock_t alloc_lock; +#ifdef CONFIG_GENERIC_HARDIRQS + /* IRQ handler threads */ + struct irqaction *irqaction; +#endif + /* Protection of the PI data structures: */ spinlock_t pi_lock; @@ -1405,6 +1414,8 @@ struct task_struct { int curr_ret_stack; /* Stack of return addresses for return function tracing */ struct ftrace_ret_stack *ret_stack; + /* time stamp for last schedule */ + unsigned long long ftrace_timestamp; /* * Number of functions that haven't been traced * because of depth overrun. diff --git a/include/linux/security.h b/include/linux/security.h index 54ed15799a8..d5fd6163606 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -32,6 +32,7 @@ #include <linux/sched.h> #include <linux/key.h> #include <linux/xfrm.h> +#include <linux/gfp.h> #include <net/flow.h> /* Maximum number of letters for an LSM name string */ @@ -2953,5 +2954,28 @@ static inline void securityfs_remove(struct dentry *dentry) #endif +#ifdef CONFIG_SECURITY + +static inline char *alloc_secdata(void) +{ + return (char *)get_zeroed_page(GFP_KERNEL); +} + +static inline void free_secdata(void *secdata) +{ + free_page((unsigned long)secdata); +} + +#else + +static inline char *alloc_secdata(void) +{ + return (char *)1; +} + +static inline void free_secdata(void *secdata) +{ } +#endif /* CONFIG_SECURITY */ + #endif /* ! __LINUX_SECURITY_H */ diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index df9245c7bd3..57a97e52e58 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -164,6 +164,9 @@ /* NWPSERIAL */ #define PORT_NWPSERIAL 85 +/* MAX3100 */ +#define PORT_MAX3100 86 + #ifdef __KERNEL__ #include <linux/compiler.h> @@ -277,7 +280,7 @@ struct uart_port { struct uart_icount icount; /* statistics */ struct console *cons; /* struct console, if any */ -#ifdef CONFIG_SERIAL_CORE_CONSOLE +#if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(SUPPORT_SYSRQ) unsigned long sysrq; /* sysrq timeout */ #endif diff --git a/include/linux/serial_max3100.h b/include/linux/serial_max3100.h new file mode 100644 index 00000000000..4976befb6ae --- /dev/null +++ b/include/linux/serial_max3100.h @@ -0,0 +1,52 @@ +/* + * + * Copyright (C) 2007 Christian Pellegrin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + + +#ifndef _LINUX_SERIAL_MAX3100_H +#define _LINUX_SERIAL_MAX3100_H 1 + + +/** + * struct plat_max3100 - MAX3100 SPI UART platform data + * @loopback: force MAX3100 in loopback + * @crystal: 1 for 3.6864 Mhz, 0 for 1.8432 + * @max3100_hw_suspend: MAX3100 has a shutdown pin. This is a hook + * called on suspend and resume to activate it. + * @poll_time: poll time for CTS signal in ms, 0 disables (so no hw + * flow ctrl is possible but you have less CPU usage) + * + * You should use this structure in your machine description to specify + * how the MAX3100 is connected. 
Example: + * + * static struct plat_max3100 max3100_plat_data = { + * .loopback = 0, + * .crystal = 0, + * .poll_time = 100, + * }; + * + * static struct spi_board_info spi_board_info[] = { + * { + * .modalias = "max3100", + * .platform_data = &max3100_plat_data, + * .irq = IRQ_EINT12, + * .max_speed_hz = 5*1000*1000, + * .chip_select = 0, + * }, + * }; + * + **/ +struct plat_max3100 { + int loopback; + int crystal; + void (*max3100_hw_suspend) (int suspend); + int poll_time; +}; + +#endif diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index 6ca6a7b66d7..5ac9b0bcaf9 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h @@ -14,6 +14,7 @@ #include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */ #include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */ #include <linux/compiler.h> +#include <trace/kmemtrace.h> /* Size description struct for general caches. */ struct cache_sizes { @@ -28,8 +29,26 @@ extern struct cache_sizes malloc_sizes[]; void *kmem_cache_alloc(struct kmem_cache *, gfp_t); void *__kmalloc(size_t size, gfp_t flags); -static inline void *kmalloc(size_t size, gfp_t flags) +#ifdef CONFIG_KMEMTRACE +extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags); +extern size_t slab_buffer_size(struct kmem_cache *cachep); +#else +static __always_inline void * +kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags) { + return kmem_cache_alloc(cachep, flags); +} +static inline size_t slab_buffer_size(struct kmem_cache *cachep) +{ + return 0; +} +#endif + +static __always_inline void *kmalloc(size_t size, gfp_t flags) +{ + struct kmem_cache *cachep; + void *ret; + if (__builtin_constant_p(size)) { int i = 0; @@ -47,10 +66,17 @@ static inline void *kmalloc(size_t size, gfp_t flags) found: #ifdef CONFIG_ZONE_DMA if (flags & GFP_DMA) - return kmem_cache_alloc(malloc_sizes[i].cs_dmacachep, - flags); + cachep = malloc_sizes[i].cs_dmacachep; + else #endif - return kmem_cache_alloc(malloc_sizes[i].cs_cachep, flags); + cachep = malloc_sizes[i].cs_cachep; + + ret = kmem_cache_alloc_notrace(cachep, flags); + + trace_kmalloc(_THIS_IP_, ret, + size, slab_buffer_size(cachep), flags); + + return ret; } return __kmalloc(size, flags); } @@ -59,8 +85,25 @@ found: extern void *__kmalloc_node(size_t size, gfp_t flags, int node); extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); -static inline void *kmalloc_node(size_t size, gfp_t flags, int node) +#ifdef CONFIG_KMEMTRACE +extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep, + gfp_t flags, + int nodeid); +#else +static __always_inline void * +kmem_cache_alloc_node_notrace(struct kmem_cache *cachep, + gfp_t flags, + int nodeid) +{ + return kmem_cache_alloc_node(cachep, flags, nodeid); +} +#endif + +static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) { + struct kmem_cache *cachep; + void *ret; + if (__builtin_constant_p(size)) { int i = 0; @@ -78,11 +121,18 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node) found: #ifdef CONFIG_ZONE_DMA if (flags & GFP_DMA) - return kmem_cache_alloc_node(malloc_sizes[i].cs_dmacachep, - flags, node); + cachep = malloc_sizes[i].cs_dmacachep; + else #endif - return kmem_cache_alloc_node(malloc_sizes[i].cs_cachep, - flags, node); + cachep = malloc_sizes[i].cs_cachep; + + ret = kmem_cache_alloc_node_notrace(cachep, flags, node); + + trace_kmalloc_node(_THIS_IP_, ret, + size, slab_buffer_size(cachep), + flags, node); + + return ret; } return __kmalloc_node(size, flags, 
node); } diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h index 59a3fa476ab..0ec00b39d00 100644 --- a/include/linux/slob_def.h +++ b/include/linux/slob_def.h @@ -3,14 +3,15 @@ void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); -static inline void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) +static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep, + gfp_t flags) { return kmem_cache_alloc_node(cachep, flags, -1); } void *__kmalloc_node(size_t size, gfp_t flags, int node); -static inline void *kmalloc_node(size_t size, gfp_t flags, int node) +static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) { return __kmalloc_node(size, flags, node); } @@ -23,12 +24,12 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node) * kmalloc is the normal method of allocating memory * in the kernel. */ -static inline void *kmalloc(size_t size, gfp_t flags) +static __always_inline void *kmalloc(size_t size, gfp_t flags) { return __kmalloc_node(size, flags, -1); } -static inline void *__kmalloc(size_t size, gfp_t flags) +static __always_inline void *__kmalloc(size_t size, gfp_t flags) { return kmalloc(size, flags); } diff --git a/include/linux/slow-work.h b/include/linux/slow-work.h new file mode 100644 index 00000000000..85958277f83 --- /dev/null +++ b/include/linux/slow-work.h @@ -0,0 +1,95 @@ +/* Worker thread pool for slow items, such as filesystem lookups or mkdirs + * + * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + * + * See Documentation/slow-work.txt + */ + +#ifndef _LINUX_SLOW_WORK_H +#define _LINUX_SLOW_WORK_H + +#ifdef CONFIG_SLOW_WORK + +#include <linux/sysctl.h> + +struct slow_work; + +/* + * The operations used to support slow work items + */ +struct slow_work_ops { + /* get a ref on a work item + * - return 0 if successful, -ve if not + */ + int (*get_ref)(struct slow_work *work); + + /* discard a ref to a work item */ + void (*put_ref)(struct slow_work *work); + + /* execute a work item */ + void (*execute)(struct slow_work *work); +}; + +/* + * A slow work item + * - A reference is held on the parent object by the thread pool when it is + * queued + */ +struct slow_work { + unsigned long flags; +#define SLOW_WORK_PENDING 0 /* item pending (further) execution */ +#define SLOW_WORK_EXECUTING 1 /* item currently executing */ +#define SLOW_WORK_ENQ_DEFERRED 2 /* item enqueue deferred */ +#define SLOW_WORK_VERY_SLOW 3 /* item is very slow */ + const struct slow_work_ops *ops; /* operations table for this item */ + struct list_head link; /* link in queue */ +}; + +/** + * slow_work_init - Initialise a slow work item + * @work: The work item to initialise + * @ops: The operations to use to handle the slow work item + * + * Initialise a slow work item. + */ +static inline void slow_work_init(struct slow_work *work, + const struct slow_work_ops *ops) +{ + work->flags = 0; + work->ops = ops; + INIT_LIST_HEAD(&work->link); +} + +/** + * slow_work_init - Initialise a very slow work item + * @work: The work item to initialise + * @ops: The operations to use to handle the slow work item + * + * Initialise a very slow work item. 
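A slow-work user embeds a struct slow_work in its own object, points it at a slow_work_ops table and queues it; get_ref/put_ref pin the containing object for as long as the item is queued or executing. A condensed sketch of that pattern, with the containing object and its refcounting invented for illustration; slow_work_enqueue() and slow_work_register_user() are the calls declared a little further down:

/* Hypothetical object that wants its work done in the slow-work pool. */
struct example_object {
        atomic_t                refcount;
        struct slow_work        work;
};

static int example_get_ref(struct slow_work *work)
{
        struct example_object *obj =
                container_of(work, struct example_object, work);

        atomic_inc(&obj->refcount);
        return 0;
}

static void example_put_ref(struct slow_work *work)
{
        struct example_object *obj =
                container_of(work, struct example_object, work);

        if (atomic_dec_and_test(&obj->refcount))
                kfree(obj);
}

static void example_execute(struct slow_work *work)
{
        /* long-running work (filesystem lookup, mkdir, ...) happens here */
}

static const struct slow_work_ops example_slow_work_ops = {
        .get_ref = example_get_ref,
        .put_ref = example_put_ref,
        .execute = example_execute,
};

/* Setup (e.g. module init):  slow_work_register_user();
 * Per object:                slow_work_init(&obj->work, &example_slow_work_ops);
 *                            slow_work_enqueue(&obj->work);
 */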
This item will be restricted such that + * only a certain number of the pool threads will be able to execute items of + * this type. + */ +static inline void vslow_work_init(struct slow_work *work, + const struct slow_work_ops *ops) +{ + work->flags = 1 << SLOW_WORK_VERY_SLOW; + work->ops = ops; + INIT_LIST_HEAD(&work->link); +} + +extern int slow_work_enqueue(struct slow_work *work); +extern int slow_work_register_user(void); +extern void slow_work_unregister_user(void); + +#ifdef CONFIG_SYSCTL +extern ctl_table slow_work_sysctls[]; +#endif + +#endif /* CONFIG_SLOW_WORK */ +#endif /* _LINUX_SLOW_WORK_H */ diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index e37b6aa8a9f..5046f90c117 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -10,6 +10,7 @@ #include <linux/gfp.h> #include <linux/workqueue.h> #include <linux/kobject.h> +#include <trace/kmemtrace.h> enum stat_item { ALLOC_FASTPATH, /* Allocation from cpu slab */ @@ -217,13 +218,30 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size) void *kmem_cache_alloc(struct kmem_cache *, gfp_t); void *__kmalloc(size_t size, gfp_t flags); +#ifdef CONFIG_KMEMTRACE +extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags); +#else +static __always_inline void * +kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags) +{ + return kmem_cache_alloc(s, gfpflags); +} +#endif + static __always_inline void *kmalloc_large(size_t size, gfp_t flags) { - return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size)); + unsigned int order = get_order(size); + void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order); + + trace_kmalloc(_THIS_IP_, ret, size, PAGE_SIZE << order, flags); + + return ret; } static __always_inline void *kmalloc(size_t size, gfp_t flags) { + void *ret; + if (__builtin_constant_p(size)) { if (size > SLUB_MAX_SIZE) return kmalloc_large(size, flags); @@ -234,7 +252,11 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags) if (!s) return ZERO_SIZE_PTR; - return kmem_cache_alloc(s, flags); + ret = kmem_cache_alloc_notrace(s, flags); + + trace_kmalloc(_THIS_IP_, ret, size, s->size, flags); + + return ret; } } return __kmalloc(size, flags); @@ -244,8 +266,24 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags) void *__kmalloc_node(size_t size, gfp_t flags, int node); void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); +#ifdef CONFIG_KMEMTRACE +extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s, + gfp_t gfpflags, + int node); +#else +static __always_inline void * +kmem_cache_alloc_node_notrace(struct kmem_cache *s, + gfp_t gfpflags, + int node) +{ + return kmem_cache_alloc_node(s, gfpflags, node); +} +#endif + static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) { + void *ret; + if (__builtin_constant_p(size) && size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) { struct kmem_cache *s = kmalloc_slab(size); @@ -253,7 +291,12 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) if (!s) return ZERO_SIZE_PTR; - return kmem_cache_alloc_node(s, flags, node); + ret = kmem_cache_alloc_node_notrace(s, flags, node); + + trace_kmalloc_node(_THIS_IP_, ret, + size, s->size, flags, node); + + return ret; } return __kmalloc_node(size, flags, node); } diff --git a/include/linux/smp.h b/include/linux/smp.h index bbacb7baa44..a69db820eed 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -38,7 +38,7 @@ int smp_call_function_single(int cpuid, void 
(*func) (void *info), void *info, /* * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc. * (defined in asm header): - */ + */ /* * stops all CPUs but the current one: @@ -82,7 +82,8 @@ smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info, return 0; } -void __smp_call_function_single(int cpuid, struct call_single_data *data); +void __smp_call_function_single(int cpuid, struct call_single_data *data, + int wait); /* * Generic and arch helpers @@ -121,6 +122,8 @@ extern unsigned int setup_max_cpus; #else /* !SMP */ +static inline void smp_send_stop(void) { } + /* * These macros fold the SMP functionality into a single CPU system */ diff --git a/include/linux/sonypi.h b/include/linux/sonypi.h index f41ffd7c2dd..34c4475ac4a 100644 --- a/include/linux/sonypi.h +++ b/include/linux/sonypi.h @@ -103,6 +103,14 @@ #define SONYPI_EVENT_WIRELESS_OFF 61 #define SONYPI_EVENT_ZOOM_IN_PRESSED 62 #define SONYPI_EVENT_ZOOM_OUT_PRESSED 63 +#define SONYPI_EVENT_CD_EJECT_PRESSED 64 +#define SONYPI_EVENT_MODEKEY_PRESSED 65 +#define SONYPI_EVENT_PKEY_P4 66 +#define SONYPI_EVENT_PKEY_P5 67 +#define SONYPI_EVENT_SETTINGKEY_PRESSED 68 +#define SONYPI_EVENT_VOLUME_INC_PRESSED 69 +#define SONYPI_EVENT_VOLUME_DEC_PRESSED 70 +#define SONYPI_EVENT_BRIGHTNESS_PRESSED 71 /* get/set brightness */ #define SONYPI_IOCGBRT _IOR('v', 0, __u8) diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 68bb1c501d0..2cc43fa380c 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -204,6 +204,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * SPI slaves, and are numbered from zero to num_chipselects. * each slave has a chipselect signal, but it's common that not * every chipselect is connected to a slave. + * @dma_alignment: SPI controller constraint on DMA buffers alignment. * @setup: updates the device mode and clocking records used by a * device's SPI controller; protocol code may call this. This * must fail if an unrecognized or unsupported mode is requested. @@ -239,6 +240,11 @@ struct spi_master { */ u16 num_chipselect; + /* some SPI controllers pose alignment requirements on DMAable + * buffers; let protocol drivers know about these requirements. + */ + u16 dma_alignment; + /* setup mode and clock, etc (spi driver may call many times) */ int (*setup)(struct spi_device *spi); diff --git a/include/linux/string.h b/include/linux/string.h index 8852739f36d..489019ef169 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -10,6 +10,7 @@ #include <linux/compiler.h> /* for inline */ #include <linux/types.h> /* for size_t */ #include <linux/stddef.h> /* for NULL */ +#include <stdarg.h> extern char *strndup_user(const char __user *, long); extern void *memdup_user(const void __user *, size_t); @@ -112,8 +113,23 @@ extern void argv_free(char **argv); extern bool sysfs_streq(const char *s1, const char *s2); +#ifdef CONFIG_BINARY_PRINTF +int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args); +int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf); +int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4); +#endif + extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos, const void *from, size_t available); +/** + * strstarts - does @str start with @prefix? + * @str: string to examine + * @prefix: prefix to look for. 
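The new dma_alignment field above lets a protocol driver check, before building a transfer, whether a caller's buffer meets the controller's DMA alignment constraint (zero meaning no constraint). A small sketch of such a check; the helper and the bounce-buffer policy it implies are hypothetical, and a power-of-two alignment value is assumed:

/* Hypothetical: is this buffer acceptable for DMA on spi's controller? */
static bool example_spi_buf_dma_ok(struct spi_device *spi, const void *buf)
{
        u16 align = spi->master->dma_alignment;

        return align == 0 || IS_ALIGNED((unsigned long)buf, align);
}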
+ */ +static inline bool strstarts(const char *str, const char *prefix) +{ + return strncmp(str, prefix, strlen(prefix)) == 0; +} #endif #endif /* _LINUX_STRING_H_ */ diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index d3a4c023193..2a30775959e 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -24,6 +24,15 @@ */ typedef int (*svc_thread_fn)(void *); +/* statistics for svc_pool structures */ +struct svc_pool_stats { + unsigned long packets; + unsigned long sockets_queued; + unsigned long threads_woken; + unsigned long overloads_avoided; + unsigned long threads_timedout; +}; + /* * * RPC service thread pool. @@ -41,6 +50,8 @@ struct svc_pool { struct list_head sp_sockets; /* pending sockets */ unsigned int sp_nrthreads; /* # of threads in pool */ struct list_head sp_all_threads; /* all server threads */ + int sp_nwaking; /* number of threads woken but not yet active */ + struct svc_pool_stats sp_stats; /* statistics on pool operation */ } ____cacheline_aligned_in_smp; /* @@ -83,6 +94,8 @@ struct svc_serv { struct module * sv_module; /* optional module to count when * adding threads */ svc_thread_fn sv_function; /* main function for threads */ + unsigned int sv_drc_max_pages; /* Total pages for DRC */ + unsigned int sv_drc_pages_used;/* DRC pages used */ }; /* @@ -218,6 +231,7 @@ struct svc_rqst { struct svc_cred rq_cred; /* auth info */ void * rq_xprt_ctxt; /* transport specific context ptr */ struct svc_deferred_req*rq_deferred; /* deferred request we are replaying */ + int rq_usedeferral; /* use deferral */ size_t rq_xprt_hlen; /* xprt header len */ struct xdr_buf rq_arg; @@ -263,6 +277,7 @@ struct svc_rqst { * cache pages */ wait_queue_head_t rq_wait; /* synchronization */ struct task_struct *rq_task; /* service thread */ + int rq_waking; /* 1 if thread is being woken */ }; /* @@ -393,6 +408,7 @@ struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int, void (*shutdown)(struct svc_serv *), svc_thread_fn, struct module *); int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int); +int svc_pool_stats_open(struct svc_serv *serv, struct file *file); void svc_destroy(struct svc_serv *); int svc_process(struct svc_rqst *); int svc_register(const struct svc_serv *, const int, diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 49e1eb45446..d8910b68e1b 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -69,27 +69,27 @@ struct xdr_buf { * pre-xdr'ed macros. 
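strstarts() above is just strncmp() against the prefix length, which reads better at call sites than the open-coded form. A one-line usage sketch with a hypothetical argument string:

/* Hypothetical: recognise a "console=" boot argument. */
static bool example_is_console_arg(const char *arg)
{
        return strstarts(arg, "console=");
}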
*/ -#define xdr_zero __constant_htonl(0) -#define xdr_one __constant_htonl(1) -#define xdr_two __constant_htonl(2) - -#define rpc_success __constant_htonl(RPC_SUCCESS) -#define rpc_prog_unavail __constant_htonl(RPC_PROG_UNAVAIL) -#define rpc_prog_mismatch __constant_htonl(RPC_PROG_MISMATCH) -#define rpc_proc_unavail __constant_htonl(RPC_PROC_UNAVAIL) -#define rpc_garbage_args __constant_htonl(RPC_GARBAGE_ARGS) -#define rpc_system_err __constant_htonl(RPC_SYSTEM_ERR) -#define rpc_drop_reply __constant_htonl(RPC_DROP_REPLY) - -#define rpc_auth_ok __constant_htonl(RPC_AUTH_OK) -#define rpc_autherr_badcred __constant_htonl(RPC_AUTH_BADCRED) -#define rpc_autherr_rejectedcred __constant_htonl(RPC_AUTH_REJECTEDCRED) -#define rpc_autherr_badverf __constant_htonl(RPC_AUTH_BADVERF) -#define rpc_autherr_rejectedverf __constant_htonl(RPC_AUTH_REJECTEDVERF) -#define rpc_autherr_tooweak __constant_htonl(RPC_AUTH_TOOWEAK) -#define rpcsec_gsserr_credproblem __constant_htonl(RPCSEC_GSS_CREDPROBLEM) -#define rpcsec_gsserr_ctxproblem __constant_htonl(RPCSEC_GSS_CTXPROBLEM) -#define rpc_autherr_oldseqnum __constant_htonl(101) +#define xdr_zero cpu_to_be32(0) +#define xdr_one cpu_to_be32(1) +#define xdr_two cpu_to_be32(2) + +#define rpc_success cpu_to_be32(RPC_SUCCESS) +#define rpc_prog_unavail cpu_to_be32(RPC_PROG_UNAVAIL) +#define rpc_prog_mismatch cpu_to_be32(RPC_PROG_MISMATCH) +#define rpc_proc_unavail cpu_to_be32(RPC_PROC_UNAVAIL) +#define rpc_garbage_args cpu_to_be32(RPC_GARBAGE_ARGS) +#define rpc_system_err cpu_to_be32(RPC_SYSTEM_ERR) +#define rpc_drop_reply cpu_to_be32(RPC_DROP_REPLY) + +#define rpc_auth_ok cpu_to_be32(RPC_AUTH_OK) +#define rpc_autherr_badcred cpu_to_be32(RPC_AUTH_BADCRED) +#define rpc_autherr_rejectedcred cpu_to_be32(RPC_AUTH_REJECTEDCRED) +#define rpc_autherr_badverf cpu_to_be32(RPC_AUTH_BADVERF) +#define rpc_autherr_rejectedverf cpu_to_be32(RPC_AUTH_REJECTEDVERF) +#define rpc_autherr_tooweak cpu_to_be32(RPC_AUTH_TOOWEAK) +#define rpcsec_gsserr_credproblem cpu_to_be32(RPCSEC_GSS_CREDPROBLEM) +#define rpcsec_gsserr_ctxproblem cpu_to_be32(RPCSEC_GSS_CTXPROBLEM) +#define rpc_autherr_oldseqnum cpu_to_be32(101) /* * Miscellaneous XDR helper functions diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index b299a82a05e..6470f74074a 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -65,6 +65,7 @@ struct old_linux_dirent; #include <asm/signal.h> #include <linux/quota.h> #include <linux/key.h> +#include <linux/ftrace.h> #define __SC_DECL1(t1, a1) t1 a1 #define __SC_DECL2(t2, a2, ...) t2 a2, __SC_DECL1(__VA_ARGS__) @@ -95,7 +96,46 @@ struct old_linux_dirent; #define __SC_TEST5(t5, a5, ...) __SC_TEST(t5); __SC_TEST4(__VA_ARGS__) #define __SC_TEST6(t6, a6, ...) __SC_TEST(t6); __SC_TEST5(__VA_ARGS__) +#ifdef CONFIG_FTRACE_SYSCALLS +#define __SC_STR_ADECL1(t, a) #a +#define __SC_STR_ADECL2(t, a, ...) #a, __SC_STR_ADECL1(__VA_ARGS__) +#define __SC_STR_ADECL3(t, a, ...) #a, __SC_STR_ADECL2(__VA_ARGS__) +#define __SC_STR_ADECL4(t, a, ...) #a, __SC_STR_ADECL3(__VA_ARGS__) +#define __SC_STR_ADECL5(t, a, ...) #a, __SC_STR_ADECL4(__VA_ARGS__) +#define __SC_STR_ADECL6(t, a, ...) #a, __SC_STR_ADECL5(__VA_ARGS__) + +#define __SC_STR_TDECL1(t, a) #t +#define __SC_STR_TDECL2(t, a, ...) #t, __SC_STR_TDECL1(__VA_ARGS__) +#define __SC_STR_TDECL3(t, a, ...) #t, __SC_STR_TDECL2(__VA_ARGS__) +#define __SC_STR_TDECL4(t, a, ...) #t, __SC_STR_TDECL3(__VA_ARGS__) +#define __SC_STR_TDECL5(t, a, ...) #t, __SC_STR_TDECL4(__VA_ARGS__) +#define __SC_STR_TDECL6(t, a, ...) 
#t, __SC_STR_TDECL5(__VA_ARGS__) + +#define SYSCALL_METADATA(sname, nb) \ + static const struct syscall_metadata __used \ + __attribute__((__aligned__(4))) \ + __attribute__((section("__syscalls_metadata"))) \ + __syscall_meta_##sname = { \ + .name = "sys"#sname, \ + .nb_args = nb, \ + .types = types_##sname, \ + .args = args_##sname, \ + } + +#define SYSCALL_DEFINE0(sname) \ + static const struct syscall_metadata __used \ + __attribute__((__aligned__(4))) \ + __attribute__((section("__syscalls_metadata"))) \ + __syscall_meta_##sname = { \ + .name = "sys_"#sname, \ + .nb_args = 0, \ + }; \ + asmlinkage long sys_##sname(void) + +#else #define SYSCALL_DEFINE0(name) asmlinkage long sys_##name(void) +#endif + #define SYSCALL_DEFINE1(name, ...) SYSCALL_DEFINEx(1, _##name, __VA_ARGS__) #define SYSCALL_DEFINE2(name, ...) SYSCALL_DEFINEx(2, _##name, __VA_ARGS__) #define SYSCALL_DEFINE3(name, ...) SYSCALL_DEFINEx(3, _##name, __VA_ARGS__) @@ -117,10 +157,26 @@ struct old_linux_dirent; #endif #endif +#ifdef CONFIG_FTRACE_SYSCALLS +#define SYSCALL_DEFINEx(x, sname, ...) \ + static const char *types_##sname[] = { \ + __SC_STR_TDECL##x(__VA_ARGS__) \ + }; \ + static const char *args_##sname[] = { \ + __SC_STR_ADECL##x(__VA_ARGS__) \ + }; \ + SYSCALL_METADATA(sname, x); \ + __SYSCALL_DEFINEx(x, sname, __VA_ARGS__) +#else +#define SYSCALL_DEFINEx(x, sname, ...) \ + __SYSCALL_DEFINEx(x, sname, __VA_ARGS__) +#endif + #ifdef CONFIG_HAVE_SYSCALL_WRAPPERS #define SYSCALL_DEFINE(name) static inline long SYSC_##name -#define SYSCALL_DEFINEx(x, name, ...) \ + +#define __SYSCALL_DEFINEx(x, name, ...) \ asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__)); \ static inline long SYSC##name(__SC_DECL##x(__VA_ARGS__)); \ asmlinkage long SyS##name(__SC_LONG##x(__VA_ARGS__)) \ @@ -134,7 +190,7 @@ struct old_linux_dirent; #else /* CONFIG_HAVE_SYSCALL_WRAPPERS */ #define SYSCALL_DEFINE(name) asmlinkage long sys_##name -#define SYSCALL_DEFINEx(x, name, ...) \ +#define __SYSCALL_DEFINEx(x, name, ...) 
\ asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__)) #endif /* CONFIG_HAVE_SYSCALL_WRAPPERS */ @@ -462,9 +518,9 @@ asmlinkage long sys_pread64(unsigned int fd, char __user *buf, asmlinkage long sys_pwrite64(unsigned int fd, const char __user *buf, size_t count, loff_t pos); asmlinkage long sys_preadv(unsigned long fd, const struct iovec __user *vec, - unsigned long vlen, u32 pos_high, u32 pos_low); + unsigned long vlen, unsigned long pos_l, unsigned long pos_h); asmlinkage long sys_pwritev(unsigned long fd, const struct iovec __user *vec, - unsigned long vlen, u32 pos_high, u32 pos_low); + unsigned long vlen, unsigned long pos_l, unsigned long pos_h); asmlinkage long sys_getcwd(char __user *buf, unsigned long size); asmlinkage long sys_mkdir(const char __user *pathname, int mode); asmlinkage long sys_chdir(const char __user *filename); diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 917707e6151..1de8b9eb841 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -27,27 +27,46 @@ #include <linux/idr.h> #include <linux/device.h> +#include <linux/workqueue.h> struct thermal_zone_device; struct thermal_cooling_device; +enum thermal_device_mode { + THERMAL_DEVICE_DISABLED = 0, + THERMAL_DEVICE_ENABLED, +}; + +enum thermal_trip_type { + THERMAL_TRIP_ACTIVE = 0, + THERMAL_TRIP_PASSIVE, + THERMAL_TRIP_HOT, + THERMAL_TRIP_CRITICAL, +}; + struct thermal_zone_device_ops { int (*bind) (struct thermal_zone_device *, struct thermal_cooling_device *); int (*unbind) (struct thermal_zone_device *, struct thermal_cooling_device *); - int (*get_temp) (struct thermal_zone_device *, char *); - int (*get_mode) (struct thermal_zone_device *, char *); - int (*set_mode) (struct thermal_zone_device *, const char *); - int (*get_trip_type) (struct thermal_zone_device *, int, char *); - int (*get_trip_temp) (struct thermal_zone_device *, int, char *); + int (*get_temp) (struct thermal_zone_device *, unsigned long *); + int (*get_mode) (struct thermal_zone_device *, + enum thermal_device_mode *); + int (*set_mode) (struct thermal_zone_device *, + enum thermal_device_mode); + int (*get_trip_type) (struct thermal_zone_device *, int, + enum thermal_trip_type *); + int (*get_trip_temp) (struct thermal_zone_device *, int, + unsigned long *); int (*get_crit_temp) (struct thermal_zone_device *, unsigned long *); + int (*notify) (struct thermal_zone_device *, int, + enum thermal_trip_type); }; struct thermal_cooling_device_ops { - int (*get_max_state) (struct thermal_cooling_device *, char *); - int (*get_cur_state) (struct thermal_cooling_device *, char *); - int (*set_cur_state) (struct thermal_cooling_device *, unsigned int); + int (*get_max_state) (struct thermal_cooling_device *, unsigned long *); + int (*get_cur_state) (struct thermal_cooling_device *, unsigned long *); + int (*set_cur_state) (struct thermal_cooling_device *, unsigned long); }; #define THERMAL_TRIPS_NONE -1 @@ -88,11 +107,19 @@ struct thermal_zone_device { struct device device; void *devdata; int trips; + int tc1; + int tc2; + int passive_delay; + int polling_delay; + int last_temperature; + bool passive; + unsigned int forced_passive; struct thermal_zone_device_ops *ops; struct list_head cooling_devices; struct idr idr; struct mutex lock; /* protect cooling devices list */ struct list_head node; + struct delayed_work poll_queue; #if defined(CONFIG_THERMAL_HWMON) struct list_head hwmon_node; struct thermal_hwmon_device *hwmon; @@ -104,13 +131,16 @@ struct thermal_zone_device { struct thermal_zone_device 
*thermal_zone_device_register(char *, int, void *, struct thermal_zone_device_ops - *); + *, int tc1, int tc2, + int passive_freq, + int polling_freq); void thermal_zone_device_unregister(struct thermal_zone_device *); int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int, struct thermal_cooling_device *); int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int, struct thermal_cooling_device *); +void thermal_zone_device_update(struct thermal_zone_device *); struct thermal_cooling_device *thermal_cooling_device_register(char *, void *, struct thermal_cooling_device_ops diff --git a/include/linux/timeriomem-rng.h b/include/linux/timeriomem-rng.h index dd253177f65..3e08a1c8683 100644 --- a/include/linux/timeriomem-rng.h +++ b/include/linux/timeriomem-rng.h @@ -14,7 +14,7 @@ struct timeriomem_rng_data { struct completion completion; unsigned int present:1; - u32 __iomem *address; + void __iomem *address; /* measures in usecs */ unsigned int period; diff --git a/include/linux/topology.h b/include/linux/topology.h index a16b9e06f2e..7402c1a27c4 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h @@ -38,11 +38,7 @@ #endif #ifndef nr_cpus_node -#define nr_cpus_node(node) \ - ({ \ - node_to_cpumask_ptr(__tmp__, node); \ - cpus_weight(*__tmp__); \ - }) +#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node)) #endif #define for_each_node_with_cpus(node) \ @@ -200,4 +196,9 @@ int arch_update_cpu_topology(void); #define topology_core_cpumask(cpu) cpumask_of(cpu) #endif +/* Returns the number of the current Node. */ +#ifndef numa_node_id +#define numa_node_id() (cpu_to_node(raw_smp_processor_id())) +#endif + #endif /* _LINUX_TOPOLOGY_H */ diff --git a/include/linux/trace_clock.h b/include/linux/trace_clock.h new file mode 100644 index 00000000000..7a813038408 --- /dev/null +++ b/include/linux/trace_clock.h @@ -0,0 +1,19 @@ +#ifndef _LINUX_TRACE_CLOCK_H +#define _LINUX_TRACE_CLOCK_H + +/* + * 3 trace clock variants, with differing scalability/precision + * tradeoffs: + * + * - local: CPU-local trace clock + * - medium: scalable global clock with some jitter + * - global: globally monotonic, serialized clock + */ +#include <linux/compiler.h> +#include <linux/types.h> + +extern u64 notrace trace_clock_local(void); +extern u64 notrace trace_clock(void); +extern u64 notrace trace_clock_global(void); + +#endif /* _LINUX_TRACE_CLOCK_H */ diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 75700545836..d35a7ee7611 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -31,8 +31,8 @@ struct tracepoint { * Keep in sync with vmlinux.lds.h. */ -#define TPPROTO(args...) args -#define TPARGS(args...) args +#define TP_PROTO(args...) args +#define TP_ARGS(args...) args #ifdef CONFIG_TRACEPOINTS @@ -65,7 +65,7 @@ struct tracepoint { { \ if (unlikely(__tracepoint_##name.state)) \ __DO_TRACE(&__tracepoint_##name, \ - TPPROTO(proto), TPARGS(args)); \ + TP_PROTO(proto), TP_ARGS(args)); \ } \ static inline int register_trace_##name(void (*probe)(proto)) \ { \ @@ -153,4 +153,114 @@ static inline void tracepoint_synchronize_unregister(void) synchronize_sched(); } +#define PARAMS(args...) args +#define TRACE_FORMAT(name, proto, args, fmt) \ + DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) + + +/* + * For use with the TRACE_EVENT macro: + * + * We define a tracepoint, its arguments, its printk format + * and its 'fast binay record' layout. 
+ * + * Firstly, name your tracepoint via TRACE_EVENT(name : the + * 'subsystem_event' notation is fine. + * + * Think about this whole construct as the + * 'trace_sched_switch() function' from now on. + * + * + * TRACE_EVENT(sched_switch, + * + * * + * * A function has a regular function arguments + * * prototype, declare it via TP_PROTO(): + * * + * + * TP_PROTO(struct rq *rq, struct task_struct *prev, + * struct task_struct *next), + * + * * + * * Define the call signature of the 'function'. + * * (Design sidenote: we use this instead of a + * * TP_PROTO1/TP_PROTO2/TP_PROTO3 ugliness.) + * * + * + * TP_ARGS(rq, prev, next), + * + * * + * * Fast binary tracing: define the trace record via + * * TP_STRUCT__entry(). You can think about it like a + * * regular C structure local variable definition. + * * + * * This is how the trace record is structured and will + * * be saved into the ring buffer. These are the fields + * * that will be exposed to user-space in + * * /debug/tracing/events/<*>/format. + * * + * * The declared 'local variable' is called '__entry' + * * + * * __field(pid_t, prev_prid) is equivalent to a standard declariton: + * * + * * pid_t prev_pid; + * * + * * __array(char, prev_comm, TASK_COMM_LEN) is equivalent to: + * * + * * char prev_comm[TASK_COMM_LEN]; + * * + * + * TP_STRUCT__entry( + * __array( char, prev_comm, TASK_COMM_LEN ) + * __field( pid_t, prev_pid ) + * __field( int, prev_prio ) + * __array( char, next_comm, TASK_COMM_LEN ) + * __field( pid_t, next_pid ) + * __field( int, next_prio ) + * ), + * + * * + * * Assign the entry into the trace record, by embedding + * * a full C statement block into TP_fast_assign(). You + * * can refer to the trace record as '__entry' - + * * otherwise you can put arbitrary C code in here. + * * + * * Note: this C code will execute every time a trace event + * * happens, on an active tracepoint. + * * + * + * TP_fast_assign( + * memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN); + * __entry->prev_pid = prev->pid; + * __entry->prev_prio = prev->prio; + * memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN); + * __entry->next_pid = next->pid; + * __entry->next_prio = next->prio; + * ) + * + * * + * * Formatted output of a trace record via TP_printk(). + * * This is how the tracepoint will appear under ftrace + * * plugins that make use of this tracepoint. + * * + * * (raw-binary tracing wont actually perform this step.) + * * + * + * TP_printk("task %s:%d [%d] ==> %s:%d [%d]", + * __entry->prev_comm, __entry->prev_pid, __entry->prev_prio, + * __entry->next_comm, __entry->next_pid, __entry->next_prio), + * + * ); + * + * This macro construct is thus used for the regular printk format + * tracing setup, it is used to construct a function pointer based + * tracepoint callback (this is used by programmatic plugins and + * can also by used by generic instrumentation like SystemTap), and + * it is also used to expose a structured trace record in + * /debug/tracing/events/. 
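As the comment above notes, TRACE_EVENT() currently expands to DECLARE_TRACE(), which generates trace_<name>(), register_trace_<name>() and unregister_trace_<name>(). A minimal, hypothetical sketch of attaching a probe to such a tracepoint; the my_subsys_event name and the probe are invented for illustration:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/tracepoint.h>

/* Declared in a header; one .c file must also instantiate the
 * tracepoint itself with DEFINE_TRACE(my_subsys_event). */
DECLARE_TRACE(my_subsys_event,
        TP_PROTO(int cpu, unsigned long val),
        TP_ARGS(cpu, val));

/* The probe signature must match TP_PROTO() exactly. */
static void my_probe(int cpu, unsigned long val)
{
        pr_debug("my_subsys_event: cpu=%d val=%lu\n", cpu, val);
}

static int __init my_probe_init(void)
{
        return register_trace_my_subsys_event(my_probe);
}
module_init(my_probe_init);

static void __exit my_probe_exit(void)
{
        unregister_trace_my_subsys_event(my_probe);
        tracepoint_synchronize_unregister();    /* wait for in-flight probes */
}
module_exit(my_probe_exit);
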
+ */ + +#define TRACE_EVENT(name, proto, args, struct, assign, print) \ + DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) + #endif diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h index 8615d661ab6..bcba84ea2d8 100644 --- a/include/linux/tty_driver.h +++ b/include/linux/tty_driver.h @@ -309,7 +309,8 @@ extern void tty_set_operations(struct tty_driver *driver, extern struct tty_driver *tty_find_polling_driver(char *name, int *line); extern void tty_driver_kref_put(struct tty_driver *driver); -extern inline struct tty_driver *tty_driver_kref_get(struct tty_driver *d) + +static inline struct tty_driver *tty_driver_kref_get(struct tty_driver *d) { kref_get(&d->kref); return d; diff --git a/include/linux/usb/wusb.h b/include/linux/usb/wusb.h index 5f401b644ed..429c631d2aa 100644 --- a/include/linux/usb/wusb.h +++ b/include/linux/usb/wusb.h @@ -80,8 +80,7 @@ struct wusb_ckhdid { u8 data[16]; } __attribute__((packed)); -const static -struct wusb_ckhdid wusb_ckhdid_zero = { .data = { 0 } }; +static const struct wusb_ckhdid wusb_ckhdid_zero = { .data = { 0 } }; #define WUSB_CKHDID_STRSIZE (3 * sizeof(struct wusb_ckhdid) + 1) diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h index 139d234923c..ebb2ea6b499 100644 --- a/include/linux/videodev2.h +++ b/include/linux/videodev2.h @@ -737,6 +737,11 @@ struct v4l2_input { #define V4L2_IN_ST_NO_SIGNAL 0x00000002 #define V4L2_IN_ST_NO_COLOR 0x00000004 +/* field 'status' - sensor orientation */ +/* If sensor is mounted upside down set both bits */ +#define V4L2_IN_ST_HFLIP 0x00000010 /* Frames are flipped horizontally */ +#define V4L2_IN_ST_VFLIP 0x00000020 /* Frames are flipped vertically */ + /* field 'status' - analog */ #define V4L2_IN_ST_NO_H_LOCK 0x00000100 /* No horizontal sync lock */ #define V4L2_IN_ST_COLOR_KILL 0x00000200 /* Color killer is active */ diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 93445477f86..9c1ed1fb6dd 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -168,6 +168,8 @@ void writeback_set_ratelimit(void); /* pdflush.c */ extern int nr_pdflush_threads; /* Global so it can be exported to sysctl read-only. 
*/ +extern int nr_pdflush_threads_max; /* Global so it can be exported to sysctl */ +extern int nr_pdflush_threads_min; /* Global so it can be exported to sysctl */ #endif /* WRITEBACK_H */ diff --git a/include/media/msp3400.h b/include/media/msp3400.h index 6ab854931c0..90cf22ada8b 100644 --- a/include/media/msp3400.h +++ b/include/media/msp3400.h @@ -54,13 +54,13 @@ ======= So to specify a complete routing scheme for the msp3400 you will have to - specify in the 'input' field of the v4l2_routing struct: + specify in the 'input' arg of the s_routing function: 1) which tuner input to use 2) which SCART input to use 3) which DSP input to use for each DSP output - And in the 'output' field of the v4l2_routing struct you specify: + And in the 'output' arg of the s_routing function you specify: 1) which SCART input to use for each SCART output diff --git a/include/media/ov772x.h b/include/media/ov772x.h index 57db48dd85b..30d9629198e 100644 --- a/include/media/ov772x.h +++ b/include/media/ov772x.h @@ -17,10 +17,45 @@ #define OV772X_FLAG_VFLIP 0x00000001 /* Vertical flip image */ #define OV772X_FLAG_HFLIP 0x00000002 /* Horizontal flip image */ +/* + * for Edge ctrl + * + * strength also control Auto or Manual Edge Control Mode + * see also OV772X_MANUAL_EDGE_CTRL + */ +struct ov772x_edge_ctrl { + unsigned char strength; + unsigned char threshold; + unsigned char upper; + unsigned char lower; +}; + +#define OV772X_MANUAL_EDGE_CTRL 0x80 /* un-used bit of strength */ +#define EDGE_STRENGTH_MASK 0x1F +#define EDGE_THRESHOLD_MASK 0x0F +#define EDGE_UPPER_MASK 0xFF +#define EDGE_LOWER_MASK 0xFF + +#define OV772X_AUTO_EDGECTRL(u, l) \ +{ \ + .upper = (u & EDGE_UPPER_MASK), \ + .lower = (l & EDGE_LOWER_MASK), \ +} + +#define OV772X_MANUAL_EDGECTRL(s, t) \ +{ \ + .strength = (s & EDGE_STRENGTH_MASK) | OV772X_MANUAL_EDGE_CTRL,\ + .threshold = (t & EDGE_THRESHOLD_MASK), \ +} + +/* + * ov772x camera info + */ struct ov772x_camera_info { unsigned long buswidth; unsigned long flags; struct soc_camera_link link; + struct ov772x_edge_ctrl edgectrl; }; #endif /* __OV772X_H__ */ diff --git a/include/media/saa7146.h b/include/media/saa7146.h index fff4235adae..7a9f76ecbbb 100644 --- a/include/media/saa7146.h +++ b/include/media/saa7146.h @@ -18,7 +18,7 @@ #include <linux/vmalloc.h> /* for vmalloc() */ #include <linux/mm.h> /* for vmalloc_to_page() */ -#define SAA7146_VERSION_CODE 0x000500 /* 0.5.0 */ +#define SAA7146_VERSION_CODE 0x000600 /* 0.6.0 */ #define saa7146_write(sxy,adr,dat) writel((dat),(sxy->mem+(adr))) #define saa7146_read(sxy,adr) readl(sxy->mem+(adr)) diff --git a/include/media/tvaudio.h b/include/media/tvaudio.h index 6915aafc875..1ac8184693f 100644 --- a/include/media/tvaudio.h +++ b/include/media/tvaudio.h @@ -21,10 +21,29 @@ #ifndef _TVAUDIO_H #define _TVAUDIO_H +#include <media/i2c-addr.h> + /* The tvaudio module accepts the following inputs: */ #define TVAUDIO_INPUT_TUNER 0 #define TVAUDIO_INPUT_RADIO 1 #define TVAUDIO_INPUT_EXTERN 2 #define TVAUDIO_INPUT_INTERN 3 +static inline const unsigned short *tvaudio_addrs(void) +{ + static const unsigned short addrs[] = { + I2C_ADDR_TDA8425 >> 1, + I2C_ADDR_TEA6300 >> 1, + I2C_ADDR_TEA6420 >> 1, + I2C_ADDR_TDA9840 >> 1, + I2C_ADDR_TDA985x_L >> 1, + I2C_ADDR_TDA985x_H >> 1, + I2C_ADDR_TDA9874 >> 1, + I2C_ADDR_PIC16C54 >> 1, + I2C_CLIENT_END + }; + + return addrs; +} + #endif diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h index 3a6905615d6..c48c24e4d0f 100644 --- a/include/media/v4l2-common.h +++ b/include/media/v4l2-common.h @@ 
-125,7 +125,7 @@ int v4l2_chip_match_host(const struct v4l2_dbg_match *match); /* ------------------------------------------------------------------------- */ -/* Helper function for I2C legacy drivers */ +/* I2C Helper functions */ struct i2c_driver; struct i2c_adapter; @@ -135,21 +135,24 @@ struct v4l2_device; struct v4l2_subdev; struct v4l2_subdev_ops; -int v4l2_i2c_attach(struct i2c_adapter *adapter, int address, struct i2c_driver *driver, - const char *name, - int (*probe)(struct i2c_client *, const struct i2c_device_id *)); /* Load an i2c module and return an initialized v4l2_subdev struct. Only call request_module if module_name != NULL. The client_type argument is the name of the chip that's on the adapter. */ -struct v4l2_subdev *v4l2_i2c_new_subdev(struct i2c_adapter *adapter, +struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev, + struct i2c_adapter *adapter, const char *module_name, const char *client_type, u8 addr); /* Probe and load an i2c module and return an initialized v4l2_subdev struct. Only call request_module if module_name != NULL. The client_type argument is the name of the chip that's on the adapter. */ -struct v4l2_subdev *v4l2_i2c_new_probed_subdev(struct i2c_adapter *adapter, +struct v4l2_subdev *v4l2_i2c_new_probed_subdev(struct v4l2_device *v4l2_dev, + struct i2c_adapter *adapter, const char *module_name, const char *client_type, const unsigned short *addrs); +/* Like v4l2_i2c_new_probed_subdev, except probe for a single address. */ +struct v4l2_subdev *v4l2_i2c_new_probed_subdev_addr(struct v4l2_device *v4l2_dev, + struct i2c_adapter *adapter, + const char *module_name, const char *client_type, u8 addr); /* Initialize an v4l2_subdev with data from an i2c_client struct */ void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client, const struct v4l2_subdev_ops *ops); @@ -171,139 +174,23 @@ const unsigned short *v4l2_i2c_tuner_addrs(enum v4l2_i2c_tuner_type type); /* ------------------------------------------------------------------------- */ -/* Internal ioctls */ - -/* VIDIOC_INT_DECODE_VBI_LINE */ -struct v4l2_decode_vbi_line { - u32 is_second_field; /* Set to 0 for the first (odd) field, - set to 1 for the second (even) field. */ - u8 *p; /* Pointer to the sliced VBI data from the decoder. - On exit points to the start of the payload. */ - u32 line; /* Line number of the sliced VBI data (1-23) */ - u32 type; /* VBI service type (V4L2_SLICED_*). 0 if no service found */ -}; +/* Note: these remaining ioctls/structs should be removed as well, but they are + still used in tuner-simple.c (TUNER_SET_CONFIG), cx18/ivtv (RESET) and + v4l2-int-device.h (v4l2_routing). To remove these ioctls some more cleanup + is needed in those modules. */ +/* s_config */ struct v4l2_priv_tun_config { int tuner; void *priv; }; - -/* audio ioctls */ - -/* v4l device was opened in Radio mode, to be replaced by VIDIOC_INT_S_TUNER_MODE */ -#define AUDC_SET_RADIO _IO('d',88) - -/* tuner ioctls */ - -/* Sets tuner type and its I2C addr */ -#define TUNER_SET_TYPE_ADDR _IOW('d', 90, int) - -/* Puts tuner on powersaving state, disabling it, except for i2c. To be replaced - by VIDIOC_INT_S_STANDBY. */ -#define TUNER_SET_STANDBY _IOW('d', 91, int) - -/* Sets tda9887 specific stuff, like port1, port2 and qss */ #define TUNER_SET_CONFIG _IOW('d', 92, struct v4l2_priv_tun_config) -/* Switch the tuner to a specific tuner mode. Replacement of AUDC_SET_RADIO */ -#define VIDIOC_INT_S_TUNER_MODE _IOW('d', 93, enum v4l2_tuner_type) - -/* Generic standby command. 
Passing -1 (all bits set to 1) will put the whole - chip into standby mode, value 0 will make the chip fully active. Specific - bits can be used by certain chips to enable/disable specific subsystems. - Replacement of TUNER_SET_STANDBY. */ -#define VIDIOC_INT_S_STANDBY _IOW('d', 94, u32) - -/* 100, 101 used by VIDIOC_DBG_[SG]_REGISTER */ - -/* Generic reset command. The argument selects which subsystems to reset. - Passing 0 will always reset the whole chip. */ #define VIDIOC_INT_RESET _IOW ('d', 102, u32) -/* Set the frequency (in Hz) of the audio clock output. - Used to slave an audio processor to the video decoder, ensuring that audio - and video remain synchronized. - Usual values for the frequency are 48000, 44100 or 32000 Hz. - If the frequency is not supported, then -EINVAL is returned. */ -#define VIDIOC_INT_AUDIO_CLOCK_FREQ _IOW ('d', 103, u32) - -/* Video decoders that support sliced VBI need to implement this ioctl. - Field p of the v4l2_sliced_vbi_line struct is set to the start of the VBI - data that was generated by the decoder. The driver then parses the sliced - VBI data and sets the other fields in the struct accordingly. The pointer p - is updated to point to the start of the payload which can be copied - verbatim into the data field of the v4l2_sliced_vbi_data struct. If no - valid VBI data was found, then the type field is set to 0 on return. */ -#define VIDIOC_INT_DECODE_VBI_LINE _IOWR('d', 104, struct v4l2_decode_vbi_line) - -/* Used to generate VBI signals on a video signal. v4l2_sliced_vbi_data is - filled with the data packets that should be output. Note that if you set - the line field to 0, then that VBI signal is disabled. If no - valid VBI data was found, then the type field is set to 0 on return. */ -#define VIDIOC_INT_S_VBI_DATA _IOW ('d', 105, struct v4l2_sliced_vbi_data) - -/* Used to obtain the sliced VBI packet from a readback register. Not all - video decoders support this. If no data is available because the readback - register contains invalid or erroneous data -EIO is returned. Note that - you must fill in the 'id' member and the 'field' member (to determine - whether CC data from the first or second field should be obtained). */ -#define VIDIOC_INT_G_VBI_DATA _IOWR('d', 106, struct v4l2_sliced_vbi_data) - -/* Sets I2S speed in bps. This is used to provide a standard way to select I2S - clock used by driving digital audio streams at some board designs. - Usual values for the frequency are 1024000 and 2048000. - If the frequency is not supported, then -EINVAL is returned. */ -#define VIDIOC_INT_I2S_CLOCK_FREQ _IOW ('d', 108, u32) - -/* Routing definition, device dependent. It specifies which inputs (if any) - should be routed to which outputs (if any). */ struct v4l2_routing { u32 input; u32 output; }; -/* These internal commands should be used to define the inputs and outputs - of an audio/video chip. They will replace the v4l2 API commands - VIDIOC_S/G_INPUT, VIDIOC_S/G_OUTPUT, VIDIOC_S/G_AUDIO and VIDIOC_S/G_AUDOUT - that are meant to be used by the user. - The internal commands should be used to switch inputs/outputs - because only the driver knows how to map a 'Television' input to the precise - input/output routing of an A/D converter, or a DSP, or a video digitizer. - These four commands should only be sent directly to an i2c device, they - should not be broadcast as the routing is very device specific. 
*/ -#define VIDIOC_INT_S_AUDIO_ROUTING _IOW ('d', 109, struct v4l2_routing) -#define VIDIOC_INT_G_AUDIO_ROUTING _IOR ('d', 110, struct v4l2_routing) -#define VIDIOC_INT_S_VIDEO_ROUTING _IOW ('d', 111, struct v4l2_routing) -#define VIDIOC_INT_G_VIDEO_ROUTING _IOR ('d', 112, struct v4l2_routing) - -struct v4l2_crystal_freq { - u32 freq; /* frequency in Hz of the crystal */ - u32 flags; /* device specific flags */ -}; - -/* Sets the frequency of the crystal used to generate the clocks. - An extra flags field allows device specific configuration regarding - clock frequency dividers, etc. If not used, then set flags to 0. - If the frequency is not supported, then -EINVAL is returned. */ -#define VIDIOC_INT_S_CRYSTAL_FREQ _IOW('d', 113, struct v4l2_crystal_freq) - -/* Initialize the sensor registors to some sort of reasonable - default values. */ -#define VIDIOC_INT_INIT _IOW('d', 114, u32) - -/* Set v4l2_std_id for video OUTPUT devices. This is ignored by - video input devices. */ -#define VIDIOC_INT_S_STD_OUTPUT _IOW('d', 115, v4l2_std_id) - -/* Get v4l2_std_id for video OUTPUT devices. This is ignored by - video input devices. */ -#define VIDIOC_INT_G_STD_OUTPUT _IOW('d', 116, v4l2_std_id) - -/* Set GPIO pins. Very simple right now, might need to be extended with - a v4l2_gpio struct if a direction is also needed. */ -#define VIDIOC_INT_S_GPIO _IOW('d', 117, u32) - -/* Get input status. Same as the status field in the v4l2_input struct. */ -#define VIDIOC_INT_G_INPUT_STATUS _IOR('d', 118, u32) - #endif /* V4L2_COMMON_H_ */ diff --git a/include/media/v4l2-i2c-drv-legacy.h b/include/media/v4l2-i2c-drv-legacy.h deleted file mode 100644 index e65dd9d84e8..00000000000 --- a/include/media/v4l2-i2c-drv-legacy.h +++ /dev/null @@ -1,152 +0,0 @@ -/* - * v4l2-i2c-drv-legacy.h - contains I2C handling code that's identical - * for all V4L2 I2C drivers. Use this header if the - * I2C driver is used by both legacy drivers and - * drivers converted to the bus-based I2C API. - * - * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ - -/* NOTE: the full version of this header is in the v4l-dvb repository - * and allows v4l i2c drivers to be compiled on older kernels as well. - * The version of this header as it appears in the kernel is a stripped - * version (without all the backwards compatibility stuff) and so it - * looks a bit odd. - * - * If you look at the full version then you will understand the reason - * for introducing this header since you really don't want to have all - * the tricky backwards compatibility code in each and every i2c driver. 
- */ - -struct v4l2_i2c_driver_data { - const char * const name; - int driverid; - int (*command)(struct i2c_client *client, unsigned int cmd, void *arg); - int (*probe)(struct i2c_client *client, const struct i2c_device_id *id); - int (*remove)(struct i2c_client *client); - int (*suspend)(struct i2c_client *client, pm_message_t state); - int (*resume)(struct i2c_client *client); - int (*legacy_probe)(struct i2c_adapter *adapter); - int legacy_class; - const struct i2c_device_id *id_table; -}; - -static struct v4l2_i2c_driver_data v4l2_i2c_data; -static const struct i2c_client_address_data addr_data; -static struct i2c_driver v4l2_i2c_driver_legacy; -static char v4l2_i2c_drv_name_legacy[32]; - -static int v4l2_i2c_drv_attach_legacy(struct i2c_adapter *adapter, int address, int kind) -{ - return v4l2_i2c_attach(adapter, address, &v4l2_i2c_driver_legacy, - v4l2_i2c_drv_name_legacy, v4l2_i2c_data.probe); -} - -static int v4l2_i2c_drv_probe_legacy(struct i2c_adapter *adapter) -{ - if (v4l2_i2c_data.legacy_probe) { - if (v4l2_i2c_data.legacy_probe(adapter)) - return i2c_probe(adapter, &addr_data, v4l2_i2c_drv_attach_legacy); - return 0; - } - if (adapter->class & v4l2_i2c_data.legacy_class) - return i2c_probe(adapter, &addr_data, v4l2_i2c_drv_attach_legacy); - return 0; -} - -static int v4l2_i2c_drv_detach_legacy(struct i2c_client *client) -{ - int err; - - if (v4l2_i2c_data.remove) - v4l2_i2c_data.remove(client); - - err = i2c_detach_client(client); - if (err) - return err; - kfree(client); - return 0; -} - -static int v4l2_i2c_drv_suspend_helper(struct i2c_client *client, pm_message_t state) -{ - return v4l2_i2c_data.suspend ? v4l2_i2c_data.suspend(client, state) : 0; -} - -static int v4l2_i2c_drv_resume_helper(struct i2c_client *client) -{ - return v4l2_i2c_data.resume ? 
v4l2_i2c_data.resume(client) : 0; -} - -/* ----------------------------------------------------------------------- */ - -/* i2c implementation */ -static struct i2c_driver v4l2_i2c_driver_legacy = { - .driver = { - .owner = THIS_MODULE, - }, - .attach_adapter = v4l2_i2c_drv_probe_legacy, - .detach_client = v4l2_i2c_drv_detach_legacy, - .suspend = v4l2_i2c_drv_suspend_helper, - .resume = v4l2_i2c_drv_resume_helper, -}; - -/* ----------------------------------------------------------------------- */ - -/* i2c implementation */ -static struct i2c_driver v4l2_i2c_driver = { - .suspend = v4l2_i2c_drv_suspend_helper, - .resume = v4l2_i2c_drv_resume_helper, -}; - -static int __init v4l2_i2c_drv_init(void) -{ - int err; - - strlcpy(v4l2_i2c_drv_name_legacy, v4l2_i2c_data.name, sizeof(v4l2_i2c_drv_name_legacy)); - strlcat(v4l2_i2c_drv_name_legacy, "'", sizeof(v4l2_i2c_drv_name_legacy)); - - if (v4l2_i2c_data.legacy_class == 0) - v4l2_i2c_data.legacy_class = I2C_CLASS_TV_ANALOG; - - v4l2_i2c_driver_legacy.driver.name = v4l2_i2c_drv_name_legacy; - v4l2_i2c_driver_legacy.id = v4l2_i2c_data.driverid; - v4l2_i2c_driver_legacy.command = v4l2_i2c_data.command; - err = i2c_add_driver(&v4l2_i2c_driver_legacy); - - if (err) - return err; - v4l2_i2c_driver.driver.name = v4l2_i2c_data.name; - v4l2_i2c_driver.id = v4l2_i2c_data.driverid; - v4l2_i2c_driver.command = v4l2_i2c_data.command; - v4l2_i2c_driver.probe = v4l2_i2c_data.probe; - v4l2_i2c_driver.remove = v4l2_i2c_data.remove; - v4l2_i2c_driver.id_table = v4l2_i2c_data.id_table; - err = i2c_add_driver(&v4l2_i2c_driver); - if (err) - i2c_del_driver(&v4l2_i2c_driver_legacy); - return err; -} - -static void __exit v4l2_i2c_drv_cleanup(void) -{ - i2c_del_driver(&v4l2_i2c_driver_legacy); - i2c_del_driver(&v4l2_i2c_driver); -} - -module_init(v4l2_i2c_drv_init); -module_exit(v4l2_i2c_drv_cleanup); diff --git a/include/media/v4l2-i2c-drv.h b/include/media/v4l2-i2c-drv.h index efdc8bf27f8..10a2882c3cb 100644 --- a/include/media/v4l2-i2c-drv.h +++ b/include/media/v4l2-i2c-drv.h @@ -39,14 +39,11 @@ struct v4l2_i2c_driver_data { const char * const name; - int driverid; int (*command)(struct i2c_client *client, unsigned int cmd, void *arg); int (*probe)(struct i2c_client *client, const struct i2c_device_id *id); int (*remove)(struct i2c_client *client); int (*suspend)(struct i2c_client *client, pm_message_t state); int (*resume)(struct i2c_client *client); - int (*legacy_probe)(struct i2c_adapter *adapter); - int legacy_class; const struct i2c_device_id *id_table; }; @@ -54,12 +51,11 @@ static struct v4l2_i2c_driver_data v4l2_i2c_data; static struct i2c_driver v4l2_i2c_driver; -/* Bus-based I2C implementation for kernels >= 2.6.22 */ +/* Bus-based I2C implementation for kernels >= 2.6.26 */ static int __init v4l2_i2c_drv_init(void) { v4l2_i2c_driver.driver.name = v4l2_i2c_data.name; - v4l2_i2c_driver.id = v4l2_i2c_data.driverid; v4l2_i2c_driver.command = v4l2_i2c_data.command; v4l2_i2c_driver.probe = v4l2_i2c_data.probe; v4l2_i2c_driver.remove = v4l2_i2c_data.remove; diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h index 1d181b4ccb0..17856081c80 100644 --- a/include/media/v4l2-subdev.h +++ b/include/media/v4l2-subdev.h @@ -27,6 +27,16 @@ struct v4l2_device; struct v4l2_subdev; struct tuner_setup; +/* decode_vbi_line */ +struct v4l2_decode_vbi_line { + u32 is_second_field; /* Set to 0 for the first (odd) field, + set to 1 for the second (even) field. */ + u8 *p; /* Pointer to the sliced VBI data from the decoder. 
+ On exit points to the start of the payload. */ + u32 line; /* Line number of the sliced VBI data (1-23) */ + u32 type; /* VBI service type (V4L2_SLICED_*). 0 if no service found */ +}; + /* Sub-devices are devices that are connected somehow to the main bridge device. These devices are usually audio/video muxers/encoders/decoders or sensors and webcam controllers. @@ -68,11 +78,26 @@ struct tuner_setup; the use-case it might be better to use subdev-specific ops (currently not yet implemented) since ops provide proper type-checking. */ + +/* init: initialize the sensor registors to some sort of reasonable default + values. Do not use for new drivers and should be removed in existing + drivers. + + load_fw: load firmware. + + reset: generic reset command. The argument selects which subsystems to + reset. Passing 0 will always reset the whole chip. Do not use for new + drivers without discussing this first on the linux-media mailinglist. + There should be no reason normally to reset a device. + + s_gpio: set GPIO pins. Very simple right now, might need to be extended with + a direction argument if needed. + */ struct v4l2_subdev_core_ops { int (*g_chip_ident)(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip); int (*log_status)(struct v4l2_subdev *sd); int (*init)(struct v4l2_subdev *sd, u32 val); - int (*s_standby)(struct v4l2_subdev *sd, u32 standby); + int (*load_fw)(struct v4l2_subdev *sd); int (*reset)(struct v4l2_subdev *sd, u32 val); int (*s_gpio)(struct v4l2_subdev *sd, u32 val); int (*queryctrl)(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc); @@ -82,6 +107,7 @@ struct v4l2_subdev_core_ops { int (*s_ext_ctrls)(struct v4l2_subdev *sd, struct v4l2_ext_controls *ctrls); int (*try_ext_ctrls)(struct v4l2_subdev *sd, struct v4l2_ext_controls *ctrls); int (*querymenu)(struct v4l2_subdev *sd, struct v4l2_querymenu *qm); + int (*s_std)(struct v4l2_subdev *sd, v4l2_std_id norm); long (*ioctl)(struct v4l2_subdev *sd, unsigned int cmd, void *arg); #ifdef CONFIG_VIDEO_ADV_DEBUG int (*g_register)(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg); @@ -89,6 +115,16 @@ struct v4l2_subdev_core_ops { #endif }; +/* s_mode: switch the tuner to a specific tuner mode. Replacement of s_radio. + + s_radio: v4l device was opened in Radio mode, to be replaced by s_mode. + + s_type_addr: sets tuner type and its I2C addr. + + s_config: sets tda9887 specific stuff, like port1, port2 and qss + + s_standby: puts tuner on powersaving state, disabling it, except for i2c. + */ struct v4l2_subdev_tuner_ops { int (*s_mode)(struct v4l2_subdev *sd, enum v4l2_tuner_type); int (*s_radio)(struct v4l2_subdev *sd); @@ -96,20 +132,77 @@ struct v4l2_subdev_tuner_ops { int (*g_frequency)(struct v4l2_subdev *sd, struct v4l2_frequency *freq); int (*g_tuner)(struct v4l2_subdev *sd, struct v4l2_tuner *vt); int (*s_tuner)(struct v4l2_subdev *sd, struct v4l2_tuner *vt); - int (*s_std)(struct v4l2_subdev *sd, v4l2_std_id norm); int (*s_type_addr)(struct v4l2_subdev *sd, struct tuner_setup *type); int (*s_config)(struct v4l2_subdev *sd, const struct v4l2_priv_tun_config *config); + int (*s_standby)(struct v4l2_subdev *sd); }; +/* s_clock_freq: set the frequency (in Hz) of the audio clock output. + Used to slave an audio processor to the video decoder, ensuring that + audio and video remain synchronized. Usual values for the frequency + are 48000, 44100 or 32000 Hz. If the frequency is not supported, then + -EINVAL is returned. + + s_i2s_clock_freq: sets I2S speed in bps. 
This is used to provide a standard + way to select I2S clock used by driving digital audio streams at some + board designs. Usual values for the frequency are 1024000 and 2048000. + If the frequency is not supported, then -EINVAL is returned. + + s_routing: used to define the input and/or output pins of an audio chip, + and any additional configuration data. + Never attempt to use user-level input IDs (e.g. Composite, S-Video, + Tuner) at this level. An i2c device shouldn't know about whether an + input pin is connected to a Composite connector, become on another + board or platform it might be connected to something else entirely. + The calling driver is responsible for mapping a user-level input to + the right pins on the i2c device. + */ struct v4l2_subdev_audio_ops { int (*s_clock_freq)(struct v4l2_subdev *sd, u32 freq); int (*s_i2s_clock_freq)(struct v4l2_subdev *sd, u32 freq); - int (*s_routing)(struct v4l2_subdev *sd, const struct v4l2_routing *route); + int (*s_routing)(struct v4l2_subdev *sd, u32 input, u32 output, u32 config); }; +/* + decode_vbi_line: video decoders that support sliced VBI need to implement + this ioctl. Field p of the v4l2_sliced_vbi_line struct is set to the + start of the VBI data that was generated by the decoder. The driver + then parses the sliced VBI data and sets the other fields in the + struct accordingly. The pointer p is updated to point to the start of + the payload which can be copied verbatim into the data field of the + v4l2_sliced_vbi_data struct. If no valid VBI data was found, then the + type field is set to 0 on return. + + s_vbi_data: used to generate VBI signals on a video signal. + v4l2_sliced_vbi_data is filled with the data packets that should be + output. Note that if you set the line field to 0, then that VBI signal + is disabled. If no valid VBI data was found, then the type field is + set to 0 on return. + + g_vbi_data: used to obtain the sliced VBI packet from a readback register. + Not all video decoders support this. If no data is available because + the readback register contains invalid or erroneous data -EIO is + returned. Note that you must fill in the 'id' member and the 'field' + member (to determine whether CC data from the first or second field + should be obtained). + + s_std_output: set v4l2_std_id for video OUTPUT devices. This is ignored by + video input devices. + + s_crystal_freq: sets the frequency of the crystal used to generate the + clocks in Hz. An extra flags field allows device specific configuration + regarding clock frequency dividers, etc. If not used, then set flags + to 0. If the frequency is not supported, then -EINVAL is returned. + + g_input_status: get input status. Same as the status field in the v4l2_input + struct. + + s_routing: see s_routing in audio_ops, except this version is for video + devices. 
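With this change the routing ops take plain (input, output, config) arguments instead of a struct v4l2_routing pointer. A small sketch of how a bridge driver might invoke the video op in the structure that follows through the existing v4l2_subdev_call() helper; the pin numbers are made-up, device-specific values:

#include <media/v4l2-subdev.h>

/* Route hypothetical input pin 2 to output pin 0; the last argument
 * carries optional device-specific configuration and is 0 when unused. */
static int select_svideo_input(struct v4l2_subdev *sd)
{
        return v4l2_subdev_call(sd, video, s_routing, 2, 0, 0);
}
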
+ */ struct v4l2_subdev_video_ops { - int (*s_routing)(struct v4l2_subdev *sd, const struct v4l2_routing *route); - int (*s_crystal_freq)(struct v4l2_subdev *sd, struct v4l2_crystal_freq *freq); + int (*s_routing)(struct v4l2_subdev *sd, u32 input, u32 output, u32 config); + int (*s_crystal_freq)(struct v4l2_subdev *sd, u32 freq, u32 flags); int (*decode_vbi_line)(struct v4l2_subdev *sd, struct v4l2_decode_vbi_line *vbi_line); int (*s_vbi_data)(struct v4l2_subdev *sd, const struct v4l2_sliced_vbi_data *vbi_data); int (*g_vbi_data)(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_data *vbi_data); @@ -163,18 +256,6 @@ static inline void *v4l2_get_subdevdata(const struct v4l2_subdev *sd) return sd->priv; } -/* Convert an ioctl-type command to the proper v4l2_subdev_ops function call. - This is used by subdev modules that can be called by both old-style ioctl - commands and through the v4l2_subdev_ops. - - The ioctl API of the subdev driver can call this function to call the - right ops based on the ioctl cmd and arg. - - Once all subdev drivers have been converted and all drivers no longer - use the ioctl interface, then this function can be removed. - */ -int v4l2_subdev_command(struct v4l2_subdev *sd, unsigned cmd, void *arg); - static inline void v4l2_subdev_init(struct v4l2_subdev *sd, const struct v4l2_subdev_ops *ops) { diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h index ab17a159ac6..a9652806d0d 100644 --- a/include/net/netfilter/nf_conntrack_expect.h +++ b/include/net/netfilter/nf_conntrack_expect.h @@ -99,9 +99,12 @@ void nf_ct_expect_init(struct nf_conntrack_expect *, unsigned int, u_int8_t, const union nf_inet_addr *, u_int8_t, const __be16 *, const __be16 *); void nf_ct_expect_put(struct nf_conntrack_expect *exp); -int nf_ct_expect_related(struct nf_conntrack_expect *expect); int nf_ct_expect_related_report(struct nf_conntrack_expect *expect, u32 pid, int report); +static inline int nf_ct_expect_related(struct nf_conntrack_expect *expect) +{ + return nf_ct_expect_related_report(expect, 0, 0); +} #endif /*_NF_CONNTRACK_EXPECT_H*/ diff --git a/include/scsi/fc/fc_fip.h b/include/scsi/fc/fc_fip.h new file mode 100644 index 00000000000..0627a9ae634 --- /dev/null +++ b/include/scsi/fc/fc_fip.h @@ -0,0 +1,237 @@ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef _FC_FIP_H_ +#define _FC_FIP_H_ + +/* + * This version is based on: + * http://www.t11.org/ftp/t11/pub/fc/bb-5/08-543v1.pdf + */ + +/* + * The FIP ethertype eventually goes in net/if_ether.h. 
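A FIP-capable driver receives these frames by registering a packet_type for ETH_P_FIP (defined just below) on its interface, much as it already does for the FCoE ethertype. The handler and setup function here are illustrative stand-ins, not part of this header:

#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <scsi/fc/fc_fip.h>

static int fip_rcv(struct sk_buff *skb, struct net_device *dev,
                   struct packet_type *ptype, struct net_device *orig_dev)
{
        /* Normally the frame would be handed to the FIP state machine;
         * this sketch simply drops it. */
        kfree_skb(skb);
        return 0;
}

static struct packet_type fip_packet_type = {
        .type = __constant_htons(ETH_P_FIP),
        .func = fip_rcv,
};

/* Bind the handler to one interface; leaving .dev NULL would make it
 * global for all interfaces. */
static void fip_start(struct net_device *netdev)
{
        fip_packet_type.dev = netdev;
        dev_add_pack(&fip_packet_type);
}
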
+ */ +#ifndef ETH_P_FIP +#define ETH_P_FIP 0x8914 /* FIP Ethertype */ +#endif + +#define FIP_DEF_PRI 128 /* default selection priority */ +#define FIP_DEF_FC_MAP 0x0efc00 /* default FCoE MAP (MAC OUI) value */ +#define FIP_DEF_FKA 8000 /* default FCF keep-alive/advert period (mS) */ +#define FIP_VN_KA_PERIOD 90000 /* required VN_port keep-alive period (mS) */ +#define FIP_FCF_FUZZ 100 /* random time added by FCF (mS) */ + +/* + * Multicast MAC addresses. T11-adopted. + */ +#define FIP_ALL_FCOE_MACS ((u8[6]) { 1, 0x10, 0x18, 1, 0, 0 }) +#define FIP_ALL_ENODE_MACS ((u8[6]) { 1, 0x10, 0x18, 1, 0, 1 }) +#define FIP_ALL_FCF_MACS ((u8[6]) { 1, 0x10, 0x18, 1, 0, 2 }) + +#define FIP_VER 1 /* version for fip_header */ + +struct fip_header { + __u8 fip_ver; /* upper 4 bits are the version */ + __u8 fip_resv1; /* reserved */ + __be16 fip_op; /* operation code */ + __u8 fip_resv2; /* reserved */ + __u8 fip_subcode; /* lower 4 bits are sub-code */ + __be16 fip_dl_len; /* length of descriptors in words */ + __be16 fip_flags; /* header flags */ +} __attribute__((packed)); + +#define FIP_VER_SHIFT 4 +#define FIP_VER_ENCAPS(v) ((v) << FIP_VER_SHIFT) +#define FIP_VER_DECAPS(v) ((v) >> FIP_VER_SHIFT) +#define FIP_BPW 4 /* bytes per word for lengths */ + +/* + * fip_op. + */ +enum fip_opcode { + FIP_OP_DISC = 1, /* discovery, advertisement, etc. */ + FIP_OP_LS = 2, /* Link Service request or reply */ + FIP_OP_CTRL = 3, /* Keep Alive / Link Reset */ + FIP_OP_VLAN = 4, /* VLAN discovery */ + FIP_OP_VENDOR_MIN = 0xfff8, /* min vendor-specific opcode */ + FIP_OP_VENDOR_MAX = 0xfffe, /* max vendor-specific opcode */ +}; + +/* + * Subcodes for FIP_OP_DISC. + */ +enum fip_disc_subcode { + FIP_SC_SOL = 1, /* solicitation */ + FIP_SC_ADV = 2, /* advertisement */ +}; + +/* + * Subcodes for FIP_OP_LS. + */ +enum fip_trans_subcode { + FIP_SC_REQ = 1, /* request */ + FIP_SC_REP = 2, /* reply */ +}; + +/* + * Subcodes for FIP_OP_RESET. + */ +enum fip_reset_subcode { + FIP_SC_KEEP_ALIVE = 1, /* keep-alive from VN_Port */ + FIP_SC_CLR_VLINK = 2, /* clear virtual link from VF_Port */ +}; + +/* + * Subcodes for FIP_OP_VLAN. + */ +enum fip_vlan_subcode { + FIP_SC_VL_REQ = 1, /* request */ + FIP_SC_VL_REP = 2, /* reply */ +}; + +/* + * flags in header fip_flags. + */ +enum fip_flag { + FIP_FL_FPMA = 0x8000, /* supports FPMA fabric-provided MACs */ + FIP_FL_SPMA = 0x4000, /* supports SPMA server-provided MACs */ + FIP_FL_AVAIL = 0x0004, /* available for FLOGI/ELP */ + FIP_FL_SOL = 0x0002, /* this is a solicited message */ + FIP_FL_FPORT = 0x0001, /* sent from an F port */ +}; + +/* + * Common descriptor header format. 
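A brief illustration of how the version and length macros above fit together when a keep-alive frame is built; this is only a sketch, and it assumes the descriptor payload length dlen is already a multiple of FIP_BPW:

#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>
#include <scsi/fc/fc_fip.h>

/* Fill a FIP header for a VN_Port keep-alive carrying 'dlen' bytes of
 * descriptors; on-the-wire lengths are expressed in 32-bit words. */
static void fip_fill_ka_header(struct fip_header *fh, size_t dlen)
{
        memset(fh, 0, sizeof(*fh));
        fh->fip_ver = FIP_VER_ENCAPS(FIP_VER);
        fh->fip_op = htons(FIP_OP_CTRL);
        fh->fip_subcode = FIP_SC_KEEP_ALIVE;
        fh->fip_dl_len = htons(dlen / FIP_BPW);
}
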
+ */ +struct fip_desc { + __u8 fip_dtype; /* type - see below */ + __u8 fip_dlen; /* length - in 32-bit words */ +}; + +enum fip_desc_type { + FIP_DT_PRI = 1, /* priority for forwarder selection */ + FIP_DT_MAC = 2, /* MAC address */ + FIP_DT_MAP_OUI = 3, /* FC-MAP OUI */ + FIP_DT_NAME = 4, /* switch name or node name */ + FIP_DT_FAB = 5, /* fabric descriptor */ + FIP_DT_FCOE_SIZE = 6, /* max FCoE frame size */ + FIP_DT_FLOGI = 7, /* FLOGI request or response */ + FIP_DT_FDISC = 8, /* FDISC request or response */ + FIP_DT_LOGO = 9, /* LOGO request or response */ + FIP_DT_ELP = 10, /* ELP request or response */ + FIP_DT_VN_ID = 11, /* VN_Node Identifier */ + FIP_DT_FKA = 12, /* advertisement keep-alive period */ + FIP_DT_VENDOR = 13, /* vendor ID */ + FIP_DT_VLAN = 14, /* vlan number */ + FIP_DT_LIMIT, /* max defined desc_type + 1 */ + FIP_DT_VENDOR_BASE = 128, /* first vendor-specific desc_type */ +}; + +/* + * FIP_DT_PRI - priority descriptor. + */ +struct fip_pri_desc { + struct fip_desc fd_desc; + __u8 fd_resvd; + __u8 fd_pri; /* FCF priority: higher is better */ +} __attribute__((packed)); + +/* + * FIP_DT_MAC - MAC address descriptor. + */ +struct fip_mac_desc { + struct fip_desc fd_desc; + __u8 fd_mac[ETH_ALEN]; +} __attribute__((packed)); + +/* + * FIP_DT_MAP - descriptor. + */ +struct fip_map_desc { + struct fip_desc fd_desc; + __u8 fd_resvd[3]; + __u8 fd_map[3]; +} __attribute__((packed)); + +/* + * FIP_DT_NAME descriptor. + */ +struct fip_wwn_desc { + struct fip_desc fd_desc; + __u8 fd_resvd[2]; + __be64 fd_wwn; /* 64-bit WWN, unaligned */ +} __attribute__((packed)); + +/* + * FIP_DT_FAB descriptor. + */ +struct fip_fab_desc { + struct fip_desc fd_desc; + __be16 fd_vfid; /* virtual fabric ID */ + __u8 fd_resvd; + __u8 fd_map[3]; /* FC-MAP value */ + __be64 fd_wwn; /* fabric name, unaligned */ +} __attribute__((packed)); + +/* + * FIP_DT_FCOE_SIZE descriptor. + */ +struct fip_size_desc { + struct fip_desc fd_desc; + __be16 fd_size; +} __attribute__((packed)); + +/* + * Descriptor that encapsulates an ELS or ILS frame. + * The encapsulated frame immediately follows this header, without + * SOF, EOF, or CRC. + */ +struct fip_encaps { + struct fip_desc fd_desc; + __u8 fd_resvd[2]; +} __attribute__((packed)); + +/* + * FIP_DT_VN_ID - VN_Node Identifier descriptor. + */ +struct fip_vn_desc { + struct fip_desc fd_desc; + __u8 fd_mac[ETH_ALEN]; + __u8 fd_resvd; + __u8 fd_fc_id[3]; + __be64 fd_wwpn; /* port name, unaligned */ +} __attribute__((packed)); + +/* + * FIP_DT_FKA - Advertisement keep-alive period. + */ +struct fip_fka_desc { + struct fip_desc fd_desc; + __u8 fd_resvd[2]; + __be32 fd_fka_period; /* adv./keep-alive period in mS */ +} __attribute__((packed)); + +/* + * FIP_DT_VENDOR descriptor. 
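Descriptors follow the FIP header back to back and are self-describing through fip_dtype/fip_dlen, so a receiver can walk them generically. A minimal sketch of such a walk, assuming fip_dlen counts 32-bit words and includes the descriptor header itself:

#include <linux/types.h>
#include <scsi/fc/fc_fip.h>

/* Walk 'len' bytes of descriptors starting at 'desc'. */
static void fip_walk_descs(const struct fip_desc *desc, size_t len)
{
        while (len >= sizeof(*desc)) {
                size_t dlen = desc->fip_dlen * FIP_BPW;

                if (dlen < sizeof(*desc) || dlen > len)
                        break;          /* malformed descriptor */
                switch (desc->fip_dtype) {
                case FIP_DT_MAC:
                        /* e.g. ((const struct fip_mac_desc *)desc)->fd_mac */
                        break;
                default:
                        break;          /* skip unknown types */
                }
                len -= dlen;
                desc = (const struct fip_desc *)((const u8 *)desc + dlen);
        }
}
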
+ */ +struct fip_vendor_desc { + struct fip_desc fd_desc; + __u8 fd_resvd[2]; + __u8 fd_vendor_id[8]; +} __attribute__((packed)); + +#endif /* _FC_FIP_H_ */ diff --git a/include/scsi/fc_transport_fcoe.h b/include/scsi/fc_transport_fcoe.h deleted file mode 100644 index 8dca2af14ff..00000000000 --- a/include/scsi/fc_transport_fcoe.h +++ /dev/null @@ -1,54 +0,0 @@ -#ifndef FC_TRANSPORT_FCOE_H -#define FC_TRANSPORT_FCOE_H - -#include <linux/device.h> -#include <linux/netdevice.h> -#include <scsi/scsi_host.h> -#include <scsi/libfc.h> - -/** - * struct fcoe_transport - FCoE transport struct for generic transport - * for Ethernet devices as well as pure HBAs - * - * @name: name for thsi transport - * @bus: physical bus type (pci_bus_type) - * @driver: physical bus driver for network device - * @create: entry create function - * @destroy: exit destroy function - * @list: list of transports - */ -struct fcoe_transport { - char *name; - unsigned short vendor; - unsigned short device; - struct bus_type *bus; - struct device_driver *driver; - int (*create)(struct net_device *device); - int (*destroy)(struct net_device *device); - bool (*match)(struct net_device *device); - struct list_head list; - struct list_head devlist; - struct mutex devlock; -}; - -/** - * MODULE_ALIAS_FCOE_PCI - * - * some care must be taken with this, vendor and device MUST be a hex value - * preceded with 0x and with letters in lower case (0x12ab, not 0x12AB or 12AB) - */ -#define MODULE_ALIAS_FCOE_PCI(vendor, device) \ - MODULE_ALIAS("fcoe-pci-" __stringify(vendor) "-" __stringify(device)) - -/* exported funcs */ -int fcoe_transport_attach(struct net_device *netdev); -int fcoe_transport_release(struct net_device *netdev); -int fcoe_transport_register(struct fcoe_transport *t); -int fcoe_transport_unregister(struct fcoe_transport *t); -int fcoe_load_transport_driver(struct net_device *netdev); -int __init fcoe_transport_init(void); -int __exit fcoe_transport_exit(void); - -/* fcow_sw is the default transport */ -extern struct fcoe_transport fcoe_sw_transport; -#endif /* FC_TRANSPORT_FCOE_H */ diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index a70eafaad08..0303a6a098c 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -22,6 +22,7 @@ #include <linux/timer.h> #include <linux/if.h> +#include <linux/percpu.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_fc.h> @@ -661,7 +662,8 @@ struct fc_lport { unsigned long boot_time; struct fc_host_statistics host_stats; - struct fcoe_dev_stats *dev_stats[NR_CPUS]; + struct fcoe_dev_stats *dev_stats; + u64 wwpn; u64 wwnn; u8 retry_count; @@ -694,11 +696,6 @@ struct fc_lport { /* * FC_LPORT HELPER FUNCTIONS *****************************/ -static inline void *lport_priv(const struct fc_lport *lp) -{ - return (void *)(lp + 1); -} - static inline int fc_lport_test_ready(struct fc_lport *lp) { return lp->state == LPORT_ST_READY; @@ -722,6 +719,42 @@ static inline void fc_lport_state_enter(struct fc_lport *lp, lp->state = state; } +static inline int fc_lport_init_stats(struct fc_lport *lp) +{ + /* allocate per cpu stats block */ + lp->dev_stats = alloc_percpu(struct fcoe_dev_stats); + if (!lp->dev_stats) + return -ENOMEM; + return 0; +} + +static inline void fc_lport_free_stats(struct fc_lport *lp) +{ + free_percpu(lp->dev_stats); +} + +static inline struct fcoe_dev_stats *fc_lport_get_stats(struct fc_lport *lp) +{ + return per_cpu_ptr(lp->dev_stats, smp_processor_id()); +} + +static inline void *lport_priv(const struct fc_lport *lp) +{ + return (void *)(lp 
+ 1); +} + +/** + * libfc_host_alloc() - Allocate a Scsi_Host with room for the fc_lport + * @sht: ptr to the scsi host templ + * @priv_size: size of private data after fc_lport + * + * Returns: ptr to Scsi_Host + */ +static inline struct Scsi_Host * +libfc_host_alloc(struct scsi_host_template *sht, int priv_size) +{ + return scsi_host_alloc(sht, sizeof(struct fc_lport) + priv_size); +} /* * LOCAL PORT LAYER diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h index c41f7d0c6ef..666cc131732 100644 --- a/include/scsi/libfcoe.h +++ b/include/scsi/libfcoe.h @@ -1,5 +1,6 @@ /* - * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved. + * Copyright (c) 2008-2009 Cisco Systems, Inc. All rights reserved. + * Copyright (c) 2007-2008 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -20,134 +21,144 @@ #ifndef _LIBFCOE_H #define _LIBFCOE_H +#include <linux/etherdevice.h> +#include <linux/if_ether.h> #include <linux/netdevice.h> #include <linux/skbuff.h> +#include <linux/workqueue.h> #include <scsi/fc/fc_fcoe.h> #include <scsi/libfc.h> /* - * this percpu struct for fcoe + * FIP tunable parameters. */ -struct fcoe_percpu_s { - int cpu; - struct task_struct *thread; - struct sk_buff_head fcoe_rx_list; - struct page *crc_eof_page; - int crc_eof_offset; +#define FCOE_CTLR_START_DELAY 2000 /* mS after first adv. to choose FCF */ +#define FCOE_CTRL_SOL_TOV 2000 /* min. solicitation interval (mS) */ +#define FCOE_CTLR_FCF_LIMIT 20 /* max. number of FCF entries */ + +/** + * enum fip_state - internal state of FCoE controller. + * @FIP_ST_DISABLED: controller has been disabled or not yet enabled. + * @FIP_ST_LINK_WAIT: the physical link is down or unusable. + * @FIP_ST_AUTO: determining whether to use FIP or non-FIP mode. + * @FIP_ST_NON_FIP: non-FIP mode selected. + * @FIP_ST_ENABLED: FIP mode selected. + */ +enum fip_state { + FIP_ST_DISABLED, + FIP_ST_LINK_WAIT, + FIP_ST_AUTO, + FIP_ST_NON_FIP, + FIP_ST_ENABLED, }; -/* - * the fcoe sw transport private data +/** + * struct fcoe_ctlr - FCoE Controller and FIP state. + * @state: internal FIP state for network link and FIP or non-FIP mode. + * @lp: &fc_lport: libfc local port. + * @sel_fcf: currently selected FCF, or NULL. + * @fcfs: list of discovered FCFs. + * @fcf_count: number of discovered FCF entries. + * @sol_time: time when a multicast solicitation was last sent. + * @sel_time: time after which to select an FCF. + * @port_ka_time: time of next port keep-alive. + * @ctlr_ka_time: time of next controller keep-alive. + * @timer: timer struct used for all delayed events. + * @link_work: &work_struct for doing FCF selection. + * @recv_work: &work_struct for receiving FIP frames. + * @fip_recv_list: list of received FIP frames. + * @user_mfs: configured maximum FC frame size, including FC header. + * @flogi_oxid: exchange ID of most recent fabric login. + * @flogi_count: number of FLOGI attempts in AUTO mode. + * @link: current link status for libfc. + * @last_link: last link state reported to libfc. + * @map_dest: use the FC_MAP mode for destination MAC addresses. + * @dest_addr: MAC address of the selected FC forwarder. + * @ctl_src_addr: the native MAC address of our local port. + * @data_src_addr: the assigned MAC address for the local port after FLOGI. + * @send: LLD-supplied function to handle sending of FIP Ethernet frames. + * @update_mac: LLD-supplied function to handle changes to MAC addresses. 
+ * @lock: lock protecting this structure. + * + * This structure is used by all FCoE drivers. It contains information + * needed by all FCoE low-level drivers (LLDs) as well as internal state + * for FIP, and fields shared with the LLDS. */ -struct fcoe_softc { - struct list_head list; +struct fcoe_ctlr { + enum fip_state state; struct fc_lport *lp; - struct net_device *real_dev; - struct net_device *phys_dev; /* device with ethtool_ops */ - struct packet_type fcoe_packet_type; - struct sk_buff_head fcoe_pending_queue; - u8 fcoe_pending_queue_active; - + struct fcoe_fcf *sel_fcf; + struct list_head fcfs; + u16 fcf_count; + unsigned long sol_time; + unsigned long sel_time; + unsigned long port_ka_time; + unsigned long ctlr_ka_time; + struct timer_list timer; + struct work_struct link_work; + struct work_struct recv_work; + struct sk_buff_head fip_recv_list; + u16 user_mfs; + u16 flogi_oxid; + u8 flogi_count; + u8 link; + u8 last_link; + u8 map_dest; u8 dest_addr[ETH_ALEN]; u8 ctl_src_addr[ETH_ALEN]; u8 data_src_addr[ETH_ALEN]; - /* - * fcoe protocol address learning related stuff - */ - u16 flogi_oxid; - u8 flogi_progress; - u8 address_mode; -}; - -static inline struct net_device *fcoe_netdev( - const struct fc_lport *lp) -{ - return ((struct fcoe_softc *)lport_priv(lp))->real_dev; -} - -static inline struct fcoe_hdr *skb_fcoe_header(const struct sk_buff *skb) -{ - return (struct fcoe_hdr *)skb_network_header(skb); -} - -static inline int skb_fcoe_offset(const struct sk_buff *skb) -{ - return skb_network_offset(skb); -} - -static inline struct fc_frame_header *skb_fc_header(const struct sk_buff *skb) -{ - return (struct fc_frame_header *)skb_transport_header(skb); -} - -static inline int skb_fc_offset(const struct sk_buff *skb) -{ - return skb_transport_offset(skb); -} -static inline void skb_reset_fc_header(struct sk_buff *skb) -{ - skb_reset_network_header(skb); - skb_set_transport_header(skb, skb_network_offset(skb) + - sizeof(struct fcoe_hdr)); -} - -static inline bool skb_fc_is_data(const struct sk_buff *skb) -{ - return skb_fc_header(skb)->fh_r_ctl == FC_RCTL_DD_SOL_DATA; -} - -static inline bool skb_fc_is_cmd(const struct sk_buff *skb) -{ - return skb_fc_header(skb)->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD; -} + void (*send)(struct fcoe_ctlr *, struct sk_buff *); + void (*update_mac)(struct fcoe_ctlr *, u8 *old, u8 *new); + spinlock_t lock; +}; -static inline bool skb_fc_has_exthdr(const struct sk_buff *skb) -{ - return (skb_fc_header(skb)->fh_r_ctl == FC_RCTL_VFTH) || - (skb_fc_header(skb)->fh_r_ctl == FC_RCTL_IFRH) || - (skb_fc_header(skb)->fh_r_ctl == FC_RCTL_ENCH); -} +/* + * struct fcoe_fcf - Fibre-Channel Forwarder. + * @list: list linkage. + * @time: system time (jiffies) when an advertisement was last received. + * @switch_name: WWN of switch from advertisement. + * @fabric_name: WWN of fabric from advertisement. + * @fc_map: FC_MAP value from advertisement. + * @fcf_mac: Ethernet address of the FCF. + * @vfid: virtual fabric ID. + * @pri: seletion priority, smaller values are better. + * @flags: flags received from advertisement. + * @fka_period: keep-alive period, in jiffies. + * + * A Fibre-Channel Forwarder (FCF) is the entity on the Ethernet that + * passes FCoE frames on to an FC fabric. This structure represents + * one FCF from which advertisements have been received. + * + * When looking up an FCF, @switch_name, @fabric_name, @fc_map, @vfid, and + * @fcf_mac together form the lookup key. 
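The fcoe_ctlr structure above is shared between libfcoe and the low-level driver: the LLD supplies the send/update_mac callbacks and then feeds link and frame events in through the fcoe_ctlr_* entry points declared further below. A rough sketch of that wiring, where fcoe_send_fip() and fcoe_set_mac() stand in for driver-provided functions whose bodies are omitted:

#include <scsi/libfcoe.h>

/* Driver-provided hooks: transmit a FIP frame on the wire, and react
 * to a MAC address change after fabric login (bodies omitted). */
static void fcoe_send_fip(struct fcoe_ctlr *fip, struct sk_buff *skb);
static void fcoe_set_mac(struct fcoe_ctlr *fip, u8 *old, u8 *new);

static void example_ctlr_setup(struct fcoe_ctlr *fip, struct fc_lport *lport)
{
        fcoe_ctlr_init(fip);            /* set up lists, lock and timer */
        fip->lp = lport;
        fip->send = fcoe_send_fip;
        fip->update_mac = fcoe_set_mac;

        /* later, typically from the netdev notifier path: */
        fcoe_ctlr_link_up(fip);
}
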
+ */ +struct fcoe_fcf { + struct list_head list; + unsigned long time; -static inline bool skb_fc_is_roff(const struct sk_buff *skb) -{ - return skb_fc_header(skb)->fh_f_ctl[2] & FC_FC_REL_OFF; -} + u64 switch_name; + u64 fabric_name; + u32 fc_map; + u16 vfid; + u8 fcf_mac[ETH_ALEN]; -static inline u16 skb_fc_oxid(const struct sk_buff *skb) -{ - return be16_to_cpu(skb_fc_header(skb)->fh_ox_id); -} + u8 pri; + u16 flags; + u32 fka_period; +}; -static inline u16 skb_fc_rxid(const struct sk_buff *skb) -{ - return be16_to_cpu(skb_fc_header(skb)->fh_rx_id); -} +/* FIP API functions */ +void fcoe_ctlr_init(struct fcoe_ctlr *); +void fcoe_ctlr_destroy(struct fcoe_ctlr *); +void fcoe_ctlr_link_up(struct fcoe_ctlr *); +int fcoe_ctlr_link_down(struct fcoe_ctlr *); +int fcoe_ctlr_els_send(struct fcoe_ctlr *, struct sk_buff *); +void fcoe_ctlr_recv(struct fcoe_ctlr *, struct sk_buff *); +int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *, struct fc_frame *fp, u8 *sa); /* libfcoe funcs */ -int fcoe_reset(struct Scsi_Host *shost); -u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN], - unsigned int scheme, unsigned int port); - -u32 fcoe_fc_crc(struct fc_frame *fp); -int fcoe_xmit(struct fc_lport *, struct fc_frame *); -int fcoe_rcv(struct sk_buff *, struct net_device *, - struct packet_type *, struct net_device *); - -int fcoe_percpu_receive_thread(void *arg); -void fcoe_clean_pending_queue(struct fc_lport *lp); -void fcoe_percpu_clean(struct fc_lport *lp); -void fcoe_watchdog(ulong vp); -int fcoe_link_ok(struct fc_lport *lp); - -struct fc_lport *fcoe_hostlist_lookup(const struct net_device *); -int fcoe_hostlist_add(const struct fc_lport *); -int fcoe_hostlist_remove(const struct fc_lport *); - -struct Scsi_Host *fcoe_host_alloc(struct scsi_host_template *, int); +u64 fcoe_wwn_from_mac(unsigned char mac[], unsigned int, unsigned int); int fcoe_libfc_config(struct fc_lport *, struct libfc_function_template *); -/* fcoe sw hba */ -int __init fcoe_sw_init(void); -int __exit fcoe_sw_exit(void); #endif /* _LIBFCOE_H */ diff --git a/include/trace/block.h b/include/trace/block.h index 25c6a1fd5b7..25b7068b819 100644 --- a/include/trace/block.h +++ b/include/trace/block.h @@ -5,72 +5,72 @@ #include <linux/tracepoint.h> DECLARE_TRACE(block_rq_abort, - TPPROTO(struct request_queue *q, struct request *rq), - TPARGS(q, rq)); + TP_PROTO(struct request_queue *q, struct request *rq), + TP_ARGS(q, rq)); DECLARE_TRACE(block_rq_insert, - TPPROTO(struct request_queue *q, struct request *rq), - TPARGS(q, rq)); + TP_PROTO(struct request_queue *q, struct request *rq), + TP_ARGS(q, rq)); DECLARE_TRACE(block_rq_issue, - TPPROTO(struct request_queue *q, struct request *rq), - TPARGS(q, rq)); + TP_PROTO(struct request_queue *q, struct request *rq), + TP_ARGS(q, rq)); DECLARE_TRACE(block_rq_requeue, - TPPROTO(struct request_queue *q, struct request *rq), - TPARGS(q, rq)); + TP_PROTO(struct request_queue *q, struct request *rq), + TP_ARGS(q, rq)); DECLARE_TRACE(block_rq_complete, - TPPROTO(struct request_queue *q, struct request *rq), - TPARGS(q, rq)); + TP_PROTO(struct request_queue *q, struct request *rq), + TP_ARGS(q, rq)); DECLARE_TRACE(block_bio_bounce, - TPPROTO(struct request_queue *q, struct bio *bio), - TPARGS(q, bio)); + TP_PROTO(struct request_queue *q, struct bio *bio), + TP_ARGS(q, bio)); DECLARE_TRACE(block_bio_complete, - TPPROTO(struct request_queue *q, struct bio *bio), - TPARGS(q, bio)); + TP_PROTO(struct request_queue *q, struct bio *bio), + TP_ARGS(q, bio)); DECLARE_TRACE(block_bio_backmerge, - TPPROTO(struct 
request_queue *q, struct bio *bio), - TPARGS(q, bio)); + TP_PROTO(struct request_queue *q, struct bio *bio), + TP_ARGS(q, bio)); DECLARE_TRACE(block_bio_frontmerge, - TPPROTO(struct request_queue *q, struct bio *bio), - TPARGS(q, bio)); + TP_PROTO(struct request_queue *q, struct bio *bio), + TP_ARGS(q, bio)); DECLARE_TRACE(block_bio_queue, - TPPROTO(struct request_queue *q, struct bio *bio), - TPARGS(q, bio)); + TP_PROTO(struct request_queue *q, struct bio *bio), + TP_ARGS(q, bio)); DECLARE_TRACE(block_getrq, - TPPROTO(struct request_queue *q, struct bio *bio, int rw), - TPARGS(q, bio, rw)); + TP_PROTO(struct request_queue *q, struct bio *bio, int rw), + TP_ARGS(q, bio, rw)); DECLARE_TRACE(block_sleeprq, - TPPROTO(struct request_queue *q, struct bio *bio, int rw), - TPARGS(q, bio, rw)); + TP_PROTO(struct request_queue *q, struct bio *bio, int rw), + TP_ARGS(q, bio, rw)); DECLARE_TRACE(block_plug, - TPPROTO(struct request_queue *q), - TPARGS(q)); + TP_PROTO(struct request_queue *q), + TP_ARGS(q)); DECLARE_TRACE(block_unplug_timer, - TPPROTO(struct request_queue *q), - TPARGS(q)); + TP_PROTO(struct request_queue *q), + TP_ARGS(q)); DECLARE_TRACE(block_unplug_io, - TPPROTO(struct request_queue *q), - TPARGS(q)); + TP_PROTO(struct request_queue *q), + TP_ARGS(q)); DECLARE_TRACE(block_split, - TPPROTO(struct request_queue *q, struct bio *bio, unsigned int pdu), - TPARGS(q, bio, pdu)); + TP_PROTO(struct request_queue *q, struct bio *bio, unsigned int pdu), + TP_ARGS(q, bio, pdu)); DECLARE_TRACE(block_remap, - TPPROTO(struct request_queue *q, struct bio *bio, dev_t dev, - sector_t from, sector_t to), - TPARGS(q, bio, dev, from, to)); + TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev, + sector_t from, sector_t to), + TP_ARGS(q, bio, dev, from, to)); #endif diff --git a/include/trace/irq.h b/include/trace/irq.h new file mode 100644 index 00000000000..ff5d4495dc3 --- /dev/null +++ b/include/trace/irq.h @@ -0,0 +1,9 @@ +#ifndef _TRACE_IRQ_H +#define _TRACE_IRQ_H + +#include <linux/interrupt.h> +#include <linux/tracepoint.h> + +#include <trace/irq_event_types.h> + +#endif diff --git a/include/trace/irq_event_types.h b/include/trace/irq_event_types.h new file mode 100644 index 00000000000..85964ebd47e --- /dev/null +++ b/include/trace/irq_event_types.h @@ -0,0 +1,55 @@ + +/* use <trace/irq.h> instead */ +#ifndef TRACE_FORMAT +# error Do not include this file directly. +# error Unless you know what you are doing. +#endif + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM irq + +/* + * Tracepoint for entry of interrupt handler: + */ +TRACE_FORMAT(irq_handler_entry, + TP_PROTO(int irq, struct irqaction *action), + TP_ARGS(irq, action), + TP_FMT("irq=%d handler=%s", irq, action->name) + ); + +/* + * Tracepoint for return of an interrupt handler: + */ +TRACE_EVENT(irq_handler_exit, + + TP_PROTO(int irq, struct irqaction *action, int ret), + + TP_ARGS(irq, action, ret), + + TP_STRUCT__entry( + __field( int, irq ) + __field( int, ret ) + ), + + TP_fast_assign( + __entry->irq = irq; + __entry->ret = ret; + ), + + TP_printk("irq=%d return=%s", + __entry->irq, __entry->ret ? 
"handled" : "unhandled") +); + +TRACE_FORMAT(softirq_entry, + TP_PROTO(struct softirq_action *h, struct softirq_action *vec), + TP_ARGS(h, vec), + TP_FMT("softirq=%d action=%s", (int)(h - vec), softirq_to_name[h-vec]) + ); + +TRACE_FORMAT(softirq_exit, + TP_PROTO(struct softirq_action *h, struct softirq_action *vec), + TP_ARGS(h, vec), + TP_FMT("softirq=%d action=%s", (int)(h - vec), softirq_to_name[h-vec]) + ); + +#undef TRACE_SYSTEM diff --git a/include/trace/kmemtrace.h b/include/trace/kmemtrace.h new file mode 100644 index 00000000000..28ee69f9cd4 --- /dev/null +++ b/include/trace/kmemtrace.h @@ -0,0 +1,63 @@ +/* + * Copyright (C) 2008 Eduard - Gabriel Munteanu + * + * This file is released under GPL version 2. + */ + +#ifndef _LINUX_KMEMTRACE_H +#define _LINUX_KMEMTRACE_H + +#ifdef __KERNEL__ + +#include <linux/tracepoint.h> +#include <linux/types.h> + +#ifdef CONFIG_KMEMTRACE +extern void kmemtrace_init(void); +#else +static inline void kmemtrace_init(void) +{ +} +#endif + +DECLARE_TRACE(kmalloc, + TP_PROTO(unsigned long call_site, + const void *ptr, + size_t bytes_req, + size_t bytes_alloc, + gfp_t gfp_flags), + TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)); +DECLARE_TRACE(kmem_cache_alloc, + TP_PROTO(unsigned long call_site, + const void *ptr, + size_t bytes_req, + size_t bytes_alloc, + gfp_t gfp_flags), + TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)); +DECLARE_TRACE(kmalloc_node, + TP_PROTO(unsigned long call_site, + const void *ptr, + size_t bytes_req, + size_t bytes_alloc, + gfp_t gfp_flags, + int node), + TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)); +DECLARE_TRACE(kmem_cache_alloc_node, + TP_PROTO(unsigned long call_site, + const void *ptr, + size_t bytes_req, + size_t bytes_alloc, + gfp_t gfp_flags, + int node), + TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)); +DECLARE_TRACE(kfree, + TP_PROTO(unsigned long call_site, const void *ptr), + TP_ARGS(call_site, ptr)); +DECLARE_TRACE(kmem_cache_free, + TP_PROTO(unsigned long call_site, const void *ptr), + TP_ARGS(call_site, ptr)); + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_KMEMTRACE_H */ + diff --git a/include/trace/lockdep.h b/include/trace/lockdep.h new file mode 100644 index 00000000000..5ca67df87f2 --- /dev/null +++ b/include/trace/lockdep.h @@ -0,0 +1,9 @@ +#ifndef _TRACE_LOCKDEP_H +#define _TRACE_LOCKDEP_H + +#include <linux/lockdep.h> +#include <linux/tracepoint.h> + +#include <trace/lockdep_event_types.h> + +#endif diff --git a/include/trace/lockdep_event_types.h b/include/trace/lockdep_event_types.h new file mode 100644 index 00000000000..adccfcd2ec8 --- /dev/null +++ b/include/trace/lockdep_event_types.h @@ -0,0 +1,44 @@ + +#ifndef TRACE_FORMAT +# error Do not include this file directly. +# error Unless you know what you are doing. +#endif + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM lock + +#ifdef CONFIG_LOCKDEP + +TRACE_FORMAT(lock_acquire, + TP_PROTO(struct lockdep_map *lock, unsigned int subclass, + int trylock, int read, int check, + struct lockdep_map *next_lock, unsigned long ip), + TP_ARGS(lock, subclass, trylock, read, check, next_lock, ip), + TP_FMT("%s%s%s", trylock ? "try " : "", + read ? 
"read " : "", lock->name) + ); + +TRACE_FORMAT(lock_release, + TP_PROTO(struct lockdep_map *lock, int nested, unsigned long ip), + TP_ARGS(lock, nested, ip), + TP_FMT("%s", lock->name) + ); + +#ifdef CONFIG_LOCK_STAT + +TRACE_FORMAT(lock_contended, + TP_PROTO(struct lockdep_map *lock, unsigned long ip), + TP_ARGS(lock, ip), + TP_FMT("%s", lock->name) + ); + +TRACE_FORMAT(lock_acquired, + TP_PROTO(struct lockdep_map *lock, unsigned long ip), + TP_ARGS(lock, ip), + TP_FMT("%s", lock->name) + ); + +#endif +#endif + +#undef TRACE_SYSTEM diff --git a/include/trace/power.h b/include/trace/power.h new file mode 100644 index 00000000000..ef204666e98 --- /dev/null +++ b/include/trace/power.h @@ -0,0 +1,32 @@ +#ifndef _TRACE_POWER_H +#define _TRACE_POWER_H + +#include <linux/ktime.h> +#include <linux/tracepoint.h> + +enum { + POWER_NONE = 0, + POWER_CSTATE = 1, + POWER_PSTATE = 2, +}; + +struct power_trace { + ktime_t stamp; + ktime_t end; + int type; + int state; +}; + +DECLARE_TRACE(power_start, + TP_PROTO(struct power_trace *it, unsigned int type, unsigned int state), + TP_ARGS(it, type, state)); + +DECLARE_TRACE(power_mark, + TP_PROTO(struct power_trace *it, unsigned int type, unsigned int state), + TP_ARGS(it, type, state)); + +DECLARE_TRACE(power_end, + TP_PROTO(struct power_trace *it), + TP_ARGS(it)); + +#endif /* _TRACE_POWER_H */ diff --git a/include/trace/sched.h b/include/trace/sched.h index 0d81098ee9f..4e372a1a29b 100644 --- a/include/trace/sched.h +++ b/include/trace/sched.h @@ -4,53 +4,6 @@ #include <linux/sched.h> #include <linux/tracepoint.h> -DECLARE_TRACE(sched_kthread_stop, - TPPROTO(struct task_struct *t), - TPARGS(t)); - -DECLARE_TRACE(sched_kthread_stop_ret, - TPPROTO(int ret), - TPARGS(ret)); - -DECLARE_TRACE(sched_wait_task, - TPPROTO(struct rq *rq, struct task_struct *p), - TPARGS(rq, p)); - -DECLARE_TRACE(sched_wakeup, - TPPROTO(struct rq *rq, struct task_struct *p, int success), - TPARGS(rq, p, success)); - -DECLARE_TRACE(sched_wakeup_new, - TPPROTO(struct rq *rq, struct task_struct *p, int success), - TPARGS(rq, p, success)); - -DECLARE_TRACE(sched_switch, - TPPROTO(struct rq *rq, struct task_struct *prev, - struct task_struct *next), - TPARGS(rq, prev, next)); - -DECLARE_TRACE(sched_migrate_task, - TPPROTO(struct task_struct *p, int orig_cpu, int dest_cpu), - TPARGS(p, orig_cpu, dest_cpu)); - -DECLARE_TRACE(sched_process_free, - TPPROTO(struct task_struct *p), - TPARGS(p)); - -DECLARE_TRACE(sched_process_exit, - TPPROTO(struct task_struct *p), - TPARGS(p)); - -DECLARE_TRACE(sched_process_wait, - TPPROTO(struct pid *pid), - TPARGS(pid)); - -DECLARE_TRACE(sched_process_fork, - TPPROTO(struct task_struct *parent, struct task_struct *child), - TPARGS(parent, child)); - -DECLARE_TRACE(sched_signal_send, - TPPROTO(int sig, struct task_struct *p), - TPARGS(sig, p)); +#include <trace/sched_event_types.h> #endif diff --git a/include/trace/sched_event_types.h b/include/trace/sched_event_types.h new file mode 100644 index 00000000000..63547dc1125 --- /dev/null +++ b/include/trace/sched_event_types.h @@ -0,0 +1,337 @@ + +/* use <trace/sched.h> instead */ +#ifndef TRACE_EVENT +# error Do not include this file directly. +# error Unless you know what you are doing. 
+#endif + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM sched + +/* + * Tracepoint for calling kthread_stop, performed to end a kthread: + */ +TRACE_EVENT(sched_kthread_stop, + + TP_PROTO(struct task_struct *t), + + TP_ARGS(t), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + ), + + TP_fast_assign( + memcpy(__entry->comm, t->comm, TASK_COMM_LEN); + __entry->pid = t->pid; + ), + + TP_printk("task %s:%d", __entry->comm, __entry->pid) +); + +/* + * Tracepoint for the return value of the kthread stopping: + */ +TRACE_EVENT(sched_kthread_stop_ret, + + TP_PROTO(int ret), + + TP_ARGS(ret), + + TP_STRUCT__entry( + __field( int, ret ) + ), + + TP_fast_assign( + __entry->ret = ret; + ), + + TP_printk("ret %d", __entry->ret) +); + +/* + * Tracepoint for waiting on task to unschedule: + * + * (NOTE: the 'rq' argument is not used by generic trace events, + * but used by the latency tracer plugin. ) + */ +TRACE_EVENT(sched_wait_task, + + TP_PROTO(struct rq *rq, struct task_struct *p), + + TP_ARGS(rq, p), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + __field( int, prio ) + ), + + TP_fast_assign( + memcpy(__entry->comm, p->comm, TASK_COMM_LEN); + __entry->pid = p->pid; + __entry->prio = p->prio; + ), + + TP_printk("task %s:%d [%d]", + __entry->comm, __entry->pid, __entry->prio) +); + +/* + * Tracepoint for waking up a task: + * + * (NOTE: the 'rq' argument is not used by generic trace events, + * but used by the latency tracer plugin. ) + */ +TRACE_EVENT(sched_wakeup, + + TP_PROTO(struct rq *rq, struct task_struct *p, int success), + + TP_ARGS(rq, p, success), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + __field( int, prio ) + __field( int, success ) + ), + + TP_fast_assign( + memcpy(__entry->comm, p->comm, TASK_COMM_LEN); + __entry->pid = p->pid; + __entry->prio = p->prio; + __entry->success = success; + ), + + TP_printk("task %s:%d [%d] success=%d", + __entry->comm, __entry->pid, __entry->prio, + __entry->success) +); + +/* + * Tracepoint for waking up a new task: + * + * (NOTE: the 'rq' argument is not used by generic trace events, + * but used by the latency tracer plugin. ) + */ +TRACE_EVENT(sched_wakeup_new, + + TP_PROTO(struct rq *rq, struct task_struct *p, int success), + + TP_ARGS(rq, p, success), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + __field( int, prio ) + __field( int, success ) + ), + + TP_fast_assign( + memcpy(__entry->comm, p->comm, TASK_COMM_LEN); + __entry->pid = p->pid; + __entry->prio = p->prio; + __entry->success = success; + ), + + TP_printk("task %s:%d [%d] success=%d", + __entry->comm, __entry->pid, __entry->prio, + __entry->success) +); + +/* + * Tracepoint for task switches, performed by the scheduler: + * + * (NOTE: the 'rq' argument is not used by generic trace events, + * but used by the latency tracer plugin. 
) + */ +TRACE_EVENT(sched_switch, + + TP_PROTO(struct rq *rq, struct task_struct *prev, + struct task_struct *next), + + TP_ARGS(rq, prev, next), + + TP_STRUCT__entry( + __array( char, prev_comm, TASK_COMM_LEN ) + __field( pid_t, prev_pid ) + __field( int, prev_prio ) + __array( char, next_comm, TASK_COMM_LEN ) + __field( pid_t, next_pid ) + __field( int, next_prio ) + ), + + TP_fast_assign( + memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN); + __entry->prev_pid = prev->pid; + __entry->prev_prio = prev->prio; + memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN); + __entry->next_pid = next->pid; + __entry->next_prio = next->prio; + ), + + TP_printk("task %s:%d [%d] ==> %s:%d [%d]", + __entry->prev_comm, __entry->prev_pid, __entry->prev_prio, + __entry->next_comm, __entry->next_pid, __entry->next_prio) +); + +/* + * Tracepoint for a task being migrated: + */ +TRACE_EVENT(sched_migrate_task, + + TP_PROTO(struct task_struct *p, int orig_cpu, int dest_cpu), + + TP_ARGS(p, orig_cpu, dest_cpu), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + __field( int, prio ) + __field( int, orig_cpu ) + __field( int, dest_cpu ) + ), + + TP_fast_assign( + memcpy(__entry->comm, p->comm, TASK_COMM_LEN); + __entry->pid = p->pid; + __entry->prio = p->prio; + __entry->orig_cpu = orig_cpu; + __entry->dest_cpu = dest_cpu; + ), + + TP_printk("task %s:%d [%d] from: %d to: %d", + __entry->comm, __entry->pid, __entry->prio, + __entry->orig_cpu, __entry->dest_cpu) +); + +/* + * Tracepoint for freeing a task: + */ +TRACE_EVENT(sched_process_free, + + TP_PROTO(struct task_struct *p), + + TP_ARGS(p), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + __field( int, prio ) + ), + + TP_fast_assign( + memcpy(__entry->comm, p->comm, TASK_COMM_LEN); + __entry->pid = p->pid; + __entry->prio = p->prio; + ), + + TP_printk("task %s:%d [%d]", + __entry->comm, __entry->pid, __entry->prio) +); + +/* + * Tracepoint for a task exiting: + */ +TRACE_EVENT(sched_process_exit, + + TP_PROTO(struct task_struct *p), + + TP_ARGS(p), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + __field( int, prio ) + ), + + TP_fast_assign( + memcpy(__entry->comm, p->comm, TASK_COMM_LEN); + __entry->pid = p->pid; + __entry->prio = p->prio; + ), + + TP_printk("task %s:%d [%d]", + __entry->comm, __entry->pid, __entry->prio) +); + +/* + * Tracepoint for a waiting task: + */ +TRACE_EVENT(sched_process_wait, + + TP_PROTO(struct pid *pid), + + TP_ARGS(pid), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + __field( int, prio ) + ), + + TP_fast_assign( + memcpy(__entry->comm, current->comm, TASK_COMM_LEN); + __entry->pid = pid_nr(pid); + __entry->prio = current->prio; + ), + + TP_printk("task %s:%d [%d]", + __entry->comm, __entry->pid, __entry->prio) +); + +/* + * Tracepoint for do_fork: + */ +TRACE_EVENT(sched_process_fork, + + TP_PROTO(struct task_struct *parent, struct task_struct *child), + + TP_ARGS(parent, child), + + TP_STRUCT__entry( + __array( char, parent_comm, TASK_COMM_LEN ) + __field( pid_t, parent_pid ) + __array( char, child_comm, TASK_COMM_LEN ) + __field( pid_t, child_pid ) + ), + + TP_fast_assign( + memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN); + __entry->parent_pid = parent->pid; + memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN); + __entry->child_pid = child->pid; + ), + + TP_printk("parent %s:%d child %s:%d", + __entry->parent_comm, __entry->parent_pid, + 
__entry->child_comm, __entry->child_pid) +); + +/* + * Tracepoint for sending a signal: + */ +TRACE_EVENT(sched_signal_send, + + TP_PROTO(int sig, struct task_struct *p), + + TP_ARGS(sig, p), + + TP_STRUCT__entry( + __field( int, sig ) + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + ), + + TP_fast_assign( + memcpy(__entry->comm, p->comm, TASK_COMM_LEN); + __entry->pid = p->pid; + __entry->sig = sig; + ), + + TP_printk("sig: %d task %s:%d", + __entry->sig, __entry->comm, __entry->pid) +); + +#undef TRACE_SYSTEM diff --git a/include/trace/skb.h b/include/trace/skb.h index a96610f92f6..b66206d9be7 100644 --- a/include/trace/skb.h +++ b/include/trace/skb.h @@ -5,7 +5,7 @@ #include <linux/tracepoint.h> DECLARE_TRACE(kfree_skb, - TPPROTO(struct sk_buff *skb, void *location), - TPARGS(skb, location)); + TP_PROTO(struct sk_buff *skb, void *location), + TP_ARGS(skb, location)); #endif diff --git a/include/trace/trace_event_types.h b/include/trace/trace_event_types.h new file mode 100644 index 00000000000..df56f5694be --- /dev/null +++ b/include/trace/trace_event_types.h @@ -0,0 +1,5 @@ +/* trace/<type>_event_types.h here */ + +#include <trace/sched_event_types.h> +#include <trace/irq_event_types.h> +#include <trace/lockdep_event_types.h> diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h new file mode 100644 index 00000000000..fd13750ca4b --- /dev/null +++ b/include/trace/trace_events.h @@ -0,0 +1,5 @@ +/* trace/<type>.h here */ + +#include <trace/sched.h> +#include <trace/irq.h> +#include <trace/lockdep.h> diff --git a/include/trace/workqueue.h b/include/trace/workqueue.h new file mode 100644 index 00000000000..7626523deeb --- /dev/null +++ b/include/trace/workqueue.h @@ -0,0 +1,25 @@ +#ifndef __TRACE_WORKQUEUE_H +#define __TRACE_WORKQUEUE_H + +#include <linux/tracepoint.h> +#include <linux/workqueue.h> +#include <linux/sched.h> + +DECLARE_TRACE(workqueue_insertion, + TP_PROTO(struct task_struct *wq_thread, struct work_struct *work), + TP_ARGS(wq_thread, work)); + +DECLARE_TRACE(workqueue_execution, + TP_PROTO(struct task_struct *wq_thread, struct work_struct *work), + TP_ARGS(wq_thread, work)); + +/* Trace the creation of one workqueue thread on a cpu */ +DECLARE_TRACE(workqueue_creation, + TP_PROTO(struct task_struct *wq_thread, int cpu), + TP_ARGS(wq_thread, cpu)); + +DECLARE_TRACE(workqueue_destruction, + TP_PROTO(struct task_struct *wq_thread), + TP_ARGS(wq_thread)); + +#endif /* __TRACE_WORKQUEUE_H */ diff --git a/include/video/tdfx.h b/include/video/tdfx.h index 7431d9681e5..befbaf0a92d 100644 --- a/include/video/tdfx.h +++ b/include/video/tdfx.h @@ -1,6 +1,9 @@ #ifndef _TDFX_H #define _TDFX_H +#include <linux/i2c.h> +#include <linux/i2c-algo-bit.h> + /* membase0 register offsets */ #define STATUS 0x00 #define PCIINIT0 0x04 @@ -123,6 +126,18 @@ #define VIDCFG_PIXFMT_SHIFT 18 #define DACMODE_2X BIT(0) +/* I2C bit locations in the VIDSERPARPORT register */ +#define DDC_ENAB 0x00040000 +#define DDC_SCL_OUT 0x00080000 +#define DDC_SDA_OUT 0x00100000 +#define DDC_SCL_IN 0x00200000 +#define DDC_SDA_IN 0x00400000 +#define I2C_ENAB 0x00800000 +#define I2C_SCL_OUT 0x01000000 +#define I2C_SDA_OUT 0x02000000 +#define I2C_SCL_IN 0x04000000 +#define I2C_SDA_IN 0x08000000 + /* VGA rubbish, need to change this for multihead support */ #define MISC_W 0x3c2 #define MISC_R 0x3cc @@ -168,12 +183,23 @@ struct banshee_reg { unsigned long miscinit0; }; +struct tdfx_par; + +struct tdfxfb_i2c_chan { + struct tdfx_par *par; + struct i2c_adapter adapter; + struct 
i2c_algo_bit_data algo; +}; + struct tdfx_par { u32 max_pixclock; u32 palette[16]; void __iomem *regbase_virt; unsigned long iobase; int mtrr_handle; +#ifdef CONFIG_FB_3DFX_I2C + struct tdfxfb_i2c_chan chan[2]; +#endif }; #endif /* __KERNEL__ */
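
The libfcoe.h hunk above replaces the old fcoe_softc and skb helpers with the fcoe_ctlr FIP controller and its callback-based API. As a rough illustration only (not part of the patch), the sketch below shows how a low-level driver might wire that API up; struct example_port, example_hw_xmit() and the probe-style helpers are hypothetical stand-ins for driver-specific code, while the fcoe_ctlr_* calls and the send/update_mac callback signatures are taken from the header itself.

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <scsi/libfcoe.h>

struct example_port {
	struct fcoe_ctlr ctlr;		/* FIP controller state */
	struct net_device *netdev;
};

/* Hypothetical driver helper: push a frame out on the wire. */
static void example_hw_xmit(struct example_port *port, struct sk_buff *skb)
{
	skb->dev = port->netdev;
	dev_queue_xmit(skb);
}

/* LLD callback used by libfcoe to transmit FIP Ethernet frames. */
static void example_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
	example_hw_xmit(container_of(fip, struct example_port, ctlr), skb);
}

/* LLD callback used by libfcoe when the port's FCoE MAC address changes. */
static void example_update_mac(struct fcoe_ctlr *fip, u8 *old, u8 *new)
{
	/* hypothetical: reprogram hardware unicast filters here */
}

static void example_port_setup(struct example_port *port)
{
	fcoe_ctlr_init(&port->ctlr);		/* initialise FIP state */
	port->ctlr.send = example_fip_send;
	port->ctlr.update_mac = example_update_mac;
	fcoe_ctlr_link_up(&port->ctlr);		/* physical link is usable */
}

/* Received FIP frames are queued to the controller's recv_work... */
static void example_fip_rx(struct example_port *port, struct sk_buff *skb)
{
	fcoe_ctlr_recv(&port->ctlr, skb);
}

/* ...and outgoing ELS frames are shown to FIP before FCoE encapsulation. */
static int example_els_xmit(struct example_port *port, struct sk_buff *skb)
{
	return fcoe_ctlr_els_send(&port->ctlr, skb);
}

static void example_port_teardown(struct example_port *port)
{
	fcoe_ctlr_link_down(&port->ctlr);
	fcoe_ctlr_destroy(&port->ctlr);
}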
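
Similarly illustrative, and likewise not part of the patch: the new DECLARE_TRACE() sites such as those in include/trace/workqueue.h are consumed by registering a probe whose signature matches the TP_PROTO() declaration. The module below is a minimal sketch, assuming the register_trace_<name>()/unregister_trace_<name>() helpers that DECLARE_TRACE() generates in this kernel; the probe body itself is arbitrary.

#include <linux/module.h>
#include <linux/sched.h>
#include <trace/workqueue.h>

/* Probe invoked whenever a workqueue thread is created on a CPU. */
static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
{
	pr_info("workqueue thread %s created on cpu %d\n",
		wq_thread->comm, cpu);
}

static int __init wq_trace_example_init(void)
{
	return register_trace_workqueue_creation(probe_workqueue_creation);
}

static void __exit wq_trace_example_exit(void)
{
	unregister_trace_workqueue_creation(probe_workqueue_creation);
	tracepoint_synchronize_unregister();	/* wait for in-flight probes */
}

module_init(wq_trace_example_init);
module_exit(wq_trace_example_exit);
MODULE_LICENSE("GPL");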