Diffstat (limited to 'drivers')
251 files changed, 17411 insertions, 11337 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index bba867391a8..735f5ea1747 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -336,6 +336,15 @@ config ACPI_EC
 	  the battery and thermal drivers. If you are compiling for a
 	  mobile system, say Y.
 
+config ACPI_PCI_SLOT
+	tristate "PCI slot detection driver"
+	default n
+	help
+	  This driver will attempt to discover all PCI slots in your system,
+	  and creates entries in /sys/bus/pci/slots/. This feature can
+	  help you correlate PCI bus addresses with the physical geography
+	  of your slots. If you are unsure, say N.
+
 config ACPI_POWER
 	bool
 	default y
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 40b0fcae4c7..52a4cd4b81d 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -21,7 +21,7 @@ obj-$(CONFIG_X86) += blacklist.o
 #
 # ACPI Core Subsystem (Interpreter)
 #
-obj-y	+= osl.o utils.o \
+obj-y	+= osl.o utils.o reboot.o\
 	dispatcher/ events/ executer/ hardware/ \
 	namespace/ parser/ resources/ tables/ \
 	utilities/
@@ -48,6 +48,7 @@ obj-$(CONFIG_ACPI_DOCK) += dock.o
 obj-$(CONFIG_ACPI_BAY)	+= bay.o
 obj-$(CONFIG_ACPI_VIDEO)	+= video.o
 obj-y	+= pci_root.o pci_link.o pci_irq.o pci_bind.o
+obj-$(CONFIG_ACPI_PCI_SLOT)	+= pci_slot.o
 obj-$(CONFIG_ACPI_POWER)	+= power.o
 obj-$(CONFIG_ACPI_PROCESSOR)	+= processor.o
 obj-$(CONFIG_ACPI_CONTAINER)	+= container.o
diff --git a/drivers/acpi/bay.c b/drivers/acpi/bay.c
index 61b6c5beb2d..e6caf5d42e0 100644
--- a/drivers/acpi/bay.c
+++ b/drivers/acpi/bay.c
@@ -380,6 +380,9 @@ static int __init bay_init(void)
 	if (acpi_disabled)
 		return -ENODEV;
 
+	if (acpi_disabled)
+		return -ENODEV;
+
 	/* look for dockable drive bays */
 	acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
 			ACPI_UINT32_MAX, find_bay, &bays, NULL);
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index a6dbcf4d9ef..ccae305ee55 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -295,6 +295,28 @@ int acpi_bus_set_power(acpi_handle handle, int state)
 
 EXPORT_SYMBOL(acpi_bus_set_power);
 
+bool acpi_bus_power_manageable(acpi_handle handle)
+{
+	struct acpi_device *device;
+	int result;
+
+	result = acpi_bus_get_device(handle, &device);
+	return result ? false : device->flags.power_manageable;
+}
+
+EXPORT_SYMBOL(acpi_bus_power_manageable);
+
+bool acpi_bus_can_wakeup(acpi_handle handle)
+{
+	struct acpi_device *device;
+	int result;
+
+	result = acpi_bus_get_device(handle, &device);
+	return result ?
false : device->wakeup.flags.valid; +} + +EXPORT_SYMBOL(acpi_bus_can_wakeup); + /* -------------------------------------------------------------------------- Event Management -------------------------------------------------------------------------- */ @@ -612,7 +634,7 @@ static int __init acpi_bus_init_irq(void) return 0; } -acpi_native_uint acpi_gbl_permanent_mmap; +u8 acpi_gbl_permanent_mmap; void __init acpi_early_init(void) diff --git a/drivers/acpi/dispatcher/dsinit.c b/drivers/acpi/dispatcher/dsinit.c index 610b1ee102b..949f7c75029 100644 --- a/drivers/acpi/dispatcher/dsinit.c +++ b/drivers/acpi/dispatcher/dsinit.c @@ -151,7 +151,7 @@ acpi_ds_init_one_object(acpi_handle obj_handle, ******************************************************************************/ acpi_status -acpi_ds_initialize_objects(acpi_native_uint table_index, +acpi_ds_initialize_objects(u32 table_index, struct acpi_namespace_node * start_node) { acpi_status status; diff --git a/drivers/acpi/dispatcher/dsmethod.c b/drivers/acpi/dispatcher/dsmethod.c index 2509809a36c..4613b9ca579 100644 --- a/drivers/acpi/dispatcher/dsmethod.c +++ b/drivers/acpi/dispatcher/dsmethod.c @@ -377,7 +377,6 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread, } info->parameters = &this_walk_state->operands[0]; - info->parameter_type = ACPI_PARAM_ARGS; status = acpi_ds_init_aml_walk(next_walk_state, NULL, method_node, obj_desc->method.aml_start, diff --git a/drivers/acpi/dispatcher/dsopcode.c b/drivers/acpi/dispatcher/dsopcode.c index a818e0ddb99..6a81c4400ed 100644 --- a/drivers/acpi/dispatcher/dsopcode.c +++ b/drivers/acpi/dispatcher/dsopcode.c @@ -691,12 +691,6 @@ acpi_ds_eval_buffer_field_operands(struct acpi_walk_state *walk_state, status = acpi_ex_resolve_operands(op->common.aml_opcode, ACPI_WALK_OPERANDS, walk_state); - - ACPI_DUMP_OPERANDS(ACPI_WALK_OPERANDS, ACPI_IMODE_EXECUTE, - acpi_ps_get_opcode_name(op->common.aml_opcode), - walk_state->num_operands, - "after AcpiExResolveOperands"); - if (ACPI_FAILURE(status)) { ACPI_ERROR((AE_INFO, "(%s) bad operand(s) (%X)", acpi_ps_get_opcode_name(op->common.aml_opcode), @@ -785,10 +779,6 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state, return_ACPI_STATUS(status); } - ACPI_DUMP_OPERANDS(ACPI_WALK_OPERANDS, ACPI_IMODE_EXECUTE, - acpi_ps_get_opcode_name(op->common.aml_opcode), - 1, "after AcpiExResolveOperands"); - obj_desc = acpi_ns_get_attached_object(node); if (!obj_desc) { return_ACPI_STATUS(AE_NOT_EXIST); @@ -848,7 +838,7 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state, union acpi_operand_object **operand; struct acpi_namespace_node *node; union acpi_parse_object *next_op; - acpi_native_uint table_index; + u32 table_index; struct acpi_table_header *table; ACPI_FUNCTION_TRACE_PTR(ds_eval_table_region_operands, op); @@ -882,10 +872,6 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state, return_ACPI_STATUS(status); } - ACPI_DUMP_OPERANDS(ACPI_WALK_OPERANDS, ACPI_IMODE_EXECUTE, - acpi_ps_get_opcode_name(op->common.aml_opcode), - 1, "after AcpiExResolveOperands"); - operand = &walk_state->operands[0]; /* Find the ACPI table */ @@ -1091,10 +1077,8 @@ acpi_ds_eval_bank_field_operands(struct acpi_walk_state *walk_state, return_ACPI_STATUS(status); } - ACPI_DUMP_OPERANDS(ACPI_WALK_OPERANDS, ACPI_IMODE_EXECUTE, - acpi_ps_get_opcode_name(op->common.aml_opcode), - 1, "after AcpiExResolveOperands"); - + ACPI_DUMP_OPERANDS(ACPI_WALK_OPERANDS, + acpi_ps_get_opcode_name(op->common.aml_opcode), 1); /* * Get the bank_value 
operand and save it * (at Top of stack) diff --git a/drivers/acpi/dispatcher/dswexec.c b/drivers/acpi/dispatcher/dswexec.c index b246b9657ea..b5072fa9c92 100644 --- a/drivers/acpi/dispatcher/dswexec.c +++ b/drivers/acpi/dispatcher/dswexec.c @@ -408,14 +408,6 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state) [walk_state-> num_operands - 1]), walk_state); - if (ACPI_SUCCESS(status)) { - ACPI_DUMP_OPERANDS(ACPI_WALK_OPERANDS, - ACPI_IMODE_EXECUTE, - acpi_ps_get_opcode_name - (walk_state->opcode), - walk_state->num_operands, - "after ExResolveOperands"); - } } if (ACPI_SUCCESS(status)) { diff --git a/drivers/acpi/dispatcher/dswstate.c b/drivers/acpi/dispatcher/dswstate.c index 1386ced332e..b00d4af791a 100644 --- a/drivers/acpi/dispatcher/dswstate.c +++ b/drivers/acpi/dispatcher/dswstate.c @@ -70,7 +70,7 @@ acpi_status acpi_ds_result_pop(union acpi_operand_object **object, struct acpi_walk_state *walk_state) { - acpi_native_uint index; + u32 index; union acpi_generic_state *state; acpi_status status; @@ -122,7 +122,7 @@ acpi_ds_result_pop(union acpi_operand_object **object, ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Obj=%p [%s] Index=%X State=%p Num=%X\n", *object, acpi_ut_get_object_type_name(*object), - (u32) index, walk_state, walk_state->result_count)); + index, walk_state, walk_state->result_count)); return (AE_OK); } @@ -146,7 +146,7 @@ acpi_ds_result_push(union acpi_operand_object * object, { union acpi_generic_state *state; acpi_status status; - acpi_native_uint index; + u32 index; ACPI_FUNCTION_NAME(ds_result_push); @@ -400,7 +400,7 @@ void acpi_ds_obj_stack_pop_and_delete(u32 pop_count, struct acpi_walk_state *walk_state) { - acpi_native_int i; + s32 i; union acpi_operand_object *obj_desc; ACPI_FUNCTION_NAME(ds_obj_stack_pop_and_delete); @@ -409,7 +409,7 @@ acpi_ds_obj_stack_pop_and_delete(u32 pop_count, return; } - for (i = (acpi_native_int) (pop_count - 1); i >= 0; i--) { + for (i = (s32) pop_count - 1; i >= 0; i--) { if (walk_state->num_operands == 0) { return; } @@ -615,14 +615,8 @@ acpi_ds_init_aml_walk(struct acpi_walk_state *walk_state, walk_state->pass_number = pass_number; if (info) { - if (info->parameter_type == ACPI_PARAM_GPE) { - walk_state->gpe_event_info = - ACPI_CAST_PTR(struct acpi_gpe_event_info, - info->parameters); - } else { - walk_state->params = info->parameters; - walk_state->caller_return_desc = &info->return_object; - } + walk_state->params = info->parameters; + walk_state->caller_return_desc = &info->return_object; } status = acpi_ps_init_scope(&walk_state->parser_state, op); diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c index bb7c51f712b..1e872e79db3 100644 --- a/drivers/acpi/dock.c +++ b/drivers/acpi/dock.c @@ -920,6 +920,9 @@ static int __init dock_init(void) if (acpi_disabled) return 0; + if (acpi_disabled) + return 0; + /* look for a dock station */ acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, find_dock, &num, NULL); diff --git a/drivers/acpi/events/evevent.c b/drivers/acpi/events/evevent.c index 5d30e5be1b1..c56c5c6ea77 100644 --- a/drivers/acpi/events/evevent.c +++ b/drivers/acpi/events/evevent.c @@ -188,7 +188,7 @@ acpi_status acpi_ev_install_xrupt_handlers(void) static acpi_status acpi_ev_fixed_event_initialize(void) { - acpi_native_uint i; + u32 i; acpi_status status; /* @@ -231,7 +231,7 @@ u32 acpi_ev_fixed_event_detect(void) u32 int_status = ACPI_INTERRUPT_NOT_HANDLED; u32 fixed_status; u32 fixed_enable; - acpi_native_uint i; + u32 i; ACPI_FUNCTION_NAME(ev_fixed_event_detect); @@ -260,7 +260,7 @@ 
u32 acpi_ev_fixed_event_detect(void) /* Found an active (signalled) event */ acpi_os_fixed_event_count(i); - int_status |= acpi_ev_fixed_event_dispatch((u32) i); + int_status |= acpi_ev_fixed_event_dispatch(i); } } diff --git a/drivers/acpi/events/evgpe.c b/drivers/acpi/events/evgpe.c index 5354be44f87..c5e53aae86f 100644 --- a/drivers/acpi/events/evgpe.c +++ b/drivers/acpi/events/evgpe.c @@ -256,7 +256,7 @@ acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info) return_ACPI_STATUS(status); } - /* Mark wake-disabled or HW disable, or both */ + /* Clear the appropriate enabled flags for this GPE */ switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) { case ACPI_GPE_TYPE_WAKE: @@ -273,13 +273,23 @@ acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info) /* Disable the requested runtime GPE */ ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_RUN_ENABLED); - - /* fallthrough */ + break; default: - acpi_hw_write_gpe_enable_reg(gpe_event_info); + break; } + /* + * Even if we don't know the GPE type, make sure that we always + * disable it. low_disable_gpe will just clear the enable bit for this + * GPE and write it. It will not write out the current GPE enable mask, + * since this may inadvertently enable GPEs too early, if a rogue GPE has + * come in during ACPICA initialization - possibly as a result of AML or + * other code that has enabled the GPE. + */ + status = acpi_hw_low_disable_gpe(gpe_event_info); + return_ACPI_STATUS(status); + return_ACPI_STATUS(AE_OK); } @@ -305,7 +315,7 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device, { union acpi_operand_object *obj_desc; struct acpi_gpe_block_info *gpe_block; - acpi_native_uint i; + u32 i; ACPI_FUNCTION_ENTRY(); @@ -379,8 +389,8 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) u32 status_reg; u32 enable_reg; acpi_cpu_flags flags; - acpi_native_uint i; - acpi_native_uint j; + u32 i; + u32 j; ACPI_FUNCTION_NAME(ev_gpe_detect); @@ -462,13 +472,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) */ int_status |= acpi_ev_gpe_dispatch(&gpe_block-> - event_info[(i * - ACPI_GPE_REGISTER_WIDTH) - + - j], - (u32) j + - gpe_register_info-> - base_gpe_number); + event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number); } } } @@ -555,10 +559,6 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) */ info->prefix_node = local_gpe_event_info.dispatch.method_node; - info->parameters = - ACPI_CAST_PTR(union acpi_operand_object *, - gpe_event_info); - info->parameter_type = ACPI_PARAM_GPE; info->flags = ACPI_IGNORE_RETURN_VALUE; status = acpi_ns_evaluate(info); diff --git a/drivers/acpi/events/evgpeblk.c b/drivers/acpi/events/evgpeblk.c index e6c4d4c49e7..73c058e2f5c 100644 --- a/drivers/acpi/events/evgpeblk.c +++ b/drivers/acpi/events/evgpeblk.c @@ -189,8 +189,8 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info, struct acpi_gpe_block_info *gpe_block) { struct acpi_gpe_event_info *gpe_event_info; - acpi_native_uint i; - acpi_native_uint j; + u32 i; + u32 j; ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers); @@ -203,7 +203,8 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info, for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) { gpe_event_info = &gpe_block-> - event_info[(i * ACPI_GPE_REGISTER_WIDTH) + j]; + event_info[((acpi_size) i * + ACPI_GPE_REGISTER_WIDTH) + j]; if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == ACPI_GPE_DISPATCH_HANDLER) 
{ @@ -744,8 +745,8 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block) struct acpi_gpe_event_info *gpe_event_info = NULL; struct acpi_gpe_event_info *this_event; struct acpi_gpe_register_info *this_register; - acpi_native_uint i; - acpi_native_uint j; + u32 i; + u32 j; acpi_status status; ACPI_FUNCTION_TRACE(ev_create_gpe_info_blocks); @@ -983,8 +984,8 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device, struct acpi_gpe_walk_info gpe_info; u32 wake_gpe_count; u32 gpe_enabled_count; - acpi_native_uint i; - acpi_native_uint j; + u32 i; + u32 j; ACPI_FUNCTION_TRACE(ev_initialize_gpe_block); @@ -1033,7 +1034,8 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device, gpe_event_info = &gpe_block-> - event_info[(i * ACPI_GPE_REGISTER_WIDTH) + j]; + event_info[((acpi_size) i * + ACPI_GPE_REGISTER_WIDTH) + j]; if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == ACPI_GPE_DISPATCH_METHOD) diff --git a/drivers/acpi/events/evmisc.c b/drivers/acpi/events/evmisc.c index 2113e58e222..1d5670be729 100644 --- a/drivers/acpi/events/evmisc.c +++ b/drivers/acpi/events/evmisc.c @@ -575,7 +575,7 @@ acpi_status acpi_ev_release_global_lock(void) void acpi_ev_terminate(void) { - acpi_native_uint i; + u32 i; acpi_status status; ACPI_FUNCTION_TRACE(ev_terminate); @@ -589,7 +589,7 @@ void acpi_ev_terminate(void) /* Disable all fixed events */ for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) { - status = acpi_disable_event((u32) i, 0); + status = acpi_disable_event(i, 0); if (ACPI_FAILURE(status)) { ACPI_ERROR((AE_INFO, "Could not disable fixed event %d", diff --git a/drivers/acpi/events/evregion.c b/drivers/acpi/events/evregion.c index 1628f593475..236fbd1ca43 100644 --- a/drivers/acpi/events/evregion.c +++ b/drivers/acpi/events/evregion.c @@ -81,7 +81,7 @@ acpi_ev_install_handler(acpi_handle obj_handle, acpi_status acpi_ev_install_region_handlers(void) { acpi_status status; - acpi_native_uint i; + u32 i; ACPI_FUNCTION_TRACE(ev_install_region_handlers); @@ -151,7 +151,7 @@ acpi_status acpi_ev_install_region_handlers(void) acpi_status acpi_ev_initialize_op_regions(void) { acpi_status status; - acpi_native_uint i; + u32 i; ACPI_FUNCTION_TRACE(ev_initialize_op_regions); @@ -219,7 +219,6 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function) info->prefix_node = region_obj2->extra.method_REG; info->pathname = NULL; info->parameters = args; - info->parameter_type = ACPI_PARAM_ARGS; info->flags = ACPI_IGNORE_RETURN_VALUE; /* diff --git a/drivers/acpi/events/evrgnini.c b/drivers/acpi/events/evrgnini.c index 2e3d2c5e4f4..6b94b38df07 100644 --- a/drivers/acpi/events/evrgnini.c +++ b/drivers/acpi/events/evrgnini.c @@ -380,7 +380,7 @@ static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node) acpi_status status; struct acpica_device_id hid; struct acpi_compatible_id_list *cid; - acpi_native_uint i; + u32 i; /* * Get the _HID and check for a PCI Root Bridge diff --git a/drivers/acpi/events/evxfevnt.c b/drivers/acpi/events/evxfevnt.c index 99a7502e6a8..73bfd6bf962 100644 --- a/drivers/acpi/events/evxfevnt.c +++ b/drivers/acpi/events/evxfevnt.c @@ -472,7 +472,6 @@ acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags) } ACPI_EXPORT_SYMBOL(acpi_clear_gpe) -#ifdef ACPI_FUTURE_USAGE /******************************************************************************* * * FUNCTION: acpi_get_event_status @@ -489,6 +488,7 @@ ACPI_EXPORT_SYMBOL(acpi_clear_gpe) acpi_status acpi_get_event_status(u32 event, acpi_event_status * 
event_status) { acpi_status status = AE_OK; + u32 value; ACPI_FUNCTION_TRACE(acpi_get_event_status); @@ -506,7 +506,20 @@ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status) status = acpi_get_register(acpi_gbl_fixed_event_info[event]. - status_register_id, event_status); + enable_register_id, &value); + if (ACPI_FAILURE(status)) + return_ACPI_STATUS(status); + + *event_status = value; + + status = + acpi_get_register(acpi_gbl_fixed_event_info[event]. + status_register_id, &value); + if (ACPI_FAILURE(status)) + return_ACPI_STATUS(status); + + if (value) + *event_status |= ACPI_EVENT_FLAG_SET; return_ACPI_STATUS(status); } @@ -566,7 +579,6 @@ acpi_get_gpe_status(acpi_handle gpe_device, } ACPI_EXPORT_SYMBOL(acpi_get_gpe_status) -#endif /* ACPI_FUTURE_USAGE */ /******************************************************************************* * * FUNCTION: acpi_install_gpe_block diff --git a/drivers/acpi/executer/exconfig.c b/drivers/acpi/executer/exconfig.c index 39d74219058..2a32c843cb4 100644 --- a/drivers/acpi/executer/exconfig.c +++ b/drivers/acpi/executer/exconfig.c @@ -53,7 +53,7 @@ ACPI_MODULE_NAME("exconfig") /* Local prototypes */ static acpi_status -acpi_ex_add_table(acpi_native_uint table_index, +acpi_ex_add_table(u32 table_index, struct acpi_namespace_node *parent_node, union acpi_operand_object **ddb_handle); @@ -73,7 +73,7 @@ acpi_ex_add_table(acpi_native_uint table_index, ******************************************************************************/ static acpi_status -acpi_ex_add_table(acpi_native_uint table_index, +acpi_ex_add_table(u32 table_index, struct acpi_namespace_node *parent_node, union acpi_operand_object **ddb_handle) { @@ -96,7 +96,8 @@ acpi_ex_add_table(acpi_native_uint table_index, /* Install the new table into the local data structures */ - obj_desc->reference.object = ACPI_CAST_PTR(void, table_index); + obj_desc->reference.object = ACPI_CAST_PTR(void, + (unsigned long)table_index); /* Add the table to the namespace */ @@ -128,12 +129,12 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state, { acpi_status status; union acpi_operand_object **operand = &walk_state->operands[0]; - acpi_native_uint table_index; struct acpi_namespace_node *parent_node; struct acpi_namespace_node *start_node; struct acpi_namespace_node *parameter_node = NULL; union acpi_operand_object *ddb_handle; struct acpi_table_header *table; + u32 table_index; ACPI_FUNCTION_TRACE(ex_load_table_op); @@ -280,7 +281,7 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc, { union acpi_operand_object *ddb_handle; struct acpi_table_desc table_desc; - acpi_native_uint table_index; + u32 table_index; acpi_status status; u32 length; @@ -437,7 +438,7 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle) { acpi_status status = AE_OK; union acpi_operand_object *table_desc = ddb_handle; - acpi_native_uint table_index; + u32 table_index; struct acpi_table_header *table; ACPI_FUNCTION_TRACE(ex_unload_table); @@ -454,9 +455,9 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle) return_ACPI_STATUS(AE_BAD_PARAMETER); } - /* Get the table index from the ddb_handle */ + /* Get the table index from the ddb_handle (acpi_size for 64-bit case) */ - table_index = (acpi_native_uint) table_desc->reference.object; + table_index = (u32) (acpi_size) table_desc->reference.object; /* Invoke table handler if present */ diff --git a/drivers/acpi/executer/exconvrt.c b/drivers/acpi/executer/exconvrt.c index fd954b4ed83..261d97516d9 100644 --- 
a/drivers/acpi/executer/exconvrt.c +++ b/drivers/acpi/executer/exconvrt.c @@ -288,11 +288,11 @@ acpi_ex_convert_to_ascii(acpi_integer integer, u16 base, u8 * string, u8 data_width) { acpi_integer digit; - acpi_native_uint i; - acpi_native_uint j; - acpi_native_uint k = 0; - acpi_native_uint hex_length; - acpi_native_uint decimal_length; + u32 i; + u32 j; + u32 k = 0; + u32 hex_length; + u32 decimal_length; u32 remainder; u8 supress_zeros; @@ -348,7 +348,7 @@ acpi_ex_convert_to_ascii(acpi_integer integer, /* hex_length: 2 ascii hex chars per data byte */ - hex_length = (acpi_native_uint) ACPI_MUL_2(data_width); + hex_length = ACPI_MUL_2(data_width); for (i = 0, j = (hex_length - 1); i < hex_length; i++, j--) { /* Get one hex digit, most significant digits first */ diff --git a/drivers/acpi/executer/excreate.c b/drivers/acpi/executer/excreate.c index 60e62c4f057..ad09696d506 100644 --- a/drivers/acpi/executer/excreate.c +++ b/drivers/acpi/executer/excreate.c @@ -45,8 +45,6 @@ #include <acpi/acinterp.h> #include <acpi/amlcode.h> #include <acpi/acnamesp.h> -#include <acpi/acevents.h> -#include <acpi/actables.h> #define _COMPONENT ACPI_EXECUTER ACPI_MODULE_NAME("excreate") diff --git a/drivers/acpi/executer/exdump.c b/drivers/acpi/executer/exdump.c index 74f1b22601b..2be2e2bf95b 100644 --- a/drivers/acpi/executer/exdump.c +++ b/drivers/acpi/executer/exdump.c @@ -580,25 +580,22 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth) case ACPI_TYPE_BUFFER: - acpi_os_printf("Buffer len %X @ %p\n", + acpi_os_printf("Buffer length %.2X @ %p\n", obj_desc->buffer.length, obj_desc->buffer.pointer); - length = obj_desc->buffer.length; - if (length > 64) { - length = 64; - } - /* Debug only -- dump the buffer contents */ if (obj_desc->buffer.pointer) { - acpi_os_printf("Buffer Contents: "); - - for (index = 0; index < length; index++) { - acpi_os_printf(" %02x", - obj_desc->buffer.pointer[index]); + length = obj_desc->buffer.length; + if (length > 128) { + length = 128; } - acpi_os_printf("\n"); + + acpi_os_printf + ("Buffer Contents: (displaying length 0x%.2X)\n", + length); + ACPI_DUMP_BUFFER(obj_desc->buffer.pointer, length); } break; @@ -756,54 +753,42 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth) * * FUNCTION: acpi_ex_dump_operands * - * PARAMETERS: Operands - Operand list - * interpreter_mode - Load or Exec - * Ident - Identification - * num_levels - # of stack entries to dump above line - * Note - Output notation - * module_name - Caller's module name - * line_number - Caller's invocation line number + * PARAMETERS: Operands - A list of Operand objects + * opcode_name - AML opcode name + * num_operands - Operand count for this opcode * - * DESCRIPTION: Dump the object stack + * DESCRIPTION: Dump the operands associated with the opcode * ******************************************************************************/ void acpi_ex_dump_operands(union acpi_operand_object **operands, - acpi_interpreter_mode interpreter_mode, - char *ident, - u32 num_levels, - char *note, char *module_name, u32 line_number) + const char *opcode_name, u32 num_operands) { - acpi_native_uint i; - ACPI_FUNCTION_NAME(ex_dump_operands); - if (!ident) { - ident = "?"; - } - - if (!note) { - note = "?"; + if (!opcode_name) { + opcode_name = "UNKNOWN"; } ACPI_DEBUG_PRINT((ACPI_DB_EXEC, - "************* Operand Stack Contents (Opcode [%s], %d Operands)\n", - ident, num_levels)); + "**** Start operand dump for opcode [%s], %d operands\n", + opcode_name, num_operands)); - if 
(num_levels == 0) { - num_levels = 1; + if (num_operands == 0) { + num_operands = 1; } - /* Dump the operand stack starting at the top */ + /* Dump the individual operands */ - for (i = 0; num_levels > 0; i--, num_levels--) { - acpi_ex_dump_operand(operands[i], 0); + while (num_operands) { + acpi_ex_dump_operand(*operands, 0); + operands++; + num_operands--; } ACPI_DEBUG_PRINT((ACPI_DB_EXEC, - "************* Operand Stack dump from %s(%d), %s\n", - module_name, line_number, note)); + "**** End operand dump for [%s]\n", opcode_name)); return; } diff --git a/drivers/acpi/executer/exfldio.c b/drivers/acpi/executer/exfldio.c index e336b5dc7a5..9ff9d1f4615 100644 --- a/drivers/acpi/executer/exfldio.c +++ b/drivers/acpi/executer/exfldio.c @@ -153,14 +153,15 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc, /* * Slack mode only: We will go ahead and allow access to this * field if it is within the region length rounded up to the next - * access width boundary. + * access width boundary. acpi_size cast for 64-bit compile. */ if (ACPI_ROUND_UP(rgn_desc->region.length, obj_desc->common_field. access_byte_width) >= - (obj_desc->common_field.base_byte_offset + - (acpi_native_uint) obj_desc->common_field. - access_byte_width + field_datum_byte_offset)) { + ((acpi_size) obj_desc->common_field. + base_byte_offset + + obj_desc->common_field.access_byte_width + + field_datum_byte_offset)) { return_ACPI_STATUS(AE_OK); } } diff --git a/drivers/acpi/executer/exmisc.c b/drivers/acpi/executer/exmisc.c index cc956a5b526..731414a581a 100644 --- a/drivers/acpi/executer/exmisc.c +++ b/drivers/acpi/executer/exmisc.c @@ -329,8 +329,8 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0, /* Result of two Strings is a String */ - return_desc = acpi_ut_create_string_object((acpi_size) - (operand0->string. + return_desc = acpi_ut_create_string_object(((acpi_size) + operand0->string. length + local_operand1-> string.length)); @@ -352,8 +352,8 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0, /* Result of two Buffers is a Buffer */ - return_desc = acpi_ut_create_buffer_object((acpi_size) - (operand0->buffer. + return_desc = acpi_ut_create_buffer_object(((acpi_size) + operand0->buffer. 
length + local_operand1-> buffer.length)); diff --git a/drivers/acpi/executer/exprep.c b/drivers/acpi/executer/exprep.c index 3a2f8cd4c62..5d438c32989 100644 --- a/drivers/acpi/executer/exprep.c +++ b/drivers/acpi/executer/exprep.c @@ -503,11 +503,11 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info) */ second_desc = obj_desc->common.next_object; second_desc->extra.aml_start = - ((union acpi_parse_object *)(info->data_register_node))-> - named.data; + ACPI_CAST_PTR(union acpi_parse_object, + info->data_register_node)->named.data; second_desc->extra.aml_length = - ((union acpi_parse_object *)(info->data_register_node))-> - named.length; + ACPI_CAST_PTR(union acpi_parse_object, + info->data_register_node)->named.length; break; diff --git a/drivers/acpi/executer/exregion.c b/drivers/acpi/executer/exregion.c index 7cd8bb54fa0..7a41c409ae4 100644 --- a/drivers/acpi/executer/exregion.c +++ b/drivers/acpi/executer/exregion.c @@ -156,7 +156,7 @@ acpi_ex_system_memory_space_handler(u32 function, /* Create a new mapping starting at the address given */ mem_info->mapped_logical_address = - acpi_os_map_memory((acpi_native_uint) address, window_size); + acpi_os_map_memory((acpi_physical_address) address, window_size); if (!mem_info->mapped_logical_address) { ACPI_ERROR((AE_INFO, "Could not map memory at %8.8X%8.8X, size %X", diff --git a/drivers/acpi/executer/exresop.c b/drivers/acpi/executer/exresop.c index 73e29e566a7..54085f16ec2 100644 --- a/drivers/acpi/executer/exresop.c +++ b/drivers/acpi/executer/exresop.c @@ -698,5 +698,9 @@ acpi_ex_resolve_operands(u16 opcode, } } + ACPI_DUMP_OPERANDS(walk_state->operands, + acpi_ps_get_opcode_name(opcode), + walk_state->num_operands); + return_ACPI_STATUS(status); } diff --git a/drivers/acpi/executer/exstore.c b/drivers/acpi/executer/exstore.c index 76c875bc315..38b55e35249 100644 --- a/drivers/acpi/executer/exstore.c +++ b/drivers/acpi/executer/exstore.c @@ -343,12 +343,6 @@ acpi_ex_store(union acpi_operand_object *source_desc, acpi_ut_get_object_type_name(dest_desc), dest_desc)); - ACPI_DUMP_STACK_ENTRY(source_desc); - ACPI_DUMP_STACK_ENTRY(dest_desc); - ACPI_DUMP_OPERANDS(&dest_desc, ACPI_IMODE_EXECUTE, "ExStore", - 2, - "Target is not a Reference or Constant object"); - return_ACPI_STATUS(AE_AML_OPERAND_TYPE); } diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c index 6cf10cbc1ee..55c17afbe66 100644 --- a/drivers/acpi/fan.c +++ b/drivers/acpi/fan.c @@ -148,7 +148,7 @@ acpi_fan_write_state(struct file *file, const char __user * buffer, int result = 0; struct seq_file *m = file->private_data; struct acpi_device *device = m->private; - char state_string[12] = { '\0' }; + char state_string[3] = { '\0' }; if (count > sizeof(state_string) - 1) return -EINVAL; @@ -157,6 +157,12 @@ acpi_fan_write_state(struct file *file, const char __user * buffer, return -EFAULT; state_string[count] = '\0'; + if ((state_string[0] < '0') || (state_string[0] > '3')) + return -EINVAL; + if (state_string[1] == '\n') + state_string[1] = '\0'; + if (state_string[1] != '\0') + return -EINVAL; result = acpi_bus_set_power(device->handle, simple_strtoul(state_string, NULL, 0)); diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index 9b227d4dc9c..0f2dd81736b 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c @@ -166,6 +166,8 @@ static int acpi_bind_one(struct device *dev, acpi_handle handle) "firmware_node"); ret = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj, "physical_node"); + if (acpi_dev->wakeup.flags.valid) + 
device_set_wakeup_capable(dev, true); } return 0; @@ -336,6 +338,9 @@ static int __init acpi_rtc_init(void) if (acpi_disabled) return 0; + if (acpi_disabled) + return 0; + if (dev) { rtc_wake_setup(); rtc_info.wake_on = rtc_wake_on; diff --git a/drivers/acpi/hardware/hwgpe.c b/drivers/acpi/hardware/hwgpe.c index 14bc4f456ae..0b80db9d919 100644 --- a/drivers/acpi/hardware/hwgpe.c +++ b/drivers/acpi/hardware/hwgpe.c @@ -55,6 +55,54 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, /****************************************************************************** * + * FUNCTION: acpi_hw_low_disable_gpe + * + * PARAMETERS: gpe_event_info - Info block for the GPE to be disabled + * + * RETURN: Status + * + * DESCRIPTION: Disable a single GPE in the enable register. + * + ******************************************************************************/ + +acpi_status acpi_hw_low_disable_gpe(struct acpi_gpe_event_info *gpe_event_info) +{ + struct acpi_gpe_register_info *gpe_register_info; + acpi_status status; + u32 enable_mask; + + /* Get the info block for the entire GPE register */ + + gpe_register_info = gpe_event_info->register_info; + if (!gpe_register_info) { + return (AE_NOT_EXIST); + } + + /* Get current value of the enable register that contains this GPE */ + + status = acpi_hw_low_level_read(ACPI_GPE_REGISTER_WIDTH, &enable_mask, + &gpe_register_info->enable_address); + if (ACPI_FAILURE(status)) { + return (status); + } + + /* Clear just the bit that corresponds to this GPE */ + + ACPI_CLEAR_BIT(enable_mask, + ((u32) 1 << + (gpe_event_info->gpe_number - + gpe_register_info->base_gpe_number))); + + /* Write the updated enable mask */ + + status = acpi_hw_low_level_write(ACPI_GPE_REGISTER_WIDTH, enable_mask, + &gpe_register_info->enable_address); + + return (status); +} + +/****************************************************************************** + * * FUNCTION: acpi_hw_write_gpe_enable_reg * * PARAMETERS: gpe_event_info - Info block for the GPE to be enabled @@ -68,7 +116,7 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, ******************************************************************************/ acpi_status -acpi_hw_write_gpe_enable_reg(struct acpi_gpe_event_info *gpe_event_info) +acpi_hw_write_gpe_enable_reg(struct acpi_gpe_event_info * gpe_event_info) { struct acpi_gpe_register_info *gpe_register_info; acpi_status status; @@ -138,7 +186,6 @@ acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info) * ******************************************************************************/ -#ifdef ACPI_FUTURE_USAGE acpi_status acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info, acpi_event_status * event_status) @@ -198,7 +245,6 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info, unlock_and_exit: return (status); } -#endif /* ACPI_FUTURE_USAGE */ /****************************************************************************** * diff --git a/drivers/acpi/namespace/nsdump.c b/drivers/acpi/namespace/nsdump.c index 5445751b8a3..0ab22004728 100644 --- a/drivers/acpi/namespace/nsdump.c +++ b/drivers/acpi/namespace/nsdump.c @@ -73,7 +73,7 @@ acpi_ns_dump_one_device(acpi_handle obj_handle, void acpi_ns_print_pathname(u32 num_segments, char *pathname) { - acpi_native_uint i; + u32 i; ACPI_FUNCTION_NAME(ns_print_pathname); @@ -515,12 +515,12 @@ acpi_ns_dump_one_object(acpi_handle obj_handle, if (obj_type > ACPI_TYPE_LOCAL_MAX) { acpi_os_printf - ("(Ptr to ACPI Object type %X [UNKNOWN])\n", + 
("(Pointer to ACPI Object type %.2X [UNKNOWN])\n", obj_type); bytes_to_dump = 32; } else { acpi_os_printf - ("(Ptr to ACPI Object type %X [%s])\n", + ("(Pointer to ACPI Object type %.2X [%s])\n", obj_type, acpi_ut_get_type_name(obj_type)); bytes_to_dump = sizeof(union acpi_operand_object); diff --git a/drivers/acpi/namespace/nseval.c b/drivers/acpi/namespace/nseval.c index 14bdfa92bea..d369164e00b 100644 --- a/drivers/acpi/namespace/nseval.c +++ b/drivers/acpi/namespace/nseval.c @@ -138,6 +138,41 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info * info) return_ACPI_STATUS(AE_NULL_OBJECT); } + /* + * Calculate the number of arguments being passed to the method + */ + + info->param_count = 0; + if (info->parameters) { + while (info->parameters[info->param_count]) + info->param_count++; + } + + /* Error if too few arguments were passed in */ + + if (info->param_count < info->obj_desc->method.param_count) { + ACPI_ERROR((AE_INFO, + "Insufficient arguments - " + "method [%4.4s] needs %d, found %d", + acpi_ut_get_node_name(info->resolved_node), + info->obj_desc->method.param_count, + info->param_count)); + return_ACPI_STATUS(AE_MISSING_ARGUMENTS); + } + + /* Just a warning if too many arguments */ + + else if (info->param_count > + info->obj_desc->method.param_count) { + ACPI_WARNING((AE_INFO, + "Excess arguments - " + "method [%4.4s] needs %d, found %d", + acpi_ut_get_node_name(info-> + resolved_node), + info->obj_desc->method.param_count, + info->param_count)); + } + ACPI_DUMP_PATHNAME(info->resolved_node, "Execute Method:", ACPI_LV_INFO, _COMPONENT); diff --git a/drivers/acpi/namespace/nsinit.c b/drivers/acpi/namespace/nsinit.c index 6d6d930c8e1..e4c57510d79 100644 --- a/drivers/acpi/namespace/nsinit.c +++ b/drivers/acpi/namespace/nsinit.c @@ -542,7 +542,6 @@ acpi_ns_init_one_device(acpi_handle obj_handle, info->prefix_node = device_node; info->pathname = METHOD_NAME__INI; info->parameters = NULL; - info->parameter_type = ACPI_PARAM_ARGS; info->flags = ACPI_IGNORE_RETURN_VALUE; /* diff --git a/drivers/acpi/namespace/nsload.c b/drivers/acpi/namespace/nsload.c index 2c92f6cf5ce..a4a412b7c02 100644 --- a/drivers/acpi/namespace/nsload.c +++ b/drivers/acpi/namespace/nsload.c @@ -71,8 +71,7 @@ static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle); ******************************************************************************/ acpi_status -acpi_ns_load_table(acpi_native_uint table_index, - struct acpi_namespace_node *node) +acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node) { acpi_status status; diff --git a/drivers/acpi/namespace/nsparse.c b/drivers/acpi/namespace/nsparse.c index 46a79b0103b..a82271a9dbb 100644 --- a/drivers/acpi/namespace/nsparse.c +++ b/drivers/acpi/namespace/nsparse.c @@ -63,13 +63,13 @@ ACPI_MODULE_NAME("nsparse") * ******************************************************************************/ acpi_status -acpi_ns_one_complete_parse(acpi_native_uint pass_number, - acpi_native_uint table_index, - struct acpi_namespace_node * start_node) +acpi_ns_one_complete_parse(u32 pass_number, + u32 table_index, + struct acpi_namespace_node *start_node) { union acpi_parse_object *parse_root; acpi_status status; - acpi_native_uint aml_length; + u32 aml_length; u8 *aml_start; struct acpi_walk_state *walk_state; struct acpi_table_header *table; @@ -112,8 +112,8 @@ acpi_ns_one_complete_parse(acpi_native_uint pass_number, aml_start = (u8 *) table + sizeof(struct acpi_table_header); aml_length = table->length - sizeof(struct acpi_table_header); status = 
acpi_ds_init_aml_walk(walk_state, parse_root, NULL, - aml_start, (u32) aml_length, - NULL, (u8) pass_number); + aml_start, aml_length, NULL, + (u8) pass_number); } if (ACPI_FAILURE(status)) { @@ -158,8 +158,7 @@ acpi_ns_one_complete_parse(acpi_native_uint pass_number, ******************************************************************************/ acpi_status -acpi_ns_parse_table(acpi_native_uint table_index, - struct acpi_namespace_node *start_node) +acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node) { acpi_status status; diff --git a/drivers/acpi/namespace/nsutils.c b/drivers/acpi/namespace/nsutils.c index 64c039843ed..b0817e1127b 100644 --- a/drivers/acpi/namespace/nsutils.c +++ b/drivers/acpi/namespace/nsutils.c @@ -73,9 +73,9 @@ acpi_name acpi_ns_find_parent_name(struct acpi_namespace_node *node_to_search); ******************************************************************************/ void -acpi_ns_report_error(char *module_name, +acpi_ns_report_error(const char *module_name, u32 line_number, - char *internal_name, acpi_status lookup_status) + const char *internal_name, acpi_status lookup_status) { acpi_status status; u32 bad_name; @@ -130,11 +130,11 @@ acpi_ns_report_error(char *module_name, ******************************************************************************/ void -acpi_ns_report_method_error(char *module_name, +acpi_ns_report_method_error(const char *module_name, u32 line_number, - char *message, + const char *message, struct acpi_namespace_node *prefix_node, - char *path, acpi_status method_status) + const char *path, acpi_status method_status) { acpi_status status; struct acpi_namespace_node *node = prefix_node; @@ -167,7 +167,8 @@ acpi_ns_report_method_error(char *module_name, ******************************************************************************/ void -acpi_ns_print_node_pathname(struct acpi_namespace_node *node, char *message) +acpi_ns_print_node_pathname(struct acpi_namespace_node *node, + const char *message) { struct acpi_buffer buffer; acpi_status status; @@ -296,7 +297,7 @@ u32 acpi_ns_local(acpi_object_type type) void acpi_ns_get_internal_name_length(struct acpi_namestring_info *info) { - char *next_external_char; + const char *next_external_char; u32 i; ACPI_FUNCTION_ENTRY(); @@ -363,9 +364,9 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info) { u32 num_segments = info->num_segments; char *internal_name = info->internal_name; - char *external_name = info->next_external_char; + const char *external_name = info->next_external_char; char *result = NULL; - acpi_native_uint i; + u32 i; ACPI_FUNCTION_TRACE(ns_build_internal_name); @@ -400,12 +401,11 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info) result = &internal_name[i]; } else if (num_segments == 2) { internal_name[i] = AML_DUAL_NAME_PREFIX; - result = &internal_name[(acpi_native_uint) (i + 1)]; + result = &internal_name[(acpi_size) i + 1]; } else { internal_name[i] = AML_MULTI_NAME_PREFIX_OP; - internal_name[(acpi_native_uint) (i + 1)] = - (char)num_segments; - result = &internal_name[(acpi_native_uint) (i + 2)]; + internal_name[(acpi_size) i + 1] = (char)num_segments; + result = &internal_name[(acpi_size) i + 2]; } } @@ -472,7 +472,8 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info) * *******************************************************************************/ -acpi_status acpi_ns_internalize_name(char *external_name, char **converted_name) +acpi_status +acpi_ns_internalize_name(const char 
*external_name, char **converted_name) { char *internal_name; struct acpi_namestring_info info; @@ -528,15 +529,15 @@ acpi_status acpi_ns_internalize_name(char *external_name, char **converted_name) acpi_status acpi_ns_externalize_name(u32 internal_name_length, - char *internal_name, + const char *internal_name, u32 * converted_name_length, char **converted_name) { - acpi_native_uint names_index = 0; - acpi_native_uint num_segments = 0; - acpi_native_uint required_length; - acpi_native_uint prefix_length = 0; - acpi_native_uint i = 0; - acpi_native_uint j = 0; + u32 names_index = 0; + u32 num_segments = 0; + u32 required_length; + u32 prefix_length = 0; + u32 i = 0; + u32 j = 0; ACPI_FUNCTION_TRACE(ns_externalize_name); @@ -582,9 +583,8 @@ acpi_ns_externalize_name(u32 internal_name_length, /* <count> 4-byte names */ names_index = prefix_length + 2; - num_segments = (acpi_native_uint) (u8) - internal_name[(acpi_native_uint) - (prefix_length + 1)]; + num_segments = (u8) + internal_name[(acpi_size) prefix_length + 1]; break; case AML_DUAL_NAME_PREFIX: @@ -823,7 +823,7 @@ u32 acpi_ns_opens_scope(acpi_object_type type) acpi_status acpi_ns_get_node(struct acpi_namespace_node *prefix_node, - char *pathname, + const char *pathname, u32 flags, struct acpi_namespace_node **return_node) { union acpi_generic_state scope_info; diff --git a/drivers/acpi/namespace/nsxfeval.c b/drivers/acpi/namespace/nsxfeval.c index a8d549187c8..38be5865d95 100644 --- a/drivers/acpi/namespace/nsxfeval.c +++ b/drivers/acpi/namespace/nsxfeval.c @@ -182,7 +182,6 @@ acpi_evaluate_object(acpi_handle handle, } info->pathname = pathname; - info->parameter_type = ACPI_PARAM_ARGS; /* Convert and validate the device handle */ @@ -442,7 +441,7 @@ acpi_ns_get_device_callback(acpi_handle obj_handle, u32 flags; struct acpica_device_id hid; struct acpi_compatible_id_list *cid; - acpi_native_uint i; + u32 i; int found; status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c index 658e5f3abae..cb9864e39ba 100644 --- a/drivers/acpi/numa.c +++ b/drivers/acpi/numa.c @@ -120,10 +120,10 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header) struct acpi_srat_mem_affinity *p = (struct acpi_srat_mem_affinity *)header; ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "SRAT Memory (0x%lx length 0x%lx type 0x%x) in proximity domain %d %s%s\n", + "SRAT Memory (0x%lx length 0x%lx) in proximity domain %d %s%s\n", (unsigned long)p->base_address, (unsigned long)p->length, - p->memory_type, p->proximity_domain, + p->proximity_domain, (p->flags & ACPI_SRAT_MEM_ENABLED)? "enabled" : "disabled", (p->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE)? 
diff --git a/drivers/acpi/parser/psargs.c b/drivers/acpi/parser/psargs.c index e9446377884..d830b29b85b 100644 --- a/drivers/acpi/parser/psargs.c +++ b/drivers/acpi/parser/psargs.c @@ -76,7 +76,7 @@ acpi_ps_get_next_package_length(struct acpi_parse_state *parser_state) { u8 *aml = parser_state->aml; u32 package_length = 0; - acpi_native_uint byte_count; + u32 byte_count; u8 byte_zero_mask = 0x3F; /* Default [0:5] */ ACPI_FUNCTION_TRACE(ps_get_next_package_length); @@ -86,7 +86,7 @@ acpi_ps_get_next_package_length(struct acpi_parse_state *parser_state) * used to encode the package length, either 0,1,2, or 3 */ byte_count = (aml[0] >> 6); - parser_state->aml += (byte_count + 1); + parser_state->aml += ((acpi_size) byte_count + 1); /* Get bytes 3, 2, 1 as needed */ diff --git a/drivers/acpi/parser/psxface.c b/drivers/acpi/parser/psxface.c index 52581454c47..270469aae84 100644 --- a/drivers/acpi/parser/psxface.c +++ b/drivers/acpi/parser/psxface.c @@ -333,9 +333,9 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info) static void acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action) { - acpi_native_uint i; + u32 i; - if ((info->parameter_type == ACPI_PARAM_ARGS) && (info->parameters)) { + if (info->parameters) { /* Update reference count for each parameter */ diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index 89022a74fae..11acaee14d6 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c @@ -162,7 +162,7 @@ do_prt_fixups(struct acpi_prt_entry *entry, struct acpi_pci_routing_table *prt) !strcmp(prt->source, quirk->source) && strlen(prt->source) >= strlen(quirk->actual_source)) { printk(KERN_WARNING PREFIX "firmware reports " - "%04x:%02x:%02x[%c] connected to %s; " + "%04x:%02x:%02x PCI INT %c connected to %s; " "changing to %s\n", entry->id.segment, entry->id.bus, entry->id.device, 'A' + entry->pin, @@ -429,7 +429,7 @@ acpi_pci_irq_derive(struct pci_dev *dev, { struct pci_dev *bridge = dev; int irq = -1; - u8 bridge_pin = 0; + u8 bridge_pin = 0, orig_pin = pin; if (!dev) @@ -463,8 +463,8 @@ acpi_pci_irq_derive(struct pci_dev *dev, } if (irq < 0) { - printk(KERN_WARNING PREFIX "Unable to derive IRQ for device %s\n", - pci_name(dev)); + dev_warn(&dev->dev, "can't derive routing for PCI INT %c\n", + 'A' + orig_pin); return -1; } @@ -487,6 +487,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev) int triggering = ACPI_LEVEL_SENSITIVE; int polarity = ACPI_ACTIVE_LOW; char *link = NULL; + char link_desc[16]; int rc; @@ -503,7 +504,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev) pin--; if (!dev->bus) { - printk(KERN_ERR PREFIX "Invalid (NULL) 'bus' field\n"); + dev_err(&dev->dev, "invalid (NULL) 'bus' field\n"); return -ENODEV; } @@ -538,8 +539,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev) * driver reported one, then use it. Exit in any case. 
*/ if (irq < 0) { - printk(KERN_WARNING PREFIX "PCI Interrupt %s[%c]: no GSI", - pci_name(dev), ('A' + pin)); + dev_warn(&dev->dev, "PCI INT %c: no GSI", 'A' + pin); /* Interrupt Line values above 0xF are forbidden */ if (dev->irq > 0 && (dev->irq <= 0xF)) { printk(" - using IRQ %d\n", dev->irq); @@ -554,21 +554,21 @@ int acpi_pci_irq_enable(struct pci_dev *dev) rc = acpi_register_gsi(irq, triggering, polarity); if (rc < 0) { - printk(KERN_WARNING PREFIX "PCI Interrupt %s[%c]: failed " - "to register GSI\n", pci_name(dev), ('A' + pin)); + dev_warn(&dev->dev, "PCI INT %c: failed to register GSI\n", + 'A' + pin); return rc; } dev->irq = rc; - printk(KERN_INFO PREFIX "PCI Interrupt %s[%c] -> ", - pci_name(dev), 'A' + pin); - if (link) - printk("Link [%s] -> ", link); + snprintf(link_desc, sizeof(link_desc), " -> Link[%s]", link); + else + link_desc[0] = '\0'; - printk("GSI %u (%s, %s) -> IRQ %d\n", irq, - (triggering == ACPI_LEVEL_SENSITIVE) ? "level" : "edge", - (polarity == ACPI_ACTIVE_LOW) ? "low" : "high", dev->irq); + dev_info(&dev->dev, "PCI INT %c%s -> GSI %u (%s, %s) -> IRQ %d\n", + 'A' + pin, link_desc, irq, + (triggering == ACPI_LEVEL_SENSITIVE) ? "level" : "edge", + (polarity == ACPI_ACTIVE_LOW) ? "low" : "high", dev->irq); return 0; } @@ -616,10 +616,6 @@ void acpi_pci_irq_disable(struct pci_dev *dev) * (e.g. PCI_UNDEFINED_IRQ). */ - printk(KERN_INFO PREFIX "PCI interrupt for device %s disabled\n", - pci_name(dev)); - + dev_info(&dev->dev, "PCI INT %c disabled\n", 'A' + pin); acpi_unregister_gsi(gsi); - - return; } diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c new file mode 100644 index 00000000000..b9ab030a52d --- /dev/null +++ b/drivers/acpi/pci_slot.c @@ -0,0 +1,368 @@ +/* + * pci_slot.c - ACPI PCI Slot Driver + * + * The code here is heavily leveraged from the acpiphp module. + * Thanks to Matthew Wilcox <matthew@wil.cx> for much guidance. + * Thanks to Kenji Kaneshige <kaneshige.kenji@jp.fujitsu.com> for code + * review and fixes. + * + * Copyright (C) 2007 Alex Chiang <achiang@hp.com> + * Copyright (C) 2007 Hewlett-Packard Development Company, L.P. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/acpi.h> +#include <acpi/acpi_bus.h> +#include <acpi/acpi_drivers.h> + +static int debug; +static int check_sta_before_sun; + +#define DRIVER_VERSION "0.1" +#define DRIVER_AUTHOR "Alex Chiang <achiang@hp.com>" +#define DRIVER_DESC "ACPI PCI Slot Detection Driver" +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL"); +MODULE_PARM_DESC(debug, "Debugging mode enabled or not"); +module_param(debug, bool, 0644); + +#define _COMPONENT ACPI_PCI_COMPONENT +ACPI_MODULE_NAME("pci_slot"); + +#define MY_NAME "pci_slot" +#define err(format, arg...) 
printk(KERN_ERR "%s: " format , MY_NAME , ## arg) +#define info(format, arg...) printk(KERN_INFO "%s: " format , MY_NAME , ## arg) +#define dbg(format, arg...) \ + do { \ + if (debug) \ + printk(KERN_DEBUG "%s: " format, \ + MY_NAME , ## arg); \ + } while (0) + +#define SLOT_NAME_SIZE 20 /* Inspired by #define in acpiphp.h */ + +struct acpi_pci_slot { + acpi_handle root_handle; /* handle of the root bridge */ + struct pci_slot *pci_slot; /* corresponding pci_slot */ + struct list_head list; /* node in the list of slots */ +}; + +static int acpi_pci_slot_add(acpi_handle handle); +static void acpi_pci_slot_remove(acpi_handle handle); + +static LIST_HEAD(slot_list); +static DEFINE_MUTEX(slot_list_lock); +static struct acpi_pci_driver acpi_pci_slot_driver = { + .add = acpi_pci_slot_add, + .remove = acpi_pci_slot_remove, +}; + +static int +check_slot(acpi_handle handle, int *device, unsigned long *sun) +{ + int retval = 0; + unsigned long adr, sta; + acpi_status status; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + + acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); + dbg("Checking slot on path: %s\n", (char *)buffer.pointer); + + if (check_sta_before_sun) { + /* If SxFy doesn't have _STA, we just assume it's there */ + status = acpi_evaluate_integer(handle, "_STA", NULL, &sta); + if (ACPI_SUCCESS(status) && !(sta & ACPI_STA_DEVICE_PRESENT)) { + retval = -1; + goto out; + } + } + + status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr); + if (ACPI_FAILURE(status)) { + dbg("_ADR returned %d on %s\n", status, (char *)buffer.pointer); + retval = -1; + goto out; + } + + *device = (adr >> 16) & 0xffff; + + /* No _SUN == not a slot == bail */ + status = acpi_evaluate_integer(handle, "_SUN", NULL, sun); + if (ACPI_FAILURE(status)) { + dbg("_SUN returned %d on %s\n", status, (char *)buffer.pointer); + retval = -1; + goto out; + } + +out: + kfree(buffer.pointer); + return retval; +} + +struct callback_args { + acpi_walk_callback user_function; /* only for walk_p2p_bridge */ + struct pci_bus *pci_bus; + acpi_handle root_handle; +}; + +/* + * register_slot + * + * Called once for each SxFy object in the namespace. Don't worry about + * calling pci_create_slot multiple times for the same pci_bus:device, + * since each subsequent call simply bumps the refcount on the pci_slot. + * + * The number of calls to pci_destroy_slot from unregister_slot is + * symmetrical. 
+ */ +static acpi_status +register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) +{ + int device; + unsigned long sun; + char name[SLOT_NAME_SIZE]; + struct acpi_pci_slot *slot; + struct pci_slot *pci_slot; + struct callback_args *parent_context = context; + struct pci_bus *pci_bus = parent_context->pci_bus; + + if (check_slot(handle, &device, &sun)) + return AE_OK; + + slot = kmalloc(sizeof(*slot), GFP_KERNEL); + if (!slot) { + err("%s: cannot allocate memory\n", __func__); + return AE_OK; + } + + snprintf(name, sizeof(name), "%u", (u32)sun); + pci_slot = pci_create_slot(pci_bus, device, name); + if (IS_ERR(pci_slot)) { + err("pci_create_slot returned %ld\n", PTR_ERR(pci_slot)); + kfree(slot); + } + + slot->root_handle = parent_context->root_handle; + slot->pci_slot = pci_slot; + INIT_LIST_HEAD(&slot->list); + mutex_lock(&slot_list_lock); + list_add(&slot->list, &slot_list); + mutex_unlock(&slot_list_lock); + + dbg("pci_slot: %p, pci_bus: %x, device: %d, name: %s\n", + pci_slot, pci_bus->number, device, name); + + return AE_OK; +} + +/* + * walk_p2p_bridge - discover and walk p2p bridges + * @handle: points to an acpi_pci_root + * @context: p2p_bridge_context pointer + * + * Note that when we call ourselves recursively, we pass a different + * value of pci_bus in the child_context. + */ +static acpi_status +walk_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv) +{ + int device, function; + unsigned long adr; + acpi_status status; + acpi_handle dummy_handle; + acpi_walk_callback user_function; + + struct pci_dev *dev; + struct pci_bus *pci_bus; + struct callback_args child_context; + struct callback_args *parent_context = context; + + pci_bus = parent_context->pci_bus; + user_function = parent_context->user_function; + + status = acpi_get_handle(handle, "_ADR", &dummy_handle); + if (ACPI_FAILURE(status)) + return AE_OK; + + status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr); + if (ACPI_FAILURE(status)) + return AE_OK; + + device = (adr >> 16) & 0xffff; + function = adr & 0xffff; + + dev = pci_get_slot(pci_bus, PCI_DEVFN(device, function)); + if (!dev || !dev->subordinate) + goto out; + + child_context.pci_bus = dev->subordinate; + child_context.user_function = user_function; + child_context.root_handle = parent_context->root_handle; + + dbg("p2p bridge walk, pci_bus = %x\n", dev->subordinate->number); + status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, + user_function, &child_context, NULL); + if (ACPI_FAILURE(status)) + goto out; + + status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, + walk_p2p_bridge, &child_context, NULL); +out: + pci_dev_put(dev); + return AE_OK; +} + +/* + * walk_root_bridge - generic root bridge walker + * @handle: points to an acpi_pci_root + * @user_function: user callback for slot objects + * + * Call user_function for all objects underneath this root bridge. + * Walk p2p bridges underneath us and call user_function on those too. 
+ */ +static int +walk_root_bridge(acpi_handle handle, acpi_walk_callback user_function) +{ + int seg, bus; + unsigned long tmp; + acpi_status status; + acpi_handle dummy_handle; + struct pci_bus *pci_bus; + struct callback_args context; + + /* If the bridge doesn't have _STA, we assume it is always there */ + status = acpi_get_handle(handle, "_STA", &dummy_handle); + if (ACPI_SUCCESS(status)) { + status = acpi_evaluate_integer(handle, "_STA", NULL, &tmp); + if (ACPI_FAILURE(status)) { + info("%s: _STA evaluation failure\n", __func__); + return 0; + } + if ((tmp & ACPI_STA_DEVICE_FUNCTIONING) == 0) + /* don't register this object */ + return 0; + } + + status = acpi_evaluate_integer(handle, "_SEG", NULL, &tmp); + seg = ACPI_SUCCESS(status) ? tmp : 0; + + status = acpi_evaluate_integer(handle, "_BBN", NULL, &tmp); + bus = ACPI_SUCCESS(status) ? tmp : 0; + + pci_bus = pci_find_bus(seg, bus); + if (!pci_bus) + return 0; + + context.pci_bus = pci_bus; + context.user_function = user_function; + context.root_handle = handle; + + dbg("root bridge walk, pci_bus = %x\n", pci_bus->number); + status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, + user_function, &context, NULL); + if (ACPI_FAILURE(status)) + return status; + + status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, + walk_p2p_bridge, &context, NULL); + if (ACPI_FAILURE(status)) + err("%s: walk_p2p_bridge failure - %d\n", __func__, status); + + return status; +} + +/* + * acpi_pci_slot_add + * @handle: points to an acpi_pci_root + */ +static int +acpi_pci_slot_add(acpi_handle handle) +{ + acpi_status status; + + status = walk_root_bridge(handle, register_slot); + if (ACPI_FAILURE(status)) + err("%s: register_slot failure - %d\n", __func__, status); + + return status; +} + +/* + * acpi_pci_slot_remove + * @handle: points to an acpi_pci_root + */ +static void +acpi_pci_slot_remove(acpi_handle handle) +{ + struct acpi_pci_slot *slot, *tmp; + + mutex_lock(&slot_list_lock); + list_for_each_entry_safe(slot, tmp, &slot_list, list) { + if (slot->root_handle == handle) { + list_del(&slot->list); + pci_destroy_slot(slot->pci_slot); + kfree(slot); + } + } + mutex_unlock(&slot_list_lock); +} + +static int do_sta_before_sun(const struct dmi_system_id *d) +{ + info("%s detected: will evaluate _STA before calling _SUN\n", d->ident); + check_sta_before_sun = 1; + return 0; +} + +static struct dmi_system_id acpi_pci_slot_dmi_table[] __initdata = { + /* + * Fujitsu Primequest machines will return 1023 to indicate an + * error if the _SUN method is evaluated on SxFy objects that + * are not present (as indicated by _STA), so for those machines, + * we want to check _STA before evaluating _SUN. 
+ */ + { + .callback = do_sta_before_sun, + .ident = "Fujitsu PRIMEQUEST", + .matches = { + DMI_MATCH(DMI_BIOS_VENDOR, "FUJITSU LIMITED"), + DMI_MATCH(DMI_BIOS_VERSION, "PRIMEQUEST"), + }, + }, + {} +}; + +static int __init +acpi_pci_slot_init(void) +{ + dmi_check_system(acpi_pci_slot_dmi_table); + acpi_pci_register_driver(&acpi_pci_slot_driver); + return 0; +} + +static void __exit +acpi_pci_slot_exit(void) +{ + acpi_pci_unregister_driver(&acpi_pci_slot_driver); +} + +module_init(acpi_pci_slot_init); +module_exit(acpi_pci_slot_exit); diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c index 81e4f081a4a..4ab21cb1c8c 100644 --- a/drivers/acpi/power.c +++ b/drivers/acpi/power.c @@ -292,69 +292,135 @@ static int acpi_power_off_device(acpi_handle handle, struct acpi_device *dev) return 0; } +/** + * acpi_device_sleep_wake - execute _DSW (Device Sleep Wake) or (deprecated in + * ACPI 3.0) _PSW (Power State Wake) + * @dev: Device to handle. + * @enable: 0 - disable, 1 - enable the wake capabilities of the device. + * @sleep_state: Target sleep state of the system. + * @dev_state: Target power state of the device. + * + * Execute _DSW (Device Sleep Wake) or (deprecated in ACPI 3.0) _PSW (Power + * State Wake) for the device, if present. On failure reset the device's + * wakeup.flags.valid flag. + * + * RETURN VALUE: + * 0 if either _DSW or _PSW has been successfully executed + * 0 if neither _DSW nor _PSW has been found + * -ENODEV if the execution of either _DSW or _PSW has failed + */ +int acpi_device_sleep_wake(struct acpi_device *dev, + int enable, int sleep_state, int dev_state) +{ + union acpi_object in_arg[3]; + struct acpi_object_list arg_list = { 3, in_arg }; + acpi_status status = AE_OK; + + /* + * Try to execute _DSW first. + * + * Three agruments are needed for the _DSW object: + * Argument 0: enable/disable the wake capabilities + * Argument 1: target system state + * Argument 2: target device state + * When _DSW object is called to disable the wake capabilities, maybe + * the first argument is filled. The values of the other two agruments + * are meaningless. + */ + in_arg[0].type = ACPI_TYPE_INTEGER; + in_arg[0].integer.value = enable; + in_arg[1].type = ACPI_TYPE_INTEGER; + in_arg[1].integer.value = sleep_state; + in_arg[2].type = ACPI_TYPE_INTEGER; + in_arg[2].integer.value = dev_state; + status = acpi_evaluate_object(dev->handle, "_DSW", &arg_list, NULL); + if (ACPI_SUCCESS(status)) { + return 0; + } else if (status != AE_NOT_FOUND) { + printk(KERN_ERR PREFIX "_DSW execution failed\n"); + dev->wakeup.flags.valid = 0; + return -ENODEV; + } + + /* Execute _PSW */ + arg_list.count = 1; + in_arg[0].integer.value = enable; + status = acpi_evaluate_object(dev->handle, "_PSW", &arg_list, NULL); + if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) { + printk(KERN_ERR PREFIX "_PSW execution failed\n"); + dev->wakeup.flags.valid = 0; + return -ENODEV; + } + + return 0; +} + /* * Prepare a wakeup device, two steps (Ref ACPI 2.0:P229): * 1. Power on the power resources required for the wakeup device - * 2. Enable _PSW (power state wake) for the device if present + * 2. 
Execute _DSW (Device Sleep Wake) or (deprecated in ACPI 3.0) _PSW (Power + * State Wake) for the device, if present */ -int acpi_enable_wakeup_device_power(struct acpi_device *dev) +int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state) { - union acpi_object arg = { ACPI_TYPE_INTEGER }; - struct acpi_object_list arg_list = { 1, &arg }; - acpi_status status = AE_OK; - int i; - int ret = 0; + int i, err; if (!dev || !dev->wakeup.flags.valid) - return -1; + return -EINVAL; + + /* + * Do not execute the code below twice in a row without calling + * acpi_disable_wakeup_device_power() in between for the same device + */ + if (dev->wakeup.flags.prepared) + return 0; - arg.integer.value = 1; /* Open power resource */ for (i = 0; i < dev->wakeup.resources.count; i++) { - ret = acpi_power_on(dev->wakeup.resources.handles[i], dev); + int ret = acpi_power_on(dev->wakeup.resources.handles[i], dev); if (ret) { printk(KERN_ERR PREFIX "Transition power state\n"); dev->wakeup.flags.valid = 0; - return -1; + return -ENODEV; } } - /* Execute PSW */ - status = acpi_evaluate_object(dev->handle, "_PSW", &arg_list, NULL); - if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) { - printk(KERN_ERR PREFIX "Evaluate _PSW\n"); - dev->wakeup.flags.valid = 0; - ret = -1; - } + /* + * Passing 3 as the third argument below means the device may be placed + * in arbitrary power state afterwards. + */ + err = acpi_device_sleep_wake(dev, 1, sleep_state, 3); + if (!err) + dev->wakeup.flags.prepared = 1; - return ret; + return err; } /* * Shutdown a wakeup device, counterpart of above method - * 1. Disable _PSW (power state wake) + * 1. Execute _DSW (Device Sleep Wake) or (deprecated in ACPI 3.0) _PSW (Power + * State Wake) for the device, if present * 2. Shutdown down the power resources */ int acpi_disable_wakeup_device_power(struct acpi_device *dev) { - union acpi_object arg = { ACPI_TYPE_INTEGER }; - struct acpi_object_list arg_list = { 1, &arg }; - acpi_status status = AE_OK; - int i; - int ret = 0; - + int i, ret; if (!dev || !dev->wakeup.flags.valid) - return -1; + return -EINVAL; - arg.integer.value = 0; - /* Execute PSW */ - status = acpi_evaluate_object(dev->handle, "_PSW", &arg_list, NULL); - if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) { - printk(KERN_ERR PREFIX "Evaluate _PSW\n"); - dev->wakeup.flags.valid = 0; - return -1; - } + /* + * Do not execute the code below twice in a row without calling + * acpi_enable_wakeup_device_power() in between for the same device + */ + if (!dev->wakeup.flags.prepared) + return 0; + + dev->wakeup.flags.prepared = 0; + + ret = acpi_device_sleep_wake(dev, 0, 0, 0); + if (ret) + return ret; /* Close power resource */ for (i = 0; i < dev->wakeup.resources.count; i++) { @@ -362,7 +428,7 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev) if (ret) { printk(KERN_ERR PREFIX "Transition power state\n"); dev->wakeup.flags.valid = 0; - return -1; + return -ENODEV; } } diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index 9dd0fa93b9e..ec0f2d581ec 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c @@ -118,8 +118,31 @@ static const struct file_operations acpi_processor_info_fops = { .release = single_release, }; -struct acpi_processor *processors[NR_CPUS]; +DEFINE_PER_CPU(struct acpi_processor *, processors); struct acpi_processor_errata errata __read_mostly; +static int set_no_mwait(const struct dmi_system_id *id) +{ + printk(KERN_NOTICE PREFIX "%s detected - " + "disable mwait for CPU 
C-states\n", id->ident); + idle_nomwait = 1; + return 0; +} + +static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = { + { + set_no_mwait, "IFL91 board", { + DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"), + DMI_MATCH(DMI_SYS_VENDOR, "ZEPTO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "3215W"), + DMI_MATCH(DMI_BOARD_NAME, "IFL91") }, NULL}, + { + set_no_mwait, "Extensa 5220", { + DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), + DMI_MATCH(DMI_SYS_VENDOR, "ACER"), + DMI_MATCH(DMI_PRODUCT_VERSION, "0100"), + DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL}, + {}, +}; /* -------------------------------------------------------------------------- Errata Handling @@ -265,7 +288,20 @@ static int acpi_processor_set_pdc(struct acpi_processor *pr) if (!pdc_in) return status; + if (idle_nomwait) { + /* + * If mwait is disabled for CPU C-states, the C2C3_FFH access + * mode will be disabled in the parameters of the _PDC object. + * Of course C1_FFH access mode will also be disabled. + */ + union acpi_object *obj; + u32 *buffer = NULL; + obj = pdc_in->pointer; + buffer = (u32 *)(obj->buffer.pointer); + buffer[2] &= ~(ACPI_PDC_C_C2C3_FFH | ACPI_PDC_C_C1_FFH); + + } status = acpi_evaluate_object(pr->handle, "_PDC", pdc_in, NULL); if (ACPI_FAILURE(status)) @@ -614,14 +650,14 @@ static int acpi_processor_get_info(struct acpi_processor *pr, unsigned has_uid) return 0; } -static void *processor_device_array[NR_CPUS]; +static DEFINE_PER_CPU(void *, processor_device_array); static int __cpuinit acpi_processor_start(struct acpi_device *device) { int result = 0; acpi_status status = AE_OK; struct acpi_processor *pr; - + struct sys_device *sysdev; pr = acpi_driver_data(device); @@ -638,20 +674,24 @@ static int __cpuinit acpi_processor_start(struct acpi_device *device) * ACPI id of processors can be reported wrongly by the BIOS.
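The idle_nomwait branch above clears the FFH (mwait) capability bits in the third dword of the _PDC buffer (buffer[2]) before the object is evaluated. A standalone sketch of that masking; the bit positions used here are placeholders for illustration, not the kernel's real ACPI_PDC_* definitions:

#include <stdio.h>
#include <stdint.h>

/* Placeholder values; the kernel defines the real ACPI_PDC_* bits. */
#define PDC_C_C1_FFH    (1u << 8)
#define PDC_C_C2C3_FFH  (1u << 9)

int main(void)
{
    /* _PDC takes a buffer of dwords: revision, count, capabilities */
    uint32_t pdc[3] = { 1, 1, 0x000003ff };

    /* When mwait must not be used for C-states, drop the FFH bits so the
     * platform falls back to HLT/I/O based C-state entry. */
    pdc[2] &= ~(PDC_C_C2C3_FFH | PDC_C_C1_FFH);

    printf("capabilities after masking: 0x%08x\n", pdc[2]);
    return 0;
}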
* Don't trust it blindly */ - if (processor_device_array[pr->id] != NULL && - processor_device_array[pr->id] != device) { + if (per_cpu(processor_device_array, pr->id) != NULL && + per_cpu(processor_device_array, pr->id) != device) { printk(KERN_WARNING "BIOS reported wrong ACPI id " "for the processor\n"); return -ENODEV; } - processor_device_array[pr->id] = device; + per_cpu(processor_device_array, pr->id) = device; - processors[pr->id] = pr; + per_cpu(processors, pr->id) = pr; result = acpi_processor_add_fs(device); if (result) goto end; + sysdev = get_cpu_sysdev(pr->id); + if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev")) + return -EFAULT; + status = acpi_install_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY, acpi_processor_notify, pr); @@ -749,7 +789,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; - struct acpi_processor *pr = processors[cpu]; + struct acpi_processor *pr = per_cpu(processors, cpu); if (action == CPU_ONLINE && pr) { acpi_processor_ppc_has_changed(pr); @@ -810,6 +850,8 @@ static int acpi_processor_remove(struct acpi_device *device, int type) status = acpi_remove_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY, acpi_processor_notify); + sysfs_remove_link(&device->dev.kobj, "sysdev"); + acpi_processor_remove_fs(device); if (pr->cdev) { @@ -819,8 +861,8 @@ static int acpi_processor_remove(struct acpi_device *device, int type) pr->cdev = NULL; } - processors[pr->id] = NULL; - processor_device_array[pr->id] = NULL; + per_cpu(processors, pr->id) = NULL; + per_cpu(processor_device_array, pr->id) = NULL; kfree(pr); return 0; @@ -1014,9 +1056,9 @@ static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu) static int acpi_processor_handle_eject(struct acpi_processor *pr) { - if (cpu_online(pr->id)) { - return (-EINVAL); - } + if (cpu_online(pr->id)) + cpu_down(pr->id); + arch_unregister_cpu(pr->id); acpi_unmap_lsapic(pr->id); return (0); @@ -1068,8 +1110,6 @@ static int __init acpi_processor_init(void) { int result = 0; - - memset(&processors, 0, sizeof(processors)); memset(&errata, 0, sizeof(errata)); #ifdef CONFIG_SMP @@ -1083,6 +1123,11 @@ static int __init acpi_processor_init(void) return -ENOMEM; acpi_processor_dir->owner = THIS_MODULE; + /* + * Check whether the system is DMI table. If yes, OSPM + * should not use mwait for CPU-states. 
+ */ + dmi_check_system(processor_idle_dmi_table); result = cpuidle_register_driver(&acpi_idle_driver); if (result < 0) goto out_proc; diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 4976e5db2b3..d592dbb1d12 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -41,6 +41,7 @@ #include <linux/pm_qos_params.h> #include <linux/clockchips.h> #include <linux/cpuidle.h> +#include <linux/cpuidle.h> /* * Include the apic definitions for x86 to have the APIC timer related defines @@ -57,6 +58,7 @@ #include <acpi/acpi_bus.h> #include <acpi/processor.h> +#include <asm/processor.h> #define ACPI_PROCESSOR_COMPONENT 0x01000000 #define ACPI_PROCESSOR_CLASS "processor" @@ -401,7 +403,7 @@ static void acpi_processor_idle(void) */ local_irq_disable(); - pr = processors[smp_processor_id()]; + pr = __get_cpu_var(processors); if (!pr) { local_irq_enable(); return; @@ -955,6 +957,21 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr) } else { continue; } + if (cx.type == ACPI_STATE_C1 && + (idle_halt || idle_nomwait)) { + /* + * In most cases the C1 space_id obtained from + * _CST object is FIXED_HARDWARE access mode. + * But when the option of idle=halt is added, + * the entry_method type should be changed from + * CSTATE_FFH to CSTATE_HALT. + * When the option of idle=nomwait is added, + * the C1 entry_method type should be + * CSTATE_HALT. + */ + cx.entry_method = ACPI_CSTATE_HALT; + snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT"); + } } else { snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x", cx.address); @@ -1431,7 +1448,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev, struct acpi_processor *pr; struct acpi_processor_cx *cx = cpuidle_get_statedata(state); - pr = processors[smp_processor_id()]; + pr = __get_cpu_var(processors); if (unlikely(!pr)) return 0; @@ -1471,7 +1488,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, u32 t1, t2; int sleep_ticks = 0; - pr = processors[smp_processor_id()]; + pr = __get_cpu_var(processors); if (unlikely(!pr)) return 0; @@ -1549,7 +1566,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, u32 t1, t2; int sleep_ticks = 0; - pr = processors[smp_processor_id()]; + pr = __get_cpu_var(processors); if (unlikely(!pr)) return 0; @@ -1780,6 +1797,15 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr, return 0; if (!first_run) { + if (idle_halt) { + /* + * When the boot option of "idle=halt" is added, halt + * is used for CPU IDLE. + * In such case C2/C3 is meaningless. So the max_cstate + * is set to one. + */ + max_cstate = 1; + } dmi_check_system(processor_power_dmi_table); max_cstate = acpi_processor_cstate_check(max_cstate); if (max_cstate < ACPI_C_STATES_MAX) diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index d80b2d1441a..b4749969c6b 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c @@ -89,7 +89,7 @@ static int acpi_processor_ppc_notifier(struct notifier_block *nb, if (event != CPUFREQ_INCOMPATIBLE) goto out; - pr = processors[policy->cpu]; + pr = per_cpu(processors, policy->cpu); if (!pr || !pr->performance) goto out; @@ -572,7 +572,7 @@ int acpi_processor_preregister_performance( /* Call _PSD for all CPUs */ for_each_possible_cpu(i) { - pr = processors[i]; + pr = per_cpu(processors, i); if (!pr) { /* Look only at processors in ACPI namespace */ continue; @@ -603,7 +603,7 @@ int acpi_processor_preregister_performance( * domain info. 
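The C-state handling above (including the HLT fallback descriptions such as "ACPI HLT") ends up visible through cpuidle's sysfs tree once the acpi_idle driver is registered. A small reader, assuming the standard /sys/devices/system/cpu/cpuN/cpuidle layout:

#include <dirent.h>
#include <stdio.h>
#include <string.h>

static void print_attr(const char *state_dir, const char *attr)
{
    char path[512], buf[128];
    FILE *f;

    snprintf(path, sizeof(path), "%s/%s", state_dir, attr);
    f = fopen(path, "r");
    if (!f)
        return;
    if (fgets(buf, sizeof(buf), f)) {
        buf[strcspn(buf, "\n")] = '\0';
        printf("  %-8s %s\n", attr, buf);
    }
    fclose(f);
}

int main(void)
{
    const char *base = "/sys/devices/system/cpu/cpu0/cpuidle";
    char state_dir[512];
    DIR *dir = opendir(base);
    struct dirent *de;

    if (!dir) {
        perror(base);
        return 1;
    }
    while ((de = readdir(dir)) != NULL) {
        if (strncmp(de->d_name, "state", 5) != 0)
            continue;
        printf("%s:\n", de->d_name);
        snprintf(state_dir, sizeof(state_dir), "%s/%s", base, de->d_name);
        print_attr(state_dir, "name");
        print_attr(state_dir, "desc");   /* e.g. "ACPI HLT" vs "ACPI FFH" */
        print_attr(state_dir, "usage");
    }
    closedir(dir);
    return 0;
}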
*/ for_each_possible_cpu(i) { - pr = processors[i]; + pr = per_cpu(processors, i); if (!pr) continue; @@ -624,7 +624,7 @@ int acpi_processor_preregister_performance( cpus_clear(covered_cpus); for_each_possible_cpu(i) { - pr = processors[i]; + pr = per_cpu(processors, i); if (!pr) continue; @@ -651,7 +651,7 @@ int acpi_processor_preregister_performance( if (i == j) continue; - match_pr = processors[j]; + match_pr = per_cpu(processors, j); if (!match_pr) continue; @@ -680,7 +680,7 @@ int acpi_processor_preregister_performance( if (i == j) continue; - match_pr = processors[j]; + match_pr = per_cpu(processors, j); if (!match_pr) continue; @@ -697,7 +697,7 @@ int acpi_processor_preregister_performance( err_ret: for_each_possible_cpu(i) { - pr = processors[i]; + pr = per_cpu(processors, i); if (!pr || !pr->performance) continue; @@ -728,7 +728,7 @@ acpi_processor_register_performance(struct acpi_processor_performance mutex_lock(&performance_mutex); - pr = processors[cpu]; + pr = per_cpu(processors, cpu); if (!pr) { mutex_unlock(&performance_mutex); return -ENODEV; @@ -766,7 +766,7 @@ acpi_processor_unregister_performance(struct acpi_processor_performance mutex_lock(&performance_mutex); - pr = processors[cpu]; + pr = per_cpu(processors, cpu); if (!pr) { mutex_unlock(&performance_mutex); return; diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c index bb06738860c..0622ace0522 100644 --- a/drivers/acpi/processor_throttling.c +++ b/drivers/acpi/processor_throttling.c @@ -71,7 +71,7 @@ static int acpi_processor_update_tsd_coord(void) * coordination between all CPUs. */ for_each_possible_cpu(i) { - pr = processors[i]; + pr = per_cpu(processors, i); if (!pr) continue; @@ -93,7 +93,7 @@ static int acpi_processor_update_tsd_coord(void) cpus_clear(covered_cpus); for_each_possible_cpu(i) { - pr = processors[i]; + pr = per_cpu(processors, i); if (!pr) continue; @@ -119,7 +119,7 @@ static int acpi_processor_update_tsd_coord(void) if (i == j) continue; - match_pr = processors[j]; + match_pr = per_cpu(processors, j); if (!match_pr) continue; @@ -152,7 +152,7 @@ static int acpi_processor_update_tsd_coord(void) if (i == j) continue; - match_pr = processors[j]; + match_pr = per_cpu(processors, j); if (!match_pr) continue; @@ -172,7 +172,7 @@ static int acpi_processor_update_tsd_coord(void) err_ret: for_each_possible_cpu(i) { - pr = processors[i]; + pr = per_cpu(processors, i); if (!pr) continue; @@ -214,7 +214,7 @@ static int acpi_processor_throttling_notifier(unsigned long event, void *data) struct acpi_processor_throttling *p_throttling; cpu = p_tstate->cpu; - pr = processors[cpu]; + pr = per_cpu(processors, cpu); if (!pr) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid pr pointer\n")); return 0; @@ -1035,7 +1035,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state) * cpus. */ for_each_cpu_mask(i, online_throttling_cpus) { - match_pr = processors[i]; + match_pr = per_cpu(processors, i); /* * If the pointer is invalid, we will report the * error message and continue. 
@@ -1232,7 +1232,10 @@ static ssize_t acpi_processor_write_throttling(struct file *file, int result = 0; struct seq_file *m = file->private_data; struct acpi_processor *pr = m->private; - char state_string[12] = { '\0' }; + char state_string[5] = ""; + char *charp = NULL; + size_t state_val = 0; + char tmpbuf[5] = ""; if (!pr || (count > sizeof(state_string) - 1)) return -EINVAL; @@ -1241,10 +1244,23 @@ static ssize_t acpi_processor_write_throttling(struct file *file, return -EFAULT; state_string[count] = '\0'; + if ((count > 0) && (state_string[count-1] == '\n')) + state_string[count-1] = '\0'; - result = acpi_processor_set_throttling(pr, - simple_strtoul(state_string, - NULL, 0)); + charp = state_string; + if ((state_string[0] == 't') || (state_string[0] == 'T')) + charp++; + + state_val = simple_strtoul(charp, NULL, 0); + if (state_val >= pr->throttling.state_count) + return -EINVAL; + + snprintf(tmpbuf, 5, "%zu", state_val); + + if (strcmp(tmpbuf, charp) != 0) + return -EINVAL; + + result = acpi_processor_set_throttling(pr, state_val); if (result) return result; diff --git a/drivers/acpi/reboot.c b/drivers/acpi/reboot.c new file mode 100644 index 00000000000..a6b662c00b6 --- /dev/null +++ b/drivers/acpi/reboot.c @@ -0,0 +1,50 @@ + +#include <linux/pci.h> +#include <linux/acpi.h> +#include <acpi/reboot.h> + +void acpi_reboot(void) +{ + struct acpi_generic_address *rr; + struct pci_bus *bus0; + u8 reset_value; + unsigned int devfn; + + if (acpi_disabled) + return; + + rr = &acpi_gbl_FADT.reset_register; + + /* Is the reset register supported? */ + if (!(acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) || + rr->bit_width != 8 || rr->bit_offset != 0) + return; + + reset_value = acpi_gbl_FADT.reset_value; + + /* The reset register can only exist in I/O, Memory or PCI config space + * on a device on bus 0. */ + switch (rr->space_id) { + case ACPI_ADR_SPACE_PCI_CONFIG: + /* The reset register can only live on bus 0. */ + bus0 = pci_find_bus(0, 0); + if (!bus0) + return; + /* Form PCI device/function pair. */ + devfn = PCI_DEVFN((rr->address >> 32) & 0xffff, + (rr->address >> 16) & 0xffff); + printk(KERN_DEBUG "Resetting with ACPI PCI RESET_REG."); + /* Write the value that resets us. 
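Returning to the throttling interface a few hunks up: the rewritten write handler accepts either "T<n>" or a bare state number, strips a trailing newline, and rejects any input that does not round-trip to the same digits. An illustrative user-space re-implementation of those rules (the state count of 8 is arbitrary):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Mirrors the validation in the rewritten acpi_processor_write_throttling():
 * optional leading 'T'/'t', then a state number that must round-trip exactly
 * and lie below state_count. Returns the state, or -1 on rejection.
 */
static int parse_tstate(const char *input, int state_count)
{
    char buf[5], tmp[5];
    const char *p;
    unsigned long val;
    size_t len = strlen(input);

    if (len == 0 || len >= sizeof(buf))
        return -1;
    strcpy(buf, input);
    if (buf[len - 1] == '\n')               /* echo appends a newline */
        buf[len - 1] = '\0';
    p = buf;
    if (*p == 't' || *p == 'T')
        p++;
    val = strtoul(p, NULL, 0);
    if (val >= (unsigned long)state_count)
        return -1;
    snprintf(tmp, sizeof(tmp), "%lu", val);
    if (strcmp(tmp, p) != 0)                /* rejects "3abc", "03", "" ... */
        return -1;
    return (int)val;
}

int main(void)
{
    const char *samples[] = { "T3", "3", "t10", "3abc", "T-1", "" };
    size_t i;

    for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        printf("%-5s -> %d\n", samples[i], parse_tstate(samples[i], 8));
    return 0;
}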
*/ + pci_bus_write_config_byte(bus0, devfn, + (rr->address & 0xffff), reset_value); + break; + + case ACPI_ADR_SPACE_SYSTEM_MEMORY: + case ACPI_ADR_SPACE_SYSTEM_IO: + printk(KERN_DEBUG "ACPI MEMORY or I/O RESET_REG.\n"); + acpi_hw_low_level_write(8, reset_value, rr); + break; + } + /* Wait ten seconds */ + acpi_os_stall(10000000); +} diff --git a/drivers/acpi/resources/rscalc.c b/drivers/acpi/resources/rscalc.c index 8a112d11d49..f61ebc679e6 100644 --- a/drivers/acpi/resources/rscalc.c +++ b/drivers/acpi/resources/rscalc.c @@ -73,7 +73,7 @@ acpi_rs_stream_option_length(u32 resource_length, u32 minimum_total_length); static u8 acpi_rs_count_set_bits(u16 bit_field) { - acpi_native_uint bits_set; + u8 bits_set; ACPI_FUNCTION_ENTRY(); @@ -84,7 +84,7 @@ static u8 acpi_rs_count_set_bits(u16 bit_field) bit_field &= (u16) (bit_field - 1); } - return ((u8) bits_set); + return bits_set; } /******************************************************************************* diff --git a/drivers/acpi/resources/rscreate.c b/drivers/acpi/resources/rscreate.c index faddaee1bc0..7804a8c40e7 100644 --- a/drivers/acpi/resources/rscreate.c +++ b/drivers/acpi/resources/rscreate.c @@ -181,9 +181,9 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object, } /* - * Loop through the ACPI_INTERNAL_OBJECTS - Each object - * should be a package that in turn contains an - * acpi_integer Address, a u8 Pin, a Name and a u8 source_index. + * Loop through the ACPI_INTERNAL_OBJECTS - Each object should be a + * package that in turn contains an acpi_integer Address, a u8 Pin, + * a Name, and a u8 source_index. */ top_object_list = package_object->package.elements; number_of_elements = package_object->package.count; @@ -240,9 +240,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object, /* 1) First subobject: Dereference the PRT.Address */ obj_desc = sub_object_list[0]; - if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) { - user_prt->address = obj_desc->integer.value; - } else { + if (ACPI_GET_OBJECT_TYPE(obj_desc) != ACPI_TYPE_INTEGER) { ACPI_ERROR((AE_INFO, "(PRT[%X].Address) Need Integer, found %s", index, @@ -250,12 +248,12 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object, return_ACPI_STATUS(AE_BAD_DATA); } + user_prt->address = obj_desc->integer.value; + /* 2) Second subobject: Dereference the PRT.Pin */ obj_desc = sub_object_list[1]; - if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) { - user_prt->pin = (u32) obj_desc->integer.value; - } else { + if (ACPI_GET_OBJECT_TYPE(obj_desc) != ACPI_TYPE_INTEGER) { ACPI_ERROR((AE_INFO, "(PRT[%X].Pin) Need Integer, found %s", index, @@ -284,6 +282,25 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object, } } + user_prt->pin = (u32) obj_desc->integer.value; + + /* + * If the BIOS has erroneously reversed the _PRT source_name (index 2) + * and the source_index (index 3), fix it. _PRT is important enough to + * workaround this BIOS error. This also provides compatibility with + * other ACPI implementations. 
+ */ + obj_desc = sub_object_list[3]; + if (!obj_desc + || (ACPI_GET_OBJECT_TYPE(obj_desc) != ACPI_TYPE_INTEGER)) { + sub_object_list[3] = sub_object_list[2]; + sub_object_list[2] = obj_desc; + + ACPI_WARNING((AE_INFO, + "(PRT[%X].Source) SourceName and SourceIndex are reversed, fixed", + index)); + } + /* * 3) Third subobject: Dereference the PRT.source_name * The name may be unresolved (slack mode), so allow a null object @@ -364,9 +381,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object, /* 4) Fourth subobject: Dereference the PRT.source_index */ obj_desc = sub_object_list[source_index_index]; - if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) { - user_prt->source_index = (u32) obj_desc->integer.value; - } else { + if (ACPI_GET_OBJECT_TYPE(obj_desc) != ACPI_TYPE_INTEGER) { ACPI_ERROR((AE_INFO, "(PRT[%X].SourceIndex) Need Integer, found %s", index, @@ -374,6 +389,8 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object, return_ACPI_STATUS(AE_BAD_DATA); } + user_prt->source_index = (u32) obj_desc->integer.value; + /* Point to the next union acpi_operand_object in the top level package */ top_object_list++; diff --git a/drivers/acpi/resources/rsmisc.c b/drivers/acpi/resources/rsmisc.c index de1ac3881b2..96a6c035325 100644 --- a/drivers/acpi/resources/rsmisc.c +++ b/drivers/acpi/resources/rsmisc.c @@ -82,7 +82,7 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource, ACPI_FUNCTION_TRACE(rs_convert_aml_to_resource); - if (((acpi_native_uint) resource) & 0x3) { + if (((acpi_size) resource) & 0x3) { /* Each internal resource struct is expected to be 32-bit aligned */ diff --git a/drivers/acpi/resources/rsutils.c b/drivers/acpi/resources/rsutils.c index befe2302f41..f7b3bcd59ba 100644 --- a/drivers/acpi/resources/rsutils.c +++ b/drivers/acpi/resources/rsutils.c @@ -62,7 +62,7 @@ ACPI_MODULE_NAME("rsutils") ******************************************************************************/ u8 acpi_rs_decode_bitmask(u16 mask, u8 * list) { - acpi_native_uint i; + u8 i; u8 bit_count; ACPI_FUNCTION_ENTRY(); @@ -71,7 +71,7 @@ u8 acpi_rs_decode_bitmask(u16 mask, u8 * list) for (i = 0, bit_count = 0; mask; i++) { if (mask & 0x0001) { - list[bit_count] = (u8) i; + list[bit_count] = i; bit_count++; } @@ -96,8 +96,8 @@ u8 acpi_rs_decode_bitmask(u16 mask, u8 * list) u16 acpi_rs_encode_bitmask(u8 * list, u8 count) { - acpi_native_uint i; - acpi_native_uint mask; + u32 i; + u16 mask; ACPI_FUNCTION_ENTRY(); @@ -107,7 +107,7 @@ u16 acpi_rs_encode_bitmask(u8 * list, u8 count) mask |= (0x1 << list[i]); } - return ((u16) mask); + return mask; } /******************************************************************************* @@ -130,7 +130,7 @@ u16 acpi_rs_encode_bitmask(u8 * list, u8 count) void acpi_rs_move_data(void *destination, void *source, u16 item_count, u8 move_type) { - acpi_native_uint i; + u32 i; ACPI_FUNCTION_ENTRY(); @@ -679,7 +679,6 @@ acpi_rs_set_srs_method_data(struct acpi_namespace_node *node, info->prefix_node = node; info->pathname = METHOD_NAME__SRS; info->parameters = args; - info->parameter_type = ACPI_PARAM_ARGS; info->flags = ACPI_IGNORE_RETURN_VALUE; /* diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 6d85289f1c1..f3132aa47a6 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -6,6 +6,8 @@ #include <linux/init.h> #include <linux/kernel.h> #include <linux/acpi.h> +#include <linux/signal.h> +#include <linux/kthread.h> #include <acpi/acpi_drivers.h> #include <acpi/acinterp.h> /* for 
acpi_ex_eisa_id_to_string() */ @@ -92,17 +94,37 @@ acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, cha } static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL); -static int acpi_eject_operation(acpi_handle handle, int lockable) +static int acpi_bus_hot_remove_device(void *context) { + struct acpi_device *device; + acpi_handle handle = context; struct acpi_object_list arg_list; union acpi_object arg; acpi_status status = AE_OK; - /* - * TBD: evaluate _PS3? - */ + if (acpi_bus_get_device(handle, &device)) + return 0; + + if (!device) + return 0; + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Hot-removing device %s...\n", device->dev.bus_id)); + - if (lockable) { + if (acpi_bus_trim(device, 1)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Removing device failed\n")); + return -1; + } + + /* power off device */ + status = acpi_evaluate_object(handle, "_PS3", NULL, NULL); + if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) + ACPI_DEBUG_PRINT((ACPI_DB_WARN, + "Power-off device failed\n")); + + if (device->flags.lockable) { arg_list.count = 1; arg_list.pointer = &arg; arg.type = ACPI_TYPE_INTEGER; @@ -118,26 +140,22 @@ static int acpi_eject_operation(acpi_handle handle, int lockable) /* * TBD: _EJD support. */ - status = acpi_evaluate_object(handle, "_EJ0", &arg_list, NULL); - if (ACPI_FAILURE(status)) { - return (-ENODEV); - } + if (ACPI_FAILURE(status)) + return -ENODEV; - return (0); + return 0; } static ssize_t acpi_eject_store(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { - int result; int ret = count; - int islockable; acpi_status status; - acpi_handle handle; acpi_object_type type = 0; struct acpi_device *acpi_device = to_acpi_device(d); + struct task_struct *task; if ((!count) || (buf[0] != '1')) { return -EINVAL; @@ -154,18 +172,12 @@ acpi_eject_store(struct device *d, struct device_attribute *attr, goto err; } - islockable = acpi_device->flags.lockable; - handle = acpi_device->handle; - - result = acpi_bus_trim(acpi_device, 1); - - if (!result) - result = acpi_eject_operation(handle, islockable); - - if (result) { - ret = -EBUSY; - } - err: + /* remove the device in another thread to fix the deadlock issue */ + task = kthread_run(acpi_bus_hot_remove_device, + acpi_device->handle, "acpi_hot_remove_device"); + if (IS_ERR(task)) + ret = PTR_ERR(task); +err: return ret; } @@ -691,9 +703,7 @@ static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device) acpi_status status = 0; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *package = NULL; - union acpi_object in_arg[3]; - struct acpi_object_list arg_list = { 3, in_arg }; - acpi_status psw_status = AE_OK; + int psw_error; struct acpi_device_id button_device_ids[] = { {"PNP0C0D", 0}, @@ -725,39 +735,11 @@ static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device) * So it is necessary to call _DSW object first. Only when it is not * present will the _PSW object used. */ - /* - * Three agruments are needed for the _DSW object. - * Argument 0: enable/disable the wake capabilities - * When _DSW object is called to disable the wake capabilities, maybe - * the first argument is filled. The value of the other two agruments - * is meaningless. 
- */ - in_arg[0].type = ACPI_TYPE_INTEGER; - in_arg[0].integer.value = 0; - in_arg[1].type = ACPI_TYPE_INTEGER; - in_arg[1].integer.value = 0; - in_arg[2].type = ACPI_TYPE_INTEGER; - in_arg[2].integer.value = 0; - psw_status = acpi_evaluate_object(device->handle, "_DSW", - &arg_list, NULL); - if (ACPI_FAILURE(psw_status) && (psw_status != AE_NOT_FOUND)) - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "error in evaluate _DSW\n")); - /* - * When the _DSW object is not present, OSPM will call _PSW object. - */ - if (psw_status == AE_NOT_FOUND) { - /* - * Only one agruments is required for the _PSW object. - * agrument 0: enable/disable the wake capabilities - */ - arg_list.count = 1; - in_arg[0].integer.value = 0; - psw_status = acpi_evaluate_object(device->handle, "_PSW", - &arg_list, NULL); - if (ACPI_FAILURE(psw_status) && (psw_status != AE_NOT_FOUND)) - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "error in " - "evaluate _PSW\n")); - } + psw_error = acpi_device_sleep_wake(device, 0, 0, 0); + if (psw_error) + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "error in _DSW or _PSW evaluation\n")); + /* Power button, Lid switch always enable wakeup */ if (!acpi_match_device_ids(device, button_device_ids)) device->wakeup.flags.run_wake = 1; diff --git a/drivers/acpi/sleep/main.c b/drivers/acpi/sleep/main.c index 495c63a3e0a..0489a7d1d42 100644 --- a/drivers/acpi/sleep/main.c +++ b/drivers/acpi/sleep/main.c @@ -24,10 +24,6 @@ u8 sleep_states[ACPI_S_STATE_COUNT]; -#ifdef CONFIG_PM_SLEEP -static u32 acpi_target_sleep_state = ACPI_STATE_S0; -#endif - static int acpi_sleep_prepare(u32 acpi_state) { #ifdef CONFIG_ACPI_SLEEP @@ -49,9 +45,96 @@ static int acpi_sleep_prepare(u32 acpi_state) return 0; } -#ifdef CONFIG_SUSPEND -static struct platform_suspend_ops acpi_suspend_ops; +#ifdef CONFIG_PM_SLEEP +static u32 acpi_target_sleep_state = ACPI_STATE_S0; + +/* + * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the + * user to request that behavior by using the 'acpi_old_suspend_ordering' + * kernel command line option that causes the following variable to be set. + */ +static bool old_suspend_ordering; +void __init acpi_old_suspend_ordering(void) +{ + old_suspend_ordering = true; +} + +/** + * acpi_pm_disable_gpes - Disable the GPEs. + */ +static int acpi_pm_disable_gpes(void) +{ + acpi_hw_disable_all_gpes(); + return 0; +} + +/** + * __acpi_pm_prepare - Prepare the platform to enter the target state. + * + * If necessary, set the firmware waking vector and do arch-specific + * nastiness to get the wakeup code to the waking vector. + */ +static int __acpi_pm_prepare(void) +{ + int error = acpi_sleep_prepare(acpi_target_sleep_state); + + if (error) + acpi_target_sleep_state = ACPI_STATE_S0; + return error; +} + +/** + * acpi_pm_prepare - Prepare the platform to enter the target sleep + * state and disable the GPEs. + */ +static int acpi_pm_prepare(void) +{ + int error = __acpi_pm_prepare(); + + if (!error) + acpi_hw_disable_all_gpes(); + return error; +} + +/** + * acpi_pm_finish - Instruct the platform to leave a sleep state. + * + * This is called after we wake back up (or if entering the sleep state + * failed). 
+ */ +static void acpi_pm_finish(void) +{ + u32 acpi_state = acpi_target_sleep_state; + + if (acpi_state == ACPI_STATE_S0) + return; + + printk(KERN_INFO PREFIX "Waking up from system sleep state S%d\n", + acpi_state); + acpi_disable_wakeup_device(acpi_state); + acpi_leave_sleep_state(acpi_state); + + /* reset firmware waking vector */ + acpi_set_firmware_waking_vector((acpi_physical_address) 0); + + acpi_target_sleep_state = ACPI_STATE_S0; +} + +/** + * acpi_pm_end - Finish up suspend sequence. + */ +static void acpi_pm_end(void) +{ + /* + * This is necessary in case acpi_pm_finish() is not called during a + * failing transition to a sleep state. + */ + acpi_target_sleep_state = ACPI_STATE_S0; +} +#endif /* CONFIG_PM_SLEEP */ + +#ifdef CONFIG_SUSPEND extern void do_suspend_lowlevel(void); static u32 acpi_suspend_states[] = { @@ -61,13 +144,10 @@ static u32 acpi_suspend_states[] = { [PM_SUSPEND_MAX] = ACPI_STATE_S5 }; -static int init_8259A_after_S1; - /** * acpi_suspend_begin - Set the target system sleep state to the state * associated with given @pm_state, if supported. */ - static int acpi_suspend_begin(suspend_state_t pm_state) { u32 acpi_state = acpi_suspend_states[pm_state]; @@ -84,25 +164,6 @@ static int acpi_suspend_begin(suspend_state_t pm_state) } /** - * acpi_suspend_prepare - Do preliminary suspend work. - * - * If necessary, set the firmware waking vector and do arch-specific - * nastiness to get the wakeup code to the waking vector. - */ - -static int acpi_suspend_prepare(void) -{ - int error = acpi_sleep_prepare(acpi_target_sleep_state); - - if (error) { - acpi_target_sleep_state = ACPI_STATE_S0; - return error; - } - - return ACPI_SUCCESS(acpi_hw_disable_all_gpes()) ? 0 : -EFAULT; -} - -/** * acpi_suspend_enter - Actually enter a sleep state. * @pm_state: ignored * @@ -110,7 +171,6 @@ static int acpi_suspend_prepare(void) * assembly, which in turn call acpi_enter_sleep_state(). * It's unfortunate, but it works. Please fix if you're feeling frisky. */ - static int acpi_suspend_enter(suspend_state_t pm_state) { acpi_status status = AE_OK; @@ -167,46 +227,6 @@ static int acpi_suspend_enter(suspend_state_t pm_state) return ACPI_SUCCESS(status) ? 0 : -EFAULT; } -/** - * acpi_suspend_finish - Instruct the platform to leave a sleep state. - * - * This is called after we wake back up (or if entering the sleep state - * failed). - */ - -static void acpi_suspend_finish(void) -{ - u32 acpi_state = acpi_target_sleep_state; - - acpi_disable_wakeup_device(acpi_state); - acpi_leave_sleep_state(acpi_state); - - /* reset firmware waking vector */ - acpi_set_firmware_waking_vector((acpi_physical_address) 0); - - acpi_target_sleep_state = ACPI_STATE_S0; - -#ifdef CONFIG_X86 - if (init_8259A_after_S1) { - printk("Broken toshiba laptop -> kicking interrupts\n"); - init_8259A(0); - } -#endif -} - -/** - * acpi_suspend_end - Finish up suspend sequence. - */ - -static void acpi_suspend_end(void) -{ - /* - * This is necessary in case acpi_suspend_finish() is not called during a - * failing transition to a sleep state. 
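These platform callbacks are not invoked by drivers directly; they run when user space requests a sleep state, for example by writing "mem" to /sys/power/state. A minimal trigger, for illustration only (requires root and a suspend-capable machine):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    /* Writing "mem" here is what ultimately drives the platform_suspend_ops
     * callbacks (begin -> prepare -> enter -> finish -> end). */
    int fd = open("/sys/power/state", O_WRONLY);

    if (fd < 0) {
        perror("/sys/power/state");
        return 1;
    }
    if (write(fd, "mem", 3) != 3)
        perror("suspend request");
    close(fd);
    return 0;
}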
- */ - acpi_target_sleep_state = ACPI_STATE_S0; -} - static int acpi_suspend_state_valid(suspend_state_t pm_state) { u32 acpi_state; @@ -226,30 +246,39 @@ static int acpi_suspend_state_valid(suspend_state_t pm_state) static struct platform_suspend_ops acpi_suspend_ops = { .valid = acpi_suspend_state_valid, .begin = acpi_suspend_begin, - .prepare = acpi_suspend_prepare, + .prepare = acpi_pm_prepare, .enter = acpi_suspend_enter, - .finish = acpi_suspend_finish, - .end = acpi_suspend_end, + .finish = acpi_pm_finish, + .end = acpi_pm_end, }; -/* - * Toshiba fails to preserve interrupts over S1, reinitialization - * of 8259 is needed after S1 resume. +/** + * acpi_suspend_begin_old - Set the target system sleep state to the + * state associated with given @pm_state, if supported, and + * execute the _PTS control method. This function is used if the + * pre-ACPI 2.0 suspend ordering has been requested. */ -static int __init init_ints_after_s1(const struct dmi_system_id *d) +static int acpi_suspend_begin_old(suspend_state_t pm_state) { - printk(KERN_WARNING "%s with broken S1 detected.\n", d->ident); - init_8259A_after_S1 = 1; - return 0; + int error = acpi_suspend_begin(pm_state); + + if (!error) + error = __acpi_pm_prepare(); + return error; } -static struct dmi_system_id __initdata acpisleep_dmi_table[] = { - { - .callback = init_ints_after_s1, - .ident = "Toshiba Satellite 4030cdt", - .matches = {DMI_MATCH(DMI_PRODUCT_NAME, "S4030CDT/4.3"),}, - }, - {}, +/* + * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has + * been requested. + */ +static struct platform_suspend_ops acpi_suspend_ops_old = { + .valid = acpi_suspend_state_valid, + .begin = acpi_suspend_begin_old, + .prepare = acpi_pm_disable_gpes, + .enter = acpi_suspend_enter, + .finish = acpi_pm_finish, + .end = acpi_pm_end, + .recover = acpi_pm_finish, }; #endif /* CONFIG_SUSPEND */ @@ -257,22 +286,9 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = { static int acpi_hibernation_begin(void) { acpi_target_sleep_state = ACPI_STATE_S4; - return 0; } -static int acpi_hibernation_prepare(void) -{ - int error = acpi_sleep_prepare(ACPI_STATE_S4); - - if (error) { - acpi_target_sleep_state = ACPI_STATE_S0; - return error; - } - - return ACPI_SUCCESS(acpi_hw_disable_all_gpes()) ? 0 : -EFAULT; -} - static int acpi_hibernation_enter(void) { acpi_status status = AE_OK; @@ -302,52 +318,55 @@ static void acpi_hibernation_leave(void) acpi_leave_sleep_state_prep(ACPI_STATE_S4); } -static void acpi_hibernation_finish(void) +static void acpi_pm_enable_gpes(void) { - acpi_disable_wakeup_device(ACPI_STATE_S4); - acpi_leave_sleep_state(ACPI_STATE_S4); - - /* reset firmware waking vector */ - acpi_set_firmware_waking_vector((acpi_physical_address) 0); - - acpi_target_sleep_state = ACPI_STATE_S0; + acpi_hw_enable_all_runtime_gpes(); } -static void acpi_hibernation_end(void) -{ - /* - * This is necessary in case acpi_hibernation_finish() is not called - * during a failing transition to the sleep state. 
- */ - acpi_target_sleep_state = ACPI_STATE_S0; -} +static struct platform_hibernation_ops acpi_hibernation_ops = { + .begin = acpi_hibernation_begin, + .end = acpi_pm_end, + .pre_snapshot = acpi_pm_prepare, + .finish = acpi_pm_finish, + .prepare = acpi_pm_prepare, + .enter = acpi_hibernation_enter, + .leave = acpi_hibernation_leave, + .pre_restore = acpi_pm_disable_gpes, + .restore_cleanup = acpi_pm_enable_gpes, +}; -static int acpi_hibernation_pre_restore(void) +/** + * acpi_hibernation_begin_old - Set the target system sleep state to + * ACPI_STATE_S4 and execute the _PTS control method. This + * function is used if the pre-ACPI 2.0 suspend ordering has been + * requested. + */ +static int acpi_hibernation_begin_old(void) { - acpi_status status; - - status = acpi_hw_disable_all_gpes(); - - return ACPI_SUCCESS(status) ? 0 : -EFAULT; -} + int error = acpi_sleep_prepare(ACPI_STATE_S4); -static void acpi_hibernation_restore_cleanup(void) -{ - acpi_hw_enable_all_runtime_gpes(); + if (!error) + acpi_target_sleep_state = ACPI_STATE_S4; + return error; } -static struct platform_hibernation_ops acpi_hibernation_ops = { - .begin = acpi_hibernation_begin, - .end = acpi_hibernation_end, - .pre_snapshot = acpi_hibernation_prepare, - .finish = acpi_hibernation_finish, - .prepare = acpi_hibernation_prepare, +/* + * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has + * been requested. + */ +static struct platform_hibernation_ops acpi_hibernation_ops_old = { + .begin = acpi_hibernation_begin_old, + .end = acpi_pm_end, + .pre_snapshot = acpi_pm_disable_gpes, + .finish = acpi_pm_finish, + .prepare = acpi_pm_disable_gpes, .enter = acpi_hibernation_enter, .leave = acpi_hibernation_leave, - .pre_restore = acpi_hibernation_pre_restore, - .restore_cleanup = acpi_hibernation_restore_cleanup, + .pre_restore = acpi_pm_disable_gpes, + .restore_cleanup = acpi_pm_enable_gpes, + .recover = acpi_pm_finish, }; -#endif /* CONFIG_HIBERNATION */ +#endif /* CONFIG_HIBERNATION */ int acpi_suspend(u32 acpi_state) { @@ -368,8 +387,8 @@ int acpi_suspend(u32 acpi_state) /** * acpi_pm_device_sleep_state - return preferred power state of ACPI device * in the system sleep state given by %acpi_target_sleep_state - * @dev: device to examine - * @wake: if set, the device should be able to wake up the system + * @dev: device to examine; its driver model wakeup flags control + * whether it should be able to wake up the system * @d_min_p: used to store the upper limit of allowed states range * Return value: preferred power state of the device on success, -ENODEV on * failure (ie. if there's no 'struct acpi_device' for @dev) @@ -387,7 +406,7 @@ int acpi_suspend(u32 acpi_state) * via @wake. */ -int acpi_pm_device_sleep_state(struct device *dev, int wake, int *d_min_p) +int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p) { acpi_handle handle = DEVICE_ACPI_HANDLE(dev); struct acpi_device *adev; @@ -426,7 +445,7 @@ int acpi_pm_device_sleep_state(struct device *dev, int wake, int *d_min_p) * can wake the system. _S0W may be valid, too. 
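The "driver model wakeup flags" mentioned above are what device_may_wakeup() reads, and user space controls them through each device's power/wakeup attribute. A small sketch; the device path must be supplied by the caller, nothing here is hard-coded:

#include <stdio.h>

int main(int argc, char **argv)
{
    char path[512];
    FILE *f;

    if (argc < 3) {
        fprintf(stderr, "usage: %s /sys/devices/... enabled|disabled\n",
                argv[0]);
        return 1;
    }
    /* .../power/wakeup is the flag device_may_wakeup() reflects */
    snprintf(path, sizeof(path), "%s/power/wakeup", argv[1]);
    f = fopen(path, "w");
    if (!f) {
        perror(path);
        return 1;
    }
    fputs(argv[2], f);
    fclose(f);
    return 0;
}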
*/ if (acpi_target_sleep_state == ACPI_STATE_S0 || - (wake && adev->wakeup.state.enabled && + (device_may_wakeup(dev) && adev->wakeup.state.enabled && adev->wakeup.sleep_state <= acpi_target_sleep_state)) { acpi_status status; @@ -448,6 +467,31 @@ int acpi_pm_device_sleep_state(struct device *dev, int wake, int *d_min_p) *d_min_p = d_min; return d_max; } + +/** + * acpi_pm_device_sleep_wake - enable or disable the system wake-up + * capability of given device + * @dev: device to handle + * @enable: 'true' - enable, 'false' - disable the wake-up capability + */ +int acpi_pm_device_sleep_wake(struct device *dev, bool enable) +{ + acpi_handle handle; + struct acpi_device *adev; + + if (!device_may_wakeup(dev)) + return -EINVAL; + + handle = DEVICE_ACPI_HANDLE(dev); + if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) { + printk(KERN_DEBUG "ACPI handle has no context!\n"); + return -ENODEV; + } + + return enable ? + acpi_enable_wakeup_device_power(adev, acpi_target_sleep_state) : + acpi_disable_wakeup_device_power(adev); +} #endif static void acpi_power_off_prepare(void) @@ -472,8 +516,6 @@ int __init acpi_sleep_init(void) u8 type_a, type_b; #ifdef CONFIG_SUSPEND int i = 0; - - dmi_check_system(acpisleep_dmi_table); #endif if (acpi_disabled) @@ -491,13 +533,15 @@ int __init acpi_sleep_init(void) } } - suspend_set_ops(&acpi_suspend_ops); + suspend_set_ops(old_suspend_ordering ? + &acpi_suspend_ops_old : &acpi_suspend_ops); #endif #ifdef CONFIG_HIBERNATION status = acpi_get_sleep_type_data(ACPI_STATE_S4, &type_a, &type_b); if (ACPI_SUCCESS(status)) { - hibernation_set_ops(&acpi_hibernation_ops); + hibernation_set_ops(old_suspend_ordering ? + &acpi_hibernation_ops_old : &acpi_hibernation_ops); sleep_states[ACPI_STATE_S4] = 1; printk(" S4"); } diff --git a/drivers/acpi/sleep/wakeup.c b/drivers/acpi/sleep/wakeup.c index ed8e41becf0..38655eb132d 100644 --- a/drivers/acpi/sleep/wakeup.c +++ b/drivers/acpi/sleep/wakeup.c @@ -42,7 +42,7 @@ void acpi_enable_wakeup_device_prep(u8 sleep_state) continue; spin_unlock(&acpi_device_lock); - acpi_enable_wakeup_device_power(dev); + acpi_enable_wakeup_device_power(dev, sleep_state); spin_lock(&acpi_device_lock); } spin_unlock(&acpi_device_lock); @@ -66,13 +66,15 @@ void acpi_enable_wakeup_device(u8 sleep_state) list_for_each_safe(node, next, &acpi_wakeup_device_list) { struct acpi_device *dev = container_of(node, struct acpi_device, wakeup_list); + if (!dev->wakeup.flags.valid) continue; + /* If users want to disable run-wake GPE, * we only disable it for wake and leave it for runtime */ - if (!dev->wakeup.state.enabled || - sleep_state > (u32) dev->wakeup.sleep_state) { + if ((!dev->wakeup.state.enabled && !dev->wakeup.flags.prepared) + || sleep_state > (u32) dev->wakeup.sleep_state) { if (dev->wakeup.flags.run_wake) { spin_unlock(&acpi_device_lock); /* set_gpe_type will disable GPE, leave it like that */ @@ -110,8 +112,9 @@ void acpi_disable_wakeup_device(u8 sleep_state) if (!dev->wakeup.flags.valid) continue; - if (!dev->wakeup.state.enabled || - sleep_state > (u32) dev->wakeup.sleep_state) { + + if ((!dev->wakeup.state.enabled && !dev->wakeup.flags.prepared) + || sleep_state > (u32) dev->wakeup.sleep_state) { if (dev->wakeup.flags.run_wake) { spin_unlock(&acpi_device_lock); acpi_set_gpe_type(dev->wakeup.gpe_device, diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c index 5bd2dec9a7a..d8e3f153b29 100644 --- a/drivers/acpi/system.c +++ b/drivers/acpi/system.c @@ -167,7 +167,13 @@ static int acpi_system_sysfs_init(void) #define COUNT_ERROR 
2 /* other */ #define NUM_COUNTERS_EXTRA 3 -static u32 *all_counters; +#define ACPI_EVENT_VALID 0x01 +struct event_counter { + u32 count; + u32 flags; +}; + +static struct event_counter *all_counters; static u32 num_gpes; static u32 num_counters; static struct attribute **all_attrs; @@ -202,9 +208,44 @@ static int count_num_gpes(void) return count; } +static int get_gpe_device(int index, acpi_handle *handle) +{ + struct acpi_gpe_xrupt_info *gpe_xrupt_info; + struct acpi_gpe_block_info *gpe_block; + acpi_cpu_flags flags; + struct acpi_namespace_node *node; + + flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); + + gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head; + while (gpe_xrupt_info) { + gpe_block = gpe_xrupt_info->gpe_block_list_head; + node = gpe_block->node; + while (gpe_block) { + index -= gpe_block->register_count * + ACPI_GPE_REGISTER_WIDTH; + if (index < 0) { + acpi_os_release_lock(acpi_gbl_gpe_lock, flags); + /* return NULL if it's FADT GPE */ + if (node->type != ACPI_TYPE_DEVICE) + *handle = NULL; + else + *handle = node; + return 0; + } + node = gpe_block->node; + gpe_block = gpe_block->next; + } + gpe_xrupt_info = gpe_xrupt_info->next; + } + acpi_os_release_lock(acpi_gbl_gpe_lock, flags); + + return -ENODEV; +} + static void delete_gpe_attr_array(void) { - u32 *tmp = all_counters; + struct event_counter *tmp = all_counters; all_counters = NULL; kfree(tmp); @@ -230,9 +271,10 @@ void acpi_os_gpe_count(u32 gpe_number) return; if (gpe_number < num_gpes) - all_counters[gpe_number]++; + all_counters[gpe_number].count++; else - all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR]++; + all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR]. + count++; return; } @@ -243,44 +285,144 @@ void acpi_os_fixed_event_count(u32 event_number) return; if (event_number < ACPI_NUM_FIXED_EVENTS) - all_counters[num_gpes + event_number]++; + all_counters[num_gpes + event_number].count++; else - all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR]++; + all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR]. 
+ count++; return; } +static int get_status(u32 index, acpi_event_status *status, acpi_handle *handle) +{ + int result = 0; + + if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS) + goto end; + + if (index < num_gpes) { + result = get_gpe_device(index, handle); + if (result) { + ACPI_EXCEPTION((AE_INFO, AE_NOT_FOUND, + "Invalid GPE 0x%x\n", index)); + goto end; + } + result = acpi_get_gpe_status(*handle, index, + ACPI_NOT_ISR, status); + } else if (index < (num_gpes + ACPI_NUM_FIXED_EVENTS)) + result = acpi_get_event_status(index - num_gpes, status); + + /* + * sleep/power button GPE/Fixed Event is enabled after acpi_system_init, + * check the status at runtime and mark it as valid once it's enabled + */ + if (!result && (*status & ACPI_EVENT_FLAG_ENABLED)) + all_counters[index].flags |= ACPI_EVENT_VALID; +end: + return result; +} + static ssize_t counter_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { - all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI] = + int index = attr - counter_attrs; + int size; + acpi_handle handle; + acpi_event_status status; + int result = 0; + + all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI].count = acpi_irq_handled; - all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE] = + all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE].count = acpi_gpe_count; - return sprintf(buf, "%d\n", all_counters[attr - counter_attrs]); + size = sprintf(buf, "%8d", all_counters[index].count); + + /* "gpe_all" or "sci" */ + if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS) + goto end; + + result = get_status(index, &status, &handle); + if (result) + goto end; + + if (!(all_counters[index].flags & ACPI_EVENT_VALID)) + size += sprintf(buf + size, " invalid"); + else if (status & ACPI_EVENT_FLAG_ENABLED) + size += sprintf(buf + size, " enable"); + else + size += sprintf(buf + size, " disable"); + +end: + size += sprintf(buf + size, "\n"); + return result ? result : size; } /* * counter_set() sets the specified counter. * setting the total "sci" file to any value clears all counters. + * enable/disable/clear a gpe/fixed event in user space. 
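The counters described above live under /sys/firmware/acpi/interrupts; with this change each GPE/fixed-event file also reports whether the event is enabled, disabled, or invalid, and accepts "enable", "disable", or "clear" on write. A small reader (the writes mentioned in the trailing comment need root):

#include <dirent.h>
#include <stdio.h>

int main(void)
{
    const char *base = "/sys/firmware/acpi/interrupts";
    DIR *dir = opendir(base);
    struct dirent *de;
    char path[512], line[128];
    FILE *f;

    if (!dir) {
        perror(base);
        return 1;
    }
    while ((de = readdir(dir)) != NULL) {
        if (de->d_name[0] == '.')
            continue;
        snprintf(path, sizeof(path), "%s/%s", base, de->d_name);
        f = fopen(path, "r");
        if (!f)
            continue;
        /* GPE/fixed-event files show "<count> enable|disable|invalid" */
        if (fgets(line, sizeof(line), f))
            printf("%-12s %s", de->d_name, line);
        fclose(f);
    }
    closedir(dir);
    /* To clear or mask an event from user space:
     *   echo clear   > /sys/firmware/acpi/interrupts/gpe10
     *   echo disable > /sys/firmware/acpi/interrupts/gpe10
     */
    return 0;
}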
*/ static ssize_t counter_set(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t size) { int index = attr - counter_attrs; + acpi_event_status status; + acpi_handle handle; + int result = 0; if (index == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI) { int i; for (i = 0; i < num_counters; ++i) - all_counters[i] = 0; + all_counters[i].count = 0; acpi_gpe_count = 0; acpi_irq_handled = 0; + goto end; + } + /* show the event status for both GPEs and Fixed Events */ + result = get_status(index, &status, &handle); + if (result) + goto end; + + if (!(all_counters[index].flags & ACPI_EVENT_VALID)) { + ACPI_DEBUG_PRINT((ACPI_DB_WARN, + "Can not change Invalid GPE/Fixed Event status\n")); + return -EINVAL; + } + + if (index < num_gpes) { + if (!strcmp(buf, "disable\n") && + (status & ACPI_EVENT_FLAG_ENABLED)) + result = acpi_disable_gpe(handle, index, ACPI_NOT_ISR); + else if (!strcmp(buf, "enable\n") && + !(status & ACPI_EVENT_FLAG_ENABLED)) + result = acpi_enable_gpe(handle, index, ACPI_NOT_ISR); + else if (!strcmp(buf, "clear\n") && + (status & ACPI_EVENT_FLAG_SET)) + result = acpi_clear_gpe(handle, index, ACPI_NOT_ISR); + else + all_counters[index].count = strtoul(buf, NULL, 0); + } else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) { + int event = index - num_gpes; + if (!strcmp(buf, "disable\n") && + (status & ACPI_EVENT_FLAG_ENABLED)) + result = acpi_disable_event(event, ACPI_NOT_ISR); + else if (!strcmp(buf, "enable\n") && + !(status & ACPI_EVENT_FLAG_ENABLED)) + result = acpi_enable_event(event, ACPI_NOT_ISR); + else if (!strcmp(buf, "clear\n") && + (status & ACPI_EVENT_FLAG_SET)) + result = acpi_clear_event(event); + else + all_counters[index].count = strtoul(buf, NULL, 0); } else - all_counters[index] = strtoul(buf, NULL, 0); + all_counters[index].count = strtoul(buf, NULL, 0); - return size; + if (ACPI_FAILURE(result)) + result = -EINVAL; +end: + return result ? 
result : size; } void acpi_irq_stats_init(void) @@ -298,7 +440,8 @@ void acpi_irq_stats_init(void) if (all_attrs == NULL) return; - all_counters = kzalloc(sizeof(u32) * (num_counters), GFP_KERNEL); + all_counters = kzalloc(sizeof(struct event_counter) * (num_counters), + GFP_KERNEL); if (all_counters == NULL) goto fail; diff --git a/drivers/acpi/tables/tbfadt.c b/drivers/acpi/tables/tbfadt.c index 949d4114eb9..ccb5b64bbef 100644 --- a/drivers/acpi/tables/tbfadt.c +++ b/drivers/acpi/tables/tbfadt.c @@ -124,7 +124,7 @@ static struct acpi_fadt_info fadt_info_table[] = { static void inline acpi_tb_init_generic_address(struct acpi_generic_address *generic_address, - u8 bit_width, u64 address) + u8 byte_width, u64 address) { /* @@ -136,7 +136,7 @@ acpi_tb_init_generic_address(struct acpi_generic_address *generic_address, /* All other fields are byte-wide */ generic_address->space_id = ACPI_ADR_SPACE_SYSTEM_IO; - generic_address->bit_width = bit_width; + generic_address->bit_width = byte_width << 3; generic_address->bit_offset = 0; generic_address->access_width = 0; } @@ -155,7 +155,7 @@ acpi_tb_init_generic_address(struct acpi_generic_address *generic_address, * ******************************************************************************/ -void acpi_tb_parse_fadt(acpi_native_uint table_index, u8 flags) +void acpi_tb_parse_fadt(u32 table_index, u8 flags) { u32 length; struct acpi_table_header *table; @@ -280,7 +280,7 @@ static void acpi_tb_convert_fadt(void) { u8 pm1_register_length; struct acpi_generic_address *target; - acpi_native_uint i; + u32 i; /* Update the local FADT table header length */ @@ -343,9 +343,11 @@ static void acpi_tb_convert_fadt(void) * * The PM event blocks are split into two register blocks, first is the * PM Status Register block, followed immediately by the PM Enable Register - * block. Each is of length (pm1_event_length/2) + * block. Each is of length (xpm1x_event_block.bit_width/2) */ - pm1_register_length = (u8) ACPI_DIV_2(acpi_gbl_FADT.pm1_event_length); + WARN_ON(ACPI_MOD_16(acpi_gbl_FADT.xpm1a_event_block.bit_width)); + pm1_register_length = (u8) ACPI_DIV_16(acpi_gbl_FADT + .xpm1a_event_block.bit_width); /* The PM1A register block is required */ @@ -360,14 +362,17 @@ static void acpi_tb_convert_fadt(void) /* The PM1B register block is optional, ignore if not present */ if (acpi_gbl_FADT.xpm1b_event_block.address) { + WARN_ON(ACPI_MOD_16(acpi_gbl_FADT.xpm1b_event_block.bit_width)); + pm1_register_length = (u8) ACPI_DIV_16(acpi_gbl_FADT + .xpm1b_event_block + .bit_width); acpi_tb_init_generic_address(&acpi_gbl_xpm1b_enable, pm1_register_length, (acpi_gbl_FADT.xpm1b_event_block. 
address + pm1_register_length)); /* Don't forget to copy space_id of the GAS */ acpi_gbl_xpm1b_enable.space_id = - acpi_gbl_FADT.xpm1a_event_block.space_id; - + acpi_gbl_FADT.xpm1b_event_block.space_id; } } @@ -396,7 +401,7 @@ static void acpi_tb_validate_fadt(void) u32 *address32; struct acpi_generic_address *address64; u8 length; - acpi_native_uint i; + u32 i; /* Examine all of the 64-bit extended address fields (X fields) */ diff --git a/drivers/acpi/tables/tbfind.c b/drivers/acpi/tables/tbfind.c index 9ca3afc98c8..531584defbb 100644 --- a/drivers/acpi/tables/tbfind.c +++ b/drivers/acpi/tables/tbfind.c @@ -65,10 +65,9 @@ ACPI_MODULE_NAME("tbfind") ******************************************************************************/ acpi_status acpi_tb_find_table(char *signature, - char *oem_id, - char *oem_table_id, acpi_native_uint * table_index) + char *oem_id, char *oem_table_id, u32 *table_index) { - acpi_native_uint i; + u32 i; acpi_status status; struct acpi_table_header header; diff --git a/drivers/acpi/tables/tbinstal.c b/drivers/acpi/tables/tbinstal.c index 5336ce88f89..b22185f55a1 100644 --- a/drivers/acpi/tables/tbinstal.c +++ b/drivers/acpi/tables/tbinstal.c @@ -107,11 +107,10 @@ acpi_status acpi_tb_verify_table(struct acpi_table_desc *table_desc) ******************************************************************************/ acpi_status -acpi_tb_add_table(struct acpi_table_desc *table_desc, - acpi_native_uint * table_index) +acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index) { - acpi_native_uint i; - acpi_native_uint length; + u32 i; + u32 length; acpi_status status = AE_OK; ACPI_FUNCTION_TRACE(tb_add_table); @@ -207,8 +206,8 @@ acpi_status acpi_tb_resize_root_table_list(void) /* Increase the Table Array size */ - tables = ACPI_ALLOCATE_ZEROED((acpi_gbl_root_table_list.size + - ACPI_ROOT_TABLE_SIZE_INCREMENT) + tables = ACPI_ALLOCATE_ZEROED(((acpi_size) acpi_gbl_root_table_list. 
+ size + ACPI_ROOT_TABLE_SIZE_INCREMENT) * sizeof(struct acpi_table_desc)); if (!tables) { ACPI_ERROR((AE_INFO, @@ -220,7 +219,7 @@ acpi_status acpi_tb_resize_root_table_list(void) if (acpi_gbl_root_table_list.tables) { ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables, - acpi_gbl_root_table_list.size * + (acpi_size) acpi_gbl_root_table_list.size * sizeof(struct acpi_table_desc)); if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) { @@ -253,7 +252,7 @@ acpi_status acpi_tb_resize_root_table_list(void) acpi_status acpi_tb_store_table(acpi_physical_address address, struct acpi_table_header *table, - u32 length, u8 flags, acpi_native_uint * table_index) + u32 length, u8 flags, u32 *table_index) { acpi_status status = AE_OK; @@ -334,7 +333,7 @@ void acpi_tb_delete_table(struct acpi_table_desc *table_desc) void acpi_tb_terminate(void) { - acpi_native_uint i; + u32 i; ACPI_FUNCTION_TRACE(tb_terminate); @@ -374,7 +373,7 @@ void acpi_tb_terminate(void) * ******************************************************************************/ -void acpi_tb_delete_namespace_by_owner(acpi_native_uint table_index) +void acpi_tb_delete_namespace_by_owner(u32 table_index) { acpi_owner_id owner_id; @@ -403,7 +402,7 @@ void acpi_tb_delete_namespace_by_owner(acpi_native_uint table_index) * ******************************************************************************/ -acpi_status acpi_tb_allocate_owner_id(acpi_native_uint table_index) +acpi_status acpi_tb_allocate_owner_id(u32 table_index) { acpi_status status = AE_BAD_PARAMETER; @@ -431,7 +430,7 @@ acpi_status acpi_tb_allocate_owner_id(acpi_native_uint table_index) * ******************************************************************************/ -acpi_status acpi_tb_release_owner_id(acpi_native_uint table_index) +acpi_status acpi_tb_release_owner_id(u32 table_index) { acpi_status status = AE_BAD_PARAMETER; @@ -462,8 +461,7 @@ acpi_status acpi_tb_release_owner_id(acpi_native_uint table_index) * ******************************************************************************/ -acpi_status -acpi_tb_get_owner_id(acpi_native_uint table_index, acpi_owner_id * owner_id) +acpi_status acpi_tb_get_owner_id(u32 table_index, acpi_owner_id *owner_id) { acpi_status status = AE_BAD_PARAMETER; @@ -490,7 +488,7 @@ acpi_tb_get_owner_id(acpi_native_uint table_index, acpi_owner_id * owner_id) * ******************************************************************************/ -u8 acpi_tb_is_table_loaded(acpi_native_uint table_index) +u8 acpi_tb_is_table_loaded(u32 table_index) { u8 is_loaded = FALSE; @@ -518,7 +516,7 @@ u8 acpi_tb_is_table_loaded(acpi_native_uint table_index) * ******************************************************************************/ -void acpi_tb_set_table_loaded_flag(acpi_native_uint table_index, u8 is_loaded) +void acpi_tb_set_table_loaded_flag(u32 table_index, u8 is_loaded) { (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); diff --git a/drivers/acpi/tables/tbutils.c b/drivers/acpi/tables/tbutils.c index bc019b9b6a6..0cc92ef5236 100644 --- a/drivers/acpi/tables/tbutils.c +++ b/drivers/acpi/tables/tbutils.c @@ -49,8 +49,8 @@ ACPI_MODULE_NAME("tbutils") /* Local prototypes */ static acpi_physical_address -acpi_tb_get_root_table_entry(u8 * table_entry, - acpi_native_uint table_entry_size); +acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size); + /******************************************************************************* * * FUNCTION: acpi_tb_check_xsdt @@ -238,7 +238,7 @@ acpi_status acpi_tb_verify_checksum(struct acpi_table_header 
*table, u32 length) * ******************************************************************************/ -u8 acpi_tb_checksum(u8 * buffer, acpi_native_uint length) +u8 acpi_tb_checksum(u8 *buffer, u32 length) { u8 sum = 0; u8 *end = buffer + length; @@ -268,7 +268,7 @@ u8 acpi_tb_checksum(u8 * buffer, acpi_native_uint length) void acpi_tb_install_table(acpi_physical_address address, - u8 flags, char *signature, acpi_native_uint table_index) + u8 flags, char *signature, u32 table_index) { struct acpi_table_header *table; @@ -336,8 +336,7 @@ acpi_tb_install_table(acpi_physical_address address, ******************************************************************************/ static acpi_physical_address -acpi_tb_get_root_table_entry(u8 * table_entry, - acpi_native_uint table_entry_size) +acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size) { u64 address64; @@ -395,8 +394,8 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address, u8 flags) { struct acpi_table_rsdp *rsdp; - acpi_native_uint table_entry_size; - acpi_native_uint i; + u32 table_entry_size; + u32 i; u32 table_count; struct acpi_table_header *table; acpi_physical_address address; diff --git a/drivers/acpi/tables/tbxface.c b/drivers/acpi/tables/tbxface.c index 0e319604d3e..fd7770aa106 100644 --- a/drivers/acpi/tables/tbxface.c +++ b/drivers/acpi/tables/tbxface.c @@ -125,7 +125,7 @@ acpi_initialize_tables(struct acpi_table_desc * initial_table_array, /* Root Table Array has been statically allocated by the host */ ACPI_MEMSET(initial_table_array, 0, - initial_table_count * + (acpi_size) initial_table_count * sizeof(struct acpi_table_desc)); acpi_gbl_root_table_list.tables = initial_table_array; @@ -183,9 +183,9 @@ acpi_status acpi_reallocate_root_table(void) return_ACPI_STATUS(AE_SUPPORT); } - new_size = - (acpi_gbl_root_table_list.count + - ACPI_ROOT_TABLE_SIZE_INCREMENT) * sizeof(struct acpi_table_desc); + new_size = ((acpi_size) acpi_gbl_root_table_list.count + + ACPI_ROOT_TABLE_SIZE_INCREMENT) * + sizeof(struct acpi_table_desc); /* Create new array and copy the old array */ @@ -222,7 +222,7 @@ acpi_status acpi_reallocate_root_table(void) acpi_status acpi_load_table(struct acpi_table_header *table_ptr) { acpi_status status; - acpi_native_uint table_index; + u32 table_index; struct acpi_table_desc table_desc; if (!table_ptr) @@ -264,11 +264,10 @@ ACPI_EXPORT_SYMBOL(acpi_load_table) *****************************************************************************/ acpi_status acpi_get_table_header(char *signature, - acpi_native_uint instance, - struct acpi_table_header * out_table_header) + u32 instance, struct acpi_table_header *out_table_header) { - acpi_native_uint i; - acpi_native_uint j; + u32 i; + u32 j; struct acpi_table_header *header; /* Parameter validation */ @@ -378,10 +377,10 @@ ACPI_EXPORT_SYMBOL(acpi_unload_table_id) *****************************************************************************/ acpi_status acpi_get_table(char *signature, - acpi_native_uint instance, struct acpi_table_header **out_table) + u32 instance, struct acpi_table_header **out_table) { - acpi_native_uint i; - acpi_native_uint j; + u32 i; + u32 j; acpi_status status; /* Parameter validation */ @@ -435,8 +434,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_table) * ******************************************************************************/ acpi_status -acpi_get_table_by_index(acpi_native_uint table_index, - struct acpi_table_header ** table) +acpi_get_table_by_index(u32 table_index, struct acpi_table_header **table) { 
acpi_status status; @@ -493,7 +491,7 @@ static acpi_status acpi_tb_load_namespace(void) { acpi_status status; struct acpi_table_header *table; - acpi_native_uint i; + u32 i; ACPI_FUNCTION_TRACE(tb_load_namespace); diff --git a/drivers/acpi/tables/tbxfroot.c b/drivers/acpi/tables/tbxfroot.c index b8c0dfa084f..2d157e0f98d 100644 --- a/drivers/acpi/tables/tbxfroot.c +++ b/drivers/acpi/tables/tbxfroot.c @@ -118,7 +118,7 @@ static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp) * ******************************************************************************/ -acpi_status acpi_find_root_pointer(acpi_native_uint * table_address) +acpi_status acpi_find_root_pointer(acpi_size *table_address) { u8 *table_ptr; u8 *mem_rover; @@ -153,7 +153,7 @@ acpi_status acpi_find_root_pointer(acpi_native_uint * table_address) * 1b) Search EBDA paragraphs (EBDA is required to be a * minimum of 1_k length) */ - table_ptr = acpi_os_map_memory((acpi_native_uint) + table_ptr = acpi_os_map_memory((acpi_physical_address) physical_address, ACPI_EBDA_WINDOW_SIZE); if (!table_ptr) { diff --git a/drivers/acpi/utilities/utalloc.c b/drivers/acpi/utilities/utalloc.c index ede084829a7..3dfb8a442b2 100644 --- a/drivers/acpi/utilities/utalloc.c +++ b/drivers/acpi/utilities/utalloc.c @@ -309,7 +309,8 @@ acpi_ut_initialize_buffer(struct acpi_buffer * buffer, * ******************************************************************************/ -void *acpi_ut_allocate(acpi_size size, u32 component, char *module, u32 line) +void *acpi_ut_allocate(acpi_size size, + u32 component, const char *module, u32 line) { void *allocation; @@ -353,7 +354,7 @@ void *acpi_ut_allocate(acpi_size size, u32 component, char *module, u32 line) ******************************************************************************/ void *acpi_ut_allocate_zeroed(acpi_size size, - u32 component, char *module, u32 line) + u32 component, const char *module, u32 line) { void *allocation; diff --git a/drivers/acpi/utilities/utcopy.c b/drivers/acpi/utilities/utcopy.c index 655c290aca7..53499ac9098 100644 --- a/drivers/acpi/utilities/utcopy.c +++ b/drivers/acpi/utilities/utcopy.c @@ -572,7 +572,7 @@ acpi_ut_copy_epackage_to_ipackage(union acpi_object *external_object, acpi_status status = AE_OK; union acpi_operand_object *package_object; union acpi_operand_object **package_elements; - acpi_native_uint i; + u32 i; ACPI_FUNCTION_TRACE(ut_copy_epackage_to_ipackage); @@ -599,7 +599,7 @@ acpi_ut_copy_epackage_to_ipackage(union acpi_object *external_object, /* Truncate package and delete it */ - package_object->package.count = (u32) i; + package_object->package.count = i; package_elements[i] = NULL; acpi_ut_remove_reference(package_object); return_ACPI_STATUS(status); diff --git a/drivers/acpi/utilities/utdebug.c b/drivers/acpi/utilities/utdebug.c index f938f465efa..fd66ecb6741 100644 --- a/drivers/acpi/utilities/utdebug.c +++ b/drivers/acpi/utilities/utdebug.c @@ -157,7 +157,8 @@ void ACPI_INTERNAL_VAR_XFACE acpi_ut_debug_print(u32 requested_debug_level, u32 line_number, const char *function_name, - char *module_name, u32 component_id, char *format, ...) + const char *module_name, + u32 component_id, const char *format, ...) { acpi_thread_id thread_id; va_list args; @@ -228,7 +229,8 @@ void ACPI_INTERNAL_VAR_XFACE acpi_ut_debug_print_raw(u32 requested_debug_level, u32 line_number, const char *function_name, - char *module_name, u32 component_id, char *format, ...) + const char *module_name, + u32 component_id, const char *format, ...) 
{ va_list args; @@ -261,7 +263,8 @@ ACPI_EXPORT_SYMBOL(acpi_ut_debug_print_raw) ******************************************************************************/ void acpi_ut_trace(u32 line_number, - const char *function_name, char *module_name, u32 component_id) + const char *function_name, + const char *module_name, u32 component_id) { acpi_gbl_nesting_level++; @@ -293,7 +296,7 @@ ACPI_EXPORT_SYMBOL(acpi_ut_trace) void acpi_ut_trace_ptr(u32 line_number, const char *function_name, - char *module_name, u32 component_id, void *pointer) + const char *module_name, u32 component_id, void *pointer) { acpi_gbl_nesting_level++; acpi_ut_track_stack_ptr(); @@ -324,7 +327,7 @@ acpi_ut_trace_ptr(u32 line_number, void acpi_ut_trace_str(u32 line_number, const char *function_name, - char *module_name, u32 component_id, char *string) + const char *module_name, u32 component_id, char *string) { acpi_gbl_nesting_level++; @@ -356,7 +359,7 @@ acpi_ut_trace_str(u32 line_number, void acpi_ut_trace_u32(u32 line_number, const char *function_name, - char *module_name, u32 component_id, u32 integer) + const char *module_name, u32 component_id, u32 integer) { acpi_gbl_nesting_level++; @@ -386,7 +389,8 @@ acpi_ut_trace_u32(u32 line_number, void acpi_ut_exit(u32 line_number, - const char *function_name, char *module_name, u32 component_id) + const char *function_name, + const char *module_name, u32 component_id) { acpi_ut_debug_print(ACPI_LV_FUNCTIONS, @@ -417,7 +421,8 @@ ACPI_EXPORT_SYMBOL(acpi_ut_exit) void acpi_ut_status_exit(u32 line_number, const char *function_name, - char *module_name, u32 component_id, acpi_status status) + const char *module_name, + u32 component_id, acpi_status status) { if (ACPI_SUCCESS(status)) { @@ -458,7 +463,8 @@ ACPI_EXPORT_SYMBOL(acpi_ut_status_exit) void acpi_ut_value_exit(u32 line_number, const char *function_name, - char *module_name, u32 component_id, acpi_integer value) + const char *module_name, + u32 component_id, acpi_integer value) { acpi_ut_debug_print(ACPI_LV_FUNCTIONS, @@ -490,7 +496,7 @@ ACPI_EXPORT_SYMBOL(acpi_ut_value_exit) void acpi_ut_ptr_exit(u32 line_number, const char *function_name, - char *module_name, u32 component_id, u8 * ptr) + const char *module_name, u32 component_id, u8 *ptr) { acpi_ut_debug_print(ACPI_LV_FUNCTIONS, @@ -519,8 +525,8 @@ acpi_ut_ptr_exit(u32 line_number, void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display) { - acpi_native_uint i = 0; - acpi_native_uint j; + u32 i = 0; + u32 j; u32 temp32; u8 buf_char; @@ -539,7 +545,7 @@ void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display) /* Print current offset */ - acpi_os_printf("%6.4X: ", (u32) i); + acpi_os_printf("%6.4X: ", i); /* Print 16 hex chars */ @@ -549,7 +555,7 @@ void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display) /* Dump fill spaces */ acpi_os_printf("%*s", ((display * 2) + 1), " "); - j += (acpi_native_uint) display; + j += display; continue; } @@ -557,32 +563,38 @@ void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display) case DB_BYTE_DISPLAY: default: /* Default is BYTE display */ - acpi_os_printf("%02X ", buffer[i + j]); + acpi_os_printf("%02X ", + buffer[(acpi_size) i + j]); break; case DB_WORD_DISPLAY: - ACPI_MOVE_16_TO_32(&temp32, &buffer[i + j]); + ACPI_MOVE_16_TO_32(&temp32, + &buffer[(acpi_size) i + j]); acpi_os_printf("%04X ", temp32); break; case DB_DWORD_DISPLAY: - ACPI_MOVE_32_TO_32(&temp32, &buffer[i + j]); + ACPI_MOVE_32_TO_32(&temp32, + &buffer[(acpi_size) i + j]); acpi_os_printf("%08X ", temp32); break; case DB_QWORD_DISPLAY: - 
ACPI_MOVE_32_TO_32(&temp32, &buffer[i + j]); + ACPI_MOVE_32_TO_32(&temp32, + &buffer[(acpi_size) i + j]); acpi_os_printf("%08X", temp32); - ACPI_MOVE_32_TO_32(&temp32, &buffer[i + j + 4]); + ACPI_MOVE_32_TO_32(&temp32, + &buffer[(acpi_size) i + j + + 4]); acpi_os_printf("%08X ", temp32); break; } - j += (acpi_native_uint) display; + j += display; } /* @@ -596,7 +608,7 @@ void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display) return; } - buf_char = buffer[i + j]; + buf_char = buffer[(acpi_size) i + j]; if (ACPI_IS_PRINT(buf_char)) { acpi_os_printf("%c", buf_char); } else { diff --git a/drivers/acpi/utilities/utdelete.c b/drivers/acpi/utilities/utdelete.c index 1fbc35139e8..c5c791a575c 100644 --- a/drivers/acpi/utilities/utdelete.c +++ b/drivers/acpi/utilities/utdelete.c @@ -442,7 +442,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action) union acpi_generic_state *state_list = NULL; union acpi_operand_object *next_object = NULL; union acpi_generic_state *state; - acpi_native_uint i; + u32 i; ACPI_FUNCTION_TRACE_PTR(ut_update_object_reference, object); diff --git a/drivers/acpi/utilities/uteval.c b/drivers/acpi/utilities/uteval.c index 05e61be267d..352747e49c7 100644 --- a/drivers/acpi/utilities/uteval.c +++ b/drivers/acpi/utilities/uteval.c @@ -97,7 +97,7 @@ acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state) acpi_status status; union acpi_operand_object *string_desc; union acpi_operand_object *return_desc; - acpi_native_uint i; + u32 i; ACPI_FUNCTION_TRACE(ut_osi_implementation); @@ -217,7 +217,6 @@ acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node, info->prefix_node = prefix_node; info->pathname = path; - info->parameter_type = ACPI_PARAM_ARGS; /* Evaluate the object/method */ @@ -514,7 +513,7 @@ acpi_ut_execute_CID(struct acpi_namespace_node * device_node, u32 count; u32 size; struct acpi_compatible_id_list *cid_list; - acpi_native_uint i; + u32 i; ACPI_FUNCTION_TRACE(ut_execute_CID); diff --git a/drivers/acpi/utilities/utmisc.c b/drivers/acpi/utilities/utmisc.c index 1f057b71db1..f34be677355 100644 --- a/drivers/acpi/utilities/utmisc.c +++ b/drivers/acpi/utilities/utmisc.c @@ -64,7 +64,7 @@ ACPI_MODULE_NAME("utmisc") ******************************************************************************/ const char *acpi_ut_validate_exception(acpi_status status) { - acpi_status sub_status; + u32 sub_status; const char *exception = NULL; ACPI_FUNCTION_ENTRY(); @@ -85,32 +85,28 @@ const char *acpi_ut_validate_exception(acpi_status status) case AE_CODE_PROGRAMMER: if (sub_status <= AE_CODE_PGM_MAX) { - exception = - acpi_gbl_exception_names_pgm[sub_status - 1]; + exception = acpi_gbl_exception_names_pgm[sub_status]; } break; case AE_CODE_ACPI_TABLES: if (sub_status <= AE_CODE_TBL_MAX) { - exception = - acpi_gbl_exception_names_tbl[sub_status - 1]; + exception = acpi_gbl_exception_names_tbl[sub_status]; } break; case AE_CODE_AML: if (sub_status <= AE_CODE_AML_MAX) { - exception = - acpi_gbl_exception_names_aml[sub_status - 1]; + exception = acpi_gbl_exception_names_aml[sub_status]; } break; case AE_CODE_CONTROL: if (sub_status <= AE_CODE_CTRL_MAX) { - exception = - acpi_gbl_exception_names_ctrl[sub_status - 1]; + exception = acpi_gbl_exception_names_ctrl[sub_status]; } break; @@ -165,9 +161,9 @@ u8 acpi_ut_is_aml_table(struct acpi_table_header *table) acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id) { - acpi_native_uint i; - acpi_native_uint j; - acpi_native_uint k; + u32 i; + u32 j; + u32 k; acpi_status status; 
ACPI_FUNCTION_TRACE(ut_allocate_owner_id); @@ -273,7 +269,7 @@ void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr) { acpi_owner_id owner_id = *owner_id_ptr; acpi_status status; - acpi_native_uint index; + u32 index; u32 bit; ACPI_FUNCTION_TRACE_U32(ut_release_owner_id, owner_id); @@ -593,7 +589,7 @@ acpi_ut_display_init_pathname(u8 type, * ******************************************************************************/ -u8 acpi_ut_valid_acpi_char(char character, acpi_native_uint position) +u8 acpi_ut_valid_acpi_char(char character, u32 position) { if (!((character >= 'A' && character <= 'Z') || @@ -628,7 +624,7 @@ u8 acpi_ut_valid_acpi_char(char character, acpi_native_uint position) u8 acpi_ut_valid_acpi_name(u32 name) { - acpi_native_uint i; + u32 i; ACPI_FUNCTION_ENTRY(); @@ -657,7 +653,7 @@ u8 acpi_ut_valid_acpi_name(u32 name) acpi_name acpi_ut_repair_name(char *name) { - acpi_native_uint i; + u32 i; char new_name[ACPI_NAME_SIZE]; for (i = 0; i < ACPI_NAME_SIZE; i++) { @@ -1024,7 +1020,7 @@ acpi_ut_walk_package_tree(union acpi_operand_object * source_object, ******************************************************************************/ void ACPI_INTERNAL_VAR_XFACE -acpi_ut_error(char *module_name, u32 line_number, char *format, ...) +acpi_ut_error(const char *module_name, u32 line_number, const char *format, ...) { va_list args; @@ -1037,8 +1033,8 @@ acpi_ut_error(char *module_name, u32 line_number, char *format, ...) } void ACPI_INTERNAL_VAR_XFACE -acpi_ut_exception(char *module_name, - u32 line_number, acpi_status status, char *format, ...) +acpi_ut_exception(const char *module_name, + u32 line_number, acpi_status status, const char *format, ...) { va_list args; @@ -1054,7 +1050,8 @@ acpi_ut_exception(char *module_name, EXPORT_SYMBOL(acpi_ut_exception); void ACPI_INTERNAL_VAR_XFACE -acpi_ut_warning(char *module_name, u32 line_number, char *format, ...) +acpi_ut_warning(const char *module_name, + u32 line_number, const char *format, ...) { va_list args; @@ -1067,7 +1064,7 @@ acpi_ut_warning(char *module_name, u32 line_number, char *format, ...) } void ACPI_INTERNAL_VAR_XFACE -acpi_ut_info(char *module_name, u32 line_number, char *format, ...) +acpi_ut_info(const char *module_name, u32 line_number, const char *format, ...) { va_list args; diff --git a/drivers/acpi/utilities/utmutex.c b/drivers/acpi/utilities/utmutex.c index f7d602b1a89..7331dde9e1b 100644 --- a/drivers/acpi/utilities/utmutex.c +++ b/drivers/acpi/utilities/utmutex.c @@ -218,7 +218,7 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id) * the mutex ordering rule. This indicates a coding error somewhere in * the ACPI subsystem code. */ - for (i = mutex_id; i < ACPI_MAX_MUTEX; i++) { + for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) { if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) { if (i == mutex_id) { ACPI_ERROR((AE_INFO, @@ -315,7 +315,7 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id) * ordering rule. This indicates a coding error somewhere in * the ACPI subsystem code. 
*/ - for (i = mutex_id; i < ACPI_MAX_MUTEX; i++) { + for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) { if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) { if (i == mutex_id) { continue; diff --git a/drivers/acpi/utilities/utobject.c b/drivers/acpi/utilities/utobject.c index e68466de804..e25484495e6 100644 --- a/drivers/acpi/utilities/utobject.c +++ b/drivers/acpi/utilities/utobject.c @@ -83,7 +83,8 @@ acpi_ut_get_element_length(u8 object_type, * ******************************************************************************/ -union acpi_operand_object *acpi_ut_create_internal_object_dbg(char *module_name, +union acpi_operand_object *acpi_ut_create_internal_object_dbg(const char + *module_name, u32 line_number, u32 component_id, acpi_object_type @@ -175,8 +176,8 @@ union acpi_operand_object *acpi_ut_create_package_object(u32 count) * Create the element array. Count+1 allows the array to be null * terminated. */ - package_elements = ACPI_ALLOCATE_ZEROED((acpi_size) - (count + 1) * sizeof(void *)); + package_elements = ACPI_ALLOCATE_ZEROED(((acpi_size) count + + 1) * sizeof(void *)); if (!package_elements) { acpi_ut_remove_reference(package_desc); return_PTR(NULL); @@ -347,7 +348,7 @@ u8 acpi_ut_valid_internal_object(void *object) * ******************************************************************************/ -void *acpi_ut_allocate_object_desc_dbg(char *module_name, +void *acpi_ut_allocate_object_desc_dbg(const char *module_name, u32 line_number, u32 component_id) { union acpi_operand_object *object; diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index d089c4519d4..64c889331f3 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -631,6 +631,76 @@ acpi_video_bus_DOS(struct acpi_video_bus *video, int bios_flag, int lcd_flag) * device : video output device (LCD, CRT, ..) * * Return Value: + * Maximum brightness level + * + * Allocate and initialize device->brightness. + */ + +static int +acpi_video_init_brightness(struct acpi_video_device *device) +{ + union acpi_object *obj = NULL; + int i, max_level = 0, count = 0; + union acpi_object *o; + struct acpi_video_device_brightness *br = NULL; + + if (!ACPI_SUCCESS(acpi_video_device_lcd_query_levels(device, &obj))) { + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Could not query available " + "LCD brightness level\n")); + goto out; + } + + if (obj->package.count < 2) + goto out; + + br = kzalloc(sizeof(*br), GFP_KERNEL); + if (!br) { + printk(KERN_ERR "can't allocate memory\n"); + goto out; + } + + br->levels = kmalloc(obj->package.count * sizeof *(br->levels), + GFP_KERNEL); + if (!br->levels) + goto out_free; + + for (i = 0; i < obj->package.count; i++) { + o = (union acpi_object *)&obj->package.elements[i]; + if (o->type != ACPI_TYPE_INTEGER) { + printk(KERN_ERR PREFIX "Invalid data\n"); + continue; + } + br->levels[count] = (u32) o->integer.value; + + if (br->levels[count] > max_level) + max_level = br->levels[count]; + count++; + } + + if (count < 2) + goto out_free_levels; + + br->count = count; + device->brightness = br; + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "found %d brightness levels\n", count)); + kfree(obj); + return max_level; + +out_free_levels: + kfree(br->levels); +out_free: + kfree(br); +out: + device->brightness = NULL; + kfree(obj); + return 0; +} + +/* + * Arg: + * device : video output device (LCD, CRT, ..) 
+ * + * Return Value: * None * * Find out all required AML methods defined under the output @@ -640,10 +710,7 @@ acpi_video_bus_DOS(struct acpi_video_bus *video, int bios_flag, int lcd_flag) static void acpi_video_device_find_cap(struct acpi_video_device *device) { acpi_handle h_dummy1; - int i; u32 max_level = 0; - union acpi_object *obj = NULL; - struct acpi_video_device_brightness *br = NULL; memset(&device->cap, 0, sizeof(device->cap)); @@ -672,53 +739,7 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device) device->cap._DSS = 1; } - if (ACPI_SUCCESS(acpi_video_device_lcd_query_levels(device, &obj))) { - - if (obj->package.count >= 2) { - int count = 0; - union acpi_object *o; - - br = kzalloc(sizeof(*br), GFP_KERNEL); - if (!br) { - printk(KERN_ERR "can't allocate memory\n"); - } else { - br->levels = kmalloc(obj->package.count * - sizeof *(br->levels), GFP_KERNEL); - if (!br->levels) - goto out; - - for (i = 0; i < obj->package.count; i++) { - o = (union acpi_object *)&obj->package. - elements[i]; - if (o->type != ACPI_TYPE_INTEGER) { - printk(KERN_ERR PREFIX "Invalid data\n"); - continue; - } - br->levels[count] = (u32) o->integer.value; - - if (br->levels[count] > max_level) - max_level = br->levels[count]; - count++; - } - out: - if (count < 2) { - kfree(br->levels); - kfree(br); - } else { - br->count = count; - device->brightness = br; - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "found %d brightness levels\n", - count)); - } - } - } - - } else { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Could not query available LCD brightness level\n")); - } - - kfree(obj); + max_level = acpi_video_init_brightness(device); if (device->cap._BCL && device->cap._BCM && device->cap._BQC && max_level > 0){ int result; @@ -1695,6 +1716,8 @@ static void acpi_video_switch_brightness(struct acpi_video_device *device, int event) { unsigned long level_current, level_next; + if (!device->brightness) + return; acpi_video_device_lcd_get_level_current(device, &level_current); level_next = acpi_video_get_next_level(device, level_current, event); acpi_video_device_lcd_set_level(device, level_next); diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 911ec600fe7..3f940393d6c 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -453,6 +453,8 @@ int platform_driver_register(struct platform_driver *drv) drv->driver.suspend = platform_drv_suspend; if (drv->resume) drv->driver.resume = platform_drv_resume; + if (drv->pm) + drv->driver.pm = &drv->pm->base; return driver_register(&drv->driver); } EXPORT_SYMBOL_GPL(platform_driver_register); @@ -560,7 +562,9 @@ static int platform_match(struct device *dev, struct device_driver *drv) return (strncmp(pdev->name, drv->name, BUS_ID_SIZE) == 0); } -static int platform_suspend(struct device *dev, pm_message_t mesg) +#ifdef CONFIG_PM_SLEEP + +static int platform_legacy_suspend(struct device *dev, pm_message_t mesg) { int ret = 0; @@ -570,7 +574,7 @@ static int platform_suspend(struct device *dev, pm_message_t mesg) return ret; } -static int platform_suspend_late(struct device *dev, pm_message_t mesg) +static int platform_legacy_suspend_late(struct device *dev, pm_message_t mesg) { struct platform_driver *drv = to_platform_driver(dev->driver); struct platform_device *pdev; @@ -583,7 +587,7 @@ static int platform_suspend_late(struct device *dev, pm_message_t mesg) return ret; } -static int platform_resume_early(struct device *dev) +static int platform_legacy_resume_early(struct device *dev) { struct platform_driver *drv = 
to_platform_driver(dev->driver); struct platform_device *pdev; @@ -596,7 +600,7 @@ static int platform_resume_early(struct device *dev) return ret; } -static int platform_resume(struct device *dev) +static int platform_legacy_resume(struct device *dev) { int ret = 0; @@ -606,15 +610,291 @@ static int platform_resume(struct device *dev) return ret; } +static int platform_pm_prepare(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (drv && drv->pm && drv->pm->prepare) + ret = drv->pm->prepare(dev); + + return ret; +} + +static void platform_pm_complete(struct device *dev) +{ + struct device_driver *drv = dev->driver; + + if (drv && drv->pm && drv->pm->complete) + drv->pm->complete(dev); +} + +#ifdef CONFIG_SUSPEND + +static int platform_pm_suspend(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (drv && drv->pm) { + if (drv->pm->suspend) + ret = drv->pm->suspend(dev); + } else { + ret = platform_legacy_suspend(dev, PMSG_SUSPEND); + } + + return ret; +} + +static int platform_pm_suspend_noirq(struct device *dev) +{ + struct platform_driver *pdrv; + int ret = 0; + + if (!dev->driver) + return 0; + + pdrv = to_platform_driver(dev->driver); + if (pdrv->pm) { + if (pdrv->pm->suspend_noirq) + ret = pdrv->pm->suspend_noirq(dev); + } else { + ret = platform_legacy_suspend_late(dev, PMSG_SUSPEND); + } + + return ret; +} + +static int platform_pm_resume(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (drv && drv->pm) { + if (drv->pm->resume) + ret = drv->pm->resume(dev); + } else { + ret = platform_legacy_resume(dev); + } + + return ret; +} + +static int platform_pm_resume_noirq(struct device *dev) +{ + struct platform_driver *pdrv; + int ret = 0; + + if (!dev->driver) + return 0; + + pdrv = to_platform_driver(dev->driver); + if (pdrv->pm) { + if (pdrv->pm->resume_noirq) + ret = pdrv->pm->resume_noirq(dev); + } else { + ret = platform_legacy_resume_early(dev); + } + + return ret; +} + +#else /* !CONFIG_SUSPEND */ + +#define platform_pm_suspend NULL +#define platform_pm_resume NULL +#define platform_pm_suspend_noirq NULL +#define platform_pm_resume_noirq NULL + +#endif /* !CONFIG_SUSPEND */ + +#ifdef CONFIG_HIBERNATION + +static int platform_pm_freeze(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->freeze) + ret = drv->pm->freeze(dev); + } else { + ret = platform_legacy_suspend(dev, PMSG_FREEZE); + } + + return ret; +} + +static int platform_pm_freeze_noirq(struct device *dev) +{ + struct platform_driver *pdrv; + int ret = 0; + + if (!dev->driver) + return 0; + + pdrv = to_platform_driver(dev->driver); + if (pdrv->pm) { + if (pdrv->pm->freeze_noirq) + ret = pdrv->pm->freeze_noirq(dev); + } else { + ret = platform_legacy_suspend_late(dev, PMSG_FREEZE); + } + + return ret; +} + +static int platform_pm_thaw(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (drv && drv->pm) { + if (drv->pm->thaw) + ret = drv->pm->thaw(dev); + } else { + ret = platform_legacy_resume(dev); + } + + return ret; +} + +static int platform_pm_thaw_noirq(struct device *dev) +{ + struct platform_driver *pdrv; + int ret = 0; + + if (!dev->driver) + return 0; + + pdrv = to_platform_driver(dev->driver); + if (pdrv->pm) { + if (pdrv->pm->thaw_noirq) + ret = pdrv->pm->thaw_noirq(dev); + } else { + ret = platform_legacy_resume_early(dev); + } + + return ret; +} + +static int 
platform_pm_poweroff(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (drv && drv->pm) { + if (drv->pm->poweroff) + ret = drv->pm->poweroff(dev); + } else { + ret = platform_legacy_suspend(dev, PMSG_HIBERNATE); + } + + return ret; +} + +static int platform_pm_poweroff_noirq(struct device *dev) +{ + struct platform_driver *pdrv; + int ret = 0; + + if (!dev->driver) + return 0; + + pdrv = to_platform_driver(dev->driver); + if (pdrv->pm) { + if (pdrv->pm->poweroff_noirq) + ret = pdrv->pm->poweroff_noirq(dev); + } else { + ret = platform_legacy_suspend_late(dev, PMSG_HIBERNATE); + } + + return ret; +} + +static int platform_pm_restore(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (drv && drv->pm) { + if (drv->pm->restore) + ret = drv->pm->restore(dev); + } else { + ret = platform_legacy_resume(dev); + } + + return ret; +} + +static int platform_pm_restore_noirq(struct device *dev) +{ + struct platform_driver *pdrv; + int ret = 0; + + if (!dev->driver) + return 0; + + pdrv = to_platform_driver(dev->driver); + if (pdrv->pm) { + if (pdrv->pm->restore_noirq) + ret = pdrv->pm->restore_noirq(dev); + } else { + ret = platform_legacy_resume_early(dev); + } + + return ret; +} + +#else /* !CONFIG_HIBERNATION */ + +#define platform_pm_freeze NULL +#define platform_pm_thaw NULL +#define platform_pm_poweroff NULL +#define platform_pm_restore NULL +#define platform_pm_freeze_noirq NULL +#define platform_pm_thaw_noirq NULL +#define platform_pm_poweroff_noirq NULL +#define platform_pm_restore_noirq NULL + +#endif /* !CONFIG_HIBERNATION */ + +struct pm_ext_ops platform_pm_ops = { + .base = { + .prepare = platform_pm_prepare, + .complete = platform_pm_complete, + .suspend = platform_pm_suspend, + .resume = platform_pm_resume, + .freeze = platform_pm_freeze, + .thaw = platform_pm_thaw, + .poweroff = platform_pm_poweroff, + .restore = platform_pm_restore, + }, + .suspend_noirq = platform_pm_suspend_noirq, + .resume_noirq = platform_pm_resume_noirq, + .freeze_noirq = platform_pm_freeze_noirq, + .thaw_noirq = platform_pm_thaw_noirq, + .poweroff_noirq = platform_pm_poweroff_noirq, + .restore_noirq = platform_pm_restore_noirq, +}; + +#define PLATFORM_PM_OPS_PTR &platform_pm_ops + +#else /* !CONFIG_PM_SLEEP */ + +#define PLATFORM_PM_OPS_PTR NULL + +#endif /* !CONFIG_PM_SLEEP */ + struct bus_type platform_bus_type = { .name = "platform", .dev_attrs = platform_dev_attrs, .match = platform_match, .uevent = platform_uevent, - .suspend = platform_suspend, - .suspend_late = platform_suspend_late, - .resume_early = platform_resume_early, - .resume = platform_resume, + .pm = PLATFORM_PM_OPS_PTR, }; EXPORT_SYMBOL_GPL(platform_bus_type); diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 45cc3d9eacb..3250c5257b7 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -12,11 +12,9 @@ * and add it to the list of power-controlled devices. sysfs entries for * controlling device power management will also be added. * - * A different set of lists than the global subsystem list are used to - * keep track of power info because we use different lists to hold - * devices based on what stage of the power management process they - * are in. The power domain dependencies may also differ from the - * ancestral dependencies that the subsystem list maintains. 
+ * A separate list is used for keeping track of power info, because the power + * domain dependencies may differ from the ancestral dependencies that the + * subsystem list maintains. */ #include <linux/device.h> @@ -30,31 +28,40 @@ #include "power.h" /* - * The entries in the dpm_active list are in a depth first order, simply + * The entries in the dpm_list list are in a depth first order, simply * because children are guaranteed to be discovered after parents, and * are inserted at the back of the list on discovery. * - * All the other lists are kept in the same order, for consistency. - * However the lists aren't always traversed in the same order. - * Semaphores must be acquired from the top (i.e., front) down - * and released in the opposite order. Devices must be suspended - * from the bottom (i.e., end) up and resumed in the opposite order. - * That way no parent will be suspended while it still has an active - * child. - * * Since device_pm_add() may be called with a device semaphore held, * we must never try to acquire a device semaphore while holding * dpm_list_mutex. */ -LIST_HEAD(dpm_active); -static LIST_HEAD(dpm_off); -static LIST_HEAD(dpm_off_irq); +LIST_HEAD(dpm_list); static DEFINE_MUTEX(dpm_list_mtx); -/* 'true' if all devices have been suspended, protected by dpm_list_mtx */ -static bool all_sleeping; +/* + * Set once the preparation of devices for a PM transition has started, reset + * before starting to resume devices. Protected by dpm_list_mtx. + */ +static bool transition_started; + +/** + * device_pm_lock - lock the list of active devices used by the PM core + */ +void device_pm_lock(void) +{ + mutex_lock(&dpm_list_mtx); +} + +/** + * device_pm_unlock - unlock the list of active devices used by the PM core + */ +void device_pm_unlock(void) +{ + mutex_unlock(&dpm_list_mtx); +} /** * device_pm_add - add a device to the list of active devices @@ -68,17 +75,25 @@ int device_pm_add(struct device *dev) dev->bus ? dev->bus->name : "No Bus", kobject_name(&dev->kobj)); mutex_lock(&dpm_list_mtx); - if ((dev->parent && dev->parent->power.sleeping) || all_sleeping) { - if (dev->parent->power.sleeping) - dev_warn(dev, "parent %s is sleeping\n", + if (dev->parent) { + if (dev->parent->power.status >= DPM_SUSPENDING) { + dev_warn(dev, "parent %s is sleeping, will not add\n", dev->parent->bus_id); - else - dev_warn(dev, "all devices are sleeping\n"); + WARN_ON(true); + } + } else if (transition_started) { + /* + * We refuse to register parentless devices while a PM + * transition is in progress in order to avoid leaving them + * unhandled down the road + */ WARN_ON(true); } error = dpm_sysfs_add(dev); - if (!error) - list_add_tail(&dev->power.entry, &dpm_active); + if (!error) { + dev->power.status = DPM_ON; + list_add_tail(&dev->power.entry, &dpm_list); + } mutex_unlock(&dpm_list_mtx); return error; } @@ -100,73 +115,243 @@ void device_pm_remove(struct device *dev) mutex_unlock(&dpm_list_mtx); } +/** + * pm_op - execute the PM operation appropiate for given PM event + * @dev: Device. + * @ops: PM operations to choose from. + * @state: PM transition of the system being carried out. 
+ */ +static int pm_op(struct device *dev, struct pm_ops *ops, pm_message_t state) +{ + int error = 0; + + switch (state.event) { +#ifdef CONFIG_SUSPEND + case PM_EVENT_SUSPEND: + if (ops->suspend) { + error = ops->suspend(dev); + suspend_report_result(ops->suspend, error); + } + break; + case PM_EVENT_RESUME: + if (ops->resume) { + error = ops->resume(dev); + suspend_report_result(ops->resume, error); + } + break; +#endif /* CONFIG_SUSPEND */ +#ifdef CONFIG_HIBERNATION + case PM_EVENT_FREEZE: + case PM_EVENT_QUIESCE: + if (ops->freeze) { + error = ops->freeze(dev); + suspend_report_result(ops->freeze, error); + } + break; + case PM_EVENT_HIBERNATE: + if (ops->poweroff) { + error = ops->poweroff(dev); + suspend_report_result(ops->poweroff, error); + } + break; + case PM_EVENT_THAW: + case PM_EVENT_RECOVER: + if (ops->thaw) { + error = ops->thaw(dev); + suspend_report_result(ops->thaw, error); + } + break; + case PM_EVENT_RESTORE: + if (ops->restore) { + error = ops->restore(dev); + suspend_report_result(ops->restore, error); + } + break; +#endif /* CONFIG_HIBERNATION */ + default: + error = -EINVAL; + } + return error; +} + +/** + * pm_noirq_op - execute the PM operation appropiate for given PM event + * @dev: Device. + * @ops: PM operations to choose from. + * @state: PM transition of the system being carried out. + * + * The operation is executed with interrupts disabled by the only remaining + * functional CPU in the system. + */ +static int pm_noirq_op(struct device *dev, struct pm_ext_ops *ops, + pm_message_t state) +{ + int error = 0; + + switch (state.event) { +#ifdef CONFIG_SUSPEND + case PM_EVENT_SUSPEND: + if (ops->suspend_noirq) { + error = ops->suspend_noirq(dev); + suspend_report_result(ops->suspend_noirq, error); + } + break; + case PM_EVENT_RESUME: + if (ops->resume_noirq) { + error = ops->resume_noirq(dev); + suspend_report_result(ops->resume_noirq, error); + } + break; +#endif /* CONFIG_SUSPEND */ +#ifdef CONFIG_HIBERNATION + case PM_EVENT_FREEZE: + case PM_EVENT_QUIESCE: + if (ops->freeze_noirq) { + error = ops->freeze_noirq(dev); + suspend_report_result(ops->freeze_noirq, error); + } + break; + case PM_EVENT_HIBERNATE: + if (ops->poweroff_noirq) { + error = ops->poweroff_noirq(dev); + suspend_report_result(ops->poweroff_noirq, error); + } + break; + case PM_EVENT_THAW: + case PM_EVENT_RECOVER: + if (ops->thaw_noirq) { + error = ops->thaw_noirq(dev); + suspend_report_result(ops->thaw_noirq, error); + } + break; + case PM_EVENT_RESTORE: + if (ops->restore_noirq) { + error = ops->restore_noirq(dev); + suspend_report_result(ops->restore_noirq, error); + } + break; +#endif /* CONFIG_HIBERNATION */ + default: + error = -EINVAL; + } + return error; +} + +static char *pm_verb(int event) +{ + switch (event) { + case PM_EVENT_SUSPEND: + return "suspend"; + case PM_EVENT_RESUME: + return "resume"; + case PM_EVENT_FREEZE: + return "freeze"; + case PM_EVENT_QUIESCE: + return "quiesce"; + case PM_EVENT_HIBERNATE: + return "hibernate"; + case PM_EVENT_THAW: + return "thaw"; + case PM_EVENT_RESTORE: + return "restore"; + case PM_EVENT_RECOVER: + return "recover"; + default: + return "(unknown PM event)"; + } +} + +static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info) +{ + dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event), + ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ? 
+ ", may wakeup" : ""); +} + +static void pm_dev_err(struct device *dev, pm_message_t state, char *info, + int error) +{ + printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n", + kobject_name(&dev->kobj), pm_verb(state.event), info, error); +} + /*------------------------- Resume routines -------------------------*/ /** - * resume_device_early - Power on one device (early resume). + * resume_device_noirq - Power on one device (early resume). * @dev: Device. + * @state: PM transition of the system being carried out. * * Must be called with interrupts disabled. */ -static int resume_device_early(struct device *dev) +static int resume_device_noirq(struct device *dev, pm_message_t state) { int error = 0; TRACE_DEVICE(dev); TRACE_RESUME(0); - if (dev->bus && dev->bus->resume_early) { - dev_dbg(dev, "EARLY resume\n"); + if (!dev->bus) + goto End; + + if (dev->bus->pm) { + pm_dev_dbg(dev, state, "EARLY "); + error = pm_noirq_op(dev, dev->bus->pm, state); + } else if (dev->bus->resume_early) { + pm_dev_dbg(dev, state, "legacy EARLY "); error = dev->bus->resume_early(dev); } - + End: TRACE_RESUME(error); return error; } /** * dpm_power_up - Power on all regular (non-sysdev) devices. + * @state: PM transition of the system being carried out. * - * Walk the dpm_off_irq list and power each device up. This - * is used for devices that required they be powered down with - * interrupts disabled. As devices are powered on, they are moved - * to the dpm_off list. + * Execute the appropriate "noirq resume" callback for all devices marked + * as DPM_OFF_IRQ. * * Must be called with interrupts disabled and only one CPU running. */ -static void dpm_power_up(void) +static void dpm_power_up(pm_message_t state) { + struct device *dev; - while (!list_empty(&dpm_off_irq)) { - struct list_head *entry = dpm_off_irq.next; - struct device *dev = to_device(entry); + list_for_each_entry(dev, &dpm_list, power.entry) + if (dev->power.status > DPM_OFF) { + int error; - list_move_tail(entry, &dpm_off); - resume_device_early(dev); - } + dev->power.status = DPM_OFF; + error = resume_device_noirq(dev, state); + if (error) + pm_dev_err(dev, state, " early", error); + } } /** * device_power_up - Turn on all devices that need special attention. + * @state: PM transition of the system being carried out. * * Power on system devices, then devices that required we shut them down * with interrupts disabled. * * Must be called with interrupts disabled. */ -void device_power_up(void) +void device_power_up(pm_message_t state) { sysdev_resume(); - dpm_power_up(); + dpm_power_up(state); } EXPORT_SYMBOL_GPL(device_power_up); /** * resume_device - Restore state for one device. * @dev: Device. - * + * @state: PM transition of the system being carried out. 
*/ -static int resume_device(struct device *dev) +static int resume_device(struct device *dev, pm_message_t state) { int error = 0; @@ -175,21 +360,40 @@ static int resume_device(struct device *dev) down(&dev->sem); - if (dev->bus && dev->bus->resume) { - dev_dbg(dev,"resuming\n"); - error = dev->bus->resume(dev); + if (dev->bus) { + if (dev->bus->pm) { + pm_dev_dbg(dev, state, ""); + error = pm_op(dev, &dev->bus->pm->base, state); + } else if (dev->bus->resume) { + pm_dev_dbg(dev, state, "legacy "); + error = dev->bus->resume(dev); + } + if (error) + goto End; } - if (!error && dev->type && dev->type->resume) { - dev_dbg(dev,"resuming\n"); - error = dev->type->resume(dev); + if (dev->type) { + if (dev->type->pm) { + pm_dev_dbg(dev, state, "type "); + error = pm_op(dev, dev->type->pm, state); + } else if (dev->type->resume) { + pm_dev_dbg(dev, state, "legacy type "); + error = dev->type->resume(dev); + } + if (error) + goto End; } - if (!error && dev->class && dev->class->resume) { - dev_dbg(dev,"class resume\n"); - error = dev->class->resume(dev); + if (dev->class) { + if (dev->class->pm) { + pm_dev_dbg(dev, state, "class "); + error = pm_op(dev, dev->class->pm, state); + } else if (dev->class->resume) { + pm_dev_dbg(dev, state, "legacy class "); + error = dev->class->resume(dev); + } } - + End: up(&dev->sem); TRACE_RESUME(error); @@ -198,78 +402,161 @@ static int resume_device(struct device *dev) /** * dpm_resume - Resume every device. + * @state: PM transition of the system being carried out. * - * Resume the devices that have either not gone through - * the late suspend, or that did go through it but also - * went through the early resume. + * Execute the appropriate "resume" callback for all devices the status of + * which indicates that they are inactive. + */ +static void dpm_resume(pm_message_t state) +{ + struct list_head list; + + INIT_LIST_HEAD(&list); + mutex_lock(&dpm_list_mtx); + transition_started = false; + while (!list_empty(&dpm_list)) { + struct device *dev = to_device(dpm_list.next); + + get_device(dev); + if (dev->power.status >= DPM_OFF) { + int error; + + dev->power.status = DPM_RESUMING; + mutex_unlock(&dpm_list_mtx); + + error = resume_device(dev, state); + + mutex_lock(&dpm_list_mtx); + if (error) + pm_dev_err(dev, state, "", error); + } else if (dev->power.status == DPM_SUSPENDING) { + /* Allow new children of the device to be registered */ + dev->power.status = DPM_RESUMING; + } + if (!list_empty(&dev->power.entry)) + list_move_tail(&dev->power.entry, &list); + put_device(dev); + } + list_splice(&list, &dpm_list); + mutex_unlock(&dpm_list_mtx); +} + +/** + * complete_device - Complete a PM transition for given device + * @dev: Device. + * @state: PM transition of the system being carried out. + */ +static void complete_device(struct device *dev, pm_message_t state) +{ + down(&dev->sem); + + if (dev->class && dev->class->pm && dev->class->pm->complete) { + pm_dev_dbg(dev, state, "completing class "); + dev->class->pm->complete(dev); + } + + if (dev->type && dev->type->pm && dev->type->pm->complete) { + pm_dev_dbg(dev, state, "completing type "); + dev->type->pm->complete(dev); + } + + if (dev->bus && dev->bus->pm && dev->bus->pm->base.complete) { + pm_dev_dbg(dev, state, "completing "); + dev->bus->pm->base.complete(dev); + } + + up(&dev->sem); +} + +/** + * dpm_complete - Complete a PM transition for all devices. + * @state: PM transition of the system being carried out. 
* - * Take devices from the dpm_off_list, resume them, - * and put them on the dpm_locked list. + * Execute the ->complete() callbacks for all devices that are not marked + * as DPM_ON. */ -static void dpm_resume(void) +static void dpm_complete(pm_message_t state) { + struct list_head list; + + INIT_LIST_HEAD(&list); mutex_lock(&dpm_list_mtx); - all_sleeping = false; - while(!list_empty(&dpm_off)) { - struct list_head *entry = dpm_off.next; - struct device *dev = to_device(entry); + while (!list_empty(&dpm_list)) { + struct device *dev = to_device(dpm_list.prev); - list_move_tail(entry, &dpm_active); - dev->power.sleeping = false; - mutex_unlock(&dpm_list_mtx); - resume_device(dev); - mutex_lock(&dpm_list_mtx); + get_device(dev); + if (dev->power.status > DPM_ON) { + dev->power.status = DPM_ON; + mutex_unlock(&dpm_list_mtx); + + complete_device(dev, state); + + mutex_lock(&dpm_list_mtx); + } + if (!list_empty(&dev->power.entry)) + list_move(&dev->power.entry, &list); + put_device(dev); } + list_splice(&list, &dpm_list); mutex_unlock(&dpm_list_mtx); } /** * device_resume - Restore state of each device in system. + * @state: PM transition of the system being carried out. * * Resume all the devices, unlock them all, and allow new * devices to be registered once again. */ -void device_resume(void) +void device_resume(pm_message_t state) { might_sleep(); - dpm_resume(); + dpm_resume(state); + dpm_complete(state); } EXPORT_SYMBOL_GPL(device_resume); /*------------------------- Suspend routines -------------------------*/ -static inline char *suspend_verb(u32 event) +/** + * resume_event - return a PM message representing the resume event + * corresponding to given sleep state. + * @sleep_state: PM message representing a sleep state. + */ +static pm_message_t resume_event(pm_message_t sleep_state) { - switch (event) { - case PM_EVENT_SUSPEND: return "suspend"; - case PM_EVENT_FREEZE: return "freeze"; - case PM_EVENT_PRETHAW: return "prethaw"; - default: return "(unknown suspend event)"; + switch (sleep_state.event) { + case PM_EVENT_SUSPEND: + return PMSG_RESUME; + case PM_EVENT_FREEZE: + case PM_EVENT_QUIESCE: + return PMSG_RECOVER; + case PM_EVENT_HIBERNATE: + return PMSG_RESTORE; } -} - -static void -suspend_device_dbg(struct device *dev, pm_message_t state, char *info) -{ - dev_dbg(dev, "%s%s%s\n", info, suspend_verb(state.event), - ((state.event == PM_EVENT_SUSPEND) && device_may_wakeup(dev)) ? - ", may wakeup" : ""); + return PMSG_ON; } /** - * suspend_device_late - Shut down one device (late suspend). + * suspend_device_noirq - Shut down one device (late suspend). * @dev: Device. - * @state: Power state device is entering. + * @state: PM transition of the system being carried out. * * This is called with interrupts off and only a single CPU running. */ -static int suspend_device_late(struct device *dev, pm_message_t state) +static int suspend_device_noirq(struct device *dev, pm_message_t state) { int error = 0; - if (dev->bus && dev->bus->suspend_late) { - suspend_device_dbg(dev, state, "LATE "); + if (!dev->bus) + return 0; + + if (dev->bus->pm) { + pm_dev_dbg(dev, state, "LATE "); + error = pm_noirq_op(dev, dev->bus->pm, state); + } else if (dev->bus->suspend_late) { + pm_dev_dbg(dev, state, "legacy LATE "); error = dev->bus->suspend_late(dev, state); suspend_report_result(dev->bus->suspend_late, error); } @@ -278,37 +565,30 @@ static int suspend_device_late(struct device *dev, pm_message_t state) /** * device_power_down - Shut down special devices. - * @state: Power state to enter. 
+ * @state: PM transition of the system being carried out. * - * Power down devices that require interrupts to be disabled - * and move them from the dpm_off list to the dpm_off_irq list. + * Power down devices that require interrupts to be disabled. * Then power down system devices. * * Must be called with interrupts disabled and only one CPU running. */ int device_power_down(pm_message_t state) { + struct device *dev; int error = 0; - while (!list_empty(&dpm_off)) { - struct list_head *entry = dpm_off.prev; - struct device *dev = to_device(entry); - - error = suspend_device_late(dev, state); + list_for_each_entry_reverse(dev, &dpm_list, power.entry) { + error = suspend_device_noirq(dev, state); if (error) { - printk(KERN_ERR "Could not power down device %s: " - "error %d\n", - kobject_name(&dev->kobj), error); + pm_dev_err(dev, state, " late", error); break; } - if (!list_empty(&dev->power.entry)) - list_move(&dev->power.entry, &dpm_off_irq); + dev->power.status = DPM_OFF_IRQ; } - if (!error) error = sysdev_suspend(state); if (error) - dpm_power_up(); + dpm_power_up(resume_event(state)); return error; } EXPORT_SYMBOL_GPL(device_power_down); @@ -316,7 +596,7 @@ EXPORT_SYMBOL_GPL(device_power_down); /** * suspend_device - Save state of one device. * @dev: Device. - * @state: Power state device is entering. + * @state: PM transition of the system being carried out. */ static int suspend_device(struct device *dev, pm_message_t state) { @@ -324,24 +604,43 @@ static int suspend_device(struct device *dev, pm_message_t state) down(&dev->sem); - if (dev->class && dev->class->suspend) { - suspend_device_dbg(dev, state, "class "); - error = dev->class->suspend(dev, state); - suspend_report_result(dev->class->suspend, error); + if (dev->class) { + if (dev->class->pm) { + pm_dev_dbg(dev, state, "class "); + error = pm_op(dev, dev->class->pm, state); + } else if (dev->class->suspend) { + pm_dev_dbg(dev, state, "legacy class "); + error = dev->class->suspend(dev, state); + suspend_report_result(dev->class->suspend, error); + } + if (error) + goto End; } - if (!error && dev->type && dev->type->suspend) { - suspend_device_dbg(dev, state, "type "); - error = dev->type->suspend(dev, state); - suspend_report_result(dev->type->suspend, error); + if (dev->type) { + if (dev->type->pm) { + pm_dev_dbg(dev, state, "type "); + error = pm_op(dev, dev->type->pm, state); + } else if (dev->type->suspend) { + pm_dev_dbg(dev, state, "legacy type "); + error = dev->type->suspend(dev, state); + suspend_report_result(dev->type->suspend, error); + } + if (error) + goto End; } - if (!error && dev->bus && dev->bus->suspend) { - suspend_device_dbg(dev, state, ""); - error = dev->bus->suspend(dev, state); - suspend_report_result(dev->bus->suspend, error); + if (dev->bus) { + if (dev->bus->pm) { + pm_dev_dbg(dev, state, ""); + error = pm_op(dev, &dev->bus->pm->base, state); + } else if (dev->bus->suspend) { + pm_dev_dbg(dev, state, "legacy "); + error = dev->bus->suspend(dev, state); + suspend_report_result(dev->bus->suspend, error); + } } - + End: up(&dev->sem); return error; @@ -349,67 +648,139 @@ static int suspend_device(struct device *dev, pm_message_t state) /** * dpm_suspend - Suspend every device. - * @state: Power state to put each device in. - * - * Walk the dpm_locked list. Suspend each device and move it - * to the dpm_off list. + * @state: PM transition of the system being carried out. 
* - * (For historical reasons, if it returns -EAGAIN, that used to mean - * that the device would be called again with interrupts disabled. - * These days, we use the "suspend_late()" callback for that, so we - * print a warning and consider it an error). + * Execute the appropriate "suspend" callbacks for all devices. */ static int dpm_suspend(pm_message_t state) { + struct list_head list; int error = 0; + INIT_LIST_HEAD(&list); mutex_lock(&dpm_list_mtx); - while (!list_empty(&dpm_active)) { - struct list_head *entry = dpm_active.prev; - struct device *dev = to_device(entry); + while (!list_empty(&dpm_list)) { + struct device *dev = to_device(dpm_list.prev); - WARN_ON(dev->parent && dev->parent->power.sleeping); - - dev->power.sleeping = true; + get_device(dev); mutex_unlock(&dpm_list_mtx); + error = suspend_device(dev, state); + mutex_lock(&dpm_list_mtx); if (error) { - printk(KERN_ERR "Could not suspend device %s: " - "error %d%s\n", - kobject_name(&dev->kobj), - error, - (error == -EAGAIN ? - " (please convert to suspend_late)" : - "")); - dev->power.sleeping = false; + pm_dev_err(dev, state, "", error); + put_device(dev); break; } + dev->power.status = DPM_OFF; if (!list_empty(&dev->power.entry)) - list_move(&dev->power.entry, &dpm_off); + list_move(&dev->power.entry, &list); + put_device(dev); } - if (!error) - all_sleeping = true; + list_splice(&list, dpm_list.prev); mutex_unlock(&dpm_list_mtx); + return error; +} + +/** + * prepare_device - Execute the ->prepare() callback(s) for given device. + * @dev: Device. + * @state: PM transition of the system being carried out. + */ +static int prepare_device(struct device *dev, pm_message_t state) +{ + int error = 0; + + down(&dev->sem); + + if (dev->bus && dev->bus->pm && dev->bus->pm->base.prepare) { + pm_dev_dbg(dev, state, "preparing "); + error = dev->bus->pm->base.prepare(dev); + suspend_report_result(dev->bus->pm->base.prepare, error); + if (error) + goto End; + } + + if (dev->type && dev->type->pm && dev->type->pm->prepare) { + pm_dev_dbg(dev, state, "preparing type "); + error = dev->type->pm->prepare(dev); + suspend_report_result(dev->type->pm->prepare, error); + if (error) + goto End; + } + + if (dev->class && dev->class->pm && dev->class->pm->prepare) { + pm_dev_dbg(dev, state, "preparing class "); + error = dev->class->pm->prepare(dev); + suspend_report_result(dev->class->pm->prepare, error); + } + End: + up(&dev->sem); + + return error; +} + +/** + * dpm_prepare - Prepare all devices for a PM transition. + * @state: PM transition of the system being carried out. + * + * Execute the ->prepare() callback for all devices. 
+ */ +static int dpm_prepare(pm_message_t state) +{ + struct list_head list; + int error = 0; + + INIT_LIST_HEAD(&list); + mutex_lock(&dpm_list_mtx); + transition_started = true; + while (!list_empty(&dpm_list)) { + struct device *dev = to_device(dpm_list.next); + + get_device(dev); + dev->power.status = DPM_PREPARING; + mutex_unlock(&dpm_list_mtx); + error = prepare_device(dev, state); + + mutex_lock(&dpm_list_mtx); + if (error) { + dev->power.status = DPM_ON; + if (error == -EAGAIN) { + put_device(dev); + continue; + } + printk(KERN_ERR "PM: Failed to prepare device %s " + "for power transition: error %d\n", + kobject_name(&dev->kobj), error); + put_device(dev); + break; + } + dev->power.status = DPM_SUSPENDING; + if (!list_empty(&dev->power.entry)) + list_move_tail(&dev->power.entry, &list); + put_device(dev); + } + list_splice(&list, &dpm_list); + mutex_unlock(&dpm_list_mtx); return error; } /** * device_suspend - Save state and stop all devices in system. - * @state: new power management state + * @state: PM transition of the system being carried out. * - * Prevent new devices from being registered, then lock all devices - * and suspend them. + * Prepare and suspend all devices. */ int device_suspend(pm_message_t state) { int error; might_sleep(); - error = dpm_suspend(state); - if (error) - device_resume(); + error = dpm_prepare(state); + if (!error) + error = dpm_suspend(state); return error; } EXPORT_SYMBOL_GPL(device_suspend); diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index a6894f2a4b9..a3252c0e288 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h @@ -4,7 +4,7 @@ * main.c */ -extern struct list_head dpm_active; /* The active device list */ +extern struct list_head dpm_list; /* The active device list */ static inline struct device *to_device(struct list_head *entry) { diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index d11f74b038d..596aeecfdff 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -6,9 +6,6 @@ #include <linux/string.h> #include "power.h" -int (*platform_enable_wakeup)(struct device *dev, int is_on); - - /* * wakeup - Report/change current wakeup option for device * diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c index 87a7f1d0257..9b1b20b59e0 100644 --- a/drivers/base/power/trace.c +++ b/drivers/base/power/trace.c @@ -188,9 +188,9 @@ static int show_file_hash(unsigned int value) static int show_dev_hash(unsigned int value) { int match = 0; - struct list_head * entry = dpm_active.prev; + struct list_head *entry = dpm_list.prev; - while (entry != &dpm_active) { + while (entry != &dpm_list) { struct device * dev = to_device(entry); unsigned int hash = hash_string(DEVSEED, dev->bus_id, DEVHASH); if (hash == value) { diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index 0d1d2133d9b..61ad8d639ba 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -433,4 +433,16 @@ config VIRTIO_BLK This is the virtual block driver for virtio. It can be used with lguest or QEMU based VMMs (like KVM or Xen). Say Y or M. +config BLK_DEV_HD + bool "Very old hard disk (MFM/RLL/IDE) driver" + depends on HAVE_IDE + depends on !ARM || ARCH_RPC || ARCH_SHARK || BROKEN + help + This is a very old hard disk driver that lacks the enhanced + functionality of the newer ones. + + It is required for systems with ancient MFM/RLL/ESDI drives. + + If unsure, say N. 
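Returning to the drivers/base/power/main.c rework above: the entry points now take the PM transition explicitly, and device_suspend() no longer runs the resume path itself when it fails, so the caller is expected to do that in every case. Below is a minimal sketch of the resulting calling convention; it is not taken from this patch, CPU hotplug and interrupt handling are elided, and enter_target_state() is a hypothetical placeholder for the platform-specific sleep entry.

#include <linux/device.h>
#include <linux/pm.h>

extern void enter_target_state(void);	/* hypothetical platform hook */

/* Sketch of the suspend/resume sequence implied by the reworked entry
 * points above; only the device-level calls are shown. */
static int example_suspend_devices_and_enter(void)
{
	int error;

	error = device_suspend(PMSG_SUSPEND);	/* dpm_prepare() + dpm_suspend() */
	if (error)
		goto Resume;

	error = device_power_down(PMSG_SUSPEND);	/* "noirq"/late phase */
	if (!error) {
		enter_target_state();
		device_power_up(PMSG_RESUME);		/* "noirq"/early phase */
	}
 Resume:
	device_resume(PMSG_RESUME);		/* dpm_resume() + dpm_complete() */
	return error;
}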
+ endif # BLK_DEV diff --git a/drivers/block/Makefile b/drivers/block/Makefile index 5e584306be9..204332b2957 100644 --- a/drivers/block/Makefile +++ b/drivers/block/Makefile @@ -29,5 +29,6 @@ obj-$(CONFIG_VIRTIO_BLK) += virtio_blk.o obj-$(CONFIG_VIODASD) += viodasd.o obj-$(CONFIG_BLK_DEV_SX8) += sx8.o obj-$(CONFIG_BLK_DEV_UB) += ub.o +obj-$(CONFIG_BLK_DEV_HD) += hd.o obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o diff --git a/drivers/ide/legacy/hd.c b/drivers/block/hd.c index abdedf56643..682243bf2e4 100644 --- a/drivers/ide/legacy/hd.c +++ b/drivers/block/hd.c @@ -37,7 +37,6 @@ #include <linux/slab.h> #include <linux/string.h> #include <linux/ioport.h> -#include <linux/mc146818rtc.h> /* CMOS defines */ #include <linux/init.h> #include <linux/blkpg.h> #include <linux/hdreg.h> @@ -812,4 +811,4 @@ static int __init parse_hd_setup(char *line) } __setup("hd=", parse_hd_setup); -module_init(hd_init); +late_initcall(hd_init); diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c index da8a1658a27..aaca40283be 100644 --- a/drivers/char/apm-emulation.c +++ b/drivers/char/apm-emulation.c @@ -59,6 +59,55 @@ struct apm_queue { }; /* + * thread states (for threads using a writable /dev/apm_bios fd): + * + * SUSPEND_NONE: nothing happening + * SUSPEND_PENDING: suspend event queued for thread and pending to be read + * SUSPEND_READ: suspend event read, pending acknowledgement + * SUSPEND_ACKED: acknowledgement received from thread (via ioctl), + * waiting for resume + * SUSPEND_ACKTO: acknowledgement timeout + * SUSPEND_DONE: thread had acked suspend and is now notified of + * resume + * + * SUSPEND_WAIT: this thread invoked suspend and is waiting for resume + * + * A thread migrates in one of three paths: + * NONE -1-> PENDING -2-> READ -3-> ACKED -4-> DONE -5-> NONE + * -6-> ACKTO -7-> NONE + * NONE -8-> WAIT -9-> NONE + * + * While in PENDING or READ, the thread is accounted for in the + * suspend_acks_pending counter. 
+ * + * The transitions are invoked as follows: + * 1: suspend event is signalled from the core PM code + * 2: the suspend event is read from the fd by the userspace thread + * 3: userspace thread issues the APM_IOC_SUSPEND ioctl (as ack) + * 4: core PM code signals that we have resumed + * 5: APM_IOC_SUSPEND ioctl returns + * + * 6: the notifier invoked from the core PM code timed out waiting + * for all relevant threads to enter ACKED state and puts those + * that haven't into ACKTO + * 7: those threads issue APM_IOC_SUSPEND ioctl too late, + * get an error + * + * 8: userspace thread issues the APM_IOC_SUSPEND ioctl (to suspend), + * ioctl code invokes pm_suspend() + * 9: pm_suspend() returns indicating resume + */ +enum apm_suspend_state { + SUSPEND_NONE, + SUSPEND_PENDING, + SUSPEND_READ, + SUSPEND_ACKED, + SUSPEND_ACKTO, + SUSPEND_WAIT, + SUSPEND_DONE, +}; + +/* * The per-file APM data */ struct apm_user { @@ -69,13 +118,7 @@ struct apm_user { unsigned int reader: 1; int suspend_result; - unsigned int suspend_state; -#define SUSPEND_NONE 0 /* no suspend pending */ -#define SUSPEND_PENDING 1 /* suspend pending read */ -#define SUSPEND_READ 2 /* suspend read, pending ack */ -#define SUSPEND_ACKED 3 /* suspend acked */ -#define SUSPEND_WAIT 4 /* waiting for suspend */ -#define SUSPEND_DONE 5 /* suspend completed */ + enum apm_suspend_state suspend_state; struct apm_queue queue; }; @@ -83,7 +126,8 @@ struct apm_user { /* * Local variables */ -static int suspends_pending; +static atomic_t suspend_acks_pending = ATOMIC_INIT(0); +static atomic_t userspace_notification_inhibit = ATOMIC_INIT(0); static int apm_disabled; static struct task_struct *kapmd_tsk; @@ -166,78 +210,6 @@ static void queue_event(apm_event_t event) wake_up_interruptible(&apm_waitqueue); } -/* - * queue_suspend_event - queue an APM suspend event. - * - * Check that we're in a state where we can suspend. If not, - * return -EBUSY. Otherwise, queue an event to all "writer" - * users. If there are no "writer" users, return '1' to - * indicate that we can immediately suspend. - */ -static int queue_suspend_event(apm_event_t event, struct apm_user *sender) -{ - struct apm_user *as; - int ret = 1; - - mutex_lock(&state_lock); - down_read(&user_list_lock); - - /* - * If a thread is still processing, we can't suspend, so reject - * the request. - */ - list_for_each_entry(as, &apm_user_list, list) { - if (as != sender && as->reader && as->writer && as->suser && - as->suspend_state != SUSPEND_NONE) { - ret = -EBUSY; - goto out; - } - } - - list_for_each_entry(as, &apm_user_list, list) { - if (as != sender && as->reader && as->writer && as->suser) { - as->suspend_state = SUSPEND_PENDING; - suspends_pending++; - queue_add_event(&as->queue, event); - ret = 0; - } - } - out: - up_read(&user_list_lock); - mutex_unlock(&state_lock); - wake_up_interruptible(&apm_waitqueue); - return ret; -} - -static void apm_suspend(void) -{ - struct apm_user *as; - int err = pm_suspend(PM_SUSPEND_MEM); - - /* - * Anyone on the APM queues will think we're still suspended. - * Send a message so everyone knows we're now awake again. - */ - queue_event(APM_NORMAL_RESUME); - - /* - * Finally, wake up anyone who is sleeping on the suspend.
- */ - mutex_lock(&state_lock); - down_read(&user_list_lock); - list_for_each_entry(as, &apm_user_list, list) { - if (as->suspend_state == SUSPEND_WAIT || - as->suspend_state == SUSPEND_ACKED) { - as->suspend_result = err; - as->suspend_state = SUSPEND_DONE; - } - } - up_read(&user_list_lock); - mutex_unlock(&state_lock); - - wake_up(&apm_suspend_waitqueue); -} - static ssize_t apm_read(struct file *fp, char __user *buf, size_t count, loff_t *ppos) { struct apm_user *as = fp->private_data; @@ -308,25 +280,22 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg) as->suspend_result = -EINTR; - if (as->suspend_state == SUSPEND_READ) { - int pending; - + switch (as->suspend_state) { + case SUSPEND_READ: /* * If we read a suspend command from /dev/apm_bios, * then the corresponding APM_IOC_SUSPEND ioctl is * interpreted as an acknowledge. */ as->suspend_state = SUSPEND_ACKED; - suspends_pending--; - pending = suspends_pending == 0; + atomic_dec(&suspend_acks_pending); mutex_unlock(&state_lock); /* - * If there are no further acknowledges required, - * suspend the system. + * suspend_acks_pending changed, the notifier needs to + * be woken up for this */ - if (pending) - apm_suspend(); + wake_up(&apm_suspend_waitqueue); /* * Wait for the suspend/resume to complete. If there @@ -342,35 +311,21 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg) * try_to_freeze() in freezer_count() will not trigger */ freezer_count(); - } else { + break; + case SUSPEND_ACKTO: + as->suspend_result = -ETIMEDOUT; + mutex_unlock(&state_lock); + break; + default: as->suspend_state = SUSPEND_WAIT; mutex_unlock(&state_lock); /* * Otherwise it is a request to suspend the system. - * Queue an event for all readers, and expect an - * acknowledge from all writers who haven't already - * acknowledged. - */ - err = queue_suspend_event(APM_USER_SUSPEND, as); - if (err < 0) { - /* - * Avoid taking the lock here - this - * should be fine. - */ - as->suspend_state = SUSPEND_NONE; - break; - } - - if (err > 0) - apm_suspend(); - - /* - * Wait for the suspend/resume to complete. If there - * are pending acknowledges, we wait here for them. + * Just invoke pm_suspend(), we'll handle it from + * there via the notifier. */ - wait_event_freezable(apm_suspend_waitqueue, - as->suspend_state == SUSPEND_DONE); + as->suspend_result = pm_suspend(PM_SUSPEND_MEM); } mutex_lock(&state_lock); @@ -386,7 +341,6 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg) static int apm_release(struct inode * inode, struct file * filp) { struct apm_user *as = filp->private_data; - int pending = 0; filp->private_data = NULL; @@ -396,18 +350,15 @@ static int apm_release(struct inode * inode, struct file * filp) /* * We are now unhooked from the chain. As far as new - * events are concerned, we no longer exist. However, we - * need to balance suspends_pending, which means the - * possibility of sleeping. + * events are concerned, we no longer exist. 
*/ mutex_lock(&state_lock); - if (as->suspend_state != SUSPEND_NONE) { - suspends_pending -= 1; - pending = suspends_pending == 0; - } + if (as->suspend_state == SUSPEND_PENDING || + as->suspend_state == SUSPEND_READ) + atomic_dec(&suspend_acks_pending); mutex_unlock(&state_lock); - if (pending) - apm_suspend(); + + wake_up(&apm_suspend_waitqueue); kfree(as); return 0; @@ -545,7 +496,6 @@ static int kapmd(void *arg) { do { apm_event_t event; - int ret; wait_event_interruptible(kapmd_wait, !queue_empty(&kapmd_queue) || kthread_should_stop()); @@ -570,20 +520,13 @@ static int kapmd(void *arg) case APM_USER_SUSPEND: case APM_SYS_SUSPEND: - ret = queue_suspend_event(event, NULL); - if (ret < 0) { - /* - * We were busy. Try again in 50ms. - */ - queue_add_event(&kapmd_queue, event); - msleep(50); - } - if (ret > 0) - apm_suspend(); + pm_suspend(PM_SUSPEND_MEM); break; case APM_CRITICAL_SUSPEND: - apm_suspend(); + atomic_inc(&userspace_notification_inhibit); + pm_suspend(PM_SUSPEND_MEM); + atomic_dec(&userspace_notification_inhibit); break; } } while (1); @@ -591,6 +534,120 @@ static int kapmd(void *arg) return 0; } +static int apm_suspend_notifier(struct notifier_block *nb, + unsigned long event, + void *dummy) +{ + struct apm_user *as; + int err; + + /* short-cut emergency suspends */ + if (atomic_read(&userspace_notification_inhibit)) + return NOTIFY_DONE; + + switch (event) { + case PM_SUSPEND_PREPARE: + /* + * Queue an event to all "writer" users that we want + * to suspend and need their ack. + */ + mutex_lock(&state_lock); + down_read(&user_list_lock); + + list_for_each_entry(as, &apm_user_list, list) { + if (as->suspend_state != SUSPEND_WAIT && as->reader && + as->writer && as->suser) { + as->suspend_state = SUSPEND_PENDING; + atomic_inc(&suspend_acks_pending); + queue_add_event(&as->queue, APM_USER_SUSPEND); + } + } + + up_read(&user_list_lock); + mutex_unlock(&state_lock); + wake_up_interruptible(&apm_waitqueue); + + /* + * Wait for the suspend_acks_pending variable to drop to + * zero, meaning everybody acked the suspend event (or the + * process was killed.) + * + * If the app won't answer within a short while we assume it + * locked up and ignore it. + */ + err = wait_event_interruptible_timeout( + apm_suspend_waitqueue, + atomic_read(&suspend_acks_pending) == 0, + 5*HZ); + + /* timed out */ + if (err == 0) { + /* + * Move anybody who timed out to "ack timeout" state. + * + * We could time out and the userspace does the ACK + * right after we time out but before we enter the + * locked section here, but that's fine. + */ + mutex_lock(&state_lock); + down_read(&user_list_lock); + list_for_each_entry(as, &apm_user_list, list) { + if (as->suspend_state == SUSPEND_PENDING || + as->suspend_state == SUSPEND_READ) { + as->suspend_state = SUSPEND_ACKTO; + atomic_dec(&suspend_acks_pending); + } + } + up_read(&user_list_lock); + mutex_unlock(&state_lock); + } + + /* let suspend proceed */ + if (err >= 0) + return NOTIFY_OK; + + /* interrupted by signal */ + return NOTIFY_BAD; + + case PM_POST_SUSPEND: + /* + * Anyone on the APM queues will think we're still suspended. + * Send a message so everyone knows we're now awake again. + */ + queue_event(APM_NORMAL_RESUME); + + /* + * Finally, wake up anyone who is sleeping on the suspend.
+ */ + mutex_lock(&state_lock); + down_read(&user_list_lock); + list_for_each_entry(as, &apm_user_list, list) { + if (as->suspend_state == SUSPEND_ACKED) { + /* + * TODO: maybe grab error code, needs core + * changes to push the error to the notifier + * chain (could use the second parameter if + * implemented) + */ + as->suspend_result = 0; + as->suspend_state = SUSPEND_DONE; + } + } + up_read(&user_list_lock); + mutex_unlock(&state_lock); + + wake_up(&apm_suspend_waitqueue); + return NOTIFY_OK; + + default: + return NOTIFY_DONE; + } +} + +static struct notifier_block apm_notif_block = { + .notifier_call = apm_suspend_notifier, +}; + static int __init apm_init(void) { int ret; @@ -604,7 +661,7 @@ static int __init apm_init(void) if (IS_ERR(kapmd_tsk)) { ret = PTR_ERR(kapmd_tsk); kapmd_tsk = NULL; - return ret; + goto out; } wake_up_process(kapmd_tsk); @@ -613,16 +670,27 @@ static int __init apm_init(void) #endif ret = misc_register(&apm_device); - if (ret != 0) { - remove_proc_entry("apm", NULL); - kthread_stop(kapmd_tsk); - } + if (ret) + goto out_stop; + ret = register_pm_notifier(&apm_notif_block); + if (ret) + goto out_unregister; + + return 0; + + out_unregister: + misc_deregister(&apm_device); + out_stop: + remove_proc_entry("apm", NULL); + kthread_stop(kapmd_tsk); + out: return ret; } static void __exit apm_exit(void) { + unregister_pm_notifier(&apm_notif_block); misc_deregister(&apm_device); remove_proc_entry("apm", NULL); diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c index 59ca35156d8..e4a4fbd37d7 100644 --- a/drivers/char/pcmcia/cm4000_cs.c +++ b/drivers/char/pcmcia/cm4000_cs.c @@ -1439,7 +1439,7 @@ static long cmm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) DEBUGP(4, dev, "CMM_ABSENT flag set\n"); goto out; } - rc = EINVAL; + rc = -EINVAL; if (_IOC_TYPE(cmd) != CM_IOC_MAGIC) { DEBUGP(4, dev, "ioctype mismatch\n"); diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig index cf707c8f08d..15b09b89588 100644 --- a/drivers/ide/Kconfig +++ b/drivers/ide/Kconfig @@ -98,6 +98,9 @@ if BLK_DEV_IDE comment "Please see Documentation/ide/ide.txt for help/info on IDE drives" +config IDE_TIMINGS + bool + config IDE_ATAPI bool @@ -326,6 +329,7 @@ config BLK_DEV_PLATFORM config BLK_DEV_CMD640 tristate "CMD640 chipset bugfix/support" depends on X86 + select IDE_TIMINGS ---help--- The CMD-Technologies CMD640 IDE chip is used on many common 486 and Pentium motherboards, usually in combination with a "Neptune" or @@ -455,6 +459,7 @@ config BLK_DEV_AEC62XX config BLK_DEV_ALI15X3 tristate "ALI M15x3 chipset support" + select IDE_TIMINGS select BLK_DEV_IDEDMA_PCI help This driver ensures (U)DMA support for ALI 1533, 1543 and 1543C @@ -469,6 +474,7 @@ config BLK_DEV_ALI15X3 config BLK_DEV_AMD74XX tristate "AMD and nVidia IDE support" depends on !ARM + select IDE_TIMINGS select BLK_DEV_IDEDMA_PCI help This driver adds explicit support for AMD-7xx and AMD-8111 chips @@ -489,6 +495,7 @@ config BLK_DEV_ATIIXP config BLK_DEV_CMD64X tristate "CMD64{3|6|8|9} chipset support" + select IDE_TIMINGS select BLK_DEV_IDEDMA_PCI help Say Y here if you have an IDE controller which uses any of these @@ -503,6 +510,7 @@ config BLK_DEV_TRIFLEX config BLK_DEV_CY82C693 tristate "CY82C693 chipset support" + select IDE_TIMINGS select BLK_DEV_IDEDMA_PCI help This driver adds detection and support for the CY82C693 chipset @@ -695,6 +703,7 @@ config BLK_DEV_SIS5513 config BLK_DEV_SL82C105 tristate "Winbond SL82c105 support" depends on (PPC || ARM) + select IDE_TIMINGS select 
BLK_DEV_IDEDMA_PCI help If you have a Winbond SL82c105 IDE controller, say Y here to enable @@ -725,6 +734,7 @@ config BLK_DEV_TRM290 config BLK_DEV_VIA82CXXX tristate "VIA82CXXX chipset support" + select IDE_TIMINGS select BLK_DEV_IDEDMA_PCI help This driver adds explicit support for VIA BusMastering IDE chips. @@ -751,6 +761,7 @@ endif config BLK_DEV_IDE_PMAC tristate "PowerMac on-board IDE support" depends on PPC_PMAC && IDE=y && BLK_DEV_IDE=y + select IDE_TIMINGS help This driver provides support for the on-board IDE controller on most of the recent Apple Power Macintoshes and PowerBooks. @@ -829,13 +840,6 @@ config BLK_DEV_IDE_RAPIDE Say Y here if you want to support the Yellowstone RapIDE controller manufactured for use with Acorn computers. -config BLK_DEV_IDE_BAST - tristate "Simtec BAST / Thorcom VR1000 IDE support" - depends on ARM && (ARCH_BAST || MACH_VR1000) - help - Say Y here if you want to support the onboard IDE channels on the - Simtec BAST or the Thorcom VR1000 - config IDE_H8300 tristate "H8300 IDE support" depends on H8300 @@ -919,51 +923,12 @@ config BLK_DEV_Q40IDE config BLK_DEV_PALMCHIP_BK3710 tristate "Palmchip bk3710 IDE controller support" depends on ARCH_DAVINCI + select IDE_TIMINGS select BLK_DEV_IDEDMA_SFF help Say Y here if you want to support the onchip IDE controller on the TI DaVinci SoC - -config BLK_DEV_MPC8xx_IDE - tristate "MPC8xx IDE support" - depends on 8xx && (LWMON || IVMS8 || IVML24 || TQM8xxL) && IDE=y && BLK_DEV_IDE=y && !PPC_MERGE - help - This option provides support for IDE on Motorola MPC8xx Systems. - Please see 'Type of MPC8xx IDE interface' for details. - - If unsure, say N. - -choice - prompt "Type of MPC8xx IDE interface" - depends on BLK_DEV_MPC8xx_IDE - default IDE_8xx_PCCARD - -config IDE_8xx_PCCARD - bool "8xx_PCCARD" - ---help--- - Select how the IDE devices are connected to the MPC8xx system: - - 8xx_PCCARD uses the 8xx internal PCMCIA interface in combination - with a PC Card (e.g. ARGOSY portable Hard Disk Adapter), - ATA PC Card HDDs or ATA PC Flash Cards (example: TQM8xxL - systems) - - 8xx_DIRECT is used for directly connected IDE devices using the 8xx - internal PCMCIA interface (example: IVMS8 systems) - - EXT_DIRECT is used for IDE devices directly connected to the 8xx - bus using some glue logic, but _not_ the 8xx internal - PCMCIA interface (example: IDIF860 systems) - -config IDE_8xx_DIRECT - bool "8xx_DIRECT" - -config IDE_EXT_DIRECT - bool "EXT_DIRECT" - -endchoice - # no isa -> no vlb if ISA && (ALPHA || X86 || MIPS) @@ -981,6 +946,7 @@ config BLK_DEV_4DRIVES config BLK_DEV_ALI14XX tristate "ALI M14xx support" + select IDE_TIMINGS help This driver is enabled at runtime using the "ali14xx.probe" kernel boot parameter. It enables support for the secondary IDE interface @@ -1000,6 +966,7 @@ config BLK_DEV_DTC2278 config BLK_DEV_HT6560B tristate "Holtek HT6560B support" + select IDE_TIMINGS help This driver is enabled at runtime using the "ht6560b.probe" kernel boot parameter. It enables support for the secondary IDE interface @@ -1009,6 +976,7 @@ config BLK_DEV_HT6560B config BLK_DEV_QD65XX tristate "QDI QD65xx support" + select IDE_TIMINGS help This driver is enabled at runtime using the "qd65xx.probe" kernel boot parameter. It permits faster I/O speeds to be set. See the @@ -1032,30 +1000,4 @@ config BLK_DEV_IDEDMA endif -config BLK_DEV_HD_ONLY - bool "Old hard disk (MFM/RLL/IDE) driver" - depends on !ARM || ARCH_RPC || ARCH_SHARK || BROKEN - help - There are two drivers for MFM/RLL/IDE hard disks. 
Most people use - the newer enhanced driver, but this old one is still around for two - reasons. Some older systems have strange timing problems and seem to - work only with the old driver (which itself does not work with some - newer systems). The other reason is that the old driver is smaller, - since it lacks the enhanced functionality of the new one. This makes - it a good choice for systems with very tight memory restrictions, or - for systems with only older MFM/RLL/ESDI drives. Choosing the old - driver can save 13 KB or so of kernel memory. - - If you want to use this driver together with the new one you have - to use "hda=noprobe hdb=noprobe" kernel parameters to prevent the new - driver from probing the primary interface. - - If you are unsure, then just choose the Enhanced IDE/MFM/RLL driver - instead of this one. For more detailed information, read the - Disk-HOWTO, available from - <http://www.tldp.org/docs.html#howto>. - -config BLK_DEV_HD - def_bool BLK_DEV_HD_ONLY - endif # IDE diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile index a2b3f84d710..5d414e301a5 100644 --- a/drivers/ide/Makefile +++ b/drivers/ide/Makefile @@ -11,9 +11,11 @@ EXTRA_CFLAGS += -Idrivers/ide -ide-core-y += ide.o ide-io.o ide-iops.o ide-lib.o ide-probe.o ide-taskfile.o +ide-core-y += ide.o ide-io.o ide-iops.o ide-lib.o ide-probe.o ide-taskfile.o \ + ide-pio-blacklist.o # core IDE code +ide-core-$(CONFIG_IDE_TIMINGS) += ide-timings.o ide-core-$(CONFIG_IDE_ATAPI) += ide-atapi.o ide-core-$(CONFIG_BLK_DEV_IDEPCI) += setup-pci.o ide-core-$(CONFIG_BLK_DEV_IDEDMA) += ide-dma.o @@ -59,9 +61,3 @@ ifeq ($(CONFIG_BLK_DEV_PLATFORM), y) endif obj-$(CONFIG_BLK_DEV_IDE) += arm/ mips/ - -# old hd driver must be last -ifeq ($(CONFIG_BLK_DEV_HD), y) - hd-core-y += legacy/hd.o - obj-y += hd-core.o -endif diff --git a/drivers/ide/arm/Makefile b/drivers/ide/arm/Makefile index 936e7b0237f..5bc26053afa 100644 --- a/drivers/ide/arm/Makefile +++ b/drivers/ide/arm/Makefile @@ -1,7 +1,6 @@ obj-$(CONFIG_BLK_DEV_IDE_ICSIDE) += icside.o obj-$(CONFIG_BLK_DEV_IDE_RAPIDE) += rapide.o -obj-$(CONFIG_BLK_DEV_IDE_BAST) += bast-ide.o obj-$(CONFIG_BLK_DEV_PALMCHIP_BK3710) += palm_bk3710.o ifeq ($(CONFIG_IDE_ARM), m) diff --git a/drivers/ide/arm/bast-ide.c b/drivers/ide/arm/bast-ide.c deleted file mode 100644 index 8e8c28104b4..00000000000 --- a/drivers/ide/arm/bast-ide.c +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright (c) 2003-2004 Simtec Electronics - * Ben Dooks <ben@simtec.co.uk> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * -*/ - -#include <linux/module.h> -#include <linux/errno.h> -#include <linux/ide.h> -#include <linux/init.h> - -#include <asm/mach-types.h> - -#include <asm/io.h> -#include <asm/irq.h> -#include <asm/arch/map.h> -#include <asm/arch/bast-map.h> -#include <asm/arch/bast-irq.h> - -#define DRV_NAME "bast-ide" - -static int __init bastide_register(unsigned int base, unsigned int aux, int irq) -{ - ide_hwif_t *hwif; - hw_regs_t hw; - int i; - u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; - - memset(&hw, 0, sizeof(hw)); - - base += BAST_IDE_CS; - aux += BAST_IDE_CS; - - for (i = 0; i <= 7; i++) { - hw.io_ports_array[i] = (unsigned long)base; - base += 0x20; - } - - hw.io_ports.ctl_addr = aux + (6 * 0x20); - hw.irq = irq; - hw.chipset = ide_generic; - - hwif = ide_find_port(); - if (hwif == NULL) - goto out; - - i = hwif->index; - - ide_init_port_data(hwif, i); - ide_init_port_hw(hwif, &hw); - hwif->port_ops = NULL; - - idx[0] = i; - - ide_device_add(idx, NULL); -out: - return 0; -} - -static int __init bastide_init(void) -{ - unsigned long base = BAST_VA_IDEPRI + BAST_IDE_CS; - - /* we can treat the VR1000 and the BAST the same */ - - if (!(machine_is_bast() || machine_is_vr1000())) - return 0; - - printk("BAST: IDE driver, (c) 2003-2004 Simtec Electronics\n"); - - if (!request_mem_region(base, 0x400000, DRV_NAME)) { - printk(KERN_ERR "%s: resources busy\n", DRV_NAME); - return -EBUSY; - } - - bastide_register(BAST_VA_IDEPRI, BAST_VA_IDEPRIAUX, IRQ_IDE0); - bastide_register(BAST_VA_IDESEC, BAST_VA_IDESECAUX, IRQ_IDE1); - - return 0; -} - -module_init(bastide_init); - -MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Simtec BAST / Thorcom VR1000 IDE driver"); diff --git a/drivers/ide/arm/icside.c b/drivers/ide/arm/icside.c index 061456914ca..52f58c88578 100644 --- a/drivers/ide/arm/icside.c +++ b/drivers/ide/arm/icside.c @@ -21,6 +21,8 @@ #include <asm/dma.h> #include <asm/ecard.h> +#define DRV_NAME "icside" + #define ICS_IDENT_OFFSET 0x2280 #define ICS_ARCIN_V5_INTRSTAT 0x0000 @@ -68,6 +70,7 @@ struct icside_state { unsigned int enabled; void __iomem *irq_port; void __iomem *ioc_base; + unsigned int sel; unsigned int type; ide_hwif_t *hwif[2]; }; @@ -165,7 +168,8 @@ static const expansioncard_ops_t icside_ops_arcin_v6 = { static void icside_maskproc(ide_drive_t *drive, int mask) { ide_hwif_t *hwif = HWIF(drive); - struct icside_state *state = hwif->hwif_data; + struct expansion_card *ec = ECARD_DEV(hwif->dev); + struct icside_state *state = ecard_get_drvdata(ec); unsigned long flags; local_irq_save(flags); @@ -308,6 +312,7 @@ static int icside_dma_setup(ide_drive_t *drive) { ide_hwif_t *hwif = HWIF(drive); struct expansion_card *ec = ECARD_DEV(hwif->dev); + struct icside_state *state = ecard_get_drvdata(ec); struct request *rq = hwif->hwgroup->rq; unsigned int dma_mode; @@ -331,7 +336,7 @@ static int icside_dma_setup(ide_drive_t *drive) /* * Route the DMA signals to the correct interface. */ - writeb(hwif->select_data, hwif->config_data); + writeb(state->sel | hwif->channel, state->ioc_base); /* * Select the correct timing for this drive. @@ -359,7 +364,8 @@ static void icside_dma_exec_cmd(ide_drive_t *drive, u8 cmd) static int icside_dma_test_irq(ide_drive_t *drive) { ide_hwif_t *hwif = HWIF(drive); - struct icside_state *state = hwif->hwif_data; + struct expansion_card *ec = ECARD_DEV(hwif->dev); + struct icside_state *state = ecard_get_drvdata(ec); return readb(state->irq_port + (hwif->channel ? 
@@ -411,36 +417,24 @@ static int icside_dma_off_init(ide_hwif_t *hwif, const struct ide_port_info *d) return -EOPNOTSUPP; } -static ide_hwif_t * -icside_setup(void __iomem *base, struct cardinfo *info, struct expansion_card *ec) +static void icside_setup_ports(hw_regs_t *hw, void __iomem *base, + struct cardinfo *info, struct expansion_card *ec) { unsigned long port = (unsigned long)base + info->dataoffset; - ide_hwif_t *hwif; - hwif = ide_find_port(); - if (hwif) { - /* - * Ensure we're using MMIO - */ - default_hwif_mmiops(hwif); - - hwif->io_ports.data_addr = port; - hwif->io_ports.error_addr = port + (1 << info->stepping); - hwif->io_ports.nsect_addr = port + (2 << info->stepping); - hwif->io_ports.lbal_addr = port + (3 << info->stepping); - hwif->io_ports.lbam_addr = port + (4 << info->stepping); - hwif->io_ports.lbah_addr = port + (5 << info->stepping); - hwif->io_ports.device_addr = port + (6 << info->stepping); - hwif->io_ports.status_addr = port + (7 << info->stepping); - hwif->io_ports.ctl_addr = - (unsigned long)base + info->ctrloffset; - hwif->irq = ec->irq; - hwif->chipset = ide_acorn; - hwif->gendev.parent = &ec->dev; - hwif->dev = &ec->dev; - } - - return hwif; + hw->io_ports.data_addr = port; + hw->io_ports.error_addr = port + (1 << info->stepping); + hw->io_ports.nsect_addr = port + (2 << info->stepping); + hw->io_ports.lbal_addr = port + (3 << info->stepping); + hw->io_ports.lbam_addr = port + (4 << info->stepping); + hw->io_ports.lbah_addr = port + (5 << info->stepping); + hw->io_ports.device_addr = port + (6 << info->stepping); + hw->io_ports.status_addr = port + (7 << info->stepping); + hw->io_ports.ctl_addr = (unsigned long)base + info->ctrloffset; + + hw->irq = ec->irq; + hw->dev = &ec->dev; + hw->chipset = ide_acorn; } static int __init @@ -449,6 +443,7 @@ icside_register_v5(struct icside_state *state, struct expansion_card *ec) ide_hwif_t *hwif; void __iomem *base; u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; + hw_regs_t hw; base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0); if (!base) @@ -466,12 +461,19 @@ icside_register_v5(struct icside_state *state, struct expansion_card *ec) */ icside_irqdisable_arcin_v5(ec, 0); - hwif = icside_setup(base, &icside_cardinfo_v5, ec); + icside_setup_ports(&hw, base, &icside_cardinfo_v5, ec); + + hwif = ide_find_port(); if (!hwif) return -ENODEV; + ide_init_port_hw(hwif, &hw); + default_hwif_mmiops(hwif); + state->hwif[0] = hwif; + ecard_set_drvdata(ec, state); + idx[0] = hwif->index; ide_device_add(idx, NULL); @@ -497,6 +499,7 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec) int ret; u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; struct ide_port_info d = icside_v6_port_info; + hw_regs_t hw[2]; ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0); if (!ioc_base) { @@ -525,43 +528,47 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec) state->irq_port = easi_base; state->ioc_base = ioc_base; + state->sel = sel; /* * Be on the safe side - disable interrupts */ icside_irqdisable_arcin_v6(ec, 0); + icside_setup_ports(&hw[0], easi_base, &icside_cardinfo_v6_1, ec); + icside_setup_ports(&hw[1], easi_base, &icside_cardinfo_v6_2, ec); + /* * Find and register the interfaces. 
*/ - hwif = icside_setup(easi_base, &icside_cardinfo_v6_1, ec); - mate = icside_setup(easi_base, &icside_cardinfo_v6_2, ec); + hwif = ide_find_port(); + if (hwif == NULL) + return -ENODEV; - if (!hwif || !mate) { - ret = -ENODEV; - goto out; + ide_init_port_hw(hwif, &hw[0]); + default_hwif_mmiops(hwif); + + idx[0] = hwif->index; + + mate = ide_find_port(); + if (mate) { + ide_init_port_hw(mate, &hw[1]); + default_hwif_mmiops(mate); + + idx[1] = mate->index; } state->hwif[0] = hwif; state->hwif[1] = mate; - hwif->hwif_data = state; - hwif->config_data = (unsigned long)ioc_base; - hwif->select_data = sel; - - mate->hwif_data = state; - mate->config_data = (unsigned long)ioc_base; - mate->select_data = sel | 1; + ecard_set_drvdata(ec, state); - if (ec->dma != NO_DMA && !request_dma(ec->dma, hwif->name)) { + if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) { d.init_dma = icside_dma_init; d.port_ops = &icside_v6_port_ops; d.dma_ops = NULL; } - idx[0] = hwif->index; - idx[1] = mate->index; - ide_device_add(idx, &d); return 0; @@ -627,10 +634,8 @@ icside_probe(struct expansion_card *ec, const struct ecard_id *id) break; } - if (ret == 0) { - ecard_set_drvdata(ec, state); + if (ret == 0) goto out; - } kfree(state); release: diff --git a/drivers/ide/arm/palm_bk3710.c b/drivers/ide/arm/palm_bk3710.c index 3839f572298..c79b85b6e4a 100644 --- a/drivers/ide/arm/palm_bk3710.c +++ b/drivers/ide/arm/palm_bk3710.c @@ -74,8 +74,6 @@ struct palm_bk3710_udmatiming { #define BK3710_IORDYTMP 0x78 #define BK3710_IORDYTMS 0x7C -#include "../ide-timing.h" - static unsigned ideclk_period; /* in nanoseconds */ static const struct palm_bk3710_udmatiming palm_bk3710_udmatimings[6] = { @@ -402,7 +400,6 @@ static int __devinit palm_bk3710_probe(struct platform_device *pdev) i = hwif->index; - ide_init_port_data(hwif, i); ide_init_port_hw(hwif, &hw); default_hwif_mmiops(hwif); diff --git a/drivers/ide/arm/rapide.c b/drivers/ide/arm/rapide.c index 1747b235877..43057e0303c 100644 --- a/drivers/ide/arm/rapide.c +++ b/drivers/ide/arm/rapide.c @@ -11,6 +11,10 @@ #include <asm/ecard.h> +static const struct ide_port_info rapide_port_info = { + .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA, +}; + static void rapide_setup_ports(hw_regs_t *hw, void __iomem *base, void __iomem *ctrl, unsigned int sz, int irq) { @@ -44,25 +48,26 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id) goto release; } - hwif = ide_find_port(); - if (hwif) { - memset(&hw, 0, sizeof(hw)); - rapide_setup_ports(&hw, base, base + 0x818, 1 << 6, ec->irq); - hw.chipset = ide_generic; - hw.dev = &ec->dev; + memset(&hw, 0, sizeof(hw)); + rapide_setup_ports(&hw, base, base + 0x818, 1 << 6, ec->irq); + hw.chipset = ide_generic; + hw.dev = &ec->dev; - ide_init_port_hw(hwif, &hw); + hwif = ide_find_port(); + if (hwif == NULL) { + ret = -ENOENT; + goto release; + } - hwif->host_flags = IDE_HFLAG_MMIO; - default_hwif_mmiops(hwif); + ide_init_port_hw(hwif, &hw); + default_hwif_mmiops(hwif); - idx[0] = hwif->index; + idx[0] = hwif->index; - ide_device_add(idx, NULL); + ide_device_add(idx, &rapide_port_info); - ecard_set_drvdata(ec, hwif); - goto out; - } + ecard_set_drvdata(ec, hwif); + goto out; release: ecard_release_resources(ec); diff --git a/drivers/ide/h8300/ide-h8300.c b/drivers/ide/h8300/ide-h8300.c index ae37ee58bae..20fad6d542c 100644 --- a/drivers/ide/h8300/ide-h8300.c +++ b/drivers/ide/h8300/ide-h8300.c @@ -8,6 +8,8 @@ #include <asm/io.h> #include <asm/irq.h> +#define DRV_NAME "ide-h8300" + #define bswap(d) \ ({ \ u16 r; \ @@
-176,6 +178,10 @@ static inline void hwif_setup(ide_hwif_t *hwif) hwif->output_data = h8300_output_data; } +static const struct ide_port_info h8300_port_info = { + .host_flags = IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_NO_DMA, +}; + static int __init h8300_ide_init(void) { hw_regs_t hw; @@ -183,6 +189,8 @@ static int __init h8300_ide_init(void) int index; u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; + printk(KERN_INFO DRV_NAME ": H8/300 generic IDE interface\n"); + if (!request_region(CONFIG_H8300_IDE_BASE, H8300_IDE_GAP*8, "ide-h8300")) goto out_busy; if (!request_region(CONFIG_H8300_IDE_ALT, H8300_IDE_GAP, "ide-h8300")) { @@ -192,22 +200,17 @@ static int __init h8300_ide_init(void) hw_setup(&hw); - hwif = ide_find_port(); - if (hwif == NULL) { - printk(KERN_ERR "ide-h8300: IDE I/F register failed\n"); + hwif = ide_find_port_slot(&h8300_port_info); + if (hwif == NULL) return -ENOENT; - } index = hwif->index; - ide_init_port_data(hwif, index); ide_init_port_hw(hwif, &hw); hwif_setup(hwif); - hwif->host_flags = IDE_HFLAG_NO_IO_32BIT; - printk(KERN_INFO "ide%d: H8/300 generic IDE interface\n", index); idx[0] = index; - ide_device_add(idx, NULL); + ide_device_add(idx, &h8300_port_info); return 0; diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index d9984715718..6e29dd53209 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c @@ -517,14 +517,9 @@ static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive, int xferlen, ide_handler_t *handler) { - ide_startstop_t startstop; struct cdrom_info *info = drive->driver_data; ide_hwif_t *hwif = drive->hwif; - /* wait for the controller to be idle */ - if (ide_wait_stat(&startstop, drive, 0, BUSY_STAT, WAIT_READY)) - return startstop; - /* FIXME: for Virtual DMA we must check harder */ if (info->dma) info->dma = !hwif->dma_ops->dma_setup(drive); @@ -604,28 +599,6 @@ static ide_startstop_t cdrom_transfer_packet_command(ide_drive_t *drive, } /* - * Block read functions. - */ -static void ide_cd_pad_transfer(ide_drive_t *drive, xfer_func_t *xf, int len) -{ - while (len > 0) { - int dum = 0; - xf(drive, NULL, &dum, sizeof(dum)); - len -= sizeof(dum); - } -} - -static void ide_cd_drain_data(ide_drive_t *drive, int nsects) -{ - while (nsects > 0) { - static char dum[SECTOR_SIZE]; - - drive->hwif->input_data(drive, NULL, dum, sizeof(dum)); - nsects--; - } -} - -/* * Check the contents of the interrupt reason register from the cdrom * and attempt to recover if there are problems. Returns 0 if everything's * ok; nonzero if the request has been terminated. @@ -640,15 +613,12 @@ static int ide_cd_check_ireason(ide_drive_t *drive, struct request *rq, if (ireason == (!rw << 1)) return 0; else if (ireason == (rw << 1)) { - ide_hwif_t *hwif = drive->hwif; - xfer_func_t *xf; /* whoops... */ printk(KERN_ERR "%s: %s: wrong transfer direction!\n", drive->name, __func__); - xf = rw ? hwif->output_data : hwif->input_data; - ide_cd_pad_transfer(drive, xf, len); + ide_pad_transfer(drive, rw, len); } else if (rw == 0 && ireason == 1) { /* * Some drives (ASUS) seem to tell us that status info is @@ -696,16 +666,9 @@ static int ide_cd_check_transfer_size(ide_drive_t *drive, int len) static ide_startstop_t cdrom_newpc_intr(ide_drive_t *); -/* - * Routine to send a read/write packet command to the drive. This is usually - * called directly from cdrom_start_{read,write}(). However, for drq_interrupt - * devices, it is called from an interrupt when the drive is ready to accept - * the command. 
- */ -static ide_startstop_t cdrom_start_rw_cont(ide_drive_t *drive) +static ide_startstop_t ide_cd_prepare_rw_request(ide_drive_t *drive, + struct request *rq) { - struct request *rq = HWGROUP(drive)->rq; - if (rq_data_dir(rq) == READ) { unsigned short sectors_per_frame = queue_hardsect_size(drive->queue) >> SECTOR_BITS; @@ -742,6 +705,19 @@ static ide_startstop_t cdrom_start_rw_cont(ide_drive_t *drive) /* set up the command */ rq->timeout = ATAPI_WAIT_PC; + return ide_started; +} + +/* + * Routine to send a read/write packet command to the drive. This is usually + * called directly from cdrom_start_{read,write}(). However, for drq_interrupt + * devices, it is called from an interrupt when the drive is ready to accept + * the command. + */ +static ide_startstop_t cdrom_start_rw_cont(ide_drive_t *drive) +{ + struct request *rq = drive->hwif->hwgroup->rq; + /* send the command to the drive and return */ return cdrom_transfer_packet_command(drive, rq, cdrom_newpc_intr); } @@ -768,9 +744,8 @@ static ide_startstop_t cdrom_seek_intr(ide_drive_t *drive) return ide_stopped; } -static ide_startstop_t cdrom_start_seek_continuation(ide_drive_t *drive) +static void ide_cd_prepare_seek_request(ide_drive_t *drive, struct request *rq) { - struct request *rq = HWGROUP(drive)->rq; sector_t frame = rq->sector; sector_div(frame, queue_hardsect_size(drive->queue) >> SECTOR_BITS); @@ -780,17 +755,13 @@ static ide_startstop_t cdrom_start_seek_continuation(ide_drive_t *drive) put_unaligned(cpu_to_be32(frame), (unsigned int *) &rq->cmd[2]); rq->timeout = ATAPI_WAIT_PC; - return cdrom_transfer_packet_command(drive, rq, &cdrom_seek_intr); } -static ide_startstop_t cdrom_start_seek(ide_drive_t *drive, unsigned int block) +static ide_startstop_t cdrom_start_seek_continuation(ide_drive_t *drive) { - struct cdrom_info *info = drive->driver_data; + struct request *rq = drive->hwif->hwgroup->rq; - info->dma = 0; - info->start_seek = jiffies; - return cdrom_start_packet_command(drive, 0, - cdrom_start_seek_continuation); + return cdrom_transfer_packet_command(drive, rq, &cdrom_seek_intr); } /* @@ -1011,7 +982,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) - bio_cur_sectors(rq->bio), thislen >> 9); if (nskip > 0) { - ide_cd_drain_data(drive, nskip); + ide_pad_transfer(drive, write, nskip << 9); rq->current_nr_sectors -= nskip; thislen -= (nskip << 9); } @@ -1048,7 +1019,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) * If the buffers are full, pipe the rest into * oblivion. 
*/ - ide_cd_drain_data(drive, thislen >> 9); + ide_pad_transfer(drive, 0, thislen); else { printk(KERN_ERR "%s: confused, missing data\n", drive->name); @@ -1096,7 +1067,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) /* pad, if necessary */ if (!blk_fs_request(rq) && len > 0) - ide_cd_pad_transfer(drive, xferfunc, len); + ide_pad_transfer(drive, write, len); if (blk_pc_request(rq)) { timeout = rq->timeout; @@ -1165,21 +1136,17 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq) if (write) cd->devinfo.media_written = 1; - /* start sending the read/write request to the drive */ - return cdrom_start_packet_command(drive, 32768, cdrom_start_rw_cont); + return ide_started; } static ide_startstop_t cdrom_do_newpc_cont(ide_drive_t *drive) { struct request *rq = HWGROUP(drive)->rq; - if (!rq->timeout) - rq->timeout = ATAPI_WAIT_PC; - return cdrom_transfer_packet_command(drive, rq, cdrom_newpc_intr); } -static ide_startstop_t cdrom_do_block_pc(ide_drive_t *drive, struct request *rq) +static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq) { struct cdrom_info *info = drive->driver_data; @@ -1191,10 +1158,16 @@ static ide_startstop_t cdrom_do_block_pc(ide_drive_t *drive, struct request *rq) info->dma = 0; /* sg request */ - if (rq->bio) { - int mask = drive->queue->dma_alignment; - unsigned long addr = - (unsigned long)page_address(bio_page(rq->bio)); + if (rq->bio || ((rq->cmd_type == REQ_TYPE_ATA_PC) && rq->data_len)) { + struct request_queue *q = drive->queue; + unsigned int alignment; + unsigned long addr; + unsigned long stack_mask = ~(THREAD_SIZE - 1); + + if (rq->bio) + addr = (unsigned long)bio_data(rq->bio); + else + addr = (unsigned long)rq->data; info->dma = drive->using_dma; @@ -1204,23 +1177,25 @@ static ide_startstop_t cdrom_do_block_pc(ide_drive_t *drive, struct request *rq) * NOTE! The "len" and "addr" checks should possibly have * separate masks. */ - if ((rq->data_len & 15) || (addr & mask)) + alignment = queue_dma_alignment(q) | q->dma_pad_mask; + if (addr & alignment || rq->data_len & alignment) info->dma = 0; - } - /* start sending the command to the drive */ - return cdrom_start_packet_command(drive, rq->data_len, - cdrom_do_newpc_cont); + if (!((addr & stack_mask) ^ + ((unsigned long)current->stack & stack_mask))) + info->dma = 0; + } } /* * cdrom driver request routine. 
*/ -static ide_startstop_t ide_do_rw_cdrom(ide_drive_t *drive, struct request *rq, +static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq, sector_t block) { - ide_startstop_t action; struct cdrom_info *info = drive->driver_data; + ide_handler_t *fn; + int xferlen; if (blk_fs_request(rq)) { if (info->cd_flags & IDE_CD_FLAG_SEEKING) { @@ -1240,29 +1215,48 @@ static ide_startstop_t ide_do_rw_cdrom(ide_drive_t *drive, struct request *rq, } if (rq_data_dir(rq) == READ && IDE_LARGE_SEEK(info->last_block, block, - IDECD_SEEK_THRESHOLD) && - drive->dsc_overlap) - action = cdrom_start_seek(drive, block); - else - action = cdrom_start_rw(drive, rq); + IDECD_SEEK_THRESHOLD) && + drive->dsc_overlap) { + xferlen = 0; + fn = cdrom_start_seek_continuation; + + info->dma = 0; + info->start_seek = jiffies; + + ide_cd_prepare_seek_request(drive, rq); + } else { + xferlen = 32768; + fn = cdrom_start_rw_cont; + + if (cdrom_start_rw(drive, rq) == ide_stopped) + return ide_stopped; + + if (ide_cd_prepare_rw_request(drive, rq) == ide_stopped) + return ide_stopped; + } info->last_block = block; - return action; } else if (blk_sense_request(rq) || blk_pc_request(rq) || rq->cmd_type == REQ_TYPE_ATA_PC) { - return cdrom_do_block_pc(drive, rq); + xferlen = rq->data_len; + fn = cdrom_do_newpc_cont; + + if (!rq->timeout) + rq->timeout = ATAPI_WAIT_PC; + + cdrom_do_block_pc(drive, rq); } else if (blk_special_request(rq)) { /* right now this can only be a reset... */ cdrom_end_request(drive, 1); return ide_stopped; + } else { + blk_dump_rq_flags(rq, "ide-cd bad flags"); + cdrom_end_request(drive, 0); + return ide_stopped; } - blk_dump_rq_flags(rq, "ide-cd bad flags"); - cdrom_end_request(drive, 0); - return ide_stopped; + return cdrom_start_packet_command(drive, xferlen, fn); } - - /* * Ioctl handling. 
* @@ -1872,6 +1866,7 @@ static int ide_cdrom_setup(ide_drive_t *drive) blk_queue_prep_rq(drive->queue, ide_cdrom_prep_fn); blk_queue_dma_alignment(drive->queue, 31); + blk_queue_update_dma_pad(drive->queue, 15); drive->queue->unplug_delay = (1 * HZ) / 1000; if (!drive->queue->unplug_delay) drive->queue->unplug_delay = 1; @@ -1954,10 +1949,9 @@ static ide_driver_t ide_cdrom_driver = { .version = IDECD_VERSION, .media = ide_cdrom, .supports_dsc_overlap = 1, - .do_request = ide_do_rw_cdrom, + .do_request = ide_cd_do_request, .end_request = ide_end_request, .error = __ide_error, - .abort = __ide_abort, #ifdef CONFIG_IDE_PROC_FS .proc = idecd_proc, #endif diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index 5f49a4ae9dd..3a2e80237c1 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c @@ -985,7 +985,6 @@ static ide_driver_t idedisk_driver = { .do_request = ide_do_rw_disk, .end_request = ide_end_request, .error = __ide_error, - .abort = __ide_abort, #ifdef CONFIG_IDE_PROC_FS .proc = idedisk_proc, #endif diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c index b3689437269..011d72011cc 100644 --- a/drivers/ide/ide-floppy.c +++ b/drivers/ide/ide-floppy.c @@ -351,10 +351,7 @@ static void ide_floppy_callback(ide_drive_t *drive) static void idefloppy_init_pc(struct ide_atapi_pc *pc) { - memset(pc->c, 0, 12); - pc->retries = 0; - pc->flags = 0; - pc->req_xfer = 0; + memset(pc, 0, sizeof(*pc)); pc->buf = pc->pc_buf; pc->buf_size = IDEFLOPPY_PC_BUFFER_SIZE; pc->callback = ide_floppy_callback; @@ -561,12 +558,6 @@ static void idefloppy_create_start_stop_cmd(struct ide_atapi_pc *pc, int start) pc->c[4] = start; } -static void idefloppy_create_test_unit_ready_cmd(struct ide_atapi_pc *pc) -{ - idefloppy_init_pc(pc); - pc->c[0] = GPCMD_TEST_UNIT_READY; -} - static void idefloppy_create_rw_cmd(idefloppy_floppy_t *floppy, struct ide_atapi_pc *pc, struct request *rq, unsigned long sector) @@ -711,10 +702,10 @@ static int ide_floppy_get_flexible_disk_page(ide_drive_t *drive) set_disk_ro(floppy->disk, floppy->wp); page = &pc.buf[8]; - transfer_rate = be16_to_cpu(*(u16 *)&pc.buf[8 + 2]); - sector_size = be16_to_cpu(*(u16 *)&pc.buf[8 + 6]); - cyls = be16_to_cpu(*(u16 *)&pc.buf[8 + 8]); - rpm = be16_to_cpu(*(u16 *)&pc.buf[8 + 28]); + transfer_rate = be16_to_cpup((__be16 *)&pc.buf[8 + 2]); + sector_size = be16_to_cpup((__be16 *)&pc.buf[8 + 6]); + cyls = be16_to_cpup((__be16 *)&pc.buf[8 + 8]); + rpm = be16_to_cpup((__be16 *)&pc.buf[8 + 28]); heads = pc.buf[8 + 4]; sectors = pc.buf[8 + 5]; @@ -789,8 +780,8 @@ static int ide_floppy_get_capacity(ide_drive_t *drive) for (i = 0; i < desc_cnt; i++) { unsigned int desc_start = 4 + i*8; - blocks = be32_to_cpu(*(u32 *)&pc.buf[desc_start]); - length = be16_to_cpu(*(u16 *)&pc.buf[desc_start + 6]); + blocks = be32_to_cpup((__be32 *)&pc.buf[desc_start]); + length = be16_to_cpup((__be16 *)&pc.buf[desc_start + 6]); debug_log("Descriptor %d: %dkB, %d blocks, %d sector size\n", i, blocks * length / 1024, blocks, length); @@ -911,8 +902,8 @@ static int ide_floppy_get_format_capacities(ide_drive_t *drive, int __user *arg) if (u_index >= u_array_size) break; /* User-supplied buffer too small */ - blocks = be32_to_cpu(*(u32 *)&pc.buf[desc_start]); - length = be16_to_cpu(*(u16 *)&pc.buf[desc_start + 6]); + blocks = be32_to_cpup((__be32 *)&pc.buf[desc_start]); + length = be16_to_cpup((__be16 *)&pc.buf[desc_start + 6]); if (put_user(blocks, argp)) return(-EFAULT); @@ -1138,7 +1129,6 @@ static ide_driver_t idefloppy_driver = { .do_request = 
idefloppy_do_request, .end_request = idefloppy_end_request, .error = __ide_error, - .abort = __ide_abort, #ifdef CONFIG_IDE_PROC_FS .proc = idefloppy_proc, #endif @@ -1166,7 +1156,9 @@ static int idefloppy_open(struct inode *inode, struct file *filp) floppy->flags &= ~IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS; /* Just in case */ - idefloppy_create_test_unit_ready_cmd(&pc); + idefloppy_init_pc(&pc); + pc.c[0] = GPCMD_TEST_UNIT_READY; + if (idefloppy_queue_pc_tail(drive, &pc)) { idefloppy_create_start_stop_cmd(&pc, 1); (void) idefloppy_queue_pc_tail(drive, &pc); diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index 28057747c1f..661b75a89d4 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c @@ -504,55 +504,6 @@ ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, u8 stat) EXPORT_SYMBOL_GPL(ide_error); -ide_startstop_t __ide_abort(ide_drive_t *drive, struct request *rq) -{ - if (drive->media != ide_disk) - rq->errors |= ERROR_RESET; - - ide_kill_rq(drive, rq); - - return ide_stopped; -} - -EXPORT_SYMBOL_GPL(__ide_abort); - -/** - * ide_abort - abort pending IDE operations - * @drive: drive the error occurred on - * @msg: message to report - * - * ide_abort kills and cleans up when we are about to do a - * host initiated reset on active commands. Longer term we - * want handlers to have sensible abort handling themselves - * - * This differs fundamentally from ide_error because in - * this case the command is doing just fine when we - * blow it away. - */ - -ide_startstop_t ide_abort(ide_drive_t *drive, const char *msg) -{ - struct request *rq; - - if (drive == NULL || (rq = HWGROUP(drive)->rq) == NULL) - return ide_stopped; - - /* retry only "normal" I/O: */ - if (!blk_fs_request(rq)) { - rq->errors = 1; - ide_end_drive_cmd(drive, BUSY_STAT, 0); - return ide_stopped; - } - - if (rq->rq_disk) { - ide_driver_t *drv; - - drv = *(ide_driver_t **)rq->rq_disk->private_data; - return drv->abort(drive, rq); - } else - return __ide_abort(drive, rq); -} - static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf) { tf->nsect = drive->sect; @@ -766,6 +717,18 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive, return ide_stopped; } +static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq) +{ + switch (rq->cmd[0]) { + case REQ_DRIVE_RESET: + return ide_do_reset(drive); + default: + blk_dump_rq_flags(rq, "ide_special_rq - bad request"); + ide_end_request(drive, 0, 0); + return ide_stopped; + } +} + static void ide_check_pm_state(ide_drive_t *drive, struct request *rq) { struct request_pm_state *pm = rq->data; @@ -869,7 +832,16 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq) pm->pm_step == ide_pm_state_completed) ide_complete_pm_request(drive, rq); return startstop; - } + } else if (!rq->rq_disk && blk_special_request(rq)) + /* + * TODO: Once all ULDs have been modified to + * check for specific op codes rather than + * blindly accepting any special request, the + * check for ->rq_disk above may be replaced + * by a more suitable mechanism or even + * dropped entirely. 
+ */ + return ide_special_rq(drive, rq); drv = *(ide_driver_t **)rq->rq_disk->private_data; return drv->do_request(drive, rq, block); diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c index 80ad4f234f3..44aaec256a3 100644 --- a/drivers/ide/ide-iops.c +++ b/drivers/ide/ide-iops.c @@ -905,6 +905,14 @@ void ide_execute_pkt_cmd(ide_drive_t *drive) } EXPORT_SYMBOL_GPL(ide_execute_pkt_cmd); +static inline void ide_complete_drive_reset(ide_drive_t *drive, int err) +{ + struct request *rq = drive->hwif->hwgroup->rq; + + if (rq && blk_special_request(rq) && rq->cmd[0] == REQ_DRIVE_RESET) + ide_end_request(drive, err ? err : 1, 0); +} + /* needed below */ static ide_startstop_t do_reset1 (ide_drive_t *, int); @@ -940,7 +948,7 @@ static ide_startstop_t atapi_reset_pollfunc (ide_drive_t *drive) } /* done polling */ hwgroup->polling = 0; - hwgroup->resetting = 0; + ide_complete_drive_reset(drive, 0); return ide_stopped; } @@ -956,12 +964,14 @@ static ide_startstop_t reset_pollfunc (ide_drive_t *drive) ide_hwif_t *hwif = HWIF(drive); const struct ide_port_ops *port_ops = hwif->port_ops; u8 tmp; + int err = 0; if (port_ops && port_ops->reset_poll) { - if (port_ops->reset_poll(drive)) { + err = port_ops->reset_poll(drive); + if (err) { printk(KERN_ERR "%s: host reset_poll failure for %s.\n", hwif->name, drive->name); - return ide_stopped; + goto out; } } @@ -975,6 +985,7 @@ static ide_startstop_t reset_pollfunc (ide_drive_t *drive) } printk("%s: reset timed-out, status=0x%02x\n", hwif->name, tmp); drive->failures++; + err = -EIO; } else { printk("%s: reset: ", hwif->name); tmp = ide_read_error(drive); @@ -1001,10 +1012,12 @@ static ide_startstop_t reset_pollfunc (ide_drive_t *drive) if (tmp & 0x80) printk("; slave: failed"); printk("\n"); + err = -EIO; } } +out: hwgroup->polling = 0; /* done polling */ - hwgroup->resetting = 0; /* done reset attempt */ + ide_complete_drive_reset(drive, err); return ide_stopped; } @@ -1090,7 +1103,6 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi) /* For an ATAPI device, first try an ATAPI SRST. */ if (drive->media != ide_disk && !do_not_try_atapi) { - hwgroup->resetting = 1; pre_reset(drive); SELECT_DRIVE(drive); udelay (20); @@ -1112,10 +1124,10 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi) if (io_ports->ctl_addr == 0) { spin_unlock_irqrestore(&ide_lock, flags); + ide_complete_drive_reset(drive, -ENXIO); return ide_stopped; } - hwgroup->resetting = 1; /* * Note that we also set nIEN while resetting the device, * to mask unwanted interrupts from the interface during the reset. 
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c index 47af80df687..13af72f09ec 100644 --- a/drivers/ide/ide-lib.c +++ b/drivers/ide/ide-lib.c @@ -1,26 +1,11 @@ -#include <linux/module.h> #include <linux/types.h> #include <linux/string.h> #include <linux/kernel.h> -#include <linux/timer.h> -#include <linux/mm.h> #include <linux/interrupt.h> -#include <linux/major.h> -#include <linux/errno.h> -#include <linux/genhd.h> -#include <linux/blkpg.h> -#include <linux/slab.h> -#include <linux/pci.h> -#include <linux/delay.h> #include <linux/hdreg.h> #include <linux/ide.h> #include <linux/bitops.h> -#include <asm/byteorder.h> -#include <asm/irq.h> -#include <asm/uaccess.h> -#include <asm/io.h> - static const char *udma_str[] = { "UDMA/16", "UDMA/25", "UDMA/33", "UDMA/44", "UDMA/66", "UDMA/100", "UDMA/133", "UDMA7" }; @@ -90,142 +75,6 @@ static u8 ide_rate_filter(ide_drive_t *drive, u8 speed) return min(speed, mode); } -/* - * Standard (generic) timings for PIO modes, from ATA2 specification. - * These timings are for access to the IDE data port register *only*. - * Some drives may specify a mode, while also specifying a different - * value for cycle_time (from drive identification data). - */ -const ide_pio_timings_t ide_pio_timings[6] = { - { 70, 165, 600 }, /* PIO Mode 0 */ - { 50, 125, 383 }, /* PIO Mode 1 */ - { 30, 100, 240 }, /* PIO Mode 2 */ - { 30, 80, 180 }, /* PIO Mode 3 with IORDY */ - { 25, 70, 120 }, /* PIO Mode 4 with IORDY */ - { 20, 50, 100 } /* PIO Mode 5 with IORDY (nonstandard) */ -}; - -EXPORT_SYMBOL_GPL(ide_pio_timings); - -/* - * Shared data/functions for determining best PIO mode for an IDE drive. - * Most of this stuff originally lived in cmd640.c, and changes to the - * ide_pio_blacklist[] table should be made with EXTREME CAUTION to avoid - * breaking the fragile cmd640.c support. - */ - -/* - * Black list. Some drives incorrectly report their maximal PIO mode, - * at least in respect to CMD640. Here we keep info on some known drives. 
- */ -static struct ide_pio_info { - const char *name; - int pio; -} ide_pio_blacklist [] = { - { "Conner Peripherals 540MB - CFS540A", 3 }, - - { "WDC AC2700", 3 }, - { "WDC AC2540", 3 }, - { "WDC AC2420", 3 }, - { "WDC AC2340", 3 }, - { "WDC AC2250", 0 }, - { "WDC AC2200", 0 }, - { "WDC AC21200", 4 }, - { "WDC AC2120", 0 }, - { "WDC AC2850", 3 }, - { "WDC AC1270", 3 }, - { "WDC AC1170", 1 }, - { "WDC AC1210", 1 }, - { "WDC AC280", 0 }, - { "WDC AC31000", 3 }, - { "WDC AC31200", 3 }, - - { "Maxtor 7131 AT", 1 }, - { "Maxtor 7171 AT", 1 }, - { "Maxtor 7213 AT", 1 }, - { "Maxtor 7245 AT", 1 }, - { "Maxtor 7345 AT", 1 }, - { "Maxtor 7546 AT", 3 }, - { "Maxtor 7540 AV", 3 }, - - { "SAMSUNG SHD-3121A", 1 }, - { "SAMSUNG SHD-3122A", 1 }, - { "SAMSUNG SHD-3172A", 1 }, - - { "ST5660A", 3 }, - { "ST3660A", 3 }, - { "ST3630A", 3 }, - { "ST3655A", 3 }, - { "ST3391A", 3 }, - { "ST3390A", 1 }, - { "ST3600A", 1 }, - { "ST3290A", 0 }, - { "ST3144A", 0 }, - { "ST3491A", 1 }, /* reports 3, should be 1 or 2 (depending on */ - /* drive) according to Seagates FIND-ATA program */ - - { "QUANTUM ELS127A", 0 }, - { "QUANTUM ELS170A", 0 }, - { "QUANTUM LPS240A", 0 }, - { "QUANTUM LPS210A", 3 }, - { "QUANTUM LPS270A", 3 }, - { "QUANTUM LPS365A", 3 }, - { "QUANTUM LPS540A", 3 }, - { "QUANTUM LIGHTNING 540A", 3 }, - { "QUANTUM LIGHTNING 730A", 3 }, - - { "QUANTUM FIREBALL_540", 3 }, /* Older Quantum Fireballs don't work */ - { "QUANTUM FIREBALL_640", 3 }, - { "QUANTUM FIREBALL_1080", 3 }, - { "QUANTUM FIREBALL_1280", 3 }, - { NULL, 0 } -}; - -/** - * ide_scan_pio_blacklist - check for a blacklisted drive - * @model: Drive model string - * - * This routine searches the ide_pio_blacklist for an entry - * matching the start/whole of the supplied model name. - * - * Returns -1 if no match found. - * Otherwise returns the recommended PIO mode from ide_pio_blacklist[]. - */ - -static int ide_scan_pio_blacklist (char *model) -{ - struct ide_pio_info *p; - - for (p = ide_pio_blacklist; p->name != NULL; p++) { - if (strncmp(p->name, model, strlen(p->name)) == 0) - return p->pio; - } - return -1; -} - -unsigned int ide_pio_cycle_time(ide_drive_t *drive, u8 pio) -{ - struct hd_driveid *id = drive->id; - int cycle_time = 0; - - if (id->field_valid & 2) { - if (id->capability & 8) - cycle_time = id->eide_pio_iordy; - else - cycle_time = id->eide_pio; - } - - /* conservative "downgrade" for all pre-ATA2 drives */ - if (pio < 3) { - if (cycle_time && cycle_time < ide_pio_timings[pio].cycle_time) - cycle_time = 0; /* use standard timing */ - } - - return cycle_time ? cycle_time : ide_pio_timings[pio].cycle_time; -} - -EXPORT_SYMBOL_GPL(ide_pio_cycle_time); - /** * ide_get_best_pio_mode - get PIO mode from drive * @drive: drive to consider diff --git a/drivers/ide/ide-pio-blacklist.c b/drivers/ide/ide-pio-blacklist.c new file mode 100644 index 00000000000..a8c2c8f8660 --- /dev/null +++ b/drivers/ide/ide-pio-blacklist.c @@ -0,0 +1,94 @@ +/* + * PIO blacklist. Some drives incorrectly report their maximal PIO mode, + * at least in respect to CMD640. Here we keep info on some known drives. + * + * Changes to the ide_pio_blacklist[] should be made with EXTREME CAUTION + * to avoid breaking the fragile cmd640.c support. 
+ */ + +#include <linux/string.h> + +static struct ide_pio_info { + const char *name; + int pio; +} ide_pio_blacklist [] = { + { "Conner Peripherals 540MB - CFS540A", 3 }, + + { "WDC AC2700", 3 }, + { "WDC AC2540", 3 }, + { "WDC AC2420", 3 }, + { "WDC AC2340", 3 }, + { "WDC AC2250", 0 }, + { "WDC AC2200", 0 }, + { "WDC AC21200", 4 }, + { "WDC AC2120", 0 }, + { "WDC AC2850", 3 }, + { "WDC AC1270", 3 }, + { "WDC AC1170", 1 }, + { "WDC AC1210", 1 }, + { "WDC AC280", 0 }, + { "WDC AC31000", 3 }, + { "WDC AC31200", 3 }, + + { "Maxtor 7131 AT", 1 }, + { "Maxtor 7171 AT", 1 }, + { "Maxtor 7213 AT", 1 }, + { "Maxtor 7245 AT", 1 }, + { "Maxtor 7345 AT", 1 }, + { "Maxtor 7546 AT", 3 }, + { "Maxtor 7540 AV", 3 }, + + { "SAMSUNG SHD-3121A", 1 }, + { "SAMSUNG SHD-3122A", 1 }, + { "SAMSUNG SHD-3172A", 1 }, + + { "ST5660A", 3 }, + { "ST3660A", 3 }, + { "ST3630A", 3 }, + { "ST3655A", 3 }, + { "ST3391A", 3 }, + { "ST3390A", 1 }, + { "ST3600A", 1 }, + { "ST3290A", 0 }, + { "ST3144A", 0 }, + { "ST3491A", 1 }, /* reports 3, should be 1 or 2 (depending on drive) + according to Seagate's FIND-ATA program */ + + { "QUANTUM ELS127A", 0 }, + { "QUANTUM ELS170A", 0 }, + { "QUANTUM LPS240A", 0 }, + { "QUANTUM LPS210A", 3 }, + { "QUANTUM LPS270A", 3 }, + { "QUANTUM LPS365A", 3 }, + { "QUANTUM LPS540A", 3 }, + { "QUANTUM LIGHTNING 540A", 3 }, + { "QUANTUM LIGHTNING 730A", 3 }, + + { "QUANTUM FIREBALL_540", 3 }, /* Older Quantum Fireballs don't work */ + { "QUANTUM FIREBALL_640", 3 }, + { "QUANTUM FIREBALL_1080", 3 }, + { "QUANTUM FIREBALL_1280", 3 }, + { NULL, 0 } +}; + +/** + * ide_scan_pio_blacklist - check for a blacklisted drive + * @model: Drive model string + * + * This routine searches the ide_pio_blacklist for an entry + * matching the start/whole of the supplied model name. + * + * Returns -1 if no match found. + * Otherwise returns the recommended PIO mode from ide_pio_blacklist[]. 
+ */ + +int ide_scan_pio_blacklist(char *model) +{ + struct ide_pio_info *p; + + for (p = ide_pio_blacklist; p->name != NULL; p++) { + if (strncmp(p->name, model, strlen(p->name)) == 0) + return p->pio; + } + return -1; +} diff --git a/drivers/ide/ide-pnp.c b/drivers/ide/ide-pnp.c index adbd0178416..03f2ef5470a 100644 --- a/drivers/ide/ide-pnp.c +++ b/drivers/ide/ide-pnp.c @@ -33,6 +33,8 @@ static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id) ide_hwif_t *hwif; unsigned long base, ctl; + printk(KERN_INFO DRV_NAME ": generic PnP IDE interface\n"); + if (!(pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) && pnp_irq_valid(dev, 0))) return -1; @@ -62,10 +64,8 @@ static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id) u8 index = hwif->index; u8 idx[4] = { index, 0xff, 0xff, 0xff }; - ide_init_port_data(hwif, index); ide_init_port_hw(hwif, &hw); - printk(KERN_INFO "ide%d: generic PnP IDE interface\n", index); pnp_set_drvdata(dev, hwif); ide_device_add(idx, NULL); diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index d21e51a02c3..235ebdb29b2 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c @@ -39,6 +39,8 @@ #include <asm/uaccess.h> #include <asm/io.h> +static ide_hwif_t ide_hwifs[MAX_HWIFS]; /* master data repository */ + /** * generic_id - add a generic drive id * @drive: drive to make an ID block for @@ -1318,10 +1320,10 @@ static void ide_port_init_devices(ide_hwif_t *hwif) drive->unmask = 1; if (hwif->host_flags & IDE_HFLAG_NO_UNMASK_IRQS) drive->no_unmask = 1; - } - if (port_ops && port_ops->port_init_devs) - port_ops->port_init_devs(hwif); + if (port_ops && port_ops->init_dev) + port_ops->init_dev(drive); + } } static void ide_init_port(ide_hwif_t *hwif, unsigned int port, @@ -1473,22 +1475,29 @@ ide_hwif_t *ide_find_port_slot(const struct ide_port_info *d) for (; i < MAX_HWIFS; i++) { hwif = &ide_hwifs[i]; if (hwif->chipset == ide_unknown) - return hwif; + goto out_found; } } else { for (i = 2; i < MAX_HWIFS; i++) { hwif = &ide_hwifs[i]; if (hwif->chipset == ide_unknown) - return hwif; + goto out_found; } for (i = 0; i < 2 && i < MAX_HWIFS; i++) { hwif = &ide_hwifs[i]; if (hwif->chipset == ide_unknown) - return hwif; + goto out_found; } } + printk(KERN_ERR "%s: no free slot for interface\n", + d ? d->name : "ide"); + return NULL; + +out_found: + ide_init_port_data(hwif, i); + return hwif; } EXPORT_SYMBOL_GPL(ide_find_port_slot); diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index f9cf1670e4e..b711ab96e28 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c @@ -2591,7 +2591,6 @@ static ide_driver_t idetape_driver = { .do_request = idetape_do_request, .end_request = idetape_end_request, .error = __ide_error, - .abort = __ide_abort, #ifdef CONFIG_IDE_PROC_FS .proc = idetape_proc, #endif diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c index cf55a48a7dd..1fbdb746dc8 100644 --- a/drivers/ide/ide-taskfile.c +++ b/drivers/ide/ide-taskfile.c @@ -8,28 +8,18 @@ * The big the bad and the ugly. 
*/ -#include <linux/module.h> #include <linux/types.h> #include <linux/string.h> #include <linux/kernel.h> -#include <linux/timer.h> -#include <linux/mm.h> #include <linux/sched.h> #include <linux/interrupt.h> -#include <linux/major.h> #include <linux/errno.h> -#include <linux/genhd.h> -#include <linux/blkpg.h> #include <linux/slab.h> -#include <linux/pci.h> #include <linux/delay.h> #include <linux/hdreg.h> #include <linux/ide.h> -#include <linux/bitops.h> #include <linux/scatterlist.h> -#include <asm/byteorder.h> -#include <asm/irq.h> #include <asm/uaccess.h> #include <asm/io.h> @@ -62,25 +52,6 @@ int taskfile_lib_get_identify (ide_drive_t *drive, u8 *buf) return ide_raw_taskfile(drive, &args, buf, 1); } -static int inline task_dma_ok(ide_task_t *task) -{ - if (blk_fs_request(task->rq) || (task->tf_flags & IDE_TFLAG_FLAGGED)) - return 1; - - switch (task->tf.command) { - case WIN_WRITEDMA_ONCE: - case WIN_WRITEDMA: - case WIN_WRITEDMA_EXT: - case WIN_READDMA_ONCE: - case WIN_READDMA: - case WIN_READDMA_EXT: - case WIN_IDENTIFY_DMA: - return 1; - } - - return 0; -} - static ide_startstop_t task_no_data_intr(ide_drive_t *); static ide_startstop_t set_geometry_intr(ide_drive_t *); static ide_startstop_t recal_intr(ide_drive_t *); @@ -139,8 +110,7 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task) WAIT_WORSTCASE, NULL); return ide_started; default: - if (task_dma_ok(task) == 0 || drive->using_dma == 0 || - dma_ops->dma_setup(drive)) + if (drive->using_dma == 0 || dma_ops->dma_setup(drive)) return ide_stopped; dma_ops->dma_exec_cmd(drive, tf->command); dma_ops->dma_start(drive); @@ -183,7 +153,6 @@ static ide_startstop_t set_geometry_intr(ide_drive_t *drive) if (stat & (ERR_STAT|DRQ_STAT)) return ide_error(drive, "set_geometry_intr", stat); - BUG_ON(HWGROUP(drive)->handler != NULL); ide_set_handler(drive, &set_geometry_intr, WAIT_WORSTCASE, NULL); return ide_started; } diff --git a/drivers/ide/ide-timing.h b/drivers/ide/ide-timings.c index 2e91c5870b4..8c2f8327f48 100644 --- a/drivers/ide/ide-timing.h +++ b/drivers/ide/ide-timings.c @@ -1,11 +1,7 @@ -#ifndef _IDE_TIMING_H -#define _IDE_TIMING_H - /* * Copyright (c) 1999-2001 Vojtech Pavlik - */ - -/* + * Copyright (c) 2007-2008 Bartlomiej Zolnierkiewicz + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or @@ -27,27 +23,14 @@ #include <linux/kernel.h> #include <linux/hdreg.h> - -#define XFER_PIO_5 0x0d -#define XFER_UDMA_SLOW 0x4f - -struct ide_timing { - short mode; - short setup; /* t1 */ - short act8b; /* t2 for 8-bit io */ - short rec8b; /* t2i for 8-bit io */ - short cyc8b; /* t0 for 8-bit io */ - short active; /* t2 or tD */ - short recover; /* t2i or tK */ - short cycle; /* t0 */ - short udma; /* t2CYCTYP/2 */ -}; +#include <linux/ide.h> +#include <linux/module.h> /* * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds). * These were taken from ATA/ATAPI-6 standard, rev 0a, except * for PIO 5, which is a nonstandard extension and UDMA6, which - * is currently supported only by Maxtor drives. + * is currently supported only by Maxtor drives. 
*/ static struct ide_timing ide_timing[] = { @@ -61,12 +44,10 @@ static struct ide_timing ide_timing[] = { { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 }, { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 }, - { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, - { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 }, { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 }, { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 }, - + { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 }, { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 }, { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 }, @@ -81,29 +62,46 @@ static struct ide_timing ide_timing[] = { { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, - { -1 } + { 0xff } }; -#define IDE_TIMING_SETUP 0x01 -#define IDE_TIMING_ACT8B 0x02 -#define IDE_TIMING_REC8B 0x04 -#define IDE_TIMING_CYC8B 0x08 -#define IDE_TIMING_8BIT 0x0e -#define IDE_TIMING_ACTIVE 0x10 -#define IDE_TIMING_RECOVER 0x20 -#define IDE_TIMING_CYCLE 0x40 -#define IDE_TIMING_UDMA 0x80 -#define IDE_TIMING_ALL 0xff - -#define ENOUGH(v,unit) (((v)-1)/(unit)+1) -#define EZ(v,unit) ((v)?ENOUGH(v,unit):0) - -#define XFER_MODE 0xf0 -#define XFER_MWDMA 0x20 -#define XFER_EPIO 0x01 -#define XFER_PIO 0x00 - -static void ide_timing_quantize(struct ide_timing *t, struct ide_timing *q, int T, int UT) +struct ide_timing *ide_timing_find_mode(u8 speed) +{ + struct ide_timing *t; + + for (t = ide_timing; t->mode != speed; t++) + if (t->mode == 0xff) + return NULL; + return t; +} +EXPORT_SYMBOL_GPL(ide_timing_find_mode); + +u16 ide_pio_cycle_time(ide_drive_t *drive, u8 pio) +{ + struct hd_driveid *id = drive->id; + struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio); + u16 cycle = 0; + + if (id->field_valid & 2) { + if (id->capability & 8) + cycle = id->eide_pio_iordy; + else + cycle = id->eide_pio; + + /* conservative "downgrade" for all pre-ATA2 drives */ + if (pio < 3 && cycle < t->cycle) + cycle = 0; /* use standard timing */ + } + + return cycle ? cycle : t->cycle; +} +EXPORT_SYMBOL_GPL(ide_pio_cycle_time); + +#define ENOUGH(v, unit) (((v) - 1) / (unit) + 1) +#define EZ(v, unit) ((v) ? 
ENOUGH(v, unit) : 0) + +static void ide_timing_quantize(struct ide_timing *t, struct ide_timing *q, + int T, int UT) { q->setup = EZ(t->setup * 1000, T); q->act8b = EZ(t->act8b * 1000, T); @@ -115,92 +113,83 @@ static void ide_timing_quantize(struct ide_timing *t, struct ide_timing *q, int q->udma = EZ(t->udma * 1000, UT); } -static void ide_timing_merge(struct ide_timing *a, struct ide_timing *b, struct ide_timing *m, unsigned int what) -{ - if (what & IDE_TIMING_SETUP ) m->setup = max(a->setup, b->setup); - if (what & IDE_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b); - if (what & IDE_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b); - if (what & IDE_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b); - if (what & IDE_TIMING_ACTIVE ) m->active = max(a->active, b->active); - if (what & IDE_TIMING_RECOVER) m->recover = max(a->recover, b->recover); - if (what & IDE_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle); - if (what & IDE_TIMING_UDMA ) m->udma = max(a->udma, b->udma); -} - -static struct ide_timing* ide_timing_find_mode(short speed) +void ide_timing_merge(struct ide_timing *a, struct ide_timing *b, + struct ide_timing *m, unsigned int what) { - struct ide_timing *t; - - for (t = ide_timing; t->mode != speed; t++) - if (t->mode < 0) - return NULL; - return t; + if (what & IDE_TIMING_SETUP) + m->setup = max(a->setup, b->setup); + if (what & IDE_TIMING_ACT8B) + m->act8b = max(a->act8b, b->act8b); + if (what & IDE_TIMING_REC8B) + m->rec8b = max(a->rec8b, b->rec8b); + if (what & IDE_TIMING_CYC8B) + m->cyc8b = max(a->cyc8b, b->cyc8b); + if (what & IDE_TIMING_ACTIVE) + m->active = max(a->active, b->active); + if (what & IDE_TIMING_RECOVER) + m->recover = max(a->recover, b->recover); + if (what & IDE_TIMING_CYCLE) + m->cycle = max(a->cycle, b->cycle); + if (what & IDE_TIMING_UDMA) + m->udma = max(a->udma, b->udma); } +EXPORT_SYMBOL_GPL(ide_timing_merge); -static int ide_timing_compute(ide_drive_t *drive, short speed, struct ide_timing *t, int T, int UT) +int ide_timing_compute(ide_drive_t *drive, u8 speed, + struct ide_timing *t, int T, int UT) { struct hd_driveid *id = drive->id; struct ide_timing *s, p; -/* - * Find the mode. - */ - - if (!(s = ide_timing_find_mode(speed))) + /* + * Find the mode. + */ + s = ide_timing_find_mode(speed); + if (s == NULL) return -EINVAL; -/* - * Copy the timing from the table. - */ - + /* + * Copy the timing from the table. + */ *t = *s; -/* - * If the drive is an EIDE drive, it can tell us it needs extended - * PIO/MWDMA cycle timing. - */ - + /* + * If the drive is an EIDE drive, it can tell us it needs extended + * PIO/MWDMA cycle timing. + */ if (id && id->field_valid & 2) { /* EIDE drive */ memset(&p, 0, sizeof(p)); - switch (speed & XFER_MODE) { - - case XFER_PIO: - if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = id->eide_pio; - else p.cycle = p.cyc8b = id->eide_pio_iordy; - break; - - case XFER_MWDMA: - p.cycle = id->eide_dma_min; - break; - } + if (speed <= XFER_PIO_2) + p.cycle = p.cyc8b = id->eide_pio; + else if (speed <= XFER_PIO_5) + p.cycle = p.cyc8b = id->eide_pio_iordy; + else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) + p.cycle = id->eide_dma_min; ide_timing_merge(&p, t, t, IDE_TIMING_CYCLE | IDE_TIMING_CYC8B); } -/* - * Convert the timing to bus clock counts. - */ - + /* + * Convert the timing to bus clock counts. + */ ide_timing_quantize(t, t, T, UT); -/* - * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, S.M.A.R.T - * and some other commands. 
We have to ensure that the DMA cycle timing is - * slower/equal than the fastest PIO timing. - */ - - if ((speed & XFER_MODE) != XFER_PIO) { + /* + * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, + * S.M.A.R.T and some other commands. We have to ensure that the + * DMA cycle timing is slower/equal than the fastest PIO timing. + */ + if (speed >= XFER_SW_DMA_0) { u8 pio = ide_get_best_pio_mode(drive, 255, 5); ide_timing_compute(drive, XFER_PIO_0 + pio, &p, T, UT); ide_timing_merge(&p, t, t, IDE_TIMING_ALL); } -/* - * Lengthen active & recovery time so that cycle time is correct. - */ - + /* + * Lengthen active & recovery time so that cycle time is correct. + */ if (t->act8b + t->rec8b < t->cyc8b) { t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2; t->rec8b = t->cyc8b - t->act8b; @@ -213,5 +202,4 @@ static int ide_timing_compute(ide_drive_t *drive, short speed, struct ide_timing return 0; } - -#endif +EXPORT_SYMBOL_GPL(ide_timing_compute); diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c index 2b8453510e0..d4a6b102a77 100644 --- a/drivers/ide/ide.c +++ b/drivers/ide/ide.c @@ -50,29 +50,16 @@ #include <linux/types.h> #include <linux/string.h> #include <linux/kernel.h> -#include <linux/timer.h> -#include <linux/mm.h> #include <linux/interrupt.h> #include <linux/major.h> #include <linux/errno.h> #include <linux/genhd.h> -#include <linux/blkpg.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/pci.h> -#include <linux/delay.h> #include <linux/ide.h> #include <linux/completion.h> -#include <linux/reboot.h> -#include <linux/cdrom.h> -#include <linux/seq_file.h> #include <linux/device.h> -#include <linux/bitops.h> - -#include <asm/byteorder.h> -#include <asm/irq.h> -#include <asm/uaccess.h> -#include <asm/io.h> /* default maximum number of failures */ @@ -91,8 +78,6 @@ DEFINE_MUTEX(ide_cfg_mtx); __cacheline_aligned_in_smp DEFINE_SPINLOCK(ide_lock); EXPORT_SYMBOL(ide_lock); -ide_hwif_t ide_hwifs[MAX_HWIFS]; /* master data repository */ - static void ide_port_init_devices_data(ide_hwif_t *); /* @@ -121,7 +106,6 @@ void ide_init_port_data(ide_hwif_t *hwif, unsigned int index) ide_port_init_devices_data(hwif); } -EXPORT_SYMBOL_GPL(ide_init_port_data); static void ide_port_init_devices_data(ide_hwif_t *hwif) { @@ -150,18 +134,6 @@ static void ide_port_init_devices_data(ide_hwif_t *hwif) } } -static void __init init_ide_data (void) -{ - unsigned int index; - - /* Initialise all interface structures */ - for (index = 0; index < MAX_HWIFS; ++index) { - ide_hwif_t *hwif = &ide_hwifs[index]; - - ide_init_port_data(hwif, index); - } -} - void ide_remove_port_from_hwgroup(ide_hwif_t *hwif) { ide_hwgroup_t *hwgroup = hwif->hwgroup; @@ -312,7 +284,8 @@ void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw) memcpy(&hwif->io_ports, &hw->io_ports, sizeof(hwif->io_ports)); hwif->irq = hw->irq; hwif->chipset = hw->chipset; - hwif->gendev.parent = hw->dev; + hwif->dev = hw->dev; + hwif->gendev.parent = hw->parent ? 
hw->parent : hw->dev; hwif->ack_intr = hw->ack_intr; } EXPORT_SYMBOL_GPL(ide_init_port_hw); @@ -556,6 +529,22 @@ static int generic_ide_resume(struct device *dev) return err; } +static int generic_drive_reset(ide_drive_t *drive) +{ + struct request *rq; + int ret = 0; + + rq = blk_get_request(drive->queue, READ, __GFP_WAIT); + rq->cmd_type = REQ_TYPE_SPECIAL; + rq->cmd_len = 1; + rq->cmd[0] = REQ_DRIVE_RESET; + rq->cmd_flags |= REQ_SOFTBARRIER; + if (blk_execute_rq(drive->queue, NULL, rq, 1)) + ret = rq->errors; + blk_put_request(rq); + return ret; +} + int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device *bdev, unsigned int cmd, unsigned long arg) { @@ -630,33 +619,8 @@ int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device if (!capable(CAP_SYS_ADMIN)) return -EACCES; - /* - * Abort the current command on the - * group if there is one, taking - * care not to allow anything else - * to be queued and to die on the - * spot if we miss one somehow - */ - - spin_lock_irqsave(&ide_lock, flags); - - if (HWGROUP(drive)->resetting) { - spin_unlock_irqrestore(&ide_lock, flags); - return -EBUSY; - } + return generic_drive_reset(drive); - ide_abort(drive, "drive reset"); - - BUG_ON(HWGROUP(drive)->handler); - - /* Ensure nothing gets queued after we - drop the lock. Reset will clear the busy */ - - HWGROUP(drive)->busy = 1; - spin_unlock_irqrestore(&ide_lock, flags); - (void) ide_do_reset(drive); - - return 0; case HDIO_GET_BUSSTATE: if (!capable(CAP_SYS_ADMIN)) return -EACCES; @@ -1021,8 +985,6 @@ static int __init ide_init(void) goto out_port_class; } - init_ide_data(); - proc_ide_create(); return 0; diff --git a/drivers/ide/legacy/ali14xx.c b/drivers/ide/legacy/ali14xx.c index 052125fafcf..4ec19737f3c 100644 --- a/drivers/ide/legacy/ali14xx.c +++ b/drivers/ide/legacy/ali14xx.c @@ -117,10 +117,11 @@ static void ali14xx_set_pio_mode(ide_drive_t *drive, const u8 pio) u8 param1, param2, param3, param4; unsigned long flags; int bus_speed = ide_vlb_clk ? 
ide_vlb_clk : 50; + struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio); /* calculate timing, according to PIO mode */ time1 = ide_pio_cycle_time(drive, pio); - time2 = ide_pio_timings[pio].active_time; + time2 = t->active; param3 = param1 = (time2 * bus_speed + 999) / 1000; param4 = param2 = (time1 * bus_speed + 999) / 1000 - param1; if (pio < 3) { diff --git a/drivers/ide/legacy/buddha.c b/drivers/ide/legacy/buddha.c index 9a1d27ef3f8..0497e7f85b0 100644 --- a/drivers/ide/legacy/buddha.c +++ b/drivers/ide/legacy/buddha.c @@ -227,7 +227,6 @@ fail_base2: if (hwif) { u8 index = hwif->index; - ide_init_port_data(hwif, index); ide_init_port_hw(hwif, &hw); idx[i] = index; diff --git a/drivers/ide/legacy/falconide.c b/drivers/ide/legacy/falconide.c index af11028b479..129a812bb57 100644 --- a/drivers/ide/legacy/falconide.c +++ b/drivers/ide/legacy/falconide.c @@ -111,7 +111,6 @@ static int __init falconide_init(void) u8 index = hwif->index; u8 idx[4] = { index, 0xff, 0xff, 0xff }; - ide_init_port_data(hwif, index); ide_init_port_hw(hwif, &hw); /* Atari has a byte-swapped IDE interface */ diff --git a/drivers/ide/legacy/gayle.c b/drivers/ide/legacy/gayle.c index b78941680c3..7e74b20202d 100644 --- a/drivers/ide/legacy/gayle.c +++ b/drivers/ide/legacy/gayle.c @@ -185,7 +185,6 @@ found: if (hwif) { u8 index = hwif->index; - ide_init_port_data(hwif, index); ide_init_port_hw(hwif, &hw); idx[i] = index; diff --git a/drivers/ide/legacy/ht6560b.c b/drivers/ide/legacy/ht6560b.c index dd6dfb32e85..7bc8fd59ea9 100644 --- a/drivers/ide/legacy/ht6560b.c +++ b/drivers/ide/legacy/ht6560b.c @@ -216,6 +216,7 @@ static u8 ht_pio2timings(ide_drive_t *drive, const u8 pio) if (pio) { unsigned int cycle_time; + struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio); cycle_time = ide_pio_cycle_time(drive, pio); @@ -224,10 +225,8 @@ static u8 ht_pio2timings(ide_drive_t *drive, const u8 pio) * actual cycle time for recovery and activity * according system bus speed. */ - active_time = ide_pio_timings[pio].active_time; - recovery_time = cycle_time - - active_time - - ide_pio_timings[pio].setup_time; + active_time = t->active; + recovery_time = cycle_time - active_time - t->setup; /* * Cycle times should be Vesa bus cycles */ @@ -311,16 +310,16 @@ static void ht6560b_set_pio_mode(ide_drive_t *drive, const u8 pio) #endif } -static void __init ht6560b_port_init_devs(ide_hwif_t *hwif) +static void __init ht6560b_init_dev(ide_drive_t *drive) { + ide_hwif_t *hwif = drive->hwif; /* Setting default configurations for drives. 
*/ int t = (HT_CONFIG_DEFAULT << 8) | HT_TIMING_DEFAULT; if (hwif->channel) t |= (HT_SECONDARY_IF << 8); - hwif->drives[0].drive_data = t; - hwif->drives[1].drive_data = t; + drive->drive_data = t; } static int probe_ht6560b; @@ -329,7 +328,7 @@ module_param_named(probe, probe_ht6560b, bool, 0); MODULE_PARM_DESC(probe, "probe for HT6560B chipset"); static const struct ide_port_ops ht6560b_port_ops = { - .port_init_devs = ht6560b_port_init_devs, + .init_dev = ht6560b_init_dev, .set_pio_mode = ht6560b_set_pio_mode, .selectproc = ht6560b_selectproc, }; diff --git a/drivers/ide/legacy/ide-4drives.c b/drivers/ide/legacy/ide-4drives.c index ecae916a338..89c8ff0a4d0 100644 --- a/drivers/ide/legacy/ide-4drives.c +++ b/drivers/ide/legacy/ide-4drives.c @@ -11,6 +11,21 @@ static int probe_4drives; module_param_named(probe, probe_4drives, bool, 0); MODULE_PARM_DESC(probe, "probe for generic IDE chipset with 4 drives/port"); +static void ide_4drives_init_dev(ide_drive_t *drive) +{ + if (drive->hwif->channel) + drive->select.all ^= 0x20; +} + +static const struct ide_port_ops ide_4drives_port_ops = { + .init_dev = ide_4drives_init_dev, +}; + +static const struct ide_port_info ide_4drives_port_info = { + .port_ops = &ide_4drives_port_ops, + .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_NO_DMA, +}; + static int __init ide_4drives_init(void) { ide_hwif_t *hwif, *mate; @@ -49,18 +64,10 @@ static int __init ide_4drives_init(void) mate = ide_find_port(); if (mate) { ide_init_port_hw(mate, &hw); - mate->drives[0].select.all ^= 0x20; - mate->drives[1].select.all ^= 0x20; idx[1] = mate->index; - - if (hwif) { - hwif->mate = mate; - mate->mate = hwif; - hwif->serialized = mate->serialized = 1; - } } - ide_device_add(idx, NULL); + ide_device_add(idx, &ide_4drives_port_info); return 0; } diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c index 8dbf4d9b644..27b1e0b7ecb 100644 --- a/drivers/ide/legacy/ide-cs.c +++ b/drivers/ide/legacy/ide-cs.c @@ -66,8 +66,6 @@ MODULE_LICENSE("Dual MPL/GPL"); #ifdef CONFIG_PCMCIA_DEBUG INT_MODULE_PARM(pc_debug, 0); #define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args) -/*static char *version = -"ide-cs.c 1.3 2002/10/26 05:45:31 (David Hinds)";*/ #else #define DEBUG(n, args...) 
#endif @@ -154,6 +152,11 @@ static const struct ide_port_ops idecs_port_ops = { .quirkproc = ide_undecoded_slave, }; +static const struct ide_port_info idecs_port_info = { + .port_ops = &idecs_port_ops, + .host_flags = IDE_HFLAG_NO_DMA, +}; + static ide_hwif_t *idecs_register(unsigned long io, unsigned long ctl, unsigned long irq, struct pcmcia_device *handle) { @@ -187,13 +190,11 @@ static ide_hwif_t *idecs_register(unsigned long io, unsigned long ctl, i = hwif->index; - ide_init_port_data(hwif, i); ide_init_port_hw(hwif, &hw); - hwif->port_ops = &idecs_port_ops; idx[0] = i; - ide_device_add(idx, NULL); + ide_device_add(idx, &idecs_port_info); if (hwif->present) return hwif; diff --git a/drivers/ide/legacy/ide_platform.c b/drivers/ide/legacy/ide_platform.c index d3bc3f24e05..a249562b34b 100644 --- a/drivers/ide/legacy/ide_platform.c +++ b/drivers/ide/legacy/ide_platform.c @@ -44,6 +44,10 @@ static void __devinit plat_ide_setup_ports(hw_regs_t *hw, hw->chipset = ide_generic; } +static const struct ide_port_info platform_ide_port_info = { + .host_flags = IDE_HFLAG_NO_DMA, +}; + static int __devinit plat_ide_probe(struct platform_device *pdev) { struct resource *res_base, *res_alt, *res_irq; @@ -54,6 +58,7 @@ static int __devinit plat_ide_probe(struct platform_device *pdev) int ret = 0; int mmio = 0; hw_regs_t hw; + struct ide_port_info d = platform_ide_port_info; pdata = pdev->dev.platform_data; @@ -102,13 +107,13 @@ static int __devinit plat_ide_probe(struct platform_device *pdev) ide_init_port_hw(hwif, &hw); if (mmio) { - hwif->host_flags = IDE_HFLAG_MMIO; + d.host_flags |= IDE_HFLAG_MMIO; default_hwif_mmiops(hwif); } idx[0] = hwif->index; - ide_device_add(idx, NULL); + ide_device_add(idx, &d); platform_set_drvdata(pdev, hwif); diff --git a/drivers/ide/legacy/macide.c b/drivers/ide/legacy/macide.c index 2e84290d0bc..0a6195bcfed 100644 --- a/drivers/ide/legacy/macide.c +++ b/drivers/ide/legacy/macide.c @@ -130,7 +130,6 @@ static int __init macide_init(void) u8 index = hwif->index; u8 idx[4] = { index, 0xff, 0xff, 0xff }; - ide_init_port_data(hwif, index); ide_init_port_hw(hwif, &hw); ide_device_add(idx, NULL); diff --git a/drivers/ide/legacy/q40ide.c b/drivers/ide/legacy/q40ide.c index 8ff6e2d2083..9c2b9d078f6 100644 --- a/drivers/ide/legacy/q40ide.c +++ b/drivers/ide/legacy/q40ide.c @@ -142,7 +142,6 @@ static int __init q40ide_init(void) hwif = ide_find_port(); if (hwif) { - ide_init_port_data(hwif, hwif->index); ide_init_port_hw(hwif, &hw); /* Q40 has a byte-swapped IDE interface */ diff --git a/drivers/ide/legacy/qd65xx.c b/drivers/ide/legacy/qd65xx.c index 51dba82f881..2338f344ea2 100644 --- a/drivers/ide/legacy/qd65xx.c +++ b/drivers/ide/legacy/qd65xx.c @@ -207,6 +207,7 @@ static void qd6500_set_pio_mode(ide_drive_t *drive, const u8 pio) static void qd6580_set_pio_mode(ide_drive_t *drive, const u8 pio) { ide_hwif_t *hwif = drive->hwif; + struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio); unsigned int cycle_time; int active_time = 175; int recovery_time = 415; /* worst case values from the dos driver */ @@ -236,7 +237,7 @@ static void qd6580_set_pio_mode(ide_drive_t *drive, const u8 pio) active_time = 110; recovery_time = cycle_time - 120; } else { - active_time = ide_pio_timings[pio].active_time; + active_time = t->active; recovery_time = cycle_time - active_time; } } @@ -281,17 +282,18 @@ static int __init qd_testreg(int port) return (readreg != QD_TESTVAL); } -static void __init qd6500_port_init_devs(ide_hwif_t *hwif) +static void __init qd6500_init_dev(ide_drive_t 
*drive) { + ide_hwif_t *hwif = drive->hwif; u8 base = (hwif->config_data & 0xff00) >> 8; u8 config = QD_CONFIG(hwif); - hwif->drives[0].drive_data = QD6500_DEF_DATA; - hwif->drives[1].drive_data = QD6500_DEF_DATA; + drive->drive_data = QD6500_DEF_DATA; } -static void __init qd6580_port_init_devs(ide_hwif_t *hwif) +static void __init qd6580_init_dev(ide_drive_t *drive) { + ide_hwif_t *hwif = drive->hwif; u16 t1, t2; u8 base = (hwif->config_data & 0xff00) >> 8; u8 config = QD_CONFIG(hwif); @@ -302,18 +304,17 @@ static void __init qd6580_port_init_devs(ide_hwif_t *hwif) } else t2 = t1 = hwif->channel ? QD6580_DEF_DATA2 : QD6580_DEF_DATA; - hwif->drives[0].drive_data = t1; - hwif->drives[1].drive_data = t2; + drive->drive_data = drive->select.b.unit ? t2 : t1; } static const struct ide_port_ops qd6500_port_ops = { - .port_init_devs = qd6500_port_init_devs, + .init_dev = qd6500_init_dev, .set_pio_mode = qd6500_set_pio_mode, .selectproc = qd65xx_select, }; static const struct ide_port_ops qd6580_port_ops = { - .port_init_devs = qd6580_port_init_devs, + .init_dev = qd6580_init_dev, .set_pio_mode = qd6580_set_pio_mode, .selectproc = qd65xx_select, }; diff --git a/drivers/ide/mips/au1xxx-ide.c b/drivers/ide/mips/au1xxx-ide.c index 1a6c27b3249..48d57cae63c 100644 --- a/drivers/ide/mips/au1xxx-ide.c +++ b/drivers/ide/mips/au1xxx-ide.c @@ -213,10 +213,8 @@ static int auide_build_dmatable(ide_drive_t *drive) { int i, iswrite, count = 0; ide_hwif_t *hwif = HWIF(drive); - struct request *rq = HWGROUP(drive)->rq; - - _auide_hwif *ahwif = (_auide_hwif*)hwif->hwif_data; + _auide_hwif *ahwif = &auide_hwif; struct scatterlist *sg; iswrite = (rq_data_dir(rq) == WRITE); @@ -402,7 +400,7 @@ static const struct ide_dma_ops au1xxx_dma_ops = { static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d) { - _auide_hwif *auide = (_auide_hwif *)hwif->hwif_data; + _auide_hwif *auide = &auide_hwif; dbdev_tab_t source_dev_tab, target_dev_tab; u32 dev_id, tsize, devwidth, flags; @@ -463,7 +461,7 @@ static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d) #else static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d) { - _auide_hwif *auide = (_auide_hwif *)hwif->hwif_data; + _auide_hwif *auide = &auide_hwif; dbdev_tab_t source_dev_tab; int flags; @@ -600,8 +598,6 @@ static int au_ide_probe(struct device *dev) ide_init_port_hw(hwif, &hw); - hwif->dev = dev; - /* If the user has selected DDMA assisted copies, then set up a few local I/O function entry points */ @@ -610,11 +606,8 @@ static int au_ide_probe(struct device *dev) hwif->input_data = au1xxx_input_data; hwif->output_data = au1xxx_output_data; #endif - hwif->select_data = 0; /* no chipset-specific code */ - hwif->config_data = 0; /* no chipset-specific code */ auide_hwif.hwif = hwif; - hwif->hwif_data = &auide_hwif; idx[0] = hwif->index; diff --git a/drivers/ide/mips/swarm.c b/drivers/ide/mips/swarm.c index 52fee3d2771..9f1212cc4ae 100644 --- a/drivers/ide/mips/swarm.c +++ b/drivers/ide/mips/swarm.c @@ -61,6 +61,11 @@ static struct resource swarm_ide_resource = { static struct platform_device *swarm_ide_dev; +static const struct ide_port_info swarm_port_info = { + .name = DRV_NAME, + .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA, +}; + /* * swarm_ide_probe - if the board header indicates the existence of * Generic Bus IDE, allocate a HWIF for it. 
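
The swarm.c hunks that follow show the host-registration pattern the rest of this series converts drivers to: the port is described once in a static struct ide_port_info, ide_find_port_slot() both picks a free hwif slot and initialises its per-port data (the ide-probe.c hunk above moves the ide_init_port_data() call inside it), and the same port info is then handed to ide_device_add(). A minimal sketch of that probe flow, assuming this kernel's <linux/ide.h>; the driver name, the I/O setup and the error handling are illustrative only, not taken from the patch:

#include <linux/string.h>
#include <linux/ide.h>

static const struct ide_port_info example_port_info = {
	.name		= "example-ide",	/* illustrative name */
	.host_flags	= IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA,
};

static int example_ide_probe(void)
{
	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
	ide_hwif_t *hwif;
	hw_regs_t hw;

	memset(&hw, 0, sizeof(hw));
	/* fill in hw.io_ports_array[], hw.irq, ... for the real hardware */
	hw.chipset = ide_generic;

	/* finds a free slot and runs ide_init_port_data() on it */
	hwif = ide_find_port_slot(&example_port_info);
	if (hwif == NULL)
		return -ENOMEM;

	ide_init_port_hw(hwif, &hw);
	default_hwif_mmiops(hwif);	/* only for MMIO-mapped taskfile registers */

	idx[0] = hwif->index;
	ide_device_add(idx, &example_port_info);

	return 0;
}
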
@@ -77,12 +82,6 @@ static int __devinit swarm_ide_probe(struct device *dev) if (!SIBYTE_HAVE_IDE) return -ENODEV; - hwif = ide_find_port(); - if (hwif == NULL) { - printk(KERN_ERR DRV_NAME ": no free slot for interface\n"); - return -ENOMEM; - } - base = ioremap(A_IO_EXT_BASE, 0x800); offset = __raw_readq(base + R_IO_EXT_REG(R_IO_EXT_START_ADDR, IDE_CS)); size = __raw_readq(base + R_IO_EXT_REG(R_IO_EXT_MULT_SIZE, IDE_CS)); @@ -109,10 +108,6 @@ static int __devinit swarm_ide_probe(struct device *dev) base = ioremap(offset, size); - /* Setup MMIO ops. */ - hwif->host_flags = IDE_HFLAG_MMIO; - default_hwif_mmiops(hwif); - for (i = 0; i <= 7; i++) hw.io_ports_array[i] = (unsigned long)(base + ((0x1f0 + i) << 5)); @@ -121,15 +116,26 @@ static int __devinit swarm_ide_probe(struct device *dev) hw.irq = K_INT_GB_IDE; hw.chipset = ide_generic; + hwif = ide_find_port_slot(&swarm_port_info); + if (hwif == NULL) + goto err; + ide_init_port_hw(hwif, &hw); + /* Setup MMIO ops. */ + default_hwif_mmiops(hwif); + idx[0] = hwif->index; - ide_device_add(idx, NULL); + ide_device_add(idx, &swarm_port_info); dev_set_drvdata(dev, hwif); return 0; +err: + release_resource(&swarm_ide_resource); + iounmap(base); + return -ENOMEM; } static struct device_driver swarm_ide_driver = { diff --git a/drivers/ide/pci/alim15x3.c b/drivers/ide/pci/alim15x3.c index f2de00adf14..80d19c0eb78 100644 --- a/drivers/ide/pci/alim15x3.c +++ b/drivers/ide/pci/alim15x3.c @@ -69,7 +69,8 @@ static void ali_set_pio_mode(ide_drive_t *drive, const u8 pio) { ide_hwif_t *hwif = HWIF(drive); struct pci_dev *dev = to_pci_dev(hwif->dev); - int s_time, a_time, c_time; + struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio); + int s_time = t->setup, a_time = t->active, c_time = t->cycle; u8 s_clc, a_clc, r_clc; unsigned long flags; int bus_speed = ide_pci_clk ? 
ide_pci_clk : 33; @@ -78,13 +79,10 @@ static void ali_set_pio_mode(ide_drive_t *drive, const u8 pio) u8 cd_dma_fifo = 0; int unit = drive->select.b.unit & 1; - s_time = ide_pio_timings[pio].setup_time; - a_time = ide_pio_timings[pio].active_time; if ((s_clc = (s_time * bus_speed + 999) / 1000) >= 8) s_clc = 0; if ((a_clc = (a_time * bus_speed + 999) / 1000) >= 8) a_clc = 0; - c_time = ide_pio_timings[pio].cycle_time; if (!(r_clc = (c_time * bus_speed + 999) / 1000 - a_clc - s_clc)) { r_clc = 1; diff --git a/drivers/ide/pci/amd74xx.c b/drivers/ide/pci/amd74xx.c index ad222206a42..0bfcdd0e77b 100644 --- a/drivers/ide/pci/amd74xx.c +++ b/drivers/ide/pci/amd74xx.c @@ -21,8 +21,6 @@ #include <linux/init.h> #include <linux/ide.h> -#include "ide-timing.h" - enum { AMD_IDE_CONFIG = 0x41, AMD_CABLE_DETECT = 0x42, diff --git a/drivers/ide/pci/cmd640.c b/drivers/ide/pci/cmd640.c index cd1ba14984a..1ad1e23e310 100644 --- a/drivers/ide/pci/cmd640.c +++ b/drivers/ide/pci/cmd640.c @@ -521,6 +521,7 @@ static void program_drive_counts(ide_drive_t *drive, unsigned int index) static void cmd640_set_mode(ide_drive_t *drive, unsigned int index, u8 pio_mode, unsigned int cycle_time) { + struct ide_timing *t; int setup_time, active_time, recovery_time, clock_time; u8 setup_count, active_count, recovery_count, recovery_count2, cycle_count; int bus_speed; @@ -532,8 +533,11 @@ static void cmd640_set_mode(ide_drive_t *drive, unsigned int index, if (pio_mode > 5) pio_mode = 5; - setup_time = ide_pio_timings[pio_mode].setup_time; - active_time = ide_pio_timings[pio_mode].active_time; + + t = ide_timing_find_mode(XFER_PIO_0 + pio_mode); + setup_time = t->setup; + active_time = t->active; + recovery_time = cycle_time - (setup_time + active_time); clock_time = 1000 / bus_speed; cycle_count = DIV_ROUND_UP(cycle_time, clock_time); @@ -607,11 +611,40 @@ static void cmd640_set_pio_mode(ide_drive_t *drive, const u8 pio) display_clocks(index); } +#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */ + +static void cmd640_init_dev(ide_drive_t *drive) +{ + unsigned int i = drive->hwif->channel * 2 + drive->select.b.unit; + +#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED + /* + * Reset timing to the slowest speed and turn off prefetch. + * This way, the drive identify code has a better chance. + */ + setup_counts[i] = 4; /* max possible */ + active_counts[i] = 16; /* max possible */ + recovery_counts[i] = 16; /* max possible */ + program_drive_counts(drive, i); + set_prefetch_mode(drive, i, 0); + printk(KERN_INFO DRV_NAME ": drive%d timings/prefetch cleared\n", i); +#else + /* + * Set the drive unmask flags to match the prefetch setting. + */ + check_prefetch(drive, i); + printk(KERN_INFO DRV_NAME ": drive%d timings/prefetch(%s) preserved\n", + i, drive->no_io_32bit ? 
"off" : "on"); +#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */ +} + static const struct ide_port_ops cmd640_port_ops = { + .init_dev = cmd640_init_dev, +#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED .set_pio_mode = cmd640_set_pio_mode, +#endif }; -#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */ static int pci_conf1(void) { @@ -654,10 +687,8 @@ static const struct ide_port_info cmd640_port_info __initdata = { IDE_HFLAG_NO_DMA | IDE_HFLAG_ABUSE_PREFETCH | IDE_HFLAG_ABUSE_FAST_DEVSEL, -#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED .port_ops = &cmd640_port_ops, .pio_mask = ATA_PIO5, -#endif }; static int cmd640x_init_one(unsigned long base, unsigned long ctl) @@ -683,12 +714,8 @@ static int cmd640x_init_one(unsigned long base, unsigned long ctl) */ static int __init cmd640x_init(void) { -#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED - int second_port_toggled = 0; -#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */ int second_port_cmd640 = 0, rc; const char *bus_type, *port2; - unsigned int index; u8 b, cfr; u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; hw_regs_t hw[2]; @@ -774,88 +801,44 @@ static int __init cmd640x_init(void) put_cmd640_reg(CMDTIM, 0); put_cmd640_reg(BRST, 0x40); - cmd_hwif1 = ide_find_port(); + b = get_cmd640_reg(CNTRL); /* * Try to enable the secondary interface, if not already enabled */ - if (cmd_hwif1 && - cmd_hwif1->drives[0].noprobe && cmd_hwif1->drives[1].noprobe) { - port2 = "not probed"; + if (secondary_port_responding()) { + if ((b & CNTRL_ENA_2ND)) { + second_port_cmd640 = 1; + port2 = "okay"; + } else if (cmd640_vlb) { + second_port_cmd640 = 1; + port2 = "alive"; + } else + port2 = "not cmd640"; } else { - b = get_cmd640_reg(CNTRL); + put_cmd640_reg(CNTRL, b ^ CNTRL_ENA_2ND); /* toggle the bit */ if (secondary_port_responding()) { - if ((b & CNTRL_ENA_2ND)) { - second_port_cmd640 = 1; - port2 = "okay"; - } else if (cmd640_vlb) { - second_port_cmd640 = 1; - port2 = "alive"; - } else - port2 = "not cmd640"; + second_port_cmd640 = 1; + port2 = "enabled"; } else { - put_cmd640_reg(CNTRL, b ^ CNTRL_ENA_2ND); /* toggle the bit */ - if (secondary_port_responding()) { - second_port_cmd640 = 1; -#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED - second_port_toggled = 1; -#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */ - port2 = "enabled"; - } else { - put_cmd640_reg(CNTRL, b); /* restore original setting */ - port2 = "not responding"; - } + put_cmd640_reg(CNTRL, b); /* restore original setting */ + port2 = "not responding"; } } /* * Initialize data for secondary cmd640 port, if enabled */ - if (second_port_cmd640 && cmd_hwif1) { - ide_init_port_hw(cmd_hwif1, &hw[1]); - idx[1] = cmd_hwif1->index; + if (second_port_cmd640) { + cmd_hwif1 = ide_find_port(); + if (cmd_hwif1) { + ide_init_port_hw(cmd_hwif1, &hw[1]); + idx[1] = cmd_hwif1->index; + } } printk(KERN_INFO "cmd640: %sserialized, secondary interface %s\n", second_port_cmd640 ? "" : "not ", port2); - /* - * Establish initial timings/prefetch for all drives. - * Do not unnecessarily disturb any prior BIOS setup of these. - */ - for (index = 0; index < (2 + (second_port_cmd640 << 1)); index++) { - ide_drive_t *drive; - - if (index > 1) { - if (cmd_hwif1 == NULL) - continue; - drive = &cmd_hwif1->drives[index & 1]; - } else { - if (cmd_hwif0 == NULL) - continue; - drive = &cmd_hwif0->drives[index & 1]; - } - -#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED - /* - * Reset timing to the slowest speed and turn off prefetch. - * This way, the drive identify code has a better chance. 
- */ - setup_counts [index] = 4; /* max possible */ - active_counts [index] = 16; /* max possible */ - recovery_counts [index] = 16; /* max possible */ - program_drive_counts(drive, index); - set_prefetch_mode(drive, index, 0); - printk("cmd640: drive%d timings/prefetch cleared\n", index); -#else - /* - * Set the drive unmask flags to match the prefetch setting - */ - check_prefetch(drive, index); - printk("cmd640: drive%d timings/prefetch(%s) preserved\n", - index, drive->no_io_32bit ? "off" : "on"); -#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */ - } - #ifdef CMD640_DUMP_REGS cmd640_dump_regs(); #endif diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/pci/cmd64x.c index ca4774aa27e..cfa784bacf4 100644 --- a/drivers/ide/pci/cmd64x.c +++ b/drivers/ide/pci/cmd64x.c @@ -116,6 +116,7 @@ static void cmd64x_tune_pio(ide_drive_t *drive, const u8 pio) { ide_hwif_t *hwif = HWIF(drive); struct pci_dev *dev = to_pci_dev(hwif->dev); + struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio); unsigned int cycle_time; u8 setup_count, arttim = 0; @@ -124,10 +125,9 @@ static void cmd64x_tune_pio(ide_drive_t *drive, const u8 pio) cycle_time = ide_pio_cycle_time(drive, pio); - program_cycle_times(drive, cycle_time, - ide_pio_timings[pio].active_time); + program_cycle_times(drive, cycle_time, t->active); - setup_count = quantize_timing(ide_pio_timings[pio].setup_time, + setup_count = quantize_timing(t->setup, 1000 / (ide_pci_clk ? ide_pci_clk : 33)); /* diff --git a/drivers/ide/pci/cs5535.c b/drivers/ide/pci/cs5535.c index 99fe91a191b..dc97c48623f 100644 --- a/drivers/ide/pci/cs5535.c +++ b/drivers/ide/pci/cs5535.c @@ -26,8 +26,6 @@ #include <linux/pci.h> #include <linux/ide.h> -#include "ide-timing.h" - #define MSR_ATAC_BASE 0x51300000 #define ATAC_GLD_MSR_CAP (MSR_ATAC_BASE+0) #define ATAC_GLD_MSR_CONFIG (MSR_ATAC_BASE+0x01) @@ -75,13 +73,11 @@ static unsigned int cs5535_udma_timings[5] = */ static void cs5535_set_speed(ide_drive_t *drive, const u8 speed) { - u32 reg = 0, dummy; int unit = drive->select.b.unit; - /* Set the PIO timings */ - if ((speed & XFER_MODE) == XFER_PIO) { + if (speed < XFER_SW_DMA_0) { ide_drive_t *pair = ide_get_paired_drive(drive); u8 cmd, pioa; diff --git a/drivers/ide/pci/cy82c693.c b/drivers/ide/pci/cy82c693.c index 8c534afcb6c..e14ad5530fa 100644 --- a/drivers/ide/pci/cy82c693.c +++ b/drivers/ide/pci/cy82c693.c @@ -133,6 +133,7 @@ static int calc_clk(int time, int bus_speed) */ static void compute_clocks(u8 pio, pio_clocks_t *p_pclk) { + struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio); int clk1, clk2; int bus_speed = ide_pci_clk ? 
ide_pci_clk : 33; @@ -141,15 +142,13 @@ static void compute_clocks(u8 pio, pio_clocks_t *p_pclk) */ /* let's calc the address setup time clocks */ - p_pclk->address_time = (u8)calc_clk(ide_pio_timings[pio].setup_time, bus_speed); + p_pclk->address_time = (u8)calc_clk(t->setup, bus_speed); /* let's calc the active and recovery time clocks */ - clk1 = calc_clk(ide_pio_timings[pio].active_time, bus_speed); + clk1 = calc_clk(t->active, bus_speed); /* calc recovery timing */ - clk2 = ide_pio_timings[pio].cycle_time - - ide_pio_timings[pio].active_time - - ide_pio_timings[pio].setup_time; + clk2 = t->cycle - t->active - t->setup; clk2 = calc_clk(clk2, bus_speed); diff --git a/drivers/ide/pci/delkin_cb.c b/drivers/ide/pci/delkin_cb.c index af0f30051d5..0106e2a2df7 100644 --- a/drivers/ide/pci/delkin_cb.c +++ b/drivers/ide/pci/delkin_cb.c @@ -93,7 +93,6 @@ delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id) i = hwif->index; - ide_init_port_data(hwif, i); ide_init_port_hw(hwif, &hw); idx[0] = i; diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/pci/it821x.c index 6ab04115286..cbf64720299 100644 --- a/drivers/ide/pci/it821x.c +++ b/drivers/ide/pci/it821x.c @@ -512,8 +512,14 @@ static void __devinit it821x_quirkproc(ide_drive_t *drive) } static struct ide_dma_ops it821x_pass_through_dma_ops = { + .dma_host_set = ide_dma_host_set, + .dma_setup = ide_dma_setup, + .dma_exec_cmd = ide_dma_exec_cmd, .dma_start = it821x_dma_start, .dma_end = it821x_dma_end, + .dma_test_irq = ide_dma_test_irq, + .dma_timeout = ide_dma_timeout, + .dma_lost_irq = ide_dma_lost_irq, }; /** diff --git a/drivers/ide/pci/scc_pata.c b/drivers/ide/pci/scc_pata.c index 1584ebb6a18..789c66dfbde 100644 --- a/drivers/ide/pci/scc_pata.c +++ b/drivers/ide/pci/scc_pata.c @@ -558,12 +558,9 @@ static int scc_ide_setup_pci_device(struct pci_dev *dev, u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; int i; - hwif = ide_find_port(); - if (hwif == NULL) { - printk(KERN_ERR "%s: too many IDE interfaces, " - "no room in table\n", SCC_PATA_NAME); + hwif = ide_find_port_slot(d); + if (hwif == NULL) return -ENOMEM; - } memset(&hw, 0, sizeof(hw)); for (i = 0; i <= 8; i++) @@ -572,7 +569,6 @@ static int scc_ide_setup_pci_device(struct pci_dev *dev, hw.dev = &dev->dev; hw.chipset = ide_pci; ide_init_port_hw(hwif, &hw); - hwif->dev = &dev->dev; idx[0] = hwif->index; diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c index 24513e3dcd6..c79ff5b4108 100644 --- a/drivers/ide/pci/sgiioc4.c +++ b/drivers/ide/pci/sgiioc4.c @@ -568,6 +568,7 @@ static const struct ide_dma_ops sgiioc4_dma_ops = { }; static const struct ide_port_info sgiioc4_port_info __devinitdata = { + .name = DRV_NAME, .chipset = ide_pci, .init_dma = ide_dma_sgiioc4, .port_ops = &sgiioc4_port_ops, @@ -587,13 +588,6 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev) hw_regs_t hw; struct ide_port_info d = sgiioc4_port_info; - hwif = ide_find_port(); - if (hwif == NULL) { - printk(KERN_ERR "%s: too many IDE interfaces, no room in table\n", - DRV_NAME); - return -ENOMEM; - } - /* Get the CmdBlk and CtrlBlk Base Registers */ bar0 = pci_resource_start(dev, 0); virt_base = ioremap(bar0, pci_resource_len(dev, 0)); @@ -608,11 +602,11 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev) cmd_phys_base = bar0 + IOC4_CMD_OFFSET; if (!request_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE, - hwif->name)) { + DRV_NAME)) { printk(KERN_ERR "%s : %s -- ERROR, Addresses " "0x%p to 0x%p ALREADY in use\n", - __func__, hwif->name, (void *) cmd_phys_base, + __func__, DRV_NAME, (void *) 
cmd_phys_base, (void *) cmd_phys_base + IOC4_CMD_CTL_BLK_SIZE); return -ENOMEM; } @@ -623,9 +617,12 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev) hw.irq = dev->irq; hw.chipset = ide_pci; hw.dev = &dev->dev; - ide_init_port_hw(hwif, &hw); - hwif->dev = &dev->dev; + hwif = ide_find_port_slot(&d); + if (hwif == NULL) + goto err; + + ide_init_port_hw(hwif, &hw); /* The IOC4 uses MMIO rather than Port IO. */ default_hwif_mmiops(hwif); @@ -641,6 +638,10 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev) return -EIO; return 0; +err: + release_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE); + iounmap(virt_base); + return -ENOMEM; } static unsigned int __devinit diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c index b75e9bb390a..6e9d7655d89 100644 --- a/drivers/ide/pci/siimage.c +++ b/drivers/ide/pci/siimage.c @@ -421,8 +421,7 @@ static int sil_sata_reset_poll(ide_drive_t *drive) if ((sata_stat & 0x03) != 0x03) { printk(KERN_WARNING "%s: reset phy dead, status=0x%08x\n", hwif->name, sata_stat); - HWGROUP(drive)->polling = 0; - return ide_started; + return -ENXIO; } } diff --git a/drivers/ide/pci/sis5513.c b/drivers/ide/pci/sis5513.c index e127eb25ab6..2389945ca95 100644 --- a/drivers/ide/pci/sis5513.c +++ b/drivers/ide/pci/sis5513.c @@ -52,8 +52,6 @@ #include <linux/init.h> #include <linux/ide.h> -#include "ide-timing.h" - /* registers layout and init values are chipset family dependant */ #define ATA_16 0x01 @@ -616,7 +614,6 @@ MODULE_LICENSE("GPL"); /* * TODO: * - CLEANUP - * - Use drivers/ide/ide-timing.h ! * - More checks in the config registers (force values instead of * relying on the BIOS setting them correctly). * - Further optimisations ? diff --git a/drivers/ide/pci/sl82c105.c b/drivers/ide/pci/sl82c105.c index ce84fa045d3..6efbde29717 100644 --- a/drivers/ide/pci/sl82c105.c +++ b/drivers/ide/pci/sl82c105.c @@ -47,10 +47,11 @@ */ static unsigned int get_pio_timings(ide_drive_t *drive, u8 pio) { + struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio); unsigned int cmd_on, cmd_off; u8 iordy = 0; - cmd_on = (ide_pio_timings[pio].active_time + 29) / 30; + cmd_on = (t->active + 29) / 30; cmd_off = (ide_pio_cycle_time(drive, pio) - 30 * cmd_on + 29) / 30; if (cmd_on == 0) diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/pci/via82cxxx.c index 3ed9728abd2..e47384c70c4 100644 --- a/drivers/ide/pci/via82cxxx.c +++ b/drivers/ide/pci/via82cxxx.c @@ -35,8 +35,6 @@ #include <asm/processor.h> #endif -#include "ide-timing.h" - #define VIA_IDE_ENABLE 0x40 #define VIA_IDE_CONFIG 0x41 #define VIA_FIFO_CONFIG 0x43 diff --git a/drivers/ide/ppc/Makefile b/drivers/ide/ppc/Makefile index 65af5848b28..74e52adcdf4 100644 --- a/drivers/ide/ppc/Makefile +++ b/drivers/ide/ppc/Makefile @@ -1,3 +1,2 @@ obj-$(CONFIG_BLK_DEV_IDE_PMAC) += pmac.o -obj-$(CONFIG_BLK_DEV_MPC8xx_IDE) += mpc8xx.o diff --git a/drivers/ide/ppc/mpc8xx.c b/drivers/ide/ppc/mpc8xx.c deleted file mode 100644 index 236f9c38e51..00000000000 --- a/drivers/ide/ppc/mpc8xx.c +++ /dev/null @@ -1,851 +0,0 @@ -/* - * Copyright (C) 2000, 2001 Wolfgang Denk, wd@denx.de - * Modified for direct IDE interface - * by Thomas Lange, thomas@corelatus.com - * Modified for direct IDE interface on 8xx without using the PCMCIA - * controller - * by Steven.Scholz@imc-berlin.de - * Moved out of arch/ppc/kernel/m8xx_setup.c, other minor cleanups - * by Mathew Locke <mattl@mvista.com> - */ - -#include <linux/errno.h> -#include <linux/kernel.h> -#include <linux/mm.h> -#include <linux/stddef.h> -#include <linux/unistd.h> -#include 
<linux/ptrace.h> -#include <linux/slab.h> -#include <linux/user.h> -#include <linux/tty.h> -#include <linux/major.h> -#include <linux/interrupt.h> -#include <linux/reboot.h> -#include <linux/init.h> -#include <linux/ioport.h> -#include <linux/ide.h> -#include <linux/bootmem.h> - -#include <asm/mpc8xx.h> -#include <asm/mmu.h> -#include <asm/processor.h> -#include <asm/io.h> -#include <asm/pgtable.h> -#include <asm/ide.h> -#include <asm/8xx_immap.h> -#include <asm/machdep.h> -#include <asm/irq.h> - -#define DRV_NAME "ide-mpc8xx" - -static int identify (volatile u8 *p); -static void print_fixed (volatile u8 *p); -static void print_funcid (int func); -static int check_ide_device (unsigned long base); - -static void ide_interrupt_ack (void *dev); -static void m8xx_ide_set_pio_mode(ide_drive_t *drive, const u8 pio); - -typedef struct ide_ioport_desc { - unsigned long base_off; /* Offset to PCMCIA memory */ - unsigned long reg_off[IDE_NR_PORTS]; /* controller register offsets */ - int irq; /* IRQ */ -} ide_ioport_desc_t; - -ide_ioport_desc_t ioport_dsc[MAX_HWIFS] = { -#ifdef IDE0_BASE_OFFSET - { IDE0_BASE_OFFSET, - { - IDE0_DATA_REG_OFFSET, - IDE0_ERROR_REG_OFFSET, - IDE0_NSECTOR_REG_OFFSET, - IDE0_SECTOR_REG_OFFSET, - IDE0_LCYL_REG_OFFSET, - IDE0_HCYL_REG_OFFSET, - IDE0_SELECT_REG_OFFSET, - IDE0_STATUS_REG_OFFSET, - IDE0_CONTROL_REG_OFFSET, - IDE0_IRQ_REG_OFFSET, - }, - IDE0_INTERRUPT, - }, -#ifdef IDE1_BASE_OFFSET - { IDE1_BASE_OFFSET, - { - IDE1_DATA_REG_OFFSET, - IDE1_ERROR_REG_OFFSET, - IDE1_NSECTOR_REG_OFFSET, - IDE1_SECTOR_REG_OFFSET, - IDE1_LCYL_REG_OFFSET, - IDE1_HCYL_REG_OFFSET, - IDE1_SELECT_REG_OFFSET, - IDE1_STATUS_REG_OFFSET, - IDE1_CONTROL_REG_OFFSET, - IDE1_IRQ_REG_OFFSET, - }, - IDE1_INTERRUPT, - }, -#endif /* IDE1_BASE_OFFSET */ -#endif /* IDE0_BASE_OFFSET */ -}; - -ide_pio_timings_t ide_pio_clocks[6]; -int hold_time[6] = {30, 20, 15, 10, 10, 10 }; /* PIO Mode 5 with IORDY (nonstandard) */ - -/* - * Warning: only 1 (ONE) PCMCIA slot supported here, - * which must be correctly initialized by the firmware (PPCBoot). - */ -static int _slot_ = -1; /* will be read from PCMCIA registers */ - -/* Make clock cycles and always round up */ -#define PCMCIA_MK_CLKS( t, T ) (( (t) * ((T)/1000000) + 999U ) / 1000U ) - -#define M8XX_PCMCIA_CD2(slot) (0x10000000 >> (slot << 4)) -#define M8XX_PCMCIA_CD1(slot) (0x08000000 >> (slot << 4)) - -/* - * The TQM850L hardware has two pins swapped! Grrrrgh! 
- */ -#ifdef CONFIG_TQM850L -#define __MY_PCMCIA_GCRX_CXRESET PCMCIA_GCRX_CXOE -#define __MY_PCMCIA_GCRX_CXOE PCMCIA_GCRX_CXRESET -#else -#define __MY_PCMCIA_GCRX_CXRESET PCMCIA_GCRX_CXRESET -#define __MY_PCMCIA_GCRX_CXOE PCMCIA_GCRX_CXOE -#endif - -#if defined(CONFIG_BLK_DEV_MPC8xx_IDE) && defined(CONFIG_IDE_8xx_PCCARD) -#define PCMCIA_SCHLVL IDE0_INTERRUPT /* Status Change Interrupt Level */ -static int pcmcia_schlvl = PCMCIA_SCHLVL; -#endif - -/* - * See include/linux/ide.h for definition of hw_regs_t (p, base) - */ - -/* - * m8xx_ide_init_ports() for a direct IDE interface _using_ - * MPC8xx's internal PCMCIA interface - */ -#if defined(CONFIG_IDE_8xx_PCCARD) || defined(CONFIG_IDE_8xx_DIRECT) -static int __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port) -{ - unsigned long *p = hw->io_ports_array; - int i; - - typedef struct { - ulong br; - ulong or; - } pcmcia_win_t; - volatile pcmcia_win_t *win; - volatile pcmconf8xx_t *pcmp; - - uint *pgcrx; - u32 pcmcia_phy_base; - u32 pcmcia_phy_end; - static unsigned long pcmcia_base = 0; - unsigned long base; - - *p = 0; - - pcmp = (pcmconf8xx_t *)(&(((immap_t *)IMAP_ADDR)->im_pcmcia)); - - if (!pcmcia_base) { - /* - * Read out PCMCIA registers. Since the reset values - * are undefined, we sure hope that they have been - * set up by firmware - */ - - /* Scan all registers for valid settings */ - pcmcia_phy_base = 0xFFFFFFFF; - pcmcia_phy_end = 0; - /* br0 is start of brX and orX regs */ - win = (pcmcia_win_t *) \ - (&(((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pbr0)); - for (i = 0; i < 8; i++) { - if (win->or & 1) { /* This bank is marked as valid */ - if (win->br < pcmcia_phy_base) { - pcmcia_phy_base = win->br; - } - if ((win->br + PCMCIA_MEM_SIZE) > pcmcia_phy_end) { - pcmcia_phy_end = win->br + PCMCIA_MEM_SIZE; - } - /* Check which slot that has been defined */ - _slot_ = (win->or >> 2) & 1; - - } /* Valid bank */ - win++; - } /* for */ - - printk ("PCMCIA slot %c: phys mem %08x...%08x (size %08x)\n", - 'A' + _slot_, - pcmcia_phy_base, pcmcia_phy_end, - pcmcia_phy_end - pcmcia_phy_base); - - if (!request_mem_region(pcmcia_phy_base, - pcmcia_phy_end - pcmcia_phy_base, - DRV_NAME)) { - printk(KERN_ERR "%s: resources busy\n", DRV_NAME); - return -EBUSY; - } - - pcmcia_base=(unsigned long)ioremap(pcmcia_phy_base, - pcmcia_phy_end-pcmcia_phy_base); - -#ifdef DEBUG - printk ("PCMCIA virt base: %08lx\n", pcmcia_base); -#endif - /* Compute clock cycles for PIO timings */ - for (i=0; i<6; ++i) { - bd_t *binfo = (bd_t *)__res; - - hold_time[i] = - PCMCIA_MK_CLKS (hold_time[i], - binfo->bi_busfreq); - ide_pio_clocks[i].setup_time = - PCMCIA_MK_CLKS (ide_pio_timings[i].setup_time, - binfo->bi_busfreq); - ide_pio_clocks[i].active_time = - PCMCIA_MK_CLKS (ide_pio_timings[i].active_time, - binfo->bi_busfreq); - ide_pio_clocks[i].cycle_time = - PCMCIA_MK_CLKS (ide_pio_timings[i].cycle_time, - binfo->bi_busfreq); -#if 0 - printk ("PIO mode %d timings: %d/%d/%d => %d/%d/%d\n", - i, - ide_pio_clocks[i].setup_time, - ide_pio_clocks[i].active_time, - ide_pio_clocks[i].hold_time, - ide_pio_clocks[i].cycle_time, - ide_pio_timings[i].setup_time, - ide_pio_timings[i].active_time, - ide_pio_timings[i].hold_time, - ide_pio_timings[i].cycle_time); -#endif - } - } - - if (_slot_ == -1) { - printk ("PCMCIA slot has not been defined! 
Using A as default\n"); - _slot_ = 0; - } - -#ifdef CONFIG_IDE_8xx_PCCARD - -#ifdef DEBUG - printk ("PIPR = 0x%08X slot %c ==> mask = 0x%X\n", - pcmp->pcmc_pipr, - 'A' + _slot_, - M8XX_PCMCIA_CD1(_slot_) | M8XX_PCMCIA_CD2(_slot_) ); -#endif /* DEBUG */ - - if (pcmp->pcmc_pipr & (M8XX_PCMCIA_CD1(_slot_)|M8XX_PCMCIA_CD2(_slot_))) { - printk ("No card in slot %c: PIPR=%08x\n", - 'A' + _slot_, (u32) pcmp->pcmc_pipr); - return -ENODEV; /* No card in slot */ - } - - check_ide_device (pcmcia_base); - -#endif /* CONFIG_IDE_8xx_PCCARD */ - - base = pcmcia_base + ioport_dsc[data_port].base_off; -#ifdef DEBUG - printk ("base: %08x + %08x = %08x\n", - pcmcia_base, ioport_dsc[data_port].base_off, base); -#endif - - for (i = 0; i < IDE_NR_PORTS; ++i) { -#ifdef DEBUG - printk ("port[%d]: %08x + %08x = %08x\n", - i, - base, - ioport_dsc[data_port].reg_off[i], - i, base + ioport_dsc[data_port].reg_off[i]); -#endif - *p++ = base + ioport_dsc[data_port].reg_off[i]; - } - - hw->irq = ioport_dsc[data_port].irq; - hw->ack_intr = (ide_ack_intr_t *)ide_interrupt_ack; - -#ifdef CONFIG_IDE_8xx_PCCARD - { - unsigned int reg; - - if (_slot_) - pgcrx = &((immap_t *) IMAP_ADDR)->im_pcmcia.pcmc_pgcrb; - else - pgcrx = &((immap_t *) IMAP_ADDR)->im_pcmcia.pcmc_pgcra; - - reg = *pgcrx; - reg |= mk_int_int_mask (pcmcia_schlvl) << 24; - reg |= mk_int_int_mask (pcmcia_schlvl) << 16; - *pgcrx = reg; - } -#endif /* CONFIG_IDE_8xx_PCCARD */ - - /* Enable Harddisk Interrupt, - * and make it edge sensitive - */ - /* (11-18) Set edge detect for irq, no wakeup from low power mode */ - ((immap_t *)IMAP_ADDR)->im_siu_conf.sc_siel |= - (0x80000000 >> ioport_dsc[data_port].irq); - -#ifdef CONFIG_IDE_8xx_PCCARD - /* Make sure we don't get garbage irq */ - ((immap_t *) IMAP_ADDR)->im_pcmcia.pcmc_pscr = 0xFFFF; - - /* Enable falling edge irq */ - pcmp->pcmc_per = 0x100000 >> (16 * _slot_); -#endif /* CONFIG_IDE_8xx_PCCARD */ - - hw->chipset = ide_generic; - - return 0; -} -#endif /* CONFIG_IDE_8xx_PCCARD || CONFIG_IDE_8xx_DIRECT */ - -/* - * m8xx_ide_init_ports() for a direct IDE interface _not_ using - * MPC8xx's internal PCMCIA interface - */ -#if defined(CONFIG_IDE_EXT_DIRECT) -static int __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port) -{ - unsigned long *p = hw->io_ports_array; - int i; - - u32 ide_phy_base; - u32 ide_phy_end; - static unsigned long ide_base = 0; - unsigned long base; - - *p = 0; - - if (!ide_base) { - - /* TODO: - * - add code to read ORx, BRx - */ - ide_phy_base = CFG_ATA_BASE_ADDR; - ide_phy_end = CFG_ATA_BASE_ADDR + 0x200; - - printk ("IDE phys mem : %08x...%08x (size %08x)\n", - ide_phy_base, ide_phy_end, - ide_phy_end - ide_phy_base); - - if (!request_mem_region(ide_phy_base, 0x200, DRV_NAME)) { - printk(KERN_ERR "%s: resources busy\n", DRV_NAME); - return -EBUSY; - } - - ide_base=(unsigned long)ioremap(ide_phy_base, - ide_phy_end-ide_phy_base); - -#ifdef DEBUG - printk ("IDE virt base: %08lx\n", ide_base); -#endif - } - - base = ide_base + ioport_dsc[data_port].base_off; -#ifdef DEBUG - printk ("base: %08x + %08x = %08x\n", - ide_base, ioport_dsc[data_port].base_off, base); -#endif - - for (i = 0; i < IDE_NR_PORTS; ++i) { -#ifdef DEBUG - printk ("port[%d]: %08x + %08x = %08x\n", - i, - base, - ioport_dsc[data_port].reg_off[i], - i, base + ioport_dsc[data_port].reg_off[i]); -#endif - *p++ = base + ioport_dsc[data_port].reg_off[i]; - } - - /* direct connected IDE drive, i.e. 
external IRQ */ - hw->irq = ioport_dsc[data_port].irq; - hw->ack_intr = (ide_ack_intr_t *)ide_interrupt_ack; - - /* Enable Harddisk Interrupt, - * and make it edge sensitive - */ - /* (11-18) Set edge detect for irq, no wakeup from low power mode */ - ((immap_t *) IMAP_ADDR)->im_siu_conf.sc_siel |= - (0x80000000 >> ioport_dsc[data_port].irq); - - hw->chipset = ide_generic; - - return 0; -} -#endif /* CONFIG_IDE_8xx_DIRECT */ - - -/* -------------------------------------------------------------------- */ - - -/* PCMCIA Timing */ -#ifndef PCMCIA_SHT -#define PCMCIA_SHT(t) ((t & 0x0F)<<16) /* Strobe Hold Time */ -#define PCMCIA_SST(t) ((t & 0x0F)<<12) /* Strobe Setup Time */ -#define PCMCIA_SL(t) ((t==32) ? 0 : ((t & 0x1F)<<7)) /* Strobe Length */ -#endif - -/* Calculate PIO timings */ -static void m8xx_ide_set_pio_mode(ide_drive_t *drive, const u8 pio) -{ -#if defined(CONFIG_IDE_8xx_PCCARD) || defined(CONFIG_IDE_8xx_DIRECT) - volatile pcmconf8xx_t *pcmp; - ulong timing, mask, reg; - - pcmp = (pcmconf8xx_t *)(&(((immap_t *)IMAP_ADDR)->im_pcmcia)); - - mask = ~(PCMCIA_SHT(0xFF) | PCMCIA_SST(0xFF) | PCMCIA_SL(0xFF)); - - timing = PCMCIA_SHT(hold_time[pio] ) - | PCMCIA_SST(ide_pio_clocks[pio].setup_time ) - | PCMCIA_SL (ide_pio_clocks[pio].active_time) - ; - -#if 1 - printk ("Setting timing bits 0x%08lx in PCMCIA controller\n", timing); -#endif - if ((reg = pcmp->pcmc_por0 & mask) != 0) - pcmp->pcmc_por0 = reg | timing; - - if ((reg = pcmp->pcmc_por1 & mask) != 0) - pcmp->pcmc_por1 = reg | timing; - - if ((reg = pcmp->pcmc_por2 & mask) != 0) - pcmp->pcmc_por2 = reg | timing; - - if ((reg = pcmp->pcmc_por3 & mask) != 0) - pcmp->pcmc_por3 = reg | timing; - - if ((reg = pcmp->pcmc_por4 & mask) != 0) - pcmp->pcmc_por4 = reg | timing; - - if ((reg = pcmp->pcmc_por5 & mask) != 0) - pcmp->pcmc_por5 = reg | timing; - - if ((reg = pcmp->pcmc_por6 & mask) != 0) - pcmp->pcmc_por6 = reg | timing; - - if ((reg = pcmp->pcmc_por7 & mask) != 0) - pcmp->pcmc_por7 = reg | timing; - -#elif defined(CONFIG_IDE_EXT_DIRECT) - - printk("%s[%d] %s: not implemented yet!\n", - __FILE__, __LINE__, __func__); -#endif /* defined(CONFIG_IDE_8xx_PCCARD) || defined(CONFIG_IDE_8xx_PCMCIA */ -} - -static const struct ide_port_ops m8xx_port_ops = { - .set_pio_mode = m8xx_ide_set_pio_mode, -}; - -static void -ide_interrupt_ack (void *dev) -{ -#ifdef CONFIG_IDE_8xx_PCCARD - u_int pscr, pipr; - -#if (PCMCIA_SOCKETS_NO == 2) - u_int _slot_; -#endif - - /* get interrupt sources */ - - pscr = ((volatile immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pscr; - pipr = ((volatile immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pipr; - - /* - * report only if both card detect signals are the same - * not too nice done, - * we depend on that CD2 is the bit to the left of CD1... - */ - - if(_slot_==-1){ - printk("PCMCIA slot has not been defined! Using A as default\n"); - _slot_=0; - } - - if(((pipr & M8XX_PCMCIA_CD2(_slot_)) >> 1) ^ - (pipr & M8XX_PCMCIA_CD1(_slot_)) ) { - printk ("card detect interrupt\n"); - } - /* clear the interrupt sources */ - ((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pscr = pscr; - -#else /* ! CONFIG_IDE_8xx_PCCARD */ - /* - * Only CONFIG_IDE_8xx_PCCARD is using the interrupt of the - * MPC8xx's PCMCIA controller, so there is nothing to be done here - * for CONFIG_IDE_8xx_DIRECT and CONFIG_IDE_EXT_DIRECT. - * The interrupt is handled somewhere else. 
-- Steven - */ -#endif /* CONFIG_IDE_8xx_PCCARD */ -} - - - -/* - * CIS Tupel codes - */ -#define CISTPL_NULL 0x00 -#define CISTPL_DEVICE 0x01 -#define CISTPL_LONGLINK_CB 0x02 -#define CISTPL_INDIRECT 0x03 -#define CISTPL_CONFIG_CB 0x04 -#define CISTPL_CFTABLE_ENTRY_CB 0x05 -#define CISTPL_LONGLINK_MFC 0x06 -#define CISTPL_BAR 0x07 -#define CISTPL_PWR_MGMNT 0x08 -#define CISTPL_EXTDEVICE 0x09 -#define CISTPL_CHECKSUM 0x10 -#define CISTPL_LONGLINK_A 0x11 -#define CISTPL_LONGLINK_C 0x12 -#define CISTPL_LINKTARGET 0x13 -#define CISTPL_NO_LINK 0x14 -#define CISTPL_VERS_1 0x15 -#define CISTPL_ALTSTR 0x16 -#define CISTPL_DEVICE_A 0x17 -#define CISTPL_JEDEC_C 0x18 -#define CISTPL_JEDEC_A 0x19 -#define CISTPL_CONFIG 0x1a -#define CISTPL_CFTABLE_ENTRY 0x1b -#define CISTPL_DEVICE_OC 0x1c -#define CISTPL_DEVICE_OA 0x1d -#define CISTPL_DEVICE_GEO 0x1e -#define CISTPL_DEVICE_GEO_A 0x1f -#define CISTPL_MANFID 0x20 -#define CISTPL_FUNCID 0x21 -#define CISTPL_FUNCE 0x22 -#define CISTPL_SWIL 0x23 -#define CISTPL_END 0xff - -/* - * CIS Function ID codes - */ -#define CISTPL_FUNCID_MULTI 0x00 -#define CISTPL_FUNCID_MEMORY 0x01 -#define CISTPL_FUNCID_SERIAL 0x02 -#define CISTPL_FUNCID_PARALLEL 0x03 -#define CISTPL_FUNCID_FIXED 0x04 -#define CISTPL_FUNCID_VIDEO 0x05 -#define CISTPL_FUNCID_NETWORK 0x06 -#define CISTPL_FUNCID_AIMS 0x07 -#define CISTPL_FUNCID_SCSI 0x08 - -/* - * Fixed Disk FUNCE codes - */ -#define CISTPL_IDE_INTERFACE 0x01 - -#define CISTPL_FUNCE_IDE_IFACE 0x01 -#define CISTPL_FUNCE_IDE_MASTER 0x02 -#define CISTPL_FUNCE_IDE_SLAVE 0x03 - -/* First feature byte */ -#define CISTPL_IDE_SILICON 0x04 -#define CISTPL_IDE_UNIQUE 0x08 -#define CISTPL_IDE_DUAL 0x10 - -/* Second feature byte */ -#define CISTPL_IDE_HAS_SLEEP 0x01 -#define CISTPL_IDE_HAS_STANDBY 0x02 -#define CISTPL_IDE_HAS_IDLE 0x04 -#define CISTPL_IDE_LOW_POWER 0x08 -#define CISTPL_IDE_REG_INHIBIT 0x10 -#define CISTPL_IDE_HAS_INDEX 0x20 -#define CISTPL_IDE_IOIS16 0x40 - - -/* -------------------------------------------------------------------- */ - - -#define MAX_TUPEL_SZ 512 -#define MAX_FEATURES 4 - -static int check_ide_device (unsigned long base) -{ - volatile u8 *ident = NULL; - volatile u8 *feature_p[MAX_FEATURES]; - volatile u8 *p, *start; - int n_features = 0; - u8 func_id = ~0; - u8 code, len; - unsigned short config_base = 0; - int found = 0; - int i; - -#ifdef DEBUG - printk ("PCMCIA MEM: %08lX\n", base); -#endif - start = p = (volatile u8 *) base; - - while ((p - start) < MAX_TUPEL_SZ) { - - code = *p; p += 2; - - if (code == 0xFF) { /* End of chain */ - break; - } - - len = *p; p += 2; -#ifdef DEBUG_PCMCIA - { volatile u8 *q = p; - printk ("\nTuple code %02x length %d\n\tData:", - code, len); - - for (i = 0; i < len; ++i) { - printk (" %02x", *q); - q+= 2; - } - } -#endif /* DEBUG_PCMCIA */ - switch (code) { - case CISTPL_VERS_1: - ident = p + 4; - break; - case CISTPL_FUNCID: - func_id = *p; - break; - case CISTPL_FUNCE: - if (n_features < MAX_FEATURES) - feature_p[n_features++] = p; - break; - case CISTPL_CONFIG: - config_base = (*(p+6) << 8) + (*(p+4)); - default: - break; - } - p += 2 * len; - } - - found = identify (ident); - - if (func_id != ((u8)~0)) { - print_funcid (func_id); - - if (func_id == CISTPL_FUNCID_FIXED) - found = 1; - else - return (1); /* no disk drive */ - } - - for (i=0; i<n_features; ++i) { - print_fixed (feature_p[i]); - } - - if (!found) { - printk ("unknown card type\n"); - return (1); - } - - /* set level mode irq and I/O mapped device in config reg*/ - *((u8 *)(base + config_base)) = 0x41; - - 
return (0); -} - -/* ------------------------------------------------------------------------- */ - -static void print_funcid (int func) -{ - switch (func) { - case CISTPL_FUNCID_MULTI: - printk (" Multi-Function"); - break; - case CISTPL_FUNCID_MEMORY: - printk (" Memory"); - break; - case CISTPL_FUNCID_SERIAL: - printk (" Serial Port"); - break; - case CISTPL_FUNCID_PARALLEL: - printk (" Parallel Port"); - break; - case CISTPL_FUNCID_FIXED: - printk (" Fixed Disk"); - break; - case CISTPL_FUNCID_VIDEO: - printk (" Video Adapter"); - break; - case CISTPL_FUNCID_NETWORK: - printk (" Network Adapter"); - break; - case CISTPL_FUNCID_AIMS: - printk (" AIMS Card"); - break; - case CISTPL_FUNCID_SCSI: - printk (" SCSI Adapter"); - break; - default: - printk (" Unknown"); - break; - } - printk (" Card\n"); -} - -/* ------------------------------------------------------------------------- */ - -static void print_fixed (volatile u8 *p) -{ - if (p == NULL) - return; - - switch (*p) { - case CISTPL_FUNCE_IDE_IFACE: - { u8 iface = *(p+2); - - printk ((iface == CISTPL_IDE_INTERFACE) ? " IDE" : " unknown"); - printk (" interface "); - break; - } - case CISTPL_FUNCE_IDE_MASTER: - case CISTPL_FUNCE_IDE_SLAVE: - { u8 f1 = *(p+2); - u8 f2 = *(p+4); - - printk ((f1 & CISTPL_IDE_SILICON) ? " [silicon]" : " [rotating]"); - - if (f1 & CISTPL_IDE_UNIQUE) - printk (" [unique]"); - - printk ((f1 & CISTPL_IDE_DUAL) ? " [dual]" : " [single]"); - - if (f2 & CISTPL_IDE_HAS_SLEEP) - printk (" [sleep]"); - - if (f2 & CISTPL_IDE_HAS_STANDBY) - printk (" [standby]"); - - if (f2 & CISTPL_IDE_HAS_IDLE) - printk (" [idle]"); - - if (f2 & CISTPL_IDE_LOW_POWER) - printk (" [low power]"); - - if (f2 & CISTPL_IDE_REG_INHIBIT) - printk (" [reg inhibit]"); - - if (f2 & CISTPL_IDE_HAS_INDEX) - printk (" [index]"); - - if (f2 & CISTPL_IDE_IOIS16) - printk (" [IOis16]"); - - break; - } - } - printk ("\n"); -} - -/* ------------------------------------------------------------------------- */ - - -#define MAX_IDENT_CHARS 64 -#define MAX_IDENT_FIELDS 4 - -static u8 *known_cards[] = { - "ARGOSY PnPIDE D5", - NULL -}; - -static int identify (volatile u8 *p) -{ - u8 id_str[MAX_IDENT_CHARS]; - u8 data; - u8 *t; - u8 **card; - int i, done; - - if (p == NULL) - return (0); /* Don't know */ - - t = id_str; - done =0; - - for (i=0; i<=4 && !done; ++i, p+=2) { - while ((data = *p) != '\0') { - if (data == 0xFF) { - done = 1; - break; - } - *t++ = data; - if (t == &id_str[MAX_IDENT_CHARS-1]) { - done = 1; - break; - } - p += 2; - } - if (!done) - *t++ = ' '; - } - *t = '\0'; - while (--t > id_str) { - if (*t == ' ') - *t = '\0'; - else - break; - } - printk ("Card ID: %s\n", id_str); - - for (card=known_cards; *card; ++card) { - if (strcmp(*card, id_str) == 0) { /* found! 
*/ - return (1); - } - } - - return (0); /* don't know */ -} - -static int __init mpc8xx_ide_probe(void) -{ - hw_regs_t hw; - u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; - -#ifdef IDE0_BASE_OFFSET - memset(&hw, 0, sizeof(hw)); - if (!m8xx_ide_init_ports(&hw, 0)) { - ide_hwif_t *hwif = ide_find_port(); - - if (hwif) { - ide_init_port_hw(hwif, &hw); - hwif->pio_mask = ATA_PIO4; - hwif->port_ops = &m8xx_port_ops; - - idx[0] = hwif->index; - } - } -#ifdef IDE1_BASE_OFFSET - memset(&hw, 0, sizeof(hw)); - if (!m8xx_ide_init_ports(&hw, 1)) { - ide_hwif_t *mate = ide_find_port(); - - if (mate) { - ide_init_port_hw(mate, &hw); - mate->pio_mask = ATA_PIO4; - mate->port_ops = &m8xx_port_ops; - - idx[1] = mate->index; - } - } -#endif -#endif - - ide_device_add(idx, NULL); - - return 0; -} - -module_init(mpc8xx_ide_probe); - -MODULE_LICENSE("GPL"); diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c index dcb2c466bb9..93fb9067c04 100644 --- a/drivers/ide/ppc/pmac.c +++ b/drivers/ide/ppc/pmac.c @@ -5,7 +5,7 @@ * for doing DMA. * * Copyright (C) 1998-2003 Paul Mackerras & Ben. Herrenschmidt - * Copyright (C) 2007 Bartlomiej Zolnierkiewicz + * Copyright (C) 2007-2008 Bartlomiej Zolnierkiewicz * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -48,8 +48,6 @@ #include <asm/mediabay.h> #endif -#include "../ide-timing.h" - #undef IDE_PMAC_DEBUG #define DMA_WAIT_TIMEOUT 50 @@ -495,6 +493,7 @@ static void pmac_outbsync(ide_hwif_t *hwif, u8 value, unsigned long port) static void pmac_ide_set_pio_mode(ide_drive_t *drive, const u8 pio) { + struct ide_timing *tim = ide_timing_find_mode(XFER_PIO_0 + pio); u32 *timings, t; unsigned accessTicks, recTicks; unsigned accessTime, recTime; @@ -526,10 +525,9 @@ pmac_ide_set_pio_mode(ide_drive_t *drive, const u8 pio) } case controller_kl_ata4: /* 66Mhz cell */ - recTime = cycle_time - ide_pio_timings[pio].active_time - - ide_pio_timings[pio].setup_time; + recTime = cycle_time - tim->active - tim->setup; recTime = max(recTime, 150U); - accessTime = ide_pio_timings[pio].active_time; + accessTime = tim->active; accessTime = max(accessTime, 150U); accessTicks = SYSCLK_TICKS_66(accessTime); accessTicks = min(accessTicks, 0x1fU); @@ -542,10 +540,9 @@ pmac_ide_set_pio_mode(ide_drive_t *drive, const u8 pio) default: { /* 33Mhz cell */ int ebit = 0; - recTime = cycle_time - ide_pio_timings[pio].active_time - - ide_pio_timings[pio].setup_time; + recTime = cycle_time - tim->active - tim->setup; recTime = max(recTime, 150U); - accessTime = ide_pio_timings[pio].active_time; + accessTime = tim->active; accessTime = max(accessTime, 150U); accessTicks = SYSCLK_TICKS(accessTime); accessTicks = min(accessTicks, 0x1fU); @@ -1151,8 +1148,6 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match) base = ioremap(macio_resource_start(mdev, 0), 0x400); regbase = (unsigned long) base; - hwif->dev = &mdev->bus->pdev->dev; - pmif->mdev = mdev; pmif->node = mdev->ofdev.node; pmif->regbase = regbase; @@ -1174,7 +1169,8 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match) memset(&hw, 0, sizeof(hw)); pmac_ide_init_ports(&hw, pmif->regbase); hw.irq = irq; - hw.dev = &mdev->ofdev.dev; + hw.dev = &mdev->bus->pdev->dev; + hw.parent = &mdev->ofdev.dev; rc = pmac_ide_setup_device(pmif, hwif, &hw); if (rc != 0) { @@ -1274,7 +1270,6 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id) goto out_free_pmif; } - hwif->dev = &pdev->dev; pmif->mdev = NULL; 
pmif->node = np; diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c index abcfb1739d4..65fc08b6b6d 100644 --- a/drivers/ide/setup-pci.c +++ b/drivers/ide/setup-pci.c @@ -6,19 +6,15 @@ * May be copied or modified under the terms of the GNU General Public License */ -#include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/init.h> -#include <linux/timer.h> -#include <linux/mm.h> #include <linux/interrupt.h> #include <linux/ide.h> #include <linux/dma-mapping.h> #include <asm/io.h> -#include <asm/irq.h> /** * ide_setup_pci_baseregs - place a PCI IDE controller native @@ -319,25 +315,22 @@ static ide_hwif_t *ide_hwif_configure(struct pci_dev *dev, ctl = pci_resource_start(dev, 2*port+1); base = pci_resource_start(dev, 2*port); - if ((ctl && !base) || (base && !ctl)) { - printk(KERN_ERR "%s: inconsistent baseregs (BIOS) " - "for port %d, skipping\n", d->name, port); - return NULL; - } - } - if (!ctl) { + } else { /* Use default values */ ctl = port ? 0x374 : 0x3f4; base = port ? 0x170 : 0x1f0; } - hwif = ide_find_port_slot(d); - if (hwif == NULL) { - printk(KERN_ERR "%s: too many IDE interfaces, no room in " - "table\n", d->name); + if (!base || !ctl) { + printk(KERN_ERR "%s: bad PCI BARs for port %d, skipping\n", + d->name, port); return NULL; } + hwif = ide_find_port_slot(d); + if (hwif == NULL) + return NULL; + memset(&hw, 0, sizeof(hw)); hw.irq = irq; hw.dev = &dev->dev; @@ -346,8 +339,6 @@ static ide_hwif_t *ide_hwif_configure(struct pci_dev *dev, ide_init_port_hw(hwif, &hw); - hwif->dev = &dev->dev; - return hwif; } diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 636af286230..1921b8dbb24 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -179,17 +179,29 @@ config FUJITSU_LAPTOP tristate "Fujitsu Laptop Extras" depends on X86 depends on ACPI + depends on INPUT depends on BACKLIGHT_CLASS_DEVICE ---help--- This is a driver for laptops built by Fujitsu: * P2xxx/P5xxx/S6xxx/S7xxx series Lifebooks * Possibly other Fujitsu laptop models + * Tested with S6410 and S7020 - It adds support for LCD brightness control. + It adds support for LCD brightness control and some hotkeys. If you have a Fujitsu laptop, say Y or M here. +config FUJITSU_LAPTOP_DEBUG + bool "Verbose debug mode for Fujitsu Laptop Extras" + depends on FUJITSU_LAPTOP + default n + ---help--- + Enables extra debug output from the fujitsu extras driver, at the + expense of a slight increase in driver size. + + If you are not sure, say N here. + config TC1100_WMI tristate "HP Compaq TC1100 Tablet WMI Extras (EXPERIMENTAL)" depends on X86 && !X86_64 @@ -219,6 +231,23 @@ config MSI_LAPTOP If you have an MSI S270 laptop, say Y or M here. +config COMPAL_LAPTOP + tristate "Compal Laptop Extras" + depends on X86 + depends on ACPI_EC + depends on BACKLIGHT_CLASS_DEVICE + ---help--- + This is a driver for laptops built by Compal: + + Compal FL90/IFL90 + Compal FL91/IFL91 + Compal FL92/JFL92 + Compal FT00/IFT00 + + It adds support for Bluetooth, WLAN and LCD brightness control. + + If you have a Compal FL9x/IFL9x/FT00 laptop, say Y or M here. 
+ config SONY_LAPTOP tristate "Sony Laptop Extras" depends on X86 && ACPI diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index 1952875a272..a6dac6a2e7e 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -5,10 +5,11 @@ obj- := misc.o # Dummy rule to force built-in.o to be made obj-$(CONFIG_IBM_ASM) += ibmasm/ obj-$(CONFIG_HDPU_FEATURES) += hdpuftrs/ -obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o -obj-$(CONFIG_ACER_WMI) += acer-wmi.o obj-$(CONFIG_ASUS_LAPTOP) += asus-laptop.o obj-$(CONFIG_EEEPC_LAPTOP) += eeepc-laptop.o +obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o +obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o +obj-$(CONFIG_ACER_WMI) += acer-wmi.o obj-$(CONFIG_ATMEL_PWM) += atmel_pwm.o obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o diff --git a/drivers/misc/acer-wmi.c b/drivers/misc/acer-wmi.c index dd13a374992..e7a3fe508df 100644 --- a/drivers/misc/acer-wmi.c +++ b/drivers/misc/acer-wmi.c @@ -22,18 +22,18 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#define ACER_WMI_VERSION "0.1" - #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/dmi.h> +#include <linux/fb.h> #include <linux/backlight.h> #include <linux/leds.h> #include <linux/platform_device.h> #include <linux/acpi.h> #include <linux/i8042.h> +#include <linux/debugfs.h> #include <acpi/acpi_drivers.h> @@ -87,6 +87,7 @@ struct acer_quirks { * Acer ACPI method GUIDs */ #define AMW0_GUID1 "67C3371D-95A3-4C37-BB61-DD47B491DAAB" +#define AMW0_GUID2 "431F16ED-0C2B-444C-B267-27DEB140CF9C" #define WMID_GUID1 "6AF4F258-B401-42fd-BE91-3D4AC2D7C0D3" #define WMID_GUID2 "95764E09-FB56-4e83-B31A-37761F60994A" @@ -150,6 +151,12 @@ struct acer_data { int brightness; }; +struct acer_debug { + struct dentry *root; + struct dentry *devices; + u32 wmid_devices; +}; + /* Each low-level interface must define at least some of the following */ struct wmi_interface { /* The WMI device type */ @@ -160,6 +167,9 @@ struct wmi_interface { /* Private data for the current interface */ struct acer_data data; + + /* debugfs entries associated with this interface */ + struct acer_debug debug; }; /* The static interface pointer, points to the currently detected interface */ @@ -174,7 +184,7 @@ static struct wmi_interface *interface; struct quirk_entry { u8 wireless; u8 mailled; - u8 brightness; + s8 brightness; u8 bluetooth; }; @@ -198,6 +208,10 @@ static int dmi_matched(const struct dmi_system_id *dmi) static struct quirk_entry quirk_unknown = { }; +static struct quirk_entry quirk_acer_aspire_1520 = { + .brightness = -1, +}; + static struct quirk_entry quirk_acer_travelmate_2490 = { .mailled = 1, }; @@ -207,9 +221,31 @@ static struct quirk_entry quirk_medion_md_98300 = { .wireless = 1, }; +static struct quirk_entry quirk_fujitsu_amilo_li_1718 = { + .wireless = 2, +}; + static struct dmi_system_id acer_quirks[] = { { .callback = dmi_matched, + .ident = "Acer Aspire 1360", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"), + }, + .driver_data = &quirk_acer_aspire_1520, + }, + { + .callback = dmi_matched, + .ident = "Acer Aspire 1520", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1520"), + }, + .driver_data = &quirk_acer_aspire_1520, + }, + { + .callback = dmi_matched, .ident = "Acer Aspire 3100", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), @@ -300,6 +336,15 @@ static struct dmi_system_id acer_quirks[] = { }, { .callback = 
dmi_matched, + .ident = "Fujitsu Siemens Amilo Li 1718", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), + DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Li 1718"), + }, + .driver_data = &quirk_fujitsu_amilo_li_1718, + }, + { + .callback = dmi_matched, .ident = "Medion MD 98300", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "MEDION"), @@ -393,6 +438,12 @@ struct wmi_interface *iface) return AE_ERROR; *value = result & 0x1; return AE_OK; + case 2: + err = ec_read(0x71, &result); + if (err) + return AE_ERROR; + *value = result & 0x1; + return AE_OK; default: err = ec_read(0xA, &result); if (err) @@ -506,6 +557,15 @@ static acpi_status AMW0_set_capabilities(void) struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; + /* + * On laptops with this strange GUID (non Acer), normal probing doesn't + * work. + */ + if (wmi_has_guid(AMW0_GUID2)) { + interface->capability |= ACER_CAP_WIRELESS; + return AE_OK; + } + args.eax = ACER_AMW0_WRITE; args.ecx = args.edx = 0; @@ -552,7 +612,8 @@ static acpi_status AMW0_set_capabilities(void) * appear to use the same EC register for brightness, even if they * differ for wireless, etc */ - interface->capability |= ACER_CAP_BRIGHTNESS; + if (quirks->brightness >= 0) + interface->capability |= ACER_CAP_BRIGHTNESS; return AE_OK; } @@ -807,7 +868,15 @@ static int read_brightness(struct backlight_device *bd) static int update_bl_status(struct backlight_device *bd) { - set_u32(bd->props.brightness, ACER_CAP_BRIGHTNESS); + int intensity = bd->props.brightness; + + if (bd->props.power != FB_BLANK_UNBLANK) + intensity = 0; + if (bd->props.fb_blank != FB_BLANK_UNBLANK) + intensity = 0; + + set_u32(intensity, ACER_CAP_BRIGHTNESS); + return 0; } @@ -829,8 +898,9 @@ static int __devinit acer_backlight_init(struct device *dev) acer_backlight_device = bd; + bd->props.power = FB_BLANK_UNBLANK; + bd->props.brightness = max_brightness; bd->props.max_brightness = max_brightness; - bd->props.brightness = read_brightness(NULL); backlight_update_status(bd); return 0; } @@ -894,6 +964,28 @@ static DEVICE_ATTR(interface, S_IWUGO | S_IRUGO | S_IWUSR, show_interface, NULL); /* + * debugfs functions + */ +static u32 get_wmid_devices(void) +{ + struct acpi_buffer out = {ACPI_ALLOCATE_BUFFER, NULL}; + union acpi_object *obj; + acpi_status status; + + status = wmi_query_block(WMID_GUID2, 1, &out); + if (ACPI_FAILURE(status)) + return 0; + + obj = (union acpi_object *) out.pointer; + if (obj && obj->type == ACPI_TYPE_BUFFER && + obj->buffer.length == sizeof(u32)) { + return *((u32 *) obj->buffer.pointer); + } else { + return 0; + } +} + +/* * Platform device */ static int __devinit acer_platform_probe(struct platform_device *device) @@ -1052,12 +1144,40 @@ error_sysfs: return retval; } +static void remove_debugfs(void) +{ + debugfs_remove(interface->debug.devices); + debugfs_remove(interface->debug.root); +} + +static int create_debugfs(void) +{ + interface->debug.root = debugfs_create_dir("acer-wmi", NULL); + if (!interface->debug.root) { + printk(ACER_ERR "Failed to create debugfs directory"); + return -ENOMEM; + } + + interface->debug.devices = debugfs_create_u32("devices", S_IRUGO, + interface->debug.root, + &interface->debug.wmid_devices); + if (!interface->debug.devices) + goto error_debugfs; + + return 0; + +error_debugfs: + remove_debugfs(); + return -ENOMEM; +} + static int __init acer_wmi_init(void) { int err; - printk(ACER_INFO "Acer Laptop ACPI-WMI Extras version %s\n", - ACER_WMI_VERSION); + printk(ACER_INFO "Acer Laptop ACPI-WMI Extras\n"); + + 
find_quirks(); /* * Detect which ACPI-WMI interface we're using. @@ -1092,8 +1212,6 @@ static int __init acer_wmi_init(void) if (wmi_has_guid(AMW0_GUID1)) AMW0_find_mailled(); - find_quirks(); - if (!interface) { printk(ACER_ERR "No or unsupported WMI interface, unable to " "load\n"); @@ -1111,6 +1229,13 @@ static int __init acer_wmi_init(void) if (err) return err; + if (wmi_has_guid(WMID_GUID2)) { + interface->debug.wmid_devices = get_wmid_devices(); + err = create_debugfs(); + if (err) + return err; + } + /* Override any initial settings with values from the commandline */ acer_commandline_init(); diff --git a/drivers/misc/compal-laptop.c b/drivers/misc/compal-laptop.c new file mode 100644 index 00000000000..344b790a625 --- /dev/null +++ b/drivers/misc/compal-laptop.c @@ -0,0 +1,404 @@ +/*-*-linux-c-*-*/ + +/* + Copyright (C) 2008 Cezary Jackiewicz <cezary.jackiewicz (at) gmail.com> + + based on MSI driver + + Copyright (C) 2006 Lennart Poettering <mzxreary (at) 0pointer (dot) de> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + */ + +/* + * compal-laptop.c - Compal laptop support. + * + * This driver exports a few files in /sys/devices/platform/compal-laptop/: + * + * wlan - wlan subsystem state: contains 0 or 1 (rw) + * + * bluetooth - Bluetooth subsystem state: contains 0 or 1 (rw) + * + * raw - raw value taken from embedded controller register (ro) + * + * In addition to these platform device attributes the driver + * registers itself in the Linux backlight control subsystem and is + * available to userspace under /sys/class/backlight/compal-laptop/. + * + * This driver might work on other laptops produced by Compal. If you + * want to try it you can pass force=1 as argument to the module which + * will force it to load even when the DMI data doesn't identify the + * laptop as FL9x. 
+ */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/acpi.h> +#include <linux/dmi.h> +#include <linux/backlight.h> +#include <linux/platform_device.h> +#include <linux/autoconf.h> + +#define COMPAL_DRIVER_VERSION "0.2.6" + +#define COMPAL_LCD_LEVEL_MAX 8 + +#define COMPAL_EC_COMMAND_WIRELESS 0xBB +#define COMPAL_EC_COMMAND_LCD_LEVEL 0xB9 + +#define KILLSWITCH_MASK 0x10 +#define WLAN_MASK 0x01 +#define BT_MASK 0x02 + +static int force; +module_param(force, bool, 0); +MODULE_PARM_DESC(force, "Force driver load, ignore DMI data"); + +/* Hardware access */ + +static int set_lcd_level(int level) +{ + if (level < 0 || level >= COMPAL_LCD_LEVEL_MAX) + return -EINVAL; + + ec_write(COMPAL_EC_COMMAND_LCD_LEVEL, level); + + return 0; +} + +static int get_lcd_level(void) +{ + u8 result; + + ec_read(COMPAL_EC_COMMAND_LCD_LEVEL, &result); + + return (int) result; +} + +static int set_wlan_state(int state) +{ + u8 result, value; + + ec_read(COMPAL_EC_COMMAND_WIRELESS, &result); + + if ((result & KILLSWITCH_MASK) == 0) + return -EINVAL; + else { + if (state) + value = (u8) (result | WLAN_MASK); + else + value = (u8) (result & ~WLAN_MASK); + ec_write(COMPAL_EC_COMMAND_WIRELESS, value); + } + + return 0; +} + +static int set_bluetooth_state(int state) +{ + u8 result, value; + + ec_read(COMPAL_EC_COMMAND_WIRELESS, &result); + + if ((result & KILLSWITCH_MASK) == 0) + return -EINVAL; + else { + if (state) + value = (u8) (result | BT_MASK); + else + value = (u8) (result & ~BT_MASK); + ec_write(COMPAL_EC_COMMAND_WIRELESS, value); + } + + return 0; +} + +static int get_wireless_state(int *wlan, int *bluetooth) +{ + u8 result; + + ec_read(COMPAL_EC_COMMAND_WIRELESS, &result); + + if (wlan) { + if ((result & KILLSWITCH_MASK) == 0) + *wlan = 0; + else + *wlan = result & WLAN_MASK; + } + + if (bluetooth) { + if ((result & KILLSWITCH_MASK) == 0) + *bluetooth = 0; + else + *bluetooth = (result & BT_MASK) >> 1; + } + + return 0; +} + +/* Backlight device stuff */ + +static int bl_get_brightness(struct backlight_device *b) +{ + return get_lcd_level(); +} + + +static int bl_update_status(struct backlight_device *b) +{ + return set_lcd_level(b->props.brightness); +} + +static struct backlight_ops compalbl_ops = { + .get_brightness = bl_get_brightness, + .update_status = bl_update_status, +}; + +static struct backlight_device *compalbl_device; + +/* Platform device */ + +static ssize_t show_wlan(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int ret, enabled; + + ret = get_wireless_state(&enabled, NULL); + if (ret < 0) + return ret; + + return sprintf(buf, "%i\n", enabled); +} + +static ssize_t show_raw(struct device *dev, + struct device_attribute *attr, char *buf) +{ + u8 result; + + ec_read(COMPAL_EC_COMMAND_WIRELESS, &result); + + return sprintf(buf, "%i\n", result); +} + +static ssize_t show_bluetooth(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int ret, enabled; + + ret = get_wireless_state(NULL, &enabled); + if (ret < 0) + return ret; + + return sprintf(buf, "%i\n", enabled); +} + +static ssize_t store_wlan_state(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int state, ret; + + if (sscanf(buf, "%i", &state) != 1 || (state < 0 || state > 1)) + return -EINVAL; + + ret = set_wlan_state(state); + if (ret < 0) + return ret; + + return count; +} + +static ssize_t store_bluetooth_state(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int state, 
ret; + + if (sscanf(buf, "%i", &state) != 1 || (state < 0 || state > 1)) + return -EINVAL; + + ret = set_bluetooth_state(state); + if (ret < 0) + return ret; + + return count; +} + +static DEVICE_ATTR(bluetooth, 0644, show_bluetooth, store_bluetooth_state); +static DEVICE_ATTR(wlan, 0644, show_wlan, store_wlan_state); +static DEVICE_ATTR(raw, 0444, show_raw, NULL); + +static struct attribute *compal_attributes[] = { + &dev_attr_bluetooth.attr, + &dev_attr_wlan.attr, + &dev_attr_raw.attr, + NULL +}; + +static struct attribute_group compal_attribute_group = { + .attrs = compal_attributes +}; + +static struct platform_driver compal_driver = { + .driver = { + .name = "compal-laptop", + .owner = THIS_MODULE, + } +}; + +static struct platform_device *compal_device; + +/* Initialization */ + +static int dmi_check_cb(const struct dmi_system_id *id) +{ + printk(KERN_INFO "compal-laptop: Identified laptop model '%s'.\n", + id->ident); + + return 0; +} + +static struct dmi_system_id __initdata compal_dmi_table[] = { + { + .ident = "FL90/IFL90", + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "IFL90"), + DMI_MATCH(DMI_BOARD_VERSION, "IFT00"), + }, + .callback = dmi_check_cb + }, + { + .ident = "FL90/IFL90", + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "IFL90"), + DMI_MATCH(DMI_BOARD_VERSION, "REFERENCE"), + }, + .callback = dmi_check_cb + }, + { + .ident = "FL91/IFL91", + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "IFL91"), + DMI_MATCH(DMI_BOARD_VERSION, "IFT00"), + }, + .callback = dmi_check_cb + }, + { + .ident = "FL92/JFL92", + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "JFL92"), + DMI_MATCH(DMI_BOARD_VERSION, "IFT00"), + }, + .callback = dmi_check_cb + }, + { + .ident = "FT00/IFT00", + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "IFT00"), + DMI_MATCH(DMI_BOARD_VERSION, "IFT00"), + }, + .callback = dmi_check_cb + }, + { } +}; + +static int __init compal_init(void) +{ + int ret; + + if (acpi_disabled) + return -ENODEV; + + if (!force && !dmi_check_system(compal_dmi_table)) + return -ENODEV; + + /* Register backlight stuff */ + + compalbl_device = backlight_device_register("compal-laptop", NULL, NULL, + &compalbl_ops); + if (IS_ERR(compalbl_device)) + return PTR_ERR(compalbl_device); + + compalbl_device->props.max_brightness = COMPAL_LCD_LEVEL_MAX-1; + + ret = platform_driver_register(&compal_driver); + if (ret) + goto fail_backlight; + + /* Register platform stuff */ + + compal_device = platform_device_alloc("compal-laptop", -1); + if (!compal_device) { + ret = -ENOMEM; + goto fail_platform_driver; + } + + ret = platform_device_add(compal_device); + if (ret) + goto fail_platform_device1; + + ret = sysfs_create_group(&compal_device->dev.kobj, + &compal_attribute_group); + if (ret) + goto fail_platform_device2; + + printk(KERN_INFO "compal-laptop: driver "COMPAL_DRIVER_VERSION + " successfully loaded.\n"); + + return 0; + +fail_platform_device2: + + platform_device_del(compal_device); + +fail_platform_device1: + + platform_device_put(compal_device); + +fail_platform_driver: + + platform_driver_unregister(&compal_driver); + +fail_backlight: + + backlight_device_unregister(compalbl_device); + + return ret; +} + +static void __exit compal_cleanup(void) +{ + + sysfs_remove_group(&compal_device->dev.kobj, &compal_attribute_group); + platform_device_unregister(compal_device); + platform_driver_unregister(&compal_driver); + backlight_device_unregister(compalbl_device); + + printk(KERN_INFO "compal-laptop: driver unloaded.\n"); +} + +module_init(compal_init); +module_exit(compal_cleanup); + +MODULE_AUTHOR("Cezary 
Jackiewicz"); +MODULE_DESCRIPTION("Compal Laptop Support"); +MODULE_VERSION(COMPAL_DRIVER_VERSION); +MODULE_LICENSE("GPL"); + +MODULE_ALIAS("dmi:*:rnIFL90:rvrIFT00:*"); +MODULE_ALIAS("dmi:*:rnIFL90:rvrREFERENCE:*"); +MODULE_ALIAS("dmi:*:rnIFL91:rvrIFT00:*"); +MODULE_ALIAS("dmi:*:rnJFL92:rvrIFT00:*"); +MODULE_ALIAS("dmi:*:rnIFT00:rvrIFT00:*"); diff --git a/drivers/misc/eeepc-laptop.c b/drivers/misc/eeepc-laptop.c index 6d727609097..9e8d79e7e9f 100644 --- a/drivers/misc/eeepc-laptop.c +++ b/drivers/misc/eeepc-laptop.c @@ -87,7 +87,7 @@ enum { CM_ASL_LID }; -const char *cm_getv[] = { +static const char *cm_getv[] = { "WLDG", NULL, NULL, NULL, "CAMG", NULL, NULL, NULL, NULL, "PBLG", NULL, NULL, @@ -96,7 +96,7 @@ const char *cm_getv[] = { "CRDG", "LIDG" }; -const char *cm_setv[] = { +static const char *cm_setv[] = { "WLDS", NULL, NULL, NULL, "CAMS", NULL, NULL, NULL, "SDSP", "PBLS", "HDPS", NULL, diff --git a/drivers/misc/fujitsu-laptop.c b/drivers/misc/fujitsu-laptop.c index 6d14e8fe153..7a1ef6c262d 100644 --- a/drivers/misc/fujitsu-laptop.c +++ b/drivers/misc/fujitsu-laptop.c @@ -1,12 +1,14 @@ /*-*-linux-c-*-*/ /* - Copyright (C) 2007 Jonathan Woithe <jwoithe@physics.adelaide.edu.au> + Copyright (C) 2007,2008 Jonathan Woithe <jwoithe@physics.adelaide.edu.au> + Copyright (C) 2008 Peter Gruber <nokos@gmx.net> Based on earlier work: Copyright (C) 2003 Shane Spencer <shane@bogomip.com> Adrian Yee <brewt-fujitsu@brewt.org> - Templated from msi-laptop.c which is copyright by its respective authors. + Templated from msi-laptop.c and thinkpad_acpi.c which is copyright + by its respective authors. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -39,8 +41,17 @@ * registers itself in the Linux backlight control subsystem and is * available to userspace under /sys/class/backlight/fujitsu-laptop/. * - * This driver has been tested on a Fujitsu Lifebook S7020. It should - * work on most P-series and S-series Lifebooks, but YMMV. + * Hotkeys present on certain Fujitsu laptops (eg: the S6xxx series) are + * also supported by this driver. + * + * This driver has been tested on a Fujitsu Lifebook S6410 and S7020. It + * should work on most P-series and S-series Lifebooks, but YMMV. + * + * The module parameter use_alt_lcd_levels switches between different ACPI + * brightness controls which are used by different Fujitsu laptops. In most + * cases the correct method is automatically detected. "use_alt_lcd_levels=1" + * is applicable for a Fujitsu Lifebook S6410 if autodetection fails. 
+ * */ #include <linux/module.h> @@ -49,30 +60,105 @@ #include <linux/acpi.h> #include <linux/dmi.h> #include <linux/backlight.h> +#include <linux/input.h> +#include <linux/kfifo.h> +#include <linux/video_output.h> #include <linux/platform_device.h> -#define FUJITSU_DRIVER_VERSION "0.3" +#define FUJITSU_DRIVER_VERSION "0.4.2" #define FUJITSU_LCD_N_LEVELS 8 #define ACPI_FUJITSU_CLASS "fujitsu" #define ACPI_FUJITSU_HID "FUJ02B1" -#define ACPI_FUJITSU_DRIVER_NAME "Fujitsu laptop FUJ02B1 ACPI extras driver" +#define ACPI_FUJITSU_DRIVER_NAME "Fujitsu laptop FUJ02B1 ACPI brightness driver" #define ACPI_FUJITSU_DEVICE_NAME "Fujitsu FUJ02B1" - +#define ACPI_FUJITSU_HOTKEY_HID "FUJ02E3" +#define ACPI_FUJITSU_HOTKEY_DRIVER_NAME "Fujitsu laptop FUJ02E3 ACPI hotkeys driver" +#define ACPI_FUJITSU_HOTKEY_DEVICE_NAME "Fujitsu FUJ02E3" + +#define ACPI_FUJITSU_NOTIFY_CODE1 0x80 + +#define ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS 0x86 +#define ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS 0x87 + +/* Hotkey details */ +#define LOCK_KEY 0x410 /* codes for the keys in the GIRB register */ +#define DISPLAY_KEY 0x411 /* keys are mapped to KEY_SCREENLOCK (the key with the key symbol) */ +#define ENERGY_KEY 0x412 /* KEY_MEDIA (the key with the laptop symbol, KEY_EMAIL (E key)) */ +#define REST_KEY 0x413 /* KEY_SUSPEND (R key) */ + +#define MAX_HOTKEY_RINGBUFFER_SIZE 100 +#define RINGBUFFERSIZE 40 + +/* Debugging */ +#define FUJLAPTOP_LOG ACPI_FUJITSU_HID ": " +#define FUJLAPTOP_ERR KERN_ERR FUJLAPTOP_LOG +#define FUJLAPTOP_NOTICE KERN_NOTICE FUJLAPTOP_LOG +#define FUJLAPTOP_INFO KERN_INFO FUJLAPTOP_LOG +#define FUJLAPTOP_DEBUG KERN_DEBUG FUJLAPTOP_LOG + +#define FUJLAPTOP_DBG_ALL 0xffff +#define FUJLAPTOP_DBG_ERROR 0x0001 +#define FUJLAPTOP_DBG_WARN 0x0002 +#define FUJLAPTOP_DBG_INFO 0x0004 +#define FUJLAPTOP_DBG_TRACE 0x0008 + +#define dbg_printk(a_dbg_level, format, arg...) \ + do { if (dbg_level & a_dbg_level) \ + printk(FUJLAPTOP_DEBUG "%s: " format, __func__ , ## arg); \ + } while (0) +#ifdef CONFIG_FUJITSU_LAPTOP_DEBUG +#define vdbg_printk(a_dbg_level, format, arg...) \ + dbg_printk(a_dbg_level, format, ## arg) +#else +#define vdbg_printk(a_dbg_level, format, arg...) 
+#endif + +/* Device controlling the backlight and associated keys */ struct fujitsu_t { acpi_handle acpi_handle; + struct acpi_device *dev; + struct input_dev *input; + char phys[32]; struct backlight_device *bl_device; struct platform_device *pf_device; - unsigned long fuj02b1_state; + unsigned int max_brightness; unsigned int brightness_changed; unsigned int brightness_level; }; static struct fujitsu_t *fujitsu; +static int use_alt_lcd_levels = -1; +static int disable_brightness_keys = -1; +static int disable_brightness_adjust = -1; + +/* Device used to access other hotkeys on the laptop */ +struct fujitsu_hotkey_t { + acpi_handle acpi_handle; + struct acpi_device *dev; + struct input_dev *input; + char phys[32]; + struct platform_device *pf_device; + struct kfifo *fifo; + spinlock_t fifo_lock; + + unsigned int irb; /* info about the pressed buttons */ +}; -/* Hardware access */ +static struct fujitsu_hotkey_t *fujitsu_hotkey; + +static void acpi_fujitsu_hotkey_notify(acpi_handle handle, u32 event, + void *data); + +#ifdef CONFIG_FUJITSU_LAPTOP_DEBUG +static u32 dbg_level = 0x03; +#endif + +static void acpi_fujitsu_notify(acpi_handle handle, u32 event, void *data); + +/* Hardware access for LCD brightness control */ static int set_lcd_level(int level) { @@ -81,7 +167,10 @@ static int set_lcd_level(int level) struct acpi_object_list arg_list = { 1, &arg0 }; acpi_handle handle = NULL; - if (level < 0 || level >= FUJITSU_LCD_N_LEVELS) + vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBLL [%d]\n", + level); + + if (level < 0 || level >= fujitsu->max_brightness) return -EINVAL; if (!fujitsu) @@ -89,7 +178,38 @@ static int set_lcd_level(int level) status = acpi_get_handle(fujitsu->acpi_handle, "SBLL", &handle); if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "SBLL not present\n")); + vdbg_printk(FUJLAPTOP_DBG_ERROR, "SBLL not present\n"); + return -ENODEV; + } + + arg0.integer.value = level; + + status = acpi_evaluate_object(handle, NULL, &arg_list, NULL); + if (ACPI_FAILURE(status)) + return -ENODEV; + + return 0; +} + +static int set_lcd_level_alt(int level) +{ + acpi_status status = AE_OK; + union acpi_object arg0 = { ACPI_TYPE_INTEGER }; + struct acpi_object_list arg_list = { 1, &arg0 }; + acpi_handle handle = NULL; + + vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBL2 [%d]\n", + level); + + if (level < 0 || level >= fujitsu->max_brightness) + return -EINVAL; + + if (!fujitsu) + return -EINVAL; + + status = acpi_get_handle(fujitsu->acpi_handle, "SBL2", &handle); + if (ACPI_FAILURE(status)) { + vdbg_printk(FUJLAPTOP_DBG_ERROR, "SBL2 not present\n"); return -ENODEV; } @@ -107,13 +227,52 @@ static int get_lcd_level(void) unsigned long state = 0; acpi_status status = AE_OK; - // Get the Brightness + vdbg_printk(FUJLAPTOP_DBG_TRACE, "get lcd level via GBLL\n"); + status = acpi_evaluate_integer(fujitsu->acpi_handle, "GBLL", NULL, &state); if (status < 0) return status; - fujitsu->fuj02b1_state = state; + fujitsu->brightness_level = state & 0x0fffffff; + + if (state & 0x80000000) + fujitsu->brightness_changed = 1; + else + fujitsu->brightness_changed = 0; + + return fujitsu->brightness_level; +} + +static int get_max_brightness(void) +{ + unsigned long state = 0; + acpi_status status = AE_OK; + + vdbg_printk(FUJLAPTOP_DBG_TRACE, "get max lcd level via RBLL\n"); + + status = + acpi_evaluate_integer(fujitsu->acpi_handle, "RBLL", NULL, &state); + if (status < 0) + return status; + + fujitsu->max_brightness = state; + + return fujitsu->max_brightness; +} + +static int 
get_lcd_level_alt(void) +{ + unsigned long state = 0; + acpi_status status = AE_OK; + + vdbg_printk(FUJLAPTOP_DBG_TRACE, "get lcd level via GBLS\n"); + + status = + acpi_evaluate_integer(fujitsu->acpi_handle, "GBLS", NULL, &state); + if (status < 0) + return status; + fujitsu->brightness_level = state & 0x0fffffff; if (state & 0x80000000) @@ -128,12 +287,18 @@ static int get_lcd_level(void) static int bl_get_brightness(struct backlight_device *b) { - return get_lcd_level(); + if (use_alt_lcd_levels) + return get_lcd_level_alt(); + else + return get_lcd_level(); } static int bl_update_status(struct backlight_device *b) { - return set_lcd_level(b->props.brightness); + if (use_alt_lcd_levels) + return set_lcd_level_alt(b->props.brightness); + else + return set_lcd_level(b->props.brightness); } static struct backlight_ops fujitsubl_ops = { @@ -141,7 +306,35 @@ static struct backlight_ops fujitsubl_ops = { .update_status = bl_update_status, }; -/* Platform device */ +/* Platform LCD brightness device */ + +static ssize_t +show_max_brightness(struct device *dev, + struct device_attribute *attr, char *buf) +{ + + int ret; + + ret = get_max_brightness(); + if (ret < 0) + return ret; + + return sprintf(buf, "%i\n", ret); +} + +static ssize_t +show_brightness_changed(struct device *dev, + struct device_attribute *attr, char *buf) +{ + + int ret; + + ret = fujitsu->brightness_changed; + if (ret < 0) + return ret; + + return sprintf(buf, "%i\n", ret); +} static ssize_t show_lcd_level(struct device *dev, struct device_attribute *attr, char *buf) @@ -149,7 +342,10 @@ static ssize_t show_lcd_level(struct device *dev, int ret; - ret = get_lcd_level(); + if (use_alt_lcd_levels) + ret = get_lcd_level_alt(); + else + ret = get_lcd_level(); if (ret < 0) return ret; @@ -164,19 +360,61 @@ static ssize_t store_lcd_level(struct device *dev, int level, ret; if (sscanf(buf, "%i", &level) != 1 - || (level < 0 || level >= FUJITSU_LCD_N_LEVELS)) + || (level < 0 || level >= fujitsu->max_brightness)) return -EINVAL; - ret = set_lcd_level(level); + if (use_alt_lcd_levels) + ret = set_lcd_level_alt(level); + else + ret = set_lcd_level(level); + if (ret < 0) + return ret; + + if (use_alt_lcd_levels) + ret = get_lcd_level_alt(); + else + ret = get_lcd_level(); if (ret < 0) return ret; return count; } +/* Hardware access for hotkey device */ + +static int get_irb(void) +{ + unsigned long state = 0; + acpi_status status = AE_OK; + + vdbg_printk(FUJLAPTOP_DBG_TRACE, "Get irb\n"); + + status = + acpi_evaluate_integer(fujitsu_hotkey->acpi_handle, "GIRB", NULL, + &state); + if (status < 0) + return status; + + fujitsu_hotkey->irb = state; + + return fujitsu_hotkey->irb; +} + +static ssize_t +ignore_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + return count; +} + +static DEVICE_ATTR(max_brightness, 0444, show_max_brightness, ignore_store); +static DEVICE_ATTR(brightness_changed, 0444, show_brightness_changed, + ignore_store); static DEVICE_ATTR(lcd_level, 0644, show_lcd_level, store_lcd_level); static struct attribute *fujitsupf_attributes[] = { + &dev_attr_brightness_changed.attr, + &dev_attr_max_brightness.attr, &dev_attr_lcd_level.attr, NULL }; @@ -192,14 +430,52 @@ static struct platform_driver fujitsupf_driver = { } }; -/* ACPI device */ +static int dmi_check_cb_s6410(const struct dmi_system_id *id) +{ + acpi_handle handle; + int have_blnf; + printk(KERN_INFO "fujitsu-laptop: Identified laptop model '%s'.\n", + id->ident); + have_blnf = ACPI_SUCCESS + (acpi_get_handle(NULL, 
"\\_SB.PCI0.GFX0.LCD.BLNF", &handle)); + if (use_alt_lcd_levels == -1) { + vdbg_printk(FUJLAPTOP_DBG_TRACE, "auto-detecting usealt\n"); + use_alt_lcd_levels = 1; + } + if (disable_brightness_keys == -1) { + vdbg_printk(FUJLAPTOP_DBG_TRACE, + "auto-detecting disable_keys\n"); + disable_brightness_keys = have_blnf ? 1 : 0; + } + if (disable_brightness_adjust == -1) { + vdbg_printk(FUJLAPTOP_DBG_TRACE, + "auto-detecting disable_adjust\n"); + disable_brightness_adjust = have_blnf ? 0 : 1; + } + return 0; +} + +static struct dmi_system_id __initdata fujitsu_dmi_table[] = { + { + .ident = "Fujitsu Siemens", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK S6410"), + }, + .callback = dmi_check_cb_s6410}, + {} +}; + +/* ACPI device for LCD brightness control */ static int acpi_fujitsu_add(struct acpi_device *device) { + acpi_status status; + acpi_handle handle; int result = 0; int state = 0; - - ACPI_FUNCTION_TRACE("acpi_fujitsu_add"); + struct input_dev *input; + int error; if (!device) return -EINVAL; @@ -209,10 +485,42 @@ static int acpi_fujitsu_add(struct acpi_device *device) sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS); acpi_driver_data(device) = fujitsu; + status = acpi_install_notify_handler(device->handle, + ACPI_DEVICE_NOTIFY, + acpi_fujitsu_notify, fujitsu); + + if (ACPI_FAILURE(status)) { + printk(KERN_ERR "Error installing notify handler\n"); + error = -ENODEV; + goto err_stop; + } + + fujitsu->input = input = input_allocate_device(); + if (!input) { + error = -ENOMEM; + goto err_uninstall_notify; + } + + snprintf(fujitsu->phys, sizeof(fujitsu->phys), + "%s/video/input0", acpi_device_hid(device)); + + input->name = acpi_device_name(device); + input->phys = fujitsu->phys; + input->id.bustype = BUS_HOST; + input->id.product = 0x06; + input->dev.parent = &device->dev; + input->evbit[0] = BIT(EV_KEY); + set_bit(KEY_BRIGHTNESSUP, input->keybit); + set_bit(KEY_BRIGHTNESSDOWN, input->keybit); + set_bit(KEY_UNKNOWN, input->keybit); + + error = input_register_device(input); + if (error) + goto err_free_input_dev; + result = acpi_bus_get_power(fujitsu->acpi_handle, &state); if (result) { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, - "Error reading power state\n")); + printk(KERN_ERR "Error reading power state\n"); goto end; } @@ -220,22 +528,373 @@ static int acpi_fujitsu_add(struct acpi_device *device) acpi_device_name(device), acpi_device_bid(device), !device->power.state ? "on" : "off"); - end: + fujitsu->dev = device; + + if (ACPI_SUCCESS + (acpi_get_handle(device->handle, METHOD_NAME__INI, &handle))) { + vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n"); + if (ACPI_FAILURE + (acpi_evaluate_object + (device->handle, METHOD_NAME__INI, NULL, NULL))) + printk(KERN_ERR "_INI Method failed\n"); + } + + /* do config (detect defaults) */ + dmi_check_system(fujitsu_dmi_table); + use_alt_lcd_levels = use_alt_lcd_levels == 1 ? 1 : 0; + disable_brightness_keys = disable_brightness_keys == 1 ? 1 : 0; + disable_brightness_adjust = disable_brightness_adjust == 1 ? 
1 : 0; + vdbg_printk(FUJLAPTOP_DBG_INFO, + "config: [alt interface: %d], [key disable: %d], [adjust disable: %d]\n", + use_alt_lcd_levels, disable_brightness_keys, + disable_brightness_adjust); + + if (get_max_brightness() <= 0) + fujitsu->max_brightness = FUJITSU_LCD_N_LEVELS; + if (use_alt_lcd_levels) + get_lcd_level_alt(); + else + get_lcd_level(); + + return result; + +end: +err_free_input_dev: + input_free_device(input); +err_uninstall_notify: + acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY, + acpi_fujitsu_notify); +err_stop: return result; } static int acpi_fujitsu_remove(struct acpi_device *device, int type) { - ACPI_FUNCTION_TRACE("acpi_fujitsu_remove"); + acpi_status status; + struct fujitsu_t *fujitsu = NULL; if (!device || !acpi_driver_data(device)) return -EINVAL; + + fujitsu = acpi_driver_data(device); + + status = acpi_remove_notify_handler(fujitsu->acpi_handle, + ACPI_DEVICE_NOTIFY, + acpi_fujitsu_notify); + + if (!device || !acpi_driver_data(device)) + return -EINVAL; + fujitsu->acpi_handle = NULL; return 0; } +/* Brightness notify */ + +static void acpi_fujitsu_notify(acpi_handle handle, u32 event, void *data) +{ + struct input_dev *input; + int keycode; + int oldb, newb; + + input = fujitsu->input; + + switch (event) { + case ACPI_FUJITSU_NOTIFY_CODE1: + keycode = 0; + oldb = fujitsu->brightness_level; + get_lcd_level(); /* the alt version always yields changed */ + newb = fujitsu->brightness_level; + + vdbg_printk(FUJLAPTOP_DBG_TRACE, + "brightness button event [%i -> %i (%i)]\n", + oldb, newb, fujitsu->brightness_changed); + + if (oldb == newb && fujitsu->brightness_changed) { + keycode = 0; + if (disable_brightness_keys != 1) { + if (oldb == 0) { + acpi_bus_generate_proc_event(fujitsu-> + dev, + ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS, + 0); + keycode = KEY_BRIGHTNESSDOWN; + } else if (oldb == + (fujitsu->max_brightness) - 1) { + acpi_bus_generate_proc_event(fujitsu-> + dev, + ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS, + 0); + keycode = KEY_BRIGHTNESSUP; + } + } + } else if (oldb < newb) { + if (disable_brightness_adjust != 1) { + if (use_alt_lcd_levels) + set_lcd_level_alt(newb); + else + set_lcd_level(newb); + } + if (disable_brightness_keys != 1) { + acpi_bus_generate_proc_event(fujitsu->dev, + ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS, + 0); + keycode = KEY_BRIGHTNESSUP; + } + } else if (oldb > newb) { + if (disable_brightness_adjust != 1) { + if (use_alt_lcd_levels) + set_lcd_level_alt(newb); + else + set_lcd_level(newb); + } + if (disable_brightness_keys != 1) { + acpi_bus_generate_proc_event(fujitsu->dev, + ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS, + 0); + keycode = KEY_BRIGHTNESSDOWN; + } + } else { + keycode = KEY_UNKNOWN; + } + break; + default: + keycode = KEY_UNKNOWN; + vdbg_printk(FUJLAPTOP_DBG_WARN, + "unsupported event [0x%x]\n", event); + break; + } + + if (keycode != 0) { + input_report_key(input, keycode, 1); + input_sync(input); + input_report_key(input, keycode, 0); + input_sync(input); + } + + return; +} + +/* ACPI device for hotkey handling */ + +static int acpi_fujitsu_hotkey_add(struct acpi_device *device) +{ + acpi_status status; + acpi_handle handle; + int result = 0; + int state = 0; + struct input_dev *input; + int error; + int i; + + if (!device) + return -EINVAL; + + fujitsu_hotkey->acpi_handle = device->handle; + sprintf(acpi_device_name(device), "%s", + ACPI_FUJITSU_HOTKEY_DEVICE_NAME); + sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS); + acpi_driver_data(device) = fujitsu_hotkey; + + status = acpi_install_notify_handler(device->handle, + 
ACPI_DEVICE_NOTIFY, + acpi_fujitsu_hotkey_notify, + fujitsu_hotkey); + + if (ACPI_FAILURE(status)) { + printk(KERN_ERR "Error installing notify handler\n"); + error = -ENODEV; + goto err_stop; + } + + /* kfifo */ + spin_lock_init(&fujitsu_hotkey->fifo_lock); + fujitsu_hotkey->fifo = + kfifo_alloc(RINGBUFFERSIZE * sizeof(int), GFP_KERNEL, + &fujitsu_hotkey->fifo_lock); + if (IS_ERR(fujitsu_hotkey->fifo)) { + printk(KERN_ERR "kfifo_alloc failed\n"); + error = PTR_ERR(fujitsu_hotkey->fifo); + goto err_stop; + } + + fujitsu_hotkey->input = input = input_allocate_device(); + if (!input) { + error = -ENOMEM; + goto err_uninstall_notify; + } + + snprintf(fujitsu_hotkey->phys, sizeof(fujitsu_hotkey->phys), + "%s/video/input0", acpi_device_hid(device)); + + input->name = acpi_device_name(device); + input->phys = fujitsu_hotkey->phys; + input->id.bustype = BUS_HOST; + input->id.product = 0x06; + input->dev.parent = &device->dev; + input->evbit[0] = BIT(EV_KEY); + set_bit(KEY_SCREENLOCK, input->keybit); + set_bit(KEY_MEDIA, input->keybit); + set_bit(KEY_EMAIL, input->keybit); + set_bit(KEY_SUSPEND, input->keybit); + set_bit(KEY_UNKNOWN, input->keybit); + + error = input_register_device(input); + if (error) + goto err_free_input_dev; + + result = acpi_bus_get_power(fujitsu_hotkey->acpi_handle, &state); + if (result) { + printk(KERN_ERR "Error reading power state\n"); + goto end; + } + + printk(KERN_INFO PREFIX "%s [%s] (%s)\n", + acpi_device_name(device), acpi_device_bid(device), + !device->power.state ? "on" : "off"); + + fujitsu_hotkey->dev = device; + + if (ACPI_SUCCESS + (acpi_get_handle(device->handle, METHOD_NAME__INI, &handle))) { + vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n"); + if (ACPI_FAILURE + (acpi_evaluate_object + (device->handle, METHOD_NAME__INI, NULL, NULL))) + printk(KERN_ERR "_INI Method failed\n"); + } + + i = 0; /* Discard hotkey ringbuffer */ + while (get_irb() != 0 && (i++) < MAX_HOTKEY_RINGBUFFER_SIZE) ; + vdbg_printk(FUJLAPTOP_DBG_INFO, "Discarded %i ringbuffer entries\n", i); + + return result; + +end: +err_free_input_dev: + input_free_device(input); +err_uninstall_notify: + acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY, + acpi_fujitsu_hotkey_notify); + kfifo_free(fujitsu_hotkey->fifo); +err_stop: + + return result; +} + +static int acpi_fujitsu_hotkey_remove(struct acpi_device *device, int type) +{ + acpi_status status; + struct fujitsu_hotkey_t *fujitsu_hotkey = NULL; + + if (!device || !acpi_driver_data(device)) + return -EINVAL; + + fujitsu_hotkey = acpi_driver_data(device); + + status = acpi_remove_notify_handler(fujitsu_hotkey->acpi_handle, + ACPI_DEVICE_NOTIFY, + acpi_fujitsu_hotkey_notify); + + fujitsu_hotkey->acpi_handle = NULL; + + kfifo_free(fujitsu_hotkey->fifo); + + return 0; +} + +static void acpi_fujitsu_hotkey_notify(acpi_handle handle, u32 event, + void *data) +{ + struct input_dev *input; + int keycode, keycode_r; + unsigned int irb = 1; + int i, status; + + input = fujitsu_hotkey->input; + + vdbg_printk(FUJLAPTOP_DBG_TRACE, "Hotkey event\n"); + + switch (event) { + case ACPI_FUJITSU_NOTIFY_CODE1: + i = 0; + while ((irb = get_irb()) != 0 + && (i++) < MAX_HOTKEY_RINGBUFFER_SIZE) { + vdbg_printk(FUJLAPTOP_DBG_TRACE, "GIRB result [%x]\n", + irb); + + switch (irb & 0x4ff) { + case LOCK_KEY: + keycode = KEY_SCREENLOCK; + break; + case DISPLAY_KEY: + keycode = KEY_MEDIA; + break; + case ENERGY_KEY: + keycode = KEY_EMAIL; + break; + case REST_KEY: + keycode = KEY_SUSPEND; + break; + case 0: + keycode = 0; + break; + default: + 
vdbg_printk(FUJLAPTOP_DBG_WARN, + "Unknown GIRB result [%x]\n", irb); + keycode = -1; + break; + } + if (keycode > 0) { + vdbg_printk(FUJLAPTOP_DBG_TRACE, + "Push keycode into ringbuffer [%d]\n", + keycode); + status = kfifo_put(fujitsu_hotkey->fifo, + (unsigned char *)&keycode, + sizeof(keycode)); + if (status != sizeof(keycode)) { + vdbg_printk(FUJLAPTOP_DBG_WARN, + "Could not push keycode [0x%x]\n", + keycode); + } else { + input_report_key(input, keycode, 1); + input_sync(input); + } + } else if (keycode == 0) { + while ((status = + kfifo_get + (fujitsu_hotkey->fifo, (unsigned char *) + &keycode_r, + sizeof + (keycode_r))) == sizeof(keycode_r)) { + input_report_key(input, keycode_r, 0); + input_sync(input); + vdbg_printk(FUJLAPTOP_DBG_TRACE, + "Pop keycode from ringbuffer [%d]\n", + keycode_r); + } + } + } + + break; + default: + keycode = KEY_UNKNOWN; + vdbg_printk(FUJLAPTOP_DBG_WARN, + "Unsupported event [0x%x]\n", event); + input_report_key(input, keycode, 1); + input_sync(input); + input_report_key(input, keycode, 0); + input_sync(input); + break; + } + + return; +} + +/* Initialization */ + static const struct acpi_device_id fujitsu_device_ids[] = { {ACPI_FUJITSU_HID, 0}, {"", 0}, @@ -251,11 +910,24 @@ static struct acpi_driver acpi_fujitsu_driver = { }, }; -/* Initialization */ +static const struct acpi_device_id fujitsu_hotkey_device_ids[] = { + {ACPI_FUJITSU_HOTKEY_HID, 0}, + {"", 0}, +}; + +static struct acpi_driver acpi_fujitsu_hotkey_driver = { + .name = ACPI_FUJITSU_HOTKEY_DRIVER_NAME, + .class = ACPI_FUJITSU_CLASS, + .ids = fujitsu_hotkey_device_ids, + .ops = { + .add = acpi_fujitsu_hotkey_add, + .remove = acpi_fujitsu_hotkey_remove, + }, +}; static int __init fujitsu_init(void) { - int ret, result; + int ret, result, max_brightness; if (acpi_disabled) return -ENODEV; @@ -271,19 +943,6 @@ static int __init fujitsu_init(void) goto fail_acpi; } - /* Register backlight stuff */ - - fujitsu->bl_device = - backlight_device_register("fujitsu-laptop", NULL, NULL, - &fujitsubl_ops); - if (IS_ERR(fujitsu->bl_device)) - return PTR_ERR(fujitsu->bl_device); - - fujitsu->bl_device->props.max_brightness = FUJITSU_LCD_N_LEVELS - 1; - ret = platform_driver_register(&fujitsupf_driver); - if (ret) - goto fail_backlight; - /* Register platform stuff */ fujitsu->pf_device = platform_device_alloc("fujitsu-laptop", -1); @@ -302,28 +961,68 @@ static int __init fujitsu_init(void) if (ret) goto fail_platform_device2; + /* Register backlight stuff */ + + fujitsu->bl_device = + backlight_device_register("fujitsu-laptop", NULL, NULL, + &fujitsubl_ops); + if (IS_ERR(fujitsu->bl_device)) + return PTR_ERR(fujitsu->bl_device); + + max_brightness = fujitsu->max_brightness; + + fujitsu->bl_device->props.max_brightness = max_brightness - 1; + fujitsu->bl_device->props.brightness = fujitsu->brightness_level; + + ret = platform_driver_register(&fujitsupf_driver); + if (ret) + goto fail_backlight; + + /* Register hotkey driver */ + + fujitsu_hotkey = kmalloc(sizeof(struct fujitsu_hotkey_t), GFP_KERNEL); + if (!fujitsu_hotkey) { + ret = -ENOMEM; + goto fail_hotkey; + } + memset(fujitsu_hotkey, 0, sizeof(struct fujitsu_hotkey_t)); + + result = acpi_bus_register_driver(&acpi_fujitsu_hotkey_driver); + if (result < 0) { + ret = -ENODEV; + goto fail_hotkey1; + } + printk(KERN_INFO "fujitsu-laptop: driver " FUJITSU_DRIVER_VERSION " successfully loaded.\n"); return 0; - fail_platform_device2: +fail_hotkey1: - platform_device_del(fujitsu->pf_device); - - fail_platform_device1: - - 
platform_device_put(fujitsu->pf_device); + kfree(fujitsu_hotkey); - fail_platform_driver: +fail_hotkey: platform_driver_unregister(&fujitsupf_driver); - fail_backlight: +fail_backlight: backlight_device_unregister(fujitsu->bl_device); - fail_acpi: +fail_platform_device2: + + platform_device_del(fujitsu->pf_device); + +fail_platform_device1: + + platform_device_put(fujitsu->pf_device); + +fail_platform_driver: + + acpi_bus_unregister_driver(&acpi_fujitsu_driver); + +fail_acpi: kfree(fujitsu); @@ -342,19 +1041,43 @@ static void __exit fujitsu_cleanup(void) kfree(fujitsu); + acpi_bus_unregister_driver(&acpi_fujitsu_hotkey_driver); + + kfree(fujitsu_hotkey); + printk(KERN_INFO "fujitsu-laptop: driver unloaded.\n"); } module_init(fujitsu_init); module_exit(fujitsu_cleanup); -MODULE_AUTHOR("Jonathan Woithe"); +module_param(use_alt_lcd_levels, uint, 0644); +MODULE_PARM_DESC(use_alt_lcd_levels, + "Use alternative interface for lcd_levels (needed for Lifebook s6410)."); +module_param(disable_brightness_keys, uint, 0644); +MODULE_PARM_DESC(disable_brightness_keys, + "Disable brightness keys (eg. if they are already handled by the generic ACPI_VIDEO device)."); +module_param(disable_brightness_adjust, uint, 0644); +MODULE_PARM_DESC(disable_brightness_adjust, "Disable brightness adjustment ."); +#ifdef CONFIG_FUJITSU_LAPTOP_DEBUG +module_param_named(debug, dbg_level, uint, 0644); +MODULE_PARM_DESC(debug, "Sets debug level bit-mask"); +#endif + +MODULE_AUTHOR("Jonathan Woithe, Peter Gruber"); MODULE_DESCRIPTION("Fujitsu laptop extras support"); MODULE_VERSION(FUJITSU_DRIVER_VERSION); MODULE_LICENSE("GPL"); +MODULE_ALIAS + ("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1D3:*:cvrS6410:*"); +MODULE_ALIAS + ("dmi:*:svnFUJITSU:*:pvr:rvnFUJITSU:rnFJNB19C:*:cvrS7020:*"); + static struct pnp_device_id pnp_ids[] = { { .id = "FUJ02bf" }, + { .id = "FUJ02B1" }, + { .id = "FUJ02E3" }, { .id = "" } }; MODULE_DEVICE_TABLE(pnp, pnp_ids); diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index f9ad960d7c1..66e5a5487c2 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -2,7 +2,7 @@ * Block driver for media (i.e., flash cards) * * Copyright 2002 Hewlett-Packard Company - * Copyright 2005-2007 Pierre Ossman + * Copyright 2005-2008 Pierre Ossman * * Use consistent with the GNU GPL is permitted, * provided that this copyright notice is @@ -237,17 +237,6 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) if (brq.data.blocks > card->host->max_blk_count) brq.data.blocks = card->host->max_blk_count; - /* - * If the host doesn't support multiple block writes, force - * block writes to single block. SD cards are excepted from - * this rule as they support querying the number of - * successfully written sectors. - */ - if (rq_data_dir(req) != READ && - !(card->host->caps & MMC_CAP_MULTIWRITE) && - !mmc_card_sd(card)) - brq.data.blocks = 1; - if (brq.data.blocks > 1) { /* SPI multiblock writes terminate using a special * token, not a STOP_TRANSMISSION request. @@ -296,22 +285,24 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) mmc_queue_bounce_post(mq); + /* + * Check for errors here, but don't jump to cmd_err + * until later as we need to wait for the card to leave + * programming mode even when things go wrong. 
+ */ if (brq.cmd.error) { printk(KERN_ERR "%s: error %d sending read/write command\n", req->rq_disk->disk_name, brq.cmd.error); - goto cmd_err; } if (brq.data.error) { printk(KERN_ERR "%s: error %d transferring data\n", req->rq_disk->disk_name, brq.data.error); - goto cmd_err; } if (brq.stop.error) { printk(KERN_ERR "%s: error %d sending stop command\n", req->rq_disk->disk_name, brq.stop.error); - goto cmd_err; } if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) { @@ -344,6 +335,9 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) #endif } + if (brq.cmd.error || brq.data.error || brq.stop.error) + goto cmd_err; + /* * A block was successfully transferred. */ @@ -362,30 +356,32 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) * mark the known good sectors as ok. * * If the card is not SD, we can still ok written sectors - * if the controller can do proper error reporting. + * as reported by the controller (which might be less than + * the real number of written sectors, but never more). * * For reads we just fail the entire chunk as that should * be safe in all cases. */ - if (rq_data_dir(req) != READ && mmc_card_sd(card)) { - u32 blocks; - unsigned int bytes; - - blocks = mmc_sd_num_wr_blocks(card); - if (blocks != (u32)-1) { - if (card->csd.write_partial) - bytes = blocks << md->block_bits; - else - bytes = blocks << 9; + if (rq_data_dir(req) != READ) { + if (mmc_card_sd(card)) { + u32 blocks; + unsigned int bytes; + + blocks = mmc_sd_num_wr_blocks(card); + if (blocks != (u32)-1) { + if (card->csd.write_partial) + bytes = blocks << md->block_bits; + else + bytes = blocks << 9; + spin_lock_irq(&md->lock); + ret = __blk_end_request(req, 0, bytes); + spin_unlock_irq(&md->lock); + } + } else { spin_lock_irq(&md->lock); - ret = __blk_end_request(req, 0, bytes); + ret = __blk_end_request(req, 0, brq.data.bytes_xfered); spin_unlock_irq(&md->lock); } - } else if (rq_data_dir(req) != READ && - (card->host->caps & MMC_CAP_MULTIWRITE)) { - spin_lock_irq(&md->lock); - ret = __blk_end_request(req, 0, brq.data.bytes_xfered); - spin_unlock_irq(&md->lock); } mmc_release_host(card->host); diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c index ffadee549a4..d6b9b486417 100644 --- a/drivers/mmc/card/mmc_test.c +++ b/drivers/mmc/card/mmc_test.c @@ -1,7 +1,7 @@ /* * linux/drivers/mmc/card/mmc_test.c * - * Copyright 2007 Pierre Ossman + * Copyright 2007-2008 Pierre Ossman * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -26,13 +26,17 @@ struct mmc_test_card { struct mmc_card *card; + u8 scratch[BUFFER_SIZE]; u8 *buffer; }; /*******************************************************************/ -/* Helper functions */ +/* General helper functions */ /*******************************************************************/ +/* + * Configure correct block size in card + */ static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size) { struct mmc_command cmd; @@ -48,117 +52,61 @@ static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size) return 0; } -static int __mmc_test_transfer(struct mmc_test_card *test, int write, - unsigned broken_xfer, u8 *buffer, unsigned addr, - unsigned blocks, unsigned blksz) +/* + * Fill in the mmc_request structure given a set of transfer parameters. 
+ */ +static void mmc_test_prepare_mrq(struct mmc_test_card *test, + struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len, + unsigned dev_addr, unsigned blocks, unsigned blksz, int write) { - int ret, busy; - - struct mmc_request mrq; - struct mmc_command cmd; - struct mmc_command stop; - struct mmc_data data; - - struct scatterlist sg; - - memset(&mrq, 0, sizeof(struct mmc_request)); - - mrq.cmd = &cmd; - mrq.data = &data; - - memset(&cmd, 0, sizeof(struct mmc_command)); + BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop); - if (broken_xfer) { - if (blocks > 1) { - cmd.opcode = write ? - MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK; - } else { - cmd.opcode = MMC_SEND_STATUS; - } + if (blocks > 1) { + mrq->cmd->opcode = write ? + MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK; } else { - if (blocks > 1) { - cmd.opcode = write ? - MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK; - } else { - cmd.opcode = write ? - MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK; - } + mrq->cmd->opcode = write ? + MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK; } - if (broken_xfer && blocks == 1) - cmd.arg = test->card->rca << 16; - else - cmd.arg = addr; - cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; + mrq->cmd->arg = dev_addr; + mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC; - memset(&stop, 0, sizeof(struct mmc_command)); - - if (!broken_xfer && (blocks > 1)) { - stop.opcode = MMC_STOP_TRANSMISSION; - stop.arg = 0; - stop.flags = MMC_RSP_R1B | MMC_CMD_AC; - - mrq.stop = &stop; + if (blocks == 1) + mrq->stop = NULL; + else { + mrq->stop->opcode = MMC_STOP_TRANSMISSION; + mrq->stop->arg = 0; + mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC; } - memset(&data, 0, sizeof(struct mmc_data)); - - data.blksz = blksz; - data.blocks = blocks; - data.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ; - data.sg = &sg; - data.sg_len = 1; - - sg_init_one(&sg, buffer, blocks * blksz); - - mmc_set_data_timeout(&data, test->card); + mrq->data->blksz = blksz; + mrq->data->blocks = blocks; + mrq->data->flags = write ? 
MMC_DATA_WRITE : MMC_DATA_READ; + mrq->data->sg = sg; + mrq->data->sg_len = sg_len; - mmc_wait_for_req(test->card->host, &mrq); - - ret = 0; - - if (broken_xfer) { - if (!ret && cmd.error) - ret = cmd.error; - if (!ret && data.error == 0) - ret = RESULT_FAIL; - if (!ret && data.error != -ETIMEDOUT) - ret = data.error; - if (!ret && stop.error) - ret = stop.error; - if (blocks > 1) { - if (!ret && data.bytes_xfered > blksz) - ret = RESULT_FAIL; - } else { - if (!ret && data.bytes_xfered > 0) - ret = RESULT_FAIL; - } - } else { - if (!ret && cmd.error) - ret = cmd.error; - if (!ret && data.error) - ret = data.error; - if (!ret && stop.error) - ret = stop.error; - if (!ret && data.bytes_xfered != blocks * blksz) - ret = RESULT_FAIL; - } + mmc_set_data_timeout(mrq->data, test->card); +} - if (ret == -EINVAL) - ret = RESULT_UNSUP_HOST; +/* + * Wait for the card to finish the busy state + */ +static int mmc_test_wait_busy(struct mmc_test_card *test) +{ + int ret, busy; + struct mmc_command cmd; busy = 0; do { - int ret2; - memset(&cmd, 0, sizeof(struct mmc_command)); cmd.opcode = MMC_SEND_STATUS; cmd.arg = test->card->rca << 16; cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; - ret2 = mmc_wait_for_cmd(test->card->host, &cmd, 0); - if (ret2) + ret = mmc_wait_for_cmd(test->card->host, &cmd, 0); + if (ret) break; if (!busy && !(cmd.resp[0] & R1_READY_FOR_DATA)) { @@ -172,14 +120,57 @@ static int __mmc_test_transfer(struct mmc_test_card *test, int write, return ret; } -static int mmc_test_transfer(struct mmc_test_card *test, int write, - u8 *buffer, unsigned addr, unsigned blocks, unsigned blksz) +/* + * Transfer a single sector of kernel addressable data + */ +static int mmc_test_buffer_transfer(struct mmc_test_card *test, + u8 *buffer, unsigned addr, unsigned blksz, int write) { - return __mmc_test_transfer(test, write, 0, buffer, - addr, blocks, blksz); + int ret; + + struct mmc_request mrq; + struct mmc_command cmd; + struct mmc_command stop; + struct mmc_data data; + + struct scatterlist sg; + + memset(&mrq, 0, sizeof(struct mmc_request)); + memset(&cmd, 0, sizeof(struct mmc_command)); + memset(&data, 0, sizeof(struct mmc_data)); + memset(&stop, 0, sizeof(struct mmc_command)); + + mrq.cmd = &cmd; + mrq.data = &data; + mrq.stop = &stop; + + sg_init_one(&sg, buffer, blksz); + + mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write); + + mmc_wait_for_req(test->card->host, &mrq); + + if (cmd.error) + return cmd.error; + if (data.error) + return data.error; + + ret = mmc_test_wait_busy(test); + if (ret) + return ret; + + return 0; } -static int mmc_test_prepare_verify(struct mmc_test_card *test, int write) +/*******************************************************************/ +/* Test preparation and cleanup */ +/*******************************************************************/ + +/* + * Fill the first couple of sectors of the card with known data + * so that bad reads/writes can be detected + */ +static int __mmc_test_prepare(struct mmc_test_card *test, int write) { int ret, i; @@ -188,15 +179,14 @@ static int mmc_test_prepare_verify(struct mmc_test_card *test, int write) return ret; if (write) - memset(test->buffer, 0xDF, BUFFER_SIZE); + memset(test->buffer, 0xDF, 512); else { - for (i = 0;i < BUFFER_SIZE;i++) + for (i = 0;i < 512;i++) test->buffer[i] = i; } for (i = 0;i < BUFFER_SIZE / 512;i++) { - ret = mmc_test_transfer(test, 1, test->buffer + i * 512, - i * 512, 1, 512); + ret = mmc_test_buffer_transfer(test, test->buffer, i * 512, 512, 1); if (ret) return ret; } @@ -204,41 +194,218 @@ static 
int mmc_test_prepare_verify(struct mmc_test_card *test, int write) return 0; } -static int mmc_test_prepare_verify_write(struct mmc_test_card *test) +static int mmc_test_prepare_write(struct mmc_test_card *test) +{ + return __mmc_test_prepare(test, 1); +} + +static int mmc_test_prepare_read(struct mmc_test_card *test) +{ + return __mmc_test_prepare(test, 0); +} + +static int mmc_test_cleanup(struct mmc_test_card *test) { - return mmc_test_prepare_verify(test, 1); + int ret, i; + + ret = mmc_test_set_blksize(test, 512); + if (ret) + return ret; + + memset(test->buffer, 0, 512); + + for (i = 0;i < BUFFER_SIZE / 512;i++) { + ret = mmc_test_buffer_transfer(test, test->buffer, i * 512, 512, 1); + if (ret) + return ret; + } + + return 0; } -static int mmc_test_prepare_verify_read(struct mmc_test_card *test) +/*******************************************************************/ +/* Test execution helpers */ +/*******************************************************************/ + +/* + * Modifies the mmc_request to perform the "short transfer" tests + */ +static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test, + struct mmc_request *mrq, int write) { - return mmc_test_prepare_verify(test, 0); + BUG_ON(!mrq || !mrq->cmd || !mrq->data); + + if (mrq->data->blocks > 1) { + mrq->cmd->opcode = write ? + MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK; + mrq->stop = NULL; + } else { + mrq->cmd->opcode = MMC_SEND_STATUS; + mrq->cmd->arg = test->card->rca << 16; + } } -static int mmc_test_verified_transfer(struct mmc_test_card *test, int write, - u8 *buffer, unsigned addr, unsigned blocks, unsigned blksz) +/* + * Checks that a normal transfer didn't have any errors + */ +static int mmc_test_check_result(struct mmc_test_card *test, + struct mmc_request *mrq) { - int ret, i, sectors; + int ret; - /* - * It is assumed that the above preparation has been done. 
- */ + BUG_ON(!mrq || !mrq->cmd || !mrq->data); + + ret = 0; - memset(test->buffer, 0, BUFFER_SIZE); + if (!ret && mrq->cmd->error) + ret = mrq->cmd->error; + if (!ret && mrq->data->error) + ret = mrq->data->error; + if (!ret && mrq->stop && mrq->stop->error) + ret = mrq->stop->error; + if (!ret && mrq->data->bytes_xfered != + mrq->data->blocks * mrq->data->blksz) + ret = RESULT_FAIL; + + if (ret == -EINVAL) + ret = RESULT_UNSUP_HOST; + + return ret; +} + +/* + * Checks that a "short transfer" behaved as expected + */ +static int mmc_test_check_broken_result(struct mmc_test_card *test, + struct mmc_request *mrq) +{ + int ret; + + BUG_ON(!mrq || !mrq->cmd || !mrq->data); + + ret = 0; + + if (!ret && mrq->cmd->error) + ret = mrq->cmd->error; + if (!ret && mrq->data->error == 0) + ret = RESULT_FAIL; + if (!ret && mrq->data->error != -ETIMEDOUT) + ret = mrq->data->error; + if (!ret && mrq->stop && mrq->stop->error) + ret = mrq->stop->error; + if (mrq->data->blocks > 1) { + if (!ret && mrq->data->bytes_xfered > mrq->data->blksz) + ret = RESULT_FAIL; + } else { + if (!ret && mrq->data->bytes_xfered > 0) + ret = RESULT_FAIL; + } + + if (ret == -EINVAL) + ret = RESULT_UNSUP_HOST; + + return ret; +} + +/* + * Tests a basic transfer with certain parameters + */ +static int mmc_test_simple_transfer(struct mmc_test_card *test, + struct scatterlist *sg, unsigned sg_len, unsigned dev_addr, + unsigned blocks, unsigned blksz, int write) +{ + struct mmc_request mrq; + struct mmc_command cmd; + struct mmc_command stop; + struct mmc_data data; + + memset(&mrq, 0, sizeof(struct mmc_request)); + memset(&cmd, 0, sizeof(struct mmc_command)); + memset(&data, 0, sizeof(struct mmc_data)); + memset(&stop, 0, sizeof(struct mmc_command)); + + mrq.cmd = &cmd; + mrq.data = &data; + mrq.stop = &stop; + + mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr, + blocks, blksz, write); + + mmc_wait_for_req(test->card->host, &mrq); + + mmc_test_wait_busy(test); + + return mmc_test_check_result(test, &mrq); +} + +/* + * Tests a transfer where the card will fail completely or partly + */ +static int mmc_test_broken_transfer(struct mmc_test_card *test, + unsigned blocks, unsigned blksz, int write) +{ + struct mmc_request mrq; + struct mmc_command cmd; + struct mmc_command stop; + struct mmc_data data; + + struct scatterlist sg; + + memset(&mrq, 0, sizeof(struct mmc_request)); + memset(&cmd, 0, sizeof(struct mmc_command)); + memset(&data, 0, sizeof(struct mmc_data)); + memset(&stop, 0, sizeof(struct mmc_command)); + + mrq.cmd = &cmd; + mrq.data = &data; + mrq.stop = &stop; + + sg_init_one(&sg, test->buffer, blocks * blksz); + + mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write); + mmc_test_prepare_broken_mrq(test, &mrq, write); + + mmc_wait_for_req(test->card->host, &mrq); + + mmc_test_wait_busy(test); + + return mmc_test_check_broken_result(test, &mrq); +} + +/* + * Does a complete transfer test where data is also validated + * + * Note: mmc_test_prepare() must have been done before this call + */ +static int mmc_test_transfer(struct mmc_test_card *test, + struct scatterlist *sg, unsigned sg_len, unsigned dev_addr, + unsigned blocks, unsigned blksz, int write) +{ + int ret, i; + unsigned long flags; if (write) { for (i = 0;i < blocks * blksz;i++) - buffer[i] = i; + test->scratch[i] = i; + } else { + memset(test->scratch, 0, BUFFER_SIZE); } + local_irq_save(flags); + sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE); + local_irq_restore(flags); ret = mmc_test_set_blksize(test, blksz); if (ret) return 
ret; - ret = mmc_test_transfer(test, write, buffer, addr, blocks, blksz); + ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr, + blocks, blksz, write); if (ret) return ret; if (write) { + int sectors; + ret = mmc_test_set_blksize(test, 512); if (ret) return ret; @@ -253,9 +420,9 @@ static int mmc_test_verified_transfer(struct mmc_test_card *test, int write, memset(test->buffer, 0, sectors * 512); for (i = 0;i < sectors;i++) { - ret = mmc_test_transfer(test, 0, + ret = mmc_test_buffer_transfer(test, test->buffer + i * 512, - addr + i * 512, 1, 512); + dev_addr + i * 512, 512, 0); if (ret) return ret; } @@ -270,8 +437,11 @@ static int mmc_test_verified_transfer(struct mmc_test_card *test, int write, return RESULT_FAIL; } } else { + local_irq_save(flags); + sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE); + local_irq_restore(flags); for (i = 0;i < blocks * blksz;i++) { - if (buffer[i] != (u8)i) + if (test->scratch[i] != (u8)i) return RESULT_FAIL; } } @@ -279,26 +449,6 @@ static int mmc_test_verified_transfer(struct mmc_test_card *test, int write, return 0; } -static int mmc_test_cleanup_verify(struct mmc_test_card *test) -{ - int ret, i; - - ret = mmc_test_set_blksize(test, 512); - if (ret) - return ret; - - memset(test->buffer, 0, BUFFER_SIZE); - - for (i = 0;i < BUFFER_SIZE / 512;i++) { - ret = mmc_test_transfer(test, 1, test->buffer + i * 512, - i * 512, 1, 512); - if (ret) - return ret; - } - - return 0; -} - /*******************************************************************/ /* Tests */ /*******************************************************************/ @@ -314,12 +464,15 @@ struct mmc_test_case { static int mmc_test_basic_write(struct mmc_test_card *test) { int ret; + struct scatterlist sg; ret = mmc_test_set_blksize(test, 512); if (ret) return ret; - ret = mmc_test_transfer(test, 1, test->buffer, 0, 1, 512); + sg_init_one(&sg, test->buffer, 512); + + ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1); if (ret) return ret; @@ -329,12 +482,15 @@ static int mmc_test_basic_write(struct mmc_test_card *test) static int mmc_test_basic_read(struct mmc_test_card *test) { int ret; + struct scatterlist sg; ret = mmc_test_set_blksize(test, 512); if (ret) return ret; - ret = mmc_test_transfer(test, 0, test->buffer, 0, 1, 512); + sg_init_one(&sg, test->buffer, 512); + + ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1); if (ret) return ret; @@ -344,8 +500,11 @@ static int mmc_test_basic_read(struct mmc_test_card *test) static int mmc_test_verify_write(struct mmc_test_card *test) { int ret; + struct scatterlist sg; + + sg_init_one(&sg, test->buffer, 512); - ret = mmc_test_verified_transfer(test, 1, test->buffer, 0, 1, 512); + ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1); if (ret) return ret; @@ -355,8 +514,11 @@ static int mmc_test_verify_write(struct mmc_test_card *test) static int mmc_test_verify_read(struct mmc_test_card *test) { int ret; + struct scatterlist sg; + + sg_init_one(&sg, test->buffer, 512); - ret = mmc_test_verified_transfer(test, 0, test->buffer, 0, 1, 512); + ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0); if (ret) return ret; @@ -367,6 +529,7 @@ static int mmc_test_multi_write(struct mmc_test_card *test) { int ret; unsigned int size; + struct scatterlist sg; if (test->card->host->max_blk_count == 1) return RESULT_UNSUP_HOST; @@ -379,8 +542,9 @@ static int mmc_test_multi_write(struct mmc_test_card *test) if (size < 1024) return RESULT_UNSUP_HOST; - ret = mmc_test_verified_transfer(test, 1, test->buffer, 0, - size / 512, 512); + 
sg_init_one(&sg, test->buffer, size); + + ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1); if (ret) return ret; @@ -391,6 +555,7 @@ static int mmc_test_multi_read(struct mmc_test_card *test) { int ret; unsigned int size; + struct scatterlist sg; if (test->card->host->max_blk_count == 1) return RESULT_UNSUP_HOST; @@ -403,8 +568,9 @@ static int mmc_test_multi_read(struct mmc_test_card *test) if (size < 1024) return RESULT_UNSUP_HOST; - ret = mmc_test_verified_transfer(test, 0, test->buffer, 0, - size / 512, 512); + sg_init_one(&sg, test->buffer, size); + + ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0); if (ret) return ret; @@ -414,13 +580,14 @@ static int mmc_test_multi_read(struct mmc_test_card *test) static int mmc_test_pow2_write(struct mmc_test_card *test) { int ret, i; + struct scatterlist sg; if (!test->card->csd.write_partial) return RESULT_UNSUP_CARD; for (i = 1; i < 512;i <<= 1) { - ret = mmc_test_verified_transfer(test, 1, - test->buffer, 0, 1, i); + sg_init_one(&sg, test->buffer, i); + ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1); if (ret) return ret; } @@ -431,13 +598,14 @@ static int mmc_test_pow2_write(struct mmc_test_card *test) static int mmc_test_pow2_read(struct mmc_test_card *test) { int ret, i; + struct scatterlist sg; if (!test->card->csd.read_partial) return RESULT_UNSUP_CARD; for (i = 1; i < 512;i <<= 1) { - ret = mmc_test_verified_transfer(test, 0, - test->buffer, 0, 1, i); + sg_init_one(&sg, test->buffer, i); + ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0); if (ret) return ret; } @@ -448,13 +616,14 @@ static int mmc_test_pow2_read(struct mmc_test_card *test) static int mmc_test_weird_write(struct mmc_test_card *test) { int ret, i; + struct scatterlist sg; if (!test->card->csd.write_partial) return RESULT_UNSUP_CARD; for (i = 3; i < 512;i += 7) { - ret = mmc_test_verified_transfer(test, 1, - test->buffer, 0, 1, i); + sg_init_one(&sg, test->buffer, i); + ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1); if (ret) return ret; } @@ -465,13 +634,14 @@ static int mmc_test_weird_write(struct mmc_test_card *test) static int mmc_test_weird_read(struct mmc_test_card *test) { int ret, i; + struct scatterlist sg; if (!test->card->csd.read_partial) return RESULT_UNSUP_CARD; for (i = 3; i < 512;i += 7) { - ret = mmc_test_verified_transfer(test, 0, - test->buffer, 0, 1, i); + sg_init_one(&sg, test->buffer, i); + ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0); if (ret) return ret; } @@ -482,10 +652,11 @@ static int mmc_test_weird_read(struct mmc_test_card *test) static int mmc_test_align_write(struct mmc_test_card *test) { int ret, i; + struct scatterlist sg; for (i = 1;i < 4;i++) { - ret = mmc_test_verified_transfer(test, 1, test->buffer + i, - 0, 1, 512); + sg_init_one(&sg, test->buffer + i, 512); + ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1); if (ret) return ret; } @@ -496,10 +667,11 @@ static int mmc_test_align_write(struct mmc_test_card *test) static int mmc_test_align_read(struct mmc_test_card *test) { int ret, i; + struct scatterlist sg; for (i = 1;i < 4;i++) { - ret = mmc_test_verified_transfer(test, 0, test->buffer + i, - 0, 1, 512); + sg_init_one(&sg, test->buffer + i, 512); + ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0); if (ret) return ret; } @@ -511,6 +683,7 @@ static int mmc_test_align_multi_write(struct mmc_test_card *test) { int ret, i; unsigned int size; + struct scatterlist sg; if (test->card->host->max_blk_count == 1) return RESULT_UNSUP_HOST; @@ -524,8 +697,8 @@ static int mmc_test_align_multi_write(struct 
mmc_test_card *test) return RESULT_UNSUP_HOST; for (i = 1;i < 4;i++) { - ret = mmc_test_verified_transfer(test, 1, test->buffer + i, - 0, size / 512, 512); + sg_init_one(&sg, test->buffer + i, size); + ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1); if (ret) return ret; } @@ -537,6 +710,7 @@ static int mmc_test_align_multi_read(struct mmc_test_card *test) { int ret, i; unsigned int size; + struct scatterlist sg; if (test->card->host->max_blk_count == 1) return RESULT_UNSUP_HOST; @@ -550,8 +724,8 @@ static int mmc_test_align_multi_read(struct mmc_test_card *test) return RESULT_UNSUP_HOST; for (i = 1;i < 4;i++) { - ret = mmc_test_verified_transfer(test, 0, test->buffer + i, - 0, size / 512, 512); + sg_init_one(&sg, test->buffer + i, size); + ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0); if (ret) return ret; } @@ -567,7 +741,7 @@ static int mmc_test_xfersize_write(struct mmc_test_card *test) if (ret) return ret; - ret = __mmc_test_transfer(test, 1, 1, test->buffer, 0, 1, 512); + ret = mmc_test_broken_transfer(test, 1, 512, 1); if (ret) return ret; @@ -582,7 +756,7 @@ static int mmc_test_xfersize_read(struct mmc_test_card *test) if (ret) return ret; - ret = __mmc_test_transfer(test, 0, 1, test->buffer, 0, 1, 512); + ret = mmc_test_broken_transfer(test, 1, 512, 0); if (ret) return ret; @@ -600,7 +774,7 @@ static int mmc_test_multi_xfersize_write(struct mmc_test_card *test) if (ret) return ret; - ret = __mmc_test_transfer(test, 1, 1, test->buffer, 0, 2, 512); + ret = mmc_test_broken_transfer(test, 2, 512, 1); if (ret) return ret; @@ -618,7 +792,7 @@ static int mmc_test_multi_xfersize_read(struct mmc_test_card *test) if (ret) return ret; - ret = __mmc_test_transfer(test, 0, 1, test->buffer, 0, 2, 512); + ret = mmc_test_broken_transfer(test, 2, 512, 0); if (ret) return ret; @@ -638,86 +812,86 @@ static const struct mmc_test_case mmc_test_cases[] = { { .name = "Basic write (with data verification)", - .prepare = mmc_test_prepare_verify_write, + .prepare = mmc_test_prepare_write, .run = mmc_test_verify_write, - .cleanup = mmc_test_cleanup_verify, + .cleanup = mmc_test_cleanup, }, { .name = "Basic read (with data verification)", - .prepare = mmc_test_prepare_verify_read, + .prepare = mmc_test_prepare_read, .run = mmc_test_verify_read, - .cleanup = mmc_test_cleanup_verify, + .cleanup = mmc_test_cleanup, }, { .name = "Multi-block write", - .prepare = mmc_test_prepare_verify_write, + .prepare = mmc_test_prepare_write, .run = mmc_test_multi_write, - .cleanup = mmc_test_cleanup_verify, + .cleanup = mmc_test_cleanup, }, { .name = "Multi-block read", - .prepare = mmc_test_prepare_verify_read, + .prepare = mmc_test_prepare_read, .run = mmc_test_multi_read, - .cleanup = mmc_test_cleanup_verify, + .cleanup = mmc_test_cleanup, }, { .name = "Power of two block writes", - .prepare = mmc_test_prepare_verify_write, + .prepare = mmc_test_prepare_write, .run = mmc_test_pow2_write, - .cleanup = mmc_test_cleanup_verify, + .cleanup = mmc_test_cleanup, }, { .name = "Power of two block reads", - .prepare = mmc_test_prepare_verify_read, + .prepare = mmc_test_prepare_read, .run = mmc_test_pow2_read, - .cleanup = mmc_test_cleanup_verify, + .cleanup = mmc_test_cleanup, }, { .name = "Weird sized block writes", - .prepare = mmc_test_prepare_verify_write, + .prepare = mmc_test_prepare_write, .run = mmc_test_weird_write, - .cleanup = mmc_test_cleanup_verify, + .cleanup = mmc_test_cleanup, }, { .name = "Weird sized block reads", - .prepare = mmc_test_prepare_verify_read, + .prepare = 
mmc_test_prepare_read, .run = mmc_test_weird_read, - .cleanup = mmc_test_cleanup_verify, + .cleanup = mmc_test_cleanup, }, { .name = "Badly aligned write", - .prepare = mmc_test_prepare_verify_write, + .prepare = mmc_test_prepare_write, .run = mmc_test_align_write, - .cleanup = mmc_test_cleanup_verify, + .cleanup = mmc_test_cleanup, }, { .name = "Badly aligned read", - .prepare = mmc_test_prepare_verify_read, + .prepare = mmc_test_prepare_read, .run = mmc_test_align_read, - .cleanup = mmc_test_cleanup_verify, + .cleanup = mmc_test_cleanup, }, { .name = "Badly aligned multi-block write", - .prepare = mmc_test_prepare_verify_write, + .prepare = mmc_test_prepare_write, .run = mmc_test_align_multi_write, - .cleanup = mmc_test_cleanup_verify, + .cleanup = mmc_test_cleanup, }, { .name = "Badly aligned multi-block read", - .prepare = mmc_test_prepare_verify_read, + .prepare = mmc_test_prepare_read, .run = mmc_test_align_multi_read, - .cleanup = mmc_test_cleanup_verify, + .cleanup = mmc_test_cleanup, }, { @@ -743,7 +917,7 @@ static const struct mmc_test_case mmc_test_cases[] = { static struct mutex mmc_test_lock; -static void mmc_test_run(struct mmc_test_card *test) +static void mmc_test_run(struct mmc_test_card *test, int testcase) { int i, ret; @@ -753,6 +927,9 @@ static void mmc_test_run(struct mmc_test_card *test) mmc_claim_host(test->card->host); for (i = 0;i < ARRAY_SIZE(mmc_test_cases);i++) { + if (testcase && ((i + 1) != testcase)) + continue; + printk(KERN_INFO "%s: Test case %d. %s...\n", mmc_hostname(test->card->host), i + 1, mmc_test_cases[i].name); @@ -824,9 +1001,12 @@ static ssize_t mmc_test_store(struct device *dev, { struct mmc_card *card; struct mmc_test_card *test; + int testcase; card = container_of(dev, struct mmc_card, dev); + testcase = simple_strtol(buf, NULL, 10); + test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL); if (!test) return -ENOMEM; @@ -836,7 +1016,7 @@ static ssize_t mmc_test_store(struct device *dev, test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL); if (test->buffer) { mutex_lock(&mmc_test_lock); - mmc_test_run(test); + mmc_test_run(test, testcase); mutex_unlock(&mmc_test_lock); } @@ -852,6 +1032,9 @@ static int mmc_test_probe(struct mmc_card *card) { int ret; + if ((card->type != MMC_TYPE_MMC) && (card->type != MMC_TYPE_SD)) + return -ENODEV; + mutex_init(&mmc_test_lock); ret = device_create_file(&card->dev, &dev_attr_test); diff --git a/drivers/mmc/card/sdio_uart.c b/drivers/mmc/card/sdio_uart.c index eeea84c309e..78ad48718ab 100644 --- a/drivers/mmc/card/sdio_uart.c +++ b/drivers/mmc/card/sdio_uart.c @@ -885,12 +885,14 @@ static void sdio_uart_set_termios(struct tty_struct *tty, struct ktermios *old_t sdio_uart_release_func(port); } -static void sdio_uart_break_ctl(struct tty_struct *tty, int break_state) +static int sdio_uart_break_ctl(struct tty_struct *tty, int break_state) { struct sdio_uart_port *port = tty->driver_data; + int result; - if (sdio_uart_claim_func(port) != 0) - return; + result = sdio_uart_claim_func(port); + if (result != 0) + return result; if (break_state == -1) port->lcr |= UART_LCR_SBC; @@ -899,6 +901,7 @@ static void sdio_uart_break_ctl(struct tty_struct *tty, int break_state) sdio_out(port, UART_LCR, port->lcr); sdio_uart_release_func(port); + return 0; } static int sdio_uart_tiocmget(struct tty_struct *tty, struct file *file) diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 01ced4c5a61..3ee5b8c3b5c 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -3,7 +3,7 @@ * * Copyright (C) 
2003-2004 Russell King, All Rights Reserved. * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved. - * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved. + * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved. * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved. * * This program is free software; you can redistribute it and/or modify @@ -295,6 +295,33 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card) EXPORT_SYMBOL(mmc_set_data_timeout); /** + * mmc_align_data_size - pads a transfer size to a more optimal value + * @card: the MMC card associated with the data transfer + * @sz: original transfer size + * + * Pads the original data size with a number of extra bytes in + * order to avoid controller bugs and/or performance hits + * (e.g. some controllers revert to PIO for certain sizes). + * + * Returns the improved size, which might be unmodified. + * + * Note that this function is only relevant when issuing a + * single scatter gather entry. + */ +unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz) +{ + /* + * FIXME: We don't have a system for the controller to tell + * the core about its problems yet, so for now we just 32-bit + * align the size. + */ + sz = ((sz + 3) / 4) * 4; + + return sz; +} +EXPORT_SYMBOL(mmc_align_data_size); + +/** * __mmc_claim_host - exclusively claim a host * @host: mmc host to claim * @abort: whether or not the operation should be aborted @@ -638,6 +665,9 @@ void mmc_rescan(struct work_struct *work) */ mmc_bus_put(host); + if (host->ops->get_cd && host->ops->get_cd(host) == 0) + goto out; + mmc_claim_host(host); mmc_power_up(host); @@ -652,7 +682,7 @@ void mmc_rescan(struct work_struct *work) if (!err) { if (mmc_attach_sdio(host, ocr)) mmc_power_off(host); - return; + goto out; } /* @@ -662,7 +692,7 @@ void mmc_rescan(struct work_struct *work) if (!err) { if (mmc_attach_sd(host, ocr)) mmc_power_off(host); - return; + goto out; } /* @@ -672,7 +702,7 @@ void mmc_rescan(struct work_struct *work) if (!err) { if (mmc_attach_mmc(host, ocr)) mmc_power_off(host); - return; + goto out; } mmc_release_host(host); @@ -683,6 +713,9 @@ void mmc_rescan(struct work_struct *work) mmc_bus_put(host); } +out: + if (host->caps & MMC_CAP_NEEDS_POLL) + mmc_schedule_delayed_work(&host->detect, HZ); } void mmc_start_host(struct mmc_host *host) diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 3da29eef8f7..fdd7c760be8 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -288,7 +288,7 @@ static struct device_type mmc_type = { /* * Handle the detection and initialisation of a card. * - * In the case of a resume, "curcard" will contain the card + * In the case of a resume, "oldcard" will contain the card * we're trying to reinitialise. */ static int mmc_init_card(struct mmc_host *host, u32 ocr, diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index 7ef3b15c5e3..26fc098d77c 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -326,7 +326,7 @@ static struct device_type sd_type = { /* * Handle the detection and initialisation of a card. * - * In the case of a resume, "curcard" will contain the card + * In the case of a resume, "oldcard" will contain the card * we're trying to reinitialise. */ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr, @@ -494,13 +494,13 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr, * Check if read-only switch is active. 
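/*
 * Note on the convention applied in the hunk below, and matched by the
 * at91_mci_get_ro() change later in this diff: a host ->get_ro()
 * callback now returns a negative errno when the write-protect state
 * cannot be read, zero for a writable card and a positive value for a
 * read-only card.  Hence the core treats "< 0" the same as a missing
 * callback and only marks the card read-only on "> 0", rather than
 * interpreting the return value as a plain boolean.
 */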
*/ if (!oldcard) { - if (!host->ops->get_ro) { + if (!host->ops->get_ro || host->ops->get_ro(host) < 0) { printk(KERN_WARNING "%s: host does not " "support reading read-only " "switch. assuming write-enable.\n", mmc_hostname(host)); } else { - if (host->ops->get_ro(host)) + if (host->ops->get_ro(host) > 0) mmc_card_set_readonly(card); } } diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c index d5e51b1c7b3..956bd767750 100644 --- a/drivers/mmc/core/sdio_cis.c +++ b/drivers/mmc/core/sdio_cis.c @@ -129,6 +129,12 @@ static int cistpl_funce_func(struct sdio_func *func, /* TPLFE_MAX_BLK_SIZE */ func->max_blksize = buf[12] | (buf[13] << 8); + /* TPLFE_ENABLE_TIMEOUT_VAL, present in ver 1.1 and above */ + if (vsn > SDIO_SDIO_REV_1_00) + func->enable_timeout = (buf[28] | (buf[29] << 8)) * 10; + else + func->enable_timeout = jiffies_to_msecs(HZ); + return 0; } diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c index 625b92ce9ce..f61fc2d4cd0 100644 --- a/drivers/mmc/core/sdio_io.c +++ b/drivers/mmc/core/sdio_io.c @@ -1,7 +1,7 @@ /* * linux/drivers/mmc/core/sdio_io.c * - * Copyright 2007 Pierre Ossman + * Copyright 2007-2008 Pierre Ossman * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -76,11 +76,7 @@ int sdio_enable_func(struct sdio_func *func) if (ret) goto err; - /* - * FIXME: This should timeout based on information in the CIS, - * but we don't have card to parse that yet. - */ - timeout = jiffies + HZ; + timeout = jiffies + msecs_to_jiffies(func->enable_timeout); while (1) { ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IORx, 0, ®); @@ -167,10 +163,8 @@ int sdio_set_block_size(struct sdio_func *func, unsigned blksz) return -EINVAL; if (blksz == 0) { - blksz = min(min( - func->max_blksize, - func->card->host->max_blk_size), - 512u); + blksz = min(func->max_blksize, func->card->host->max_blk_size); + blksz = min(blksz, 512u); } ret = mmc_io_rw_direct(func->card, 1, 0, @@ -186,9 +180,116 @@ int sdio_set_block_size(struct sdio_func *func, unsigned blksz) func->cur_blksize = blksz; return 0; } - EXPORT_SYMBOL_GPL(sdio_set_block_size); +/* + * Calculate the maximum byte mode transfer size + */ +static inline unsigned int sdio_max_byte_size(struct sdio_func *func) +{ + unsigned mval = min(func->card->host->max_seg_size, + func->card->host->max_blk_size); + mval = min(mval, func->max_blksize); + return min(mval, 512u); /* maximum size for byte mode */ +} + +/** + * sdio_align_size - pads a transfer size to a more optimal value + * @func: SDIO function + * @sz: original transfer size + * + * Pads the original data size with a number of extra bytes in + * order to avoid controller bugs and/or performance hits + * (e.g. some controllers revert to PIO for certain sizes). + * + * If possible, it will also adjust the size so that it can be + * handled in just a single request. + * + * Returns the improved size, which might be unmodified. + */ +unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz) +{ + unsigned int orig_sz; + unsigned int blk_sz, byte_sz; + unsigned chunk_sz; + + orig_sz = sz; + + /* + * Do a first check with the controller, in case it + * wants to increase the size up to a point where it + * might need more than one block. + */ + sz = mmc_align_data_size(func->card, sz); + + /* + * If we can still do this with just a byte transfer, then + * we're done. 
+ */ + if (sz <= sdio_max_byte_size(func)) + return sz; + + if (func->card->cccr.multi_block) { + /* + * Check if the transfer is already block aligned + */ + if ((sz % func->cur_blksize) == 0) + return sz; + + /* + * Realign it so that it can be done with one request, + * and recheck if the controller still likes it. + */ + blk_sz = ((sz + func->cur_blksize - 1) / + func->cur_blksize) * func->cur_blksize; + blk_sz = mmc_align_data_size(func->card, blk_sz); + + /* + * This value is only good if it is still just + * one request. + */ + if ((blk_sz % func->cur_blksize) == 0) + return blk_sz; + + /* + * We failed to do one request, but at least try to + * pad the remainder properly. + */ + byte_sz = mmc_align_data_size(func->card, + sz % func->cur_blksize); + if (byte_sz <= sdio_max_byte_size(func)) { + blk_sz = sz / func->cur_blksize; + return blk_sz * func->cur_blksize + byte_sz; + } + } else { + /* + * We need multiple requests, so first check that the + * controller can handle the chunk size; + */ + chunk_sz = mmc_align_data_size(func->card, + sdio_max_byte_size(func)); + if (chunk_sz == sdio_max_byte_size(func)) { + /* + * Fix up the size of the remainder (if any) + */ + byte_sz = orig_sz % chunk_sz; + if (byte_sz) { + byte_sz = mmc_align_data_size(func->card, + byte_sz); + } + + return (orig_sz / chunk_sz) * chunk_sz + byte_sz; + } + } + + /* + * The controller is simply incapable of transferring the size + * we want in decent manner, so just return the original size. + */ + return orig_sz; +} +EXPORT_SYMBOL_GPL(sdio_align_size); + /* Split an arbitrarily sized data transfer into several * IO_RW_EXTENDED commands. */ static int sdio_io_rw_ext_helper(struct sdio_func *func, int write, @@ -199,14 +300,13 @@ static int sdio_io_rw_ext_helper(struct sdio_func *func, int write, int ret; /* Do the bulk of the transfer using block mode (if supported). */ - if (func->card->cccr.multi_block) { + if (func->card->cccr.multi_block && (size > sdio_max_byte_size(func))) { /* Blocks per command is limited by host count, host transfer * size (we only use a single sg entry) and the maximum for * IO_RW_EXTENDED of 511 blocks. */ - max_blocks = min(min( - func->card->host->max_blk_count, - func->card->host->max_seg_size / func->cur_blksize), - 511u); + max_blocks = min(func->card->host->max_blk_count, + func->card->host->max_seg_size / func->cur_blksize); + max_blocks = min(max_blocks, 511u); while (remainder > func->cur_blksize) { unsigned blocks; @@ -231,11 +331,7 @@ static int sdio_io_rw_ext_helper(struct sdio_func *func, int write, /* Write the remainder using byte mode. */ while (remainder > 0) { - size = remainder; - if (size > func->cur_blksize) - size = func->cur_blksize; - if (size > 512) - size = 512; /* maximum size for byte mode */ + size = min(remainder, sdio_max_byte_size(func)); ret = mmc_io_rw_extended(func->card, write, func->num, addr, incr_addr, buf, 1, size); @@ -260,11 +356,10 @@ static int sdio_io_rw_ext_helper(struct sdio_func *func, int write, * function. If there is a problem reading the address, 0xff * is returned and @err_ret will contain the error code. */ -unsigned char sdio_readb(struct sdio_func *func, unsigned int addr, - int *err_ret) +u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret) { int ret; - unsigned char val; + u8 val; BUG_ON(!func); @@ -293,8 +388,7 @@ EXPORT_SYMBOL_GPL(sdio_readb); * function. @err_ret will contain the status of the actual * transfer. 
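/*
 * Illustrative sketch of how a function driver is expected to use
 * sdio_align_size() above; the register address 0x1000 and the function
 * name are invented for the example.  Note that mmc_align_data_size()
 * currently only rounds up to a multiple of four bytes (e.g. 13 becomes
 * 16), so the interesting work in sdio_align_size() is the block-size
 * realignment.
 */
static int example_read_fifo(struct sdio_func *func, void *buf,
                             unsigned int len)
{
        /* Round up so the core can use block mode where possible. */
        unsigned int sz = sdio_align_size(func, len);

        /* The caller's buffer must be at least sz bytes long. */
        return sdio_readsb(func, buf, 0x1000, sz);
}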
*/ -void sdio_writeb(struct sdio_func *func, unsigned char b, unsigned int addr, - int *err_ret) +void sdio_writeb(struct sdio_func *func, u8 b, unsigned int addr, int *err_ret) { int ret; @@ -355,7 +449,6 @@ int sdio_readsb(struct sdio_func *func, void *dst, unsigned int addr, { return sdio_io_rw_ext_helper(func, 0, addr, 0, dst, count); } - EXPORT_SYMBOL_GPL(sdio_readsb); /** @@ -385,8 +478,7 @@ EXPORT_SYMBOL_GPL(sdio_writesb); * function. If there is a problem reading the address, 0xffff * is returned and @err_ret will contain the error code. */ -unsigned short sdio_readw(struct sdio_func *func, unsigned int addr, - int *err_ret) +u16 sdio_readw(struct sdio_func *func, unsigned int addr, int *err_ret) { int ret; @@ -400,7 +492,7 @@ unsigned short sdio_readw(struct sdio_func *func, unsigned int addr, return 0xFFFF; } - return le16_to_cpu(*(u16*)func->tmpbuf); + return le16_to_cpup((__le16 *)func->tmpbuf); } EXPORT_SYMBOL_GPL(sdio_readw); @@ -415,12 +507,11 @@ EXPORT_SYMBOL_GPL(sdio_readw); * function. @err_ret will contain the status of the actual * transfer. */ -void sdio_writew(struct sdio_func *func, unsigned short b, unsigned int addr, - int *err_ret) +void sdio_writew(struct sdio_func *func, u16 b, unsigned int addr, int *err_ret) { int ret; - *(u16*)func->tmpbuf = cpu_to_le16(b); + *(__le16 *)func->tmpbuf = cpu_to_le16(b); ret = sdio_memcpy_toio(func, addr, func->tmpbuf, 2); if (err_ret) @@ -439,8 +530,7 @@ EXPORT_SYMBOL_GPL(sdio_writew); * 0xffffffff is returned and @err_ret will contain the error * code. */ -unsigned long sdio_readl(struct sdio_func *func, unsigned int addr, - int *err_ret) +u32 sdio_readl(struct sdio_func *func, unsigned int addr, int *err_ret) { int ret; @@ -454,7 +544,7 @@ unsigned long sdio_readl(struct sdio_func *func, unsigned int addr, return 0xFFFFFFFF; } - return le32_to_cpu(*(u32*)func->tmpbuf); + return le32_to_cpup((__le32 *)func->tmpbuf); } EXPORT_SYMBOL_GPL(sdio_readl); @@ -469,12 +559,11 @@ EXPORT_SYMBOL_GPL(sdio_readl); * function. @err_ret will contain the status of the actual * transfer. */ -void sdio_writel(struct sdio_func *func, unsigned long b, unsigned int addr, - int *err_ret) +void sdio_writel(struct sdio_func *func, u32 b, unsigned int addr, int *err_ret) { int ret; - *(u32*)func->tmpbuf = cpu_to_le32(b); + *(__le32 *)func->tmpbuf = cpu_to_le32(b); ret = sdio_memcpy_toio(func, addr, func->tmpbuf, 4); if (err_ret) diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index dead61754ad..dc6f2579f85 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -26,18 +26,31 @@ config MMC_PXA config MMC_SDHCI tristate "Secure Digital Host Controller Interface support" - depends on PCI + depends on HAS_DMA help - This select the generic Secure Digital Host Controller Interface. + This selects the generic Secure Digital Host Controller Interface. It is used by manufacturers such as Texas Instruments(R), Ricoh(R) and Toshiba(R). Most controllers found in laptops are of this type. + + If you have a controller with this interface, say Y or M here. You + also need to enable an appropriate bus interface. + + If unsure, say N. + +config MMC_SDHCI_PCI + tristate "SDHCI support on PCI bus" + depends on MMC_SDHCI && PCI + help + This selects the PCI Secure Digital Host Controller Interface. + Most controllers found today are PCI devices. + If you have a controller with this interface, say Y or M here. If unsure, say N. 
config MMC_RICOH_MMC tristate "Ricoh MMC Controller Disabler (EXPERIMENTAL)" - depends on PCI && EXPERIMENTAL && MMC_SDHCI + depends on MMC_SDHCI_PCI help This selects the disabler for the Ricoh MMC Controller. This proprietary controller is unnecessary because the SDHCI driver @@ -91,6 +104,16 @@ config MMC_AT91 If unsure, say N. +config MMC_ATMELMCI + tristate "Atmel Multimedia Card Interface support" + depends on AVR32 + help + This selects the Atmel Multimedia Card Interface driver. If + you have an AT32 (AVR32) platform with a Multimedia Card + slot, say Y or M here. + + If unsure, say N. + config MMC_IMX tristate "Motorola i.MX Multimedia Card Interface support" depends on ARCH_IMX @@ -130,3 +153,24 @@ config MMC_SPI If unsure, or if your system has no SPI master driver, say N. +config MMC_S3C + tristate "Samsung S3C SD/MMC Card Interface support" + depends on ARCH_S3C2410 && MMC + help + This selects a driver for the MCI interface found in + Samsung's S3C2410, S3C2412, S3C2440, S3C2442 CPUs. + If you have a board based on one of those and a MMC/SD + slot, say Y or M here. + + If unsure, say N. + +config MMC_SDRICOH_CS + tristate "MMC/SD driver for Ricoh Bay1Controllers (EXPERIMENTAL)" + depends on EXPERIMENTAL && MMC && PCI && PCMCIA + help + Say Y here if your Notebook reports a Ricoh Bay1Controller PCMCIA + card whenever you insert a MMC or SD card into the card slot. + + To compile this driver as a module, choose M here: the + module will be called sdricoh_cs. + diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index 3877c87e6da..db52eebfb50 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile @@ -10,11 +10,15 @@ obj-$(CONFIG_MMC_ARMMMCI) += mmci.o obj-$(CONFIG_MMC_PXA) += pxamci.o obj-$(CONFIG_MMC_IMX) += imxmmc.o obj-$(CONFIG_MMC_SDHCI) += sdhci.o +obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o obj-$(CONFIG_MMC_RICOH_MMC) += ricoh_mmc.o obj-$(CONFIG_MMC_WBSD) += wbsd.o obj-$(CONFIG_MMC_AU1X) += au1xmmc.o obj-$(CONFIG_MMC_OMAP) += omap.o obj-$(CONFIG_MMC_AT91) += at91_mci.o +obj-$(CONFIG_MMC_ATMELMCI) += atmel-mci.o obj-$(CONFIG_MMC_TIFM_SD) += tifm_sd.o obj-$(CONFIG_MMC_SPI) += mmc_spi.o +obj-$(CONFIG_MMC_S3C) += s3cmci.o +obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c index 8979ad330a4..f15e2064305 100644 --- a/drivers/mmc/host/at91_mci.c +++ b/drivers/mmc/host/at91_mci.c @@ -125,9 +125,72 @@ struct at91mci_host /* Latest in the scatterlist that has been enabled for transfer */ int transfer_index; + + /* Timer for timeouts */ + struct timer_list timer; }; /* + * Reset the controller and restore most of the state + */ +static void at91_reset_host(struct at91mci_host *host) +{ + unsigned long flags; + u32 mr; + u32 sdcr; + u32 dtor; + u32 imr; + + local_irq_save(flags); + imr = at91_mci_read(host, AT91_MCI_IMR); + + at91_mci_write(host, AT91_MCI_IDR, 0xffffffff); + + /* save current state */ + mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff; + sdcr = at91_mci_read(host, AT91_MCI_SDCR); + dtor = at91_mci_read(host, AT91_MCI_DTOR); + + /* reset the controller */ + at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST); + + /* restore state */ + at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN); + at91_mci_write(host, AT91_MCI_MR, mr); + at91_mci_write(host, AT91_MCI_SDCR, sdcr); + at91_mci_write(host, AT91_MCI_DTOR, dtor); + at91_mci_write(host, AT91_MCI_IER, imr); + + /* make sure sdio interrupts will fire */ + at91_mci_read(host, AT91_MCI_SR); + + 
local_irq_restore(flags); +} + +static void at91_timeout_timer(unsigned long data) +{ + struct at91mci_host *host; + + host = (struct at91mci_host *)data; + + if (host->request) { + dev_err(host->mmc->parent, "Timeout waiting end of packet\n"); + + if (host->cmd && host->cmd->data) { + host->cmd->data->error = -ETIMEDOUT; + } else { + if (host->cmd) + host->cmd->error = -ETIMEDOUT; + else + host->request->cmd->error = -ETIMEDOUT; + } + + at91_reset_host(host); + mmc_request_done(host->mmc, host->request); + } +} + +/* * Copy from sg to a dma block - used for transfers */ static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data *data) @@ -135,9 +198,14 @@ static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data unsigned int len, i, size; unsigned *dmabuf = host->buffer; - size = host->total_length; + size = data->blksz * data->blocks; len = data->sg_len; + /* AT91SAM926[0/3] Data Write Operation and number of bytes erratum */ + if (cpu_is_at91sam9260() || cpu_is_at91sam9263()) + if (host->total_length == 12) + memset(dmabuf, 0, 12); + /* * Just loop through all entries. Size might not * be the entire list though so make sure that @@ -159,9 +227,10 @@ static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data for (index = 0; index < (amount / 4); index++) *dmabuf++ = swab32(sgbuffer[index]); - } - else + } else { memcpy(dmabuf, sgbuffer, amount); + dmabuf += amount; + } kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ); @@ -233,11 +302,11 @@ static void at91_mci_pre_dma_read(struct at91mci_host *host) if (i == 0) { at91_mci_write(host, ATMEL_PDC_RPR, sg->dma_address); - at91_mci_write(host, ATMEL_PDC_RCR, sg->length / 4); + at91_mci_write(host, ATMEL_PDC_RCR, (data->blksz & 0x3) ? sg->length : sg->length / 4); } else { at91_mci_write(host, ATMEL_PDC_RNPR, sg->dma_address); - at91_mci_write(host, ATMEL_PDC_RNCR, sg->length / 4); + at91_mci_write(host, ATMEL_PDC_RNCR, (data->blksz & 0x3) ? sg->length : sg->length / 4); } } @@ -277,8 +346,6 @@ static void at91_mci_post_dma_read(struct at91mci_host *host) dma_unmap_page(NULL, sg->dma_address, sg->length, DMA_FROM_DEVICE); - data->bytes_xfered += sg->length; - if (cpu_is_at91rm9200()) { /* AT91RM9200 errata */ unsigned int *buffer; int index; @@ -294,6 +361,8 @@ static void at91_mci_post_dma_read(struct at91mci_host *host) } flush_dcache_page(sg_page(sg)); + + data->bytes_xfered += sg->length; } /* Is there another transfer to trigger? 
*/ @@ -334,10 +403,32 @@ static void at91_mci_handle_transmitted(struct at91mci_host *host) at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE); } else at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY); +} + +/* + * Update bytes tranfered count during a write operation + */ +static void at91_mci_update_bytes_xfered(struct at91mci_host *host) +{ + struct mmc_data *data; + + /* always deal with the effective request (and not the current cmd) */ + + if (host->request->cmd && host->request->cmd->error != 0) + return; - data->bytes_xfered = host->total_length; + if (host->request->data) { + data = host->request->data; + if (data->flags & MMC_DATA_WRITE) { + /* card is in IDLE mode now */ + pr_debug("-> bytes_xfered %d, total_length = %d\n", + data->bytes_xfered, host->total_length); + data->bytes_xfered = data->blksz * data->blocks; + } + } } + /*Handle after command sent ready*/ static int at91_mci_handle_cmdrdy(struct at91mci_host *host) { @@ -350,8 +441,7 @@ static int at91_mci_handle_cmdrdy(struct at91mci_host *host) } else return 1; } else if (host->cmd->data->flags & MMC_DATA_WRITE) { /*After sendding multi-block-write command, start DMA transfer*/ - at91_mci_write(host, AT91_MCI_IER, AT91_MCI_TXBUFE); - at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE); + at91_mci_write(host, AT91_MCI_IER, AT91_MCI_TXBUFE | AT91_MCI_BLKE); at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN); } @@ -430,11 +520,19 @@ static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command if (data) { - if ( data->blksz & 0x3 ) { - pr_debug("Unsupported block size\n"); - cmd->error = -EINVAL; - mmc_request_done(host->mmc, host->request); - return; + if (cpu_is_at91rm9200() || cpu_is_at91sam9261()) { + if (data->blksz & 0x3) { + pr_debug("Unsupported block size\n"); + cmd->error = -EINVAL; + mmc_request_done(host->mmc, host->request); + return; + } + if (data->flags & MMC_DATA_STREAM) { + pr_debug("Stream commands not supported\n"); + cmd->error = -EINVAL; + mmc_request_done(host->mmc, host->request); + return; + } } block_length = data->blksz; @@ -481,8 +579,16 @@ static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command ier = AT91_MCI_CMDRDY; } else { /* zero block length and PDC mode */ - mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff; - at91_mci_write(host, AT91_MCI_MR, mr | (block_length << 16) | AT91_MCI_PDCMODE); + mr = at91_mci_read(host, AT91_MCI_MR) & 0x5fff; + mr |= (data->blksz & 0x3) ? 
AT91_MCI_PDCFBYTE : 0; + mr |= (block_length << 16); + mr |= AT91_MCI_PDCMODE; + at91_mci_write(host, AT91_MCI_MR, mr); + + if (!(cpu_is_at91rm9200() || cpu_is_at91sam9261())) + at91_mci_write(host, AT91_MCI_BLKR, + AT91_MCI_BLKR_BCNT(blocks) | + AT91_MCI_BLKR_BLKLEN(block_length)); /* * Disable the PDC controller @@ -508,6 +614,13 @@ static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command * Handle a write */ host->total_length = block_length * blocks; + /* + * AT91SAM926[0/3] Data Write Operation and + * number of bytes erratum + */ + if (cpu_is_at91sam9260 () || cpu_is_at91sam9263()) + if (host->total_length < 12) + host->total_length = 12; host->buffer = dma_alloc_coherent(NULL, host->total_length, &host->physical_address, GFP_KERNEL); @@ -517,7 +630,9 @@ static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command pr_debug("Transmitting %d bytes\n", host->total_length); at91_mci_write(host, ATMEL_PDC_TPR, host->physical_address); - at91_mci_write(host, ATMEL_PDC_TCR, host->total_length / 4); + at91_mci_write(host, ATMEL_PDC_TCR, (data->blksz & 0x3) ? + host->total_length : host->total_length / 4); + ier = AT91_MCI_CMDRDY; } } @@ -552,20 +667,26 @@ static void at91_mci_process_next(struct at91mci_host *host) else if ((!(host->flags & FL_SENT_STOP)) && host->request->stop) { host->flags |= FL_SENT_STOP; at91_mci_send_command(host, host->request->stop); - } - else + } else { + del_timer(&host->timer); + /* the at91rm9200 mci controller hangs after some transfers, + * and the workaround is to reset it after each transfer. + */ + if (cpu_is_at91rm9200()) + at91_reset_host(host); mmc_request_done(host->mmc, host->request); + } } /* * Handle a command that has been completed */ -static void at91_mci_completed_command(struct at91mci_host *host) +static void at91_mci_completed_command(struct at91mci_host *host, unsigned int status) { struct mmc_command *cmd = host->cmd; - unsigned int status; + struct mmc_data *data = cmd->data; - at91_mci_write(host, AT91_MCI_IDR, 0xffffffff); + at91_mci_write(host, AT91_MCI_IDR, 0xffffffff & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB)); cmd->resp[0] = at91_mci_read(host, AT91_MCI_RSPR(0)); cmd->resp[1] = at91_mci_read(host, AT91_MCI_RSPR(1)); @@ -577,25 +698,34 @@ static void at91_mci_completed_command(struct at91mci_host *host) host->buffer = NULL; } - status = at91_mci_read(host, AT91_MCI_SR); - - pr_debug("Status = %08X [%08X %08X %08X %08X]\n", - status, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]); + pr_debug("Status = %08X/%08x [%08X %08X %08X %08X]\n", + status, at91_mci_read(host, AT91_MCI_SR), + cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]); if (status & AT91_MCI_ERRORS) { if ((status & AT91_MCI_RCRCE) && !(mmc_resp_type(cmd) & MMC_RSP_CRC)) { cmd->error = 0; } else { - if (status & (AT91_MCI_RTOE | AT91_MCI_DTOE)) - cmd->error = -ETIMEDOUT; - else if (status & (AT91_MCI_RCRCE | AT91_MCI_DCRCE)) - cmd->error = -EILSEQ; - else - cmd->error = -EIO; + if (status & (AT91_MCI_DTOE | AT91_MCI_DCRCE)) { + if (data) { + if (status & AT91_MCI_DTOE) + data->error = -ETIMEDOUT; + else if (status & AT91_MCI_DCRCE) + data->error = -EILSEQ; + } + } else { + if (status & AT91_MCI_RTOE) + cmd->error = -ETIMEDOUT; + else if (status & AT91_MCI_RCRCE) + cmd->error = -EILSEQ; + else + cmd->error = -EIO; + } - pr_debug("Error detected and set to %d (cmd = %d, retries = %d)\n", - cmd->error, cmd->opcode, cmd->retries); + pr_debug("Error detected and set to %d/%d (cmd = %d, retries = %d)\n", + cmd->error, data ? 
data->error : 0, + cmd->opcode, cmd->retries); } } else @@ -613,6 +743,8 @@ static void at91_mci_request(struct mmc_host *mmc, struct mmc_request *mrq) host->request = mrq; host->flags = 0; + mod_timer(&host->timer, jiffies + HZ); + at91_mci_process_next(host); } @@ -736,6 +868,7 @@ static irqreturn_t at91_mci_irq(int irq, void *devid) if (int_status & AT91_MCI_NOTBUSY) { pr_debug("Card is ready\n"); + at91_mci_update_bytes_xfered(host); completed = 1; } @@ -744,9 +877,21 @@ static irqreturn_t at91_mci_irq(int irq, void *devid) if (int_status & AT91_MCI_BLKE) { pr_debug("Block transfer has ended\n"); - completed = 1; + if (host->request->data && host->request->data->blocks > 1) { + /* multi block write : complete multi write + * command and send stop */ + completed = 1; + } else { + at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY); + } } + if (int_status & AT91_MCI_SDIOIRQA) + mmc_signal_sdio_irq(host->mmc); + + if (int_status & AT91_MCI_SDIOIRQB) + mmc_signal_sdio_irq(host->mmc); + if (int_status & AT91_MCI_TXRDY) pr_debug("Ready to transmit\n"); @@ -761,10 +906,10 @@ static irqreturn_t at91_mci_irq(int irq, void *devid) if (completed) { pr_debug("Completed command\n"); - at91_mci_write(host, AT91_MCI_IDR, 0xffffffff); - at91_mci_completed_command(host); + at91_mci_write(host, AT91_MCI_IDR, 0xffffffff & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB)); + at91_mci_completed_command(host, int_status); } else - at91_mci_write(host, AT91_MCI_IDR, int_status); + at91_mci_write(host, AT91_MCI_IDR, int_status & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB)); return IRQ_HANDLED; } @@ -793,25 +938,33 @@ static irqreturn_t at91_mmc_det_irq(int irq, void *_host) static int at91_mci_get_ro(struct mmc_host *mmc) { - int read_only = 0; struct at91mci_host *host = mmc_priv(mmc); - if (host->board->wp_pin) { - read_only = gpio_get_value(host->board->wp_pin); - printk(KERN_WARNING "%s: card is %s\n", mmc_hostname(mmc), - (read_only ? "read-only" : "read-write") ); - } - else { - printk(KERN_WARNING "%s: host does not support reading read-only " - "switch. Assuming write-enable.\n", mmc_hostname(mmc)); - } - return read_only; + if (host->board->wp_pin) + return !!gpio_get_value(host->board->wp_pin); + /* + * Board doesn't support read only detection; let the mmc core + * decide what to do. + */ + return -ENOSYS; +} + +static void at91_mci_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ + struct at91mci_host *host = mmc_priv(mmc); + + pr_debug("%s: sdio_irq %c : %s\n", mmc_hostname(host->mmc), + host->board->slot_b ? 'B':'A', enable ? "enable" : "disable"); + at91_mci_write(host, enable ? AT91_MCI_IER : AT91_MCI_IDR, + host->board->slot_b ? 
AT91_MCI_SDIOIRQB : AT91_MCI_SDIOIRQA); + } static const struct mmc_host_ops at91_mci_ops = { .request = at91_mci_request, .set_ios = at91_mci_set_ios, .get_ro = at91_mci_get_ro, + .enable_sdio_irq = at91_mci_enable_sdio_irq, }; /* @@ -842,6 +995,7 @@ static int __init at91_mci_probe(struct platform_device *pdev) mmc->f_min = 375000; mmc->f_max = 25000000; mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + mmc->caps = MMC_CAP_SDIO_IRQ; mmc->max_blk_size = 4095; mmc->max_blk_count = mmc->max_req_size; @@ -935,6 +1089,8 @@ static int __init at91_mci_probe(struct platform_device *pdev) mmc_add_host(mmc); + setup_timer(&host->timer, at91_timeout_timer, (unsigned long)host); + /* * monitor card insertion/removal if we can */ @@ -995,6 +1151,7 @@ static int __exit at91_mci_remove(struct platform_device *pdev) } at91_mci_disable(host); + del_timer_sync(&host->timer); mmc_remove_host(mmc); free_irq(host->irq, host); diff --git a/drivers/mmc/host/atmel-mci-regs.h b/drivers/mmc/host/atmel-mci-regs.h new file mode 100644 index 00000000000..a9a5657706c --- /dev/null +++ b/drivers/mmc/host/atmel-mci-regs.h @@ -0,0 +1,91 @@ +/* + * Atmel MultiMedia Card Interface driver + * + * Copyright (C) 2004-2006 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __DRIVERS_MMC_ATMEL_MCI_H__ +#define __DRIVERS_MMC_ATMEL_MCI_H__ + +/* MCI Register Definitions */ +#define MCI_CR 0x0000 /* Control */ +# define MCI_CR_MCIEN ( 1 << 0) /* MCI Enable */ +# define MCI_CR_MCIDIS ( 1 << 1) /* MCI Disable */ +# define MCI_CR_SWRST ( 1 << 7) /* Software Reset */ +#define MCI_MR 0x0004 /* Mode */ +# define MCI_MR_CLKDIV(x) ((x) << 0) /* Clock Divider */ +# define MCI_MR_RDPROOF ( 1 << 11) /* Read Proof */ +# define MCI_MR_WRPROOF ( 1 << 12) /* Write Proof */ +#define MCI_DTOR 0x0008 /* Data Timeout */ +# define MCI_DTOCYC(x) ((x) << 0) /* Data Timeout Cycles */ +# define MCI_DTOMUL(x) ((x) << 4) /* Data Timeout Multiplier */ +#define MCI_SDCR 0x000c /* SD Card / SDIO */ +# define MCI_SDCSEL_SLOT_A ( 0 << 0) /* Select SD slot A */ +# define MCI_SDCSEL_SLOT_B ( 1 << 0) /* Select SD slot A */ +# define MCI_SDCBUS_1BIT ( 0 << 7) /* 1-bit data bus */ +# define MCI_SDCBUS_4BIT ( 1 << 7) /* 4-bit data bus */ +#define MCI_ARGR 0x0010 /* Command Argument */ +#define MCI_CMDR 0x0014 /* Command */ +# define MCI_CMDR_CMDNB(x) ((x) << 0) /* Command Opcode */ +# define MCI_CMDR_RSPTYP_NONE ( 0 << 6) /* No response */ +# define MCI_CMDR_RSPTYP_48BIT ( 1 << 6) /* 48-bit response */ +# define MCI_CMDR_RSPTYP_136BIT ( 2 << 6) /* 136-bit response */ +# define MCI_CMDR_SPCMD_INIT ( 1 << 8) /* Initialization command */ +# define MCI_CMDR_SPCMD_SYNC ( 2 << 8) /* Synchronized command */ +# define MCI_CMDR_SPCMD_INT ( 4 << 8) /* Interrupt command */ +# define MCI_CMDR_SPCMD_INTRESP ( 5 << 8) /* Interrupt response */ +# define MCI_CMDR_OPDCMD ( 1 << 11) /* Open Drain */ +# define MCI_CMDR_MAXLAT_5CYC ( 0 << 12) /* Max latency 5 cycles */ +# define MCI_CMDR_MAXLAT_64CYC ( 1 << 12) /* Max latency 64 cycles */ +# define MCI_CMDR_START_XFER ( 1 << 16) /* Start data transfer */ +# define MCI_CMDR_STOP_XFER ( 2 << 16) /* Stop data transfer */ +# define MCI_CMDR_TRDIR_WRITE ( 0 << 18) /* Write data */ +# define MCI_CMDR_TRDIR_READ ( 1 << 18) /* Read data */ +# define MCI_CMDR_BLOCK ( 0 << 19) /* Single-block transfer */ +# define MCI_CMDR_MULTI_BLOCK ( 1 << 19) /* Multi-block transfer */ 
+# define MCI_CMDR_STREAM ( 2 << 19) /* MMC Stream transfer */ +# define MCI_CMDR_SDIO_BYTE ( 4 << 19) /* SDIO Byte transfer */ +# define MCI_CMDR_SDIO_BLOCK ( 5 << 19) /* SDIO Block transfer */ +# define MCI_CMDR_SDIO_SUSPEND ( 1 << 24) /* SDIO Suspend Command */ +# define MCI_CMDR_SDIO_RESUME ( 2 << 24) /* SDIO Resume Command */ +#define MCI_BLKR 0x0018 /* Block */ +# define MCI_BCNT(x) ((x) << 0) /* Data Block Count */ +# define MCI_BLKLEN(x) ((x) << 16) /* Data Block Length */ +#define MCI_RSPR 0x0020 /* Response 0 */ +#define MCI_RSPR1 0x0024 /* Response 1 */ +#define MCI_RSPR2 0x0028 /* Response 2 */ +#define MCI_RSPR3 0x002c /* Response 3 */ +#define MCI_RDR 0x0030 /* Receive Data */ +#define MCI_TDR 0x0034 /* Transmit Data */ +#define MCI_SR 0x0040 /* Status */ +#define MCI_IER 0x0044 /* Interrupt Enable */ +#define MCI_IDR 0x0048 /* Interrupt Disable */ +#define MCI_IMR 0x004c /* Interrupt Mask */ +# define MCI_CMDRDY ( 1 << 0) /* Command Ready */ +# define MCI_RXRDY ( 1 << 1) /* Receiver Ready */ +# define MCI_TXRDY ( 1 << 2) /* Transmitter Ready */ +# define MCI_BLKE ( 1 << 3) /* Data Block Ended */ +# define MCI_DTIP ( 1 << 4) /* Data Transfer In Progress */ +# define MCI_NOTBUSY ( 1 << 5) /* Data Not Busy */ +# define MCI_SDIOIRQA ( 1 << 8) /* SDIO IRQ in slot A */ +# define MCI_SDIOIRQB ( 1 << 9) /* SDIO IRQ in slot B */ +# define MCI_RINDE ( 1 << 16) /* Response Index Error */ +# define MCI_RDIRE ( 1 << 17) /* Response Direction Error */ +# define MCI_RCRCE ( 1 << 18) /* Response CRC Error */ +# define MCI_RENDE ( 1 << 19) /* Response End Bit Error */ +# define MCI_RTOE ( 1 << 20) /* Response Time-Out Error */ +# define MCI_DCRCE ( 1 << 21) /* Data CRC Error */ +# define MCI_DTOE ( 1 << 22) /* Data Time-Out Error */ +# define MCI_OVRE ( 1 << 30) /* RX Overrun Error */ +# define MCI_UNRE ( 1 << 31) /* TX Underrun Error */ + +/* Register access macros */ +#define mci_readl(port,reg) \ + __raw_readl((port)->regs + MCI_##reg) +#define mci_writel(port,reg,value) \ + __raw_writel((value), (port)->regs + MCI_##reg) + +#endif /* __DRIVERS_MMC_ATMEL_MCI_H__ */ diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c new file mode 100644 index 00000000000..cce873c5a14 --- /dev/null +++ b/drivers/mmc/host/atmel-mci.c @@ -0,0 +1,981 @@ +/* + * Atmel MultiMedia Card Interface driver + * + * Copyright (C) 2004-2008 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include <linux/blkdev.h> +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/ioport.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/scatterlist.h> + +#include <linux/mmc/host.h> + +#include <asm/atmel-mci.h> +#include <asm/io.h> +#include <asm/unaligned.h> + +#include <asm/arch/board.h> +#include <asm/arch/gpio.h> + +#include "atmel-mci-regs.h" + +#define ATMCI_DATA_ERROR_FLAGS (MCI_DCRCE | MCI_DTOE | MCI_OVRE | MCI_UNRE) + +enum { + EVENT_CMD_COMPLETE = 0, + EVENT_DATA_ERROR, + EVENT_DATA_COMPLETE, + EVENT_STOP_SENT, + EVENT_STOP_COMPLETE, + EVENT_XFER_COMPLETE, +}; + +struct atmel_mci { + struct mmc_host *mmc; + void __iomem *regs; + + struct scatterlist *sg; + unsigned int pio_offset; + + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + + u32 cmd_status; + u32 data_status; + u32 stop_status; + u32 stop_cmdr; + + u32 mode_reg; + u32 sdc_reg; + + struct tasklet_struct tasklet; + unsigned long pending_events; + unsigned long completed_events; + + int present; + int detect_pin; + int wp_pin; + + /* For detect pin debouncing */ + struct timer_list detect_timer; + + unsigned long bus_hz; + unsigned long mapbase; + struct clk *mck; + struct platform_device *pdev; +}; + +#define atmci_is_completed(host, event) \ + test_bit(event, &host->completed_events) +#define atmci_test_and_clear_pending(host, event) \ + test_and_clear_bit(event, &host->pending_events) +#define atmci_test_and_set_completed(host, event) \ + test_and_set_bit(event, &host->completed_events) +#define atmci_set_completed(host, event) \ + set_bit(event, &host->completed_events) +#define atmci_set_pending(host, event) \ + set_bit(event, &host->pending_events) +#define atmci_clear_pending(host, event) \ + clear_bit(event, &host->pending_events) + + +static void atmci_enable(struct atmel_mci *host) +{ + clk_enable(host->mck); + mci_writel(host, CR, MCI_CR_MCIEN); + mci_writel(host, MR, host->mode_reg); + mci_writel(host, SDCR, host->sdc_reg); +} + +static void atmci_disable(struct atmel_mci *host) +{ + mci_writel(host, CR, MCI_CR_SWRST); + + /* Stall until write is complete, then disable the bus clock */ + mci_readl(host, SR); + clk_disable(host->mck); +} + +static inline unsigned int ns_to_clocks(struct atmel_mci *host, + unsigned int ns) +{ + return (ns * (host->bus_hz / 1000000) + 999) / 1000; +} + +static void atmci_set_timeout(struct atmel_mci *host, + struct mmc_data *data) +{ + static unsigned dtomul_to_shift[] = { + 0, 4, 7, 8, 10, 12, 16, 20 + }; + unsigned timeout; + unsigned dtocyc; + unsigned dtomul; + + timeout = ns_to_clocks(host, data->timeout_ns) + data->timeout_clks; + + for (dtomul = 0; dtomul < 8; dtomul++) { + unsigned shift = dtomul_to_shift[dtomul]; + dtocyc = (timeout + (1 << shift) - 1) >> shift; + if (dtocyc < 15) + break; + } + + if (dtomul >= 8) { + dtomul = 7; + dtocyc = 15; + } + + dev_vdbg(&host->mmc->class_dev, "setting timeout to %u cycles\n", + dtocyc << dtomul_to_shift[dtomul]); + mci_writel(host, DTOR, (MCI_DTOMUL(dtomul) | MCI_DTOCYC(dtocyc))); +} + +/* + * Return mask with command flags to be enabled for this command. 
+ */ +static u32 atmci_prepare_command(struct mmc_host *mmc, + struct mmc_command *cmd) +{ + struct mmc_data *data; + u32 cmdr; + + cmd->error = -EINPROGRESS; + + cmdr = MCI_CMDR_CMDNB(cmd->opcode); + + if (cmd->flags & MMC_RSP_PRESENT) { + if (cmd->flags & MMC_RSP_136) + cmdr |= MCI_CMDR_RSPTYP_136BIT; + else + cmdr |= MCI_CMDR_RSPTYP_48BIT; + } + + /* + * This should really be MAXLAT_5 for CMD2 and ACMD41, but + * it's too difficult to determine whether this is an ACMD or + * not. Better make it 64. + */ + cmdr |= MCI_CMDR_MAXLAT_64CYC; + + if (mmc->ios.bus_mode == MMC_BUSMODE_OPENDRAIN) + cmdr |= MCI_CMDR_OPDCMD; + + data = cmd->data; + if (data) { + cmdr |= MCI_CMDR_START_XFER; + if (data->flags & MMC_DATA_STREAM) + cmdr |= MCI_CMDR_STREAM; + else if (data->blocks > 1) + cmdr |= MCI_CMDR_MULTI_BLOCK; + else + cmdr |= MCI_CMDR_BLOCK; + + if (data->flags & MMC_DATA_READ) + cmdr |= MCI_CMDR_TRDIR_READ; + } + + return cmdr; +} + +static void atmci_start_command(struct atmel_mci *host, + struct mmc_command *cmd, + u32 cmd_flags) +{ + /* Must read host->cmd after testing event flags */ + smp_rmb(); + WARN_ON(host->cmd); + host->cmd = cmd; + + dev_vdbg(&host->mmc->class_dev, + "start command: ARGR=0x%08x CMDR=0x%08x\n", + cmd->arg, cmd_flags); + + mci_writel(host, ARGR, cmd->arg); + mci_writel(host, CMDR, cmd_flags); +} + +static void send_stop_cmd(struct mmc_host *mmc, struct mmc_data *data) +{ + struct atmel_mci *host = mmc_priv(mmc); + + atmci_start_command(host, data->stop, host->stop_cmdr); + mci_writel(host, IER, MCI_CMDRDY); +} + +static void atmci_request_end(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct atmel_mci *host = mmc_priv(mmc); + + WARN_ON(host->cmd || host->data); + host->mrq = NULL; + + atmci_disable(host); + + mmc_request_done(mmc, mrq); +} + +/* + * Returns a mask of interrupt flags to be enabled after the whole + * request has been prepared. + */ +static u32 atmci_submit_data(struct mmc_host *mmc, struct mmc_data *data) +{ + struct atmel_mci *host = mmc_priv(mmc); + u32 iflags; + + data->error = -EINPROGRESS; + + WARN_ON(host->data); + host->sg = NULL; + host->data = data; + + mci_writel(host, BLKR, MCI_BCNT(data->blocks) + | MCI_BLKLEN(data->blksz)); + dev_vdbg(&mmc->class_dev, "BLKR=0x%08x\n", + MCI_BCNT(data->blocks) | MCI_BLKLEN(data->blksz)); + + iflags = ATMCI_DATA_ERROR_FLAGS; + host->sg = data->sg; + host->pio_offset = 0; + if (data->flags & MMC_DATA_READ) + iflags |= MCI_RXRDY; + else + iflags |= MCI_TXRDY; + + return iflags; +} + +static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct atmel_mci *host = mmc_priv(mmc); + struct mmc_data *data; + struct mmc_command *cmd; + u32 iflags; + u32 cmdflags = 0; + + iflags = mci_readl(host, IMR); + if (iflags) + dev_warn(&mmc->class_dev, "WARNING: IMR=0x%08x\n", + mci_readl(host, IMR)); + + WARN_ON(host->mrq != NULL); + + /* + * We may "know" the card is gone even though there's still an + * electrical connection. If so, we really need to communicate + * this to the MMC core since there won't be any more + * interrupts as the card is completely removed. Otherwise, + * the MMC core might believe the card is still there even + * though the card was just removed very slowly. + */ + if (!host->present) { + mrq->cmd->error = -ENOMEDIUM; + mmc_request_done(mmc, mrq); + return; + } + + host->mrq = mrq; + host->pending_events = 0; + host->completed_events = 0; + + atmci_enable(host); + + /* We don't support multiple blocks of weird lengths. 
*/ + data = mrq->data; + if (data) { + if (data->blocks > 1 && data->blksz & 3) + goto fail; + atmci_set_timeout(host, data); + } + + iflags = MCI_CMDRDY; + cmd = mrq->cmd; + cmdflags = atmci_prepare_command(mmc, cmd); + atmci_start_command(host, cmd, cmdflags); + + if (data) + iflags |= atmci_submit_data(mmc, data); + + if (mrq->stop) { + host->stop_cmdr = atmci_prepare_command(mmc, mrq->stop); + host->stop_cmdr |= MCI_CMDR_STOP_XFER; + if (!(data->flags & MMC_DATA_WRITE)) + host->stop_cmdr |= MCI_CMDR_TRDIR_READ; + if (data->flags & MMC_DATA_STREAM) + host->stop_cmdr |= MCI_CMDR_STREAM; + else + host->stop_cmdr |= MCI_CMDR_MULTI_BLOCK; + } + + /* + * We could have enabled interrupts earlier, but I suspect + * that would open up a nice can of interesting race + * conditions (e.g. command and data complete, but stop not + * prepared yet.) + */ + mci_writel(host, IER, iflags); + + return; + +fail: + atmci_disable(host); + host->mrq = NULL; + mrq->cmd->error = -EINVAL; + mmc_request_done(mmc, mrq); +} + +static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct atmel_mci *host = mmc_priv(mmc); + + if (ios->clock) { + u32 clkdiv; + + /* Set clock rate */ + clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * ios->clock) - 1; + if (clkdiv > 255) { + dev_warn(&mmc->class_dev, + "clock %u too slow; using %lu\n", + ios->clock, host->bus_hz / (2 * 256)); + clkdiv = 255; + } + + host->mode_reg = MCI_MR_CLKDIV(clkdiv) | MCI_MR_WRPROOF + | MCI_MR_RDPROOF; + } + + switch (ios->bus_width) { + case MMC_BUS_WIDTH_1: + host->sdc_reg = 0; + break; + case MMC_BUS_WIDTH_4: + host->sdc_reg = MCI_SDCBUS_4BIT; + break; + } + + switch (ios->power_mode) { + case MMC_POWER_ON: + /* Send init sequence (74 clock cycles) */ + atmci_enable(host); + mci_writel(host, CMDR, MCI_CMDR_SPCMD_INIT); + while (!(mci_readl(host, SR) & MCI_CMDRDY)) + cpu_relax(); + atmci_disable(host); + break; + default: + /* + * TODO: None of the currently available AVR32-based + * boards allow MMC power to be turned off. Implement + * power control when this can be tested properly. + */ + break; + } +} + +static int atmci_get_ro(struct mmc_host *mmc) +{ + int read_only = 0; + struct atmel_mci *host = mmc_priv(mmc); + + if (host->wp_pin >= 0) { + read_only = gpio_get_value(host->wp_pin); + dev_dbg(&mmc->class_dev, "card is %s\n", + read_only ? "read-only" : "read-write"); + } else { + dev_dbg(&mmc->class_dev, + "no pin for checking read-only switch." 
+ " Assuming write-enable.\n"); + } + + return read_only; +} + +static struct mmc_host_ops atmci_ops = { + .request = atmci_request, + .set_ios = atmci_set_ios, + .get_ro = atmci_get_ro, +}; + +static void atmci_command_complete(struct atmel_mci *host, + struct mmc_command *cmd, u32 status) +{ + /* Read the response from the card (up to 16 bytes) */ + cmd->resp[0] = mci_readl(host, RSPR); + cmd->resp[1] = mci_readl(host, RSPR); + cmd->resp[2] = mci_readl(host, RSPR); + cmd->resp[3] = mci_readl(host, RSPR); + + if (status & MCI_RTOE) + cmd->error = -ETIMEDOUT; + else if ((cmd->flags & MMC_RSP_CRC) && (status & MCI_RCRCE)) + cmd->error = -EILSEQ; + else if (status & (MCI_RINDE | MCI_RDIRE | MCI_RENDE)) + cmd->error = -EIO; + else + cmd->error = 0; + + if (cmd->error) { + dev_dbg(&host->mmc->class_dev, + "command error: status=0x%08x\n", status); + + if (cmd->data) { + host->data = NULL; + mci_writel(host, IDR, MCI_NOTBUSY + | MCI_TXRDY | MCI_RXRDY + | ATMCI_DATA_ERROR_FLAGS); + } + } +} + +static void atmci_detect_change(unsigned long data) +{ + struct atmel_mci *host = (struct atmel_mci *)data; + struct mmc_request *mrq = host->mrq; + int present; + + /* + * atmci_remove() sets detect_pin to -1 before freeing the + * interrupt. We must not re-enable the interrupt if it has + * been freed. + */ + smp_rmb(); + if (host->detect_pin < 0) + return; + + enable_irq(gpio_to_irq(host->detect_pin)); + present = !gpio_get_value(host->detect_pin); + + dev_vdbg(&host->pdev->dev, "detect change: %d (was %d)\n", + present, host->present); + + if (present != host->present) { + dev_dbg(&host->mmc->class_dev, "card %s\n", + present ? "inserted" : "removed"); + host->present = present; + + /* Reset controller if card is gone */ + if (!present) { + mci_writel(host, CR, MCI_CR_SWRST); + mci_writel(host, IDR, ~0UL); + mci_writel(host, CR, MCI_CR_MCIEN); + } + + /* Clean up queue if present */ + if (mrq) { + /* + * Reset controller to terminate any ongoing + * commands or data transfers. 
+ */ + mci_writel(host, CR, MCI_CR_SWRST); + + if (!atmci_is_completed(host, EVENT_CMD_COMPLETE)) + mrq->cmd->error = -ENOMEDIUM; + + if (mrq->data && !atmci_is_completed(host, + EVENT_DATA_COMPLETE)) { + host->data = NULL; + mrq->data->error = -ENOMEDIUM; + } + if (mrq->stop && !atmci_is_completed(host, + EVENT_STOP_COMPLETE)) + mrq->stop->error = -ENOMEDIUM; + + host->cmd = NULL; + atmci_request_end(host->mmc, mrq); + } + + mmc_detect_change(host->mmc, 0); + } +} + +static void atmci_tasklet_func(unsigned long priv) +{ + struct mmc_host *mmc = (struct mmc_host *)priv; + struct atmel_mci *host = mmc_priv(mmc); + struct mmc_request *mrq = host->mrq; + struct mmc_data *data = host->data; + + dev_vdbg(&mmc->class_dev, + "tasklet: pending/completed/mask %lx/%lx/%x\n", + host->pending_events, host->completed_events, + mci_readl(host, IMR)); + + if (atmci_test_and_clear_pending(host, EVENT_CMD_COMPLETE)) { + /* + * host->cmd must be set to NULL before the interrupt + * handler sees EVENT_CMD_COMPLETE + */ + host->cmd = NULL; + smp_wmb(); + atmci_set_completed(host, EVENT_CMD_COMPLETE); + atmci_command_complete(host, mrq->cmd, host->cmd_status); + + if (!mrq->cmd->error && mrq->stop + && atmci_is_completed(host, EVENT_XFER_COMPLETE) + && !atmci_test_and_set_completed(host, + EVENT_STOP_SENT)) + send_stop_cmd(host->mmc, mrq->data); + } + if (atmci_test_and_clear_pending(host, EVENT_STOP_COMPLETE)) { + /* + * host->cmd must be set to NULL before the interrupt + * handler sees EVENT_STOP_COMPLETE + */ + host->cmd = NULL; + smp_wmb(); + atmci_set_completed(host, EVENT_STOP_COMPLETE); + atmci_command_complete(host, mrq->stop, host->stop_status); + } + if (atmci_test_and_clear_pending(host, EVENT_DATA_ERROR)) { + u32 status = host->data_status; + + dev_vdbg(&mmc->class_dev, "data error: status=%08x\n", status); + + atmci_set_completed(host, EVENT_DATA_ERROR); + atmci_set_completed(host, EVENT_DATA_COMPLETE); + + if (status & MCI_DTOE) { + dev_dbg(&mmc->class_dev, + "data timeout error\n"); + data->error = -ETIMEDOUT; + } else if (status & MCI_DCRCE) { + dev_dbg(&mmc->class_dev, "data CRC error\n"); + data->error = -EILSEQ; + } else { + dev_dbg(&mmc->class_dev, + "data FIFO error (status=%08x)\n", + status); + data->error = -EIO; + } + + if (host->present && data->stop + && atmci_is_completed(host, EVENT_CMD_COMPLETE) + && !atmci_test_and_set_completed( + host, EVENT_STOP_SENT)) + send_stop_cmd(host->mmc, data); + + host->data = NULL; + } + if (atmci_test_and_clear_pending(host, EVENT_DATA_COMPLETE)) { + atmci_set_completed(host, EVENT_DATA_COMPLETE); + + if (!atmci_is_completed(host, EVENT_DATA_ERROR)) { + data->bytes_xfered = data->blocks * data->blksz; + data->error = 0; + } + + host->data = NULL; + } + + if (host->mrq && !host->cmd && !host->data) + atmci_request_end(mmc, host->mrq); +} + +static void atmci_read_data_pio(struct atmel_mci *host) +{ + struct scatterlist *sg = host->sg; + void *buf = sg_virt(sg); + unsigned int offset = host->pio_offset; + struct mmc_data *data = host->data; + u32 value; + u32 status; + unsigned int nbytes = 0; + + do { + value = mci_readl(host, RDR); + if (likely(offset + 4 <= sg->length)) { + put_unaligned(value, (u32 *)(buf + offset)); + + offset += 4; + nbytes += 4; + + if (offset == sg->length) { + host->sg = sg = sg_next(sg); + if (!sg) + goto done; + + offset = 0; + buf = sg_virt(sg); + } + } else { + unsigned int remaining = sg->length - offset; + memcpy(buf + offset, &value, remaining); + nbytes += remaining; + + flush_dcache_page(sg_page(sg)); + host->sg = 
sg = sg_next(sg); + if (!sg) + goto done; + + offset = 4 - remaining; + buf = sg_virt(sg); + memcpy(buf, (u8 *)&value + remaining, offset); + nbytes += offset; + } + + status = mci_readl(host, SR); + if (status & ATMCI_DATA_ERROR_FLAGS) { + mci_writel(host, IDR, (MCI_NOTBUSY | MCI_RXRDY + | ATMCI_DATA_ERROR_FLAGS)); + host->data_status = status; + atmci_set_pending(host, EVENT_DATA_ERROR); + tasklet_schedule(&host->tasklet); + break; + } + } while (status & MCI_RXRDY); + + host->pio_offset = offset; + data->bytes_xfered += nbytes; + + return; + +done: + mci_writel(host, IDR, MCI_RXRDY); + mci_writel(host, IER, MCI_NOTBUSY); + data->bytes_xfered += nbytes; + atmci_set_completed(host, EVENT_XFER_COMPLETE); + if (data->stop && atmci_is_completed(host, EVENT_CMD_COMPLETE) + && !atmci_test_and_set_completed(host, EVENT_STOP_SENT)) + send_stop_cmd(host->mmc, data); +} + +static void atmci_write_data_pio(struct atmel_mci *host) +{ + struct scatterlist *sg = host->sg; + void *buf = sg_virt(sg); + unsigned int offset = host->pio_offset; + struct mmc_data *data = host->data; + u32 value; + u32 status; + unsigned int nbytes = 0; + + do { + if (likely(offset + 4 <= sg->length)) { + value = get_unaligned((u32 *)(buf + offset)); + mci_writel(host, TDR, value); + + offset += 4; + nbytes += 4; + if (offset == sg->length) { + host->sg = sg = sg_next(sg); + if (!sg) + goto done; + + offset = 0; + buf = sg_virt(sg); + } + } else { + unsigned int remaining = sg->length - offset; + + value = 0; + memcpy(&value, buf + offset, remaining); + nbytes += remaining; + + host->sg = sg = sg_next(sg); + if (!sg) { + mci_writel(host, TDR, value); + goto done; + } + + offset = 4 - remaining; + buf = sg_virt(sg); + memcpy((u8 *)&value + remaining, buf, offset); + mci_writel(host, TDR, value); + nbytes += offset; + } + + status = mci_readl(host, SR); + if (status & ATMCI_DATA_ERROR_FLAGS) { + mci_writel(host, IDR, (MCI_NOTBUSY | MCI_TXRDY + | ATMCI_DATA_ERROR_FLAGS)); + host->data_status = status; + atmci_set_pending(host, EVENT_DATA_ERROR); + tasklet_schedule(&host->tasklet); + break; + } + } while (status & MCI_TXRDY); + + host->pio_offset = offset; + data->bytes_xfered += nbytes; + + return; + +done: + mci_writel(host, IDR, MCI_TXRDY); + mci_writel(host, IER, MCI_NOTBUSY); + data->bytes_xfered += nbytes; + atmci_set_completed(host, EVENT_XFER_COMPLETE); + if (data->stop && atmci_is_completed(host, EVENT_CMD_COMPLETE) + && !atmci_test_and_set_completed(host, EVENT_STOP_SENT)) + send_stop_cmd(host->mmc, data); +} + +static void atmci_cmd_interrupt(struct mmc_host *mmc, u32 status) +{ + struct atmel_mci *host = mmc_priv(mmc); + + mci_writel(host, IDR, MCI_CMDRDY); + + if (atmci_is_completed(host, EVENT_STOP_SENT)) { + host->stop_status = status; + atmci_set_pending(host, EVENT_STOP_COMPLETE); + } else { + host->cmd_status = status; + atmci_set_pending(host, EVENT_CMD_COMPLETE); + } + + tasklet_schedule(&host->tasklet); +} + +static irqreturn_t atmci_interrupt(int irq, void *dev_id) +{ + struct mmc_host *mmc = dev_id; + struct atmel_mci *host = mmc_priv(mmc); + u32 status, mask, pending; + unsigned int pass_count = 0; + + spin_lock(&mmc->lock); + + do { + status = mci_readl(host, SR); + mask = mci_readl(host, IMR); + pending = status & mask; + if (!pending) + break; + + if (pending & ATMCI_DATA_ERROR_FLAGS) { + mci_writel(host, IDR, ATMCI_DATA_ERROR_FLAGS + | MCI_RXRDY | MCI_TXRDY); + pending &= mci_readl(host, IMR); + host->data_status = status; + atmci_set_pending(host, EVENT_DATA_ERROR); + 
tasklet_schedule(&host->tasklet); + } + if (pending & MCI_NOTBUSY) { + mci_writel(host, IDR, (MCI_NOTBUSY + | ATMCI_DATA_ERROR_FLAGS)); + atmci_set_pending(host, EVENT_DATA_COMPLETE); + tasklet_schedule(&host->tasklet); + } + if (pending & MCI_RXRDY) + atmci_read_data_pio(host); + if (pending & MCI_TXRDY) + atmci_write_data_pio(host); + + if (pending & MCI_CMDRDY) + atmci_cmd_interrupt(mmc, status); + } while (pass_count++ < 5); + + spin_unlock(&mmc->lock); + + return pass_count ? IRQ_HANDLED : IRQ_NONE; +} + +static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id) +{ + struct mmc_host *mmc = dev_id; + struct atmel_mci *host = mmc_priv(mmc); + + /* + * Disable interrupts until the pin has stabilized and check + * the state then. Use mod_timer() since we may be in the + * middle of the timer routine when this interrupt triggers. + */ + disable_irq_nosync(irq); + mod_timer(&host->detect_timer, jiffies + msecs_to_jiffies(20)); + + return IRQ_HANDLED; +} + +static int __init atmci_probe(struct platform_device *pdev) +{ + struct mci_platform_data *pdata; + struct atmel_mci *host; + struct mmc_host *mmc; + struct resource *regs; + int irq; + int ret; + + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!regs) + return -ENXIO; + pdata = pdev->dev.platform_data; + if (!pdata) + return -ENXIO; + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + mmc = mmc_alloc_host(sizeof(struct atmel_mci), &pdev->dev); + if (!mmc) + return -ENOMEM; + + host = mmc_priv(mmc); + host->pdev = pdev; + host->mmc = mmc; + host->detect_pin = pdata->detect_pin; + host->wp_pin = pdata->wp_pin; + + host->mck = clk_get(&pdev->dev, "mci_clk"); + if (IS_ERR(host->mck)) { + ret = PTR_ERR(host->mck); + goto err_clk_get; + } + + ret = -ENOMEM; + host->regs = ioremap(regs->start, regs->end - regs->start + 1); + if (!host->regs) + goto err_ioremap; + + clk_enable(host->mck); + mci_writel(host, CR, MCI_CR_SWRST); + host->bus_hz = clk_get_rate(host->mck); + clk_disable(host->mck); + + host->mapbase = regs->start; + + mmc->ops = &atmci_ops; + mmc->f_min = (host->bus_hz + 511) / 512; + mmc->f_max = host->bus_hz / 2; + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + mmc->caps |= MMC_CAP_4_BIT_DATA; + + mmc->max_hw_segs = 64; + mmc->max_phys_segs = 64; + mmc->max_req_size = 32768 * 512; + mmc->max_blk_size = 32768; + mmc->max_blk_count = 512; + + tasklet_init(&host->tasklet, atmci_tasklet_func, (unsigned long)mmc); + + ret = request_irq(irq, atmci_interrupt, 0, pdev->dev.bus_id, mmc); + if (ret) + goto err_request_irq; + + /* Assume card is present if we don't have a detect pin */ + host->present = 1; + if (host->detect_pin >= 0) { + if (gpio_request(host->detect_pin, "mmc_detect")) { + dev_dbg(&mmc->class_dev, "no detect pin available\n"); + host->detect_pin = -1; + } else { + host->present = !gpio_get_value(host->detect_pin); + } + } + if (host->wp_pin >= 0) { + if (gpio_request(host->wp_pin, "mmc_wp")) { + dev_dbg(&mmc->class_dev, "no WP pin available\n"); + host->wp_pin = -1; + } + } + + platform_set_drvdata(pdev, host); + + mmc_add_host(mmc); + + if (host->detect_pin >= 0) { + setup_timer(&host->detect_timer, atmci_detect_change, + (unsigned long)host); + + ret = request_irq(gpio_to_irq(host->detect_pin), + atmci_detect_interrupt, + IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, + "mmc-detect", mmc); + if (ret) { + dev_dbg(&mmc->class_dev, + "could not request IRQ %d for detect pin\n", + gpio_to_irq(host->detect_pin)); + gpio_free(host->detect_pin); + host->detect_pin = -1; + } + } + + 
dev_info(&mmc->class_dev, + "Atmel MCI controller at 0x%08lx irq %d\n", + host->mapbase, irq); + + return 0; + +err_request_irq: + iounmap(host->regs); +err_ioremap: + clk_put(host->mck); +err_clk_get: + mmc_free_host(mmc); + return ret; +} + +static int __exit atmci_remove(struct platform_device *pdev) +{ + struct atmel_mci *host = platform_get_drvdata(pdev); + + platform_set_drvdata(pdev, NULL); + + if (host) { + if (host->detect_pin >= 0) { + int pin = host->detect_pin; + + /* Make sure the timer doesn't enable the interrupt */ + host->detect_pin = -1; + smp_wmb(); + + free_irq(gpio_to_irq(pin), host->mmc); + del_timer_sync(&host->detect_timer); + gpio_free(pin); + } + + mmc_remove_host(host->mmc); + + clk_enable(host->mck); + mci_writel(host, IDR, ~0UL); + mci_writel(host, CR, MCI_CR_MCIDIS); + mci_readl(host, SR); + clk_disable(host->mck); + + if (host->wp_pin >= 0) + gpio_free(host->wp_pin); + + free_irq(platform_get_irq(pdev, 0), host->mmc); + iounmap(host->regs); + + clk_put(host->mck); + + mmc_free_host(host->mmc); + } + return 0; +} + +static struct platform_driver atmci_driver = { + .remove = __exit_p(atmci_remove), + .driver = { + .name = "atmel_mci", + }, +}; + +static int __init atmci_init(void) +{ + return platform_driver_probe(&atmci_driver, atmci_probe); +} + +static void __exit atmci_exit(void) +{ + platform_driver_unregister(&atmci_driver); +} + +module_init(atmci_init); +module_exit(atmci_exit); + +MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver"); +MODULE_AUTHOR("Haavard Skinnemoen <haavard.skinnemoen@atmel.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c index cc5f7bc546a..3f15eb20489 100644 --- a/drivers/mmc/host/au1xmmc.c +++ b/drivers/mmc/host/au1xmmc.c @@ -21,7 +21,7 @@ * published by the Free Software Foundation. */ -/* Why is a timer used to detect insert events? +/* Why don't we use the SD controllers' carddetect feature? * * From the AU1100 MMC application guide: * If the Au1100-based design is intended to support both MultiMediaCards @@ -30,8 +30,6 @@ * In doing so, a MMC card never enters SPI-mode communications, * but now the SecureDigital card-detect feature of CD/DAT3 is ineffective * (the low to high transition will not occur). - * - * So we use the timer to check the status manually. */ #include <linux/module.h> @@ -41,51 +39,110 @@ #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/scatterlist.h> - +#include <linux/leds.h> #include <linux/mmc/host.h> + #include <asm/io.h> #include <asm/mach-au1x00/au1000.h> #include <asm/mach-au1x00/au1xxx_dbdma.h> #include <asm/mach-au1x00/au1100_mmc.h> -#include <au1xxx.h> -#include "au1xmmc.h" - #define DRIVER_NAME "au1xxx-mmc" /* Set this to enable special debugging macros */ +/* #define DEBUG */ #ifdef DEBUG -#define DBG(fmt, idx, args...) printk("au1xx(%d): DEBUG: " fmt, idx, ##args) +#define DBG(fmt, idx, args...) \ + printk(KERN_DEBUG "au1xmmc(%d): DEBUG: " fmt, idx, ##args) #else -#define DBG(fmt, idx, args...) +#define DBG(fmt, idx, args...) do {} while (0) #endif -const struct { +/* Hardware definitions */ +#define AU1XMMC_DESCRIPTOR_COUNT 1 +#define AU1XMMC_DESCRIPTOR_SIZE 2048 + +#define AU1XMMC_OCR (MMC_VDD_27_28 | MMC_VDD_28_29 | MMC_VDD_29_30 | \ + MMC_VDD_30_31 | MMC_VDD_31_32 | MMC_VDD_32_33 | \ + MMC_VDD_33_34 | MMC_VDD_34_35 | MMC_VDD_35_36) + +/* This gives us a hard value for the stop command that we can write directly + * to the command register. 
+ */ +#define STOP_CMD \ + (SD_CMD_RT_1B | SD_CMD_CT_7 | (0xC << SD_CMD_CI_SHIFT) | SD_CMD_GO) + +/* This is the set of interrupts that we configure by default. */ +#define AU1XMMC_INTERRUPTS \ + (SD_CONFIG_SC | SD_CONFIG_DT | SD_CONFIG_RAT | \ + SD_CONFIG_CR | SD_CONFIG_I) + +/* The poll event (looking for insert/remove events) runs twice a second. */ +#define AU1XMMC_DETECT_TIMEOUT (HZ/2) + +struct au1xmmc_host { + struct mmc_host *mmc; + struct mmc_request *mrq; + + u32 flags; u32 iobase; - u32 tx_devid, rx_devid; - u16 bcsrpwr; - u16 bcsrstatus; - u16 wpstatus; -} au1xmmc_card_table[] = { - { SD0_BASE, DSCR_CMD0_SDMS_TX0, DSCR_CMD0_SDMS_RX0, - BCSR_BOARD_SD0PWR, BCSR_INT_SD0INSERT, BCSR_STATUS_SD0WP }, -#ifndef CONFIG_MIPS_DB1200 - { SD1_BASE, DSCR_CMD0_SDMS_TX1, DSCR_CMD0_SDMS_RX1, - BCSR_BOARD_DS1PWR, BCSR_INT_SD1INSERT, BCSR_STATUS_SD1WP } -#endif -}; + u32 clock; + u32 bus_width; + u32 power_mode; -#define AU1XMMC_CONTROLLER_COUNT (ARRAY_SIZE(au1xmmc_card_table)) + int status; -/* This array stores pointers for the hosts (used by the IRQ handler) */ -struct au1xmmc_host *au1xmmc_hosts[AU1XMMC_CONTROLLER_COUNT]; -static int dma = 1; + struct { + int len; + int dir; + } dma; -#ifdef MODULE -module_param(dma, bool, 0); -MODULE_PARM_DESC(dma, "Use DMA engine for data transfers (0 = disabled)"); -#endif + struct { + int index; + int offset; + int len; + } pio; + + u32 tx_chan; + u32 rx_chan; + + int irq; + + struct tasklet_struct finish_task; + struct tasklet_struct data_task; + struct au1xmmc_platform_data *platdata; + struct platform_device *pdev; + struct resource *ioarea; +}; + +/* Status flags used by the host structure */ +#define HOST_F_XMIT 0x0001 +#define HOST_F_RECV 0x0002 +#define HOST_F_DMA 0x0010 +#define HOST_F_ACTIVE 0x0100 +#define HOST_F_STOP 0x1000 + +#define HOST_S_IDLE 0x0001 +#define HOST_S_CMD 0x0002 +#define HOST_S_DATA 0x0003 +#define HOST_S_STOP 0x0004 + +/* Easy access macros */ +#define HOST_STATUS(h) ((h)->iobase + SD_STATUS) +#define HOST_CONFIG(h) ((h)->iobase + SD_CONFIG) +#define HOST_ENABLE(h) ((h)->iobase + SD_ENABLE) +#define HOST_TXPORT(h) ((h)->iobase + SD_TXPORT) +#define HOST_RXPORT(h) ((h)->iobase + SD_RXPORT) +#define HOST_CMDARG(h) ((h)->iobase + SD_CMDARG) +#define HOST_BLKSIZE(h) ((h)->iobase + SD_BLKSIZE) +#define HOST_CMD(h) ((h)->iobase + SD_CMD) +#define HOST_CONFIG2(h) ((h)->iobase + SD_CONFIG2) +#define HOST_TIMEOUT(h) ((h)->iobase + SD_TIMEOUT) +#define HOST_DEBUG(h) ((h)->iobase + SD_DEBUG) + +#define DMA_CHANNEL(h) \ + (((h)->flags & HOST_F_XMIT) ? 
(h)->tx_chan : (h)->rx_chan) static inline void IRQ_ON(struct au1xmmc_host *host, u32 mask) { @@ -119,14 +176,13 @@ static inline void IRQ_OFF(struct au1xmmc_host *host, u32 mask) static inline void SEND_STOP(struct au1xmmc_host *host) { - - /* We know the value of CONFIG2, so avoid a read we don't need */ - u32 mask = SD_CONFIG2_EN; + u32 config2; WARN_ON(host->status != HOST_S_DATA); host->status = HOST_S_STOP; - au_writel(mask | SD_CONFIG2_DF, HOST_CONFIG2(host)); + config2 = au_readl(HOST_CONFIG2(host)); + au_writel(config2 | SD_CONFIG2_DF, HOST_CONFIG2(host)); au_sync(); /* Send the stop commmand */ @@ -135,35 +191,36 @@ static inline void SEND_STOP(struct au1xmmc_host *host) static void au1xmmc_set_power(struct au1xmmc_host *host, int state) { - - u32 val = au1xmmc_card_table[host->id].bcsrpwr; - - bcsr->board &= ~val; - if (state) bcsr->board |= val; - - au_sync_delay(1); + if (host->platdata && host->platdata->set_power) + host->platdata->set_power(host->mmc, state); } -static inline int au1xmmc_card_inserted(struct au1xmmc_host *host) +static int au1xmmc_card_inserted(struct mmc_host *mmc) { - return (bcsr->sig_status & au1xmmc_card_table[host->id].bcsrstatus) - ? 1 : 0; + struct au1xmmc_host *host = mmc_priv(mmc); + + if (host->platdata && host->platdata->card_inserted) + return !!host->platdata->card_inserted(host->mmc); + + return -ENOSYS; } static int au1xmmc_card_readonly(struct mmc_host *mmc) { struct au1xmmc_host *host = mmc_priv(mmc); - return (bcsr->status & au1xmmc_card_table[host->id].wpstatus) - ? 1 : 0; + + if (host->platdata && host->platdata->card_readonly) + return !!host->platdata->card_readonly(mmc); + + return -ENOSYS; } static void au1xmmc_finish_request(struct au1xmmc_host *host) { - struct mmc_request *mrq = host->mrq; host->mrq = NULL; - host->flags &= HOST_F_ACTIVE; + host->flags &= HOST_F_ACTIVE | HOST_F_DMA; host->dma.len = 0; host->dma.dir = 0; @@ -174,8 +231,6 @@ static void au1xmmc_finish_request(struct au1xmmc_host *host) host->status = HOST_S_IDLE; - bcsr->disk_leds |= (1 << 8); - mmc_request_done(host->mmc, mrq); } @@ -235,18 +290,14 @@ static int au1xmmc_send_command(struct au1xmmc_host *host, int wait, au_sync(); /* Wait for the command to go on the line */ - - while(1) { - if (!(au_readl(HOST_CMD(host)) & SD_CMD_GO)) - break; - } + while (au_readl(HOST_CMD(host)) & SD_CMD_GO) + /* nop */; /* Wait for the command to come back */ - if (wait) { u32 status = au_readl(HOST_STATUS(host)); - while(!(status & SD_STATUS_CR)) + while (!(status & SD_STATUS_CR)) status = au_readl(HOST_STATUS(host)); /* Clear the CR status */ @@ -260,12 +311,11 @@ static int au1xmmc_send_command(struct au1xmmc_host *host, int wait, static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status) { - struct mmc_request *mrq = host->mrq; struct mmc_data *data; u32 crc; - WARN_ON(host->status != HOST_S_DATA && host->status != HOST_S_STOP); + WARN_ON((host->status != HOST_S_DATA) && (host->status != HOST_S_STOP)); if (host->mrq == NULL) return; @@ -276,15 +326,13 @@ static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status) status = au_readl(HOST_STATUS(host)); /* The transaction is really over when the SD_STATUS_DB bit is clear */ - - while((host->flags & HOST_F_XMIT) && (status & SD_STATUS_DB)) + while ((host->flags & HOST_F_XMIT) && (status & SD_STATUS_DB)) status = au_readl(HOST_STATUS(host)); data->error = 0; dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma.dir); /* Process any errors */ - crc = (status & (SD_STATUS_WC | 
SD_STATUS_RC)); if (host->flags & HOST_F_XMIT) crc |= ((status & 0x07) == 0x02) ? 0 : 1; @@ -299,16 +347,16 @@ static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status) if (!data->error) { if (host->flags & HOST_F_DMA) { +#ifdef CONFIG_SOC_AU1200 /* DBDMA */ u32 chan = DMA_CHANNEL(host); - chan_tab_t *c = *((chan_tab_t **) chan); + chan_tab_t *c = *((chan_tab_t **)chan); au1x_dma_chan_t *cp = c->chan_ptr; data->bytes_xfered = cp->ddma_bytecnt; - } - else +#endif + } else data->bytes_xfered = - (data->blocks * data->blksz) - - host->pio.len; + (data->blocks * data->blksz) - host->pio.len; } au1xmmc_finish_request(host); @@ -316,7 +364,7 @@ static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status) static void au1xmmc_tasklet_data(unsigned long param) { - struct au1xmmc_host *host = (struct au1xmmc_host *) param; + struct au1xmmc_host *host = (struct au1xmmc_host *)param; u32 status = au_readl(HOST_STATUS(host)); au1xmmc_data_complete(host, status); @@ -326,11 +374,10 @@ static void au1xmmc_tasklet_data(unsigned long param) static void au1xmmc_send_pio(struct au1xmmc_host *host) { - - struct mmc_data *data = 0; - int sg_len, max, count = 0; - unsigned char *sg_ptr; - u32 status = 0; + struct mmc_data *data; + int sg_len, max, count; + unsigned char *sg_ptr, val; + u32 status; struct scatterlist *sg; data = host->mrq->data; @@ -345,14 +392,12 @@ static void au1xmmc_send_pio(struct au1xmmc_host *host) /* This is the space left inside the buffer */ sg_len = data->sg[host->pio.index].length - host->pio.offset; - /* Check to if we need less then the size of the sg_buffer */ - + /* Check if we need less than the size of the sg_buffer */ max = (sg_len > host->pio.len) ? host->pio.len : sg_len; - if (max > AU1XMMC_MAX_TRANSFER) max = AU1XMMC_MAX_TRANSFER; - - for(count = 0; count < max; count++ ) { - unsigned char val; + if (max > AU1XMMC_MAX_TRANSFER) + max = AU1XMMC_MAX_TRANSFER; + for (count = 0; count < max; count++) { status = au_readl(HOST_STATUS(host)); if (!(status & SD_STATUS_TH)) @@ -360,7 +405,7 @@ static void au1xmmc_send_pio(struct au1xmmc_host *host) val = *sg_ptr++; - au_writel((unsigned long) val, HOST_TXPORT(host)); + au_writel((unsigned long)val, HOST_TXPORT(host)); au_sync(); } @@ -384,11 +429,10 @@ static void au1xmmc_send_pio(struct au1xmmc_host *host) static void au1xmmc_receive_pio(struct au1xmmc_host *host) { - - struct mmc_data *data = 0; - int sg_len = 0, max = 0, count = 0; - unsigned char *sg_ptr = 0; - u32 status = 0; + struct mmc_data *data; + int max, count, sg_len = 0; + unsigned char *sg_ptr = NULL; + u32 status, val; struct scatterlist *sg; data = host->mrq->data; @@ -405,33 +449,33 @@ static void au1xmmc_receive_pio(struct au1xmmc_host *host) /* This is the space left inside the buffer */ sg_len = sg_dma_len(&data->sg[host->pio.index]) - host->pio.offset; - /* Check to if we need less then the size of the sg_buffer */ - if (sg_len < max) max = sg_len; + /* Check if we need less than the size of the sg_buffer */ + if (sg_len < max) + max = sg_len; } if (max > AU1XMMC_MAX_TRANSFER) max = AU1XMMC_MAX_TRANSFER; - for(count = 0; count < max; count++ ) { - u32 val; + for (count = 0; count < max; count++) { status = au_readl(HOST_STATUS(host)); if (!(status & SD_STATUS_NE)) break; if (status & SD_STATUS_RC) { - DBG("RX CRC Error [%d + %d].\n", host->id, + DBG("RX CRC Error [%d + %d].\n", host->pdev->id, host->pio.len, count); break; } if (status & SD_STATUS_RO) { - DBG("RX Overrun [%d + %d]\n", host->id, + DBG("RX Overrun [%d + %d]\n", 
host->pdev->id, host->pio.len, count); break; } else if (status & SD_STATUS_RU) { - DBG("RX Underrun [%d + %d]\n", host->id, + DBG("RX Underrun [%d + %d]\n", host->pdev->id, host->pio.len, count); break; } @@ -439,7 +483,7 @@ static void au1xmmc_receive_pio(struct au1xmmc_host *host) val = au_readl(HOST_RXPORT(host)); if (sg_ptr) - *sg_ptr++ = (unsigned char) (val & 0xFF); + *sg_ptr++ = (unsigned char)(val & 0xFF); } host->pio.len -= count; @@ -451,7 +495,7 @@ static void au1xmmc_receive_pio(struct au1xmmc_host *host) } if (host->pio.len == 0) { - //IRQ_OFF(host, SD_CONFIG_RA | SD_CONFIG_RF); + /* IRQ_OFF(host, SD_CONFIG_RA | SD_CONFIG_RF); */ IRQ_OFF(host, SD_CONFIG_NE); if (host->flags & HOST_F_STOP) @@ -461,17 +505,15 @@ static void au1xmmc_receive_pio(struct au1xmmc_host *host) } } -/* static void au1xmmc_cmd_complete - This is called when a command has been completed - grab the response - and check for errors. Then start the data transfer if it is indicated. -*/ - +/* This is called when a command has been completed - grab the response + * and check for errors. Then start the data transfer if it is indicated. + */ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status) { - struct mmc_request *mrq = host->mrq; struct mmc_command *cmd; - int trans; + u32 r[4]; + int i, trans; if (!host->mrq) return; @@ -481,9 +523,6 @@ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status) if (cmd->flags & MMC_RSP_PRESENT) { if (cmd->flags & MMC_RSP_136) { - u32 r[4]; - int i; - r[0] = au_readl(host->iobase + SD_RESP3); r[1] = au_readl(host->iobase + SD_RESP2); r[2] = au_readl(host->iobase + SD_RESP1); @@ -491,10 +530,9 @@ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status) /* The CRC is omitted from the response, so really * we only got 120 bytes, but the engine expects - * 128 bits, so we have to shift things up + * 128 bits, so we have to shift things up. */ - - for(i = 0; i < 4; i++) { + for (i = 0; i < 4; i++) { cmd->resp[i] = (r[i] & 0x00FFFFFF) << 8; if (i != 3) cmd->resp[i] |= (r[i + 1] & 0xFF000000) >> 24; @@ -505,22 +543,20 @@ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status) * our response omits the CRC, our data ends up * being shifted 8 bits to the right. In this case, * that means that the OSR data starts at bit 31, - * so we can just read RESP0 and return that + * so we can just read RESP0 and return that. 
*/ cmd->resp[0] = au_readl(host->iobase + SD_RESP0); } } /* Figure out errors */ - if (status & (SD_STATUS_SC | SD_STATUS_WC | SD_STATUS_RC)) cmd->error = -EILSEQ; trans = host->flags & (HOST_F_XMIT | HOST_F_RECV); if (!trans || cmd->error) { - - IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA|SD_CONFIG_RF); + IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF); tasklet_schedule(&host->finish_task); return; } @@ -528,6 +564,7 @@ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status) host->status = HOST_S_DATA; if (host->flags & HOST_F_DMA) { +#ifdef CONFIG_SOC_AU1200 /* DBDMA */ u32 channel = DMA_CHANNEL(host); /* Start the DMA as soon as the buffer gets something in it */ @@ -540,23 +577,21 @@ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status) } au1xxx_dbdma_start(channel); +#endif } } static void au1xmmc_set_clock(struct au1xmmc_host *host, int rate) { - unsigned int pbus = get_au1x00_speed(); unsigned int divisor; u32 config; /* From databook: - divisor = ((((cpuclock / sbus_divisor) / 2) / mmcclock) / 2) - 1 - */ - + * divisor = ((((cpuclock / sbus_divisor) / 2) / mmcclock) / 2) - 1 + */ pbus /= ((au_readl(SYS_POWERCTRL) & 0x3) + 2); pbus /= 2; - divisor = ((pbus / rate) / 2) - 1; config = au_readl(HOST_CONFIG(host)); @@ -568,15 +603,11 @@ static void au1xmmc_set_clock(struct au1xmmc_host *host, int rate) au_sync(); } -static int -au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data) +static int au1xmmc_prepare_data(struct au1xmmc_host *host, + struct mmc_data *data) { - int datalen = data->blocks * data->blksz; - if (dma != 0) - host->flags |= HOST_F_DMA; - if (data->flags & MMC_DATA_READ) host->flags |= HOST_F_RECV; else @@ -596,12 +627,13 @@ au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data) au_writel(data->blksz - 1, HOST_BLKSIZE(host)); if (host->flags & HOST_F_DMA) { +#ifdef CONFIG_SOC_AU1200 /* DBDMA */ int i; u32 channel = DMA_CHANNEL(host); au1xxx_dbdma_stop(channel); - for(i = 0; i < host->dma.len; i++) { + for (i = 0; i < host->dma.len; i++) { u32 ret = 0, flags = DDMA_FLAGS_NOIE; struct scatterlist *sg = &data->sg[i]; int sg_len = sg->length; @@ -611,23 +643,21 @@ au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data) if (i == host->dma.len - 1) flags = DDMA_FLAGS_IE; - if (host->flags & HOST_F_XMIT){ - ret = au1xxx_dbdma_put_source_flags(channel, - (void *) sg_virt(sg), len, flags); - } - else { - ret = au1xxx_dbdma_put_dest_flags(channel, - (void *) sg_virt(sg), - len, flags); + if (host->flags & HOST_F_XMIT) { + ret = au1xxx_dbdma_put_source_flags(channel, + (void *)sg_virt(sg), len, flags); + } else { + ret = au1xxx_dbdma_put_dest_flags(channel, + (void *)sg_virt(sg), len, flags); } - if (!ret) + if (!ret) goto dataerr; datalen -= len; } - } - else { +#endif + } else { host->pio.index = 0; host->pio.offset = 0; host->pio.len = datalen; @@ -636,25 +666,21 @@ au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data) IRQ_ON(host, SD_CONFIG_TH); else IRQ_ON(host, SD_CONFIG_NE); - //IRQ_ON(host, SD_CONFIG_RA|SD_CONFIG_RF); + /* IRQ_ON(host, SD_CONFIG_RA | SD_CONFIG_RF); */ } return 0; - dataerr: - dma_unmap_sg(mmc_dev(host->mmc),data->sg,data->sg_len,host->dma.dir); +dataerr: + dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, + host->dma.dir); return -ETIMEDOUT; } -/* static void au1xmmc_request - This actually starts a command or data transaction -*/ - +/* This actually starts a command or data transaction */ static void au1xmmc_request(struct mmc_host* mmc, 
struct mmc_request* mrq) { - struct au1xmmc_host *host = mmc_priv(mmc); - unsigned int flags = 0; int ret = 0; WARN_ON(irqs_disabled()); @@ -663,11 +689,15 @@ static void au1xmmc_request(struct mmc_host* mmc, struct mmc_request* mrq) host->mrq = mrq; host->status = HOST_S_CMD; - bcsr->disk_leds &= ~(1 << 8); + /* fail request immediately if no card is present */ + if (0 == au1xmmc_card_inserted(mmc)) { + mrq->cmd->error = -ENOMEDIUM; + au1xmmc_finish_request(host); + return; + } if (mrq->data) { FLUSH_FIFO(host); - flags = mrq->data->flags; ret = au1xmmc_prepare_data(host, mrq->data); } @@ -682,7 +712,6 @@ static void au1xmmc_request(struct mmc_host* mmc, struct mmc_request* mrq) static void au1xmmc_reset_controller(struct au1xmmc_host *host) { - /* Apply the clock */ au_writel(SD_ENABLE_CE, HOST_ENABLE(host)); au_sync_delay(1); @@ -712,9 +741,10 @@ static void au1xmmc_reset_controller(struct au1xmmc_host *host) } -static void au1xmmc_set_ios(struct mmc_host* mmc, struct mmc_ios* ios) +static void au1xmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct au1xmmc_host *host = mmc_priv(mmc); + u32 config2; if (ios->power_mode == MMC_POWER_OFF) au1xmmc_set_power(host, 0); @@ -726,21 +756,18 @@ static void au1xmmc_set_ios(struct mmc_host* mmc, struct mmc_ios* ios) au1xmmc_set_clock(host, ios->clock); host->clock = ios->clock; } -} - -static void au1xmmc_dma_callback(int irq, void *dev_id) -{ - struct au1xmmc_host *host = (struct au1xmmc_host *) dev_id; - - /* Avoid spurious interrupts */ - if (!host->mrq) - return; - - if (host->flags & HOST_F_STOP) - SEND_STOP(host); - - tasklet_schedule(&host->data_task); + config2 = au_readl(HOST_CONFIG2(host)); + switch (ios->bus_width) { + case MMC_BUS_WIDTH_4: + config2 |= SD_CONFIG2_WB; + break; + case MMC_BUS_WIDTH_1: + config2 &= ~SD_CONFIG2_WB; + break; + } + au_writel(config2, HOST_CONFIG2(host)); + au_sync(); } #define STATUS_TIMEOUT (SD_STATUS_RAT | SD_STATUS_DT) @@ -749,245 +776,354 @@ static void au1xmmc_dma_callback(int irq, void *dev_id) static irqreturn_t au1xmmc_irq(int irq, void *dev_id) { - + struct au1xmmc_host *host = dev_id; u32 status; - int i, ret = 0; - - disable_irq(AU1100_SD_IRQ); - for(i = 0; i < AU1XMMC_CONTROLLER_COUNT; i++) { - struct au1xmmc_host * host = au1xmmc_hosts[i]; - u32 handled = 1; + status = au_readl(HOST_STATUS(host)); - status = au_readl(HOST_STATUS(host)); + if (!(status & SD_STATUS_I)) + return IRQ_NONE; /* not ours */ - if (host->mrq && (status & STATUS_TIMEOUT)) { - if (status & SD_STATUS_RAT) - host->mrq->cmd->error = -ETIMEDOUT; + if (status & SD_STATUS_SI) /* SDIO */ + mmc_signal_sdio_irq(host->mmc); - else if (status & SD_STATUS_DT) - host->mrq->data->error = -ETIMEDOUT; + if (host->mrq && (status & STATUS_TIMEOUT)) { + if (status & SD_STATUS_RAT) + host->mrq->cmd->error = -ETIMEDOUT; + else if (status & SD_STATUS_DT) + host->mrq->data->error = -ETIMEDOUT; - /* In PIO mode, interrupts might still be enabled */ - IRQ_OFF(host, SD_CONFIG_NE | SD_CONFIG_TH); + /* In PIO mode, interrupts might still be enabled */ + IRQ_OFF(host, SD_CONFIG_NE | SD_CONFIG_TH); - //IRQ_OFF(host, SD_CONFIG_TH|SD_CONFIG_RA|SD_CONFIG_RF); - tasklet_schedule(&host->finish_task); - } + /* IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF); */ + tasklet_schedule(&host->finish_task); + } #if 0 - else if (status & SD_STATUS_DD) { - - /* Sometimes we get a DD before a NE in PIO mode */ - - if (!(host->flags & HOST_F_DMA) && - (status & SD_STATUS_NE)) - au1xmmc_receive_pio(host); - else { - au1xmmc_data_complete(host, 
status); - //tasklet_schedule(&host->data_task); - } + else if (status & SD_STATUS_DD) { + /* Sometimes we get a DD before a NE in PIO mode */ + if (!(host->flags & HOST_F_DMA) && (status & SD_STATUS_NE)) + au1xmmc_receive_pio(host); + else { + au1xmmc_data_complete(host, status); + /* tasklet_schedule(&host->data_task); */ } + } #endif - else if (status & (SD_STATUS_CR)) { - if (host->status == HOST_S_CMD) - au1xmmc_cmd_complete(host,status); - } - else if (!(host->flags & HOST_F_DMA)) { - if ((host->flags & HOST_F_XMIT) && - (status & STATUS_DATA_OUT)) - au1xmmc_send_pio(host); - else if ((host->flags & HOST_F_RECV) && - (status & STATUS_DATA_IN)) - au1xmmc_receive_pio(host); - } - else if (status & 0x203FBC70) { - DBG("Unhandled status %8.8x\n", host->id, status); - handled = 0; - } - - au_writel(status, HOST_STATUS(host)); - au_sync(); - - ret |= handled; + else if (status & SD_STATUS_CR) { + if (host->status == HOST_S_CMD) + au1xmmc_cmd_complete(host, status); + + } else if (!(host->flags & HOST_F_DMA)) { + if ((host->flags & HOST_F_XMIT) && (status & STATUS_DATA_OUT)) + au1xmmc_send_pio(host); + else if ((host->flags & HOST_F_RECV) && (status & STATUS_DATA_IN)) + au1xmmc_receive_pio(host); + + } else if (status & 0x203F3C70) { + DBG("Unhandled status %8.8x\n", host->pdev->id, + status); } - enable_irq(AU1100_SD_IRQ); - return ret; + au_writel(status, HOST_STATUS(host)); + au_sync(); + + return IRQ_HANDLED; } -static void au1xmmc_poll_event(unsigned long arg) -{ - struct au1xmmc_host *host = (struct au1xmmc_host *) arg; +#ifdef CONFIG_SOC_AU1200 +/* 8bit memory DMA device */ +static dbdev_tab_t au1xmmc_mem_dbdev = { + .dev_id = DSCR_CMD0_ALWAYS, + .dev_flags = DEV_FLAGS_ANYUSE, + .dev_tsize = 0, + .dev_devwidth = 8, + .dev_physaddr = 0x00000000, + .dev_intlevel = 0, + .dev_intpolarity = 0, +}; +static int memid; - int card = au1xmmc_card_inserted(host); - int controller = (host->flags & HOST_F_ACTIVE) ? 
1 : 0; +static void au1xmmc_dbdma_callback(int irq, void *dev_id) +{ + struct au1xmmc_host *host = (struct au1xmmc_host *)dev_id; - if (card != controller) { - host->flags &= ~HOST_F_ACTIVE; - if (card) host->flags |= HOST_F_ACTIVE; - mmc_detect_change(host->mmc, 0); - } + /* Avoid spurious interrupts */ + if (!host->mrq) + return; - if (host->mrq != NULL) { - u32 status = au_readl(HOST_STATUS(host)); - DBG("PENDING - %8.8x\n", host->id, status); - } + if (host->flags & HOST_F_STOP) + SEND_STOP(host); - mod_timer(&host->timer, jiffies + AU1XMMC_DETECT_TIMEOUT); + tasklet_schedule(&host->data_task); } -static dbdev_tab_t au1xmmc_mem_dbdev = -{ - DSCR_CMD0_ALWAYS, DEV_FLAGS_ANYUSE, 0, 8, 0x00000000, 0, 0 -}; - -static void au1xmmc_init_dma(struct au1xmmc_host *host) +static int au1xmmc_dbdma_init(struct au1xmmc_host *host) { + struct resource *res; + int txid, rxid; + + res = platform_get_resource(host->pdev, IORESOURCE_DMA, 0); + if (!res) + return -ENODEV; + txid = res->start; + + res = platform_get_resource(host->pdev, IORESOURCE_DMA, 1); + if (!res) + return -ENODEV; + rxid = res->start; + + if (!memid) + return -ENODEV; + + host->tx_chan = au1xxx_dbdma_chan_alloc(memid, txid, + au1xmmc_dbdma_callback, (void *)host); + if (!host->tx_chan) { + dev_err(&host->pdev->dev, "cannot allocate TX DMA\n"); + return -ENODEV; + } - u32 rxchan, txchan; - - int txid = au1xmmc_card_table[host->id].tx_devid; - int rxid = au1xmmc_card_table[host->id].rx_devid; + host->rx_chan = au1xxx_dbdma_chan_alloc(rxid, memid, + au1xmmc_dbdma_callback, (void *)host); + if (!host->rx_chan) { + dev_err(&host->pdev->dev, "cannot allocate RX DMA\n"); + au1xxx_dbdma_chan_free(host->tx_chan); + return -ENODEV; + } - /* DSCR_CMD0_ALWAYS has a stride of 32 bits, we need a stride - of 8 bits. 
And since devices are shared, we need to create - our own to avoid freaking out other devices - */ + au1xxx_dbdma_set_devwidth(host->tx_chan, 8); + au1xxx_dbdma_set_devwidth(host->rx_chan, 8); - int memid = au1xxx_ddma_add_device(&au1xmmc_mem_dbdev); + au1xxx_dbdma_ring_alloc(host->tx_chan, AU1XMMC_DESCRIPTOR_COUNT); + au1xxx_dbdma_ring_alloc(host->rx_chan, AU1XMMC_DESCRIPTOR_COUNT); - txchan = au1xxx_dbdma_chan_alloc(memid, txid, - au1xmmc_dma_callback, (void *) host); + /* DBDMA is good to go */ + host->flags |= HOST_F_DMA; - rxchan = au1xxx_dbdma_chan_alloc(rxid, memid, - au1xmmc_dma_callback, (void *) host); + return 0; +} - au1xxx_dbdma_set_devwidth(txchan, 8); - au1xxx_dbdma_set_devwidth(rxchan, 8); +static void au1xmmc_dbdma_shutdown(struct au1xmmc_host *host) +{ + if (host->flags & HOST_F_DMA) { + host->flags &= ~HOST_F_DMA; + au1xxx_dbdma_chan_free(host->tx_chan); + au1xxx_dbdma_chan_free(host->rx_chan); + } +} +#endif - au1xxx_dbdma_ring_alloc(txchan, AU1XMMC_DESCRIPTOR_COUNT); - au1xxx_dbdma_ring_alloc(rxchan, AU1XMMC_DESCRIPTOR_COUNT); +static void au1xmmc_enable_sdio_irq(struct mmc_host *mmc, int en) +{ + struct au1xmmc_host *host = mmc_priv(mmc); - host->tx_chan = txchan; - host->rx_chan = rxchan; + if (en) + IRQ_ON(host, SD_CONFIG_SI); + else + IRQ_OFF(host, SD_CONFIG_SI); } static const struct mmc_host_ops au1xmmc_ops = { .request = au1xmmc_request, .set_ios = au1xmmc_set_ios, .get_ro = au1xmmc_card_readonly, + .get_cd = au1xmmc_card_inserted, + .enable_sdio_irq = au1xmmc_enable_sdio_irq, }; static int __devinit au1xmmc_probe(struct platform_device *pdev) { + struct mmc_host *mmc; + struct au1xmmc_host *host; + struct resource *r; + int ret; + + mmc = mmc_alloc_host(sizeof(struct au1xmmc_host), &pdev->dev); + if (!mmc) { + dev_err(&pdev->dev, "no memory for mmc_host\n"); + ret = -ENOMEM; + goto out0; + } - int i, ret = 0; - - /* THe interrupt is shared among all controllers */ - ret = request_irq(AU1100_SD_IRQ, au1xmmc_irq, IRQF_DISABLED, "MMC", 0); + host = mmc_priv(mmc); + host->mmc = mmc; + host->platdata = pdev->dev.platform_data; + host->pdev = pdev; - if (ret) { - printk(DRIVER_NAME "ERROR: Couldn't get int %d: %d\n", - AU1100_SD_IRQ, ret); - return -ENXIO; + ret = -ENODEV; + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r) { + dev_err(&pdev->dev, "no mmio defined\n"); + goto out1; } - disable_irq(AU1100_SD_IRQ); + host->ioarea = request_mem_region(r->start, r->end - r->start + 1, + pdev->name); + if (!host->ioarea) { + dev_err(&pdev->dev, "mmio already in use\n"); + goto out1; + } - for(i = 0; i < AU1XMMC_CONTROLLER_COUNT; i++) { - struct mmc_host *mmc = mmc_alloc_host(sizeof(struct au1xmmc_host), &pdev->dev); - struct au1xmmc_host *host = 0; + host->iobase = (unsigned long)ioremap(r->start, 0x3c); + if (!host->iobase) { + dev_err(&pdev->dev, "cannot remap mmio\n"); + goto out2; + } - if (!mmc) { - printk(DRIVER_NAME "ERROR: no mem for host %d\n", i); - au1xmmc_hosts[i] = 0; - continue; - } + r = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!r) { + dev_err(&pdev->dev, "no IRQ defined\n"); + goto out3; + } - mmc->ops = &au1xmmc_ops; + host->irq = r->start; + /* IRQ is shared among both SD controllers */ + ret = request_irq(host->irq, au1xmmc_irq, IRQF_SHARED, + DRIVER_NAME, host); + if (ret) { + dev_err(&pdev->dev, "cannot grab IRQ\n"); + goto out3; + } - mmc->f_min = 450000; - mmc->f_max = 24000000; + mmc->ops = &au1xmmc_ops; - mmc->max_seg_size = AU1XMMC_DESCRIPTOR_SIZE; - mmc->max_phys_segs = AU1XMMC_DESCRIPTOR_COUNT; + mmc->f_min = 450000; + 
mmc->f_max = 24000000; - mmc->max_blk_size = 2048; - mmc->max_blk_count = 512; + mmc->max_seg_size = AU1XMMC_DESCRIPTOR_SIZE; + mmc->max_phys_segs = AU1XMMC_DESCRIPTOR_COUNT; - mmc->ocr_avail = AU1XMMC_OCR; + mmc->max_blk_size = 2048; + mmc->max_blk_count = 512; - host = mmc_priv(mmc); - host->mmc = mmc; + mmc->ocr_avail = AU1XMMC_OCR; + mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ; - host->id = i; - host->iobase = au1xmmc_card_table[host->id].iobase; - host->clock = 0; - host->power_mode = MMC_POWER_OFF; + host->status = HOST_S_IDLE; - host->flags = au1xmmc_card_inserted(host) ? HOST_F_ACTIVE : 0; - host->status = HOST_S_IDLE; + /* board-specific carddetect setup, if any */ + if (host->platdata && host->platdata->cd_setup) { + ret = host->platdata->cd_setup(mmc, 1); + if (ret) { + dev_warn(&pdev->dev, "board CD setup failed\n"); + mmc->caps |= MMC_CAP_NEEDS_POLL; + } + } else + mmc->caps |= MMC_CAP_NEEDS_POLL; - init_timer(&host->timer); + tasklet_init(&host->data_task, au1xmmc_tasklet_data, + (unsigned long)host); - host->timer.function = au1xmmc_poll_event; - host->timer.data = (unsigned long) host; - host->timer.expires = jiffies + AU1XMMC_DETECT_TIMEOUT; + tasklet_init(&host->finish_task, au1xmmc_tasklet_finish, + (unsigned long)host); - tasklet_init(&host->data_task, au1xmmc_tasklet_data, - (unsigned long) host); +#ifdef CONFIG_SOC_AU1200 + ret = au1xmmc_dbdma_init(host); + if (ret) + printk(KERN_INFO DRIVER_NAME ": DBDMA init failed; using PIO\n"); +#endif - tasklet_init(&host->finish_task, au1xmmc_tasklet_finish, - (unsigned long) host); +#ifdef CONFIG_LEDS_CLASS + if (host->platdata && host->platdata->led) { + struct led_classdev *led = host->platdata->led; + led->name = mmc_hostname(mmc); + led->brightness = LED_OFF; + led->default_trigger = mmc_hostname(mmc); + ret = led_classdev_register(mmc_dev(mmc), led); + if (ret) + goto out5; + } +#endif - spin_lock_init(&host->lock); + au1xmmc_reset_controller(host); - if (dma != 0) - au1xmmc_init_dma(host); + ret = mmc_add_host(mmc); + if (ret) { + dev_err(&pdev->dev, "cannot add mmc host\n"); + goto out6; + } - au1xmmc_reset_controller(host); + platform_set_drvdata(pdev, mmc); - mmc_add_host(mmc); - au1xmmc_hosts[i] = host; + printk(KERN_INFO DRIVER_NAME ": MMC Controller %d set up at %8.8X" + " (mode=%s)\n", pdev->id, host->iobase, + host->flags & HOST_F_DMA ? "dma" : "pio"); - add_timer(&host->timer); + return 0; /* all ok */ - printk(KERN_INFO DRIVER_NAME ": MMC Controller %d set up at %8.8X (mode=%s)\n", - host->id, host->iobase, dma ? 
"dma" : "pio"); - } +out6: +#ifdef CONFIG_LEDS_CLASS + if (host->platdata && host->platdata->led) + led_classdev_unregister(host->platdata->led); +out5: +#endif + au_writel(0, HOST_ENABLE(host)); + au_writel(0, HOST_CONFIG(host)); + au_writel(0, HOST_CONFIG2(host)); + au_sync(); - enable_irq(AU1100_SD_IRQ); +#ifdef CONFIG_SOC_AU1200 + au1xmmc_dbdma_shutdown(host); +#endif - return 0; + tasklet_kill(&host->data_task); + tasklet_kill(&host->finish_task); + + if (host->platdata && host->platdata->cd_setup && + !(mmc->caps & MMC_CAP_NEEDS_POLL)) + host->platdata->cd_setup(mmc, 0); + + free_irq(host->irq, host); +out3: + iounmap((void *)host->iobase); +out2: + release_resource(host->ioarea); + kfree(host->ioarea); +out1: + mmc_free_host(mmc); +out0: + return ret; } static int __devexit au1xmmc_remove(struct platform_device *pdev) { + struct mmc_host *mmc = platform_get_drvdata(pdev); + struct au1xmmc_host *host; + + if (mmc) { + host = mmc_priv(mmc); - int i; + mmc_remove_host(mmc); - disable_irq(AU1100_SD_IRQ); +#ifdef CONFIG_LEDS_CLASS + if (host->platdata && host->platdata->led) + led_classdev_unregister(host->platdata->led); +#endif - for(i = 0; i < AU1XMMC_CONTROLLER_COUNT; i++) { - struct au1xmmc_host *host = au1xmmc_hosts[i]; - if (!host) continue; + if (host->platdata && host->platdata->cd_setup && + !(mmc->caps & MMC_CAP_NEEDS_POLL)) + host->platdata->cd_setup(mmc, 0); + + au_writel(0, HOST_ENABLE(host)); + au_writel(0, HOST_CONFIG(host)); + au_writel(0, HOST_CONFIG2(host)); + au_sync(); tasklet_kill(&host->data_task); tasklet_kill(&host->finish_task); - del_timer_sync(&host->timer); +#ifdef CONFIG_SOC_AU1200 + au1xmmc_dbdma_shutdown(host); +#endif au1xmmc_set_power(host, 0); - mmc_remove_host(host->mmc); - - au1xxx_dbdma_chan_free(host->tx_chan); - au1xxx_dbdma_chan_free(host->rx_chan); + free_irq(host->irq, host); + iounmap((void *)host->iobase); + release_resource(host->ioarea); + kfree(host->ioarea); - au_writel(0x0, HOST_ENABLE(host)); - au_sync(); + mmc_free_host(mmc); } - - free_irq(AU1100_SD_IRQ, 0); return 0; } @@ -1004,21 +1140,31 @@ static struct platform_driver au1xmmc_driver = { static int __init au1xmmc_init(void) { +#ifdef CONFIG_SOC_AU1200 + /* DSCR_CMD0_ALWAYS has a stride of 32 bits, we need a stride + * of 8 bits. And since devices are shared, we need to create + * our own to avoid freaking out other devices. 
+ */ + memid = au1xxx_ddma_add_device(&au1xmmc_mem_dbdev); + if (!memid) + printk(KERN_ERR "au1xmmc: cannot add memory dbdma dev\n"); +#endif return platform_driver_register(&au1xmmc_driver); } static void __exit au1xmmc_exit(void) { +#ifdef CONFIG_SOC_AU1200 + if (memid) + au1xxx_ddma_del_device(memid); +#endif platform_driver_unregister(&au1xmmc_driver); } module_init(au1xmmc_init); module_exit(au1xmmc_exit); -#ifdef MODULE MODULE_AUTHOR("Advanced Micro Devices, Inc"); MODULE_DESCRIPTION("MMC/SD driver for the Alchemy Au1XXX"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:au1xxx-mmc"); -#endif - diff --git a/drivers/mmc/host/au1xmmc.h b/drivers/mmc/host/au1xmmc.h deleted file mode 100644 index 341cbdf0bac..00000000000 --- a/drivers/mmc/host/au1xmmc.h +++ /dev/null @@ -1,96 +0,0 @@ -#ifndef _AU1XMMC_H_ -#define _AU1XMMC_H_ - -/* Hardware definitions */ - -#define AU1XMMC_DESCRIPTOR_COUNT 1 -#define AU1XMMC_DESCRIPTOR_SIZE 2048 - -#define AU1XMMC_OCR ( MMC_VDD_27_28 | MMC_VDD_28_29 | MMC_VDD_29_30 | \ - MMC_VDD_30_31 | MMC_VDD_31_32 | MMC_VDD_32_33 | \ - MMC_VDD_33_34 | MMC_VDD_34_35 | MMC_VDD_35_36) - -/* Easy access macros */ - -#define HOST_STATUS(h) ((h)->iobase + SD_STATUS) -#define HOST_CONFIG(h) ((h)->iobase + SD_CONFIG) -#define HOST_ENABLE(h) ((h)->iobase + SD_ENABLE) -#define HOST_TXPORT(h) ((h)->iobase + SD_TXPORT) -#define HOST_RXPORT(h) ((h)->iobase + SD_RXPORT) -#define HOST_CMDARG(h) ((h)->iobase + SD_CMDARG) -#define HOST_BLKSIZE(h) ((h)->iobase + SD_BLKSIZE) -#define HOST_CMD(h) ((h)->iobase + SD_CMD) -#define HOST_CONFIG2(h) ((h)->iobase + SD_CONFIG2) -#define HOST_TIMEOUT(h) ((h)->iobase + SD_TIMEOUT) -#define HOST_DEBUG(h) ((h)->iobase + SD_DEBUG) - -#define DMA_CHANNEL(h) \ - ( ((h)->flags & HOST_F_XMIT) ? (h)->tx_chan : (h)->rx_chan) - -/* This gives us a hard value for the stop command that we can write directly - * to the command register - */ - -#define STOP_CMD (SD_CMD_RT_1B|SD_CMD_CT_7|(0xC << SD_CMD_CI_SHIFT)|SD_CMD_GO) - -/* This is the set of interrupts that we configure by default */ - -#if 0 -#define AU1XMMC_INTERRUPTS (SD_CONFIG_SC | SD_CONFIG_DT | SD_CONFIG_DD | \ - SD_CONFIG_RAT | SD_CONFIG_CR | SD_CONFIG_I) -#endif - -#define AU1XMMC_INTERRUPTS (SD_CONFIG_SC | SD_CONFIG_DT | \ - SD_CONFIG_RAT | SD_CONFIG_CR | SD_CONFIG_I) -/* The poll event (looking for insert/remove events runs twice a second */ -#define AU1XMMC_DETECT_TIMEOUT (HZ/2) - -struct au1xmmc_host { - struct mmc_host *mmc; - struct mmc_request *mrq; - - u32 id; - - u32 flags; - u32 iobase; - u32 clock; - u32 bus_width; - u32 power_mode; - - int status; - - struct { - int len; - int dir; - } dma; - - struct { - int index; - int offset; - int len; - } pio; - - u32 tx_chan; - u32 rx_chan; - - struct timer_list timer; - struct tasklet_struct finish_task; - struct tasklet_struct data_task; - - spinlock_t lock; -}; - -/* Status flags used by the host structure */ - -#define HOST_F_XMIT 0x0001 -#define HOST_F_RECV 0x0002 -#define HOST_F_DMA 0x0010 -#define HOST_F_ACTIVE 0x0100 -#define HOST_F_STOP 0x1000 - -#define HOST_S_IDLE 0x0001 -#define HOST_S_CMD 0x0002 -#define HOST_S_DATA 0x0003 -#define HOST_S_STOP 0x0004 - -#endif diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c index eed211b2ac7..5e880c0f134 100644 --- a/drivers/mmc/host/imxmmc.c +++ b/drivers/mmc/host/imxmmc.c @@ -892,9 +892,12 @@ static int imxmci_get_ro(struct mmc_host *mmc) struct imxmci_host *host = mmc_priv(mmc); if (host->pdata && host->pdata->get_ro) - return host->pdata->get_ro(mmc_dev(mmc)); - /* Host doesn't 
support read only detection so assume writeable */ - return 0; + return !!host->pdata->get_ro(mmc_dev(mmc)); + /* + * Board doesn't support read only detection; let the mmc core + * decide what to do. + */ + return -ENOSYS; } diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index 35508584ac2..41cc63360e4 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -1126,16 +1126,28 @@ static int mmc_spi_get_ro(struct mmc_host *mmc) struct mmc_spi_host *host = mmc_priv(mmc); if (host->pdata && host->pdata->get_ro) - return host->pdata->get_ro(mmc->parent); - /* board doesn't support read only detection; assume writeable */ - return 0; + return !!host->pdata->get_ro(mmc->parent); + /* + * Board doesn't support read only detection; let the mmc core + * decide what to do. + */ + return -ENOSYS; } +static int mmc_spi_get_cd(struct mmc_host *mmc) +{ + struct mmc_spi_host *host = mmc_priv(mmc); + + if (host->pdata && host->pdata->get_cd) + return !!host->pdata->get_cd(mmc->parent); + return -ENOSYS; +} static const struct mmc_host_ops mmc_spi_ops = { .request = mmc_spi_request, .set_ios = mmc_spi_set_ios, .get_ro = mmc_spi_get_ro, + .get_cd = mmc_spi_get_cd, }; @@ -1240,10 +1252,7 @@ static int mmc_spi_probe(struct spi_device *spi) mmc->ops = &mmc_spi_ops; mmc->max_blk_size = MMC_SPI_BLOCKSIZE; - /* As long as we keep track of the number of successfully - * transmitted blocks, we're good for multiwrite. - */ - mmc->caps = MMC_CAP_SPI | MMC_CAP_MULTIWRITE; + mmc->caps = MMC_CAP_SPI; /* SPI doesn't need the lowspeed device identification thing for * MMC or SD cards, since it never comes up in open drain mode. @@ -1319,17 +1328,23 @@ static int mmc_spi_probe(struct spi_device *spi) goto fail_glue_init; } + /* pass platform capabilities, if any */ + if (host->pdata) + mmc->caps |= host->pdata->caps; + status = mmc_add_host(mmc); if (status != 0) goto fail_add_host; - dev_info(&spi->dev, "SD/MMC host %s%s%s%s\n", + dev_info(&spi->dev, "SD/MMC host %s%s%s%s%s\n", mmc->class_dev.bus_id, host->dma_dev ? "" : ", no DMA", (host->pdata && host->pdata->get_ro) ? "" : ", no WP", (host->pdata && host->pdata->setpower) - ? "" : ", no poweroff"); + ? "" : ", no poweroff", + (mmc->caps & MMC_CAP_NEEDS_POLL) + ? 
", cd polling" : ""); return 0; fail_add_host: diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index da5fecad74d..696cf3647ce 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -535,7 +535,6 @@ static int mmci_probe(struct amba_device *dev, void *id) mmc->f_min = (host->mclk + 511) / 512; mmc->f_max = min(host->mclk, fmax); mmc->ocr_avail = plat->ocr_mask; - mmc->caps = MMC_CAP_MULTIWRITE; /* * We can do SGIO diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c index 549517c3567..dbc26eb6a89 100644 --- a/drivers/mmc/host/omap.c +++ b/drivers/mmc/host/omap.c @@ -1317,7 +1317,7 @@ static int __init mmc_omap_new_slot(struct mmc_omap_host *host, int id) host->slots[id] = slot; - mmc->caps = MMC_CAP_MULTIWRITE; + mmc->caps = 0; if (host->pdata->conf.wire4) mmc->caps |= MMC_CAP_4_BIT_DATA; diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index d89475d3698..d39f5973886 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c @@ -374,9 +374,12 @@ static int pxamci_get_ro(struct mmc_host *mmc) struct pxamci_host *host = mmc_priv(mmc); if (host->pdata && host->pdata->get_ro) - return host->pdata->get_ro(mmc_dev(mmc)); - /* Host doesn't support read only detection so assume writeable */ - return 0; + return !!host->pdata->get_ro(mmc_dev(mmc)); + /* + * Board doesn't support read only detection; let the mmc core + * decide what to do. + */ + return -ENOSYS; } static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c new file mode 100644 index 00000000000..6a1e4994b72 --- /dev/null +++ b/drivers/mmc/host/s3cmci.c @@ -0,0 +1,1446 @@ +/* + * linux/drivers/mmc/s3cmci.h - Samsung S3C MCI driver + * + * Copyright (C) 2004-2006 maintech GmbH, Thomas Kleffel <tk@maintech.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/dma-mapping.h> +#include <linux/clk.h> +#include <linux/mmc/host.h> +#include <linux/platform_device.h> +#include <linux/irq.h> +#include <linux/io.h> + +#include <asm/dma.h> + +#include <asm/arch/regs-sdi.h> +#include <asm/arch/regs-gpio.h> + +#include <asm/plat-s3c24xx/mci.h> + +#include "s3cmci.h" + +#define DRIVER_NAME "s3c-mci" + +enum dbg_channels { + dbg_err = (1 << 0), + dbg_debug = (1 << 1), + dbg_info = (1 << 2), + dbg_irq = (1 << 3), + dbg_sg = (1 << 4), + dbg_dma = (1 << 5), + dbg_pio = (1 << 6), + dbg_fail = (1 << 7), + dbg_conf = (1 << 8), +}; + +static const int dbgmap_err = dbg_err | dbg_fail; +static const int dbgmap_info = dbg_info | dbg_conf; +static const int dbgmap_debug = dbg_debug; + +#define dbg(host, channels, args...) 
\ + do { \ + if (dbgmap_err & channels) \ + dev_err(&host->pdev->dev, args); \ + else if (dbgmap_info & channels) \ + dev_info(&host->pdev->dev, args); \ + else if (dbgmap_debug & channels) \ + dev_dbg(&host->pdev->dev, args); \ + } while (0) + +#define RESSIZE(ressource) (((ressource)->end - (ressource)->start)+1) + +static struct s3c2410_dma_client s3cmci_dma_client = { + .name = "s3c-mci", +}; + +static void finalize_request(struct s3cmci_host *host); +static void s3cmci_send_request(struct mmc_host *mmc); +static void s3cmci_reset(struct s3cmci_host *host); + +#ifdef CONFIG_MMC_DEBUG + +static void dbg_dumpregs(struct s3cmci_host *host, char *prefix) +{ + u32 con, pre, cmdarg, cmdcon, cmdsta, r0, r1, r2, r3, timer, bsize; + u32 datcon, datcnt, datsta, fsta, imask; + + con = readl(host->base + S3C2410_SDICON); + pre = readl(host->base + S3C2410_SDIPRE); + cmdarg = readl(host->base + S3C2410_SDICMDARG); + cmdcon = readl(host->base + S3C2410_SDICMDCON); + cmdsta = readl(host->base + S3C2410_SDICMDSTAT); + r0 = readl(host->base + S3C2410_SDIRSP0); + r1 = readl(host->base + S3C2410_SDIRSP1); + r2 = readl(host->base + S3C2410_SDIRSP2); + r3 = readl(host->base + S3C2410_SDIRSP3); + timer = readl(host->base + S3C2410_SDITIMER); + bsize = readl(host->base + S3C2410_SDIBSIZE); + datcon = readl(host->base + S3C2410_SDIDCON); + datcnt = readl(host->base + S3C2410_SDIDCNT); + datsta = readl(host->base + S3C2410_SDIDSTA); + fsta = readl(host->base + S3C2410_SDIFSTA); + imask = readl(host->base + host->sdiimsk); + + dbg(host, dbg_debug, "%s CON:[%08x] PRE:[%08x] TMR:[%08x]\n", + prefix, con, pre, timer); + + dbg(host, dbg_debug, "%s CCON:[%08x] CARG:[%08x] CSTA:[%08x]\n", + prefix, cmdcon, cmdarg, cmdsta); + + dbg(host, dbg_debug, "%s DCON:[%08x] FSTA:[%08x]" + " DSTA:[%08x] DCNT:[%08x]\n", + prefix, datcon, fsta, datsta, datcnt); + + dbg(host, dbg_debug, "%s R0:[%08x] R1:[%08x]" + " R2:[%08x] R3:[%08x]\n", + prefix, r0, r1, r2, r3); +} + +static void prepare_dbgmsg(struct s3cmci_host *host, struct mmc_command *cmd, + int stop) +{ + snprintf(host->dbgmsg_cmd, 300, + "#%u%s op:%i arg:0x%08x flags:0x08%x retries:%u", + host->ccnt, (stop ? " (STOP)" : ""), + cmd->opcode, cmd->arg, cmd->flags, cmd->retries); + + if (cmd->data) { + snprintf(host->dbgmsg_dat, 300, + "#%u bsize:%u blocks:%u bytes:%u", + host->dcnt, cmd->data->blksz, + cmd->data->blocks, + cmd->data->blocks * cmd->data->blksz); + } else { + host->dbgmsg_dat[0] = '\0'; + } +} + +static void dbg_dumpcmd(struct s3cmci_host *host, struct mmc_command *cmd, + int fail) +{ + unsigned int dbglvl = fail ? 
dbg_fail : dbg_debug; + + if (!cmd) + return; + + if (cmd->error == 0) { + dbg(host, dbglvl, "CMD[OK] %s R0:0x%08x\n", + host->dbgmsg_cmd, cmd->resp[0]); + } else { + dbg(host, dbglvl, "CMD[ERR %i] %s Status:%s\n", + cmd->error, host->dbgmsg_cmd, host->status); + } + + if (!cmd->data) + return; + + if (cmd->data->error == 0) { + dbg(host, dbglvl, "DAT[OK] %s\n", host->dbgmsg_dat); + } else { + dbg(host, dbglvl, "DAT[ERR %i] %s DCNT:0x%08x\n", + cmd->data->error, host->dbgmsg_dat, + readl(host->base + S3C2410_SDIDCNT)); + } +} +#else +static void dbg_dumpcmd(struct s3cmci_host *host, + struct mmc_command *cmd, int fail) { } + +static void prepare_dbgmsg(struct s3cmci_host *host, struct mmc_command *cmd, + int stop) { } + +static void dbg_dumpregs(struct s3cmci_host *host, char *prefix) { } + +#endif /* CONFIG_MMC_DEBUG */ + +static inline u32 enable_imask(struct s3cmci_host *host, u32 imask) +{ + u32 newmask; + + newmask = readl(host->base + host->sdiimsk); + newmask |= imask; + + writel(newmask, host->base + host->sdiimsk); + + return newmask; +} + +static inline u32 disable_imask(struct s3cmci_host *host, u32 imask) +{ + u32 newmask; + + newmask = readl(host->base + host->sdiimsk); + newmask &= ~imask; + + writel(newmask, host->base + host->sdiimsk); + + return newmask; +} + +static inline void clear_imask(struct s3cmci_host *host) +{ + writel(0, host->base + host->sdiimsk); +} + +static inline int get_data_buffer(struct s3cmci_host *host, + u32 *words, u32 **pointer) +{ + struct scatterlist *sg; + + if (host->pio_active == XFER_NONE) + return -EINVAL; + + if ((!host->mrq) || (!host->mrq->data)) + return -EINVAL; + + if (host->pio_sgptr >= host->mrq->data->sg_len) { + dbg(host, dbg_debug, "no more buffers (%i/%i)\n", + host->pio_sgptr, host->mrq->data->sg_len); + return -EBUSY; + } + sg = &host->mrq->data->sg[host->pio_sgptr]; + + *words = sg->length >> 2; + *pointer = sg_virt(sg); + + host->pio_sgptr++; + + dbg(host, dbg_sg, "new buffer (%i/%i)\n", + host->pio_sgptr, host->mrq->data->sg_len); + + return 0; +} + +static inline u32 fifo_count(struct s3cmci_host *host) +{ + u32 fifostat = readl(host->base + S3C2410_SDIFSTA); + + fifostat &= S3C2410_SDIFSTA_COUNTMASK; + return fifostat >> 2; +} + +static inline u32 fifo_free(struct s3cmci_host *host) +{ + u32 fifostat = readl(host->base + S3C2410_SDIFSTA); + + fifostat &= S3C2410_SDIFSTA_COUNTMASK; + return (63 - fifostat) >> 2; +} + +static void do_pio_read(struct s3cmci_host *host) +{ + int res; + u32 fifo; + void __iomem *from_ptr; + + /* write real prescaler to host, it might be set slow to fix */ + writel(host->prescaler, host->base + S3C2410_SDIPRE); + + from_ptr = host->base + host->sdidata; + + while ((fifo = fifo_count(host))) { + if (!host->pio_words) { + res = get_data_buffer(host, &host->pio_words, + &host->pio_ptr); + if (res) { + host->pio_active = XFER_NONE; + host->complete_what = COMPLETION_FINALIZE; + + dbg(host, dbg_pio, "pio_read(): " + "complete (no more data).\n"); + return; + } + + dbg(host, dbg_pio, + "pio_read(): new target: [%i]@[%p]\n", + host->pio_words, host->pio_ptr); + } + + dbg(host, dbg_pio, + "pio_read(): fifo:[%02i] buffer:[%03i] dcnt:[%08X]\n", + fifo, host->pio_words, + readl(host->base + S3C2410_SDIDCNT)); + + if (fifo > host->pio_words) + fifo = host->pio_words; + + host->pio_words -= fifo; + host->pio_count += fifo; + + while (fifo--) + *(host->pio_ptr++) = readl(from_ptr); + } + + if (!host->pio_words) { + res = get_data_buffer(host, &host->pio_words, &host->pio_ptr); + if (res) { + dbg(host, dbg_pio, 
+ "pio_read(): complete (no more buffers).\n"); + host->pio_active = XFER_NONE; + host->complete_what = COMPLETION_FINALIZE; + + return; + } + } + + enable_imask(host, + S3C2410_SDIIMSK_RXFIFOHALF | S3C2410_SDIIMSK_RXFIFOLAST); +} + +static void do_pio_write(struct s3cmci_host *host) +{ + void __iomem *to_ptr; + int res; + u32 fifo; + + to_ptr = host->base + host->sdidata; + + while ((fifo = fifo_free(host))) { + if (!host->pio_words) { + res = get_data_buffer(host, &host->pio_words, + &host->pio_ptr); + if (res) { + dbg(host, dbg_pio, + "pio_write(): complete (no more data).\n"); + host->pio_active = XFER_NONE; + + return; + } + + dbg(host, dbg_pio, + "pio_write(): new source: [%i]@[%p]\n", + host->pio_words, host->pio_ptr); + + } + + if (fifo > host->pio_words) + fifo = host->pio_words; + + host->pio_words -= fifo; + host->pio_count += fifo; + + while (fifo--) + writel(*(host->pio_ptr++), to_ptr); + } + + enable_imask(host, S3C2410_SDIIMSK_TXFIFOHALF); +} + +static void pio_tasklet(unsigned long data) +{ + struct s3cmci_host *host = (struct s3cmci_host *) data; + + + disable_irq(host->irq); + + if (host->pio_active == XFER_WRITE) + do_pio_write(host); + + if (host->pio_active == XFER_READ) + do_pio_read(host); + + if (host->complete_what == COMPLETION_FINALIZE) { + clear_imask(host); + if (host->pio_active != XFER_NONE) { + dbg(host, dbg_err, "unfinished %s " + "- pio_count:[%u] pio_words:[%u]\n", + (host->pio_active == XFER_READ) ? "read" : "write", + host->pio_count, host->pio_words); + + if (host->mrq->data) + host->mrq->data->error = -EINVAL; + } + + finalize_request(host); + } else + enable_irq(host->irq); +} + +/* + * ISR for SDI Interface IRQ + * Communication between driver and ISR works as follows: + * host->mrq points to current request + * host->complete_what Indicates when the request is considered done + * COMPLETION_CMDSENT when the command was sent + * COMPLETION_RSPFIN when a response was received + * COMPLETION_XFERFINISH when the data transfer is finished + * COMPLETION_XFERFINISH_RSPFIN both of the above. 
+ * host->complete_request is the completion-object the driver waits for + * + * 1) Driver sets up host->mrq and host->complete_what + * 2) Driver prepares the transfer + * 3) Driver enables interrupts + * 4) Driver starts transfer + * 5) Driver waits for host->complete_rquest + * 6) ISR checks for request status (errors and success) + * 6) ISR sets host->mrq->cmd->error and host->mrq->data->error + * 7) ISR completes host->complete_request + * 8) ISR disables interrupts + * 9) Driver wakes up and takes care of the request + * + * Note: "->error"-fields are expected to be set to 0 before the request + * was issued by mmc.c - therefore they are only set, when an error + * contition comes up + */ + +static irqreturn_t s3cmci_irq(int irq, void *dev_id) +{ + struct s3cmci_host *host = dev_id; + struct mmc_command *cmd; + u32 mci_csta, mci_dsta, mci_fsta, mci_dcnt, mci_imsk; + u32 mci_cclear, mci_dclear; + unsigned long iflags; + + spin_lock_irqsave(&host->complete_lock, iflags); + + mci_csta = readl(host->base + S3C2410_SDICMDSTAT); + mci_dsta = readl(host->base + S3C2410_SDIDSTA); + mci_dcnt = readl(host->base + S3C2410_SDIDCNT); + mci_fsta = readl(host->base + S3C2410_SDIFSTA); + mci_imsk = readl(host->base + host->sdiimsk); + mci_cclear = 0; + mci_dclear = 0; + + if ((host->complete_what == COMPLETION_NONE) || + (host->complete_what == COMPLETION_FINALIZE)) { + host->status = "nothing to complete"; + clear_imask(host); + goto irq_out; + } + + if (!host->mrq) { + host->status = "no active mrq"; + clear_imask(host); + goto irq_out; + } + + cmd = host->cmd_is_stop ? host->mrq->stop : host->mrq->cmd; + + if (!cmd) { + host->status = "no active cmd"; + clear_imask(host); + goto irq_out; + } + + if (!host->dodma) { + if ((host->pio_active == XFER_WRITE) && + (mci_fsta & S3C2410_SDIFSTA_TFDET)) { + + disable_imask(host, S3C2410_SDIIMSK_TXFIFOHALF); + tasklet_schedule(&host->pio_tasklet); + host->status = "pio tx"; + } + + if ((host->pio_active == XFER_READ) && + (mci_fsta & S3C2410_SDIFSTA_RFDET)) { + + disable_imask(host, + S3C2410_SDIIMSK_RXFIFOHALF | + S3C2410_SDIIMSK_RXFIFOLAST); + + tasklet_schedule(&host->pio_tasklet); + host->status = "pio rx"; + } + } + + if (mci_csta & S3C2410_SDICMDSTAT_CMDTIMEOUT) { + dbg(host, dbg_err, "CMDSTAT: error CMDTIMEOUT\n"); + cmd->error = -ETIMEDOUT; + host->status = "error: command timeout"; + goto fail_transfer; + } + + if (mci_csta & S3C2410_SDICMDSTAT_CMDSENT) { + if (host->complete_what == COMPLETION_CMDSENT) { + host->status = "ok: command sent"; + goto close_transfer; + } + + mci_cclear |= S3C2410_SDICMDSTAT_CMDSENT; + } + + if (mci_csta & S3C2410_SDICMDSTAT_CRCFAIL) { + if (cmd->flags & MMC_RSP_CRC) { + if (host->mrq->cmd->flags & MMC_RSP_136) { + dbg(host, dbg_irq, + "fixup: ignore CRC fail with long rsp\n"); + } else { + /* note, we used to fail the transfer + * here, but it seems that this is just + * the hardware getting it wrong. 
+ * + * cmd->error = -EILSEQ; + * host->status = "error: bad command crc"; + * goto fail_transfer; + */ + } + } + + mci_cclear |= S3C2410_SDICMDSTAT_CRCFAIL; + } + + if (mci_csta & S3C2410_SDICMDSTAT_RSPFIN) { + if (host->complete_what == COMPLETION_RSPFIN) { + host->status = "ok: command response received"; + goto close_transfer; + } + + if (host->complete_what == COMPLETION_XFERFINISH_RSPFIN) + host->complete_what = COMPLETION_XFERFINISH; + + mci_cclear |= S3C2410_SDICMDSTAT_RSPFIN; + } + + /* errors handled after this point are only relevant + when a data transfer is in progress */ + + if (!cmd->data) + goto clear_status_bits; + + /* Check for FIFO failure */ + if (host->is2440) { + if (mci_fsta & S3C2440_SDIFSTA_FIFOFAIL) { + dbg(host, dbg_err, "FIFO failure\n"); + host->mrq->data->error = -EILSEQ; + host->status = "error: 2440 fifo failure"; + goto fail_transfer; + } + } else { + if (mci_dsta & S3C2410_SDIDSTA_FIFOFAIL) { + dbg(host, dbg_err, "FIFO failure\n"); + cmd->data->error = -EILSEQ; + host->status = "error: fifo failure"; + goto fail_transfer; + } + } + + if (mci_dsta & S3C2410_SDIDSTA_RXCRCFAIL) { + dbg(host, dbg_err, "bad data crc (outgoing)\n"); + cmd->data->error = -EILSEQ; + host->status = "error: bad data crc (outgoing)"; + goto fail_transfer; + } + + if (mci_dsta & S3C2410_SDIDSTA_CRCFAIL) { + dbg(host, dbg_err, "bad data crc (incoming)\n"); + cmd->data->error = -EILSEQ; + host->status = "error: bad data crc (incoming)"; + goto fail_transfer; + } + + if (mci_dsta & S3C2410_SDIDSTA_DATATIMEOUT) { + dbg(host, dbg_err, "data timeout\n"); + cmd->data->error = -ETIMEDOUT; + host->status = "error: data timeout"; + goto fail_transfer; + } + + if (mci_dsta & S3C2410_SDIDSTA_XFERFINISH) { + if (host->complete_what == COMPLETION_XFERFINISH) { + host->status = "ok: data transfer completed"; + goto close_transfer; + } + + if (host->complete_what == COMPLETION_XFERFINISH_RSPFIN) + host->complete_what = COMPLETION_RSPFIN; + + mci_dclear |= S3C2410_SDIDSTA_XFERFINISH; + } + +clear_status_bits: + writel(mci_cclear, host->base + S3C2410_SDICMDSTAT); + writel(mci_dclear, host->base + S3C2410_SDIDSTA); + + goto irq_out; + +fail_transfer: + host->pio_active = XFER_NONE; + +close_transfer: + host->complete_what = COMPLETION_FINALIZE; + + clear_imask(host); + tasklet_schedule(&host->pio_tasklet); + + goto irq_out; + +irq_out: + dbg(host, dbg_irq, + "csta:0x%08x dsta:0x%08x fsta:0x%08x dcnt:0x%08x status:%s.\n", + mci_csta, mci_dsta, mci_fsta, mci_dcnt, host->status); + + spin_unlock_irqrestore(&host->complete_lock, iflags); + return IRQ_HANDLED; + +} + +/* + * ISR for the CardDetect Pin +*/ + +static irqreturn_t s3cmci_irq_cd(int irq, void *dev_id) +{ + struct s3cmci_host *host = (struct s3cmci_host *)dev_id; + + dbg(host, dbg_irq, "card detect\n"); + + mmc_detect_change(host->mmc, msecs_to_jiffies(500)); + + return IRQ_HANDLED; +} + +void s3cmci_dma_done_callback(struct s3c2410_dma_chan *dma_ch, void *buf_id, + int size, enum s3c2410_dma_buffresult result) +{ + struct s3cmci_host *host = buf_id; + unsigned long iflags; + u32 mci_csta, mci_dsta, mci_fsta, mci_dcnt; + + mci_csta = readl(host->base + S3C2410_SDICMDSTAT); + mci_dsta = readl(host->base + S3C2410_SDIDSTA); + mci_fsta = readl(host->base + S3C2410_SDIFSTA); + mci_dcnt = readl(host->base + S3C2410_SDIDCNT); + + BUG_ON(!host->mrq); + BUG_ON(!host->mrq->data); + BUG_ON(!host->dmatogo); + + spin_lock_irqsave(&host->complete_lock, iflags); + + if (result != S3C2410_RES_OK) { + dbg(host, dbg_fail, "DMA FAILED: csta=0x%08x dsta=0x%08x " + 
"fsta=0x%08x dcnt:0x%08x result:0x%08x toGo:%u\n", + mci_csta, mci_dsta, mci_fsta, + mci_dcnt, result, host->dmatogo); + + goto fail_request; + } + + host->dmatogo--; + if (host->dmatogo) { + dbg(host, dbg_dma, "DMA DONE Size:%i DSTA:[%08x] " + "DCNT:[%08x] toGo:%u\n", + size, mci_dsta, mci_dcnt, host->dmatogo); + + goto out; + } + + dbg(host, dbg_dma, "DMA FINISHED Size:%i DSTA:%08x DCNT:%08x\n", + size, mci_dsta, mci_dcnt); + + host->complete_what = COMPLETION_FINALIZE; + +out: + tasklet_schedule(&host->pio_tasklet); + spin_unlock_irqrestore(&host->complete_lock, iflags); + return; + +fail_request: + host->mrq->data->error = -EINVAL; + host->complete_what = COMPLETION_FINALIZE; + writel(0, host->base + host->sdiimsk); + goto out; + +} + +static void finalize_request(struct s3cmci_host *host) +{ + struct mmc_request *mrq = host->mrq; + struct mmc_command *cmd = host->cmd_is_stop ? mrq->stop : mrq->cmd; + int debug_as_failure = 0; + + if (host->complete_what != COMPLETION_FINALIZE) + return; + + if (!mrq) + return; + + if (cmd->data && (cmd->error == 0) && + (cmd->data->error == 0)) { + if (host->dodma && (!host->dma_complete)) { + dbg(host, dbg_dma, "DMA Missing!\n"); + return; + } + } + + /* Read response from controller. */ + cmd->resp[0] = readl(host->base + S3C2410_SDIRSP0); + cmd->resp[1] = readl(host->base + S3C2410_SDIRSP1); + cmd->resp[2] = readl(host->base + S3C2410_SDIRSP2); + cmd->resp[3] = readl(host->base + S3C2410_SDIRSP3); + + writel(host->prescaler, host->base + S3C2410_SDIPRE); + + if (cmd->error) + debug_as_failure = 1; + + if (cmd->data && cmd->data->error) + debug_as_failure = 1; + + dbg_dumpcmd(host, cmd, debug_as_failure); + + /* Cleanup controller */ + writel(0, host->base + S3C2410_SDICMDARG); + writel(S3C2410_SDIDCON_STOP, host->base + S3C2410_SDIDCON); + writel(0, host->base + S3C2410_SDICMDCON); + writel(0, host->base + host->sdiimsk); + + if (cmd->data && cmd->error) + cmd->data->error = cmd->error; + + if (cmd->data && cmd->data->stop && (!host->cmd_is_stop)) { + host->cmd_is_stop = 1; + s3cmci_send_request(host->mmc); + return; + } + + /* If we have no data transfer we are finished here */ + if (!mrq->data) + goto request_done; + + /* Calulate the amout of bytes transfer if there was no error */ + if (mrq->data->error == 0) { + mrq->data->bytes_xfered = + (mrq->data->blocks * mrq->data->blksz); + } else { + mrq->data->bytes_xfered = 0; + } + + /* If we had an error while transfering data we flush the + * DMA channel and the fifo to clear out any garbage. */ + if (mrq->data->error != 0) { + if (host->dodma) + s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH); + + if (host->is2440) { + /* Clear failure register and reset fifo. 
*/ + writel(S3C2440_SDIFSTA_FIFORESET | + S3C2440_SDIFSTA_FIFOFAIL, + host->base + S3C2410_SDIFSTA); + } else { + u32 mci_con; + + /* reset fifo */ + mci_con = readl(host->base + S3C2410_SDICON); + mci_con |= S3C2410_SDICON_FIFORESET; + + writel(mci_con, host->base + S3C2410_SDICON); + } + } + +request_done: + host->complete_what = COMPLETION_NONE; + host->mrq = NULL; + mmc_request_done(host->mmc, mrq); +} + + +void s3cmci_dma_setup(struct s3cmci_host *host, enum s3c2410_dmasrc source) +{ + static enum s3c2410_dmasrc last_source = -1; + static int setup_ok; + + if (last_source == source) + return; + + last_source = source; + + s3c2410_dma_devconfig(host->dma, source, 3, + host->mem->start + host->sdidata); + + if (!setup_ok) { + s3c2410_dma_config(host->dma, 4, + (S3C2410_DCON_HWTRIG | S3C2410_DCON_CH0_SDI)); + s3c2410_dma_set_buffdone_fn(host->dma, + s3cmci_dma_done_callback); + s3c2410_dma_setflags(host->dma, S3C2410_DMAF_AUTOSTART); + setup_ok = 1; + } +} + +static void s3cmci_send_command(struct s3cmci_host *host, + struct mmc_command *cmd) +{ + u32 ccon, imsk; + + imsk = S3C2410_SDIIMSK_CRCSTATUS | S3C2410_SDIIMSK_CMDTIMEOUT | + S3C2410_SDIIMSK_RESPONSEND | S3C2410_SDIIMSK_CMDSENT | + S3C2410_SDIIMSK_RESPONSECRC; + + enable_imask(host, imsk); + + if (cmd->data) + host->complete_what = COMPLETION_XFERFINISH_RSPFIN; + else if (cmd->flags & MMC_RSP_PRESENT) + host->complete_what = COMPLETION_RSPFIN; + else + host->complete_what = COMPLETION_CMDSENT; + + writel(cmd->arg, host->base + S3C2410_SDICMDARG); + + ccon = cmd->opcode & S3C2410_SDICMDCON_INDEX; + ccon |= S3C2410_SDICMDCON_SENDERHOST | S3C2410_SDICMDCON_CMDSTART; + + if (cmd->flags & MMC_RSP_PRESENT) + ccon |= S3C2410_SDICMDCON_WAITRSP; + + if (cmd->flags & MMC_RSP_136) + ccon |= S3C2410_SDICMDCON_LONGRSP; + + writel(ccon, host->base + S3C2410_SDICMDCON); +} + +static int s3cmci_setup_data(struct s3cmci_host *host, struct mmc_data *data) +{ + u32 dcon, imsk, stoptries = 3; + + /* write DCON register */ + + if (!data) { + writel(0, host->base + S3C2410_SDIDCON); + return 0; + } + + if ((data->blksz & 3) != 0) { + /* We cannot deal with unaligned blocks with more than + * one block being transfered. */ + + if (data->blocks > 1) + return -EINVAL; + + /* No support yet for non-word block transfers. 
*/ + return -EINVAL; + } + + while (readl(host->base + S3C2410_SDIDSTA) & + (S3C2410_SDIDSTA_TXDATAON | S3C2410_SDIDSTA_RXDATAON)) { + + dbg(host, dbg_err, + "mci_setup_data() transfer stillin progress.\n"); + + writel(S3C2410_SDIDCON_STOP, host->base + S3C2410_SDIDCON); + s3cmci_reset(host); + + if ((stoptries--) == 0) { + dbg_dumpregs(host, "DRF"); + return -EINVAL; + } + } + + dcon = data->blocks & S3C2410_SDIDCON_BLKNUM_MASK; + + if (host->dodma) + dcon |= S3C2410_SDIDCON_DMAEN; + + if (host->bus_width == MMC_BUS_WIDTH_4) + dcon |= S3C2410_SDIDCON_WIDEBUS; + + if (!(data->flags & MMC_DATA_STREAM)) + dcon |= S3C2410_SDIDCON_BLOCKMODE; + + if (data->flags & MMC_DATA_WRITE) { + dcon |= S3C2410_SDIDCON_TXAFTERRESP; + dcon |= S3C2410_SDIDCON_XFER_TXSTART; + } + + if (data->flags & MMC_DATA_READ) { + dcon |= S3C2410_SDIDCON_RXAFTERCMD; + dcon |= S3C2410_SDIDCON_XFER_RXSTART; + } + + if (host->is2440) { + dcon |= S3C2440_SDIDCON_DS_WORD; + dcon |= S3C2440_SDIDCON_DATSTART; + } + + writel(dcon, host->base + S3C2410_SDIDCON); + + /* write BSIZE register */ + + writel(data->blksz, host->base + S3C2410_SDIBSIZE); + + /* add to IMASK register */ + imsk = S3C2410_SDIIMSK_FIFOFAIL | S3C2410_SDIIMSK_DATACRC | + S3C2410_SDIIMSK_DATATIMEOUT | S3C2410_SDIIMSK_DATAFINISH; + + enable_imask(host, imsk); + + /* write TIMER register */ + + if (host->is2440) { + writel(0x007FFFFF, host->base + S3C2410_SDITIMER); + } else { + writel(0x0000FFFF, host->base + S3C2410_SDITIMER); + + /* FIX: set slow clock to prevent timeouts on read */ + if (data->flags & MMC_DATA_READ) + writel(0xFF, host->base + S3C2410_SDIPRE); + } + + return 0; +} + +#define BOTH_DIR (MMC_DATA_WRITE | MMC_DATA_READ) + +static int s3cmci_prepare_pio(struct s3cmci_host *host, struct mmc_data *data) +{ + int rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0; + + BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR); + + host->pio_sgptr = 0; + host->pio_words = 0; + host->pio_count = 0; + host->pio_active = rw ? XFER_WRITE : XFER_READ; + + if (rw) { + do_pio_write(host); + enable_imask(host, S3C2410_SDIIMSK_TXFIFOHALF); + } else { + enable_imask(host, S3C2410_SDIIMSK_RXFIFOHALF + | S3C2410_SDIIMSK_RXFIFOLAST); + } + + return 0; +} + +static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data) +{ + int dma_len, i; + int rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0; + + BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR); + + s3cmci_dma_setup(host, rw ? S3C2410_DMASRC_MEM : S3C2410_DMASRC_HW); + s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH); + + dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, + (rw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); + + if (dma_len == 0) + return -ENOMEM; + + host->dma_complete = 0; + host->dmatogo = dma_len; + + for (i = 0; i < dma_len; i++) { + int res; + + dbg(host, dbg_dma, "enqueue %i:%u@%u\n", i, + sg_dma_address(&data->sg[i]), + sg_dma_len(&data->sg[i])); + + res = s3c2410_dma_enqueue(host->dma, (void *) host, + sg_dma_address(&data->sg[i]), + sg_dma_len(&data->sg[i])); + + if (res) { + s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH); + return -EBUSY; + } + } + + s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_START); + + return 0; +} + +static void s3cmci_send_request(struct mmc_host *mmc) +{ + struct s3cmci_host *host = mmc_priv(mmc); + struct mmc_request *mrq = host->mrq; + struct mmc_command *cmd = host->cmd_is_stop ? 
mrq->stop : mrq->cmd; + + host->ccnt++; + prepare_dbgmsg(host, cmd, host->cmd_is_stop); + + /* Clear command, data and fifo status registers + Fifo clear only necessary on 2440, but doesn't hurt on 2410 + */ + writel(0xFFFFFFFF, host->base + S3C2410_SDICMDSTAT); + writel(0xFFFFFFFF, host->base + S3C2410_SDIDSTA); + writel(0xFFFFFFFF, host->base + S3C2410_SDIFSTA); + + if (cmd->data) { + int res = s3cmci_setup_data(host, cmd->data); + + host->dcnt++; + + if (res) { + dbg(host, dbg_err, "setup data error %d\n", res); + cmd->error = res; + cmd->data->error = res; + + mmc_request_done(mmc, mrq); + return; + } + + if (host->dodma) + res = s3cmci_prepare_dma(host, cmd->data); + else + res = s3cmci_prepare_pio(host, cmd->data); + + if (res) { + dbg(host, dbg_err, "data prepare error %d\n", res); + cmd->error = res; + cmd->data->error = res; + + mmc_request_done(mmc, mrq); + return; + } + } + + /* Send command */ + s3cmci_send_command(host, cmd); + + /* Enable Interrupt */ + enable_irq(host->irq); +} + +static int s3cmci_card_present(struct s3cmci_host *host) +{ + struct s3c24xx_mci_pdata *pdata = host->pdata; + int ret; + + if (pdata->gpio_detect == 0) + return -ENOSYS; + + ret = s3c2410_gpio_getpin(pdata->gpio_detect) ? 0 : 1; + return ret ^ pdata->detect_invert; +} + +static void s3cmci_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct s3cmci_host *host = mmc_priv(mmc); + + host->status = "mmc request"; + host->cmd_is_stop = 0; + host->mrq = mrq; + + if (s3cmci_card_present(host) == 0) { + dbg(host, dbg_err, "%s: no medium present\n", __func__); + host->mrq->cmd->error = -ENOMEDIUM; + mmc_request_done(mmc, mrq); + } else + s3cmci_send_request(mmc); +} + +static void s3cmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct s3cmci_host *host = mmc_priv(mmc); + u32 mci_psc, mci_con; + + /* Set the power state */ + + mci_con = readl(host->base + S3C2410_SDICON); + + switch (ios->power_mode) { + case MMC_POWER_ON: + case MMC_POWER_UP: + s3c2410_gpio_cfgpin(S3C2410_GPE5, S3C2410_GPE5_SDCLK); + s3c2410_gpio_cfgpin(S3C2410_GPE6, S3C2410_GPE6_SDCMD); + s3c2410_gpio_cfgpin(S3C2410_GPE7, S3C2410_GPE7_SDDAT0); + s3c2410_gpio_cfgpin(S3C2410_GPE8, S3C2410_GPE8_SDDAT1); + s3c2410_gpio_cfgpin(S3C2410_GPE9, S3C2410_GPE9_SDDAT2); + s3c2410_gpio_cfgpin(S3C2410_GPE10, S3C2410_GPE10_SDDAT3); + + if (host->pdata->set_power) + host->pdata->set_power(ios->power_mode, ios->vdd); + + if (!host->is2440) + mci_con |= S3C2410_SDICON_FIFORESET; + + break; + + case MMC_POWER_OFF: + default: + s3c2410_gpio_setpin(S3C2410_GPE5, 0); + s3c2410_gpio_cfgpin(S3C2410_GPE5, S3C2410_GPE5_OUTP); + + if (host->is2440) + mci_con |= S3C2440_SDICON_SDRESET; + + if (host->pdata->set_power) + host->pdata->set_power(ios->power_mode, ios->vdd); + + break; + } + + /* Set clock */ + for (mci_psc = 0; mci_psc < 255; mci_psc++) { + host->real_rate = host->clk_rate / (host->clk_div*(mci_psc+1)); + + if (host->real_rate <= ios->clock) + break; + } + + if (mci_psc > 255) + mci_psc = 255; + + host->prescaler = mci_psc; + writel(host->prescaler, host->base + S3C2410_SDIPRE); + + /* If requested clock is 0, real_rate will be 0, too */ + if (ios->clock == 0) + host->real_rate = 0; + + /* Set CLOCK_ENABLE */ + if (ios->clock) + mci_con |= S3C2410_SDICON_CLOCKTYPE; + else + mci_con &= ~S3C2410_SDICON_CLOCKTYPE; + + writel(mci_con, host->base + S3C2410_SDICON); + + if ((ios->power_mode == MMC_POWER_ON) || + (ios->power_mode == MMC_POWER_UP)) { + dbg(host, dbg_conf, "running at %lukHz (requested: %ukHz).\n", + 
host->real_rate/1000, ios->clock/1000); + } else { + dbg(host, dbg_conf, "powered down.\n"); + } + + host->bus_width = ios->bus_width; +} + +static void s3cmci_reset(struct s3cmci_host *host) +{ + u32 con = readl(host->base + S3C2410_SDICON); + + con |= S3C2440_SDICON_SDRESET; + writel(con, host->base + S3C2410_SDICON); +} + +static int s3cmci_get_ro(struct mmc_host *mmc) +{ + struct s3cmci_host *host = mmc_priv(mmc); + struct s3c24xx_mci_pdata *pdata = host->pdata; + int ret; + + if (pdata->gpio_wprotect == 0) + return 0; + + ret = s3c2410_gpio_getpin(pdata->gpio_wprotect); + + if (pdata->wprotect_invert) + ret = !ret; + + return ret; +} + +static struct mmc_host_ops s3cmci_ops = { + .request = s3cmci_request, + .set_ios = s3cmci_set_ios, + .get_ro = s3cmci_get_ro, +}; + +static struct s3c24xx_mci_pdata s3cmci_def_pdata = { + /* This is currently here to avoid a number of if (host->pdata) + * checks. Any zero fields to ensure reaonable defaults are picked. */ +}; + +static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440) +{ + struct s3cmci_host *host; + struct mmc_host *mmc; + int ret; + + mmc = mmc_alloc_host(sizeof(struct s3cmci_host), &pdev->dev); + if (!mmc) { + ret = -ENOMEM; + goto probe_out; + } + + host = mmc_priv(mmc); + host->mmc = mmc; + host->pdev = pdev; + host->is2440 = is2440; + + host->pdata = pdev->dev.platform_data; + if (!host->pdata) { + pdev->dev.platform_data = &s3cmci_def_pdata; + host->pdata = &s3cmci_def_pdata; + } + + spin_lock_init(&host->complete_lock); + tasklet_init(&host->pio_tasklet, pio_tasklet, (unsigned long) host); + + if (is2440) { + host->sdiimsk = S3C2440_SDIIMSK; + host->sdidata = S3C2440_SDIDATA; + host->clk_div = 1; + } else { + host->sdiimsk = S3C2410_SDIIMSK; + host->sdidata = S3C2410_SDIDATA; + host->clk_div = 2; + } + + host->dodma = 0; + host->complete_what = COMPLETION_NONE; + host->pio_active = XFER_NONE; + + host->dma = S3CMCI_DMA; + + host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!host->mem) { + dev_err(&pdev->dev, + "failed to get io memory region resouce.\n"); + + ret = -ENOENT; + goto probe_free_host; + } + + host->mem = request_mem_region(host->mem->start, + RESSIZE(host->mem), pdev->name); + + if (!host->mem) { + dev_err(&pdev->dev, "failed to request io memory region.\n"); + ret = -ENOENT; + goto probe_free_host; + } + + host->base = ioremap(host->mem->start, RESSIZE(host->mem)); + if (host->base == 0) { + dev_err(&pdev->dev, "failed to ioremap() io memory region.\n"); + ret = -EINVAL; + goto probe_free_mem_region; + } + + host->irq = platform_get_irq(pdev, 0); + if (host->irq == 0) { + dev_err(&pdev->dev, "failed to get interrupt resouce.\n"); + ret = -EINVAL; + goto probe_iounmap; + } + + if (request_irq(host->irq, s3cmci_irq, 0, DRIVER_NAME, host)) { + dev_err(&pdev->dev, "failed to request mci interrupt.\n"); + ret = -ENOENT; + goto probe_iounmap; + } + + /* We get spurious interrupts even when we have set the IMSK + * register to ignore everything, so use disable_irq() to make + * ensure we don't lock the system with un-serviceable requests. 
*/ + + disable_irq(host->irq); + + host->irq_cd = s3c2410_gpio_getirq(host->pdata->gpio_detect); + + if (host->irq_cd >= 0) { + if (request_irq(host->irq_cd, s3cmci_irq_cd, + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, + DRIVER_NAME, host)) { + dev_err(&pdev->dev, "can't get card detect irq.\n"); + ret = -ENOENT; + goto probe_free_irq; + } + } else { + dev_warn(&pdev->dev, "host detect has no irq available\n"); + s3c2410_gpio_cfgpin(host->pdata->gpio_detect, + S3C2410_GPIO_INPUT); + } + + if (host->pdata->gpio_wprotect) + s3c2410_gpio_cfgpin(host->pdata->gpio_wprotect, + S3C2410_GPIO_INPUT); + + if (s3c2410_dma_request(S3CMCI_DMA, &s3cmci_dma_client, NULL) < 0) { + dev_err(&pdev->dev, "unable to get DMA channel.\n"); + ret = -EBUSY; + goto probe_free_irq_cd; + } + + host->clk = clk_get(&pdev->dev, "sdi"); + if (IS_ERR(host->clk)) { + dev_err(&pdev->dev, "failed to find clock source.\n"); + ret = PTR_ERR(host->clk); + host->clk = NULL; + goto probe_free_host; + } + + ret = clk_enable(host->clk); + if (ret) { + dev_err(&pdev->dev, "failed to enable clock source.\n"); + goto clk_free; + } + + host->clk_rate = clk_get_rate(host->clk); + + mmc->ops = &s3cmci_ops; + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + mmc->caps = MMC_CAP_4_BIT_DATA; + mmc->f_min = host->clk_rate / (host->clk_div * 256); + mmc->f_max = host->clk_rate / host->clk_div; + + if (host->pdata->ocr_avail) + mmc->ocr_avail = host->pdata->ocr_avail; + + mmc->max_blk_count = 4095; + mmc->max_blk_size = 4095; + mmc->max_req_size = 4095 * 512; + mmc->max_seg_size = mmc->max_req_size; + + mmc->max_phys_segs = 128; + mmc->max_hw_segs = 128; + + dbg(host, dbg_debug, + "probe: mode:%s mapped mci_base:%p irq:%u irq_cd:%u dma:%u.\n", + (host->is2440?"2440":""), + host->base, host->irq, host->irq_cd, host->dma); + + ret = mmc_add_host(mmc); + if (ret) { + dev_err(&pdev->dev, "failed to add mmc host.\n"); + goto free_dmabuf; + } + + platform_set_drvdata(pdev, mmc); + dev_info(&pdev->dev, "initialisation done.\n"); + + return 0; + + free_dmabuf: + clk_disable(host->clk); + + clk_free: + clk_put(host->clk); + + probe_free_irq_cd: + if (host->irq_cd >= 0) + free_irq(host->irq_cd, host); + + probe_free_irq: + free_irq(host->irq, host); + + probe_iounmap: + iounmap(host->base); + + probe_free_mem_region: + release_mem_region(host->mem->start, RESSIZE(host->mem)); + + probe_free_host: + mmc_free_host(mmc); + probe_out: + return ret; +} + +static int __devexit s3cmci_remove(struct platform_device *pdev) +{ + struct mmc_host *mmc = platform_get_drvdata(pdev); + struct s3cmci_host *host = mmc_priv(mmc); + + mmc_remove_host(mmc); + + clk_disable(host->clk); + clk_put(host->clk); + + tasklet_disable(&host->pio_tasklet); + s3c2410_dma_free(S3CMCI_DMA, &s3cmci_dma_client); + + if (host->irq_cd >= 0) + free_irq(host->irq_cd, host); + free_irq(host->irq, host); + + iounmap(host->base); + release_mem_region(host->mem->start, RESSIZE(host->mem)); + + mmc_free_host(mmc); + return 0; +} + +static int __devinit s3cmci_probe_2410(struct platform_device *dev) +{ + return s3cmci_probe(dev, 0); +} + +static int __devinit s3cmci_probe_2412(struct platform_device *dev) +{ + return s3cmci_probe(dev, 1); +} + +static int __devinit s3cmci_probe_2440(struct platform_device *dev) +{ + return s3cmci_probe(dev, 1); +} + +#ifdef CONFIG_PM + +static int s3cmci_suspend(struct platform_device *dev, pm_message_t state) +{ + struct mmc_host *mmc = platform_get_drvdata(dev); + + return mmc_suspend_host(mmc, state); +} + +static int s3cmci_resume(struct platform_device *dev) 
+{ + struct mmc_host *mmc = platform_get_drvdata(dev); + + return mmc_resume_host(mmc); +} + +#else /* CONFIG_PM */ +#define s3cmci_suspend NULL +#define s3cmci_resume NULL +#endif /* CONFIG_PM */ + + +static struct platform_driver s3cmci_driver_2410 = { + .driver.name = "s3c2410-sdi", + .driver.owner = THIS_MODULE, + .probe = s3cmci_probe_2410, + .remove = __devexit_p(s3cmci_remove), + .suspend = s3cmci_suspend, + .resume = s3cmci_resume, +}; + +static struct platform_driver s3cmci_driver_2412 = { + .driver.name = "s3c2412-sdi", + .driver.owner = THIS_MODULE, + .probe = s3cmci_probe_2412, + .remove = __devexit_p(s3cmci_remove), + .suspend = s3cmci_suspend, + .resume = s3cmci_resume, +}; + +static struct platform_driver s3cmci_driver_2440 = { + .driver.name = "s3c2440-sdi", + .driver.owner = THIS_MODULE, + .probe = s3cmci_probe_2440, + .remove = __devexit_p(s3cmci_remove), + .suspend = s3cmci_suspend, + .resume = s3cmci_resume, +}; + + +static int __init s3cmci_init(void) +{ + platform_driver_register(&s3cmci_driver_2410); + platform_driver_register(&s3cmci_driver_2412); + platform_driver_register(&s3cmci_driver_2440); + return 0; +} + +static void __exit s3cmci_exit(void) +{ + platform_driver_unregister(&s3cmci_driver_2410); + platform_driver_unregister(&s3cmci_driver_2412); + platform_driver_unregister(&s3cmci_driver_2440); +} + +module_init(s3cmci_init); +module_exit(s3cmci_exit); + +MODULE_DESCRIPTION("Samsung S3C MMC/SD Card Interface driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Thomas Kleffel <tk@maintech.de>"); +MODULE_ALIAS("platform:s3c2410-sdi"); +MODULE_ALIAS("platform:s3c2412-sdi"); +MODULE_ALIAS("platform:s3c2440-sdi"); diff --git a/drivers/mmc/host/s3cmci.h b/drivers/mmc/host/s3cmci.h new file mode 100644 index 00000000000..37d9c60010c --- /dev/null +++ b/drivers/mmc/host/s3cmci.h @@ -0,0 +1,70 @@ +/* + * linux/drivers/mmc/s3cmci.h - Samsung S3C MCI driver + * + * Copyright (C) 2004-2006 Thomas Kleffel, All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +/* FIXME: DMA Resource management ?! 
*/ +#define S3CMCI_DMA 0 + +enum s3cmci_waitfor { + COMPLETION_NONE, + COMPLETION_FINALIZE, + COMPLETION_CMDSENT, + COMPLETION_RSPFIN, + COMPLETION_XFERFINISH, + COMPLETION_XFERFINISH_RSPFIN, +}; + +struct s3cmci_host { + struct platform_device *pdev; + struct s3c24xx_mci_pdata *pdata; + struct mmc_host *mmc; + struct resource *mem; + struct clk *clk; + void __iomem *base; + int irq; + int irq_cd; + int dma; + + unsigned long clk_rate; + unsigned long clk_div; + unsigned long real_rate; + u8 prescaler; + + int is2440; + unsigned sdiimsk; + unsigned sdidata; + int dodma; + int dmatogo; + + struct mmc_request *mrq; + int cmd_is_stop; + + spinlock_t complete_lock; + enum s3cmci_waitfor complete_what; + + int dma_complete; + + u32 pio_sgptr; + u32 pio_words; + u32 pio_count; + u32 *pio_ptr; +#define XFER_NONE 0 +#define XFER_READ 1 +#define XFER_WRITE 2 + u32 pio_active; + + int bus_width; + + char dbgmsg_cmd[301]; + char dbgmsg_dat[301]; + char *status; + + unsigned int ccnt, dcnt; + struct tasklet_struct pio_tasklet; +}; diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c new file mode 100644 index 00000000000..deb607c52c0 --- /dev/null +++ b/drivers/mmc/host/sdhci-pci.c @@ -0,0 +1,732 @@ +/* linux/drivers/mmc/host/sdhci-pci.c - SDHCI on PCI bus interface + * + * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * Thanks to the following companies for their support: + * + * - JMicron (hardware and technical support) + */ + +#include <linux/delay.h> +#include <linux/highmem.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> + +#include <linux/mmc/host.h> + +#include <asm/scatterlist.h> +#include <asm/io.h> + +#include "sdhci.h" + +/* + * PCI registers + */ + +#define PCI_SDHCI_IFPIO 0x00 +#define PCI_SDHCI_IFDMA 0x01 +#define PCI_SDHCI_IFVENDOR 0x02 + +#define PCI_SLOT_INFO 0x40 /* 8 bits */ +#define PCI_SLOT_INFO_SLOTS(x) ((x >> 4) & 7) +#define PCI_SLOT_INFO_FIRST_BAR_MASK 0x07 + +#define MAX_SLOTS 8 + +struct sdhci_pci_chip; +struct sdhci_pci_slot; + +struct sdhci_pci_fixes { + unsigned int quirks; + + int (*probe)(struct sdhci_pci_chip*); + + int (*probe_slot)(struct sdhci_pci_slot*); + void (*remove_slot)(struct sdhci_pci_slot*, int); + + int (*suspend)(struct sdhci_pci_chip*, + pm_message_t); + int (*resume)(struct sdhci_pci_chip*); +}; + +struct sdhci_pci_slot { + struct sdhci_pci_chip *chip; + struct sdhci_host *host; + + int pci_bar; +}; + +struct sdhci_pci_chip { + struct pci_dev *pdev; + + unsigned int quirks; + const struct sdhci_pci_fixes *fixes; + + int num_slots; /* Slots on controller */ + struct sdhci_pci_slot *slots[MAX_SLOTS]; /* Pointers to host slots */ +}; + + +/*****************************************************************************\ + * * + * Hardware specific quirk handling * + * * +\*****************************************************************************/ + +static int ricoh_probe(struct sdhci_pci_chip *chip) +{ + if (chip->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) + chip->quirks |= SDHCI_QUIRK_CLOCK_BEFORE_RESET; + + if (chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG) + chip->quirks |= SDHCI_QUIRK_NO_CARD_NO_RESET; + + return 0; +} + +static const struct sdhci_pci_fixes sdhci_ricoh = { + .probe = ricoh_probe, + .quirks = 
SDHCI_QUIRK_32BIT_DMA_ADDR, +}; + +static const struct sdhci_pci_fixes sdhci_ene_712 = { + .quirks = SDHCI_QUIRK_SINGLE_POWER_WRITE | + SDHCI_QUIRK_BROKEN_DMA, +}; + +static const struct sdhci_pci_fixes sdhci_ene_714 = { + .quirks = SDHCI_QUIRK_SINGLE_POWER_WRITE | + SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS | + SDHCI_QUIRK_BROKEN_DMA, +}; + +static const struct sdhci_pci_fixes sdhci_cafe = { + .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER | + SDHCI_QUIRK_BROKEN_TIMEOUT_VAL, +}; + +static int jmicron_pmos(struct sdhci_pci_chip *chip, int on) +{ + u8 scratch; + int ret; + + ret = pci_read_config_byte(chip->pdev, 0xAE, &scratch); + if (ret) + return ret; + + /* + * Turn PMOS on [bit 0], set over current detection to 2.4 V + * [bit 1:2] and enable over current debouncing [bit 6]. + */ + if (on) + scratch |= 0x47; + else + scratch &= ~0x47; + + ret = pci_write_config_byte(chip->pdev, 0xAE, scratch); + if (ret) + return ret; + + return 0; +} + +static int jmicron_probe(struct sdhci_pci_chip *chip) +{ + int ret; + + if (chip->pdev->revision == 0) { + chip->quirks |= SDHCI_QUIRK_32BIT_DMA_ADDR | + SDHCI_QUIRK_32BIT_DMA_SIZE | + SDHCI_QUIRK_32BIT_ADMA_SIZE | + SDHCI_QUIRK_RESET_AFTER_REQUEST; + } + + /* + * JMicron chips can have two interfaces to the same hardware + * in order to work around limitations in Microsoft's driver. + * We need to make sure we only bind to one of them. + * + * This code assumes two things: + * + * 1. The PCI code adds subfunctions in order. + * + * 2. The MMC interface has a lower subfunction number + * than the SD interface. + */ + if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD) { + struct pci_dev *sd_dev; + + sd_dev = NULL; + while ((sd_dev = pci_get_device(PCI_VENDOR_ID_JMICRON, + PCI_DEVICE_ID_JMICRON_JMB38X_MMC, sd_dev)) != NULL) { + if ((PCI_SLOT(chip->pdev->devfn) == + PCI_SLOT(sd_dev->devfn)) && + (chip->pdev->bus == sd_dev->bus)) + break; + } + + if (sd_dev) { + pci_dev_put(sd_dev); + dev_info(&chip->pdev->dev, "Refusing to bind to " + "secondary interface.\n"); + return -ENODEV; + } + } + + /* + * JMicron chips need a bit of a nudge to enable the power + * output pins. + */ + ret = jmicron_pmos(chip, 1); + if (ret) { + dev_err(&chip->pdev->dev, "Failure enabling card power\n"); + return ret; + } + + return 0; +} + +static void jmicron_enable_mmc(struct sdhci_host *host, int on) +{ + u8 scratch; + + scratch = readb(host->ioaddr + 0xC0); + + if (on) + scratch |= 0x01; + else + scratch &= ~0x01; + + writeb(scratch, host->ioaddr + 0xC0); +} + +static int jmicron_probe_slot(struct sdhci_pci_slot *slot) +{ + if (slot->chip->pdev->revision == 0) { + u16 version; + + version = readl(slot->host->ioaddr + SDHCI_HOST_VERSION); + version = (version & SDHCI_VENDOR_VER_MASK) >> + SDHCI_VENDOR_VER_SHIFT; + + /* + * Older versions of the chip have lots of nasty glitches + * in the ADMA engine. It's best just to avoid it + * completely. + */ + if (version < 0xAC) + slot->host->quirks |= SDHCI_QUIRK_BROKEN_ADMA; + } + + /* + * The secondary interface requires a bit set to get the + * interrupts. 
+ */ + if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) + jmicron_enable_mmc(slot->host, 1); + + return 0; +} + +static void jmicron_remove_slot(struct sdhci_pci_slot *slot, int dead) +{ + if (dead) + return; + + if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) + jmicron_enable_mmc(slot->host, 0); +} + +static int jmicron_suspend(struct sdhci_pci_chip *chip, pm_message_t state) +{ + int i; + + if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) { + for (i = 0;i < chip->num_slots;i++) + jmicron_enable_mmc(chip->slots[i]->host, 0); + } + + return 0; +} + +static int jmicron_resume(struct sdhci_pci_chip *chip) +{ + int ret, i; + + if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) { + for (i = 0;i < chip->num_slots;i++) + jmicron_enable_mmc(chip->slots[i]->host, 1); + } + + ret = jmicron_pmos(chip, 1); + if (ret) { + dev_err(&chip->pdev->dev, "Failure enabling card power\n"); + return ret; + } + + return 0; +} + +static const struct sdhci_pci_fixes sdhci_jmicron = { + .probe = jmicron_probe, + + .probe_slot = jmicron_probe_slot, + .remove_slot = jmicron_remove_slot, + + .suspend = jmicron_suspend, + .resume = jmicron_resume, +}; + +static const struct pci_device_id pci_ids[] __devinitdata = { + { + .vendor = PCI_VENDOR_ID_RICOH, + .device = PCI_DEVICE_ID_RICOH_R5C822, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_ricoh, + }, + + { + .vendor = PCI_VENDOR_ID_ENE, + .device = PCI_DEVICE_ID_ENE_CB712_SD, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_ene_712, + }, + + { + .vendor = PCI_VENDOR_ID_ENE, + .device = PCI_DEVICE_ID_ENE_CB712_SD_2, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_ene_712, + }, + + { + .vendor = PCI_VENDOR_ID_ENE, + .device = PCI_DEVICE_ID_ENE_CB714_SD, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_ene_714, + }, + + { + .vendor = PCI_VENDOR_ID_ENE, + .device = PCI_DEVICE_ID_ENE_CB714_SD_2, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_ene_714, + }, + + { + .vendor = PCI_VENDOR_ID_MARVELL, + .device = PCI_DEVICE_ID_MARVELL_CAFE_SD, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_cafe, + }, + + { + .vendor = PCI_VENDOR_ID_JMICRON, + .device = PCI_DEVICE_ID_JMICRON_JMB38X_SD, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_jmicron, + }, + + { + .vendor = PCI_VENDOR_ID_JMICRON, + .device = PCI_DEVICE_ID_JMICRON_JMB38X_MMC, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_jmicron, + }, + + { /* Generic SD host controller */ + PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00) + }, + + { /* end: all zeroes */ }, +}; + +MODULE_DEVICE_TABLE(pci, pci_ids); + +/*****************************************************************************\ + * * + * SDHCI core callbacks * + * * +\*****************************************************************************/ + +static int sdhci_pci_enable_dma(struct sdhci_host *host) +{ + struct sdhci_pci_slot *slot; + struct pci_dev *pdev; + int ret; + + slot = sdhci_priv(host); + pdev = slot->chip->pdev; + + if (((pdev->class & 0xFFFF00) == (PCI_CLASS_SYSTEM_SDHCI << 8)) && + ((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA) && + (host->flags & SDHCI_USE_DMA)) { + dev_warn(&pdev->dev, "Will use DMA mode even though HW " + 
"doesn't fully claim to support it.\n"); + } + + ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + if (ret) + return ret; + + pci_set_master(pdev); + + return 0; +} + +static struct sdhci_ops sdhci_pci_ops = { + .enable_dma = sdhci_pci_enable_dma, +}; + +/*****************************************************************************\ + * * + * Suspend/resume * + * * +\*****************************************************************************/ + +#ifdef CONFIG_PM + +static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state) +{ + struct sdhci_pci_chip *chip; + struct sdhci_pci_slot *slot; + int i, ret; + + chip = pci_get_drvdata(pdev); + if (!chip) + return 0; + + for (i = 0;i < chip->num_slots;i++) { + slot = chip->slots[i]; + if (!slot) + continue; + + ret = sdhci_suspend_host(slot->host, state); + + if (ret) { + for (i--;i >= 0;i--) + sdhci_resume_host(chip->slots[i]->host); + return ret; + } + } + + if (chip->fixes && chip->fixes->suspend) { + ret = chip->fixes->suspend(chip, state); + if (ret) { + for (i = chip->num_slots - 1;i >= 0;i--) + sdhci_resume_host(chip->slots[i]->host); + return ret; + } + } + + pci_save_state(pdev); + pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); + pci_disable_device(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + + return 0; +} + +static int sdhci_pci_resume (struct pci_dev *pdev) +{ + struct sdhci_pci_chip *chip; + struct sdhci_pci_slot *slot; + int i, ret; + + chip = pci_get_drvdata(pdev); + if (!chip) + return 0; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + ret = pci_enable_device(pdev); + if (ret) + return ret; + + if (chip->fixes && chip->fixes->resume) { + ret = chip->fixes->resume(chip); + if (ret) + return ret; + } + + for (i = 0;i < chip->num_slots;i++) { + slot = chip->slots[i]; + if (!slot) + continue; + + ret = sdhci_resume_host(slot->host); + if (ret) + return ret; + } + + return 0; +} + +#else /* CONFIG_PM */ + +#define sdhci_pci_suspend NULL +#define sdhci_pci_resume NULL + +#endif /* CONFIG_PM */ + +/*****************************************************************************\ + * * + * Device probing/removal * + * * +\*****************************************************************************/ + +static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot( + struct pci_dev *pdev, struct sdhci_pci_chip *chip, int bar) +{ + struct sdhci_pci_slot *slot; + struct sdhci_host *host; + + resource_size_t addr; + + int ret; + + if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) { + dev_err(&pdev->dev, "BAR %d is not iomem. Aborting.\n", bar); + return ERR_PTR(-ENODEV); + } + + if (pci_resource_len(pdev, bar) != 0x100) { + dev_err(&pdev->dev, "Invalid iomem size. You may " + "experience problems.\n"); + } + + if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) { + dev_err(&pdev->dev, "Vendor specific interface. Aborting.\n"); + return ERR_PTR(-ENODEV); + } + + if ((pdev->class & 0x0000FF) > PCI_SDHCI_IFVENDOR) { + dev_err(&pdev->dev, "Unknown interface. 
Aborting.\n"); + return ERR_PTR(-ENODEV); + } + + host = sdhci_alloc_host(&pdev->dev, sizeof(struct sdhci_pci_slot)); + if (IS_ERR(host)) { + ret = PTR_ERR(host); + goto unmap; + } + + slot = sdhci_priv(host); + + slot->chip = chip; + slot->host = host; + slot->pci_bar = bar; + + host->hw_name = "PCI"; + host->ops = &sdhci_pci_ops; + host->quirks = chip->quirks; + + host->irq = pdev->irq; + + ret = pci_request_region(pdev, bar, mmc_hostname(host->mmc)); + if (ret) { + dev_err(&pdev->dev, "cannot request region\n"); + return ERR_PTR(ret); + } + + addr = pci_resource_start(pdev, bar); + host->ioaddr = ioremap_nocache(addr, pci_resource_len(pdev, bar)); + if (!host->ioaddr) { + dev_err(&pdev->dev, "failed to remap registers\n"); + goto release; + } + + if (chip->fixes && chip->fixes->probe_slot) { + ret = chip->fixes->probe_slot(slot); + if (ret) + goto unmap; + } + + ret = sdhci_add_host(host); + if (ret) + goto remove; + + return slot; + +remove: + if (chip->fixes && chip->fixes->remove_slot) + chip->fixes->remove_slot(slot, 0); + +unmap: + iounmap(host->ioaddr); + +release: + pci_release_region(pdev, bar); + sdhci_free_host(host); + + return ERR_PTR(ret); +} + +static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot) +{ + int dead; + u32 scratch; + + dead = 0; + scratch = readl(slot->host->ioaddr + SDHCI_INT_STATUS); + if (scratch == (u32)-1) + dead = 1; + + sdhci_remove_host(slot->host, dead); + + if (slot->chip->fixes && slot->chip->fixes->remove_slot) + slot->chip->fixes->remove_slot(slot, dead); + + pci_release_region(slot->chip->pdev, slot->pci_bar); + + sdhci_free_host(slot->host); +} + +static int __devinit sdhci_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct sdhci_pci_chip *chip; + struct sdhci_pci_slot *slot; + + u8 slots, rev, first_bar; + int ret, i; + + BUG_ON(pdev == NULL); + BUG_ON(ent == NULL); + + pci_read_config_byte(pdev, PCI_CLASS_REVISION, &rev); + + dev_info(&pdev->dev, "SDHCI controller found [%04x:%04x] (rev %x)\n", + (int)pdev->vendor, (int)pdev->device, (int)rev); + + ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots); + if (ret) + return ret; + + slots = PCI_SLOT_INFO_SLOTS(slots) + 1; + dev_dbg(&pdev->dev, "found %d slot(s)\n", slots); + if (slots == 0) + return -ENODEV; + + BUG_ON(slots > MAX_SLOTS); + + ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar); + if (ret) + return ret; + + first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK; + + if (first_bar > 5) { + dev_err(&pdev->dev, "Invalid first BAR. 
Aborting.\n"); + return -ENODEV; + } + + ret = pci_enable_device(pdev); + if (ret) + return ret; + + chip = kzalloc(sizeof(struct sdhci_pci_chip), GFP_KERNEL); + if (!chip) { + ret = -ENOMEM; + goto err; + } + + chip->pdev = pdev; + chip->fixes = (const struct sdhci_pci_fixes*)ent->driver_data; + if (chip->fixes) + chip->quirks = chip->fixes->quirks; + chip->num_slots = slots; + + pci_set_drvdata(pdev, chip); + + if (chip->fixes && chip->fixes->probe) { + ret = chip->fixes->probe(chip); + if (ret) + goto free; + } + + for (i = 0;i < slots;i++) { + slot = sdhci_pci_probe_slot(pdev, chip, first_bar + i); + if (IS_ERR(slot)) { + for (i--;i >= 0;i--) + sdhci_pci_remove_slot(chip->slots[i]); + ret = PTR_ERR(slot); + goto free; + } + + chip->slots[i] = slot; + } + + return 0; + +free: + pci_set_drvdata(pdev, NULL); + kfree(chip); + +err: + pci_disable_device(pdev); + return ret; +} + +static void __devexit sdhci_pci_remove(struct pci_dev *pdev) +{ + int i; + struct sdhci_pci_chip *chip; + + chip = pci_get_drvdata(pdev); + + if (chip) { + for (i = 0;i < chip->num_slots; i++) + sdhci_pci_remove_slot(chip->slots[i]); + + pci_set_drvdata(pdev, NULL); + kfree(chip); + } + + pci_disable_device(pdev); +} + +static struct pci_driver sdhci_driver = { + .name = "sdhci-pci", + .id_table = pci_ids, + .probe = sdhci_pci_probe, + .remove = __devexit_p(sdhci_pci_remove), + .suspend = sdhci_pci_suspend, + .resume = sdhci_pci_resume, +}; + +/*****************************************************************************\ + * * + * Driver init/exit * + * * +\*****************************************************************************/ + +static int __init sdhci_drv_init(void) +{ + return pci_register_driver(&sdhci_driver); +} + +static void __exit sdhci_drv_exit(void) +{ + pci_unregister_driver(&sdhci_driver); +} + +module_init(sdhci_drv_init); +module_exit(sdhci_drv_exit); + +MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>"); +MODULE_DESCRIPTION("Secure Digital Host Controller Interface PCI driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index b413aa6c246..17701c3da73 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -15,7 +15,7 @@ #include <linux/delay.h> #include <linux/highmem.h> -#include <linux/pci.h> +#include <linux/io.h> #include <linux/dma-mapping.h> #include <linux/scatterlist.h> @@ -32,135 +32,6 @@ static unsigned int debug_quirks = 0; -/* - * Different quirks to handle when the hardware deviates from a strict - * interpretation of the SDHCI specification. - */ - -/* Controller doesn't honor resets unless we touch the clock register */ -#define SDHCI_QUIRK_CLOCK_BEFORE_RESET (1<<0) -/* Controller has bad caps bits, but really supports DMA */ -#define SDHCI_QUIRK_FORCE_DMA (1<<1) -/* Controller doesn't like to be reset when there is no card inserted. 
*/ -#define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2) -/* Controller doesn't like clearing the power reg before a change */ -#define SDHCI_QUIRK_SINGLE_POWER_WRITE (1<<3) -/* Controller has flaky internal state so reset it on each ios change */ -#define SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS (1<<4) -/* Controller has an unusable DMA engine */ -#define SDHCI_QUIRK_BROKEN_DMA (1<<5) -/* Controller can only DMA from 32-bit aligned addresses */ -#define SDHCI_QUIRK_32BIT_DMA_ADDR (1<<6) -/* Controller can only DMA chunk sizes that are a multiple of 32 bits */ -#define SDHCI_QUIRK_32BIT_DMA_SIZE (1<<7) -/* Controller needs to be reset after each request to stay stable */ -#define SDHCI_QUIRK_RESET_AFTER_REQUEST (1<<8) -/* Controller needs voltage and power writes to happen separately */ -#define SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER (1<<9) -/* Controller has an off-by-one issue with timeout value */ -#define SDHCI_QUIRK_INCR_TIMEOUT_CONTROL (1<<10) - -static const struct pci_device_id pci_ids[] __devinitdata = { - { - .vendor = PCI_VENDOR_ID_RICOH, - .device = PCI_DEVICE_ID_RICOH_R5C822, - .subvendor = PCI_VENDOR_ID_IBM, - .subdevice = PCI_ANY_ID, - .driver_data = SDHCI_QUIRK_CLOCK_BEFORE_RESET | - SDHCI_QUIRK_FORCE_DMA, - }, - - { - .vendor = PCI_VENDOR_ID_RICOH, - .device = PCI_DEVICE_ID_RICOH_R5C822, - .subvendor = PCI_VENDOR_ID_SAMSUNG, - .subdevice = PCI_ANY_ID, - .driver_data = SDHCI_QUIRK_FORCE_DMA | - SDHCI_QUIRK_NO_CARD_NO_RESET, - }, - - { - .vendor = PCI_VENDOR_ID_RICOH, - .device = PCI_DEVICE_ID_RICOH_R5C822, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .driver_data = SDHCI_QUIRK_FORCE_DMA, - }, - - { - .vendor = PCI_VENDOR_ID_TI, - .device = PCI_DEVICE_ID_TI_XX21_XX11_SD, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .driver_data = SDHCI_QUIRK_FORCE_DMA, - }, - - { - .vendor = PCI_VENDOR_ID_ENE, - .device = PCI_DEVICE_ID_ENE_CB712_SD, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE | - SDHCI_QUIRK_BROKEN_DMA, - }, - - { - .vendor = PCI_VENDOR_ID_ENE, - .device = PCI_DEVICE_ID_ENE_CB712_SD_2, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE | - SDHCI_QUIRK_BROKEN_DMA, - }, - - { - .vendor = PCI_VENDOR_ID_ENE, - .device = PCI_DEVICE_ID_ENE_CB714_SD, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE | - SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS | - SDHCI_QUIRK_BROKEN_DMA, - }, - - { - .vendor = PCI_VENDOR_ID_ENE, - .device = PCI_DEVICE_ID_ENE_CB714_SD_2, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE | - SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS | - SDHCI_QUIRK_BROKEN_DMA, - }, - - { - .vendor = PCI_VENDOR_ID_MARVELL, - .device = PCI_DEVICE_ID_MARVELL_CAFE_SD, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .driver_data = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER | - SDHCI_QUIRK_INCR_TIMEOUT_CONTROL, - }, - - { - .vendor = PCI_VENDOR_ID_JMICRON, - .device = PCI_DEVICE_ID_JMICRON_JMB38X_SD, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .driver_data = SDHCI_QUIRK_32BIT_DMA_ADDR | - SDHCI_QUIRK_32BIT_DMA_SIZE | - SDHCI_QUIRK_RESET_AFTER_REQUEST, - }, - - { /* Generic SD host controller */ - PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00) - }, - - { /* end: all zeroes */ }, -}; - -MODULE_DEVICE_TABLE(pci, pci_ids); - static void sdhci_prepare_data(struct sdhci_host *, struct mmc_data *); static void sdhci_finish_data(struct sdhci_host *); @@ -215,7 +86,7 @@ static 
void sdhci_reset(struct sdhci_host *host, u8 mask) { unsigned long timeout; - if (host->chip->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) { + if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) { if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) return; @@ -253,7 +124,8 @@ static void sdhci_init(struct sdhci_host *host) SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT | SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT | SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | - SDHCI_INT_DMA_END | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE; + SDHCI_INT_DMA_END | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE | + SDHCI_INT_ADMA_ERROR; writel(intmask, host->ioaddr + SDHCI_INT_ENABLE); writel(intmask, host->ioaddr + SDHCI_SIGNAL_ENABLE); @@ -443,23 +315,226 @@ static void sdhci_transfer_pio(struct sdhci_host *host) DBG("PIO transfer complete.\n"); } -static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data) +static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags) { - u8 count; - unsigned target_timeout, current_timeout; + local_irq_save(*flags); + return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; +} - WARN_ON(host->data); +static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags) +{ + kunmap_atomic(buffer, KM_BIO_SRC_IRQ); + local_irq_restore(*flags); +} - if (data == NULL) - return; +static int sdhci_adma_table_pre(struct sdhci_host *host, + struct mmc_data *data) +{ + int direction; - /* Sanity checks */ - BUG_ON(data->blksz * data->blocks > 524288); - BUG_ON(data->blksz > host->mmc->max_blk_size); - BUG_ON(data->blocks > 65535); + u8 *desc; + u8 *align; + dma_addr_t addr; + dma_addr_t align_addr; + int len, offset; - host->data = data; - host->data_early = 0; + struct scatterlist *sg; + int i; + char *buffer; + unsigned long flags; + + /* + * The spec does not specify endianness of descriptor table. + * We currently guess that it is LE. + */ + + if (data->flags & MMC_DATA_READ) + direction = DMA_FROM_DEVICE; + else + direction = DMA_TO_DEVICE; + + /* + * The ADMA descriptor table is mapped further down as we + * need to fill it with data first. + */ + + host->align_addr = dma_map_single(mmc_dev(host->mmc), + host->align_buffer, 128 * 4, direction); + if (dma_mapping_error(host->align_addr)) + goto fail; + BUG_ON(host->align_addr & 0x3); + + host->sg_count = dma_map_sg(mmc_dev(host->mmc), + data->sg, data->sg_len, direction); + if (host->sg_count == 0) + goto unmap_align; + + desc = host->adma_desc; + align = host->align_buffer; + + align_addr = host->align_addr; + + for_each_sg(data->sg, sg, host->sg_count, i) { + addr = sg_dma_address(sg); + len = sg_dma_len(sg); + + /* + * The SDHCI specification states that ADMA + * addresses must be 32-bit aligned. If they + * aren't, then we use a bounce buffer for + * the (up to three) bytes that screw up the + * alignment. 
+ */ + offset = (4 - (addr & 0x3)) & 0x3; + if (offset) { + if (data->flags & MMC_DATA_WRITE) { + buffer = sdhci_kmap_atomic(sg, &flags); + memcpy(align, buffer, offset); + sdhci_kunmap_atomic(buffer, &flags); + } + + desc[7] = (align_addr >> 24) & 0xff; + desc[6] = (align_addr >> 16) & 0xff; + desc[5] = (align_addr >> 8) & 0xff; + desc[4] = (align_addr >> 0) & 0xff; + + BUG_ON(offset > 65536); + + desc[3] = (offset >> 8) & 0xff; + desc[2] = (offset >> 0) & 0xff; + + desc[1] = 0x00; + desc[0] = 0x21; /* tran, valid */ + + align += 4; + align_addr += 4; + + desc += 8; + + addr += offset; + len -= offset; + } + + desc[7] = (addr >> 24) & 0xff; + desc[6] = (addr >> 16) & 0xff; + desc[5] = (addr >> 8) & 0xff; + desc[4] = (addr >> 0) & 0xff; + + BUG_ON(len > 65536); + + desc[3] = (len >> 8) & 0xff; + desc[2] = (len >> 0) & 0xff; + + desc[1] = 0x00; + desc[0] = 0x21; /* tran, valid */ + + desc += 8; + + /* + * If this triggers then we have a calculation bug + * somewhere. :/ + */ + WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4); + } + + /* + * Add a terminating entry. + */ + desc[7] = 0; + desc[6] = 0; + desc[5] = 0; + desc[4] = 0; + + desc[3] = 0; + desc[2] = 0; + + desc[1] = 0x00; + desc[0] = 0x03; /* nop, end, valid */ + + /* + * Resync align buffer as we might have changed it. + */ + if (data->flags & MMC_DATA_WRITE) { + dma_sync_single_for_device(mmc_dev(host->mmc), + host->align_addr, 128 * 4, direction); + } + + host->adma_addr = dma_map_single(mmc_dev(host->mmc), + host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE); + if (dma_mapping_error(host->align_addr)) + goto unmap_entries; + BUG_ON(host->adma_addr & 0x3); + + return 0; + +unmap_entries: + dma_unmap_sg(mmc_dev(host->mmc), data->sg, + data->sg_len, direction); +unmap_align: + dma_unmap_single(mmc_dev(host->mmc), host->align_addr, + 128 * 4, direction); +fail: + return -EINVAL; +} + +static void sdhci_adma_table_post(struct sdhci_host *host, + struct mmc_data *data) +{ + int direction; + + struct scatterlist *sg; + int i, size; + u8 *align; + char *buffer; + unsigned long flags; + + if (data->flags & MMC_DATA_READ) + direction = DMA_FROM_DEVICE; + else + direction = DMA_TO_DEVICE; + + dma_unmap_single(mmc_dev(host->mmc), host->adma_addr, + (128 * 2 + 1) * 4, DMA_TO_DEVICE); + + dma_unmap_single(mmc_dev(host->mmc), host->align_addr, + 128 * 4, direction); + + if (data->flags & MMC_DATA_READ) { + dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg, + data->sg_len, direction); + + align = host->align_buffer; + + for_each_sg(data->sg, sg, host->sg_count, i) { + if (sg_dma_address(sg) & 0x3) { + size = 4 - (sg_dma_address(sg) & 0x3); + + buffer = sdhci_kmap_atomic(sg, &flags); + memcpy(buffer, align, size); + sdhci_kunmap_atomic(buffer, &flags); + + align += 4; + } + } + } + + dma_unmap_sg(mmc_dev(host->mmc), data->sg, + data->sg_len, direction); +} + +static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data) +{ + u8 count; + unsigned target_timeout, current_timeout; + + /* + * If the host controller provides us with an incorrect timeout + * value, just skip the check and use 0xE. The hardware may take + * longer to time out, but that's much better than having a too-short + * timeout value. 
+ */ + if ((host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)) + return 0xE; /* timeout in us */ target_timeout = data->timeout_ns / 1000 + @@ -484,52 +559,158 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data) break; } - /* - * Compensate for an off-by-one error in the CaFe hardware; otherwise, - * a too-small count gives us interrupt timeouts. - */ - if ((host->chip->quirks & SDHCI_QUIRK_INCR_TIMEOUT_CONTROL)) - count++; - if (count >= 0xF) { printk(KERN_WARNING "%s: Too large timeout requested!\n", mmc_hostname(host->mmc)); count = 0xE; } + return count; +} + +static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data) +{ + u8 count; + u8 ctrl; + int ret; + + WARN_ON(host->data); + + if (data == NULL) + return; + + /* Sanity checks */ + BUG_ON(data->blksz * data->blocks > 524288); + BUG_ON(data->blksz > host->mmc->max_blk_size); + BUG_ON(data->blocks > 65535); + + host->data = data; + host->data_early = 0; + + count = sdhci_calc_timeout(host, data); writeb(count, host->ioaddr + SDHCI_TIMEOUT_CONTROL); if (host->flags & SDHCI_USE_DMA) host->flags |= SDHCI_REQ_USE_DMA; - if (unlikely((host->flags & SDHCI_REQ_USE_DMA) && - (host->chip->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE) && - ((data->blksz * data->blocks) & 0x3))) { - DBG("Reverting to PIO because of transfer size (%d)\n", - data->blksz * data->blocks); - host->flags &= ~SDHCI_REQ_USE_DMA; + /* + * FIXME: This doesn't account for merging when mapping the + * scatterlist. + */ + if (host->flags & SDHCI_REQ_USE_DMA) { + int broken, i; + struct scatterlist *sg; + + broken = 0; + if (host->flags & SDHCI_USE_ADMA) { + if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) + broken = 1; + } else { + if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE) + broken = 1; + } + + if (unlikely(broken)) { + for_each_sg(data->sg, sg, data->sg_len, i) { + if (sg->length & 0x3) { + DBG("Reverting to PIO because of " + "transfer size (%d)\n", + sg->length); + host->flags &= ~SDHCI_REQ_USE_DMA; + break; + } + } + } } /* * The assumption here being that alignment is the same after * translation to device address space. */ - if (unlikely((host->flags & SDHCI_REQ_USE_DMA) && - (host->chip->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) && - (data->sg->offset & 0x3))) { - DBG("Reverting to PIO because of bad alignment\n"); - host->flags &= ~SDHCI_REQ_USE_DMA; + if (host->flags & SDHCI_REQ_USE_DMA) { + int broken, i; + struct scatterlist *sg; + + broken = 0; + if (host->flags & SDHCI_USE_ADMA) { + /* + * As we use 3 byte chunks to work around + * alignment problems, we need to check this + * quirk. + */ + if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) + broken = 1; + } else { + if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) + broken = 1; + } + + if (unlikely(broken)) { + for_each_sg(data->sg, sg, data->sg_len, i) { + if (sg->offset & 0x3) { + DBG("Reverting to PIO because of " + "bad alignment\n"); + host->flags &= ~SDHCI_REQ_USE_DMA; + break; + } + } + } } if (host->flags & SDHCI_REQ_USE_DMA) { - int count; + if (host->flags & SDHCI_USE_ADMA) { + ret = sdhci_adma_table_pre(host, data); + if (ret) { + /* + * This only happens when someone fed + * us an invalid request. + */ + WARN_ON(1); + host->flags &= ~SDHCI_USE_DMA; + } else { + writel(host->adma_addr, + host->ioaddr + SDHCI_ADMA_ADDRESS); + } + } else { + int sg_cnt; + + sg_cnt = dma_map_sg(mmc_dev(host->mmc), + data->sg, data->sg_len, + (data->flags & MMC_DATA_READ) ? 
+ DMA_FROM_DEVICE : + DMA_TO_DEVICE); + if (sg_cnt == 0) { + /* + * This only happens when someone fed + * us an invalid request. + */ + WARN_ON(1); + host->flags &= ~SDHCI_USE_DMA; + } else { + WARN_ON(count != 1); + writel(sg_dma_address(data->sg), + host->ioaddr + SDHCI_DMA_ADDRESS); + } + } + } - count = pci_map_sg(host->chip->pdev, data->sg, data->sg_len, - (data->flags & MMC_DATA_READ)?PCI_DMA_FROMDEVICE:PCI_DMA_TODEVICE); - BUG_ON(count != 1); + /* + * Always adjust the DMA selection as some controllers + * (e.g. JMicron) can't do PIO properly when the selection + * is ADMA. + */ + if (host->version >= SDHCI_SPEC_200) { + ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL); + ctrl &= ~SDHCI_CTRL_DMA_MASK; + if ((host->flags & SDHCI_REQ_USE_DMA) && + (host->flags & SDHCI_USE_ADMA)) + ctrl |= SDHCI_CTRL_ADMA32; + else + ctrl |= SDHCI_CTRL_SDMA; + writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL); + } - writel(sg_dma_address(data->sg), host->ioaddr + SDHCI_DMA_ADDRESS); - } else { + if (!(host->flags & SDHCI_REQ_USE_DMA)) { host->cur_sg = data->sg; host->num_sg = data->sg_len; @@ -567,7 +748,6 @@ static void sdhci_set_transfer_mode(struct sdhci_host *host, static void sdhci_finish_data(struct sdhci_host *host) { struct mmc_data *data; - u16 blocks; BUG_ON(!host->data); @@ -575,25 +755,26 @@ static void sdhci_finish_data(struct sdhci_host *host) host->data = NULL; if (host->flags & SDHCI_REQ_USE_DMA) { - pci_unmap_sg(host->chip->pdev, data->sg, data->sg_len, - (data->flags & MMC_DATA_READ)?PCI_DMA_FROMDEVICE:PCI_DMA_TODEVICE); + if (host->flags & SDHCI_USE_ADMA) + sdhci_adma_table_post(host, data); + else { + dma_unmap_sg(mmc_dev(host->mmc), data->sg, + data->sg_len, (data->flags & MMC_DATA_READ) ? + DMA_FROM_DEVICE : DMA_TO_DEVICE); + } } /* - * Controller doesn't count down when in single block mode. + * The specification states that the block count register must + * be updated, but it does not specify at what point in the + * data flow. That makes the register entirely useless to read + * back so we have to assume that nothing made it to the card + * in the event of an error. */ - if (data->blocks == 1) - blocks = (data->error == 0) ? 0 : 1; + if (data->error) + data->bytes_xfered = 0; else - blocks = readw(host->ioaddr + SDHCI_BLOCK_COUNT); - data->bytes_xfered = data->blksz * (data->blocks - blocks); - - if (!data->error && blocks) { - printk(KERN_ERR "%s: Controller signalled completion even " - "though there were blocks left.\n", - mmc_hostname(host->mmc)); - data->error = -EIO; - } + data->bytes_xfered = data->blksz * data->blocks; if (data->stop) { /* @@ -775,7 +956,7 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned short power) * Spec says that we should clear the power reg before setting * a new value. Some controllers don't seem to like this though. */ - if (!(host->chip->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) + if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) writeb(0, host->ioaddr + SDHCI_POWER_CONTROL); pwr = SDHCI_POWER_ON; @@ -797,10 +978,10 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned short power) } /* - * At least the CaFe chip gets confused if we set the voltage + * At least the Marvell CaFe chip gets confused if we set the voltage * and set turn on power at the same time, so set the voltage first. 
*/ - if ((host->chip->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)) + if ((host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)) writeb(pwr & ~SDHCI_POWER_ON, host->ioaddr + SDHCI_POWER_CONTROL); @@ -833,7 +1014,8 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) host->mrq = mrq; - if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) { + if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT) + || (host->flags & SDHCI_DEVICE_DEAD)) { host->mrq->cmd->error = -ENOMEDIUM; tasklet_schedule(&host->finish_tasklet); } else @@ -853,6 +1035,9 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) spin_lock_irqsave(&host->lock, flags); + if (host->flags & SDHCI_DEVICE_DEAD) + goto out; + /* * Reset the chip on each power off. * Should clear out any weird states. @@ -888,9 +1073,10 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) * signalling timeout and CRC errors even on CMD0. Resetting * it on each ios seems to solve the problem. */ - if(host->chip->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS) + if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS) sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); +out: mmiowb(); spin_unlock_irqrestore(&host->lock, flags); } @@ -905,7 +1091,10 @@ static int sdhci_get_ro(struct mmc_host *mmc) spin_lock_irqsave(&host->lock, flags); - present = readl(host->ioaddr + SDHCI_PRESENT_STATE); + if (host->flags & SDHCI_DEVICE_DEAD) + present = 0; + else + present = readl(host->ioaddr + SDHCI_PRESENT_STATE); spin_unlock_irqrestore(&host->lock, flags); @@ -922,6 +1111,9 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) spin_lock_irqsave(&host->lock, flags); + if (host->flags & SDHCI_DEVICE_DEAD) + goto out; + ier = readl(host->ioaddr + SDHCI_INT_ENABLE); ier &= ~SDHCI_INT_CARD_INT; @@ -931,6 +1123,7 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) writel(ier, host->ioaddr + SDHCI_INT_ENABLE); writel(ier, host->ioaddr + SDHCI_SIGNAL_ENABLE); +out: mmiowb(); spin_unlock_irqrestore(&host->lock, flags); @@ -996,13 +1189,14 @@ static void sdhci_tasklet_finish(unsigned long param) * The controller needs a reset of internal state machines * upon error conditions. 
*/ - if (mrq->cmd->error || - (mrq->data && (mrq->data->error || - (mrq->data->stop && mrq->data->stop->error))) || - (host->chip->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)) { + if (!(host->flags & SDHCI_DEVICE_DEAD) && + (mrq->cmd->error || + (mrq->data && (mrq->data->error || + (mrq->data->stop && mrq->data->stop->error))) || + (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) { /* Some controllers need this kick or reset won't work here */ - if (host->chip->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) { + if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) { unsigned int clock; /* This is to force an update */ @@ -1116,6 +1310,8 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) host->data->error = -ETIMEDOUT; else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT)) host->data->error = -EILSEQ; + else if (intmask & SDHCI_INT_ADMA_ERROR) + host->data->error = -EIO; if (host->data->error) sdhci_finish_data(host); @@ -1234,218 +1430,167 @@ out: #ifdef CONFIG_PM -static int sdhci_suspend (struct pci_dev *pdev, pm_message_t state) +int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state) { - struct sdhci_chip *chip; - int i, ret; - - chip = pci_get_drvdata(pdev); - if (!chip) - return 0; - - DBG("Suspending...\n"); - - for (i = 0;i < chip->num_slots;i++) { - if (!chip->hosts[i]) - continue; - ret = mmc_suspend_host(chip->hosts[i]->mmc, state); - if (ret) { - for (i--;i >= 0;i--) - mmc_resume_host(chip->hosts[i]->mmc); - return ret; - } - } - - pci_save_state(pdev); - pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); + int ret; - for (i = 0;i < chip->num_slots;i++) { - if (!chip->hosts[i]) - continue; - free_irq(chip->hosts[i]->irq, chip->hosts[i]); - } + ret = mmc_suspend_host(host->mmc, state); + if (ret) + return ret; - pci_disable_device(pdev); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); + free_irq(host->irq, host); return 0; } -static int sdhci_resume (struct pci_dev *pdev) -{ - struct sdhci_chip *chip; - int i, ret; +EXPORT_SYMBOL_GPL(sdhci_suspend_host); - chip = pci_get_drvdata(pdev); - if (!chip) - return 0; +int sdhci_resume_host(struct sdhci_host *host) +{ + int ret; - DBG("Resuming...\n"); + if (host->flags & SDHCI_USE_DMA) { + if (host->ops->enable_dma) + host->ops->enable_dma(host); + } - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - ret = pci_enable_device(pdev); + ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED, + mmc_hostname(host->mmc), host); if (ret) return ret; - for (i = 0;i < chip->num_slots;i++) { - if (!chip->hosts[i]) - continue; - if (chip->hosts[i]->flags & SDHCI_USE_DMA) - pci_set_master(pdev); - ret = request_irq(chip->hosts[i]->irq, sdhci_irq, - IRQF_SHARED, mmc_hostname(chip->hosts[i]->mmc), - chip->hosts[i]); - if (ret) - return ret; - sdhci_init(chip->hosts[i]); - mmiowb(); - ret = mmc_resume_host(chip->hosts[i]->mmc); - if (ret) - return ret; - } + sdhci_init(host); + mmiowb(); + + ret = mmc_resume_host(host->mmc); + if (ret) + return ret; return 0; } -#else /* CONFIG_PM */ - -#define sdhci_suspend NULL -#define sdhci_resume NULL +EXPORT_SYMBOL_GPL(sdhci_resume_host); #endif /* CONFIG_PM */ /*****************************************************************************\ * * - * Device probing/removal * + * Device allocation/registration * * * \*****************************************************************************/ -static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot) +struct sdhci_host *sdhci_alloc_host(struct device *dev, + size_t priv_size) { - int ret; - 
unsigned int version; - struct sdhci_chip *chip; struct mmc_host *mmc; struct sdhci_host *host; - u8 first_bar; - unsigned int caps; - - chip = pci_get_drvdata(pdev); - BUG_ON(!chip); - - ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar); - if (ret) - return ret; - - first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK; - - if (first_bar > 5) { - printk(KERN_ERR DRIVER_NAME ": Invalid first BAR. Aborting.\n"); - return -ENODEV; - } - - if (!(pci_resource_flags(pdev, first_bar + slot) & IORESOURCE_MEM)) { - printk(KERN_ERR DRIVER_NAME ": BAR is not iomem. Aborting.\n"); - return -ENODEV; - } - - if (pci_resource_len(pdev, first_bar + slot) != 0x100) { - printk(KERN_ERR DRIVER_NAME ": Invalid iomem size. " - "You may experience problems.\n"); - } - - if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) { - printk(KERN_ERR DRIVER_NAME ": Vendor specific interface. Aborting.\n"); - return -ENODEV; - } - - if ((pdev->class & 0x0000FF) > PCI_SDHCI_IFVENDOR) { - printk(KERN_ERR DRIVER_NAME ": Unknown interface. Aborting.\n"); - return -ENODEV; - } + WARN_ON(dev == NULL); - mmc = mmc_alloc_host(sizeof(struct sdhci_host), &pdev->dev); + mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev); if (!mmc) - return -ENOMEM; + return ERR_PTR(-ENOMEM); host = mmc_priv(mmc); host->mmc = mmc; - host->chip = chip; - chip->hosts[slot] = host; + return host; +} - host->bar = first_bar + slot; +EXPORT_SYMBOL_GPL(sdhci_alloc_host); - host->addr = pci_resource_start(pdev, host->bar); - host->irq = pdev->irq; +int sdhci_add_host(struct sdhci_host *host) +{ + struct mmc_host *mmc; + unsigned int caps; + int ret; - DBG("slot %d at 0x%08lx, irq %d\n", slot, host->addr, host->irq); + WARN_ON(host == NULL); + if (host == NULL) + return -EINVAL; - ret = pci_request_region(pdev, host->bar, mmc_hostname(mmc)); - if (ret) - goto free; + mmc = host->mmc; - host->ioaddr = ioremap_nocache(host->addr, - pci_resource_len(pdev, host->bar)); - if (!host->ioaddr) { - ret = -ENOMEM; - goto release; - } + if (debug_quirks) + host->quirks = debug_quirks; sdhci_reset(host, SDHCI_RESET_ALL); - version = readw(host->ioaddr + SDHCI_HOST_VERSION); - version = (version & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT; - if (version > 1) { + host->version = readw(host->ioaddr + SDHCI_HOST_VERSION); + host->version = (host->version & SDHCI_SPEC_VER_MASK) + >> SDHCI_SPEC_VER_SHIFT; + if (host->version > SDHCI_SPEC_200) { printk(KERN_ERR "%s: Unknown controller version (%d). 
" "You may experience problems.\n", mmc_hostname(mmc), - version); + host->version); } caps = readl(host->ioaddr + SDHCI_CAPABILITIES); - if (chip->quirks & SDHCI_QUIRK_FORCE_DMA) + if (host->quirks & SDHCI_QUIRK_FORCE_DMA) host->flags |= SDHCI_USE_DMA; else if (!(caps & SDHCI_CAN_DO_DMA)) DBG("Controller doesn't have DMA capability\n"); else host->flags |= SDHCI_USE_DMA; - if ((chip->quirks & SDHCI_QUIRK_BROKEN_DMA) && + if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) && (host->flags & SDHCI_USE_DMA)) { DBG("Disabling DMA as it is marked broken\n"); host->flags &= ~SDHCI_USE_DMA; } - if (((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA) && - (host->flags & SDHCI_USE_DMA)) { - printk(KERN_WARNING "%s: Will use DMA " - "mode even though HW doesn't fully " - "claim to support it.\n", mmc_hostname(mmc)); + if (host->flags & SDHCI_USE_DMA) { + if ((host->version >= SDHCI_SPEC_200) && + (caps & SDHCI_CAN_DO_ADMA2)) + host->flags |= SDHCI_USE_ADMA; + } + + if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) && + (host->flags & SDHCI_USE_ADMA)) { + DBG("Disabling ADMA as it is marked broken\n"); + host->flags &= ~SDHCI_USE_ADMA; } if (host->flags & SDHCI_USE_DMA) { - if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) { - printk(KERN_WARNING "%s: No suitable DMA available. " - "Falling back to PIO.\n", mmc_hostname(mmc)); - host->flags &= ~SDHCI_USE_DMA; + if (host->ops->enable_dma) { + if (host->ops->enable_dma(host)) { + printk(KERN_WARNING "%s: No suitable DMA " + "available. Falling back to PIO.\n", + mmc_hostname(mmc)); + host->flags &= ~(SDHCI_USE_DMA | SDHCI_USE_ADMA); + } } } - if (host->flags & SDHCI_USE_DMA) - pci_set_master(pdev); - else /* XXX: Hack to get MMC layer to avoid highmem */ - pdev->dma_mask = 0; + if (host->flags & SDHCI_USE_ADMA) { + /* + * We need to allocate descriptors for all sg entries + * (128) and potentially one alignment transfer for + * each of those entries. + */ + host->adma_desc = kmalloc((128 * 2 + 1) * 4, GFP_KERNEL); + host->align_buffer = kmalloc(128 * 4, GFP_KERNEL); + if (!host->adma_desc || !host->align_buffer) { + kfree(host->adma_desc); + kfree(host->align_buffer); + printk(KERN_WARNING "%s: Unable to allocate ADMA " + "buffers. 
Falling back to standard DMA.\n", + mmc_hostname(mmc)); + host->flags &= ~SDHCI_USE_ADMA; + } + } + + /* XXX: Hack to get MMC layer to avoid highmem */ + if (!(host->flags & SDHCI_USE_DMA)) + mmc_dev(host->mmc)->dma_mask = NULL; host->max_clk = (caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT; if (host->max_clk == 0) { printk(KERN_ERR "%s: Hardware doesn't specify base clock " "frequency.\n", mmc_hostname(mmc)); - ret = -ENODEV; - goto unmap; + return -ENODEV; } host->max_clk *= 1000000; @@ -1454,8 +1599,7 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot) if (host->timeout_clk == 0) { printk(KERN_ERR "%s: Hardware doesn't specify timeout clock " "frequency.\n", mmc_hostname(mmc)); - ret = -ENODEV; - goto unmap; + return -ENODEV; } if (caps & SDHCI_TIMEOUT_CLK_UNIT) host->timeout_clk *= 1000; @@ -1466,7 +1610,7 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot) mmc->ops = &sdhci_ops; mmc->f_min = host->max_clk / 256; mmc->f_max = host->max_clk; - mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE | MMC_CAP_SDIO_IRQ; + mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ; if (caps & SDHCI_CAN_DO_HISPD) mmc->caps |= MMC_CAP_SD_HIGHSPEED; @@ -1482,20 +1626,22 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot) if (mmc->ocr_avail == 0) { printk(KERN_ERR "%s: Hardware doesn't report any " "support voltages.\n", mmc_hostname(mmc)); - ret = -ENODEV; - goto unmap; + return -ENODEV; } spin_lock_init(&host->lock); /* - * Maximum number of segments. Hardware cannot do scatter lists. + * Maximum number of segments. Depends on if the hardware + * can do scatter/gather or not. */ - if (host->flags & SDHCI_USE_DMA) + if (host->flags & SDHCI_USE_ADMA) + mmc->max_hw_segs = 128; + else if (host->flags & SDHCI_USE_DMA) mmc->max_hw_segs = 1; - else - mmc->max_hw_segs = 16; - mmc->max_phys_segs = 16; + else /* PIO */ + mmc->max_hw_segs = 128; + mmc->max_phys_segs = 128; /* * Maximum number of sectors in one transfer. Limited by DMA boundary @@ -1505,9 +1651,13 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot) /* * Maximum segment size. Could be one segment with the maximum number - * of bytes. + * of bytes. When doing hardware scatter/gather, each entry cannot + * be larger than 64 KiB though. */ - mmc->max_seg_size = mmc->max_req_size; + if (host->flags & SDHCI_USE_ADMA) + mmc->max_seg_size = 65536; + else + mmc->max_seg_size = mmc->max_req_size; /* * Maximum block size. 
This varies from controller to controller and @@ -1553,7 +1703,7 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot) host->led.default_trigger = mmc_hostname(mmc); host->led.brightness_set = sdhci_led_control; - ret = led_classdev_register(&pdev->dev, &host->led); + ret = led_classdev_register(mmc_dev(mmc), &host->led); if (ret) goto reset; #endif @@ -1562,8 +1712,9 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot) mmc_add_host(mmc); - printk(KERN_INFO "%s: SDHCI at 0x%08lx irq %d %s\n", - mmc_hostname(mmc), host->addr, host->irq, + printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s%s\n", + mmc_hostname(mmc), host->hw_name, mmc_dev(mmc)->bus_id, + (host->flags & SDHCI_USE_ADMA)?"A":"", (host->flags & SDHCI_USE_DMA)?"DMA":"PIO"); return 0; @@ -1576,35 +1727,40 @@ reset: untasklet: tasklet_kill(&host->card_tasklet); tasklet_kill(&host->finish_tasklet); -unmap: - iounmap(host->ioaddr); -release: - pci_release_region(pdev, host->bar); -free: - mmc_free_host(mmc); return ret; } -static void sdhci_remove_slot(struct pci_dev *pdev, int slot) +EXPORT_SYMBOL_GPL(sdhci_add_host); + +void sdhci_remove_host(struct sdhci_host *host, int dead) { - struct sdhci_chip *chip; - struct mmc_host *mmc; - struct sdhci_host *host; + unsigned long flags; - chip = pci_get_drvdata(pdev); - host = chip->hosts[slot]; - mmc = host->mmc; + if (dead) { + spin_lock_irqsave(&host->lock, flags); + + host->flags |= SDHCI_DEVICE_DEAD; + + if (host->mrq) { + printk(KERN_ERR "%s: Controller removed during " + " transfer!\n", mmc_hostname(host->mmc)); - chip->hosts[slot] = NULL; + host->mrq->cmd->error = -ENOMEDIUM; + tasklet_schedule(&host->finish_tasklet); + } + + spin_unlock_irqrestore(&host->lock, flags); + } - mmc_remove_host(mmc); + mmc_remove_host(host->mmc); #ifdef CONFIG_LEDS_CLASS led_classdev_unregister(&host->led); #endif - sdhci_reset(host, SDHCI_RESET_ALL); + if (!dead) + sdhci_reset(host, SDHCI_RESET_ALL); free_irq(host->irq, host); @@ -1613,106 +1769,21 @@ static void sdhci_remove_slot(struct pci_dev *pdev, int slot) tasklet_kill(&host->card_tasklet); tasklet_kill(&host->finish_tasklet); - iounmap(host->ioaddr); - - pci_release_region(pdev, host->bar); + kfree(host->adma_desc); + kfree(host->align_buffer); - mmc_free_host(mmc); + host->adma_desc = NULL; + host->align_buffer = NULL; } -static int __devinit sdhci_probe(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - int ret, i; - u8 slots, rev; - struct sdhci_chip *chip; - - BUG_ON(pdev == NULL); - BUG_ON(ent == NULL); +EXPORT_SYMBOL_GPL(sdhci_remove_host); - pci_read_config_byte(pdev, PCI_CLASS_REVISION, &rev); - - printk(KERN_INFO DRIVER_NAME - ": SDHCI controller found at %s [%04x:%04x] (rev %x)\n", - pci_name(pdev), (int)pdev->vendor, (int)pdev->device, - (int)rev); - - ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots); - if (ret) - return ret; - - slots = PCI_SLOT_INFO_SLOTS(slots) + 1; - DBG("found %d slot(s)\n", slots); - if (slots == 0) - return -ENODEV; - - ret = pci_enable_device(pdev); - if (ret) - return ret; - - chip = kzalloc(sizeof(struct sdhci_chip) + - sizeof(struct sdhci_host*) * slots, GFP_KERNEL); - if (!chip) { - ret = -ENOMEM; - goto err; - } - - chip->pdev = pdev; - chip->quirks = ent->driver_data; - - if (debug_quirks) - chip->quirks = debug_quirks; - - chip->num_slots = slots; - pci_set_drvdata(pdev, chip); - - for (i = 0;i < slots;i++) { - ret = sdhci_probe_slot(pdev, i); - if (ret) { - for (i--;i >= 0;i--) - sdhci_remove_slot(pdev, i); - goto free; - } - } - - return 
0; - -free: - pci_set_drvdata(pdev, NULL); - kfree(chip); - -err: - pci_disable_device(pdev); - return ret; -} - -static void __devexit sdhci_remove(struct pci_dev *pdev) +void sdhci_free_host(struct sdhci_host *host) { - int i; - struct sdhci_chip *chip; - - chip = pci_get_drvdata(pdev); - - if (chip) { - for (i = 0;i < chip->num_slots;i++) - sdhci_remove_slot(pdev, i); - - pci_set_drvdata(pdev, NULL); - - kfree(chip); - } - - pci_disable_device(pdev); + mmc_free_host(host->mmc); } -static struct pci_driver sdhci_driver = { - .name = DRIVER_NAME, - .id_table = pci_ids, - .probe = sdhci_probe, - .remove = __devexit_p(sdhci_remove), - .suspend = sdhci_suspend, - .resume = sdhci_resume, -}; +EXPORT_SYMBOL_GPL(sdhci_free_host); /*****************************************************************************\ * * @@ -1726,14 +1797,11 @@ static int __init sdhci_drv_init(void) ": Secure Digital Host Controller Interface driver\n"); printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); - return pci_register_driver(&sdhci_driver); + return 0; } static void __exit sdhci_drv_exit(void) { - DBG("Exiting\n"); - - pci_unregister_driver(&sdhci_driver); } module_init(sdhci_drv_init); @@ -1742,7 +1810,7 @@ module_exit(sdhci_drv_exit); module_param(debug_quirks, uint, 0444); MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>"); -MODULE_DESCRIPTION("Secure Digital Host Controller Interface driver"); +MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver"); MODULE_LICENSE("GPL"); MODULE_PARM_DESC(debug_quirks, "Force certain quirks."); diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 299118de893..5bb35528176 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -10,18 +10,6 @@ */ /* - * PCI registers - */ - -#define PCI_SDHCI_IFPIO 0x00 -#define PCI_SDHCI_IFDMA 0x01 -#define PCI_SDHCI_IFVENDOR 0x02 - -#define PCI_SLOT_INFO 0x40 /* 8 bits */ -#define PCI_SLOT_INFO_SLOTS(x) ((x >> 4) & 7) -#define PCI_SLOT_INFO_FIRST_BAR_MASK 0x07 - -/* * Controller registers */ @@ -72,6 +60,11 @@ #define SDHCI_CTRL_LED 0x01 #define SDHCI_CTRL_4BITBUS 0x02 #define SDHCI_CTRL_HISPD 0x04 +#define SDHCI_CTRL_DMA_MASK 0x18 +#define SDHCI_CTRL_SDMA 0x00 +#define SDHCI_CTRL_ADMA1 0x08 +#define SDHCI_CTRL_ADMA32 0x10 +#define SDHCI_CTRL_ADMA64 0x18 #define SDHCI_POWER_CONTROL 0x29 #define SDHCI_POWER_ON 0x01 @@ -117,6 +110,7 @@ #define SDHCI_INT_DATA_END_BIT 0x00400000 #define SDHCI_INT_BUS_POWER 0x00800000 #define SDHCI_INT_ACMD12ERR 0x01000000 +#define SDHCI_INT_ADMA_ERROR 0x02000000 #define SDHCI_INT_NORMAL_MASK 0x00007FFF #define SDHCI_INT_ERROR_MASK 0xFFFF8000 @@ -140,11 +134,14 @@ #define SDHCI_CLOCK_BASE_SHIFT 8 #define SDHCI_MAX_BLOCK_MASK 0x00030000 #define SDHCI_MAX_BLOCK_SHIFT 16 +#define SDHCI_CAN_DO_ADMA2 0x00080000 +#define SDHCI_CAN_DO_ADMA1 0x00100000 #define SDHCI_CAN_DO_HISPD 0x00200000 #define SDHCI_CAN_DO_DMA 0x00400000 #define SDHCI_CAN_VDD_330 0x01000000 #define SDHCI_CAN_VDD_300 0x02000000 #define SDHCI_CAN_VDD_180 0x04000000 +#define SDHCI_CAN_64BIT 0x10000000 /* 44-47 reserved for more caps */ @@ -152,7 +149,16 @@ /* 4C-4F reserved for more max current */ -/* 50-FB reserved */ +#define SDHCI_SET_ACMD12_ERROR 0x50 +#define SDHCI_SET_INT_ERROR 0x52 + +#define SDHCI_ADMA_ERROR 0x54 + +/* 55-57 reserved */ + +#define SDHCI_ADMA_ADDRESS 0x58 + +/* 60-FB reserved */ #define SDHCI_SLOT_INT_STATUS 0xFC @@ -161,11 +167,50 @@ #define SDHCI_VENDOR_VER_SHIFT 8 #define SDHCI_SPEC_VER_MASK 0x00FF #define SDHCI_SPEC_VER_SHIFT 0 +#define SDHCI_SPEC_100 0 
+#define SDHCI_SPEC_200 1 -struct sdhci_chip; +struct sdhci_ops; struct sdhci_host { - struct sdhci_chip *chip; + /* Data set by hardware interface driver */ + const char *hw_name; /* Hardware bus name */ + + unsigned int quirks; /* Deviations from spec. */ + +/* Controller doesn't honor resets unless we touch the clock register */ +#define SDHCI_QUIRK_CLOCK_BEFORE_RESET (1<<0) +/* Controller has bad caps bits, but really supports DMA */ +#define SDHCI_QUIRK_FORCE_DMA (1<<1) +/* Controller doesn't like to be reset when there is no card inserted. */ +#define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2) +/* Controller doesn't like clearing the power reg before a change */ +#define SDHCI_QUIRK_SINGLE_POWER_WRITE (1<<3) +/* Controller has flaky internal state so reset it on each ios change */ +#define SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS (1<<4) +/* Controller has an unusable DMA engine */ +#define SDHCI_QUIRK_BROKEN_DMA (1<<5) +/* Controller has an unusable ADMA engine */ +#define SDHCI_QUIRK_BROKEN_ADMA (1<<6) +/* Controller can only DMA from 32-bit aligned addresses */ +#define SDHCI_QUIRK_32BIT_DMA_ADDR (1<<7) +/* Controller can only DMA chunk sizes that are a multiple of 32 bits */ +#define SDHCI_QUIRK_32BIT_DMA_SIZE (1<<8) +/* Controller can only ADMA chunks that are a multiple of 32 bits */ +#define SDHCI_QUIRK_32BIT_ADMA_SIZE (1<<9) +/* Controller needs to be reset after each request to stay stable */ +#define SDHCI_QUIRK_RESET_AFTER_REQUEST (1<<10) +/* Controller needs voltage and power writes to happen separately */ +#define SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER (1<<11) +/* Controller provides an incorrect timeout value for transfers */ +#define SDHCI_QUIRK_BROKEN_TIMEOUT_VAL (1<<12) + + int irq; /* Device IRQ */ + void __iomem * ioaddr; /* Mapped address */ + + const struct sdhci_ops *ops; /* Low level hw interface */ + + /* Internal data */ struct mmc_host *mmc; /* MMC structure */ #ifdef CONFIG_LEDS_CLASS @@ -176,7 +221,11 @@ struct sdhci_host { int flags; /* Host attributes */ #define SDHCI_USE_DMA (1<<0) /* Host is DMA capable */ -#define SDHCI_REQ_USE_DMA (1<<1) /* Use DMA for this req. */ +#define SDHCI_USE_ADMA (1<<1) /* Host is ADMA capable */ +#define SDHCI_REQ_USE_DMA (1<<2) /* Use DMA for this req. */ +#define SDHCI_DEVICE_DEAD (1<<3) /* Device unresponsive */ + + unsigned int version; /* SDHCI spec. version */ unsigned int max_clk; /* Max possible freq (MHz) */ unsigned int timeout_clk; /* Timeout freq (KHz) */ @@ -194,22 +243,41 @@ struct sdhci_host { int offset; /* Offset into current sg */ int remain; /* Bytes left in current */ - int irq; /* Device IRQ */ - int bar; /* PCI BAR index */ - unsigned long addr; /* Bus address */ - void __iomem * ioaddr; /* Mapped address */ + int sg_count; /* Mapped sg entries */ + + u8 *adma_desc; /* ADMA descriptor table */ + u8 *align_buffer; /* Bounce buffer */ + + dma_addr_t adma_addr; /* Mapped ADMA descr. 
table */ + dma_addr_t align_addr; /* Mapped bounce buffer */ struct tasklet_struct card_tasklet; /* Tasklet structures */ struct tasklet_struct finish_tasklet; struct timer_list timer; /* Timer for timeouts */ -}; -struct sdhci_chip { - struct pci_dev *pdev; + unsigned long private[0] ____cacheline_aligned; +}; - unsigned long quirks; - int num_slots; /* Slots on controller */ - struct sdhci_host *hosts[0]; /* Pointers to hosts */ +struct sdhci_ops { + int (*enable_dma)(struct sdhci_host *host); }; + + +extern struct sdhci_host *sdhci_alloc_host(struct device *dev, + size_t priv_size); +extern void sdhci_free_host(struct sdhci_host *host); + +static inline void *sdhci_priv(struct sdhci_host *host) +{ + return (void *)host->private; +} + +extern int sdhci_add_host(struct sdhci_host *host); +extern void sdhci_remove_host(struct sdhci_host *host, int dead); + +#ifdef CONFIG_PM +extern int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state); +extern int sdhci_resume_host(struct sdhci_host *host); +#endif diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c new file mode 100644 index 00000000000..f99e9f72162 --- /dev/null +++ b/drivers/mmc/host/sdricoh_cs.c @@ -0,0 +1,575 @@ +/* + * sdricoh_cs.c - driver for Ricoh Secure Digital Card Readers that can be + * found on some Ricoh RL5c476 II cardbus bridge + * + * Copyright (C) 2006 - 2008 Sascha Sommer <saschasommer@freenet.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + */ + +/* +#define DEBUG +#define VERBOSE_DEBUG +*/ +#include <linux/delay.h> +#include <linux/highmem.h> +#include <linux/pci.h> +#include <linux/ioport.h> +#include <linux/scatterlist.h> +#include <linux/version.h> + +#include <pcmcia/cs_types.h> +#include <pcmcia/cs.h> +#include <pcmcia/cistpl.h> +#include <pcmcia/ds.h> +#include <linux/io.h> + +#include <linux/mmc/host.h> + +#define DRIVER_NAME "sdricoh_cs" + +static unsigned int switchlocked; + +/* i/o region */ +#define SDRICOH_PCI_REGION 0 +#define SDRICOH_PCI_REGION_SIZE 0x1000 + +/* registers */ +#define R104_VERSION 0x104 +#define R200_CMD 0x200 +#define R204_CMD_ARG 0x204 +#define R208_DATAIO 0x208 +#define R20C_RESP 0x20c +#define R21C_STATUS 0x21c +#define R2E0_INIT 0x2e0 +#define R2E4_STATUS_RESP 0x2e4 +#define R2F0_RESET 0x2f0 +#define R224_MODE 0x224 +#define R226_BLOCKSIZE 0x226 +#define R228_POWER 0x228 +#define R230_DATA 0x230 + +/* flags for the R21C_STATUS register */ +#define STATUS_CMD_FINISHED 0x00000001 +#define STATUS_TRANSFER_FINISHED 0x00000004 +#define STATUS_CARD_INSERTED 0x00000020 +#define STATUS_CARD_LOCKED 0x00000080 +#define STATUS_CMD_TIMEOUT 0x00400000 +#define STATUS_READY_TO_READ 0x01000000 +#define STATUS_READY_TO_WRITE 0x02000000 +#define STATUS_BUSY 0x40000000 + +/* timeouts */ +#define INIT_TIMEOUT 100 +#define CMD_TIMEOUT 100000 +#define TRANSFER_TIMEOUT 100000 +#define BUSY_TIMEOUT 32767 + +/* list of supported pcmcia devices */ +static struct pcmcia_device_id pcmcia_ids[] = { + /* vendor and device strings followed by their crc32 hashes */ + PCMCIA_DEVICE_PROD_ID12("RICOH", "Bay1Controller", 0xd9f522ed, + 0xc3901202), + PCMCIA_DEVICE_NULL, +}; + +MODULE_DEVICE_TABLE(pcmcia, pcmcia_ids); + +/* mmc privdata */ +struct sdricoh_host { + struct device *dev; + struct mmc_host *mmc; /* MMC structure */ + unsigned char __iomem *iobase; + struct pci_dev *pci_dev; + int app_cmd; +}; + +/***************** register i/o helper functions *****************************/ + +static inline unsigned int sdricoh_readl(struct sdricoh_host *host, + unsigned int reg) +{ + unsigned int value = readl(host->iobase + reg); + dev_vdbg(host->dev, "rl %x 0x%x\n", reg, value); + return value; +} + +static inline void sdricoh_writel(struct sdricoh_host *host, unsigned int reg, + unsigned int value) +{ + writel(value, host->iobase + reg); + dev_vdbg(host->dev, "wl %x 0x%x\n", reg, value); + +} + +static inline unsigned int sdricoh_readw(struct sdricoh_host *host, + unsigned int reg) +{ + unsigned int value = readw(host->iobase + reg); + dev_vdbg(host->dev, "rb %x 0x%x\n", reg, value); + return value; +} + +static inline void sdricoh_writew(struct sdricoh_host *host, unsigned int reg, + unsigned short value) +{ + writew(value, host->iobase + reg); + dev_vdbg(host->dev, "ww %x 0x%x\n", reg, value); +} + +static inline unsigned int sdricoh_readb(struct sdricoh_host *host, + unsigned int reg) +{ + unsigned int value = readb(host->iobase + reg); + dev_vdbg(host->dev, "rb %x 0x%x\n", reg, value); + return value; +} + +static int sdricoh_query_status(struct sdricoh_host *host, unsigned int wanted, + unsigned int timeout){ + unsigned int loop; + unsigned int status = 0; + struct device *dev = host->dev; + for (loop = 0; loop < timeout; loop++) { + status = sdricoh_readl(host, R21C_STATUS); + sdricoh_writel(host, R2E4_STATUS_RESP, status); + if (status & wanted) + break; + } + + if (loop == timeout) { + dev_err(dev, "query_status: timeout waiting for %x\n", wanted); + return -ETIMEDOUT; + } + + /* do not do this check in the loop 
as some commands fail otherwise */ + if (status & 0x7F0000) { + dev_err(dev, "waiting for status bit %x failed\n", wanted); + return -EINVAL; + } + return 0; + +} + +static int sdricoh_mmc_cmd(struct sdricoh_host *host, unsigned char opcode, + unsigned int arg) +{ + unsigned int status; + int result = 0; + unsigned int loop = 0; + /* reset status reg? */ + sdricoh_writel(host, R21C_STATUS, 0x18); + /* fill parameters */ + sdricoh_writel(host, R204_CMD_ARG, arg); + sdricoh_writel(host, R200_CMD, (0x10000 << 8) | opcode); + /* wait for command completion */ + if (opcode) { + for (loop = 0; loop < CMD_TIMEOUT; loop++) { + status = sdricoh_readl(host, R21C_STATUS); + sdricoh_writel(host, R2E4_STATUS_RESP, status); + if (status & STATUS_CMD_FINISHED) + break; + } + /* don't check for timeout in the loop it is not always + reset correctly + */ + if (loop == CMD_TIMEOUT || status & STATUS_CMD_TIMEOUT) + result = -ETIMEDOUT; + + } + + return result; + +} + +static int sdricoh_reset(struct sdricoh_host *host) +{ + dev_dbg(host->dev, "reset\n"); + sdricoh_writel(host, R2F0_RESET, 0x10001); + sdricoh_writel(host, R2E0_INIT, 0x10000); + if (sdricoh_readl(host, R2E0_INIT) != 0x10000) + return -EIO; + sdricoh_writel(host, R2E0_INIT, 0x10007); + + sdricoh_writel(host, R224_MODE, 0x2000000); + sdricoh_writel(host, R228_POWER, 0xe0); + + + /* status register ? */ + sdricoh_writel(host, R21C_STATUS, 0x18); + + return 0; +} + +static int sdricoh_blockio(struct sdricoh_host *host, int read, + u8 *buf, int len) +{ + int size; + u32 data = 0; + /* wait until the data is available */ + if (read) { + if (sdricoh_query_status(host, STATUS_READY_TO_READ, + TRANSFER_TIMEOUT)) + return -ETIMEDOUT; + sdricoh_writel(host, R21C_STATUS, 0x18); + /* read data */ + while (len) { + data = sdricoh_readl(host, R230_DATA); + size = min(len, 4); + len -= size; + while (size) { + *buf = data & 0xFF; + buf++; + data >>= 8; + size--; + } + } + } else { + if (sdricoh_query_status(host, STATUS_READY_TO_WRITE, + TRANSFER_TIMEOUT)) + return -ETIMEDOUT; + sdricoh_writel(host, R21C_STATUS, 0x18); + /* write data */ + while (len) { + size = min(len, 4); + len -= size; + while (size) { + data >>= 8; + data |= (u32)*buf << 24; + buf++; + size--; + } + sdricoh_writel(host, R230_DATA, data); + } + } + + if (len) + return -EIO; + + return 0; +} + +static void sdricoh_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct sdricoh_host *host = mmc_priv(mmc); + struct mmc_command *cmd = mrq->cmd; + struct mmc_data *data = cmd->data; + struct device *dev = host->dev; + unsigned char opcode = cmd->opcode; + int i; + + dev_dbg(dev, "=============================\n"); + dev_dbg(dev, "sdricoh_request opcode=%i\n", opcode); + + sdricoh_writel(host, R21C_STATUS, 0x18); + + /* MMC_APP_CMDs need some special handling */ + if (host->app_cmd) { + opcode |= 64; + host->app_cmd = 0; + } else if (opcode == 55) + host->app_cmd = 1; + + /* read/write commands seem to require this */ + if (data) { + sdricoh_writew(host, R226_BLOCKSIZE, data->blksz); + sdricoh_writel(host, R208_DATAIO, 0); + } + + cmd->error = sdricoh_mmc_cmd(host, opcode, cmd->arg); + + /* read response buffer */ + if (cmd->flags & MMC_RSP_PRESENT) { + if (cmd->flags & MMC_RSP_136) { + /* CRC is stripped so we need to do some shifting. 
*/ + for (i = 0; i < 4; i++) { + cmd->resp[i] = + sdricoh_readl(host, + R20C_RESP + (3 - i) * 4) << 8; + if (i != 3) + cmd->resp[i] |= + sdricoh_readb(host, R20C_RESP + + (3 - i) * 4 - 1); + } + } else + cmd->resp[0] = sdricoh_readl(host, R20C_RESP); + } + + /* transfer data */ + if (data && cmd->error == 0) { + dev_dbg(dev, "transfer: blksz %i blocks %i sg_len %i " + "sg length %i\n", data->blksz, data->blocks, + data->sg_len, data->sg->length); + + /* enter data reading mode */ + sdricoh_writel(host, R21C_STATUS, 0x837f031e); + for (i = 0; i < data->blocks; i++) { + size_t len = data->blksz; + u8 *buf; + struct page *page; + int result; + page = sg_page(data->sg); + + buf = kmap(page) + data->sg->offset + (len * i); + result = + sdricoh_blockio(host, + data->flags & MMC_DATA_READ, buf, len); + kunmap(page); + flush_dcache_page(page); + if (result) { + dev_err(dev, "sdricoh_request: cmd %i " + "block transfer failed\n", cmd->opcode); + cmd->error = result; + break; + } else + data->bytes_xfered += len; + } + + sdricoh_writel(host, R208_DATAIO, 1); + + if (sdricoh_query_status(host, STATUS_TRANSFER_FINISHED, + TRANSFER_TIMEOUT)) { + dev_err(dev, "sdricoh_request: transfer end error\n"); + cmd->error = -EINVAL; + } + } + /* FIXME check busy flag */ + + mmc_request_done(mmc, mrq); + dev_dbg(dev, "=============================\n"); +} + +static void sdricoh_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct sdricoh_host *host = mmc_priv(mmc); + dev_dbg(host->dev, "set_ios\n"); + + if (ios->power_mode == MMC_POWER_ON) { + sdricoh_writel(host, R228_POWER, 0xc0e0); + + if (ios->bus_width == MMC_BUS_WIDTH_4) { + sdricoh_writel(host, R224_MODE, 0x2000300); + sdricoh_writel(host, R228_POWER, 0x40e0); + } else { + sdricoh_writel(host, R224_MODE, 0x2000340); + } + + } else if (ios->power_mode == MMC_POWER_UP) { + sdricoh_writel(host, R224_MODE, 0x2000320); + sdricoh_writel(host, R228_POWER, 0xe0); + } +} + +static int sdricoh_get_ro(struct mmc_host *mmc) +{ + struct sdricoh_host *host = mmc_priv(mmc); + unsigned int status; + + status = sdricoh_readl(host, R21C_STATUS); + sdricoh_writel(host, R2E4_STATUS_RESP, status); + + /* some notebooks seem to have the locked flag switched */ + if (switchlocked) + return !(status & STATUS_CARD_LOCKED); + + return (status & STATUS_CARD_LOCKED); +} + +static struct mmc_host_ops sdricoh_ops = { + .request = sdricoh_request, + .set_ios = sdricoh_set_ios, + .get_ro = sdricoh_get_ro, +}; + +/* initialize the control and register it to the mmc framework */ +static int sdricoh_init_mmc(struct pci_dev *pci_dev, + struct pcmcia_device *pcmcia_dev) +{ + int result = 0; + void __iomem *iobase = NULL; + struct mmc_host *mmc = NULL; + struct sdricoh_host *host = NULL; + struct device *dev = &pcmcia_dev->dev; + /* map iomem */ + if (pci_resource_len(pci_dev, SDRICOH_PCI_REGION) != + SDRICOH_PCI_REGION_SIZE) { + dev_dbg(dev, "unexpected pci resource len\n"); + return -ENODEV; + } + iobase = + pci_iomap(pci_dev, SDRICOH_PCI_REGION, SDRICOH_PCI_REGION_SIZE); + if (!iobase) { + dev_err(dev, "unable to map iobase\n"); + return -ENODEV; + } + /* check version? 
*/ + if (readl(iobase + R104_VERSION) != 0x4000) { + dev_dbg(dev, "no supported mmc controller found\n"); + result = -ENODEV; + goto err; + } + /* allocate privdata */ + mmc = pcmcia_dev->priv = + mmc_alloc_host(sizeof(struct sdricoh_host), &pcmcia_dev->dev); + if (!mmc) { + dev_err(dev, "mmc_alloc_host failed\n"); + result = -ENOMEM; + goto err; + } + host = mmc_priv(mmc); + + host->iobase = iobase; + host->dev = dev; + host->pci_dev = pci_dev; + + mmc->ops = &sdricoh_ops; + + /* FIXME: frequency and voltage handling is done by the controller + */ + mmc->f_min = 450000; + mmc->f_max = 24000000; + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + mmc->caps |= MMC_CAP_4_BIT_DATA; + + mmc->max_seg_size = 1024 * 512; + mmc->max_blk_size = 512; + + /* reset the controler */ + if (sdricoh_reset(host)) { + dev_dbg(dev, "could not reset\n"); + result = -EIO; + goto err; + + } + + result = mmc_add_host(mmc); + + if (!result) { + dev_dbg(dev, "mmc host registered\n"); + return 0; + } + +err: + if (iobase) + iounmap(iobase); + if (mmc) + mmc_free_host(mmc); + + return result; +} + +/* search for supported mmc controllers */ +static int sdricoh_pcmcia_probe(struct pcmcia_device *pcmcia_dev) +{ + struct pci_dev *pci_dev = NULL; + + dev_info(&pcmcia_dev->dev, "Searching MMC controller for pcmcia device" + " %s %s ...\n", pcmcia_dev->prod_id[0], pcmcia_dev->prod_id[1]); + + /* search pci cardbus bridge that contains the mmc controler */ + /* the io region is already claimed by yenta_socket... */ + while ((pci_dev = + pci_get_device(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, + pci_dev))) { + /* try to init the device */ + if (!sdricoh_init_mmc(pci_dev, pcmcia_dev)) { + dev_info(&pcmcia_dev->dev, "MMC controller found\n"); + return 0; + } + + } + dev_err(&pcmcia_dev->dev, "No MMC controller was found.\n"); + return -ENODEV; +} + +static void sdricoh_pcmcia_detach(struct pcmcia_device *link) +{ + struct mmc_host *mmc = link->priv; + + dev_dbg(&link->dev, "detach\n"); + + /* remove mmc host */ + if (mmc) { + struct sdricoh_host *host = mmc_priv(mmc); + mmc_remove_host(mmc); + pci_iounmap(host->pci_dev, host->iobase); + pci_dev_put(host->pci_dev); + mmc_free_host(mmc); + } + pcmcia_disable_device(link); + +} + +#ifdef CONFIG_PM +static int sdricoh_pcmcia_suspend(struct pcmcia_device *link) +{ + struct mmc_host *mmc = link->priv; + dev_dbg(&link->dev, "suspend\n"); + mmc_suspend_host(mmc, PMSG_SUSPEND); + return 0; +} + +static int sdricoh_pcmcia_resume(struct pcmcia_device *link) +{ + struct mmc_host *mmc = link->priv; + dev_dbg(&link->dev, "resume\n"); + sdricoh_reset(mmc_priv(mmc)); + mmc_resume_host(mmc); + return 0; +} +#else +#define sdricoh_pcmcia_suspend NULL +#define sdricoh_pcmcia_resume NULL +#endif + +static struct pcmcia_driver sdricoh_driver = { + .drv = { + .name = DRIVER_NAME, + }, + .probe = sdricoh_pcmcia_probe, + .remove = sdricoh_pcmcia_detach, + .id_table = pcmcia_ids, + .suspend = sdricoh_pcmcia_suspend, + .resume = sdricoh_pcmcia_resume, +}; + +/*****************************************************************************\ + * * + * Driver init/exit * + * * +\*****************************************************************************/ + +static int __init sdricoh_drv_init(void) +{ + return pcmcia_register_driver(&sdricoh_driver); +} + +static void __exit sdricoh_drv_exit(void) +{ + pcmcia_unregister_driver(&sdricoh_driver); +} + +module_init(sdricoh_drv_init); +module_exit(sdricoh_drv_exit); + +module_param(switchlocked, uint, 0444); + +MODULE_AUTHOR("Sascha Sommer 
<saschasommer@freenet.de>"); +MODULE_DESCRIPTION("Ricoh PCMCIA Secure Digital Interface driver"); +MODULE_LICENSE("GPL"); + +MODULE_PARM_DESC(switchlocked, "Switch the cards locked status." + "Use this when unlocked cards are shown readonly (default 0)"); diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c index 1c14a186f00..13844843e8d 100644 --- a/drivers/mmc/host/tifm_sd.c +++ b/drivers/mmc/host/tifm_sd.c @@ -973,7 +973,7 @@ static int tifm_sd_probe(struct tifm_dev *sock) mmc->ops = &tifm_sd_ops; mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; - mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE; + mmc->caps = MMC_CAP_4_BIT_DATA; mmc->f_min = 20000000 / 60; mmc->f_max = 24000000; diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c index c303e7f57ab..adda3795203 100644 --- a/drivers/mmc/host/wbsd.c +++ b/drivers/mmc/host/wbsd.c @@ -68,16 +68,16 @@ static const int unlock_codes[] = { 0x83, 0x87 }; static const int valid_ids[] = { 0x7112, - }; +}; #ifdef CONFIG_PNP -static unsigned int nopnp = 0; +static unsigned int param_nopnp = 0; #else -static const unsigned int nopnp = 1; +static const unsigned int param_nopnp = 1; #endif -static unsigned int io = 0x248; -static unsigned int irq = 6; -static int dma = 2; +static unsigned int param_io = 0x248; +static unsigned int param_irq = 6; +static int param_dma = 2; /* * Basic functions @@ -939,7 +939,7 @@ static int wbsd_get_ro(struct mmc_host *mmc) spin_unlock_bh(&host->lock); - return csr & WBSD_WRPT; + return !!(csr & WBSD_WRPT); } static const struct mmc_host_ops wbsd_ops = { @@ -1219,7 +1219,7 @@ static int __devinit wbsd_alloc_mmc(struct device *dev) mmc->f_min = 375000; mmc->f_max = 24000000; mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; - mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE; + mmc->caps = MMC_CAP_4_BIT_DATA; spin_lock_init(&host->lock); @@ -1420,7 +1420,7 @@ kfree: dma_unmap_single(mmc_dev(host->mmc), host->dma_addr, WBSD_DMA_SIZE, DMA_BIDIRECTIONAL); - host->dma_addr = (dma_addr_t)NULL; + host->dma_addr = 0; kfree(host->dma_buffer); host->dma_buffer = NULL; @@ -1445,7 +1445,7 @@ static void wbsd_release_dma(struct wbsd_host *host) host->dma = -1; host->dma_buffer = NULL; - host->dma_addr = (dma_addr_t)NULL; + host->dma_addr = 0; } /* @@ -1765,7 +1765,7 @@ static void __devexit wbsd_shutdown(struct device *dev, int pnp) static int __devinit wbsd_probe(struct platform_device *dev) { /* Use the module parameters for resources */ - return wbsd_init(&dev->dev, io, irq, dma, 0); + return wbsd_init(&dev->dev, param_io, param_irq, param_dma, 0); } static int __devexit wbsd_remove(struct platform_device *dev) @@ -1979,14 +1979,14 @@ static int __init wbsd_drv_init(void) #ifdef CONFIG_PNP - if (!nopnp) { + if (!param_nopnp) { result = pnp_register_driver(&wbsd_pnp_driver); if (result < 0) return result; } #endif /* CONFIG_PNP */ - if (nopnp) { + if (param_nopnp) { result = platform_driver_register(&wbsd_driver); if (result < 0) return result; @@ -2012,12 +2012,12 @@ static void __exit wbsd_drv_exit(void) { #ifdef CONFIG_PNP - if (!nopnp) + if (!param_nopnp) pnp_unregister_driver(&wbsd_pnp_driver); #endif /* CONFIG_PNP */ - if (nopnp) { + if (param_nopnp) { platform_device_unregister(wbsd_device); platform_driver_unregister(&wbsd_driver); @@ -2029,11 +2029,11 @@ static void __exit wbsd_drv_exit(void) module_init(wbsd_drv_init); module_exit(wbsd_drv_exit); #ifdef CONFIG_PNP -module_param(nopnp, uint, 0444); +module_param_named(nopnp, param_nopnp, uint, 0444); #endif -module_param(io, uint, 0444); 
-module_param(irq, uint, 0444); -module_param(dma, int, 0444); +module_param_named(io, param_io, uint, 0444); +module_param_named(irq, param_irq, uint, 0444); +module_param_named(dma, param_dma, int, 0444); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>"); diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index 665341e4305..387a1339501 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -585,8 +585,9 @@ static struct config_item_type netconsole_target_type = { * Group operations and type for netconsole_subsys. */ -static struct config_item *make_netconsole_target(struct config_group *group, - const char *name) +static int make_netconsole_target(struct config_group *group, + const char *name, + struct config_item **new_item) { unsigned long flags; struct netconsole_target *nt; @@ -598,7 +599,7 @@ static struct config_item *make_netconsole_target(struct config_group *group, nt = kzalloc(sizeof(*nt), GFP_KERNEL); if (!nt) { printk(KERN_ERR "netconsole: failed to allocate memory\n"); - return NULL; + return -ENOMEM; } nt->np.name = "netconsole"; @@ -615,7 +616,8 @@ static struct config_item *make_netconsole_target(struct config_group *group, list_add(&nt->list, &target_list); spin_unlock_irqrestore(&target_list_lock, flags); - return &nt->item; + *new_item = &nt->item; + return 0; } static void drop_netconsole_target(struct config_group *group, diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c index 3dd537be87d..b54e2ea8346 100644 --- a/drivers/net/wireless/libertas/if_sdio.c +++ b/drivers/net/wireless/libertas/if_sdio.c @@ -1,7 +1,7 @@ /* * linux/drivers/net/wireless/libertas/if_sdio.c * - * Copyright 2007 Pierre Ossman + * Copyright 2007-2008 Pierre Ossman * * Inspired by if_cs.c, Copyright 2007 Holger Schurig * @@ -266,13 +266,10 @@ static int if_sdio_card_to_host(struct if_sdio_card *card) /* * The transfer must be in one transaction or the firmware - * goes suicidal. + * goes suicidal. There's no way to guarantee that for all + * controllers, but we can at least try. */ - chunk = size; - if ((chunk > card->func->cur_blksize) || (chunk > 512)) { - chunk = (chunk + card->func->cur_blksize - 1) / - card->func->cur_blksize * card->func->cur_blksize; - } + chunk = sdio_align_size(card->func, size); ret = sdio_readsb(card->func, card->buffer, card->ioport, chunk); if (ret) @@ -696,13 +693,10 @@ static int if_sdio_host_to_card(struct lbs_private *priv, /* * The transfer must be in one transaction or the firmware - * goes suicidal. + * goes suicidal. There's no way to guarantee that for all + * controllers, but we can at least try. */ - size = nb + 4; - if ((size > card->func->cur_blksize) || (size > 512)) { - size = (size + card->func->cur_blksize - 1) / - card->func->cur_blksize * card->func->cur_blksize; - } + size = sdio_align_size(card->func, nb + 4); packet = kzalloc(sizeof(struct if_sdio_packet) + size, GFP_ATOMIC); diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile index 4d1ce2e7361..7d63f8ced24 100644 --- a/drivers/pci/Makefile +++ b/drivers/pci/Makefile @@ -2,7 +2,7 @@ # Makefile for the PCI bus specific drivers. 
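# The slot.o object picked up by obj-y just below provides the generic
# pci_slot core (pci_create_slot(), pci_slots_kset) that the hotplug driver
# conversions later in this patch build on.  A rough, illustrative sketch of
# the resulting interface change (argument names mirror the fakephp
# conversion further down, not a fixed rule):
#
#	/* old */ retval = pci_hp_register(slot);
#	/* new */ retval = pci_hp_register(slot, dev->bus,
#					   PCI_SLOT(dev->devfn));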
# -obj-y += access.o bus.o probe.o remove.o pci.o quirks.o \ +obj-y += access.o bus.o probe.o remove.o pci.o quirks.o slot.o \ pci-driver.o search.o pci-sysfs.o rom.o setup-res.o obj-$(CONFIG_PROC_FS) += proc.o diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c index f8c187a763b..93e37f0666a 100644 --- a/drivers/pci/hotplug/acpi_pcihp.c +++ b/drivers/pci/hotplug/acpi_pcihp.c @@ -30,6 +30,7 @@ #include <linux/types.h> #include <linux/pci.h> #include <linux/pci_hotplug.h> +#include <linux/pci-acpi.h> #include <acpi/acpi.h> #include <acpi/acpi_bus.h> #include <acpi/actypes.h> @@ -299,7 +300,7 @@ free_and_return: * * @handle - the handle of the hotplug controller. */ -acpi_status acpi_run_oshp(acpi_handle handle) +static acpi_status acpi_run_oshp(acpi_handle handle) { acpi_status status; struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; @@ -322,9 +323,6 @@ acpi_status acpi_run_oshp(acpi_handle handle) kfree(string.pointer); return status; } -EXPORT_SYMBOL_GPL(acpi_run_oshp); - - /* acpi_get_hp_params_from_firmware * @@ -374,6 +372,85 @@ acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus, } EXPORT_SYMBOL_GPL(acpi_get_hp_params_from_firmware); +/** + * acpi_get_hp_hw_control_from_firmware + * @dev: the pci_dev of the bridge that has a hotplug controller + * @flags: requested control bits for _OSC + * + * Attempt to take hotplug control from firmware. + */ +int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags) +{ + acpi_status status; + acpi_handle chandle, handle = DEVICE_ACPI_HANDLE(&(dev->dev)); + struct pci_dev *pdev = dev; + struct pci_bus *parent; + struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; + + flags &= (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | + OSC_SHPC_NATIVE_HP_CONTROL | + OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); + if (!flags) { + err("Invalid flags %u specified!\n", flags); + return -EINVAL; + } + + /* + * Per PCI firmware specification, we should run the ACPI _OSC + * method to get control of hotplug hardware before using it. If + * an _OSC is missing, we look for an OSHP to do the same thing. + * To handle different BIOS behavior, we look for _OSC and OSHP + * within the scope of the hotplug controller and its parents, + * upto the host bridge under which this controller exists. + */ + while (!handle) { + /* + * This hotplug controller was not listed in the ACPI name + * space at all. Try to get acpi handle of parent pci bus. 
+ */ + if (!pdev || !pdev->bus->parent) + break; + parent = pdev->bus->parent; + dbg("Could not find %s in acpi namespace, trying parent\n", + pci_name(pdev)); + if (!parent->self) + /* Parent must be a host bridge */ + handle = acpi_get_pci_rootbridge_handle( + pci_domain_nr(parent), + parent->number); + else + handle = DEVICE_ACPI_HANDLE(&(parent->self->dev)); + pdev = parent->self; + } + + while (handle) { + acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); + dbg("Trying to get hotplug control for %s \n", + (char *)string.pointer); + status = pci_osc_control_set(handle, flags); + if (status == AE_NOT_FOUND) + status = acpi_run_oshp(handle); + if (ACPI_SUCCESS(status)) { + dbg("Gained control for hotplug HW for pci %s (%s)\n", + pci_name(dev), (char *)string.pointer); + kfree(string.pointer); + return 0; + } + if (acpi_root_bridge(handle)) + break; + chandle = handle; + status = acpi_get_parent(chandle, &handle); + if (ACPI_FAILURE(status)) + break; + } + + dbg("Cannot get control of hotplug hardware for pci %s\n", + pci_name(dev)); + + kfree(string.pointer); + return -ENODEV; +} +EXPORT_SYMBOL(acpi_get_hp_hw_control_from_firmware); /* acpi_root_bridge - check to see if this acpi object is a root bridge * diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h index 7a29164d4b3..eecf7cbf413 100644 --- a/drivers/pci/hotplug/acpiphp.h +++ b/drivers/pci/hotplug/acpiphp.h @@ -215,7 +215,6 @@ extern u8 acpiphp_get_power_status (struct acpiphp_slot *slot); extern u8 acpiphp_get_attention_status (struct acpiphp_slot *slot); extern u8 acpiphp_get_latch_status (struct acpiphp_slot *slot); extern u8 acpiphp_get_adapter_status (struct acpiphp_slot *slot); -extern u32 acpiphp_get_address (struct acpiphp_slot *slot); /* variables */ extern int acpiphp_debug; diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c index 7af68ba2790..0e496e866a8 100644 --- a/drivers/pci/hotplug/acpiphp_core.c +++ b/drivers/pci/hotplug/acpiphp_core.c @@ -70,7 +70,6 @@ static int disable_slot (struct hotplug_slot *slot); static int set_attention_status (struct hotplug_slot *slot, u8 value); static int get_power_status (struct hotplug_slot *slot, u8 *value); static int get_attention_status (struct hotplug_slot *slot, u8 *value); -static int get_address (struct hotplug_slot *slot, u32 *value); static int get_latch_status (struct hotplug_slot *slot, u8 *value); static int get_adapter_status (struct hotplug_slot *slot, u8 *value); @@ -83,7 +82,6 @@ static struct hotplug_slot_ops acpi_hotplug_slot_ops = { .get_attention_status = get_attention_status, .get_latch_status = get_latch_status, .get_adapter_status = get_adapter_status, - .get_address = get_address, }; @@ -274,23 +272,6 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) return 0; } - -/** - * get_address - get pci address of a slot - * @hotplug_slot: slot to get status - * @value: pointer to struct pci_busdev (seg, bus, dev) - */ -static int get_address(struct hotplug_slot *hotplug_slot, u32 *value) -{ - struct slot *slot = hotplug_slot->private; - - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); - - *value = acpiphp_get_address(slot->acpi_slot); - - return 0; -} - static int __init init_acpi(void) { int retval; @@ -357,7 +338,11 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot) acpiphp_slot->slot = slot; snprintf(slot->name, sizeof(slot->name), "%u", slot->acpi_slot->sun); - retval = pci_hp_register(slot->hotplug_slot); + retval = 
pci_hp_register(slot->hotplug_slot, + acpiphp_slot->bridge->pci_bus, + acpiphp_slot->device); + if (retval == -EBUSY) + goto error_hpslot; if (retval) { err("pci_hp_register failed with error %d\n", retval); goto error_hpslot; diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index 91156f85a92..a3e4705dd8f 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -258,7 +258,12 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) bridge->pci_bus->number, slot->device); retval = acpiphp_register_hotplug_slot(slot); if (retval) { - warn("acpiphp_register_hotplug_slot failed(err code = 0x%x)\n", retval); + if (retval == -EBUSY) + warn("Slot %d already registered by another " + "hotplug driver\n", slot->sun); + else + warn("acpiphp_register_hotplug_slot failed " + "(err code = 0x%x)\n", retval); goto err_exit; } } @@ -1878,19 +1883,3 @@ u8 acpiphp_get_adapter_status(struct acpiphp_slot *slot) return (sta == 0) ? 0 : 1; } - - -/* - * pci address (seg/bus/dev) - */ -u32 acpiphp_get_address(struct acpiphp_slot *slot) -{ - u32 address; - struct pci_bus *pci_bus = slot->bridge->pci_bus; - - address = (pci_domain_nr(pci_bus) << 16) | - (pci_bus->number << 8) | - slot->device; - - return address; -} diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c index ede9051fdb5..2b7c45e3937 100644 --- a/drivers/pci/hotplug/acpiphp_ibm.c +++ b/drivers/pci/hotplug/acpiphp_ibm.c @@ -33,8 +33,10 @@ #include <linux/kobject.h> #include <asm/uaccess.h> #include <linux/moduleparam.h> +#include <linux/pci.h> #include "acpiphp.h" +#include "../pci.h" #define DRIVER_VERSION "1.0.1" #define DRIVER_AUTHOR "Irene Zubarev <zubarev@us.ibm.com>, Vernon Mauery <vernux@us.ibm.com>" @@ -430,7 +432,7 @@ static int __init ibm_acpiphp_init(void) int retval = 0; acpi_status status; struct acpi_device *device; - struct kobject *sysdir = &pci_hotplug_slots_kset->kobj; + struct kobject *sysdir = &pci_slots_kset->kobj; dbg("%s\n", __func__); @@ -477,7 +479,7 @@ init_return: static void __exit ibm_acpiphp_exit(void) { acpi_status status; - struct kobject *sysdir = &pci_hotplug_slots_kset->kobj; + struct kobject *sysdir = &pci_slots_kset->kobj; dbg("%s\n", __func__); diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c index d8a6b80ab42..935947991dc 100644 --- a/drivers/pci/hotplug/cpci_hotplug_core.c +++ b/drivers/pci/hotplug/cpci_hotplug_core.c @@ -285,7 +285,7 @@ cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last) info->attention_status = cpci_get_attention_status(slot); dbg("registering slot %s", slot->hotplug_slot->name); - status = pci_hp_register(slot->hotplug_slot); + status = pci_hp_register(slot->hotplug_slot, bus, i); if (status) { err("pci_hp_register failed with error %d", status); goto error_name; diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c index 36b115b27b0..54defec51d0 100644 --- a/drivers/pci/hotplug/cpqphp_core.c +++ b/drivers/pci/hotplug/cpqphp_core.c @@ -434,7 +434,9 @@ static int ctrl_slot_setup(struct controller *ctrl, slot->bus, slot->device, slot->number, ctrl->slot_device_offset, slot_number); - result = pci_hp_register(hotplug_slot); + result = pci_hp_register(hotplug_slot, + ctrl->pci_dev->subordinate, + slot->device); if (result) { err("pci_hp_register failed with error %d\n", result); goto error_name; diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c index 7e9a827c268..40337a06c18 
100644 --- a/drivers/pci/hotplug/fakephp.c +++ b/drivers/pci/hotplug/fakephp.c @@ -66,6 +66,7 @@ struct dummy_slot { struct pci_dev *dev; struct work_struct remove_work; unsigned long removed; + char name[8]; }; static int debug; @@ -100,6 +101,7 @@ static int add_slot(struct pci_dev *dev) struct dummy_slot *dslot; struct hotplug_slot *slot; int retval = -ENOMEM; + static int count = 1; slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL); if (!slot) @@ -113,18 +115,18 @@ static int add_slot(struct pci_dev *dev) slot->info->max_bus_speed = PCI_SPEED_UNKNOWN; slot->info->cur_bus_speed = PCI_SPEED_UNKNOWN; - slot->name = &dev->dev.bus_id[0]; - dbg("slot->name = %s\n", slot->name); - dslot = kzalloc(sizeof(struct dummy_slot), GFP_KERNEL); if (!dslot) goto error_info; + slot->name = dslot->name; + snprintf(slot->name, sizeof(dslot->name), "fake%d", count++); + dbg("slot->name = %s\n", slot->name); slot->ops = &dummy_hotplug_slot_ops; slot->release = &dummy_release; slot->private = dslot; - retval = pci_hp_register(slot); + retval = pci_hp_register(slot, dev->bus, PCI_SLOT(dev->devfn)); if (retval) { err("pci_hp_register failed with error %d\n", retval); goto error_dslot; @@ -148,17 +150,17 @@ error: static int __init pci_scan_buses(void) { struct pci_dev *dev = NULL; - int retval = 0; + int lastslot = 0; while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { - retval = add_slot(dev); - if (retval) { - pci_dev_put(dev); - break; - } + if (PCI_FUNC(dev->devfn) > 0 && + lastslot == PCI_SLOT(dev->devfn)) + continue; + lastslot = PCI_SLOT(dev->devfn); + add_slot(dev); } - return retval; + return 0; } static void remove_slot(struct dummy_slot *dslot) @@ -296,23 +298,9 @@ static int enable_slot(struct hotplug_slot *hotplug_slot) return 0; } -/* find the hotplug_slot for the pci_dev */ -static struct hotplug_slot *get_slot_from_dev(struct pci_dev *dev) -{ - struct dummy_slot *dslot; - - list_for_each_entry(dslot, &slot_list, node) { - if (dslot->dev == dev) - return dslot->slot; - } - return NULL; -} - - static int disable_slot(struct hotplug_slot *slot) { struct dummy_slot *dslot; - struct hotplug_slot *hslot; struct pci_dev *dev; int func; @@ -322,41 +310,27 @@ static int disable_slot(struct hotplug_slot *slot) dbg("%s - physical_slot = %s\n", __func__, slot->name); - /* don't disable bridged devices just yet, we can't handle them easily... */ - if (dslot->dev->subordinate) { - err("Can't remove PCI devices with other PCI devices behind it yet.\n"); - return -ENODEV; - } - if (test_and_set_bit(0, &dslot->removed)) { - dbg("Slot already scheduled for removal\n"); - return -ENODEV; - } - /* search for subfunctions and disable them first */ - if (!(dslot->dev->devfn & 7)) { - for (func = 1; func < 8; func++) { - dev = pci_get_slot(dslot->dev->bus, - dslot->dev->devfn + func); - if (dev) { - hslot = get_slot_from_dev(dev); - if (hslot) - disable_slot(hslot); - else { - err("Hotplug slot not found for subfunction of PCI device\n"); - return -ENODEV; - } - pci_dev_put(dev); - } else - dbg("No device in slot found\n"); + for (func = 7; func >= 0; func--) { + dev = pci_get_slot(dslot->dev->bus, dslot->dev->devfn + func); + if (!dev) + continue; + + if (test_and_set_bit(0, &dslot->removed)) { + dbg("Slot already scheduled for removal\n"); + return -ENODEV; } - } - /* remove the device from the pci core */ - pci_remove_bus_device(dslot->dev); + /* queue work item to blow away this sysfs entry and other + * parts. 
+ */ + INIT_WORK(&dslot->remove_work, remove_slot_worker); + queue_work(dummyphp_wq, &dslot->remove_work); - /* queue work item to blow away this sysfs entry and other parts. */ - INIT_WORK(&dslot->remove_work, remove_slot_worker); - queue_work(dummyphp_wq, &dslot->remove_work); + /* blow away this sysfs entry and other parts. */ + remove_slot(dslot); + pci_dev_put(dev); + } return 0; } diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c index dca7efc14be..8467d028732 100644 --- a/drivers/pci/hotplug/ibmphp_ebda.c +++ b/drivers/pci/hotplug/ibmphp_ebda.c @@ -1001,7 +1001,8 @@ static int __init ebda_rsrc_controller (void) tmp_slot = list_entry (list, struct slot, ibm_slot_list); snprintf (tmp_slot->hotplug_slot->name, 30, "%s", create_file_name (tmp_slot)); - pci_hp_register (tmp_slot->hotplug_slot); + pci_hp_register(tmp_slot->hotplug_slot, + pci_find_bus(0, tmp_slot->bus), tmp_slot->device); } print_ebda_hpc (); diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c index a11021e8ce3..5f85b1b120e 100644 --- a/drivers/pci/hotplug/pci_hotplug_core.c +++ b/drivers/pci/hotplug/pci_hotplug_core.c @@ -40,6 +40,7 @@ #include <linux/pci.h> #include <linux/pci_hotplug.h> #include <asm/uaccess.h> +#include "../pci.h" #define MY_NAME "pci_hotplug" @@ -60,41 +61,7 @@ static int debug; ////////////////////////////////////////////////////////////////// static LIST_HEAD(pci_hotplug_slot_list); - -struct kset *pci_hotplug_slots_kset; - -static ssize_t hotplug_slot_attr_show(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - struct hotplug_slot *slot = to_hotplug_slot(kobj); - struct hotplug_slot_attribute *attribute = to_hotplug_attr(attr); - return attribute->show ? attribute->show(slot, buf) : -EIO; -} - -static ssize_t hotplug_slot_attr_store(struct kobject *kobj, - struct attribute *attr, const char *buf, size_t len) -{ - struct hotplug_slot *slot = to_hotplug_slot(kobj); - struct hotplug_slot_attribute *attribute = to_hotplug_attr(attr); - return attribute->store ? 
attribute->store(slot, buf, len) : -EIO; -} - -static struct sysfs_ops hotplug_slot_sysfs_ops = { - .show = hotplug_slot_attr_show, - .store = hotplug_slot_attr_store, -}; - -static void hotplug_slot_release(struct kobject *kobj) -{ - struct hotplug_slot *slot = to_hotplug_slot(kobj); - if (slot->release) - slot->release(slot); -} - -static struct kobj_type hotplug_slot_ktype = { - .sysfs_ops = &hotplug_slot_sysfs_ops, - .release = &hotplug_slot_release, -}; +static DEFINE_SPINLOCK(pci_hotplug_slot_list_lock); /* these strings match up with the values in pci_bus_speed */ static char *pci_bus_speed_strings[] = { @@ -149,16 +116,15 @@ GET_STATUS(power_status, u8) GET_STATUS(attention_status, u8) GET_STATUS(latch_status, u8) GET_STATUS(adapter_status, u8) -GET_STATUS(address, u32) GET_STATUS(max_bus_speed, enum pci_bus_speed) GET_STATUS(cur_bus_speed, enum pci_bus_speed) -static ssize_t power_read_file (struct hotplug_slot *slot, char *buf) +static ssize_t power_read_file(struct pci_slot *slot, char *buf) { int retval; u8 value; - retval = get_power_status (slot, &value); + retval = get_power_status(slot->hotplug, &value); if (retval) goto exit; retval = sprintf (buf, "%d\n", value); @@ -166,9 +132,10 @@ exit: return retval; } -static ssize_t power_write_file (struct hotplug_slot *slot, const char *buf, +static ssize_t power_write_file(struct pci_slot *pci_slot, const char *buf, size_t count) { + struct hotplug_slot *slot = pci_slot->hotplug; unsigned long lpower; u8 power; int retval = 0; @@ -204,29 +171,30 @@ exit: return count; } -static struct hotplug_slot_attribute hotplug_slot_attr_power = { +static struct pci_slot_attribute hotplug_slot_attr_power = { .attr = {.name = "power", .mode = S_IFREG | S_IRUGO | S_IWUSR}, .show = power_read_file, .store = power_write_file }; -static ssize_t attention_read_file (struct hotplug_slot *slot, char *buf) +static ssize_t attention_read_file(struct pci_slot *slot, char *buf) { int retval; u8 value; - retval = get_attention_status (slot, &value); + retval = get_attention_status(slot->hotplug, &value); if (retval) goto exit; - retval = sprintf (buf, "%d\n", value); + retval = sprintf(buf, "%d\n", value); exit: return retval; } -static ssize_t attention_write_file (struct hotplug_slot *slot, const char *buf, +static ssize_t attention_write_file(struct pci_slot *slot, const char *buf, size_t count) { + struct hotplug_slot_ops *ops = slot->hotplug->ops; unsigned long lattention; u8 attention; int retval = 0; @@ -235,13 +203,13 @@ static ssize_t attention_write_file (struct hotplug_slot *slot, const char *buf, attention = (u8)(lattention & 0xff); dbg (" - attention = %d\n", attention); - if (!try_module_get(slot->ops->owner)) { + if (!try_module_get(ops->owner)) { retval = -ENODEV; goto exit; } - if (slot->ops->set_attention_status) - retval = slot->ops->set_attention_status(slot, attention); - module_put(slot->ops->owner); + if (ops->set_attention_status) + retval = ops->set_attention_status(slot->hotplug, attention); + module_put(ops->owner); exit: if (retval) @@ -249,18 +217,18 @@ exit: return count; } -static struct hotplug_slot_attribute hotplug_slot_attr_attention = { +static struct pci_slot_attribute hotplug_slot_attr_attention = { .attr = {.name = "attention", .mode = S_IFREG | S_IRUGO | S_IWUSR}, .show = attention_read_file, .store = attention_write_file }; -static ssize_t latch_read_file (struct hotplug_slot *slot, char *buf) +static ssize_t latch_read_file(struct pci_slot *slot, char *buf) { int retval; u8 value; - retval = get_latch_status 
(slot, &value); + retval = get_latch_status(slot->hotplug, &value); if (retval) goto exit; retval = sprintf (buf, "%d\n", value); @@ -269,17 +237,17 @@ exit: return retval; } -static struct hotplug_slot_attribute hotplug_slot_attr_latch = { +static struct pci_slot_attribute hotplug_slot_attr_latch = { .attr = {.name = "latch", .mode = S_IFREG | S_IRUGO}, .show = latch_read_file, }; -static ssize_t presence_read_file (struct hotplug_slot *slot, char *buf) +static ssize_t presence_read_file(struct pci_slot *slot, char *buf) { int retval; u8 value; - retval = get_adapter_status (slot, &value); + retval = get_adapter_status(slot->hotplug, &value); if (retval) goto exit; retval = sprintf (buf, "%d\n", value); @@ -288,42 +256,20 @@ exit: return retval; } -static struct hotplug_slot_attribute hotplug_slot_attr_presence = { +static struct pci_slot_attribute hotplug_slot_attr_presence = { .attr = {.name = "adapter", .mode = S_IFREG | S_IRUGO}, .show = presence_read_file, }; -static ssize_t address_read_file (struct hotplug_slot *slot, char *buf) -{ - int retval; - u32 address; - - retval = get_address (slot, &address); - if (retval) - goto exit; - retval = sprintf (buf, "%04x:%02x:%02x\n", - (address >> 16) & 0xffff, - (address >> 8) & 0xff, - address & 0xff); - -exit: - return retval; -} - -static struct hotplug_slot_attribute hotplug_slot_attr_address = { - .attr = {.name = "address", .mode = S_IFREG | S_IRUGO}, - .show = address_read_file, -}; - static char *unknown_speed = "Unknown bus speed"; -static ssize_t max_bus_speed_read_file (struct hotplug_slot *slot, char *buf) +static ssize_t max_bus_speed_read_file(struct pci_slot *slot, char *buf) { char *speed_string; int retval; enum pci_bus_speed value; - retval = get_max_bus_speed (slot, &value); + retval = get_max_bus_speed(slot->hotplug, &value); if (retval) goto exit; @@ -338,18 +284,18 @@ exit: return retval; } -static struct hotplug_slot_attribute hotplug_slot_attr_max_bus_speed = { +static struct pci_slot_attribute hotplug_slot_attr_max_bus_speed = { .attr = {.name = "max_bus_speed", .mode = S_IFREG | S_IRUGO}, .show = max_bus_speed_read_file, }; -static ssize_t cur_bus_speed_read_file (struct hotplug_slot *slot, char *buf) +static ssize_t cur_bus_speed_read_file(struct pci_slot *slot, char *buf) { char *speed_string; int retval; enum pci_bus_speed value; - retval = get_cur_bus_speed (slot, &value); + retval = get_cur_bus_speed(slot->hotplug, &value); if (retval) goto exit; @@ -364,14 +310,15 @@ exit: return retval; } -static struct hotplug_slot_attribute hotplug_slot_attr_cur_bus_speed = { +static struct pci_slot_attribute hotplug_slot_attr_cur_bus_speed = { .attr = {.name = "cur_bus_speed", .mode = S_IFREG | S_IRUGO}, .show = cur_bus_speed_read_file, }; -static ssize_t test_write_file (struct hotplug_slot *slot, const char *buf, +static ssize_t test_write_file(struct pci_slot *pci_slot, const char *buf, size_t count) { + struct hotplug_slot *slot = pci_slot->hotplug; unsigned long ltest; u32 test; int retval = 0; @@ -394,13 +341,14 @@ exit: return count; } -static struct hotplug_slot_attribute hotplug_slot_attr_test = { +static struct pci_slot_attribute hotplug_slot_attr_test = { .attr = {.name = "test", .mode = S_IFREG | S_IRUGO | S_IWUSR}, .store = test_write_file }; -static int has_power_file (struct hotplug_slot *slot) +static int has_power_file(struct pci_slot *pci_slot) { + struct hotplug_slot *slot = pci_slot->hotplug; if ((!slot) || (!slot->ops)) return -ENODEV; if ((slot->ops->enable_slot) || @@ -410,8 +358,9 @@ static int 
has_power_file (struct hotplug_slot *slot) return -ENOENT; } -static int has_attention_file (struct hotplug_slot *slot) +static int has_attention_file(struct pci_slot *pci_slot) { + struct hotplug_slot *slot = pci_slot->hotplug; if ((!slot) || (!slot->ops)) return -ENODEV; if ((slot->ops->set_attention_status) || @@ -420,8 +369,9 @@ static int has_attention_file (struct hotplug_slot *slot) return -ENOENT; } -static int has_latch_file (struct hotplug_slot *slot) +static int has_latch_file(struct pci_slot *pci_slot) { + struct hotplug_slot *slot = pci_slot->hotplug; if ((!slot) || (!slot->ops)) return -ENODEV; if (slot->ops->get_latch_status) @@ -429,8 +379,9 @@ static int has_latch_file (struct hotplug_slot *slot) return -ENOENT; } -static int has_adapter_file (struct hotplug_slot *slot) +static int has_adapter_file(struct pci_slot *pci_slot) { + struct hotplug_slot *slot = pci_slot->hotplug; if ((!slot) || (!slot->ops)) return -ENODEV; if (slot->ops->get_adapter_status) @@ -438,17 +389,9 @@ static int has_adapter_file (struct hotplug_slot *slot) return -ENOENT; } -static int has_address_file (struct hotplug_slot *slot) -{ - if ((!slot) || (!slot->ops)) - return -ENODEV; - if (slot->ops->get_address) - return 0; - return -ENOENT; -} - -static int has_max_bus_speed_file (struct hotplug_slot *slot) +static int has_max_bus_speed_file(struct pci_slot *pci_slot) { + struct hotplug_slot *slot = pci_slot->hotplug; if ((!slot) || (!slot->ops)) return -ENODEV; if (slot->ops->get_max_bus_speed) @@ -456,8 +399,9 @@ static int has_max_bus_speed_file (struct hotplug_slot *slot) return -ENOENT; } -static int has_cur_bus_speed_file (struct hotplug_slot *slot) +static int has_cur_bus_speed_file(struct pci_slot *pci_slot) { + struct hotplug_slot *slot = pci_slot->hotplug; if ((!slot) || (!slot->ops)) return -ENODEV; if (slot->ops->get_cur_bus_speed) @@ -465,8 +409,9 @@ static int has_cur_bus_speed_file (struct hotplug_slot *slot) return -ENOENT; } -static int has_test_file (struct hotplug_slot *slot) +static int has_test_file(struct pci_slot *pci_slot) { + struct hotplug_slot *slot = pci_slot->hotplug; if ((!slot) || (!slot->ops)) return -ENODEV; if (slot->ops->hardware_test) @@ -474,7 +419,7 @@ static int has_test_file (struct hotplug_slot *slot) return -ENOENT; } -static int fs_add_slot (struct hotplug_slot *slot) +static int fs_add_slot(struct pci_slot *slot) { int retval = 0; @@ -505,13 +450,6 @@ static int fs_add_slot (struct hotplug_slot *slot) goto exit_adapter; } - if (has_address_file(slot) == 0) { - retval = sysfs_create_file(&slot->kobj, - &hotplug_slot_attr_address.attr); - if (retval) - goto exit_address; - } - if (has_max_bus_speed_file(slot) == 0) { retval = sysfs_create_file(&slot->kobj, &hotplug_slot_attr_max_bus_speed.attr); @@ -544,10 +482,6 @@ exit_cur_speed: sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_max_bus_speed.attr); exit_max_speed: - if (has_address_file(slot) == 0) - sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_address.attr); - -exit_address: if (has_adapter_file(slot) == 0) sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_presence.attr); @@ -567,7 +501,7 @@ exit: return retval; } -static void fs_remove_slot (struct hotplug_slot *slot) +static void fs_remove_slot(struct pci_slot *slot) { if (has_power_file(slot) == 0) sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_power.attr); @@ -581,9 +515,6 @@ static void fs_remove_slot (struct hotplug_slot *slot) if (has_adapter_file(slot) == 0) sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_presence.attr); - if 
(has_address_file(slot) == 0) - sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_address.attr); - if (has_max_bus_speed_file(slot) == 0) sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_max_bus_speed.attr); @@ -599,27 +530,33 @@ static struct hotplug_slot *get_slot_from_name (const char *name) struct hotplug_slot *slot; struct list_head *tmp; + spin_lock(&pci_hotplug_slot_list_lock); list_for_each (tmp, &pci_hotplug_slot_list) { slot = list_entry (tmp, struct hotplug_slot, slot_list); if (strcmp(slot->name, name) == 0) - return slot; + goto out; } - return NULL; + slot = NULL; +out: + spin_unlock(&pci_hotplug_slot_list_lock); + return slot; } /** * pci_hp_register - register a hotplug_slot with the PCI hotplug subsystem + * @bus: bus this slot is on * @slot: pointer to the &struct hotplug_slot to register + * @slot_nr: slot number * * Registers a hotplug slot with the pci hotplug subsystem, which will allow * userspace interaction to the slot. * * Returns 0 if successful, anything else for an error. */ -int pci_hp_register (struct hotplug_slot *slot) +int pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus, int slot_nr) { int result; - struct hotplug_slot *tmp; + struct pci_slot *pci_slot; if (slot == NULL) return -ENODEV; @@ -632,57 +569,89 @@ int pci_hp_register (struct hotplug_slot *slot) } /* Check if we have already registered a slot with the same name. */ - tmp = get_slot_from_name(slot->name); - if (tmp) + if (get_slot_from_name(slot->name)) return -EEXIST; - slot->kobj.kset = pci_hotplug_slots_kset; - result = kobject_init_and_add(&slot->kobj, &hotplug_slot_ktype, NULL, - "%s", slot->name); - if (result) { - err("Unable to register kobject '%s'", slot->name); - return -EINVAL; + /* + * No problems if we call this interface from both ACPI_PCI_SLOT + * driver and call it here again. If we've already created the + * pci_slot, the interface will simply bump the refcount. + */ + pci_slot = pci_create_slot(bus, slot_nr, slot->name); + if (IS_ERR(pci_slot)) + return PTR_ERR(pci_slot); + + if (pci_slot->hotplug) { + dbg("%s: already claimed\n", __func__); + pci_destroy_slot(pci_slot); + return -EBUSY; } - list_add (&slot->slot_list, &pci_hotplug_slot_list); + slot->pci_slot = pci_slot; + pci_slot->hotplug = slot; + + /* + * Allow pcihp drivers to override the ACPI_PCI_SLOT name. + */ + if (strcmp(kobject_name(&pci_slot->kobj), slot->name)) { + result = kobject_rename(&pci_slot->kobj, slot->name); + if (result) { + pci_destroy_slot(pci_slot); + return result; + } + } + + spin_lock(&pci_hotplug_slot_list_lock); + list_add(&slot->slot_list, &pci_hotplug_slot_list); + spin_unlock(&pci_hotplug_slot_list_lock); + + result = fs_add_slot(pci_slot); + kobject_uevent(&pci_slot->kobj, KOBJ_ADD); + dbg("Added slot %s to the list\n", slot->name); + - result = fs_add_slot (slot); - kobject_uevent(&slot->kobj, KOBJ_ADD); - dbg ("Added slot %s to the list\n", slot->name); return result; } /** * pci_hp_deregister - deregister a hotplug_slot with the PCI hotplug subsystem - * @slot: pointer to the &struct hotplug_slot to deregister + * @hotplug: pointer to the &struct hotplug_slot to deregister * * The @slot must have been registered with the pci hotplug subsystem * previously with a call to pci_hp_register(). * * Returns 0 if successful, anything else for an error. 
*/ -int pci_hp_deregister (struct hotplug_slot *slot) +int pci_hp_deregister(struct hotplug_slot *hotplug) { struct hotplug_slot *temp; + struct pci_slot *slot; - if (slot == NULL) + if (!hotplug) return -ENODEV; - temp = get_slot_from_name (slot->name); - if (temp != slot) { + temp = get_slot_from_name(hotplug->name); + if (temp != hotplug) return -ENODEV; - } - list_del (&slot->slot_list); - fs_remove_slot (slot); - dbg ("Removed slot %s from the list\n", slot->name); - kobject_put(&slot->kobj); + spin_lock(&pci_hotplug_slot_list_lock); + list_del(&hotplug->slot_list); + spin_unlock(&pci_hotplug_slot_list_lock); + + slot = hotplug->pci_slot; + fs_remove_slot(slot); + dbg("Removed slot %s from the list\n", hotplug->name); + + hotplug->release(hotplug); + slot->hotplug = NULL; + pci_destroy_slot(slot); + return 0; } /** * pci_hp_change_slot_info - changes the slot's information structure in the core - * @slot: pointer to the slot whose info has changed + * @hotplug: pointer to the slot whose info has changed * @info: pointer to the info copy into the slot's info structure * * @slot must have been registered with the pci @@ -690,13 +659,15 @@ int pci_hp_deregister (struct hotplug_slot *slot) * * Returns 0 if successful, anything else for an error. */ -int __must_check pci_hp_change_slot_info(struct hotplug_slot *slot, +int __must_check pci_hp_change_slot_info(struct hotplug_slot *hotplug, struct hotplug_slot_info *info) { - if ((slot == NULL) || (info == NULL)) + struct pci_slot *slot; + if (!hotplug || !info) return -ENODEV; + slot = hotplug->pci_slot; - memcpy (slot->info, info, sizeof (struct hotplug_slot_info)); + memcpy(hotplug->info, info, sizeof(struct hotplug_slot_info)); return 0; } @@ -704,36 +675,22 @@ int __must_check pci_hp_change_slot_info(struct hotplug_slot *slot, static int __init pci_hotplug_init (void) { int result; - struct kset *pci_bus_kset; - pci_bus_kset = bus_get_kset(&pci_bus_type); - - pci_hotplug_slots_kset = kset_create_and_add("slots", NULL, - &pci_bus_kset->kobj); - if (!pci_hotplug_slots_kset) { - result = -ENOMEM; - err("Register subsys error\n"); - goto exit; - } result = cpci_hotplug_init(debug); if (result) { err ("cpci_hotplug_init with error %d\n", result); - goto err_subsys; + goto err_cpci; } info (DRIVER_DESC " version: " DRIVER_VERSION "\n"); - goto exit; -err_subsys: - kset_unregister(pci_hotplug_slots_kset); -exit: +err_cpci: return result; } static void __exit pci_hotplug_exit (void) { cpci_hotplug_exit(); - kset_unregister(pci_hotplug_slots_kset); } module_init(pci_hotplug_init); @@ -745,7 +702,6 @@ MODULE_LICENSE("GPL"); module_param(debug, bool, 0644); MODULE_PARM_DESC(debug, "Debugging mode enabled or not"); -EXPORT_SYMBOL_GPL(pci_hotplug_slots_kset); EXPORT_SYMBOL_GPL(pci_hp_register); EXPORT_SYMBOL_GPL(pci_hp_deregister); EXPORT_SYMBOL_GPL(pci_hp_change_slot_info); diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index 79c9ddaad3f..e3a1e7e7dba 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h @@ -43,6 +43,7 @@ extern int pciehp_poll_mode; extern int pciehp_poll_time; extern int pciehp_debug; extern int pciehp_force; +extern int pciehp_slot_with_bus; extern struct workqueue_struct *pciehp_wq; #define dbg(format, arg...) 
\ @@ -96,7 +97,7 @@ struct controller { u32 slot_cap; u8 cap_base; struct timer_list poll_timer; - volatile int cmd_busy; + int cmd_busy; unsigned int no_cmd_complete:1; }; @@ -156,10 +157,10 @@ extern u8 pciehp_handle_power_fault(struct slot *p_slot); extern int pciehp_configure_device(struct slot *p_slot); extern int pciehp_unconfigure_device(struct slot *p_slot); extern void pciehp_queue_pushbutton_work(struct work_struct *work); -int pcie_init(struct controller *ctrl, struct pcie_device *dev); +struct controller *pcie_init(struct pcie_device *dev); int pciehp_enable_slot(struct slot *p_slot); int pciehp_disable_slot(struct slot *p_slot); -int pcie_init_hardware_part2(struct controller *ctrl, struct pcie_device *dev); +int pcie_enable_notification(struct controller *ctrl); static inline struct slot *pciehp_find_slot(struct controller *ctrl, u8 device) { @@ -202,8 +203,13 @@ struct hpc_ops { #include <acpi/actypes.h> #include <linux/pci-acpi.h> -#define pciehp_get_hp_hw_control_from_firmware(dev) \ - pciehp_acpi_get_hp_hw_control_from_firmware(dev) +static inline int pciehp_get_hp_hw_control_from_firmware(struct pci_dev *dev) +{ + u32 flags = (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | + OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); + return acpi_get_hp_hw_control_from_firmware(dev, flags); +} + static inline int pciehp_get_hp_params_from_firmware(struct pci_dev *dev, struct hotplug_params *hpp) { diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index 48a2ed37891..3677495c4f9 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c @@ -72,7 +72,6 @@ static int get_power_status (struct hotplug_slot *slot, u8 *value); static int get_attention_status (struct hotplug_slot *slot, u8 *value); static int get_latch_status (struct hotplug_slot *slot, u8 *value); static int get_adapter_status (struct hotplug_slot *slot, u8 *value); -static int get_address (struct hotplug_slot *slot, u32 *value); static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); @@ -85,7 +84,6 @@ static struct hotplug_slot_ops pciehp_hotplug_slot_ops = { .get_attention_status = get_attention_status, .get_latch_status = get_latch_status, .get_adapter_status = get_adapter_status, - .get_address = get_address, .get_max_bus_speed = get_max_bus_speed, .get_cur_bus_speed = get_cur_bus_speed, }; @@ -185,23 +183,10 @@ static struct hotplug_slot_attribute hotplug_slot_attr_lock = { */ static void release_slot(struct hotplug_slot *hotplug_slot) { - struct slot *slot = hotplug_slot->private; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); - kfree(slot->hotplug_slot->info); - kfree(slot->hotplug_slot); - kfree(slot); -} - -static void make_slot_name(struct slot *slot) -{ - if (pciehp_slot_with_bus) - snprintf(slot->hotplug_slot->name, SLOT_NAME_SIZE, "%04d_%04d", - slot->bus, slot->number); - else - snprintf(slot->hotplug_slot->name, SLOT_NAME_SIZE, "%d", - slot->number); + kfree(hotplug_slot->info); + kfree(hotplug_slot); } static int init_slots(struct controller *ctrl) @@ -210,49 +195,34 @@ static int init_slots(struct controller *ctrl) struct hotplug_slot *hotplug_slot; struct hotplug_slot_info *info; int retval = -ENOMEM; - int i; - - for (i = 0; i < ctrl->num_slots; i++) { - slot = kzalloc(sizeof(*slot), GFP_KERNEL); - if (!slot) - goto error; + list_for_each_entry(slot, &ctrl->slot_list, slot_list) { hotplug_slot = kzalloc(sizeof(*hotplug_slot), 
GFP_KERNEL); if (!hotplug_slot) - goto error_slot; - slot->hotplug_slot = hotplug_slot; + goto error; info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) goto error_hpslot; - hotplug_slot->info = info; - - hotplug_slot->name = slot->name; - - slot->hp_slot = i; - slot->ctrl = ctrl; - slot->bus = ctrl->pci_dev->subordinate->number; - slot->device = ctrl->slot_device_offset + i; - slot->hpc_ops = ctrl->hpc_ops; - slot->number = ctrl->first_slot; - mutex_init(&slot->lock); - INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work); /* register this slot with the hotplug pci core */ + hotplug_slot->info = info; + hotplug_slot->name = slot->name; hotplug_slot->private = slot; hotplug_slot->release = &release_slot; - make_slot_name(slot); hotplug_slot->ops = &pciehp_hotplug_slot_ops; - get_power_status(hotplug_slot, &info->power_status); get_attention_status(hotplug_slot, &info->attention_status); get_latch_status(hotplug_slot, &info->latch_status); get_adapter_status(hotplug_slot, &info->adapter_status); + slot->hotplug_slot = hotplug_slot; dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x " "slot_device_offset=%x\n", slot->bus, slot->device, slot->hp_slot, slot->number, ctrl->slot_device_offset); - retval = pci_hp_register(hotplug_slot); + retval = pci_hp_register(hotplug_slot, + ctrl->pci_dev->subordinate, + slot->device); if (retval) { err("pci_hp_register failed with error %d\n", retval); if (retval == -EEXIST) @@ -263,7 +233,7 @@ static int init_slots(struct controller *ctrl) } /* create additional sysfs entries */ if (EMI(ctrl)) { - retval = sysfs_create_file(&hotplug_slot->kobj, + retval = sysfs_create_file(&hotplug_slot->pci_slot->kobj, &hotplug_slot_attr_lock.attr); if (retval) { pci_hp_deregister(hotplug_slot); @@ -271,8 +241,6 @@ static int init_slots(struct controller *ctrl) goto error_info; } } - - list_add(&slot->slot_list, &ctrl->slot_list); } return 0; @@ -280,27 +248,18 @@ error_info: kfree(info); error_hpslot: kfree(hotplug_slot); -error_slot: - kfree(slot); error: return retval; } static void cleanup_slots(struct controller *ctrl) { - struct list_head *tmp; - struct list_head *next; struct slot *slot; - list_for_each_safe(tmp, next, &ctrl->slot_list) { - slot = list_entry(tmp, struct slot, slot_list); - list_del(&slot->slot_list); + list_for_each_entry(slot, &ctrl->slot_list, slot_list) { if (EMI(ctrl)) - sysfs_remove_file(&slot->hotplug_slot->kobj, + sysfs_remove_file(&slot->hotplug_slot->pci_slot->kobj, &hotplug_slot_attr_lock.attr); - cancel_delayed_work(&slot->work); - flush_scheduled_work(); - flush_workqueue(pciehp_wq); pci_hp_deregister(slot->hotplug_slot); } } @@ -398,19 +357,8 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) return 0; } -static int get_address(struct hotplug_slot *hotplug_slot, u32 *value) -{ - struct slot *slot = hotplug_slot->private; - struct pci_bus *bus = slot->ctrl->pci_dev->subordinate; - - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); - - *value = (pci_domain_nr(bus) << 16) | (slot->bus << 8) | slot->device; - - return 0; -} - -static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value) +static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, + enum pci_bus_speed *value) { struct slot *slot = hotplug_slot->private; int retval; @@ -444,34 +392,30 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_ struct controller *ctrl; struct slot *t_slot; u8 value; - struct pci_dev *pdev; + struct pci_dev *pdev = dev->port; - ctrl = 
kzalloc(sizeof(*ctrl), GFP_KERNEL); - if (!ctrl) { - err("%s : out of memory\n", __func__); + if (pciehp_force) + dbg("Bypassing BIOS check for pciehp use on %s\n", + pci_name(pdev)); + else if (pciehp_get_hp_hw_control_from_firmware(pdev)) goto err_out_none; - } - INIT_LIST_HEAD(&ctrl->slot_list); - - pdev = dev->port; - ctrl->pci_dev = pdev; - rc = pcie_init(ctrl, dev); - if (rc) { + ctrl = pcie_init(dev); + if (!ctrl) { dbg("%s: controller initialization failed\n", PCIE_MODULE_NAME); - goto err_out_free_ctrl; + goto err_out_none; } - - pci_set_drvdata(pdev, ctrl); - - dbg("%s: ctrl bus=0x%x, device=%x, function=%x, irq=%x\n", - __func__, pdev->bus->number, PCI_SLOT(pdev->devfn), - PCI_FUNC(pdev->devfn), pdev->irq); + set_service_data(dev, ctrl); /* Setup the slot information structures */ rc = init_slots(ctrl); if (rc) { - err("%s: slot initialization failed\n", PCIE_MODULE_NAME); + if (rc == -EBUSY) + warn("%s: slot already registered by another " + "hotplug driver\n", PCIE_MODULE_NAME); + else + err("%s: slot initialization failed\n", + PCIE_MODULE_NAME); goto err_out_release_ctlr; } @@ -495,20 +439,16 @@ err_out_free_ctrl_slot: cleanup_slots(ctrl); err_out_release_ctlr: ctrl->hpc_ops->release_ctlr(ctrl); -err_out_free_ctrl: - kfree(ctrl); err_out_none: return -ENODEV; } static void pciehp_remove (struct pcie_device *dev) { - struct pci_dev *pdev = dev->port; - struct controller *ctrl = pci_get_drvdata(pdev); + struct controller *ctrl = get_service_data(dev); cleanup_slots(ctrl); ctrl->hpc_ops->release_ctlr(ctrl); - kfree(ctrl); } #ifdef CONFIG_PM @@ -522,13 +462,12 @@ static int pciehp_resume (struct pcie_device *dev) { printk("%s ENTRY\n", __func__); if (pciehp_force) { - struct pci_dev *pdev = dev->port; - struct controller *ctrl = pci_get_drvdata(pdev); + struct controller *ctrl = get_service_data(dev); struct slot *t_slot; u8 status; /* reinitialize the chipset's event detection logic */ - pcie_init_hardware_part2(ctrl, dev); + pcie_enable_notification(ctrl); t_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset); diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 79f10496316..1323a43285d 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -247,30 +247,32 @@ static inline void pciehp_free_irq(struct controller *ctrl) free_irq(ctrl->pci_dev->irq, ctrl); } -static inline int pcie_poll_cmd(struct controller *ctrl) +static int pcie_poll_cmd(struct controller *ctrl) { u16 slot_status; int timeout = 1000; - if (!pciehp_readw(ctrl, SLOTSTATUS, &slot_status)) - if (slot_status & CMD_COMPLETED) - goto completed; - for (timeout = 1000; timeout > 0; timeout -= 100) { - msleep(100); - if (!pciehp_readw(ctrl, SLOTSTATUS, &slot_status)) - if (slot_status & CMD_COMPLETED) - goto completed; + if (!pciehp_readw(ctrl, SLOTSTATUS, &slot_status)) { + if (slot_status & CMD_COMPLETED) { + pciehp_writew(ctrl, SLOTSTATUS, CMD_COMPLETED); + return 1; + } + } + while (timeout > 1000) { + msleep(10); + timeout -= 10; + if (!pciehp_readw(ctrl, SLOTSTATUS, &slot_status)) { + if (slot_status & CMD_COMPLETED) { + pciehp_writew(ctrl, SLOTSTATUS, CMD_COMPLETED); + return 1; + } + } } return 0; /* timeout */ - -completed: - pciehp_writew(ctrl, SLOTSTATUS, CMD_COMPLETED); - return timeout; } -static inline int pcie_wait_cmd(struct controller *ctrl, int poll) +static void pcie_wait_cmd(struct controller *ctrl, int poll) { - int retval = 0; unsigned int msecs = pciehp_poll_mode ? 
2500 : 1000; unsigned long timeout = msecs_to_jiffies(msecs); int rc; @@ -278,16 +280,9 @@ static inline int pcie_wait_cmd(struct controller *ctrl, int poll) if (poll) rc = pcie_poll_cmd(ctrl); else - rc = wait_event_interruptible_timeout(ctrl->queue, - !ctrl->cmd_busy, timeout); + rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout); if (!rc) dbg("Command not completed in 1000 msec\n"); - else if (rc < 0) { - retval = -EINTR; - info("Command was interrupted by a signal\n"); - } - - return retval; } /** @@ -342,10 +337,6 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask) slot_ctrl &= ~mask; slot_ctrl |= (cmd & mask); - /* Don't enable command completed if caller is changing it. */ - if (!(mask & CMD_CMPL_INTR_ENABLE)) - slot_ctrl |= CMD_CMPL_INTR_ENABLE; - ctrl->cmd_busy = 1; smp_mb(); retval = pciehp_writew(ctrl, SLOTCTRL, slot_ctrl); @@ -365,7 +356,7 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask) if (!(slot_ctrl & HP_INTR_ENABLE) || !(slot_ctrl & CMD_CMPL_INTR_ENABLE)) poll = 1; - retval = pcie_wait_cmd(ctrl, poll); + pcie_wait_cmd(ctrl, poll); } out: mutex_unlock(&ctrl->ctrl_lock); @@ -614,23 +605,6 @@ static void hpc_set_green_led_blink(struct slot *slot) __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); } -static void hpc_release_ctlr(struct controller *ctrl) -{ - /* Mask Hot-plug Interrupt Enable */ - if (pcie_write_cmd(ctrl, 0, HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE)) - err("%s: Cannot mask hotplut interrupt enable\n", __func__); - - /* Free interrupt handler or interrupt polling timer */ - pciehp_free_irq(ctrl); - - /* - * If this is the last controller to be released, destroy the - * pciehp work queue - */ - if (atomic_dec_and_test(&pciehp_num_controllers)) - destroy_workqueue(pciehp_wq); -} - static int hpc_power_on_slot(struct slot * slot) { struct controller *ctrl = slot->ctrl; @@ -785,7 +759,7 @@ static irqreturn_t pcie_isr(int irq, void *dev_id) intr_loc |= detected; if (!intr_loc) return IRQ_NONE; - if (pciehp_writew(ctrl, SLOTSTATUS, detected)) { + if (detected && pciehp_writew(ctrl, SLOTSTATUS, detected)) { err("%s: Cannot write to SLOTSTATUS\n", __func__); return IRQ_NONE; } @@ -797,25 +771,13 @@ static irqreturn_t pcie_isr(int irq, void *dev_id) if (intr_loc & CMD_COMPLETED) { ctrl->cmd_busy = 0; smp_mb(); - wake_up_interruptible(&ctrl->queue); + wake_up(&ctrl->queue); } if (!(intr_loc & ~CMD_COMPLETED)) return IRQ_HANDLED; - /* - * Return without handling events if this handler routine is - * called before controller initialization is done. This may - * happen if hotplug event or another interrupt that shares - * the IRQ with pciehp arrives before slot initialization is - * done after interrupt handler is registered. - * - * FIXME - Need more structural fixes. We need to be ready to - * handle the event before installing interrupt handler. 
- */ p_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset); - if (!p_slot || !p_slot->hpc_ops) - return IRQ_HANDLED; /* Check MRL Sensor Changed */ if (intr_loc & MRL_SENS_CHANGED) @@ -992,6 +954,7 @@ static int hpc_get_cur_lnk_width(struct slot *slot, return retval; } +static void pcie_release_ctrl(struct controller *ctrl); static struct hpc_ops pciehp_hpc_ops = { .power_on_slot = hpc_power_on_slot, .power_off_slot = hpc_power_off_slot, @@ -1013,97 +976,11 @@ static struct hpc_ops pciehp_hpc_ops = { .green_led_off = hpc_set_green_led_off, .green_led_blink = hpc_set_green_led_blink, - .release_ctlr = hpc_release_ctlr, + .release_ctlr = pcie_release_ctrl, .check_lnk_status = hpc_check_lnk_status, }; -#ifdef CONFIG_ACPI -static int pciehp_acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev) -{ - acpi_status status; - acpi_handle chandle, handle = DEVICE_ACPI_HANDLE(&(dev->dev)); - struct pci_dev *pdev = dev; - struct pci_bus *parent; - struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; - - /* - * Per PCI firmware specification, we should run the ACPI _OSC - * method to get control of hotplug hardware before using it. - * If an _OSC is missing, we look for an OSHP to do the same thing. - * To handle different BIOS behavior, we look for _OSC and OSHP - * within the scope of the hotplug controller and its parents, upto - * the host bridge under which this controller exists. - */ - while (!handle) { - /* - * This hotplug controller was not listed in the ACPI name - * space at all. Try to get acpi handle of parent pci bus. - */ - if (!pdev || !pdev->bus->parent) - break; - parent = pdev->bus->parent; - dbg("Could not find %s in acpi namespace, trying parent\n", - pci_name(pdev)); - if (!parent->self) - /* Parent must be a host bridge */ - handle = acpi_get_pci_rootbridge_handle( - pci_domain_nr(parent), - parent->number); - else - handle = DEVICE_ACPI_HANDLE( - &(parent->self->dev)); - pdev = parent->self; - } - - while (handle) { - acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); - dbg("Trying to get hotplug control for %s \n", - (char *)string.pointer); - status = pci_osc_control_set(handle, - OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL | - OSC_PCI_EXPRESS_NATIVE_HP_CONTROL); - if (status == AE_NOT_FOUND) - status = acpi_run_oshp(handle); - if (ACPI_SUCCESS(status)) { - dbg("Gained control for hotplug HW for pci %s (%s)\n", - pci_name(dev), (char *)string.pointer); - kfree(string.pointer); - return 0; - } - if (acpi_root_bridge(handle)) - break; - chandle = handle; - status = acpi_get_parent(chandle, &handle); - if (ACPI_FAILURE(status)) - break; - } - - dbg("Cannot get control of hotplug hardware for pci %s\n", - pci_name(dev)); - - kfree(string.pointer); - return -1; -} -#endif - -static int pcie_init_hardware_part1(struct controller *ctrl, - struct pcie_device *dev) -{ - /* Clear all remaining event bits in Slot Status register */ - if (pciehp_writew(ctrl, SLOTSTATUS, 0x1f)) { - err("%s: Cannot write to SLOTSTATUS register\n", __func__); - return -1; - } - - /* Mask Hot-plug Interrupt Enable */ - if (pcie_write_cmd(ctrl, 0, HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE)) { - err("%s: Cannot mask hotplug interrupt enable\n", __func__); - return -1; - } - return 0; -} - -int pcie_init_hardware_part2(struct controller *ctrl, struct pcie_device *dev) +int pcie_enable_notification(struct controller *ctrl) { u16 cmd, mask; @@ -1115,30 +992,83 @@ int pcie_init_hardware_part2(struct controller *ctrl, struct pcie_device *dev) if (MRL_SENS(ctrl)) cmd |= MRL_DETECT_ENABLE; if (!pciehp_poll_mode) 
- cmd |= HP_INTR_ENABLE; + cmd |= HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE; - mask = PRSN_DETECT_ENABLE | ATTN_BUTTN_ENABLE | - PWR_FAULT_DETECT_ENABLE | MRL_DETECT_ENABLE | HP_INTR_ENABLE; + mask = PRSN_DETECT_ENABLE | ATTN_BUTTN_ENABLE | MRL_DETECT_ENABLE | + PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE; if (pcie_write_cmd(ctrl, cmd, mask)) { err("%s: Cannot enable software notification\n", __func__); - goto abort; + return -1; } + return 0; +} - if (pciehp_force) - dbg("Bypassing BIOS check for pciehp use on %s\n", - pci_name(ctrl->pci_dev)); - else if (pciehp_get_hp_hw_control_from_firmware(ctrl->pci_dev)) - goto abort_disable_intr; +static void pcie_disable_notification(struct controller *ctrl) +{ + u16 mask; + mask = PRSN_DETECT_ENABLE | ATTN_BUTTN_ENABLE | MRL_DETECT_ENABLE | + PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE; + if (pcie_write_cmd(ctrl, 0, mask)) + warn("%s: Cannot disable software notification\n", __func__); +} +static int pcie_init_notification(struct controller *ctrl) +{ + if (pciehp_request_irq(ctrl)) + return -1; + if (pcie_enable_notification(ctrl)) { + pciehp_free_irq(ctrl); + return -1; + } return 0; +} - /* We end up here for the many possible ways to fail this API. */ -abort_disable_intr: - if (pcie_write_cmd(ctrl, 0, HP_INTR_ENABLE)) - err("%s : disabling interrupts failed\n", __func__); -abort: - return -1; +static void pcie_shutdown_notification(struct controller *ctrl) +{ + pcie_disable_notification(ctrl); + pciehp_free_irq(ctrl); +} + +static void make_slot_name(struct slot *slot) +{ + if (pciehp_slot_with_bus) + snprintf(slot->name, SLOT_NAME_SIZE, "%04d_%04d", + slot->bus, slot->number); + else + snprintf(slot->name, SLOT_NAME_SIZE, "%d", slot->number); +} + +static int pcie_init_slot(struct controller *ctrl) +{ + struct slot *slot; + + slot = kzalloc(sizeof(*slot), GFP_KERNEL); + if (!slot) + return -ENOMEM; + + slot->hp_slot = 0; + slot->ctrl = ctrl; + slot->bus = ctrl->pci_dev->subordinate->number; + slot->device = ctrl->slot_device_offset + slot->hp_slot; + slot->hpc_ops = ctrl->hpc_ops; + slot->number = ctrl->first_slot; + make_slot_name(slot); + mutex_init(&slot->lock); + INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work); + list_add(&slot->slot_list, &ctrl->slot_list); + return 0; +} + +static void pcie_cleanup_slot(struct controller *ctrl) +{ + struct slot *slot; + slot = list_first_entry(&ctrl->slot_list, struct slot, slot_list); + list_del(&slot->slot_list); + cancel_delayed_work(&slot->work); + flush_scheduled_work(); + flush_workqueue(pciehp_wq); + kfree(slot); } static inline void dbg_ctrl(struct controller *ctrl) @@ -1176,15 +1106,23 @@ static inline void dbg_ctrl(struct controller *ctrl) dbg(" Comamnd Completed : %3s\n", NO_CMD_CMPL(ctrl)? 
"no" : "yes"); pciehp_readw(ctrl, SLOTSTATUS, &reg16); dbg("Slot Status : 0x%04x\n", reg16); - pciehp_readw(ctrl, SLOTSTATUS, &reg16); + pciehp_readw(ctrl, SLOTCTRL, &reg16); dbg("Slot Control : 0x%04x\n", reg16); } -int pcie_init(struct controller *ctrl, struct pcie_device *dev) +struct controller *pcie_init(struct pcie_device *dev) { + struct controller *ctrl; u32 slot_cap; struct pci_dev *pdev = dev->port; + ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); + if (!ctrl) { + err("%s : out of memory\n", __func__); + goto abort; + } + INIT_LIST_HEAD(&ctrl->slot_list); + ctrl->pci_dev = pdev; ctrl->cap_base = pci_find_capability(pdev, PCI_CAP_ID_EXP); if (!ctrl->cap_base) { @@ -1215,15 +1153,12 @@ int pcie_init(struct controller *ctrl, struct pcie_device *dev) !(POWER_CTRL(ctrl) | ATTN_LED(ctrl) | PWR_LED(ctrl) | EMI(ctrl))) ctrl->no_cmd_complete = 1; - info("HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n", - pdev->vendor, pdev->device, - pdev->subsystem_vendor, pdev->subsystem_device); + /* Clear all remaining event bits in Slot Status register */ + if (pciehp_writew(ctrl, SLOTSTATUS, 0x1f)) + goto abort_ctrl; - if (pcie_init_hardware_part1(ctrl, dev)) - goto abort; - - if (pciehp_request_irq(ctrl)) - goto abort; + /* Disable sotfware notification */ + pcie_disable_notification(ctrl); /* * If this is the first controller to be initialized, @@ -1231,18 +1166,39 @@ int pcie_init(struct controller *ctrl, struct pcie_device *dev) */ if (atomic_add_return(1, &pciehp_num_controllers) == 1) { pciehp_wq = create_singlethread_workqueue("pciehpd"); - if (!pciehp_wq) { - goto abort_free_irq; - } + if (!pciehp_wq) + goto abort_ctrl; } - if (pcie_init_hardware_part2(ctrl, dev)) - goto abort_free_irq; + info("HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n", + pdev->vendor, pdev->device, + pdev->subsystem_vendor, pdev->subsystem_device); + + if (pcie_init_slot(ctrl)) + goto abort_ctrl; - return 0; + if (pcie_init_notification(ctrl)) + goto abort_slot; -abort_free_irq: - pciehp_free_irq(ctrl); + return ctrl; + +abort_slot: + pcie_cleanup_slot(ctrl); +abort_ctrl: + kfree(ctrl); abort: - return -1; + return NULL; +} + +void pcie_release_ctrl(struct controller *ctrl) +{ + pcie_shutdown_notification(ctrl); + pcie_cleanup_slot(ctrl); + /* + * If this is the last controller to be released, destroy the + * pciehp work queue + */ + if (atomic_dec_and_test(&pciehp_num_controllers)) + destroy_workqueue(pciehp_wq); + kfree(ctrl); } diff --git a/drivers/pci/hotplug/rpadlpar_sysfs.c b/drivers/pci/hotplug/rpadlpar_sysfs.c index 779c5db71be..a796301ea03 100644 --- a/drivers/pci/hotplug/rpadlpar_sysfs.c +++ b/drivers/pci/hotplug/rpadlpar_sysfs.c @@ -14,8 +14,10 @@ */ #include <linux/kobject.h> #include <linux/string.h> +#include <linux/pci.h> #include <linux/pci_hotplug.h> #include "rpadlpar.h" +#include "../pci.h" #define DLPAR_KOBJ_NAME "control" @@ -27,7 +29,6 @@ #define MAX_DRC_NAME_LEN 64 - static ssize_t add_slot_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t nbytes) { @@ -112,7 +113,7 @@ int dlpar_sysfs_init(void) int error; dlpar_kobj = kobject_create_and_add(DLPAR_KOBJ_NAME, - &pci_hotplug_slots_kset->kobj); + &pci_slots_kset->kobj); if (!dlpar_kobj) return -EINVAL; diff --git a/drivers/pci/hotplug/rpaphp_slot.c b/drivers/pci/hotplug/rpaphp_slot.c index 56197b600d3..9b714ea93d2 100644 --- a/drivers/pci/hotplug/rpaphp_slot.c +++ b/drivers/pci/hotplug/rpaphp_slot.c @@ -33,33 +33,6 @@ #include <asm/rtas.h> #include "rpaphp.h" -static ssize_t address_read_file (struct hotplug_slot 
*php_slot, char *buf) -{ - int retval; - struct slot *slot = (struct slot *)php_slot->private; - struct pci_bus *bus; - - if (!slot) - return -ENOENT; - - bus = slot->bus; - if (!bus) - return -ENOENT; - - if (bus->self) - retval = sprintf(buf, pci_name(bus->self)); - else - retval = sprintf(buf, "%04x:%02x:00.0", - pci_domain_nr(bus), bus->number); - - return retval; -} - -static struct hotplug_slot_attribute php_attr_address = { - .attr = {.name = "address", .mode = S_IFREG | S_IRUGO}, - .show = address_read_file, -}; - /* free up the memory used by a slot */ static void rpaphp_release_slot(struct hotplug_slot *hotplug_slot) { @@ -135,9 +108,6 @@ int rpaphp_deregister_slot(struct slot *slot) list_del(&slot->rpaphp_slot_list); - /* remove "address" file */ - sysfs_remove_file(&php_slot->kobj, &php_attr_address.attr); - retval = pci_hp_deregister(php_slot); if (retval) err("Problem unregistering a slot %s\n", slot->name); @@ -151,6 +121,7 @@ int rpaphp_register_slot(struct slot *slot) { struct hotplug_slot *php_slot = slot->hotplug_slot; int retval; + int slotno; dbg("%s registering slot:path[%s] index[%x], name[%s] pdomain[%x] type[%d]\n", __func__, slot->dn->full_name, slot->index, slot->name, @@ -162,19 +133,16 @@ int rpaphp_register_slot(struct slot *slot) return -EAGAIN; } - retval = pci_hp_register(php_slot); + if (slot->dn->child) + slotno = PCI_SLOT(PCI_DN(slot->dn->child)->devfn); + else + slotno = -1; + retval = pci_hp_register(php_slot, slot->bus, slotno); if (retval) { err("pci_hp_register failed with error %d\n", retval); return retval; } - /* create "address" file */ - retval = sysfs_create_file(&php_slot->kobj, &php_attr_address.attr); - if (retval) { - err("sysfs_create_file failed with error %d\n", retval); - goto sysfs_fail; - } - /* add slot to our internal list */ list_add(&slot->rpaphp_slot_list, &rpaphp_slot_head); info("Slot [%s] registered\n", slot->name); diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c index 2fe37cd85b6..410fe0394a8 100644 --- a/drivers/pci/hotplug/sgi_hotplug.c +++ b/drivers/pci/hotplug/sgi_hotplug.c @@ -197,13 +197,15 @@ static int sn_hp_slot_private_alloc(struct hotplug_slot *bss_hotplug_slot, static struct hotplug_slot * sn_hp_destroy(void) { struct slot *slot; + struct pci_slot *pci_slot; struct hotplug_slot *bss_hotplug_slot = NULL; list_for_each_entry(slot, &sn_hp_list, hp_list) { bss_hotplug_slot = slot->hotplug_slot; + pci_slot = bss_hotplug_slot->pci_slot; list_del(&((struct slot *)bss_hotplug_slot->private)-> hp_list); - sysfs_remove_file(&bss_hotplug_slot->kobj, + sysfs_remove_file(&pci_slot->kobj, &sn_slot_path_attr.attr); break; } @@ -614,6 +616,7 @@ static void sn_release_slot(struct hotplug_slot *bss_hotplug_slot) static int sn_hotplug_slot_register(struct pci_bus *pci_bus) { int device; + struct pci_slot *pci_slot; struct hotplug_slot *bss_hotplug_slot; int rc = 0; @@ -650,11 +653,12 @@ static int sn_hotplug_slot_register(struct pci_bus *pci_bus) bss_hotplug_slot->ops = &sn_hotplug_slot_ops; bss_hotplug_slot->release = &sn_release_slot; - rc = pci_hp_register(bss_hotplug_slot); + rc = pci_hp_register(bss_hotplug_slot, pci_bus, device); if (rc) goto register_err; - rc = sysfs_create_file(&bss_hotplug_slot->kobj, + pci_slot = bss_hotplug_slot->pci_slot; + rc = sysfs_create_file(&pci_slot->kobj, &sn_slot_path_attr.attr); if (rc) goto register_err; @@ -664,7 +668,7 @@ static int sn_hotplug_slot_register(struct pci_bus *pci_bus) register_err: dev_dbg(&pci_bus->self->dev, "bus failed to register with 
err = %d\n", - rc); + rc); alloc_err: if (rc == -ENOMEM) diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h index f66e8d6315a..8a026f750de 100644 --- a/drivers/pci/hotplug/shpchp.h +++ b/drivers/pci/hotplug/shpchp.h @@ -170,6 +170,7 @@ extern void shpchp_queue_pushbutton_work(struct work_struct *work); extern int shpc_init( struct controller *ctrl, struct pci_dev *pdev); #ifdef CONFIG_ACPI +#include <linux/pci-acpi.h> static inline int get_hp_params_from_firmware(struct pci_dev *dev, struct hotplug_params *hpp) { @@ -177,14 +178,15 @@ static inline int get_hp_params_from_firmware(struct pci_dev *dev, return -ENODEV; return 0; } -#define get_hp_hw_control_from_firmware(pdev) \ - do { \ - if (DEVICE_ACPI_HANDLE(&(pdev->dev))) \ - acpi_run_oshp(DEVICE_ACPI_HANDLE(&(pdev->dev)));\ - } while (0) + +static inline int get_hp_hw_control_from_firmware(struct pci_dev *dev) +{ + u32 flags = OSC_SHPC_NATIVE_HP_CONTROL; + return acpi_get_hp_hw_control_from_firmware(dev, flags); +} #else #define get_hp_params_from_firmware(dev, hpp) (-ENODEV) -#define get_hp_hw_control_from_firmware(dev) do { } while (0) +#define get_hp_hw_control_from_firmware(dev) (0) #endif struct ctrl_reg { diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c index 97848654652..a8cbd039b85 100644 --- a/drivers/pci/hotplug/shpchp_core.c +++ b/drivers/pci/hotplug/shpchp_core.c @@ -39,7 +39,7 @@ int shpchp_debug; int shpchp_poll_mode; int shpchp_poll_time; -int shpchp_slot_with_bus; +static int shpchp_slot_with_bus; struct workqueue_struct *shpchp_wq; #define DRIVER_VERSION "0.4" @@ -68,7 +68,6 @@ static int get_power_status (struct hotplug_slot *slot, u8 *value); static int get_attention_status (struct hotplug_slot *slot, u8 *value); static int get_latch_status (struct hotplug_slot *slot, u8 *value); static int get_adapter_status (struct hotplug_slot *slot, u8 *value); -static int get_address (struct hotplug_slot *slot, u32 *value); static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); @@ -81,7 +80,6 @@ static struct hotplug_slot_ops shpchp_hotplug_slot_ops = { .get_attention_status = get_attention_status, .get_latch_status = get_latch_status, .get_adapter_status = get_adapter_status, - .get_address = get_address, .get_max_bus_speed = get_max_bus_speed, .get_cur_bus_speed = get_cur_bus_speed, }; @@ -159,7 +157,8 @@ static int init_slots(struct controller *ctrl) dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x " "slot_device_offset=%x\n", slot->bus, slot->device, slot->hp_slot, slot->number, ctrl->slot_device_offset); - retval = pci_hp_register(slot->hotplug_slot); + retval = pci_hp_register(slot->hotplug_slot, + ctrl->pci_dev->subordinate, slot->device); if (retval) { err("pci_hp_register failed with error %d\n", retval); if (retval == -EEXIST) @@ -288,19 +287,8 @@ static int get_adapter_status (struct hotplug_slot *hotplug_slot, u8 *value) return 0; } -static int get_address (struct hotplug_slot *hotplug_slot, u32 *value) -{ - struct slot *slot = get_slot(hotplug_slot); - struct pci_bus *bus = slot->ctrl->pci_dev->subordinate; - - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); - - *value = (pci_domain_nr(bus) << 16) | (slot->bus << 8) | slot->device; - - return 0; -} - -static int get_max_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value) +static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, + enum pci_bus_speed *value) 
{ struct slot *slot = get_slot(hotplug_slot); int retval; @@ -330,13 +318,14 @@ static int get_cur_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_sp static int is_shpc_capable(struct pci_dev *dev) { - if ((dev->vendor == PCI_VENDOR_ID_AMD) || (dev->device == - PCI_DEVICE_ID_AMD_GOLAM_7450)) - return 1; - if (pci_find_capability(dev, PCI_CAP_ID_SHPC)) - return 1; - - return 0; + if ((dev->vendor == PCI_VENDOR_ID_AMD) || (dev->device == + PCI_DEVICE_ID_AMD_GOLAM_7450)) + return 1; + if (!pci_find_capability(dev, PCI_CAP_ID_SHPC)) + return 0; + if (get_hp_hw_control_from_firmware(dev)) + return 0; + return 1; } static int shpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c index 7d770b2cd88..7a0bff364cd 100644 --- a/drivers/pci/hotplug/shpchp_hpc.c +++ b/drivers/pci/hotplug/shpchp_hpc.c @@ -1084,7 +1084,6 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev) dbg("%s: HPC at b:d:f:irq=0x%x:%x:%x:%x\n", __func__, pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), pdev->irq); - get_hp_hw_control_from_firmware(pdev); /* * If this is the first controller to be initialized, diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index bb0642318a9..3f7b81c065d 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c @@ -1748,7 +1748,6 @@ int __init init_dmars(void) deferred_flush = kzalloc(g_num_of_iommus * sizeof(struct deferred_flush_tables), GFP_KERNEL); if (!deferred_flush) { - kfree(g_iommus); ret = -ENOMEM; goto error; } diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 8c61304cbb3..15af618d36e 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -70,12 +70,10 @@ arch_teardown_msi_irqs(struct pci_dev *dev) } } -static void msi_set_enable(struct pci_dev *dev, int enable) +static void __msi_set_enable(struct pci_dev *dev, int pos, int enable) { - int pos; u16 control; - pos = pci_find_capability(dev, PCI_CAP_ID_MSI); if (pos) { pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control); control &= ~PCI_MSI_FLAGS_ENABLE; @@ -85,6 +83,11 @@ static void msi_set_enable(struct pci_dev *dev, int enable) } } +static void msi_set_enable(struct pci_dev *dev, int enable) +{ + __msi_set_enable(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), enable); +} + static void msix_set_enable(struct pci_dev *dev, int enable) { int pos; @@ -141,7 +144,8 @@ static void msi_set_mask_bits(unsigned int irq, u32 mask, u32 flag) mask_bits |= flag & mask; pci_write_config_dword(entry->dev, pos, mask_bits); } else { - msi_set_enable(entry->dev, !flag); + __msi_set_enable(entry->dev, entry->msi_attrib.pos, + !flag); } break; case PCI_CAP_ID_MSIX: @@ -561,9 +565,8 @@ int pci_enable_msi(struct pci_dev* dev) /* Check whether driver already requested for MSI-X irqs */ if (dev->msix_enabled) { - printk(KERN_INFO "PCI: %s: Can't enable MSI. " - "Device already has MSI-X enabled\n", - pci_name(dev)); + dev_info(&dev->dev, "can't enable MSI " + "(MSI-X already enabled)\n"); return -EINVAL; } status = msi_capability_init(dev); @@ -686,9 +689,8 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) /* Check whether driver already requested for MSI irq */ if (dev->msi_enabled) { - printk(KERN_INFO "PCI: %s: Can't enable MSI-X. 
" - "Device already has an MSI irq assigned\n", - pci_name(dev)); + dev_info(&dev->dev, "can't enable MSI-X " + "(MSI IRQ already assigned)\n"); return -EINVAL; } status = msix_capability_init(dev, entries, nvec); diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 9d6fc8e6285..7764768b6a0 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -21,12 +21,19 @@ struct acpi_osc_data { acpi_handle handle; - u32 ctrlset_buf[3]; - u32 global_ctrlsets; + u32 support_set; + u32 control_set; + int is_queried; + u32 query_result; struct list_head sibiling; }; static LIST_HEAD(acpi_osc_data_list); +struct acpi_osc_args { + u32 capbuf[3]; + u32 query_result; +}; + static struct acpi_osc_data *acpi_get_osc_data(acpi_handle handle) { struct acpi_osc_data *data; @@ -44,42 +51,18 @@ static struct acpi_osc_data *acpi_get_osc_data(acpi_handle handle) return data; } -static u8 OSC_UUID[16] = {0x5B, 0x4D, 0xDB, 0x33, 0xF7, 0x1F, 0x1C, 0x40, 0x96, 0x57, 0x74, 0x41, 0xC0, 0x3D, 0xD7, 0x66}; +static u8 OSC_UUID[16] = {0x5B, 0x4D, 0xDB, 0x33, 0xF7, 0x1F, 0x1C, 0x40, + 0x96, 0x57, 0x74, 0x41, 0xC0, 0x3D, 0xD7, 0x66}; -static acpi_status -acpi_query_osc ( - acpi_handle handle, - u32 level, - void *context, - void **retval ) +static acpi_status acpi_run_osc(acpi_handle handle, + struct acpi_osc_args *osc_args) { - acpi_status status; - struct acpi_object_list input; - union acpi_object in_params[4]; - struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL}; - union acpi_object *out_obj; - u32 osc_dw0; - acpi_status *ret_status = (acpi_status *)retval; - struct acpi_osc_data *osc_data; - u32 flags = (unsigned long)context, temp; - acpi_handle tmp; - - status = acpi_get_handle(handle, "_OSC", &tmp); - if (ACPI_FAILURE(status)) - return status; - - osc_data = acpi_get_osc_data(handle); - if (!osc_data) { - printk(KERN_ERR "acpi osc data array is full\n"); - return AE_ERROR; - } - - osc_data->ctrlset_buf[OSC_SUPPORT_TYPE] |= (flags & OSC_SUPPORT_MASKS); - - /* do _OSC query for all possible controls */ - temp = osc_data->ctrlset_buf[OSC_CONTROL_TYPE]; - osc_data->ctrlset_buf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE; - osc_data->ctrlset_buf[OSC_CONTROL_TYPE] = OSC_CONTROL_MASKS; + acpi_status status; + struct acpi_object_list input; + union acpi_object in_params[4]; + struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL}; + union acpi_object *out_obj; + u32 osc_dw0, flags = osc_args->capbuf[OSC_QUERY_TYPE]; /* Setting up input parameters */ input.count = 4; @@ -93,20 +76,19 @@ acpi_query_osc ( in_params[2].integer.value = 3; in_params[3].type = ACPI_TYPE_BUFFER; in_params[3].buffer.length = 12; - in_params[3].buffer.pointer = (u8 *)osc_data->ctrlset_buf; + in_params[3].buffer.pointer = (u8 *)osc_args->capbuf; status = acpi_evaluate_object(handle, "_OSC", &input, &output); if (ACPI_FAILURE(status)) - goto out_nofree; - out_obj = output.pointer; + return status; + out_obj = output.pointer; if (out_obj->type != ACPI_TYPE_BUFFER) { - printk(KERN_DEBUG - "Evaluate _OSC returns wrong type\n"); + printk(KERN_DEBUG "Evaluate _OSC returns wrong type\n"); status = AE_TYPE; - goto query_osc_out; + goto out_kfree; } - osc_dw0 = *((u32 *) out_obj->buffer.pointer); + osc_dw0 = *((u32 *)out_obj->buffer.pointer); if (osc_dw0) { if (osc_dw0 & OSC_REQUEST_ERROR) printk(KERN_DEBUG "_OSC request fails\n"); @@ -115,93 +97,58 @@ acpi_query_osc ( if (osc_dw0 & OSC_INVALID_REVISION_ERROR) printk(KERN_DEBUG "_OSC invalid revision\n"); if (osc_dw0 & OSC_CAPABILITIES_MASK_ERROR) { - /* Update Global Control Set */ - 
osc_data->global_ctrlsets = - *((u32 *)(out_obj->buffer.pointer + 8)); - status = AE_OK; - goto query_osc_out; + if (flags & OSC_QUERY_ENABLE) + goto out_success; + printk(KERN_DEBUG "_OSC FW not grant req. control\n"); + status = AE_SUPPORT; + goto out_kfree; } status = AE_ERROR; - goto query_osc_out; + goto out_kfree; } - - /* Update Global Control Set */ - osc_data->global_ctrlsets = *((u32 *)(out_obj->buffer.pointer + 8)); +out_success: + if (flags & OSC_QUERY_ENABLE) + osc_args->query_result = + *((u32 *)(out_obj->buffer.pointer + 8)); status = AE_OK; -query_osc_out: +out_kfree: kfree(output.pointer); -out_nofree: - *ret_status = status; - - osc_data->ctrlset_buf[OSC_QUERY_TYPE] = !OSC_QUERY_ENABLE; - osc_data->ctrlset_buf[OSC_CONTROL_TYPE] = temp; - if (ACPI_FAILURE(status)) { - /* no osc support at all */ - osc_data->ctrlset_buf[OSC_SUPPORT_TYPE] = 0; - } - return status; } - -static acpi_status -acpi_run_osc ( - acpi_handle handle, - void *context) +static acpi_status acpi_query_osc(acpi_handle handle, + u32 level, void *context, void **retval) { - acpi_status status; - struct acpi_object_list input; - union acpi_object in_params[4]; - struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL}; - union acpi_object *out_obj; - u32 osc_dw0; - - /* Setting up input parameters */ - input.count = 4; - input.pointer = in_params; - in_params[0].type = ACPI_TYPE_BUFFER; - in_params[0].buffer.length = 16; - in_params[0].buffer.pointer = OSC_UUID; - in_params[1].type = ACPI_TYPE_INTEGER; - in_params[1].integer.value = 1; - in_params[2].type = ACPI_TYPE_INTEGER; - in_params[2].integer.value = 3; - in_params[3].type = ACPI_TYPE_BUFFER; - in_params[3].buffer.length = 12; - in_params[3].buffer.pointer = (u8 *)context; + acpi_status status; + struct acpi_osc_data *osc_data; + u32 flags = (unsigned long)context, support_set; + acpi_handle tmp; + struct acpi_osc_args osc_args; - status = acpi_evaluate_object(handle, "_OSC", &input, &output); - if (ACPI_FAILURE (status)) + status = acpi_get_handle(handle, "_OSC", &tmp); + if (ACPI_FAILURE(status)) return status; - out_obj = output.pointer; - if (out_obj->type != ACPI_TYPE_BUFFER) { - printk(KERN_DEBUG - "Evaluate _OSC returns wrong type\n"); - status = AE_TYPE; - goto run_osc_out; + osc_data = acpi_get_osc_data(handle); + if (!osc_data) { + printk(KERN_ERR "acpi osc data array is full\n"); + return AE_ERROR; } - osc_dw0 = *((u32 *) out_obj->buffer.pointer); - if (osc_dw0) { - if (osc_dw0 & OSC_REQUEST_ERROR) - printk(KERN_DEBUG "_OSC request fails\n"); - if (osc_dw0 & OSC_INVALID_UUID_ERROR) - printk(KERN_DEBUG "_OSC invalid UUID\n"); - if (osc_dw0 & OSC_INVALID_REVISION_ERROR) - printk(KERN_DEBUG "_OSC invalid revision\n"); - if (osc_dw0 & OSC_CAPABILITIES_MASK_ERROR) { - printk(KERN_DEBUG "_OSC FW not grant req. 
control\n"); - status = AE_SUPPORT; - goto run_osc_out; - } - status = AE_ERROR; - goto run_osc_out; + + /* do _OSC query for all possible controls */ + support_set = osc_data->support_set | (flags & OSC_SUPPORT_MASKS); + osc_args.capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE; + osc_args.capbuf[OSC_SUPPORT_TYPE] = support_set; + osc_args.capbuf[OSC_CONTROL_TYPE] = OSC_CONTROL_MASKS; + + status = acpi_run_osc(handle, &osc_args); + if (ACPI_SUCCESS(status)) { + osc_data->support_set = support_set; + osc_data->query_result = osc_args.query_result; + osc_data->is_queried = 1; } - status = AE_OK; -run_osc_out: - kfree(output.pointer); return status; } @@ -215,15 +162,11 @@ run_osc_out: **/ acpi_status __pci_osc_support_set(u32 flags, const char *hid) { - acpi_status retval = AE_NOT_FOUND; - - if (!(flags & OSC_SUPPORT_MASKS)) { + if (!(flags & OSC_SUPPORT_MASKS)) return AE_TYPE; - } - acpi_get_devices(hid, - acpi_query_osc, - (void *)(unsigned long)flags, - (void **) &retval ); + + acpi_get_devices(hid, acpi_query_osc, + (void *)(unsigned long)flags, NULL); return AE_OK; } @@ -236,10 +179,11 @@ acpi_status __pci_osc_support_set(u32 flags, const char *hid) **/ acpi_status pci_osc_control_set(acpi_handle handle, u32 flags) { - acpi_status status; - u32 ctrlset; + acpi_status status; + u32 ctrlset, control_set; acpi_handle tmp; struct acpi_osc_data *osc_data; + struct acpi_osc_args osc_args; status = acpi_get_handle(handle, "_OSC", &tmp); if (ACPI_FAILURE(status)) @@ -252,24 +196,25 @@ acpi_status pci_osc_control_set(acpi_handle handle, u32 flags) } ctrlset = (flags & OSC_CONTROL_MASKS); - if (!ctrlset) { + if (!ctrlset) return AE_TYPE; - } - if (osc_data->ctrlset_buf[OSC_SUPPORT_TYPE] && - ((osc_data->global_ctrlsets & ctrlset) != ctrlset)) { + + if (osc_data->is_queried && + ((osc_data->query_result & ctrlset) != ctrlset)) return AE_SUPPORT; - } - osc_data->ctrlset_buf[OSC_CONTROL_TYPE] |= ctrlset; - status = acpi_run_osc(handle, osc_data->ctrlset_buf); - if (ACPI_FAILURE (status)) { - osc_data->ctrlset_buf[OSC_CONTROL_TYPE] &= ~ctrlset; - } - + + control_set = osc_data->control_set | ctrlset; + osc_args.capbuf[OSC_QUERY_TYPE] = 0; + osc_args.capbuf[OSC_SUPPORT_TYPE] = osc_data->support_set; + osc_args.capbuf[OSC_CONTROL_TYPE] = control_set; + status = acpi_run_osc(handle, &osc_args); + if (ACPI_SUCCESS(status)) + osc_data->control_set = control_set; + return status; } EXPORT_SYMBOL(pci_osc_control_set); -#ifdef CONFIG_ACPI_SLEEP /* * _SxD returns the D-state with the highest power * (lowest D-state number) supported in the S-state "x". @@ -293,13 +238,11 @@ EXPORT_SYMBOL(pci_osc_control_set); * choose highest power _SxD or any lower power */ -static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev, - pm_message_t state) +static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev) { int acpi_state; - acpi_state = acpi_pm_device_sleep_state(&pdev->dev, - device_may_wakeup(&pdev->dev), NULL); + acpi_state = acpi_pm_device_sleep_state(&pdev->dev, NULL); if (acpi_state < 0) return PCI_POWER_ERROR; @@ -315,7 +258,13 @@ static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev, } return PCI_POWER_ERROR; } -#endif + +static bool acpi_pci_power_manageable(struct pci_dev *dev) +{ + acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); + + return handle ? 
acpi_bus_power_manageable(handle) : false; +} static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) { @@ -328,12 +277,11 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) [PCI_D3hot] = ACPI_STATE_D3, [PCI_D3cold] = ACPI_STATE_D3 }; + int error = -EINVAL; - if (!handle) - return -ENODEV; /* If the ACPI device has _EJ0, ignore the device */ - if (ACPI_SUCCESS(acpi_get_handle(handle, "_EJ0", &tmp))) - return 0; + if (!handle || ACPI_SUCCESS(acpi_get_handle(handle, "_EJ0", &tmp))) + return -ENODEV; switch (state) { case PCI_D0: @@ -341,11 +289,41 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) case PCI_D2: case PCI_D3hot: case PCI_D3cold: - return acpi_bus_set_power(handle, state_conv[state]); + error = acpi_bus_set_power(handle, state_conv[state]); } - return -EINVAL; + + if (!error) + dev_printk(KERN_INFO, &dev->dev, + "power state changed by ACPI to D%d\n", state); + + return error; +} + +static bool acpi_pci_can_wakeup(struct pci_dev *dev) +{ + acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); + + return handle ? acpi_bus_can_wakeup(handle) : false; +} + +static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable) +{ + int error = acpi_pm_device_sleep_wake(&dev->dev, enable); + + if (!error) + dev_printk(KERN_INFO, &dev->dev, + "wake-up capability %s by ACPI\n", + enable ? "enabled" : "disabled"); + return error; } +static struct pci_platform_pm_ops acpi_pci_platform_pm = { + .is_manageable = acpi_pci_power_manageable, + .set_state = acpi_pci_set_power_state, + .choose_state = acpi_pci_choose_state, + .can_wakeup = acpi_pci_can_wakeup, + .sleep_wake = acpi_pci_sleep_wake, +}; /* ACPI bus type */ static int acpi_pci_find_device(struct device *dev, acpi_handle *handle) @@ -397,10 +375,7 @@ static int __init acpi_pci_init(void) ret = register_acpi_bus_type(&acpi_pci_bus); if (ret) return 0; -#ifdef CONFIG_ACPI_SLEEP - platform_pci_choose_state = acpi_pci_choose_state; -#endif - platform_pci_set_power_state = acpi_pci_set_power_state; + pci_set_platform_pm(&acpi_pci_platform_pm); return 0; } arch_initcall(acpi_pci_init); diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index e1637bd82b8..a13f5348611 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -274,7 +274,57 @@ static int pci_device_remove(struct device * dev) return 0; } -static int pci_device_suspend(struct device * dev, pm_message_t state) +static void pci_device_shutdown(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct pci_driver *drv = pci_dev->driver; + + if (drv && drv->shutdown) + drv->shutdown(pci_dev); + pci_msi_shutdown(pci_dev); + pci_msix_shutdown(pci_dev); +} + +#ifdef CONFIG_PM_SLEEP + +/* + * Default "suspend" method for devices that have no driver provided suspend, + * or not even a driver at all. + */ +static void pci_default_pm_suspend(struct pci_dev *pci_dev) +{ + pci_save_state(pci_dev); + /* + * mark its power state as "unknown", since we don't know if + * e.g. the BIOS will change its device state when we suspend. + */ + if (pci_dev->current_state == PCI_D0) + pci_dev->current_state = PCI_UNKNOWN; +} + +/* + * Default "resume" method for devices that have no driver provided resume, + * or not even a driver at all. 
+ */ +static int pci_default_pm_resume(struct pci_dev *pci_dev) +{ + int retval = 0; + + /* restore the PCI config space */ + pci_restore_state(pci_dev); + /* if the device was enabled before suspend, reenable */ + retval = pci_reenable_device(pci_dev); + /* + * if the device was busmaster before the suspend, make it busmaster + * again + */ + if (pci_dev->is_busmaster) + pci_set_master(pci_dev); + + return retval; +} + +static int pci_legacy_suspend(struct device *dev, pm_message_t state) { struct pci_dev * pci_dev = to_pci_dev(dev); struct pci_driver * drv = pci_dev->driver; @@ -284,18 +334,12 @@ static int pci_device_suspend(struct device * dev, pm_message_t state) i = drv->suspend(pci_dev, state); suspend_report_result(drv->suspend, i); } else { - pci_save_state(pci_dev); - /* - * mark its power state as "unknown", since we don't know if - * e.g. the BIOS will change its device state when we suspend. - */ - if (pci_dev->current_state == PCI_D0) - pci_dev->current_state = PCI_UNKNOWN; + pci_default_pm_suspend(pci_dev); } return i; } -static int pci_device_suspend_late(struct device * dev, pm_message_t state) +static int pci_legacy_suspend_late(struct device *dev, pm_message_t state) { struct pci_dev * pci_dev = to_pci_dev(dev); struct pci_driver * drv = pci_dev->driver; @@ -308,26 +352,7 @@ static int pci_device_suspend_late(struct device * dev, pm_message_t state) return i; } -/* - * Default resume method for devices that have no driver provided resume, - * or not even a driver at all. - */ -static int pci_default_resume(struct pci_dev *pci_dev) -{ - int retval = 0; - - /* restore the PCI config space */ - pci_restore_state(pci_dev); - /* if the device was enabled before suspend, reenable */ - retval = pci_reenable_device(pci_dev); - /* if the device was busmaster before the suspend, make it busmaster again */ - if (pci_dev->is_busmaster) - pci_set_master(pci_dev); - - return retval; -} - -static int pci_device_resume(struct device * dev) +static int pci_legacy_resume(struct device *dev) { int error; struct pci_dev * pci_dev = to_pci_dev(dev); @@ -336,34 +361,313 @@ static int pci_device_resume(struct device * dev) if (drv && drv->resume) error = drv->resume(pci_dev); else - error = pci_default_resume(pci_dev); + error = pci_default_pm_resume(pci_dev); return error; } -static int pci_device_resume_early(struct device * dev) +static int pci_legacy_resume_early(struct device *dev) { int error = 0; struct pci_dev * pci_dev = to_pci_dev(dev); struct pci_driver * drv = pci_dev->driver; - pci_fixup_device(pci_fixup_resume, pci_dev); - if (drv && drv->resume_early) error = drv->resume_early(pci_dev); return error; } -static void pci_device_shutdown(struct device *dev) +static int pci_pm_prepare(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int error = 0; + + if (drv && drv->pm && drv->pm->prepare) + error = drv->pm->prepare(dev); + + return error; +} + +static void pci_pm_complete(struct device *dev) +{ + struct device_driver *drv = dev->driver; + + if (drv && drv->pm && drv->pm->complete) + drv->pm->complete(dev); +} + +#ifdef CONFIG_SUSPEND + +static int pci_pm_suspend(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct device_driver *drv = dev->driver; + int error = 0; + + if (drv && drv->pm) { + if (drv->pm->suspend) { + error = drv->pm->suspend(dev); + suspend_report_result(drv->pm->suspend, error); + } else { + pci_default_pm_suspend(pci_dev); + } + } else { + error = pci_legacy_suspend(dev, PMSG_SUSPEND); + } + 
pci_fixup_device(pci_fixup_suspend, pci_dev); + + return error; +} + +static int pci_pm_suspend_noirq(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); struct pci_driver *drv = pci_dev->driver; + int error = 0; - if (drv && drv->shutdown) - drv->shutdown(pci_dev); - pci_msi_shutdown(pci_dev); - pci_msix_shutdown(pci_dev); + if (drv && drv->pm) { + if (drv->pm->suspend_noirq) { + error = drv->pm->suspend_noirq(dev); + suspend_report_result(drv->pm->suspend_noirq, error); + } + } else { + error = pci_legacy_suspend_late(dev, PMSG_SUSPEND); + } + + return error; } +static int pci_pm_resume(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct device_driver *drv = dev->driver; + int error; + + pci_fixup_device(pci_fixup_resume, pci_dev); + + if (drv && drv->pm) { + error = drv->pm->resume ? drv->pm->resume(dev) : + pci_default_pm_resume(pci_dev); + } else { + error = pci_legacy_resume(dev); + } + + return error; +} + +static int pci_pm_resume_noirq(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct pci_driver *drv = pci_dev->driver; + int error = 0; + + pci_fixup_device(pci_fixup_resume_early, pci_dev); + + if (drv && drv->pm) { + if (drv->pm->resume_noirq) + error = drv->pm->resume_noirq(dev); + } else { + error = pci_legacy_resume_early(dev); + } + + return error; +} + +#else /* !CONFIG_SUSPEND */ + +#define pci_pm_suspend NULL +#define pci_pm_suspend_noirq NULL +#define pci_pm_resume NULL +#define pci_pm_resume_noirq NULL + +#endif /* !CONFIG_SUSPEND */ + +#ifdef CONFIG_HIBERNATION + +static int pci_pm_freeze(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct device_driver *drv = dev->driver; + int error = 0; + + if (drv && drv->pm) { + if (drv->pm->freeze) { + error = drv->pm->freeze(dev); + suspend_report_result(drv->pm->freeze, error); + } else { + pci_default_pm_suspend(pci_dev); + } + } else { + error = pci_legacy_suspend(dev, PMSG_FREEZE); + pci_fixup_device(pci_fixup_suspend, pci_dev); + } + + return error; +} + +static int pci_pm_freeze_noirq(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct pci_driver *drv = pci_dev->driver; + int error = 0; + + if (drv && drv->pm) { + if (drv->pm->freeze_noirq) { + error = drv->pm->freeze_noirq(dev); + suspend_report_result(drv->pm->freeze_noirq, error); + } + } else { + error = pci_legacy_suspend_late(dev, PMSG_FREEZE); + } + + return error; +} + +static int pci_pm_thaw(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int error = 0; + + if (drv && drv->pm) { + if (drv->pm->thaw) + error = drv->pm->thaw(dev); + } else { + pci_fixup_device(pci_fixup_resume, to_pci_dev(dev)); + error = pci_legacy_resume(dev); + } + + return error; +} + +static int pci_pm_thaw_noirq(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct pci_driver *drv = pci_dev->driver; + int error = 0; + + if (drv && drv->pm) { + if (drv->pm->thaw_noirq) + error = drv->pm->thaw_noirq(dev); + } else { + pci_fixup_device(pci_fixup_resume_early, pci_dev); + error = pci_legacy_resume_early(dev); + } + + return error; +} + +static int pci_pm_poweroff(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int error = 0; + + pci_fixup_device(pci_fixup_suspend, to_pci_dev(dev)); + + if (drv && drv->pm) { + if (drv->pm->poweroff) { + error = drv->pm->poweroff(dev); + suspend_report_result(drv->pm->poweroff, error); + } + } else { + error = pci_legacy_suspend(dev, PMSG_HIBERNATE); + } + + return error; +} + +static int 
pci_pm_poweroff_noirq(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct pci_driver *drv = pci_dev->driver; + int error = 0; + + if (drv && drv->pm) { + if (drv->pm->poweroff_noirq) { + error = drv->pm->poweroff_noirq(dev); + suspend_report_result(drv->pm->poweroff_noirq, error); + } + } else { + error = pci_legacy_suspend_late(dev, PMSG_HIBERNATE); + } + + return error; +} + +static int pci_pm_restore(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct device_driver *drv = dev->driver; + int error; + + if (drv && drv->pm) { + error = drv->pm->restore ? drv->pm->restore(dev) : + pci_default_pm_resume(pci_dev); + } else { + error = pci_legacy_resume(dev); + } + pci_fixup_device(pci_fixup_resume, pci_dev); + + return error; +} + +static int pci_pm_restore_noirq(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct pci_driver *drv = pci_dev->driver; + int error = 0; + + pci_fixup_device(pci_fixup_resume, pci_dev); + + if (drv && drv->pm) { + if (drv->pm->restore_noirq) + error = drv->pm->restore_noirq(dev); + } else { + error = pci_legacy_resume_early(dev); + } + pci_fixup_device(pci_fixup_resume_early, pci_dev); + + return error; +} + +#else /* !CONFIG_HIBERNATION */ + +#define pci_pm_freeze NULL +#define pci_pm_freeze_noirq NULL +#define pci_pm_thaw NULL +#define pci_pm_thaw_noirq NULL +#define pci_pm_poweroff NULL +#define pci_pm_poweroff_noirq NULL +#define pci_pm_restore NULL +#define pci_pm_restore_noirq NULL + +#endif /* !CONFIG_HIBERNATION */ + +struct pm_ext_ops pci_pm_ops = { + .base = { + .prepare = pci_pm_prepare, + .complete = pci_pm_complete, + .suspend = pci_pm_suspend, + .resume = pci_pm_resume, + .freeze = pci_pm_freeze, + .thaw = pci_pm_thaw, + .poweroff = pci_pm_poweroff, + .restore = pci_pm_restore, + }, + .suspend_noirq = pci_pm_suspend_noirq, + .resume_noirq = pci_pm_resume_noirq, + .freeze_noirq = pci_pm_freeze_noirq, + .thaw_noirq = pci_pm_thaw_noirq, + .poweroff_noirq = pci_pm_poweroff_noirq, + .restore_noirq = pci_pm_restore_noirq, +}; + +#define PCI_PM_OPS_PTR &pci_pm_ops + +#else /* !CONFIG_PM_SLEEP */ + +#define PCI_PM_OPS_PTR NULL + +#endif /* !CONFIG_PM_SLEEP */ + /** * __pci_register_driver - register a new pci driver * @drv: the driver structure to register @@ -386,6 +690,9 @@ int __pci_register_driver(struct pci_driver *drv, struct module *owner, drv->driver.owner = owner; drv->driver.mod_name = mod_name; + if (drv->pm) + drv->driver.pm = &drv->pm->base; + spin_lock_init(&drv->dynids.lock); INIT_LIST_HEAD(&drv->dynids.list); @@ -511,12 +818,9 @@ struct bus_type pci_bus_type = { .uevent = pci_uevent, .probe = pci_device_probe, .remove = pci_device_remove, - .suspend = pci_device_suspend, - .suspend_late = pci_device_suspend_late, - .resume_early = pci_device_resume_early, - .resume = pci_device_resume, .shutdown = pci_device_shutdown, .dev_attrs = pci_dev_attrs, + .pm = PCI_PM_OPS_PTR, }; static int __init pci_driver_init(void) diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index e4548ab2a93..44a46c92b72 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -1,6 +1,4 @@ /* - * $Id: pci.c,v 1.91 1999/01/21 13:34:01 davem Exp $ - * * PCI Bus Services, see include/linux/pci.h for further explanation. 
* * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter, @@ -19,6 +17,7 @@ #include <linux/string.h> #include <linux/log2.h> #include <linux/pci-aspm.h> +#include <linux/pm_wakeup.h> #include <asm/dma.h> /* isa_dma_bridge_buggy */ #include "pci.h" @@ -378,74 +377,90 @@ pci_restore_bars(struct pci_dev *dev) pci_update_resource(dev, &dev->resource[i], i); } -int (*platform_pci_set_power_state)(struct pci_dev *dev, pci_power_t t); +static struct pci_platform_pm_ops *pci_platform_pm; -/** - * pci_set_power_state - Set the power state of a PCI device - * @dev: PCI device to be suspended - * @state: PCI power state (D0, D1, D2, D3hot, D3cold) we're entering - * - * Transition a device to a new power state, using the Power Management - * Capabilities in the device's config space. - * - * RETURN VALUE: - * -EINVAL if trying to enter a lower state than we're already in. - * 0 if we're already in the requested state. - * -EIO if device does not support PCI PM. - * 0 if we can successfully change the power state. - */ -int -pci_set_power_state(struct pci_dev *dev, pci_power_t state) +int pci_set_platform_pm(struct pci_platform_pm_ops *ops) { - int pm, need_restore = 0; - u16 pmcsr, pmc; + if (!ops->is_manageable || !ops->set_state || !ops->choose_state + || !ops->sleep_wake || !ops->can_wakeup) + return -EINVAL; + pci_platform_pm = ops; + return 0; +} - /* bound the state we're entering */ - if (state > PCI_D3hot) - state = PCI_D3hot; +static inline bool platform_pci_power_manageable(struct pci_dev *dev) +{ + return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false; +} - /* - * If the device or the parent bridge can't support PCI PM, ignore - * the request if we're doing anything besides putting it into D0 - * (which would only happen on boot). - */ - if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev)) - return 0; +static inline int platform_pci_set_power_state(struct pci_dev *dev, + pci_power_t t) +{ + return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS; +} - /* find PCI PM capability in list */ - pm = pci_find_capability(dev, PCI_CAP_ID_PM); +static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev) +{ + return pci_platform_pm ? + pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR; +} - /* abort if the device doesn't support PM capabilities */ - if (!pm) +static inline bool platform_pci_can_wakeup(struct pci_dev *dev) +{ + return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false; +} + +static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable) +{ + return pci_platform_pm ? + pci_platform_pm->sleep_wake(dev, enable) : -ENODEV; +} + +/** + * pci_raw_set_power_state - Use PCI PM registers to set the power state of + * given PCI device + * @dev: PCI device to handle. + * @state: PCI power state (D0, D1, D2, D3hot) to put the device into. + * + * RETURN VALUE: + * -EINVAL if the requested state is invalid. + * -EIO if device does not support PCI PM or its PM capabilities register has a + * wrong version, or device doesn't support the requested state. + * 0 if device already is in the requested state. + * 0 if device's power state has been successfully changed. 
+ */ +static int +pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state) +{ + u16 pmcsr; + bool need_restore = false; + + if (!dev->pm_cap) return -EIO; + if (state < PCI_D0 || state > PCI_D3hot) + return -EINVAL; + /* Validate current state: * Can enter D0 from any state, but if we can only go deeper * to sleep if we're already in a low power state */ - if (state != PCI_D0 && dev->current_state > state) { - printk(KERN_ERR "%s(): %s: state=%d, current state=%d\n", - __func__, pci_name(dev), state, dev->current_state); + if (dev->current_state == state) { + /* we're already there */ + return 0; + } else if (state != PCI_D0 && dev->current_state <= PCI_D3cold + && dev->current_state > state) { + dev_err(&dev->dev, "invalid power transition " + "(from state %d to %d)\n", dev->current_state, state); return -EINVAL; - } else if (dev->current_state == state) - return 0; /* we're already there */ - - - pci_read_config_word(dev,pm + PCI_PM_PMC,&pmc); - if ((pmc & PCI_PM_CAP_VER_MASK) > 3) { - printk(KERN_DEBUG - "PCI: %s has unsupported PM cap regs version (%u)\n", - pci_name(dev), pmc & PCI_PM_CAP_VER_MASK); - return -EIO; } /* check if this device supports the desired state */ - if (state == PCI_D1 && !(pmc & PCI_PM_CAP_D1)) - return -EIO; - else if (state == PCI_D2 && !(pmc & PCI_PM_CAP_D2)) + if ((state == PCI_D1 && !dev->d1_support) + || (state == PCI_D2 && !dev->d2_support)) return -EIO; - pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr); + pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); /* If we're (effectively) in D3, force entire word to 0. * This doesn't affect PME_Status, disables PME_En, and @@ -461,7 +476,7 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state) case PCI_UNKNOWN: /* Boot-up */ if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET)) - need_restore = 1; + need_restore = true; /* Fall-through: force to D0 */ default: pmcsr = 0; @@ -469,7 +484,7 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state) } /* enter specified state */ - pci_write_config_word(dev, pm + PCI_PM_CTRL, pmcsr); + pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); /* Mandatory power management transition delays */ /* see PCI PM 1.1 5.6.1 table 18 */ @@ -478,13 +493,6 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state) else if (state == PCI_D2 || dev->current_state == PCI_D2) udelay(200); - /* - * Give firmware a chance to be called, such as ACPI _PRx, _PSx - * Firmware method after native method ? - */ - if (platform_pci_set_power_state) - platform_pci_set_power_state(dev, state); - dev->current_state = state; /* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT @@ -508,8 +516,77 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state) return 0; } -pci_power_t (*platform_pci_choose_state)(struct pci_dev *dev, pm_message_t state); - +/** + * pci_update_current_state - Read PCI power state of given device from its + * PCI PM registers and cache it + * @dev: PCI device to handle. + */ +static void pci_update_current_state(struct pci_dev *dev) +{ + if (dev->pm_cap) { + u16 pmcsr; + + pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); + dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK); + } +} + +/** + * pci_set_power_state - Set the power state of a PCI device + * @dev: PCI device to handle. + * @state: PCI power state (D0, D1, D2, D3hot) to put the device into. 
+ * + * Transition a device to a new power state, using the platform firmware and/or + * the device's PCI PM registers. + * + * RETURN VALUE: + * -EINVAL if the requested state is invalid. + * -EIO if device does not support PCI PM or its PM capabilities register has a + * wrong version, or device doesn't support the requested state. + * 0 if device already is in the requested state. + * 0 if device's power state has been successfully changed. + */ +int pci_set_power_state(struct pci_dev *dev, pci_power_t state) +{ + int error; + + /* bound the state we're entering */ + if (state > PCI_D3hot) + state = PCI_D3hot; + else if (state < PCI_D0) + state = PCI_D0; + else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev)) + /* + * If the device or the parent bridge do not support PCI PM, + * ignore the request if we're doing anything other than putting + * it into D0 (which would only happen on boot). + */ + return 0; + + if (state == PCI_D0 && platform_pci_power_manageable(dev)) { + /* + * Allow the platform to change the state, for example via ACPI + * _PR0, _PS0 and some such, but do not trust it. + */ + int ret = platform_pci_set_power_state(dev, PCI_D0); + if (!ret) + pci_update_current_state(dev); + } + + error = pci_raw_set_power_state(dev, state); + + if (state > PCI_D0 && platform_pci_power_manageable(dev)) { + /* Allow the platform to finalize the transition */ + int ret = platform_pci_set_power_state(dev, state); + if (!ret) { + pci_update_current_state(dev); + error = 0; + } + } + + return error; +} + /** * pci_choose_state - Choose the power state of a PCI device * @dev: PCI device to be suspended @@ -527,11 +604,9 @@ pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state) if (!pci_find_capability(dev, PCI_CAP_ID_PM)) return PCI_D0; - if (platform_pci_choose_state) { - ret = platform_pci_choose_state(dev, state); - if (ret != PCI_POWER_ERROR) - return ret; - } + ret = platform_pci_choose_state(dev); + if (ret != PCI_POWER_ERROR) + return ret; switch (state.event) { case PM_EVENT_ON: @@ -543,7 +618,8 @@ pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state) case PM_EVENT_HIBERNATE: return PCI_D3hot; default: - printk("Unrecognized suspend event %d\n", state.event); + dev_info(&dev->dev, "unrecognized suspend event %d\n", + state.event); BUG(); } return PCI_D0; @@ -568,7 +644,7 @@ static int pci_save_pcie_state(struct pci_dev *dev) else found = 1; if (!save_state) { - dev_err(&dev->dev, "Out of memory in pci_save_pcie_state\n"); + dev_err(&dev->dev, "out of memory in pci_save_pcie_state\n"); return -ENOMEM; } cap = (u16 *)&save_state->data[0]; @@ -619,7 +695,7 @@ static int pci_save_pcix_state(struct pci_dev *dev) else found = 1; if (!save_state) { - dev_err(&dev->dev, "Out of memory in pci_save_pcie_state\n"); + dev_err(&dev->dev, "out of memory in pci_save_pcie_state\n"); return -ENOMEM; } cap = (u16 *)&save_state->data[0]; @@ -685,10 +761,9 @@ pci_restore_state(struct pci_dev *dev) for (i = 15; i >= 0; i--) { pci_read_config_dword(dev, i * 4, &val); if (val != dev->saved_config_space[i]) { - printk(KERN_DEBUG "PM: Writing back config space on " - "device %s at offset %x (was %x, writing %x)\n", - pci_name(dev), i, - val, (int)dev->saved_config_space[i]); + dev_printk(KERN_DEBUG, &dev->dev, "restoring config " + "space at offset %#x (was %#x, writing %#x)\n", + i, val, (int)dev->saved_config_space[i]); pci_write_config_dword(dev,i * 4, dev->saved_config_space[i]); } @@ -961,6 +1036,46 @@ int pci_set_pcie_reset_state(struct pci_dev *dev, enum 
pcie_reset_state state) } /** + * pci_pme_capable - check the capability of PCI device to generate PME# + * @dev: PCI device to handle. + * @state: PCI state from which device will issue PME#. + */ +static bool pci_pme_capable(struct pci_dev *dev, pci_power_t state) +{ + if (!dev->pm_cap) + return false; + + return !!(dev->pme_support & (1 << state)); +} + +/** + * pci_pme_active - enable or disable PCI device's PME# function + * @dev: PCI device to handle. + * @enable: 'true' to enable PME# generation; 'false' to disable it. + * + * The caller must verify that the device is capable of generating PME# before + * calling this function with @enable equal to 'true'. + */ +static void pci_pme_active(struct pci_dev *dev, bool enable) +{ + u16 pmcsr; + + if (!dev->pm_cap) + return; + + pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); + /* Clear PME_Status by writing 1 to it and enable PME# */ + pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE; + if (!enable) + pmcsr &= ~PCI_PM_CTRL_PME_ENABLE; + + pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); + + dev_printk(KERN_INFO, &dev->dev, "PME# %s\n", + enable ? "enabled" : "disabled"); +} + +/** * pci_enable_wake - enable PCI device as wakeup event source * @dev: PCI device affected * @state: PCI state from which device will issue wakeup events @@ -971,66 +1086,173 @@ int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state) * called automatically by this routine. * * Devices with legacy power management (no standard PCI PM capabilities) - * always require such platform hooks. Depending on the platform, devices - * supporting the standard PCI PME# signal may require such platform hooks; - * they always update bits in config space to allow PME# generation. + * always require such platform hooks. * - * -EIO is returned if the device can't ever be a wakeup event source. - * -EINVAL is returned if the device can't generate wakeup events from - * the specified PCI state. Returns zero if the operation is successful. + * RETURN VALUE: + * 0 is returned on success + * -EINVAL is returned if device is not supposed to wake up the system + * Error code depending on the platform is returned if both the platform and + * the native mechanism fail to enable the generation of wake-up events */ int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable) { - int pm; - int status; - u16 value; - - /* Note that drivers should verify device_may_wakeup(&dev->dev) - * before calling this function. Platform code should report - * errors when drivers try to enable wakeup on devices that - * can't issue wakeups, or on which wakeups were disabled by - * userspace updating the /sys/devices.../power/wakeup file. + int error = 0; + bool pme_done = false; + + if (!device_may_wakeup(&dev->dev)) + return -EINVAL; + + /* + * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don + * Anderson we should be doing PME# wake enable followed by ACPI wake + * enable. To disable wake-up we call the platform first, for symmetry. */ - status = call_platform_enable_wakeup(&dev->dev, enable); + if (!enable && platform_pci_can_wakeup(dev)) + error = platform_pci_sleep_wake(dev, false); - /* find PCI PM capability in list */ - pm = pci_find_capability(dev, PCI_CAP_ID_PM); + if (!enable || pci_pme_capable(dev, state)) { + pci_pme_active(dev, enable); + pme_done = true; + } - /* If device doesn't support PM Capabilities, but caller wants to - * disable wake events, it's a NOP. 
Otherwise fail unless the - * platform hooks handled this legacy device already. - */ - if (!pm) - return enable ? status : 0; + if (enable && platform_pci_can_wakeup(dev)) + error = platform_pci_sleep_wake(dev, true); - /* Check device's ability to generate PME# */ - pci_read_config_word(dev,pm+PCI_PM_PMC,&value); + return pme_done ? 0 : error; +} - value &= PCI_PM_CAP_PME_MASK; - value >>= ffs(PCI_PM_CAP_PME_MASK) - 1; /* First bit of mask */ +/** + * pci_prepare_to_sleep - prepare PCI device for system-wide transition into + * a sleep state + * @dev: Device to handle. + * + * Choose the power state appropriate for the device depending on whether + * it can wake up the system and/or is power manageable by the platform + * (PCI_D3hot is the default) and put the device into that state. + */ +int pci_prepare_to_sleep(struct pci_dev *dev) +{ + pci_power_t target_state = PCI_D3hot; + int error; - /* Check if it can generate PME# from requested state. */ - if (!value || !(value & (1 << state))) { - /* if it can't, revert what the platform hook changed, - * always reporting the base "EINVAL, can't PME#" error + if (platform_pci_power_manageable(dev)) { + /* + * Call the platform to choose the target state of the device + * and enable wake-up from this state if supported. */ - if (enable) - call_platform_enable_wakeup(&dev->dev, 0); - return enable ? -EINVAL : 0; + pci_power_t state = platform_pci_choose_state(dev); + + switch (state) { + case PCI_POWER_ERROR: + case PCI_UNKNOWN: + break; + case PCI_D1: + case PCI_D2: + if (pci_no_d1d2(dev)) + break; + default: + target_state = state; + } + } else if (device_may_wakeup(&dev->dev)) { + /* + * Find the deepest state from which the device can generate + * wake-up events, make it the target state and enable device + * to generate PME#. + */ + if (!dev->pm_cap) + return -EIO; + + if (dev->pme_support) { + while (target_state + && !(dev->pme_support & (1 << target_state))) + target_state--; + } } - pci_read_config_word(dev, pm + PCI_PM_CTRL, &value); + pci_enable_wake(dev, target_state, true); - /* Clear PME_Status by writing 1 to it and enable PME# */ - value |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE; + error = pci_set_power_state(dev, target_state); - if (!enable) - value &= ~PCI_PM_CTRL_PME_ENABLE; + if (error) + pci_enable_wake(dev, target_state, false); - pci_write_config_word(dev, pm + PCI_PM_CTRL, value); + return error; +} - return 0; +/** + * pci_back_from_sleep - turn PCI device on during system-wide transition into + * the working state + * @dev: Device to handle. + * + * Disable device's system wake-up capability and put it into D0. + */ +int pci_back_from_sleep(struct pci_dev *dev) +{ + pci_enable_wake(dev, PCI_D0, false); + return pci_set_power_state(dev, PCI_D0); +} + +/** + * pci_pm_init - Initialize PM functions of given PCI device + * @dev: PCI device to handle. 
+ */ +void pci_pm_init(struct pci_dev *dev) +{ + int pm; + u16 pmc; + + dev->pm_cap = 0; + + /* find PCI PM capability in list */ + pm = pci_find_capability(dev, PCI_CAP_ID_PM); + if (!pm) + return; + /* Check device's ability to generate PME# */ + pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc); + + if ((pmc & PCI_PM_CAP_VER_MASK) > 3) { + dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n", + pmc & PCI_PM_CAP_VER_MASK); + return; + } + + dev->pm_cap = pm; + + dev->d1_support = false; + dev->d2_support = false; + if (!pci_no_d1d2(dev)) { + if (pmc & PCI_PM_CAP_D1) { + dev_printk(KERN_DEBUG, &dev->dev, "supports D1\n"); + dev->d1_support = true; + } + if (pmc & PCI_PM_CAP_D2) { + dev_printk(KERN_DEBUG, &dev->dev, "supports D2\n"); + dev->d2_support = true; + } + } + + pmc &= PCI_PM_CAP_PME_MASK; + if (pmc) { + dev_printk(KERN_INFO, &dev->dev, + "PME# supported from%s%s%s%s%s\n", + (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "", + (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "", + (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "", + (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "", + (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : ""); + dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT; + /* + * Make device's PM flags reflect the wake-up capability, but + * let the user space enable it to wake up the system as needed. + */ + device_set_wakeup_capable(&dev->dev, true); + device_set_wakeup_enable(&dev->dev, false); + /* Disable the PME# generation functionality */ + pci_pme_active(dev, false); + } else { + dev->pme_support = 0; + } } int @@ -1116,13 +1338,11 @@ int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) return 0; err_out: - printk (KERN_WARNING "PCI: Unable to reserve %s region #%d:%llx@%llx " - "for device %s\n", - pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem", - bar + 1, /* PCI BAR # */ - (unsigned long long)pci_resource_len(pdev, bar), - (unsigned long long)pci_resource_start(pdev, bar), - pci_name(pdev)); + dev_warn(&pdev->dev, "BAR %d: can't reserve %s region [%#llx-%#llx]\n", + bar, + pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem", + (unsigned long long)pci_resource_start(pdev, bar), + (unsigned long long)pci_resource_end(pdev, bar)); return -EBUSY; } @@ -1214,7 +1434,7 @@ pci_set_master(struct pci_dev *dev) pci_read_config_word(dev, PCI_COMMAND, &cmd); if (! (cmd & PCI_COMMAND_MASTER)) { - pr_debug("PCI: Enabling bus mastering for device %s\n", pci_name(dev)); + dev_dbg(&dev->dev, "enabling bus mastering\n"); cmd |= PCI_COMMAND_MASTER; pci_write_config_word(dev, PCI_COMMAND, cmd); } @@ -1279,8 +1499,8 @@ pci_set_cacheline_size(struct pci_dev *dev) if (cacheline_size == pci_cache_line_size) return 0; - printk(KERN_DEBUG "PCI: cache line size of %d is not supported " - "by device %s\n", pci_cache_line_size << 2, pci_name(dev)); + dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not " + "supported\n", pci_cache_line_size << 2); return -EINVAL; } @@ -1305,8 +1525,7 @@ pci_set_mwi(struct pci_dev *dev) pci_read_config_word(dev, PCI_COMMAND, &cmd); if (! 
(cmd & PCI_COMMAND_INVALIDATE)) { - pr_debug("PCI: Enabling Mem-Wr-Inval for device %s\n", - pci_name(dev)); + dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n"); cmd |= PCI_COMMAND_INVALIDATE; pci_write_config_word(dev, PCI_COMMAND, cmd); } @@ -1702,5 +1921,7 @@ EXPORT_SYMBOL(pci_set_power_state); EXPORT_SYMBOL(pci_save_state); EXPORT_SYMBOL(pci_restore_state); EXPORT_SYMBOL(pci_enable_wake); +EXPORT_SYMBOL(pci_prepare_to_sleep); +EXPORT_SYMBOL(pci_back_from_sleep); EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state); diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 00408c97e5f..d807cd786f2 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -5,11 +5,36 @@ extern int pci_create_sysfs_dev_files(struct pci_dev *pdev); extern void pci_remove_sysfs_dev_files(struct pci_dev *pdev); extern void pci_cleanup_rom(struct pci_dev *dev); -/* Firmware callbacks */ -extern pci_power_t (*platform_pci_choose_state)(struct pci_dev *dev, - pm_message_t state); -extern int (*platform_pci_set_power_state)(struct pci_dev *dev, - pci_power_t state); +/** + * Firmware PM callbacks + * + * @is_manageable - returns 'true' if given device is power manageable by the + * platform firmware + * + * @set_state - invokes the platform firmware to set the device's power state + * + * @choose_state - returns PCI power state of given device preferred by the + * platform; to be used during system-wide transitions from a + * sleeping state to the working state and vice versa + * + * @can_wakeup - returns 'true' if given device is capable of waking up the + * system from a sleeping state + * + * @sleep_wake - enables/disables the system wake up capability of given device + * + * If given platform is generally capable of power managing PCI devices, all of + * these callbacks are mandatory. 
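These firmware callbacks are registered with the PCI core through pci_set_platform_pm(), declared in this header. A sketch of how a platform backend might fill in and register the operations; the acme_* functions are placeholders rather than code from this patch:

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include "pci.h"

static bool acme_pci_is_manageable(struct pci_dev *dev) { return false; }
static int acme_pci_set_state(struct pci_dev *dev, pci_power_t state) { return -ENODEV; }
static pci_power_t acme_pci_choose_state(struct pci_dev *dev) { return PCI_POWER_ERROR; }
static bool acme_pci_can_wakeup(struct pci_dev *dev) { return false; }
static int acme_pci_sleep_wake(struct pci_dev *dev, bool enable) { return -ENODEV; }

static struct pci_platform_pm_ops acme_pci_platform_pm = {
	.is_manageable	= acme_pci_is_manageable,
	.set_state	= acme_pci_set_state,
	.choose_state	= acme_pci_choose_state,
	.can_wakeup	= acme_pci_can_wakeup,
	.sleep_wake	= acme_pci_sleep_wake,
};

static int __init acme_pci_pm_init(void)
{
	return pci_set_platform_pm(&acme_pci_platform_pm);
}

Given the "all of these callbacks are mandatory" note, pci_set_platform_pm() presumably rejects an ops structure with any missing callback, so a backend should supply all five even if some are trivial.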
+ */ +struct pci_platform_pm_ops { + bool (*is_manageable)(struct pci_dev *dev); + int (*set_state)(struct pci_dev *dev, pci_power_t state); + pci_power_t (*choose_state)(struct pci_dev *dev); + bool (*can_wakeup)(struct pci_dev *dev); + int (*sleep_wake)(struct pci_dev *dev, bool enable); +}; + +extern int pci_set_platform_pm(struct pci_platform_pm_ops *ops); +extern void pci_pm_init(struct pci_dev *dev); extern int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val); extern int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val); @@ -106,3 +131,16 @@ pci_match_one_device(const struct pci_device_id *id, const struct pci_dev *dev) } struct pci_dev *pci_find_upstream_pcie_bridge(struct pci_dev *pdev); + +/* PCI slot sysfs helper code */ +#define to_pci_slot(s) container_of(s, struct pci_slot, kobj) + +extern struct kset *pci_slots_kset; + +struct pci_slot_attribute { + struct attribute attr; + ssize_t (*show)(struct pci_slot *, char *); + ssize_t (*store)(struct pci_slot *, const char *, size_t); +}; +#define to_pci_slot_attr(s) container_of(s, struct pci_slot_attribute, attr) + diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c index 07c3bdb6edc..77036f46acf 100644 --- a/drivers/pci/pcie/aer/aerdrv.c +++ b/drivers/pci/pcie/aer/aerdrv.c @@ -26,6 +26,7 @@ #include <linux/pcieport_if.h> #include "aerdrv.h" +#include "../../pci.h" /* * Version Information @@ -219,8 +220,7 @@ static int __devinit aer_probe (struct pcie_device *dev, /* Alloc rpc data structure */ if (!(rpc = aer_alloc_rpc(dev))) { - printk(KERN_DEBUG "%s: Alloc rpc fails on PCIE device[%s]\n", - __func__, device->bus_id); + dev_printk(KERN_DEBUG, device, "alloc rpc failed\n"); aer_remove(dev); return -ENOMEM; } @@ -228,8 +228,7 @@ static int __devinit aer_probe (struct pcie_device *dev, /* Request IRQ ISR */ if ((status = request_irq(dev->irq, aer_irq, IRQF_SHARED, "aerdrv", dev))) { - printk(KERN_DEBUG "%s: Request ISR fails on PCIE device[%s]\n", - __func__, device->bus_id); + dev_printk(KERN_DEBUG, device, "request IRQ failed\n"); aer_remove(dev); return status; } @@ -273,7 +272,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev) * to issue Configuration Requests to those devices. */ msleep(200); - printk(KERN_DEBUG "Complete link reset at Root[%s]\n", dev->dev.bus_id); + dev_printk(KERN_DEBUG, &dev->dev, "Root Port link has been reset\n"); /* Enable Root Port's interrupt in response to error messages */ pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &status); diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c index d39a78dbd02..30f581b8791 100644 --- a/drivers/pci/pcie/aer/aerdrv_acpi.c +++ b/drivers/pci/pcie/aer/aerdrv_acpi.c @@ -50,10 +50,10 @@ int aer_osc_setup(struct pcie_device *pciedev) } if (ACPI_FAILURE(status)) { - printk(KERN_DEBUG "AER service couldn't init device %s - %s\n", - pciedev->device.bus_id, - (status == AE_SUPPORT || status == AE_NOT_FOUND) ? - "no _OSC support" : "Run ACPI _OSC fails"); + dev_printk(KERN_DEBUG, &pciedev->device, "AER service couldn't " + "init device: %s\n", + (status == AE_SUPPORT || status == AE_NOT_FOUND) ? 
+ "no _OSC support" : "_OSC failed"); return -1; } diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c index aaa82392d1d..ee5e7b5176d 100644 --- a/drivers/pci/pcie/aer/aerdrv_core.c +++ b/drivers/pci/pcie/aer/aerdrv_core.c @@ -221,9 +221,9 @@ static void report_error_detected(struct pci_dev *dev, void *data) * of a driver for this device is unaware of * its hw state. */ - printk(KERN_DEBUG "Device ID[%s] has %s\n", - dev->dev.bus_id, (dev->driver) ? - "no AER-aware driver" : "no driver"); + dev_printk(KERN_DEBUG, &dev->dev, "device has %s\n", + dev->driver ? + "no AER-aware driver" : "no driver"); } return; } @@ -304,7 +304,7 @@ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev, { struct aer_broadcast_data result_data; - printk(KERN_DEBUG "Broadcast %s message\n", error_mesg); + dev_printk(KERN_DEBUG, &dev->dev, "broadcast %s message\n", error_mesg); result_data.state = state; if (cb == report_error_detected) result_data.result = PCI_ERS_RESULT_CAN_RECOVER; @@ -404,18 +404,16 @@ static pci_ers_result_t reset_link(struct pcie_device *aerdev, data.aer_driver = to_service_driver(aerdev->device.driver); } else { - printk(KERN_DEBUG "No link-reset support to Device ID" - "[%s]\n", - dev->dev.bus_id); + dev_printk(KERN_DEBUG, &dev->dev, "no link-reset " + "support\n"); return PCI_ERS_RESULT_DISCONNECT; } } status = data.aer_driver->reset_link(udev); if (status != PCI_ERS_RESULT_RECOVERED) { - printk(KERN_DEBUG "Link reset at upstream Device ID" - "[%s] failed\n", - udev->dev.bus_id); + dev_printk(KERN_DEBUG, &dev->dev, "link reset at upstream " + "device %s failed\n", pci_name(udev)); return PCI_ERS_RESULT_DISCONNECT; } @@ -511,10 +509,12 @@ static void handle_error_source(struct pcie_device * aerdev, } else { status = do_recovery(aerdev, dev, info.severity); if (status == PCI_ERS_RESULT_RECOVERED) { - printk(KERN_DEBUG "AER driver successfully recovered\n"); + dev_printk(KERN_DEBUG, &dev->dev, "AER driver " + "successfully recovered\n"); } else { /* TODO: Should kernel panic here? 
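The "no AER-aware driver" case above means the bound driver does not provide a struct pci_error_handlers. A minimal sketch of such a handler set for a hypothetical driver; the foo_* callbacks and their return values are illustrative:

#include <linux/pci.h>

static pci_ers_result_t foo_error_detected(struct pci_dev *pdev,
					   enum pci_channel_state state)
{
	/* quiesce the device, then ask the core for a reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t foo_slot_reset(struct pci_dev *pdev)
{
	/* re-initialize after the core has reset the link/slot */
	return PCI_ERS_RESULT_RECOVERED;
}

static void foo_resume(struct pci_dev *pdev)
{
	/* restart I/O */
}

static struct pci_error_handlers foo_err_handler = {
	.error_detected	= foo_error_detected,
	.slot_reset	= foo_slot_reset,
	.resume		= foo_resume,
};

A driver opts in by pointing the err_handler field of its struct pci_driver at a structure like this.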
*/ - printk(KERN_DEBUG "AER driver didn't recover\n"); + dev_printk(KERN_DEBUG, &dev->dev, "AER driver didn't " + "recover\n"); } } } diff --git a/drivers/pci/pcie/portdrv_bus.c b/drivers/pci/pcie/portdrv_bus.c index 3f0976868ed..359fe5568df 100644 --- a/drivers/pci/pcie/portdrv_bus.c +++ b/drivers/pci/pcie/portdrv_bus.c @@ -13,6 +13,7 @@ #include <linux/pm.h> #include <linux/pcieport_if.h> +#include "portdrv.h" static int pcie_port_bus_match(struct device *dev, struct device_driver *drv); static int pcie_port_bus_suspend(struct device *dev, pm_message_t state); diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index fb0abfa508d..890f0d2b370 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c @@ -23,20 +23,20 @@ static int pcie_port_probe_service(struct device *dev) { struct pcie_device *pciedev; struct pcie_port_service_driver *driver; - int status = -ENODEV; + int status; if (!dev || !dev->driver) - return status; + return -ENODEV; driver = to_service_driver(dev->driver); if (!driver || !driver->probe) - return status; + return -ENODEV; pciedev = to_pcie_device(dev); status = driver->probe(pciedev, driver->id_table); if (!status) { - printk(KERN_DEBUG "Load service driver %s on pcie device %s\n", - driver->name, dev->bus_id); + dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n", + driver->name); get_device(dev); } return status; @@ -53,8 +53,8 @@ static int pcie_port_remove_service(struct device *dev) pciedev = to_pcie_device(dev); driver = to_service_driver(dev->driver); if (driver && driver->remove) { - printk(KERN_DEBUG "Unload service driver %s on pcie device %s\n", - driver->name, dev->bus_id); + dev_printk(KERN_DEBUG, dev, "unloading service driver %s\n", + driver->name); driver->remove(pciedev); put_device(dev); } @@ -103,7 +103,7 @@ static int pcie_port_resume_service(struct device *dev) */ static void release_pcie_device(struct device *dev) { - printk(KERN_DEBUG "Free Port Service[%s]\n", dev->bus_id); + dev_printk(KERN_DEBUG, dev, "free port service\n"); kfree(to_pcie_device(dev)); } @@ -150,7 +150,7 @@ static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask) if (pos) { struct msix_entry msix_entries[PCIE_PORT_DEVICE_MAXSERVICES] = {{0, 0}, {0, 1}, {0, 2}, {0, 3}}; - printk("%s Found MSIX capability\n", __func__); + dev_info(&dev->dev, "found MSI-X capability\n"); status = pci_enable_msix(dev, msix_entries, nvec); if (!status) { int j = 0; @@ -165,7 +165,7 @@ static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask) if (status) { pos = pci_find_capability(dev, PCI_CAP_ID_MSI); if (pos) { - printk("%s Found MSI capability\n", __func__); + dev_info(&dev->dev, "found MSI capability\n"); status = pci_enable_msi(dev); if (!status) { interrupt_mode = PCIE_PORT_MSI_MODE; @@ -252,7 +252,7 @@ static struct pcie_device* alloc_pcie_device(struct pci_dev *parent, return NULL; pcie_device_init(parent, device, port_type, service_type, irq,irq_mode); - printk(KERN_DEBUG "Allocate Port Service[%s]\n", device->device.bus_id); + dev_printk(KERN_DEBUG, &device->device, "allocate port service\n"); return device; } diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 51d163238d9..367c9c20000 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c @@ -91,9 +91,8 @@ static int __devinit pcie_portdrv_probe (struct pci_dev *dev, pci_set_master(dev); if (!dev->irq && dev->pin) { - printk(KERN_WARNING - "%s->Dev[%04x:%04x] has invalid IRQ. 
Check vendor BIOS\n", - __func__, dev->vendor, dev->device); + dev_warn(&dev->dev, "device [%04x/%04x] has invalid IRQ; " + "check vendor BIOS\n", dev->vendor, dev->device); } if (pcie_port_device_register(dev)) { pci_disable_device(dev); diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 3706ce7972d..b1724cf31b6 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -277,8 +277,8 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) res->end = res->start + sz64; #else if (sz64 > 0x100000000ULL) { - printk(KERN_ERR "PCI: Unable to handle 64-bit " - "BAR for device %s\n", pci_name(dev)); + dev_err(&dev->dev, "BAR %d: can't handle 64-bit" + " BAR\n", pos); res->start = 0; res->flags = 0; } else if (lhi) { @@ -329,7 +329,7 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child) return; if (dev->transparent) { - printk(KERN_INFO "PCI: Transparent bridge - %s\n", pci_name(dev)); + dev_info(&dev->dev, "transparent bridge\n"); for(i = 3; i < PCI_BUS_NUM_RESOURCES; i++) child->resource[i] = child->parent->resource[i - 3]; } @@ -392,7 +392,8 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child) limit |= ((long) mem_limit_hi) << 32; #else if (mem_base_hi || mem_limit_hi) { - printk(KERN_ERR "PCI: Unable to handle 64-bit address space for bridge %s\n", pci_name(dev)); + dev_err(&dev->dev, "can't handle 64-bit " + "address space for bridge\n"); return; } #endif @@ -414,6 +415,7 @@ static struct pci_bus * pci_alloc_bus(void) INIT_LIST_HEAD(&b->node); INIT_LIST_HEAD(&b->children); INIT_LIST_HEAD(&b->devices); + INIT_LIST_HEAD(&b->slots); } return b; } @@ -511,8 +513,8 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses); - pr_debug("PCI: Scanning behind PCI bridge %s, config %06x, pass %d\n", - pci_name(dev), buses & 0xffffff, pass); + dev_dbg(&dev->dev, "scanning behind bridge, config %06x, pass %d\n", + buses & 0xffffff, pass); /* Disable MasterAbortMode during probing to avoid reporting of bus errors (in some architectures) */ @@ -535,8 +537,8 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, * ignore it. This can happen with the i450NX chipset. 
*/ if (pci_find_bus(pci_domain_nr(bus), busnr)) { - printk(KERN_INFO "PCI: Bus %04x:%02x already known\n", - pci_domain_nr(bus), busnr); + dev_info(&dev->dev, "bus %04x:%02x already known\n", + pci_domain_nr(bus), busnr); goto out; } @@ -711,8 +713,9 @@ static int pci_setup_device(struct pci_dev * dev) { u32 class; - sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus), - dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn)); + dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus), + dev->bus->number, PCI_SLOT(dev->devfn), + PCI_FUNC(dev->devfn)); pci_read_config_dword(dev, PCI_CLASS_REVISION, &class); dev->revision = class & 0xff; @@ -720,7 +723,7 @@ static int pci_setup_device(struct pci_dev * dev) dev->class = class; class >>= 8; - pr_debug("PCI: Found %s [%04x/%04x] %06x %02x\n", pci_name(dev), + dev_dbg(&dev->dev, "found [%04x/%04x] class %06x header type %02x\n", dev->vendor, dev->device, class, dev->hdr_type); /* "Unknown power state" */ @@ -788,13 +791,13 @@ static int pci_setup_device(struct pci_dev * dev) break; default: /* unknown header */ - printk(KERN_ERR "PCI: device %s has unknown header type %02x, ignoring.\n", - pci_name(dev), dev->hdr_type); + dev_err(&dev->dev, "unknown header type %02x, " + "ignoring device\n", dev->hdr_type); return -1; bad: - printk(KERN_ERR "PCI: %s: class %x doesn't match header type %02x. Ignoring class.\n", - pci_name(dev), class, dev->hdr_type); + dev_err(&dev->dev, "ignoring class %02x (doesn't match header " + "type %02x)\n", class, dev->hdr_type); dev->class = PCI_CLASS_NOT_DEFINED; } @@ -927,7 +930,7 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn) return NULL; /* Card hasn't responded in 60 seconds? Must be stuck. */ if (delay > 60 * 1000) { - printk(KERN_WARNING "Device %04x:%02x:%02x.%d not " + printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not " "responding\n", pci_domain_nr(bus), bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn)); @@ -984,6 +987,9 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) /* Fix up broken headers */ pci_fixup_device(pci_fixup_header, dev); + /* Initialize power management of the device */ + pci_pm_init(dev); + /* * Add the device to our list of discovered devices * and the bus list for fixup functions, etc. diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c index 963a97642ae..4400dffbd93 100644 --- a/drivers/pci/proc.c +++ b/drivers/pci/proc.c @@ -1,6 +1,4 @@ /* - * $Id: proc.c,v 1.13 1998/05/12 07:36:07 mj Exp $ - * * Procfs interface for the PCI bus. * * Copyright (c) 1997--1999 Martin Mares <mj@ucw.cz> @@ -482,5 +480,5 @@ static int __init pci_proc_init(void) return 0; } -__initcall(pci_proc_init); +device_initcall(pci_proc_init); diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 338a3f94b4d..12d489395fa 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -556,7 +556,7 @@ static void quirk_via_ioapic(struct pci_dev *dev) pci_write_config_byte (dev, 0x58, tmp); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic); /* * VIA 8237: Some BIOSs don't set the 'Bypass APIC De-Assert Message' Bit. 
@@ -576,7 +576,7 @@ static void quirk_via_vt8237_bypass_apic_deassert(struct pci_dev *dev) } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert); /* * The AMD io apic can hang the box when an apic irq is masked. @@ -622,7 +622,7 @@ static void quirk_amd_8131_ioapic(struct pci_dev *dev) } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_ioapic); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_ioapic); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_ioapic); #endif /* CONFIG_X86_IO_APIC */ /* @@ -774,7 +774,7 @@ static void quirk_cardbus_legacy(struct pci_dev *dev) pci_write_config_dword(dev, PCI_CB_LEGACY_MODE_BASE, 0); } DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_cardbus_legacy); -DECLARE_PCI_FIXUP_RESUME(PCI_ANY_ID, PCI_ANY_ID, quirk_cardbus_legacy); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_ANY_ID, PCI_ANY_ID, quirk_cardbus_legacy); /* * Following the PCI ordering rules is optional on the AMD762. I'm not @@ -797,7 +797,7 @@ static void quirk_amd_ordering(struct pci_dev *dev) } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering); /* * DreamWorks provided workaround for Dunord I-3000 problem @@ -865,7 +865,7 @@ static void quirk_disable_pxb(struct pci_dev *pdev) } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb); static void __devinit quirk_amd_ide_mode(struct pci_dev *pdev) { @@ -885,9 +885,9 @@ static void __devinit quirk_amd_ide_mode(struct pci_dev *pdev) } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode); /* * Serverworks CSB5 IDE does not fully support native mode @@ -1054,6 +1054,20 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev) * its on-board VGA controller */ asus_hides_smbus = 1; } + else if (dev->device == PCI_DEVICE_ID_INTEL_82845G_IG) + switch(dev->subsystem_device) { + case 0x00b8: /* Compaq Evo D510 CMT */ + case 0x00b9: /* Compaq Evo D510 SFF */ + asus_hides_smbus = 1; + } + else if (dev->device == PCI_DEVICE_ID_INTEL_82815_CGC) + switch (dev->subsystem_device) { + case 0x001A: /* Compaq Deskpro EN SSF P667 815E */ + /* Motherboard doesn't have host bridge + * subvendor/subdevice IDs, therefore checking + * its on-board VGA 
controller */ + asus_hides_smbus = 1; + } } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845_HB, asus_hides_smbus_hostbridge); @@ -1068,6 +1082,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855GM_HB, as DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82810_IG3, asus_hides_smbus_hostbridge); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845G_IG, asus_hides_smbus_hostbridge); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC, asus_hides_smbus_hostbridge); static void asus_hides_smbus_lpc(struct pci_dev *dev) { @@ -1093,31 +1109,61 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asu DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, asus_hides_smbus_lpc); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, asus_hides_smbus_lpc); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc); -static void asus_hides_smbus_lpc_ich6(struct pci_dev *dev) +/* It appears we just have one such device. 
If not, we have a warning */ +static void __iomem *asus_rcba_base; +static void asus_hides_smbus_lpc_ich6_suspend(struct pci_dev *dev) { - u32 val, rcba; - void __iomem *base; + u32 rcba; if (likely(!asus_hides_smbus)) return; + WARN_ON(asus_rcba_base); + pci_read_config_dword(dev, 0xF0, &rcba); - base = ioremap_nocache(rcba & 0xFFFFC000, 0x4000); /* use bits 31:14, 16 kB aligned */ - if (base == NULL) return; - val=readl(base + 0x3418); /* read the Function Disable register, dword mode only */ - writel(val & 0xFFFFFFF7, base + 0x3418); /* enable the SMBus device */ - iounmap(base); + /* use bits 31:14, 16 kB aligned */ + asus_rcba_base = ioremap_nocache(rcba & 0xFFFFC000, 0x4000); + if (asus_rcba_base == NULL) + return; +} + +static void asus_hides_smbus_lpc_ich6_resume_early(struct pci_dev *dev) +{ + u32 val; + + if (likely(!asus_hides_smbus || !asus_rcba_base)) + return; + /* read the Function Disable register, dword mode only */ + val = readl(asus_rcba_base + 0x3418); + writel(val & 0xFFFFFFF7, asus_rcba_base + 0x3418); /* enable the SMBus device */ +} + +static void asus_hides_smbus_lpc_ich6_resume(struct pci_dev *dev) +{ + if (likely(!asus_hides_smbus || !asus_rcba_base)) + return; + iounmap(asus_rcba_base); + asus_rcba_base = NULL; dev_info(&dev->dev, "Enabled ICH6/i801 SMBus device\n"); } + +static void asus_hides_smbus_lpc_ich6(struct pci_dev *dev) +{ + asus_hides_smbus_lpc_ich6_suspend(dev); + asus_hides_smbus_lpc_ich6_resume_early(dev); + asus_hides_smbus_lpc_ich6_resume(dev); +} DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6); +DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_suspend); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume_early); /* * SiS 96x south bridge: BIOS typically hides SMBus device... @@ -1135,10 +1181,10 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus); /* * ... 
This is further complicated by the fact that some SiS96x south @@ -1172,7 +1218,7 @@ static void quirk_sis_503(struct pci_dev *dev) quirk_sis_96x_smbus(dev); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503); /* @@ -1205,7 +1251,7 @@ static void asus_hides_ac97_lpc(struct pci_dev *dev) } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc); #if defined(CONFIG_ATA) || defined(CONFIG_ATA_MODULE) @@ -1270,12 +1316,12 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, qui DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata); #endif @@ -1521,6 +1567,10 @@ extern struct pci_fixup __start_pci_fixups_enable[]; extern struct pci_fixup __end_pci_fixups_enable[]; extern struct pci_fixup __start_pci_fixups_resume[]; extern struct pci_fixup __end_pci_fixups_resume[]; +extern struct pci_fixup __start_pci_fixups_resume_early[]; +extern struct pci_fixup __end_pci_fixups_resume_early[]; +extern struct pci_fixup __start_pci_fixups_suspend[]; +extern struct pci_fixup __end_pci_fixups_suspend[]; void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) @@ -1553,6 +1603,16 @@ void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) end = __end_pci_fixups_resume; break; + case pci_fixup_resume_early: + start = __start_pci_fixups_resume_early; + end = __end_pci_fixups_resume_early; + break; + + case pci_fixup_suspend: + start = __start_pci_fixups_suspend; + end = __end_pci_fixups_suspend; + break; + default: /* stupid compiler warning, you would think with an enum... 
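With the two new fixup tables wired into pci_fixup_device() above, a quirk can now run in the suspend and early-resume phases as well. A sketch of hooking both phases for one device; the device ID and the quirk body are placeholders:

#include <linux/pci.h>

static void foo_chipset_quirk(struct pci_dev *dev)
{
	/* re-program a register the BIOS clobbers across suspend/resume */
}
DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_INTEL, 0x1234, foo_chipset_quirk);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, 0x1234, foo_chipset_quirk);

The PCI core is then expected to call pci_fixup_device(pci_fixup_suspend, dev) before putting the device to sleep and pci_fixup_device(pci_fixup_resume_early, dev) before the ordinary resume fixups run.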
*/ return; @@ -1629,7 +1689,7 @@ static void quirk_nvidia_ck804_pcie_aer_ext_cap(struct pci_dev *dev) } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE, quirk_nvidia_ck804_pcie_aer_ext_cap); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE, +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE, quirk_nvidia_ck804_pcie_aer_ext_cap); static void __devinit quirk_via_cx700_pci_parking_caching(struct pci_dev *dev) diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 8ddb918f5f5..827c0a520e2 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -27,13 +27,6 @@ #include <linux/slab.h> -#define DEBUG_CONFIG 1 -#if DEBUG_CONFIG -#define DBG(x...) printk(x) -#else -#define DBG(x...) -#endif - static void pbus_assign_resources_sorted(struct pci_bus *bus) { struct pci_dev *dev; @@ -81,8 +74,8 @@ void pci_setup_cardbus(struct pci_bus *bus) struct pci_dev *bridge = bus->self; struct pci_bus_region region; - printk("PCI: Bus %d, cardbus bridge: %s\n", - bus->number, pci_name(bridge)); + dev_info(&bridge->dev, "CardBus bridge, secondary bus %04x:%02x\n", + pci_domain_nr(bus), bus->number); pcibios_resource_to_bus(bridge, ®ion, bus->resource[0]); if (bus->resource[0]->flags & IORESOURCE_IO) { @@ -90,7 +83,7 @@ void pci_setup_cardbus(struct pci_bus *bus) * The IO resource is allocated a range twice as large as it * would normally need. This allows us to set both IO regs. */ - printk(KERN_INFO " IO window: 0x%08lx-0x%08lx\n", + dev_info(&bridge->dev, " IO window: %#08lx-%#08lx\n", (unsigned long)region.start, (unsigned long)region.end); pci_write_config_dword(bridge, PCI_CB_IO_BASE_0, @@ -101,7 +94,7 @@ void pci_setup_cardbus(struct pci_bus *bus) pcibios_resource_to_bus(bridge, ®ion, bus->resource[1]); if (bus->resource[1]->flags & IORESOURCE_IO) { - printk(KERN_INFO " IO window: 0x%08lx-0x%08lx\n", + dev_info(&bridge->dev, " IO window: %#08lx-%#08lx\n", (unsigned long)region.start, (unsigned long)region.end); pci_write_config_dword(bridge, PCI_CB_IO_BASE_1, @@ -112,7 +105,7 @@ void pci_setup_cardbus(struct pci_bus *bus) pcibios_resource_to_bus(bridge, ®ion, bus->resource[2]); if (bus->resource[2]->flags & IORESOURCE_MEM) { - printk(KERN_INFO " PREFETCH window: 0x%08lx-0x%08lx\n", + dev_info(&bridge->dev, " PREFETCH window: %#08lx-%#08lx\n", (unsigned long)region.start, (unsigned long)region.end); pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0, @@ -123,7 +116,7 @@ void pci_setup_cardbus(struct pci_bus *bus) pcibios_resource_to_bus(bridge, ®ion, bus->resource[3]); if (bus->resource[3]->flags & IORESOURCE_MEM) { - printk(KERN_INFO " MEM window: 0x%08lx-0x%08lx\n", + dev_info(&bridge->dev, " MEM window: %#08lx-%#08lx\n", (unsigned long)region.start, (unsigned long)region.end); pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1, @@ -151,7 +144,8 @@ static void pci_setup_bridge(struct pci_bus *bus) struct pci_bus_region region; u32 l, bu, lu, io_upper16; - DBG(KERN_INFO "PCI: Bridge: %s\n", pci_name(bridge)); + dev_info(&bridge->dev, "PCI bridge, secondary bus %04x:%02x\n", + pci_domain_nr(bus), bus->number); /* Set up the top and bottom of the PCI I/O segment for this bus. */ pcibios_resource_to_bus(bridge, ®ion, bus->resource[0]); @@ -162,7 +156,7 @@ static void pci_setup_bridge(struct pci_bus *bus) l |= region.end & 0xf000; /* Set up upper 16 bits of I/O base/limit. 
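The register packing in pci_setup_bridge() is easier to follow with concrete numbers; a worked example with values chosen purely for illustration:

/*
 * Example: I/O window 0x4000-0x4fff behind the bridge.
 *
 *   region.end & 0xf000                      = 0x4000
 *       -> I/O limit byte 0x40 (address bits 15:12), i.e. limit 0x4fff
 *   io_upper16 = (region.end & 0xffff0000)
 *              | (region.start >> 16)        = 0x0000
 *       -> no 32-bit I/O addressing required
 *
 * The I/O base byte encodes address bits 15:12 of region.start the same
 * way, and the memory window below packs address bits 31:20 of base and
 * limit into the PCI_MEMORY_BASE dword analogously.
 */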
*/ io_upper16 = (region.end & 0xffff0000) | (region.start >> 16); - DBG(KERN_INFO " IO window: %04lx-%04lx\n", + dev_info(&bridge->dev, " IO window: %#04lx-%#04lx\n", (unsigned long)region.start, (unsigned long)region.end); } @@ -170,7 +164,7 @@ static void pci_setup_bridge(struct pci_bus *bus) /* Clear upper 16 bits of I/O base/limit. */ io_upper16 = 0; l = 0x00f0; - DBG(KERN_INFO " IO window: disabled.\n"); + dev_info(&bridge->dev, " IO window: disabled\n"); } /* Temporarily disable the I/O range before updating PCI_IO_BASE. */ pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff); @@ -185,13 +179,13 @@ static void pci_setup_bridge(struct pci_bus *bus) if (bus->resource[1]->flags & IORESOURCE_MEM) { l = (region.start >> 16) & 0xfff0; l |= region.end & 0xfff00000; - DBG(KERN_INFO " MEM window: 0x%08lx-0x%08lx\n", + dev_info(&bridge->dev, " MEM window: %#08lx-%#08lx\n", (unsigned long)region.start, (unsigned long)region.end); } else { l = 0x0000fff0; - DBG(KERN_INFO " MEM window: disabled.\n"); + dev_info(&bridge->dev, " MEM window: disabled\n"); } pci_write_config_dword(bridge, PCI_MEMORY_BASE, l); @@ -208,13 +202,13 @@ static void pci_setup_bridge(struct pci_bus *bus) l |= region.end & 0xfff00000; bu = upper_32_bits(region.start); lu = upper_32_bits(region.end); - DBG(KERN_INFO " PREFETCH window: 0x%016llx-0x%016llx\n", + dev_info(&bridge->dev, " PREFETCH window: %#016llx-%#016llx\n", (unsigned long long)region.start, (unsigned long long)region.end); } else { l = 0x0000fff0; - DBG(KERN_INFO " PREFETCH window: disabled.\n"); + dev_info(&bridge->dev, " PREFETCH window: disabled\n"); } pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l); @@ -361,9 +355,8 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long align = (i < PCI_BRIDGE_RESOURCES) ? r_size : r->start; order = __ffs(align) - 20; if (order > 11) { - printk(KERN_WARNING "PCI: region %s/%d " - "too large: 0x%016llx-0x%016llx\n", - pci_name(dev), i, + dev_warn(&dev->dev, "BAR %d too large: " + "%#016llx-%#016llx\n", i, (unsigned long long)r->start, (unsigned long long)r->end); r->flags = 0; @@ -529,8 +522,8 @@ void __ref pci_bus_assign_resources(struct pci_bus *bus) break; default: - printk(KERN_INFO "PCI: not setting up bridge %s " - "for bus %d\n", pci_name(dev), b->number); + dev_info(&dev->dev, "not setting up bridge for bus " + "%04x:%02x\n", pci_domain_nr(b), b->number); break; } } diff --git a/drivers/pci/setup-irq.c b/drivers/pci/setup-irq.c index 05ca2ed9eb5..aa795fd428d 100644 --- a/drivers/pci/setup-irq.c +++ b/drivers/pci/setup-irq.c @@ -47,8 +47,7 @@ pdev_fixup_irq(struct pci_dev *dev, } dev->irq = irq; - pr_debug("PCI: fixup irq: (%s) got %d\n", - kobject_name(&dev->dev.kobj), dev->irq); + dev_dbg(&dev->dev, "fixup irq: got %d\n", dev->irq); /* Always tell the device, so the driver knows what is the real IRQ to use; the device does not use it. */ diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index 7d35cdf4579..1a5fc83c71b 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c @@ -26,8 +26,7 @@ #include "pci.h" -void -pci_update_resource(struct pci_dev *dev, struct resource *res, int resno) +void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno) { struct pci_bus_region region; u32 new, check, mask; @@ -43,20 +42,20 @@ pci_update_resource(struct pci_dev *dev, struct resource *res, int resno) /* * Ignore non-moveable resources. 
This might be legacy resources for * which no functional BAR register exists or another important - * system resource we should better not move around in system address - * space. + * system resource we shouldn't move around. */ if (res->flags & IORESOURCE_PCI_FIXED) return; pcibios_resource_to_bus(dev, ®ion, res); - pr_debug(" got res [%llx:%llx] bus [%llx:%llx] flags %lx for " - "BAR %d of %s\n", (unsigned long long)res->start, + dev_dbg(&dev->dev, "BAR %d: got res [%#llx-%#llx] bus [%#llx-%#llx] " + "flags %#lx\n", resno, + (unsigned long long)res->start, (unsigned long long)res->end, (unsigned long long)region.start, (unsigned long long)region.end, - (unsigned long)res->flags, resno, pci_name(dev)); + (unsigned long)res->flags); new = region.start | (res->flags & PCI_REGION_FLAG_MASK); if (res->flags & IORESOURCE_IO) @@ -81,9 +80,8 @@ pci_update_resource(struct pci_dev *dev, struct resource *res, int resno) pci_read_config_dword(dev, reg, &check); if ((new ^ check) & mask) { - printk(KERN_ERR "PCI: Error while updating region " - "%s/%d (%08x != %08x)\n", pci_name(dev), resno, - new, check); + dev_err(&dev->dev, "BAR %d: error updating (%#08x != %#08x)\n", + resno, new, check); } if ((new & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) == @@ -92,15 +90,14 @@ pci_update_resource(struct pci_dev *dev, struct resource *res, int resno) pci_write_config_dword(dev, reg + 4, new); pci_read_config_dword(dev, reg + 4, &check); if (check != new) { - printk(KERN_ERR "PCI: Error updating region " - "%s/%d (high %08x != %08x)\n", - pci_name(dev), resno, new, check); + dev_err(&dev->dev, "BAR %d: error updating " + "(high %#08x != %#08x)\n", resno, new, check); } } res->flags &= ~IORESOURCE_UNSET; - pr_debug("PCI: moved device %s resource %d (%lx) to %x\n", - pci_name(dev), resno, res->flags, - new & ~PCI_REGION_FLAG_MASK); + dev_dbg(&dev->dev, "BAR %d: moved to bus [%#llx-%#llx] flags %#lx\n", + resno, (unsigned long long)region.start, + (unsigned long long)region.end, res->flags); } int pci_claim_resource(struct pci_dev *dev, int resource) @@ -117,10 +114,11 @@ int pci_claim_resource(struct pci_dev *dev, int resource) err = insert_resource(root, res); if (err) { - printk(KERN_ERR "PCI: %s region %d of %s %s [%llx:%llx]\n", - root ? "Address space collision on" : - "No parent found for", - resource, dtype, pci_name(dev), + dev_err(&dev->dev, "BAR %d: %s of %s [%#llx-%#llx]\n", + resource, + root ? "address space collision on" : + "no parent found for", + dtype, (unsigned long long)res->start, (unsigned long long)res->end); } @@ -140,11 +138,10 @@ int pci_assign_resource(struct pci_dev *dev, int resno) align = resource_alignment(res); if (!align) { - printk(KERN_ERR "PCI: Cannot allocate resource (bogus " - "alignment) %d [%llx:%llx] (flags %lx) of %s\n", + dev_err(&dev->dev, "BAR %d: can't allocate resource (bogus " + "alignment) [%#llx-%#llx] flags %#lx\n", resno, (unsigned long long)res->start, - (unsigned long long)res->end, res->flags, - pci_name(dev)); + (unsigned long long)res->end, res->flags); return -EINVAL; } @@ -165,11 +162,11 @@ int pci_assign_resource(struct pci_dev *dev, int resno) } if (ret) { - printk(KERN_ERR "PCI: Failed to allocate %s resource " - "#%d:%llx@%llx for %s\n", + dev_err(&dev->dev, "BAR %d: can't allocate %s resource " + "[%#llx-%#llx]\n", resno, res->flags & IORESOURCE_IO ? 
"I/O" : "mem", - resno, (unsigned long long)size, - (unsigned long long)res->start, pci_name(dev)); + (unsigned long long)res->start, + (unsigned long long)res->end); } else { res->flags &= ~IORESOURCE_STARTALIGN; if (resno < PCI_BRIDGE_RESOURCES) @@ -205,11 +202,11 @@ int pci_assign_resource_fixed(struct pci_dev *dev, int resno) } if (ret) { - printk(KERN_ERR "PCI: Failed to allocate %s resource " - "#%d:%llx@%llx for %s\n", + dev_err(&dev->dev, "BAR %d: can't allocate %s resource " + "[%#llx-%#llx\n]", resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", - resno, (unsigned long long)(res->end - res->start + 1), - (unsigned long long)res->start, pci_name(dev)); + (unsigned long long)res->start, + (unsigned long long)res->end); } else if (resno < PCI_BRIDGE_RESOURCES) { pci_update_resource(dev, res, resno); } @@ -239,11 +236,10 @@ void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head) r_align = resource_alignment(r); if (!r_align) { - printk(KERN_WARNING "PCI: bogus alignment of resource " - "%d [%llx:%llx] (flags %lx) of %s\n", + dev_warn(&dev->dev, "BAR %d: bogus alignment " + "[%#llx-%#llx] flags %#lx\n", i, (unsigned long long)r->start, - (unsigned long long)r->end, r->flags, - pci_name(dev)); + (unsigned long long)r->end, r->flags); continue; } for (list = head; ; list = list->next) { @@ -291,7 +287,7 @@ int pci_enable_resources(struct pci_dev *dev, int mask) if (!r->parent) { dev_err(&dev->dev, "device not available because of " - "BAR %d [%llx:%llx] collisions\n", i, + "BAR %d [%#llx-%#llx] collisions\n", i, (unsigned long long) r->start, (unsigned long long) r->end); return -EINVAL; diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c new file mode 100644 index 00000000000..7e5b85cbd94 --- /dev/null +++ b/drivers/pci/slot.c @@ -0,0 +1,233 @@ +/* + * drivers/pci/slot.c + * Copyright (C) 2006 Matthew Wilcox <matthew@wil.cx> + * Copyright (C) 2006-2008 Hewlett-Packard Development Company, L.P. + * Alex Chiang <achiang@hp.com> + */ + +#include <linux/kobject.h> +#include <linux/pci.h> +#include <linux/err.h> +#include "pci.h" + +struct kset *pci_slots_kset; +EXPORT_SYMBOL_GPL(pci_slots_kset); + +static ssize_t pci_slot_attr_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct pci_slot *slot = to_pci_slot(kobj); + struct pci_slot_attribute *attribute = to_pci_slot_attr(attr); + return attribute->show ? attribute->show(slot, buf) : -EIO; +} + +static ssize_t pci_slot_attr_store(struct kobject *kobj, + struct attribute *attr, const char *buf, size_t len) +{ + struct pci_slot *slot = to_pci_slot(kobj); + struct pci_slot_attribute *attribute = to_pci_slot_attr(attr); + return attribute->store ? 
attribute->store(slot, buf, len) : -EIO; +} + +static struct sysfs_ops pci_slot_sysfs_ops = { + .show = pci_slot_attr_show, + .store = pci_slot_attr_store, +}; + +static ssize_t address_read_file(struct pci_slot *slot, char *buf) +{ + if (slot->number == 0xff) + return sprintf(buf, "%04x:%02x\n", + pci_domain_nr(slot->bus), + slot->bus->number); + else + return sprintf(buf, "%04x:%02x:%02x\n", + pci_domain_nr(slot->bus), + slot->bus->number, + slot->number); +} + +static void pci_slot_release(struct kobject *kobj) +{ + struct pci_slot *slot = to_pci_slot(kobj); + + pr_debug("%s: releasing pci_slot on %x:%d\n", __func__, + slot->bus->number, slot->number); + + list_del(&slot->list); + + kfree(slot); +} + +static struct pci_slot_attribute pci_slot_attr_address = + __ATTR(address, (S_IFREG | S_IRUGO), address_read_file, NULL); + +static struct attribute *pci_slot_default_attrs[] = { + &pci_slot_attr_address.attr, + NULL, +}; + +static struct kobj_type pci_slot_ktype = { + .sysfs_ops = &pci_slot_sysfs_ops, + .release = &pci_slot_release, + .default_attrs = pci_slot_default_attrs, +}; + +/** + * pci_create_slot - create or increment refcount for physical PCI slot + * @parent: struct pci_bus of parent bridge + * @slot_nr: PCI_SLOT(pci_dev->devfn) or -1 for placeholder + * @name: user visible string presented in /sys/bus/pci/slots/<name> + * + * PCI slots have first class attributes such as address, speed, width, + * and a &struct pci_slot is used to manage them. This interface will + * either return a new &struct pci_slot to the caller, or if the pci_slot + * already exists, its refcount will be incremented. + * + * Slots are uniquely identified by a @pci_bus, @slot_nr, @name tuple. + * + * Placeholder slots: + * In most cases, @pci_bus, @slot_nr will be sufficient to uniquely identify + * a slot. There is one notable exception - pSeries (rpaphp), where the + * @slot_nr cannot be determined until a device is actually inserted into + * the slot. In this scenario, the caller may pass -1 for @slot_nr. + * + * The following semantics are imposed when the caller passes @slot_nr == + * -1. First, the check for existing %struct pci_slot is skipped, as the + * caller may know about several unpopulated slots on a given %struct + * pci_bus, and each slot would have a @slot_nr of -1. Uniqueness for + * these slots is then determined by the @name parameter. We expect + * kobject_init_and_add() to warn us if the caller attempts to create + * multiple slots with the same name. The other change in semantics is + * user-visible, which is the 'address' parameter presented in sysfs will + * consist solely of a dddd:bb tuple, where dddd is the PCI domain of the + * %struct pci_bus and bb is the bus number. In other words, the devfn of + * the 'placeholder' slot will not be displayed. + */ + +struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr, + const char *name) +{ + struct pci_slot *slot; + int err; + + down_write(&pci_bus_sem); + + if (slot_nr == -1) + goto placeholder; + + /* If we've already created this slot, bump refcount and return. 
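Because pci_slot_attr_show()/pci_slot_attr_store() dispatch through struct pci_slot_attribute, an additional per-slot file only needs another attribute definition. A sketch of adding one; the attribute name and its contents are invented for illustration:

#include <linux/kernel.h>
#include <linux/sysfs.h>
#include <linux/pci.h>
#include "pci.h"

static ssize_t number_read_file(struct pci_slot *slot, char *buf)
{
	return sprintf(buf, "%d\n", slot->number);
}

static struct pci_slot_attribute pci_slot_attr_number =
	__ATTR(number, S_IFREG | S_IRUGO, number_read_file, NULL);

/* After obtaining a slot, e.g. from pci_create_slot():
 *	sysfs_create_file(&slot->kobj, &pci_slot_attr_number.attr);
 */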
*/ + list_for_each_entry(slot, &parent->slots, list) { + if (slot->number == slot_nr) { + kobject_get(&slot->kobj); + pr_debug("%s: inc refcount to %d on %04x:%02x:%02x\n", + __func__, + atomic_read(&slot->kobj.kref.refcount), + pci_domain_nr(parent), parent->number, + slot_nr); + goto out; + } + } + +placeholder: + slot = kzalloc(sizeof(*slot), GFP_KERNEL); + if (!slot) { + slot = ERR_PTR(-ENOMEM); + goto out; + } + + slot->bus = parent; + slot->number = slot_nr; + + slot->kobj.kset = pci_slots_kset; + err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL, + "%s", name); + if (err) { + printk(KERN_ERR "Unable to register kobject %s\n", name); + goto err; + } + + INIT_LIST_HEAD(&slot->list); + list_add(&slot->list, &parent->slots); + + /* Don't care if debug printk has a -1 for slot_nr */ + pr_debug("%s: created pci_slot on %04x:%02x:%02x\n", + __func__, pci_domain_nr(parent), parent->number, slot_nr); + + out: + up_write(&pci_bus_sem); + return slot; + err: + kfree(slot); + slot = ERR_PTR(err); + goto out; +} +EXPORT_SYMBOL_GPL(pci_create_slot); + +/** + * pci_update_slot_number - update %struct pci_slot -> number + * @slot - %struct pci_slot to update + * @slot_nr - new number for slot + * + * The primary purpose of this interface is to allow callers who earlier + * created a placeholder slot in pci_create_slot() by passing a -1 as + * slot_nr, to update their %struct pci_slot with the correct @slot_nr. + */ + +void pci_update_slot_number(struct pci_slot *slot, int slot_nr) +{ + int name_count = 0; + struct pci_slot *tmp; + + down_write(&pci_bus_sem); + + list_for_each_entry(tmp, &slot->bus->slots, list) { + WARN_ON(tmp->number == slot_nr); + if (!strcmp(kobject_name(&tmp->kobj), kobject_name(&slot->kobj))) + name_count++; + } + + if (name_count > 1) + printk(KERN_WARNING "pci_update_slot_number found %d slots with the same name: %s\n", name_count, kobject_name(&slot->kobj)); + + slot->number = slot_nr; + up_write(&pci_bus_sem); +} +EXPORT_SYMBOL_GPL(pci_update_slot_number); + +/** + * pci_destroy_slot - decrement refcount for physical PCI slot + * @slot: struct pci_slot to decrement + * + * %struct pci_slot is refcounted, so destroying them is really easy; we + * just call kobject_put on its kobj and let our release methods do the + * rest. 
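Putting the slot interface together, a sketch of how a hotplug driver might use the placeholder semantics described above; the foo_* functions and the slot name are illustrative:

#include <linux/err.h>
#include <linux/pci.h>

static struct pci_slot *foo_register_slot(struct pci_bus *bus)
{
	struct pci_slot *slot;

	/* physical slot is known, its devfn is not yet known: pass -1 */
	slot = pci_create_slot(bus, -1, "foo-slot-1");
	if (IS_ERR(slot))
		return NULL;
	return slot;
}

static void foo_device_inserted(struct pci_slot *slot, struct pci_dev *dev)
{
	/* a card showed up, record the real slot number */
	pci_update_slot_number(slot, PCI_SLOT(dev->devfn));
}

static void foo_unregister_slot(struct pci_slot *slot)
{
	pci_destroy_slot(slot);		/* drops the creation reference */
}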
+ */ + +void pci_destroy_slot(struct pci_slot *slot) +{ + pr_debug("%s: dec refcount to %d on %04x:%02x:%02x\n", __func__, + atomic_read(&slot->kobj.kref.refcount) - 1, + pci_domain_nr(slot->bus), slot->bus->number, slot->number); + + down_write(&pci_bus_sem); + kobject_put(&slot->kobj); + up_write(&pci_bus_sem); +} +EXPORT_SYMBOL_GPL(pci_destroy_slot); + +static int pci_slot_init(void) +{ + struct kset *pci_bus_kset; + + pci_bus_kset = bus_get_kset(&pci_bus_type); + pci_slots_kset = kset_create_and_add("slots", NULL, + &pci_bus_kset->kobj); + if (!pci_slots_kset) { + printk(KERN_ERR "PCI: Slot initialization failure\n"); + return -ENOMEM; + } + return 0; +} + +subsys_initcall(pci_slot_init); diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c index 9fcff0c3361..65129b54eb0 100644 --- a/drivers/pcmcia/cistpl.c +++ b/drivers/pcmcia/cistpl.c @@ -1490,7 +1490,7 @@ int pccard_validate_cis(struct pcmcia_socket *s, unsigned int function, unsigned ((tuple->TupleCode > 0x90) && (tuple->TupleCode < 0xff))) reserved++; } - if ((count) || (reserved > 5) || + if ((count == MAX_TUPLES) || (reserved > 5) || ((!dev_ok || !ident_ok) && (count > 10))) count = 0; diff --git a/drivers/pnp/base.h b/drivers/pnp/base.h index 886dac823ed..e3fa9a2d9a3 100644 --- a/drivers/pnp/base.h +++ b/drivers/pnp/base.h @@ -1,3 +1,8 @@ +/* + * Copyright (C) 2008 Hewlett-Packard Development Company, L.P. + * Bjorn Helgaas <bjorn.helgaas@hp.com> + */ + extern spinlock_t pnp_lock; void *pnp_alloc(long size); @@ -19,22 +24,118 @@ void pnp_remove_card(struct pnp_card *card); int pnp_add_card_device(struct pnp_card *card, struct pnp_dev *dev); void pnp_remove_card_device(struct pnp_dev *dev); -struct pnp_option *pnp_build_option(int priority); -struct pnp_option *pnp_register_independent_option(struct pnp_dev *dev); -struct pnp_option *pnp_register_dependent_option(struct pnp_dev *dev, - int priority); -int pnp_register_irq_resource(struct pnp_dev *dev, struct pnp_option *option, - struct pnp_irq *data); -int pnp_register_dma_resource(struct pnp_dev *dev, struct pnp_option *option, - struct pnp_dma *data); -int pnp_register_port_resource(struct pnp_dev *dev, struct pnp_option *option, - struct pnp_port *data); -int pnp_register_mem_resource(struct pnp_dev *dev, struct pnp_option *option, - struct pnp_mem *data); +struct pnp_port { + resource_size_t min; /* min base number */ + resource_size_t max; /* max base number */ + resource_size_t align; /* align boundary */ + resource_size_t size; /* size of range */ + unsigned char flags; /* port flags */ +}; + +#define PNP_IRQ_NR 256 +typedef struct { DECLARE_BITMAP(bits, PNP_IRQ_NR); } pnp_irq_mask_t; + +struct pnp_irq { + pnp_irq_mask_t map; /* bitmap for IRQ lines */ + unsigned char flags; /* IRQ flags */ +}; + +struct pnp_dma { + unsigned char map; /* bitmask for DMA channels */ + unsigned char flags; /* DMA flags */ +}; + +struct pnp_mem { + resource_size_t min; /* min base number */ + resource_size_t max; /* max base number */ + resource_size_t align; /* align boundary */ + resource_size_t size; /* size of range */ + unsigned char flags; /* memory flags */ +}; + +#define PNP_OPTION_DEPENDENT 0x80000000 +#define PNP_OPTION_SET_MASK 0xffff +#define PNP_OPTION_SET_SHIFT 12 +#define PNP_OPTION_PRIORITY_MASK 0xfff +#define PNP_OPTION_PRIORITY_SHIFT 0 + +#define PNP_RES_PRIORITY_PREFERRED 0 +#define PNP_RES_PRIORITY_ACCEPTABLE 1 +#define PNP_RES_PRIORITY_FUNCTIONAL 2 +#define PNP_RES_PRIORITY_INVALID PNP_OPTION_PRIORITY_MASK + +struct pnp_option { + struct list_head list; + 
unsigned int flags; /* independent/dependent, set, priority */ + + unsigned long type; /* IORESOURCE_{IO,MEM,IRQ,DMA} */ + union { + struct pnp_port port; + struct pnp_irq irq; + struct pnp_dma dma; + struct pnp_mem mem; + } u; +}; + +int pnp_register_irq_resource(struct pnp_dev *dev, unsigned int option_flags, + pnp_irq_mask_t *map, unsigned char flags); +int pnp_register_dma_resource(struct pnp_dev *dev, unsigned int option_flags, + unsigned char map, unsigned char flags); +int pnp_register_port_resource(struct pnp_dev *dev, unsigned int option_flags, + resource_size_t min, resource_size_t max, + resource_size_t align, resource_size_t size, + unsigned char flags); +int pnp_register_mem_resource(struct pnp_dev *dev, unsigned int option_flags, + resource_size_t min, resource_size_t max, + resource_size_t align, resource_size_t size, + unsigned char flags); + +static inline int pnp_option_is_dependent(struct pnp_option *option) +{ + return option->flags & PNP_OPTION_DEPENDENT ? 1 : 0; +} + +static inline unsigned int pnp_option_set(struct pnp_option *option) +{ + return (option->flags >> PNP_OPTION_SET_SHIFT) & PNP_OPTION_SET_MASK; +} + +static inline unsigned int pnp_option_priority(struct pnp_option *option) +{ + return (option->flags >> PNP_OPTION_PRIORITY_SHIFT) & + PNP_OPTION_PRIORITY_MASK; +} + +static inline unsigned int pnp_new_dependent_set(struct pnp_dev *dev, + int priority) +{ + unsigned int flags; + + if (priority > PNP_RES_PRIORITY_FUNCTIONAL) { + dev_warn(&dev->dev, "invalid dependent option priority %d " + "clipped to %d", priority, + PNP_RES_PRIORITY_INVALID); + priority = PNP_RES_PRIORITY_INVALID; + } + + flags = PNP_OPTION_DEPENDENT | + ((dev->num_dependent_sets & PNP_OPTION_SET_MASK) << + PNP_OPTION_SET_SHIFT) | + ((priority & PNP_OPTION_PRIORITY_MASK) << + PNP_OPTION_PRIORITY_SHIFT); + + dev->num_dependent_sets++; + + return flags; +} + +char *pnp_option_priority_name(struct pnp_option *option); +void dbg_pnp_show_option(struct pnp_dev *dev, struct pnp_option *option); + void pnp_init_resources(struct pnp_dev *dev); void pnp_fixup_device(struct pnp_dev *dev); -void pnp_free_option(struct pnp_option *option); +void pnp_free_options(struct pnp_dev *dev); int __pnp_add_device(struct pnp_dev *dev); void __pnp_remove_device(struct pnp_dev *dev); @@ -43,29 +144,18 @@ int pnp_check_mem(struct pnp_dev *dev, struct resource *res); int pnp_check_irq(struct pnp_dev *dev, struct resource *res); int pnp_check_dma(struct pnp_dev *dev, struct resource *res); +char *pnp_resource_type_name(struct resource *res); void dbg_pnp_show_resources(struct pnp_dev *dev, char *desc); -void pnp_init_resource(struct resource *res); - -struct pnp_resource *pnp_get_pnp_resource(struct pnp_dev *dev, - unsigned int type, unsigned int num); - -#define PNP_MAX_PORT 40 -#define PNP_MAX_MEM 24 -#define PNP_MAX_IRQ 2 -#define PNP_MAX_DMA 2 +void pnp_free_resources(struct pnp_dev *dev); +int pnp_resource_type(struct resource *res); struct pnp_resource { + struct list_head list; struct resource res; - unsigned int index; /* ISAPNP config register index */ }; -struct pnp_resource_table { - struct pnp_resource port[PNP_MAX_PORT]; - struct pnp_resource mem[PNP_MAX_MEM]; - struct pnp_resource dma[PNP_MAX_DMA]; - struct pnp_resource irq[PNP_MAX_IRQ]; -}; +void pnp_free_resource(struct pnp_resource *pnp_res); struct pnp_resource *pnp_add_irq_resource(struct pnp_dev *dev, int irq, int flags); diff --git a/drivers/pnp/core.c b/drivers/pnp/core.c index 20771b7d448..a411582bcd7 100644 --- a/drivers/pnp/core.c +++ 
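With options now identified by a single flags word (dependent bit, set number, priority) instead of per-device pnp_option chains, a protocol backend registers its resource options roughly as sketched below; the port range and IRQ choices are invented for illustration:

#include <linux/bitmap.h>
#include <linux/pnp.h>
#include "base.h"

static int foo_parse_options(struct pnp_dev *dev)
{
	unsigned int flags;
	pnp_irq_mask_t map;
	int err;

	/* independent option: option_flags of 0 (dependent bit clear) */
	err = pnp_register_port_resource(dev, 0, 0x220, 0x260, 0x20, 8, 0);
	if (err)
		return err;

	/* open a dependent set with ACCEPTABLE priority ... */
	flags = pnp_new_dependent_set(dev, PNP_RES_PRIORITY_ACCEPTABLE);

	/* ... and register an IRQ option (IRQ 5 or 7) inside that set */
	bitmap_zero(map.bits, PNP_IRQ_NR);
	__set_bit(5, map.bits);
	__set_bit(7, map.bits);
	return pnp_register_irq_resource(dev, flags, &map, 0);
}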
b/drivers/pnp/core.c @@ -99,14 +99,28 @@ static void pnp_free_ids(struct pnp_dev *dev) } } +void pnp_free_resource(struct pnp_resource *pnp_res) +{ + list_del(&pnp_res->list); + kfree(pnp_res); +} + +void pnp_free_resources(struct pnp_dev *dev) +{ + struct pnp_resource *pnp_res, *tmp; + + list_for_each_entry_safe(pnp_res, tmp, &dev->resources, list) { + pnp_free_resource(pnp_res); + } +} + static void pnp_release_device(struct device *dmdev) { struct pnp_dev *dev = to_pnp_dev(dmdev); - pnp_free_option(dev->independent); - pnp_free_option(dev->dependent); pnp_free_ids(dev); - kfree(dev->res); + pnp_free_resources(dev); + pnp_free_options(dev); kfree(dev); } @@ -119,12 +133,8 @@ struct pnp_dev *pnp_alloc_dev(struct pnp_protocol *protocol, int id, char *pnpid if (!dev) return NULL; - dev->res = kzalloc(sizeof(struct pnp_resource_table), GFP_KERNEL); - if (!dev->res) { - kfree(dev); - return NULL; - } - + INIT_LIST_HEAD(&dev->resources); + INIT_LIST_HEAD(&dev->options); dev->protocol = protocol; dev->number = id; dev->dma_mask = DMA_24BIT_MASK; @@ -140,7 +150,6 @@ struct pnp_dev *pnp_alloc_dev(struct pnp_protocol *protocol, int id, char *pnpid dev_id = pnp_add_id(dev, pnpid); if (!dev_id) { - kfree(dev->res); kfree(dev); return NULL; } diff --git a/drivers/pnp/interface.c b/drivers/pnp/interface.c index 5695a79f3a5..a876ecf7028 100644 --- a/drivers/pnp/interface.c +++ b/drivers/pnp/interface.c @@ -3,6 +3,8 @@ * * Some code, especially possible resource dumping is based on isapnp_proc.c (c) Jaroslav Kysela <perex@perex.cz> * Copyright 2002 Adam Belay <ambx1@neo.rr.com> + * Copyright (C) 2008 Hewlett-Packard Development Company, L.P. + * Bjorn Helgaas <bjorn.helgaas@hp.com> */ #include <linux/pnp.h> @@ -53,11 +55,13 @@ static int pnp_printf(pnp_info_buffer_t * buffer, char *fmt, ...) static void pnp_print_port(pnp_info_buffer_t * buffer, char *space, struct pnp_port *port) { - pnp_printf(buffer, - "%sport 0x%x-0x%x, align 0x%x, size 0x%x, %i-bit address decoding\n", - space, port->min, port->max, - port->align ? (port->align - 1) : 0, port->size, - port->flags & PNP_PORT_FLAG_16BITADDR ? 16 : 10); + pnp_printf(buffer, "%sport %#llx-%#llx, align %#llx, size %#llx, " + "%i-bit address decoding\n", space, + (unsigned long long) port->min, + (unsigned long long) port->max, + port->align ? ((unsigned long long) port->align - 1) : 0, + (unsigned long long) port->size, + port->flags & IORESOURCE_IO_16BIT_ADDR ? 
16 : 10); } static void pnp_print_irq(pnp_info_buffer_t * buffer, char *space, @@ -67,7 +71,7 @@ static void pnp_print_irq(pnp_info_buffer_t * buffer, char *space, pnp_printf(buffer, "%sirq ", space); for (i = 0; i < PNP_IRQ_NR; i++) - if (test_bit(i, irq->map)) { + if (test_bit(i, irq->map.bits)) { if (!first) { pnp_printf(buffer, ","); } else { @@ -78,7 +82,7 @@ static void pnp_print_irq(pnp_info_buffer_t * buffer, char *space, else pnp_printf(buffer, "%i", i); } - if (bitmap_empty(irq->map, PNP_IRQ_NR)) + if (bitmap_empty(irq->map.bits, PNP_IRQ_NR)) pnp_printf(buffer, "<none>"); if (irq->flags & IORESOURCE_IRQ_HIGHEDGE) pnp_printf(buffer, " High-Edge"); @@ -88,6 +92,8 @@ static void pnp_print_irq(pnp_info_buffer_t * buffer, char *space, pnp_printf(buffer, " High-Level"); if (irq->flags & IORESOURCE_IRQ_LOWLEVEL) pnp_printf(buffer, " Low-Level"); + if (irq->flags & IORESOURCE_IRQ_OPTIONAL) + pnp_printf(buffer, " (optional)"); pnp_printf(buffer, "\n"); } @@ -148,8 +154,11 @@ static void pnp_print_mem(pnp_info_buffer_t * buffer, char *space, { char *s; - pnp_printf(buffer, "%sMemory 0x%x-0x%x, align 0x%x, size 0x%x", - space, mem->min, mem->max, mem->align, mem->size); + pnp_printf(buffer, "%sMemory %#llx-%#llx, align %#llx, size %#llx", + space, (unsigned long long) mem->min, + (unsigned long long) mem->max, + (unsigned long long) mem->align, + (unsigned long long) mem->size); if (mem->flags & IORESOURCE_MEM_WRITEABLE) pnp_printf(buffer, ", writeable"); if (mem->flags & IORESOURCE_MEM_CACHEABLE) @@ -177,65 +186,58 @@ static void pnp_print_mem(pnp_info_buffer_t * buffer, char *space, } static void pnp_print_option(pnp_info_buffer_t * buffer, char *space, - struct pnp_option *option, int dep) + struct pnp_option *option) { - char *s; - struct pnp_port *port; - struct pnp_irq *irq; - struct pnp_dma *dma; - struct pnp_mem *mem; - - if (dep) { - switch (option->priority) { - case PNP_RES_PRIORITY_PREFERRED: - s = "preferred"; - break; - case PNP_RES_PRIORITY_ACCEPTABLE: - s = "acceptable"; - break; - case PNP_RES_PRIORITY_FUNCTIONAL: - s = "functional"; - break; - default: - s = "invalid"; - } - pnp_printf(buffer, "Dependent: %02i - Priority %s\n", dep, s); + switch (option->type) { + case IORESOURCE_IO: + pnp_print_port(buffer, space, &option->u.port); + break; + case IORESOURCE_MEM: + pnp_print_mem(buffer, space, &option->u.mem); + break; + case IORESOURCE_IRQ: + pnp_print_irq(buffer, space, &option->u.irq); + break; + case IORESOURCE_DMA: + pnp_print_dma(buffer, space, &option->u.dma); + break; } - - for (port = option->port; port; port = port->next) - pnp_print_port(buffer, space, port); - for (irq = option->irq; irq; irq = irq->next) - pnp_print_irq(buffer, space, irq); - for (dma = option->dma; dma; dma = dma->next) - pnp_print_dma(buffer, space, dma); - for (mem = option->mem; mem; mem = mem->next) - pnp_print_mem(buffer, space, mem); } static ssize_t pnp_show_options(struct device *dmdev, struct device_attribute *attr, char *buf) { struct pnp_dev *dev = to_pnp_dev(dmdev); - struct pnp_option *independent = dev->independent; - struct pnp_option *dependent = dev->dependent; - int ret, dep = 1; + pnp_info_buffer_t *buffer; + struct pnp_option *option; + int ret, dep = 0, set = 0; + char *indent; - pnp_info_buffer_t *buffer = (pnp_info_buffer_t *) - pnp_alloc(sizeof(pnp_info_buffer_t)); + buffer = pnp_alloc(sizeof(pnp_info_buffer_t)); if (!buffer) return -ENOMEM; buffer->len = PAGE_SIZE; buffer->buffer = buf; buffer->curr = buffer->buffer; - if (independent) - pnp_print_option(buffer, "", 
independent, 0); - while (dependent) { - pnp_print_option(buffer, " ", dependent, dep); - dependent = dependent->next; - dep++; + list_for_each_entry(option, &dev->options, list) { + if (pnp_option_is_dependent(option)) { + indent = " "; + if (!dep || pnp_option_set(option) != set) { + set = pnp_option_set(option); + dep = 1; + pnp_printf(buffer, "Dependent: %02i - " + "Priority %s\n", set, + pnp_option_priority_name(option)); + } + } else { + dep = 0; + indent = ""; + } + pnp_print_option(buffer, indent, option); } + ret = (buffer->curr - buf); kfree(buffer); return ret; @@ -248,79 +250,59 @@ static ssize_t pnp_show_current_resources(struct device *dmdev, char *buf) { struct pnp_dev *dev = to_pnp_dev(dmdev); - struct resource *res; - int i, ret; pnp_info_buffer_t *buffer; + struct pnp_resource *pnp_res; + struct resource *res; + int ret; if (!dev) return -EINVAL; - buffer = (pnp_info_buffer_t *) pnp_alloc(sizeof(pnp_info_buffer_t)); + buffer = pnp_alloc(sizeof(pnp_info_buffer_t)); if (!buffer) return -ENOMEM; + buffer->len = PAGE_SIZE; buffer->buffer = buf; buffer->curr = buffer->buffer; - pnp_printf(buffer, "state = "); - if (dev->active) - pnp_printf(buffer, "active\n"); - else - pnp_printf(buffer, "disabled\n"); - - for (i = 0; (res = pnp_get_resource(dev, IORESOURCE_IO, i)); i++) { - if (pnp_resource_valid(res)) { - pnp_printf(buffer, "io"); - if (res->flags & IORESOURCE_DISABLED) - pnp_printf(buffer, " disabled\n"); - else - pnp_printf(buffer, " 0x%llx-0x%llx\n", - (unsigned long long) res->start, - (unsigned long long) res->end); - } - } - for (i = 0; (res = pnp_get_resource(dev, IORESOURCE_MEM, i)); i++) { - if (pnp_resource_valid(res)) { - pnp_printf(buffer, "mem"); - if (res->flags & IORESOURCE_DISABLED) - pnp_printf(buffer, " disabled\n"); - else - pnp_printf(buffer, " 0x%llx-0x%llx\n", - (unsigned long long) res->start, - (unsigned long long) res->end); - } - } - for (i = 0; (res = pnp_get_resource(dev, IORESOURCE_IRQ, i)); i++) { - if (pnp_resource_valid(res)) { - pnp_printf(buffer, "irq"); - if (res->flags & IORESOURCE_DISABLED) - pnp_printf(buffer, " disabled\n"); - else - pnp_printf(buffer, " %lld\n", - (unsigned long long) res->start); + pnp_printf(buffer, "state = %s\n", dev->active ? 
"active" : "disabled"); + + list_for_each_entry(pnp_res, &dev->resources, list) { + res = &pnp_res->res; + + pnp_printf(buffer, pnp_resource_type_name(res)); + + if (res->flags & IORESOURCE_DISABLED) { + pnp_printf(buffer, " disabled\n"); + continue; } - } - for (i = 0; (res = pnp_get_resource(dev, IORESOURCE_DMA, i)); i++) { - if (pnp_resource_valid(res)) { - pnp_printf(buffer, "dma"); - if (res->flags & IORESOURCE_DISABLED) - pnp_printf(buffer, " disabled\n"); - else - pnp_printf(buffer, " %lld\n", - (unsigned long long) res->start); + + switch (pnp_resource_type(res)) { + case IORESOURCE_IO: + case IORESOURCE_MEM: + pnp_printf(buffer, " %#llx-%#llx\n", + (unsigned long long) res->start, + (unsigned long long) res->end); + break; + case IORESOURCE_IRQ: + case IORESOURCE_DMA: + pnp_printf(buffer, " %lld\n", + (unsigned long long) res->start); + break; } } + ret = (buffer->curr - buf); kfree(buffer); return ret; } -static ssize_t -pnp_set_current_resources(struct device *dmdev, struct device_attribute *attr, - const char *ubuf, size_t count) +static ssize_t pnp_set_current_resources(struct device *dmdev, + struct device_attribute *attr, + const char *ubuf, size_t count) { struct pnp_dev *dev = to_pnp_dev(dmdev); - struct pnp_resource *pnp_res; char *buf = (void *)ubuf; int retval = 0; resource_size_t start, end; @@ -368,7 +350,6 @@ pnp_set_current_resources(struct device *dmdev, struct device_attribute *attr, goto done; } if (!strnicmp(buf, "set", 3)) { - int nport = 0, nmem = 0, nirq = 0, ndma = 0; if (dev->active) goto done; buf += 3; @@ -391,10 +372,7 @@ pnp_set_current_resources(struct device *dmdev, struct device_attribute *attr, end = simple_strtoul(buf, &buf, 0); } else end = start; - pnp_res = pnp_add_io_resource(dev, start, end, - 0); - if (pnp_res) - pnp_res->index = nport++; + pnp_add_io_resource(dev, start, end, 0); continue; } if (!strnicmp(buf, "mem", 3)) { @@ -411,10 +389,7 @@ pnp_set_current_resources(struct device *dmdev, struct device_attribute *attr, end = simple_strtoul(buf, &buf, 0); } else end = start; - pnp_res = pnp_add_mem_resource(dev, start, end, - 0); - if (pnp_res) - pnp_res->index = nmem++; + pnp_add_mem_resource(dev, start, end, 0); continue; } if (!strnicmp(buf, "irq", 3)) { @@ -422,9 +397,7 @@ pnp_set_current_resources(struct device *dmdev, struct device_attribute *attr, while (isspace(*buf)) ++buf; start = simple_strtoul(buf, &buf, 0); - pnp_res = pnp_add_irq_resource(dev, start, 0); - if (pnp_res) - pnp_res->index = nirq++; + pnp_add_irq_resource(dev, start, 0); continue; } if (!strnicmp(buf, "dma", 3)) { @@ -432,9 +405,7 @@ pnp_set_current_resources(struct device *dmdev, struct device_attribute *attr, while (isspace(*buf)) ++buf; start = simple_strtoul(buf, &buf, 0); - pnp_res = pnp_add_dma_resource(dev, start, 0); - if (pnp_res) - pnp_res->index = ndma++; + pnp_add_dma_resource(dev, start, 0); continue; } break; diff --git a/drivers/pnp/isapnp/core.c b/drivers/pnp/isapnp/core.c index f1bccdbdeb0..101a835e875 100644 --- a/drivers/pnp/isapnp/core.c +++ b/drivers/pnp/isapnp/core.c @@ -429,154 +429,135 @@ static struct pnp_dev *__init isapnp_parse_device(struct pnp_card *card, * Add IRQ resource to resources list. 
*/ static void __init isapnp_parse_irq_resource(struct pnp_dev *dev, - struct pnp_option *option, + unsigned int option_flags, int size) { unsigned char tmp[3]; - struct pnp_irq *irq; unsigned long bits; + pnp_irq_mask_t map; + unsigned char flags = IORESOURCE_IRQ_HIGHEDGE; isapnp_peek(tmp, size); - irq = kzalloc(sizeof(struct pnp_irq), GFP_KERNEL); - if (!irq) - return; bits = (tmp[1] << 8) | tmp[0]; - bitmap_copy(irq->map, &bits, 16); + + bitmap_zero(map.bits, PNP_IRQ_NR); + bitmap_copy(map.bits, &bits, 16); + if (size > 2) - irq->flags = tmp[2]; - else - irq->flags = IORESOURCE_IRQ_HIGHEDGE; - pnp_register_irq_resource(dev, option, irq); + flags = tmp[2]; + + pnp_register_irq_resource(dev, option_flags, &map, flags); } /* * Add DMA resource to resources list. */ static void __init isapnp_parse_dma_resource(struct pnp_dev *dev, - struct pnp_option *option, + unsigned int option_flags, int size) { unsigned char tmp[2]; - struct pnp_dma *dma; isapnp_peek(tmp, size); - dma = kzalloc(sizeof(struct pnp_dma), GFP_KERNEL); - if (!dma) - return; - dma->map = tmp[0]; - dma->flags = tmp[1]; - pnp_register_dma_resource(dev, option, dma); + pnp_register_dma_resource(dev, option_flags, tmp[0], tmp[1]); } /* * Add port resource to resources list. */ static void __init isapnp_parse_port_resource(struct pnp_dev *dev, - struct pnp_option *option, + unsigned int option_flags, int size) { unsigned char tmp[7]; - struct pnp_port *port; + resource_size_t min, max, align, len; + unsigned char flags; isapnp_peek(tmp, size); - port = kzalloc(sizeof(struct pnp_port), GFP_KERNEL); - if (!port) - return; - port->min = (tmp[2] << 8) | tmp[1]; - port->max = (tmp[4] << 8) | tmp[3]; - port->align = tmp[5]; - port->size = tmp[6]; - port->flags = tmp[0] ? PNP_PORT_FLAG_16BITADDR : 0; - pnp_register_port_resource(dev, option, port); + min = (tmp[2] << 8) | tmp[1]; + max = (tmp[4] << 8) | tmp[3]; + align = tmp[5]; + len = tmp[6]; + flags = tmp[0] ? IORESOURCE_IO_16BIT_ADDR : 0; + pnp_register_port_resource(dev, option_flags, + min, max, align, len, flags); } /* * Add fixed port resource to resources list. */ static void __init isapnp_parse_fixed_port_resource(struct pnp_dev *dev, - struct pnp_option *option, + unsigned int option_flags, int size) { unsigned char tmp[3]; - struct pnp_port *port; + resource_size_t base, len; isapnp_peek(tmp, size); - port = kzalloc(sizeof(struct pnp_port), GFP_KERNEL); - if (!port) - return; - port->min = port->max = (tmp[1] << 8) | tmp[0]; - port->size = tmp[2]; - port->align = 0; - port->flags = PNP_PORT_FLAG_FIXED; - pnp_register_port_resource(dev, option, port); + base = (tmp[1] << 8) | tmp[0]; + len = tmp[2]; + pnp_register_port_resource(dev, option_flags, base, base, 0, len, + IORESOURCE_IO_FIXED); } /* * Add memory resource to resources list. 
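All of the parsers above now thread a plain option_flags word instead of a struct pnp_option pointer; pnp_new_dependent_set() (in the base.h hunk earlier in this patch) packs the dependent-set number and priority into it, and pnp_option_set()/pnp_option_priority() unpack it later. A standalone sketch of that packing, using illustrative shift/mask values; the authoritative PNP_OPTION_* constants live in drivers/pnp/base.h and are not shown in this hunk:

#include <stdio.h>

/* Illustrative values only; the kernel's definitions may differ. */
#define PNP_OPTION_DEPENDENT            0x80000000
#define PNP_OPTION_SET_MASK             0xffff
#define PNP_OPTION_SET_SHIFT            12
#define PNP_OPTION_PRIORITY_MASK        0xfff
#define PNP_OPTION_PRIORITY_SHIFT       0

/* Pack a dependent-set number and priority, as pnp_new_dependent_set() does. */
static unsigned int pack_dependent(unsigned int set, unsigned int priority)
{
        return PNP_OPTION_DEPENDENT |
               ((set & PNP_OPTION_SET_MASK) << PNP_OPTION_SET_SHIFT) |
               ((priority & PNP_OPTION_PRIORITY_MASK) << PNP_OPTION_PRIORITY_SHIFT);
}

int main(void)
{
        unsigned int flags = pack_dependent(2, 1);      /* set 2, priority 1 */

        printf("dependent=%d set=%u priority=%u\n",
               !!(flags & PNP_OPTION_DEPENDENT),
               (flags >> PNP_OPTION_SET_SHIFT) & PNP_OPTION_SET_MASK,
               (flags >> PNP_OPTION_PRIORITY_SHIFT) & PNP_OPTION_PRIORITY_MASK);
        return 0;
}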
*/ static void __init isapnp_parse_mem_resource(struct pnp_dev *dev, - struct pnp_option *option, + unsigned int option_flags, int size) { unsigned char tmp[9]; - struct pnp_mem *mem; + resource_size_t min, max, align, len; + unsigned char flags; isapnp_peek(tmp, size); - mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL); - if (!mem) - return; - mem->min = ((tmp[2] << 8) | tmp[1]) << 8; - mem->max = ((tmp[4] << 8) | tmp[3]) << 8; - mem->align = (tmp[6] << 8) | tmp[5]; - mem->size = ((tmp[8] << 8) | tmp[7]) << 8; - mem->flags = tmp[0]; - pnp_register_mem_resource(dev, option, mem); + min = ((tmp[2] << 8) | tmp[1]) << 8; + max = ((tmp[4] << 8) | tmp[3]) << 8; + align = (tmp[6] << 8) | tmp[5]; + len = ((tmp[8] << 8) | tmp[7]) << 8; + flags = tmp[0]; + pnp_register_mem_resource(dev, option_flags, + min, max, align, len, flags); } /* * Add 32-bit memory resource to resources list. */ static void __init isapnp_parse_mem32_resource(struct pnp_dev *dev, - struct pnp_option *option, + unsigned int option_flags, int size) { unsigned char tmp[17]; - struct pnp_mem *mem; + resource_size_t min, max, align, len; + unsigned char flags; isapnp_peek(tmp, size); - mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL); - if (!mem) - return; - mem->min = (tmp[4] << 24) | (tmp[3] << 16) | (tmp[2] << 8) | tmp[1]; - mem->max = (tmp[8] << 24) | (tmp[7] << 16) | (tmp[6] << 8) | tmp[5]; - mem->align = - (tmp[12] << 24) | (tmp[11] << 16) | (tmp[10] << 8) | tmp[9]; - mem->size = - (tmp[16] << 24) | (tmp[15] << 16) | (tmp[14] << 8) | tmp[13]; - mem->flags = tmp[0]; - pnp_register_mem_resource(dev, option, mem); + min = (tmp[4] << 24) | (tmp[3] << 16) | (tmp[2] << 8) | tmp[1]; + max = (tmp[8] << 24) | (tmp[7] << 16) | (tmp[6] << 8) | tmp[5]; + align = (tmp[12] << 24) | (tmp[11] << 16) | (tmp[10] << 8) | tmp[9]; + len = (tmp[16] << 24) | (tmp[15] << 16) | (tmp[14] << 8) | tmp[13]; + flags = tmp[0]; + pnp_register_mem_resource(dev, option_flags, + min, max, align, len, flags); } /* * Add 32-bit fixed memory resource to resources list. 
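isapnp_parse_mem_resource() above turns the raw 24-bit memory descriptor bytes into a min/max/align/size tuple for pnp_register_mem_resource(); base, limit and length are stored in 256-byte units, hence the extra left shift by 8. A userspace sketch of the same decode with a fabricated descriptor:

#include <stdio.h>

int main(void)
{
        /* Fabricated descriptor bytes, not real card data. */
        unsigned char tmp[9] = { 0x00, 0x00, 0x0d, 0x00, 0x0e, 0x40, 0x00, 0x40, 0x00 };
        unsigned long min   = ((tmp[2] << 8) | tmp[1]) << 8;
        unsigned long max   = ((tmp[4] << 8) | tmp[3]) << 8;
        unsigned long align = (tmp[6] << 8) | tmp[5];
        unsigned long len   = ((tmp[8] << 8) | tmp[7]) << 8;

        printf("mem option: %#lx-%#lx align %#lx size %#lx\n", min, max, align, len);
        return 0;
}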
*/ static void __init isapnp_parse_fixed_mem32_resource(struct pnp_dev *dev, - struct pnp_option *option, + unsigned int option_flags, int size) { unsigned char tmp[9]; - struct pnp_mem *mem; + resource_size_t base, len; + unsigned char flags; isapnp_peek(tmp, size); - mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL); - if (!mem) - return; - mem->min = mem->max = - (tmp[4] << 24) | (tmp[3] << 16) | (tmp[2] << 8) | tmp[1]; - mem->size = (tmp[8] << 24) | (tmp[7] << 16) | (tmp[6] << 8) | tmp[5]; - mem->align = 0; - mem->flags = tmp[0]; - pnp_register_mem_resource(dev, option, mem); + base = (tmp[4] << 24) | (tmp[3] << 16) | (tmp[2] << 8) | tmp[1]; + len = (tmp[8] << 24) | (tmp[7] << 16) | (tmp[6] << 8) | tmp[5]; + flags = tmp[0]; + pnp_register_mem_resource(dev, option_flags, base, base, 0, len, flags); } /* @@ -604,20 +585,16 @@ isapnp_parse_name(char *name, unsigned int name_max, unsigned short *size) static int __init isapnp_create_device(struct pnp_card *card, unsigned short size) { - int number = 0, skip = 0, priority = 0, compat = 0; + int number = 0, skip = 0, priority, compat = 0; unsigned char type, tmp[17]; - struct pnp_option *option; + unsigned int option_flags; struct pnp_dev *dev; u32 eisa_id; char id[8]; if ((dev = isapnp_parse_device(card, size, number++)) == NULL) return 1; - option = pnp_register_independent_option(dev); - if (!option) { - kfree(dev); - return 1; - } + option_flags = 0; pnp_add_card_device(card, dev); while (1) { @@ -634,16 +611,11 @@ static int __init isapnp_create_device(struct pnp_card *card, return 1; size = 0; skip = 0; - option = pnp_register_independent_option(dev); - if (!option) { - kfree(dev); - return 1; - } + option_flags = 0; pnp_add_card_device(card, dev); } else { skip = 1; } - priority = 0; compat = 0; break; case _STAG_COMPATDEVID: @@ -660,44 +632,42 @@ static int __init isapnp_create_device(struct pnp_card *card, case _STAG_IRQ: if (size < 2 || size > 3) goto __skip; - isapnp_parse_irq_resource(dev, option, size); + isapnp_parse_irq_resource(dev, option_flags, size); size = 0; break; case _STAG_DMA: if (size != 2) goto __skip; - isapnp_parse_dma_resource(dev, option, size); + isapnp_parse_dma_resource(dev, option_flags, size); size = 0; break; case _STAG_STARTDEP: if (size > 1) goto __skip; - priority = 0x100 | PNP_RES_PRIORITY_ACCEPTABLE; + priority = PNP_RES_PRIORITY_ACCEPTABLE; if (size > 0) { isapnp_peek(tmp, size); - priority = 0x100 | tmp[0]; + priority = tmp[0]; size = 0; } - option = pnp_register_dependent_option(dev, priority); - if (!option) - return 1; + option_flags = pnp_new_dependent_set(dev, priority); break; case _STAG_ENDDEP: if (size != 0) goto __skip; - priority = 0; - dev_dbg(&dev->dev, "end dependent options\n"); + option_flags = 0; break; case _STAG_IOPORT: if (size != 7) goto __skip; - isapnp_parse_port_resource(dev, option, size); + isapnp_parse_port_resource(dev, option_flags, size); size = 0; break; case _STAG_FIXEDIO: if (size != 3) goto __skip; - isapnp_parse_fixed_port_resource(dev, option, size); + isapnp_parse_fixed_port_resource(dev, option_flags, + size); size = 0; break; case _STAG_VENDOR: @@ -705,7 +675,7 @@ static int __init isapnp_create_device(struct pnp_card *card, case _LTAG_MEMRANGE: if (size != 9) goto __skip; - isapnp_parse_mem_resource(dev, option, size); + isapnp_parse_mem_resource(dev, option_flags, size); size = 0; break; case _LTAG_ANSISTR: @@ -720,13 +690,14 @@ static int __init isapnp_create_device(struct pnp_card *card, case _LTAG_MEM32RANGE: if (size != 17) goto __skip; - 
isapnp_parse_mem32_resource(dev, option, size); + isapnp_parse_mem32_resource(dev, option_flags, size); size = 0; break; case _LTAG_FIXEDMEM32RANGE: if (size != 9) goto __skip; - isapnp_parse_fixed_mem32_resource(dev, option, size); + isapnp_parse_fixed_mem32_resource(dev, option_flags, + size); size = 0; break; case _STAG_END: @@ -928,7 +899,6 @@ EXPORT_SYMBOL(isapnp_write_byte); static int isapnp_get_resources(struct pnp_dev *dev) { - struct pnp_resource *pnp_res; int i, ret; dev_dbg(&dev->dev, "get resources\n"); @@ -940,35 +910,23 @@ static int isapnp_get_resources(struct pnp_dev *dev) for (i = 0; i < ISAPNP_MAX_PORT; i++) { ret = isapnp_read_word(ISAPNP_CFG_PORT + (i << 1)); - if (ret) { - pnp_res = pnp_add_io_resource(dev, ret, ret, 0); - if (pnp_res) - pnp_res->index = i; - } + pnp_add_io_resource(dev, ret, ret, + ret == 0 ? IORESOURCE_DISABLED : 0); } for (i = 0; i < ISAPNP_MAX_MEM; i++) { ret = isapnp_read_word(ISAPNP_CFG_MEM + (i << 3)) << 8; - if (ret) { - pnp_res = pnp_add_mem_resource(dev, ret, ret, 0); - if (pnp_res) - pnp_res->index = i; - } + pnp_add_mem_resource(dev, ret, ret, + ret == 0 ? IORESOURCE_DISABLED : 0); } for (i = 0; i < ISAPNP_MAX_IRQ; i++) { ret = isapnp_read_word(ISAPNP_CFG_IRQ + (i << 1)) >> 8; - if (ret) { - pnp_res = pnp_add_irq_resource(dev, ret, 0); - if (pnp_res) - pnp_res->index = i; - } + pnp_add_irq_resource(dev, ret, + ret == 0 ? IORESOURCE_DISABLED : 0); } for (i = 0; i < ISAPNP_MAX_DMA; i++) { ret = isapnp_read_byte(ISAPNP_CFG_DMA + i); - if (ret != 4) { - pnp_res = pnp_add_dma_resource(dev, ret, 0); - if (pnp_res) - pnp_res->index = i; - } + pnp_add_dma_resource(dev, ret, + ret == 4 ? IORESOURCE_DISABLED : 0); } __end: @@ -978,62 +936,45 @@ __end: static int isapnp_set_resources(struct pnp_dev *dev) { - struct pnp_resource *pnp_res; struct resource *res; - int tmp, index; + int tmp; dev_dbg(&dev->dev, "set resources\n"); isapnp_cfg_begin(dev->card->number, dev->number); dev->active = 1; for (tmp = 0; tmp < ISAPNP_MAX_PORT; tmp++) { - pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_IO, tmp); - if (!pnp_res) - continue; - res = &pnp_res->res; - if (pnp_resource_valid(res)) { - index = pnp_res->index; + res = pnp_get_resource(dev, IORESOURCE_IO, tmp); + if (pnp_resource_enabled(res)) { dev_dbg(&dev->dev, " set io %d to %#llx\n", - index, (unsigned long long) res->start); - isapnp_write_word(ISAPNP_CFG_PORT + (index << 1), + tmp, (unsigned long long) res->start); + isapnp_write_word(ISAPNP_CFG_PORT + (tmp << 1), res->start); } } for (tmp = 0; tmp < ISAPNP_MAX_IRQ; tmp++) { - pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_IRQ, tmp); - if (!pnp_res) - continue; - res = &pnp_res->res; - if (pnp_resource_valid(res)) { + res = pnp_get_resource(dev, IORESOURCE_IRQ, tmp); + if (pnp_resource_enabled(res)) { int irq = res->start; if (irq == 2) irq = 9; - index = pnp_res->index; - dev_dbg(&dev->dev, " set irq %d to %d\n", index, irq); - isapnp_write_byte(ISAPNP_CFG_IRQ + (index << 1), irq); + dev_dbg(&dev->dev, " set irq %d to %d\n", tmp, irq); + isapnp_write_byte(ISAPNP_CFG_IRQ + (tmp << 1), irq); } } for (tmp = 0; tmp < ISAPNP_MAX_DMA; tmp++) { - pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_DMA, tmp); - if (!pnp_res) - continue; - res = &pnp_res->res; - if (pnp_resource_valid(res)) { - index = pnp_res->index; + res = pnp_get_resource(dev, IORESOURCE_DMA, tmp); + if (pnp_resource_enabled(res)) { dev_dbg(&dev->dev, " set dma %d to %lld\n", - index, (unsigned long long) res->start); - isapnp_write_byte(ISAPNP_CFG_DMA + index, res->start); + tmp, (unsigned 
long long) res->start); + isapnp_write_byte(ISAPNP_CFG_DMA + tmp, res->start); } } for (tmp = 0; tmp < ISAPNP_MAX_MEM; tmp++) { - pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_MEM, tmp); - if (!pnp_res) - continue; - res = &pnp_res->res; - if (pnp_resource_valid(res)) { - index = pnp_res->index; + res = pnp_get_resource(dev, IORESOURCE_MEM, tmp); + if (pnp_resource_enabled(res)) { dev_dbg(&dev->dev, " set mem %d to %#llx\n", - index, (unsigned long long) res->start); - isapnp_write_word(ISAPNP_CFG_MEM + (index << 3), + tmp, (unsigned long long) res->start); + isapnp_write_word(ISAPNP_CFG_MEM + (tmp << 3), (res->start >> 8) & 0xffff); } } diff --git a/drivers/pnp/manager.c b/drivers/pnp/manager.c index bea0914ff94..b526eaad3f6 100644 --- a/drivers/pnp/manager.c +++ b/drivers/pnp/manager.c @@ -3,6 +3,8 @@ * * based on isapnp.c resource management (c) Jaroslav Kysela <perex@perex.cz> * Copyright 2003 Adam Belay <ambx1@neo.rr.com> + * Copyright (C) 2008 Hewlett-Packard Development Company, L.P. + * Bjorn Helgaas <bjorn.helgaas@hp.com> */ #include <linux/errno.h> @@ -19,82 +21,64 @@ DEFINE_MUTEX(pnp_res_mutex); static int pnp_assign_port(struct pnp_dev *dev, struct pnp_port *rule, int idx) { - struct pnp_resource *pnp_res; - struct resource *res; - - pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_IO, idx); - if (!pnp_res) { - dev_err(&dev->dev, "too many I/O port resources\n"); - /* pretend we were successful so at least the manager won't try again */ - return 1; - } - - res = &pnp_res->res; + struct resource *res, local_res; - /* check if this resource has been manually set, if so skip */ - if (!(res->flags & IORESOURCE_AUTO)) { + res = pnp_get_resource(dev, IORESOURCE_IO, idx); + if (res) { dev_dbg(&dev->dev, " io %d already set to %#llx-%#llx " "flags %#lx\n", idx, (unsigned long long) res->start, (unsigned long long) res->end, res->flags); - return 1; + return 0; } - /* set the initial values */ - pnp_res->index = idx; - res->flags |= rule->flags | IORESOURCE_IO; - res->flags &= ~IORESOURCE_UNSET; + res = &local_res; + res->flags = rule->flags | IORESOURCE_AUTO; + res->start = 0; + res->end = 0; if (!rule->size) { res->flags |= IORESOURCE_DISABLED; dev_dbg(&dev->dev, " io %d disabled\n", idx); - return 1; /* skip disabled resource requests */ + goto __add; } res->start = rule->min; res->end = res->start + rule->size - 1; - /* run through until pnp_check_port is happy */ while (!pnp_check_port(dev, res)) { res->start += rule->align; res->end = res->start + rule->size - 1; if (res->start > rule->max || !rule->align) { - dev_dbg(&dev->dev, " couldn't assign io %d\n", idx); - return 0; + dev_dbg(&dev->dev, " couldn't assign io %d " + "(min %#llx max %#llx)\n", idx, + (unsigned long long) rule->min, + (unsigned long long) rule->max); + return -EBUSY; } } - dev_dbg(&dev->dev, " assign io %d %#llx-%#llx\n", idx, - (unsigned long long) res->start, (unsigned long long) res->end); - return 1; + +__add: + pnp_add_io_resource(dev, res->start, res->end, res->flags); + return 0; } static int pnp_assign_mem(struct pnp_dev *dev, struct pnp_mem *rule, int idx) { - struct pnp_resource *pnp_res; - struct resource *res; - - pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_MEM, idx); - if (!pnp_res) { - dev_err(&dev->dev, "too many memory resources\n"); - /* pretend we were successful so at least the manager won't try again */ - return 1; - } + struct resource *res, local_res; - res = &pnp_res->res; - - /* check if this resource has been manually set, if so skip */ - if (!(res->flags & IORESOURCE_AUTO)) { + res 
= pnp_get_resource(dev, IORESOURCE_MEM, idx); + if (res) { dev_dbg(&dev->dev, " mem %d already set to %#llx-%#llx " "flags %#lx\n", idx, (unsigned long long) res->start, (unsigned long long) res->end, res->flags); - return 1; + return 0; } - /* set the initial values */ - pnp_res->index = idx; - res->flags |= rule->flags | IORESOURCE_MEM; - res->flags &= ~IORESOURCE_UNSET; + res = &local_res; + res->flags = rule->flags | IORESOURCE_AUTO; + res->start = 0; + res->end = 0; - /* convert pnp flags to standard Linux flags */ if (!(rule->flags & IORESOURCE_MEM_WRITEABLE)) res->flags |= IORESOURCE_READONLY; if (rule->flags & IORESOURCE_MEM_CACHEABLE) @@ -107,30 +91,32 @@ static int pnp_assign_mem(struct pnp_dev *dev, struct pnp_mem *rule, int idx) if (!rule->size) { res->flags |= IORESOURCE_DISABLED; dev_dbg(&dev->dev, " mem %d disabled\n", idx); - return 1; /* skip disabled resource requests */ + goto __add; } res->start = rule->min; res->end = res->start + rule->size - 1; - /* run through until pnp_check_mem is happy */ while (!pnp_check_mem(dev, res)) { res->start += rule->align; res->end = res->start + rule->size - 1; if (res->start > rule->max || !rule->align) { - dev_dbg(&dev->dev, " couldn't assign mem %d\n", idx); - return 0; + dev_dbg(&dev->dev, " couldn't assign mem %d " + "(min %#llx max %#llx)\n", idx, + (unsigned long long) rule->min, + (unsigned long long) rule->max); + return -EBUSY; } } - dev_dbg(&dev->dev, " assign mem %d %#llx-%#llx\n", idx, - (unsigned long long) res->start, (unsigned long long) res->end); - return 1; + +__add: + pnp_add_mem_resource(dev, res->start, res->end, res->flags); + return 0; } static int pnp_assign_irq(struct pnp_dev *dev, struct pnp_irq *rule, int idx) { - struct pnp_resource *pnp_res; - struct resource *res; + struct resource *res, local_res; int i; /* IRQ priority: this table is good for i386 */ @@ -138,59 +124,57 @@ static int pnp_assign_irq(struct pnp_dev *dev, struct pnp_irq *rule, int idx) 5, 10, 11, 12, 9, 14, 15, 7, 3, 4, 13, 0, 1, 6, 8, 2 }; - pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_IRQ, idx); - if (!pnp_res) { - dev_err(&dev->dev, "too many IRQ resources\n"); - /* pretend we were successful so at least the manager won't try again */ - return 1; - } - - res = &pnp_res->res; - - /* check if this resource has been manually set, if so skip */ - if (!(res->flags & IORESOURCE_AUTO)) { + res = pnp_get_resource(dev, IORESOURCE_IRQ, idx); + if (res) { dev_dbg(&dev->dev, " irq %d already set to %d flags %#lx\n", idx, (int) res->start, res->flags); - return 1; + return 0; } - /* set the initial values */ - pnp_res->index = idx; - res->flags |= rule->flags | IORESOURCE_IRQ; - res->flags &= ~IORESOURCE_UNSET; + res = &local_res; + res->flags = rule->flags | IORESOURCE_AUTO; + res->start = -1; + res->end = -1; - if (bitmap_empty(rule->map, PNP_IRQ_NR)) { + if (bitmap_empty(rule->map.bits, PNP_IRQ_NR)) { res->flags |= IORESOURCE_DISABLED; dev_dbg(&dev->dev, " irq %d disabled\n", idx); - return 1; /* skip disabled resource requests */ + goto __add; } /* TBD: need check for >16 IRQ */ - res->start = find_next_bit(rule->map, PNP_IRQ_NR, 16); + res->start = find_next_bit(rule->map.bits, PNP_IRQ_NR, 16); if (res->start < PNP_IRQ_NR) { res->end = res->start; - dev_dbg(&dev->dev, " assign irq %d %d\n", idx, - (int) res->start); - return 1; + goto __add; } for (i = 0; i < 16; i++) { - if (test_bit(xtab[i], rule->map)) { + if (test_bit(xtab[i], rule->map.bits)) { res->start = res->end = xtab[i]; - if (pnp_check_irq(dev, res)) { - dev_dbg(&dev->dev, " assign 
irq %d %d\n", idx, - (int) res->start); - return 1; - } + if (pnp_check_irq(dev, res)) + goto __add; } } + + if (rule->flags & IORESOURCE_IRQ_OPTIONAL) { + res->start = -1; + res->end = -1; + res->flags |= IORESOURCE_DISABLED; + dev_dbg(&dev->dev, " irq %d disabled (optional)\n", idx); + goto __add; + } + dev_dbg(&dev->dev, " couldn't assign irq %d\n", idx); + return -EBUSY; + +__add: + pnp_add_irq_resource(dev, res->start, res->flags); return 0; } -static void pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx) +static int pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx) { - struct pnp_resource *pnp_res; - struct resource *res; + struct resource *res, local_res; int i; /* DMA priority: this table is good for i386 */ @@ -198,231 +182,99 @@ static void pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx) 1, 3, 5, 6, 7, 0, 2, 4 }; - pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_DMA, idx); - if (!pnp_res) { - dev_err(&dev->dev, "too many DMA resources\n"); - return; - } - - res = &pnp_res->res; - - /* check if this resource has been manually set, if so skip */ - if (!(res->flags & IORESOURCE_AUTO)) { + res = pnp_get_resource(dev, IORESOURCE_DMA, idx); + if (res) { dev_dbg(&dev->dev, " dma %d already set to %d flags %#lx\n", idx, (int) res->start, res->flags); - return; + return 0; } - /* set the initial values */ - pnp_res->index = idx; - res->flags |= rule->flags | IORESOURCE_DMA; - res->flags &= ~IORESOURCE_UNSET; + res = &local_res; + res->flags = rule->flags | IORESOURCE_AUTO; + res->start = -1; + res->end = -1; for (i = 0; i < 8; i++) { if (rule->map & (1 << xtab[i])) { res->start = res->end = xtab[i]; - if (pnp_check_dma(dev, res)) { - dev_dbg(&dev->dev, " assign dma %d %d\n", idx, - (int) res->start); - return; - } + if (pnp_check_dma(dev, res)) + goto __add; } } #ifdef MAX_DMA_CHANNELS res->start = res->end = MAX_DMA_CHANNELS; #endif - res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED; + res->flags |= IORESOURCE_DISABLED; dev_dbg(&dev->dev, " disable dma %d\n", idx); -} - -void pnp_init_resource(struct resource *res) -{ - unsigned long type; - - type = res->flags & (IORESOURCE_IO | IORESOURCE_MEM | - IORESOURCE_IRQ | IORESOURCE_DMA); - res->name = NULL; - res->flags = type | IORESOURCE_AUTO | IORESOURCE_UNSET; - if (type == IORESOURCE_IRQ || type == IORESOURCE_DMA) { - res->start = -1; - res->end = -1; - } else { - res->start = 0; - res->end = 0; - } +__add: + pnp_add_dma_resource(dev, res->start, res->flags); + return 0; } -/** - * pnp_init_resources - Resets a resource table to default values. 
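pnp_assign_port() and pnp_assign_mem() above share one pattern: start at the option's minimum, step by its alignment until the conflict check passes, and give up once the candidate passes the maximum (or the alignment is zero). A userspace sketch of that loop, with region_is_free() standing in for pnp_check_port()/pnp_check_mem():

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the kernel's conflict check: pretend 0x200-0x2ff is taken. */
static bool region_is_free(unsigned long start, unsigned long size)
{
        return start >= 0x300 && start + size <= 0x1000;
}

int main(void)
{
        unsigned long min = 0x200, max = 0x3e0, align = 0x20, size = 8;
        unsigned long start = min;

        while (!region_is_free(start, size)) {
                start += align;
                if (start > max || !align) {
                        printf("couldn't assign (min %#lx max %#lx)\n", min, max);
                        return 1;
                }
        }
        printf("assigned %#lx-%#lx\n", start, start + size - 1);
        return 0;
}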
- * @table: pointer to the desired resource table - */ void pnp_init_resources(struct pnp_dev *dev) { - struct resource *res; - int idx; - - for (idx = 0; idx < PNP_MAX_IRQ; idx++) { - res = &dev->res->irq[idx].res; - res->flags = IORESOURCE_IRQ; - pnp_init_resource(res); - } - for (idx = 0; idx < PNP_MAX_DMA; idx++) { - res = &dev->res->dma[idx].res; - res->flags = IORESOURCE_DMA; - pnp_init_resource(res); - } - for (idx = 0; idx < PNP_MAX_PORT; idx++) { - res = &dev->res->port[idx].res; - res->flags = IORESOURCE_IO; - pnp_init_resource(res); - } - for (idx = 0; idx < PNP_MAX_MEM; idx++) { - res = &dev->res->mem[idx].res; - res->flags = IORESOURCE_MEM; - pnp_init_resource(res); - } + pnp_free_resources(dev); } -/** - * pnp_clean_resources - clears resources that were not manually set - * @res: the resources to clean - */ static void pnp_clean_resource_table(struct pnp_dev *dev) { - struct resource *res; - int idx; - - for (idx = 0; idx < PNP_MAX_IRQ; idx++) { - res = &dev->res->irq[idx].res; - if (res->flags & IORESOURCE_AUTO) { - res->flags = IORESOURCE_IRQ; - pnp_init_resource(res); - } - } - for (idx = 0; idx < PNP_MAX_DMA; idx++) { - res = &dev->res->dma[idx].res; - if (res->flags & IORESOURCE_AUTO) { - res->flags = IORESOURCE_DMA; - pnp_init_resource(res); - } - } - for (idx = 0; idx < PNP_MAX_PORT; idx++) { - res = &dev->res->port[idx].res; - if (res->flags & IORESOURCE_AUTO) { - res->flags = IORESOURCE_IO; - pnp_init_resource(res); - } - } - for (idx = 0; idx < PNP_MAX_MEM; idx++) { - res = &dev->res->mem[idx].res; - if (res->flags & IORESOURCE_AUTO) { - res->flags = IORESOURCE_MEM; - pnp_init_resource(res); - } + struct pnp_resource *pnp_res, *tmp; + + list_for_each_entry_safe(pnp_res, tmp, &dev->resources, list) { + if (pnp_res->res.flags & IORESOURCE_AUTO) + pnp_free_resource(pnp_res); } } /** * pnp_assign_resources - assigns resources to the device based on the specified dependent number * @dev: pointer to the desired device - * @depnum: the dependent function number - * - * Only set depnum to 0 if the device does not have dependent options. 
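pnp_clean_resource_table() above frees only the auto-assigned entries, and it can do so while walking the list because list_for_each_entry_safe() caches the next node before the current one is released. A userspace sketch of the same delete-while-iterating idea on a plain singly linked list:

#include <stdio.h>
#include <stdlib.h>

struct res {
        struct res *next;
        int auto_assigned;              /* stands in for IORESOURCE_AUTO */
};

/* Free auto-assigned entries mid-walk; 'next' is saved before any free. */
static void clean_auto(struct res **head)
{
        struct res **link = head;
        struct res *cur, *next;

        for (cur = *head; cur; cur = next) {
                next = cur->next;
                if (cur->auto_assigned) {
                        *link = next;
                        free(cur);
                } else {
                        link = &cur->next;
                }
        }
}

int main(void)
{
        struct res *a = calloc(1, sizeof(*a));
        struct res *b = calloc(1, sizeof(*b));

        if (!a || !b)
                return 1;
        b->auto_assigned = 1;
        a->next = b;
        clean_auto(&a);
        printf("list now has %s auto-assigned entries\n", a->next ? "some" : "no");
        free(a);
        return 0;
}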
+ * @set: the dependent function number */ -static int pnp_assign_resources(struct pnp_dev *dev, int depnum) +static int pnp_assign_resources(struct pnp_dev *dev, int set) { - struct pnp_port *port; - struct pnp_mem *mem; - struct pnp_irq *irq; - struct pnp_dma *dma; + struct pnp_option *option; int nport = 0, nmem = 0, nirq = 0, ndma = 0; + int ret = 0; - if (!pnp_can_configure(dev)) - return -ENODEV; - - dbg_pnp_show_resources(dev, "before pnp_assign_resources"); + dev_dbg(&dev->dev, "pnp_assign_resources, try dependent set %d\n", set); mutex_lock(&pnp_res_mutex); pnp_clean_resource_table(dev); - if (dev->independent) { - dev_dbg(&dev->dev, "assigning independent options\n"); - port = dev->independent->port; - mem = dev->independent->mem; - irq = dev->independent->irq; - dma = dev->independent->dma; - while (port) { - if (!pnp_assign_port(dev, port, nport)) - goto fail; - nport++; - port = port->next; - } - while (mem) { - if (!pnp_assign_mem(dev, mem, nmem)) - goto fail; - nmem++; - mem = mem->next; - } - while (irq) { - if (!pnp_assign_irq(dev, irq, nirq)) - goto fail; - nirq++; - irq = irq->next; - } - while (dma) { - pnp_assign_dma(dev, dma, ndma); - ndma++; - dma = dma->next; - } - } - if (depnum) { - struct pnp_option *dep; - int i; - - dev_dbg(&dev->dev, "assigning dependent option %d\n", depnum); - for (i = 1, dep = dev->dependent; i < depnum; - i++, dep = dep->next) - if (!dep) - goto fail; - port = dep->port; - mem = dep->mem; - irq = dep->irq; - dma = dep->dma; - while (port) { - if (!pnp_assign_port(dev, port, nport)) - goto fail; - nport++; - port = port->next; - } - while (mem) { - if (!pnp_assign_mem(dev, mem, nmem)) - goto fail; - nmem++; - mem = mem->next; - } - while (irq) { - if (!pnp_assign_irq(dev, irq, nirq)) - goto fail; - nirq++; - irq = irq->next; + list_for_each_entry(option, &dev->options, list) { + if (pnp_option_is_dependent(option) && + pnp_option_set(option) != set) + continue; + + switch (option->type) { + case IORESOURCE_IO: + ret = pnp_assign_port(dev, &option->u.port, nport++); + break; + case IORESOURCE_MEM: + ret = pnp_assign_mem(dev, &option->u.mem, nmem++); + break; + case IORESOURCE_IRQ: + ret = pnp_assign_irq(dev, &option->u.irq, nirq++); + break; + case IORESOURCE_DMA: + ret = pnp_assign_dma(dev, &option->u.dma, ndma++); + break; + default: + ret = -EINVAL; + break; } - while (dma) { - pnp_assign_dma(dev, dma, ndma); - ndma++; - dma = dma->next; - } - } else if (dev->dependent) - goto fail; - - mutex_unlock(&pnp_res_mutex); - dbg_pnp_show_resources(dev, "after pnp_assign_resources"); - return 1; + if (ret < 0) + break; + } -fail: - pnp_clean_resource_table(dev); mutex_unlock(&pnp_res_mutex); - dbg_pnp_show_resources(dev, "after pnp_assign_resources (failed)"); - return 0; + if (ret < 0) { + dev_dbg(&dev->dev, "pnp_assign_resources failed (%d)\n", ret); + pnp_clean_resource_table(dev); + } else + dbg_pnp_show_resources(dev, "pnp_assign_resources succeeded"); + return ret; } /** @@ -431,29 +283,25 @@ fail: */ int pnp_auto_config_dev(struct pnp_dev *dev) { - struct pnp_option *dep; - int i = 1; + int i, ret; if (!pnp_can_configure(dev)) { dev_dbg(&dev->dev, "configuration not supported\n"); return -ENODEV; } - if (!dev->dependent) { - if (pnp_assign_resources(dev, 0)) + ret = pnp_assign_resources(dev, 0); + if (ret == 0) + return 0; + + for (i = 1; i < dev->num_dependent_sets; i++) { + ret = pnp_assign_resources(dev, i); + if (ret == 0) return 0; - } else { - dep = dev->dependent; - do { - if (pnp_assign_resources(dev, i)) - return 0; - dep = 
dep->next; - i++; - } while (dep); } dev_err(&dev->dev, "unable to assign resources\n"); - return -EBUSY; + return ret; } /** diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c index 50902773bea..c1b9ea34977 100644 --- a/drivers/pnp/pnpacpi/core.c +++ b/drivers/pnp/pnpacpi/core.c @@ -117,9 +117,7 @@ static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state) { int power_state; - power_state = acpi_pm_device_sleep_state(&dev->dev, - device_may_wakeup(&dev->dev), - NULL); + power_state = acpi_pm_device_sleep_state(&dev->dev, NULL); if (power_state < 0) power_state = (state.event == PM_EVENT_ON) ? ACPI_STATE_D0 : ACPI_STATE_D3; diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c index 46c791adb89..d7e9f2152df 100644 --- a/drivers/pnp/pnpacpi/rsparser.c +++ b/drivers/pnp/pnpacpi/rsparser.c @@ -3,6 +3,8 @@ * * Copyright (c) 2004 Matthieu Castet <castet.matthieu@free.fr> * Copyright (c) 2004 Li Shaohua <shaohua.li@intel.com> + * Copyright (C) 2008 Hewlett-Packard Development Company, L.P. + * Bjorn Helgaas <bjorn.helgaas@hp.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -98,8 +100,10 @@ static void pnpacpi_parse_allocated_irqresource(struct pnp_dev *dev, int irq, flags; int p, t; - if (!valid_IRQ(gsi)) + if (!valid_IRQ(gsi)) { + pnp_add_irq_resource(dev, gsi, IORESOURCE_DISABLED); return; + } /* * in IO-APIC mode, use overrided attribute. Two reasons: @@ -178,13 +182,68 @@ static void pnpacpi_parse_allocated_ioresource(struct pnp_dev *dev, u64 start, u64 end = start + len - 1; if (io_decode == ACPI_DECODE_16) - flags |= PNP_PORT_FLAG_16BITADDR; + flags |= IORESOURCE_IO_16BIT_ADDR; if (len == 0 || end >= 0x10003) flags |= IORESOURCE_DISABLED; pnp_add_io_resource(dev, start, end, flags); } +/* + * Device CSRs that do not appear in PCI config space should be described + * via ACPI. This would normally be done with Address Space Descriptors + * marked as "consumer-only," but old versions of Windows and Linux ignore + * the producer/consumer flag, so HP invented a vendor-defined resource to + * describe the location and size of CSR space. 
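The code that follows (vendor_resource_matches() and pnpacpi_parse_allocated_vendor()) matches this vendor-defined descriptor by subtype and 16-byte UUID, checks the payload length, and then reads a 64-bit base and length out of the payload. A userspace sketch of that matching, with fabricated descriptor contents:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for ACPI's vendor-typed resource descriptor. */
struct vendor_desc {
        uint8_t subtype;
        uint8_t uuid[16];
        uint8_t data[16];               /* 64-bit base followed by 64-bit length */
        int data_len;
};

static int parse_ccsr(const struct vendor_desc *d, uint8_t want_subtype,
                      const uint8_t *want_uuid, uint64_t *base, uint64_t *len)
{
        if (d->subtype != want_subtype || memcmp(d->uuid, want_uuid, 16) != 0)
                return 0;
        if (d->data_len != 16)
                return 0;               /* wrong vendor descriptor size */
        memcpy(base, d->data, 8);
        memcpy(len, d->data + 8, 8);
        return 1;
}

int main(void)
{
        /* Fabricated UUID and payload, for illustration only. */
        static const uint8_t uuid[16] = { 0xf9, 0xad, 0xe9, 0x69 };
        uint64_t want_base = 0xf0000000, want_len = 0x1000, base, len;
        struct vendor_desc d = { .subtype = 2, .data_len = 16 };

        memcpy(d.uuid, uuid, 16);
        memcpy(d.data, &want_base, 8);
        memcpy(d.data + 8, &want_len, 8);
        if (parse_ccsr(&d, 2, uuid, &base, &len))
                printf("CSR space %#llx-%#llx\n",
                       (unsigned long long)base,
                       (unsigned long long)(base + len - 1));
        return 0;
}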
+ */ +static struct acpi_vendor_uuid hp_ccsr_uuid = { + .subtype = 2, + .data = { 0xf9, 0xad, 0xe9, 0x69, 0x4f, 0x92, 0x5f, 0xab, 0xf6, 0x4a, + 0x24, 0xd2, 0x01, 0x37, 0x0e, 0xad }, +}; + +static int vendor_resource_matches(struct pnp_dev *dev, + struct acpi_resource_vendor_typed *vendor, + struct acpi_vendor_uuid *match, + int expected_len) +{ + int uuid_len = sizeof(vendor->uuid); + u8 uuid_subtype = vendor->uuid_subtype; + u8 *uuid = vendor->uuid; + int actual_len; + + /* byte_length includes uuid_subtype and uuid */ + actual_len = vendor->byte_length - uuid_len - 1; + + if (uuid_subtype == match->subtype && + uuid_len == sizeof(match->data) && + memcmp(uuid, match->data, uuid_len) == 0) { + if (expected_len && expected_len != actual_len) { + dev_err(&dev->dev, "wrong vendor descriptor size; " + "expected %d, found %d bytes\n", + expected_len, actual_len); + return 0; + } + + return 1; + } + + return 0; +} + +static void pnpacpi_parse_allocated_vendor(struct pnp_dev *dev, + struct acpi_resource_vendor_typed *vendor) +{ + if (vendor_resource_matches(dev, vendor, &hp_ccsr_uuid, 16)) { + u64 start, length; + + memcpy(&start, vendor->byte_data, sizeof(start)); + memcpy(&length, vendor->byte_data + 8, sizeof(length)); + + pnp_add_mem_resource(dev, start, start + length - 1, 0); + } +} + static void pnpacpi_parse_allocated_memresource(struct pnp_dev *dev, u64 start, u64 len, int write_protect) @@ -235,6 +294,7 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, struct acpi_resource_dma *dma; struct acpi_resource_io *io; struct acpi_resource_fixed_io *fixed_io; + struct acpi_resource_vendor_typed *vendor_typed; struct acpi_resource_memory24 *memory24; struct acpi_resource_memory32 *memory32; struct acpi_resource_fixed_memory32 *fixed_memory32; @@ -248,24 +308,39 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, * _CRS, but some firmware violates this, so parse them all. */ irq = &res->data.irq; - for (i = 0; i < irq->interrupt_count; i++) { - pnpacpi_parse_allocated_irqresource(dev, - irq->interrupts[i], - irq->triggering, - irq->polarity, - irq->sharable); + if (irq->interrupt_count == 0) + pnp_add_irq_resource(dev, 0, IORESOURCE_DISABLED); + else { + for (i = 0; i < irq->interrupt_count; i++) { + pnpacpi_parse_allocated_irqresource(dev, + irq->interrupts[i], + irq->triggering, + irq->polarity, + irq->sharable); + } + + /* + * The IRQ encoder puts a single interrupt in each + * descriptor, so if a _CRS descriptor has more than + * one interrupt, we won't be able to re-encode it. 
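Because the encoder emits a single interrupt per descriptor, a _CRS descriptor carrying several interrupts cannot be written back, so the code below logs a warning and drops the device's write capability. A toy sketch of that demotion; CAP_WRITE is a placeholder, not the kernel's PNP_WRITE value:

#include <stdio.h>

#define CAP_WRITE       0x0004          /* placeholder capability bit */

struct dev {
        unsigned int capabilities;
};

static int can_write(const struct dev *dev)
{
        return dev->capabilities & CAP_WRITE;
}

int main(void)
{
        struct dev dev = { .capabilities = CAP_WRITE };
        int interrupt_count = 2;        /* pretend _CRS had two IRQs in one descriptor */

        if (can_write(&dev) && interrupt_count > 1) {
                fprintf(stderr, "multiple interrupts; configuration can't be changed\n");
                dev.capabilities &= ~CAP_WRITE;
        }
        printf("device is %s\n", can_write(&dev) ? "writable" : "read-only");
        return 0;
}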
+ */ + if (pnp_can_write(dev) && irq->interrupt_count > 1) { + dev_warn(&dev->dev, "multiple interrupts in " + "_CRS descriptor; configuration can't " + "be changed\n"); + dev->capabilities &= ~PNP_WRITE; + } } break; case ACPI_RESOURCE_TYPE_DMA: dma = &res->data.dma; - if (dma->channel_count > 0) { + if (dma->channel_count > 0 && dma->channels[0] != (u8) -1) flags = dma_flags(dma->type, dma->bus_master, dma->transfer); - if (dma->channels[0] == (u8) -1) - flags |= IORESOURCE_DISABLED; - pnp_add_dma_resource(dev, dma->channels[0], flags); - } + else + flags = IORESOURCE_DISABLED; + pnp_add_dma_resource(dev, dma->channels[0], flags); break; case ACPI_RESOURCE_TYPE_IO: @@ -289,6 +364,8 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, break; case ACPI_RESOURCE_TYPE_VENDOR: + vendor_typed = &res->data.vendor_typed; + pnpacpi_parse_allocated_vendor(dev, vendor_typed); break; case ACPI_RESOURCE_TYPE_END_TAG: @@ -331,12 +408,29 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, if (extended_irq->producer_consumer == ACPI_PRODUCER) return AE_OK; - for (i = 0; i < extended_irq->interrupt_count; i++) { - pnpacpi_parse_allocated_irqresource(dev, - extended_irq->interrupts[i], - extended_irq->triggering, - extended_irq->polarity, - extended_irq->sharable); + if (extended_irq->interrupt_count == 0) + pnp_add_irq_resource(dev, 0, IORESOURCE_DISABLED); + else { + for (i = 0; i < extended_irq->interrupt_count; i++) { + pnpacpi_parse_allocated_irqresource(dev, + extended_irq->interrupts[i], + extended_irq->triggering, + extended_irq->polarity, + extended_irq->sharable); + } + + /* + * The IRQ encoder puts a single interrupt in each + * descriptor, so if a _CRS descriptor has more than + * one interrupt, we won't be able to re-encode it. 
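A recurring convention in this parser: when _CRS reports an empty interrupt descriptor, an invalid GSI, or DMA channel -1, a resource entry is still recorded but flagged IORESOURCE_DISABLED, so the Nth entry keeps lining up with the Nth descriptor when the resources are later encoded back for _SRS. A userspace sketch of that bookkeeping, with a placeholder flag value:

#include <stdio.h>

#define RES_DISABLED    0x10000000      /* placeholder for IORESOURCE_DISABLED */

struct res {
        long start;
        unsigned long flags;
};

/* Append an IRQ entry; a disabled one still occupies a slot. */
static void add_irq(struct res *table, int *n, long gsi, unsigned long flags)
{
        table[*n].start = gsi;
        table[*n].flags = flags;
        (*n)++;
}

int main(void)
{
        struct res irqs[4];
        int n = 0, i;

        add_irq(irqs, &n, 11, 0);               /* usable GSI from _CRS */
        add_irq(irqs, &n, 0, RES_DISABLED);     /* empty or invalid descriptor */

        for (i = 0; i < n; i++)
                printf("irq slot %d: %ld%s\n", i, irqs[i].start,
                       irqs[i].flags & RES_DISABLED ? " (disabled)" : "");
        return 0;
}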
+ */ + if (pnp_can_write(dev) && + extended_irq->interrupt_count > 1) { + dev_warn(&dev->dev, "multiple interrupts in " + "_CRS descriptor; configuration can't " + "be changed\n"); + dev->capabilities &= ~PNP_WRITE; + } } break; @@ -373,179 +467,147 @@ int pnpacpi_parse_allocated_resource(struct pnp_dev *dev) } static __init void pnpacpi_parse_dma_option(struct pnp_dev *dev, - struct pnp_option *option, + unsigned int option_flags, struct acpi_resource_dma *p) { int i; - struct pnp_dma *dma; + unsigned char map = 0, flags; if (p->channel_count == 0) return; - dma = kzalloc(sizeof(struct pnp_dma), GFP_KERNEL); - if (!dma) - return; for (i = 0; i < p->channel_count; i++) - dma->map |= 1 << p->channels[i]; - - dma->flags = dma_flags(p->type, p->bus_master, p->transfer); + map |= 1 << p->channels[i]; - pnp_register_dma_resource(dev, option, dma); + flags = dma_flags(p->type, p->bus_master, p->transfer); + pnp_register_dma_resource(dev, option_flags, map, flags); } static __init void pnpacpi_parse_irq_option(struct pnp_dev *dev, - struct pnp_option *option, + unsigned int option_flags, struct acpi_resource_irq *p) { int i; - struct pnp_irq *irq; + pnp_irq_mask_t map; + unsigned char flags; if (p->interrupt_count == 0) return; - irq = kzalloc(sizeof(struct pnp_irq), GFP_KERNEL); - if (!irq) - return; + bitmap_zero(map.bits, PNP_IRQ_NR); for (i = 0; i < p->interrupt_count; i++) if (p->interrupts[i]) - __set_bit(p->interrupts[i], irq->map); - irq->flags = irq_flags(p->triggering, p->polarity, p->sharable); + __set_bit(p->interrupts[i], map.bits); - pnp_register_irq_resource(dev, option, irq); + flags = irq_flags(p->triggering, p->polarity, p->sharable); + pnp_register_irq_resource(dev, option_flags, &map, flags); } static __init void pnpacpi_parse_ext_irq_option(struct pnp_dev *dev, - struct pnp_option *option, + unsigned int option_flags, struct acpi_resource_extended_irq *p) { int i; - struct pnp_irq *irq; + pnp_irq_mask_t map; + unsigned char flags; if (p->interrupt_count == 0) return; - irq = kzalloc(sizeof(struct pnp_irq), GFP_KERNEL); - if (!irq) - return; - for (i = 0; i < p->interrupt_count; i++) - if (p->interrupts[i]) - __set_bit(p->interrupts[i], irq->map); - irq->flags = irq_flags(p->triggering, p->polarity, p->sharable); + bitmap_zero(map.bits, PNP_IRQ_NR); + for (i = 0; i < p->interrupt_count; i++) { + if (p->interrupts[i]) { + if (p->interrupts[i] < PNP_IRQ_NR) + __set_bit(p->interrupts[i], map.bits); + else + dev_err(&dev->dev, "ignoring IRQ %d option " + "(too large for %d entry bitmap)\n", + p->interrupts[i], PNP_IRQ_NR); + } + } - pnp_register_irq_resource(dev, option, irq); + flags = irq_flags(p->triggering, p->polarity, p->sharable); + pnp_register_irq_resource(dev, option_flags, &map, flags); } static __init void pnpacpi_parse_port_option(struct pnp_dev *dev, - struct pnp_option *option, + unsigned int option_flags, struct acpi_resource_io *io) { - struct pnp_port *port; + unsigned char flags = 0; if (io->address_length == 0) return; - port = kzalloc(sizeof(struct pnp_port), GFP_KERNEL); - if (!port) - return; - port->min = io->minimum; - port->max = io->maximum; - port->align = io->alignment; - port->size = io->address_length; - port->flags = ACPI_DECODE_16 == io->io_decode ? 
- PNP_PORT_FLAG_16BITADDR : 0; - pnp_register_port_resource(dev, option, port); + + if (io->io_decode == ACPI_DECODE_16) + flags = IORESOURCE_IO_16BIT_ADDR; + pnp_register_port_resource(dev, option_flags, io->minimum, io->maximum, + io->alignment, io->address_length, flags); } static __init void pnpacpi_parse_fixed_port_option(struct pnp_dev *dev, - struct pnp_option *option, + unsigned int option_flags, struct acpi_resource_fixed_io *io) { - struct pnp_port *port; - if (io->address_length == 0) return; - port = kzalloc(sizeof(struct pnp_port), GFP_KERNEL); - if (!port) - return; - port->min = port->max = io->address; - port->size = io->address_length; - port->align = 0; - port->flags = PNP_PORT_FLAG_FIXED; - pnp_register_port_resource(dev, option, port); + + pnp_register_port_resource(dev, option_flags, io->address, io->address, + 0, io->address_length, IORESOURCE_IO_FIXED); } static __init void pnpacpi_parse_mem24_option(struct pnp_dev *dev, - struct pnp_option *option, + unsigned int option_flags, struct acpi_resource_memory24 *p) { - struct pnp_mem *mem; + unsigned char flags = 0; if (p->address_length == 0) return; - mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL); - if (!mem) - return; - mem->min = p->minimum; - mem->max = p->maximum; - mem->align = p->alignment; - mem->size = p->address_length; - - mem->flags = (ACPI_READ_WRITE_MEMORY == p->write_protect) ? - IORESOURCE_MEM_WRITEABLE : 0; - pnp_register_mem_resource(dev, option, mem); + if (p->write_protect == ACPI_READ_WRITE_MEMORY) + flags = IORESOURCE_MEM_WRITEABLE; + pnp_register_mem_resource(dev, option_flags, p->minimum, p->maximum, + p->alignment, p->address_length, flags); } static __init void pnpacpi_parse_mem32_option(struct pnp_dev *dev, - struct pnp_option *option, + unsigned int option_flags, struct acpi_resource_memory32 *p) { - struct pnp_mem *mem; + unsigned char flags = 0; if (p->address_length == 0) return; - mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL); - if (!mem) - return; - mem->min = p->minimum; - mem->max = p->maximum; - mem->align = p->alignment; - mem->size = p->address_length; - - mem->flags = (ACPI_READ_WRITE_MEMORY == p->write_protect) ? - IORESOURCE_MEM_WRITEABLE : 0; - pnp_register_mem_resource(dev, option, mem); + if (p->write_protect == ACPI_READ_WRITE_MEMORY) + flags = IORESOURCE_MEM_WRITEABLE; + pnp_register_mem_resource(dev, option_flags, p->minimum, p->maximum, + p->alignment, p->address_length, flags); } static __init void pnpacpi_parse_fixed_mem32_option(struct pnp_dev *dev, - struct pnp_option *option, + unsigned int option_flags, struct acpi_resource_fixed_memory32 *p) { - struct pnp_mem *mem; + unsigned char flags = 0; if (p->address_length == 0) return; - mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL); - if (!mem) - return; - mem->min = mem->max = p->address; - mem->size = p->address_length; - mem->align = 0; - - mem->flags = (ACPI_READ_WRITE_MEMORY == p->write_protect) ? 
- IORESOURCE_MEM_WRITEABLE : 0; - pnp_register_mem_resource(dev, option, mem); + if (p->write_protect == ACPI_READ_WRITE_MEMORY) + flags = IORESOURCE_MEM_WRITEABLE; + pnp_register_mem_resource(dev, option_flags, p->address, p->address, + 0, p->address_length, flags); } static __init void pnpacpi_parse_address_option(struct pnp_dev *dev, - struct pnp_option *option, + unsigned int option_flags, struct acpi_resource *r) { struct acpi_resource_address64 addr, *p = &addr; acpi_status status; - struct pnp_mem *mem; - struct pnp_port *port; + unsigned char flags = 0; status = acpi_resource_to_address64(r, p); if (!ACPI_SUCCESS(status)) { @@ -558,49 +620,37 @@ static __init void pnpacpi_parse_address_option(struct pnp_dev *dev, return; if (p->resource_type == ACPI_MEMORY_RANGE) { - mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL); - if (!mem) - return; - mem->min = mem->max = p->minimum; - mem->size = p->address_length; - mem->align = 0; - mem->flags = (p->info.mem.write_protect == - ACPI_READ_WRITE_MEMORY) ? IORESOURCE_MEM_WRITEABLE - : 0; - pnp_register_mem_resource(dev, option, mem); - } else if (p->resource_type == ACPI_IO_RANGE) { - port = kzalloc(sizeof(struct pnp_port), GFP_KERNEL); - if (!port) - return; - port->min = port->max = p->minimum; - port->size = p->address_length; - port->align = 0; - port->flags = PNP_PORT_FLAG_FIXED; - pnp_register_port_resource(dev, option, port); - } + if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY) + flags = IORESOURCE_MEM_WRITEABLE; + pnp_register_mem_resource(dev, option_flags, p->minimum, + p->minimum, 0, p->address_length, + flags); + } else if (p->resource_type == ACPI_IO_RANGE) + pnp_register_port_resource(dev, option_flags, p->minimum, + p->minimum, 0, p->address_length, + IORESOURCE_IO_FIXED); } struct acpipnp_parse_option_s { - struct pnp_option *option; - struct pnp_option *option_independent; struct pnp_dev *dev; + unsigned int option_flags; }; static __init acpi_status pnpacpi_option_resource(struct acpi_resource *res, void *data) { - int priority = 0; + int priority; struct acpipnp_parse_option_s *parse_data = data; struct pnp_dev *dev = parse_data->dev; - struct pnp_option *option = parse_data->option; + unsigned int option_flags = parse_data->option_flags; switch (res->type) { case ACPI_RESOURCE_TYPE_IRQ: - pnpacpi_parse_irq_option(dev, option, &res->data.irq); + pnpacpi_parse_irq_option(dev, option_flags, &res->data.irq); break; case ACPI_RESOURCE_TYPE_DMA: - pnpacpi_parse_dma_option(dev, option, &res->data.dma); + pnpacpi_parse_dma_option(dev, option_flags, &res->data.dma); break; case ACPI_RESOURCE_TYPE_START_DEPENDENT: @@ -620,31 +670,19 @@ static __init acpi_status pnpacpi_option_resource(struct acpi_resource *res, priority = PNP_RES_PRIORITY_INVALID; break; } - /* TBD: Consider performance/robustness bits */ - option = pnp_register_dependent_option(dev, priority); - if (!option) - return AE_ERROR; - parse_data->option = option; + parse_data->option_flags = pnp_new_dependent_set(dev, priority); break; case ACPI_RESOURCE_TYPE_END_DEPENDENT: - /*only one EndDependentFn is allowed */ - if (!parse_data->option_independent) { - dev_warn(&dev->dev, "more than one EndDependentFn " - "in _PRS\n"); - return AE_ERROR; - } - parse_data->option = parse_data->option_independent; - parse_data->option_independent = NULL; - dev_dbg(&dev->dev, "end dependent options\n"); + parse_data->option_flags = 0; break; case ACPI_RESOURCE_TYPE_IO: - pnpacpi_parse_port_option(dev, option, &res->data.io); + pnpacpi_parse_port_option(dev, option_flags, 
&res->data.io); break; case ACPI_RESOURCE_TYPE_FIXED_IO: - pnpacpi_parse_fixed_port_option(dev, option, + pnpacpi_parse_fixed_port_option(dev, option_flags, &res->data.fixed_io); break; @@ -653,29 +691,31 @@ static __init acpi_status pnpacpi_option_resource(struct acpi_resource *res, break; case ACPI_RESOURCE_TYPE_MEMORY24: - pnpacpi_parse_mem24_option(dev, option, &res->data.memory24); + pnpacpi_parse_mem24_option(dev, option_flags, + &res->data.memory24); break; case ACPI_RESOURCE_TYPE_MEMORY32: - pnpacpi_parse_mem32_option(dev, option, &res->data.memory32); + pnpacpi_parse_mem32_option(dev, option_flags, + &res->data.memory32); break; case ACPI_RESOURCE_TYPE_FIXED_MEMORY32: - pnpacpi_parse_fixed_mem32_option(dev, option, + pnpacpi_parse_fixed_mem32_option(dev, option_flags, &res->data.fixed_memory32); break; case ACPI_RESOURCE_TYPE_ADDRESS16: case ACPI_RESOURCE_TYPE_ADDRESS32: case ACPI_RESOURCE_TYPE_ADDRESS64: - pnpacpi_parse_address_option(dev, option, res); + pnpacpi_parse_address_option(dev, option_flags, res); break; case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64: break; case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: - pnpacpi_parse_ext_irq_option(dev, option, + pnpacpi_parse_ext_irq_option(dev, option_flags, &res->data.extended_irq); break; @@ -699,12 +739,9 @@ int __init pnpacpi_parse_resource_option_data(struct pnp_dev *dev) dev_dbg(&dev->dev, "parse resource options\n"); - parse_data.option = pnp_register_independent_option(dev); - if (!parse_data.option) - return -ENOMEM; - - parse_data.option_independent = parse_data.option; parse_data.dev = dev; + parse_data.option_flags = 0; + status = acpi_walk_resources(handle, METHOD_NAME__PRS, pnpacpi_option_resource, &parse_data); @@ -806,6 +843,13 @@ static void pnpacpi_encode_irq(struct pnp_dev *dev, struct acpi_resource_irq *irq = &resource->data.irq; int triggering, polarity, shareable; + if (!pnp_resource_enabled(p)) { + irq->interrupt_count = 0; + dev_dbg(&dev->dev, " encode irq (%s)\n", + p ? "disabled" : "missing"); + return; + } + decode_irq_flags(dev, p->flags, &triggering, &polarity, &shareable); irq->triggering = triggering; irq->polarity = polarity; @@ -828,6 +872,13 @@ static void pnpacpi_encode_ext_irq(struct pnp_dev *dev, struct acpi_resource_extended_irq *extended_irq = &resource->data.extended_irq; int triggering, polarity, shareable; + if (!pnp_resource_enabled(p)) { + extended_irq->interrupt_count = 0; + dev_dbg(&dev->dev, " encode extended irq (%s)\n", + p ? "disabled" : "missing"); + return; + } + decode_irq_flags(dev, p->flags, &triggering, &polarity, &shareable); extended_irq->producer_consumer = ACPI_CONSUMER; extended_irq->triggering = triggering; @@ -848,6 +899,13 @@ static void pnpacpi_encode_dma(struct pnp_dev *dev, { struct acpi_resource_dma *dma = &resource->data.dma; + if (!pnp_resource_enabled(p)) { + dma->channel_count = 0; + dev_dbg(&dev->dev, " encode dma (%s)\n", + p ? "disabled" : "missing"); + return; + } + /* Note: pnp_assign_dma will copy pnp_dma->flags into p->flags */ switch (p->flags & IORESOURCE_DMA_SPEED_MASK) { case IORESOURCE_DMA_TYPEA: @@ -889,17 +947,21 @@ static void pnpacpi_encode_io(struct pnp_dev *dev, { struct acpi_resource_io *io = &resource->data.io; - /* Note: pnp_assign_port will copy pnp_port->flags into p->flags */ - io->io_decode = (p->flags & PNP_PORT_FLAG_16BITADDR) ? - ACPI_DECODE_16 : ACPI_DECODE_10; - io->minimum = p->start; - io->maximum = p->end; - io->alignment = 0; /* Correct? 
*/ - io->address_length = p->end - p->start + 1; - - dev_dbg(&dev->dev, " encode io %#llx-%#llx decode %#x\n", - (unsigned long long) p->start, (unsigned long long) p->end, - io->io_decode); + if (pnp_resource_enabled(p)) { + /* Note: pnp_assign_port copies pnp_port->flags into p->flags */ + io->io_decode = (p->flags & IORESOURCE_IO_16BIT_ADDR) ? + ACPI_DECODE_16 : ACPI_DECODE_10; + io->minimum = p->start; + io->maximum = p->end; + io->alignment = 0; /* Correct? */ + io->address_length = p->end - p->start + 1; + } else { + io->minimum = 0; + io->address_length = 0; + } + + dev_dbg(&dev->dev, " encode io %#x-%#x decode %#x\n", io->minimum, + io->minimum + io->address_length - 1, io->io_decode); } static void pnpacpi_encode_fixed_io(struct pnp_dev *dev, @@ -908,11 +970,16 @@ static void pnpacpi_encode_fixed_io(struct pnp_dev *dev, { struct acpi_resource_fixed_io *fixed_io = &resource->data.fixed_io; - fixed_io->address = p->start; - fixed_io->address_length = p->end - p->start + 1; + if (pnp_resource_enabled(p)) { + fixed_io->address = p->start; + fixed_io->address_length = p->end - p->start + 1; + } else { + fixed_io->address = 0; + fixed_io->address_length = 0; + } - dev_dbg(&dev->dev, " encode fixed_io %#llx-%#llx\n", - (unsigned long long) p->start, (unsigned long long) p->end); + dev_dbg(&dev->dev, " encode fixed_io %#x-%#x\n", fixed_io->address, + fixed_io->address + fixed_io->address_length - 1); } static void pnpacpi_encode_mem24(struct pnp_dev *dev, @@ -921,17 +988,22 @@ static void pnpacpi_encode_mem24(struct pnp_dev *dev, { struct acpi_resource_memory24 *memory24 = &resource->data.memory24; - /* Note: pnp_assign_mem will copy pnp_mem->flags into p->flags */ - memory24->write_protect = - (p->flags & IORESOURCE_MEM_WRITEABLE) ? - ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY; - memory24->minimum = p->start; - memory24->maximum = p->end; - memory24->alignment = 0; - memory24->address_length = p->end - p->start + 1; - - dev_dbg(&dev->dev, " encode mem24 %#llx-%#llx write_protect %#x\n", - (unsigned long long) p->start, (unsigned long long) p->end, + if (pnp_resource_enabled(p)) { + /* Note: pnp_assign_mem copies pnp_mem->flags into p->flags */ + memory24->write_protect = p->flags & IORESOURCE_MEM_WRITEABLE ? + ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY; + memory24->minimum = p->start; + memory24->maximum = p->end; + memory24->alignment = 0; + memory24->address_length = p->end - p->start + 1; + } else { + memory24->minimum = 0; + memory24->address_length = 0; + } + + dev_dbg(&dev->dev, " encode mem24 %#x-%#x write_protect %#x\n", + memory24->minimum, + memory24->minimum + memory24->address_length - 1, memory24->write_protect); } @@ -941,16 +1013,21 @@ static void pnpacpi_encode_mem32(struct pnp_dev *dev, { struct acpi_resource_memory32 *memory32 = &resource->data.memory32; - memory32->write_protect = - (p->flags & IORESOURCE_MEM_WRITEABLE) ? - ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY; - memory32->minimum = p->start; - memory32->maximum = p->end; - memory32->alignment = 0; - memory32->address_length = p->end - p->start + 1; + if (pnp_resource_enabled(p)) { + memory32->write_protect = p->flags & IORESOURCE_MEM_WRITEABLE ? 
+ ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY; + memory32->minimum = p->start; + memory32->maximum = p->end; + memory32->alignment = 0; + memory32->address_length = p->end - p->start + 1; + } else { + memory32->minimum = 0; + memory32->alignment = 0; + } - dev_dbg(&dev->dev, " encode mem32 %#llx-%#llx write_protect %#x\n", - (unsigned long long) p->start, (unsigned long long) p->end, + dev_dbg(&dev->dev, " encode mem32 %#x-%#x write_protect %#x\n", + memory32->minimum, + memory32->minimum + memory32->address_length - 1, memory32->write_protect); } @@ -960,15 +1037,20 @@ static void pnpacpi_encode_fixed_mem32(struct pnp_dev *dev, { struct acpi_resource_fixed_memory32 *fixed_memory32 = &resource->data.fixed_memory32; - fixed_memory32->write_protect = - (p->flags & IORESOURCE_MEM_WRITEABLE) ? - ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY; - fixed_memory32->address = p->start; - fixed_memory32->address_length = p->end - p->start + 1; + if (pnp_resource_enabled(p)) { + fixed_memory32->write_protect = + p->flags & IORESOURCE_MEM_WRITEABLE ? + ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY; + fixed_memory32->address = p->start; + fixed_memory32->address_length = p->end - p->start + 1; + } else { + fixed_memory32->address = 0; + fixed_memory32->address_length = 0; + } - dev_dbg(&dev->dev, " encode fixed_mem32 %#llx-%#llx " - "write_protect %#x\n", - (unsigned long long) p->start, (unsigned long long) p->end, + dev_dbg(&dev->dev, " encode fixed_mem32 %#x-%#x write_protect %#x\n", + fixed_memory32->address, + fixed_memory32->address + fixed_memory32->address_length - 1, fixed_memory32->write_protect); } diff --git a/drivers/pnp/pnpbios/rsparser.c b/drivers/pnp/pnpbios/rsparser.c index 5ff9a4c0447..ca567671379 100644 --- a/drivers/pnp/pnpbios/rsparser.c +++ b/drivers/pnp/pnpbios/rsparser.c @@ -216,137 +216,116 @@ len_err: static __init void pnpbios_parse_mem_option(struct pnp_dev *dev, unsigned char *p, int size, - struct pnp_option *option) + unsigned int option_flags) { - struct pnp_mem *mem; - - mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL); - if (!mem) - return; - mem->min = ((p[5] << 8) | p[4]) << 8; - mem->max = ((p[7] << 8) | p[6]) << 8; - mem->align = (p[9] << 8) | p[8]; - mem->size = ((p[11] << 8) | p[10]) << 8; - mem->flags = p[3]; - pnp_register_mem_resource(dev, option, mem); + resource_size_t min, max, align, len; + unsigned char flags; + + min = ((p[5] << 8) | p[4]) << 8; + max = ((p[7] << 8) | p[6]) << 8; + align = (p[9] << 8) | p[8]; + len = ((p[11] << 8) | p[10]) << 8; + flags = p[3]; + pnp_register_mem_resource(dev, option_flags, min, max, align, len, + flags); } static __init void pnpbios_parse_mem32_option(struct pnp_dev *dev, unsigned char *p, int size, - struct pnp_option *option) + unsigned int option_flags) { - struct pnp_mem *mem; - - mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL); - if (!mem) - return; - mem->min = (p[7] << 24) | (p[6] << 16) | (p[5] << 8) | p[4]; - mem->max = (p[11] << 24) | (p[10] << 16) | (p[9] << 8) | p[8]; - mem->align = (p[15] << 24) | (p[14] << 16) | (p[13] << 8) | p[12]; - mem->size = (p[19] << 24) | (p[18] << 16) | (p[17] << 8) | p[16]; - mem->flags = p[3]; - pnp_register_mem_resource(dev, option, mem); + resource_size_t min, max, align, len; + unsigned char flags; + + min = (p[7] << 24) | (p[6] << 16) | (p[5] << 8) | p[4]; + max = (p[11] << 24) | (p[10] << 16) | (p[9] << 8) | p[8]; + align = (p[15] << 24) | (p[14] << 16) | (p[13] << 8) | p[12]; + len = (p[19] << 24) | (p[18] << 16) | (p[17] << 8) | p[16]; + flags = p[3]; + 
pnp_register_mem_resource(dev, option_flags, min, max, align, len, + flags); } static __init void pnpbios_parse_fixed_mem32_option(struct pnp_dev *dev, unsigned char *p, int size, - struct pnp_option *option) + unsigned int option_flags) { - struct pnp_mem *mem; - - mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL); - if (!mem) - return; - mem->min = mem->max = (p[7] << 24) | (p[6] << 16) | (p[5] << 8) | p[4]; - mem->size = (p[11] << 24) | (p[10] << 16) | (p[9] << 8) | p[8]; - mem->align = 0; - mem->flags = p[3]; - pnp_register_mem_resource(dev, option, mem); + resource_size_t base, len; + unsigned char flags; + + base = (p[7] << 24) | (p[6] << 16) | (p[5] << 8) | p[4]; + len = (p[11] << 24) | (p[10] << 16) | (p[9] << 8) | p[8]; + flags = p[3]; + pnp_register_mem_resource(dev, option_flags, base, base, 0, len, flags); } static __init void pnpbios_parse_irq_option(struct pnp_dev *dev, unsigned char *p, int size, - struct pnp_option *option) + unsigned int option_flags) { - struct pnp_irq *irq; unsigned long bits; + pnp_irq_mask_t map; + unsigned char flags = IORESOURCE_IRQ_HIGHEDGE; - irq = kzalloc(sizeof(struct pnp_irq), GFP_KERNEL); - if (!irq) - return; bits = (p[2] << 8) | p[1]; - bitmap_copy(irq->map, &bits, 16); + + bitmap_zero(map.bits, PNP_IRQ_NR); + bitmap_copy(map.bits, &bits, 16); + if (size > 2) - irq->flags = p[3]; - else - irq->flags = IORESOURCE_IRQ_HIGHEDGE; - pnp_register_irq_resource(dev, option, irq); + flags = p[3]; + + pnp_register_irq_resource(dev, option_flags, &map, flags); } static __init void pnpbios_parse_dma_option(struct pnp_dev *dev, unsigned char *p, int size, - struct pnp_option *option) + unsigned int option_flags) { - struct pnp_dma *dma; - - dma = kzalloc(sizeof(struct pnp_dma), GFP_KERNEL); - if (!dma) - return; - dma->map = p[1]; - dma->flags = p[2]; - pnp_register_dma_resource(dev, option, dma); + pnp_register_dma_resource(dev, option_flags, p[1], p[2]); } static __init void pnpbios_parse_port_option(struct pnp_dev *dev, unsigned char *p, int size, - struct pnp_option *option) + unsigned int option_flags) { - struct pnp_port *port; - - port = kzalloc(sizeof(struct pnp_port), GFP_KERNEL); - if (!port) - return; - port->min = (p[3] << 8) | p[2]; - port->max = (p[5] << 8) | p[4]; - port->align = p[6]; - port->size = p[7]; - port->flags = p[1] ? PNP_PORT_FLAG_16BITADDR : 0; - pnp_register_port_resource(dev, option, port); + resource_size_t min, max, align, len; + unsigned char flags; + + min = (p[3] << 8) | p[2]; + max = (p[5] << 8) | p[4]; + align = p[6]; + len = p[7]; + flags = p[1] ? 
IORESOURCE_IO_16BIT_ADDR : 0; + pnp_register_port_resource(dev, option_flags, min, max, align, len, + flags); } static __init void pnpbios_parse_fixed_port_option(struct pnp_dev *dev, unsigned char *p, int size, - struct pnp_option *option) + unsigned int option_flags) { - struct pnp_port *port; - - port = kzalloc(sizeof(struct pnp_port), GFP_KERNEL); - if (!port) - return; - port->min = port->max = (p[2] << 8) | p[1]; - port->size = p[3]; - port->align = 0; - port->flags = PNP_PORT_FLAG_FIXED; - pnp_register_port_resource(dev, option, port); + resource_size_t base, len; + + base = (p[2] << 8) | p[1]; + len = p[3]; + pnp_register_port_resource(dev, option_flags, base, base, 0, len, + IORESOURCE_IO_FIXED); } static __init unsigned char * pnpbios_parse_resource_option_data(unsigned char *p, unsigned char *end, - struct pnp_dev *dev) + struct pnp_dev *dev) { unsigned int len, tag; - int priority = 0; - struct pnp_option *option, *option_independent; + int priority; + unsigned int option_flags; if (!p) return NULL; dev_dbg(&dev->dev, "parse resource options\n"); - - option_independent = option = pnp_register_independent_option(dev); - if (!option) - return NULL; - + option_flags = 0; while ((char *)p < (char *)end) { /* determine the type of tag */ @@ -363,37 +342,38 @@ pnpbios_parse_resource_option_data(unsigned char *p, unsigned char *end, case LARGE_TAG_MEM: if (len != 9) goto len_err; - pnpbios_parse_mem_option(dev, p, len, option); + pnpbios_parse_mem_option(dev, p, len, option_flags); break; case LARGE_TAG_MEM32: if (len != 17) goto len_err; - pnpbios_parse_mem32_option(dev, p, len, option); + pnpbios_parse_mem32_option(dev, p, len, option_flags); break; case LARGE_TAG_FIXEDMEM32: if (len != 9) goto len_err; - pnpbios_parse_fixed_mem32_option(dev, p, len, option); + pnpbios_parse_fixed_mem32_option(dev, p, len, + option_flags); break; case SMALL_TAG_IRQ: if (len < 2 || len > 3) goto len_err; - pnpbios_parse_irq_option(dev, p, len, option); + pnpbios_parse_irq_option(dev, p, len, option_flags); break; case SMALL_TAG_DMA: if (len != 2) goto len_err; - pnpbios_parse_dma_option(dev, p, len, option); + pnpbios_parse_dma_option(dev, p, len, option_flags); break; case SMALL_TAG_PORT: if (len != 7) goto len_err; - pnpbios_parse_port_option(dev, p, len, option); + pnpbios_parse_port_option(dev, p, len, option_flags); break; case SMALL_TAG_VENDOR: @@ -403,28 +383,23 @@ pnpbios_parse_resource_option_data(unsigned char *p, unsigned char *end, case SMALL_TAG_FIXEDPORT: if (len != 3) goto len_err; - pnpbios_parse_fixed_port_option(dev, p, len, option); + pnpbios_parse_fixed_port_option(dev, p, len, + option_flags); break; case SMALL_TAG_STARTDEP: if (len > 1) goto len_err; - priority = 0x100 | PNP_RES_PRIORITY_ACCEPTABLE; + priority = PNP_RES_PRIORITY_ACCEPTABLE; if (len > 0) - priority = 0x100 | p[1]; - option = pnp_register_dependent_option(dev, priority); - if (!option) - return NULL; + priority = p[1]; + option_flags = pnp_new_dependent_set(dev, priority); break; case SMALL_TAG_ENDDEP: if (len != 0) goto len_err; - if (option_independent == option) - dev_warn(&dev->dev, "missing " - "SMALL_TAG_STARTDEP tag\n"); - option = option_independent; - dev_dbg(&dev->dev, "end dependent options\n"); + option_flags = 0; break; case SMALL_TAG_END: @@ -526,8 +501,16 @@ len_err: static void pnpbios_encode_mem(struct pnp_dev *dev, unsigned char *p, struct resource *res) { - unsigned long base = res->start; - unsigned long len = res->end - res->start + 1; + unsigned long base; + unsigned long len; + + if 
(pnp_resource_enabled(res)) { + base = res->start; + len = res->end - res->start + 1; + } else { + base = 0; + len = 0; + } p[4] = (base >> 8) & 0xff; p[5] = ((base >> 8) >> 8) & 0xff; @@ -536,15 +519,22 @@ static void pnpbios_encode_mem(struct pnp_dev *dev, unsigned char *p, p[10] = (len >> 8) & 0xff; p[11] = ((len >> 8) >> 8) & 0xff; - dev_dbg(&dev->dev, " encode mem %#llx-%#llx\n", - (unsigned long long) res->start, (unsigned long long) res->end); + dev_dbg(&dev->dev, " encode mem %#lx-%#lx\n", base, base + len - 1); } static void pnpbios_encode_mem32(struct pnp_dev *dev, unsigned char *p, struct resource *res) { - unsigned long base = res->start; - unsigned long len = res->end - res->start + 1; + unsigned long base; + unsigned long len; + + if (pnp_resource_enabled(res)) { + base = res->start; + len = res->end - res->start + 1; + } else { + base = 0; + len = 0; + } p[4] = base & 0xff; p[5] = (base >> 8) & 0xff; @@ -559,15 +549,22 @@ static void pnpbios_encode_mem32(struct pnp_dev *dev, unsigned char *p, p[18] = (len >> 16) & 0xff; p[19] = (len >> 24) & 0xff; - dev_dbg(&dev->dev, " encode mem32 %#llx-%#llx\n", - (unsigned long long) res->start, (unsigned long long) res->end); + dev_dbg(&dev->dev, " encode mem32 %#lx-%#lx\n", base, base + len - 1); } static void pnpbios_encode_fixed_mem32(struct pnp_dev *dev, unsigned char *p, struct resource *res) { - unsigned long base = res->start; - unsigned long len = res->end - res->start + 1; + unsigned long base; + unsigned long len; + + if (pnp_resource_enabled(res)) { + base = res->start; + len = res->end - res->start + 1; + } else { + base = 0; + len = 0; + } p[4] = base & 0xff; p[5] = (base >> 8) & 0xff; @@ -578,40 +575,54 @@ static void pnpbios_encode_fixed_mem32(struct pnp_dev *dev, unsigned char *p, p[10] = (len >> 16) & 0xff; p[11] = (len >> 24) & 0xff; - dev_dbg(&dev->dev, " encode fixed_mem32 %#llx-%#llx\n", - (unsigned long long) res->start, (unsigned long long) res->end); + dev_dbg(&dev->dev, " encode fixed_mem32 %#lx-%#lx\n", base, + base + len - 1); } static void pnpbios_encode_irq(struct pnp_dev *dev, unsigned char *p, struct resource *res) { - unsigned long map = 0; + unsigned long map; + + if (pnp_resource_enabled(res)) + map = 1 << res->start; + else + map = 0; - map = 1 << res->start; p[1] = map & 0xff; p[2] = (map >> 8) & 0xff; - dev_dbg(&dev->dev, " encode irq %llu\n", - (unsigned long long)res->start); + dev_dbg(&dev->dev, " encode irq mask %#lx\n", map); } static void pnpbios_encode_dma(struct pnp_dev *dev, unsigned char *p, struct resource *res) { - unsigned long map = 0; + unsigned long map; + + if (pnp_resource_enabled(res)) + map = 1 << res->start; + else + map = 0; - map = 1 << res->start; p[1] = map & 0xff; - dev_dbg(&dev->dev, " encode dma %llu\n", - (unsigned long long)res->start); + dev_dbg(&dev->dev, " encode dma mask %#lx\n", map); } static void pnpbios_encode_port(struct pnp_dev *dev, unsigned char *p, struct resource *res) { - unsigned long base = res->start; - unsigned long len = res->end - res->start + 1; + unsigned long base; + unsigned long len; + + if (pnp_resource_enabled(res)) { + base = res->start; + len = res->end - res->start + 1; + } else { + base = 0; + len = 0; + } p[2] = base & 0xff; p[3] = (base >> 8) & 0xff; @@ -619,8 +630,7 @@ static void pnpbios_encode_port(struct pnp_dev *dev, unsigned char *p, p[5] = (base >> 8) & 0xff; p[7] = len & 0xff; - dev_dbg(&dev->dev, " encode io %#llx-%#llx\n", - (unsigned long long) res->start, (unsigned long long) res->end); + dev_dbg(&dev->dev, " encode io 
%#lx-%#lx\n", base, base + len - 1); } static void pnpbios_encode_fixed_port(struct pnp_dev *dev, unsigned char *p, @@ -629,12 +639,20 @@ static void pnpbios_encode_fixed_port(struct pnp_dev *dev, unsigned char *p, unsigned long base = res->start; unsigned long len = res->end - res->start + 1; + if (pnp_resource_enabled(res)) { + base = res->start; + len = res->end - res->start + 1; + } else { + base = 0; + len = 0; + } + p[1] = base & 0xff; p[2] = (base >> 8) & 0xff; p[3] = len & 0xff; - dev_dbg(&dev->dev, " encode fixed_io %#llx-%#llx\n", - (unsigned long long) res->start, (unsigned long long) res->end); + dev_dbg(&dev->dev, " encode fixed_io %#lx-%#lx\n", base, + base + len - 1); } static unsigned char *pnpbios_encode_allocated_resource_data(struct pnp_dev diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c index 1ff3bb585ab..55f55ed72dc 100644 --- a/drivers/pnp/quirks.c +++ b/drivers/pnp/quirks.c @@ -5,6 +5,8 @@ * when building up the resource structure for the first time. * * Copyright (c) 2000 Peter Denison <peterd@pnd-pc.demon.co.uk> + * Copyright (C) 2008 Hewlett-Packard Development Company, L.P. + * Bjorn Helgaas <bjorn.helgaas@hp.com> * * Heavily based on PCI quirks handling which is * @@ -20,203 +22,207 @@ #include <linux/kallsyms.h> #include "base.h" +static void quirk_awe32_add_ports(struct pnp_dev *dev, + struct pnp_option *option, + unsigned int offset) +{ + struct pnp_option *new_option; + + new_option = kmalloc(sizeof(struct pnp_option), GFP_KERNEL); + if (!new_option) { + dev_err(&dev->dev, "couldn't add ioport region to option set " + "%d\n", pnp_option_set(option)); + return; + } + + *new_option = *option; + new_option->u.port.min += offset; + new_option->u.port.max += offset; + list_add(&new_option->list, &option->list); + + dev_info(&dev->dev, "added ioport region %#llx-%#llx to set %d\n", + (unsigned long long) new_option->u.port.min, + (unsigned long long) new_option->u.port.max, + pnp_option_set(option)); +} + static void quirk_awe32_resources(struct pnp_dev *dev) { - struct pnp_port *port, *port2, *port3; - struct pnp_option *res = dev->dependent; + struct pnp_option *option; + unsigned int set = ~0; /* - * Unfortunately the isapnp_add_port_resource is too tightly bound - * into the PnP discovery sequence, and cannot be used. Link in the - * two extra ports (at offset 0x400 and 0x800 from the one given) by - * hand. + * Add two extra ioport regions (at offset 0x400 and 0x800 from the + * one given) to every dependent option set. 
*/ - for (; res; res = res->next) { - port2 = pnp_alloc(sizeof(struct pnp_port)); - if (!port2) - return; - port3 = pnp_alloc(sizeof(struct pnp_port)); - if (!port3) { - kfree(port2); - return; + list_for_each_entry(option, &dev->options, list) { + if (pnp_option_is_dependent(option) && + pnp_option_set(option) != set) { + set = pnp_option_set(option); + quirk_awe32_add_ports(dev, option, 0x800); + quirk_awe32_add_ports(dev, option, 0x400); } - port = res->port; - memcpy(port2, port, sizeof(struct pnp_port)); - memcpy(port3, port, sizeof(struct pnp_port)); - port->next = port2; - port2->next = port3; - port2->min += 0x400; - port2->max += 0x400; - port3->min += 0x800; - port3->max += 0x800; - dev_info(&dev->dev, - "AWE32 quirk - added ioports 0x%lx and 0x%lx\n", - (unsigned long)port2->min, - (unsigned long)port3->min); } } static void quirk_cmi8330_resources(struct pnp_dev *dev) { - struct pnp_option *res = dev->dependent; - unsigned long tmp; - - for (; res; res = res->next) { - - struct pnp_irq *irq; - struct pnp_dma *dma; + struct pnp_option *option; + struct pnp_irq *irq; + struct pnp_dma *dma; - for (irq = res->irq; irq; irq = irq->next) { // Valid irqs are 5, 7, 10 - tmp = 0x04A0; - bitmap_copy(irq->map, &tmp, 16); // 0000 0100 1010 0000 - } + list_for_each_entry(option, &dev->options, list) { + if (!pnp_option_is_dependent(option)) + continue; - for (dma = res->dma; dma; dma = dma->next) // Valid 8bit dma channels are 1,3 + if (option->type == IORESOURCE_IRQ) { + irq = &option->u.irq; + bitmap_zero(irq->map.bits, PNP_IRQ_NR); + __set_bit(5, irq->map.bits); + __set_bit(7, irq->map.bits); + __set_bit(10, irq->map.bits); + dev_info(&dev->dev, "set possible IRQs in " + "option set %d to 5, 7, 10\n", + pnp_option_set(option)); + } else if (option->type == IORESOURCE_DMA) { + dma = &option->u.dma; if ((dma->flags & IORESOURCE_DMA_TYPE_MASK) == - IORESOURCE_DMA_8BIT) - dma->map = 0x000A; + IORESOURCE_DMA_8BIT && + dma->map != 0x0A) { + dev_info(&dev->dev, "changing possible " + "DMA channel mask in option set %d " + "from %#02x to 0x0A (1, 3)\n", + pnp_option_set(option), dma->map); + dma->map = 0x0A; + } + } } - dev_info(&dev->dev, "CMI8330 quirk - forced possible IRQs to 5, 7, 10 " - "and DMA channels to 1, 3\n"); } static void quirk_sb16audio_resources(struct pnp_dev *dev) { + struct pnp_option *option; + unsigned int prev_option_flags = ~0, n = 0; struct pnp_port *port; - struct pnp_option *res = dev->dependent; - int changed = 0; /* - * The default range on the mpu port for these devices is 0x388-0x388. + * The default range on the OPL port for these devices is 0x388-0x388. * Here we increase that range so that two such cards can be * auto-configured. 
*/ + list_for_each_entry(option, &dev->options, list) { + if (prev_option_flags != option->flags) { + prev_option_flags = option->flags; + n = 0; + } - for (; res; res = res->next) { - port = res->port; - if (!port) - continue; - port = port->next; - if (!port) - continue; - port = port->next; - if (!port) - continue; - if (port->min != port->max) - continue; - port->max += 0x70; - changed = 1; + if (pnp_option_is_dependent(option) && + option->type == IORESOURCE_IO) { + n++; + port = &option->u.port; + if (n == 3 && port->min == port->max) { + port->max += 0x70; + dev_info(&dev->dev, "increased option port " + "range from %#llx-%#llx to " + "%#llx-%#llx\n", + (unsigned long long) port->min, + (unsigned long long) port->min, + (unsigned long long) port->min, + (unsigned long long) port->max); + } + } } - if (changed) - dev_info(&dev->dev, "SB audio device quirk - increased port range\n"); } -static struct pnp_option *quirk_isapnp_mpu_options(struct pnp_dev *dev) +static struct pnp_option *pnp_clone_dependent_set(struct pnp_dev *dev, + unsigned int set) { - struct pnp_option *head = NULL; - struct pnp_option *prev = NULL; - struct pnp_option *res; - - /* - * Build a functional IRQ-less variant of each MPU option. - */ - - for (res = dev->dependent; res; res = res->next) { - struct pnp_option *curr; - struct pnp_port *port; - struct pnp_port *copy; + struct pnp_option *tail = NULL, *first_new_option = NULL; + struct pnp_option *option, *new_option; + unsigned int flags; - port = res->port; - if (!port || !res->irq) - continue; + list_for_each_entry(option, &dev->options, list) { + if (pnp_option_is_dependent(option)) + tail = option; + } + if (!tail) { + dev_err(&dev->dev, "no dependent option sets\n"); + return NULL; + } - copy = pnp_alloc(sizeof *copy); - if (!copy) - break; + flags = pnp_new_dependent_set(dev, PNP_RES_PRIORITY_FUNCTIONAL); + list_for_each_entry(option, &dev->options, list) { + if (pnp_option_is_dependent(option) && + pnp_option_set(option) == set) { + new_option = kmalloc(sizeof(struct pnp_option), + GFP_KERNEL); + if (!new_option) { + dev_err(&dev->dev, "couldn't clone dependent " + "set %d\n", set); + return NULL; + } - copy->min = port->min; - copy->max = port->max; - copy->align = port->align; - copy->size = port->size; - copy->flags = port->flags; + *new_option = *option; + new_option->flags = flags; + if (!first_new_option) + first_new_option = new_option; - curr = pnp_build_option(PNP_RES_PRIORITY_FUNCTIONAL); - if (!curr) { - kfree(copy); - break; + list_add(&new_option->list, &tail->list); + tail = new_option; } - curr->port = copy; - - if (prev) - prev->next = curr; - else - head = curr; - prev = curr; } - if (head) - dev_info(&dev->dev, "adding IRQ-less MPU options\n"); - return head; + return first_new_option; } -static void quirk_ad1815_mpu_resources(struct pnp_dev *dev) + +static void quirk_add_irq_optional_dependent_sets(struct pnp_dev *dev) { - struct pnp_option *res; + struct pnp_option *new_option; + unsigned int num_sets, i, set; struct pnp_irq *irq; - /* - * Distribute the independent IRQ over the dependent options - */ - - res = dev->independent; - if (!res) - return; - - irq = res->irq; - if (!irq || irq->next) - return; - - res = dev->dependent; - if (!res) - return; - - while (1) { - struct pnp_irq *copy; - - copy = pnp_alloc(sizeof *copy); - if (!copy) - break; - - memcpy(copy->map, irq->map, sizeof copy->map); - copy->flags = irq->flags; + num_sets = dev->num_dependent_sets; + for (i = 0; i < num_sets; i++) { + new_option = 
pnp_clone_dependent_set(dev, i); + if (!new_option) + return; - copy->next = res->irq; /* Yes, this is NULL */ - res->irq = copy; + set = pnp_option_set(new_option); + while (new_option && pnp_option_set(new_option) == set) { + if (new_option->type == IORESOURCE_IRQ) { + irq = &new_option->u.irq; + irq->flags |= IORESOURCE_IRQ_OPTIONAL; + } + dbg_pnp_show_option(dev, new_option); + new_option = list_entry(new_option->list.next, + struct pnp_option, list); + } - if (!res->next) - break; - res = res->next; + dev_info(&dev->dev, "added dependent option set %d (same as " + "set %d except IRQ optional)\n", set, i); } - kfree(irq); - - res->next = quirk_isapnp_mpu_options(dev); - - res = dev->independent; - res->irq = NULL; } -static void quirk_isapnp_mpu_resources(struct pnp_dev *dev) +static void quirk_ad1815_mpu_resources(struct pnp_dev *dev) { - struct pnp_option *res; + struct pnp_option *option; + struct pnp_irq *irq = NULL; + unsigned int independent_irqs = 0; + + list_for_each_entry(option, &dev->options, list) { + if (option->type == IORESOURCE_IRQ && + !pnp_option_is_dependent(option)) { + independent_irqs++; + irq = &option->u.irq; + } + } - res = dev->dependent; - if (!res) + if (independent_irqs != 1) return; - while (res->next) - res = res->next; - - res->next = quirk_isapnp_mpu_options(dev); + irq->flags |= IORESOURCE_IRQ_OPTIONAL; + dev_info(&dev->dev, "made independent IRQ optional\n"); } #include <linux/pci.h> @@ -248,8 +254,7 @@ static void quirk_system_pci_resources(struct pnp_dev *dev) for (j = 0; (res = pnp_get_resource(dev, IORESOURCE_MEM, j)); j++) { - if (res->flags & IORESOURCE_UNSET || - (res->start == 0 && res->end == 0)) + if (res->start == 0 && res->end == 0) continue; pnp_start = res->start; @@ -312,10 +317,10 @@ static struct pnp_fixup pnp_fixups[] = { {"CTL0043", quirk_sb16audio_resources}, {"CTL0044", quirk_sb16audio_resources}, {"CTL0045", quirk_sb16audio_resources}, - /* Add IRQ-less MPU options */ + /* Add IRQ-optional MPU options */ {"ADS7151", quirk_ad1815_mpu_resources}, - {"ADS7181", quirk_isapnp_mpu_resources}, - {"AZT0002", quirk_isapnp_mpu_resources}, + {"ADS7181", quirk_add_irq_optional_dependent_sets}, + {"AZT0002", quirk_add_irq_optional_dependent_sets}, /* PnP resources that might overlap PCI BARs */ {"PNP0c01", quirk_system_pci_resources}, {"PNP0c02", quirk_system_pci_resources}, diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c index 390b50096e3..4cfe3a1efdf 100644 --- a/drivers/pnp/resource.c +++ b/drivers/pnp/resource.c @@ -3,6 +3,8 @@ * * based on isapnp.c resource management (c) Jaroslav Kysela <perex@perex.cz> * Copyright 2003 Adam Belay <ambx1@neo.rr.com> + * Copyright (C) 2008 Hewlett-Packard Development Company, L.P. + * Bjorn Helgaas <bjorn.helgaas@hp.com> */ #include <linux/module.h> @@ -28,201 +30,121 @@ static int pnp_reserve_mem[16] = {[0 ... 
15] = -1 }; /* reserve (don't use) some * option registration */ -struct pnp_option *pnp_build_option(int priority) +struct pnp_option *pnp_build_option(struct pnp_dev *dev, unsigned long type, + unsigned int option_flags) { - struct pnp_option *option = pnp_alloc(sizeof(struct pnp_option)); + struct pnp_option *option; + option = kzalloc(sizeof(struct pnp_option), GFP_KERNEL); if (!option) return NULL; - option->priority = priority & 0xff; - /* make sure the priority is valid */ - if (option->priority > PNP_RES_PRIORITY_FUNCTIONAL) - option->priority = PNP_RES_PRIORITY_INVALID; - - return option; -} - -struct pnp_option *pnp_register_independent_option(struct pnp_dev *dev) -{ - struct pnp_option *option; - - option = pnp_build_option(PNP_RES_PRIORITY_PREFERRED); - - /* this should never happen but if it does we'll try to continue */ - if (dev->independent) - dev_err(&dev->dev, "independent resource already registered\n"); - dev->independent = option; + option->flags = option_flags; + option->type = type; - dev_dbg(&dev->dev, "new independent option\n"); + list_add_tail(&option->list, &dev->options); return option; } -struct pnp_option *pnp_register_dependent_option(struct pnp_dev *dev, - int priority) +int pnp_register_irq_resource(struct pnp_dev *dev, unsigned int option_flags, + pnp_irq_mask_t *map, unsigned char flags) { struct pnp_option *option; + struct pnp_irq *irq; - option = pnp_build_option(priority); - - if (dev->dependent) { - struct pnp_option *parent = dev->dependent; - while (parent->next) - parent = parent->next; - parent->next = option; - } else - dev->dependent = option; - - dev_dbg(&dev->dev, "new dependent option (priority %#x)\n", priority); - return option; -} - -int pnp_register_irq_resource(struct pnp_dev *dev, struct pnp_option *option, - struct pnp_irq *data) -{ - struct pnp_irq *ptr; -#ifdef DEBUG - char buf[PNP_IRQ_NR]; /* hex-encoded, so this is overkill but safe */ -#endif + option = pnp_build_option(dev, IORESOURCE_IRQ, option_flags); + if (!option) + return -ENOMEM; - ptr = option->irq; - while (ptr && ptr->next) - ptr = ptr->next; - if (ptr) - ptr->next = data; - else - option->irq = data; + irq = &option->u.irq; + irq->map = *map; + irq->flags = flags; #ifdef CONFIG_PCI { int i; for (i = 0; i < 16; i++) - if (test_bit(i, data->map)) + if (test_bit(i, irq->map.bits)) pcibios_penalize_isa_irq(i, 0); } #endif -#ifdef DEBUG - bitmap_scnprintf(buf, sizeof(buf), data->map, PNP_IRQ_NR); - dev_dbg(&dev->dev, " irq bitmask %s flags %#x\n", buf, - data->flags); -#endif + dbg_pnp_show_option(dev, option); return 0; } -int pnp_register_dma_resource(struct pnp_dev *dev, struct pnp_option *option, - struct pnp_dma *data) +int pnp_register_dma_resource(struct pnp_dev *dev, unsigned int option_flags, + unsigned char map, unsigned char flags) { - struct pnp_dma *ptr; - - ptr = option->dma; - while (ptr && ptr->next) - ptr = ptr->next; - if (ptr) - ptr->next = data; - else - option->dma = data; - - dev_dbg(&dev->dev, " dma bitmask %#x flags %#x\n", data->map, - data->flags); - return 0; -} + struct pnp_option *option; + struct pnp_dma *dma; -int pnp_register_port_resource(struct pnp_dev *dev, struct pnp_option *option, - struct pnp_port *data) -{ - struct pnp_port *ptr; - - ptr = option->port; - while (ptr && ptr->next) - ptr = ptr->next; - if (ptr) - ptr->next = data; - else - option->port = data; - - dev_dbg(&dev->dev, " io " - "min %#x max %#x align %d size %d flags %#x\n", - data->min, data->max, data->align, data->size, data->flags); - return 0; -} + option = 
pnp_build_option(dev, IORESOURCE_DMA, option_flags); + if (!option) + return -ENOMEM; -int pnp_register_mem_resource(struct pnp_dev *dev, struct pnp_option *option, - struct pnp_mem *data) -{ - struct pnp_mem *ptr; - - ptr = option->mem; - while (ptr && ptr->next) - ptr = ptr->next; - if (ptr) - ptr->next = data; - else - option->mem = data; - - dev_dbg(&dev->dev, " mem " - "min %#x max %#x align %d size %d flags %#x\n", - data->min, data->max, data->align, data->size, data->flags); + dma = &option->u.dma; + dma->map = map; + dma->flags = flags; + + dbg_pnp_show_option(dev, option); return 0; } -static void pnp_free_port(struct pnp_port *port) +int pnp_register_port_resource(struct pnp_dev *dev, unsigned int option_flags, + resource_size_t min, resource_size_t max, + resource_size_t align, resource_size_t size, + unsigned char flags) { - struct pnp_port *next; + struct pnp_option *option; + struct pnp_port *port; - while (port) { - next = port->next; - kfree(port); - port = next; - } -} + option = pnp_build_option(dev, IORESOURCE_IO, option_flags); + if (!option) + return -ENOMEM; -static void pnp_free_irq(struct pnp_irq *irq) -{ - struct pnp_irq *next; + port = &option->u.port; + port->min = min; + port->max = max; + port->align = align; + port->size = size; + port->flags = flags; - while (irq) { - next = irq->next; - kfree(irq); - irq = next; - } + dbg_pnp_show_option(dev, option); + return 0; } -static void pnp_free_dma(struct pnp_dma *dma) +int pnp_register_mem_resource(struct pnp_dev *dev, unsigned int option_flags, + resource_size_t min, resource_size_t max, + resource_size_t align, resource_size_t size, + unsigned char flags) { - struct pnp_dma *next; + struct pnp_option *option; + struct pnp_mem *mem; - while (dma) { - next = dma->next; - kfree(dma); - dma = next; - } -} + option = pnp_build_option(dev, IORESOURCE_MEM, option_flags); + if (!option) + return -ENOMEM; -static void pnp_free_mem(struct pnp_mem *mem) -{ - struct pnp_mem *next; + mem = &option->u.mem; + mem->min = min; + mem->max = max; + mem->align = align; + mem->size = size; + mem->flags = flags; - while (mem) { - next = mem->next; - kfree(mem); - mem = next; - } + dbg_pnp_show_option(dev, option); + return 0; } -void pnp_free_option(struct pnp_option *option) +void pnp_free_options(struct pnp_dev *dev) { - struct pnp_option *next; - - while (option) { - next = option->next; - pnp_free_port(option->port); - pnp_free_irq(option->irq); - pnp_free_dma(option->dma); - pnp_free_mem(option->mem); + struct pnp_option *option, *tmp; + + list_for_each_entry_safe(option, tmp, &dev->options, list) { + list_del(&option->list); kfree(option); - option = next; } } @@ -237,7 +159,7 @@ void pnp_free_option(struct pnp_option *option) !((*(enda) < *(startb)) || (*(endb) < *(starta))) #define cannot_compare(flags) \ -((flags) & (IORESOURCE_UNSET | IORESOURCE_DISABLED)) +((flags) & IORESOURCE_DISABLED) int pnp_check_port(struct pnp_dev *dev, struct resource *res) { @@ -364,6 +286,61 @@ static irqreturn_t pnp_test_handler(int irq, void *dev_id) return IRQ_HANDLED; } +#ifdef CONFIG_PCI +static int pci_dev_uses_irq(struct pnp_dev *pnp, struct pci_dev *pci, + unsigned int irq) +{ + u32 class; + u8 progif; + + if (pci->irq == irq) { + dev_dbg(&pnp->dev, "device %s using irq %d\n", + pci_name(pci), irq); + return 1; + } + + /* + * See pci_setup_device() and ata_pci_sff_activate_host() for + * similar IDE legacy detection. 
+ */ + pci_read_config_dword(pci, PCI_CLASS_REVISION, &class); + class >>= 8; /* discard revision ID */ + progif = class & 0xff; + class >>= 8; + + if (class == PCI_CLASS_STORAGE_IDE) { + /* + * Unless both channels are native-PCI mode only, + * treat the compatibility IRQs as busy. + */ + if ((progif & 0x5) != 0x5) + if (pci_get_legacy_ide_irq(pci, 0) == irq || + pci_get_legacy_ide_irq(pci, 1) == irq) { + dev_dbg(&pnp->dev, "legacy IDE device %s " + "using irq %d\n", pci_name(pci), irq); + return 1; + } + } + + return 0; +} +#endif + +static int pci_uses_irq(struct pnp_dev *pnp, unsigned int irq) +{ +#ifdef CONFIG_PCI + struct pci_dev *pci = NULL; + + for_each_pci_dev(pci) { + if (pci_dev_uses_irq(pnp, pci, irq)) { + pci_dev_put(pci); + return 1; + } + } +#endif + return 0; +} + int pnp_check_irq(struct pnp_dev *dev, struct resource *res) { int i; @@ -395,18 +372,9 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res) } } -#ifdef CONFIG_PCI /* check if the resource is being used by a pci device */ - { - struct pci_dev *pci = NULL; - for_each_pci_dev(pci) { - if (pci->irq == *irq) { - pci_dev_put(pci); - return 0; - } - } - } -#endif + if (pci_uses_irq(dev, *irq)) + return 0; /* check if the resource is already in use, skip if the * device is active because it itself may be in use */ @@ -499,81 +467,37 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res) #endif } -struct pnp_resource *pnp_get_pnp_resource(struct pnp_dev *dev, - unsigned int type, unsigned int num) +int pnp_resource_type(struct resource *res) { - struct pnp_resource_table *res = dev->res; - - switch (type) { - case IORESOURCE_IO: - if (num >= PNP_MAX_PORT) - return NULL; - return &res->port[num]; - case IORESOURCE_MEM: - if (num >= PNP_MAX_MEM) - return NULL; - return &res->mem[num]; - case IORESOURCE_IRQ: - if (num >= PNP_MAX_IRQ) - return NULL; - return &res->irq[num]; - case IORESOURCE_DMA: - if (num >= PNP_MAX_DMA) - return NULL; - return &res->dma[num]; - } - return NULL; + return res->flags & (IORESOURCE_IO | IORESOURCE_MEM | + IORESOURCE_IRQ | IORESOURCE_DMA); } struct resource *pnp_get_resource(struct pnp_dev *dev, unsigned int type, unsigned int num) { struct pnp_resource *pnp_res; + struct resource *res; - pnp_res = pnp_get_pnp_resource(dev, type, num); - if (pnp_res) - return &pnp_res->res; - + list_for_each_entry(pnp_res, &dev->resources, list) { + res = &pnp_res->res; + if (pnp_resource_type(res) == type && num-- == 0) + return res; + } return NULL; } EXPORT_SYMBOL(pnp_get_resource); -static struct pnp_resource *pnp_new_resource(struct pnp_dev *dev, int type) +static struct pnp_resource *pnp_new_resource(struct pnp_dev *dev) { struct pnp_resource *pnp_res; - int i; - switch (type) { - case IORESOURCE_IO: - for (i = 0; i < PNP_MAX_PORT; i++) { - pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_IO, i); - if (pnp_res && !pnp_resource_valid(&pnp_res->res)) - return pnp_res; - } - break; - case IORESOURCE_MEM: - for (i = 0; i < PNP_MAX_MEM; i++) { - pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_MEM, i); - if (pnp_res && !pnp_resource_valid(&pnp_res->res)) - return pnp_res; - } - break; - case IORESOURCE_IRQ: - for (i = 0; i < PNP_MAX_IRQ; i++) { - pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_IRQ, i); - if (pnp_res && !pnp_resource_valid(&pnp_res->res)) - return pnp_res; - } - break; - case IORESOURCE_DMA: - for (i = 0; i < PNP_MAX_DMA; i++) { - pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_DMA, i); - if (pnp_res && !pnp_resource_valid(&pnp_res->res)) - return pnp_res; - } - break; - } - return 
NULL; + pnp_res = kzalloc(sizeof(struct pnp_resource), GFP_KERNEL); + if (!pnp_res) + return NULL; + + list_add_tail(&pnp_res->list, &dev->resources); + return pnp_res; } struct pnp_resource *pnp_add_irq_resource(struct pnp_dev *dev, int irq, @@ -581,15 +505,10 @@ struct pnp_resource *pnp_add_irq_resource(struct pnp_dev *dev, int irq, { struct pnp_resource *pnp_res; struct resource *res; - static unsigned char warned; - pnp_res = pnp_new_resource(dev, IORESOURCE_IRQ); + pnp_res = pnp_new_resource(dev); if (!pnp_res) { - if (!warned) { - dev_err(&dev->dev, "can't add resource for IRQ %d\n", - irq); - warned = 1; - } + dev_err(&dev->dev, "can't add resource for IRQ %d\n", irq); return NULL; } @@ -607,15 +526,10 @@ struct pnp_resource *pnp_add_dma_resource(struct pnp_dev *dev, int dma, { struct pnp_resource *pnp_res; struct resource *res; - static unsigned char warned; - pnp_res = pnp_new_resource(dev, IORESOURCE_DMA); + pnp_res = pnp_new_resource(dev); if (!pnp_res) { - if (!warned) { - dev_err(&dev->dev, "can't add resource for DMA %d\n", - dma); - warned = 1; - } + dev_err(&dev->dev, "can't add resource for DMA %d\n", dma); return NULL; } @@ -634,16 +548,12 @@ struct pnp_resource *pnp_add_io_resource(struct pnp_dev *dev, { struct pnp_resource *pnp_res; struct resource *res; - static unsigned char warned; - pnp_res = pnp_new_resource(dev, IORESOURCE_IO); + pnp_res = pnp_new_resource(dev); if (!pnp_res) { - if (!warned) { - dev_err(&dev->dev, "can't add resource for IO " - "%#llx-%#llx\n",(unsigned long long) start, - (unsigned long long) end); - warned = 1; - } + dev_err(&dev->dev, "can't add resource for IO %#llx-%#llx\n", + (unsigned long long) start, + (unsigned long long) end); return NULL; } @@ -663,16 +573,12 @@ struct pnp_resource *pnp_add_mem_resource(struct pnp_dev *dev, { struct pnp_resource *pnp_res; struct resource *res; - static unsigned char warned; - pnp_res = pnp_new_resource(dev, IORESOURCE_MEM); + pnp_res = pnp_new_resource(dev); if (!pnp_res) { - if (!warned) { - dev_err(&dev->dev, "can't add resource for MEM " - "%#llx-%#llx\n",(unsigned long long) start, - (unsigned long long) end); - warned = 1; - } + dev_err(&dev->dev, "can't add resource for MEM %#llx-%#llx\n", + (unsigned long long) start, + (unsigned long long) end); return NULL; } @@ -686,6 +592,52 @@ struct pnp_resource *pnp_add_mem_resource(struct pnp_dev *dev, return pnp_res; } +/* + * Determine whether the specified resource is a possible configuration + * for this device. + */ +int pnp_possible_config(struct pnp_dev *dev, int type, resource_size_t start, + resource_size_t size) +{ + struct pnp_option *option; + struct pnp_port *port; + struct pnp_mem *mem; + struct pnp_irq *irq; + struct pnp_dma *dma; + + list_for_each_entry(option, &dev->options, list) { + if (option->type != type) + continue; + + switch (option->type) { + case IORESOURCE_IO: + port = &option->u.port; + if (port->min == start && port->size == size) + return 1; + break; + case IORESOURCE_MEM: + mem = &option->u.mem; + if (mem->min == start && mem->size == size) + return 1; + break; + case IORESOURCE_IRQ: + irq = &option->u.irq; + if (start < PNP_IRQ_NR && + test_bit(start, irq->map.bits)) + return 1; + break; + case IORESOURCE_DMA: + dma = &option->u.dma; + if (dma->map & (1 << start)) + return 1; + break; + } + } + + return 0; +} +EXPORT_SYMBOL(pnp_possible_config); + /* format is: pnp_reserve_irq=irq1[,irq2] .... 
*/ static int __init pnp_setup_reserve_irq(char *str) { diff --git a/drivers/pnp/support.c b/drivers/pnp/support.c index 95b076c18c0..bbf78ef4ba0 100644 --- a/drivers/pnp/support.c +++ b/drivers/pnp/support.c @@ -2,6 +2,8 @@ * support.c - standard functions for the use of pnp protocol drivers * * Copyright 2003 Adam Belay <ambx1@neo.rr.com> + * Copyright (C) 2008 Hewlett-Packard Development Company, L.P. + * Bjorn Helgaas <bjorn.helgaas@hp.com> */ #include <linux/module.h> @@ -16,6 +18,10 @@ */ int pnp_is_active(struct pnp_dev *dev) { + /* + * I don't think this is very reliable because pnp_disable_dev() + * only clears out auto-assigned resources. + */ if (!pnp_port_start(dev, 0) && pnp_port_len(dev, 0) <= 1 && !pnp_mem_start(dev, 0) && pnp_mem_len(dev, 0) <= 1 && pnp_irq(dev, 0) == -1 && pnp_dma(dev, 0) == -1) @@ -52,39 +58,154 @@ void pnp_eisa_id_to_string(u32 id, char *str) str[7] = '\0'; } +char *pnp_resource_type_name(struct resource *res) +{ + switch (pnp_resource_type(res)) { + case IORESOURCE_IO: + return "io"; + case IORESOURCE_MEM: + return "mem"; + case IORESOURCE_IRQ: + return "irq"; + case IORESOURCE_DMA: + return "dma"; + } + return NULL; +} + void dbg_pnp_show_resources(struct pnp_dev *dev, char *desc) { #ifdef DEBUG + char buf[128]; + int len = 0; + struct pnp_resource *pnp_res; struct resource *res; - int i; - dev_dbg(&dev->dev, "current resources: %s\n", desc); - - for (i = 0; i < PNP_MAX_IRQ; i++) { - res = pnp_get_resource(dev, IORESOURCE_IRQ, i); - if (res && !(res->flags & IORESOURCE_UNSET)) - dev_dbg(&dev->dev, " irq %lld flags %#lx\n", - (unsigned long long) res->start, res->flags); + if (list_empty(&dev->resources)) { + dev_dbg(&dev->dev, "%s: no current resources\n", desc); + return; } - for (i = 0; i < PNP_MAX_DMA; i++) { - res = pnp_get_resource(dev, IORESOURCE_DMA, i); - if (res && !(res->flags & IORESOURCE_UNSET)) - dev_dbg(&dev->dev, " dma %lld flags %#lx\n", - (unsigned long long) res->start, res->flags); + + dev_dbg(&dev->dev, "%s: current resources:\n", desc); + list_for_each_entry(pnp_res, &dev->resources, list) { + res = &pnp_res->res; + + len += snprintf(buf + len, sizeof(buf) - len, " %-3s ", + pnp_resource_type_name(res)); + + if (res->flags & IORESOURCE_DISABLED) { + dev_dbg(&dev->dev, "%sdisabled\n", buf); + continue; + } + + switch (pnp_resource_type(res)) { + case IORESOURCE_IO: + case IORESOURCE_MEM: + len += snprintf(buf + len, sizeof(buf) - len, + "%#llx-%#llx flags %#lx", + (unsigned long long) res->start, + (unsigned long long) res->end, + res->flags); + break; + case IORESOURCE_IRQ: + case IORESOURCE_DMA: + len += snprintf(buf + len, sizeof(buf) - len, + "%lld flags %#lx", + (unsigned long long) res->start, + res->flags); + break; + } + dev_dbg(&dev->dev, "%s\n", buf); } - for (i = 0; i < PNP_MAX_PORT; i++) { - res = pnp_get_resource(dev, IORESOURCE_IO, i); - if (res && !(res->flags & IORESOURCE_UNSET)) - dev_dbg(&dev->dev, " io %#llx-%#llx flags %#lx\n", - (unsigned long long) res->start, - (unsigned long long) res->end, res->flags); +#endif +} + +char *pnp_option_priority_name(struct pnp_option *option) +{ + switch (pnp_option_priority(option)) { + case PNP_RES_PRIORITY_PREFERRED: + return "preferred"; + case PNP_RES_PRIORITY_ACCEPTABLE: + return "acceptable"; + case PNP_RES_PRIORITY_FUNCTIONAL: + return "functional"; } - for (i = 0; i < PNP_MAX_MEM; i++) { - res = pnp_get_resource(dev, IORESOURCE_MEM, i); - if (res && !(res->flags & IORESOURCE_UNSET)) - dev_dbg(&dev->dev, " mem %#llx-%#llx flags %#lx\n", - (unsigned long long) 
res->start, - (unsigned long long) res->end, res->flags); + return "invalid"; +} + +void dbg_pnp_show_option(struct pnp_dev *dev, struct pnp_option *option) +{ +#ifdef DEBUG + char buf[128]; + int len = 0, i; + struct pnp_port *port; + struct pnp_mem *mem; + struct pnp_irq *irq; + struct pnp_dma *dma; + + if (pnp_option_is_dependent(option)) + len += snprintf(buf + len, sizeof(buf) - len, + " dependent set %d (%s) ", + pnp_option_set(option), + pnp_option_priority_name(option)); + else + len += snprintf(buf + len, sizeof(buf) - len, " independent "); + + switch (option->type) { + case IORESOURCE_IO: + port = &option->u.port; + len += snprintf(buf + len, sizeof(buf) - len, "io min %#llx " + "max %#llx align %lld size %lld flags %#x", + (unsigned long long) port->min, + (unsigned long long) port->max, + (unsigned long long) port->align, + (unsigned long long) port->size, port->flags); + break; + case IORESOURCE_MEM: + mem = &option->u.mem; + len += snprintf(buf + len, sizeof(buf) - len, "mem min %#llx " + "max %#llx align %lld size %lld flags %#x", + (unsigned long long) mem->min, + (unsigned long long) mem->max, + (unsigned long long) mem->align, + (unsigned long long) mem->size, mem->flags); + break; + case IORESOURCE_IRQ: + irq = &option->u.irq; + len += snprintf(buf + len, sizeof(buf) - len, "irq"); + if (bitmap_empty(irq->map.bits, PNP_IRQ_NR)) + len += snprintf(buf + len, sizeof(buf) - len, + " <none>"); + else { + for (i = 0; i < PNP_IRQ_NR; i++) + if (test_bit(i, irq->map.bits)) + len += snprintf(buf + len, + sizeof(buf) - len, + " %d", i); + } + len += snprintf(buf + len, sizeof(buf) - len, " flags %#x", + irq->flags); + if (irq->flags & IORESOURCE_IRQ_OPTIONAL) + len += snprintf(buf + len, sizeof(buf) - len, + " (optional)"); + break; + case IORESOURCE_DMA: + dma = &option->u.dma; + len += snprintf(buf + len, sizeof(buf) - len, "dma"); + if (!dma->map) + len += snprintf(buf + len, sizeof(buf) - len, + " <none>"); + else { + for (i = 0; i < 8; i++) + if (dma->map & (1 << i)) + len += snprintf(buf + len, + sizeof(buf) - len, + " %d", i); + } + len += snprintf(buf + len, sizeof(buf) - len, " (bitmask %#x) " + "flags %#x", dma->map, dma->flags); + break; } + dev_dbg(&dev->dev, "%s\n", buf); #endif } diff --git a/drivers/pnp/system.c b/drivers/pnp/system.c index cf4e07b01d4..764f3a31068 100644 --- a/drivers/pnp/system.c +++ b/drivers/pnp/system.c @@ -60,7 +60,7 @@ static void reserve_resources_of_dev(struct pnp_dev *dev) int i; for (i = 0; (res = pnp_get_resource(dev, IORESOURCE_IO, i)); i++) { - if (res->flags & IORESOURCE_UNSET) + if (res->flags & IORESOURCE_DISABLED) continue; if (res->start == 0) continue; /* disabled */ @@ -81,7 +81,7 @@ static void reserve_resources_of_dev(struct pnp_dev *dev) } for (i = 0; (res = pnp_get_resource(dev, IORESOURCE_MEM, i)); i++) { - if (res->flags & (IORESOURCE_UNSET | IORESOURCE_DISABLED)) + if (res->flags & IORESOURCE_DISABLED) continue; reserve_range(dev, res->start, res->end, 0); diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c index d91df38ee4f..85fcb437105 100644 --- a/drivers/s390/block/dasd_diag.c +++ b/drivers/s390/block/dasd_diag.c @@ -333,7 +333,8 @@ dasd_diag_check_device(struct dasd_device *device) if (IS_ERR(block)) { DEV_MESSAGE(KERN_WARNING, device, "%s", "could not allocate dasd block structure"); - kfree(device->private); + device->private = NULL; + kfree(private); return PTR_ERR(block); } device->block = block; @@ -348,7 +349,8 @@ dasd_diag_check_device(struct dasd_device *device) if (rc) { 
DEV_MESSAGE(KERN_WARNING, device, "failed to retrieve device " "information (rc=%d)", rc); - return -ENOTSUPP; + rc = -EOPNOTSUPP; + goto out; } /* Figure out position of label block */ @@ -362,7 +364,8 @@ dasd_diag_check_device(struct dasd_device *device) default: DEV_MESSAGE(KERN_WARNING, device, "unsupported device class " "(class=%d)", private->rdc_data.vdev_class); - return -ENOTSUPP; + rc = -EOPNOTSUPP; + goto out; } DBF_DEV_EVENT(DBF_INFO, device, @@ -379,7 +382,8 @@ dasd_diag_check_device(struct dasd_device *device) if (label == NULL) { DEV_MESSAGE(KERN_WARNING, device, "%s", "No memory to allocate initialization request"); - return -ENOMEM; + rc = -ENOMEM; + goto out; } rc = 0; end_block = 0; @@ -403,7 +407,7 @@ dasd_diag_check_device(struct dasd_device *device) DEV_MESSAGE(KERN_WARNING, device, "%s", "DIAG call failed"); rc = -EOPNOTSUPP; - goto out; + goto out_label; } mdsk_term_io(device); if (rc == 0) @@ -413,7 +417,7 @@ dasd_diag_check_device(struct dasd_device *device) DEV_MESSAGE(KERN_WARNING, device, "device access failed " "(rc=%d)", rc); rc = -EIO; - goto out; + goto out_label; } /* check for label block */ if (memcmp(label->label_id, DASD_DIAG_CMS1, @@ -439,8 +443,15 @@ dasd_diag_check_device(struct dasd_device *device) (unsigned long) (block->blocks << block->s2b_shift) >> 1); } -out: +out_label: free_page((long) label); +out: + if (rc) { + device->block = NULL; + dasd_free_block(block); + device->private = NULL; + kfree(private); + } return rc; } diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index e0b77210d37..3590fdb5b2f 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -1418,8 +1418,10 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device, /* service information message SIM */ - if ((irb->ecw[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE) { + if (irb->esw.esw0.erw.cons && (irb->ecw[27] & DASD_SENSE_BIT_0) && + ((irb->ecw[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) { dasd_3990_erp_handle_sim(device, irb->ecw); + dasd_schedule_device_bh(device); return; } diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index aee4656127f..aa0c533423a 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c @@ -139,7 +139,8 @@ dasd_fba_check_characteristics(struct dasd_device *device) if (IS_ERR(block)) { DEV_MESSAGE(KERN_WARNING, device, "%s", "could not allocate dasd block structure"); - kfree(device->private); + device->private = NULL; + kfree(private); return PTR_ERR(block); } device->block = block; @@ -152,6 +153,10 @@ dasd_fba_check_characteristics(struct dasd_device *device) DEV_MESSAGE(KERN_WARNING, device, "Read device characteristics returned error %d", rc); + device->block = NULL; + dasd_free_block(block); + device->private = NULL; + kfree(private); return rc; } diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c index 0a9f1cccbe5..b0ac44b2712 100644 --- a/drivers/s390/char/vmur.c +++ b/drivers/s390/char/vmur.c @@ -345,7 +345,7 @@ static int get_urd_class(struct urdev *urd) cc = diag210(&ur_diag210); switch (cc) { case 0: - return -ENOTSUPP; + return -EOPNOTSUPP; case 2: return ur_diag210.vrdcvcla; /* virtual device class */ case 3: @@ -621,7 +621,7 @@ static int verify_device(struct urdev *urd) case DEV_CLASS_UR_I: return verify_uri_device(urd); default: - return -ENOTSUPP; + return -EOPNOTSUPP; } } @@ -654,7 +654,7 @@ static int get_file_reclen(struct urdev *urd) case DEV_CLASS_UR_I: return get_uri_file_reclen(urd); default: - 
return -ENOTSUPP; + return -EOPNOTSUPP; } } @@ -827,7 +827,7 @@ static int ur_probe(struct ccw_device *cdev) goto fail_remove_attr; } if ((urd->class != DEV_CLASS_UR_I) && (urd->class != DEV_CLASS_UR_O)) { - rc = -ENOTSUPP; + rc = -EOPNOTSUPP; goto fail_remove_attr; } spin_lock_irq(get_ccwdev_lock(cdev)); @@ -892,7 +892,7 @@ static int ur_set_online(struct ccw_device *cdev) } else if (urd->cdev->id.cu_type == PRINTER_DEVTYPE) { sprintf(node_id, "vmprt-%s", cdev->dev.bus_id); } else { - rc = -ENOTSUPP; + rc = -EOPNOTSUPP; goto fail_free_cdev; } diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index 047dd92ae80..7fd84be1193 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c @@ -29,6 +29,7 @@ #define TO_USER 0 #define TO_KERNEL 1 +#define CHUNK_INFO_SIZE 34 /* 2 16-byte char, each followed by blank */ enum arch_id { ARCH_S390 = 0, @@ -51,6 +52,7 @@ static struct debug_info *zcore_dbf; static int hsa_available; static struct dentry *zcore_dir; static struct dentry *zcore_file; +static struct dentry *zcore_memmap_file; /* * Copy memory from HSA to kernel or user memory (not reentrant): @@ -476,6 +478,54 @@ static const struct file_operations zcore_fops = { .release = zcore_release, }; +static ssize_t zcore_memmap_read(struct file *filp, char __user *buf, + size_t count, loff_t *ppos) +{ + return simple_read_from_buffer(buf, count, ppos, filp->private_data, + MEMORY_CHUNKS * CHUNK_INFO_SIZE); +} + +static int zcore_memmap_open(struct inode *inode, struct file *filp) +{ + int i; + char *buf; + struct mem_chunk *chunk_array; + + chunk_array = kzalloc(MEMORY_CHUNKS * sizeof(struct mem_chunk), + GFP_KERNEL); + if (!chunk_array) + return -ENOMEM; + detect_memory_layout(chunk_array); + buf = kzalloc(MEMORY_CHUNKS * CHUNK_INFO_SIZE, GFP_KERNEL); + if (!buf) { + kfree(chunk_array); + return -ENOMEM; + } + for (i = 0; i < MEMORY_CHUNKS; i++) { + sprintf(buf + (i * CHUNK_INFO_SIZE), "%016llx %016llx ", + (unsigned long long) chunk_array[i].addr, + (unsigned long long) chunk_array[i].size); + if (chunk_array[i].size == 0) + break; + } + kfree(chunk_array); + filp->private_data = buf; + return 0; +} + +static int zcore_memmap_release(struct inode *inode, struct file *filp) +{ + kfree(filp->private_data); + return 0; +} + +static const struct file_operations zcore_memmap_fops = { + .owner = THIS_MODULE, + .read = zcore_memmap_read, + .open = zcore_memmap_open, + .release = zcore_memmap_release, +}; + static void __init set_s390_lc_mask(union save_area *map) { @@ -554,18 +604,44 @@ static int __init check_sdias(void) return 0; } -static void __init zcore_header_init(int arch, struct zcore_header *hdr) +static int __init get_mem_size(unsigned long *mem) +{ + int i; + struct mem_chunk *chunk_array; + + chunk_array = kzalloc(MEMORY_CHUNKS * sizeof(struct mem_chunk), + GFP_KERNEL); + if (!chunk_array) + return -ENOMEM; + detect_memory_layout(chunk_array); + for (i = 0; i < MEMORY_CHUNKS; i++) { + if (chunk_array[i].size == 0) + break; + *mem += chunk_array[i].size; + } + kfree(chunk_array); + return 0; +} + +static int __init zcore_header_init(int arch, struct zcore_header *hdr) { + int rc; + unsigned long memory = 0; + if (arch == ARCH_S390X) hdr->arch_id = DUMP_ARCH_S390X; else hdr->arch_id = DUMP_ARCH_S390; - hdr->mem_size = sys_info.mem_size; - hdr->rmem_size = sys_info.mem_size; + rc = get_mem_size(&memory); + if (rc) + return rc; + hdr->mem_size = memory; + hdr->rmem_size = memory; hdr->mem_end = sys_info.mem_size; - hdr->num_pages = sys_info.mem_size / PAGE_SIZE; + 
hdr->num_pages = memory / PAGE_SIZE; hdr->tod = get_clock(); get_cpu_id(&hdr->cpu_id); + return 0; } static int __init zcore_init(void) @@ -608,7 +684,9 @@ static int __init zcore_init(void) if (rc) goto fail; - zcore_header_init(arch, &zcore_header); + rc = zcore_header_init(arch, &zcore_header); + if (rc) + goto fail; zcore_dir = debugfs_create_dir("zcore" , NULL); if (!zcore_dir) { @@ -618,13 +696,22 @@ static int __init zcore_init(void) zcore_file = debugfs_create_file("mem", S_IRUSR, zcore_dir, NULL, &zcore_fops); if (!zcore_file) { - debugfs_remove(zcore_dir); rc = -ENOMEM; - goto fail; + goto fail_dir; + } + zcore_memmap_file = debugfs_create_file("memmap", S_IRUSR, zcore_dir, + NULL, &zcore_memmap_fops); + if (!zcore_memmap_file) { + rc = -ENOMEM; + goto fail_file; } hsa_available = 1; return 0; +fail_file: + debugfs_remove(zcore_file); +fail_dir: + debugfs_remove(zcore_dir); fail: diag308(DIAG308_REL_HSA, NULL); return rc; diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile index 91e9e3f3073..bd79bd16539 100644 --- a/drivers/s390/cio/Makefile +++ b/drivers/s390/cio/Makefile @@ -9,4 +9,6 @@ ccw_device-objs += device_id.o device_pgid.o device_status.o obj-y += ccw_device.o cmf.o obj-$(CONFIG_CHSC_SCH) += chsc_sch.o obj-$(CONFIG_CCWGROUP) += ccwgroup.o + +qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_perf.o qdio_setup.o obj-$(CONFIG_QDIO) += qdio.o diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 65264a38057..29826fdd47b 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -27,7 +27,13 @@ static void *sei_page; -static int chsc_error_from_response(int response) +/** + * chsc_error_from_response() - convert a chsc response to an error + * @response: chsc response code + * + * Returns an appropriate Linux error code for @response. + */ +int chsc_error_from_response(int response) { switch (response) { case 0x0001: @@ -45,6 +51,7 @@ static int chsc_error_from_response(int response) return -EIO; } } +EXPORT_SYMBOL_GPL(chsc_error_from_response); struct chsc_ssd_area { struct chsc_header request; diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index fb6c4d6c45b..ba59bceace9 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h @@ -101,4 +101,6 @@ void chsc_chp_online(struct chp_id chpid); void chsc_chp_offline(struct chp_id chpid); int chsc_get_channel_measurement_chars(struct channel_path *chp); +int chsc_error_from_response(int response); + #endif diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c deleted file mode 100644 index 2bf36e14b10..00000000000 --- a/drivers/s390/cio/qdio.c +++ /dev/null @@ -1,3929 +0,0 @@ -/* - * - * linux/drivers/s390/cio/qdio.c - * - * Linux for S/390 QDIO base support, Hipersocket base support - * version 2 - * - * Copyright 2000,2002 IBM Corporation - * Author(s): Utz Bacher <utz.bacher@de.ibm.com> - * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com> - * - * Restriction: only 63 iqdio subchannels would have its own indicator, - * after that, subsequent subchannels share one indicator - * - * - * - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ - -#include <linux/module.h> -#include <linux/init.h> -#include <linux/delay.h> -#include <linux/slab.h> -#include <linux/kernel.h> -#include <linux/proc_fs.h> -#include <linux/timer.h> -#include <linux/mempool.h> -#include <linux/semaphore.h> - -#include <asm/ccwdev.h> -#include <asm/io.h> -#include <asm/atomic.h> -#include <asm/timex.h> - -#include <asm/debug.h> -#include <asm/s390_rdev.h> -#include <asm/qdio.h> -#include <asm/airq.h> - -#include "cio.h" -#include "css.h" -#include "device.h" -#include "qdio.h" -#include "ioasm.h" -#include "chsc.h" - -/****************** MODULE PARAMETER VARIABLES ********************/ -MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>"); -MODULE_DESCRIPTION("QDIO base support version 2, " \ - "Copyright 2000 IBM Corporation"); -MODULE_LICENSE("GPL"); - -/******************** HERE WE GO ***********************************/ - -static const char version[] = "QDIO base support version 2"; - -static int qdio_performance_stats = 0; -static int proc_perf_file_registration; -static struct qdio_perf_stats perf_stats; - -static int hydra_thinints; -static int is_passthrough = 0; -static int omit_svs; - -static int indicator_used[INDICATORS_PER_CACHELINE]; -static __u32 * volatile indicators; -static __u32 volatile spare_indicator; -static atomic_t spare_indicator_usecount; -#define QDIO_MEMPOOL_SCSSC_ELEMENTS 2 -static mempool_t *qdio_mempool_scssc; -static struct kmem_cache *qdio_q_cache; - -static debug_info_t *qdio_dbf_setup; -static debug_info_t *qdio_dbf_sbal; -static debug_info_t *qdio_dbf_trace; -static debug_info_t *qdio_dbf_sense; -#ifdef CONFIG_QDIO_DEBUG -static debug_info_t *qdio_dbf_slsb_out; -static debug_info_t *qdio_dbf_slsb_in; -#endif /* CONFIG_QDIO_DEBUG */ - -/* iQDIO stuff: */ -static volatile struct qdio_q *tiq_list=NULL; /* volatile as it could change - during a while loop */ -static DEFINE_SPINLOCK(ttiq_list_lock); -static void *tiqdio_ind; -static void tiqdio_tl(unsigned long); -static DECLARE_TASKLET(tiqdio_tasklet,tiqdio_tl,0); - -/* not a macro, as one of the arguments is atomic_read */ -static inline int -qdio_min(int a,int b) -{ - if (a<b) - return a; - else - return b; -} - -/***************** SCRUBBER HELPER ROUTINES **********************/ -#ifdef CONFIG_64BIT -static inline void qdio_perf_stat_inc(atomic64_t *count) -{ - if (qdio_performance_stats) - atomic64_inc(count); -} - -static inline void qdio_perf_stat_dec(atomic64_t *count) -{ - if (qdio_performance_stats) - atomic64_dec(count); -} -#else /* CONFIG_64BIT */ -static inline void qdio_perf_stat_inc(atomic_t *count) -{ - if (qdio_performance_stats) - atomic_inc(count); -} - -static inline void qdio_perf_stat_dec(atomic_t *count) -{ - if (qdio_performance_stats) - atomic_dec(count); -} -#endif /* CONFIG_64BIT */ - -static inline __u64 -qdio_get_micros(void) -{ - return (get_clock() >> 12); /* time>>12 is microseconds */ -} - -/* - * unfortunately, we can't just xchg the values; in do_QDIO we want to reserve - * the q in any case, so that we'll not be interrupted when we are in - * qdio_mark_tiq... 
shouldn't have a really bad impact, as reserving almost - * ever works (last famous words) - */ -static inline int -qdio_reserve_q(struct qdio_q *q) -{ - return atomic_add_return(1,&q->use_count) - 1; -} - -static inline void -qdio_release_q(struct qdio_q *q) -{ - atomic_dec(&q->use_count); -} - -/*check ccq */ -static int -qdio_check_ccq(struct qdio_q *q, unsigned int ccq) -{ - char dbf_text[15]; - - if (ccq == 0 || ccq == 32) - return 0; - if (ccq == 96 || ccq == 97) - return 1; - /*notify devices immediately*/ - sprintf(dbf_text,"%d", ccq); - QDIO_DBF_TEXT2(1,trace,dbf_text); - return -EIO; -} -/* EQBS: extract buffer states */ -static int -qdio_do_eqbs(struct qdio_q *q, unsigned char *state, - unsigned int *start, unsigned int *cnt) -{ - struct qdio_irq *irq; - unsigned int tmp_cnt, q_no, ccq; - int rc ; - char dbf_text[15]; - - ccq = 0; - tmp_cnt = *cnt; - irq = (struct qdio_irq*)q->irq_ptr; - q_no = q->q_no; - if(!q->is_input_q) - q_no += irq->no_input_qs; -again: - ccq = do_eqbs(irq->sch_token, state, q_no, start, cnt); - rc = qdio_check_ccq(q, ccq); - if ((ccq == 96) && (tmp_cnt != *cnt)) - rc = 0; - if (rc == 1) { - QDIO_DBF_TEXT5(1,trace,"eqAGAIN"); - goto again; - } - if (rc < 0) { - QDIO_DBF_TEXT2(1,trace,"eqberr"); - sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt, *cnt, ccq, q_no); - QDIO_DBF_TEXT2(1,trace,dbf_text); - q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION| - QDIO_STATUS_LOOK_FOR_ERROR, - 0, 0, 0, -1, -1, q->int_parm); - return 0; - } - return (tmp_cnt - *cnt); -} - -/* SQBS: set buffer states */ -static int -qdio_do_sqbs(struct qdio_q *q, unsigned char state, - unsigned int *start, unsigned int *cnt) -{ - struct qdio_irq *irq; - unsigned int tmp_cnt, q_no, ccq; - int rc; - char dbf_text[15]; - - ccq = 0; - tmp_cnt = *cnt; - irq = (struct qdio_irq*)q->irq_ptr; - q_no = q->q_no; - if(!q->is_input_q) - q_no += irq->no_input_qs; -again: - ccq = do_sqbs(irq->sch_token, state, q_no, start, cnt); - rc = qdio_check_ccq(q, ccq); - if (rc == 1) { - QDIO_DBF_TEXT5(1,trace,"sqAGAIN"); - goto again; - } - if (rc < 0) { - QDIO_DBF_TEXT3(1,trace,"sqberr"); - sprintf(dbf_text,"%2x,%2x",tmp_cnt,*cnt); - QDIO_DBF_TEXT3(1,trace,dbf_text); - sprintf(dbf_text,"%d,%d",ccq,q_no); - QDIO_DBF_TEXT3(1,trace,dbf_text); - q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION| - QDIO_STATUS_LOOK_FOR_ERROR, - 0, 0, 0, -1, -1, q->int_parm); - return 0; - } - return (tmp_cnt - *cnt); -} - -static inline int -qdio_set_slsb(struct qdio_q *q, unsigned int *bufno, - unsigned char state, unsigned int *count) -{ - volatile char *slsb; - struct qdio_irq *irq; - - irq = (struct qdio_irq*)q->irq_ptr; - if (!irq->is_qebsm) { - slsb = (char *)&q->slsb.acc.val[(*bufno)]; - xchg(slsb, state); - return 1; - } - return qdio_do_sqbs(q, state, bufno, count); -} - -#ifdef CONFIG_QDIO_DEBUG -static inline void -qdio_trace_slsb(struct qdio_q *q) -{ - if (q->queue_type==QDIO_TRACE_QTYPE) { - if (q->is_input_q) - QDIO_DBF_HEX2(0,slsb_in,&q->slsb, - QDIO_MAX_BUFFERS_PER_Q); - else - QDIO_DBF_HEX2(0,slsb_out,&q->slsb, - QDIO_MAX_BUFFERS_PER_Q); - } -} -#endif - -static inline int -set_slsb(struct qdio_q *q, unsigned int *bufno, - unsigned char state, unsigned int *count) -{ - int rc; -#ifdef CONFIG_QDIO_DEBUG - qdio_trace_slsb(q); -#endif - rc = qdio_set_slsb(q, bufno, state, count); -#ifdef CONFIG_QDIO_DEBUG - qdio_trace_slsb(q); -#endif - return rc; -} -static inline int -qdio_siga_sync(struct qdio_q *q, unsigned int gpr2, - unsigned int gpr3) -{ - int cc; - - QDIO_DBF_TEXT4(0,trace,"sigasync"); - 
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - qdio_perf_stat_inc(&perf_stats.siga_syncs); - - cc = do_siga_sync(q->schid, gpr2, gpr3); - if (cc) - QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*)); - - return cc; -} - -static inline int -qdio_siga_sync_q(struct qdio_q *q) -{ - if (q->is_input_q) - return qdio_siga_sync(q, 0, q->mask); - return qdio_siga_sync(q, q->mask, 0); -} - -static int -__do_siga_output(struct qdio_q *q, unsigned int *busy_bit) -{ - struct qdio_irq *irq; - unsigned int fc = 0; - unsigned long schid; - - irq = (struct qdio_irq *) q->irq_ptr; - if (!irq->is_qebsm) - schid = *((u32 *)&q->schid); - else { - schid = irq->sch_token; - fc |= 0x80; - } - return do_siga_output(schid, q->mask, busy_bit, fc); -} - -/* - * returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns - * an access exception - */ -static int -qdio_siga_output(struct qdio_q *q) -{ - int cc; - __u32 busy_bit; - __u64 start_time=0; - - qdio_perf_stat_inc(&perf_stats.siga_outs); - - QDIO_DBF_TEXT4(0,trace,"sigaout"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - for (;;) { - cc = __do_siga_output(q, &busy_bit); -//QDIO_PRINT_ERR("cc=%x, busy=%x\n",cc,busy_bit); - if ((cc==2) && (busy_bit) && (q->is_iqdio_q)) { - if (!start_time) - start_time=NOW; - if ((NOW-start_time)>QDIO_BUSY_BIT_PATIENCE) - break; - } else - break; - } - - if ((cc==2) && (busy_bit)) - cc |= QDIO_SIGA_ERROR_B_BIT_SET; - - if (cc) - QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*)); - - return cc; -} - -static int -qdio_siga_input(struct qdio_q *q) -{ - int cc; - - QDIO_DBF_TEXT4(0,trace,"sigain"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - qdio_perf_stat_inc(&perf_stats.siga_ins); - - cc = do_siga_input(q->schid, q->mask); - - if (cc) - QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*)); - - return cc; -} - -/* locked by the locks in qdio_activate and qdio_cleanup */ -static __u32 * -qdio_get_indicator(void) -{ - int i; - - for (i = 0; i < INDICATORS_PER_CACHELINE; i++) - if (!indicator_used[i]) { - indicator_used[i]=1; - return indicators+i; - } - atomic_inc(&spare_indicator_usecount); - return (__u32 * volatile) &spare_indicator; -} - -/* locked by the locks in qdio_activate and qdio_cleanup */ -static void -qdio_put_indicator(__u32 *addr) -{ - int i; - - if ( (addr) && (addr!=&spare_indicator) ) { - i=addr-indicators; - indicator_used[i]=0; - } - if (addr == &spare_indicator) - atomic_dec(&spare_indicator_usecount); -} - -static inline void -tiqdio_clear_summary_bit(__u32 *location) -{ - QDIO_DBF_TEXT5(0,trace,"clrsummb"); - QDIO_DBF_HEX5(0,trace,&location,sizeof(void*)); - - xchg(location,0); -} - -static inline void -tiqdio_set_summary_bit(__u32 *location) -{ - QDIO_DBF_TEXT5(0,trace,"setsummb"); - QDIO_DBF_HEX5(0,trace,&location,sizeof(void*)); - - xchg(location,-1); -} - -static inline void -tiqdio_sched_tl(void) -{ - tasklet_hi_schedule(&tiqdio_tasklet); -} - -static void -qdio_mark_tiq(struct qdio_q *q) -{ - unsigned long flags; - - QDIO_DBF_TEXT4(0,trace,"mark iq"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - spin_lock_irqsave(&ttiq_list_lock,flags); - if (unlikely(atomic_read(&q->is_in_shutdown))) - goto out_unlock; - - if (!q->is_input_q) - goto out_unlock; - - if ((q->list_prev) || (q->list_next)) - goto out_unlock; - - if (!tiq_list) { - tiq_list=q; - q->list_prev=q; - q->list_next=q; - } else { - q->list_next=tiq_list; - q->list_prev=tiq_list->list_prev; - tiq_list->list_prev->list_next=q; - tiq_list->list_prev=q; - } - spin_unlock_irqrestore(&ttiq_list_lock,flags); - - tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind); - 
tiqdio_sched_tl(); - return; -out_unlock: - spin_unlock_irqrestore(&ttiq_list_lock,flags); - return; -} - -static inline void -qdio_mark_q(struct qdio_q *q) -{ - QDIO_DBF_TEXT4(0,trace,"mark q"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - if (unlikely(atomic_read(&q->is_in_shutdown))) - return; - - tasklet_schedule(&q->tasklet); -} - -static int -qdio_stop_polling(struct qdio_q *q) -{ -#ifdef QDIO_USE_PROCESSING_STATE - unsigned int tmp, gsf, count = 1; - unsigned char state = 0; - struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr; - - if (!atomic_xchg(&q->polling,0)) - return 1; - - QDIO_DBF_TEXT4(0,trace,"stoppoll"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - /* show the card that we are not polling anymore */ - if (!q->is_input_q) - return 1; - - tmp = gsf = GET_SAVED_FRONTIER(q); - tmp = ((tmp + QDIO_MAX_BUFFERS_PER_Q-1) & (QDIO_MAX_BUFFERS_PER_Q-1) ); - set_slsb(q, &tmp, SLSB_P_INPUT_NOT_INIT, &count); - - /* - * we don't issue this SYNC_MEMORY, as we trust Rick T and - * moreover will not use the PROCESSING state under VM, so - * q->polling was 0 anyway - */ - /*SYNC_MEMORY;*/ - if (irq->is_qebsm) { - count = 1; - qdio_do_eqbs(q, &state, &gsf, &count); - } else - state = q->slsb.acc.val[gsf]; - if (state != SLSB_P_INPUT_PRIMED) - return 1; - /* - * set our summary bit again, as otherwise there is a - * small window we can miss between resetting it and - * checking for PRIMED state - */ - if (q->is_thinint_q) - tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind); - return 0; - -#else /* QDIO_USE_PROCESSING_STATE */ - return 1; -#endif /* QDIO_USE_PROCESSING_STATE */ -} - -/* - * see the comment in do_QDIO and before qdio_reserve_q about the - * sophisticated locking outside of unmark_q, so that we don't need to - * disable the interrupts :-) -*/ -static void -qdio_unmark_q(struct qdio_q *q) -{ - unsigned long flags; - - QDIO_DBF_TEXT4(0,trace,"unmark q"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - if ((!q->list_prev)||(!q->list_next)) - return; - - if ((q->is_thinint_q)&&(q->is_input_q)) { - /* iQDIO */ - spin_lock_irqsave(&ttiq_list_lock,flags); - /* in case cleanup has done this already and simultanously - * qdio_unmark_q is called from the interrupt handler, we've - * got to check this in this specific case again */ - if ((!q->list_prev)||(!q->list_next)) - goto out; - if (q->list_next==q) { - /* q was the only interesting q */ - tiq_list=NULL; - q->list_next=NULL; - q->list_prev=NULL; - } else { - q->list_next->list_prev=q->list_prev; - q->list_prev->list_next=q->list_next; - tiq_list=q->list_next; - q->list_next=NULL; - q->list_prev=NULL; - } -out: - spin_unlock_irqrestore(&ttiq_list_lock,flags); - } -} - -static inline unsigned long -tiqdio_clear_global_summary(void) -{ - unsigned long time; - - QDIO_DBF_TEXT5(0,trace,"clrglobl"); - - time = do_clear_global_summary(); - - QDIO_DBF_HEX5(0,trace,&time,sizeof(unsigned long)); - - return time; -} - - -/************************* OUTBOUND ROUTINES *******************************/ -static int -qdio_qebsm_get_outbound_buffer_frontier(struct qdio_q *q) -{ - struct qdio_irq *irq; - unsigned char state; - unsigned int cnt, count, ftc; - - irq = (struct qdio_irq *) q->irq_ptr; - if ((!q->is_iqdio_q) && (!q->hydra_gives_outbound_pcis)) - SYNC_MEMORY; - - ftc = q->first_to_check; - count = qdio_min(atomic_read(&q->number_of_buffers_used), - (QDIO_MAX_BUFFERS_PER_Q-1)); - if (count == 0) - return q->first_to_check; - cnt = qdio_do_eqbs(q, &state, &ftc, &count); - if (cnt == 0) - return q->first_to_check; - switch (state) { - case 
SLSB_P_OUTPUT_ERROR: - QDIO_DBF_TEXT3(0,trace,"outperr"); - atomic_sub(cnt , &q->number_of_buffers_used); - if (q->qdio_error) - q->error_status_flags |= - QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR; - q->qdio_error = SLSB_P_OUTPUT_ERROR; - q->error_status_flags |= QDIO_STATUS_LOOK_FOR_ERROR; - q->first_to_check = ftc; - break; - case SLSB_P_OUTPUT_EMPTY: - QDIO_DBF_TEXT5(0,trace,"outpempt"); - atomic_sub(cnt, &q->number_of_buffers_used); - q->first_to_check = ftc; - break; - case SLSB_CU_OUTPUT_PRIMED: - /* all buffers primed */ - QDIO_DBF_TEXT5(0,trace,"outpprim"); - break; - default: - break; - } - QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int)); - return q->first_to_check; -} - -static int -qdio_qebsm_get_inbound_buffer_frontier(struct qdio_q *q) -{ - struct qdio_irq *irq; - unsigned char state; - int tmp, ftc, count, cnt; - char dbf_text[15]; - - - irq = (struct qdio_irq *) q->irq_ptr; - ftc = q->first_to_check; - count = qdio_min(atomic_read(&q->number_of_buffers_used), - (QDIO_MAX_BUFFERS_PER_Q-1)); - if (count == 0) - return q->first_to_check; - cnt = qdio_do_eqbs(q, &state, &ftc, &count); - if (cnt == 0) - return q->first_to_check; - switch (state) { - case SLSB_P_INPUT_ERROR : -#ifdef CONFIG_QDIO_DEBUG - QDIO_DBF_TEXT3(1,trace,"inperr"); - sprintf(dbf_text,"%2x,%2x",ftc,count); - QDIO_DBF_TEXT3(1,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - if (q->qdio_error) - q->error_status_flags |= - QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR; - q->qdio_error = SLSB_P_INPUT_ERROR; - q->error_status_flags |= QDIO_STATUS_LOOK_FOR_ERROR; - atomic_sub(cnt, &q->number_of_buffers_used); - q->first_to_check = ftc; - break; - case SLSB_P_INPUT_PRIMED : - QDIO_DBF_TEXT3(0,trace,"inptprim"); - sprintf(dbf_text,"%2x,%2x",ftc,count); - QDIO_DBF_TEXT3(1,trace,dbf_text); - tmp = 0; - ftc = q->first_to_check; -#ifdef QDIO_USE_PROCESSING_STATE - if (cnt > 1) { - cnt -= 1; - tmp = set_slsb(q, &ftc, SLSB_P_INPUT_NOT_INIT, &cnt); - if (!tmp) - break; - } - cnt = 1; - tmp += set_slsb(q, &ftc, - SLSB_P_INPUT_PROCESSING, &cnt); - atomic_set(&q->polling, 1); -#else - tmp = set_slsb(q, &ftc, SLSB_P_INPUT_NOT_INIT, &cnt); -#endif - atomic_sub(tmp, &q->number_of_buffers_used); - q->first_to_check = ftc; - break; - case SLSB_CU_INPUT_EMPTY: - case SLSB_P_INPUT_NOT_INIT: - case SLSB_P_INPUT_PROCESSING: - QDIO_DBF_TEXT5(0,trace,"inpnipro"); - break; - default: - break; - } - QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int)); - return q->first_to_check; -} - -static int -qdio_get_outbound_buffer_frontier(struct qdio_q *q) -{ - struct qdio_irq *irq; - volatile char *slsb; - unsigned int count = 1; - int first_not_to_check, f, f_mod_no; - char dbf_text[15]; - - QDIO_DBF_TEXT4(0,trace,"getobfro"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - irq = (struct qdio_irq *) q->irq_ptr; - if (irq->is_qebsm) - return qdio_qebsm_get_outbound_buffer_frontier(q); - - slsb=&q->slsb.acc.val[0]; - f_mod_no=f=q->first_to_check; - /* - * f points to already processed elements, so f+no_used is correct... - * ... 
but: we don't check 128 buffers, as otherwise - * qdio_has_outbound_q_moved would return 0 - */ - first_not_to_check=f+qdio_min(atomic_read(&q->number_of_buffers_used), - (QDIO_MAX_BUFFERS_PER_Q-1)); - - if (((!q->is_iqdio_q) && (!q->hydra_gives_outbound_pcis)) || - (q->queue_type == QDIO_IQDIO_QFMT_ASYNCH)) - SYNC_MEMORY; - -check_next: - if (f==first_not_to_check) - goto out; - - switch(slsb[f_mod_no]) { - - /* the adapter has not fetched the output yet */ - case SLSB_CU_OUTPUT_PRIMED: - QDIO_DBF_TEXT5(0,trace,"outpprim"); - break; - - /* the adapter got it */ - case SLSB_P_OUTPUT_EMPTY: - atomic_dec(&q->number_of_buffers_used); - f++; - f_mod_no=f&(QDIO_MAX_BUFFERS_PER_Q-1); - QDIO_DBF_TEXT5(0,trace,"outpempt"); - goto check_next; - - case SLSB_P_OUTPUT_ERROR: - QDIO_DBF_TEXT3(0,trace,"outperr"); - sprintf(dbf_text,"%x-%x-%x",f_mod_no, - q->sbal[f_mod_no]->element[14].sbalf.value, - q->sbal[f_mod_no]->element[15].sbalf.value); - QDIO_DBF_TEXT3(1,trace,dbf_text); - QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256); - - /* kind of process the buffer */ - set_slsb(q, &f_mod_no, SLSB_P_OUTPUT_NOT_INIT, &count); - - /* - * we increment the frontier, as this buffer - * was processed obviously - */ - atomic_dec(&q->number_of_buffers_used); - f_mod_no=(f_mod_no+1)&(QDIO_MAX_BUFFERS_PER_Q-1); - - if (q->qdio_error) - q->error_status_flags|= - QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR; - q->qdio_error=SLSB_P_OUTPUT_ERROR; - q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR; - - break; - - /* no new buffers */ - default: - QDIO_DBF_TEXT5(0,trace,"outpni"); - } -out: - return (q->first_to_check=f_mod_no); -} - -/* all buffers are processed */ -static int -qdio_is_outbound_q_done(struct qdio_q *q) -{ - int no_used; -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[15]; -#endif - - no_used=atomic_read(&q->number_of_buffers_used); - -#ifdef CONFIG_QDIO_DEBUG - if (no_used) { - sprintf(dbf_text,"oqisnt%02x",no_used); - QDIO_DBF_TEXT4(0,trace,dbf_text); - } else { - QDIO_DBF_TEXT4(0,trace,"oqisdone"); - } - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); -#endif /* CONFIG_QDIO_DEBUG */ - return (no_used==0); -} - -static int -qdio_has_outbound_q_moved(struct qdio_q *q) -{ - int i; - - i=qdio_get_outbound_buffer_frontier(q); - - if ( (i!=GET_SAVED_FRONTIER(q)) || - (q->error_status_flags&QDIO_STATUS_LOOK_FOR_ERROR) ) { - SAVE_FRONTIER(q,i); - QDIO_DBF_TEXT4(0,trace,"oqhasmvd"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - return 1; - } else { - QDIO_DBF_TEXT4(0,trace,"oqhsntmv"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - return 0; - } -} - -static void -qdio_kick_outbound_q(struct qdio_q *q) -{ - int result; -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[15]; - - QDIO_DBF_TEXT4(0,trace,"kickoutq"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); -#endif /* CONFIG_QDIO_DEBUG */ - - if (!q->siga_out) - return; - - /* here's the story with cc=2 and busy bit set (thanks, Rick): - * VM's CP could present us cc=2 and busy bit set on SIGA-write - * during reconfiguration of their Guest LAN (only in HIPERS mode, - * QDIO mode is asynchronous -- cc=2 and busy bit there will take - * the queues down immediately; and not being under VM we have a - * problem on cc=2 and busy bit set right away). - * - * Therefore qdio_siga_output will try for a short time constantly, - * if such a condition occurs. If it doesn't change, it will - * increase the busy_siga_counter and save the timestamp, and - * schedule the queue for later processing (via mark_q, using the - * queue tasklet). __qdio_outbound_processing will check out the - * counter. 
If non-zero, it will call qdio_kick_outbound_q as often - * as the value of the counter. This will attempt further SIGA - * instructions. For each successful SIGA, the counter is - * decreased, for failing SIGAs the counter remains the same, after - * all. - * After some time of no movement, qdio_kick_outbound_q will - * finally fail and reflect corresponding error codes to call - * the upper layer module and have it take the queues down. - * - * Note that this is a change from the original HiperSockets design - * (saying cc=2 and busy bit means take the queues down), but in - * these days Guest LAN didn't exist... excessive cc=2 with busy bit - * conditions will still take the queues down, but the threshold is - * higher due to the Guest LAN environment. - */ - - - result=qdio_siga_output(q); - - switch (result) { - case 0: - /* went smooth this time, reset timestamp */ -#ifdef CONFIG_QDIO_DEBUG - QDIO_DBF_TEXT3(0,trace,"cc2reslv"); - sprintf(dbf_text,"%4x%2x%2x",q->schid.sch_no,q->q_no, - atomic_read(&q->busy_siga_counter)); - QDIO_DBF_TEXT3(0,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - q->timing.busy_start=0; - break; - case (2|QDIO_SIGA_ERROR_B_BIT_SET): - /* cc=2 and busy bit: */ - atomic_inc(&q->busy_siga_counter); - - /* if the last siga was successful, save - * timestamp here */ - if (!q->timing.busy_start) - q->timing.busy_start=NOW; - - /* if we're in time, don't touch error_status_flags - * and siga_error */ - if (NOW-q->timing.busy_start<QDIO_BUSY_BIT_GIVE_UP) { - qdio_mark_q(q); - break; - } - QDIO_DBF_TEXT2(0,trace,"cc2REPRT"); -#ifdef CONFIG_QDIO_DEBUG - sprintf(dbf_text,"%4x%2x%2x",q->schid.sch_no,q->q_no, - atomic_read(&q->busy_siga_counter)); - QDIO_DBF_TEXT3(0,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - /* else fallthrough and report error */ - default: - /* for plain cc=1, 2 or 3: */ - if (q->siga_error) - q->error_status_flags|= - QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR; - q->error_status_flags|= - QDIO_STATUS_LOOK_FOR_ERROR; - q->siga_error=result; - } -} - -static void -qdio_kick_outbound_handler(struct qdio_q *q) -{ - int start, end, real_end, count; -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[15]; -#endif - - start = q->first_element_to_kick; - /* last_move_ftc was just updated */ - real_end = GET_SAVED_FRONTIER(q); - end = (real_end+QDIO_MAX_BUFFERS_PER_Q-1)& - (QDIO_MAX_BUFFERS_PER_Q-1); - count = (end+QDIO_MAX_BUFFERS_PER_Q+1-start)& - (QDIO_MAX_BUFFERS_PER_Q-1); - -#ifdef CONFIG_QDIO_DEBUG - QDIO_DBF_TEXT4(0,trace,"kickouth"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - sprintf(dbf_text,"s=%2xc=%2x",start,count); - QDIO_DBF_TEXT4(0,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - - if (q->state==QDIO_IRQ_STATE_ACTIVE) - q->handler(q->cdev,QDIO_STATUS_OUTBOUND_INT| - q->error_status_flags, - q->qdio_error,q->siga_error,q->q_no,start,count, - q->int_parm); - - /* for the next time: */ - q->first_element_to_kick=real_end; - q->qdio_error=0; - q->siga_error=0; - q->error_status_flags=0; -} - -static void -__qdio_outbound_processing(struct qdio_q *q) -{ - int siga_attempts; - - QDIO_DBF_TEXT4(0,trace,"qoutproc"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - if (unlikely(qdio_reserve_q(q))) { - qdio_release_q(q); - qdio_perf_stat_inc(&perf_stats.outbound_tl_runs_resched); - /* as we're sissies, we'll check next time */ - if (likely(!atomic_read(&q->is_in_shutdown))) { - qdio_mark_q(q); - QDIO_DBF_TEXT4(0,trace,"busy,agn"); - } - return; - } - qdio_perf_stat_inc(&perf_stats.outbound_tl_runs); - qdio_perf_stat_inc(&perf_stats.tl_runs); - - /* see 
comment in qdio_kick_outbound_q */ - siga_attempts=atomic_read(&q->busy_siga_counter); - while (siga_attempts) { - atomic_dec(&q->busy_siga_counter); - qdio_kick_outbound_q(q); - siga_attempts--; - } - - if (qdio_has_outbound_q_moved(q)) - qdio_kick_outbound_handler(q); - - if (q->queue_type == QDIO_ZFCP_QFMT) { - if ((!q->hydra_gives_outbound_pcis) && - (!qdio_is_outbound_q_done(q))) - qdio_mark_q(q); - } - else if (((!q->is_iqdio_q) && (!q->is_pci_out)) || - (q->queue_type == QDIO_IQDIO_QFMT_ASYNCH)) { - /* - * make sure buffer switch from PRIMED to EMPTY is noticed - * and outbound_handler is called - */ - if (qdio_is_outbound_q_done(q)) { - del_timer(&q->timer); - } else { - if (!timer_pending(&q->timer)) - mod_timer(&q->timer, jiffies + - QDIO_FORCE_CHECK_TIMEOUT); - } - } - - qdio_release_q(q); -} - -static void -qdio_outbound_processing(unsigned long q) -{ - __qdio_outbound_processing((struct qdio_q *) q); -} - -/************************* INBOUND ROUTINES *******************************/ - - -static int -qdio_get_inbound_buffer_frontier(struct qdio_q *q) -{ - struct qdio_irq *irq; - int f,f_mod_no; - volatile char *slsb; - unsigned int count = 1; - int first_not_to_check; -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[15]; -#endif /* CONFIG_QDIO_DEBUG */ -#ifdef QDIO_USE_PROCESSING_STATE - int last_position=-1; -#endif /* QDIO_USE_PROCESSING_STATE */ - - QDIO_DBF_TEXT4(0,trace,"getibfro"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - irq = (struct qdio_irq *) q->irq_ptr; - if (irq->is_qebsm) - return qdio_qebsm_get_inbound_buffer_frontier(q); - - slsb=&q->slsb.acc.val[0]; - f_mod_no=f=q->first_to_check; - /* - * we don't check 128 buffers, as otherwise qdio_has_inbound_q_moved - * would return 0 - */ - first_not_to_check=f+qdio_min(atomic_read(&q->number_of_buffers_used), - (QDIO_MAX_BUFFERS_PER_Q-1)); - - /* - * we don't use this one, as a PCI or we after a thin interrupt - * will sync the queues - */ - /* SYNC_MEMORY;*/ - -check_next: - f_mod_no=f&(QDIO_MAX_BUFFERS_PER_Q-1); - if (f==first_not_to_check) - goto out; - switch (slsb[f_mod_no]) { - - /* CU_EMPTY means frontier is reached */ - case SLSB_CU_INPUT_EMPTY: - QDIO_DBF_TEXT5(0,trace,"inptempt"); - break; - - /* P_PRIMED means set slsb to P_PROCESSING and move on */ - case SLSB_P_INPUT_PRIMED: - QDIO_DBF_TEXT5(0,trace,"inptprim"); - -#ifdef QDIO_USE_PROCESSING_STATE - /* - * as soon as running under VM, polling the input queues will - * kill VM in terms of CP overhead - */ - if (q->siga_sync) { - set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count); - } else { - /* set the previous buffer to NOT_INIT. The current - * buffer will be set to PROCESSING at the end of - * this function to avoid further interrupts. */ - if (last_position>=0) - set_slsb(q, &last_position, - SLSB_P_INPUT_NOT_INIT, &count); - atomic_set(&q->polling,1); - last_position=f_mod_no; - } -#else /* QDIO_USE_PROCESSING_STATE */ - set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count); -#endif /* QDIO_USE_PROCESSING_STATE */ - /* - * not needed, as the inbound queue will be synced on the next - * siga-r, resp. 
tiqdio_is_inbound_q_done will do the siga-s - */ - /*SYNC_MEMORY;*/ - f++; - atomic_dec(&q->number_of_buffers_used); - goto check_next; - - case SLSB_P_INPUT_NOT_INIT: - case SLSB_P_INPUT_PROCESSING: - QDIO_DBF_TEXT5(0,trace,"inpnipro"); - break; - - /* P_ERROR means frontier is reached, break and report error */ - case SLSB_P_INPUT_ERROR: -#ifdef CONFIG_QDIO_DEBUG - sprintf(dbf_text,"inperr%2x",f_mod_no); - QDIO_DBF_TEXT3(1,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256); - - /* kind of process the buffer */ - set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count); - - if (q->qdio_error) - q->error_status_flags|= - QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR; - q->qdio_error=SLSB_P_INPUT_ERROR; - q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR; - - /* we increment the frontier, as this buffer - * was processed obviously */ - f_mod_no=(f_mod_no+1)&(QDIO_MAX_BUFFERS_PER_Q-1); - atomic_dec(&q->number_of_buffers_used); - -#ifdef QDIO_USE_PROCESSING_STATE - last_position=-1; -#endif /* QDIO_USE_PROCESSING_STATE */ - - break; - - /* everything else means frontier not changed (HALTED or so) */ - default: - break; - } -out: - q->first_to_check=f_mod_no; - -#ifdef QDIO_USE_PROCESSING_STATE - if (last_position>=0) - set_slsb(q, &last_position, SLSB_P_INPUT_PROCESSING, &count); -#endif /* QDIO_USE_PROCESSING_STATE */ - - QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int)); - - return q->first_to_check; -} - -static int -qdio_has_inbound_q_moved(struct qdio_q *q) -{ - int i; - - i=qdio_get_inbound_buffer_frontier(q); - if ( (i!=GET_SAVED_FRONTIER(q)) || - (q->error_status_flags&QDIO_STATUS_LOOK_FOR_ERROR) ) { - SAVE_FRONTIER(q,i); - if ((!q->siga_sync)&&(!q->hydra_gives_outbound_pcis)) - SAVE_TIMESTAMP(q); - - QDIO_DBF_TEXT4(0,trace,"inhasmvd"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - return 1; - } else { - QDIO_DBF_TEXT4(0,trace,"inhsntmv"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - return 0; - } -} - -/* means, no more buffers to be filled */ -static int -tiqdio_is_inbound_q_done(struct qdio_q *q) -{ - int no_used; - unsigned int start_buf, count; - unsigned char state = 0; - struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr; - -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[15]; -#endif - - no_used=atomic_read(&q->number_of_buffers_used); - - /* propagate the change from 82 to 80 through VM */ - SYNC_MEMORY; - -#ifdef CONFIG_QDIO_DEBUG - if (no_used) { - sprintf(dbf_text,"iqisnt%02x",no_used); - QDIO_DBF_TEXT4(0,trace,dbf_text); - } else { - QDIO_DBF_TEXT4(0,trace,"iniqisdo"); - } - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); -#endif /* CONFIG_QDIO_DEBUG */ - - if (!no_used) - return 1; - if (irq->is_qebsm) { - count = 1; - start_buf = q->first_to_check; - qdio_do_eqbs(q, &state, &start_buf, &count); - } else - state = q->slsb.acc.val[q->first_to_check]; - if (state != SLSB_P_INPUT_PRIMED) - /* - * nothing more to do, if next buffer is not PRIMED. - * note that we did a SYNC_MEMORY before, that there - * has been a sychnronization. - * we will return 0 below, as there is nothing to do - * (stop_polling not necessary, as we have not been - * using the PROCESSING state - */ - return 0; - - /* - * ok, the next input buffer is primed. that means, that device state - * change indicator and adapter local summary are set, so we will find - * it next time. - * we will return 0 below, as there is nothing to do, except scheduling - * ourselves for the next time. 
- */ - tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind); - tiqdio_sched_tl(); - return 0; -} - -static int -qdio_is_inbound_q_done(struct qdio_q *q) -{ - int no_used; - unsigned int start_buf, count; - unsigned char state = 0; - struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr; - -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[15]; -#endif - - no_used=atomic_read(&q->number_of_buffers_used); - - /* - * we need that one for synchronization with the adapter, as it - * does a kind of PCI avoidance - */ - SYNC_MEMORY; - - if (!no_used) { - QDIO_DBF_TEXT4(0,trace,"inqisdnA"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - return 1; - } - if (irq->is_qebsm) { - count = 1; - start_buf = q->first_to_check; - qdio_do_eqbs(q, &state, &start_buf, &count); - } else - state = q->slsb.acc.val[q->first_to_check]; - if (state == SLSB_P_INPUT_PRIMED) { - /* we got something to do */ - QDIO_DBF_TEXT4(0,trace,"inqisntA"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - return 0; - } - - /* on VM, we don't poll, so the q is always done here */ - if (q->siga_sync) - return 1; - if (q->hydra_gives_outbound_pcis) - return 1; - - /* - * at this point we know, that inbound first_to_check - * has (probably) not moved (see qdio_inbound_processing) - */ - if (NOW>GET_SAVED_TIMESTAMP(q)+q->timing.threshold) { -#ifdef CONFIG_QDIO_DEBUG - QDIO_DBF_TEXT4(0,trace,"inqisdon"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used); - QDIO_DBF_TEXT4(0,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - return 1; - } else { -#ifdef CONFIG_QDIO_DEBUG - QDIO_DBF_TEXT4(0,trace,"inqisntd"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used); - QDIO_DBF_TEXT4(0,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - return 0; - } -} - -static void -qdio_kick_inbound_handler(struct qdio_q *q) -{ - int count, start, end, real_end, i; -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[15]; -#endif - - QDIO_DBF_TEXT4(0,trace,"kickinh"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - start=q->first_element_to_kick; - real_end=q->first_to_check; - end=(real_end+QDIO_MAX_BUFFERS_PER_Q-1)&(QDIO_MAX_BUFFERS_PER_Q-1); - - i=start; - count=0; - while (1) { - count++; - if (i==end) - break; - i=(i+1)&(QDIO_MAX_BUFFERS_PER_Q-1); - } - -#ifdef CONFIG_QDIO_DEBUG - sprintf(dbf_text,"s=%2xc=%2x",start,count); - QDIO_DBF_TEXT4(0,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - - if (likely(q->state==QDIO_IRQ_STATE_ACTIVE)) - q->handler(q->cdev, - QDIO_STATUS_INBOUND_INT|q->error_status_flags, - q->qdio_error,q->siga_error,q->q_no,start,count, - q->int_parm); - - /* for the next time: */ - q->first_element_to_kick=real_end; - q->qdio_error=0; - q->siga_error=0; - q->error_status_flags=0; - - qdio_perf_stat_inc(&perf_stats.inbound_cnt); -} - -static void -__tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set) -{ - struct qdio_irq *irq_ptr; - struct qdio_q *oq; - int i; - - QDIO_DBF_TEXT4(0,trace,"iqinproc"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - /* - * we first want to reserve the q, so that we know, that we don't - * interrupt ourselves and call qdio_unmark_q, as is_in_shutdown might - * be set - */ - if (unlikely(qdio_reserve_q(q))) { - qdio_release_q(q); - qdio_perf_stat_inc(&perf_stats.inbound_thin_tl_runs_resched); - /* - * as we might just be about to stop polling, we make - * sure that we check again at least once more - */ - tiqdio_sched_tl(); - return; - } - qdio_perf_stat_inc(&perf_stats.inbound_thin_tl_runs); - if 
(unlikely(atomic_read(&q->is_in_shutdown))) { - qdio_unmark_q(q); - goto out; - } - - /* - * we reset spare_ind_was_set, when the queue does not use the - * spare indicator - */ - if (spare_ind_was_set) - spare_ind_was_set = (q->dev_st_chg_ind == &spare_indicator); - - if (!(*(q->dev_st_chg_ind)) && !spare_ind_was_set) - goto out; - /* - * q->dev_st_chg_ind is the indicator, be it shared or not. - * only clear it, if indicator is non-shared - */ - if (q->dev_st_chg_ind != &spare_indicator) - tiqdio_clear_summary_bit((__u32*)q->dev_st_chg_ind); - - if (q->hydra_gives_outbound_pcis) { - if (!q->siga_sync_done_on_thinints) { - SYNC_MEMORY_ALL; - } else if (!q->siga_sync_done_on_outb_tis) { - SYNC_MEMORY_ALL_OUTB; - } - } else { - SYNC_MEMORY; - } - /* - * maybe we have to do work on our outbound queues... at least - * we have to check the outbound-int-capable thinint-capable - * queues - */ - if (q->hydra_gives_outbound_pcis) { - irq_ptr = (struct qdio_irq*)q->irq_ptr; - for (i=0;i<irq_ptr->no_output_qs;i++) { - oq = irq_ptr->output_qs[i]; - if (!qdio_is_outbound_q_done(oq)) { - qdio_perf_stat_dec(&perf_stats.tl_runs); - __qdio_outbound_processing(oq); - } - } - } - - if (!qdio_has_inbound_q_moved(q)) - goto out; - - qdio_kick_inbound_handler(q); - if (tiqdio_is_inbound_q_done(q)) - if (!qdio_stop_polling(q)) { - /* - * we set the flags to get into the stuff next time, - * see also comment in qdio_stop_polling - */ - tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind); - tiqdio_sched_tl(); - } -out: - qdio_release_q(q); -} - -static void -tiqdio_inbound_processing(unsigned long q) -{ - __tiqdio_inbound_processing((struct qdio_q *) q, - atomic_read(&spare_indicator_usecount)); -} - -static void -__qdio_inbound_processing(struct qdio_q *q) -{ - int q_laps=0; - - QDIO_DBF_TEXT4(0,trace,"qinproc"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - if (unlikely(qdio_reserve_q(q))) { - qdio_release_q(q); - qdio_perf_stat_inc(&perf_stats.inbound_tl_runs_resched); - /* as we're sissies, we'll check next time */ - if (likely(!atomic_read(&q->is_in_shutdown))) { - qdio_mark_q(q); - QDIO_DBF_TEXT4(0,trace,"busy,agn"); - } - return; - } - qdio_perf_stat_inc(&perf_stats.inbound_tl_runs); - qdio_perf_stat_inc(&perf_stats.tl_runs); - -again: - if (qdio_has_inbound_q_moved(q)) { - qdio_kick_inbound_handler(q); - if (!qdio_stop_polling(q)) { - q_laps++; - if (q_laps<QDIO_Q_LAPS) - goto again; - } - qdio_mark_q(q); - } else { - if (!qdio_is_inbound_q_done(q)) - /* means poll time is not yet over */ - qdio_mark_q(q); - } - - qdio_release_q(q); -} - -static void -qdio_inbound_processing(unsigned long q) -{ - __qdio_inbound_processing((struct qdio_q *) q); -} - -/************************* MAIN ROUTINES *******************************/ - -#ifdef QDIO_USE_PROCESSING_STATE -static int -tiqdio_reset_processing_state(struct qdio_q *q, int q_laps) -{ - if (!q) { - tiqdio_sched_tl(); - return 0; - } - - /* - * under VM, we have not used the PROCESSING state, so no - * need to stop polling - */ - if (q->siga_sync) - return 2; - - if (unlikely(qdio_reserve_q(q))) { - qdio_release_q(q); - qdio_perf_stat_inc(&perf_stats.inbound_thin_tl_runs_resched); - /* - * as we might just be about to stop polling, we make - * sure that we check again at least once more - */ - - /* - * sanity -- we'd get here without setting the - * dev st chg ind - */ - tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind); - tiqdio_sched_tl(); - return 0; - } - if (qdio_stop_polling(q)) { - qdio_release_q(q); - return 2; - } - if (q_laps<QDIO_Q_LAPS-1) { - 
qdio_release_q(q); - return 3; - } - /* - * we set the flags to get into the stuff - * next time, see also comment in qdio_stop_polling - */ - tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind); - tiqdio_sched_tl(); - qdio_release_q(q); - return 1; - -} -#endif /* QDIO_USE_PROCESSING_STATE */ - -static void -tiqdio_inbound_checks(void) -{ - struct qdio_q *q; - int spare_ind_was_set=0; -#ifdef QDIO_USE_PROCESSING_STATE - int q_laps=0; -#endif /* QDIO_USE_PROCESSING_STATE */ - - QDIO_DBF_TEXT4(0,trace,"iqdinbck"); - QDIO_DBF_TEXT5(0,trace,"iqlocsum"); - -#ifdef QDIO_USE_PROCESSING_STATE -again: -#endif /* QDIO_USE_PROCESSING_STATE */ - - /* when the spare indicator is used and set, save that and clear it */ - if ((atomic_read(&spare_indicator_usecount)) && spare_indicator) { - spare_ind_was_set = 1; - tiqdio_clear_summary_bit((__u32*)&spare_indicator); - } - - q=(struct qdio_q*)tiq_list; - do { - if (!q) - break; - __tiqdio_inbound_processing(q, spare_ind_was_set); - q=(struct qdio_q*)q->list_next; - } while (q!=(struct qdio_q*)tiq_list); - -#ifdef QDIO_USE_PROCESSING_STATE - q=(struct qdio_q*)tiq_list; - do { - int ret; - - ret = tiqdio_reset_processing_state(q, q_laps); - switch (ret) { - case 0: - return; - case 1: - q_laps++; - case 2: - q = (struct qdio_q*)q->list_next; - break; - default: - q_laps++; - goto again; - } - } while (q!=(struct qdio_q*)tiq_list); -#endif /* QDIO_USE_PROCESSING_STATE */ -} - -static void -tiqdio_tl(unsigned long data) -{ - QDIO_DBF_TEXT4(0,trace,"iqdio_tl"); - - qdio_perf_stat_inc(&perf_stats.tl_runs); - - tiqdio_inbound_checks(); -} - -/********************* GENERAL HELPER_ROUTINES ***********************/ - -static void -qdio_release_irq_memory(struct qdio_irq *irq_ptr) -{ - int i; - struct qdio_q *q; - - for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) { - q = irq_ptr->input_qs[i]; - if (q) { - free_page((unsigned long) q->slib); - kmem_cache_free(qdio_q_cache, q); - } - q = irq_ptr->output_qs[i]; - if (q) { - free_page((unsigned long) q->slib); - kmem_cache_free(qdio_q_cache, q); - } - } - free_page((unsigned long) irq_ptr->qdr); - free_page((unsigned long) irq_ptr); -} - -static void -qdio_set_impl_params(struct qdio_irq *irq_ptr, - unsigned int qib_param_field_format, - /* pointer to 128 bytes or NULL, if no param field */ - unsigned char *qib_param_field, - /* pointer to no_queues*128 words of data or NULL */ - unsigned int no_input_qs, - unsigned int no_output_qs, - unsigned long *input_slib_elements, - unsigned long *output_slib_elements) -{ - int i,j; - - if (!irq_ptr) - return; - - irq_ptr->qib.pfmt=qib_param_field_format; - if (qib_param_field) - memcpy(irq_ptr->qib.parm,qib_param_field, - QDIO_MAX_BUFFERS_PER_Q); - - if (input_slib_elements) - for (i=0;i<no_input_qs;i++) { - for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) - irq_ptr->input_qs[i]->slib->slibe[j].parms= - input_slib_elements[ - i*QDIO_MAX_BUFFERS_PER_Q+j]; - } - if (output_slib_elements) - for (i=0;i<no_output_qs;i++) { - for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) - irq_ptr->output_qs[i]->slib->slibe[j].parms= - output_slib_elements[ - i*QDIO_MAX_BUFFERS_PER_Q+j]; - } -} - -static int -qdio_alloc_qs(struct qdio_irq *irq_ptr, - int no_input_qs, int no_output_qs) -{ - int i; - struct qdio_q *q; - - for (i = 0; i < no_input_qs; i++) { - q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL); - if (!q) - return -ENOMEM; - memset(q, 0, sizeof(*q)); - - q->slib = (struct slib *) __get_free_page(GFP_KERNEL); - if (!q->slib) { - kmem_cache_free(qdio_q_cache, q); - return -ENOMEM; - } - irq_ptr->input_qs[i]=q; - } 
- - for (i = 0; i < no_output_qs; i++) { - q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL); - if (!q) - return -ENOMEM; - memset(q, 0, sizeof(*q)); - - q->slib = (struct slib *) __get_free_page(GFP_KERNEL); - if (!q->slib) { - kmem_cache_free(qdio_q_cache, q); - return -ENOMEM; - } - irq_ptr->output_qs[i]=q; - } - return 0; -} - -static void -qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev, - int no_input_qs, int no_output_qs, - qdio_handler_t *input_handler, - qdio_handler_t *output_handler, - unsigned long int_parm,int q_format, - unsigned long flags, - void **inbound_sbals_array, - void **outbound_sbals_array) -{ - struct qdio_q *q; - int i,j; - char dbf_text[20]; /* see qdio_initialize */ - void *ptr; - int available; - - sprintf(dbf_text,"qfqs%4x",cdev->private->schid.sch_no); - QDIO_DBF_TEXT0(0,setup,dbf_text); - for (i=0;i<no_input_qs;i++) { - q=irq_ptr->input_qs[i]; - - memset(q,0,((char*)&q->slib)-((char*)q)); - sprintf(dbf_text,"in-q%4x",i); - QDIO_DBF_TEXT0(0,setup,dbf_text); - QDIO_DBF_HEX0(0,setup,&q,sizeof(void*)); - - memset(q->slib,0,PAGE_SIZE); - q->sl=(struct sl*)(((char*)q->slib)+PAGE_SIZE/2); - - available=0; - - for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) - q->sbal[j]=*(inbound_sbals_array++); - - q->queue_type=q_format; - q->int_parm=int_parm; - q->schid = irq_ptr->schid; - q->irq_ptr = irq_ptr; - q->cdev = cdev; - q->mask=1<<(31-i); - q->q_no=i; - q->is_input_q=1; - q->first_to_check=0; - q->last_move_ftc=0; - q->handler=input_handler; - q->dev_st_chg_ind=irq_ptr->dev_st_chg_ind; - - /* q->is_thinint_q isn't valid at this time, but - * irq_ptr->is_thinint_irq is - */ - if (irq_ptr->is_thinint_irq) - tasklet_init(&q->tasklet, tiqdio_inbound_processing, - (unsigned long) q); - else - tasklet_init(&q->tasklet, qdio_inbound_processing, - (unsigned long) q); - - /* actually this is not used for inbound queues. yet. 
*/ - atomic_set(&q->busy_siga_counter,0); - q->timing.busy_start=0; - -/* for (j=0;j<QDIO_STATS_NUMBER;j++) - q->timing.last_transfer_times[j]=(qdio_get_micros()/ - QDIO_STATS_NUMBER)*j; - q->timing.last_transfer_index=QDIO_STATS_NUMBER-1; -*/ - - /* fill in slib */ - if (i>0) irq_ptr->input_qs[i-1]->slib->nsliba= - (unsigned long)(q->slib); - q->slib->sla=(unsigned long)(q->sl); - q->slib->slsba=(unsigned long)(&q->slsb.acc.val[0]); - - /* fill in sl */ - for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) - q->sl->element[j].sbal=(unsigned long)(q->sbal[j]); - - QDIO_DBF_TEXT2(0,setup,"sl-sb-b0"); - ptr=(void*)q->sl; - QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*)); - ptr=(void*)&q->slsb; - QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*)); - ptr=(void*)q->sbal[0]; - QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*)); - - /* fill in slsb */ - if (!irq_ptr->is_qebsm) { - unsigned int count = 1; - for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) - set_slsb(q, &j, SLSB_P_INPUT_NOT_INIT, &count); - } - } - - for (i=0;i<no_output_qs;i++) { - q=irq_ptr->output_qs[i]; - memset(q,0,((char*)&q->slib)-((char*)q)); - - sprintf(dbf_text,"outq%4x",i); - QDIO_DBF_TEXT0(0,setup,dbf_text); - QDIO_DBF_HEX0(0,setup,&q,sizeof(void*)); - - memset(q->slib,0,PAGE_SIZE); - q->sl=(struct sl*)(((char*)q->slib)+PAGE_SIZE/2); - - available=0; - - for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) - q->sbal[j]=*(outbound_sbals_array++); - - q->queue_type=q_format; - if ((q->queue_type == QDIO_IQDIO_QFMT) && - (no_output_qs > 1) && - (i == no_output_qs-1)) - q->queue_type = QDIO_IQDIO_QFMT_ASYNCH; - q->int_parm=int_parm; - q->is_input_q=0; - q->is_pci_out = 0; - q->schid = irq_ptr->schid; - q->cdev = cdev; - q->irq_ptr = irq_ptr; - q->mask=1<<(31-i); - q->q_no=i; - q->first_to_check=0; - q->last_move_ftc=0; - q->handler=output_handler; - - tasklet_init(&q->tasklet, qdio_outbound_processing, - (unsigned long) q); - setup_timer(&q->timer, qdio_outbound_processing, - (unsigned long) q); - - atomic_set(&q->busy_siga_counter,0); - q->timing.busy_start=0; - - /* fill in slib */ - if (i>0) irq_ptr->output_qs[i-1]->slib->nsliba= - (unsigned long)(q->slib); - q->slib->sla=(unsigned long)(q->sl); - q->slib->slsba=(unsigned long)(&q->slsb.acc.val[0]); - - /* fill in sl */ - for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) - q->sl->element[j].sbal=(unsigned long)(q->sbal[j]); - - QDIO_DBF_TEXT2(0,setup,"sl-sb-b0"); - ptr=(void*)q->sl; - QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*)); - ptr=(void*)&q->slsb; - QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*)); - ptr=(void*)q->sbal[0]; - QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*)); - - /* fill in slsb */ - if (!irq_ptr->is_qebsm) { - unsigned int count = 1; - for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) - set_slsb(q, &j, SLSB_P_OUTPUT_NOT_INIT, &count); - } - } -} - -static void -qdio_fill_thresholds(struct qdio_irq *irq_ptr, - unsigned int no_input_qs, - unsigned int no_output_qs, - unsigned int min_input_threshold, - unsigned int max_input_threshold, - unsigned int min_output_threshold, - unsigned int max_output_threshold) -{ - int i; - struct qdio_q *q; - - for (i=0;i<no_input_qs;i++) { - q=irq_ptr->input_qs[i]; - q->timing.threshold=max_input_threshold; -/* for (j=0;j<QDIO_STATS_CLASSES;j++) { - q->threshold_classes[j].threshold= - min_input_threshold+ - (max_input_threshold-min_input_threshold)/ - QDIO_STATS_CLASSES; - } - qdio_use_thresholds(q,QDIO_STATS_CLASSES/2);*/ - } - for (i=0;i<no_output_qs;i++) { - q=irq_ptr->output_qs[i]; - q->timing.threshold=max_output_threshold; -/* for (j=0;j<QDIO_STATS_CLASSES;j++) { - 
q->threshold_classes[j].threshold= - min_output_threshold+ - (max_output_threshold-min_output_threshold)/ - QDIO_STATS_CLASSES; - } - qdio_use_thresholds(q,QDIO_STATS_CLASSES/2);*/ - } -} - -static void tiqdio_thinint_handler(void *ind, void *drv_data) -{ - QDIO_DBF_TEXT4(0,trace,"thin_int"); - - qdio_perf_stat_inc(&perf_stats.thinints); - - /* SVS only when needed: - * issue SVS to benefit from iqdio interrupt avoidance - * (SVS clears AISOI)*/ - if (!omit_svs) - tiqdio_clear_global_summary(); - - tiqdio_inbound_checks(); -} - -static void -qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state) -{ - int i; -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[15]; - - QDIO_DBF_TEXT5(0,trace,"newstate"); - sprintf(dbf_text,"%4x%4x",irq_ptr->schid.sch_no,state); - QDIO_DBF_TEXT5(0,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - - irq_ptr->state=state; - for (i=0;i<irq_ptr->no_input_qs;i++) - irq_ptr->input_qs[i]->state=state; - for (i=0;i<irq_ptr->no_output_qs;i++) - irq_ptr->output_qs[i]->state=state; - mb(); -} - -static void -qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb) -{ - char dbf_text[15]; - - if (irb->esw.esw0.erw.cons) { - sprintf(dbf_text,"sens%4x",schid.sch_no); - QDIO_DBF_TEXT2(1,trace,dbf_text); - QDIO_DBF_HEX0(0,sense,irb,QDIO_DBF_SENSE_LEN); - - QDIO_PRINT_WARN("sense data available on qdio channel.\n"); - QDIO_HEXDUMP16(WARN,"irb: ",irb); - QDIO_HEXDUMP16(WARN,"sense data: ",irb->ecw); - } - -} - -static void -qdio_handle_pci(struct qdio_irq *irq_ptr) -{ - int i; - struct qdio_q *q; - - qdio_perf_stat_inc(&perf_stats.pcis); - for (i=0;i<irq_ptr->no_input_qs;i++) { - q=irq_ptr->input_qs[i]; - if (q->is_input_q&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT) - qdio_mark_q(q); - else { - qdio_perf_stat_dec(&perf_stats.tl_runs); - __qdio_inbound_processing(q); - } - } - if (!irq_ptr->hydra_gives_outbound_pcis) - return; - for (i=0;i<irq_ptr->no_output_qs;i++) { - q=irq_ptr->output_qs[i]; - if (qdio_is_outbound_q_done(q)) - continue; - qdio_perf_stat_dec(&perf_stats.tl_runs); - if (!irq_ptr->sync_done_on_outb_pcis) - SYNC_MEMORY; - __qdio_outbound_processing(q); - } -} - -static void qdio_establish_handle_irq(struct ccw_device*, int, int); - -static void -qdio_handle_activate_check(struct ccw_device *cdev, unsigned long intparm, - int cstat, int dstat) -{ - struct qdio_irq *irq_ptr; - struct qdio_q *q; - char dbf_text[15]; - - irq_ptr = cdev->private->qdio_data; - - QDIO_DBF_TEXT2(1, trace, "ick2"); - sprintf(dbf_text,"%s", cdev->dev.bus_id); - QDIO_DBF_TEXT2(1,trace,dbf_text); - QDIO_DBF_HEX2(0,trace,&intparm,sizeof(int)); - QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int)); - QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int)); - QDIO_PRINT_ERR("received check condition on activate " \ - "queues on device %s (cs=x%x, ds=x%x).\n", - cdev->dev.bus_id, cstat, dstat); - if (irq_ptr->no_input_qs) { - q=irq_ptr->input_qs[0]; - } else if (irq_ptr->no_output_qs) { - q=irq_ptr->output_qs[0]; - } else { - QDIO_PRINT_ERR("oops... 
no queue registered for device %s!?\n", - cdev->dev.bus_id); - goto omit_handler_call; - } - q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION| - QDIO_STATUS_LOOK_FOR_ERROR, - 0,0,0,-1,-1,q->int_parm); -omit_handler_call: - qdio_set_state(irq_ptr,QDIO_IRQ_STATE_STOPPED); - -} - -static void -qdio_call_shutdown(struct work_struct *work) -{ - struct ccw_device_private *priv; - struct ccw_device *cdev; - - priv = container_of(work, struct ccw_device_private, kick_work); - cdev = priv->cdev; - qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); - put_device(&cdev->dev); -} - -static void -qdio_timeout_handler(struct ccw_device *cdev) -{ - struct qdio_irq *irq_ptr; - char dbf_text[15]; - - QDIO_DBF_TEXT2(0, trace, "qtoh"); - sprintf(dbf_text, "%s", cdev->dev.bus_id); - QDIO_DBF_TEXT2(0, trace, dbf_text); - - irq_ptr = cdev->private->qdio_data; - sprintf(dbf_text, "state:%d", irq_ptr->state); - QDIO_DBF_TEXT2(0, trace, dbf_text); - - switch (irq_ptr->state) { - case QDIO_IRQ_STATE_INACTIVE: - QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: timed out\n", - irq_ptr->schid.ssid, irq_ptr->schid.sch_no); - QDIO_DBF_TEXT2(1,setup,"eq:timeo"); - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); - break; - case QDIO_IRQ_STATE_CLEANUP: - QDIO_PRINT_INFO("Did not get interrupt on cleanup, " - "irq=0.%x.%x.\n", - irq_ptr->schid.ssid, irq_ptr->schid.sch_no); - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); - break; - case QDIO_IRQ_STATE_ESTABLISHED: - case QDIO_IRQ_STATE_ACTIVE: - /* I/O has been terminated by common I/O layer. */ - QDIO_PRINT_INFO("Queues on irq 0.%x.%04x killed by cio.\n", - irq_ptr->schid.ssid, irq_ptr->schid.sch_no); - QDIO_DBF_TEXT2(1, trace, "cio:term"); - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); - if (get_device(&cdev->dev)) { - /* Can't call shutdown from interrupt context. */ - PREPARE_WORK(&cdev->private->kick_work, - qdio_call_shutdown); - queue_work(ccw_device_work, &cdev->private->kick_work); - } - break; - default: - BUG(); - } - wake_up(&cdev->private->wait_q); -} - -static void -qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) -{ - struct qdio_irq *irq_ptr; - int cstat,dstat; - char dbf_text[15]; - -#ifdef CONFIG_QDIO_DEBUG - QDIO_DBF_TEXT4(0, trace, "qint"); - sprintf(dbf_text, "%s", cdev->dev.bus_id); - QDIO_DBF_TEXT4(0, trace, dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - - if (!intparm) { - QDIO_PRINT_ERR("got unsolicited interrupt in qdio " \ - "handler, device %s\n", cdev->dev.bus_id); - return; - } - - irq_ptr = cdev->private->qdio_data; - if (!irq_ptr) { - QDIO_DBF_TEXT2(1, trace, "uint"); - sprintf(dbf_text,"%s", cdev->dev.bus_id); - QDIO_DBF_TEXT2(1,trace,dbf_text); - QDIO_PRINT_ERR("received interrupt on unused device %s!\n", - cdev->dev.bus_id); - return; - } - - if (IS_ERR(irb)) { - /* Currently running i/o is in error. 
*/ - switch (PTR_ERR(irb)) { - case -EIO: - QDIO_PRINT_ERR("i/o error on device %s\n", - cdev->dev.bus_id); - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); - wake_up(&cdev->private->wait_q); - return; - case -ETIMEDOUT: - qdio_timeout_handler(cdev); - return; - default: - QDIO_PRINT_ERR("unknown error state %ld on device %s\n", - PTR_ERR(irb), cdev->dev.bus_id); - return; - } - } - - qdio_irq_check_sense(irq_ptr->schid, irb); - -#ifdef CONFIG_QDIO_DEBUG - sprintf(dbf_text, "state:%d", irq_ptr->state); - QDIO_DBF_TEXT4(0, trace, dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - - cstat = irb->scsw.cmd.cstat; - dstat = irb->scsw.cmd.dstat; - - switch (irq_ptr->state) { - case QDIO_IRQ_STATE_INACTIVE: - qdio_establish_handle_irq(cdev, cstat, dstat); - break; - - case QDIO_IRQ_STATE_CLEANUP: - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); - break; - - case QDIO_IRQ_STATE_ESTABLISHED: - case QDIO_IRQ_STATE_ACTIVE: - if (cstat & SCHN_STAT_PCI) { - qdio_handle_pci(irq_ptr); - break; - } - - if ((cstat&~SCHN_STAT_PCI)||dstat) { - qdio_handle_activate_check(cdev, intparm, cstat, dstat); - break; - } - default: - QDIO_PRINT_ERR("got interrupt for queues in state %d on " \ - "device %s?!\n", - irq_ptr->state, cdev->dev.bus_id); - } - wake_up(&cdev->private->wait_q); - -} - -int -qdio_synchronize(struct ccw_device *cdev, unsigned int flags, - unsigned int queue_number) -{ - int cc = 0; - struct qdio_q *q; - struct qdio_irq *irq_ptr; - void *ptr; -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[15]="SyncXXXX"; -#endif - - irq_ptr = cdev->private->qdio_data; - if (!irq_ptr) - return -ENODEV; - -#ifdef CONFIG_QDIO_DEBUG - *((int*)(&dbf_text[4])) = irq_ptr->schid.sch_no; - QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN); - *((int*)(&dbf_text[0]))=flags; - *((int*)(&dbf_text[4]))=queue_number; - QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN); -#endif /* CONFIG_QDIO_DEBUG */ - - if (flags&QDIO_FLAG_SYNC_INPUT) { - q=irq_ptr->input_qs[queue_number]; - if (!q) - return -EINVAL; - if (!(irq_ptr->is_qebsm)) - cc = do_siga_sync(q->schid, 0, q->mask); - } else if (flags&QDIO_FLAG_SYNC_OUTPUT) { - q=irq_ptr->output_qs[queue_number]; - if (!q) - return -EINVAL; - if (!(irq_ptr->is_qebsm)) - cc = do_siga_sync(q->schid, q->mask, 0); - } else - return -EINVAL; - - ptr=&cc; - if (cc) - QDIO_DBF_HEX3(0,trace,&ptr,sizeof(int)); - - return cc; -} - -static int -qdio_get_ssqd_information(struct subchannel_id *schid, - struct qdio_chsc_ssqd **ssqd_area) -{ - int result; - - QDIO_DBF_TEXT0(0, setup, "getssqd"); - *ssqd_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC); - if (!ssqd_area) { - QDIO_PRINT_WARN("Could not get memory for chsc on sch x%x.\n", - schid->sch_no); - return -ENOMEM; - } - - (*ssqd_area)->request = (struct chsc_header) { - .length = 0x0010, - .code = 0x0024, - }; - (*ssqd_area)->first_sch = schid->sch_no; - (*ssqd_area)->last_sch = schid->sch_no; - (*ssqd_area)->ssid = schid->ssid; - result = chsc(*ssqd_area); - - if (result) { - QDIO_PRINT_WARN("CHSC returned cc %i on sch 0.%x.%x.\n", - result, schid->ssid, schid->sch_no); - goto out; - } - - if ((*ssqd_area)->response.code != QDIO_CHSC_RESPONSE_CODE_OK) { - QDIO_PRINT_WARN("CHSC response is 0x%x on sch 0.%x.%x.\n", - (*ssqd_area)->response.code, - schid->ssid, schid->sch_no); - goto out; - } - if (!((*ssqd_area)->flags & CHSC_FLAG_QDIO_CAPABILITY) || - !((*ssqd_area)->flags & CHSC_FLAG_VALIDITY) || - ((*ssqd_area)->sch != schid->sch_no)) { - QDIO_PRINT_WARN("huh? problems checking out sch 0.%x.%x... 
" \ - "using all SIGAs.\n", - schid->ssid, schid->sch_no); - goto out; - } - return 0; -out: - return -EINVAL; -} - -int -qdio_get_ssqd_pct(struct ccw_device *cdev) -{ - struct qdio_chsc_ssqd *ssqd_area; - struct subchannel_id schid; - char dbf_text[15]; - int rc; - int pct = 0; - - QDIO_DBF_TEXT0(0, setup, "getpct"); - schid = ccw_device_get_subchannel_id(cdev); - rc = qdio_get_ssqd_information(&schid, &ssqd_area); - if (!rc) - pct = (int)ssqd_area->pct; - if (rc != -ENOMEM) - mempool_free(ssqd_area, qdio_mempool_scssc); - sprintf(dbf_text, "pct: %d", pct); - QDIO_DBF_TEXT2(0, setup, dbf_text); - return pct; -} -EXPORT_SYMBOL(qdio_get_ssqd_pct); - -static void -qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned long token) -{ - struct qdio_q *q; - int i; - unsigned int count, start_buf; - char dbf_text[15]; - - /*check if QEBSM is disabled */ - if (!(irq_ptr->is_qebsm) || !(irq_ptr->qdioac & 0x01)) { - irq_ptr->is_qebsm = 0; - irq_ptr->sch_token = 0; - irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM; - QDIO_DBF_TEXT0(0,setup,"noV=V"); - return; - } - irq_ptr->sch_token = token; - /*input queue*/ - for (i = 0; i < irq_ptr->no_input_qs;i++) { - q = irq_ptr->input_qs[i]; - count = QDIO_MAX_BUFFERS_PER_Q; - start_buf = 0; - set_slsb(q, &start_buf, SLSB_P_INPUT_NOT_INIT, &count); - } - sprintf(dbf_text,"V=V:%2x",irq_ptr->is_qebsm); - QDIO_DBF_TEXT0(0,setup,dbf_text); - sprintf(dbf_text,"%8lx",irq_ptr->sch_token); - QDIO_DBF_TEXT0(0,setup,dbf_text); - /*output queue*/ - for (i = 0; i < irq_ptr->no_output_qs; i++) { - q = irq_ptr->output_qs[i]; - count = QDIO_MAX_BUFFERS_PER_Q; - start_buf = 0; - set_slsb(q, &start_buf, SLSB_P_OUTPUT_NOT_INIT, &count); - } -} - -static void -qdio_get_ssqd_siga(struct qdio_irq *irq_ptr) -{ - int rc; - struct qdio_chsc_ssqd *ssqd_area; - - QDIO_DBF_TEXT0(0,setup,"getssqd"); - irq_ptr->qdioac = 0; - rc = qdio_get_ssqd_information(&irq_ptr->schid, &ssqd_area); - if (rc) { - QDIO_PRINT_WARN("using all SIGAs for sch x%x.n", - irq_ptr->schid.sch_no); - irq_ptr->qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY | - CHSC_FLAG_SIGA_OUTPUT_NECESSARY | - CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */ - irq_ptr->is_qebsm = 0; - } else - irq_ptr->qdioac = ssqd_area->qdioac1; - - qdio_check_subchannel_qebsm(irq_ptr, ssqd_area->sch_token); - if (rc != -ENOMEM) - mempool_free(ssqd_area, qdio_mempool_scssc); -} - -static unsigned int -tiqdio_check_chsc_availability(void) -{ - char dbf_text[15]; - - /* Check for bit 41. */ - if (!css_general_characteristics.aif) { - QDIO_PRINT_WARN("Adapter interruption facility not " \ - "installed.\n"); - return -ENOENT; - } - - /* Check for bits 107 and 108. */ - if (!css_chsc_characteristics.scssc || - !css_chsc_characteristics.scsscf) { - QDIO_PRINT_WARN("Set Chan Subsys. Char. & Fast-CHSCs " \ - "not available.\n"); - return -ENOENT; - } - - /* Check for OSA/FCP thin interrupts (bit 67). */ - hydra_thinints = css_general_characteristics.aif_osa; - sprintf(dbf_text,"hydrati%1x", hydra_thinints); - QDIO_DBF_TEXT0(0,setup,dbf_text); - -#ifdef CONFIG_64BIT - /* Check for QEBSM support in general (bit 58). */ - is_passthrough = css_general_characteristics.qebsm; -#endif - sprintf(dbf_text,"cssQBS:%1x", is_passthrough); - QDIO_DBF_TEXT0(0,setup,dbf_text); - - /* Check for aif time delay disablement fac (bit 56). 
If installed, - * omit svs even under lpar (good point by rick again) */ - omit_svs = css_general_characteristics.aif_tdd; - sprintf(dbf_text,"omitsvs%1x", omit_svs); - QDIO_DBF_TEXT0(0,setup,dbf_text); - return 0; -} - - -static unsigned int -tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero) -{ - unsigned long real_addr_local_summary_bit; - unsigned long real_addr_dev_st_chg_ind; - void *ptr; - char dbf_text[15]; - - unsigned int resp_code; - int result; - - struct { - struct chsc_header request; - u16 operation_code; - u16 reserved1; - u32 reserved2; - u32 reserved3; - u64 summary_indicator_addr; - u64 subchannel_indicator_addr; - u32 ks:4; - u32 kc:4; - u32 reserved4:21; - u32 isc:3; - u32 word_with_d_bit; - /* set to 0x10000000 to enable - * time delay disablement facility */ - u32 reserved5; - struct subchannel_id schid; - u32 reserved6[1004]; - struct chsc_header response; - u32 reserved7; - } *scssc_area; - - if (!irq_ptr->is_thinint_irq) - return -ENODEV; - - if (reset_to_zero) { - real_addr_local_summary_bit=0; - real_addr_dev_st_chg_ind=0; - } else { - real_addr_local_summary_bit= - virt_to_phys((volatile void *)tiqdio_ind); - real_addr_dev_st_chg_ind= - virt_to_phys((volatile void *)irq_ptr->dev_st_chg_ind); - } - - scssc_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC); - if (!scssc_area) { - QDIO_PRINT_WARN("No memory for setting indicators on " \ - "subchannel 0.%x.%x.\n", - irq_ptr->schid.ssid, irq_ptr->schid.sch_no); - return -ENOMEM; - } - scssc_area->request = (struct chsc_header) { - .length = 0x0fe0, - .code = 0x0021, - }; - scssc_area->operation_code = 0; - - scssc_area->summary_indicator_addr = real_addr_local_summary_bit; - scssc_area->subchannel_indicator_addr = real_addr_dev_st_chg_ind; - scssc_area->ks = QDIO_STORAGE_KEY; - scssc_area->kc = QDIO_STORAGE_KEY; - scssc_area->isc = TIQDIO_THININT_ISC; - scssc_area->schid = irq_ptr->schid; - /* enables the time delay disablement facility. Don't care - * whether it is really there (i.e. 
we haven't checked for - * it) */ - if (css_general_characteristics.aif_tdd) - scssc_area->word_with_d_bit = 0x10000000; - else - QDIO_PRINT_WARN("Time delay disablement facility " \ - "not available\n"); - - result = chsc(scssc_area); - if (result) { - QDIO_PRINT_WARN("could not set indicators on irq 0.%x.%x, " \ - "cc=%i.\n", - irq_ptr->schid.ssid, irq_ptr->schid.sch_no,result); - result = -EIO; - goto out; - } - - resp_code = scssc_area->response.code; - if (resp_code!=QDIO_CHSC_RESPONSE_CODE_OK) { - QDIO_PRINT_WARN("response upon setting indicators " \ - "is 0x%x.\n",resp_code); - sprintf(dbf_text,"sidR%4x",resp_code); - QDIO_DBF_TEXT1(0,trace,dbf_text); - QDIO_DBF_TEXT1(0,setup,dbf_text); - ptr=&scssc_area->response; - QDIO_DBF_HEX2(1,setup,&ptr,QDIO_DBF_SETUP_LEN); - result = -EIO; - goto out; - } - - QDIO_DBF_TEXT2(0,setup,"setscind"); - QDIO_DBF_HEX2(0,setup,&real_addr_local_summary_bit, - sizeof(unsigned long)); - QDIO_DBF_HEX2(0,setup,&real_addr_dev_st_chg_ind,sizeof(unsigned long)); - result = 0; -out: - mempool_free(scssc_area, qdio_mempool_scssc); - return result; - -} - -static unsigned int -tiqdio_set_delay_target(struct qdio_irq *irq_ptr, unsigned long delay_target) -{ - unsigned int resp_code; - int result; - void *ptr; - char dbf_text[15]; - - struct { - struct chsc_header request; - u16 operation_code; - u16 reserved1; - u32 reserved2; - u32 reserved3; - u32 reserved4[2]; - u32 delay_target; - u32 reserved5[1009]; - struct chsc_header response; - u32 reserved6; - } *scsscf_area; - - if (!irq_ptr->is_thinint_irq) - return -ENODEV; - - scsscf_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC); - if (!scsscf_area) { - QDIO_PRINT_WARN("No memory for setting delay target on " \ - "subchannel 0.%x.%x.\n", - irq_ptr->schid.ssid, irq_ptr->schid.sch_no); - return -ENOMEM; - } - scsscf_area->request = (struct chsc_header) { - .length = 0x0fe0, - .code = 0x1027, - }; - - scsscf_area->delay_target = delay_target<<16; - - result=chsc(scsscf_area); - if (result) { - QDIO_PRINT_WARN("could not set delay target on irq 0.%x.%x, " \ - "cc=%i. Continuing.\n", - irq_ptr->schid.ssid, irq_ptr->schid.sch_no, - result); - result = -EIO; - goto out; - } - - resp_code = scsscf_area->response.code; - if (resp_code!=QDIO_CHSC_RESPONSE_CODE_OK) { - QDIO_PRINT_WARN("response upon setting delay target " \ - "is 0x%x. 
Continuing.\n",resp_code); - sprintf(dbf_text,"sdtR%4x",resp_code); - QDIO_DBF_TEXT1(0,trace,dbf_text); - QDIO_DBF_TEXT1(0,setup,dbf_text); - ptr=&scsscf_area->response; - QDIO_DBF_HEX2(1,trace,&ptr,QDIO_DBF_TRACE_LEN); - } - QDIO_DBF_TEXT2(0,trace,"delytrgt"); - QDIO_DBF_HEX2(0,trace,&delay_target,sizeof(unsigned long)); - result = 0; /* not critical */ -out: - mempool_free(scsscf_area, qdio_mempool_scssc); - return result; -} - -int -qdio_cleanup(struct ccw_device *cdev, int how) -{ - struct qdio_irq *irq_ptr; - char dbf_text[15]; - int rc; - - irq_ptr = cdev->private->qdio_data; - if (!irq_ptr) - return -ENODEV; - - sprintf(dbf_text,"qcln%4x",irq_ptr->schid.sch_no); - QDIO_DBF_TEXT1(0,trace,dbf_text); - QDIO_DBF_TEXT0(0,setup,dbf_text); - - rc = qdio_shutdown(cdev, how); - if ((rc == 0) || (rc == -EINPROGRESS)) - rc = qdio_free(cdev); - return rc; -} - -int -qdio_shutdown(struct ccw_device *cdev, int how) -{ - struct qdio_irq *irq_ptr; - int i; - int result = 0; - int rc; - unsigned long flags; - int timeout; - char dbf_text[15]; - - irq_ptr = cdev->private->qdio_data; - if (!irq_ptr) - return -ENODEV; - - down(&irq_ptr->setting_up_sema); - - sprintf(dbf_text,"qsqs%4x",irq_ptr->schid.sch_no); - QDIO_DBF_TEXT1(0,trace,dbf_text); - QDIO_DBF_TEXT0(0,setup,dbf_text); - - /* mark all qs as uninteresting */ - for (i=0;i<irq_ptr->no_input_qs;i++) - atomic_set(&irq_ptr->input_qs[i]->is_in_shutdown,1); - - for (i=0;i<irq_ptr->no_output_qs;i++) - atomic_set(&irq_ptr->output_qs[i]->is_in_shutdown,1); - - tasklet_kill(&tiqdio_tasklet); - - for (i=0;i<irq_ptr->no_input_qs;i++) { - qdio_unmark_q(irq_ptr->input_qs[i]); - tasklet_kill(&irq_ptr->input_qs[i]->tasklet); - wait_event_interruptible_timeout(cdev->private->wait_q, - !atomic_read(&irq_ptr-> - input_qs[i]-> - use_count), - QDIO_NO_USE_COUNT_TIMEOUT); - if (atomic_read(&irq_ptr->input_qs[i]->use_count)) - result=-EINPROGRESS; - } - - for (i=0;i<irq_ptr->no_output_qs;i++) { - tasklet_kill(&irq_ptr->output_qs[i]->tasklet); - del_timer(&irq_ptr->output_qs[i]->timer); - wait_event_interruptible_timeout(cdev->private->wait_q, - !atomic_read(&irq_ptr-> - output_qs[i]-> - use_count), - QDIO_NO_USE_COUNT_TIMEOUT); - if (atomic_read(&irq_ptr->output_qs[i]->use_count)) - result=-EINPROGRESS; - } - - /* cleanup subchannel */ - spin_lock_irqsave(get_ccwdev_lock(cdev),flags); - if (how&QDIO_FLAG_CLEANUP_USING_CLEAR) { - rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP); - timeout=QDIO_CLEANUP_CLEAR_TIMEOUT; - } else if (how&QDIO_FLAG_CLEANUP_USING_HALT) { - rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP); - timeout=QDIO_CLEANUP_HALT_TIMEOUT; - } else { /* default behaviour */ - rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP); - timeout=QDIO_CLEANUP_HALT_TIMEOUT; - } - if (rc == -ENODEV) { - /* No need to wait for device no longer present. */ - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); - spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); - } else if (((void *)cdev->handler != (void *)qdio_handler) && rc == 0) { - /* - * Whoever put another handler there, has to cope with the - * interrupt theirself. Might happen if qdio_shutdown was - * called on already shutdown queues, but this shouldn't have - * bad side effects. 
- */ - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); - spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); - } else if (rc == 0) { - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP); - spin_unlock_irqrestore(get_ccwdev_lock(cdev),flags); - - wait_event_interruptible_timeout(cdev->private->wait_q, - irq_ptr->state == QDIO_IRQ_STATE_INACTIVE || - irq_ptr->state == QDIO_IRQ_STATE_ERR, - timeout); - } else { - QDIO_PRINT_INFO("ccw_device_{halt,clear} returned %d for " - "device %s\n", result, cdev->dev.bus_id); - spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); - result = rc; - goto out; - } - if (irq_ptr->is_thinint_irq) { - qdio_put_indicator((__u32*)irq_ptr->dev_st_chg_ind); - tiqdio_set_subchannel_ind(irq_ptr,1); - /* reset adapter interrupt indicators */ - } - - /* exchange int handlers, if necessary */ - if ((void*)cdev->handler == (void*)qdio_handler) - cdev->handler=irq_ptr->original_int_handler; - - /* Ignore errors. */ - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); -out: - up(&irq_ptr->setting_up_sema); - return result; -} - -int -qdio_free(struct ccw_device *cdev) -{ - struct qdio_irq *irq_ptr; - char dbf_text[15]; - - irq_ptr = cdev->private->qdio_data; - if (!irq_ptr) - return -ENODEV; - - down(&irq_ptr->setting_up_sema); - - sprintf(dbf_text,"qfqs%4x",irq_ptr->schid.sch_no); - QDIO_DBF_TEXT1(0,trace,dbf_text); - QDIO_DBF_TEXT0(0,setup,dbf_text); - - cdev->private->qdio_data = NULL; - - up(&irq_ptr->setting_up_sema); - - qdio_release_irq_memory(irq_ptr); - module_put(THIS_MODULE); - return 0; -} - -static void -qdio_allocate_do_dbf(struct qdio_initialize *init_data) -{ - char dbf_text[20]; /* if a printf printed out more than 8 chars */ - - sprintf(dbf_text,"qfmt:%x",init_data->q_format); - QDIO_DBF_TEXT0(0,setup,dbf_text); - QDIO_DBF_HEX0(0,setup,init_data->adapter_name,8); - sprintf(dbf_text,"qpff%4x",init_data->qib_param_field_format); - QDIO_DBF_TEXT0(0,setup,dbf_text); - QDIO_DBF_HEX0(0,setup,&init_data->qib_param_field,sizeof(char*)); - QDIO_DBF_HEX0(0,setup,&init_data->input_slib_elements,sizeof(long*)); - QDIO_DBF_HEX0(0,setup,&init_data->output_slib_elements,sizeof(long*)); - sprintf(dbf_text,"miit%4x",init_data->min_input_threshold); - QDIO_DBF_TEXT0(0,setup,dbf_text); - sprintf(dbf_text,"mait%4x",init_data->max_input_threshold); - QDIO_DBF_TEXT0(0,setup,dbf_text); - sprintf(dbf_text,"miot%4x",init_data->min_output_threshold); - QDIO_DBF_TEXT0(0,setup,dbf_text); - sprintf(dbf_text,"maot%4x",init_data->max_output_threshold); - QDIO_DBF_TEXT0(0,setup,dbf_text); - sprintf(dbf_text,"niq:%4x",init_data->no_input_qs); - QDIO_DBF_TEXT0(0,setup,dbf_text); - sprintf(dbf_text,"noq:%4x",init_data->no_output_qs); - QDIO_DBF_TEXT0(0,setup,dbf_text); - QDIO_DBF_HEX0(0,setup,&init_data->input_handler,sizeof(void*)); - QDIO_DBF_HEX0(0,setup,&init_data->output_handler,sizeof(void*)); - QDIO_DBF_HEX0(0,setup,&init_data->int_parm,sizeof(long)); - QDIO_DBF_HEX0(0,setup,&init_data->flags,sizeof(long)); - QDIO_DBF_HEX0(0,setup,&init_data->input_sbal_addr_array,sizeof(void*)); - QDIO_DBF_HEX0(0,setup,&init_data->output_sbal_addr_array,sizeof(void*)); -} - -static void -qdio_allocate_fill_input_desc(struct qdio_irq *irq_ptr, int i, int iqfmt) -{ - irq_ptr->input_qs[i]->is_iqdio_q = iqfmt; - irq_ptr->input_qs[i]->is_thinint_q = irq_ptr->is_thinint_irq; - - irq_ptr->qdr->qdf0[i].sliba=(unsigned long)(irq_ptr->input_qs[i]->slib); - - irq_ptr->qdr->qdf0[i].sla=(unsigned long)(irq_ptr->input_qs[i]->sl); - - irq_ptr->qdr->qdf0[i].slsba= - (unsigned 
long)(&irq_ptr->input_qs[i]->slsb.acc.val[0]); - - irq_ptr->qdr->qdf0[i].akey=QDIO_STORAGE_KEY; - irq_ptr->qdr->qdf0[i].bkey=QDIO_STORAGE_KEY; - irq_ptr->qdr->qdf0[i].ckey=QDIO_STORAGE_KEY; - irq_ptr->qdr->qdf0[i].dkey=QDIO_STORAGE_KEY; -} - -static void -qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i, - int j, int iqfmt) -{ - irq_ptr->output_qs[i]->is_iqdio_q = iqfmt; - irq_ptr->output_qs[i]->is_thinint_q = irq_ptr->is_thinint_irq; - - irq_ptr->qdr->qdf0[i+j].sliba=(unsigned long)(irq_ptr->output_qs[i]->slib); - - irq_ptr->qdr->qdf0[i+j].sla=(unsigned long)(irq_ptr->output_qs[i]->sl); - - irq_ptr->qdr->qdf0[i+j].slsba= - (unsigned long)(&irq_ptr->output_qs[i]->slsb.acc.val[0]); - - irq_ptr->qdr->qdf0[i+j].akey=QDIO_STORAGE_KEY; - irq_ptr->qdr->qdf0[i+j].bkey=QDIO_STORAGE_KEY; - irq_ptr->qdr->qdf0[i+j].ckey=QDIO_STORAGE_KEY; - irq_ptr->qdr->qdf0[i+j].dkey=QDIO_STORAGE_KEY; -} - - -static void -qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr) -{ - int i; - - for (i=0;i<irq_ptr->no_input_qs;i++) { - irq_ptr->input_qs[i]->siga_sync= - irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY; - irq_ptr->input_qs[i]->siga_in= - irq_ptr->qdioac&CHSC_FLAG_SIGA_INPUT_NECESSARY; - irq_ptr->input_qs[i]->siga_out= - irq_ptr->qdioac&CHSC_FLAG_SIGA_OUTPUT_NECESSARY; - irq_ptr->input_qs[i]->siga_sync_done_on_thinints= - irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS; - irq_ptr->input_qs[i]->hydra_gives_outbound_pcis= - irq_ptr->hydra_gives_outbound_pcis; - irq_ptr->input_qs[i]->siga_sync_done_on_outb_tis= - ((irq_ptr->qdioac& - (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS| - CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS))== - (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS| - CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS)); - - } -} - -static void -qdio_initialize_set_siga_flags_output(struct qdio_irq *irq_ptr) -{ - int i; - - for (i=0;i<irq_ptr->no_output_qs;i++) { - irq_ptr->output_qs[i]->siga_sync= - irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY; - irq_ptr->output_qs[i]->siga_in= - irq_ptr->qdioac&CHSC_FLAG_SIGA_INPUT_NECESSARY; - irq_ptr->output_qs[i]->siga_out= - irq_ptr->qdioac&CHSC_FLAG_SIGA_OUTPUT_NECESSARY; - irq_ptr->output_qs[i]->siga_sync_done_on_thinints= - irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS; - irq_ptr->output_qs[i]->hydra_gives_outbound_pcis= - irq_ptr->hydra_gives_outbound_pcis; - irq_ptr->output_qs[i]->siga_sync_done_on_outb_tis= - ((irq_ptr->qdioac& - (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS| - CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS))== - (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS| - CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS)); - - } -} - -static int -qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat, - int dstat) -{ - char dbf_text[15]; - struct qdio_irq *irq_ptr; - - irq_ptr = cdev->private->qdio_data; - - if (cstat || (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END))) { - sprintf(dbf_text,"ick1%4x",irq_ptr->schid.sch_no); - QDIO_DBF_TEXT2(1,trace,dbf_text); - QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int)); - QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int)); - QDIO_PRINT_ERR("received check condition on establish " \ - "queues on irq 0.%x.%x (cs=x%x, ds=x%x).\n", - irq_ptr->schid.ssid, irq_ptr->schid.sch_no, - cstat,dstat); - qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ERR); - } - - if (!(dstat & DEV_STAT_DEV_END)) { - QDIO_DBF_TEXT2(1,setup,"eq:no de"); - QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat)); - QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat)); - QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: didn't get " - "device end: dstat=%02x, cstat=%02x\n", - irq_ptr->schid.ssid, 
irq_ptr->schid.sch_no, - dstat, cstat); - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); - return 1; - } - - if (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END)) { - QDIO_DBF_TEXT2(1,setup,"eq:badio"); - QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat)); - QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat)); - QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: got " - "the following devstat: dstat=%02x, " - "cstat=%02x\n", irq_ptr->schid.ssid, - irq_ptr->schid.sch_no, dstat, cstat); - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); - return 1; - } - return 0; -} - -static void -qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, int dstat) -{ - struct qdio_irq *irq_ptr; - char dbf_text[15]; - - irq_ptr = cdev->private->qdio_data; - - sprintf(dbf_text,"qehi%4x",cdev->private->schid.sch_no); - QDIO_DBF_TEXT0(0,setup,dbf_text); - QDIO_DBF_TEXT0(0,trace,dbf_text); - - if (qdio_establish_irq_check_for_errors(cdev, cstat, dstat)) - return; - - qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ESTABLISHED); -} - -int -qdio_initialize(struct qdio_initialize *init_data) -{ - int rc; - char dbf_text[15]; - - sprintf(dbf_text,"qini%4x",init_data->cdev->private->schid.sch_no); - QDIO_DBF_TEXT0(0,setup,dbf_text); - QDIO_DBF_TEXT0(0,trace,dbf_text); - - rc = qdio_allocate(init_data); - if (rc == 0) { - rc = qdio_establish(init_data); - if (rc != 0) - qdio_free(init_data->cdev); - } - - return rc; -} - - -int -qdio_allocate(struct qdio_initialize *init_data) -{ - struct qdio_irq *irq_ptr; - char dbf_text[15]; - - sprintf(dbf_text,"qalc%4x",init_data->cdev->private->schid.sch_no); - QDIO_DBF_TEXT0(0,setup,dbf_text); - QDIO_DBF_TEXT0(0,trace,dbf_text); - if ( (init_data->no_input_qs>QDIO_MAX_QUEUES_PER_IRQ) || - (init_data->no_output_qs>QDIO_MAX_QUEUES_PER_IRQ) || - ((init_data->no_input_qs) && (!init_data->input_handler)) || - ((init_data->no_output_qs) && (!init_data->output_handler)) ) - return -EINVAL; - - if (!init_data->input_sbal_addr_array) - return -EINVAL; - - if (!init_data->output_sbal_addr_array) - return -EINVAL; - - qdio_allocate_do_dbf(init_data); - - /* create irq */ - irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); - - QDIO_DBF_TEXT0(0,setup,"irq_ptr:"); - QDIO_DBF_HEX0(0,setup,&irq_ptr,sizeof(void*)); - - if (!irq_ptr) { - QDIO_PRINT_ERR("allocation of irq_ptr failed!\n"); - return -ENOMEM; - } - - init_MUTEX(&irq_ptr->setting_up_sema); - - /* QDR must be in DMA area since CCW data address is only 32 bit */ - irq_ptr->qdr = (struct qdr *) __get_free_page(GFP_KERNEL | GFP_DMA); - if (!(irq_ptr->qdr)) { - free_page((unsigned long) irq_ptr); - QDIO_PRINT_ERR("allocation of irq_ptr->qdr failed!\n"); - return -ENOMEM; - } - QDIO_DBF_TEXT0(0,setup,"qdr:"); - QDIO_DBF_HEX0(0,setup,&irq_ptr->qdr,sizeof(void*)); - - if (qdio_alloc_qs(irq_ptr, - init_data->no_input_qs, - init_data->no_output_qs)) { - QDIO_PRINT_ERR("queue allocation failed!\n"); - qdio_release_irq_memory(irq_ptr); - return -ENOMEM; - } - - init_data->cdev->private->qdio_data = irq_ptr; - - qdio_set_state(irq_ptr,QDIO_IRQ_STATE_INACTIVE); - - return 0; -} - -static int qdio_fill_irq(struct qdio_initialize *init_data) -{ - int i; - char dbf_text[15]; - struct ciw *ciw; - int is_iqdio; - struct qdio_irq *irq_ptr; - - irq_ptr = init_data->cdev->private->qdio_data; - - memset(irq_ptr,0,((char*)&irq_ptr->qdr)-((char*)irq_ptr)); - - /* wipes qib.ac, required by ar7063 */ - memset(irq_ptr->qdr,0,sizeof(struct qdr)); - - irq_ptr->int_parm=init_data->int_parm; - - irq_ptr->schid = ccw_device_get_subchannel_id(init_data->cdev); - 
irq_ptr->no_input_qs=init_data->no_input_qs; - irq_ptr->no_output_qs=init_data->no_output_qs; - - if (init_data->q_format==QDIO_IQDIO_QFMT) { - irq_ptr->is_iqdio_irq=1; - irq_ptr->is_thinint_irq=1; - } else { - irq_ptr->is_iqdio_irq=0; - irq_ptr->is_thinint_irq=hydra_thinints; - } - sprintf(dbf_text,"is_i_t%1x%1x", - irq_ptr->is_iqdio_irq,irq_ptr->is_thinint_irq); - QDIO_DBF_TEXT2(0,setup,dbf_text); - - if (irq_ptr->is_thinint_irq) { - irq_ptr->dev_st_chg_ind = qdio_get_indicator(); - QDIO_DBF_HEX1(0,setup,&irq_ptr->dev_st_chg_ind,sizeof(void*)); - if (!irq_ptr->dev_st_chg_ind) { - QDIO_PRINT_WARN("no indicator location available " \ - "for irq 0.%x.%x\n", - irq_ptr->schid.ssid, irq_ptr->schid.sch_no); - qdio_release_irq_memory(irq_ptr); - return -ENOBUFS; - } - } - - /* defaults */ - irq_ptr->equeue.cmd=DEFAULT_ESTABLISH_QS_CMD; - irq_ptr->equeue.count=DEFAULT_ESTABLISH_QS_COUNT; - irq_ptr->aqueue.cmd=DEFAULT_ACTIVATE_QS_CMD; - irq_ptr->aqueue.count=DEFAULT_ACTIVATE_QS_COUNT; - - qdio_fill_qs(irq_ptr, init_data->cdev, - init_data->no_input_qs, - init_data->no_output_qs, - init_data->input_handler, - init_data->output_handler,init_data->int_parm, - init_data->q_format,init_data->flags, - init_data->input_sbal_addr_array, - init_data->output_sbal_addr_array); - - if (!try_module_get(THIS_MODULE)) { - QDIO_PRINT_CRIT("try_module_get() failed!\n"); - qdio_release_irq_memory(irq_ptr); - return -EINVAL; - } - - qdio_fill_thresholds(irq_ptr,init_data->no_input_qs, - init_data->no_output_qs, - init_data->min_input_threshold, - init_data->max_input_threshold, - init_data->min_output_threshold, - init_data->max_output_threshold); - - /* fill in qdr */ - irq_ptr->qdr->qfmt=init_data->q_format; - irq_ptr->qdr->iqdcnt=init_data->no_input_qs; - irq_ptr->qdr->oqdcnt=init_data->no_output_qs; - irq_ptr->qdr->iqdsz=sizeof(struct qdesfmt0)/4; /* size in words */ - irq_ptr->qdr->oqdsz=sizeof(struct qdesfmt0)/4; - - irq_ptr->qdr->qiba=(unsigned long)&irq_ptr->qib; - irq_ptr->qdr->qkey=QDIO_STORAGE_KEY; - - /* fill in qib */ - irq_ptr->is_qebsm = is_passthrough; - if (irq_ptr->is_qebsm) - irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM; - - irq_ptr->qib.qfmt=init_data->q_format; - if (init_data->no_input_qs) - irq_ptr->qib.isliba=(unsigned long)(irq_ptr->input_qs[0]->slib); - if (init_data->no_output_qs) - irq_ptr->qib.osliba=(unsigned long)(irq_ptr->output_qs[0]->slib); - memcpy(irq_ptr->qib.ebcnam,init_data->adapter_name,8); - - qdio_set_impl_params(irq_ptr,init_data->qib_param_field_format, - init_data->qib_param_field, - init_data->no_input_qs, - init_data->no_output_qs, - init_data->input_slib_elements, - init_data->output_slib_elements); - - /* first input descriptors, then output descriptors */ - is_iqdio = (init_data->q_format == QDIO_IQDIO_QFMT) ? 1 : 0; - for (i=0;i<init_data->no_input_qs;i++) - qdio_allocate_fill_input_desc(irq_ptr, i, is_iqdio); - - for (i=0;i<init_data->no_output_qs;i++) - qdio_allocate_fill_output_desc(irq_ptr, i, - init_data->no_input_qs, - is_iqdio); - - /* qdr, qib, sls, slsbs, slibs, sbales filled. */ - - /* get qdio commands */ - ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE); - if (!ciw) { - QDIO_DBF_TEXT2(1,setup,"no eq"); - QDIO_PRINT_INFO("No equeue CIW found for QDIO commands. " - "Trying to use default.\n"); - } else - irq_ptr->equeue = *ciw; - ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE); - if (!ciw) { - QDIO_DBF_TEXT2(1,setup,"no aq"); - QDIO_PRINT_INFO("No aqueue CIW found for QDIO commands. 
" - "Trying to use default.\n"); - } else - irq_ptr->aqueue = *ciw; - - /* Set new interrupt handler. */ - irq_ptr->original_int_handler = init_data->cdev->handler; - init_data->cdev->handler = qdio_handler; - - return 0; -} - -int -qdio_establish(struct qdio_initialize *init_data) -{ - struct qdio_irq *irq_ptr; - unsigned long saveflags; - int result, result2; - struct ccw_device *cdev; - char dbf_text[20]; - - cdev=init_data->cdev; - irq_ptr = cdev->private->qdio_data; - if (!irq_ptr) - return -EINVAL; - - if (cdev->private->state != DEV_STATE_ONLINE) - return -EINVAL; - - down(&irq_ptr->setting_up_sema); - - qdio_fill_irq(init_data); - - /* the thinint CHSC stuff */ - if (irq_ptr->is_thinint_irq) { - - result = tiqdio_set_subchannel_ind(irq_ptr,0); - if (result) { - up(&irq_ptr->setting_up_sema); - qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); - return result; - } - tiqdio_set_delay_target(irq_ptr,TIQDIO_DELAY_TARGET); - } - - sprintf(dbf_text,"qest%4x",cdev->private->schid.sch_no); - QDIO_DBF_TEXT0(0,setup,dbf_text); - QDIO_DBF_TEXT0(0,trace,dbf_text); - - /* establish q */ - irq_ptr->ccw.cmd_code=irq_ptr->equeue.cmd; - irq_ptr->ccw.flags=CCW_FLAG_SLI; - irq_ptr->ccw.count=irq_ptr->equeue.count; - irq_ptr->ccw.cda=QDIO_GET_ADDR(irq_ptr->qdr); - - spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags); - - ccw_device_set_options_mask(cdev, 0); - result = ccw_device_start(cdev, &irq_ptr->ccw, - QDIO_DOING_ESTABLISH, 0, 0); - if (result) { - result2 = ccw_device_start(cdev, &irq_ptr->ccw, - QDIO_DOING_ESTABLISH, 0, 0); - sprintf(dbf_text,"eq:io%4x",result); - QDIO_DBF_TEXT2(1,setup,dbf_text); - if (result2) { - sprintf(dbf_text,"eq:io%4x",result); - QDIO_DBF_TEXT2(1,setup,dbf_text); - } - QDIO_PRINT_WARN("establish queues on irq 0.%x.%04x: do_IO " \ - "returned %i, next try returned %i\n", - irq_ptr->schid.ssid, irq_ptr->schid.sch_no, - result, result2); - result=result2; - } - - spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags); - - if (result) { - up(&irq_ptr->setting_up_sema); - qdio_shutdown(cdev,QDIO_FLAG_CLEANUP_USING_CLEAR); - return result; - } - - wait_event_interruptible_timeout(cdev->private->wait_q, - irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED || - irq_ptr->state == QDIO_IRQ_STATE_ERR, - QDIO_ESTABLISH_TIMEOUT); - - if (irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED) - result = 0; - else { - up(&irq_ptr->setting_up_sema); - qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); - return -EIO; - } - - qdio_get_ssqd_siga(irq_ptr); - /* if this gets set once, we're running under VM and can omit SVSes */ - if (irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY) - omit_svs=1; - - sprintf(dbf_text,"qdioac%2x",irq_ptr->qdioac); - QDIO_DBF_TEXT2(0,setup,dbf_text); - - sprintf(dbf_text,"qib ac%2x",irq_ptr->qib.ac); - QDIO_DBF_TEXT2(0,setup,dbf_text); - - irq_ptr->hydra_gives_outbound_pcis= - irq_ptr->qib.ac&QIB_AC_OUTBOUND_PCI_SUPPORTED; - irq_ptr->sync_done_on_outb_pcis= - irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS; - - qdio_initialize_set_siga_flags_input(irq_ptr); - qdio_initialize_set_siga_flags_output(irq_ptr); - - up(&irq_ptr->setting_up_sema); - - return result; - -} - -int -qdio_activate(struct ccw_device *cdev, int flags) -{ - struct qdio_irq *irq_ptr; - int i,result=0,result2; - unsigned long saveflags; - char dbf_text[20]; /* see qdio_initialize */ - - irq_ptr = cdev->private->qdio_data; - if (!irq_ptr) - return -ENODEV; - - if (cdev->private->state != DEV_STATE_ONLINE) - return -EINVAL; - - down(&irq_ptr->setting_up_sema); - if 
(irq_ptr->state==QDIO_IRQ_STATE_INACTIVE) { - result=-EBUSY; - goto out; - } - - sprintf(dbf_text,"qact%4x", irq_ptr->schid.sch_no); - QDIO_DBF_TEXT2(0,setup,dbf_text); - QDIO_DBF_TEXT2(0,trace,dbf_text); - - /* activate q */ - irq_ptr->ccw.cmd_code=irq_ptr->aqueue.cmd; - irq_ptr->ccw.flags=CCW_FLAG_SLI; - irq_ptr->ccw.count=irq_ptr->aqueue.count; - irq_ptr->ccw.cda=QDIO_GET_ADDR(0); - - spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags); - - ccw_device_set_options(cdev, CCWDEV_REPORT_ALL); - result=ccw_device_start(cdev,&irq_ptr->ccw,QDIO_DOING_ACTIVATE, - 0, DOIO_DENY_PREFETCH); - if (result) { - result2=ccw_device_start(cdev,&irq_ptr->ccw, - QDIO_DOING_ACTIVATE,0,0); - sprintf(dbf_text,"aq:io%4x",result); - QDIO_DBF_TEXT2(1,setup,dbf_text); - if (result2) { - sprintf(dbf_text,"aq:io%4x",result); - QDIO_DBF_TEXT2(1,setup,dbf_text); - } - QDIO_PRINT_WARN("activate queues on irq 0.%x.%04x: do_IO " \ - "returned %i, next try returned %i\n", - irq_ptr->schid.ssid, irq_ptr->schid.sch_no, - result, result2); - result=result2; - } - - spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags); - if (result) - goto out; - - for (i=0;i<irq_ptr->no_input_qs;i++) { - if (irq_ptr->is_thinint_irq) { - /* - * that way we know, that, if we will get interrupted - * by tiqdio_inbound_processing, qdio_unmark_q will - * not be called - */ - qdio_reserve_q(irq_ptr->input_qs[i]); - qdio_mark_tiq(irq_ptr->input_qs[i]); - qdio_release_q(irq_ptr->input_qs[i]); - } - } - - if (flags&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT) { - for (i=0;i<irq_ptr->no_input_qs;i++) { - irq_ptr->input_qs[i]->is_input_q|= - QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT; - } - } - - msleep(QDIO_ACTIVATE_TIMEOUT); - switch (irq_ptr->state) { - case QDIO_IRQ_STATE_STOPPED: - case QDIO_IRQ_STATE_ERR: - up(&irq_ptr->setting_up_sema); - qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); - down(&irq_ptr->setting_up_sema); - result = -EIO; - break; - default: - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE); - result = 0; - } - out: - up(&irq_ptr->setting_up_sema); - - return result; -} - -/* buffers filled forwards again to make Rick happy */ -static void -qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx, - unsigned int count, struct qdio_buffer *buffers) -{ - struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr; - int tmp = 0; - - qidx &= (QDIO_MAX_BUFFERS_PER_Q - 1); - if (irq->is_qebsm) { - while (count) { - tmp = set_slsb(q, &qidx, SLSB_CU_INPUT_EMPTY, &count); - if (!tmp) - return; - } - return; - } - for (;;) { - set_slsb(q, &qidx, SLSB_CU_INPUT_EMPTY, &count); - count--; - if (!count) break; - qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1); - } -} - -static void -qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx, - unsigned int count, struct qdio_buffer *buffers) -{ - struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr; - int tmp = 0; - - qidx &= (QDIO_MAX_BUFFERS_PER_Q - 1); - if (irq->is_qebsm) { - while (count) { - tmp = set_slsb(q, &qidx, SLSB_CU_OUTPUT_PRIMED, &count); - if (!tmp) - return; - } - return; - } - - for (;;) { - set_slsb(q, &qidx, SLSB_CU_OUTPUT_PRIMED, &count); - count--; - if (!count) break; - qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1); - } -} - -static void -do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags, - unsigned int qidx, unsigned int count, - struct qdio_buffer *buffers) -{ - int used_elements; - - /* This is the inbound handling of queues */ - used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count; - - qdio_do_qdio_fill_input(q,qidx,count,buffers); - 
- if ((used_elements+count==QDIO_MAX_BUFFERS_PER_Q)&& - (callflags&QDIO_FLAG_UNDER_INTERRUPT)) - atomic_xchg(&q->polling,0); - - if (used_elements) - return; - if (callflags&QDIO_FLAG_DONT_SIGA) - return; - if (q->siga_in) { - int result; - - result=qdio_siga_input(q); - if (result) { - if (q->siga_error) - q->error_status_flags|= - QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR; - q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR; - q->siga_error=result; - } - } - - qdio_mark_q(q); -} - -static void -do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags, - unsigned int qidx, unsigned int count, - struct qdio_buffer *buffers) -{ - int used_elements; - unsigned int cnt, start_buf; - unsigned char state = 0; - struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr; - - /* This is the outbound handling of queues */ - qdio_do_qdio_fill_output(q,qidx,count,buffers); - - used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count; - - if (callflags&QDIO_FLAG_DONT_SIGA) { - qdio_perf_stat_inc(&perf_stats.outbound_cnt); - return; - } - if (callflags & QDIO_FLAG_PCI_OUT) - q->is_pci_out = 1; - else - q->is_pci_out = 0; - if (q->is_iqdio_q) { - /* one siga for every sbal */ - while (count--) - qdio_kick_outbound_q(q); - - __qdio_outbound_processing(q); - } else { - /* under VM, we do a SIGA sync unconditionally */ - SYNC_MEMORY; - else { - /* - * w/o shadow queues (else branch of - * SYNC_MEMORY :-/ ), we try to - * fast-requeue buffers - */ - if (irq->is_qebsm) { - cnt = 1; - start_buf = ((qidx+QDIO_MAX_BUFFERS_PER_Q-1) & - (QDIO_MAX_BUFFERS_PER_Q-1)); - qdio_do_eqbs(q, &state, &start_buf, &cnt); - } else - state = q->slsb.acc.val[(qidx+QDIO_MAX_BUFFERS_PER_Q-1) - &(QDIO_MAX_BUFFERS_PER_Q-1) ]; - if (state != SLSB_CU_OUTPUT_PRIMED) { - qdio_kick_outbound_q(q); - } else { - QDIO_DBF_TEXT3(0,trace, "fast-req"); - qdio_perf_stat_inc(&perf_stats.fast_reqs); - } - } - /* - * only marking the q could take too long, - * the upper layer module could do a lot of - * traffic in that time - */ - __qdio_outbound_processing(q); - } - - qdio_perf_stat_inc(&perf_stats.outbound_cnt); -} - -/* count must be 1 in iqdio */ -int -do_QDIO(struct ccw_device *cdev,unsigned int callflags, - unsigned int queue_number, unsigned int qidx, - unsigned int count,struct qdio_buffer *buffers) -{ - struct qdio_irq *irq_ptr; -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[20]; - - sprintf(dbf_text,"doQD%04x",cdev->private->schid.sch_no); - QDIO_DBF_TEXT3(0,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - - if ( (qidx>QDIO_MAX_BUFFERS_PER_Q) || - (count>QDIO_MAX_BUFFERS_PER_Q) || - (queue_number>QDIO_MAX_QUEUES_PER_IRQ) ) - return -EINVAL; - - if (count==0) - return 0; - - irq_ptr = cdev->private->qdio_data; - if (!irq_ptr) - return -ENODEV; - -#ifdef CONFIG_QDIO_DEBUG - if (callflags&QDIO_FLAG_SYNC_INPUT) - QDIO_DBF_HEX3(0,trace,&irq_ptr->input_qs[queue_number], - sizeof(void*)); - else - QDIO_DBF_HEX3(0,trace,&irq_ptr->output_qs[queue_number], - sizeof(void*)); - sprintf(dbf_text,"flag%04x",callflags); - QDIO_DBF_TEXT3(0,trace,dbf_text); - sprintf(dbf_text,"qi%02xct%02x",qidx,count); - QDIO_DBF_TEXT3(0,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - - if (irq_ptr->state!=QDIO_IRQ_STATE_ACTIVE) - return -EBUSY; - - if (callflags&QDIO_FLAG_SYNC_INPUT) - do_qdio_handle_inbound(irq_ptr->input_qs[queue_number], - callflags, qidx, count, buffers); - else if (callflags&QDIO_FLAG_SYNC_OUTPUT) - do_qdio_handle_outbound(irq_ptr->output_qs[queue_number], - callflags, qidx, count, buffers); - else { - 
QDIO_DBF_TEXT3(1,trace,"doQD:inv"); - return -EINVAL; - } - return 0; -} - -static int -qdio_perf_procfile_read(char *buffer, char **buffer_location, off_t offset, - int buffer_length, int *eof, void *data) -{ - int c=0; - - /* we are always called with buffer_length=4k, so we all - deliver on the first read */ - if (offset>0) - return 0; - -#define _OUTP_IT(x...) c+=sprintf(buffer+c,x) -#ifdef CONFIG_64BIT - _OUTP_IT("Number of tasklet runs (total) : %li\n", - (long)atomic64_read(&perf_stats.tl_runs)); - _OUTP_IT("Inbound tasklet runs tried/retried : %li/%li\n", - (long)atomic64_read(&perf_stats.inbound_tl_runs), - (long)atomic64_read(&perf_stats.inbound_tl_runs_resched)); - _OUTP_IT("Inbound-thin tasklet runs tried/retried : %li/%li\n", - (long)atomic64_read(&perf_stats.inbound_thin_tl_runs), - (long)atomic64_read(&perf_stats.inbound_thin_tl_runs_resched)); - _OUTP_IT("Outbound tasklet runs tried/retried : %li/%li\n", - (long)atomic64_read(&perf_stats.outbound_tl_runs), - (long)atomic64_read(&perf_stats.outbound_tl_runs_resched)); - _OUTP_IT("\n"); - _OUTP_IT("Number of SIGA sync's issued : %li\n", - (long)atomic64_read(&perf_stats.siga_syncs)); - _OUTP_IT("Number of SIGA in's issued : %li\n", - (long)atomic64_read(&perf_stats.siga_ins)); - _OUTP_IT("Number of SIGA out's issued : %li\n", - (long)atomic64_read(&perf_stats.siga_outs)); - _OUTP_IT("Number of PCIs caught : %li\n", - (long)atomic64_read(&perf_stats.pcis)); - _OUTP_IT("Number of adapter interrupts caught : %li\n", - (long)atomic64_read(&perf_stats.thinints)); - _OUTP_IT("Number of fast requeues (outg. SBALs w/o SIGA) : %li\n", - (long)atomic64_read(&perf_stats.fast_reqs)); - _OUTP_IT("\n"); - _OUTP_IT("Number of inbound transfers : %li\n", - (long)atomic64_read(&perf_stats.inbound_cnt)); - _OUTP_IT("Number of do_QDIOs outbound : %li\n", - (long)atomic64_read(&perf_stats.outbound_cnt)); -#else /* CONFIG_64BIT */ - _OUTP_IT("Number of tasklet runs (total) : %i\n", - atomic_read(&perf_stats.tl_runs)); - _OUTP_IT("Inbound tasklet runs tried/retried : %i/%i\n", - atomic_read(&perf_stats.inbound_tl_runs), - atomic_read(&perf_stats.inbound_tl_runs_resched)); - _OUTP_IT("Inbound-thin tasklet runs tried/retried : %i/%i\n", - atomic_read(&perf_stats.inbound_thin_tl_runs), - atomic_read(&perf_stats.inbound_thin_tl_runs_resched)); - _OUTP_IT("Outbound tasklet runs tried/retried : %i/%i\n", - atomic_read(&perf_stats.outbound_tl_runs), - atomic_read(&perf_stats.outbound_tl_runs_resched)); - _OUTP_IT("\n"); - _OUTP_IT("Number of SIGA sync's issued : %i\n", - atomic_read(&perf_stats.siga_syncs)); - _OUTP_IT("Number of SIGA in's issued : %i\n", - atomic_read(&perf_stats.siga_ins)); - _OUTP_IT("Number of SIGA out's issued : %i\n", - atomic_read(&perf_stats.siga_outs)); - _OUTP_IT("Number of PCIs caught : %i\n", - atomic_read(&perf_stats.pcis)); - _OUTP_IT("Number of adapter interrupts caught : %i\n", - atomic_read(&perf_stats.thinints)); - _OUTP_IT("Number of fast requeues (outg. 
SBALs w/o SIGA) : %i\n", - atomic_read(&perf_stats.fast_reqs)); - _OUTP_IT("\n"); - _OUTP_IT("Number of inbound transfers : %i\n", - atomic_read(&perf_stats.inbound_cnt)); - _OUTP_IT("Number of do_QDIOs outbound : %i\n", - atomic_read(&perf_stats.outbound_cnt)); -#endif /* CONFIG_64BIT */ - _OUTP_IT("\n"); - - return c; -} - -static struct proc_dir_entry *qdio_perf_proc_file; - -static void -qdio_add_procfs_entry(void) -{ - proc_perf_file_registration=0; - qdio_perf_proc_file=create_proc_entry(QDIO_PERF, - S_IFREG|0444,NULL); - if (qdio_perf_proc_file) { - qdio_perf_proc_file->read_proc=&qdio_perf_procfile_read; - } else proc_perf_file_registration=-1; - - if (proc_perf_file_registration) - QDIO_PRINT_WARN("was not able to register perf. " \ - "proc-file (%i).\n", - proc_perf_file_registration); -} - -static void -qdio_remove_procfs_entry(void) -{ - if (!proc_perf_file_registration) /* means if it went ok earlier */ - remove_proc_entry(QDIO_PERF,NULL); -} - -/** - * attributes in sysfs - *****************************************************************************/ - -static ssize_t -qdio_performance_stats_show(struct bus_type *bus, char *buf) -{ - return sprintf(buf, "%i\n", qdio_performance_stats ? 1 : 0); -} - -static ssize_t -qdio_performance_stats_store(struct bus_type *bus, const char *buf, size_t count) -{ - unsigned long i; - int ret; - - ret = strict_strtoul(buf, 16, &i); - if (!ret && ((i == 0) || (i == 1))) { - if (i == qdio_performance_stats) - return count; - qdio_performance_stats = i; - if (i==0) { - /* reset perf. stat. info */ -#ifdef CONFIG_64BIT - atomic64_set(&perf_stats.tl_runs, 0); - atomic64_set(&perf_stats.outbound_tl_runs, 0); - atomic64_set(&perf_stats.inbound_tl_runs, 0); - atomic64_set(&perf_stats.inbound_tl_runs_resched, 0); - atomic64_set(&perf_stats.inbound_thin_tl_runs, 0); - atomic64_set(&perf_stats.inbound_thin_tl_runs_resched, - 0); - atomic64_set(&perf_stats.siga_outs, 0); - atomic64_set(&perf_stats.siga_ins, 0); - atomic64_set(&perf_stats.siga_syncs, 0); - atomic64_set(&perf_stats.pcis, 0); - atomic64_set(&perf_stats.thinints, 0); - atomic64_set(&perf_stats.fast_reqs, 0); - atomic64_set(&perf_stats.outbound_cnt, 0); - atomic64_set(&perf_stats.inbound_cnt, 0); -#else /* CONFIG_64BIT */ - atomic_set(&perf_stats.tl_runs, 0); - atomic_set(&perf_stats.outbound_tl_runs, 0); - atomic_set(&perf_stats.inbound_tl_runs, 0); - atomic_set(&perf_stats.inbound_tl_runs_resched, 0); - atomic_set(&perf_stats.inbound_thin_tl_runs, 0); - atomic_set(&perf_stats.inbound_thin_tl_runs_resched, 0); - atomic_set(&perf_stats.siga_outs, 0); - atomic_set(&perf_stats.siga_ins, 0); - atomic_set(&perf_stats.siga_syncs, 0); - atomic_set(&perf_stats.pcis, 0); - atomic_set(&perf_stats.thinints, 0); - atomic_set(&perf_stats.fast_reqs, 0); - atomic_set(&perf_stats.outbound_cnt, 0); - atomic_set(&perf_stats.inbound_cnt, 0); -#endif /* CONFIG_64BIT */ - } - } else { - QDIO_PRINT_ERR("QDIO performance_stats: write 0 or 1 to this file!\n"); - return -EINVAL; - } - return count; -} - -static BUS_ATTR(qdio_performance_stats, 0644, qdio_performance_stats_show, - qdio_performance_stats_store); - -static void -tiqdio_register_thinints(void) -{ - char dbf_text[20]; - - tiqdio_ind = - s390_register_adapter_interrupt(&tiqdio_thinint_handler, NULL, - TIQDIO_THININT_ISC); - if (IS_ERR(tiqdio_ind)) { - sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_ind)); - QDIO_DBF_TEXT0(0,setup,dbf_text); - QDIO_PRINT_ERR("failed to register adapter handler " \ - "(rc=%li).\nAdapter interrupts might " \ - "not work. 
Continuing.\n", - PTR_ERR(tiqdio_ind)); - tiqdio_ind = NULL; - } -} - -static void -tiqdio_unregister_thinints(void) -{ - if (tiqdio_ind) - s390_unregister_adapter_interrupt(tiqdio_ind, - TIQDIO_THININT_ISC); -} - -static int -qdio_get_qdio_memory(void) -{ - int i; - indicator_used[0]=1; - - for (i=1;i<INDICATORS_PER_CACHELINE;i++) - indicator_used[i]=0; - indicators = kzalloc(sizeof(__u32)*(INDICATORS_PER_CACHELINE), - GFP_KERNEL); - if (!indicators) - return -ENOMEM; - return 0; -} - -static void -qdio_release_qdio_memory(void) -{ - kfree(indicators); -} - -static void -qdio_unregister_dbf_views(void) -{ - if (qdio_dbf_setup) - debug_unregister(qdio_dbf_setup); - if (qdio_dbf_sbal) - debug_unregister(qdio_dbf_sbal); - if (qdio_dbf_sense) - debug_unregister(qdio_dbf_sense); - if (qdio_dbf_trace) - debug_unregister(qdio_dbf_trace); -#ifdef CONFIG_QDIO_DEBUG - if (qdio_dbf_slsb_out) - debug_unregister(qdio_dbf_slsb_out); - if (qdio_dbf_slsb_in) - debug_unregister(qdio_dbf_slsb_in); -#endif /* CONFIG_QDIO_DEBUG */ -} - -static int -qdio_register_dbf_views(void) -{ - qdio_dbf_setup=debug_register(QDIO_DBF_SETUP_NAME, - QDIO_DBF_SETUP_PAGES, - QDIO_DBF_SETUP_NR_AREAS, - QDIO_DBF_SETUP_LEN); - if (!qdio_dbf_setup) - goto oom; - debug_register_view(qdio_dbf_setup,&debug_hex_ascii_view); - debug_set_level(qdio_dbf_setup,QDIO_DBF_SETUP_LEVEL); - - qdio_dbf_sbal=debug_register(QDIO_DBF_SBAL_NAME, - QDIO_DBF_SBAL_PAGES, - QDIO_DBF_SBAL_NR_AREAS, - QDIO_DBF_SBAL_LEN); - if (!qdio_dbf_sbal) - goto oom; - - debug_register_view(qdio_dbf_sbal,&debug_hex_ascii_view); - debug_set_level(qdio_dbf_sbal,QDIO_DBF_SBAL_LEVEL); - - qdio_dbf_sense=debug_register(QDIO_DBF_SENSE_NAME, - QDIO_DBF_SENSE_PAGES, - QDIO_DBF_SENSE_NR_AREAS, - QDIO_DBF_SENSE_LEN); - if (!qdio_dbf_sense) - goto oom; - - debug_register_view(qdio_dbf_sense,&debug_hex_ascii_view); - debug_set_level(qdio_dbf_sense,QDIO_DBF_SENSE_LEVEL); - - qdio_dbf_trace=debug_register(QDIO_DBF_TRACE_NAME, - QDIO_DBF_TRACE_PAGES, - QDIO_DBF_TRACE_NR_AREAS, - QDIO_DBF_TRACE_LEN); - if (!qdio_dbf_trace) - goto oom; - - debug_register_view(qdio_dbf_trace,&debug_hex_ascii_view); - debug_set_level(qdio_dbf_trace,QDIO_DBF_TRACE_LEVEL); - -#ifdef CONFIG_QDIO_DEBUG - qdio_dbf_slsb_out=debug_register(QDIO_DBF_SLSB_OUT_NAME, - QDIO_DBF_SLSB_OUT_PAGES, - QDIO_DBF_SLSB_OUT_NR_AREAS, - QDIO_DBF_SLSB_OUT_LEN); - if (!qdio_dbf_slsb_out) - goto oom; - debug_register_view(qdio_dbf_slsb_out,&debug_hex_ascii_view); - debug_set_level(qdio_dbf_slsb_out,QDIO_DBF_SLSB_OUT_LEVEL); - - qdio_dbf_slsb_in=debug_register(QDIO_DBF_SLSB_IN_NAME, - QDIO_DBF_SLSB_IN_PAGES, - QDIO_DBF_SLSB_IN_NR_AREAS, - QDIO_DBF_SLSB_IN_LEN); - if (!qdio_dbf_slsb_in) - goto oom; - debug_register_view(qdio_dbf_slsb_in,&debug_hex_ascii_view); - debug_set_level(qdio_dbf_slsb_in,QDIO_DBF_SLSB_IN_LEVEL); -#endif /* CONFIG_QDIO_DEBUG */ - return 0; -oom: - QDIO_PRINT_ERR("not enough memory for dbf.\n"); - qdio_unregister_dbf_views(); - return -ENOMEM; -} - -static void *qdio_mempool_alloc(gfp_t gfp_mask, void *size) -{ - return (void *) get_zeroed_page(gfp_mask|GFP_DMA); -} - -static void qdio_mempool_free(void *element, void *size) -{ - free_page((unsigned long) element); -} - -static int __init -init_QDIO(void) -{ - int res; - void *ptr; - - printk("qdio: loading %s\n",version); - - res=qdio_get_qdio_memory(); - if (res) - return res; - - qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q), - 256, 0, NULL); - if (!qdio_q_cache) { - qdio_release_qdio_memory(); - return -ENOMEM; - } - - res = 
qdio_register_dbf_views(); - if (res) { - kmem_cache_destroy(qdio_q_cache); - qdio_release_qdio_memory(); - return res; - } - - QDIO_DBF_TEXT0(0,setup,"initQDIO"); - res = bus_create_file(&ccw_bus_type, &bus_attr_qdio_performance_stats); - - memset((void*)&perf_stats,0,sizeof(perf_stats)); - QDIO_DBF_TEXT0(0,setup,"perfstat"); - ptr=&perf_stats; - QDIO_DBF_HEX0(0,setup,&ptr,sizeof(void*)); - - qdio_add_procfs_entry(); - - qdio_mempool_scssc = mempool_create(QDIO_MEMPOOL_SCSSC_ELEMENTS, - qdio_mempool_alloc, - qdio_mempool_free, NULL); - - isc_register(QDIO_AIRQ_ISC); - if (tiqdio_check_chsc_availability()) - QDIO_PRINT_ERR("Not all CHSCs supported. Continuing.\n"); - - tiqdio_register_thinints(); - - return 0; - } - -static void __exit -cleanup_QDIO(void) -{ - tiqdio_unregister_thinints(); - isc_unregister(QDIO_AIRQ_ISC); - qdio_remove_procfs_entry(); - qdio_release_qdio_memory(); - qdio_unregister_dbf_views(); - mempool_destroy(qdio_mempool_scssc); - kmem_cache_destroy(qdio_q_cache); - bus_remove_file(&ccw_bus_type, &bus_attr_qdio_performance_stats); - printk("qdio: %s: module removed\n",version); -} - -module_init(init_QDIO); -module_exit(cleanup_QDIO); - -EXPORT_SYMBOL(qdio_allocate); -EXPORT_SYMBOL(qdio_establish); -EXPORT_SYMBOL(qdio_initialize); -EXPORT_SYMBOL(qdio_activate); -EXPORT_SYMBOL(do_QDIO); -EXPORT_SYMBOL(qdio_shutdown); -EXPORT_SYMBOL(qdio_free); -EXPORT_SYMBOL(qdio_cleanup); -EXPORT_SYMBOL(qdio_synchronize); diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index 7656081a24d..c1a70985abf 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h @@ -1,66 +1,20 @@ +/* + * linux/drivers/s390/cio/qdio.h + * + * Copyright 2000,2008 IBM Corp. + * Author(s): Utz Bacher <utz.bacher@de.ibm.com> + * Jan Glauber <jang@linux.vnet.ibm.com> + */ #ifndef _CIO_QDIO_H #define _CIO_QDIO_H #include <asm/page.h> -#include <asm/isc.h> #include <asm/schid.h> +#include "chsc.h" -#ifdef CONFIG_QDIO_DEBUG -#define QDIO_VERBOSE_LEVEL 9 -#else /* CONFIG_QDIO_DEBUG */ -#define QDIO_VERBOSE_LEVEL 5 -#endif /* CONFIG_QDIO_DEBUG */ -#define QDIO_USE_PROCESSING_STATE - -#define QDIO_MINIMAL_BH_RELIEF_TIME 16 -#define QDIO_TIMER_POLL_VALUE 1 -#define IQDIO_TIMER_POLL_VALUE 1 - -/* - * unfortunately this can't be (QDIO_MAX_BUFFERS_PER_Q*4/3) or so -- as - * we never know, whether we'll get initiative again, e.g. to give the - * transmit skb's back to the stack, however the stack may be waiting for - * them... 
therefore we define 4 as threshold to start polling (which - * will stop as soon as the asynchronous queue catches up) - * btw, this only applies to the asynchronous HiperSockets queue - */ -#define IQDIO_FILL_LEVEL_TO_POLL 4 - -#define TIQDIO_THININT_ISC QDIO_AIRQ_ISC -#define TIQDIO_DELAY_TARGET 0 -#define QDIO_BUSY_BIT_PATIENCE 100 /* in microsecs */ -#define QDIO_BUSY_BIT_GIVE_UP 10000000 /* 10 seconds */ -#define IQDIO_GLOBAL_LAPS 2 /* GLOBAL_LAPS are not used as we */ -#define IQDIO_GLOBAL_LAPS_INT 1 /* don't global summary */ -#define IQDIO_LOCAL_LAPS 4 -#define IQDIO_LOCAL_LAPS_INT 1 -#define IQDIO_GLOBAL_SUMMARY_CC_MASK 2 -/*#define IQDIO_IQDC_INT_PARM 0x1234*/ - -#define QDIO_Q_LAPS 5 - -#define QDIO_STORAGE_KEY PAGE_DEFAULT_KEY - -#define L2_CACHELINE_SIZE 256 -#define INDICATORS_PER_CACHELINE (L2_CACHELINE_SIZE/sizeof(__u32)) - -#define QDIO_PERF "qdio_perf" - -/* must be a power of 2 */ -/*#define QDIO_STATS_NUMBER 4 - -#define QDIO_STATS_CLASSES 2 -#define QDIO_STATS_COUNT_NEEDED 2*/ - -#define QDIO_NO_USE_COUNT_TIMEOUT (1*HZ) /* wait for 1 sec on each q before - exiting without having use_count - of the queue to 0 */ - -#define QDIO_ESTABLISH_TIMEOUT (1*HZ) -#define QDIO_CLEANUP_CLEAR_TIMEOUT (20*HZ) -#define QDIO_CLEANUP_HALT_TIMEOUT (10*HZ) -#define QDIO_FORCE_CHECK_TIMEOUT (10*HZ) -#define QDIO_ACTIVATE_TIMEOUT (5) /* 5 ms */ +#define QDIO_BUSY_BIT_PATIENCE 100 /* 100 microseconds */ +#define QDIO_BUSY_BIT_GIVE_UP 2000000 /* 2 seconds = eternity */ +#define QDIO_INPUT_THRESHOLD 500 /* 500 microseconds */ enum qdio_irq_states { QDIO_IRQ_STATE_INACTIVE, @@ -72,565 +26,352 @@ enum qdio_irq_states { NR_QDIO_IRQ_STATES, }; -/* used as intparm in do_IO: */ -#define QDIO_DOING_SENSEID 0 -#define QDIO_DOING_ESTABLISH 1 -#define QDIO_DOING_ACTIVATE 2 -#define QDIO_DOING_CLEANUP 3 - -/************************* DEBUG FACILITY STUFF *********************/ - -#define QDIO_DBF_HEX(ex,name,level,addr,len) \ - do { \ - if (ex) \ - debug_exception(qdio_dbf_##name,level,(void*)(addr),len); \ - else \ - debug_event(qdio_dbf_##name,level,(void*)(addr),len); \ - } while (0) -#define QDIO_DBF_TEXT(ex,name,level,text) \ - do { \ - if (ex) \ - debug_text_exception(qdio_dbf_##name,level,text); \ - else \ - debug_text_event(qdio_dbf_##name,level,text); \ - } while (0) - - -#define QDIO_DBF_HEX0(ex,name,addr,len) QDIO_DBF_HEX(ex,name,0,addr,len) -#define QDIO_DBF_HEX1(ex,name,addr,len) QDIO_DBF_HEX(ex,name,1,addr,len) -#define QDIO_DBF_HEX2(ex,name,addr,len) QDIO_DBF_HEX(ex,name,2,addr,len) -#ifdef CONFIG_QDIO_DEBUG -#define QDIO_DBF_HEX3(ex,name,addr,len) QDIO_DBF_HEX(ex,name,3,addr,len) -#define QDIO_DBF_HEX4(ex,name,addr,len) QDIO_DBF_HEX(ex,name,4,addr,len) -#define QDIO_DBF_HEX5(ex,name,addr,len) QDIO_DBF_HEX(ex,name,5,addr,len) -#define QDIO_DBF_HEX6(ex,name,addr,len) QDIO_DBF_HEX(ex,name,6,addr,len) -#else /* CONFIG_QDIO_DEBUG */ -#define QDIO_DBF_HEX3(ex,name,addr,len) do {} while (0) -#define QDIO_DBF_HEX4(ex,name,addr,len) do {} while (0) -#define QDIO_DBF_HEX5(ex,name,addr,len) do {} while (0) -#define QDIO_DBF_HEX6(ex,name,addr,len) do {} while (0) -#endif /* CONFIG_QDIO_DEBUG */ - -#define QDIO_DBF_TEXT0(ex,name,text) QDIO_DBF_TEXT(ex,name,0,text) -#define QDIO_DBF_TEXT1(ex,name,text) QDIO_DBF_TEXT(ex,name,1,text) -#define QDIO_DBF_TEXT2(ex,name,text) QDIO_DBF_TEXT(ex,name,2,text) -#ifdef CONFIG_QDIO_DEBUG -#define QDIO_DBF_TEXT3(ex,name,text) QDIO_DBF_TEXT(ex,name,3,text) -#define QDIO_DBF_TEXT4(ex,name,text) QDIO_DBF_TEXT(ex,name,4,text) -#define QDIO_DBF_TEXT5(ex,name,text) 
QDIO_DBF_TEXT(ex,name,5,text) -#define QDIO_DBF_TEXT6(ex,name,text) QDIO_DBF_TEXT(ex,name,6,text) -#else /* CONFIG_QDIO_DEBUG */ -#define QDIO_DBF_TEXT3(ex,name,text) do {} while (0) -#define QDIO_DBF_TEXT4(ex,name,text) do {} while (0) -#define QDIO_DBF_TEXT5(ex,name,text) do {} while (0) -#define QDIO_DBF_TEXT6(ex,name,text) do {} while (0) -#endif /* CONFIG_QDIO_DEBUG */ - -#define QDIO_DBF_SETUP_NAME "qdio_setup" -#define QDIO_DBF_SETUP_LEN 8 -#define QDIO_DBF_SETUP_PAGES 4 -#define QDIO_DBF_SETUP_NR_AREAS 1 -#ifdef CONFIG_QDIO_DEBUG -#define QDIO_DBF_SETUP_LEVEL 6 -#else /* CONFIG_QDIO_DEBUG */ -#define QDIO_DBF_SETUP_LEVEL 2 -#endif /* CONFIG_QDIO_DEBUG */ - -#define QDIO_DBF_SBAL_NAME "qdio_labs" /* sbal */ -#define QDIO_DBF_SBAL_LEN 256 -#define QDIO_DBF_SBAL_PAGES 4 -#define QDIO_DBF_SBAL_NR_AREAS 2 -#ifdef CONFIG_QDIO_DEBUG -#define QDIO_DBF_SBAL_LEVEL 6 -#else /* CONFIG_QDIO_DEBUG */ -#define QDIO_DBF_SBAL_LEVEL 2 -#endif /* CONFIG_QDIO_DEBUG */ - -#define QDIO_DBF_TRACE_NAME "qdio_trace" -#define QDIO_DBF_TRACE_LEN 8 -#define QDIO_DBF_TRACE_NR_AREAS 2 -#ifdef CONFIG_QDIO_DEBUG -#define QDIO_DBF_TRACE_PAGES 16 -#define QDIO_DBF_TRACE_LEVEL 4 /* -------- could be even more verbose here */ -#else /* CONFIG_QDIO_DEBUG */ -#define QDIO_DBF_TRACE_PAGES 4 -#define QDIO_DBF_TRACE_LEVEL 2 -#endif /* CONFIG_QDIO_DEBUG */ - -#define QDIO_DBF_SENSE_NAME "qdio_sense" -#define QDIO_DBF_SENSE_LEN 64 -#define QDIO_DBF_SENSE_PAGES 2 -#define QDIO_DBF_SENSE_NR_AREAS 1 -#ifdef CONFIG_QDIO_DEBUG -#define QDIO_DBF_SENSE_LEVEL 6 -#else /* CONFIG_QDIO_DEBUG */ -#define QDIO_DBF_SENSE_LEVEL 2 -#endif /* CONFIG_QDIO_DEBUG */ - -#ifdef CONFIG_QDIO_DEBUG -#define QDIO_TRACE_QTYPE QDIO_ZFCP_QFMT - -#define QDIO_DBF_SLSB_OUT_NAME "qdio_slsb_out" -#define QDIO_DBF_SLSB_OUT_LEN QDIO_MAX_BUFFERS_PER_Q -#define QDIO_DBF_SLSB_OUT_PAGES 256 -#define QDIO_DBF_SLSB_OUT_NR_AREAS 1 -#define QDIO_DBF_SLSB_OUT_LEVEL 6 - -#define QDIO_DBF_SLSB_IN_NAME "qdio_slsb_in" -#define QDIO_DBF_SLSB_IN_LEN QDIO_MAX_BUFFERS_PER_Q -#define QDIO_DBF_SLSB_IN_PAGES 256 -#define QDIO_DBF_SLSB_IN_NR_AREAS 1 -#define QDIO_DBF_SLSB_IN_LEVEL 6 -#endif /* CONFIG_QDIO_DEBUG */ - -#define QDIO_PRINTK_HEADER QDIO_NAME ": " - -#if QDIO_VERBOSE_LEVEL>8 -#define QDIO_PRINT_STUPID(x...) printk( KERN_DEBUG QDIO_PRINTK_HEADER x) -#else -#define QDIO_PRINT_STUPID(x...) 
do { } while (0) -#endif +/* used as intparm in do_IO */ +#define QDIO_DOING_ESTABLISH 1 +#define QDIO_DOING_ACTIVATE 2 +#define QDIO_DOING_CLEANUP 3 + +#define SLSB_STATE_NOT_INIT 0x0 +#define SLSB_STATE_EMPTY 0x1 +#define SLSB_STATE_PRIMED 0x2 +#define SLSB_STATE_HALTED 0xe +#define SLSB_STATE_ERROR 0xf +#define SLSB_TYPE_INPUT 0x0 +#define SLSB_TYPE_OUTPUT 0x20 +#define SLSB_OWNER_PROG 0x80 +#define SLSB_OWNER_CU 0x40 + +#define SLSB_P_INPUT_NOT_INIT \ + (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_NOT_INIT) /* 0x80 */ +#define SLSB_P_INPUT_ACK \ + (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_EMPTY) /* 0x81 */ +#define SLSB_CU_INPUT_EMPTY \ + (SLSB_OWNER_CU | SLSB_TYPE_INPUT | SLSB_STATE_EMPTY) /* 0x41 */ +#define SLSB_P_INPUT_PRIMED \ + (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_PRIMED) /* 0x82 */ +#define SLSB_P_INPUT_HALTED \ + (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_HALTED) /* 0x8e */ +#define SLSB_P_INPUT_ERROR \ + (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_ERROR) /* 0x8f */ +#define SLSB_P_OUTPUT_NOT_INIT \ + (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_NOT_INIT) /* 0xa0 */ +#define SLSB_P_OUTPUT_EMPTY \ + (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_EMPTY) /* 0xa1 */ +#define SLSB_CU_OUTPUT_PRIMED \ + (SLSB_OWNER_CU | SLSB_TYPE_OUTPUT | SLSB_STATE_PRIMED) /* 0x62 */ +#define SLSB_P_OUTPUT_HALTED \ + (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_HALTED) /* 0xae */ +#define SLSB_P_OUTPUT_ERROR \ + (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_ERROR) /* 0xaf */ + +#define SLSB_ERROR_DURING_LOOKUP 0xff + +/* additional CIWs returned by extended Sense-ID */ +#define CIW_TYPE_EQUEUE 0x3 /* establish QDIO queues */ +#define CIW_TYPE_AQUEUE 0x4 /* activate QDIO queues */ -#if QDIO_VERBOSE_LEVEL>7 -#define QDIO_PRINT_ALL(x...) printk( QDIO_PRINTK_HEADER x) -#else -#define QDIO_PRINT_ALL(x...) do { } while (0) -#endif - -#if QDIO_VERBOSE_LEVEL>6 -#define QDIO_PRINT_INFO(x...) printk( QDIO_PRINTK_HEADER x) -#else -#define QDIO_PRINT_INFO(x...) do { } while (0) -#endif - -#if QDIO_VERBOSE_LEVEL>5 -#define QDIO_PRINT_WARN(x...) printk( QDIO_PRINTK_HEADER x) -#else -#define QDIO_PRINT_WARN(x...) do { } while (0) -#endif - -#if QDIO_VERBOSE_LEVEL>4 -#define QDIO_PRINT_ERR(x...) printk( QDIO_PRINTK_HEADER x) -#else -#define QDIO_PRINT_ERR(x...) do { } while (0) -#endif - -#if QDIO_VERBOSE_LEVEL>3 -#define QDIO_PRINT_CRIT(x...) printk( QDIO_PRINTK_HEADER x) -#else -#define QDIO_PRINT_CRIT(x...) do { } while (0) -#endif - -#if QDIO_VERBOSE_LEVEL>2 -#define QDIO_PRINT_ALERT(x...) printk( QDIO_PRINTK_HEADER x) -#else -#define QDIO_PRINT_ALERT(x...) do { } while (0) -#endif +/* flags for st qdio sch data */ +#define CHSC_FLAG_QDIO_CAPABILITY 0x80 +#define CHSC_FLAG_VALIDITY 0x40 + +/* qdio adapter-characteristics-1 flag */ +#define AC1_SIGA_INPUT_NEEDED 0x40 /* process input queues */ +#define AC1_SIGA_OUTPUT_NEEDED 0x20 /* process output queues */ +#define AC1_SIGA_SYNC_NEEDED 0x10 /* ask hypervisor to sync */ +#define AC1_AUTOMATIC_SYNC_ON_THININT 0x08 /* set by hypervisor */ +#define AC1_AUTOMATIC_SYNC_ON_OUT_PCI 0x04 /* set by hypervisor */ +#define AC1_SC_QEBSM_AVAILABLE 0x02 /* available for subchannel */ +#define AC1_SC_QEBSM_ENABLED 0x01 /* enabled for subchannel */ -#if QDIO_VERBOSE_LEVEL>1 -#define QDIO_PRINT_EMERG(x...) printk( QDIO_PRINTK_HEADER x) -#else -#define QDIO_PRINT_EMERG(x...) 
do { } while (0) -#endif - -#define QDIO_HEXDUMP16(importance,header,ptr) \ -QDIO_PRINT_##importance(header "%02x %02x %02x %02x " \ - "%02x %02x %02x %02x %02x %02x %02x %02x " \ - "%02x %02x %02x %02x\n",*(((char*)ptr)), \ - *(((char*)ptr)+1),*(((char*)ptr)+2), \ - *(((char*)ptr)+3),*(((char*)ptr)+4), \ - *(((char*)ptr)+5),*(((char*)ptr)+6), \ - *(((char*)ptr)+7),*(((char*)ptr)+8), \ - *(((char*)ptr)+9),*(((char*)ptr)+10), \ - *(((char*)ptr)+11),*(((char*)ptr)+12), \ - *(((char*)ptr)+13),*(((char*)ptr)+14), \ - *(((char*)ptr)+15)); \ -QDIO_PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \ - "%02x %02x %02x %02x %02x %02x %02x %02x\n", \ - *(((char*)ptr)+16),*(((char*)ptr)+17), \ - *(((char*)ptr)+18),*(((char*)ptr)+19), \ - *(((char*)ptr)+20),*(((char*)ptr)+21), \ - *(((char*)ptr)+22),*(((char*)ptr)+23), \ - *(((char*)ptr)+24),*(((char*)ptr)+25), \ - *(((char*)ptr)+26),*(((char*)ptr)+27), \ - *(((char*)ptr)+28),*(((char*)ptr)+29), \ - *(((char*)ptr)+30),*(((char*)ptr)+31)); - -/****************** END OF DEBUG FACILITY STUFF *********************/ +#ifdef CONFIG_64BIT +static inline int do_sqbs(u64 token, unsigned char state, int queue, + int *start, int *count) +{ + register unsigned long _ccq asm ("0") = *count; + register unsigned long _token asm ("1") = token; + unsigned long _queuestart = ((unsigned long)queue << 32) | *start; -/* - * Some instructions as assembly - */ + asm volatile( + " .insn rsy,0xeb000000008A,%1,0,0(%2)" + : "+d" (_ccq), "+d" (_queuestart) + : "d" ((unsigned long)state), "d" (_token) + : "memory", "cc"); + *count = _ccq & 0xff; + *start = _queuestart & 0xff; -static inline int -do_sqbs(unsigned long sch, unsigned char state, int queue, - unsigned int *start, unsigned int *count) -{ -#ifdef CONFIG_64BIT - register unsigned long _ccq asm ("0") = *count; - register unsigned long _sch asm ("1") = sch; - unsigned long _queuestart = ((unsigned long)queue << 32) | *start; - - asm volatile( - " .insn rsy,0xeb000000008A,%1,0,0(%2)" - : "+d" (_ccq), "+d" (_queuestart) - : "d" ((unsigned long)state), "d" (_sch) - : "memory", "cc"); - *count = _ccq & 0xff; - *start = _queuestart & 0xff; - - return (_ccq >> 32) & 0xff; -#else - return 0; -#endif + return (_ccq >> 32) & 0xff; } -static inline int -do_eqbs(unsigned long sch, unsigned char *state, int queue, - unsigned int *start, unsigned int *count) +static inline int do_eqbs(u64 token, unsigned char *state, int queue, + int *start, int *count) { -#ifdef CONFIG_64BIT register unsigned long _ccq asm ("0") = *count; - register unsigned long _sch asm ("1") = sch; + register unsigned long _token asm ("1") = token; unsigned long _queuestart = ((unsigned long)queue << 32) | *start; unsigned long _state = 0; asm volatile( " .insn rrf,0xB99c0000,%1,%2,0,0" : "+d" (_ccq), "+d" (_queuestart), "+d" (_state) - : "d" (_sch) - : "memory", "cc" ); + : "d" (_token) + : "memory", "cc"); *count = _ccq & 0xff; *start = _queuestart & 0xff; *state = _state & 0xff; return (_ccq >> 32) & 0xff; -#else - return 0; -#endif -} - - -static inline int -do_siga_sync(struct subchannel_id schid, unsigned int mask1, unsigned int mask2) -{ - register unsigned long reg0 asm ("0") = 2; - register struct subchannel_id reg1 asm ("1") = schid; - register unsigned long reg2 asm ("2") = mask1; - register unsigned long reg3 asm ("3") = mask2; - int cc; - - asm volatile( - " siga 0\n" - " ipm %0\n" - " srl %0,28\n" - : "=d" (cc) - : "d" (reg0), "d" (reg1), "d" (reg2), "d" (reg3) : "cc"); - return cc; -} - -static inline int -do_siga_input(struct 
subchannel_id schid, unsigned int mask) -{ - register unsigned long reg0 asm ("0") = 1; - register struct subchannel_id reg1 asm ("1") = schid; - register unsigned long reg2 asm ("2") = mask; - int cc; - - asm volatile( - " siga 0\n" - " ipm %0\n" - " srl %0,28\n" - : "=d" (cc) - : "d" (reg0), "d" (reg1), "d" (reg2) : "cc", "memory"); - return cc; -} - -static inline int -do_siga_output(unsigned long schid, unsigned long mask, __u32 *bb, - unsigned int fc) -{ - register unsigned long __fc asm("0") = fc; - register unsigned long __schid asm("1") = schid; - register unsigned long __mask asm("2") = mask; - int cc; - - asm volatile( - " siga 0\n" - "0: ipm %0\n" - " srl %0,28\n" - "1:\n" - EX_TABLE(0b,1b) - : "=d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask) - : "0" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION) - : "cc", "memory"); - (*bb) = ((unsigned int) __fc) >> 31; - return cc; -} - -static inline unsigned long -do_clear_global_summary(void) -{ - register unsigned long __fn asm("1") = 3; - register unsigned long __tmp asm("2"); - register unsigned long __time asm("3"); - - asm volatile( - " .insn rre,0xb2650000,2,0" - : "+d" (__fn), "=d" (__tmp), "=d" (__time)); - return __time; } - -/* - * QDIO device commands returned by extended Sense-ID - */ -#define DEFAULT_ESTABLISH_QS_CMD 0x1b -#define DEFAULT_ESTABLISH_QS_COUNT 0x1000 -#define DEFAULT_ACTIVATE_QS_CMD 0x1f -#define DEFAULT_ACTIVATE_QS_COUNT 0 - -/* - * additional CIWs returned by extended Sense-ID - */ -#define CIW_TYPE_EQUEUE 0x3 /* establish QDIO queues */ -#define CIW_TYPE_AQUEUE 0x4 /* activate QDIO queues */ +#else +static inline int do_sqbs(u64 token, unsigned char state, int queue, + int *start, int *count) { return 0; } +static inline int do_eqbs(u64 token, unsigned char *state, int queue, + int *start, int *count) { return 0; } +#endif /* CONFIG_64BIT */ -#define QDIO_CHSC_RESPONSE_CODE_OK 1 -/* flags for st qdio sch data */ -#define CHSC_FLAG_QDIO_CAPABILITY 0x80 -#define CHSC_FLAG_VALIDITY 0x40 +struct qdio_irq; -#define CHSC_FLAG_SIGA_INPUT_NECESSARY 0x40 -#define CHSC_FLAG_SIGA_OUTPUT_NECESSARY 0x20 -#define CHSC_FLAG_SIGA_SYNC_NECESSARY 0x10 -#define CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS 0x08 -#define CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS 0x04 +struct siga_flag { + u8 input:1; + u8 output:1; + u8 sync:1; + u8 no_sync_ti:1; + u8 no_sync_out_ti:1; + u8 no_sync_out_pci:1; + u8:2; +} __attribute__ ((packed)); -struct qdio_chsc_ssqd { +struct chsc_ssqd_area { struct chsc_header request; - u16 reserved1:10; - u16 ssid:2; - u16 fmt:4; + u16:10; + u8 ssid:2; + u8 fmt:4; u16 first_sch; - u16 reserved2; + u16:16; u16 last_sch; - u32 reserved3; + u32:32; struct chsc_header response; - u32 reserved4; - u8 flags; - u8 reserved5; - u16 sch; - u8 qfmt; - u8 parm; - u8 qdioac1; - u8 sch_class; - u8 pct; - u8 icnt; - u8 reserved7; - u8 ocnt; - u8 reserved8; - u8 mbccnt; - u16 qdioac2; - u64 sch_token; -}; + u32:32; + struct qdio_ssqd_desc qdio_ssqd; +} __attribute__ ((packed)); -struct qdio_perf_stats { -#ifdef CONFIG_64BIT - atomic64_t tl_runs; - atomic64_t outbound_tl_runs; - atomic64_t outbound_tl_runs_resched; - atomic64_t inbound_tl_runs; - atomic64_t inbound_tl_runs_resched; - atomic64_t inbound_thin_tl_runs; - atomic64_t inbound_thin_tl_runs_resched; - - atomic64_t siga_outs; - atomic64_t siga_ins; - atomic64_t siga_syncs; - atomic64_t pcis; - atomic64_t thinints; - atomic64_t fast_reqs; - - atomic64_t outbound_cnt; - atomic64_t inbound_cnt; -#else /* CONFIG_64BIT */ - atomic_t tl_runs; - atomic_t outbound_tl_runs; - atomic_t 
outbound_tl_runs_resched; - atomic_t inbound_tl_runs; - atomic_t inbound_tl_runs_resched; - atomic_t inbound_thin_tl_runs; - atomic_t inbound_thin_tl_runs_resched; - - atomic_t siga_outs; - atomic_t siga_ins; - atomic_t siga_syncs; - atomic_t pcis; - atomic_t thinints; - atomic_t fast_reqs; - - atomic_t outbound_cnt; - atomic_t inbound_cnt; -#endif /* CONFIG_64BIT */ +struct scssc_area { + struct chsc_header request; + u16 operation_code; + u16:16; + u32:32; + u32:32; + u64 summary_indicator_addr; + u64 subchannel_indicator_addr; + u32 ks:4; + u32 kc:4; + u32:21; + u32 isc:3; + u32 word_with_d_bit; + u32:32; + struct subchannel_id schid; + u32 reserved[1004]; + struct chsc_header response; + u32:32; +} __attribute__ ((packed)); + +struct qdio_input_q { + /* input buffer acknowledgement flag */ + int polling; + + /* last time of noticing incoming data */ + u64 timestamp; + + /* lock for clearing the acknowledgement */ + spinlock_t lock; }; -/* unlikely as the later the better */ -#define SYNC_MEMORY if (unlikely(q->siga_sync)) qdio_siga_sync_q(q) -#define SYNC_MEMORY_ALL if (unlikely(q->siga_sync)) \ - qdio_siga_sync(q,~0U,~0U) -#define SYNC_MEMORY_ALL_OUTB if (unlikely(q->siga_sync)) \ - qdio_siga_sync(q,~0U,0) +struct qdio_output_q { + /* failed siga-w attempts*/ + atomic_t busy_siga_counter; -#define NOW qdio_get_micros() -#define SAVE_TIMESTAMP(q) q->timing.last_transfer_time=NOW -#define GET_SAVED_TIMESTAMP(q) (q->timing.last_transfer_time) -#define SAVE_FRONTIER(q,val) q->last_move_ftc=val -#define GET_SAVED_FRONTIER(q) (q->last_move_ftc) + /* start time of busy condition */ + u64 timestamp; -#define MY_MODULE_STRING(x) #x + /* PCIs are enabled for the queue */ + int pci_out_enabled; -#ifdef CONFIG_64BIT -#define QDIO_GET_ADDR(x) ((__u32)(unsigned long)x) -#else /* CONFIG_64BIT */ -#define QDIO_GET_ADDR(x) ((__u32)(long)x) -#endif /* CONFIG_64BIT */ + /* timer to check for more outbound work */ + struct timer_list timer; +}; struct qdio_q { - volatile struct slsb slsb; + struct slsb slsb; + union { + struct qdio_input_q in; + struct qdio_output_q out; + } u; - char unused[QDIO_MAX_BUFFERS_PER_Q]; + /* queue number */ + int nr; - __u32 * dev_st_chg_ind; + /* bitmask of queue number */ + int mask; + /* input or output queue */ int is_input_q; - struct subchannel_id schid; - struct ccw_device *cdev; - - unsigned int is_iqdio_q; - unsigned int is_thinint_q; - /* bit 0 means queue 0, bit 1 means queue 1, ... 
*/ - unsigned int mask; - unsigned int q_no; + /* list of thinint input queues */ + struct list_head entry; + /* upper-layer program handler */ qdio_handler_t (*handler); - /* points to the next buffer to be checked for having - * been processed by the card (outbound) - * or to the next buffer the program should check for (inbound) */ - volatile int first_to_check; - /* and the last time it was: */ - volatile int last_move_ftc; + /* + * inbound: next buffer the program should check for + * outbound: next buffer to check for having been processed + * by the card + */ + int first_to_check; - atomic_t number_of_buffers_used; - atomic_t polling; + /* first_to_check of the last time */ + int last_move_ftc; - unsigned int siga_in; - unsigned int siga_out; - unsigned int siga_sync; - unsigned int siga_sync_done_on_thinints; - unsigned int siga_sync_done_on_outb_tis; - unsigned int hydra_gives_outbound_pcis; + /* beginning position for calling the program */ + int first_to_kick; - /* used to save beginning position when calling dd_handlers */ - int first_element_to_kick; + /* number of buffers in use by the adapter */ + atomic_t nr_buf_used; - atomic_t use_count; - atomic_t is_in_shutdown; - - void *irq_ptr; - - struct timer_list timer; -#ifdef QDIO_USE_TIMERS_FOR_POLLING - atomic_t timer_already_set; - spinlock_t timer_lock; -#else /* QDIO_USE_TIMERS_FOR_POLLING */ + struct qdio_irq *irq_ptr; struct tasklet_struct tasklet; -#endif /* QDIO_USE_TIMERS_FOR_POLLING */ - - enum qdio_irq_states state; - - /* used to store the error condition during a data transfer */ + /* error condition during a data transfer */ unsigned int qdio_error; - unsigned int siga_error; - unsigned int error_status_flags; - - /* list of interesting queues */ - volatile struct qdio_q *list_next; - volatile struct qdio_q *list_prev; struct sl *sl; - volatile struct sbal *sbal[QDIO_MAX_BUFFERS_PER_Q]; - - struct qdio_buffer *qdio_buffers[QDIO_MAX_BUFFERS_PER_Q]; - - unsigned long int_parm; - - /*struct { - int in_bh_check_limit; - int threshold; - } threshold_classes[QDIO_STATS_CLASSES];*/ - - struct { - /* inbound: the time to stop polling - outbound: the time to kick peer */ - int threshold; /* the real value */ - - /* outbound: last time of do_QDIO - inbound: last time of noticing incoming data */ - /*__u64 last_transfer_times[QDIO_STATS_NUMBER]; - int last_transfer_index; */ - - __u64 last_transfer_time; - __u64 busy_start; - } timing; - atomic_t busy_siga_counter; - unsigned int queue_type; - unsigned int is_pci_out; - - /* leave this member at the end. won't be cleared in qdio_fill_qs */ - struct slib *slib; /* a page is allocated under this pointer, - sl points into this page, offset PAGE_SIZE/2 - (after slib) */ + struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q]; + + /* + * Warning: Leave this member at the end so it won't be cleared in + * qdio_fill_qs. A page is allocated under this pointer and used for + * slib and sl. slib is 2048 bytes big and sl points to offset + * PAGE_SIZE / 2. 
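For illustration, the shared-page layout described in this comment amounts to pointer arithmetic along the following lines. This is a sketch, not the patch's literal allocation code (which presumably lives in the setup part of this patch, not shown in this hunk); it assumes the usual kernel page allocator and the slib/sl types from asm/qdio.h.

/* Sketch: one page holds the 2048 byte slib, sl starts at PAGE_SIZE / 2. */
static int sketch_alloc_slib_and_sl(struct qdio_q *q)
{
	q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
	if (!q->slib)
		return -ENOMEM;

	/* sl sits in the second half of the very same page */
	q->sl = (struct sl *)((char *) q->slib + PAGE_SIZE / 2);
	return 0;
}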
+ */ + struct slib *slib; } __attribute__ ((aligned(256))); struct qdio_irq { - __u32 * volatile dev_st_chg_ind; + struct qib qib; + u32 *dsci; /* address of device state change indicator */ + struct ccw_device *cdev; unsigned long int_parm; struct subchannel_id schid; - - unsigned int is_iqdio_irq; - unsigned int is_thinint_irq; - unsigned int hydra_gives_outbound_pcis; - unsigned int sync_done_on_outb_pcis; - - /* QEBSM facility */ - unsigned int is_qebsm; - unsigned long sch_token; + unsigned long sch_token; /* QEBSM facility */ enum qdio_irq_states state; - unsigned int no_input_qs; - unsigned int no_output_qs; + struct siga_flag siga_flag; /* siga sync information from qdioac */ - unsigned char qdioac; + int nr_input_qs; + int nr_output_qs; struct ccw1 ccw; - struct ciw equeue; struct ciw aqueue; - struct qib qib; - - void (*original_int_handler) (struct ccw_device *, - unsigned long, struct irb *); + struct qdio_ssqd_desc ssqd_desc; + + void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *); - /* leave these four members together at the end. won't be cleared in qdio_fill_irq */ + /* + * Warning: Leave these members together at the end so they won't be + * cleared in qdio_setup_irq. + */ struct qdr *qdr; + unsigned long chsc_page; + struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ]; struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ]; - struct semaphore setting_up_sema; + + struct mutex setup_mutex; }; -#endif + +/* helper functions */ +#define queue_type(q) q->irq_ptr->qib.qfmt + +#define is_thinint_irq(irq) \ + (irq->qib.qfmt == QDIO_IQDIO_QFMT || \ + css_general_characteristics.aif_osa) + +/* the highest iqdio queue is used for multicast */ +static inline int multicast_outbound(struct qdio_q *q) +{ + return (q->irq_ptr->nr_output_qs > 1) && + (q->nr == q->irq_ptr->nr_output_qs - 1); +} + +static inline unsigned long long get_usecs(void) +{ + return monotonic_clock() >> 12; +} + +#define pci_out_supported(q) \ + (q->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) +#define is_qebsm(q) (q->irq_ptr->sch_token != 0) + +#define need_siga_sync_thinint(q) (!q->irq_ptr->siga_flag.no_sync_ti) +#define need_siga_sync_out_thinint(q) (!q->irq_ptr->siga_flag.no_sync_out_ti) +#define need_siga_in(q) (q->irq_ptr->siga_flag.input) +#define need_siga_out(q) (q->irq_ptr->siga_flag.output) +#define need_siga_sync(q) (q->irq_ptr->siga_flag.sync) +#define siga_syncs_out_pci(q) (q->irq_ptr->siga_flag.no_sync_out_pci) + +#define for_each_input_queue(irq_ptr, q, i) \ + for (i = 0, q = irq_ptr->input_qs[0]; \ + i < irq_ptr->nr_input_qs; \ + q = irq_ptr->input_qs[++i]) +#define for_each_output_queue(irq_ptr, q, i) \ + for (i = 0, q = irq_ptr->output_qs[0]; \ + i < irq_ptr->nr_output_qs; \ + q = irq_ptr->output_qs[++i]) + +#define prev_buf(bufnr) \ + ((bufnr + QDIO_MAX_BUFFERS_MASK) & QDIO_MAX_BUFFERS_MASK) +#define next_buf(bufnr) \ + ((bufnr + 1) & QDIO_MAX_BUFFERS_MASK) +#define add_buf(bufnr, inc) \ + ((bufnr + inc) & QDIO_MAX_BUFFERS_MASK) + +/* prototypes for thin interrupt */ +void qdio_sync_after_thinint(struct qdio_q *q); +int get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state); +void qdio_check_outbound_after_thinint(struct qdio_q *q); +int qdio_inbound_q_moved(struct qdio_q *q); +void qdio_kick_inbound_handler(struct qdio_q *q); +void qdio_stop_polling(struct qdio_q *q); +int qdio_siga_sync_q(struct qdio_q *q); + +void qdio_setup_thinint(struct qdio_irq *irq_ptr); +int qdio_establish_thinint(struct qdio_irq *irq_ptr); +void qdio_shutdown_thinint(struct qdio_irq 
*irq_ptr); +void tiqdio_add_input_queues(struct qdio_irq *irq_ptr); +void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr); +void tiqdio_inbound_processing(unsigned long q); +int tiqdio_allocate_memory(void); +void tiqdio_free_memory(void); +int tiqdio_register_thinints(void); +void tiqdio_unregister_thinints(void); + +/* prototypes for setup */ +void qdio_inbound_processing(unsigned long data); +void qdio_outbound_processing(unsigned long data); +void qdio_outbound_timer(unsigned long data); +void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, + struct irb *irb); +int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, + int nr_output_qs); +void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr); +int qdio_setup_irq(struct qdio_initialize *init_data); +void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, + struct ccw_device *cdev); +void qdio_release_memory(struct qdio_irq *irq_ptr); +int qdio_setup_init(void); +void qdio_setup_exit(void); + +#endif /* _CIO_QDIO_H */ diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c new file mode 100644 index 00000000000..337aa3087a7 --- /dev/null +++ b/drivers/s390/cio/qdio_debug.c @@ -0,0 +1,240 @@ +/* + * drivers/s390/cio/qdio_debug.c + * + * Copyright IBM Corp. 2008 + * + * Author: Jan Glauber (jang@linux.vnet.ibm.com) + */ +#include <linux/proc_fs.h> +#include <linux/seq_file.h> +#include <linux/debugfs.h> +#include <asm/qdio.h> +#include <asm/debug.h> +#include "qdio_debug.h" +#include "qdio.h" + +debug_info_t *qdio_dbf_setup; +debug_info_t *qdio_dbf_trace; + +static struct dentry *debugfs_root; +#define MAX_DEBUGFS_QUEUES 32 +static struct dentry *debugfs_queues[MAX_DEBUGFS_QUEUES] = { NULL }; +static DEFINE_MUTEX(debugfs_mutex); + +void qdio_allocate_do_dbf(struct qdio_initialize *init_data) +{ + char dbf_text[20]; + + sprintf(dbf_text, "qfmt:%x", init_data->q_format); + QDIO_DBF_TEXT0(0, setup, dbf_text); + QDIO_DBF_HEX0(0, setup, init_data->adapter_name, 8); + sprintf(dbf_text, "qpff%4x", init_data->qib_param_field_format); + QDIO_DBF_TEXT0(0, setup, dbf_text); + QDIO_DBF_HEX0(0, setup, &init_data->qib_param_field, sizeof(void *)); + QDIO_DBF_HEX0(0, setup, &init_data->input_slib_elements, sizeof(void *)); + QDIO_DBF_HEX0(0, setup, &init_data->output_slib_elements, sizeof(void *)); + sprintf(dbf_text, "niq:%4x", init_data->no_input_qs); + QDIO_DBF_TEXT0(0, setup, dbf_text); + sprintf(dbf_text, "noq:%4x", init_data->no_output_qs); + QDIO_DBF_TEXT0(0, setup, dbf_text); + QDIO_DBF_HEX0(0, setup, &init_data->input_handler, sizeof(void *)); + QDIO_DBF_HEX0(0, setup, &init_data->output_handler, sizeof(void *)); + QDIO_DBF_HEX0(0, setup, &init_data->int_parm, sizeof(long)); + QDIO_DBF_HEX0(0, setup, &init_data->flags, sizeof(long)); + QDIO_DBF_HEX0(0, setup, &init_data->input_sbal_addr_array, sizeof(void *)); + QDIO_DBF_HEX0(0, setup, &init_data->output_sbal_addr_array, sizeof(void *)); +} + +static void qdio_unregister_dbf_views(void) +{ + if (qdio_dbf_setup) + debug_unregister(qdio_dbf_setup); + if (qdio_dbf_trace) + debug_unregister(qdio_dbf_trace); +} + +static int qdio_register_dbf_views(void) +{ + qdio_dbf_setup = debug_register("qdio_setup", QDIO_DBF_SETUP_PAGES, + QDIO_DBF_SETUP_NR_AREAS, + QDIO_DBF_SETUP_LEN); + if (!qdio_dbf_setup) + goto oom; + debug_register_view(qdio_dbf_setup, &debug_hex_ascii_view); + debug_set_level(qdio_dbf_setup, QDIO_DBF_SETUP_LEVEL); + + qdio_dbf_trace = debug_register("qdio_trace", QDIO_DBF_TRACE_PAGES, + QDIO_DBF_TRACE_NR_AREAS, + QDIO_DBF_TRACE_LEN); + if 
(!qdio_dbf_trace) + goto oom; + debug_register_view(qdio_dbf_trace, &debug_hex_ascii_view); + debug_set_level(qdio_dbf_trace, QDIO_DBF_TRACE_LEVEL); + return 0; +oom: + qdio_unregister_dbf_views(); + return -ENOMEM; +} + +static int qstat_show(struct seq_file *m, void *v) +{ + unsigned char state; + struct qdio_q *q = m->private; + int i; + + if (!q) + return 0; + + seq_printf(m, "device state indicator: %d\n", *q->irq_ptr->dsci); + seq_printf(m, "nr_used: %d\n", atomic_read(&q->nr_buf_used)); + seq_printf(m, "ftc: %d\n", q->first_to_check); + seq_printf(m, "last_move_ftc: %d\n", q->last_move_ftc); + seq_printf(m, "polling: %d\n", q->u.in.polling); + seq_printf(m, "slsb buffer states:\n"); + + qdio_siga_sync_q(q); + for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) { + get_buf_state(q, i, &state); + switch (state) { + case SLSB_P_INPUT_NOT_INIT: + case SLSB_P_OUTPUT_NOT_INIT: + seq_printf(m, "N"); + break; + case SLSB_P_INPUT_PRIMED: + case SLSB_CU_OUTPUT_PRIMED: + seq_printf(m, "+"); + break; + case SLSB_P_INPUT_ACK: + seq_printf(m, "A"); + break; + case SLSB_P_INPUT_ERROR: + case SLSB_P_OUTPUT_ERROR: + seq_printf(m, "x"); + break; + case SLSB_CU_INPUT_EMPTY: + case SLSB_P_OUTPUT_EMPTY: + seq_printf(m, "-"); + break; + case SLSB_P_INPUT_HALTED: + case SLSB_P_OUTPUT_HALTED: + seq_printf(m, "."); + break; + default: + seq_printf(m, "?"); + } + if (i == 63) + seq_printf(m, "\n"); + } + seq_printf(m, "\n"); + return 0; +} + +static ssize_t qstat_seq_write(struct file *file, const char __user *buf, + size_t count, loff_t *off) +{ + struct seq_file *seq = file->private_data; + struct qdio_q *q = seq->private; + + if (!q) + return 0; + + if (q->is_input_q) + xchg(q->irq_ptr->dsci, 1); + local_bh_disable(); + tasklet_schedule(&q->tasklet); + local_bh_enable(); + return count; +} + +static int qstat_seq_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, qstat_show, + filp->f_path.dentry->d_inode->i_private); +} + +static void get_queue_name(struct qdio_q *q, struct ccw_device *cdev, char *name) +{ + memset(name, 0, sizeof(name)); + sprintf(name, "%s", cdev->dev.bus_id); + if (q->is_input_q) + sprintf(name + strlen(name), "_input"); + else + sprintf(name + strlen(name), "_output"); + sprintf(name + strlen(name), "_%d", q->nr); +} + +static void remove_debugfs_entry(struct qdio_q *q) +{ + int i; + + for (i = 0; i < MAX_DEBUGFS_QUEUES; i++) { + if (!debugfs_queues[i]) + continue; + if (debugfs_queues[i]->d_inode->i_private == q) { + debugfs_remove(debugfs_queues[i]); + debugfs_queues[i] = NULL; + } + } +} + +static struct file_operations debugfs_fops = { + .owner = THIS_MODULE, + .open = qstat_seq_open, + .read = seq_read, + .write = qstat_seq_write, + .llseek = seq_lseek, + .release = single_release, +}; + +static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev) +{ + int i = 0; + char name[40]; + + while (debugfs_queues[i] != NULL) { + i++; + if (i >= MAX_DEBUGFS_QUEUES) + return; + } + get_queue_name(q, cdev, name); + debugfs_queues[i] = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR, + debugfs_root, q, &debugfs_fops); +} + +void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev) +{ + struct qdio_q *q; + int i; + + mutex_lock(&debugfs_mutex); + for_each_input_queue(irq_ptr, q, i) + setup_debugfs_entry(q, cdev); + for_each_output_queue(irq_ptr, q, i) + setup_debugfs_entry(q, cdev); + mutex_unlock(&debugfs_mutex); +} + +void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev) +{ + struct qdio_q *q; + int 
i; + + mutex_lock(&debugfs_mutex); + for_each_input_queue(irq_ptr, q, i) + remove_debugfs_entry(q); + for_each_output_queue(irq_ptr, q, i) + remove_debugfs_entry(q); + mutex_unlock(&debugfs_mutex); +} + +int __init qdio_debug_init(void) +{ + debugfs_root = debugfs_create_dir("qdio_queues", NULL); + return qdio_register_dbf_views(); +} + +void qdio_debug_exit(void) +{ + debugfs_remove(debugfs_root); + qdio_unregister_dbf_views(); +} diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h new file mode 100644 index 00000000000..8484b83698e --- /dev/null +++ b/drivers/s390/cio/qdio_debug.h @@ -0,0 +1,91 @@ +/* + * drivers/s390/cio/qdio_debug.h + * + * Copyright IBM Corp. 2008 + * + * Author: Jan Glauber (jang@linux.vnet.ibm.com) + */ +#ifndef QDIO_DEBUG_H +#define QDIO_DEBUG_H + +#include <asm/debug.h> +#include <asm/qdio.h> +#include "qdio.h" + +#define QDIO_DBF_HEX(ex, name, level, addr, len) \ + do { \ + if (ex) \ + debug_exception(qdio_dbf_##name, level, (void *)(addr), len); \ + else \ + debug_event(qdio_dbf_##name, level, (void *)(addr), len); \ + } while (0) +#define QDIO_DBF_TEXT(ex, name, level, text) \ + do { \ + if (ex) \ + debug_text_exception(qdio_dbf_##name, level, text); \ + else \ + debug_text_event(qdio_dbf_##name, level, text); \ + } while (0) + +#define QDIO_DBF_HEX0(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 0, addr, len) +#define QDIO_DBF_HEX1(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 1, addr, len) +#define QDIO_DBF_HEX2(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 2, addr, len) + +#ifdef CONFIG_QDIO_DEBUG +#define QDIO_DBF_HEX3(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 3, addr, len) +#define QDIO_DBF_HEX4(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 4, addr, len) +#define QDIO_DBF_HEX5(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 5, addr, len) +#define QDIO_DBF_HEX6(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 6, addr, len) +#else +#define QDIO_DBF_HEX3(ex, name, addr, len) do {} while (0) +#define QDIO_DBF_HEX4(ex, name, addr, len) do {} while (0) +#define QDIO_DBF_HEX5(ex, name, addr, len) do {} while (0) +#define QDIO_DBF_HEX6(ex, name, addr, len) do {} while (0) +#endif /* CONFIG_QDIO_DEBUG */ + +#define QDIO_DBF_TEXT0(ex, name, text) QDIO_DBF_TEXT(ex, name, 0, text) +#define QDIO_DBF_TEXT1(ex, name, text) QDIO_DBF_TEXT(ex, name, 1, text) +#define QDIO_DBF_TEXT2(ex, name, text) QDIO_DBF_TEXT(ex, name, 2, text) + +#ifdef CONFIG_QDIO_DEBUG +#define QDIO_DBF_TEXT3(ex, name, text) QDIO_DBF_TEXT(ex, name, 3, text) +#define QDIO_DBF_TEXT4(ex, name, text) QDIO_DBF_TEXT(ex, name, 4, text) +#define QDIO_DBF_TEXT5(ex, name, text) QDIO_DBF_TEXT(ex, name, 5, text) +#define QDIO_DBF_TEXT6(ex, name, text) QDIO_DBF_TEXT(ex, name, 6, text) +#else +#define QDIO_DBF_TEXT3(ex, name, text) do {} while (0) +#define QDIO_DBF_TEXT4(ex, name, text) do {} while (0) +#define QDIO_DBF_TEXT5(ex, name, text) do {} while (0) +#define QDIO_DBF_TEXT6(ex, name, text) do {} while (0) +#endif /* CONFIG_QDIO_DEBUG */ + +/* s390dbf views */ +#define QDIO_DBF_SETUP_LEN 8 +#define QDIO_DBF_SETUP_PAGES 4 +#define QDIO_DBF_SETUP_NR_AREAS 1 + +#define QDIO_DBF_TRACE_LEN 8 +#define QDIO_DBF_TRACE_NR_AREAS 2 + +#ifdef CONFIG_QDIO_DEBUG +#define QDIO_DBF_TRACE_PAGES 16 +#define QDIO_DBF_SETUP_LEVEL 6 +#define QDIO_DBF_TRACE_LEVEL 4 +#else /* !CONFIG_QDIO_DEBUG */ +#define QDIO_DBF_TRACE_PAGES 4 +#define QDIO_DBF_SETUP_LEVEL 2 +#define QDIO_DBF_TRACE_LEVEL 2 +#endif /* CONFIG_QDIO_DEBUG */ + +extern debug_info_t *qdio_dbf_setup; +extern debug_info_t *qdio_dbf_trace; + +void 
qdio_allocate_do_dbf(struct qdio_initialize *init_data); +void debug_print_bstat(struct qdio_q *q); +void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, + struct ccw_device *cdev); +void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, + struct ccw_device *cdev); +int qdio_debug_init(void); +void qdio_debug_exit(void); +#endif diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c new file mode 100644 index 00000000000..d10c73cc168 --- /dev/null +++ b/drivers/s390/cio/qdio_main.c @@ -0,0 +1,1755 @@ +/* + * linux/drivers/s390/cio/qdio_main.c + * + * Linux for s390 qdio support, buffer handling, qdio API and module support. + * + * Copyright 2000,2008 IBM Corp. + * Author(s): Utz Bacher <utz.bacher@de.ibm.com> + * Jan Glauber <jang@linux.vnet.ibm.com> + * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com> + */ +#include <linux/module.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/timer.h> +#include <linux/delay.h> +#include <asm/atomic.h> +#include <asm/debug.h> +#include <asm/qdio.h> + +#include "cio.h" +#include "css.h" +#include "device.h" +#include "qdio.h" +#include "qdio_debug.h" +#include "qdio_perf.h" + +MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\ + "Jan Glauber <jang@linux.vnet.ibm.com>"); +MODULE_DESCRIPTION("QDIO base support"); +MODULE_LICENSE("GPL"); + +static inline int do_siga_sync(struct subchannel_id schid, + unsigned int out_mask, unsigned int in_mask) +{ + register unsigned long __fc asm ("0") = 2; + register struct subchannel_id __schid asm ("1") = schid; + register unsigned long out asm ("2") = out_mask; + register unsigned long in asm ("3") = in_mask; + int cc; + + asm volatile( + " siga 0\n" + " ipm %0\n" + " srl %0,28\n" + : "=d" (cc) + : "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc"); + return cc; +} + +static inline int do_siga_input(struct subchannel_id schid, unsigned int mask) +{ + register unsigned long __fc asm ("0") = 1; + register struct subchannel_id __schid asm ("1") = schid; + register unsigned long __mask asm ("2") = mask; + int cc; + + asm volatile( + " siga 0\n" + " ipm %0\n" + " srl %0,28\n" + : "=d" (cc) + : "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory"); + return cc; +} + +/** + * do_siga_output - perform SIGA-w/wt function + * @schid: subchannel id or in case of QEBSM the subchannel token + * @mask: which output queues to process + * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer + * @fc: function code to perform + * + * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION. + * Note: For IQDC unicast queues only the highest priority queue is processed. 
+ */ +static inline int do_siga_output(unsigned long schid, unsigned long mask, + u32 *bb, unsigned int fc) +{ + register unsigned long __fc asm("0") = fc; + register unsigned long __schid asm("1") = schid; + register unsigned long __mask asm("2") = mask; + int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION; + + asm volatile( + " siga 0\n" + "0: ipm %0\n" + " srl %0,28\n" + "1:\n" + EX_TABLE(0b, 1b) + : "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask) + : : "cc", "memory"); + *bb = ((unsigned int) __fc) >> 31; + return cc; +} + +static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq) +{ + char dbf_text[15]; + + /* all done or next buffer state different */ + if (ccq == 0 || ccq == 32) + return 0; + /* not all buffers processed */ + if (ccq == 96 || ccq == 97) + return 1; + /* notify devices immediately */ + sprintf(dbf_text, "%d", ccq); + QDIO_DBF_TEXT2(1, trace, dbf_text); + return -EIO; +} + +/** + * qdio_do_eqbs - extract buffer states for QEBSM + * @q: queue to manipulate + * @state: state of the extracted buffers + * @start: buffer number to start at + * @count: count of buffers to examine + * + * Returns the number of successfull extracted equal buffer states. + * Stops processing if a state is different from the last buffers state. + */ +static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, + int start, int count) +{ + unsigned int ccq = 0; + int tmp_count = count, tmp_start = start; + int nr = q->nr; + int rc; + char dbf_text[15]; + + BUG_ON(!q->irq_ptr->sch_token); + + if (!q->is_input_q) + nr += q->irq_ptr->nr_input_qs; +again: + ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count); + rc = qdio_check_ccq(q, ccq); + + /* At least one buffer was processed, return and extract the remaining + * buffers later. + */ + if ((ccq == 96) && (count != tmp_count)) + return (count - tmp_count); + if (rc == 1) { + QDIO_DBF_TEXT5(1, trace, "eqAGAIN"); + goto again; + } + + if (rc < 0) { + QDIO_DBF_TEXT2(1, trace, "eqberr"); + sprintf(dbf_text, "%2x,%2x,%d,%d", count, tmp_count, ccq, nr); + QDIO_DBF_TEXT2(1, trace, dbf_text); + q->handler(q->irq_ptr->cdev, + QDIO_ERROR_ACTIVATE_CHECK_CONDITION, + 0, -1, -1, q->irq_ptr->int_parm); + return 0; + } + return count - tmp_count; +} + +/** + * qdio_do_sqbs - set buffer states for QEBSM + * @q: queue to manipulate + * @state: new state of the buffers + * @start: first buffer number to change + * @count: how many buffers to change + * + * Returns the number of successfully changed buffers. + * Does retrying until the specified count of buffer states is set or an + * error occurs. 
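The count/tmp_count bookkeeping used by both wrappers is easier to follow with concrete numbers. The values below are made up purely for illustration; the pattern mirrors how do_eqbs()/do_sqbs() update their out-parameters in place.

/* Worked example of the EQBS/SQBS bookkeeping (made-up values). */
static int sketch_eqbs_bookkeeping(void)
{
	int count = 10;			/* ask for buffers 5..14 */
	int tmp_count = count;		/* updated in place by do_eqbs() */

	tmp_count = 6;			/* pretend 4 equal buffers matched */
	return count - tmp_count;	/* -> 4 buffers (5..8) were extracted */
}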
+ */ +static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start, + int count) +{ + unsigned int ccq = 0; + int tmp_count = count, tmp_start = start; + int nr = q->nr; + int rc; + char dbf_text[15]; + + BUG_ON(!q->irq_ptr->sch_token); + + if (!q->is_input_q) + nr += q->irq_ptr->nr_input_qs; +again: + ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count); + rc = qdio_check_ccq(q, ccq); + if (rc == 1) { + QDIO_DBF_TEXT5(1, trace, "sqAGAIN"); + goto again; + } + if (rc < 0) { + QDIO_DBF_TEXT3(1, trace, "sqberr"); + sprintf(dbf_text, "%2x,%2x", count, tmp_count); + QDIO_DBF_TEXT3(1, trace, dbf_text); + sprintf(dbf_text, "%d,%d", ccq, nr); + QDIO_DBF_TEXT3(1, trace, dbf_text); + + q->handler(q->irq_ptr->cdev, + QDIO_ERROR_ACTIVATE_CHECK_CONDITION, + 0, -1, -1, q->irq_ptr->int_parm); + return 0; + } + WARN_ON(tmp_count); + return count - tmp_count; +} + +/* returns number of examined buffers and their common state in *state */ +static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, + unsigned char *state, unsigned int count) +{ + unsigned char __state = 0; + int i; + + BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK); + BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q); + + if (is_qebsm(q)) + return qdio_do_eqbs(q, state, bufnr, count); + + for (i = 0; i < count; i++) { + if (!__state) + __state = q->slsb.val[bufnr]; + else if (q->slsb.val[bufnr] != __state) + break; + bufnr = next_buf(bufnr); + } + *state = __state; + return i; +} + +inline int get_buf_state(struct qdio_q *q, unsigned int bufnr, + unsigned char *state) +{ + return get_buf_states(q, bufnr, state, 1); +} + +/* wrap-around safe setting of slsb states, returns number of changed buffers */ +static inline int set_buf_states(struct qdio_q *q, int bufnr, + unsigned char state, int count) +{ + int i; + + BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK); + BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q); + + if (is_qebsm(q)) + return qdio_do_sqbs(q, state, bufnr, count); + + for (i = 0; i < count; i++) { + xchg(&q->slsb.val[bufnr], state); + bufnr = next_buf(bufnr); + } + return count; +} + +static inline int set_buf_state(struct qdio_q *q, int bufnr, + unsigned char state) +{ + return set_buf_states(q, bufnr, state, 1); +} + +/* set slsb states to initial state */ +void qdio_init_buf_states(struct qdio_irq *irq_ptr) +{ + struct qdio_q *q; + int i; + + for_each_input_queue(irq_ptr, q, i) + set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT, + QDIO_MAX_BUFFERS_PER_Q); + for_each_output_queue(irq_ptr, q, i) + set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT, + QDIO_MAX_BUFFERS_PER_Q); +} + +static int qdio_siga_sync(struct qdio_q *q, unsigned int output, + unsigned int input) +{ + int cc; + + if (!need_siga_sync(q)) + return 0; + + qdio_perf_stat_inc(&perf_stats.siga_sync); + + cc = do_siga_sync(q->irq_ptr->schid, output, input); + if (cc) { + QDIO_DBF_TEXT4(0, trace, "sigasync"); + QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); + QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *)); + } + return cc; +} + +inline int qdio_siga_sync_q(struct qdio_q *q) +{ + if (q->is_input_q) + return qdio_siga_sync(q, 0, q->mask); + else + return qdio_siga_sync(q, q->mask, 0); +} + +static inline int qdio_siga_sync_out(struct qdio_q *q) +{ + return qdio_siga_sync(q, ~0U, 0); +} + +static inline int qdio_siga_sync_all(struct qdio_q *q) +{ + return qdio_siga_sync(q, ~0U, ~0U); +} + +static inline int qdio_do_siga_output(struct qdio_q *q, unsigned int *busy_bit) +{ + unsigned int fc = 0; + unsigned long schid; + + if (!is_qebsm(q)) + schid = *((u32 *)&q->irq_ptr->schid); + 
else { + schid = q->irq_ptr->sch_token; + fc |= 0x80; + } + return do_siga_output(schid, q->mask, busy_bit, fc); +} + +static int qdio_siga_output(struct qdio_q *q) +{ + int cc; + u32 busy_bit; + u64 start_time = 0; + + QDIO_DBF_TEXT5(0, trace, "sigaout"); + QDIO_DBF_HEX5(0, trace, &q, sizeof(void *)); + + qdio_perf_stat_inc(&perf_stats.siga_out); +again: + cc = qdio_do_siga_output(q, &busy_bit); + if (queue_type(q) == QDIO_IQDIO_QFMT && cc == 2 && busy_bit) { + if (!start_time) + start_time = get_usecs(); + else if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE) + goto again; + } + + if (cc == 2 && busy_bit) + cc |= QDIO_ERROR_SIGA_BUSY; + if (cc) + QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *)); + return cc; +} + +static inline int qdio_siga_input(struct qdio_q *q) +{ + int cc; + + QDIO_DBF_TEXT4(0, trace, "sigain"); + QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); + + qdio_perf_stat_inc(&perf_stats.siga_in); + + cc = do_siga_input(q->irq_ptr->schid, q->mask); + if (cc) + QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *)); + return cc; +} + +/* called from thinint inbound handler */ +void qdio_sync_after_thinint(struct qdio_q *q) +{ + if (pci_out_supported(q)) { + if (need_siga_sync_thinint(q)) + qdio_siga_sync_all(q); + else if (need_siga_sync_out_thinint(q)) + qdio_siga_sync_out(q); + } else + qdio_siga_sync_q(q); +} + +inline void qdio_stop_polling(struct qdio_q *q) +{ + spin_lock_bh(&q->u.in.lock); + if (!q->u.in.polling) { + spin_unlock_bh(&q->u.in.lock); + return; + } + q->u.in.polling = 0; + qdio_perf_stat_inc(&perf_stats.debug_stop_polling); + + /* show the card that we are not polling anymore */ + set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT); + spin_unlock_bh(&q->u.in.lock); +} + +static void announce_buffer_error(struct qdio_q *q) +{ + char dbf_text[15]; + + if (q->is_input_q) + QDIO_DBF_TEXT3(1, trace, "inperr"); + else + QDIO_DBF_TEXT3(0, trace, "outperr"); + + sprintf(dbf_text, "%x-%x-%x", q->first_to_check, + q->sbal[q->first_to_check]->element[14].flags, + q->sbal[q->first_to_check]->element[15].flags); + QDIO_DBF_TEXT3(1, trace, dbf_text); + QDIO_DBF_HEX2(1, trace, q->sbal[q->first_to_check], 256); + + q->qdio_error = QDIO_ERROR_SLSB_STATE; +} + +static int get_inbound_buffer_frontier(struct qdio_q *q) +{ + int count, stop; + unsigned char state; + + /* + * If we still poll don't update last_move_ftc, keep the + * previously ACK buffer there. + */ + if (!q->u.in.polling) + q->last_move_ftc = q->first_to_check; + + /* + * Don't check 128 buffers, as otherwise qdio_inbound_q_moved + * would return 0. + */ + count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK); + stop = add_buf(q->first_to_check, count); + + /* + * No siga sync here, as a PCI or we after a thin interrupt + * will sync the queues. + */ + + /* need to set count to 1 for non-qebsm */ + if (!is_qebsm(q)) + count = 1; + +check_next: + if (q->first_to_check == stop) + goto out; + + count = get_buf_states(q, q->first_to_check, &state, count); + if (!count) + goto out; + + switch (state) { + case SLSB_P_INPUT_PRIMED: + QDIO_DBF_TEXT5(0, trace, "inptprim"); + + /* + * Only ACK the first buffer. The ACK will be removed in + * qdio_stop_polling. + */ + if (q->u.in.polling) + state = SLSB_P_INPUT_NOT_INIT; + else { + q->u.in.polling = 1; + state = SLSB_P_INPUT_ACK; + } + set_buf_state(q, q->first_to_check, state); + + /* + * Need to change all PRIMED buffers to NOT_INIT, otherwise + * we're loosing initiative in the thinint code. 
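Taken together with qdio_stop_polling() above and handle_inbound() further down, the acknowledgement lifecycle of the first PRIMED inbound buffer can be condensed into the following sketch. State names are the SLSB constants used throughout this file; locking and the already-polling case are omitted.

/* Condensed inbound SLSB lifecycle for the first PRIMED buffer (sketch). */
static void sketch_inbound_ack_cycle(struct qdio_q *q, int bufnr)
{
	/* adapter filled the buffer: SLSB_CU_INPUT_EMPTY -> SLSB_P_INPUT_PRIMED */

	/* the driver spots it and acknowledges exactly one buffer */
	q->u.in.polling = 1;
	set_buf_state(q, bufnr, SLSB_P_INPUT_ACK);

	/* later, qdio_stop_polling() takes the acknowledgement back ... */
	set_buf_state(q, bufnr, SLSB_P_INPUT_NOT_INIT);
	q->u.in.polling = 0;

	/* ... and do_QDIO()/handle_inbound() returns the buffer to the adapter */
	set_buf_state(q, bufnr, SLSB_CU_INPUT_EMPTY);
}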
+ */ + if (count > 1) + set_buf_states(q, next_buf(q->first_to_check), + SLSB_P_INPUT_NOT_INIT, count - 1); + + /* + * No siga-sync needed for non-qebsm here, as the inbound queue + * will be synced on the next siga-r, resp. + * tiqdio_is_inbound_q_done will do the siga-sync. + */ + q->first_to_check = add_buf(q->first_to_check, count); + atomic_sub(count, &q->nr_buf_used); + goto check_next; + case SLSB_P_INPUT_ERROR: + announce_buffer_error(q); + /* process the buffer, the upper layer will take care of it */ + q->first_to_check = add_buf(q->first_to_check, count); + atomic_sub(count, &q->nr_buf_used); + break; + case SLSB_CU_INPUT_EMPTY: + case SLSB_P_INPUT_NOT_INIT: + case SLSB_P_INPUT_ACK: + QDIO_DBF_TEXT5(0, trace, "inpnipro"); + break; + default: + BUG(); + } +out: + QDIO_DBF_HEX4(0, trace, &q->first_to_check, sizeof(int)); + return q->first_to_check; +} + +int qdio_inbound_q_moved(struct qdio_q *q) +{ + int bufnr; + + bufnr = get_inbound_buffer_frontier(q); + + if ((bufnr != q->last_move_ftc) || q->qdio_error) { + if (!need_siga_sync(q) && !pci_out_supported(q)) + q->u.in.timestamp = get_usecs(); + + QDIO_DBF_TEXT4(0, trace, "inhasmvd"); + QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); + return 1; + } else + return 0; +} + +static int qdio_inbound_q_done(struct qdio_q *q) +{ + unsigned char state; +#ifdef CONFIG_QDIO_DEBUG + char dbf_text[15]; +#endif + + if (!atomic_read(&q->nr_buf_used)) + return 1; + + /* + * We need that one for synchronization with the adapter, as it + * does a kind of PCI avoidance. + */ + qdio_siga_sync_q(q); + + get_buf_state(q, q->first_to_check, &state); + if (state == SLSB_P_INPUT_PRIMED) + /* we got something to do */ + return 0; + + /* on VM, we don't poll, so the q is always done here */ + if (need_siga_sync(q) || pci_out_supported(q)) + return 1; + + /* + * At this point we know, that inbound first_to_check + * has (probably) not moved (see qdio_inbound_processing). 
+ */ + if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) { +#ifdef CONFIG_QDIO_DEBUG + QDIO_DBF_TEXT4(0, trace, "inqisdon"); + QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); + sprintf(dbf_text, "pf%02x", q->first_to_check); + QDIO_DBF_TEXT4(0, trace, dbf_text); +#endif /* CONFIG_QDIO_DEBUG */ + return 1; + } else { +#ifdef CONFIG_QDIO_DEBUG + QDIO_DBF_TEXT4(0, trace, "inqisntd"); + QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); + sprintf(dbf_text, "pf%02x", q->first_to_check); + QDIO_DBF_TEXT4(0, trace, dbf_text); +#endif /* CONFIG_QDIO_DEBUG */ + return 0; + } +} + +void qdio_kick_inbound_handler(struct qdio_q *q) +{ + int count, start, end; +#ifdef CONFIG_QDIO_DEBUG + char dbf_text[15]; +#endif + + qdio_perf_stat_inc(&perf_stats.inbound_handler); + + start = q->first_to_kick; + end = q->first_to_check; + if (end >= start) + count = end - start; + else + count = end + QDIO_MAX_BUFFERS_PER_Q - start; + +#ifdef CONFIG_QDIO_DEBUG + sprintf(dbf_text, "s=%2xc=%2x", start, count); + QDIO_DBF_TEXT4(0, trace, dbf_text); +#endif /* CONFIG_QDIO_DEBUG */ + + if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) + return; + + q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, + start, count, q->irq_ptr->int_parm); + + /* for the next time */ + q->first_to_kick = q->first_to_check; + q->qdio_error = 0; +} + +static void __qdio_inbound_processing(struct qdio_q *q) +{ + qdio_perf_stat_inc(&perf_stats.tasklet_inbound); +again: + if (!qdio_inbound_q_moved(q)) + return; + + qdio_kick_inbound_handler(q); + + if (!qdio_inbound_q_done(q)) + /* means poll time is not yet over */ + goto again; + + qdio_stop_polling(q); + /* + * We need to check again to not lose initiative after + * resetting the ACK state. + */ + if (!qdio_inbound_q_done(q)) + goto again; +} + +/* inbound tasklet */ +void qdio_inbound_processing(unsigned long data) +{ + struct qdio_q *q = (struct qdio_q *)data; + __qdio_inbound_processing(q); +} + +static int get_outbound_buffer_frontier(struct qdio_q *q) +{ + int count, stop; + unsigned char state; + + if (((queue_type(q) != QDIO_IQDIO_QFMT) && !pci_out_supported(q)) || + (queue_type(q) == QDIO_IQDIO_QFMT && multicast_outbound(q))) + qdio_siga_sync_q(q); + + /* + * Don't check 128 buffers, as otherwise qdio_inbound_q_moved + * would return 0. + */ + count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK); + stop = add_buf(q->first_to_check, count); + + /* need to set count to 1 for non-qebsm */ + if (!is_qebsm(q)) + count = 1; + +check_next: + if (q->first_to_check == stop) + return q->first_to_check; + + count = get_buf_states(q, q->first_to_check, &state, count); + if (!count) + return q->first_to_check; + + switch (state) { + case SLSB_P_OUTPUT_EMPTY: + /* the adapter got it */ + QDIO_DBF_TEXT5(0, trace, "outpempt"); + + atomic_sub(count, &q->nr_buf_used); + q->first_to_check = add_buf(q->first_to_check, count); + /* + * We fetch all buffer states at once. get_buf_states may + * return count < stop. For QEBSM we do not loop. 
+ */ + if (is_qebsm(q)) + break; + goto check_next; + case SLSB_P_OUTPUT_ERROR: + announce_buffer_error(q); + /* process the buffer, the upper layer will take care of it */ + q->first_to_check = add_buf(q->first_to_check, count); + atomic_sub(count, &q->nr_buf_used); + break; + case SLSB_CU_OUTPUT_PRIMED: + /* the adapter has not fetched the output yet */ + QDIO_DBF_TEXT5(0, trace, "outpprim"); + break; + case SLSB_P_OUTPUT_NOT_INIT: + case SLSB_P_OUTPUT_HALTED: + break; + default: + BUG(); + } + return q->first_to_check; +} + +/* all buffers processed? */ +static inline int qdio_outbound_q_done(struct qdio_q *q) +{ + return atomic_read(&q->nr_buf_used) == 0; +} + +static inline int qdio_outbound_q_moved(struct qdio_q *q) +{ + int bufnr; + + bufnr = get_outbound_buffer_frontier(q); + + if ((bufnr != q->last_move_ftc) || q->qdio_error) { + q->last_move_ftc = bufnr; + QDIO_DBF_TEXT4(0, trace, "oqhasmvd"); + QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); + return 1; + } else + return 0; +} + +/* + * VM could present us cc=2 and busy bit set on SIGA-write + * during reconfiguration of their Guest LAN (only in iqdio mode, + * otherwise qdio is asynchronous and cc=2 and busy bit there will take + * the queues down immediately). + * + * Therefore qdio_siga_output will try for a short time constantly, + * if such a condition occurs. If it doesn't change, it will + * increase the busy_siga_counter and save the timestamp, and + * schedule the queue for later processing. qdio_outbound_processing + * will check out the counter. If non-zero, it will call qdio_kick_outbound_q + * as often as the value of the counter. This will attempt further SIGA + * instructions. For each successful SIGA, the counter is + * decreased, for failing SIGAs the counter remains the same, after + * all. After some time of no movement, qdio_kick_outbound_q will + * finally fail and reflect corresponding error codes to call + * the upper layer module and have it take the queues down. + * + * Note that this is a change from the original HiperSockets design + * (saying cc=2 and busy bit means take the queues down), but in + * these days Guest LAN didn't exist... excessive cc=2 with busy bit + * conditions will still take the queues down, but the threshold is + * higher due to the Guest LAN environment. + * + * Called from outbound tasklet and do_QDIO handler. 
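The two time limits mentioned in this comment are easy to confuse, so here is a condensed sketch of the policy only. QDIO_BUSY_BIT_PATIENCE and QDIO_BUSY_BIT_GIVE_UP are assumed to be microsecond limits defined elsewhere in this patch's qdio.h; the real code records the failure in q->qdio_error instead of returning an errno.

/* Sketch of the busy-bit policy; not the literal implementation below. */
static int sketch_busy_bit_policy(struct qdio_q *q)
{
	/* phase 1, qdio_siga_output(): retry the SIGA-w synchronously for
	 * at most QDIO_BUSY_BIT_PATIENCE microseconds
	 */

	/* phase 2, qdio_kick_outbound_q(): remember when the busy condition
	 * started and keep rescheduling the outbound tasklet
	 */
	if (!q->u.out.timestamp)
		q->u.out.timestamp = get_usecs();
	if (get_usecs() - q->u.out.timestamp < QDIO_BUSY_BIT_GIVE_UP)
		return -EAGAIN;		/* retry later via the tasklet */

	/* phase 3: only now is the error reported to the upper layer,
	 * which then takes the queues down
	 */
	return -EBUSY;
}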
+ */ +static void qdio_kick_outbound_q(struct qdio_q *q) +{ + int rc; +#ifdef CONFIG_QDIO_DEBUG + char dbf_text[15]; + + QDIO_DBF_TEXT5(0, trace, "kickoutq"); + QDIO_DBF_HEX5(0, trace, &q, sizeof(void *)); +#endif /* CONFIG_QDIO_DEBUG */ + + if (!need_siga_out(q)) + return; + + rc = qdio_siga_output(q); + switch (rc) { + case 0: + /* went smooth this time, reset timestamp */ + q->u.out.timestamp = 0; + + /* TODO: improve error handling for CC=0 case */ +#ifdef CONFIG_QDIO_DEBUG + QDIO_DBF_TEXT3(0, trace, "cc2reslv"); + sprintf(dbf_text, "%4x%2x%2x", q->irq_ptr->schid.sch_no, q->nr, + atomic_read(&q->u.out.busy_siga_counter)); + QDIO_DBF_TEXT3(0, trace, dbf_text); +#endif /* CONFIG_QDIO_DEBUG */ + break; + /* cc=2 and busy bit */ + case (2 | QDIO_ERROR_SIGA_BUSY): + atomic_inc(&q->u.out.busy_siga_counter); + + /* if the last siga was successful, save timestamp here */ + if (!q->u.out.timestamp) + q->u.out.timestamp = get_usecs(); + + /* if we're in time, don't touch qdio_error */ + if (get_usecs() - q->u.out.timestamp < QDIO_BUSY_BIT_GIVE_UP) { + tasklet_schedule(&q->tasklet); + break; + } + QDIO_DBF_TEXT2(0, trace, "cc2REPRT"); +#ifdef CONFIG_QDIO_DEBUG + sprintf(dbf_text, "%4x%2x%2x", q->irq_ptr->schid.sch_no, q->nr, + atomic_read(&q->u.out.busy_siga_counter)); + QDIO_DBF_TEXT3(0, trace, dbf_text); +#endif /* CONFIG_QDIO_DEBUG */ + default: + /* for plain cc=1, 2 or 3 */ + q->qdio_error = rc; + } +} + +static void qdio_kick_outbound_handler(struct qdio_q *q) +{ + int start, end, count; +#ifdef CONFIG_QDIO_DEBUG + char dbf_text[15]; +#endif + + start = q->first_to_kick; + end = q->last_move_ftc; + if (end >= start) + count = end - start; + else + count = end + QDIO_MAX_BUFFERS_PER_Q - start; + +#ifdef CONFIG_QDIO_DEBUG + QDIO_DBF_TEXT4(0, trace, "kickouth"); + QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); + + sprintf(dbf_text, "s=%2xc=%2x", start, count); + QDIO_DBF_TEXT4(0, trace, dbf_text); +#endif /* CONFIG_QDIO_DEBUG */ + + if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) + return; + + q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count, + q->irq_ptr->int_parm); + + /* for the next time: */ + q->first_to_kick = q->last_move_ftc; + q->qdio_error = 0; +} + +static void __qdio_outbound_processing(struct qdio_q *q) +{ + int siga_attempts; + + qdio_perf_stat_inc(&perf_stats.tasklet_outbound); + + /* see comment in qdio_kick_outbound_q */ + siga_attempts = atomic_read(&q->u.out.busy_siga_counter); + while (siga_attempts--) { + atomic_dec(&q->u.out.busy_siga_counter); + qdio_kick_outbound_q(q); + } + + BUG_ON(atomic_read(&q->nr_buf_used) < 0); + + if (qdio_outbound_q_moved(q)) + qdio_kick_outbound_handler(q); + + if (queue_type(q) == QDIO_ZFCP_QFMT) { + if (!pci_out_supported(q) && !qdio_outbound_q_done(q)) + tasklet_schedule(&q->tasklet); + return; + } + + /* bail out for HiperSockets unicast queues */ + if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q)) + return; + + if (q->u.out.pci_out_enabled) + return; + + /* + * Now we know that queue type is either qeth without pci enabled + * or HiperSockets multicast. Make sure buffer switch from PRIMED to + * EMPTY is noticed and outbound_handler is called after some time. 
+ */ + if (qdio_outbound_q_done(q)) + del_timer(&q->u.out.timer); + else { + if (!timer_pending(&q->u.out.timer)) { + mod_timer(&q->u.out.timer, jiffies + 10 * HZ); + qdio_perf_stat_inc(&perf_stats.debug_tl_out_timer); + } + } +} + +/* outbound tasklet */ +void qdio_outbound_processing(unsigned long data) +{ + struct qdio_q *q = (struct qdio_q *)data; + __qdio_outbound_processing(q); +} + +void qdio_outbound_timer(unsigned long data) +{ + struct qdio_q *q = (struct qdio_q *)data; + tasklet_schedule(&q->tasklet); +} + +/* called from thinint inbound tasklet */ +void qdio_check_outbound_after_thinint(struct qdio_q *q) +{ + struct qdio_q *out; + int i; + + if (!pci_out_supported(q)) + return; + + for_each_output_queue(q->irq_ptr, out, i) + if (!qdio_outbound_q_done(out)) + tasklet_schedule(&out->tasklet); +} + +static inline void qdio_set_state(struct qdio_irq *irq_ptr, + enum qdio_irq_states state) +{ +#ifdef CONFIG_QDIO_DEBUG + char dbf_text[15]; + + QDIO_DBF_TEXT5(0, trace, "newstate"); + sprintf(dbf_text, "%4x%4x", irq_ptr->schid.sch_no, state); + QDIO_DBF_TEXT5(0, trace, dbf_text); +#endif /* CONFIG_QDIO_DEBUG */ + + irq_ptr->state = state; + mb(); +} + +static void qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb) +{ + char dbf_text[15]; + + if (irb->esw.esw0.erw.cons) { + sprintf(dbf_text, "sens%4x", schid.sch_no); + QDIO_DBF_TEXT2(1, trace, dbf_text); + QDIO_DBF_HEX0(0, trace, irb, 64); + QDIO_DBF_HEX0(0, trace, irb->ecw, 64); + } +} + +/* PCI interrupt handler */ +static void qdio_int_handler_pci(struct qdio_irq *irq_ptr) +{ + int i; + struct qdio_q *q; + + qdio_perf_stat_inc(&perf_stats.pci_int); + + for_each_input_queue(irq_ptr, q, i) + tasklet_schedule(&q->tasklet); + + if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)) + return; + + for_each_output_queue(irq_ptr, q, i) { + if (qdio_outbound_q_done(q)) + continue; + + if (!siga_syncs_out_pci(q)) + qdio_siga_sync_q(q); + + tasklet_schedule(&q->tasklet); + } +} + +static void qdio_handle_activate_check(struct ccw_device *cdev, + unsigned long intparm, int cstat, int dstat) +{ + struct qdio_irq *irq_ptr = cdev->private->qdio_data; + struct qdio_q *q; + char dbf_text[15]; + + QDIO_DBF_TEXT2(1, trace, "ick2"); + sprintf(dbf_text, "%s", cdev->dev.bus_id); + QDIO_DBF_TEXT2(1, trace, dbf_text); + QDIO_DBF_HEX2(0, trace, &intparm, sizeof(int)); + QDIO_DBF_HEX2(0, trace, &dstat, sizeof(int)); + QDIO_DBF_HEX2(0, trace, &cstat, sizeof(int)); + + if (irq_ptr->nr_input_qs) { + q = irq_ptr->input_qs[0]; + } else if (irq_ptr->nr_output_qs) { + q = irq_ptr->output_qs[0]; + } else { + dump_stack(); + goto no_handler; + } + q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION, + 0, -1, -1, irq_ptr->int_parm); +no_handler: + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); +} + +static void qdio_call_shutdown(struct work_struct *work) +{ + struct ccw_device_private *priv; + struct ccw_device *cdev; + + priv = container_of(work, struct ccw_device_private, kick_work); + cdev = priv->cdev; + qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); + put_device(&cdev->dev); +} + +static void qdio_int_error(struct ccw_device *cdev) +{ + struct qdio_irq *irq_ptr = cdev->private->qdio_data; + + switch (irq_ptr->state) { + case QDIO_IRQ_STATE_INACTIVE: + case QDIO_IRQ_STATE_CLEANUP: + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); + break; + case QDIO_IRQ_STATE_ESTABLISHED: + case QDIO_IRQ_STATE_ACTIVE: + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); + if (get_device(&cdev->dev)) { + /* Can't call shutdown from interrupt context. 
*/ + PREPARE_WORK(&cdev->private->kick_work, + qdio_call_shutdown); + queue_work(ccw_device_work, &cdev->private->kick_work); + } + break; + default: + WARN_ON(1); + } + wake_up(&cdev->private->wait_q); +} + +static int qdio_establish_check_errors(struct ccw_device *cdev, int cstat, + int dstat) +{ + struct qdio_irq *irq_ptr = cdev->private->qdio_data; + + if (cstat || (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))) { + QDIO_DBF_TEXT2(1, setup, "eq:ckcon"); + goto error; + } + + if (!(dstat & DEV_STAT_DEV_END)) { + QDIO_DBF_TEXT2(1, setup, "eq:no de"); + goto error; + } + + if (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) { + QDIO_DBF_TEXT2(1, setup, "eq:badio"); + goto error; + } + return 0; +error: + QDIO_DBF_HEX2(0, trace, &cstat, sizeof(int)); + QDIO_DBF_HEX2(0, trace, &dstat, sizeof(int)); + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); + return 1; +} + +static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, + int dstat) +{ + struct qdio_irq *irq_ptr = cdev->private->qdio_data; + char dbf_text[15]; + + sprintf(dbf_text, "qehi%4x", cdev->private->schid.sch_no); + QDIO_DBF_TEXT0(0, setup, dbf_text); + QDIO_DBF_TEXT0(0, trace, dbf_text); + + if (!qdio_establish_check_errors(cdev, cstat, dstat)) + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED); +} + +/* qdio interrupt handler */ +void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, + struct irb *irb) +{ + struct qdio_irq *irq_ptr = cdev->private->qdio_data; + int cstat, dstat; + char dbf_text[15]; + + qdio_perf_stat_inc(&perf_stats.qdio_int); + + if (!intparm || !irq_ptr) { + sprintf(dbf_text, "qihd%4x", cdev->private->schid.sch_no); + QDIO_DBF_TEXT2(1, setup, dbf_text); + return; + } + + if (IS_ERR(irb)) { + switch (PTR_ERR(irb)) { + case -EIO: + sprintf(dbf_text, "ierr%4x", + cdev->private->schid.sch_no); + QDIO_DBF_TEXT2(1, setup, dbf_text); + qdio_int_error(cdev); + return; + case -ETIMEDOUT: + sprintf(dbf_text, "qtoh%4x", + cdev->private->schid.sch_no); + QDIO_DBF_TEXT2(1, setup, dbf_text); + qdio_int_error(cdev); + return; + default: + WARN_ON(1); + return; + } + } + qdio_irq_check_sense(irq_ptr->schid, irb); + + cstat = irb->scsw.cmd.cstat; + dstat = irb->scsw.cmd.dstat; + + switch (irq_ptr->state) { + case QDIO_IRQ_STATE_INACTIVE: + qdio_establish_handle_irq(cdev, cstat, dstat); + break; + + case QDIO_IRQ_STATE_CLEANUP: + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); + break; + + case QDIO_IRQ_STATE_ESTABLISHED: + case QDIO_IRQ_STATE_ACTIVE: + if (cstat & SCHN_STAT_PCI) { + qdio_int_handler_pci(irq_ptr); + /* no state change so no need to wake up wait_q */ + return; + } + if ((cstat & ~SCHN_STAT_PCI) || dstat) { + qdio_handle_activate_check(cdev, intparm, cstat, + dstat); + break; + } + default: + WARN_ON(1); + } + wake_up(&cdev->private->wait_q); +} + +/** + * qdio_get_ssqd_desc - get qdio subchannel description + * @cdev: ccw device to get description for + * + * Returns a pointer to the saved qdio subchannel description, + * or NULL for not setup qdio devices. 
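A device driver would typically query this descriptor once after establishing the subchannel. The caller below is hypothetical; the concrete fields of struct qdio_ssqd_desc come from asm/qdio.h and are not shown in this hunk.

/* Hypothetical caller: fetch the cached SSQD after qdio_establish(). */
static int sketch_query_ssqd(struct ccw_device *cdev)
{
	struct qdio_ssqd_desc *ssqd = qdio_get_ssqd_desc(cdev);

	if (!ssqd)
		return -ENODEV;	/* qdio is not set up on this device */

	/* inspect the descriptor fields (per asm/qdio.h) as needed */
	return 0;
}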
+ */ +struct qdio_ssqd_desc *qdio_get_ssqd_desc(struct ccw_device *cdev) +{ + struct qdio_irq *irq_ptr; + + QDIO_DBF_TEXT0(0, setup, "getssqd"); + + irq_ptr = cdev->private->qdio_data; + if (!irq_ptr) + return NULL; + + return &irq_ptr->ssqd_desc; +} +EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc); + +/** + * qdio_cleanup - shutdown queues and free data structures + * @cdev: associated ccw device + * @how: use halt or clear to shutdown + * + * This function calls qdio_shutdown() for @cdev with method @how + * and on success qdio_free() for @cdev. + */ +int qdio_cleanup(struct ccw_device *cdev, int how) +{ + struct qdio_irq *irq_ptr; + char dbf_text[15]; + int rc; + + irq_ptr = cdev->private->qdio_data; + if (!irq_ptr) + return -ENODEV; + + sprintf(dbf_text, "qcln%4x", irq_ptr->schid.sch_no); + QDIO_DBF_TEXT1(0, trace, dbf_text); + QDIO_DBF_TEXT0(0, setup, dbf_text); + + rc = qdio_shutdown(cdev, how); + if (rc == 0) + rc = qdio_free(cdev); + return rc; +} +EXPORT_SYMBOL_GPL(qdio_cleanup); + +static void qdio_shutdown_queues(struct ccw_device *cdev) +{ + struct qdio_irq *irq_ptr = cdev->private->qdio_data; + struct qdio_q *q; + int i; + + for_each_input_queue(irq_ptr, q, i) + tasklet_disable(&q->tasklet); + + for_each_output_queue(irq_ptr, q, i) { + tasklet_disable(&q->tasklet); + del_timer(&q->u.out.timer); + } +} + +/** + * qdio_shutdown - shut down a qdio subchannel + * @cdev: associated ccw device + * @how: use halt or clear to shutdown + */ +int qdio_shutdown(struct ccw_device *cdev, int how) +{ + struct qdio_irq *irq_ptr; + int rc; + unsigned long flags; + char dbf_text[15]; + + irq_ptr = cdev->private->qdio_data; + if (!irq_ptr) + return -ENODEV; + + mutex_lock(&irq_ptr->setup_mutex); + /* + * Subchannel was already shot down. We cannot prevent being called + * twice since cio may trigger a shutdown asynchronously. 
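From a driver's point of view the tear-down described here is either the combined helper or the two explicit steps. A hedged sketch of both variants (hypothetical callers; passing 0 as the flags relies on the "default behaviour is halt" handling shown below):

/* Hypothetical tear-down using the combined helper ... */
static int sketch_teardown_combined(struct ccw_device *cdev)
{
	return qdio_cleanup(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
}

/* ... or the two explicit steps, here halting instead of clearing. */
static int sketch_teardown_explicit(struct ccw_device *cdev)
{
	int rc = qdio_shutdown(cdev, 0);	/* 0: default, halt the subchannel */

	if (rc == 0)
		rc = qdio_free(cdev);
	return rc;
}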
+ */ + if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) { + mutex_unlock(&irq_ptr->setup_mutex); + return 0; + } + + sprintf(dbf_text, "qsqs%4x", irq_ptr->schid.sch_no); + QDIO_DBF_TEXT1(0, trace, dbf_text); + QDIO_DBF_TEXT0(0, setup, dbf_text); + + tiqdio_remove_input_queues(irq_ptr); + qdio_shutdown_queues(cdev); + qdio_shutdown_debug_entries(irq_ptr, cdev); + + /* cleanup subchannel */ + spin_lock_irqsave(get_ccwdev_lock(cdev), flags); + + if (how & QDIO_FLAG_CLEANUP_USING_CLEAR) + rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP); + else + /* default behaviour is halt */ + rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP); + if (rc) { + sprintf(dbf_text, "sher%4x", irq_ptr->schid.sch_no); + QDIO_DBF_TEXT0(0, setup, dbf_text); + sprintf(dbf_text, "rc=%d", rc); + QDIO_DBF_TEXT0(0, setup, dbf_text); + goto no_cleanup; + } + + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP); + spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); + wait_event_interruptible_timeout(cdev->private->wait_q, + irq_ptr->state == QDIO_IRQ_STATE_INACTIVE || + irq_ptr->state == QDIO_IRQ_STATE_ERR, + 10 * HZ); + spin_lock_irqsave(get_ccwdev_lock(cdev), flags); + +no_cleanup: + qdio_shutdown_thinint(irq_ptr); + + /* restore interrupt handler */ + if ((void *)cdev->handler == (void *)qdio_int_handler) + cdev->handler = irq_ptr->orig_handler; + spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); + + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); + mutex_unlock(&irq_ptr->setup_mutex); + module_put(THIS_MODULE); + if (rc) + return rc; + return 0; +} +EXPORT_SYMBOL_GPL(qdio_shutdown); + +/** + * qdio_free - free data structures for a qdio subchannel + * @cdev: associated ccw device + */ +int qdio_free(struct ccw_device *cdev) +{ + struct qdio_irq *irq_ptr; + char dbf_text[15]; + + irq_ptr = cdev->private->qdio_data; + if (!irq_ptr) + return -ENODEV; + + mutex_lock(&irq_ptr->setup_mutex); + + sprintf(dbf_text, "qfqs%4x", irq_ptr->schid.sch_no); + QDIO_DBF_TEXT1(0, trace, dbf_text); + QDIO_DBF_TEXT0(0, setup, dbf_text); + + cdev->private->qdio_data = NULL; + mutex_unlock(&irq_ptr->setup_mutex); + + qdio_release_memory(irq_ptr); + return 0; +} +EXPORT_SYMBOL_GPL(qdio_free); + +/** + * qdio_initialize - allocate and establish queues for a qdio subchannel + * @init_data: initialization data + * + * This function first allocates queues via qdio_allocate() and on success + * establishes them via qdio_establish(). 
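A caller-side sketch of this setup path may help: the struct qdio_initialize fields used below are the ones referenced by qdio_allocate_do_dbf() and qdio_allocate() in this patch, the handler signature is assumed from the way q->handler is invoked in this file, and the driver, its handlers and QDIO_QETH_QFMT (from asm/qdio.h) are illustrative assumptions, not part of the patch.

/* Hypothetical upper-layer driver bringing up one input and one output queue. */
static void sketch_input_handler(struct ccw_device *cdev, unsigned int qdio_err,
				 int queue, int first, int count,
				 unsigned long int_parm)
{
	/* process buffers first..first+count-1, then hand them back
	 * with do_QDIO(..., QDIO_FLAG_SYNC_INPUT, ...)
	 */
}

static void sketch_output_handler(struct ccw_device *cdev, unsigned int qdio_err,
				  int queue, int first, int count,
				  unsigned long int_parm)
{
	/* buffers first..first+count-1 have been handed to the adapter */
}

static int sketch_setup_qdio(struct ccw_device *cdev, void **in_sbals,
			     void **out_sbals)
{
	struct qdio_initialize init_data = {
		.cdev			= cdev,
		.q_format		= QDIO_QETH_QFMT,
		.no_input_qs		= 1,
		.no_output_qs		= 1,
		.input_handler		= sketch_input_handler,
		.output_handler		= sketch_output_handler,
		.int_parm		= (unsigned long) cdev,
		.input_sbal_addr_array	= in_sbals,
		.output_sbal_addr_array	= out_sbals,
	};
	int rc;

	rc = qdio_initialize(&init_data);	/* qdio_allocate + qdio_establish */
	if (rc)
		return rc;
	return qdio_activate(cdev);
}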
+ */ +int qdio_initialize(struct qdio_initialize *init_data) +{ + int rc; + char dbf_text[15]; + + sprintf(dbf_text, "qini%4x", init_data->cdev->private->schid.sch_no); + QDIO_DBF_TEXT0(0, setup, dbf_text); + QDIO_DBF_TEXT0(0, trace, dbf_text); + + rc = qdio_allocate(init_data); + if (rc) + return rc; + + rc = qdio_establish(init_data); + if (rc) + qdio_free(init_data->cdev); + return rc; +} +EXPORT_SYMBOL_GPL(qdio_initialize); + +/** + * qdio_allocate - allocate qdio queues and associated data + * @init_data: initialization data + */ +int qdio_allocate(struct qdio_initialize *init_data) +{ + struct qdio_irq *irq_ptr; + char dbf_text[15]; + + sprintf(dbf_text, "qalc%4x", init_data->cdev->private->schid.sch_no); + QDIO_DBF_TEXT0(0, setup, dbf_text); + QDIO_DBF_TEXT0(0, trace, dbf_text); + + if ((init_data->no_input_qs && !init_data->input_handler) || + (init_data->no_output_qs && !init_data->output_handler)) + return -EINVAL; + + if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) || + (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ)) + return -EINVAL; + + if ((!init_data->input_sbal_addr_array) || + (!init_data->output_sbal_addr_array)) + return -EINVAL; + + qdio_allocate_do_dbf(init_data); + + /* irq_ptr must be in GFP_DMA since it contains ccw1.cda */ + irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!irq_ptr) + goto out_err; + QDIO_DBF_TEXT0(0, setup, "irq_ptr:"); + QDIO_DBF_HEX0(0, setup, &irq_ptr, sizeof(void *)); + + mutex_init(&irq_ptr->setup_mutex); + + /* + * Allocate a page for the chsc calls in qdio_establish. + * Must be pre-allocated since a zfcp recovery will call + * qdio_establish. In case of low memory and swap on a zfcp disk + * we may not be able to allocate memory otherwise. + */ + irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL); + if (!irq_ptr->chsc_page) + goto out_rel; + + /* qdr is used in ccw1.cda which is u32 */ + irq_ptr->qdr = kzalloc(sizeof(struct qdr), GFP_KERNEL | GFP_DMA); + if (!irq_ptr->qdr) + goto out_rel; + WARN_ON((unsigned long)irq_ptr->qdr & 0xfff); + + QDIO_DBF_TEXT0(0, setup, "qdr:"); + QDIO_DBF_HEX0(0, setup, &irq_ptr->qdr, sizeof(void *)); + + if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs, + init_data->no_output_qs)) + goto out_rel; + + init_data->cdev->private->qdio_data = irq_ptr; + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); + return 0; +out_rel: + qdio_release_memory(irq_ptr); +out_err: + return -ENOMEM; +} +EXPORT_SYMBOL_GPL(qdio_allocate); + +/** + * qdio_establish - establish queues on a qdio subchannel + * @init_data: initialization data + */ +int qdio_establish(struct qdio_initialize *init_data) +{ + char dbf_text[20]; + struct qdio_irq *irq_ptr; + struct ccw_device *cdev = init_data->cdev; + unsigned long saveflags; + int rc; + + irq_ptr = cdev->private->qdio_data; + if (!irq_ptr) + return -ENODEV; + + if (cdev->private->state != DEV_STATE_ONLINE) + return -EINVAL; + + if (!try_module_get(THIS_MODULE)) + return -EINVAL; + + sprintf(dbf_text, "qest%4x", cdev->private->schid.sch_no); + QDIO_DBF_TEXT0(0, setup, dbf_text); + QDIO_DBF_TEXT0(0, trace, dbf_text); + + mutex_lock(&irq_ptr->setup_mutex); + qdio_setup_irq(init_data); + + rc = qdio_establish_thinint(irq_ptr); + if (rc) { + mutex_unlock(&irq_ptr->setup_mutex); + qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); + return rc; + } + + /* establish q */ + irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd; + irq_ptr->ccw.flags = CCW_FLAG_SLI; + irq_ptr->ccw.count = irq_ptr->equeue.count; + irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr); + + 
spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags); + ccw_device_set_options_mask(cdev, 0); + + rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0); + if (rc) { + sprintf(dbf_text, "eq:io%4x", irq_ptr->schid.sch_no); + QDIO_DBF_TEXT2(1, setup, dbf_text); + sprintf(dbf_text, "eq:rc%4x", rc); + QDIO_DBF_TEXT2(1, setup, dbf_text); + } + spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); + + if (rc) { + mutex_unlock(&irq_ptr->setup_mutex); + qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); + return rc; + } + + wait_event_interruptible_timeout(cdev->private->wait_q, + irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED || + irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ); + + if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) { + mutex_unlock(&irq_ptr->setup_mutex); + qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); + return -EIO; + } + + qdio_setup_ssqd_info(irq_ptr); + sprintf(dbf_text, "qib ac%2x", irq_ptr->qib.ac); + QDIO_DBF_TEXT2(0, setup, dbf_text); + + /* qebsm is now setup if available, initialize buffer states */ + qdio_init_buf_states(irq_ptr); + + mutex_unlock(&irq_ptr->setup_mutex); + qdio_print_subchannel_info(irq_ptr, cdev); + qdio_setup_debug_entries(irq_ptr, cdev); + return 0; +} +EXPORT_SYMBOL_GPL(qdio_establish); + +/** + * qdio_activate - activate queues on a qdio subchannel + * @cdev: associated cdev + */ +int qdio_activate(struct ccw_device *cdev) +{ + struct qdio_irq *irq_ptr; + int rc; + unsigned long saveflags; + char dbf_text[20]; + + irq_ptr = cdev->private->qdio_data; + if (!irq_ptr) + return -ENODEV; + + if (cdev->private->state != DEV_STATE_ONLINE) + return -EINVAL; + + mutex_lock(&irq_ptr->setup_mutex); + if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) { + rc = -EBUSY; + goto out; + } + + sprintf(dbf_text, "qact%4x", irq_ptr->schid.sch_no); + QDIO_DBF_TEXT2(0, setup, dbf_text); + QDIO_DBF_TEXT2(0, trace, dbf_text); + + irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd; + irq_ptr->ccw.flags = CCW_FLAG_SLI; + irq_ptr->ccw.count = irq_ptr->aqueue.count; + irq_ptr->ccw.cda = 0; + + spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags); + ccw_device_set_options(cdev, CCWDEV_REPORT_ALL); + + rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE, + 0, DOIO_DENY_PREFETCH); + if (rc) { + sprintf(dbf_text, "aq:io%4x", irq_ptr->schid.sch_no); + QDIO_DBF_TEXT2(1, setup, dbf_text); + sprintf(dbf_text, "aq:rc%4x", rc); + QDIO_DBF_TEXT2(1, setup, dbf_text); + } + spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); + + if (rc) + goto out; + + if (is_thinint_irq(irq_ptr)) + tiqdio_add_input_queues(irq_ptr); + + /* wait for subchannel to become active */ + msleep(5); + + switch (irq_ptr->state) { + case QDIO_IRQ_STATE_STOPPED: + case QDIO_IRQ_STATE_ERR: + mutex_unlock(&irq_ptr->setup_mutex); + qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); + return -EIO; + default: + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE); + rc = 0; + } +out: + mutex_unlock(&irq_ptr->setup_mutex); + return rc; +} +EXPORT_SYMBOL_GPL(qdio_activate); + +static inline int buf_in_between(int bufnr, int start, int count) +{ + int end = add_buf(start, count); + + if (end > start) { + if (bufnr >= start && bufnr < end) + return 1; + else + return 0; + } + + /* wrap-around case */ + if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) || + (bufnr < end)) + return 1; + else + return 0; +} + +/** + * handle_inbound - reset processed input buffers + * @q: queue containing the buffers + * @callflags: flags + * @bufnr: first buffer to process + * @count: how many buffers are emptied + */ 
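handle_inbound() and handle_outbound() below are only reached through do_QDIO(). From the upper layer the two directions look roughly like this; the callers are hypothetical, the flag names and the do_QDIO() signature are the ones used further down in this file, and queue 0 is just an example.

/* Hypothetical upper-layer calls feeding the two helpers below. */
static int sketch_return_input_buffers(struct ccw_device *cdev, int bufnr,
				       int count)
{
	/* give processed input buffers back to the adapter (input queue 0) */
	return do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, bufnr, count);
}

static int sketch_send_output_buffers(struct ccw_device *cdev, int bufnr,
				      int count)
{
	/* hand filled output buffers to the adapter and ask for a PCI
	 * interrupt once they have been processed
	 */
	return do_QDIO(cdev, QDIO_FLAG_SYNC_OUTPUT | QDIO_FLAG_PCI_OUT,
		       0, bufnr, count);
}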
+static void handle_inbound(struct qdio_q *q, unsigned int callflags, + int bufnr, int count) +{ + unsigned long flags; + int used, rc; + + /* + * do_QDIO could run in parallel with the queue tasklet so the + * upper-layer program could empty the ACK'ed buffer here. + * If that happens we must clear the polling flag, otherwise + * qdio_stop_polling() could set the buffer to NOT_INIT after + * it was set to EMPTY which would kill us. + */ + spin_lock_irqsave(&q->u.in.lock, flags); + if (q->u.in.polling) + if (buf_in_between(q->last_move_ftc, bufnr, count)) + q->u.in.polling = 0; + + count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count); + spin_unlock_irqrestore(&q->u.in.lock, flags); + + used = atomic_add_return(count, &q->nr_buf_used) - count; + BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q); + + /* no need to signal as long as the adapter had free buffers */ + if (used) + return; + + if (need_siga_in(q)) { + rc = qdio_siga_input(q); + if (rc) + q->qdio_error = rc; + } +} + +/** + * handle_outbound - process filled outbound buffers + * @q: queue containing the buffers + * @callflags: flags + * @bufnr: first buffer to process + * @count: how many buffers are filled + */ +static void handle_outbound(struct qdio_q *q, unsigned int callflags, + int bufnr, int count) +{ + unsigned char state; + int used; + + qdio_perf_stat_inc(&perf_stats.outbound_handler); + + count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count); + used = atomic_add_return(count, &q->nr_buf_used); + BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q); + + if (callflags & QDIO_FLAG_PCI_OUT) + q->u.out.pci_out_enabled = 1; + else + q->u.out.pci_out_enabled = 0; + + if (queue_type(q) == QDIO_IQDIO_QFMT) { + if (multicast_outbound(q)) + qdio_kick_outbound_q(q); + else + /* + * One siga-w per buffer required for unicast + * HiperSockets.
+ */ + while (count--) + qdio_kick_outbound_q(q); + goto out; + } + + if (need_siga_sync(q)) { + qdio_siga_sync_q(q); + goto out; + } + + /* try to fast requeue buffers */ + get_buf_state(q, prev_buf(bufnr), &state); + if (state != SLSB_CU_OUTPUT_PRIMED) + qdio_kick_outbound_q(q); + else { + QDIO_DBF_TEXT5(0, trace, "fast-req"); + qdio_perf_stat_inc(&perf_stats.fast_requeue); + } +out: + /* Fixme: could wait forever if called from process context */ + tasklet_schedule(&q->tasklet); +} + +/** + * do_QDIO - process input or output buffers + * @cdev: associated ccw_device for the qdio subchannel + * @callflags: input or output and special flags from the program + * @q_nr: queue number + * @bufnr: buffer number + * @count: how many buffers to process + */ +int do_QDIO(struct ccw_device *cdev, unsigned int callflags, + int q_nr, int bufnr, int count) +{ + struct qdio_irq *irq_ptr; +#ifdef CONFIG_QDIO_DEBUG + char dbf_text[20]; + + sprintf(dbf_text, "doQD%04x", cdev->private->schid.sch_no); + QDIO_DBF_TEXT3(0, trace, dbf_text); +#endif /* CONFIG_QDIO_DEBUG */ + + if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) || + (count > QDIO_MAX_BUFFERS_PER_Q) || + (q_nr > QDIO_MAX_QUEUES_PER_IRQ)) + return -EINVAL; + + if (!count) + return 0; + + irq_ptr = cdev->private->qdio_data; + if (!irq_ptr) + return -ENODEV; + +#ifdef CONFIG_QDIO_DEBUG + if (callflags & QDIO_FLAG_SYNC_INPUT) + QDIO_DBF_HEX3(0, trace, &irq_ptr->input_qs[q_nr], + sizeof(void *)); + else + QDIO_DBF_HEX3(0, trace, &irq_ptr->output_qs[q_nr], + sizeof(void *)); + + sprintf(dbf_text, "flag%04x", callflags); + QDIO_DBF_TEXT3(0, trace, dbf_text); + sprintf(dbf_text, "qi%02xct%02x", bufnr, count); + QDIO_DBF_TEXT3(0, trace, dbf_text); +#endif /* CONFIG_QDIO_DEBUG */ + + if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE) + return -EBUSY; + + if (callflags & QDIO_FLAG_SYNC_INPUT) + handle_inbound(irq_ptr->input_qs[q_nr], + callflags, bufnr, count); + else if (callflags & QDIO_FLAG_SYNC_OUTPUT) + handle_outbound(irq_ptr->output_qs[q_nr], + callflags, bufnr, count); + else { + QDIO_DBF_TEXT3(1, trace, "doQD:inv"); + return -EINVAL; + } + return 0; +} +EXPORT_SYMBOL_GPL(do_QDIO); + +static int __init init_QDIO(void) +{ + int rc; + + rc = qdio_setup_init(); + if (rc) + return rc; + rc = tiqdio_allocate_memory(); + if (rc) + goto out_cache; + rc = qdio_debug_init(); + if (rc) + goto out_ti; + rc = qdio_setup_perf_stats(); + if (rc) + goto out_debug; + rc = tiqdio_register_thinints(); + if (rc) + goto out_perf; + return 0; + +out_perf: + qdio_remove_perf_stats(); +out_debug: + qdio_debug_exit(); +out_ti: + tiqdio_free_memory(); +out_cache: + qdio_setup_exit(); + return rc; +} + +static void __exit exit_QDIO(void) +{ + tiqdio_unregister_thinints(); + tiqdio_free_memory(); + qdio_remove_perf_stats(); + qdio_debug_exit(); + qdio_setup_exit(); +} + +module_init(init_QDIO); +module_exit(exit_QDIO); diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c new file mode 100644 index 00000000000..ea01b85b1cc --- /dev/null +++ b/drivers/s390/cio/qdio_perf.c @@ -0,0 +1,151 @@ +/* + * drivers/s390/cio/qdio_perf.c + * + * Copyright IBM Corp. 
2008 + * + * Author: Jan Glauber (jang@linux.vnet.ibm.com) + */ +#include <linux/kernel.h> +#include <linux/proc_fs.h> +#include <linux/seq_file.h> +#include <asm/ccwdev.h> + +#include "cio.h" +#include "css.h" +#include "device.h" +#include "ioasm.h" +#include "chsc.h" +#include "qdio_debug.h" +#include "qdio_perf.h" + +int qdio_performance_stats; +struct qdio_perf_stats perf_stats; + +#ifdef CONFIG_PROC_FS +static struct proc_dir_entry *qdio_perf_pde; +#endif + +inline void qdio_perf_stat_inc(atomic_long_t *count) +{ + if (qdio_performance_stats) + atomic_long_inc(count); +} + +inline void qdio_perf_stat_dec(atomic_long_t *count) +{ + if (qdio_performance_stats) + atomic_long_dec(count); +} + +/* + * procfs functions + */ +static int qdio_perf_proc_show(struct seq_file *m, void *v) +{ + seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n", + (long)atomic_long_read(&perf_stats.qdio_int)); + seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n", + (long)atomic_long_read(&perf_stats.pci_int)); + seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n", + (long)atomic_long_read(&perf_stats.thin_int)); + seq_printf(m, "\n"); + seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n", + (long)atomic_long_read(&perf_stats.tasklet_inbound)); + seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n", + (long)atomic_long_read(&perf_stats.tasklet_outbound)); + seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n", + (long)atomic_long_read(&perf_stats.tasklet_thinint), + (long)atomic_long_read(&perf_stats.tasklet_thinint_loop)); + seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n", + (long)atomic_long_read(&perf_stats.thinint_inbound), + (long)atomic_long_read(&perf_stats.thinint_inbound_loop)); + seq_printf(m, "\n"); + seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n", + (long)atomic_long_read(&perf_stats.siga_in)); + seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n", + (long)atomic_long_read(&perf_stats.siga_out)); + seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n", + (long)atomic_long_read(&perf_stats.siga_sync)); + seq_printf(m, "\n"); + seq_printf(m, "Number of inbound transfers\t\t\t: %li\n", + (long)atomic_long_read(&perf_stats.inbound_handler)); + seq_printf(m, "Number of outbound transfers\t\t\t: %li\n", + (long)atomic_long_read(&perf_stats.outbound_handler)); + seq_printf(m, "\n"); + seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n", + (long)atomic_long_read(&perf_stats.fast_requeue)); + seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n", + (long)atomic_long_read(&perf_stats.debug_tl_out_timer)); + seq_printf(m, "Number of stop polling calls\t\t\t: %li\n", + (long)atomic_long_read(&perf_stats.debug_stop_polling)); + seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n", + (long)atomic_long_read(&perf_stats.thinint_inbound_loop2)); + seq_printf(m, "\n"); + return 0; +} +static int qdio_perf_seq_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, qdio_perf_proc_show, NULL); +} + +static struct file_operations qdio_perf_proc_fops = { + .owner = THIS_MODULE, + .open = qdio_perf_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +/* + * sysfs functions + */ +static ssize_t qdio_perf_stats_show(struct bus_type *bus, char *buf) +{ + return sprintf(buf, "%i\n", qdio_performance_stats ? 
1 : 0); +} + +static ssize_t qdio_perf_stats_store(struct bus_type *bus, + const char *buf, size_t count) +{ + unsigned long i; + + if (strict_strtoul(buf, 16, &i) != 0) + return -EINVAL; + if ((i != 0) && (i != 1)) + return -EINVAL; + if (i == qdio_performance_stats) + return count; + + qdio_performance_stats = i; + /* reset performance statistics */ + if (i == 0) + memset(&perf_stats, 0, sizeof(struct qdio_perf_stats)); + return count; +} + +static BUS_ATTR(qdio_performance_stats, 0644, qdio_perf_stats_show, + qdio_perf_stats_store); + +int __init qdio_setup_perf_stats(void) +{ + int rc; + + rc = bus_create_file(&ccw_bus_type, &bus_attr_qdio_performance_stats); + if (rc) + return rc; + +#ifdef CONFIG_PROC_FS + memset(&perf_stats, 0, sizeof(struct qdio_perf_stats)); + qdio_perf_pde = proc_create("qdio_perf", S_IFREG | S_IRUGO, + NULL, &qdio_perf_proc_fops); +#endif + return 0; +} + +void __exit qdio_remove_perf_stats(void) +{ +#ifdef CONFIG_PROC_FS + remove_proc_entry("qdio_perf", NULL); +#endif + bus_remove_file(&ccw_bus_type, &bus_attr_qdio_performance_stats); +} diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h new file mode 100644 index 00000000000..5c406a8b738 --- /dev/null +++ b/drivers/s390/cio/qdio_perf.h @@ -0,0 +1,54 @@ +/* + * drivers/s390/cio/qdio_perf.h + * + * Copyright IBM Corp. 2008 + * + * Author: Jan Glauber (jang@linux.vnet.ibm.com) + */ +#ifndef QDIO_PERF_H +#define QDIO_PERF_H + +#include <linux/types.h> +#include <linux/device.h> +#include <asm/atomic.h> + +struct qdio_perf_stats { + /* interrupt handler calls */ + atomic_long_t qdio_int; + atomic_long_t pci_int; + atomic_long_t thin_int; + + /* tasklet runs */ + atomic_long_t tasklet_inbound; + atomic_long_t tasklet_outbound; + atomic_long_t tasklet_thinint; + atomic_long_t tasklet_thinint_loop; + atomic_long_t thinint_inbound; + atomic_long_t thinint_inbound_loop; + atomic_long_t thinint_inbound_loop2; + + /* signal adapter calls */ + atomic_long_t siga_out; + atomic_long_t siga_in; + atomic_long_t siga_sync; + + /* misc */ + atomic_long_t inbound_handler; + atomic_long_t outbound_handler; + atomic_long_t fast_requeue; + + /* for debugging */ + atomic_long_t debug_tl_out_timer; + atomic_long_t debug_stop_polling; +}; + +extern struct qdio_perf_stats perf_stats; +extern int qdio_performance_stats; + +int qdio_setup_perf_stats(void); +void qdio_remove_perf_stats(void); + +extern void qdio_perf_stat_inc(atomic_long_t *count); +extern void qdio_perf_stat_dec(atomic_long_t *count); + +#endif diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c new file mode 100644 index 00000000000..f0923a8aced --- /dev/null +++ b/drivers/s390/cio/qdio_setup.c @@ -0,0 +1,521 @@ +/* + * driver/s390/cio/qdio_setup.c + * + * qdio queue initialization + * + * Copyright (C) IBM Corp. 2008 + * Author(s): Jan Glauber <jang@linux.vnet.ibm.com> + */ +#include <linux/kernel.h> +#include <linux/slab.h> +#include <asm/qdio.h> + +#include "cio.h" +#include "css.h" +#include "device.h" +#include "ioasm.h" +#include "chsc.h" +#include "qdio.h" +#include "qdio_debug.h" + +static struct kmem_cache *qdio_q_cache; + +/* + * qebsm is only available under 64bit but the adapter sets the feature + * flag anyway, so we manually override it. 
+ */ +static inline int qebsm_possible(void) +{ +#ifdef CONFIG_64BIT + return css_general_characteristics.qebsm; +#endif + return 0; +} + +/* + * qib_param_field: pointer to 128 bytes or NULL, if no param field + * nr_input_qs: pointer to nr_queues*128 words of data or NULL + */ +static void set_impl_params(struct qdio_irq *irq_ptr, + unsigned int qib_param_field_format, + unsigned char *qib_param_field, + unsigned long *input_slib_elements, + unsigned long *output_slib_elements) +{ + struct qdio_q *q; + int i, j; + + if (!irq_ptr) + return; + + WARN_ON((unsigned long)&irq_ptr->qib & 0xff); + irq_ptr->qib.pfmt = qib_param_field_format; + if (qib_param_field) + memcpy(irq_ptr->qib.parm, qib_param_field, + QDIO_MAX_BUFFERS_PER_Q); + + if (!input_slib_elements) + goto output; + + for_each_input_queue(irq_ptr, q, i) { + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) + q->slib->slibe[j].parms = + input_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j]; + } +output: + if (!output_slib_elements) + return; + + for_each_output_queue(irq_ptr, q, i) { + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) + q->slib->slibe[j].parms = + output_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j]; + } +} + +static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues) +{ + struct qdio_q *q; + int i; + + for (i = 0; i < nr_queues; i++) { + q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL); + if (!q) + return -ENOMEM; + WARN_ON((unsigned long)q & 0xff); + + q->slib = (struct slib *) __get_free_page(GFP_KERNEL); + if (!q->slib) { + kmem_cache_free(qdio_q_cache, q); + return -ENOMEM; + } + WARN_ON((unsigned long)q->slib & 0x7ff); + irq_ptr_qs[i] = q; + } + return 0; +} + +int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs) +{ + int rc; + + rc = __qdio_allocate_qs(irq_ptr->input_qs, nr_input_qs); + if (rc) + return rc; + rc = __qdio_allocate_qs(irq_ptr->output_qs, nr_output_qs); + return rc; +} + +static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr, + qdio_handler_t *handler, int i) +{ + /* must be cleared by every qdio_establish */ + memset(q, 0, ((char *)&q->slib) - ((char *)q)); + memset(q->slib, 0, PAGE_SIZE); + + q->irq_ptr = irq_ptr; + q->mask = 1 << (31 - i); + q->nr = i; + q->handler = handler; +} + +static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr, + void **sbals_array, char *dbf_text, int i) +{ + struct qdio_q *prev; + int j; + + QDIO_DBF_TEXT0(0, setup, dbf_text); + QDIO_DBF_HEX0(0, setup, &q, sizeof(void *)); + + q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2); + + /* fill in sbal */ + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) { + q->sbal[j] = *sbals_array++; + WARN_ON((unsigned long)q->sbal[j] & 0xff); + } + + /* fill in slib */ + if (i > 0) { + prev = (q->is_input_q) ? 
irq_ptr->input_qs[i - 1] + : irq_ptr->output_qs[i - 1]; + prev->slib->nsliba = (unsigned long)q->slib; + } + + q->slib->sla = (unsigned long)q->sl; + q->slib->slsba = (unsigned long)&q->slsb.val[0]; + + /* fill in sl */ + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) + q->sl->element[j].sbal = (unsigned long)q->sbal[j]; + + QDIO_DBF_TEXT2(0, setup, "sl-sb-b0"); + QDIO_DBF_HEX2(0, setup, q->sl, sizeof(void *)); + QDIO_DBF_HEX2(0, setup, &q->slsb, sizeof(void *)); + QDIO_DBF_HEX2(0, setup, q->sbal, sizeof(void *)); +} + +static void setup_queues(struct qdio_irq *irq_ptr, + struct qdio_initialize *qdio_init) +{ + char dbf_text[20]; + struct qdio_q *q; + void **input_sbal_array = qdio_init->input_sbal_addr_array; + void **output_sbal_array = qdio_init->output_sbal_addr_array; + int i; + + sprintf(dbf_text, "qfqs%4x", qdio_init->cdev->private->schid.sch_no); + QDIO_DBF_TEXT0(0, setup, dbf_text); + + for_each_input_queue(irq_ptr, q, i) { + sprintf(dbf_text, "in-q%4x", i); + setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i); + + q->is_input_q = 1; + spin_lock_init(&q->u.in.lock); + setup_storage_lists(q, irq_ptr, input_sbal_array, dbf_text, i); + input_sbal_array += QDIO_MAX_BUFFERS_PER_Q; + + if (is_thinint_irq(irq_ptr)) + tasklet_init(&q->tasklet, tiqdio_inbound_processing, + (unsigned long) q); + else + tasklet_init(&q->tasklet, qdio_inbound_processing, + (unsigned long) q); + } + + for_each_output_queue(irq_ptr, q, i) { + sprintf(dbf_text, "outq%4x", i); + setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i); + + q->is_input_q = 0; + setup_storage_lists(q, irq_ptr, output_sbal_array, + dbf_text, i); + output_sbal_array += QDIO_MAX_BUFFERS_PER_Q; + + tasklet_init(&q->tasklet, qdio_outbound_processing, + (unsigned long) q); + setup_timer(&q->u.out.timer, (void(*)(unsigned long)) + &qdio_outbound_timer, (unsigned long)q); + } +} + +static void process_ac_flags(struct qdio_irq *irq_ptr, unsigned char qdioac) +{ + if (qdioac & AC1_SIGA_INPUT_NEEDED) + irq_ptr->siga_flag.input = 1; + if (qdioac & AC1_SIGA_OUTPUT_NEEDED) + irq_ptr->siga_flag.output = 1; + if (qdioac & AC1_SIGA_SYNC_NEEDED) + irq_ptr->siga_flag.sync = 1; + if (qdioac & AC1_AUTOMATIC_SYNC_ON_THININT) + irq_ptr->siga_flag.no_sync_ti = 1; + if (qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI) + irq_ptr->siga_flag.no_sync_out_pci = 1; + + if (irq_ptr->siga_flag.no_sync_out_pci && + irq_ptr->siga_flag.no_sync_ti) + irq_ptr->siga_flag.no_sync_out_ti = 1; +} + +static void check_and_setup_qebsm(struct qdio_irq *irq_ptr, + unsigned char qdioac, unsigned long token) +{ + char dbf_text[15]; + + if (!(irq_ptr->qib.rflags & QIB_RFLAGS_ENABLE_QEBSM)) + goto no_qebsm; + if (!(qdioac & AC1_SC_QEBSM_AVAILABLE) || + (!(qdioac & AC1_SC_QEBSM_ENABLED))) + goto no_qebsm; + + irq_ptr->sch_token = token; + + QDIO_DBF_TEXT0(0, setup, "V=V:1"); + sprintf(dbf_text, "%8lx", irq_ptr->sch_token); + QDIO_DBF_TEXT0(0, setup, dbf_text); + return; + +no_qebsm: + irq_ptr->sch_token = 0; + irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM; + QDIO_DBF_TEXT0(0, setup, "noV=V"); +} + +static int __get_ssqd_info(struct qdio_irq *irq_ptr) +{ + struct chsc_ssqd_area *ssqd; + int rc; + + QDIO_DBF_TEXT0(0, setup, "getssqd"); + ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page; + memset(ssqd, 0, PAGE_SIZE); + + ssqd->request = (struct chsc_header) { + .length = 0x0010, + .code = 0x0024, + }; + ssqd->first_sch = irq_ptr->schid.sch_no; + ssqd->last_sch = irq_ptr->schid.sch_no; + ssqd->ssid = irq_ptr->schid.ssid; + + if (chsc(ssqd)) + return -EIO; + rc = 
chsc_error_from_response(ssqd->response.code); + if (rc) + return rc; + + if (!(ssqd->qdio_ssqd.flags & CHSC_FLAG_QDIO_CAPABILITY) || + !(ssqd->qdio_ssqd.flags & CHSC_FLAG_VALIDITY) || + (ssqd->qdio_ssqd.sch != irq_ptr->schid.sch_no)) + return -EINVAL; + + memcpy(&irq_ptr->ssqd_desc, &ssqd->qdio_ssqd, + sizeof(struct qdio_ssqd_desc)); + return 0; +} + +void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr) +{ + unsigned char qdioac; + char dbf_text[15]; + int rc; + + rc = __get_ssqd_info(irq_ptr); + if (rc) { + QDIO_DBF_TEXT2(0, setup, "ssqdasig"); + sprintf(dbf_text, "schno%x", irq_ptr->schid.sch_no); + QDIO_DBF_TEXT2(0, setup, dbf_text); + sprintf(dbf_text, "rc:%d", rc); + QDIO_DBF_TEXT2(0, setup, dbf_text); + /* all flags set, worst case */ + qdioac = AC1_SIGA_INPUT_NEEDED | AC1_SIGA_OUTPUT_NEEDED | + AC1_SIGA_SYNC_NEEDED; + } else + qdioac = irq_ptr->ssqd_desc.qdioac1; + + check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token); + process_ac_flags(irq_ptr, qdioac); + + sprintf(dbf_text, "qdioac%2x", qdioac); + QDIO_DBF_TEXT2(0, setup, dbf_text); +} + +void qdio_release_memory(struct qdio_irq *irq_ptr) +{ + struct qdio_q *q; + int i; + + /* + * Must check queue array manually since irq_ptr->nr_input_qs / + * irq_ptr->nr_output_qs may not yet be set. + */ + for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) { + q = irq_ptr->input_qs[i]; + if (q) { + free_page((unsigned long) q->slib); + kmem_cache_free(qdio_q_cache, q); + } + } + for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) { + q = irq_ptr->output_qs[i]; + if (q) { + free_page((unsigned long) q->slib); + kmem_cache_free(qdio_q_cache, q); + } + } + kfree(irq_ptr->qdr); + free_page(irq_ptr->chsc_page); + free_page((unsigned long) irq_ptr); +} + +static void __qdio_allocate_fill_qdr(struct qdio_irq *irq_ptr, + struct qdio_q **irq_ptr_qs, + int i, int nr) +{ + irq_ptr->qdr->qdf0[i + nr].sliba = + (unsigned long)irq_ptr_qs[i]->slib; + + irq_ptr->qdr->qdf0[i + nr].sla = + (unsigned long)irq_ptr_qs[i]->sl; + + irq_ptr->qdr->qdf0[i + nr].slsba = + (unsigned long)&irq_ptr_qs[i]->slsb.val[0]; + + irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY; + irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY; + irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY; + irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY; +} + +static void setup_qdr(struct qdio_irq *irq_ptr, + struct qdio_initialize *qdio_init) +{ + int i; + + irq_ptr->qdr->qfmt = qdio_init->q_format; + irq_ptr->qdr->iqdcnt = qdio_init->no_input_qs; + irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs; + irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */ + irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4; + irq_ptr->qdr->qiba = (unsigned long)&irq_ptr->qib; + irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY; + + for (i = 0; i < qdio_init->no_input_qs; i++) + __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->input_qs, i, 0); + + for (i = 0; i < qdio_init->no_output_qs; i++) + __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->output_qs, i, + qdio_init->no_input_qs); +} + +static void setup_qib(struct qdio_irq *irq_ptr, + struct qdio_initialize *init_data) +{ + if (qebsm_possible()) + irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM; + + irq_ptr->qib.qfmt = init_data->q_format; + if (init_data->no_input_qs) + irq_ptr->qib.isliba = + (unsigned long)(irq_ptr->input_qs[0]->slib); + if (init_data->no_output_qs) + irq_ptr->qib.osliba = + (unsigned long)(irq_ptr->output_qs[0]->slib); + memcpy(irq_ptr->qib.ebcnam, init_data->adapter_name, 8); +} + +int qdio_setup_irq(struct qdio_initialize *init_data)
+{ + struct ciw *ciw; + struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data; + int rc; + + memset(irq_ptr, 0, ((char *)&irq_ptr->qdr) - ((char *)irq_ptr)); + /* wipes qib.ac, required by ar7063 */ + memset(irq_ptr->qdr, 0, sizeof(struct qdr)); + + irq_ptr->int_parm = init_data->int_parm; + irq_ptr->nr_input_qs = init_data->no_input_qs; + irq_ptr->nr_output_qs = init_data->no_output_qs; + + irq_ptr->schid = ccw_device_get_subchannel_id(init_data->cdev); + irq_ptr->cdev = init_data->cdev; + setup_queues(irq_ptr, init_data); + + setup_qib(irq_ptr, init_data); + qdio_setup_thinint(irq_ptr); + set_impl_params(irq_ptr, init_data->qib_param_field_format, + init_data->qib_param_field, + init_data->input_slib_elements, + init_data->output_slib_elements); + + /* fill input and output descriptors */ + setup_qdr(irq_ptr, init_data); + + /* qdr, qib, sls, slsbs, slibs, sbales are filled now */ + + /* get qdio commands */ + ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE); + if (!ciw) { + QDIO_DBF_TEXT2(1, setup, "no eq"); + rc = -EINVAL; + goto out_err; + } + irq_ptr->equeue = *ciw; + + ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE); + if (!ciw) { + QDIO_DBF_TEXT2(1, setup, "no aq"); + rc = -EINVAL; + goto out_err; + } + irq_ptr->aqueue = *ciw; + + /* set new interrupt handler */ + irq_ptr->orig_handler = init_data->cdev->handler; + init_data->cdev->handler = qdio_int_handler; + return 0; +out_err: + qdio_release_memory(irq_ptr); + return rc; +} + +void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, + struct ccw_device *cdev) +{ + char s[80]; + + sprintf(s, "%s ", cdev->dev.bus_id); + + switch (irq_ptr->qib.qfmt) { + case QDIO_QETH_QFMT: + sprintf(s + strlen(s), "OSADE "); + break; + case QDIO_ZFCP_QFMT: + sprintf(s + strlen(s), "ZFCP "); + break; + case QDIO_IQDIO_QFMT: + sprintf(s + strlen(s), "HiperSockets "); + break; + } + sprintf(s + strlen(s), "using: "); + + if (!is_thinint_irq(irq_ptr)) + sprintf(s + strlen(s), "no"); + sprintf(s + strlen(s), "AdapterInterrupts "); + if (!(irq_ptr->sch_token != 0)) + sprintf(s + strlen(s), "no"); + sprintf(s + strlen(s), "QEBSM "); + if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)) + sprintf(s + strlen(s), "no"); + sprintf(s + strlen(s), "OutboundPCI "); + if (!css_general_characteristics.aif_tdd) + sprintf(s + strlen(s), "no"); + sprintf(s + strlen(s), "TDD\n"); + printk(KERN_INFO "qdio: %s", s); + + memset(s, 0, sizeof(s)); + sprintf(s, "%s SIGA required: ", cdev->dev.bus_id); + if (irq_ptr->siga_flag.input) + sprintf(s + strlen(s), "Read "); + if (irq_ptr->siga_flag.output) + sprintf(s + strlen(s), "Write "); + if (irq_ptr->siga_flag.sync) + sprintf(s + strlen(s), "Sync "); + if (!irq_ptr->siga_flag.no_sync_ti) + sprintf(s + strlen(s), "SyncAI "); + if (!irq_ptr->siga_flag.no_sync_out_ti) + sprintf(s + strlen(s), "SyncOutAI "); + if (!irq_ptr->siga_flag.no_sync_out_pci) + sprintf(s + strlen(s), "SyncOutPCI"); + sprintf(s + strlen(s), "\n"); + printk(KERN_INFO "qdio: %s", s); +} + +int __init qdio_setup_init(void) +{ + char dbf_text[15]; + + qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q), + 256, 0, NULL); + if (!qdio_q_cache) + return -ENOMEM; + + /* Check for OSA/FCP thin interrupts (bit 67). */ + sprintf(dbf_text, "thini%1x", + (css_general_characteristics.aif_osa) ? 1 : 0); + QDIO_DBF_TEXT0(0, setup, dbf_text); + + /* Check for QEBSM support in general (bit 58). */ + sprintf(dbf_text, "cssQBS:%1x", + (qebsm_possible()) ? 
1 : 0); + QDIO_DBF_TEXT0(0, setup, dbf_text); + return 0; +} + +void __exit qdio_setup_exit(void) +{ + kmem_cache_destroy(qdio_q_cache); +} diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c new file mode 100644 index 00000000000..9291a771d81 --- /dev/null +++ b/drivers/s390/cio/qdio_thinint.c @@ -0,0 +1,380 @@ +/* + * linux/drivers/s390/cio/thinint_qdio.c + * + * thin interrupt support for qdio + * + * Copyright 2000-2008 IBM Corp. + * Author(s): Utz Bacher <utz.bacher@de.ibm.com> + * Cornelia Huck <cornelia.huck@de.ibm.com> + * Jan Glauber <jang@linux.vnet.ibm.com> + */ +#include <linux/io.h> +#include <asm/atomic.h> +#include <asm/debug.h> +#include <asm/qdio.h> +#include <asm/airq.h> +#include <asm/isc.h> + +#include "cio.h" +#include "ioasm.h" +#include "qdio.h" +#include "qdio_debug.h" +#include "qdio_perf.h" + +/* + * Restriction: only 63 iqdio subchannels would have its own indicator, + * after that, subsequent subchannels share one indicator + */ +#define TIQDIO_NR_NONSHARED_IND 63 +#define TIQDIO_NR_INDICATORS (TIQDIO_NR_NONSHARED_IND + 1) +#define TIQDIO_SHARED_IND 63 + +/* list of thin interrupt input queues */ +static LIST_HEAD(tiq_list); + +/* adapter local summary indicator */ +static unsigned char *tiqdio_alsi; + +/* device state change indicators */ +struct indicator_t { + u32 ind; /* u32 because of compare-and-swap performance */ + atomic_t count; /* use count, 0 or 1 for non-shared indicators */ +}; +static struct indicator_t *q_indicators; + +static void tiqdio_tasklet_fn(unsigned long data); +static DECLARE_TASKLET(tiqdio_tasklet, tiqdio_tasklet_fn, 0); + +static int css_qdio_omit_svs; + +static inline unsigned long do_clear_global_summary(void) +{ + register unsigned long __fn asm("1") = 3; + register unsigned long __tmp asm("2"); + register unsigned long __time asm("3"); + + asm volatile( + " .insn rre,0xb2650000,2,0" + : "+d" (__fn), "=d" (__tmp), "=d" (__time)); + return __time; +} + +/* returns addr for the device state change indicator */ +static u32 *get_indicator(void) +{ + int i; + + for (i = 0; i < TIQDIO_NR_NONSHARED_IND; i++) + if (!atomic_read(&q_indicators[i].count)) { + atomic_set(&q_indicators[i].count, 1); + return &q_indicators[i].ind; + } + + /* use the shared indicator */ + atomic_inc(&q_indicators[TIQDIO_SHARED_IND].count); + return &q_indicators[TIQDIO_SHARED_IND].ind; +} + +static void put_indicator(u32 *addr) +{ + int i; + + if (!addr) + return; + i = ((unsigned long)addr - (unsigned long)q_indicators) / + sizeof(struct indicator_t); + atomic_dec(&q_indicators[i].count); +} + +void tiqdio_add_input_queues(struct qdio_irq *irq_ptr) +{ + struct qdio_q *q; + int i; + + /* No TDD facility? If we must use SIGA-s we can also omit SVS. 
*/ + if (!css_qdio_omit_svs && irq_ptr->siga_flag.sync) + css_qdio_omit_svs = 1; + + for_each_input_queue(irq_ptr, q, i) { + list_add_rcu(&q->entry, &tiq_list); + synchronize_rcu(); + } + xchg(irq_ptr->dsci, 1); + tasklet_schedule(&tiqdio_tasklet); +} + +/* + * we cannot stop the tiqdio tasklet here since it is for all + * thinint qdio devices and it must run as long as there is a + * thinint device left + */ +void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr) +{ + struct qdio_q *q; + int i; + + for_each_input_queue(irq_ptr, q, i) { + list_del_rcu(&q->entry); + synchronize_rcu(); + } +} + +static inline int tiqdio_inbound_q_done(struct qdio_q *q) +{ + unsigned char state; + + if (!atomic_read(&q->nr_buf_used)) + return 1; + + qdio_siga_sync_q(q); + get_buf_state(q, q->first_to_check, &state); + + if (state == SLSB_P_INPUT_PRIMED) + /* more work coming */ + return 0; + return 1; +} + +static inline int shared_ind(struct qdio_irq *irq_ptr) +{ + return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind; +} + +static void __tiqdio_inbound_processing(struct qdio_q *q) +{ + qdio_perf_stat_inc(&perf_stats.thinint_inbound); + qdio_sync_after_thinint(q); + + /* + * Maybe we have work on our outbound queues... at least + * we have to check the PCI capable queues. + */ + qdio_check_outbound_after_thinint(q); + +again: + if (!qdio_inbound_q_moved(q)) + return; + + qdio_kick_inbound_handler(q); + + if (!tiqdio_inbound_q_done(q)) { + qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop); + goto again; + } + + qdio_stop_polling(q); + /* + * We need to check again to not lose initiative after + * resetting the ACK state. + */ + if (!tiqdio_inbound_q_done(q)) { + qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2); + goto again; + } +} + +void tiqdio_inbound_processing(unsigned long data) +{ + struct qdio_q *q = (struct qdio_q *)data; + + __tiqdio_inbound_processing(q); +} + +/* check for work on all inbound thinint queues */ +static void tiqdio_tasklet_fn(unsigned long data) +{ + struct qdio_q *q; + + qdio_perf_stat_inc(&perf_stats.tasklet_thinint); +again: + + /* protect tiq_list entries, only changed in activate or shutdown */ + rcu_read_lock(); + + list_for_each_entry_rcu(q, &tiq_list, entry) + /* only process queues from changed sets */ + if (*q->irq_ptr->dsci) { + + /* only clear it if the indicator is non-shared */ + if (!shared_ind(q->irq_ptr)) + xchg(q->irq_ptr->dsci, 0); + /* + * don't call inbound processing directly since + * that could starve other thinint queues + */ + tasklet_schedule(&q->tasklet); + } + + rcu_read_unlock(); + + /* + * if we used the shared indicator clear it now after all queues + * were processed + */ + if (atomic_read(&q_indicators[TIQDIO_SHARED_IND].count)) { + xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0); + + /* prevent racing */ + if (*tiqdio_alsi) + xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1); + } + + /* check for more work */ + if (*tiqdio_alsi) { + xchg(tiqdio_alsi, 0); + qdio_perf_stat_inc(&perf_stats.tasklet_thinint_loop); + goto again; + } +} + +/** + * tiqdio_thinint_handler - thin interrupt handler for qdio + * @ind: pointer to adapter local summary indicator + * @drv_data: NULL + */ +static void tiqdio_thinint_handler(void *ind, void *drv_data) +{ + qdio_perf_stat_inc(&perf_stats.thin_int); + + /* + * SVS only when needed: issue SVS to benefit from iqdio interrupt + * avoidance (SVS clears adapter interrupt suppression overwrite) + */ + if (!css_qdio_omit_svs) + do_clear_global_summary(); + + /* + * reset local summary indicator (tiqdio_alsi) to 
stop adapter + * interrupts for now, the tasklet will clean all dsci's + */ + xchg((u8 *)ind, 0); + tasklet_hi_schedule(&tiqdio_tasklet); +} + +static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset) +{ + struct scssc_area *scssc_area; + char dbf_text[15]; + void *ptr; + int rc; + + scssc_area = (struct scssc_area *)irq_ptr->chsc_page; + memset(scssc_area, 0, PAGE_SIZE); + + if (reset) { + scssc_area->summary_indicator_addr = 0; + scssc_area->subchannel_indicator_addr = 0; + } else { + scssc_area->summary_indicator_addr = virt_to_phys(tiqdio_alsi); + scssc_area->subchannel_indicator_addr = + virt_to_phys(irq_ptr->dsci); + } + + scssc_area->request = (struct chsc_header) { + .length = 0x0fe0, + .code = 0x0021, + }; + scssc_area->operation_code = 0; + scssc_area->ks = PAGE_DEFAULT_KEY; + scssc_area->kc = PAGE_DEFAULT_KEY; + scssc_area->isc = QDIO_AIRQ_ISC; + scssc_area->schid = irq_ptr->schid; + + /* enable the time delay disablement facility */ + if (css_general_characteristics.aif_tdd) + scssc_area->word_with_d_bit = 0x10000000; + + rc = chsc(scssc_area); + if (rc) + return -EIO; + + rc = chsc_error_from_response(scssc_area->response.code); + if (rc) { + sprintf(dbf_text, "sidR%4x", scssc_area->response.code); + QDIO_DBF_TEXT1(0, trace, dbf_text); + QDIO_DBF_TEXT1(0, setup, dbf_text); + ptr = &scssc_area->response; + QDIO_DBF_HEX2(1, setup, &ptr, QDIO_DBF_SETUP_LEN); + return rc; + } + + QDIO_DBF_TEXT2(0, setup, "setscind"); + QDIO_DBF_HEX2(0, setup, &scssc_area->summary_indicator_addr, + sizeof(unsigned long)); + QDIO_DBF_HEX2(0, setup, &scssc_area->subchannel_indicator_addr, + sizeof(unsigned long)); + return 0; +} + +/* allocate non-shared indicators and shared indicator */ +int __init tiqdio_allocate_memory(void) +{ + q_indicators = kzalloc(sizeof(struct indicator_t) * TIQDIO_NR_INDICATORS, + GFP_KERNEL); + if (!q_indicators) + return -ENOMEM; + return 0; +} + +void tiqdio_free_memory(void) +{ + kfree(q_indicators); +} + +int __init tiqdio_register_thinints(void) +{ + char dbf_text[20]; + + isc_register(QDIO_AIRQ_ISC); + tiqdio_alsi = s390_register_adapter_interrupt(&tiqdio_thinint_handler, + NULL, QDIO_AIRQ_ISC); + if (IS_ERR(tiqdio_alsi)) { + sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_alsi)); + QDIO_DBF_TEXT0(0, setup, dbf_text); + tiqdio_alsi = NULL; + isc_unregister(QDIO_AIRQ_ISC); + return -ENOMEM; + } + return 0; +} + +int qdio_establish_thinint(struct qdio_irq *irq_ptr) +{ + if (!is_thinint_irq(irq_ptr)) + return 0; + + /* Check for aif time delay disablement. 
If installed, + * omit SVS even under LPAR + */ + if (css_general_characteristics.aif_tdd) + css_qdio_omit_svs = 1; + return set_subchannel_ind(irq_ptr, 0); +} + +void qdio_setup_thinint(struct qdio_irq *irq_ptr) +{ + if (!is_thinint_irq(irq_ptr)) + return; + irq_ptr->dsci = get_indicator(); + QDIO_DBF_HEX1(0, setup, &irq_ptr->dsci, sizeof(void *)); +} + +void qdio_shutdown_thinint(struct qdio_irq *irq_ptr) +{ + if (!is_thinint_irq(irq_ptr)) + return; + + /* reset adapter interrupt indicators */ + put_indicator(irq_ptr->dsci); + set_subchannel_ind(irq_ptr, 1); +} + +void __exit tiqdio_unregister_thinints(void) +{ + tasklet_disable(&tiqdio_tasklet); + + if (tiqdio_alsi) { + s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC); + isc_unregister(QDIO_AIRQ_ISC); + } +} diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 699ac11debd..1895dbb553c 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -239,11 +239,6 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, /*not used unless the microcode gets patched*/ #define QETH_PCI_TIMER_VALUE(card) 3 -#define QETH_MIN_INPUT_THRESHOLD 1 -#define QETH_MAX_INPUT_THRESHOLD 500 -#define QETH_MIN_OUTPUT_THRESHOLD 1 -#define QETH_MAX_OUTPUT_THRESHOLD 300 - /* priority queing */ #define QETH_PRIOQ_DEFAULT QETH_NO_PRIO_QUEUEING #define QETH_DEFAULT_QUEUE 2 @@ -811,17 +806,14 @@ int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *, enum qeth_ipa_cmds, enum qeth_prot_versions); int qeth_query_setadapterparms(struct qeth_card *); -int qeth_check_qdio_errors(struct qdio_buffer *, unsigned int, - unsigned int, const char *); +int qeth_check_qdio_errors(struct qdio_buffer *, unsigned int, const char *); void qeth_queue_input_buffer(struct qeth_card *, int); struct sk_buff *qeth_core_get_next_skb(struct qeth_card *, struct qdio_buffer *, struct qdio_buffer_element **, int *, struct qeth_hdr **); void qeth_schedule_recovery(struct qeth_card *); void qeth_qdio_output_handler(struct ccw_device *, unsigned int, - unsigned int, unsigned int, - unsigned int, int, int, - unsigned long); + int, int, int, unsigned long); void qeth_clear_ipacmd_list(struct qeth_card *); int qeth_qdio_clear_card(struct qeth_card *, int); void qeth_clear_working_pool_list(struct qeth_card *); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 0ac54dc638c..c3ad89e302b 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -2073,7 +2073,7 @@ static void qeth_create_qib_param_field_blkt(struct qeth_card *card, static int qeth_qdio_activate(struct qeth_card *card) { QETH_DBF_TEXT(SETUP, 3, "qdioact"); - return qdio_activate(CARD_DDEV(card), 0); + return qdio_activate(CARD_DDEV(card)); } static int qeth_dm_act(struct qeth_card *card) @@ -2349,16 +2349,11 @@ int qeth_init_qdio_queues(struct qeth_card *card) card->qdio.in_q->next_buf_to_init = card->qdio.in_buf_pool.buf_count - 1; rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, - card->qdio.in_buf_pool.buf_count - 1, NULL); + card->qdio.in_buf_pool.buf_count - 1); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); return rc; } - rc = qdio_synchronize(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0); - if (rc) { - QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); - return rc; - } /* outbound queue */ for (i = 0; i < card->qdio.no_out_queues; ++i) { memset(card->qdio.out_qs[i]->qdio_bufs, 0, @@ -2559,9 +2554,9 @@ int 
qeth_query_setadapterparms(struct qeth_card *card) EXPORT_SYMBOL_GPL(qeth_query_setadapterparms); int qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error, - unsigned int siga_error, const char *dbftext) + const char *dbftext) { - if (qdio_error || siga_error) { + if (qdio_error) { QETH_DBF_TEXT(TRACE, 2, dbftext); QETH_DBF_TEXT(QERR, 2, dbftext); QETH_DBF_TEXT_(QERR, 2, " F15=%02X", @@ -2569,7 +2564,6 @@ int qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error, QETH_DBF_TEXT_(QERR, 2, " F14=%02X", buf->element[14].flags & 0xff); QETH_DBF_TEXT_(QERR, 2, " qerr=%X", qdio_error); - QETH_DBF_TEXT_(QERR, 2, " serr=%X", siga_error); return 1; } return 0; @@ -2622,9 +2616,8 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index) card->perf_stats.inbound_do_qdio_start_time = qeth_get_micros(); } - rc = do_QDIO(CARD_DDEV(card), - QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT, - 0, queue->next_buf_to_init, count, NULL); + rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, + queue->next_buf_to_init, count); if (card->options.performance_stats) card->perf_stats.inbound_do_qdio_time += qeth_get_micros() - @@ -2643,14 +2636,13 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index) EXPORT_SYMBOL_GPL(qeth_queue_input_buffer); static int qeth_handle_send_error(struct qeth_card *card, - struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err, - unsigned int siga_err) + struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err) { int sbalf15 = buffer->buffer->element[15].flags & 0xff; - int cc = siga_err & 3; + int cc = qdio_err & 3; QETH_DBF_TEXT(TRACE, 6, "hdsnderr"); - qeth_check_qdio_errors(buffer->buffer, qdio_err, siga_err, "qouterr"); + qeth_check_qdio_errors(buffer->buffer, qdio_err, "qouterr"); switch (cc) { case 0: if (qdio_err) { @@ -2662,7 +2654,7 @@ static int qeth_handle_send_error(struct qeth_card *card, } return QETH_SEND_ERROR_NONE; case 2: - if (siga_err & QDIO_SIGA_ERROR_B_BIT_SET) { + if (qdio_err & QDIO_ERROR_SIGA_BUSY) { QETH_DBF_TEXT(TRACE, 1, "SIGAcc2B"); QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card)); return QETH_SEND_ERROR_KICK_IT; @@ -2758,8 +2750,8 @@ static int qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue) return 0; } -static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int, - int index, int count) +static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, + int count) { struct qeth_qdio_out_buffer *buf; int rc; @@ -2807,12 +2799,10 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int, qeth_get_micros(); } qdio_flags = QDIO_FLAG_SYNC_OUTPUT; - if (under_int) - qdio_flags |= QDIO_FLAG_UNDER_INTERRUPT; if (atomic_read(&queue->set_pci_flags_count)) qdio_flags |= QDIO_FLAG_PCI_OUT; rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags, - queue->queue_no, index, count, NULL); + queue->queue_no, index, count); if (queue->card->options.performance_stats) queue->card->perf_stats.outbound_do_qdio_time += qeth_get_micros() - @@ -2866,16 +2856,15 @@ static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) queue->card->perf_stats.bufs_sent_pack += flush_cnt; if (flush_cnt) - qeth_flush_buffers(queue, 1, index, flush_cnt); + qeth_flush_buffers(queue, index, flush_cnt); atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); } } } -void qeth_qdio_output_handler(struct ccw_device *ccwdev, unsigned int status, - unsigned int qdio_error, unsigned int siga_error, - unsigned int __queue, int first_element, int count, - unsigned long card_ptr) +void 
qeth_qdio_output_handler(struct ccw_device *ccwdev, + unsigned int qdio_error, int __queue, int first_element, + int count, unsigned long card_ptr) { struct qeth_card *card = (struct qeth_card *) card_ptr; struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue]; @@ -2883,15 +2872,12 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev, unsigned int status, int i; QETH_DBF_TEXT(TRACE, 6, "qdouhdl"); - if (status & QDIO_STATUS_LOOK_FOR_ERROR) { - if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) { - QETH_DBF_TEXT(TRACE, 2, "achkcond"); - QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card)); - QETH_DBF_TEXT_(TRACE, 2, "%08x", status); - netif_stop_queue(card->dev); - qeth_schedule_recovery(card); - return; - } + if (qdio_error & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) { + QETH_DBF_TEXT(TRACE, 2, "achkcond"); + QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card)); + netif_stop_queue(card->dev); + qeth_schedule_recovery(card); + return; } if (card->options.performance_stats) { card->perf_stats.outbound_handler_cnt++; @@ -2901,8 +2887,7 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev, unsigned int status, for (i = first_element; i < (first_element + count); ++i) { buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]; /*we only handle the KICK_IT error by doing a recovery */ - if (qeth_handle_send_error(card, buffer, - qdio_error, siga_error) + if (qeth_handle_send_error(card, buffer, qdio_error) == QETH_SEND_ERROR_KICK_IT){ netif_stop_queue(card->dev); qeth_schedule_recovery(card); @@ -3164,11 +3149,11 @@ int qeth_do_send_packet_fast(struct qeth_card *card, atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); if (ctx == NULL) { qeth_fill_buffer(queue, buffer, skb); - qeth_flush_buffers(queue, 0, index, 1); + qeth_flush_buffers(queue, index, 1); } else { flush_cnt = qeth_eddp_fill_buffer(queue, ctx, index); WARN_ON(buffers_needed != flush_cnt); - qeth_flush_buffers(queue, 0, index, flush_cnt); + qeth_flush_buffers(queue, index, flush_cnt); } return 0; out: @@ -3221,8 +3206,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, * again */ if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY){ - qeth_flush_buffers(queue, 0, - start_index, flush_count); + qeth_flush_buffers(queue, start_index, + flush_count); atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); return -EBUSY; @@ -3253,7 +3238,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, flush_count += tmp; out: if (flush_count) - qeth_flush_buffers(queue, 0, start_index, flush_count); + qeth_flush_buffers(queue, start_index, flush_count); else if (!atomic_read(&queue->set_pci_flags_count)) atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH); /* @@ -3274,7 +3259,7 @@ out: if (!flush_count && !atomic_read(&queue->set_pci_flags_count)) flush_count += qeth_flush_buffers_on_no_pci(queue); if (flush_count) - qeth_flush_buffers(queue, 0, start_index, flush_count); + qeth_flush_buffers(queue, start_index, flush_count); } /* at this point the queue is UNLOCKED again */ if (queue->card->options.performance_stats && do_pack) @@ -3686,10 +3671,6 @@ static int qeth_qdio_establish(struct qeth_card *card) init_data.q_format = qeth_get_qdio_q_format(card); init_data.qib_param_field_format = 0; init_data.qib_param_field = qib_param_field; - init_data.min_input_threshold = QETH_MIN_INPUT_THRESHOLD; - init_data.max_input_threshold = QETH_MAX_INPUT_THRESHOLD; - init_data.min_output_threshold = QETH_MIN_OUTPUT_THRESHOLD; - init_data.max_output_threshold = QETH_MAX_OUTPUT_THRESHOLD; 
init_data.no_input_qs = 1; init_data.no_output_qs = card->qdio.no_out_queues; init_data.input_handler = card->discipline.input_handler; @@ -3751,8 +3732,9 @@ static int qeth_core_driver_group(const char *buf, struct device *root_dev, int qeth_core_hardsetup_card(struct qeth_card *card) { + struct qdio_ssqd_desc *qdio_ssqd; int retries = 3; - int mpno; + int mpno = 0; int rc; QETH_DBF_TEXT(SETUP, 2, "hrdsetup"); @@ -3784,7 +3766,10 @@ retry: QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); return rc; } - mpno = qdio_get_ssqd_pct(CARD_DDEV(card)); + + qdio_ssqd = qdio_get_ssqd_desc(CARD_DDEV(card)); + if (qdio_ssqd) + mpno = qdio_ssqd->pcnt; if (mpno) mpno = min(mpno - 1, QETH_MAX_PORTNO); if (card->info.portno > mpno) { diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index f682f7b1448..3fbc3bdec0c 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -726,8 +726,7 @@ tx_drop: } static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev, - unsigned int status, unsigned int qdio_err, - unsigned int siga_err, unsigned int queue, + unsigned int qdio_err, unsigned int queue, int first_element, int count, unsigned long card_ptr) { struct net_device *net_dev; @@ -742,23 +741,20 @@ static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev, card->perf_stats.inbound_cnt++; card->perf_stats.inbound_start_time = qeth_get_micros(); } - if (status & QDIO_STATUS_LOOK_FOR_ERROR) { - if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) { - QETH_DBF_TEXT(TRACE, 1, "qdinchk"); - QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card)); - QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", first_element, - count); - QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", queue, status); - qeth_schedule_recovery(card); - return; - } + if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) { + QETH_DBF_TEXT(TRACE, 1, "qdinchk"); + QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card)); + QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", first_element, + count); + QETH_DBF_TEXT_(TRACE, 1, "%04X", queue); + qeth_schedule_recovery(card); + return; } for (i = first_element; i < (first_element + count); ++i) { index = i % QDIO_MAX_BUFFERS_PER_Q; buffer = &card->qdio.in_q->bufs[index]; - if (!((status & QDIO_STATUS_LOOK_FOR_ERROR) && - qeth_check_qdio_errors(buffer->buffer, - qdio_err, siga_err, "qinerr"))) + if (!(qdio_err && + qeth_check_qdio_errors(buffer->buffer, qdio_err, "qinerr"))) qeth_l2_process_inbound_buffer(card, buffer, index); /* clear buffer and give back to hardware */ qeth_put_buffer_pool_entry(card, buffer->pool_entry); diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 06deaee50f6..22f64aa6dd1 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -2939,8 +2939,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) } static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev, - unsigned int status, unsigned int qdio_err, - unsigned int siga_err, unsigned int queue, int first_element, + unsigned int qdio_err, unsigned int queue, int first_element, int count, unsigned long card_ptr) { struct net_device *net_dev; @@ -2955,23 +2954,21 @@ static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev, card->perf_stats.inbound_cnt++; card->perf_stats.inbound_start_time = qeth_get_micros(); } - if (status & QDIO_STATUS_LOOK_FOR_ERROR) { - if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) { - QETH_DBF_TEXT(TRACE, 1, "qdinchk"); - QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card)); - QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", - 
first_element, count); - QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", queue, status); - qeth_schedule_recovery(card); - return; - } + if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) { + QETH_DBF_TEXT(TRACE, 1, "qdinchk"); + QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card)); + QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", + first_element, count); + QETH_DBF_TEXT_(TRACE, 1, "%04X", queue); + qeth_schedule_recovery(card); + return; } for (i = first_element; i < (first_element + count); ++i) { index = i % QDIO_MAX_BUFFERS_PER_Q; buffer = &card->qdio.in_q->bufs[index]; - if (!((status & QDIO_STATUS_LOOK_FOR_ERROR) && + if (!(qdio_err && qeth_check_qdio_errors(buffer->buffer, - qdio_err, siga_err, "qinerr"))) + qdio_err, "qinerr"))) qeth_l3_process_inbound_buffer(card, buffer, index); /* clear buffer and give back to hardware */ qeth_put_buffer_pool_entry(card, buffer->pool_entry); diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index 36169c6944f..fca48b88fc5 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -297,15 +297,13 @@ void zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter, /** * zfcp_hba_dbf_event_qdio - trace event for QDIO related failure * @adapter: adapter affected by this QDIO related event - * @status: as passed by qdio module * @qdio_error: as passed by qdio module - * @siga_error: as passed by qdio module * @sbal_index: first buffer with error condition, as passed by qdio module * @sbal_count: number of buffers affected, as passed by qdio module */ -void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status, - unsigned int qdio_error, unsigned int siga_error, - int sbal_index, int sbal_count) +void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, + unsigned int qdio_error, int sbal_index, + int sbal_count) { struct zfcp_hba_dbf_record *r = &adapter->hba_dbf_buf; unsigned long flags; @@ -313,9 +311,7 @@ void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status, spin_lock_irqsave(&adapter->hba_dbf_lock, flags); memset(r, 0, sizeof(*r)); strncpy(r->tag, "qdio", ZFCP_DBF_TAG_SIZE); - r->u.qdio.status = status; r->u.qdio.qdio_error = qdio_error; - r->u.qdio.siga_error = siga_error; r->u.qdio.sbal_index = sbal_index; r->u.qdio.sbal_count = sbal_count; debug_event(adapter->hba_dbf, 0, r, sizeof(*r)); @@ -398,9 +394,7 @@ static void zfcp_hba_dbf_view_status(char **p, static void zfcp_hba_dbf_view_qdio(char **p, struct zfcp_hba_dbf_record_qdio *r) { - zfcp_dbf_out(p, "status", "0x%08x", r->status); zfcp_dbf_out(p, "qdio_error", "0x%08x", r->qdio_error); - zfcp_dbf_out(p, "siga_error", "0x%08x", r->siga_error); zfcp_dbf_out(p, "sbal_index", "0x%02x", r->sbal_index); zfcp_dbf_out(p, "sbal_count", "0x%02x", r->sbal_count); } diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h index d04aea60497..0ddb18449d1 100644 --- a/drivers/s390/scsi/zfcp_dbf.h +++ b/drivers/s390/scsi/zfcp_dbf.h @@ -139,9 +139,7 @@ struct zfcp_hba_dbf_record_status { } __attribute__ ((packed)); struct zfcp_hba_dbf_record_qdio { - u32 status; u32 qdio_error; - u32 siga_error; u8 sbal_index; u8 sbal_count; } __attribute__ ((packed)); diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 8065b2b224b..edfdb21591f 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -48,9 +48,8 @@ extern void zfcp_rec_dbf_event_action(u8, struct zfcp_erp_action *); extern void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *); extern void 
zfcp_hba_dbf_event_fsf_unsol(const char *, struct zfcp_adapter *, struct fsf_status_read_buffer *); -extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *, - unsigned int, unsigned int, unsigned int, - int, int); +extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *, unsigned int, int, + int); extern void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *); extern void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *); extern void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *); diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index 72e3094796d..d6dbd653fde 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c @@ -74,17 +74,15 @@ static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt) } } -static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int status, - unsigned int qdio_err, unsigned int siga_err, - unsigned int queue_no, int first, int count, +static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err, + int queue_no, int first, int count, unsigned long parm) { struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm; struct zfcp_qdio_queue *queue = &adapter->req_q; - if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) { - zfcp_hba_dbf_event_qdio(adapter, status, qdio_err, siga_err, - first, count); + if (unlikely(qdio_err)) { + zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count); zfcp_qdio_handler_error(adapter, 140); return; } @@ -129,8 +127,7 @@ static void zfcp_qdio_resp_put_back(struct zfcp_adapter *adapter, int processed) count = atomic_read(&queue->count) + processed; - retval = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT, - 0, start, count, NULL); + retval = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, start, count); if (unlikely(retval)) { atomic_set(&queue->count, count); @@ -142,9 +139,8 @@ static void zfcp_qdio_resp_put_back(struct zfcp_adapter *adapter, int processed) } } -static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int status, - unsigned int qdio_err, unsigned int siga_err, - unsigned int queue_no, int first, int count, +static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err, + int queue_no, int first, int count, unsigned long parm) { struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm; @@ -152,9 +148,8 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int status, volatile struct qdio_buffer_element *sbale; int sbal_idx, sbale_idx, sbal_no; - if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) { - zfcp_hba_dbf_event_qdio(adapter, status, qdio_err, siga_err, - first, count); + if (unlikely(qdio_err)) { + zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count); zfcp_qdio_handler_error(adapter, 147); return; } @@ -362,7 +357,7 @@ int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req) } retval = do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0, first, - count, NULL); + count); if (unlikely(retval)) { zfcp_qdio_zero_sbals(req_q->sbal, first, count); return retval; @@ -400,10 +395,6 @@ int zfcp_qdio_allocate(struct zfcp_adapter *adapter) init_data->qib_param_field = NULL; init_data->input_slib_elements = NULL; init_data->output_slib_elements = NULL; - init_data->min_input_threshold = 1; - init_data->max_input_threshold = 5000; - init_data->min_output_threshold = 1; - init_data->max_output_threshold = 1000; init_data->no_input_qs = 1; init_data->no_output_qs = 1; init_data->input_handler = zfcp_qdio_int_resp; @@ -436,9 +427,7 @@ void zfcp_qdio_close(struct zfcp_adapter 
*adapter) atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); spin_unlock(&req_q->lock); - while (qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR) - == -EINPROGRESS) - ssleep(1); + qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR); /* cleanup used outbound sbals */ count = atomic_read(&req_q->count); @@ -473,7 +462,7 @@ int zfcp_qdio_open(struct zfcp_adapter *adapter) return -EIO; } - if (qdio_activate(adapter->ccw_device, 0)) { + if (qdio_activate(adapter->ccw_device)) { dev_err(&adapter->ccw_device->dev, "Activate of QDIO queues failed.\n"); goto failed_qdio; @@ -487,7 +476,7 @@ int zfcp_qdio_open(struct zfcp_adapter *adapter) } if (do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_INPUT, 0, 0, - QDIO_MAX_BUFFERS_PER_Q, NULL)) { + QDIO_MAX_BUFFERS_PER_Q)) { dev_err(&adapter->ccw_device->dev, "Init of QDIO response queue failed.\n"); goto failed_qdio; @@ -501,9 +490,6 @@ int zfcp_qdio_open(struct zfcp_adapter *adapter) return 0; failed_qdio: - while (qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR) - == -EINPROGRESS) - ssleep(1); - + qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR); return -EIO; } diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c index ed53f14007a..f2467e936e5 100644 --- a/drivers/scsi/device_handler/scsi_dh_emc.c +++ b/drivers/scsi/device_handler/scsi_dh_emc.c @@ -416,12 +416,17 @@ static int clariion_bus_notify(struct notifier_block *nb, unsigned long action, void *data) { struct device *dev = data; - struct scsi_device *sdev = to_scsi_device(dev); + struct scsi_device *sdev; struct scsi_dh_data *scsi_dh_data; struct clariion_dh_data *h; int i, found = 0; unsigned long flags; + if (!scsi_is_sdev_device(dev)) + return 0; + + sdev = to_scsi_device(dev); + if (action == BUS_NOTIFY_ADD_DEVICE) { for (i = 0; clariion_dev_list[i].vendor; i++) { if (!strncmp(sdev->vendor, clariion_dev_list[i].vendor, diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c index 12ceab7b366..ae6be87d6a8 100644 --- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c +++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c @@ -131,11 +131,16 @@ static int hp_sw_bus_notify(struct notifier_block *nb, unsigned long action, void *data) { struct device *dev = data; - struct scsi_device *sdev = to_scsi_device(dev); + struct scsi_device *sdev; struct scsi_dh_data *scsi_dh_data; int i, found = 0; unsigned long flags; + if (!scsi_is_sdev_device(dev)) + return 0; + + sdev = to_scsi_device(dev); + if (action == BUS_NOTIFY_ADD_DEVICE) { for (i = 0; hp_sw_dh_data_list[i].vendor; i++) { if (!strncmp(sdev->vendor, hp_sw_dh_data_list[i].vendor, diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index 6fff077a888..fdf34b0ec6e 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -608,12 +608,17 @@ static int rdac_bus_notify(struct notifier_block *nb, unsigned long action, void *data) { struct device *dev = data; - struct scsi_device *sdev = to_scsi_device(dev); + struct scsi_device *sdev; struct scsi_dh_data *scsi_dh_data; struct rdac_dh_data *h; int i, found = 0; unsigned long flags; + if (!scsi_is_sdev_device(dev)) + return 0; + + sdev = to_scsi_device(dev); + if (action == BUS_NOTIFY_ADD_DEVICE) { for (i = 0; rdac_dev_list[i].vendor; i++) { if (!strncmp(sdev->vendor, rdac_dev_list[i].vendor, diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c 
index 683bce375c7..f843c1383a4 100644 --- a/drivers/scsi/ide-scsi.c +++ b/drivers/scsi/ide-scsi.c @@ -258,19 +258,6 @@ idescsi_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err) return ide_stopped; } -static ide_startstop_t -idescsi_atapi_abort(ide_drive_t *drive, struct request *rq) -{ - debug_log("%s called for %lu\n", __func__, - ((struct ide_atapi_pc *) rq->special)->scsi_cmd->serial_number); - - rq->errors |= ERROR_MAX; - - idescsi_end_request(drive, 0, 0); - - return ide_stopped; -} - static int idescsi_end_request (ide_drive_t *drive, int uptodate, int nrsecs) { idescsi_scsi_t *scsi = drive_to_idescsi(drive); @@ -524,7 +511,6 @@ static ide_driver_t idescsi_driver = { .do_request = idescsi_do_request, .end_request = idescsi_end_request, .error = idescsi_atapi_error, - .abort = idescsi_atapi_abort, #ifdef CONFIG_IDE_PROC_FS .proc = idescsi_proc, #endif diff --git a/drivers/serial/8250_pnp.c b/drivers/serial/8250_pnp.c index 97c68d021d2..638b68649e7 100644 --- a/drivers/serial/8250_pnp.c +++ b/drivers/serial/8250_pnp.c @@ -383,21 +383,14 @@ static int __devinit check_name(char *name) return 0; } -static int __devinit check_resources(struct pnp_option *option) +static int __devinit check_resources(struct pnp_dev *dev) { - struct pnp_option *tmp; - if (!option) - return 0; + resource_size_t base[] = {0x2f8, 0x3f8, 0x2e8, 0x3e8}; + int i; - for (tmp = option; tmp; tmp = tmp->next) { - struct pnp_port *port; - for (port = tmp->port; port; port = port->next) - if ((port->size == 8) && - ((port->min == 0x2f8) || - (port->min == 0x3f8) || - (port->min == 0x2e8) || - (port->min == 0x3e8))) - return 1; + for (i = 0; i < ARRAY_SIZE(base); i++) { + if (pnp_possible_config(dev, IORESOURCE_IO, base[i], 8)) + return 1; } return 0; @@ -420,10 +413,7 @@ static int __devinit serial_pnp_guess_board(struct pnp_dev *dev, int *flags) (dev->card && check_name(dev->card->name)))) return -ENODEV; - if (check_resources(dev->independent)) - return 0; - - if (check_resources(dev->dependent)) + if (check_resources(dev)) return 0; return -ENODEV; |
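Tying together the handler conversions in the qeth and zfcp hunks above, a short sketch of the reduced qdio handler prototype; struct my_card, need_recovery and my_input_handler are invented names for illustration, not part of the patch:

#include <asm/ccwdev.h>
#include <asm/qdio.h>

struct my_card {			/* hypothetical driver-private state */
	int need_recovery;
};

static void my_input_handler(struct ccw_device *cdev, unsigned int qdio_err,
			     unsigned int queue, int first_element, int count,
			     unsigned long parm)
{
	struct my_card *card = (struct my_card *) parm;

	/* one combined error word replaces the old status/qdio_err/siga_err */
	if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
		card->need_recovery = 1;	/* run recovery from a worker */
		return;
	}

	/*
	 * Buffers first_element .. first_element + count - 1, taken modulo
	 * QDIO_MAX_BUFFERS_PER_Q, are ready; after processing they are
	 * returned with do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, queue, ...).
	 */
}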