author     Ingo Molnar <mingo@elte.hu>  2008-07-18 22:00:54 +0200
committer  Ingo Molnar <mingo@elte.hu>  2008-07-18 22:00:54 +0200
commit     bb2c018b09b681d43f5e08124b83e362647ea82b (patch)
tree       d794902c78f9fdd04ed88a4b8d451ed6f9292ec0 /drivers
parent     Merge branch 'linus' into cpus4096 (diff)
parent     Merge branch 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/gi... (diff)
Merge branch 'linus' into cpus4096
Conflicts:
	drivers/acpi/processor_throttling.c
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'drivers')
438 files changed, 34408 insertions, 31397 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index bba867391a85..735f5ea17473 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -336,6 +336,15 @@ config ACPI_EC the battery and thermal drivers. If you are compiling for a mobile system, say Y. +config ACPI_PCI_SLOT + tristate "PCI slot detection driver" + default n + help + This driver will attempt to discover all PCI slots in your system, + and creates entries in /sys/bus/pci/slots/. This feature can + help you correlate PCI bus addresses with the physical geography + of your slots. If you are unsure, say N. + config ACPI_POWER bool default y diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index 40b0fcae4c78..52a4cd4b81d0 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -21,7 +21,7 @@ obj-$(CONFIG_X86) += blacklist.o # # ACPI Core Subsystem (Interpreter) # -obj-y += osl.o utils.o \ +obj-y += osl.o utils.o reboot.o\ dispatcher/ events/ executer/ hardware/ \ namespace/ parser/ resources/ tables/ \ utilities/ @@ -48,6 +48,7 @@ obj-$(CONFIG_ACPI_DOCK) += dock.o obj-$(CONFIG_ACPI_BAY) += bay.o obj-$(CONFIG_ACPI_VIDEO) += video.o obj-y += pci_root.o pci_link.o pci_irq.o pci_bind.o +obj-$(CONFIG_ACPI_PCI_SLOT) += pci_slot.o obj-$(CONFIG_ACPI_POWER) += power.o obj-$(CONFIG_ACPI_PROCESSOR) += processor.o obj-$(CONFIG_ACPI_CONTAINER) += container.o diff --git a/drivers/acpi/bay.c b/drivers/acpi/bay.c index 61b6c5beb2d3..e6caf5d42e0e 100644 --- a/drivers/acpi/bay.c +++ b/drivers/acpi/bay.c @@ -380,6 +380,9 @@ static int __init bay_init(void) if (acpi_disabled) return -ENODEV; + if (acpi_disabled) + return -ENODEV; + /* look for dockable drive bays */ acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, find_bay, &bays, NULL); diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index a6dbcf4d9ef5..ccae305ee55d 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -295,6 +295,28 @@ int acpi_bus_set_power(acpi_handle handle, int state) EXPORT_SYMBOL(acpi_bus_set_power); +bool acpi_bus_power_manageable(acpi_handle handle) +{ + struct acpi_device *device; + int result; + + result = acpi_bus_get_device(handle, &device); + return result ? false : device->flags.power_manageable; +} + +EXPORT_SYMBOL(acpi_bus_power_manageable); + +bool acpi_bus_can_wakeup(acpi_handle handle) +{ + struct acpi_device *device; + int result; + + result = acpi_bus_get_device(handle, &device); + return result ? 
false : device->wakeup.flags.valid; +} + +EXPORT_SYMBOL(acpi_bus_can_wakeup); + /* -------------------------------------------------------------------------- Event Management -------------------------------------------------------------------------- */ @@ -612,7 +634,7 @@ static int __init acpi_bus_init_irq(void) return 0; } -acpi_native_uint acpi_gbl_permanent_mmap; +u8 acpi_gbl_permanent_mmap; void __init acpi_early_init(void) diff --git a/drivers/acpi/dispatcher/dsinit.c b/drivers/acpi/dispatcher/dsinit.c index 610b1ee102b0..949f7c75029e 100644 --- a/drivers/acpi/dispatcher/dsinit.c +++ b/drivers/acpi/dispatcher/dsinit.c @@ -151,7 +151,7 @@ acpi_ds_init_one_object(acpi_handle obj_handle, ******************************************************************************/ acpi_status -acpi_ds_initialize_objects(acpi_native_uint table_index, +acpi_ds_initialize_objects(u32 table_index, struct acpi_namespace_node * start_node) { acpi_status status; diff --git a/drivers/acpi/dispatcher/dsmethod.c b/drivers/acpi/dispatcher/dsmethod.c index 2509809a36cf..4613b9ca5792 100644 --- a/drivers/acpi/dispatcher/dsmethod.c +++ b/drivers/acpi/dispatcher/dsmethod.c @@ -377,7 +377,6 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread, } info->parameters = &this_walk_state->operands[0]; - info->parameter_type = ACPI_PARAM_ARGS; status = acpi_ds_init_aml_walk(next_walk_state, NULL, method_node, obj_desc->method.aml_start, diff --git a/drivers/acpi/dispatcher/dsopcode.c b/drivers/acpi/dispatcher/dsopcode.c index a818e0ddb996..6a81c4400edf 100644 --- a/drivers/acpi/dispatcher/dsopcode.c +++ b/drivers/acpi/dispatcher/dsopcode.c @@ -691,12 +691,6 @@ acpi_ds_eval_buffer_field_operands(struct acpi_walk_state *walk_state, status = acpi_ex_resolve_operands(op->common.aml_opcode, ACPI_WALK_OPERANDS, walk_state); - - ACPI_DUMP_OPERANDS(ACPI_WALK_OPERANDS, ACPI_IMODE_EXECUTE, - acpi_ps_get_opcode_name(op->common.aml_opcode), - walk_state->num_operands, - "after AcpiExResolveOperands"); - if (ACPI_FAILURE(status)) { ACPI_ERROR((AE_INFO, "(%s) bad operand(s) (%X)", acpi_ps_get_opcode_name(op->common.aml_opcode), @@ -785,10 +779,6 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state, return_ACPI_STATUS(status); } - ACPI_DUMP_OPERANDS(ACPI_WALK_OPERANDS, ACPI_IMODE_EXECUTE, - acpi_ps_get_opcode_name(op->common.aml_opcode), - 1, "after AcpiExResolveOperands"); - obj_desc = acpi_ns_get_attached_object(node); if (!obj_desc) { return_ACPI_STATUS(AE_NOT_EXIST); @@ -848,7 +838,7 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state, union acpi_operand_object **operand; struct acpi_namespace_node *node; union acpi_parse_object *next_op; - acpi_native_uint table_index; + u32 table_index; struct acpi_table_header *table; ACPI_FUNCTION_TRACE_PTR(ds_eval_table_region_operands, op); @@ -882,10 +872,6 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state, return_ACPI_STATUS(status); } - ACPI_DUMP_OPERANDS(ACPI_WALK_OPERANDS, ACPI_IMODE_EXECUTE, - acpi_ps_get_opcode_name(op->common.aml_opcode), - 1, "after AcpiExResolveOperands"); - operand = &walk_state->operands[0]; /* Find the ACPI table */ @@ -1091,10 +1077,8 @@ acpi_ds_eval_bank_field_operands(struct acpi_walk_state *walk_state, return_ACPI_STATUS(status); } - ACPI_DUMP_OPERANDS(ACPI_WALK_OPERANDS, ACPI_IMODE_EXECUTE, - acpi_ps_get_opcode_name(op->common.aml_opcode), - 1, "after AcpiExResolveOperands"); - + ACPI_DUMP_OPERANDS(ACPI_WALK_OPERANDS, + acpi_ps_get_opcode_name(op->common.aml_opcode), 1); /* * Get the bank_value 
operand and save it * (at Top of stack) diff --git a/drivers/acpi/dispatcher/dswexec.c b/drivers/acpi/dispatcher/dswexec.c index b246b9657ead..b5072fa9c920 100644 --- a/drivers/acpi/dispatcher/dswexec.c +++ b/drivers/acpi/dispatcher/dswexec.c @@ -408,14 +408,6 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state) [walk_state-> num_operands - 1]), walk_state); - if (ACPI_SUCCESS(status)) { - ACPI_DUMP_OPERANDS(ACPI_WALK_OPERANDS, - ACPI_IMODE_EXECUTE, - acpi_ps_get_opcode_name - (walk_state->opcode), - walk_state->num_operands, - "after ExResolveOperands"); - } } if (ACPI_SUCCESS(status)) { diff --git a/drivers/acpi/dispatcher/dswstate.c b/drivers/acpi/dispatcher/dswstate.c index 1386ced332ec..b00d4af791aa 100644 --- a/drivers/acpi/dispatcher/dswstate.c +++ b/drivers/acpi/dispatcher/dswstate.c @@ -70,7 +70,7 @@ acpi_status acpi_ds_result_pop(union acpi_operand_object **object, struct acpi_walk_state *walk_state) { - acpi_native_uint index; + u32 index; union acpi_generic_state *state; acpi_status status; @@ -122,7 +122,7 @@ acpi_ds_result_pop(union acpi_operand_object **object, ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Obj=%p [%s] Index=%X State=%p Num=%X\n", *object, acpi_ut_get_object_type_name(*object), - (u32) index, walk_state, walk_state->result_count)); + index, walk_state, walk_state->result_count)); return (AE_OK); } @@ -146,7 +146,7 @@ acpi_ds_result_push(union acpi_operand_object * object, { union acpi_generic_state *state; acpi_status status; - acpi_native_uint index; + u32 index; ACPI_FUNCTION_NAME(ds_result_push); @@ -400,7 +400,7 @@ void acpi_ds_obj_stack_pop_and_delete(u32 pop_count, struct acpi_walk_state *walk_state) { - acpi_native_int i; + s32 i; union acpi_operand_object *obj_desc; ACPI_FUNCTION_NAME(ds_obj_stack_pop_and_delete); @@ -409,7 +409,7 @@ acpi_ds_obj_stack_pop_and_delete(u32 pop_count, return; } - for (i = (acpi_native_int) (pop_count - 1); i >= 0; i--) { + for (i = (s32) pop_count - 1; i >= 0; i--) { if (walk_state->num_operands == 0) { return; } @@ -615,14 +615,8 @@ acpi_ds_init_aml_walk(struct acpi_walk_state *walk_state, walk_state->pass_number = pass_number; if (info) { - if (info->parameter_type == ACPI_PARAM_GPE) { - walk_state->gpe_event_info = - ACPI_CAST_PTR(struct acpi_gpe_event_info, - info->parameters); - } else { - walk_state->params = info->parameters; - walk_state->caller_return_desc = &info->return_object; - } + walk_state->params = info->parameters; + walk_state->caller_return_desc = &info->return_object; } status = acpi_ps_init_scope(&walk_state->parser_state, op); diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c index bb7c51f712bd..1e872e79db33 100644 --- a/drivers/acpi/dock.c +++ b/drivers/acpi/dock.c @@ -920,6 +920,9 @@ static int __init dock_init(void) if (acpi_disabled) return 0; + if (acpi_disabled) + return 0; + /* look for a dock station */ acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, find_dock, &num, NULL); diff --git a/drivers/acpi/events/evevent.c b/drivers/acpi/events/evevent.c index 5d30e5be1b1c..c56c5c6ea77b 100644 --- a/drivers/acpi/events/evevent.c +++ b/drivers/acpi/events/evevent.c @@ -188,7 +188,7 @@ acpi_status acpi_ev_install_xrupt_handlers(void) static acpi_status acpi_ev_fixed_event_initialize(void) { - acpi_native_uint i; + u32 i; acpi_status status; /* @@ -231,7 +231,7 @@ u32 acpi_ev_fixed_event_detect(void) u32 int_status = ACPI_INTERRUPT_NOT_HANDLED; u32 fixed_status; u32 fixed_enable; - acpi_native_uint i; + u32 i; ACPI_FUNCTION_NAME(ev_fixed_event_detect); @@ -260,7 
+260,7 @@ u32 acpi_ev_fixed_event_detect(void) /* Found an active (signalled) event */ acpi_os_fixed_event_count(i); - int_status |= acpi_ev_fixed_event_dispatch((u32) i); + int_status |= acpi_ev_fixed_event_dispatch(i); } } diff --git a/drivers/acpi/events/evgpe.c b/drivers/acpi/events/evgpe.c index 5354be44f876..c5e53aae86f7 100644 --- a/drivers/acpi/events/evgpe.c +++ b/drivers/acpi/events/evgpe.c @@ -256,7 +256,7 @@ acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info) return_ACPI_STATUS(status); } - /* Mark wake-disabled or HW disable, or both */ + /* Clear the appropriate enabled flags for this GPE */ switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) { case ACPI_GPE_TYPE_WAKE: @@ -273,13 +273,23 @@ acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info) /* Disable the requested runtime GPE */ ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_RUN_ENABLED); - - /* fallthrough */ + break; default: - acpi_hw_write_gpe_enable_reg(gpe_event_info); + break; } + /* + * Even if we don't know the GPE type, make sure that we always + * disable it. low_disable_gpe will just clear the enable bit for this + * GPE and write it. It will not write out the current GPE enable mask, + * since this may inadvertently enable GPEs too early, if a rogue GPE has + * come in during ACPICA initialization - possibly as a result of AML or + * other code that has enabled the GPE. + */ + status = acpi_hw_low_disable_gpe(gpe_event_info); + return_ACPI_STATUS(status); + return_ACPI_STATUS(AE_OK); } @@ -305,7 +315,7 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device, { union acpi_operand_object *obj_desc; struct acpi_gpe_block_info *gpe_block; - acpi_native_uint i; + u32 i; ACPI_FUNCTION_ENTRY(); @@ -379,8 +389,8 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) u32 status_reg; u32 enable_reg; acpi_cpu_flags flags; - acpi_native_uint i; - acpi_native_uint j; + u32 i; + u32 j; ACPI_FUNCTION_NAME(ev_gpe_detect); @@ -462,13 +472,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) */ int_status |= acpi_ev_gpe_dispatch(&gpe_block-> - event_info[(i * - ACPI_GPE_REGISTER_WIDTH) - + - j], - (u32) j + - gpe_register_info-> - base_gpe_number); + event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number); } } } @@ -555,10 +559,6 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) */ info->prefix_node = local_gpe_event_info.dispatch.method_node; - info->parameters = - ACPI_CAST_PTR(union acpi_operand_object *, - gpe_event_info); - info->parameter_type = ACPI_PARAM_GPE; info->flags = ACPI_IGNORE_RETURN_VALUE; status = acpi_ns_evaluate(info); diff --git a/drivers/acpi/events/evgpeblk.c b/drivers/acpi/events/evgpeblk.c index e6c4d4c49e79..73c058e2f5c2 100644 --- a/drivers/acpi/events/evgpeblk.c +++ b/drivers/acpi/events/evgpeblk.c @@ -189,8 +189,8 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info, struct acpi_gpe_block_info *gpe_block) { struct acpi_gpe_event_info *gpe_event_info; - acpi_native_uint i; - acpi_native_uint j; + u32 i; + u32 j; ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers); @@ -203,7 +203,8 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info, for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) { gpe_event_info = &gpe_block-> - event_info[(i * ACPI_GPE_REGISTER_WIDTH) + j]; + event_info[((acpi_size) i * + ACPI_GPE_REGISTER_WIDTH) + j]; if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == 
ACPI_GPE_DISPATCH_HANDLER) { @@ -744,8 +745,8 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block) struct acpi_gpe_event_info *gpe_event_info = NULL; struct acpi_gpe_event_info *this_event; struct acpi_gpe_register_info *this_register; - acpi_native_uint i; - acpi_native_uint j; + u32 i; + u32 j; acpi_status status; ACPI_FUNCTION_TRACE(ev_create_gpe_info_blocks); @@ -983,8 +984,8 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device, struct acpi_gpe_walk_info gpe_info; u32 wake_gpe_count; u32 gpe_enabled_count; - acpi_native_uint i; - acpi_native_uint j; + u32 i; + u32 j; ACPI_FUNCTION_TRACE(ev_initialize_gpe_block); @@ -1033,7 +1034,8 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device, gpe_event_info = &gpe_block-> - event_info[(i * ACPI_GPE_REGISTER_WIDTH) + j]; + event_info[((acpi_size) i * + ACPI_GPE_REGISTER_WIDTH) + j]; if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == ACPI_GPE_DISPATCH_METHOD) diff --git a/drivers/acpi/events/evmisc.c b/drivers/acpi/events/evmisc.c index 2113e58e2221..1d5670be729a 100644 --- a/drivers/acpi/events/evmisc.c +++ b/drivers/acpi/events/evmisc.c @@ -575,7 +575,7 @@ acpi_status acpi_ev_release_global_lock(void) void acpi_ev_terminate(void) { - acpi_native_uint i; + u32 i; acpi_status status; ACPI_FUNCTION_TRACE(ev_terminate); @@ -589,7 +589,7 @@ void acpi_ev_terminate(void) /* Disable all fixed events */ for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) { - status = acpi_disable_event((u32) i, 0); + status = acpi_disable_event(i, 0); if (ACPI_FAILURE(status)) { ACPI_ERROR((AE_INFO, "Could not disable fixed event %d", diff --git a/drivers/acpi/events/evregion.c b/drivers/acpi/events/evregion.c index 1628f5934752..236fbd1ca438 100644 --- a/drivers/acpi/events/evregion.c +++ b/drivers/acpi/events/evregion.c @@ -81,7 +81,7 @@ acpi_ev_install_handler(acpi_handle obj_handle, acpi_status acpi_ev_install_region_handlers(void) { acpi_status status; - acpi_native_uint i; + u32 i; ACPI_FUNCTION_TRACE(ev_install_region_handlers); @@ -151,7 +151,7 @@ acpi_status acpi_ev_install_region_handlers(void) acpi_status acpi_ev_initialize_op_regions(void) { acpi_status status; - acpi_native_uint i; + u32 i; ACPI_FUNCTION_TRACE(ev_initialize_op_regions); @@ -219,7 +219,6 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function) info->prefix_node = region_obj2->extra.method_REG; info->pathname = NULL; info->parameters = args; - info->parameter_type = ACPI_PARAM_ARGS; info->flags = ACPI_IGNORE_RETURN_VALUE; /* diff --git a/drivers/acpi/events/evrgnini.c b/drivers/acpi/events/evrgnini.c index 2e3d2c5e4f4d..6b94b38df07d 100644 --- a/drivers/acpi/events/evrgnini.c +++ b/drivers/acpi/events/evrgnini.c @@ -380,7 +380,7 @@ static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node) acpi_status status; struct acpica_device_id hid; struct acpi_compatible_id_list *cid; - acpi_native_uint i; + u32 i; /* * Get the _HID and check for a PCI Root Bridge diff --git a/drivers/acpi/events/evxfevnt.c b/drivers/acpi/events/evxfevnt.c index 99a7502e6a87..73bfd6bf962f 100644 --- a/drivers/acpi/events/evxfevnt.c +++ b/drivers/acpi/events/evxfevnt.c @@ -472,7 +472,6 @@ acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags) } ACPI_EXPORT_SYMBOL(acpi_clear_gpe) -#ifdef ACPI_FUTURE_USAGE /******************************************************************************* * * FUNCTION: acpi_get_event_status @@ -489,6 +488,7 @@ ACPI_EXPORT_SYMBOL(acpi_clear_gpe) acpi_status 
acpi_get_event_status(u32 event, acpi_event_status * event_status) { acpi_status status = AE_OK; + u32 value; ACPI_FUNCTION_TRACE(acpi_get_event_status); @@ -506,7 +506,20 @@ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status) status = acpi_get_register(acpi_gbl_fixed_event_info[event]. - status_register_id, event_status); + enable_register_id, &value); + if (ACPI_FAILURE(status)) + return_ACPI_STATUS(status); + + *event_status = value; + + status = + acpi_get_register(acpi_gbl_fixed_event_info[event]. + status_register_id, &value); + if (ACPI_FAILURE(status)) + return_ACPI_STATUS(status); + + if (value) + *event_status |= ACPI_EVENT_FLAG_SET; return_ACPI_STATUS(status); } @@ -566,7 +579,6 @@ acpi_get_gpe_status(acpi_handle gpe_device, } ACPI_EXPORT_SYMBOL(acpi_get_gpe_status) -#endif /* ACPI_FUTURE_USAGE */ /******************************************************************************* * * FUNCTION: acpi_install_gpe_block diff --git a/drivers/acpi/executer/exconfig.c b/drivers/acpi/executer/exconfig.c index 39d742190584..2a32c843cb4a 100644 --- a/drivers/acpi/executer/exconfig.c +++ b/drivers/acpi/executer/exconfig.c @@ -53,7 +53,7 @@ ACPI_MODULE_NAME("exconfig") /* Local prototypes */ static acpi_status -acpi_ex_add_table(acpi_native_uint table_index, +acpi_ex_add_table(u32 table_index, struct acpi_namespace_node *parent_node, union acpi_operand_object **ddb_handle); @@ -73,7 +73,7 @@ acpi_ex_add_table(acpi_native_uint table_index, ******************************************************************************/ static acpi_status -acpi_ex_add_table(acpi_native_uint table_index, +acpi_ex_add_table(u32 table_index, struct acpi_namespace_node *parent_node, union acpi_operand_object **ddb_handle) { @@ -96,7 +96,8 @@ acpi_ex_add_table(acpi_native_uint table_index, /* Install the new table into the local data structures */ - obj_desc->reference.object = ACPI_CAST_PTR(void, table_index); + obj_desc->reference.object = ACPI_CAST_PTR(void, + (unsigned long)table_index); /* Add the table to the namespace */ @@ -128,12 +129,12 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state, { acpi_status status; union acpi_operand_object **operand = &walk_state->operands[0]; - acpi_native_uint table_index; struct acpi_namespace_node *parent_node; struct acpi_namespace_node *start_node; struct acpi_namespace_node *parameter_node = NULL; union acpi_operand_object *ddb_handle; struct acpi_table_header *table; + u32 table_index; ACPI_FUNCTION_TRACE(ex_load_table_op); @@ -280,7 +281,7 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc, { union acpi_operand_object *ddb_handle; struct acpi_table_desc table_desc; - acpi_native_uint table_index; + u32 table_index; acpi_status status; u32 length; @@ -437,7 +438,7 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle) { acpi_status status = AE_OK; union acpi_operand_object *table_desc = ddb_handle; - acpi_native_uint table_index; + u32 table_index; struct acpi_table_header *table; ACPI_FUNCTION_TRACE(ex_unload_table); @@ -454,9 +455,9 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle) return_ACPI_STATUS(AE_BAD_PARAMETER); } - /* Get the table index from the ddb_handle */ + /* Get the table index from the ddb_handle (acpi_size for 64-bit case) */ - table_index = (acpi_native_uint) table_desc->reference.object; + table_index = (u32) (acpi_size) table_desc->reference.object; /* Invoke table handler if present */ diff --git a/drivers/acpi/executer/exconvrt.c b/drivers/acpi/executer/exconvrt.c 
index fd954b4ed83d..261d97516d9b 100644 --- a/drivers/acpi/executer/exconvrt.c +++ b/drivers/acpi/executer/exconvrt.c @@ -288,11 +288,11 @@ acpi_ex_convert_to_ascii(acpi_integer integer, u16 base, u8 * string, u8 data_width) { acpi_integer digit; - acpi_native_uint i; - acpi_native_uint j; - acpi_native_uint k = 0; - acpi_native_uint hex_length; - acpi_native_uint decimal_length; + u32 i; + u32 j; + u32 k = 0; + u32 hex_length; + u32 decimal_length; u32 remainder; u8 supress_zeros; @@ -348,7 +348,7 @@ acpi_ex_convert_to_ascii(acpi_integer integer, /* hex_length: 2 ascii hex chars per data byte */ - hex_length = (acpi_native_uint) ACPI_MUL_2(data_width); + hex_length = ACPI_MUL_2(data_width); for (i = 0, j = (hex_length - 1); i < hex_length; i++, j--) { /* Get one hex digit, most significant digits first */ diff --git a/drivers/acpi/executer/excreate.c b/drivers/acpi/executer/excreate.c index 60e62c4f0577..ad09696d5069 100644 --- a/drivers/acpi/executer/excreate.c +++ b/drivers/acpi/executer/excreate.c @@ -45,8 +45,6 @@ #include <acpi/acinterp.h> #include <acpi/amlcode.h> #include <acpi/acnamesp.h> -#include <acpi/acevents.h> -#include <acpi/actables.h> #define _COMPONENT ACPI_EXECUTER ACPI_MODULE_NAME("excreate") diff --git a/drivers/acpi/executer/exdump.c b/drivers/acpi/executer/exdump.c index 74f1b22601b3..2be2e2bf95bf 100644 --- a/drivers/acpi/executer/exdump.c +++ b/drivers/acpi/executer/exdump.c @@ -580,25 +580,22 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth) case ACPI_TYPE_BUFFER: - acpi_os_printf("Buffer len %X @ %p\n", + acpi_os_printf("Buffer length %.2X @ %p\n", obj_desc->buffer.length, obj_desc->buffer.pointer); - length = obj_desc->buffer.length; - if (length > 64) { - length = 64; - } - /* Debug only -- dump the buffer contents */ if (obj_desc->buffer.pointer) { - acpi_os_printf("Buffer Contents: "); - - for (index = 0; index < length; index++) { - acpi_os_printf(" %02x", - obj_desc->buffer.pointer[index]); + length = obj_desc->buffer.length; + if (length > 128) { + length = 128; } - acpi_os_printf("\n"); + + acpi_os_printf + ("Buffer Contents: (displaying length 0x%.2X)\n", + length); + ACPI_DUMP_BUFFER(obj_desc->buffer.pointer, length); } break; @@ -756,54 +753,42 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth) * * FUNCTION: acpi_ex_dump_operands * - * PARAMETERS: Operands - Operand list - * interpreter_mode - Load or Exec - * Ident - Identification - * num_levels - # of stack entries to dump above line - * Note - Output notation - * module_name - Caller's module name - * line_number - Caller's invocation line number + * PARAMETERS: Operands - A list of Operand objects + * opcode_name - AML opcode name + * num_operands - Operand count for this opcode * - * DESCRIPTION: Dump the object stack + * DESCRIPTION: Dump the operands associated with the opcode * ******************************************************************************/ void acpi_ex_dump_operands(union acpi_operand_object **operands, - acpi_interpreter_mode interpreter_mode, - char *ident, - u32 num_levels, - char *note, char *module_name, u32 line_number) + const char *opcode_name, u32 num_operands) { - acpi_native_uint i; - ACPI_FUNCTION_NAME(ex_dump_operands); - if (!ident) { - ident = "?"; - } - - if (!note) { - note = "?"; + if (!opcode_name) { + opcode_name = "UNKNOWN"; } ACPI_DEBUG_PRINT((ACPI_DB_EXEC, - "************* Operand Stack Contents (Opcode [%s], %d Operands)\n", - ident, num_levels)); + "**** Start operand dump for opcode [%s], %d 
operands\n", + opcode_name, num_operands)); - if (num_levels == 0) { - num_levels = 1; + if (num_operands == 0) { + num_operands = 1; } - /* Dump the operand stack starting at the top */ + /* Dump the individual operands */ - for (i = 0; num_levels > 0; i--, num_levels--) { - acpi_ex_dump_operand(operands[i], 0); + while (num_operands) { + acpi_ex_dump_operand(*operands, 0); + operands++; + num_operands--; } ACPI_DEBUG_PRINT((ACPI_DB_EXEC, - "************* Operand Stack dump from %s(%d), %s\n", - module_name, line_number, note)); + "**** End operand dump for [%s]\n", opcode_name)); return; } diff --git a/drivers/acpi/executer/exfldio.c b/drivers/acpi/executer/exfldio.c index e336b5dc7a50..9ff9d1f4615d 100644 --- a/drivers/acpi/executer/exfldio.c +++ b/drivers/acpi/executer/exfldio.c @@ -153,14 +153,15 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc, /* * Slack mode only: We will go ahead and allow access to this * field if it is within the region length rounded up to the next - * access width boundary. + * access width boundary. acpi_size cast for 64-bit compile. */ if (ACPI_ROUND_UP(rgn_desc->region.length, obj_desc->common_field. access_byte_width) >= - (obj_desc->common_field.base_byte_offset + - (acpi_native_uint) obj_desc->common_field. - access_byte_width + field_datum_byte_offset)) { + ((acpi_size) obj_desc->common_field. + base_byte_offset + + obj_desc->common_field.access_byte_width + + field_datum_byte_offset)) { return_ACPI_STATUS(AE_OK); } } diff --git a/drivers/acpi/executer/exmisc.c b/drivers/acpi/executer/exmisc.c index cc956a5b5267..731414a581a6 100644 --- a/drivers/acpi/executer/exmisc.c +++ b/drivers/acpi/executer/exmisc.c @@ -329,8 +329,8 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0, /* Result of two Strings is a String */ - return_desc = acpi_ut_create_string_object((acpi_size) - (operand0->string. + return_desc = acpi_ut_create_string_object(((acpi_size) + operand0->string. length + local_operand1-> string.length)); @@ -352,8 +352,8 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0, /* Result of two Buffers is a Buffer */ - return_desc = acpi_ut_create_buffer_object((acpi_size) - (operand0->buffer. + return_desc = acpi_ut_create_buffer_object(((acpi_size) + operand0->buffer. 
length + local_operand1-> buffer.length)); diff --git a/drivers/acpi/executer/exprep.c b/drivers/acpi/executer/exprep.c index 3a2f8cd4c62a..5d438c32989d 100644 --- a/drivers/acpi/executer/exprep.c +++ b/drivers/acpi/executer/exprep.c @@ -503,11 +503,11 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info) */ second_desc = obj_desc->common.next_object; second_desc->extra.aml_start = - ((union acpi_parse_object *)(info->data_register_node))-> - named.data; + ACPI_CAST_PTR(union acpi_parse_object, + info->data_register_node)->named.data; second_desc->extra.aml_length = - ((union acpi_parse_object *)(info->data_register_node))-> - named.length; + ACPI_CAST_PTR(union acpi_parse_object, + info->data_register_node)->named.length; break; diff --git a/drivers/acpi/executer/exregion.c b/drivers/acpi/executer/exregion.c index 7cd8bb54fa01..7a41c409ae4d 100644 --- a/drivers/acpi/executer/exregion.c +++ b/drivers/acpi/executer/exregion.c @@ -156,7 +156,7 @@ acpi_ex_system_memory_space_handler(u32 function, /* Create a new mapping starting at the address given */ mem_info->mapped_logical_address = - acpi_os_map_memory((acpi_native_uint) address, window_size); + acpi_os_map_memory((acpi_physical_address) address, window_size); if (!mem_info->mapped_logical_address) { ACPI_ERROR((AE_INFO, "Could not map memory at %8.8X%8.8X, size %X", diff --git a/drivers/acpi/executer/exresop.c b/drivers/acpi/executer/exresop.c index 73e29e566a70..54085f16ec28 100644 --- a/drivers/acpi/executer/exresop.c +++ b/drivers/acpi/executer/exresop.c @@ -698,5 +698,9 @@ acpi_ex_resolve_operands(u16 opcode, } } + ACPI_DUMP_OPERANDS(walk_state->operands, + acpi_ps_get_opcode_name(opcode), + walk_state->num_operands); + return_ACPI_STATUS(status); } diff --git a/drivers/acpi/executer/exstore.c b/drivers/acpi/executer/exstore.c index 76c875bc3154..38b55e352495 100644 --- a/drivers/acpi/executer/exstore.c +++ b/drivers/acpi/executer/exstore.c @@ -343,12 +343,6 @@ acpi_ex_store(union acpi_operand_object *source_desc, acpi_ut_get_object_type_name(dest_desc), dest_desc)); - ACPI_DUMP_STACK_ENTRY(source_desc); - ACPI_DUMP_STACK_ENTRY(dest_desc); - ACPI_DUMP_OPERANDS(&dest_desc, ACPI_IMODE_EXECUTE, "ExStore", - 2, - "Target is not a Reference or Constant object"); - return_ACPI_STATUS(AE_AML_OPERAND_TYPE); } diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c index 6cf10cbc1eee..55c17afbe669 100644 --- a/drivers/acpi/fan.c +++ b/drivers/acpi/fan.c @@ -148,7 +148,7 @@ acpi_fan_write_state(struct file *file, const char __user * buffer, int result = 0; struct seq_file *m = file->private_data; struct acpi_device *device = m->private; - char state_string[12] = { '\0' }; + char state_string[3] = { '\0' }; if (count > sizeof(state_string) - 1) return -EINVAL; @@ -157,6 +157,12 @@ acpi_fan_write_state(struct file *file, const char __user * buffer, return -EFAULT; state_string[count] = '\0'; + if ((state_string[0] < '0') || (state_string[0] > '3')) + return -EINVAL; + if (state_string[1] == '\n') + state_string[1] = '\0'; + if (state_string[1] != '\0') + return -EINVAL; result = acpi_bus_set_power(device->handle, simple_strtoul(state_string, NULL, 0)); diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index 9b227d4dc9c9..0f2dd81736bd 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c @@ -166,6 +166,8 @@ static int acpi_bind_one(struct device *dev, acpi_handle handle) "firmware_node"); ret = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj, "physical_node"); + if (acpi_dev->wakeup.flags.valid) + 
device_set_wakeup_capable(dev, true); } return 0; @@ -336,6 +338,9 @@ static int __init acpi_rtc_init(void) if (acpi_disabled) return 0; + if (acpi_disabled) + return 0; + if (dev) { rtc_wake_setup(); rtc_info.wake_on = rtc_wake_on; diff --git a/drivers/acpi/hardware/hwgpe.c b/drivers/acpi/hardware/hwgpe.c index 14bc4f456ae8..0b80db9d9197 100644 --- a/drivers/acpi/hardware/hwgpe.c +++ b/drivers/acpi/hardware/hwgpe.c @@ -55,6 +55,54 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, /****************************************************************************** * + * FUNCTION: acpi_hw_low_disable_gpe + * + * PARAMETERS: gpe_event_info - Info block for the GPE to be disabled + * + * RETURN: Status + * + * DESCRIPTION: Disable a single GPE in the enable register. + * + ******************************************************************************/ + +acpi_status acpi_hw_low_disable_gpe(struct acpi_gpe_event_info *gpe_event_info) +{ + struct acpi_gpe_register_info *gpe_register_info; + acpi_status status; + u32 enable_mask; + + /* Get the info block for the entire GPE register */ + + gpe_register_info = gpe_event_info->register_info; + if (!gpe_register_info) { + return (AE_NOT_EXIST); + } + + /* Get current value of the enable register that contains this GPE */ + + status = acpi_hw_low_level_read(ACPI_GPE_REGISTER_WIDTH, &enable_mask, + &gpe_register_info->enable_address); + if (ACPI_FAILURE(status)) { + return (status); + } + + /* Clear just the bit that corresponds to this GPE */ + + ACPI_CLEAR_BIT(enable_mask, + ((u32) 1 << + (gpe_event_info->gpe_number - + gpe_register_info->base_gpe_number))); + + /* Write the updated enable mask */ + + status = acpi_hw_low_level_write(ACPI_GPE_REGISTER_WIDTH, enable_mask, + &gpe_register_info->enable_address); + + return (status); +} + +/****************************************************************************** + * * FUNCTION: acpi_hw_write_gpe_enable_reg * * PARAMETERS: gpe_event_info - Info block for the GPE to be enabled @@ -68,7 +116,7 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, ******************************************************************************/ acpi_status -acpi_hw_write_gpe_enable_reg(struct acpi_gpe_event_info *gpe_event_info) +acpi_hw_write_gpe_enable_reg(struct acpi_gpe_event_info * gpe_event_info) { struct acpi_gpe_register_info *gpe_register_info; acpi_status status; @@ -138,7 +186,6 @@ acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info) * ******************************************************************************/ -#ifdef ACPI_FUTURE_USAGE acpi_status acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info, acpi_event_status * event_status) @@ -198,7 +245,6 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info, unlock_and_exit: return (status); } -#endif /* ACPI_FUTURE_USAGE */ /****************************************************************************** * diff --git a/drivers/acpi/namespace/nsdump.c b/drivers/acpi/namespace/nsdump.c index 5445751b8a3e..0ab22004728a 100644 --- a/drivers/acpi/namespace/nsdump.c +++ b/drivers/acpi/namespace/nsdump.c @@ -73,7 +73,7 @@ acpi_ns_dump_one_device(acpi_handle obj_handle, void acpi_ns_print_pathname(u32 num_segments, char *pathname) { - acpi_native_uint i; + u32 i; ACPI_FUNCTION_NAME(ns_print_pathname); @@ -515,12 +515,12 @@ acpi_ns_dump_one_object(acpi_handle obj_handle, if (obj_type > ACPI_TYPE_LOCAL_MAX) { acpi_os_printf - ("(Ptr to ACPI Object type %X [UNKNOWN])\n", 
+ ("(Pointer to ACPI Object type %.2X [UNKNOWN])\n", obj_type); bytes_to_dump = 32; } else { acpi_os_printf - ("(Ptr to ACPI Object type %X [%s])\n", + ("(Pointer to ACPI Object type %.2X [%s])\n", obj_type, acpi_ut_get_type_name(obj_type)); bytes_to_dump = sizeof(union acpi_operand_object); diff --git a/drivers/acpi/namespace/nseval.c b/drivers/acpi/namespace/nseval.c index 14bdfa92bea0..d369164e00b0 100644 --- a/drivers/acpi/namespace/nseval.c +++ b/drivers/acpi/namespace/nseval.c @@ -138,6 +138,41 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info * info) return_ACPI_STATUS(AE_NULL_OBJECT); } + /* + * Calculate the number of arguments being passed to the method + */ + + info->param_count = 0; + if (info->parameters) { + while (info->parameters[info->param_count]) + info->param_count++; + } + + /* Error if too few arguments were passed in */ + + if (info->param_count < info->obj_desc->method.param_count) { + ACPI_ERROR((AE_INFO, + "Insufficient arguments - " + "method [%4.4s] needs %d, found %d", + acpi_ut_get_node_name(info->resolved_node), + info->obj_desc->method.param_count, + info->param_count)); + return_ACPI_STATUS(AE_MISSING_ARGUMENTS); + } + + /* Just a warning if too many arguments */ + + else if (info->param_count > + info->obj_desc->method.param_count) { + ACPI_WARNING((AE_INFO, + "Excess arguments - " + "method [%4.4s] needs %d, found %d", + acpi_ut_get_node_name(info-> + resolved_node), + info->obj_desc->method.param_count, + info->param_count)); + } + ACPI_DUMP_PATHNAME(info->resolved_node, "Execute Method:", ACPI_LV_INFO, _COMPONENT); diff --git a/drivers/acpi/namespace/nsinit.c b/drivers/acpi/namespace/nsinit.c index 6d6d930c8e18..e4c57510d798 100644 --- a/drivers/acpi/namespace/nsinit.c +++ b/drivers/acpi/namespace/nsinit.c @@ -542,7 +542,6 @@ acpi_ns_init_one_device(acpi_handle obj_handle, info->prefix_node = device_node; info->pathname = METHOD_NAME__INI; info->parameters = NULL; - info->parameter_type = ACPI_PARAM_ARGS; info->flags = ACPI_IGNORE_RETURN_VALUE; /* diff --git a/drivers/acpi/namespace/nsload.c b/drivers/acpi/namespace/nsload.c index 2c92f6cf5ce1..a4a412b7c029 100644 --- a/drivers/acpi/namespace/nsload.c +++ b/drivers/acpi/namespace/nsload.c @@ -71,8 +71,7 @@ static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle); ******************************************************************************/ acpi_status -acpi_ns_load_table(acpi_native_uint table_index, - struct acpi_namespace_node *node) +acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node) { acpi_status status; diff --git a/drivers/acpi/namespace/nsparse.c b/drivers/acpi/namespace/nsparse.c index 46a79b0103b6..a82271a9dbb3 100644 --- a/drivers/acpi/namespace/nsparse.c +++ b/drivers/acpi/namespace/nsparse.c @@ -63,13 +63,13 @@ ACPI_MODULE_NAME("nsparse") * ******************************************************************************/ acpi_status -acpi_ns_one_complete_parse(acpi_native_uint pass_number, - acpi_native_uint table_index, - struct acpi_namespace_node * start_node) +acpi_ns_one_complete_parse(u32 pass_number, + u32 table_index, + struct acpi_namespace_node *start_node) { union acpi_parse_object *parse_root; acpi_status status; - acpi_native_uint aml_length; + u32 aml_length; u8 *aml_start; struct acpi_walk_state *walk_state; struct acpi_table_header *table; @@ -112,8 +112,8 @@ acpi_ns_one_complete_parse(acpi_native_uint pass_number, aml_start = (u8 *) table + sizeof(struct acpi_table_header); aml_length = table->length - sizeof(struct acpi_table_header); 
status = acpi_ds_init_aml_walk(walk_state, parse_root, NULL, - aml_start, (u32) aml_length, - NULL, (u8) pass_number); + aml_start, aml_length, NULL, + (u8) pass_number); } if (ACPI_FAILURE(status)) { @@ -158,8 +158,7 @@ acpi_ns_one_complete_parse(acpi_native_uint pass_number, ******************************************************************************/ acpi_status -acpi_ns_parse_table(acpi_native_uint table_index, - struct acpi_namespace_node *start_node) +acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node) { acpi_status status; diff --git a/drivers/acpi/namespace/nsutils.c b/drivers/acpi/namespace/nsutils.c index 64c039843ed2..b0817e1127b1 100644 --- a/drivers/acpi/namespace/nsutils.c +++ b/drivers/acpi/namespace/nsutils.c @@ -73,9 +73,9 @@ acpi_name acpi_ns_find_parent_name(struct acpi_namespace_node *node_to_search); ******************************************************************************/ void -acpi_ns_report_error(char *module_name, +acpi_ns_report_error(const char *module_name, u32 line_number, - char *internal_name, acpi_status lookup_status) + const char *internal_name, acpi_status lookup_status) { acpi_status status; u32 bad_name; @@ -130,11 +130,11 @@ acpi_ns_report_error(char *module_name, ******************************************************************************/ void -acpi_ns_report_method_error(char *module_name, +acpi_ns_report_method_error(const char *module_name, u32 line_number, - char *message, + const char *message, struct acpi_namespace_node *prefix_node, - char *path, acpi_status method_status) + const char *path, acpi_status method_status) { acpi_status status; struct acpi_namespace_node *node = prefix_node; @@ -167,7 +167,8 @@ acpi_ns_report_method_error(char *module_name, ******************************************************************************/ void -acpi_ns_print_node_pathname(struct acpi_namespace_node *node, char *message) +acpi_ns_print_node_pathname(struct acpi_namespace_node *node, + const char *message) { struct acpi_buffer buffer; acpi_status status; @@ -296,7 +297,7 @@ u32 acpi_ns_local(acpi_object_type type) void acpi_ns_get_internal_name_length(struct acpi_namestring_info *info) { - char *next_external_char; + const char *next_external_char; u32 i; ACPI_FUNCTION_ENTRY(); @@ -363,9 +364,9 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info) { u32 num_segments = info->num_segments; char *internal_name = info->internal_name; - char *external_name = info->next_external_char; + const char *external_name = info->next_external_char; char *result = NULL; - acpi_native_uint i; + u32 i; ACPI_FUNCTION_TRACE(ns_build_internal_name); @@ -400,12 +401,11 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info) result = &internal_name[i]; } else if (num_segments == 2) { internal_name[i] = AML_DUAL_NAME_PREFIX; - result = &internal_name[(acpi_native_uint) (i + 1)]; + result = &internal_name[(acpi_size) i + 1]; } else { internal_name[i] = AML_MULTI_NAME_PREFIX_OP; - internal_name[(acpi_native_uint) (i + 1)] = - (char)num_segments; - result = &internal_name[(acpi_native_uint) (i + 2)]; + internal_name[(acpi_size) i + 1] = (char)num_segments; + result = &internal_name[(acpi_size) i + 2]; } } @@ -472,7 +472,8 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info) * *******************************************************************************/ -acpi_status acpi_ns_internalize_name(char *external_name, char **converted_name) +acpi_status 
+acpi_ns_internalize_name(const char *external_name, char **converted_name) { char *internal_name; struct acpi_namestring_info info; @@ -528,15 +529,15 @@ acpi_status acpi_ns_internalize_name(char *external_name, char **converted_name) acpi_status acpi_ns_externalize_name(u32 internal_name_length, - char *internal_name, + const char *internal_name, u32 * converted_name_length, char **converted_name) { - acpi_native_uint names_index = 0; - acpi_native_uint num_segments = 0; - acpi_native_uint required_length; - acpi_native_uint prefix_length = 0; - acpi_native_uint i = 0; - acpi_native_uint j = 0; + u32 names_index = 0; + u32 num_segments = 0; + u32 required_length; + u32 prefix_length = 0; + u32 i = 0; + u32 j = 0; ACPI_FUNCTION_TRACE(ns_externalize_name); @@ -582,9 +583,8 @@ acpi_ns_externalize_name(u32 internal_name_length, /* <count> 4-byte names */ names_index = prefix_length + 2; - num_segments = (acpi_native_uint) (u8) - internal_name[(acpi_native_uint) - (prefix_length + 1)]; + num_segments = (u8) + internal_name[(acpi_size) prefix_length + 1]; break; case AML_DUAL_NAME_PREFIX: @@ -823,7 +823,7 @@ u32 acpi_ns_opens_scope(acpi_object_type type) acpi_status acpi_ns_get_node(struct acpi_namespace_node *prefix_node, - char *pathname, + const char *pathname, u32 flags, struct acpi_namespace_node **return_node) { union acpi_generic_state scope_info; diff --git a/drivers/acpi/namespace/nsxfeval.c b/drivers/acpi/namespace/nsxfeval.c index a8d549187c84..38be5865d95d 100644 --- a/drivers/acpi/namespace/nsxfeval.c +++ b/drivers/acpi/namespace/nsxfeval.c @@ -182,7 +182,6 @@ acpi_evaluate_object(acpi_handle handle, } info->pathname = pathname; - info->parameter_type = ACPI_PARAM_ARGS; /* Convert and validate the device handle */ @@ -442,7 +441,7 @@ acpi_ns_get_device_callback(acpi_handle obj_handle, u32 flags; struct acpica_device_id hid; struct acpi_compatible_id_list *cid; - acpi_native_uint i; + u32 i; int found; status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c index 658e5f3abae0..cb9864e39bae 100644 --- a/drivers/acpi/numa.c +++ b/drivers/acpi/numa.c @@ -120,10 +120,10 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header) struct acpi_srat_mem_affinity *p = (struct acpi_srat_mem_affinity *)header; ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "SRAT Memory (0x%lx length 0x%lx type 0x%x) in proximity domain %d %s%s\n", + "SRAT Memory (0x%lx length 0x%lx) in proximity domain %d %s%s\n", (unsigned long)p->base_address, (unsigned long)p->length, - p->memory_type, p->proximity_domain, + p->proximity_domain, (p->flags & ACPI_SRAT_MEM_ENABLED)? "enabled" : "disabled", (p->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE)? 
diff --git a/drivers/acpi/parser/psargs.c b/drivers/acpi/parser/psargs.c index e94463778845..d830b29b85b1 100644 --- a/drivers/acpi/parser/psargs.c +++ b/drivers/acpi/parser/psargs.c @@ -76,7 +76,7 @@ acpi_ps_get_next_package_length(struct acpi_parse_state *parser_state) { u8 *aml = parser_state->aml; u32 package_length = 0; - acpi_native_uint byte_count; + u32 byte_count; u8 byte_zero_mask = 0x3F; /* Default [0:5] */ ACPI_FUNCTION_TRACE(ps_get_next_package_length); @@ -86,7 +86,7 @@ acpi_ps_get_next_package_length(struct acpi_parse_state *parser_state) * used to encode the package length, either 0,1,2, or 3 */ byte_count = (aml[0] >> 6); - parser_state->aml += (byte_count + 1); + parser_state->aml += ((acpi_size) byte_count + 1); /* Get bytes 3, 2, 1 as needed */ diff --git a/drivers/acpi/parser/psxface.c b/drivers/acpi/parser/psxface.c index 52581454c47c..270469aae842 100644 --- a/drivers/acpi/parser/psxface.c +++ b/drivers/acpi/parser/psxface.c @@ -333,9 +333,9 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info) static void acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action) { - acpi_native_uint i; + u32 i; - if ((info->parameter_type == ACPI_PARAM_ARGS) && (info->parameters)) { + if (info->parameters) { /* Update reference count for each parameter */ diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index 89022a74faee..11acaee14d66 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c @@ -162,7 +162,7 @@ do_prt_fixups(struct acpi_prt_entry *entry, struct acpi_pci_routing_table *prt) !strcmp(prt->source, quirk->source) && strlen(prt->source) >= strlen(quirk->actual_source)) { printk(KERN_WARNING PREFIX "firmware reports " - "%04x:%02x:%02x[%c] connected to %s; " + "%04x:%02x:%02x PCI INT %c connected to %s; " "changing to %s\n", entry->id.segment, entry->id.bus, entry->id.device, 'A' + entry->pin, @@ -429,7 +429,7 @@ acpi_pci_irq_derive(struct pci_dev *dev, { struct pci_dev *bridge = dev; int irq = -1; - u8 bridge_pin = 0; + u8 bridge_pin = 0, orig_pin = pin; if (!dev) @@ -463,8 +463,8 @@ acpi_pci_irq_derive(struct pci_dev *dev, } if (irq < 0) { - printk(KERN_WARNING PREFIX "Unable to derive IRQ for device %s\n", - pci_name(dev)); + dev_warn(&dev->dev, "can't derive routing for PCI INT %c\n", + 'A' + orig_pin); return -1; } @@ -487,6 +487,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev) int triggering = ACPI_LEVEL_SENSITIVE; int polarity = ACPI_ACTIVE_LOW; char *link = NULL; + char link_desc[16]; int rc; @@ -503,7 +504,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev) pin--; if (!dev->bus) { - printk(KERN_ERR PREFIX "Invalid (NULL) 'bus' field\n"); + dev_err(&dev->dev, "invalid (NULL) 'bus' field\n"); return -ENODEV; } @@ -538,8 +539,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev) * driver reported one, then use it. Exit in any case. 
*/ if (irq < 0) { - printk(KERN_WARNING PREFIX "PCI Interrupt %s[%c]: no GSI", - pci_name(dev), ('A' + pin)); + dev_warn(&dev->dev, "PCI INT %c: no GSI", 'A' + pin); /* Interrupt Line values above 0xF are forbidden */ if (dev->irq > 0 && (dev->irq <= 0xF)) { printk(" - using IRQ %d\n", dev->irq); @@ -554,21 +554,21 @@ int acpi_pci_irq_enable(struct pci_dev *dev) rc = acpi_register_gsi(irq, triggering, polarity); if (rc < 0) { - printk(KERN_WARNING PREFIX "PCI Interrupt %s[%c]: failed " - "to register GSI\n", pci_name(dev), ('A' + pin)); + dev_warn(&dev->dev, "PCI INT %c: failed to register GSI\n", + 'A' + pin); return rc; } dev->irq = rc; - printk(KERN_INFO PREFIX "PCI Interrupt %s[%c] -> ", - pci_name(dev), 'A' + pin); - if (link) - printk("Link [%s] -> ", link); + snprintf(link_desc, sizeof(link_desc), " -> Link[%s]", link); + else + link_desc[0] = '\0'; - printk("GSI %u (%s, %s) -> IRQ %d\n", irq, - (triggering == ACPI_LEVEL_SENSITIVE) ? "level" : "edge", - (polarity == ACPI_ACTIVE_LOW) ? "low" : "high", dev->irq); + dev_info(&dev->dev, "PCI INT %c%s -> GSI %u (%s, %s) -> IRQ %d\n", + 'A' + pin, link_desc, irq, + (triggering == ACPI_LEVEL_SENSITIVE) ? "level" : "edge", + (polarity == ACPI_ACTIVE_LOW) ? "low" : "high", dev->irq); return 0; } @@ -616,10 +616,6 @@ void acpi_pci_irq_disable(struct pci_dev *dev) * (e.g. PCI_UNDEFINED_IRQ). */ - printk(KERN_INFO PREFIX "PCI interrupt for device %s disabled\n", - pci_name(dev)); - + dev_info(&dev->dev, "PCI INT %c disabled\n", 'A' + pin); acpi_unregister_gsi(gsi); - - return; } diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c new file mode 100644 index 000000000000..b9ab030a52d5 --- /dev/null +++ b/drivers/acpi/pci_slot.c @@ -0,0 +1,368 @@ +/* + * pci_slot.c - ACPI PCI Slot Driver + * + * The code here is heavily leveraged from the acpiphp module. + * Thanks to Matthew Wilcox <matthew@wil.cx> for much guidance. + * Thanks to Kenji Kaneshige <kaneshige.kenji@jp.fujitsu.com> for code + * review and fixes. + * + * Copyright (C) 2007 Alex Chiang <achiang@hp.com> + * Copyright (C) 2007 Hewlett-Packard Development Company, L.P. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/acpi.h> +#include <acpi/acpi_bus.h> +#include <acpi/acpi_drivers.h> + +static int debug; +static int check_sta_before_sun; + +#define DRIVER_VERSION "0.1" +#define DRIVER_AUTHOR "Alex Chiang <achiang@hp.com>" +#define DRIVER_DESC "ACPI PCI Slot Detection Driver" +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL"); +MODULE_PARM_DESC(debug, "Debugging mode enabled or not"); +module_param(debug, bool, 0644); + +#define _COMPONENT ACPI_PCI_COMPONENT +ACPI_MODULE_NAME("pci_slot"); + +#define MY_NAME "pci_slot" +#define err(format, arg...) 
printk(KERN_ERR "%s: " format , MY_NAME , ## arg) +#define info(format, arg...) printk(KERN_INFO "%s: " format , MY_NAME , ## arg) +#define dbg(format, arg...) \ + do { \ + if (debug) \ + printk(KERN_DEBUG "%s: " format, \ + MY_NAME , ## arg); \ + } while (0) + +#define SLOT_NAME_SIZE 20 /* Inspired by #define in acpiphp.h */ + +struct acpi_pci_slot { + acpi_handle root_handle; /* handle of the root bridge */ + struct pci_slot *pci_slot; /* corresponding pci_slot */ + struct list_head list; /* node in the list of slots */ +}; + +static int acpi_pci_slot_add(acpi_handle handle); +static void acpi_pci_slot_remove(acpi_handle handle); + +static LIST_HEAD(slot_list); +static DEFINE_MUTEX(slot_list_lock); +static struct acpi_pci_driver acpi_pci_slot_driver = { + .add = acpi_pci_slot_add, + .remove = acpi_pci_slot_remove, +}; + +static int +check_slot(acpi_handle handle, int *device, unsigned long *sun) +{ + int retval = 0; + unsigned long adr, sta; + acpi_status status; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + + acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); + dbg("Checking slot on path: %s\n", (char *)buffer.pointer); + + if (check_sta_before_sun) { + /* If SxFy doesn't have _STA, we just assume it's there */ + status = acpi_evaluate_integer(handle, "_STA", NULL, &sta); + if (ACPI_SUCCESS(status) && !(sta & ACPI_STA_DEVICE_PRESENT)) { + retval = -1; + goto out; + } + } + + status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr); + if (ACPI_FAILURE(status)) { + dbg("_ADR returned %d on %s\n", status, (char *)buffer.pointer); + retval = -1; + goto out; + } + + *device = (adr >> 16) & 0xffff; + + /* No _SUN == not a slot == bail */ + status = acpi_evaluate_integer(handle, "_SUN", NULL, sun); + if (ACPI_FAILURE(status)) { + dbg("_SUN returned %d on %s\n", status, (char *)buffer.pointer); + retval = -1; + goto out; + } + +out: + kfree(buffer.pointer); + return retval; +} + +struct callback_args { + acpi_walk_callback user_function; /* only for walk_p2p_bridge */ + struct pci_bus *pci_bus; + acpi_handle root_handle; +}; + +/* + * register_slot + * + * Called once for each SxFy object in the namespace. Don't worry about + * calling pci_create_slot multiple times for the same pci_bus:device, + * since each subsequent call simply bumps the refcount on the pci_slot. + * + * The number of calls to pci_destroy_slot from unregister_slot is + * symmetrical. 
+ */ +static acpi_status +register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) +{ + int device; + unsigned long sun; + char name[SLOT_NAME_SIZE]; + struct acpi_pci_slot *slot; + struct pci_slot *pci_slot; + struct callback_args *parent_context = context; + struct pci_bus *pci_bus = parent_context->pci_bus; + + if (check_slot(handle, &device, &sun)) + return AE_OK; + + slot = kmalloc(sizeof(*slot), GFP_KERNEL); + if (!slot) { + err("%s: cannot allocate memory\n", __func__); + return AE_OK; + } + + snprintf(name, sizeof(name), "%u", (u32)sun); + pci_slot = pci_create_slot(pci_bus, device, name); + if (IS_ERR(pci_slot)) { + err("pci_create_slot returned %ld\n", PTR_ERR(pci_slot)); + kfree(slot); + } + + slot->root_handle = parent_context->root_handle; + slot->pci_slot = pci_slot; + INIT_LIST_HEAD(&slot->list); + mutex_lock(&slot_list_lock); + list_add(&slot->list, &slot_list); + mutex_unlock(&slot_list_lock); + + dbg("pci_slot: %p, pci_bus: %x, device: %d, name: %s\n", + pci_slot, pci_bus->number, device, name); + + return AE_OK; +} + +/* + * walk_p2p_bridge - discover and walk p2p bridges + * @handle: points to an acpi_pci_root + * @context: p2p_bridge_context pointer + * + * Note that when we call ourselves recursively, we pass a different + * value of pci_bus in the child_context. + */ +static acpi_status +walk_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv) +{ + int device, function; + unsigned long adr; + acpi_status status; + acpi_handle dummy_handle; + acpi_walk_callback user_function; + + struct pci_dev *dev; + struct pci_bus *pci_bus; + struct callback_args child_context; + struct callback_args *parent_context = context; + + pci_bus = parent_context->pci_bus; + user_function = parent_context->user_function; + + status = acpi_get_handle(handle, "_ADR", &dummy_handle); + if (ACPI_FAILURE(status)) + return AE_OK; + + status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr); + if (ACPI_FAILURE(status)) + return AE_OK; + + device = (adr >> 16) & 0xffff; + function = adr & 0xffff; + + dev = pci_get_slot(pci_bus, PCI_DEVFN(device, function)); + if (!dev || !dev->subordinate) + goto out; + + child_context.pci_bus = dev->subordinate; + child_context.user_function = user_function; + child_context.root_handle = parent_context->root_handle; + + dbg("p2p bridge walk, pci_bus = %x\n", dev->subordinate->number); + status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, + user_function, &child_context, NULL); + if (ACPI_FAILURE(status)) + goto out; + + status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, + walk_p2p_bridge, &child_context, NULL); +out: + pci_dev_put(dev); + return AE_OK; +} + +/* + * walk_root_bridge - generic root bridge walker + * @handle: points to an acpi_pci_root + * @user_function: user callback for slot objects + * + * Call user_function for all objects underneath this root bridge. + * Walk p2p bridges underneath us and call user_function on those too. 
+ */ +static int +walk_root_bridge(acpi_handle handle, acpi_walk_callback user_function) +{ + int seg, bus; + unsigned long tmp; + acpi_status status; + acpi_handle dummy_handle; + struct pci_bus *pci_bus; + struct callback_args context; + + /* If the bridge doesn't have _STA, we assume it is always there */ + status = acpi_get_handle(handle, "_STA", &dummy_handle); + if (ACPI_SUCCESS(status)) { + status = acpi_evaluate_integer(handle, "_STA", NULL, &tmp); + if (ACPI_FAILURE(status)) { + info("%s: _STA evaluation failure\n", __func__); + return 0; + } + if ((tmp & ACPI_STA_DEVICE_FUNCTIONING) == 0) + /* don't register this object */ + return 0; + } + + status = acpi_evaluate_integer(handle, "_SEG", NULL, &tmp); + seg = ACPI_SUCCESS(status) ? tmp : 0; + + status = acpi_evaluate_integer(handle, "_BBN", NULL, &tmp); + bus = ACPI_SUCCESS(status) ? tmp : 0; + + pci_bus = pci_find_bus(seg, bus); + if (!pci_bus) + return 0; + + context.pci_bus = pci_bus; + context.user_function = user_function; + context.root_handle = handle; + + dbg("root bridge walk, pci_bus = %x\n", pci_bus->number); + status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, + user_function, &context, NULL); + if (ACPI_FAILURE(status)) + return status; + + status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, + walk_p2p_bridge, &context, NULL); + if (ACPI_FAILURE(status)) + err("%s: walk_p2p_bridge failure - %d\n", __func__, status); + + return status; +} + +/* + * acpi_pci_slot_add + * @handle: points to an acpi_pci_root + */ +static int +acpi_pci_slot_add(acpi_handle handle) +{ + acpi_status status; + + status = walk_root_bridge(handle, register_slot); + if (ACPI_FAILURE(status)) + err("%s: register_slot failure - %d\n", __func__, status); + + return status; +} + +/* + * acpi_pci_slot_remove + * @handle: points to an acpi_pci_root + */ +static void +acpi_pci_slot_remove(acpi_handle handle) +{ + struct acpi_pci_slot *slot, *tmp; + + mutex_lock(&slot_list_lock); + list_for_each_entry_safe(slot, tmp, &slot_list, list) { + if (slot->root_handle == handle) { + list_del(&slot->list); + pci_destroy_slot(slot->pci_slot); + kfree(slot); + } + } + mutex_unlock(&slot_list_lock); +} + +static int do_sta_before_sun(const struct dmi_system_id *d) +{ + info("%s detected: will evaluate _STA before calling _SUN\n", d->ident); + check_sta_before_sun = 1; + return 0; +} + +static struct dmi_system_id acpi_pci_slot_dmi_table[] __initdata = { + /* + * Fujitsu Primequest machines will return 1023 to indicate an + * error if the _SUN method is evaluated on SxFy objects that + * are not present (as indicated by _STA), so for those machines, + * we want to check _STA before evaluating _SUN. 
+ */ + { + .callback = do_sta_before_sun, + .ident = "Fujitsu PRIMEQUEST", + .matches = { + DMI_MATCH(DMI_BIOS_VENDOR, "FUJITSU LIMITED"), + DMI_MATCH(DMI_BIOS_VERSION, "PRIMEQUEST"), + }, + }, + {} +}; + +static int __init +acpi_pci_slot_init(void) +{ + dmi_check_system(acpi_pci_slot_dmi_table); + acpi_pci_register_driver(&acpi_pci_slot_driver); + return 0; +} + +static void __exit +acpi_pci_slot_exit(void) +{ + acpi_pci_unregister_driver(&acpi_pci_slot_driver); +} + +module_init(acpi_pci_slot_init); +module_exit(acpi_pci_slot_exit); diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c index 81e4f081a4ae..4ab21cb1c8c7 100644 --- a/drivers/acpi/power.c +++ b/drivers/acpi/power.c @@ -292,69 +292,135 @@ static int acpi_power_off_device(acpi_handle handle, struct acpi_device *dev) return 0; } +/** + * acpi_device_sleep_wake - execute _DSW (Device Sleep Wake) or (deprecated in + * ACPI 3.0) _PSW (Power State Wake) + * @dev: Device to handle. + * @enable: 0 - disable, 1 - enable the wake capabilities of the device. + * @sleep_state: Target sleep state of the system. + * @dev_state: Target power state of the device. + * + * Execute _DSW (Device Sleep Wake) or (deprecated in ACPI 3.0) _PSW (Power + * State Wake) for the device, if present. On failure reset the device's + * wakeup.flags.valid flag. + * + * RETURN VALUE: + * 0 if either _DSW or _PSW has been successfully executed + * 0 if neither _DSW nor _PSW has been found + * -ENODEV if the execution of either _DSW or _PSW has failed + */ +int acpi_device_sleep_wake(struct acpi_device *dev, + int enable, int sleep_state, int dev_state) +{ + union acpi_object in_arg[3]; + struct acpi_object_list arg_list = { 3, in_arg }; + acpi_status status = AE_OK; + + /* + * Try to execute _DSW first. + * + * Three agruments are needed for the _DSW object: + * Argument 0: enable/disable the wake capabilities + * Argument 1: target system state + * Argument 2: target device state + * When _DSW object is called to disable the wake capabilities, maybe + * the first argument is filled. The values of the other two agruments + * are meaningless. + */ + in_arg[0].type = ACPI_TYPE_INTEGER; + in_arg[0].integer.value = enable; + in_arg[1].type = ACPI_TYPE_INTEGER; + in_arg[1].integer.value = sleep_state; + in_arg[2].type = ACPI_TYPE_INTEGER; + in_arg[2].integer.value = dev_state; + status = acpi_evaluate_object(dev->handle, "_DSW", &arg_list, NULL); + if (ACPI_SUCCESS(status)) { + return 0; + } else if (status != AE_NOT_FOUND) { + printk(KERN_ERR PREFIX "_DSW execution failed\n"); + dev->wakeup.flags.valid = 0; + return -ENODEV; + } + + /* Execute _PSW */ + arg_list.count = 1; + in_arg[0].integer.value = enable; + status = acpi_evaluate_object(dev->handle, "_PSW", &arg_list, NULL); + if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) { + printk(KERN_ERR PREFIX "_PSW execution failed\n"); + dev->wakeup.flags.valid = 0; + return -ENODEV; + } + + return 0; +} + /* * Prepare a wakeup device, two steps (Ref ACPI 2.0:P229): * 1. Power on the power resources required for the wakeup device - * 2. Enable _PSW (power state wake) for the device if present + * 2. 
Execute _DSW (Device Sleep Wake) or (deprecated in ACPI 3.0) _PSW (Power + * State Wake) for the device, if present */ -int acpi_enable_wakeup_device_power(struct acpi_device *dev) +int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state) { - union acpi_object arg = { ACPI_TYPE_INTEGER }; - struct acpi_object_list arg_list = { 1, &arg }; - acpi_status status = AE_OK; - int i; - int ret = 0; + int i, err; if (!dev || !dev->wakeup.flags.valid) - return -1; + return -EINVAL; + + /* + * Do not execute the code below twice in a row without calling + * acpi_disable_wakeup_device_power() in between for the same device + */ + if (dev->wakeup.flags.prepared) + return 0; - arg.integer.value = 1; /* Open power resource */ for (i = 0; i < dev->wakeup.resources.count; i++) { - ret = acpi_power_on(dev->wakeup.resources.handles[i], dev); + int ret = acpi_power_on(dev->wakeup.resources.handles[i], dev); if (ret) { printk(KERN_ERR PREFIX "Transition power state\n"); dev->wakeup.flags.valid = 0; - return -1; + return -ENODEV; } } - /* Execute PSW */ - status = acpi_evaluate_object(dev->handle, "_PSW", &arg_list, NULL); - if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) { - printk(KERN_ERR PREFIX "Evaluate _PSW\n"); - dev->wakeup.flags.valid = 0; - ret = -1; - } + /* + * Passing 3 as the third argument below means the device may be placed + * in arbitrary power state afterwards. + */ + err = acpi_device_sleep_wake(dev, 1, sleep_state, 3); + if (!err) + dev->wakeup.flags.prepared = 1; - return ret; + return err; } /* * Shutdown a wakeup device, counterpart of above method - * 1. Disable _PSW (power state wake) + * 1. Execute _DSW (Device Sleep Wake) or (deprecated in ACPI 3.0) _PSW (Power + * State Wake) for the device, if present * 2. Shutdown down the power resources */ int acpi_disable_wakeup_device_power(struct acpi_device *dev) { - union acpi_object arg = { ACPI_TYPE_INTEGER }; - struct acpi_object_list arg_list = { 1, &arg }; - acpi_status status = AE_OK; - int i; - int ret = 0; - + int i, ret; if (!dev || !dev->wakeup.flags.valid) - return -1; + return -EINVAL; - arg.integer.value = 0; - /* Execute PSW */ - status = acpi_evaluate_object(dev->handle, "_PSW", &arg_list, NULL); - if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) { - printk(KERN_ERR PREFIX "Evaluate _PSW\n"); - dev->wakeup.flags.valid = 0; - return -1; - } + /* + * Do not execute the code below twice in a row without calling + * acpi_enable_wakeup_device_power() in between for the same device + */ + if (!dev->wakeup.flags.prepared) + return 0; + + dev->wakeup.flags.prepared = 0; + + ret = acpi_device_sleep_wake(dev, 0, 0, 0); + if (ret) + return ret; /* Close power resource */ for (i = 0; i < dev->wakeup.resources.count; i++) { @@ -362,7 +428,7 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev) if (ret) { printk(KERN_ERR PREFIX "Transition power state\n"); dev->wakeup.flags.valid = 0; - return -1; + return -ENODEV; } } diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index 9dd0fa93b9e1..ec0f2d581ece 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c @@ -118,8 +118,31 @@ static const struct file_operations acpi_processor_info_fops = { .release = single_release, }; -struct acpi_processor *processors[NR_CPUS]; +DEFINE_PER_CPU(struct acpi_processor *, processors); struct acpi_processor_errata errata __read_mostly; +static int set_no_mwait(const struct dmi_system_id *id) +{ + printk(KERN_NOTICE PREFIX "%s detected - " + "disable mwait for CPU 
C-stetes\n", id->ident); + idle_nomwait = 1; + return 0; +} + +static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = { + { + set_no_mwait, "IFL91 board", { + DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"), + DMI_MATCH(DMI_SYS_VENDOR, "ZEPTO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "3215W"), + DMI_MATCH(DMI_BOARD_NAME, "IFL91") }, NULL}, + { + set_no_mwait, "Extensa 5220", { + DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), + DMI_MATCH(DMI_SYS_VENDOR, "ACER"), + DMI_MATCH(DMI_PRODUCT_VERSION, "0100"), + DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL}, + {}, +}; /* -------------------------------------------------------------------------- Errata Handling @@ -265,7 +288,20 @@ static int acpi_processor_set_pdc(struct acpi_processor *pr) if (!pdc_in) return status; + if (idle_nomwait) { + /* + * If mwait is disabled for CPU C-states, the C2C3_FFH access + * mode will be disabled in the parameter of _PDC object. + * Of course C1_FFH access mode will also be disabled. + */ + union acpi_object *obj; + u32 *buffer = NULL; + obj = pdc_in->pointer; + buffer = (u32 *)(obj->buffer.pointer); + buffer[2] &= ~(ACPI_PDC_C_C2C3_FFH | ACPI_PDC_C_C1_FFH); + + } status = acpi_evaluate_object(pr->handle, "_PDC", pdc_in, NULL); if (ACPI_FAILURE(status)) @@ -614,14 +650,14 @@ static int acpi_processor_get_info(struct acpi_processor *pr, unsigned has_uid) return 0; } -static void *processor_device_array[NR_CPUS]; +static DEFINE_PER_CPU(void *, processor_device_array); static int __cpuinit acpi_processor_start(struct acpi_device *device) { int result = 0; acpi_status status = AE_OK; struct acpi_processor *pr; - + struct sys_device *sysdev; pr = acpi_driver_data(device); @@ -638,20 +674,24 @@ static int __cpuinit acpi_processor_start(struct acpi_device *device) * ACPI id of processors can be reported wrongly by the BIOS. 
* Don't trust it blindly */ - if (processor_device_array[pr->id] != NULL && - processor_device_array[pr->id] != device) { + if (per_cpu(processor_device_array, pr->id) != NULL && + per_cpu(processor_device_array, pr->id) != device) { printk(KERN_WARNING "BIOS reported wrong ACPI id " "for the processor\n"); return -ENODEV; } - processor_device_array[pr->id] = device; + per_cpu(processor_device_array, pr->id) = device; - processors[pr->id] = pr; + per_cpu(processors, pr->id) = pr; result = acpi_processor_add_fs(device); if (result) goto end; + sysdev = get_cpu_sysdev(pr->id); + if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev")) + return -EFAULT; + status = acpi_install_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY, acpi_processor_notify, pr); @@ -749,7 +789,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; - struct acpi_processor *pr = processors[cpu]; + struct acpi_processor *pr = per_cpu(processors, cpu); if (action == CPU_ONLINE && pr) { acpi_processor_ppc_has_changed(pr); @@ -810,6 +850,8 @@ static int acpi_processor_remove(struct acpi_device *device, int type) status = acpi_remove_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY, acpi_processor_notify); + sysfs_remove_link(&device->dev.kobj, "sysdev"); + acpi_processor_remove_fs(device); if (pr->cdev) { @@ -819,8 +861,8 @@ static int acpi_processor_remove(struct acpi_device *device, int type) pr->cdev = NULL; } - processors[pr->id] = NULL; - processor_device_array[pr->id] = NULL; + per_cpu(processors, pr->id) = NULL; + per_cpu(processor_device_array, pr->id) = NULL; kfree(pr); return 0; @@ -1014,9 +1056,9 @@ static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu) static int acpi_processor_handle_eject(struct acpi_processor *pr) { - if (cpu_online(pr->id)) { - return (-EINVAL); - } + if (cpu_online(pr->id)) + cpu_down(pr->id); + arch_unregister_cpu(pr->id); acpi_unmap_lsapic(pr->id); return (0); @@ -1068,8 +1110,6 @@ static int __init acpi_processor_init(void) { int result = 0; - - memset(&processors, 0, sizeof(processors)); memset(&errata, 0, sizeof(errata)); #ifdef CONFIG_SMP @@ -1083,6 +1123,11 @@ static int __init acpi_processor_init(void) return -ENOMEM; acpi_processor_dir->owner = THIS_MODULE; + /* + * Check whether the system is DMI table. If yes, OSPM + * should not use mwait for CPU-states. 
+ */ + dmi_check_system(processor_idle_dmi_table); result = cpuidle_register_driver(&acpi_idle_driver); if (result < 0) goto out_proc; diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 4976e5db2b3f..d592dbb1d12a 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -41,6 +41,7 @@ #include <linux/pm_qos_params.h> #include <linux/clockchips.h> #include <linux/cpuidle.h> +#include <linux/cpuidle.h> /* * Include the apic definitions for x86 to have the APIC timer related defines @@ -57,6 +58,7 @@ #include <acpi/acpi_bus.h> #include <acpi/processor.h> +#include <asm/processor.h> #define ACPI_PROCESSOR_COMPONENT 0x01000000 #define ACPI_PROCESSOR_CLASS "processor" @@ -401,7 +403,7 @@ static void acpi_processor_idle(void) */ local_irq_disable(); - pr = processors[smp_processor_id()]; + pr = __get_cpu_var(processors); if (!pr) { local_irq_enable(); return; @@ -955,6 +957,21 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr) } else { continue; } + if (cx.type == ACPI_STATE_C1 && + (idle_halt || idle_nomwait)) { + /* + * In most cases the C1 space_id obtained from + * _CST object is FIXED_HARDWARE access mode. + * But when the option of idle=halt is added, + * the entry_method type should be changed from + * CSTATE_FFH to CSTATE_HALT. + * When the option of idle=nomwait is added, + * the C1 entry_method type should be + * CSTATE_HALT. + */ + cx.entry_method = ACPI_CSTATE_HALT; + snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT"); + } } else { snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x", cx.address); @@ -1431,7 +1448,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev, struct acpi_processor *pr; struct acpi_processor_cx *cx = cpuidle_get_statedata(state); - pr = processors[smp_processor_id()]; + pr = __get_cpu_var(processors); if (unlikely(!pr)) return 0; @@ -1471,7 +1488,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, u32 t1, t2; int sleep_ticks = 0; - pr = processors[smp_processor_id()]; + pr = __get_cpu_var(processors); if (unlikely(!pr)) return 0; @@ -1549,7 +1566,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, u32 t1, t2; int sleep_ticks = 0; - pr = processors[smp_processor_id()]; + pr = __get_cpu_var(processors); if (unlikely(!pr)) return 0; @@ -1780,6 +1797,15 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr, return 0; if (!first_run) { + if (idle_halt) { + /* + * When the boot option of "idle=halt" is added, halt + * is used for CPU IDLE. + * In such case C2/C3 is meaningless. So the max_cstate + * is set to one. + */ + max_cstate = 1; + } dmi_check_system(processor_power_dmi_table); max_cstate = acpi_processor_cstate_check(max_cstate); if (max_cstate < ACPI_C_STATES_MAX) diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index d80b2d1441af..b4749969c6b4 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c @@ -89,7 +89,7 @@ static int acpi_processor_ppc_notifier(struct notifier_block *nb, if (event != CPUFREQ_INCOMPATIBLE) goto out; - pr = processors[policy->cpu]; + pr = per_cpu(processors, policy->cpu); if (!pr || !pr->performance) goto out; @@ -572,7 +572,7 @@ int acpi_processor_preregister_performance( /* Call _PSD for all CPUs */ for_each_possible_cpu(i) { - pr = processors[i]; + pr = per_cpu(processors, i); if (!pr) { /* Look only at processors in ACPI namespace */ continue; @@ -603,7 +603,7 @@ int acpi_processor_preregister_performance( * domain info. 
*/ for_each_possible_cpu(i) { - pr = processors[i]; + pr = per_cpu(processors, i); if (!pr) continue; @@ -624,7 +624,7 @@ int acpi_processor_preregister_performance( cpus_clear(covered_cpus); for_each_possible_cpu(i) { - pr = processors[i]; + pr = per_cpu(processors, i); if (!pr) continue; @@ -651,7 +651,7 @@ int acpi_processor_preregister_performance( if (i == j) continue; - match_pr = processors[j]; + match_pr = per_cpu(processors, j); if (!match_pr) continue; @@ -680,7 +680,7 @@ int acpi_processor_preregister_performance( if (i == j) continue; - match_pr = processors[j]; + match_pr = per_cpu(processors, j); if (!match_pr) continue; @@ -697,7 +697,7 @@ int acpi_processor_preregister_performance( err_ret: for_each_possible_cpu(i) { - pr = processors[i]; + pr = per_cpu(processors, i); if (!pr || !pr->performance) continue; @@ -728,7 +728,7 @@ acpi_processor_register_performance(struct acpi_processor_performance mutex_lock(&performance_mutex); - pr = processors[cpu]; + pr = per_cpu(processors, cpu); if (!pr) { mutex_unlock(&performance_mutex); return -ENODEV; @@ -766,7 +766,7 @@ acpi_processor_unregister_performance(struct acpi_processor_performance mutex_lock(&performance_mutex); - pr = processors[cpu]; + pr = per_cpu(processors, cpu); if (!pr) { mutex_unlock(&performance_mutex); return; diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c index 28509fbba6f9..a56fc6c4394b 100644 --- a/drivers/acpi/processor_throttling.c +++ b/drivers/acpi/processor_throttling.c @@ -71,7 +71,7 @@ static int acpi_processor_update_tsd_coord(void) * coordination between all CPUs. */ for_each_possible_cpu(i) { - pr = processors[i]; + pr = per_cpu(processors, i); if (!pr) continue; @@ -93,7 +93,7 @@ static int acpi_processor_update_tsd_coord(void) cpus_clear(covered_cpus); for_each_possible_cpu(i) { - pr = processors[i]; + pr = per_cpu(processors, i); if (!pr) continue; @@ -119,7 +119,7 @@ static int acpi_processor_update_tsd_coord(void) if (i == j) continue; - match_pr = processors[j]; + match_pr = per_cpu(processors, j); if (!match_pr) continue; @@ -152,7 +152,7 @@ static int acpi_processor_update_tsd_coord(void) if (i == j) continue; - match_pr = processors[j]; + match_pr = per_cpu(processors, j); if (!match_pr) continue; @@ -172,7 +172,7 @@ static int acpi_processor_update_tsd_coord(void) err_ret: for_each_possible_cpu(i) { - pr = processors[i]; + pr = per_cpu(processors, i); if (!pr) continue; @@ -214,7 +214,7 @@ static int acpi_processor_throttling_notifier(unsigned long event, void *data) struct acpi_processor_throttling *p_throttling; cpu = p_tstate->cpu; - pr = processors[cpu]; + pr = per_cpu(processors, cpu); if (!pr) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid pr pointer\n")); return 0; @@ -1035,7 +1035,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state) * cpus. */ for_each_cpu_mask_nr(i, online_throttling_cpus) { - match_pr = processors[i]; + match_pr = per_cpu(processors, i); /* * If the pointer is invalid, we will report the * error message and continue. 
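The hunks above replace the static processors[NR_CPUS] and processor_device_array[NR_CPUS] arrays with per-CPU variables, so remote lookups become per_cpu(processors, cpu) and the idle path reads its own slot with __get_cpu_var(processors) while interrupts are off. A generic sketch of that per-CPU pattern, with a placeholder struct foo rather than the ACPI types:

#include <linux/percpu.h>
#include <linux/cpumask.h>

struct foo { int data; };

/* one slot per possible CPU, replacing "struct foo *table[NR_CPUS]" */
static DEFINE_PER_CPU(struct foo *, foo_ptr);

static void foo_clear_all(void)
{
	int cpu;

	/* indexed access to any CPU's slot */
	for_each_possible_cpu(cpu)
		per_cpu(foo_ptr, cpu) = NULL;
}

static struct foo *foo_this_cpu(void)
{
	/*
	 * __get_cpu_var() reads the calling CPU's slot and assumes the
	 * caller already prevents migration (preemption or interrupts
	 * disabled), as acpi_processor_idle() does via local_irq_disable().
	 */
	return __get_cpu_var(foo_ptr);
}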
@@ -1232,7 +1232,10 @@ static ssize_t acpi_processor_write_throttling(struct file *file, int result = 0; struct seq_file *m = file->private_data; struct acpi_processor *pr = m->private; - char state_string[12] = { '\0' }; + char state_string[5] = ""; + char *charp = NULL; + size_t state_val = 0; + char tmpbuf[5] = ""; if (!pr || (count > sizeof(state_string) - 1)) return -EINVAL; @@ -1241,10 +1244,23 @@ static ssize_t acpi_processor_write_throttling(struct file *file, return -EFAULT; state_string[count] = '\0'; + if ((count > 0) && (state_string[count-1] == '\n')) + state_string[count-1] = '\0'; - result = acpi_processor_set_throttling(pr, - simple_strtoul(state_string, - NULL, 0)); + charp = state_string; + if ((state_string[0] == 't') || (state_string[0] == 'T')) + charp++; + + state_val = simple_strtoul(charp, NULL, 0); + if (state_val >= pr->throttling.state_count) + return -EINVAL; + + snprintf(tmpbuf, 5, "%zu", state_val); + + if (strcmp(tmpbuf, charp) != 0) + return -EINVAL; + + result = acpi_processor_set_throttling(pr, state_val); if (result) return result; diff --git a/drivers/acpi/reboot.c b/drivers/acpi/reboot.c new file mode 100644 index 000000000000..a6b662c00b67 --- /dev/null +++ b/drivers/acpi/reboot.c @@ -0,0 +1,50 @@ + +#include <linux/pci.h> +#include <linux/acpi.h> +#include <acpi/reboot.h> + +void acpi_reboot(void) +{ + struct acpi_generic_address *rr; + struct pci_bus *bus0; + u8 reset_value; + unsigned int devfn; + + if (acpi_disabled) + return; + + rr = &acpi_gbl_FADT.reset_register; + + /* Is the reset register supported? */ + if (!(acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) || + rr->bit_width != 8 || rr->bit_offset != 0) + return; + + reset_value = acpi_gbl_FADT.reset_value; + + /* The reset register can only exist in I/O, Memory or PCI config space + * on a device on bus 0. */ + switch (rr->space_id) { + case ACPI_ADR_SPACE_PCI_CONFIG: + /* The reset register can only live on bus 0. */ + bus0 = pci_find_bus(0, 0); + if (!bus0) + return; + /* Form PCI device/function pair. */ + devfn = PCI_DEVFN((rr->address >> 32) & 0xffff, + (rr->address >> 16) & 0xffff); + printk(KERN_DEBUG "Resetting with ACPI PCI RESET_REG."); + /* Write the value that resets us. 
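The rewritten acpi_processor_write_throttling() above accepts either a bare number or a Tn spelling, strips a trailing newline, rejects states at or beyond pr->throttling.state_count, and uses an snprintf() round trip to refuse strings with leading zeros or trailing junk. A small userspace sketch of the same validation approach; STATE_COUNT here is only a stand-in for the real per-processor state count:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define STATE_COUNT 8	/* stand-in for pr->throttling.state_count */

/* Return the state encoded in buf ("3", "T3", "t3\n", ...), or -1 if it
 * does not name a valid throttling state. */
static int parse_throttling_state(const char *buf)
{
	char s[5] = "", tmp[5] = "";
	const char *p;
	size_t len = strlen(buf);
	unsigned long val;

	if (len == 0 || len > sizeof(s) - 1)
		return -1;
	memcpy(s, buf, len + 1);
	if (s[len - 1] == '\n')		/* echo(1) appends a newline */
		s[len - 1] = '\0';

	p = s;
	if (*p == 't' || *p == 'T')	/* allow the "Tn" spelling */
		p++;

	val = strtoul(p, NULL, 0);
	if (val >= STATE_COUNT)
		return -1;

	/* round-trip check: reject trailing garbage and leading zeros */
	snprintf(tmp, sizeof(tmp), "%lu", val);
	if (strcmp(tmp, p) != 0)
		return -1;

	return (int)val;
}

int main(void)
{
	printf("%d %d %d\n",
	       parse_throttling_state("T3\n"),	/* 3 */
	       parse_throttling_state("7"),	/* 7 */
	       parse_throttling_state("T9"));	/* -1: out of range */
	return 0;
}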
*/ + pci_bus_write_config_byte(bus0, devfn, + (rr->address & 0xffff), reset_value); + break; + + case ACPI_ADR_SPACE_SYSTEM_MEMORY: + case ACPI_ADR_SPACE_SYSTEM_IO: + printk(KERN_DEBUG "ACPI MEMORY or I/O RESET_REG.\n"); + acpi_hw_low_level_write(8, reset_value, rr); + break; + } + /* Wait ten seconds */ + acpi_os_stall(10000000); +} diff --git a/drivers/acpi/resources/rscalc.c b/drivers/acpi/resources/rscalc.c index 8a112d11d491..f61ebc679e66 100644 --- a/drivers/acpi/resources/rscalc.c +++ b/drivers/acpi/resources/rscalc.c @@ -73,7 +73,7 @@ acpi_rs_stream_option_length(u32 resource_length, u32 minimum_total_length); static u8 acpi_rs_count_set_bits(u16 bit_field) { - acpi_native_uint bits_set; + u8 bits_set; ACPI_FUNCTION_ENTRY(); @@ -84,7 +84,7 @@ static u8 acpi_rs_count_set_bits(u16 bit_field) bit_field &= (u16) (bit_field - 1); } - return ((u8) bits_set); + return bits_set; } /******************************************************************************* diff --git a/drivers/acpi/resources/rscreate.c b/drivers/acpi/resources/rscreate.c index faddaee1bc07..7804a8c40e7a 100644 --- a/drivers/acpi/resources/rscreate.c +++ b/drivers/acpi/resources/rscreate.c @@ -181,9 +181,9 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object, } /* - * Loop through the ACPI_INTERNAL_OBJECTS - Each object - * should be a package that in turn contains an - * acpi_integer Address, a u8 Pin, a Name and a u8 source_index. + * Loop through the ACPI_INTERNAL_OBJECTS - Each object should be a + * package that in turn contains an acpi_integer Address, a u8 Pin, + * a Name, and a u8 source_index. */ top_object_list = package_object->package.elements; number_of_elements = package_object->package.count; @@ -240,9 +240,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object, /* 1) First subobject: Dereference the PRT.Address */ obj_desc = sub_object_list[0]; - if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) { - user_prt->address = obj_desc->integer.value; - } else { + if (ACPI_GET_OBJECT_TYPE(obj_desc) != ACPI_TYPE_INTEGER) { ACPI_ERROR((AE_INFO, "(PRT[%X].Address) Need Integer, found %s", index, @@ -250,12 +248,12 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object, return_ACPI_STATUS(AE_BAD_DATA); } + user_prt->address = obj_desc->integer.value; + /* 2) Second subobject: Dereference the PRT.Pin */ obj_desc = sub_object_list[1]; - if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) { - user_prt->pin = (u32) obj_desc->integer.value; - } else { + if (ACPI_GET_OBJECT_TYPE(obj_desc) != ACPI_TYPE_INTEGER) { ACPI_ERROR((AE_INFO, "(PRT[%X].Pin) Need Integer, found %s", index, @@ -284,6 +282,25 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object, } } + user_prt->pin = (u32) obj_desc->integer.value; + + /* + * If the BIOS has erroneously reversed the _PRT source_name (index 2) + * and the source_index (index 3), fix it. _PRT is important enough to + * workaround this BIOS error. This also provides compatibility with + * other ACPI implementations. 
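acpi_rs_count_set_bits() above uses the classic clear-the-lowest-set-bit loop, so it iterates once per set bit rather than once per bit position; the patch only narrows its counter from acpi_native_uint to u8. A standalone, runnable version of the same trick:

#include <stdio.h>
#include <stdint.h>

/* Count the set bits in a 16-bit mask. Each pass clears the lowest set
 * bit (mask &= mask - 1), so the loop body runs popcount(mask) times. */
static uint8_t count_set_bits(uint16_t mask)
{
	uint8_t bits_set;

	for (bits_set = 0; mask; bits_set++)
		mask &= (uint16_t)(mask - 1);

	return bits_set;
}

int main(void)
{
	printf("%d %d %d\n",
	       count_set_bits(0x0000),	/* 0 */
	       count_set_bits(0x00f0),	/* 4 */
	       count_set_bits(0xffff));	/* 16 */
	return 0;
}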
+ */ + obj_desc = sub_object_list[3]; + if (!obj_desc + || (ACPI_GET_OBJECT_TYPE(obj_desc) != ACPI_TYPE_INTEGER)) { + sub_object_list[3] = sub_object_list[2]; + sub_object_list[2] = obj_desc; + + ACPI_WARNING((AE_INFO, + "(PRT[%X].Source) SourceName and SourceIndex are reversed, fixed", + index)); + } + /* * 3) Third subobject: Dereference the PRT.source_name * The name may be unresolved (slack mode), so allow a null object @@ -364,9 +381,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object, /* 4) Fourth subobject: Dereference the PRT.source_index */ obj_desc = sub_object_list[source_index_index]; - if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) { - user_prt->source_index = (u32) obj_desc->integer.value; - } else { + if (ACPI_GET_OBJECT_TYPE(obj_desc) != ACPI_TYPE_INTEGER) { ACPI_ERROR((AE_INFO, "(PRT[%X].SourceIndex) Need Integer, found %s", index, @@ -374,6 +389,8 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object, return_ACPI_STATUS(AE_BAD_DATA); } + user_prt->source_index = (u32) obj_desc->integer.value; + /* Point to the next union acpi_operand_object in the top level package */ top_object_list++; diff --git a/drivers/acpi/resources/rsmisc.c b/drivers/acpi/resources/rsmisc.c index de1ac3881b22..96a6c0353255 100644 --- a/drivers/acpi/resources/rsmisc.c +++ b/drivers/acpi/resources/rsmisc.c @@ -82,7 +82,7 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource, ACPI_FUNCTION_TRACE(rs_convert_aml_to_resource); - if (((acpi_native_uint) resource) & 0x3) { + if (((acpi_size) resource) & 0x3) { /* Each internal resource struct is expected to be 32-bit aligned */ diff --git a/drivers/acpi/resources/rsutils.c b/drivers/acpi/resources/rsutils.c index befe2302f41b..f7b3bcd59ba7 100644 --- a/drivers/acpi/resources/rsutils.c +++ b/drivers/acpi/resources/rsutils.c @@ -62,7 +62,7 @@ ACPI_MODULE_NAME("rsutils") ******************************************************************************/ u8 acpi_rs_decode_bitmask(u16 mask, u8 * list) { - acpi_native_uint i; + u8 i; u8 bit_count; ACPI_FUNCTION_ENTRY(); @@ -71,7 +71,7 @@ u8 acpi_rs_decode_bitmask(u16 mask, u8 * list) for (i = 0, bit_count = 0; mask; i++) { if (mask & 0x0001) { - list[bit_count] = (u8) i; + list[bit_count] = i; bit_count++; } @@ -96,8 +96,8 @@ u8 acpi_rs_decode_bitmask(u16 mask, u8 * list) u16 acpi_rs_encode_bitmask(u8 * list, u8 count) { - acpi_native_uint i; - acpi_native_uint mask; + u32 i; + u16 mask; ACPI_FUNCTION_ENTRY(); @@ -107,7 +107,7 @@ u16 acpi_rs_encode_bitmask(u8 * list, u8 count) mask |= (0x1 << list[i]); } - return ((u16) mask); + return mask; } /******************************************************************************* @@ -130,7 +130,7 @@ u16 acpi_rs_encode_bitmask(u8 * list, u8 count) void acpi_rs_move_data(void *destination, void *source, u16 item_count, u8 move_type) { - acpi_native_uint i; + u32 i; ACPI_FUNCTION_ENTRY(); @@ -679,7 +679,6 @@ acpi_rs_set_srs_method_data(struct acpi_namespace_node *node, info->prefix_node = node; info->pathname = METHOD_NAME__SRS; info->parameters = args; - info->parameter_type = ACPI_PARAM_ARGS; info->flags = ACPI_IGNORE_RETURN_VALUE; /* diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 6d85289f1c12..f3132aa47a69 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -6,6 +6,8 @@ #include <linux/init.h> #include <linux/kernel.h> #include <linux/acpi.h> +#include <linux/signal.h> +#include <linux/kthread.h> #include <acpi/acpi_drivers.h> #include <acpi/acinterp.h> /* for 
acpi_ex_eisa_id_to_string() */ @@ -92,17 +94,37 @@ acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, cha } static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL); -static int acpi_eject_operation(acpi_handle handle, int lockable) +static int acpi_bus_hot_remove_device(void *context) { + struct acpi_device *device; + acpi_handle handle = context; struct acpi_object_list arg_list; union acpi_object arg; acpi_status status = AE_OK; - /* - * TBD: evaluate _PS3? - */ + if (acpi_bus_get_device(handle, &device)) + return 0; + + if (!device) + return 0; + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Hot-removing device %s...\n", device->dev.bus_id)); + - if (lockable) { + if (acpi_bus_trim(device, 1)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Removing device failed\n")); + return -1; + } + + /* power off device */ + status = acpi_evaluate_object(handle, "_PS3", NULL, NULL); + if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) + ACPI_DEBUG_PRINT((ACPI_DB_WARN, + "Power-off device failed\n")); + + if (device->flags.lockable) { arg_list.count = 1; arg_list.pointer = &arg; arg.type = ACPI_TYPE_INTEGER; @@ -118,26 +140,22 @@ static int acpi_eject_operation(acpi_handle handle, int lockable) /* * TBD: _EJD support. */ - status = acpi_evaluate_object(handle, "_EJ0", &arg_list, NULL); - if (ACPI_FAILURE(status)) { - return (-ENODEV); - } + if (ACPI_FAILURE(status)) + return -ENODEV; - return (0); + return 0; } static ssize_t acpi_eject_store(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { - int result; int ret = count; - int islockable; acpi_status status; - acpi_handle handle; acpi_object_type type = 0; struct acpi_device *acpi_device = to_acpi_device(d); + struct task_struct *task; if ((!count) || (buf[0] != '1')) { return -EINVAL; @@ -154,18 +172,12 @@ acpi_eject_store(struct device *d, struct device_attribute *attr, goto err; } - islockable = acpi_device->flags.lockable; - handle = acpi_device->handle; - - result = acpi_bus_trim(acpi_device, 1); - - if (!result) - result = acpi_eject_operation(handle, islockable); - - if (result) { - ret = -EBUSY; - } - err: + /* remove the device in another thread to fix the deadlock issue */ + task = kthread_run(acpi_bus_hot_remove_device, + acpi_device->handle, "acpi_hot_remove_device"); + if (IS_ERR(task)) + ret = PTR_ERR(task); +err: return ret; } @@ -691,9 +703,7 @@ static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device) acpi_status status = 0; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *package = NULL; - union acpi_object in_arg[3]; - struct acpi_object_list arg_list = { 3, in_arg }; - acpi_status psw_status = AE_OK; + int psw_error; struct acpi_device_id button_device_ids[] = { {"PNP0C0D", 0}, @@ -725,39 +735,11 @@ static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device) * So it is necessary to call _DSW object first. Only when it is not * present will the _PSW object used. */ - /* - * Three agruments are needed for the _DSW object. - * Argument 0: enable/disable the wake capabilities - * When _DSW object is called to disable the wake capabilities, maybe - * the first argument is filled. The value of the other two agruments - * is meaningless. 
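acpi_eject_store() above no longer trims the device directly in the sysfs store path; it hands the ACPI handle to a kernel thread through kthread_run(), the usual way to push work that would otherwise deadlock (here, tearing down the very device whose attribute is being written) out of the calling context. A generic sketch of that fire-and-forget deferral, with placeholder names (slow_work, start_slow_work) rather than the ACPI ones:

#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/delay.h>

/* thread body: must match the int (*)(void *) type kthread_run() expects */
static int slow_work(void *data)
{
	long tag = (long)data;

	msleep(1000);		/* stand-in for the expensive teardown */
	printk(KERN_INFO "slow work %ld done\n", tag);
	return 0;
}

static int start_slow_work(long tag)
{
	struct task_struct *task;

	/*
	 * Fire and forget: the thread starts immediately and exits when
	 * slow_work() returns, so the caller never blocks on it.
	 */
	task = kthread_run(slow_work, (void *)tag, "slow_work/%ld", tag);
	return IS_ERR(task) ? PTR_ERR(task) : 0;
}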
- */ - in_arg[0].type = ACPI_TYPE_INTEGER; - in_arg[0].integer.value = 0; - in_arg[1].type = ACPI_TYPE_INTEGER; - in_arg[1].integer.value = 0; - in_arg[2].type = ACPI_TYPE_INTEGER; - in_arg[2].integer.value = 0; - psw_status = acpi_evaluate_object(device->handle, "_DSW", - &arg_list, NULL); - if (ACPI_FAILURE(psw_status) && (psw_status != AE_NOT_FOUND)) - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "error in evaluate _DSW\n")); - /* - * When the _DSW object is not present, OSPM will call _PSW object. - */ - if (psw_status == AE_NOT_FOUND) { - /* - * Only one agruments is required for the _PSW object. - * agrument 0: enable/disable the wake capabilities - */ - arg_list.count = 1; - in_arg[0].integer.value = 0; - psw_status = acpi_evaluate_object(device->handle, "_PSW", - &arg_list, NULL); - if (ACPI_FAILURE(psw_status) && (psw_status != AE_NOT_FOUND)) - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "error in " - "evaluate _PSW\n")); - } + psw_error = acpi_device_sleep_wake(device, 0, 0, 0); + if (psw_error) + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "error in _DSW or _PSW evaluation\n")); + /* Power button, Lid switch always enable wakeup */ if (!acpi_match_device_ids(device, button_device_ids)) device->wakeup.flags.run_wake = 1; diff --git a/drivers/acpi/sleep/main.c b/drivers/acpi/sleep/main.c index 495c63a3e0af..0489a7d1d42c 100644 --- a/drivers/acpi/sleep/main.c +++ b/drivers/acpi/sleep/main.c @@ -24,10 +24,6 @@ u8 sleep_states[ACPI_S_STATE_COUNT]; -#ifdef CONFIG_PM_SLEEP -static u32 acpi_target_sleep_state = ACPI_STATE_S0; -#endif - static int acpi_sleep_prepare(u32 acpi_state) { #ifdef CONFIG_ACPI_SLEEP @@ -49,9 +45,96 @@ static int acpi_sleep_prepare(u32 acpi_state) return 0; } -#ifdef CONFIG_SUSPEND -static struct platform_suspend_ops acpi_suspend_ops; +#ifdef CONFIG_PM_SLEEP +static u32 acpi_target_sleep_state = ACPI_STATE_S0; + +/* + * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the + * user to request that behavior by using the 'acpi_old_suspend_ordering' + * kernel command line option that causes the following variable to be set. + */ +static bool old_suspend_ordering; +void __init acpi_old_suspend_ordering(void) +{ + old_suspend_ordering = true; +} + +/** + * acpi_pm_disable_gpes - Disable the GPEs. + */ +static int acpi_pm_disable_gpes(void) +{ + acpi_hw_disable_all_gpes(); + return 0; +} + +/** + * __acpi_pm_prepare - Prepare the platform to enter the target state. + * + * If necessary, set the firmware waking vector and do arch-specific + * nastiness to get the wakeup code to the waking vector. + */ +static int __acpi_pm_prepare(void) +{ + int error = acpi_sleep_prepare(acpi_target_sleep_state); + + if (error) + acpi_target_sleep_state = ACPI_STATE_S0; + return error; +} + +/** + * acpi_pm_prepare - Prepare the platform to enter the target sleep + * state and disable the GPEs. + */ +static int acpi_pm_prepare(void) +{ + int error = __acpi_pm_prepare(); + + if (!error) + acpi_hw_disable_all_gpes(); + return error; +} + +/** + * acpi_pm_finish - Instruct the platform to leave a sleep state. + * + * This is called after we wake back up (or if entering the sleep state + * failed). 
+ */ +static void acpi_pm_finish(void) +{ + u32 acpi_state = acpi_target_sleep_state; + + if (acpi_state == ACPI_STATE_S0) + return; + + printk(KERN_INFO PREFIX "Waking up from system sleep state S%d\n", + acpi_state); + acpi_disable_wakeup_device(acpi_state); + acpi_leave_sleep_state(acpi_state); + + /* reset firmware waking vector */ + acpi_set_firmware_waking_vector((acpi_physical_address) 0); + + acpi_target_sleep_state = ACPI_STATE_S0; +} + +/** + * acpi_pm_end - Finish up suspend sequence. + */ +static void acpi_pm_end(void) +{ + /* + * This is necessary in case acpi_pm_finish() is not called during a + * failing transition to a sleep state. + */ + acpi_target_sleep_state = ACPI_STATE_S0; +} +#endif /* CONFIG_PM_SLEEP */ + +#ifdef CONFIG_SUSPEND extern void do_suspend_lowlevel(void); static u32 acpi_suspend_states[] = { @@ -61,13 +144,10 @@ static u32 acpi_suspend_states[] = { [PM_SUSPEND_MAX] = ACPI_STATE_S5 }; -static int init_8259A_after_S1; - /** * acpi_suspend_begin - Set the target system sleep state to the state * associated with given @pm_state, if supported. */ - static int acpi_suspend_begin(suspend_state_t pm_state) { u32 acpi_state = acpi_suspend_states[pm_state]; @@ -84,25 +164,6 @@ static int acpi_suspend_begin(suspend_state_t pm_state) } /** - * acpi_suspend_prepare - Do preliminary suspend work. - * - * If necessary, set the firmware waking vector and do arch-specific - * nastiness to get the wakeup code to the waking vector. - */ - -static int acpi_suspend_prepare(void) -{ - int error = acpi_sleep_prepare(acpi_target_sleep_state); - - if (error) { - acpi_target_sleep_state = ACPI_STATE_S0; - return error; - } - - return ACPI_SUCCESS(acpi_hw_disable_all_gpes()) ? 0 : -EFAULT; -} - -/** * acpi_suspend_enter - Actually enter a sleep state. * @pm_state: ignored * @@ -110,7 +171,6 @@ static int acpi_suspend_prepare(void) * assembly, which in turn call acpi_enter_sleep_state(). * It's unfortunate, but it works. Please fix if you're feeling frisky. */ - static int acpi_suspend_enter(suspend_state_t pm_state) { acpi_status status = AE_OK; @@ -167,46 +227,6 @@ static int acpi_suspend_enter(suspend_state_t pm_state) return ACPI_SUCCESS(status) ? 0 : -EFAULT; } -/** - * acpi_suspend_finish - Instruct the platform to leave a sleep state. - * - * This is called after we wake back up (or if entering the sleep state - * failed). - */ - -static void acpi_suspend_finish(void) -{ - u32 acpi_state = acpi_target_sleep_state; - - acpi_disable_wakeup_device(acpi_state); - acpi_leave_sleep_state(acpi_state); - - /* reset firmware waking vector */ - acpi_set_firmware_waking_vector((acpi_physical_address) 0); - - acpi_target_sleep_state = ACPI_STATE_S0; - -#ifdef CONFIG_X86 - if (init_8259A_after_S1) { - printk("Broken toshiba laptop -> kicking interrupts\n"); - init_8259A(0); - } -#endif -} - -/** - * acpi_suspend_end - Finish up suspend sequence. - */ - -static void acpi_suspend_end(void) -{ - /* - * This is necessary in case acpi_suspend_finish() is not called during a - * failing transition to a sleep state. 
- */ - acpi_target_sleep_state = ACPI_STATE_S0; -} - static int acpi_suspend_state_valid(suspend_state_t pm_state) { u32 acpi_state; @@ -226,30 +246,39 @@ static int acpi_suspend_state_valid(suspend_state_t pm_state) static struct platform_suspend_ops acpi_suspend_ops = { .valid = acpi_suspend_state_valid, .begin = acpi_suspend_begin, - .prepare = acpi_suspend_prepare, + .prepare = acpi_pm_prepare, .enter = acpi_suspend_enter, - .finish = acpi_suspend_finish, - .end = acpi_suspend_end, + .finish = acpi_pm_finish, + .end = acpi_pm_end, }; -/* - * Toshiba fails to preserve interrupts over S1, reinitialization - * of 8259 is needed after S1 resume. +/** + * acpi_suspend_begin_old - Set the target system sleep state to the + * state associated with given @pm_state, if supported, and + * execute the _PTS control method. This function is used if the + * pre-ACPI 2.0 suspend ordering has been requested. */ -static int __init init_ints_after_s1(const struct dmi_system_id *d) +static int acpi_suspend_begin_old(suspend_state_t pm_state) { - printk(KERN_WARNING "%s with broken S1 detected.\n", d->ident); - init_8259A_after_S1 = 1; - return 0; + int error = acpi_suspend_begin(pm_state); + + if (!error) + error = __acpi_pm_prepare(); + return error; } -static struct dmi_system_id __initdata acpisleep_dmi_table[] = { - { - .callback = init_ints_after_s1, - .ident = "Toshiba Satellite 4030cdt", - .matches = {DMI_MATCH(DMI_PRODUCT_NAME, "S4030CDT/4.3"),}, - }, - {}, +/* + * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has + * been requested. + */ +static struct platform_suspend_ops acpi_suspend_ops_old = { + .valid = acpi_suspend_state_valid, + .begin = acpi_suspend_begin_old, + .prepare = acpi_pm_disable_gpes, + .enter = acpi_suspend_enter, + .finish = acpi_pm_finish, + .end = acpi_pm_end, + .recover = acpi_pm_finish, }; #endif /* CONFIG_SUSPEND */ @@ -257,22 +286,9 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = { static int acpi_hibernation_begin(void) { acpi_target_sleep_state = ACPI_STATE_S4; - return 0; } -static int acpi_hibernation_prepare(void) -{ - int error = acpi_sleep_prepare(ACPI_STATE_S4); - - if (error) { - acpi_target_sleep_state = ACPI_STATE_S0; - return error; - } - - return ACPI_SUCCESS(acpi_hw_disable_all_gpes()) ? 0 : -EFAULT; -} - static int acpi_hibernation_enter(void) { acpi_status status = AE_OK; @@ -302,52 +318,55 @@ static void acpi_hibernation_leave(void) acpi_leave_sleep_state_prep(ACPI_STATE_S4); } -static void acpi_hibernation_finish(void) +static void acpi_pm_enable_gpes(void) { - acpi_disable_wakeup_device(ACPI_STATE_S4); - acpi_leave_sleep_state(ACPI_STATE_S4); - - /* reset firmware waking vector */ - acpi_set_firmware_waking_vector((acpi_physical_address) 0); - - acpi_target_sleep_state = ACPI_STATE_S0; + acpi_hw_enable_all_runtime_gpes(); } -static void acpi_hibernation_end(void) -{ - /* - * This is necessary in case acpi_hibernation_finish() is not called - * during a failing transition to the sleep state. 
- */ - acpi_target_sleep_state = ACPI_STATE_S0; -} +static struct platform_hibernation_ops acpi_hibernation_ops = { + .begin = acpi_hibernation_begin, + .end = acpi_pm_end, + .pre_snapshot = acpi_pm_prepare, + .finish = acpi_pm_finish, + .prepare = acpi_pm_prepare, + .enter = acpi_hibernation_enter, + .leave = acpi_hibernation_leave, + .pre_restore = acpi_pm_disable_gpes, + .restore_cleanup = acpi_pm_enable_gpes, +}; -static int acpi_hibernation_pre_restore(void) +/** + * acpi_hibernation_begin_old - Set the target system sleep state to + * ACPI_STATE_S4 and execute the _PTS control method. This + * function is used if the pre-ACPI 2.0 suspend ordering has been + * requested. + */ +static int acpi_hibernation_begin_old(void) { - acpi_status status; - - status = acpi_hw_disable_all_gpes(); - - return ACPI_SUCCESS(status) ? 0 : -EFAULT; -} + int error = acpi_sleep_prepare(ACPI_STATE_S4); -static void acpi_hibernation_restore_cleanup(void) -{ - acpi_hw_enable_all_runtime_gpes(); + if (!error) + acpi_target_sleep_state = ACPI_STATE_S4; + return error; } -static struct platform_hibernation_ops acpi_hibernation_ops = { - .begin = acpi_hibernation_begin, - .end = acpi_hibernation_end, - .pre_snapshot = acpi_hibernation_prepare, - .finish = acpi_hibernation_finish, - .prepare = acpi_hibernation_prepare, +/* + * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has + * been requested. + */ +static struct platform_hibernation_ops acpi_hibernation_ops_old = { + .begin = acpi_hibernation_begin_old, + .end = acpi_pm_end, + .pre_snapshot = acpi_pm_disable_gpes, + .finish = acpi_pm_finish, + .prepare = acpi_pm_disable_gpes, .enter = acpi_hibernation_enter, .leave = acpi_hibernation_leave, - .pre_restore = acpi_hibernation_pre_restore, - .restore_cleanup = acpi_hibernation_restore_cleanup, + .pre_restore = acpi_pm_disable_gpes, + .restore_cleanup = acpi_pm_enable_gpes, + .recover = acpi_pm_finish, }; -#endif /* CONFIG_HIBERNATION */ +#endif /* CONFIG_HIBERNATION */ int acpi_suspend(u32 acpi_state) { @@ -368,8 +387,8 @@ int acpi_suspend(u32 acpi_state) /** * acpi_pm_device_sleep_state - return preferred power state of ACPI device * in the system sleep state given by %acpi_target_sleep_state - * @dev: device to examine - * @wake: if set, the device should be able to wake up the system + * @dev: device to examine; its driver model wakeup flags control + * whether it should be able to wake up the system * @d_min_p: used to store the upper limit of allowed states range * Return value: preferred power state of the device on success, -ENODEV on * failure (ie. if there's no 'struct acpi_device' for @dev) @@ -387,7 +406,7 @@ int acpi_suspend(u32 acpi_state) * via @wake. */ -int acpi_pm_device_sleep_state(struct device *dev, int wake, int *d_min_p) +int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p) { acpi_handle handle = DEVICE_ACPI_HANDLE(dev); struct acpi_device *adev; @@ -426,7 +445,7 @@ int acpi_pm_device_sleep_state(struct device *dev, int wake, int *d_min_p) * can wake the system. _S0W may be valid, too. 
*/ if (acpi_target_sleep_state == ACPI_STATE_S0 || - (wake && adev->wakeup.state.enabled && + (device_may_wakeup(dev) && adev->wakeup.state.enabled && adev->wakeup.sleep_state <= acpi_target_sleep_state)) { acpi_status status; @@ -448,6 +467,31 @@ int acpi_pm_device_sleep_state(struct device *dev, int wake, int *d_min_p) *d_min_p = d_min; return d_max; } + +/** + * acpi_pm_device_sleep_wake - enable or disable the system wake-up + * capability of given device + * @dev: device to handle + * @enable: 'true' - enable, 'false' - disable the wake-up capability + */ +int acpi_pm_device_sleep_wake(struct device *dev, bool enable) +{ + acpi_handle handle; + struct acpi_device *adev; + + if (!device_may_wakeup(dev)) + return -EINVAL; + + handle = DEVICE_ACPI_HANDLE(dev); + if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) { + printk(KERN_DEBUG "ACPI handle has no context!\n"); + return -ENODEV; + } + + return enable ? + acpi_enable_wakeup_device_power(adev, acpi_target_sleep_state) : + acpi_disable_wakeup_device_power(adev); +} #endif static void acpi_power_off_prepare(void) @@ -472,8 +516,6 @@ int __init acpi_sleep_init(void) u8 type_a, type_b; #ifdef CONFIG_SUSPEND int i = 0; - - dmi_check_system(acpisleep_dmi_table); #endif if (acpi_disabled) @@ -491,13 +533,15 @@ int __init acpi_sleep_init(void) } } - suspend_set_ops(&acpi_suspend_ops); + suspend_set_ops(old_suspend_ordering ? + &acpi_suspend_ops_old : &acpi_suspend_ops); #endif #ifdef CONFIG_HIBERNATION status = acpi_get_sleep_type_data(ACPI_STATE_S4, &type_a, &type_b); if (ACPI_SUCCESS(status)) { - hibernation_set_ops(&acpi_hibernation_ops); + hibernation_set_ops(old_suspend_ordering ? + &acpi_hibernation_ops_old : &acpi_hibernation_ops); sleep_states[ACPI_STATE_S4] = 1; printk(" S4"); } diff --git a/drivers/acpi/sleep/wakeup.c b/drivers/acpi/sleep/wakeup.c index ed8e41becf0c..38655eb132dc 100644 --- a/drivers/acpi/sleep/wakeup.c +++ b/drivers/acpi/sleep/wakeup.c @@ -42,7 +42,7 @@ void acpi_enable_wakeup_device_prep(u8 sleep_state) continue; spin_unlock(&acpi_device_lock); - acpi_enable_wakeup_device_power(dev); + acpi_enable_wakeup_device_power(dev, sleep_state); spin_lock(&acpi_device_lock); } spin_unlock(&acpi_device_lock); @@ -66,13 +66,15 @@ void acpi_enable_wakeup_device(u8 sleep_state) list_for_each_safe(node, next, &acpi_wakeup_device_list) { struct acpi_device *dev = container_of(node, struct acpi_device, wakeup_list); + if (!dev->wakeup.flags.valid) continue; + /* If users want to disable run-wake GPE, * we only disable it for wake and leave it for runtime */ - if (!dev->wakeup.state.enabled || - sleep_state > (u32) dev->wakeup.sleep_state) { + if ((!dev->wakeup.state.enabled && !dev->wakeup.flags.prepared) + || sleep_state > (u32) dev->wakeup.sleep_state) { if (dev->wakeup.flags.run_wake) { spin_unlock(&acpi_device_lock); /* set_gpe_type will disable GPE, leave it like that */ @@ -110,8 +112,9 @@ void acpi_disable_wakeup_device(u8 sleep_state) if (!dev->wakeup.flags.valid) continue; - if (!dev->wakeup.state.enabled || - sleep_state > (u32) dev->wakeup.sleep_state) { + + if ((!dev->wakeup.state.enabled && !dev->wakeup.flags.prepared) + || sleep_state > (u32) dev->wakeup.sleep_state) { if (dev->wakeup.flags.run_wake) { spin_unlock(&acpi_device_lock); acpi_set_gpe_type(dev->wakeup.gpe_device, diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c index 5bd2dec9a7ac..d8e3f153b295 100644 --- a/drivers/acpi/system.c +++ b/drivers/acpi/system.c @@ -167,7 +167,13 @@ static int acpi_system_sysfs_init(void) #define 
COUNT_ERROR 2 /* other */ #define NUM_COUNTERS_EXTRA 3 -static u32 *all_counters; +#define ACPI_EVENT_VALID 0x01 +struct event_counter { + u32 count; + u32 flags; +}; + +static struct event_counter *all_counters; static u32 num_gpes; static u32 num_counters; static struct attribute **all_attrs; @@ -202,9 +208,44 @@ static int count_num_gpes(void) return count; } +static int get_gpe_device(int index, acpi_handle *handle) +{ + struct acpi_gpe_xrupt_info *gpe_xrupt_info; + struct acpi_gpe_block_info *gpe_block; + acpi_cpu_flags flags; + struct acpi_namespace_node *node; + + flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); + + gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head; + while (gpe_xrupt_info) { + gpe_block = gpe_xrupt_info->gpe_block_list_head; + node = gpe_block->node; + while (gpe_block) { + index -= gpe_block->register_count * + ACPI_GPE_REGISTER_WIDTH; + if (index < 0) { + acpi_os_release_lock(acpi_gbl_gpe_lock, flags); + /* return NULL if it's FADT GPE */ + if (node->type != ACPI_TYPE_DEVICE) + *handle = NULL; + else + *handle = node; + return 0; + } + node = gpe_block->node; + gpe_block = gpe_block->next; + } + gpe_xrupt_info = gpe_xrupt_info->next; + } + acpi_os_release_lock(acpi_gbl_gpe_lock, flags); + + return -ENODEV; +} + static void delete_gpe_attr_array(void) { - u32 *tmp = all_counters; + struct event_counter *tmp = all_counters; all_counters = NULL; kfree(tmp); @@ -230,9 +271,10 @@ void acpi_os_gpe_count(u32 gpe_number) return; if (gpe_number < num_gpes) - all_counters[gpe_number]++; + all_counters[gpe_number].count++; else - all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR]++; + all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR]. + count++; return; } @@ -243,44 +285,144 @@ void acpi_os_fixed_event_count(u32 event_number) return; if (event_number < ACPI_NUM_FIXED_EVENTS) - all_counters[num_gpes + event_number]++; + all_counters[num_gpes + event_number].count++; else - all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR]++; + all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR]. 
+ count++; return; } +static int get_status(u32 index, acpi_event_status *status, acpi_handle *handle) +{ + int result = 0; + + if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS) + goto end; + + if (index < num_gpes) { + result = get_gpe_device(index, handle); + if (result) { + ACPI_EXCEPTION((AE_INFO, AE_NOT_FOUND, + "Invalid GPE 0x%x\n", index)); + goto end; + } + result = acpi_get_gpe_status(*handle, index, + ACPI_NOT_ISR, status); + } else if (index < (num_gpes + ACPI_NUM_FIXED_EVENTS)) + result = acpi_get_event_status(index - num_gpes, status); + + /* + * sleep/power button GPE/Fixed Event is enabled after acpi_system_init, + * check the status at runtime and mark it as valid once it's enabled + */ + if (!result && (*status & ACPI_EVENT_FLAG_ENABLED)) + all_counters[index].flags |= ACPI_EVENT_VALID; +end: + return result; +} + static ssize_t counter_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { - all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI] = + int index = attr - counter_attrs; + int size; + acpi_handle handle; + acpi_event_status status; + int result = 0; + + all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI].count = acpi_irq_handled; - all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE] = + all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE].count = acpi_gpe_count; - return sprintf(buf, "%d\n", all_counters[attr - counter_attrs]); + size = sprintf(buf, "%8d", all_counters[index].count); + + /* "gpe_all" or "sci" */ + if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS) + goto end; + + result = get_status(index, &status, &handle); + if (result) + goto end; + + if (!(all_counters[index].flags & ACPI_EVENT_VALID)) + size += sprintf(buf + size, " invalid"); + else if (status & ACPI_EVENT_FLAG_ENABLED) + size += sprintf(buf + size, " enable"); + else + size += sprintf(buf + size, " disable"); + +end: + size += sprintf(buf + size, "\n"); + return result ? result : size; } /* * counter_set() sets the specified counter. * setting the total "sci" file to any value clears all counters. + * enable/disable/clear a gpe/fixed event in user space. 
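The comment above describes the new ability to enable, disable or clear a GPE or fixed event from user space; the counter_set() handler below compares the written string against "enable\n", "disable\n" and "clear\n" (anything else is treated as a number to load into the counter), while counter_show() appends an enable/disable/invalid tag to each count. The patch itself does not spell out where these attributes live in sysfs, so the small userspace sketch below takes the attribute file as a command-line argument instead of hard-coding a path:

#include <stdio.h>
#include <string.h>

/* Write one of the keywords counter_set() understands to an event file,
 * e.g.:  ./event_ctl <event-file> disable */
int main(int argc, char **argv)
{
	FILE *f;

	if (argc != 3 ||
	    (strcmp(argv[2], "enable") && strcmp(argv[2], "disable") &&
	     strcmp(argv[2], "clear"))) {
		fprintf(stderr, "usage: %s <event-file> enable|disable|clear\n",
			argv[0]);
		return 1;
	}

	f = fopen(argv[1], "w");
	if (!f) {
		perror(argv[1]);
		return 1;
	}
	/* the kernel side compares against "enable\n" etc., so keep the newline */
	fprintf(f, "%s\n", argv[2]);
	if (fclose(f) != 0) {	/* store errors typically surface at flush time */
		perror(argv[1]);
		return 1;
	}
	return 0;
}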
*/ static ssize_t counter_set(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t size) { int index = attr - counter_attrs; + acpi_event_status status; + acpi_handle handle; + int result = 0; if (index == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI) { int i; for (i = 0; i < num_counters; ++i) - all_counters[i] = 0; + all_counters[i].count = 0; acpi_gpe_count = 0; acpi_irq_handled = 0; + goto end; + } + /* show the event status for both GPEs and Fixed Events */ + result = get_status(index, &status, &handle); + if (result) + goto end; + + if (!(all_counters[index].flags & ACPI_EVENT_VALID)) { + ACPI_DEBUG_PRINT((ACPI_DB_WARN, + "Can not change Invalid GPE/Fixed Event status\n")); + return -EINVAL; + } + + if (index < num_gpes) { + if (!strcmp(buf, "disable\n") && + (status & ACPI_EVENT_FLAG_ENABLED)) + result = acpi_disable_gpe(handle, index, ACPI_NOT_ISR); + else if (!strcmp(buf, "enable\n") && + !(status & ACPI_EVENT_FLAG_ENABLED)) + result = acpi_enable_gpe(handle, index, ACPI_NOT_ISR); + else if (!strcmp(buf, "clear\n") && + (status & ACPI_EVENT_FLAG_SET)) + result = acpi_clear_gpe(handle, index, ACPI_NOT_ISR); + else + all_counters[index].count = strtoul(buf, NULL, 0); + } else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) { + int event = index - num_gpes; + if (!strcmp(buf, "disable\n") && + (status & ACPI_EVENT_FLAG_ENABLED)) + result = acpi_disable_event(event, ACPI_NOT_ISR); + else if (!strcmp(buf, "enable\n") && + !(status & ACPI_EVENT_FLAG_ENABLED)) + result = acpi_enable_event(event, ACPI_NOT_ISR); + else if (!strcmp(buf, "clear\n") && + (status & ACPI_EVENT_FLAG_SET)) + result = acpi_clear_event(event); + else + all_counters[index].count = strtoul(buf, NULL, 0); } else - all_counters[index] = strtoul(buf, NULL, 0); + all_counters[index].count = strtoul(buf, NULL, 0); - return size; + if (ACPI_FAILURE(result)) + result = -EINVAL; +end: + return result ? 
result : size; } void acpi_irq_stats_init(void) @@ -298,7 +440,8 @@ void acpi_irq_stats_init(void) if (all_attrs == NULL) return; - all_counters = kzalloc(sizeof(u32) * (num_counters), GFP_KERNEL); + all_counters = kzalloc(sizeof(struct event_counter) * (num_counters), + GFP_KERNEL); if (all_counters == NULL) goto fail; diff --git a/drivers/acpi/tables/tbfadt.c b/drivers/acpi/tables/tbfadt.c index 949d4114eb9f..ccb5b64bbef3 100644 --- a/drivers/acpi/tables/tbfadt.c +++ b/drivers/acpi/tables/tbfadt.c @@ -124,7 +124,7 @@ static struct acpi_fadt_info fadt_info_table[] = { static void inline acpi_tb_init_generic_address(struct acpi_generic_address *generic_address, - u8 bit_width, u64 address) + u8 byte_width, u64 address) { /* @@ -136,7 +136,7 @@ acpi_tb_init_generic_address(struct acpi_generic_address *generic_address, /* All other fields are byte-wide */ generic_address->space_id = ACPI_ADR_SPACE_SYSTEM_IO; - generic_address->bit_width = bit_width; + generic_address->bit_width = byte_width << 3; generic_address->bit_offset = 0; generic_address->access_width = 0; } @@ -155,7 +155,7 @@ acpi_tb_init_generic_address(struct acpi_generic_address *generic_address, * ******************************************************************************/ -void acpi_tb_parse_fadt(acpi_native_uint table_index, u8 flags) +void acpi_tb_parse_fadt(u32 table_index, u8 flags) { u32 length; struct acpi_table_header *table; @@ -280,7 +280,7 @@ static void acpi_tb_convert_fadt(void) { u8 pm1_register_length; struct acpi_generic_address *target; - acpi_native_uint i; + u32 i; /* Update the local FADT table header length */ @@ -343,9 +343,11 @@ static void acpi_tb_convert_fadt(void) * * The PM event blocks are split into two register blocks, first is the * PM Status Register block, followed immediately by the PM Enable Register - * block. Each is of length (pm1_event_length/2) + * block. Each is of length (xpm1x_event_block.bit_width/2) */ - pm1_register_length = (u8) ACPI_DIV_2(acpi_gbl_FADT.pm1_event_length); + WARN_ON(ACPI_MOD_16(acpi_gbl_FADT.xpm1a_event_block.bit_width)); + pm1_register_length = (u8) ACPI_DIV_16(acpi_gbl_FADT + .xpm1a_event_block.bit_width); /* The PM1A register block is required */ @@ -360,14 +362,17 @@ static void acpi_tb_convert_fadt(void) /* The PM1B register block is optional, ignore if not present */ if (acpi_gbl_FADT.xpm1b_event_block.address) { + WARN_ON(ACPI_MOD_16(acpi_gbl_FADT.xpm1b_event_block.bit_width)); + pm1_register_length = (u8) ACPI_DIV_16(acpi_gbl_FADT + .xpm1b_event_block + .bit_width); acpi_tb_init_generic_address(&acpi_gbl_xpm1b_enable, pm1_register_length, (acpi_gbl_FADT.xpm1b_event_block. 
address + pm1_register_length)); /* Don't forget to copy space_id of the GAS */ acpi_gbl_xpm1b_enable.space_id = - acpi_gbl_FADT.xpm1a_event_block.space_id; - + acpi_gbl_FADT.xpm1b_event_block.space_id; } } @@ -396,7 +401,7 @@ static void acpi_tb_validate_fadt(void) u32 *address32; struct acpi_generic_address *address64; u8 length; - acpi_native_uint i; + u32 i; /* Examine all of the 64-bit extended address fields (X fields) */ diff --git a/drivers/acpi/tables/tbfind.c b/drivers/acpi/tables/tbfind.c index 9ca3afc98c80..531584defbb8 100644 --- a/drivers/acpi/tables/tbfind.c +++ b/drivers/acpi/tables/tbfind.c @@ -65,10 +65,9 @@ ACPI_MODULE_NAME("tbfind") ******************************************************************************/ acpi_status acpi_tb_find_table(char *signature, - char *oem_id, - char *oem_table_id, acpi_native_uint * table_index) + char *oem_id, char *oem_table_id, u32 *table_index) { - acpi_native_uint i; + u32 i; acpi_status status; struct acpi_table_header header; diff --git a/drivers/acpi/tables/tbinstal.c b/drivers/acpi/tables/tbinstal.c index 5336ce88f89f..b22185f55a16 100644 --- a/drivers/acpi/tables/tbinstal.c +++ b/drivers/acpi/tables/tbinstal.c @@ -107,11 +107,10 @@ acpi_status acpi_tb_verify_table(struct acpi_table_desc *table_desc) ******************************************************************************/ acpi_status -acpi_tb_add_table(struct acpi_table_desc *table_desc, - acpi_native_uint * table_index) +acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index) { - acpi_native_uint i; - acpi_native_uint length; + u32 i; + u32 length; acpi_status status = AE_OK; ACPI_FUNCTION_TRACE(tb_add_table); @@ -207,8 +206,8 @@ acpi_status acpi_tb_resize_root_table_list(void) /* Increase the Table Array size */ - tables = ACPI_ALLOCATE_ZEROED((acpi_gbl_root_table_list.size + - ACPI_ROOT_TABLE_SIZE_INCREMENT) + tables = ACPI_ALLOCATE_ZEROED(((acpi_size) acpi_gbl_root_table_list. 
+ size + ACPI_ROOT_TABLE_SIZE_INCREMENT) * sizeof(struct acpi_table_desc)); if (!tables) { ACPI_ERROR((AE_INFO, @@ -220,7 +219,7 @@ acpi_status acpi_tb_resize_root_table_list(void) if (acpi_gbl_root_table_list.tables) { ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables, - acpi_gbl_root_table_list.size * + (acpi_size) acpi_gbl_root_table_list.size * sizeof(struct acpi_table_desc)); if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) { @@ -253,7 +252,7 @@ acpi_status acpi_tb_resize_root_table_list(void) acpi_status acpi_tb_store_table(acpi_physical_address address, struct acpi_table_header *table, - u32 length, u8 flags, acpi_native_uint * table_index) + u32 length, u8 flags, u32 *table_index) { acpi_status status = AE_OK; @@ -334,7 +333,7 @@ void acpi_tb_delete_table(struct acpi_table_desc *table_desc) void acpi_tb_terminate(void) { - acpi_native_uint i; + u32 i; ACPI_FUNCTION_TRACE(tb_terminate); @@ -374,7 +373,7 @@ void acpi_tb_terminate(void) * ******************************************************************************/ -void acpi_tb_delete_namespace_by_owner(acpi_native_uint table_index) +void acpi_tb_delete_namespace_by_owner(u32 table_index) { acpi_owner_id owner_id; @@ -403,7 +402,7 @@ void acpi_tb_delete_namespace_by_owner(acpi_native_uint table_index) * ******************************************************************************/ -acpi_status acpi_tb_allocate_owner_id(acpi_native_uint table_index) +acpi_status acpi_tb_allocate_owner_id(u32 table_index) { acpi_status status = AE_BAD_PARAMETER; @@ -431,7 +430,7 @@ acpi_status acpi_tb_allocate_owner_id(acpi_native_uint table_index) * ******************************************************************************/ -acpi_status acpi_tb_release_owner_id(acpi_native_uint table_index) +acpi_status acpi_tb_release_owner_id(u32 table_index) { acpi_status status = AE_BAD_PARAMETER; @@ -462,8 +461,7 @@ acpi_status acpi_tb_release_owner_id(acpi_native_uint table_index) * ******************************************************************************/ -acpi_status -acpi_tb_get_owner_id(acpi_native_uint table_index, acpi_owner_id * owner_id) +acpi_status acpi_tb_get_owner_id(u32 table_index, acpi_owner_id *owner_id) { acpi_status status = AE_BAD_PARAMETER; @@ -490,7 +488,7 @@ acpi_tb_get_owner_id(acpi_native_uint table_index, acpi_owner_id * owner_id) * ******************************************************************************/ -u8 acpi_tb_is_table_loaded(acpi_native_uint table_index) +u8 acpi_tb_is_table_loaded(u32 table_index) { u8 is_loaded = FALSE; @@ -518,7 +516,7 @@ u8 acpi_tb_is_table_loaded(acpi_native_uint table_index) * ******************************************************************************/ -void acpi_tb_set_table_loaded_flag(acpi_native_uint table_index, u8 is_loaded) +void acpi_tb_set_table_loaded_flag(u32 table_index, u8 is_loaded) { (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); diff --git a/drivers/acpi/tables/tbutils.c b/drivers/acpi/tables/tbutils.c index bc019b9b6a68..0cc92ef5236f 100644 --- a/drivers/acpi/tables/tbutils.c +++ b/drivers/acpi/tables/tbutils.c @@ -49,8 +49,8 @@ ACPI_MODULE_NAME("tbutils") /* Local prototypes */ static acpi_physical_address -acpi_tb_get_root_table_entry(u8 * table_entry, - acpi_native_uint table_entry_size); +acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size); + /******************************************************************************* * * FUNCTION: acpi_tb_check_xsdt @@ -238,7 +238,7 @@ acpi_status acpi_tb_verify_checksum(struct acpi_table_header 
*table, u32 length) * ******************************************************************************/ -u8 acpi_tb_checksum(u8 * buffer, acpi_native_uint length) +u8 acpi_tb_checksum(u8 *buffer, u32 length) { u8 sum = 0; u8 *end = buffer + length; @@ -268,7 +268,7 @@ u8 acpi_tb_checksum(u8 * buffer, acpi_native_uint length) void acpi_tb_install_table(acpi_physical_address address, - u8 flags, char *signature, acpi_native_uint table_index) + u8 flags, char *signature, u32 table_index) { struct acpi_table_header *table; @@ -336,8 +336,7 @@ acpi_tb_install_table(acpi_physical_address address, ******************************************************************************/ static acpi_physical_address -acpi_tb_get_root_table_entry(u8 * table_entry, - acpi_native_uint table_entry_size) +acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size) { u64 address64; @@ -395,8 +394,8 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address, u8 flags) { struct acpi_table_rsdp *rsdp; - acpi_native_uint table_entry_size; - acpi_native_uint i; + u32 table_entry_size; + u32 i; u32 table_count; struct acpi_table_header *table; acpi_physical_address address; diff --git a/drivers/acpi/tables/tbxface.c b/drivers/acpi/tables/tbxface.c index 0e319604d3e7..fd7770aa1061 100644 --- a/drivers/acpi/tables/tbxface.c +++ b/drivers/acpi/tables/tbxface.c @@ -125,7 +125,7 @@ acpi_initialize_tables(struct acpi_table_desc * initial_table_array, /* Root Table Array has been statically allocated by the host */ ACPI_MEMSET(initial_table_array, 0, - initial_table_count * + (acpi_size) initial_table_count * sizeof(struct acpi_table_desc)); acpi_gbl_root_table_list.tables = initial_table_array; @@ -183,9 +183,9 @@ acpi_status acpi_reallocate_root_table(void) return_ACPI_STATUS(AE_SUPPORT); } - new_size = - (acpi_gbl_root_table_list.count + - ACPI_ROOT_TABLE_SIZE_INCREMENT) * sizeof(struct acpi_table_desc); + new_size = ((acpi_size) acpi_gbl_root_table_list.count + + ACPI_ROOT_TABLE_SIZE_INCREMENT) * + sizeof(struct acpi_table_desc); /* Create new array and copy the old array */ @@ -222,7 +222,7 @@ acpi_status acpi_reallocate_root_table(void) acpi_status acpi_load_table(struct acpi_table_header *table_ptr) { acpi_status status; - acpi_native_uint table_index; + u32 table_index; struct acpi_table_desc table_desc; if (!table_ptr) @@ -264,11 +264,10 @@ ACPI_EXPORT_SYMBOL(acpi_load_table) *****************************************************************************/ acpi_status acpi_get_table_header(char *signature, - acpi_native_uint instance, - struct acpi_table_header * out_table_header) + u32 instance, struct acpi_table_header *out_table_header) { - acpi_native_uint i; - acpi_native_uint j; + u32 i; + u32 j; struct acpi_table_header *header; /* Parameter validation */ @@ -378,10 +377,10 @@ ACPI_EXPORT_SYMBOL(acpi_unload_table_id) *****************************************************************************/ acpi_status acpi_get_table(char *signature, - acpi_native_uint instance, struct acpi_table_header **out_table) + u32 instance, struct acpi_table_header **out_table) { - acpi_native_uint i; - acpi_native_uint j; + u32 i; + u32 j; acpi_status status; /* Parameter validation */ @@ -435,8 +434,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_table) * ******************************************************************************/ acpi_status -acpi_get_table_by_index(acpi_native_uint table_index, - struct acpi_table_header ** table) +acpi_get_table_by_index(u32 table_index, struct acpi_table_header **table) { 
acpi_status status; @@ -493,7 +491,7 @@ static acpi_status acpi_tb_load_namespace(void) { acpi_status status; struct acpi_table_header *table; - acpi_native_uint i; + u32 i; ACPI_FUNCTION_TRACE(tb_load_namespace); diff --git a/drivers/acpi/tables/tbxfroot.c b/drivers/acpi/tables/tbxfroot.c index b8c0dfa084f6..2d157e0f98d2 100644 --- a/drivers/acpi/tables/tbxfroot.c +++ b/drivers/acpi/tables/tbxfroot.c @@ -118,7 +118,7 @@ static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp) * ******************************************************************************/ -acpi_status acpi_find_root_pointer(acpi_native_uint * table_address) +acpi_status acpi_find_root_pointer(acpi_size *table_address) { u8 *table_ptr; u8 *mem_rover; @@ -153,7 +153,7 @@ acpi_status acpi_find_root_pointer(acpi_native_uint * table_address) * 1b) Search EBDA paragraphs (EBDA is required to be a * minimum of 1_k length) */ - table_ptr = acpi_os_map_memory((acpi_native_uint) + table_ptr = acpi_os_map_memory((acpi_physical_address) physical_address, ACPI_EBDA_WINDOW_SIZE); if (!table_ptr) { diff --git a/drivers/acpi/utilities/utalloc.c b/drivers/acpi/utilities/utalloc.c index ede084829a70..3dfb8a442b26 100644 --- a/drivers/acpi/utilities/utalloc.c +++ b/drivers/acpi/utilities/utalloc.c @@ -309,7 +309,8 @@ acpi_ut_initialize_buffer(struct acpi_buffer * buffer, * ******************************************************************************/ -void *acpi_ut_allocate(acpi_size size, u32 component, char *module, u32 line) +void *acpi_ut_allocate(acpi_size size, + u32 component, const char *module, u32 line) { void *allocation; @@ -353,7 +354,7 @@ void *acpi_ut_allocate(acpi_size size, u32 component, char *module, u32 line) ******************************************************************************/ void *acpi_ut_allocate_zeroed(acpi_size size, - u32 component, char *module, u32 line) + u32 component, const char *module, u32 line) { void *allocation; diff --git a/drivers/acpi/utilities/utcopy.c b/drivers/acpi/utilities/utcopy.c index 655c290aca7b..53499ac90988 100644 --- a/drivers/acpi/utilities/utcopy.c +++ b/drivers/acpi/utilities/utcopy.c @@ -572,7 +572,7 @@ acpi_ut_copy_epackage_to_ipackage(union acpi_object *external_object, acpi_status status = AE_OK; union acpi_operand_object *package_object; union acpi_operand_object **package_elements; - acpi_native_uint i; + u32 i; ACPI_FUNCTION_TRACE(ut_copy_epackage_to_ipackage); @@ -599,7 +599,7 @@ acpi_ut_copy_epackage_to_ipackage(union acpi_object *external_object, /* Truncate package and delete it */ - package_object->package.count = (u32) i; + package_object->package.count = i; package_elements[i] = NULL; acpi_ut_remove_reference(package_object); return_ACPI_STATUS(status); diff --git a/drivers/acpi/utilities/utdebug.c b/drivers/acpi/utilities/utdebug.c index f938f465efa4..fd66ecb6741e 100644 --- a/drivers/acpi/utilities/utdebug.c +++ b/drivers/acpi/utilities/utdebug.c @@ -157,7 +157,8 @@ void ACPI_INTERNAL_VAR_XFACE acpi_ut_debug_print(u32 requested_debug_level, u32 line_number, const char *function_name, - char *module_name, u32 component_id, char *format, ...) + const char *module_name, + u32 component_id, const char *format, ...) { acpi_thread_id thread_id; va_list args; @@ -228,7 +229,8 @@ void ACPI_INTERNAL_VAR_XFACE acpi_ut_debug_print_raw(u32 requested_debug_level, u32 line_number, const char *function_name, - char *module_name, u32 component_id, char *format, ...) + const char *module_name, + u32 component_id, const char *format, ...) 
{ va_list args; @@ -261,7 +263,8 @@ ACPI_EXPORT_SYMBOL(acpi_ut_debug_print_raw) ******************************************************************************/ void acpi_ut_trace(u32 line_number, - const char *function_name, char *module_name, u32 component_id) + const char *function_name, + const char *module_name, u32 component_id) { acpi_gbl_nesting_level++; @@ -293,7 +296,7 @@ ACPI_EXPORT_SYMBOL(acpi_ut_trace) void acpi_ut_trace_ptr(u32 line_number, const char *function_name, - char *module_name, u32 component_id, void *pointer) + const char *module_name, u32 component_id, void *pointer) { acpi_gbl_nesting_level++; acpi_ut_track_stack_ptr(); @@ -324,7 +327,7 @@ acpi_ut_trace_ptr(u32 line_number, void acpi_ut_trace_str(u32 line_number, const char *function_name, - char *module_name, u32 component_id, char *string) + const char *module_name, u32 component_id, char *string) { acpi_gbl_nesting_level++; @@ -356,7 +359,7 @@ acpi_ut_trace_str(u32 line_number, void acpi_ut_trace_u32(u32 line_number, const char *function_name, - char *module_name, u32 component_id, u32 integer) + const char *module_name, u32 component_id, u32 integer) { acpi_gbl_nesting_level++; @@ -386,7 +389,8 @@ acpi_ut_trace_u32(u32 line_number, void acpi_ut_exit(u32 line_number, - const char *function_name, char *module_name, u32 component_id) + const char *function_name, + const char *module_name, u32 component_id) { acpi_ut_debug_print(ACPI_LV_FUNCTIONS, @@ -417,7 +421,8 @@ ACPI_EXPORT_SYMBOL(acpi_ut_exit) void acpi_ut_status_exit(u32 line_number, const char *function_name, - char *module_name, u32 component_id, acpi_status status) + const char *module_name, + u32 component_id, acpi_status status) { if (ACPI_SUCCESS(status)) { @@ -458,7 +463,8 @@ ACPI_EXPORT_SYMBOL(acpi_ut_status_exit) void acpi_ut_value_exit(u32 line_number, const char *function_name, - char *module_name, u32 component_id, acpi_integer value) + const char *module_name, + u32 component_id, acpi_integer value) { acpi_ut_debug_print(ACPI_LV_FUNCTIONS, @@ -490,7 +496,7 @@ ACPI_EXPORT_SYMBOL(acpi_ut_value_exit) void acpi_ut_ptr_exit(u32 line_number, const char *function_name, - char *module_name, u32 component_id, u8 * ptr) + const char *module_name, u32 component_id, u8 *ptr) { acpi_ut_debug_print(ACPI_LV_FUNCTIONS, @@ -519,8 +525,8 @@ acpi_ut_ptr_exit(u32 line_number, void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display) { - acpi_native_uint i = 0; - acpi_native_uint j; + u32 i = 0; + u32 j; u32 temp32; u8 buf_char; @@ -539,7 +545,7 @@ void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display) /* Print current offset */ - acpi_os_printf("%6.4X: ", (u32) i); + acpi_os_printf("%6.4X: ", i); /* Print 16 hex chars */ @@ -549,7 +555,7 @@ void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display) /* Dump fill spaces */ acpi_os_printf("%*s", ((display * 2) + 1), " "); - j += (acpi_native_uint) display; + j += display; continue; } @@ -557,32 +563,38 @@ void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display) case DB_BYTE_DISPLAY: default: /* Default is BYTE display */ - acpi_os_printf("%02X ", buffer[i + j]); + acpi_os_printf("%02X ", + buffer[(acpi_size) i + j]); break; case DB_WORD_DISPLAY: - ACPI_MOVE_16_TO_32(&temp32, &buffer[i + j]); + ACPI_MOVE_16_TO_32(&temp32, + &buffer[(acpi_size) i + j]); acpi_os_printf("%04X ", temp32); break; case DB_DWORD_DISPLAY: - ACPI_MOVE_32_TO_32(&temp32, &buffer[i + j]); + ACPI_MOVE_32_TO_32(&temp32, + &buffer[(acpi_size) i + j]); acpi_os_printf("%08X ", temp32); break; case DB_QWORD_DISPLAY: - 
ACPI_MOVE_32_TO_32(&temp32, &buffer[i + j]); + ACPI_MOVE_32_TO_32(&temp32, + &buffer[(acpi_size) i + j]); acpi_os_printf("%08X", temp32); - ACPI_MOVE_32_TO_32(&temp32, &buffer[i + j + 4]); + ACPI_MOVE_32_TO_32(&temp32, + &buffer[(acpi_size) i + j + + 4]); acpi_os_printf("%08X ", temp32); break; } - j += (acpi_native_uint) display; + j += display; } /* @@ -596,7 +608,7 @@ void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display) return; } - buf_char = buffer[i + j]; + buf_char = buffer[(acpi_size) i + j]; if (ACPI_IS_PRINT(buf_char)) { acpi_os_printf("%c", buf_char); } else { diff --git a/drivers/acpi/utilities/utdelete.c b/drivers/acpi/utilities/utdelete.c index 1fbc35139e84..c5c791a575c9 100644 --- a/drivers/acpi/utilities/utdelete.c +++ b/drivers/acpi/utilities/utdelete.c @@ -442,7 +442,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action) union acpi_generic_state *state_list = NULL; union acpi_operand_object *next_object = NULL; union acpi_generic_state *state; - acpi_native_uint i; + u32 i; ACPI_FUNCTION_TRACE_PTR(ut_update_object_reference, object); diff --git a/drivers/acpi/utilities/uteval.c b/drivers/acpi/utilities/uteval.c index 05e61be267d5..352747e49c7a 100644 --- a/drivers/acpi/utilities/uteval.c +++ b/drivers/acpi/utilities/uteval.c @@ -97,7 +97,7 @@ acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state) acpi_status status; union acpi_operand_object *string_desc; union acpi_operand_object *return_desc; - acpi_native_uint i; + u32 i; ACPI_FUNCTION_TRACE(ut_osi_implementation); @@ -217,7 +217,6 @@ acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node, info->prefix_node = prefix_node; info->pathname = path; - info->parameter_type = ACPI_PARAM_ARGS; /* Evaluate the object/method */ @@ -514,7 +513,7 @@ acpi_ut_execute_CID(struct acpi_namespace_node * device_node, u32 count; u32 size; struct acpi_compatible_id_list *cid_list; - acpi_native_uint i; + u32 i; ACPI_FUNCTION_TRACE(ut_execute_CID); diff --git a/drivers/acpi/utilities/utmisc.c b/drivers/acpi/utilities/utmisc.c index 1f057b71db1a..f34be6773556 100644 --- a/drivers/acpi/utilities/utmisc.c +++ b/drivers/acpi/utilities/utmisc.c @@ -64,7 +64,7 @@ ACPI_MODULE_NAME("utmisc") ******************************************************************************/ const char *acpi_ut_validate_exception(acpi_status status) { - acpi_status sub_status; + u32 sub_status; const char *exception = NULL; ACPI_FUNCTION_ENTRY(); @@ -85,32 +85,28 @@ const char *acpi_ut_validate_exception(acpi_status status) case AE_CODE_PROGRAMMER: if (sub_status <= AE_CODE_PGM_MAX) { - exception = - acpi_gbl_exception_names_pgm[sub_status - 1]; + exception = acpi_gbl_exception_names_pgm[sub_status]; } break; case AE_CODE_ACPI_TABLES: if (sub_status <= AE_CODE_TBL_MAX) { - exception = - acpi_gbl_exception_names_tbl[sub_status - 1]; + exception = acpi_gbl_exception_names_tbl[sub_status]; } break; case AE_CODE_AML: if (sub_status <= AE_CODE_AML_MAX) { - exception = - acpi_gbl_exception_names_aml[sub_status - 1]; + exception = acpi_gbl_exception_names_aml[sub_status]; } break; case AE_CODE_CONTROL: if (sub_status <= AE_CODE_CTRL_MAX) { - exception = - acpi_gbl_exception_names_ctrl[sub_status - 1]; + exception = acpi_gbl_exception_names_ctrl[sub_status]; } break; @@ -165,9 +161,9 @@ u8 acpi_ut_is_aml_table(struct acpi_table_header *table) acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id) { - acpi_native_uint i; - acpi_native_uint j; - acpi_native_uint k; + u32 i; + u32 j; + u32 k; acpi_status 
status; ACPI_FUNCTION_TRACE(ut_allocate_owner_id); @@ -273,7 +269,7 @@ void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr) { acpi_owner_id owner_id = *owner_id_ptr; acpi_status status; - acpi_native_uint index; + u32 index; u32 bit; ACPI_FUNCTION_TRACE_U32(ut_release_owner_id, owner_id); @@ -593,7 +589,7 @@ acpi_ut_display_init_pathname(u8 type, * ******************************************************************************/ -u8 acpi_ut_valid_acpi_char(char character, acpi_native_uint position) +u8 acpi_ut_valid_acpi_char(char character, u32 position) { if (!((character >= 'A' && character <= 'Z') || @@ -628,7 +624,7 @@ u8 acpi_ut_valid_acpi_char(char character, acpi_native_uint position) u8 acpi_ut_valid_acpi_name(u32 name) { - acpi_native_uint i; + u32 i; ACPI_FUNCTION_ENTRY(); @@ -657,7 +653,7 @@ u8 acpi_ut_valid_acpi_name(u32 name) acpi_name acpi_ut_repair_name(char *name) { - acpi_native_uint i; + u32 i; char new_name[ACPI_NAME_SIZE]; for (i = 0; i < ACPI_NAME_SIZE; i++) { @@ -1024,7 +1020,7 @@ acpi_ut_walk_package_tree(union acpi_operand_object * source_object, ******************************************************************************/ void ACPI_INTERNAL_VAR_XFACE -acpi_ut_error(char *module_name, u32 line_number, char *format, ...) +acpi_ut_error(const char *module_name, u32 line_number, const char *format, ...) { va_list args; @@ -1037,8 +1033,8 @@ acpi_ut_error(char *module_name, u32 line_number, char *format, ...) } void ACPI_INTERNAL_VAR_XFACE -acpi_ut_exception(char *module_name, - u32 line_number, acpi_status status, char *format, ...) +acpi_ut_exception(const char *module_name, + u32 line_number, acpi_status status, const char *format, ...) { va_list args; @@ -1054,7 +1050,8 @@ acpi_ut_exception(char *module_name, EXPORT_SYMBOL(acpi_ut_exception); void ACPI_INTERNAL_VAR_XFACE -acpi_ut_warning(char *module_name, u32 line_number, char *format, ...) +acpi_ut_warning(const char *module_name, + u32 line_number, const char *format, ...) { va_list args; @@ -1067,7 +1064,7 @@ acpi_ut_warning(char *module_name, u32 line_number, char *format, ...) } void ACPI_INTERNAL_VAR_XFACE -acpi_ut_info(char *module_name, u32 line_number, char *format, ...) +acpi_ut_info(const char *module_name, u32 line_number, const char *format, ...) { va_list args; diff --git a/drivers/acpi/utilities/utmutex.c b/drivers/acpi/utilities/utmutex.c index f7d602b1a894..7331dde9e1b3 100644 --- a/drivers/acpi/utilities/utmutex.c +++ b/drivers/acpi/utilities/utmutex.c @@ -218,7 +218,7 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id) * the mutex ordering rule. This indicates a coding error somewhere in * the ACPI subsystem code. */ - for (i = mutex_id; i < ACPI_MAX_MUTEX; i++) { + for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) { if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) { if (i == mutex_id) { ACPI_ERROR((AE_INFO, @@ -315,7 +315,7 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id) * ordering rule. This indicates a coding error somewhere in * the ACPI subsystem code. 
*/ - for (i = mutex_id; i < ACPI_MAX_MUTEX; i++) { + for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) { if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) { if (i == mutex_id) { continue; diff --git a/drivers/acpi/utilities/utobject.c b/drivers/acpi/utilities/utobject.c index e68466de8044..e25484495e65 100644 --- a/drivers/acpi/utilities/utobject.c +++ b/drivers/acpi/utilities/utobject.c @@ -83,7 +83,8 @@ acpi_ut_get_element_length(u8 object_type, * ******************************************************************************/ -union acpi_operand_object *acpi_ut_create_internal_object_dbg(char *module_name, +union acpi_operand_object *acpi_ut_create_internal_object_dbg(const char + *module_name, u32 line_number, u32 component_id, acpi_object_type @@ -175,8 +176,8 @@ union acpi_operand_object *acpi_ut_create_package_object(u32 count) * Create the element array. Count+1 allows the array to be null * terminated. */ - package_elements = ACPI_ALLOCATE_ZEROED((acpi_size) - (count + 1) * sizeof(void *)); + package_elements = ACPI_ALLOCATE_ZEROED(((acpi_size) count + + 1) * sizeof(void *)); if (!package_elements) { acpi_ut_remove_reference(package_desc); return_PTR(NULL); @@ -347,7 +348,7 @@ u8 acpi_ut_valid_internal_object(void *object) * ******************************************************************************/ -void *acpi_ut_allocate_object_desc_dbg(char *module_name, +void *acpi_ut_allocate_object_desc_dbg(const char *module_name, u32 line_number, u32 component_id) { union acpi_operand_object *object; diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index d089c4519d45..64c889331f3b 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -631,6 +631,76 @@ acpi_video_bus_DOS(struct acpi_video_bus *video, int bios_flag, int lcd_flag) * device : video output device (LCD, CRT, ..) * * Return Value: + * Maximum brightness level + * + * Allocate and initialize device->brightness. + */ + +static int +acpi_video_init_brightness(struct acpi_video_device *device) +{ + union acpi_object *obj = NULL; + int i, max_level = 0, count = 0; + union acpi_object *o; + struct acpi_video_device_brightness *br = NULL; + + if (!ACPI_SUCCESS(acpi_video_device_lcd_query_levels(device, &obj))) { + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Could not query available " + "LCD brightness level\n")); + goto out; + } + + if (obj->package.count < 2) + goto out; + + br = kzalloc(sizeof(*br), GFP_KERNEL); + if (!br) { + printk(KERN_ERR "can't allocate memory\n"); + goto out; + } + + br->levels = kmalloc(obj->package.count * sizeof *(br->levels), + GFP_KERNEL); + if (!br->levels) + goto out_free; + + for (i = 0; i < obj->package.count; i++) { + o = (union acpi_object *)&obj->package.elements[i]; + if (o->type != ACPI_TYPE_INTEGER) { + printk(KERN_ERR PREFIX "Invalid data\n"); + continue; + } + br->levels[count] = (u32) o->integer.value; + + if (br->levels[count] > max_level) + max_level = br->levels[count]; + count++; + } + + if (count < 2) + goto out_free_levels; + + br->count = count; + device->brightness = br; + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "found %d brightness levels\n", count)); + kfree(obj); + return max_level; + +out_free_levels: + kfree(br->levels); +out_free: + kfree(br); +out: + device->brightness = NULL; + kfree(obj); + return 0; +} + +/* + * Arg: + * device : video output device (LCD, CRT, ..) 
+ * + * Return Value: * None * * Find out all required AML methods defined under the output @@ -640,10 +710,7 @@ acpi_video_bus_DOS(struct acpi_video_bus *video, int bios_flag, int lcd_flag) static void acpi_video_device_find_cap(struct acpi_video_device *device) { acpi_handle h_dummy1; - int i; u32 max_level = 0; - union acpi_object *obj = NULL; - struct acpi_video_device_brightness *br = NULL; memset(&device->cap, 0, sizeof(device->cap)); @@ -672,53 +739,7 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device) device->cap._DSS = 1; } - if (ACPI_SUCCESS(acpi_video_device_lcd_query_levels(device, &obj))) { - - if (obj->package.count >= 2) { - int count = 0; - union acpi_object *o; - - br = kzalloc(sizeof(*br), GFP_KERNEL); - if (!br) { - printk(KERN_ERR "can't allocate memory\n"); - } else { - br->levels = kmalloc(obj->package.count * - sizeof *(br->levels), GFP_KERNEL); - if (!br->levels) - goto out; - - for (i = 0; i < obj->package.count; i++) { - o = (union acpi_object *)&obj->package. - elements[i]; - if (o->type != ACPI_TYPE_INTEGER) { - printk(KERN_ERR PREFIX "Invalid data\n"); - continue; - } - br->levels[count] = (u32) o->integer.value; - - if (br->levels[count] > max_level) - max_level = br->levels[count]; - count++; - } - out: - if (count < 2) { - kfree(br->levels); - kfree(br); - } else { - br->count = count; - device->brightness = br; - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "found %d brightness levels\n", - count)); - } - } - } - - } else { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Could not query available LCD brightness level\n")); - } - - kfree(obj); + max_level = acpi_video_init_brightness(device); if (device->cap._BCL && device->cap._BCM && device->cap._BQC && max_level > 0){ int result; @@ -1695,6 +1716,8 @@ static void acpi_video_switch_brightness(struct acpi_video_device *device, int event) { unsigned long level_current, level_next; + if (!device->brightness) + return; acpi_video_device_lcd_get_level_current(device, &level_current); level_next = acpi_video_get_next_level(device, level_current, event); acpi_video_device_lcd_set_level(device, level_next); diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c index bc79df6e7cb0..a9e827356d06 100644 --- a/drivers/ata/pata_mpc52xx.c +++ b/drivers/ata/pata_mpc52xx.c @@ -16,10 +16,10 @@ #include <linux/slab.h> #include <linux/delay.h> #include <linux/libata.h> +#include <linux/of_platform.h> #include <asm/types.h> #include <asm/prom.h> -#include <asm/of_platform.h> #include <asm/mpc52xx.h> diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 911ec600fe71..3f940393d6c7 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -453,6 +453,8 @@ int platform_driver_register(struct platform_driver *drv) drv->driver.suspend = platform_drv_suspend; if (drv->resume) drv->driver.resume = platform_drv_resume; + if (drv->pm) + drv->driver.pm = &drv->pm->base; return driver_register(&drv->driver); } EXPORT_SYMBOL_GPL(platform_driver_register); @@ -560,7 +562,9 @@ static int platform_match(struct device *dev, struct device_driver *drv) return (strncmp(pdev->name, drv->name, BUS_ID_SIZE) == 0); } -static int platform_suspend(struct device *dev, pm_message_t mesg) +#ifdef CONFIG_PM_SLEEP + +static int platform_legacy_suspend(struct device *dev, pm_message_t mesg) { int ret = 0; @@ -570,7 +574,7 @@ static int platform_suspend(struct device *dev, pm_message_t mesg) return ret; } -static int platform_suspend_late(struct device *dev, pm_message_t mesg) +static int 
platform_legacy_suspend_late(struct device *dev, pm_message_t mesg) { struct platform_driver *drv = to_platform_driver(dev->driver); struct platform_device *pdev; @@ -583,7 +587,7 @@ static int platform_suspend_late(struct device *dev, pm_message_t mesg) return ret; } -static int platform_resume_early(struct device *dev) +static int platform_legacy_resume_early(struct device *dev) { struct platform_driver *drv = to_platform_driver(dev->driver); struct platform_device *pdev; @@ -596,7 +600,7 @@ static int platform_resume_early(struct device *dev) return ret; } -static int platform_resume(struct device *dev) +static int platform_legacy_resume(struct device *dev) { int ret = 0; @@ -606,15 +610,291 @@ static int platform_resume(struct device *dev) return ret; } +static int platform_pm_prepare(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (drv && drv->pm && drv->pm->prepare) + ret = drv->pm->prepare(dev); + + return ret; +} + +static void platform_pm_complete(struct device *dev) +{ + struct device_driver *drv = dev->driver; + + if (drv && drv->pm && drv->pm->complete) + drv->pm->complete(dev); +} + +#ifdef CONFIG_SUSPEND + +static int platform_pm_suspend(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (drv && drv->pm) { + if (drv->pm->suspend) + ret = drv->pm->suspend(dev); + } else { + ret = platform_legacy_suspend(dev, PMSG_SUSPEND); + } + + return ret; +} + +static int platform_pm_suspend_noirq(struct device *dev) +{ + struct platform_driver *pdrv; + int ret = 0; + + if (!dev->driver) + return 0; + + pdrv = to_platform_driver(dev->driver); + if (pdrv->pm) { + if (pdrv->pm->suspend_noirq) + ret = pdrv->pm->suspend_noirq(dev); + } else { + ret = platform_legacy_suspend_late(dev, PMSG_SUSPEND); + } + + return ret; +} + +static int platform_pm_resume(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (drv && drv->pm) { + if (drv->pm->resume) + ret = drv->pm->resume(dev); + } else { + ret = platform_legacy_resume(dev); + } + + return ret; +} + +static int platform_pm_resume_noirq(struct device *dev) +{ + struct platform_driver *pdrv; + int ret = 0; + + if (!dev->driver) + return 0; + + pdrv = to_platform_driver(dev->driver); + if (pdrv->pm) { + if (pdrv->pm->resume_noirq) + ret = pdrv->pm->resume_noirq(dev); + } else { + ret = platform_legacy_resume_early(dev); + } + + return ret; +} + +#else /* !CONFIG_SUSPEND */ + +#define platform_pm_suspend NULL +#define platform_pm_resume NULL +#define platform_pm_suspend_noirq NULL +#define platform_pm_resume_noirq NULL + +#endif /* !CONFIG_SUSPEND */ + +#ifdef CONFIG_HIBERNATION + +static int platform_pm_freeze(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->freeze) + ret = drv->pm->freeze(dev); + } else { + ret = platform_legacy_suspend(dev, PMSG_FREEZE); + } + + return ret; +} + +static int platform_pm_freeze_noirq(struct device *dev) +{ + struct platform_driver *pdrv; + int ret = 0; + + if (!dev->driver) + return 0; + + pdrv = to_platform_driver(dev->driver); + if (pdrv->pm) { + if (pdrv->pm->freeze_noirq) + ret = pdrv->pm->freeze_noirq(dev); + } else { + ret = platform_legacy_suspend_late(dev, PMSG_FREEZE); + } + + return ret; +} + +static int platform_pm_thaw(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (drv && drv->pm) { + if (drv->pm->thaw) + ret = drv->pm->thaw(dev); + } else { + ret = 
platform_legacy_resume(dev); + } + + return ret; +} + +static int platform_pm_thaw_noirq(struct device *dev) +{ + struct platform_driver *pdrv; + int ret = 0; + + if (!dev->driver) + return 0; + + pdrv = to_platform_driver(dev->driver); + if (pdrv->pm) { + if (pdrv->pm->thaw_noirq) + ret = pdrv->pm->thaw_noirq(dev); + } else { + ret = platform_legacy_resume_early(dev); + } + + return ret; +} + +static int platform_pm_poweroff(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (drv && drv->pm) { + if (drv->pm->poweroff) + ret = drv->pm->poweroff(dev); + } else { + ret = platform_legacy_suspend(dev, PMSG_HIBERNATE); + } + + return ret; +} + +static int platform_pm_poweroff_noirq(struct device *dev) +{ + struct platform_driver *pdrv; + int ret = 0; + + if (!dev->driver) + return 0; + + pdrv = to_platform_driver(dev->driver); + if (pdrv->pm) { + if (pdrv->pm->poweroff_noirq) + ret = pdrv->pm->poweroff_noirq(dev); + } else { + ret = platform_legacy_suspend_late(dev, PMSG_HIBERNATE); + } + + return ret; +} + +static int platform_pm_restore(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (drv && drv->pm) { + if (drv->pm->restore) + ret = drv->pm->restore(dev); + } else { + ret = platform_legacy_resume(dev); + } + + return ret; +} + +static int platform_pm_restore_noirq(struct device *dev) +{ + struct platform_driver *pdrv; + int ret = 0; + + if (!dev->driver) + return 0; + + pdrv = to_platform_driver(dev->driver); + if (pdrv->pm) { + if (pdrv->pm->restore_noirq) + ret = pdrv->pm->restore_noirq(dev); + } else { + ret = platform_legacy_resume_early(dev); + } + + return ret; +} + +#else /* !CONFIG_HIBERNATION */ + +#define platform_pm_freeze NULL +#define platform_pm_thaw NULL +#define platform_pm_poweroff NULL +#define platform_pm_restore NULL +#define platform_pm_freeze_noirq NULL +#define platform_pm_thaw_noirq NULL +#define platform_pm_poweroff_noirq NULL +#define platform_pm_restore_noirq NULL + +#endif /* !CONFIG_HIBERNATION */ + +struct pm_ext_ops platform_pm_ops = { + .base = { + .prepare = platform_pm_prepare, + .complete = platform_pm_complete, + .suspend = platform_pm_suspend, + .resume = platform_pm_resume, + .freeze = platform_pm_freeze, + .thaw = platform_pm_thaw, + .poweroff = platform_pm_poweroff, + .restore = platform_pm_restore, + }, + .suspend_noirq = platform_pm_suspend_noirq, + .resume_noirq = platform_pm_resume_noirq, + .freeze_noirq = platform_pm_freeze_noirq, + .thaw_noirq = platform_pm_thaw_noirq, + .poweroff_noirq = platform_pm_poweroff_noirq, + .restore_noirq = platform_pm_restore_noirq, +}; + +#define PLATFORM_PM_OPS_PTR &platform_pm_ops + +#else /* !CONFIG_PM_SLEEP */ + +#define PLATFORM_PM_OPS_PTR NULL + +#endif /* !CONFIG_PM_SLEEP */ + struct bus_type platform_bus_type = { .name = "platform", .dev_attrs = platform_dev_attrs, .match = platform_match, .uevent = platform_uevent, - .suspend = platform_suspend, - .suspend_late = platform_suspend_late, - .resume_early = platform_resume_early, - .resume = platform_resume, + .pm = PLATFORM_PM_OPS_PTR, }; EXPORT_SYMBOL_GPL(platform_bus_type); diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 45cc3d9eacb8..3250c5257b74 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -12,11 +12,9 @@ * and add it to the list of power-controlled devices. sysfs entries for * controlling device power management will also be added. 
* - * A different set of lists than the global subsystem list are used to - * keep track of power info because we use different lists to hold - * devices based on what stage of the power management process they - * are in. The power domain dependencies may also differ from the - * ancestral dependencies that the subsystem list maintains. + * A separate list is used for keeping track of power info, because the power + * domain dependencies may differ from the ancestral dependencies that the + * subsystem list maintains. */ #include <linux/device.h> @@ -30,31 +28,40 @@ #include "power.h" /* - * The entries in the dpm_active list are in a depth first order, simply + * The entries in the dpm_list list are in a depth first order, simply * because children are guaranteed to be discovered after parents, and * are inserted at the back of the list on discovery. * - * All the other lists are kept in the same order, for consistency. - * However the lists aren't always traversed in the same order. - * Semaphores must be acquired from the top (i.e., front) down - * and released in the opposite order. Devices must be suspended - * from the bottom (i.e., end) up and resumed in the opposite order. - * That way no parent will be suspended while it still has an active - * child. - * * Since device_pm_add() may be called with a device semaphore held, * we must never try to acquire a device semaphore while holding * dpm_list_mutex. */ -LIST_HEAD(dpm_active); -static LIST_HEAD(dpm_off); -static LIST_HEAD(dpm_off_irq); +LIST_HEAD(dpm_list); static DEFINE_MUTEX(dpm_list_mtx); -/* 'true' if all devices have been suspended, protected by dpm_list_mtx */ -static bool all_sleeping; +/* + * Set once the preparation of devices for a PM transition has started, reset + * before starting to resume devices. Protected by dpm_list_mtx. + */ +static bool transition_started; + +/** + * device_pm_lock - lock the list of active devices used by the PM core + */ +void device_pm_lock(void) +{ + mutex_lock(&dpm_list_mtx); +} + +/** + * device_pm_unlock - unlock the list of active devices used by the PM core + */ +void device_pm_unlock(void) +{ + mutex_unlock(&dpm_list_mtx); +} /** * device_pm_add - add a device to the list of active devices @@ -68,17 +75,25 @@ int device_pm_add(struct device *dev) dev->bus ? dev->bus->name : "No Bus", kobject_name(&dev->kobj)); mutex_lock(&dpm_list_mtx); - if ((dev->parent && dev->parent->power.sleeping) || all_sleeping) { - if (dev->parent->power.sleeping) - dev_warn(dev, "parent %s is sleeping\n", + if (dev->parent) { + if (dev->parent->power.status >= DPM_SUSPENDING) { + dev_warn(dev, "parent %s is sleeping, will not add\n", dev->parent->bus_id); - else - dev_warn(dev, "all devices are sleeping\n"); + WARN_ON(true); + } + } else if (transition_started) { + /* + * We refuse to register parentless devices while a PM + * transition is in progress in order to avoid leaving them + * unhandled down the road + */ WARN_ON(true); } error = dpm_sysfs_add(dev); - if (!error) - list_add_tail(&dev->power.entry, &dpm_active); + if (!error) { + dev->power.status = DPM_ON; + list_add_tail(&dev->power.entry, &dpm_list); + } mutex_unlock(&dpm_list_mtx); return error; } @@ -100,73 +115,243 @@ void device_pm_remove(struct device *dev) mutex_unlock(&dpm_list_mtx); } +/** + * pm_op - execute the PM operation appropiate for given PM event + * @dev: Device. + * @ops: PM operations to choose from. + * @state: PM transition of the system being carried out. 
+ */ +static int pm_op(struct device *dev, struct pm_ops *ops, pm_message_t state) +{ + int error = 0; + + switch (state.event) { +#ifdef CONFIG_SUSPEND + case PM_EVENT_SUSPEND: + if (ops->suspend) { + error = ops->suspend(dev); + suspend_report_result(ops->suspend, error); + } + break; + case PM_EVENT_RESUME: + if (ops->resume) { + error = ops->resume(dev); + suspend_report_result(ops->resume, error); + } + break; +#endif /* CONFIG_SUSPEND */ +#ifdef CONFIG_HIBERNATION + case PM_EVENT_FREEZE: + case PM_EVENT_QUIESCE: + if (ops->freeze) { + error = ops->freeze(dev); + suspend_report_result(ops->freeze, error); + } + break; + case PM_EVENT_HIBERNATE: + if (ops->poweroff) { + error = ops->poweroff(dev); + suspend_report_result(ops->poweroff, error); + } + break; + case PM_EVENT_THAW: + case PM_EVENT_RECOVER: + if (ops->thaw) { + error = ops->thaw(dev); + suspend_report_result(ops->thaw, error); + } + break; + case PM_EVENT_RESTORE: + if (ops->restore) { + error = ops->restore(dev); + suspend_report_result(ops->restore, error); + } + break; +#endif /* CONFIG_HIBERNATION */ + default: + error = -EINVAL; + } + return error; +} + +/** + * pm_noirq_op - execute the PM operation appropiate for given PM event + * @dev: Device. + * @ops: PM operations to choose from. + * @state: PM transition of the system being carried out. + * + * The operation is executed with interrupts disabled by the only remaining + * functional CPU in the system. + */ +static int pm_noirq_op(struct device *dev, struct pm_ext_ops *ops, + pm_message_t state) +{ + int error = 0; + + switch (state.event) { +#ifdef CONFIG_SUSPEND + case PM_EVENT_SUSPEND: + if (ops->suspend_noirq) { + error = ops->suspend_noirq(dev); + suspend_report_result(ops->suspend_noirq, error); + } + break; + case PM_EVENT_RESUME: + if (ops->resume_noirq) { + error = ops->resume_noirq(dev); + suspend_report_result(ops->resume_noirq, error); + } + break; +#endif /* CONFIG_SUSPEND */ +#ifdef CONFIG_HIBERNATION + case PM_EVENT_FREEZE: + case PM_EVENT_QUIESCE: + if (ops->freeze_noirq) { + error = ops->freeze_noirq(dev); + suspend_report_result(ops->freeze_noirq, error); + } + break; + case PM_EVENT_HIBERNATE: + if (ops->poweroff_noirq) { + error = ops->poweroff_noirq(dev); + suspend_report_result(ops->poweroff_noirq, error); + } + break; + case PM_EVENT_THAW: + case PM_EVENT_RECOVER: + if (ops->thaw_noirq) { + error = ops->thaw_noirq(dev); + suspend_report_result(ops->thaw_noirq, error); + } + break; + case PM_EVENT_RESTORE: + if (ops->restore_noirq) { + error = ops->restore_noirq(dev); + suspend_report_result(ops->restore_noirq, error); + } + break; +#endif /* CONFIG_HIBERNATION */ + default: + error = -EINVAL; + } + return error; +} + +static char *pm_verb(int event) +{ + switch (event) { + case PM_EVENT_SUSPEND: + return "suspend"; + case PM_EVENT_RESUME: + return "resume"; + case PM_EVENT_FREEZE: + return "freeze"; + case PM_EVENT_QUIESCE: + return "quiesce"; + case PM_EVENT_HIBERNATE: + return "hibernate"; + case PM_EVENT_THAW: + return "thaw"; + case PM_EVENT_RESTORE: + return "restore"; + case PM_EVENT_RECOVER: + return "recover"; + default: + return "(unknown PM event)"; + } +} + +static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info) +{ + dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event), + ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ? 
+ ", may wakeup" : ""); +} + +static void pm_dev_err(struct device *dev, pm_message_t state, char *info, + int error) +{ + printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n", + kobject_name(&dev->kobj), pm_verb(state.event), info, error); +} + /*------------------------- Resume routines -------------------------*/ /** - * resume_device_early - Power on one device (early resume). + * resume_device_noirq - Power on one device (early resume). * @dev: Device. + * @state: PM transition of the system being carried out. * * Must be called with interrupts disabled. */ -static int resume_device_early(struct device *dev) +static int resume_device_noirq(struct device *dev, pm_message_t state) { int error = 0; TRACE_DEVICE(dev); TRACE_RESUME(0); - if (dev->bus && dev->bus->resume_early) { - dev_dbg(dev, "EARLY resume\n"); + if (!dev->bus) + goto End; + + if (dev->bus->pm) { + pm_dev_dbg(dev, state, "EARLY "); + error = pm_noirq_op(dev, dev->bus->pm, state); + } else if (dev->bus->resume_early) { + pm_dev_dbg(dev, state, "legacy EARLY "); error = dev->bus->resume_early(dev); } - + End: TRACE_RESUME(error); return error; } /** * dpm_power_up - Power on all regular (non-sysdev) devices. + * @state: PM transition of the system being carried out. * - * Walk the dpm_off_irq list and power each device up. This - * is used for devices that required they be powered down with - * interrupts disabled. As devices are powered on, they are moved - * to the dpm_off list. + * Execute the appropriate "noirq resume" callback for all devices marked + * as DPM_OFF_IRQ. * * Must be called with interrupts disabled and only one CPU running. */ -static void dpm_power_up(void) +static void dpm_power_up(pm_message_t state) { + struct device *dev; - while (!list_empty(&dpm_off_irq)) { - struct list_head *entry = dpm_off_irq.next; - struct device *dev = to_device(entry); + list_for_each_entry(dev, &dpm_list, power.entry) + if (dev->power.status > DPM_OFF) { + int error; - list_move_tail(entry, &dpm_off); - resume_device_early(dev); - } + dev->power.status = DPM_OFF; + error = resume_device_noirq(dev, state); + if (error) + pm_dev_err(dev, state, " early", error); + } } /** * device_power_up - Turn on all devices that need special attention. + * @state: PM transition of the system being carried out. * * Power on system devices, then devices that required we shut them down * with interrupts disabled. * * Must be called with interrupts disabled. */ -void device_power_up(void) +void device_power_up(pm_message_t state) { sysdev_resume(); - dpm_power_up(); + dpm_power_up(state); } EXPORT_SYMBOL_GPL(device_power_up); /** * resume_device - Restore state for one device. * @dev: Device. - * + * @state: PM transition of the system being carried out. 
*/ -static int resume_device(struct device *dev) +static int resume_device(struct device *dev, pm_message_t state) { int error = 0; @@ -175,21 +360,40 @@ static int resume_device(struct device *dev) down(&dev->sem); - if (dev->bus && dev->bus->resume) { - dev_dbg(dev,"resuming\n"); - error = dev->bus->resume(dev); + if (dev->bus) { + if (dev->bus->pm) { + pm_dev_dbg(dev, state, ""); + error = pm_op(dev, &dev->bus->pm->base, state); + } else if (dev->bus->resume) { + pm_dev_dbg(dev, state, "legacy "); + error = dev->bus->resume(dev); + } + if (error) + goto End; } - if (!error && dev->type && dev->type->resume) { - dev_dbg(dev,"resuming\n"); - error = dev->type->resume(dev); + if (dev->type) { + if (dev->type->pm) { + pm_dev_dbg(dev, state, "type "); + error = pm_op(dev, dev->type->pm, state); + } else if (dev->type->resume) { + pm_dev_dbg(dev, state, "legacy type "); + error = dev->type->resume(dev); + } + if (error) + goto End; } - if (!error && dev->class && dev->class->resume) { - dev_dbg(dev,"class resume\n"); - error = dev->class->resume(dev); + if (dev->class) { + if (dev->class->pm) { + pm_dev_dbg(dev, state, "class "); + error = pm_op(dev, dev->class->pm, state); + } else if (dev->class->resume) { + pm_dev_dbg(dev, state, "legacy class "); + error = dev->class->resume(dev); + } } - + End: up(&dev->sem); TRACE_RESUME(error); @@ -198,78 +402,161 @@ static int resume_device(struct device *dev) /** * dpm_resume - Resume every device. + * @state: PM transition of the system being carried out. * - * Resume the devices that have either not gone through - * the late suspend, or that did go through it but also - * went through the early resume. + * Execute the appropriate "resume" callback for all devices the status of + * which indicates that they are inactive. + */ +static void dpm_resume(pm_message_t state) +{ + struct list_head list; + + INIT_LIST_HEAD(&list); + mutex_lock(&dpm_list_mtx); + transition_started = false; + while (!list_empty(&dpm_list)) { + struct device *dev = to_device(dpm_list.next); + + get_device(dev); + if (dev->power.status >= DPM_OFF) { + int error; + + dev->power.status = DPM_RESUMING; + mutex_unlock(&dpm_list_mtx); + + error = resume_device(dev, state); + + mutex_lock(&dpm_list_mtx); + if (error) + pm_dev_err(dev, state, "", error); + } else if (dev->power.status == DPM_SUSPENDING) { + /* Allow new children of the device to be registered */ + dev->power.status = DPM_RESUMING; + } + if (!list_empty(&dev->power.entry)) + list_move_tail(&dev->power.entry, &list); + put_device(dev); + } + list_splice(&list, &dpm_list); + mutex_unlock(&dpm_list_mtx); +} + +/** + * complete_device - Complete a PM transition for given device + * @dev: Device. + * @state: PM transition of the system being carried out. + */ +static void complete_device(struct device *dev, pm_message_t state) +{ + down(&dev->sem); + + if (dev->class && dev->class->pm && dev->class->pm->complete) { + pm_dev_dbg(dev, state, "completing class "); + dev->class->pm->complete(dev); + } + + if (dev->type && dev->type->pm && dev->type->pm->complete) { + pm_dev_dbg(dev, state, "completing type "); + dev->type->pm->complete(dev); + } + + if (dev->bus && dev->bus->pm && dev->bus->pm->base.complete) { + pm_dev_dbg(dev, state, "completing "); + dev->bus->pm->base.complete(dev); + } + + up(&dev->sem); +} + +/** + * dpm_complete - Complete a PM transition for all devices. + * @state: PM transition of the system being carried out. 
* - * Take devices from the dpm_off_list, resume them, - * and put them on the dpm_locked list. + * Execute the ->complete() callbacks for all devices that are not marked + * as DPM_ON. */ -static void dpm_resume(void) +static void dpm_complete(pm_message_t state) { + struct list_head list; + + INIT_LIST_HEAD(&list); mutex_lock(&dpm_list_mtx); - all_sleeping = false; - while(!list_empty(&dpm_off)) { - struct list_head *entry = dpm_off.next; - struct device *dev = to_device(entry); + while (!list_empty(&dpm_list)) { + struct device *dev = to_device(dpm_list.prev); - list_move_tail(entry, &dpm_active); - dev->power.sleeping = false; - mutex_unlock(&dpm_list_mtx); - resume_device(dev); - mutex_lock(&dpm_list_mtx); + get_device(dev); + if (dev->power.status > DPM_ON) { + dev->power.status = DPM_ON; + mutex_unlock(&dpm_list_mtx); + + complete_device(dev, state); + + mutex_lock(&dpm_list_mtx); + } + if (!list_empty(&dev->power.entry)) + list_move(&dev->power.entry, &list); + put_device(dev); } + list_splice(&list, &dpm_list); mutex_unlock(&dpm_list_mtx); } /** * device_resume - Restore state of each device in system. + * @state: PM transition of the system being carried out. * * Resume all the devices, unlock them all, and allow new * devices to be registered once again. */ -void device_resume(void) +void device_resume(pm_message_t state) { might_sleep(); - dpm_resume(); + dpm_resume(state); + dpm_complete(state); } EXPORT_SYMBOL_GPL(device_resume); /*------------------------- Suspend routines -------------------------*/ -static inline char *suspend_verb(u32 event) +/** + * resume_event - return a PM message representing the resume event + * corresponding to given sleep state. + * @sleep_state: PM message representing a sleep state. + */ +static pm_message_t resume_event(pm_message_t sleep_state) { - switch (event) { - case PM_EVENT_SUSPEND: return "suspend"; - case PM_EVENT_FREEZE: return "freeze"; - case PM_EVENT_PRETHAW: return "prethaw"; - default: return "(unknown suspend event)"; + switch (sleep_state.event) { + case PM_EVENT_SUSPEND: + return PMSG_RESUME; + case PM_EVENT_FREEZE: + case PM_EVENT_QUIESCE: + return PMSG_RECOVER; + case PM_EVENT_HIBERNATE: + return PMSG_RESTORE; } -} - -static void -suspend_device_dbg(struct device *dev, pm_message_t state, char *info) -{ - dev_dbg(dev, "%s%s%s\n", info, suspend_verb(state.event), - ((state.event == PM_EVENT_SUSPEND) && device_may_wakeup(dev)) ? - ", may wakeup" : ""); + return PMSG_ON; } /** - * suspend_device_late - Shut down one device (late suspend). + * suspend_device_noirq - Shut down one device (late suspend). * @dev: Device. - * @state: Power state device is entering. + * @state: PM transition of the system being carried out. * * This is called with interrupts off and only a single CPU running. */ -static int suspend_device_late(struct device *dev, pm_message_t state) +static int suspend_device_noirq(struct device *dev, pm_message_t state) { int error = 0; - if (dev->bus && dev->bus->suspend_late) { - suspend_device_dbg(dev, state, "LATE "); + if (!dev->bus) + return 0; + + if (dev->bus->pm) { + pm_dev_dbg(dev, state, "LATE "); + error = pm_noirq_op(dev, dev->bus->pm, state); + } else if (dev->bus->suspend_late) { + pm_dev_dbg(dev, state, "legacy LATE "); error = dev->bus->suspend_late(dev, state); suspend_report_result(dev->bus->suspend_late, error); } @@ -278,37 +565,30 @@ static int suspend_device_late(struct device *dev, pm_message_t state) /** * device_power_down - Shut down special devices. - * @state: Power state to enter. 
+ * @state: PM transition of the system being carried out. * - * Power down devices that require interrupts to be disabled - * and move them from the dpm_off list to the dpm_off_irq list. + * Power down devices that require interrupts to be disabled. * Then power down system devices. * * Must be called with interrupts disabled and only one CPU running. */ int device_power_down(pm_message_t state) { + struct device *dev; int error = 0; - while (!list_empty(&dpm_off)) { - struct list_head *entry = dpm_off.prev; - struct device *dev = to_device(entry); - - error = suspend_device_late(dev, state); + list_for_each_entry_reverse(dev, &dpm_list, power.entry) { + error = suspend_device_noirq(dev, state); if (error) { - printk(KERN_ERR "Could not power down device %s: " - "error %d\n", - kobject_name(&dev->kobj), error); + pm_dev_err(dev, state, " late", error); break; } - if (!list_empty(&dev->power.entry)) - list_move(&dev->power.entry, &dpm_off_irq); + dev->power.status = DPM_OFF_IRQ; } - if (!error) error = sysdev_suspend(state); if (error) - dpm_power_up(); + dpm_power_up(resume_event(state)); return error; } EXPORT_SYMBOL_GPL(device_power_down); @@ -316,7 +596,7 @@ EXPORT_SYMBOL_GPL(device_power_down); /** * suspend_device - Save state of one device. * @dev: Device. - * @state: Power state device is entering. + * @state: PM transition of the system being carried out. */ static int suspend_device(struct device *dev, pm_message_t state) { @@ -324,24 +604,43 @@ static int suspend_device(struct device *dev, pm_message_t state) down(&dev->sem); - if (dev->class && dev->class->suspend) { - suspend_device_dbg(dev, state, "class "); - error = dev->class->suspend(dev, state); - suspend_report_result(dev->class->suspend, error); + if (dev->class) { + if (dev->class->pm) { + pm_dev_dbg(dev, state, "class "); + error = pm_op(dev, dev->class->pm, state); + } else if (dev->class->suspend) { + pm_dev_dbg(dev, state, "legacy class "); + error = dev->class->suspend(dev, state); + suspend_report_result(dev->class->suspend, error); + } + if (error) + goto End; } - if (!error && dev->type && dev->type->suspend) { - suspend_device_dbg(dev, state, "type "); - error = dev->type->suspend(dev, state); - suspend_report_result(dev->type->suspend, error); + if (dev->type) { + if (dev->type->pm) { + pm_dev_dbg(dev, state, "type "); + error = pm_op(dev, dev->type->pm, state); + } else if (dev->type->suspend) { + pm_dev_dbg(dev, state, "legacy type "); + error = dev->type->suspend(dev, state); + suspend_report_result(dev->type->suspend, error); + } + if (error) + goto End; } - if (!error && dev->bus && dev->bus->suspend) { - suspend_device_dbg(dev, state, ""); - error = dev->bus->suspend(dev, state); - suspend_report_result(dev->bus->suspend, error); + if (dev->bus) { + if (dev->bus->pm) { + pm_dev_dbg(dev, state, ""); + error = pm_op(dev, &dev->bus->pm->base, state); + } else if (dev->bus->suspend) { + pm_dev_dbg(dev, state, "legacy "); + error = dev->bus->suspend(dev, state); + suspend_report_result(dev->bus->suspend, error); + } } - + End: up(&dev->sem); return error; @@ -349,67 +648,139 @@ static int suspend_device(struct device *dev, pm_message_t state) /** * dpm_suspend - Suspend every device. - * @state: Power state to put each device in. - * - * Walk the dpm_locked list. Suspend each device and move it - * to the dpm_off list. + * @state: PM transition of the system being carried out. 
* - * (For historical reasons, if it returns -EAGAIN, that used to mean - * that the device would be called again with interrupts disabled. - * These days, we use the "suspend_late()" callback for that, so we - * print a warning and consider it an error). + * Execute the appropriate "suspend" callbacks for all devices. */ static int dpm_suspend(pm_message_t state) { + struct list_head list; int error = 0; + INIT_LIST_HEAD(&list); mutex_lock(&dpm_list_mtx); - while (!list_empty(&dpm_active)) { - struct list_head *entry = dpm_active.prev; - struct device *dev = to_device(entry); + while (!list_empty(&dpm_list)) { + struct device *dev = to_device(dpm_list.prev); - WARN_ON(dev->parent && dev->parent->power.sleeping); - - dev->power.sleeping = true; + get_device(dev); mutex_unlock(&dpm_list_mtx); + error = suspend_device(dev, state); + mutex_lock(&dpm_list_mtx); if (error) { - printk(KERN_ERR "Could not suspend device %s: " - "error %d%s\n", - kobject_name(&dev->kobj), - error, - (error == -EAGAIN ? - " (please convert to suspend_late)" : - "")); - dev->power.sleeping = false; + pm_dev_err(dev, state, "", error); + put_device(dev); break; } + dev->power.status = DPM_OFF; if (!list_empty(&dev->power.entry)) - list_move(&dev->power.entry, &dpm_off); + list_move(&dev->power.entry, &list); + put_device(dev); } - if (!error) - all_sleeping = true; + list_splice(&list, dpm_list.prev); mutex_unlock(&dpm_list_mtx); + return error; +} + +/** + * prepare_device - Execute the ->prepare() callback(s) for given device. + * @dev: Device. + * @state: PM transition of the system being carried out. + */ +static int prepare_device(struct device *dev, pm_message_t state) +{ + int error = 0; + + down(&dev->sem); + + if (dev->bus && dev->bus->pm && dev->bus->pm->base.prepare) { + pm_dev_dbg(dev, state, "preparing "); + error = dev->bus->pm->base.prepare(dev); + suspend_report_result(dev->bus->pm->base.prepare, error); + if (error) + goto End; + } + + if (dev->type && dev->type->pm && dev->type->pm->prepare) { + pm_dev_dbg(dev, state, "preparing type "); + error = dev->type->pm->prepare(dev); + suspend_report_result(dev->type->pm->prepare, error); + if (error) + goto End; + } + + if (dev->class && dev->class->pm && dev->class->pm->prepare) { + pm_dev_dbg(dev, state, "preparing class "); + error = dev->class->pm->prepare(dev); + suspend_report_result(dev->class->pm->prepare, error); + } + End: + up(&dev->sem); + + return error; +} + +/** + * dpm_prepare - Prepare all devices for a PM transition. + * @state: PM transition of the system being carried out. + * + * Execute the ->prepare() callback for all devices. 
+ */ +static int dpm_prepare(pm_message_t state) +{ + struct list_head list; + int error = 0; + + INIT_LIST_HEAD(&list); + mutex_lock(&dpm_list_mtx); + transition_started = true; + while (!list_empty(&dpm_list)) { + struct device *dev = to_device(dpm_list.next); + + get_device(dev); + dev->power.status = DPM_PREPARING; + mutex_unlock(&dpm_list_mtx); + error = prepare_device(dev, state); + + mutex_lock(&dpm_list_mtx); + if (error) { + dev->power.status = DPM_ON; + if (error == -EAGAIN) { + put_device(dev); + continue; + } + printk(KERN_ERR "PM: Failed to prepare device %s " + "for power transition: error %d\n", + kobject_name(&dev->kobj), error); + put_device(dev); + break; + } + dev->power.status = DPM_SUSPENDING; + if (!list_empty(&dev->power.entry)) + list_move_tail(&dev->power.entry, &list); + put_device(dev); + } + list_splice(&list, &dpm_list); + mutex_unlock(&dpm_list_mtx); return error; } /** * device_suspend - Save state and stop all devices in system. - * @state: new power management state + * @state: PM transition of the system being carried out. * - * Prevent new devices from being registered, then lock all devices - * and suspend them. + * Prepare and suspend all devices. */ int device_suspend(pm_message_t state) { int error; might_sleep(); - error = dpm_suspend(state); - if (error) - device_resume(); + error = dpm_prepare(state); + if (!error) + error = dpm_suspend(state); return error; } EXPORT_SYMBOL_GPL(device_suspend); diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index a6894f2a4b99..a3252c0e2887 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h @@ -4,7 +4,7 @@ * main.c */ -extern struct list_head dpm_active; /* The active device list */ +extern struct list_head dpm_list; /* The active device list */ static inline struct device *to_device(struct list_head *entry) { diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index d11f74b038db..596aeecfdffe 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -6,9 +6,6 @@ #include <linux/string.h> #include "power.h" -int (*platform_enable_wakeup)(struct device *dev, int is_on); - - /* * wakeup - Report/change current wakeup option for device * diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c index 87a7f1d02578..9b1b20b59e0a 100644 --- a/drivers/base/power/trace.c +++ b/drivers/base/power/trace.c @@ -188,9 +188,9 @@ static int show_file_hash(unsigned int value) static int show_dev_hash(unsigned int value) { int match = 0; - struct list_head * entry = dpm_active.prev; + struct list_head *entry = dpm_list.prev; - while (entry != &dpm_active) { + while (entry != &dpm_list) { struct device * dev = to_device(entry); unsigned int hash = hash_string(DEVSEED, dev->bus_id, DEVHASH); if (hash == value) { diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index 0d1d2133d9bc..61ad8d639ba3 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -433,4 +433,16 @@ config VIRTIO_BLK This is the virtual block driver for virtio. It can be used with lguest or QEMU based VMMs (like KVM or Xen). Say Y or M. +config BLK_DEV_HD + bool "Very old hard disk (MFM/RLL/IDE) driver" + depends on HAVE_IDE + depends on !ARM || ARCH_RPC || ARCH_SHARK || BROKEN + help + This is a very old hard disk driver that lacks the enhanced + functionality of the newer ones. + + It is required for systems with ancient MFM/RLL/ESDI drives. + + If unsure, say N. 
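For context on the drivers/base/power/main.c rework above: device_suspend(), device_power_down(), device_power_up() and device_resume() now all take a pm_message_t describing the transition. The sketch below is a minimal, hypothetical illustration of how a caller is expected to order these phases after this patch; the actual caller (kernel/power/main.c) is outside this drivers-only diff, and the helper name example_suspend_devices() is made up.

/*
 * Hypothetical caller-side sketch (not part of this patch): ordering of
 * the reworked PM core entry points for a suspend-to-RAM transition.
 */
#include <linux/pm.h>
#include <linux/irqflags.h>

static int example_suspend_devices(void)
{
	int error;

	/* dpm_prepare() + dpm_suspend(): the ->prepare() and ->suspend() phases */
	error = device_suspend(PMSG_SUSPEND);
	if (error)
		return error;

	local_irq_disable();

	/* the ->suspend_noirq() (legacy suspend_late()) phase, then sysdevs */
	error = device_power_down(PMSG_SUSPEND);
	if (!error) {
		/* the platform would enter the sleep state here */

		/* sysdevs, then the ->resume_noirq() (legacy resume_early()) phase */
		device_power_up(PMSG_RESUME);
	}

	local_irq_enable();

	/* dpm_resume() + dpm_complete(): the ->resume() and ->complete() phases */
	device_resume(PMSG_RESUME);
	return error;
}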
+ endif # BLK_DEV diff --git a/drivers/block/Makefile b/drivers/block/Makefile index 5e584306be99..204332b29578 100644 --- a/drivers/block/Makefile +++ b/drivers/block/Makefile @@ -29,5 +29,6 @@ obj-$(CONFIG_VIRTIO_BLK) += virtio_blk.o obj-$(CONFIG_VIODASD) += viodasd.o obj-$(CONFIG_BLK_DEV_SX8) += sx8.o obj-$(CONFIG_BLK_DEV_UB) += ub.o +obj-$(CONFIG_BLK_DEV_HD) += hd.o obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o diff --git a/drivers/ide/legacy/hd.c b/drivers/block/hd.c index abdedf56643e..682243bf2e46 100644 --- a/drivers/ide/legacy/hd.c +++ b/drivers/block/hd.c @@ -37,7 +37,6 @@ #include <linux/slab.h> #include <linux/string.h> #include <linux/ioport.h> -#include <linux/mc146818rtc.h> /* CMOS defines */ #include <linux/init.h> #include <linux/blkpg.h> #include <linux/hdreg.h> @@ -812,4 +811,4 @@ static int __init parse_hd_setup(char *line) } __setup("hd=", parse_hd_setup); -module_init(hd_init); +late_initcall(hd_init); diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 2d854bb9373e..650e6b44ce65 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -649,6 +649,14 @@ config HVCS which will also be compiled when this driver is built as a module. +config IBM_BSR + tristate "IBM POWER Barrier Synchronization Register support" + depends on PPC_PSERIES + help + This devices exposes a hardware mechanism for fast synchronization + of threads across a large system which avoids bouncing a cacheline + between several cores on a system + source "drivers/char/ipmi/Kconfig" config DS1620 diff --git a/drivers/char/Makefile b/drivers/char/Makefile index 81630a68475c..0e0d12a06462 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile @@ -57,6 +57,7 @@ obj-$(CONFIG_MMTIMER) += mmtimer.o obj-$(CONFIG_VIOCONS) += viocons.o obj-$(CONFIG_VIOTAPE) += viotape.o obj-$(CONFIG_HVCS) += hvcs.o +obj-$(CONFIG_IBM_BSR) += bsr.o obj-$(CONFIG_SGI_MBCS) += mbcs.o obj-$(CONFIG_BRIQ_PANEL) += briq_panel.o obj-$(CONFIG_BFIN_OTP) += bfin-otp.o diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c index da8a1658a273..aaca40283be9 100644 --- a/drivers/char/apm-emulation.c +++ b/drivers/char/apm-emulation.c @@ -59,6 +59,55 @@ struct apm_queue { }; /* + * thread states (for threads using a writable /dev/apm_bios fd): + * + * SUSPEND_NONE: nothing happening + * SUSPEND_PENDING: suspend event queued for thread and pending to be read + * SUSPEND_READ: suspend event read, pending acknowledgement + * SUSPEND_ACKED: acknowledgement received from thread (via ioctl), + * waiting for resume + * SUSPEND_ACKTO: acknowledgement timeout + * SUSPEND_DONE: thread had acked suspend and is now notified of + * resume + * + * SUSPEND_WAIT: this thread invoked suspend and is waiting for resume + * + * A thread migrates in one of three paths: + * NONE -1-> PENDING -2-> READ -3-> ACKED -4-> DONE -5-> NONE + * -6-> ACKTO -7-> NONE + * NONE -8-> WAIT -9-> NONE + * + * While in PENDING or READ, the thread is accounted for in the + * suspend_acks_pending counter. 
+ * + * The transitions are invoked as follows: + * 1: suspend event is signalled from the core PM code + * 2: the suspend event is read from the fd by the userspace thread + * 3: userspace thread issues the APM_IOC_SUSPEND ioctl (as ack) + * 4: core PM code signals that we have resumed + * 5: APM_IOC_SUSPEND ioctl returns + * + * 6: the notifier invoked from the core PM code timed out waiting + * for all relevant threds to enter ACKED state and puts those + * that haven't into ACKTO + * 7: those threads issue APM_IOC_SUSPEND ioctl too late, + * get an error + * + * 8: userspace thread issues the APM_IOC_SUSPEND ioctl (to suspend), + * ioctl code invokes pm_suspend() + * 9: pm_suspend() returns indicating resume + */ +enum apm_suspend_state { + SUSPEND_NONE, + SUSPEND_PENDING, + SUSPEND_READ, + SUSPEND_ACKED, + SUSPEND_ACKTO, + SUSPEND_WAIT, + SUSPEND_DONE, +}; + +/* * The per-file APM data */ struct apm_user { @@ -69,13 +118,7 @@ struct apm_user { unsigned int reader: 1; int suspend_result; - unsigned int suspend_state; -#define SUSPEND_NONE 0 /* no suspend pending */ -#define SUSPEND_PENDING 1 /* suspend pending read */ -#define SUSPEND_READ 2 /* suspend read, pending ack */ -#define SUSPEND_ACKED 3 /* suspend acked */ -#define SUSPEND_WAIT 4 /* waiting for suspend */ -#define SUSPEND_DONE 5 /* suspend completed */ + enum apm_suspend_state suspend_state; struct apm_queue queue; }; @@ -83,7 +126,8 @@ struct apm_user { /* * Local variables */ -static int suspends_pending; +static atomic_t suspend_acks_pending = ATOMIC_INIT(0); +static atomic_t userspace_notification_inhibit = ATOMIC_INIT(0); static int apm_disabled; static struct task_struct *kapmd_tsk; @@ -166,78 +210,6 @@ static void queue_event(apm_event_t event) wake_up_interruptible(&apm_waitqueue); } -/* - * queue_suspend_event - queue an APM suspend event. - * - * Check that we're in a state where we can suspend. If not, - * return -EBUSY. Otherwise, queue an event to all "writer" - * users. If there are no "writer" users, return '1' to - * indicate that we can immediately suspend. - */ -static int queue_suspend_event(apm_event_t event, struct apm_user *sender) -{ - struct apm_user *as; - int ret = 1; - - mutex_lock(&state_lock); - down_read(&user_list_lock); - - /* - * If a thread is still processing, we can't suspend, so reject - * the request. - */ - list_for_each_entry(as, &apm_user_list, list) { - if (as != sender && as->reader && as->writer && as->suser && - as->suspend_state != SUSPEND_NONE) { - ret = -EBUSY; - goto out; - } - } - - list_for_each_entry(as, &apm_user_list, list) { - if (as != sender && as->reader && as->writer && as->suser) { - as->suspend_state = SUSPEND_PENDING; - suspends_pending++; - queue_add_event(&as->queue, event); - ret = 0; - } - } - out: - up_read(&user_list_lock); - mutex_unlock(&state_lock); - wake_up_interruptible(&apm_waitqueue); - return ret; -} - -static void apm_suspend(void) -{ - struct apm_user *as; - int err = pm_suspend(PM_SUSPEND_MEM); - - /* - * Anyone on the APM queues will think we're still suspended. - * Send a message so everyone knows we're now awake again. - */ - queue_event(APM_NORMAL_RESUME); - - /* - * Finally, wake up anyone who is sleeping on the suspend. 
- */ - mutex_lock(&state_lock); - down_read(&user_list_lock); - list_for_each_entry(as, &apm_user_list, list) { - if (as->suspend_state == SUSPEND_WAIT || - as->suspend_state == SUSPEND_ACKED) { - as->suspend_result = err; - as->suspend_state = SUSPEND_DONE; - } - } - up_read(&user_list_lock); - mutex_unlock(&state_lock); - - wake_up(&apm_suspend_waitqueue); -} - static ssize_t apm_read(struct file *fp, char __user *buf, size_t count, loff_t *ppos) { struct apm_user *as = fp->private_data; @@ -308,25 +280,22 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg) as->suspend_result = -EINTR; - if (as->suspend_state == SUSPEND_READ) { - int pending; - + switch (as->suspend_state) { + case SUSPEND_READ: /* * If we read a suspend command from /dev/apm_bios, * then the corresponding APM_IOC_SUSPEND ioctl is * interpreted as an acknowledge. */ as->suspend_state = SUSPEND_ACKED; - suspends_pending--; - pending = suspends_pending == 0; + atomic_dec(&suspend_acks_pending); mutex_unlock(&state_lock); /* - * If there are no further acknowledges required, - * suspend the system. + * suspend_acks_pending changed, the notifier needs to + * be woken up for this */ - if (pending) - apm_suspend(); + wake_up(&apm_suspend_waitqueue); /* * Wait for the suspend/resume to complete. If there @@ -342,35 +311,21 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg) * try_to_freeze() in freezer_count() will not trigger */ freezer_count(); - } else { + break; + case SUSPEND_ACKTO: + as->suspend_result = -ETIMEDOUT; + mutex_unlock(&state_lock); + break; + default: as->suspend_state = SUSPEND_WAIT; mutex_unlock(&state_lock); /* * Otherwise it is a request to suspend the system. - * Queue an event for all readers, and expect an - * acknowledge from all writers who haven't already - * acknowledged. - */ - err = queue_suspend_event(APM_USER_SUSPEND, as); - if (err < 0) { - /* - * Avoid taking the lock here - this - * should be fine. - */ - as->suspend_state = SUSPEND_NONE; - break; - } - - if (err > 0) - apm_suspend(); - - /* - * Wait for the suspend/resume to complete. If there - * are pending acknowledges, we wait here for them. + * Just invoke pm_suspend(), we'll handle it from + * there via the notifier. */ - wait_event_freezable(apm_suspend_waitqueue, - as->suspend_state == SUSPEND_DONE); + as->suspend_result = pm_suspend(PM_SUSPEND_MEM); } mutex_lock(&state_lock); @@ -386,7 +341,6 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg) static int apm_release(struct inode * inode, struct file * filp) { struct apm_user *as = filp->private_data; - int pending = 0; filp->private_data = NULL; @@ -396,18 +350,15 @@ static int apm_release(struct inode * inode, struct file * filp) /* * We are now unhooked from the chain. As far as new - * events are concerned, we no longer exist. However, we - * need to balance suspends_pending, which means the - * possibility of sleeping. + * events are concerned, we no longer exist. 
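The acknowledge protocol that apm_ioctl() now implements (a suspend event read from /dev/apm_bios is answered with APM_IOC_SUSPEND, which only returns once the box has resumed, or fails with -ETIMEDOUT if the ack arrived after the notifier gave up) looks roughly like this from user space. A sketch only: it assumes the apm_event_t typedef and APM_* constants are visible via <linux/apm_bios.h> and that the process has the privileges the driver checks for:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/apm_bios.h>

static void apm_ack_loop(void)
{
	int fd = open("/dev/apm_bios", O_RDWR);	/* read/write: a "writer" the core waits for */
	apm_event_t ev;

	while (fd >= 0 && read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
		if (ev == APM_USER_SUSPEND)
			/* the ack; returns only after resume (or -ETIMEDOUT) */
			ioctl(fd, APM_IOC_SUSPEND, 0);
	}
}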
*/ mutex_lock(&state_lock); - if (as->suspend_state != SUSPEND_NONE) { - suspends_pending -= 1; - pending = suspends_pending == 0; - } + if (as->suspend_state == SUSPEND_PENDING || + as->suspend_state == SUSPEND_READ) + atomic_dec(&suspend_acks_pending); mutex_unlock(&state_lock); - if (pending) - apm_suspend(); + + wake_up(&apm_suspend_waitqueue); kfree(as); return 0; @@ -545,7 +496,6 @@ static int kapmd(void *arg) { do { apm_event_t event; - int ret; wait_event_interruptible(kapmd_wait, !queue_empty(&kapmd_queue) || kthread_should_stop()); @@ -570,20 +520,13 @@ static int kapmd(void *arg) case APM_USER_SUSPEND: case APM_SYS_SUSPEND: - ret = queue_suspend_event(event, NULL); - if (ret < 0) { - /* - * We were busy. Try again in 50ms. - */ - queue_add_event(&kapmd_queue, event); - msleep(50); - } - if (ret > 0) - apm_suspend(); + pm_suspend(PM_SUSPEND_MEM); break; case APM_CRITICAL_SUSPEND: - apm_suspend(); + atomic_inc(&userspace_notification_inhibit); + pm_suspend(PM_SUSPEND_MEM); + atomic_dec(&userspace_notification_inhibit); break; } } while (1); @@ -591,6 +534,120 @@ static int kapmd(void *arg) return 0; } +static int apm_suspend_notifier(struct notifier_block *nb, + unsigned long event, + void *dummy) +{ + struct apm_user *as; + int err; + + /* short-cut emergency suspends */ + if (atomic_read(&userspace_notification_inhibit)) + return NOTIFY_DONE; + + switch (event) { + case PM_SUSPEND_PREPARE: + /* + * Queue an event to all "writer" users that we want + * to suspend and need their ack. + */ + mutex_lock(&state_lock); + down_read(&user_list_lock); + + list_for_each_entry(as, &apm_user_list, list) { + if (as->suspend_state != SUSPEND_WAIT && as->reader && + as->writer && as->suser) { + as->suspend_state = SUSPEND_PENDING; + atomic_inc(&suspend_acks_pending); + queue_add_event(&as->queue, APM_USER_SUSPEND); + } + } + + up_read(&user_list_lock); + mutex_unlock(&state_lock); + wake_up_interruptible(&apm_waitqueue); + + /* + * Wait for the the suspend_acks_pending variable to drop to + * zero, meaning everybody acked the suspend event (or the + * process was killed.) + * + * If the app won't answer within a short while we assume it + * locked up and ignore it. + */ + err = wait_event_interruptible_timeout( + apm_suspend_waitqueue, + atomic_read(&suspend_acks_pending) == 0, + 5*HZ); + + /* timed out */ + if (err == 0) { + /* + * Move anybody who timed out to "ack timeout" state. + * + * We could time out and the userspace does the ACK + * right after we time out but before we enter the + * locked section here, but that's fine. + */ + mutex_lock(&state_lock); + down_read(&user_list_lock); + list_for_each_entry(as, &apm_user_list, list) { + if (as->suspend_state == SUSPEND_PENDING || + as->suspend_state == SUSPEND_READ) { + as->suspend_state = SUSPEND_ACKTO; + atomic_dec(&suspend_acks_pending); + } + } + up_read(&user_list_lock); + mutex_unlock(&state_lock); + } + + /* let suspend proceed */ + if (err >= 0) + return NOTIFY_OK; + + /* interrupted by signal */ + return NOTIFY_BAD; + + case PM_POST_SUSPEND: + /* + * Anyone on the APM queues will think we're still suspended. + * Send a message so everyone knows we're now awake again. + */ + queue_event(APM_NORMAL_RESUME); + + /* + * Finally, wake up anyone who is sleeping on the suspend. 
+ */ + mutex_lock(&state_lock); + down_read(&user_list_lock); + list_for_each_entry(as, &apm_user_list, list) { + if (as->suspend_state == SUSPEND_ACKED) { + /* + * TODO: maybe grab error code, needs core + * changes to push the error to the notifier + * chain (could use the second parameter if + * implemented) + */ + as->suspend_result = 0; + as->suspend_state = SUSPEND_DONE; + } + } + up_read(&user_list_lock); + mutex_unlock(&state_lock); + + wake_up(&apm_suspend_waitqueue); + return NOTIFY_OK; + + default: + return NOTIFY_DONE; + } +} + +static struct notifier_block apm_notif_block = { + .notifier_call = apm_suspend_notifier, +}; + static int __init apm_init(void) { int ret; @@ -604,7 +661,7 @@ static int __init apm_init(void) if (IS_ERR(kapmd_tsk)) { ret = PTR_ERR(kapmd_tsk); kapmd_tsk = NULL; - return ret; + goto out; } wake_up_process(kapmd_tsk); @@ -613,16 +670,27 @@ static int __init apm_init(void) #endif ret = misc_register(&apm_device); - if (ret != 0) { - remove_proc_entry("apm", NULL); - kthread_stop(kapmd_tsk); - } + if (ret) + goto out_stop; + ret = register_pm_notifier(&apm_notif_block); + if (ret) + goto out_unregister; + + return 0; + + out_unregister: + misc_deregister(&apm_device); + out_stop: + remove_proc_entry("apm", NULL); + kthread_stop(kapmd_tsk); + out: return ret; } static void __exit apm_exit(void) { + unregister_pm_notifier(&apm_notif_block); misc_deregister(&apm_device); remove_proc_entry("apm", NULL); diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c new file mode 100644 index 000000000000..b650b4e48e50 --- /dev/null +++ b/drivers/char/bsr.c @@ -0,0 +1,312 @@ +/* IBM POWER Barrier Synchronization Register Driver + * + * Copyright IBM Corporation 2008 + * + * Author: Sonny Rao <sonnyrao@us.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/kernel.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_platform.h> +#include <linux/module.h> +#include <linux/cdev.h> +#include <linux/list.h> +#include <linux/mm.h> +#include <asm/io.h> + +/* + This driver exposes a special register which can be used for fast + synchronization across a large SMP machine. The hardware is exposed + as an array of bytes where each process will write to one of the bytes to + indicate it has finished the current stage and this update is broadcast to + all processors without having to bounce a cacheline between them. In + POWER5 and POWER6 there is one of these registers per SMP, but it is + presented in two forms; first, it is given as a whole and then as a number + of smaller registers which alias to parts of the single whole register. + This can potentially allow multiple groups of processes to each have their + own private synchronization device. + + Note that this hardware *must* be written to using *only* single byte writes. 
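From user space, the rules in this comment (single-byte stores only, naturally aligned loads, a full sync around the update, continued just below) boil down to a few lines. A hypothetical sketch; the device name, the 4096-byte mapping and the __sync_synchronize() barrier are illustrative stand-ins, not anything defined by this patch:

#include <fcntl.h>
#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

static int bsr_signal_stage(unsigned int my_byte)
{
	int fd = open("/dev/bsr64_0", O_RDWR);	/* device name is illustrative */
	volatile uint8_t *bsr;

	if (fd < 0)
		return -1;
	bsr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (bsr == MAP_FAILED) {
		close(fd);
		return -1;
	}
	__sync_synchronize();		/* stand-in for the full sync before... */
	bsr[my_byte] = 1;		/* single-byte store, as required */
	__sync_synchronize();		/* ...and after the BSR update */
	munmap((void *)bsr, 4096);
	close(fd);
	return 0;
}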
+ It may be read using 1, 2, 4, or 8 byte loads which must be aligned since + this region is treated as cache-inhibited processes should also use a + full sync before and after writing to the BSR to ensure all stores and + the BSR update have made it to all chips in the system +*/ + +/* This is arbitrary number, up to Power6 it's been 17 or fewer */ +#define BSR_MAX_DEVS (32) + +struct bsr_dev { + u64 bsr_addr; /* Real address */ + u64 bsr_len; /* length of mem region we can map */ + unsigned bsr_bytes; /* size of the BSR reg itself */ + unsigned bsr_stride; /* interval at which BSR repeats in the page */ + unsigned bsr_type; /* maps to enum below */ + unsigned bsr_num; /* bsr id number for its type */ + int bsr_minor; + + dev_t bsr_dev; + struct cdev bsr_cdev; + struct device *bsr_device; + char bsr_name[32]; + +}; + +static unsigned num_bsr_devs; +static struct bsr_dev *bsr_devs; +static struct class *bsr_class; +static int bsr_major; + +enum { + BSR_8 = 0, + BSR_16 = 1, + BSR_64 = 2, + BSR_128 = 3, + BSR_UNKNOWN = 4, + BSR_MAX = 5, +}; + +static unsigned bsr_types[BSR_MAX]; + +static ssize_t +bsr_size_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct bsr_dev *bsr_dev = dev_get_drvdata(dev); + return sprintf(buf, "%u\n", bsr_dev->bsr_bytes); +} + +static ssize_t +bsr_stride_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct bsr_dev *bsr_dev = dev_get_drvdata(dev); + return sprintf(buf, "%u\n", bsr_dev->bsr_stride); +} + +static ssize_t +bsr_len_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct bsr_dev *bsr_dev = dev_get_drvdata(dev); + return sprintf(buf, "%lu\n", bsr_dev->bsr_len); +} + +static struct device_attribute bsr_dev_attrs[] = { + __ATTR(bsr_size, S_IRUGO, bsr_size_show, NULL), + __ATTR(bsr_stride, S_IRUGO, bsr_stride_show, NULL), + __ATTR(bsr_length, S_IRUGO, bsr_len_show, NULL), + __ATTR_NULL +}; + +static int bsr_mmap(struct file *filp, struct vm_area_struct *vma) +{ + unsigned long size = vma->vm_end - vma->vm_start; + struct bsr_dev *dev = filp->private_data; + + if (size > dev->bsr_len || (size & (PAGE_SIZE-1))) + return -EINVAL; + + vma->vm_flags |= (VM_IO | VM_DONTEXPAND); + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + if (io_remap_pfn_range(vma, vma->vm_start, dev->bsr_addr >> PAGE_SHIFT, + size, vma->vm_page_prot)) + return -EAGAIN; + + return 0; +} + +static int bsr_open(struct inode * inode, struct file * filp) +{ + struct cdev *cdev = inode->i_cdev; + struct bsr_dev *dev = container_of(cdev, struct bsr_dev, bsr_cdev); + + filp->private_data = dev; + return 0; +} + +const static struct file_operations bsr_fops = { + .owner = THIS_MODULE, + .mmap = bsr_mmap, + .open = bsr_open, +}; + +static void bsr_cleanup_devs(void) +{ + int i; + for (i=0 ; i < num_bsr_devs; i++) { + struct bsr_dev *cur = bsr_devs + i; + if (cur->bsr_device) { + cdev_del(&cur->bsr_cdev); + device_del(cur->bsr_device); + } + } + + kfree(bsr_devs); +} + +static int bsr_create_devs(struct device_node *bn) +{ + int bsr_stride_len, bsr_bytes_len; + const u32 *bsr_stride; + const u32 *bsr_bytes; + unsigned i; + + bsr_stride = of_get_property(bn, "ibm,lock-stride", &bsr_stride_len); + bsr_bytes = of_get_property(bn, "ibm,#lock-bytes", &bsr_bytes_len); + + if (!bsr_stride || !bsr_bytes || + (bsr_stride_len != bsr_bytes_len)) { + printk(KERN_ERR "bsr of-node has missing/incorrect property\n"); + return -ENODEV; + } + + num_bsr_devs = bsr_bytes_len / sizeof(u32); + + /* only a warning, its informational 
since we'll fail and exit */ + WARN_ON(num_bsr_devs > BSR_MAX_DEVS); + + bsr_devs = kzalloc(sizeof(struct bsr_dev) * num_bsr_devs, GFP_KERNEL); + if (!bsr_devs) + return -ENOMEM; + + for (i = 0 ; i < num_bsr_devs; i++) { + struct bsr_dev *cur = bsr_devs + i; + struct resource res; + int result; + + result = of_address_to_resource(bn, i, &res); + if (result < 0) { + printk(KERN_ERR "bsr of-node has invalid reg property\n"); + goto out_err; + } + + cur->bsr_minor = i; + cur->bsr_addr = res.start; + cur->bsr_len = res.end - res.start + 1; + cur->bsr_bytes = bsr_bytes[i]; + cur->bsr_stride = bsr_stride[i]; + cur->bsr_dev = MKDEV(bsr_major, i); + + switch(cur->bsr_bytes) { + case 8: + cur->bsr_type = BSR_8; + break; + case 16: + cur->bsr_type = BSR_16; + break; + case 64: + cur->bsr_type = BSR_64; + break; + case 128: + cur->bsr_type = BSR_128; + break; + default: + cur->bsr_type = BSR_UNKNOWN; + printk(KERN_INFO "unknown BSR size %d\n",cur->bsr_bytes); + } + + cur->bsr_num = bsr_types[cur->bsr_type]; + bsr_types[cur->bsr_type] = cur->bsr_num + 1; + snprintf(cur->bsr_name, 32, "bsr%d_%d", + cur->bsr_bytes, cur->bsr_num); + + cdev_init(&cur->bsr_cdev, &bsr_fops); + result = cdev_add(&cur->bsr_cdev, cur->bsr_dev, 1); + if (result) + goto out_err; + + cur->bsr_device = device_create_drvdata(bsr_class, NULL, + cur->bsr_dev, + cur, cur->bsr_name); + if (!cur->bsr_device) { + printk(KERN_ERR "device_create failed for %s\n", + cur->bsr_name); + cdev_del(&cur->bsr_cdev); + goto out_err; + } + } + + return 0; + + out_err: + + bsr_cleanup_devs(); + return -ENODEV; +} + +static int __init bsr_init(void) +{ + struct device_node *np; + dev_t bsr_dev = MKDEV(bsr_major, 0); + int ret = -ENODEV; + int result; + + np = of_find_compatible_node(NULL, "ibm,bsr", "ibm,bsr"); + if (!np) + goto out_err; + + bsr_class = class_create(THIS_MODULE, "bsr"); + if (IS_ERR(bsr_class)) { + printk(KERN_ERR "class_create() failed for bsr_class\n"); + goto out_err_1; + } + bsr_class->dev_attrs = bsr_dev_attrs; + + result = alloc_chrdev_region(&bsr_dev, 0, BSR_MAX_DEVS, "bsr"); + bsr_major = MAJOR(bsr_dev); + if (result < 0) { + printk(KERN_ERR "alloc_chrdev_region() failed for bsr\n"); + goto out_err_2; + } + + if ((ret = bsr_create_devs(np)) < 0) + goto out_err_3; + + of_node_put(np); + + return 0; + + out_err_3: + unregister_chrdev_region(bsr_dev, BSR_MAX_DEVS); + + out_err_2: + class_destroy(bsr_class); + + out_err_1: + of_node_put(np); + + out_err: + + return ret; +} + +static void __exit bsr_exit(void) +{ + + bsr_cleanup_devs(); + + if (bsr_class) + class_destroy(bsr_class); + + if (bsr_major) + unregister_chrdev_region(MKDEV(bsr_major, 0), BSR_MAX_DEVS); +} + +module_init(bsr_init); +module_exit(bsr_exit); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Sonny Rao <sonnyrao@us.ibm.com>"); diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c index 44160d5ebca0..2f9759d625cc 100644 --- a/drivers/char/hvc_console.c +++ b/drivers/char/hvc_console.c @@ -675,12 +675,6 @@ static int hvc_poll(struct hvc_struct *hp) return poll_mask; } -#if defined(CONFIG_XMON) && defined(CONFIG_SMP) -extern cpumask_t cpus_in_xmon; -#else -static const cpumask_t cpus_in_xmon = CPU_MASK_NONE; -#endif - /* * This kthread is either polling or interrupt driven. 
This is determined by * calling hvc_poll() who determines whether a console adapter support @@ -698,7 +692,7 @@ static int khvcd(void *unused) hvc_kicked = 0; try_to_freeze(); wmb(); - if (cpus_empty(cpus_in_xmon)) { + if (!cpus_are_in_xmon()) { spin_lock(&hvc_structs_lock); list_for_each_entry(hp, &hvc_structs, next) { poll_mask |= hvc_poll(hp); diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h index 8c59818050e6..42ffb17e15df 100644 --- a/drivers/char/hvc_console.h +++ b/drivers/char/hvc_console.h @@ -60,4 +60,14 @@ extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int irq, /* remove a vterm from hvc tty operation (modele_exit or hotplug remove) */ extern int __devexit hvc_remove(struct hvc_struct *hp); + +#if defined(CONFIG_XMON) && defined(CONFIG_SMP) +#include <asm/xmon.h> +#else +static inline int cpus_are_in_xmon(void) +{ + return 0; +} +#endif + #endif // HVC_CONSOLE_H diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c index 6d50e9bc700b..7fa61dd1d9d9 100644 --- a/drivers/char/hw_random/pasemi-rng.c +++ b/drivers/char/hw_random/pasemi-rng.c @@ -24,7 +24,7 @@ #include <linux/platform_device.h> #include <linux/hw_random.h> #include <linux/delay.h> -#include <asm/of_platform.h> +#include <linux/of_platform.h> #include <asm/io.h> #define SDCRNG_CTL_REG 0x00 diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c index 59ca35156d81..e4a4fbd37d7a 100644 --- a/drivers/char/pcmcia/cm4000_cs.c +++ b/drivers/char/pcmcia/cm4000_cs.c @@ -1439,7 +1439,7 @@ static long cmm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) DEBUGP(4, dev, "CMM_ABSENT flag set\n"); goto out; } - rc = EINVAL; + rc = -EINVAL; if (_IOC_TYPE(cmd) != CM_IOC_MAGIC) { DEBUGP(4, dev, "ioctype mismatch\n"); diff --git a/drivers/char/viotape.c b/drivers/char/viotape.c index 977f7d35e769..e5da98d8f9cd 100644 --- a/drivers/char/viotape.c +++ b/drivers/char/viotape.c @@ -678,6 +678,17 @@ free_op: return ret; } +static long viotap_unlocked_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + long rc; + + lock_kernel(); + rc = viotap_ioctl(file->f_path.dentry->d_inode, file, cmd, arg); + unlock_kernel(); + return rc; +} + static int viotap_open(struct inode *inode, struct file *file) { HvLpEvent_Rc hvrc; @@ -786,12 +797,12 @@ free_op: } const struct file_operations viotap_fops = { - .owner = THIS_MODULE, - .read = viotap_read, - .write = viotap_write, - .ioctl = viotap_ioctl, - .open = viotap_open, - .release = viotap_release, + .owner = THIS_MODULE, + .read = viotap_read, + .write = viotap_write, + .unlocked_ioctl = viotap_unlocked_ioctl, + .open = viotap_open, + .release = viotap_release, }; /* Handle interrupt events for tape */ diff --git a/drivers/hwmon/ad7418.c b/drivers/hwmon/ad7418.c index 466b9ee92797..f97b5b356875 100644 --- a/drivers/hwmon/ad7418.c +++ b/drivers/hwmon/ad7418.c @@ -23,12 +23,9 @@ #include "lm75.h" -#define DRV_VERSION "0.3" +#define DRV_VERSION "0.4" -/* Addresses to scan */ -static const unsigned short normal_i2c[] = { 0x28, I2C_CLIENT_END }; -/* Insmod parameters */ -I2C_CLIENT_INSMOD_3(ad7416, ad7417, ad7418); +enum chips { ad7416, ad7417, ad7418 }; /* AD7418 registers */ #define AD7418_REG_TEMP_IN 0x00 @@ -46,7 +43,6 @@ static const u8 AD7418_REG_TEMP[] = { AD7418_REG_TEMP_IN, AD7418_REG_TEMP_OS }; struct ad7418_data { - struct i2c_client client; struct device *hwmon_dev; struct attribute_group attrs; enum chips type; @@ -58,16 +54,25 @@ struct ad7418_data { u16 in[4]; }; -static 
int ad7418_attach_adapter(struct i2c_adapter *adapter); -static int ad7418_detect(struct i2c_adapter *adapter, int address, int kind); -static int ad7418_detach_client(struct i2c_client *client); +static int ad7418_probe(struct i2c_client *client, + const struct i2c_device_id *id); +static int ad7418_remove(struct i2c_client *client); + +static const struct i2c_device_id ad7418_id[] = { + { "ad7416", ad7416 }, + { "ad7417", ad7417 }, + { "ad7418", ad7418 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, ad7418_id); static struct i2c_driver ad7418_driver = { .driver = { .name = "ad7418", }, - .attach_adapter = ad7418_attach_adapter, - .detach_client = ad7418_detach_client, + .probe = ad7418_probe, + .remove = ad7418_remove, + .id_table = ad7418_id, }; /* All registers are word-sized, except for the configuration registers. @@ -192,13 +197,6 @@ static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_adc, NULL, 1); static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, show_adc, NULL, 2); static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, show_adc, NULL, 3); -static int ad7418_attach_adapter(struct i2c_adapter *adapter) -{ - if (!(adapter->class & I2C_CLASS_HWMON)) - return 0; - return i2c_probe(adapter, &addr_data, ad7418_detect); -} - static struct attribute *ad7416_attributes[] = { &sensor_dev_attr_temp1_max.dev_attr.attr, &sensor_dev_attr_temp1_max_hyst.dev_attr.attr, @@ -225,98 +223,46 @@ static struct attribute *ad7418_attributes[] = { NULL }; -static int ad7418_detect(struct i2c_adapter *adapter, int address, int kind) +static int ad7418_probe(struct i2c_client *client, + const struct i2c_device_id *id) { - struct i2c_client *client; + struct i2c_adapter *adapter = client->adapter; struct ad7418_data *data; - int err = 0; + int err; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | - I2C_FUNC_SMBUS_WORD_DATA)) + I2C_FUNC_SMBUS_WORD_DATA)) { + err = -EOPNOTSUPP; goto exit; + } if (!(data = kzalloc(sizeof(struct ad7418_data), GFP_KERNEL))) { err = -ENOMEM; goto exit; } - client = &data->client; - client->addr = address; - client->adapter = adapter; - client->driver = &ad7418_driver; - i2c_set_clientdata(client, data); mutex_init(&data->lock); - - /* AD7418 has a curious behaviour on registers 6 and 7. They - * both always read 0xC071 and are not documented on the datasheet. - * We use them to detect the chip. - */ - if (kind <= 0) { - int reg, reg6, reg7; - - /* the AD7416 lies within this address range, but I have - * no means to check. 
- */ - if (address >= 0x48 && address <= 0x4f) { - /* XXX add tests for AD7416 here */ - /* data->type = ad7416; */ - } - /* here we might have AD7417 or AD7418 */ - else if (address >= 0x28 && address <= 0x2f) { - reg6 = i2c_smbus_read_word_data(client, 0x06); - reg7 = i2c_smbus_read_word_data(client, 0x07); - - if (address == 0x28 && reg6 == 0xC071 && reg7 == 0xC071) - data->type = ad7418; - - /* XXX add tests for AD7417 here */ - - - /* both AD7417 and AD7418 have bits 0-5 of - * the CONF2 register at 0 - */ - reg = i2c_smbus_read_byte_data(client, - AD7418_REG_CONF2); - if (reg & 0x3F) - data->type = any_chip; /* detection failed */ - } - } else { - dev_dbg(&adapter->dev, "detection forced\n"); - } - - if (kind > 0) - data->type = kind; - else if (kind < 0 && data->type == any_chip) { - err = -ENODEV; - goto exit_free; - } + data->type = id->driver_data; switch (data->type) { - case any_chip: case ad7416: data->adc_max = 0; data->attrs.attrs = ad7416_attributes; - strlcpy(client->name, "ad7416", I2C_NAME_SIZE); break; case ad7417: data->adc_max = 4; data->attrs.attrs = ad7417_attributes; - strlcpy(client->name, "ad7417", I2C_NAME_SIZE); break; case ad7418: data->adc_max = 1; data->attrs.attrs = ad7418_attributes; - strlcpy(client->name, "ad7418", I2C_NAME_SIZE); break; } - if ((err = i2c_attach_client(client))) - goto exit_free; - dev_info(&client->dev, "%s chip found\n", client->name); /* Initialize the AD7418 chip */ @@ -324,7 +270,7 @@ static int ad7418_detect(struct i2c_adapter *adapter, int address, int kind) /* Register sysfs hooks */ if ((err = sysfs_create_group(&client->dev.kobj, &data->attrs))) - goto exit_detach; + goto exit_free; data->hwmon_dev = hwmon_device_register(&client->dev); if (IS_ERR(data->hwmon_dev)) { @@ -336,20 +282,17 @@ static int ad7418_detect(struct i2c_adapter *adapter, int address, int kind) exit_remove: sysfs_remove_group(&client->dev.kobj, &data->attrs); -exit_detach: - i2c_detach_client(client); exit_free: kfree(data); exit: return err; } -static int ad7418_detach_client(struct i2c_client *client) +static int ad7418_remove(struct i2c_client *client) { struct ad7418_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &data->attrs); - i2c_detach_client(client); kfree(data); return 0; } diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c index ecbf69484bf5..b11e06f644b1 100644 --- a/drivers/hwmon/adm1021.c +++ b/drivers/hwmon/adm1021.c @@ -78,7 +78,6 @@ clearing it. Weird, ey? 
--Phil */ /* Each client has this additional data */ struct adm1021_data { - struct i2c_client client; struct device *hwmon_dev; enum chips type; @@ -98,23 +97,42 @@ struct adm1021_data { u8 remote_temp_offset_prec; }; -static int adm1021_attach_adapter(struct i2c_adapter *adapter); -static int adm1021_detect(struct i2c_adapter *adapter, int address, int kind); +static int adm1021_probe(struct i2c_client *client, + const struct i2c_device_id *id); +static int adm1021_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info); static void adm1021_init_client(struct i2c_client *client); -static int adm1021_detach_client(struct i2c_client *client); +static int adm1021_remove(struct i2c_client *client); static struct adm1021_data *adm1021_update_device(struct device *dev); /* (amalysh) read only mode, otherwise any limit's writing confuse BIOS */ static int read_only; +static const struct i2c_device_id adm1021_id[] = { + { "adm1021", adm1021 }, + { "adm1023", adm1023 }, + { "max1617", max1617 }, + { "max1617a", max1617a }, + { "thmc10", thmc10 }, + { "lm84", lm84 }, + { "gl523sm", gl523sm }, + { "mc1066", mc1066 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, adm1021_id); + /* This is the driver that will be inserted */ static struct i2c_driver adm1021_driver = { + .class = I2C_CLASS_HWMON, .driver = { .name = "adm1021", }, - .attach_adapter = adm1021_attach_adapter, - .detach_client = adm1021_detach_client, + .probe = adm1021_probe, + .remove = adm1021_remove, + .id_table = adm1021_id, + .detect = adm1021_detect, + .address_data = &addr_data, }; static ssize_t show_temp(struct device *dev, @@ -216,13 +234,6 @@ static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 2); static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL); -static int adm1021_attach_adapter(struct i2c_adapter *adapter) -{ - if (!(adapter->class & I2C_CLASS_HWMON)) - return 0; - return i2c_probe(adapter, &addr_data, adm1021_detect); -} - static struct attribute *adm1021_attributes[] = { &sensor_dev_attr_temp1_max.dev_attr.attr, &sensor_dev_attr_temp1_min.dev_attr.attr, @@ -243,36 +254,21 @@ static const struct attribute_group adm1021_group = { .attrs = adm1021_attributes, }; -static int adm1021_detect(struct i2c_adapter *adapter, int address, int kind) +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int adm1021_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info) { + struct i2c_adapter *adapter = client->adapter; int i; - struct i2c_client *client; - struct adm1021_data *data; - int err = 0; const char *type_name = ""; int conv_rate, status, config; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { pr_debug("adm1021: detect failed, " "smbus byte data not supported!\n"); - goto error0; - } - - /* OK. For now, we presume we have a valid client. We now create the - client structure, even though we cannot fill it completely yet. - But it allows us to access adm1021 register values. 
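The adm1021 hunks above follow the same conversion that repeats across the hwmon drivers in this series: the legacy attach_adapter()/detach_client() pair and the struct i2c_client embedded in the per-client data are dropped in favour of probe()/remove(), an i2c_device_id table, and a detect() hook plus address_data for legacy-style address scanning. Stripped to a skeleton (every foo_* name, the 0x2c address and the 0x3e register are hypothetical), the pattern is:

#include <linux/module.h>
#include <linux/i2c.h>

/* address list for legacy-style detection */
static const unsigned short normal_i2c[] = { 0x2c, I2C_CLIENT_END };
I2C_CLIENT_INSMOD_1(foo);

static int foo_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	/* allocate per-client data, i2c_set_clientdata(), create sysfs
	 * files, hwmon_device_register() -- no i2c_attach_client() */
	return 0;
}

static int foo_remove(struct i2c_client *client)
{
	/* hwmon_device_unregister(), remove sysfs files, kfree() the
	 * client data -- no i2c_detach_client() */
	return 0;
}

static int foo_detect(struct i2c_client *client, int kind,
		      struct i2c_board_info *info)
{
	/* when not forced, poke a register before claiming the chip */
	if (kind < 0 && i2c_smbus_read_byte_data(client, 0x3e) < 0)
		return -ENODEV;

	strlcpy(info->type, "foo", I2C_NAME_SIZE);
	return 0;
}

static const struct i2c_device_id foo_id[] = {
	{ "foo", foo },
	{ }
};
MODULE_DEVICE_TABLE(i2c, foo_id);

static struct i2c_driver foo_driver = {
	.class		= I2C_CLASS_HWMON,
	.driver		= { .name = "foo" },
	.probe		= foo_probe,
	.remove		= foo_remove,
	.id_table	= foo_id,
	.detect		= foo_detect,
	.address_data	= &addr_data,	/* provided by I2C_CLIENT_INSMOD_1 */
};

static int __init foo_init(void)
{
	return i2c_add_driver(&foo_driver);
}

static void __exit foo_exit(void)
{
	i2c_del_driver(&foo_driver);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");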
*/ - - if (!(data = kzalloc(sizeof(struct adm1021_data), GFP_KERNEL))) { - pr_debug("adm1021: detect failed, kzalloc failed!\n"); - err = -ENOMEM; - goto error0; + return -ENODEV; } - client = &data->client; - i2c_set_clientdata(client, data); - client->addr = address; - client->adapter = adapter; - client->driver = &adm1021_driver; status = i2c_smbus_read_byte_data(client, ADM1021_REG_STATUS); conv_rate = i2c_smbus_read_byte_data(client, ADM1021_REG_CONV_RATE_R); @@ -284,8 +280,7 @@ static int adm1021_detect(struct i2c_adapter *adapter, int address, int kind) || (conv_rate & 0xF8) != 0x00) { pr_debug("adm1021: detect failed, " "chip not detected!\n"); - err = -ENODEV; - goto error1; + return -ENODEV; } } @@ -336,24 +331,36 @@ static int adm1021_detect(struct i2c_adapter *adapter, int address, int kind) type_name = "mc1066"; } pr_debug("adm1021: Detected chip %s at adapter %d, address 0x%02x.\n", - type_name, i2c_adapter_id(adapter), address); + type_name, i2c_adapter_id(adapter), client->addr); + strlcpy(info->type, type_name, I2C_NAME_SIZE); - /* Fill in the remaining client fields */ - strlcpy(client->name, type_name, I2C_NAME_SIZE); - data->type = kind; - mutex_init(&data->update_lock); + return 0; +} - /* Tell the I2C layer a new client has arrived */ - if ((err = i2c_attach_client(client))) - goto error1; +static int adm1021_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct adm1021_data *data; + int err; + + data = kzalloc(sizeof(struct adm1021_data), GFP_KERNEL); + if (!data) { + pr_debug("adm1021: detect failed, kzalloc failed!\n"); + err = -ENOMEM; + goto error0; + } + + i2c_set_clientdata(client, data); + data->type = id->driver_data; + mutex_init(&data->update_lock); /* Initialize the ADM1021 chip */ - if (kind != lm84 && !read_only) + if (data->type != lm84 && !read_only) adm1021_init_client(client); /* Register sysfs hooks */ if ((err = sysfs_create_group(&client->dev.kobj, &adm1021_group))) - goto error2; + goto error1; data->hwmon_dev = hwmon_device_register(&client->dev); if (IS_ERR(data->hwmon_dev)) { @@ -365,8 +372,6 @@ static int adm1021_detect(struct i2c_adapter *adapter, int address, int kind) error3: sysfs_remove_group(&client->dev.kobj, &adm1021_group); -error2: - i2c_detach_client(client); error1: kfree(data); error0: @@ -382,17 +387,13 @@ static void adm1021_init_client(struct i2c_client *client) i2c_smbus_write_byte_data(client, ADM1021_REG_CONV_RATE_W, 0x04); } -static int adm1021_detach_client(struct i2c_client *client) +static int adm1021_remove(struct i2c_client *client) { struct adm1021_data *data = i2c_get_clientdata(client); - int err; hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &adm1021_group); - if ((err = i2c_detach_client(client))) - return err; - kfree(data); return 0; } diff --git a/drivers/hwmon/adm1025.c b/drivers/hwmon/adm1025.c index 1d76de7d75c7..4db04d603ec9 100644 --- a/drivers/hwmon/adm1025.c +++ b/drivers/hwmon/adm1025.c @@ -2,7 +2,7 @@ * adm1025.c * * Copyright (C) 2000 Chen-Yuan Wu <gwu@esoft.com> - * Copyright (C) 2003-2004 Jean Delvare <khali@linux-fr.org> + * Copyright (C) 2003-2008 Jean Delvare <khali@linux-fr.org> * * The ADM1025 is a sensor chip made by Analog Devices. 
It reports up to 6 * voltages (including its own power source) and up to two temperatures @@ -109,22 +109,35 @@ static const int in_scale[6] = { 2500, 2250, 3300, 5000, 12000, 3300 }; * Functions declaration */ -static int adm1025_attach_adapter(struct i2c_adapter *adapter); -static int adm1025_detect(struct i2c_adapter *adapter, int address, int kind); +static int adm1025_probe(struct i2c_client *client, + const struct i2c_device_id *id); +static int adm1025_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info); static void adm1025_init_client(struct i2c_client *client); -static int adm1025_detach_client(struct i2c_client *client); +static int adm1025_remove(struct i2c_client *client); static struct adm1025_data *adm1025_update_device(struct device *dev); /* * Driver data (common to all clients) */ +static const struct i2c_device_id adm1025_id[] = { + { "adm1025", adm1025 }, + { "ne1619", ne1619 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, adm1025_id); + static struct i2c_driver adm1025_driver = { + .class = I2C_CLASS_HWMON, .driver = { .name = "adm1025", }, - .attach_adapter = adm1025_attach_adapter, - .detach_client = adm1025_detach_client, + .probe = adm1025_probe, + .remove = adm1025_remove, + .id_table = adm1025_id, + .detect = adm1025_detect, + .address_data = &addr_data, }; /* @@ -132,7 +145,6 @@ static struct i2c_driver adm1025_driver = { */ struct adm1025_data { - struct i2c_client client; struct device *hwmon_dev; struct mutex update_lock; char valid; /* zero until following fields are valid */ @@ -344,13 +356,6 @@ static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm, set_vrm); * Real code */ -static int adm1025_attach_adapter(struct i2c_adapter *adapter) -{ - if (!(adapter->class & I2C_CLASS_HWMON)) - return 0; - return i2c_probe(adapter, &addr_data, adm1025_detect); -} - static struct attribute *adm1025_attributes[] = { &sensor_dev_attr_in0_input.dev_attr.attr, &sensor_dev_attr_in1_input.dev_attr.attr, @@ -403,31 +408,16 @@ static const struct attribute_group adm1025_group_in4 = { .attrs = adm1025_attributes_in4, }; -/* - * The following function does more than just detection. If detection - * succeeds, it also registers the new chip. - */ -static int adm1025_detect(struct i2c_adapter *adapter, int address, int kind) +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int adm1025_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info) { - struct i2c_client *client; - struct adm1025_data *data; - int err = 0; + struct i2c_adapter *adapter = client->adapter; const char *name = ""; u8 config; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) - goto exit; - - if (!(data = kzalloc(sizeof(struct adm1025_data), GFP_KERNEL))) { - err = -ENOMEM; - goto exit; - } - - client = &data->client; - i2c_set_clientdata(client, data); - client->addr = address; - client->adapter = adapter; - client->driver = &adm1025_driver; + return -ENODEV; /* * Now we do the remaining detection. 
A negative kind means that @@ -448,8 +438,8 @@ static int adm1025_detect(struct i2c_adapter *adapter, int address, int kind) ADM1025_REG_STATUS2) & 0xBC) != 0x00) { dev_dbg(&adapter->dev, "ADM1025 detection failed at 0x%02x.\n", - address); - goto exit_free; + client->addr); + return -ENODEV; } } @@ -465,7 +455,7 @@ static int adm1025_detect(struct i2c_adapter *adapter, int address, int kind) } } else if (man_id == 0xA1) { /* Philips */ - if (address != 0x2E + if (client->addr != 0x2E && (chip_id & 0xF0) == 0x20) { /* NE1619 */ kind = ne1619; } @@ -475,7 +465,7 @@ static int adm1025_detect(struct i2c_adapter *adapter, int address, int kind) dev_info(&adapter->dev, "Unsupported chip (man_id=0x%02X, " "chip_id=0x%02X).\n", man_id, chip_id); - goto exit_free; + return -ENODEV; } } @@ -484,23 +474,36 @@ static int adm1025_detect(struct i2c_adapter *adapter, int address, int kind) } else if (kind == ne1619) { name = "ne1619"; } + strlcpy(info->type, name, I2C_NAME_SIZE); - /* We can fill in the remaining client fields */ - strlcpy(client->name, name, I2C_NAME_SIZE); - mutex_init(&data->update_lock); + return 0; +} - /* Tell the I2C layer a new client has arrived */ - if ((err = i2c_attach_client(client))) - goto exit_free; +static int adm1025_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct adm1025_data *data; + int err; + u8 config; + + data = kzalloc(sizeof(struct adm1025_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(client, data); + mutex_init(&data->update_lock); /* Initialize the ADM1025 chip */ adm1025_init_client(client); /* Register sysfs hooks */ if ((err = sysfs_create_group(&client->dev.kobj, &adm1025_group))) - goto exit_detach; + goto exit_free; /* Pin 11 is either in4 (+12V) or VID4 */ + config = i2c_smbus_read_byte_data(client, ADM1025_REG_CONFIG); if (!(config & 0x20)) { if ((err = sysfs_create_group(&client->dev.kobj, &adm1025_group_in4))) @@ -518,8 +521,6 @@ static int adm1025_detect(struct i2c_adapter *adapter, int address, int kind) exit_remove: sysfs_remove_group(&client->dev.kobj, &adm1025_group); sysfs_remove_group(&client->dev.kobj, &adm1025_group_in4); -exit_detach: - i2c_detach_client(client); exit_free: kfree(data); exit: @@ -568,18 +569,14 @@ static void adm1025_init_client(struct i2c_client *client) (reg&0x7E)|0x01); } -static int adm1025_detach_client(struct i2c_client *client) +static int adm1025_remove(struct i2c_client *client) { struct adm1025_data *data = i2c_get_clientdata(client); - int err; hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &adm1025_group); sysfs_remove_group(&client->dev.kobj, &adm1025_group_in4); - if ((err = i2c_detach_client(client))) - return err; - kfree(data); return 0; } diff --git a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c index 904c6ce9d83f..7fe2441fc845 100644 --- a/drivers/hwmon/adm1026.c +++ b/drivers/hwmon/adm1026.c @@ -259,7 +259,6 @@ struct pwm_data { }; struct adm1026_data { - struct i2c_client client; struct device *hwmon_dev; struct mutex update_lock; @@ -293,10 +292,11 @@ struct adm1026_data { u8 config3; /* Register value */ }; -static int adm1026_attach_adapter(struct i2c_adapter *adapter); -static int adm1026_detect(struct i2c_adapter *adapter, int address, - int kind); -static int adm1026_detach_client(struct i2c_client *client); +static int adm1026_probe(struct i2c_client *client, + const struct i2c_device_id *id); +static int adm1026_detect(struct i2c_client *client, int kind, + struct i2c_board_info 
*info); +static int adm1026_remove(struct i2c_client *client); static int adm1026_read_value(struct i2c_client *client, u8 reg); static int adm1026_write_value(struct i2c_client *client, u8 reg, int value); static void adm1026_print_gpio(struct i2c_client *client); @@ -305,22 +305,24 @@ static struct adm1026_data *adm1026_update_device(struct device *dev); static void adm1026_init_client(struct i2c_client *client); +static const struct i2c_device_id adm1026_id[] = { + { "adm1026", adm1026 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, adm1026_id); + static struct i2c_driver adm1026_driver = { + .class = I2C_CLASS_HWMON, .driver = { .name = "adm1026", }, - .attach_adapter = adm1026_attach_adapter, - .detach_client = adm1026_detach_client, + .probe = adm1026_probe, + .remove = adm1026_remove, + .id_table = adm1026_id, + .detect = adm1026_detect, + .address_data = &addr_data, }; -static int adm1026_attach_adapter(struct i2c_adapter *adapter) -{ - if (!(adapter->class & I2C_CLASS_HWMON)) { - return 0; - } - return i2c_probe(adapter, &addr_data, adm1026_detect); -} - static int adm1026_read_value(struct i2c_client *client, u8 reg) { int res; @@ -1647,48 +1649,32 @@ static const struct attribute_group adm1026_group_in8_9 = { .attrs = adm1026_attributes_in8_9, }; -static int adm1026_detect(struct i2c_adapter *adapter, int address, - int kind) +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int adm1026_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info) { + struct i2c_adapter *adapter = client->adapter; + int address = client->addr; int company, verstep; - struct i2c_client *client; - struct adm1026_data *data; - int err = 0; - const char *type_name = ""; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { /* We need to be able to do byte I/O */ - goto exit; + return -ENODEV; }; - /* OK. For now, we presume we have a valid client. We now create the - client structure, even though we cannot fill it completely yet. - But it allows us to access adm1026_{read,write}_value. */ - - if (!(data = kzalloc(sizeof(struct adm1026_data), GFP_KERNEL))) { - err = -ENOMEM; - goto exit; - } - - client = &data->client; - i2c_set_clientdata(client, data); - client->addr = address; - client->adapter = adapter; - client->driver = &adm1026_driver; - /* Now, we do the remaining detection. */ company = adm1026_read_value(client, ADM1026_REG_COMPANY); verstep = adm1026_read_value(client, ADM1026_REG_VERSTEP); - dev_dbg(&client->dev, "Detecting device at %d,0x%02x with" + dev_dbg(&adapter->dev, "Detecting device at %d,0x%02x with" " COMPANY: 0x%02x and VERSTEP: 0x%02x\n", i2c_adapter_id(client->adapter), client->addr, company, verstep); /* If auto-detecting, Determine the chip type. */ if (kind <= 0) { - dev_dbg(&client->dev, "Autodetecting device at %d,0x%02x " + dev_dbg(&adapter->dev, "Autodetecting device at %d,0x%02x " "...\n", i2c_adapter_id(adapter), address); if (company == ADM1026_COMPANY_ANALOG_DEV && verstep == ADM1026_VERSTEP_ADM1026) { @@ -1704,7 +1690,7 @@ static int adm1026_detect(struct i2c_adapter *adapter, int address, verstep); kind = any_chip; } else { - dev_dbg(&client->dev, ": Autodetection " + dev_dbg(&adapter->dev, ": Autodetection " "failed\n"); /* Not an ADM1026 ... 
*/ if (kind == 0) { /* User used force=x,y */ @@ -1713,33 +1699,29 @@ static int adm1026_detect(struct i2c_adapter *adapter, int address, "force_adm1026.\n", i2c_adapter_id(adapter), address); } - goto exitfree; + return -ENODEV; } } + strlcpy(info->type, "adm1026", I2C_NAME_SIZE); - /* Fill in the chip specific driver values */ - switch (kind) { - case any_chip : - type_name = "adm1026"; - break; - case adm1026 : - type_name = "adm1026"; - break; - default : - dev_err(&adapter->dev, ": Internal error, invalid " - "kind (%d)!\n", kind); - err = -EFAULT; - goto exitfree; + return 0; +} + +static int adm1026_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct adm1026_data *data; + int err; + + data = kzalloc(sizeof(struct adm1026_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto exit; } - strlcpy(client->name, type_name, I2C_NAME_SIZE); - /* Fill in the remaining client fields */ + i2c_set_clientdata(client, data); mutex_init(&data->update_lock); - /* Tell the I2C layer a new client has arrived */ - if ((err = i2c_attach_client(client))) - goto exitfree; - /* Set the VRM version */ data->vrm = vid_which_vrm(); @@ -1748,7 +1730,7 @@ static int adm1026_detect(struct i2c_adapter *adapter, int address, /* Register sysfs hooks */ if ((err = sysfs_create_group(&client->dev.kobj, &adm1026_group))) - goto exitdetach; + goto exitfree; if (data->config1 & CFG1_AIN8_9) err = sysfs_create_group(&client->dev.kobj, &adm1026_group_in8_9); @@ -1773,15 +1755,13 @@ exitremove: sysfs_remove_group(&client->dev.kobj, &adm1026_group_in8_9); else sysfs_remove_group(&client->dev.kobj, &adm1026_group_temp3); -exitdetach: - i2c_detach_client(client); exitfree: kfree(data); exit: return err; } -static int adm1026_detach_client(struct i2c_client *client) +static int adm1026_remove(struct i2c_client *client) { struct adm1026_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); @@ -1790,7 +1770,6 @@ static int adm1026_detach_client(struct i2c_client *client) sysfs_remove_group(&client->dev.kobj, &adm1026_group_in8_9); else sysfs_remove_group(&client->dev.kobj, &adm1026_group_temp3); - i2c_detach_client(client); kfree(data); return 0; } diff --git a/drivers/hwmon/adm1029.c b/drivers/hwmon/adm1029.c index 2c6608d453c2..ba84ca5923f9 100644 --- a/drivers/hwmon/adm1029.c +++ b/drivers/hwmon/adm1029.c @@ -115,9 +115,11 @@ static const u8 ADM1029_REG_FAN_DIV[] = { * Functions declaration */ -static int adm1029_attach_adapter(struct i2c_adapter *adapter); -static int adm1029_detect(struct i2c_adapter *adapter, int address, int kind); -static int adm1029_detach_client(struct i2c_client *client); +static int adm1029_probe(struct i2c_client *client, + const struct i2c_device_id *id); +static int adm1029_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info); +static int adm1029_remove(struct i2c_client *client); static struct adm1029_data *adm1029_update_device(struct device *dev); static int adm1029_init_client(struct i2c_client *client); @@ -125,12 +127,22 @@ static int adm1029_init_client(struct i2c_client *client); * Driver data (common to all clients) */ +static const struct i2c_device_id adm1029_id[] = { + { "adm1029", adm1029 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, adm1029_id); + static struct i2c_driver adm1029_driver = { + .class = I2C_CLASS_HWMON, .driver = { .name = "adm1029", }, - .attach_adapter = adm1029_attach_adapter, - .detach_client = adm1029_detach_client, + .probe = adm1029_probe, + .remove = adm1029_remove, + .id_table = 
adm1029_id, + .detect = adm1029_detect, + .address_data = &addr_data, }; /* @@ -138,7 +150,6 @@ static struct i2c_driver adm1029_driver = { */ struct adm1029_data { - struct i2c_client client; struct device *hwmon_dev; struct mutex update_lock; char valid; /* zero until following fields are valid */ @@ -284,37 +295,14 @@ static const struct attribute_group adm1029_group = { * Real code */ -static int adm1029_attach_adapter(struct i2c_adapter *adapter) +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int adm1029_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info) { - if (!(adapter->class & I2C_CLASS_HWMON)) - return 0; - return i2c_probe(adapter, &addr_data, adm1029_detect); -} + struct i2c_adapter *adapter = client->adapter; -/* - * The following function does more than just detection. If detection - * succeeds, it also registers the new chip. - */ - -static int adm1029_detect(struct i2c_adapter *adapter, int address, int kind) -{ - struct i2c_client *client; - struct adm1029_data *data; - int err = 0; - const char *name = ""; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) - goto exit; - - if (!(data = kzalloc(sizeof(struct adm1029_data), GFP_KERNEL))) { - err = -ENOMEM; - goto exit; - } - - client = &data->client; - i2c_set_clientdata(client, data); - client->addr = address; - client->adapter = adapter; - client->driver = &adm1029_driver; + return -ENODEV; /* Now we do the detection and identification. A negative kind * means that the driver was loaded with no force parameter @@ -362,32 +350,41 @@ static int adm1029_detect(struct i2c_adapter *adapter, int address, int kind) if (kind <= 0) { /* identification failed */ pr_debug("adm1029: Unsupported chip (man_id=0x%02X, " "chip_id=0x%02X)\n", man_id, chip_id); - goto exit_free; + return -ENODEV; } } + strlcpy(info->type, "adm1029", I2C_NAME_SIZE); - if (kind == adm1029) { - name = "adm1029"; + return 0; +} + +static int adm1029_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct adm1029_data *data; + int err; + + data = kzalloc(sizeof(struct adm1029_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto exit; } - /* We can fill in the remaining client fields */ - strlcpy(client->name, name, I2C_NAME_SIZE); + i2c_set_clientdata(client, data); mutex_init(&data->update_lock); - /* Tell the I2C layer a new client has arrived */ - if ((err = i2c_attach_client(client))) - goto exit_free; - /* * Initialize the ADM1029 chip * Check config register */ - if (adm1029_init_client(client) == 0) - goto exit_detach; + if (adm1029_init_client(client) == 0) { + err = -ENODEV; + goto exit_free; + } /* Register sysfs hooks */ if ((err = sysfs_create_group(&client->dev.kobj, &adm1029_group))) - goto exit_detach; + goto exit_free; data->hwmon_dev = hwmon_device_register(&client->dev); if (IS_ERR(data->hwmon_dev)) { @@ -399,8 +396,6 @@ static int adm1029_detect(struct i2c_adapter *adapter, int address, int kind) exit_remove_files: sysfs_remove_group(&client->dev.kobj, &adm1029_group); - exit_detach: - i2c_detach_client(client); exit_free: kfree(data); exit: @@ -424,17 +419,13 @@ static int adm1029_init_client(struct i2c_client *client) return 1; } -static int adm1029_detach_client(struct i2c_client *client) +static int adm1029_remove(struct i2c_client *client) { struct adm1029_data *data = i2c_get_clientdata(client); - int err; hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &adm1029_group); - if ((err = i2c_detach_client(client))) - 
return err; - kfree(data); return 0; } diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c index 2bffcab7dc9f..789441830cd8 100644 --- a/drivers/hwmon/adm1031.c +++ b/drivers/hwmon/adm1031.c @@ -70,7 +70,6 @@ typedef u8 auto_chan_table_t[8][2]; /* Each client has this additional data */ struct adm1031_data { - struct i2c_client client; struct device *hwmon_dev; struct mutex update_lock; int chip_type; @@ -99,19 +98,32 @@ struct adm1031_data { s8 temp_crit[3]; }; -static int adm1031_attach_adapter(struct i2c_adapter *adapter); -static int adm1031_detect(struct i2c_adapter *adapter, int address, int kind); +static int adm1031_probe(struct i2c_client *client, + const struct i2c_device_id *id); +static int adm1031_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info); static void adm1031_init_client(struct i2c_client *client); -static int adm1031_detach_client(struct i2c_client *client); +static int adm1031_remove(struct i2c_client *client); static struct adm1031_data *adm1031_update_device(struct device *dev); +static const struct i2c_device_id adm1031_id[] = { + { "adm1030", adm1030 }, + { "adm1031", adm1031 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, adm1031_id); + /* This is the driver that will be inserted */ static struct i2c_driver adm1031_driver = { + .class = I2C_CLASS_HWMON, .driver = { .name = "adm1031", }, - .attach_adapter = adm1031_attach_adapter, - .detach_client = adm1031_detach_client, + .probe = adm1031_probe, + .remove = adm1031_remove, + .id_table = adm1031_id, + .detect = adm1031_detect, + .address_data = &addr_data, }; static inline u8 adm1031_read_value(struct i2c_client *client, u8 reg) @@ -693,13 +705,6 @@ static SENSOR_DEVICE_ATTR(temp3_crit_alarm, S_IRUGO, show_alarm, NULL, 12); static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 13); static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 14); -static int adm1031_attach_adapter(struct i2c_adapter *adapter) -{ - if (!(adapter->class & I2C_CLASS_HWMON)) - return 0; - return i2c_probe(adapter, &addr_data, adm1031_detect); -} - static struct attribute *adm1031_attributes[] = { &sensor_dev_attr_fan1_input.dev_attr.attr, &sensor_dev_attr_fan1_div.dev_attr.attr, @@ -770,27 +775,15 @@ static const struct attribute_group adm1031_group_opt = { .attrs = adm1031_attributes_opt, }; -/* This function is called by i2c_probe */ -static int adm1031_detect(struct i2c_adapter *adapter, int address, int kind) +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int adm1031_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info) { - struct i2c_client *client; - struct adm1031_data *data; - int err = 0; + struct i2c_adapter *adapter = client->adapter; const char *name = ""; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) - goto exit; - - if (!(data = kzalloc(sizeof(struct adm1031_data), GFP_KERNEL))) { - err = -ENOMEM; - goto exit; - } - - client = &data->client; - i2c_set_clientdata(client, data); - client->addr = address; - client->adapter = adapter; - client->driver = &adm1031_driver; + return -ENODEV; if (kind < 0) { int id, co; @@ -798,7 +791,7 @@ static int adm1031_detect(struct i2c_adapter *adapter, int address, int kind) co = i2c_smbus_read_byte_data(client, 0x3e); if (!((id == 0x31 || id == 0x30) && co == 0x41)) - goto exit_free; + return -ENODEV; kind = (id == 0x30) ? adm1030 : adm1031; } @@ -809,28 +802,43 @@ static int adm1031_detect(struct i2c_adapter *adapter, int address, int kind) * auto fan control helper table. 
*/ if (kind == adm1030) { name = "adm1030"; - data->chan_select_table = &auto_channel_select_table_adm1030; } else if (kind == adm1031) { name = "adm1031"; - data->chan_select_table = &auto_channel_select_table_adm1031; } - data->chip_type = kind; + strlcpy(info->type, name, I2C_NAME_SIZE); - strlcpy(client->name, name, I2C_NAME_SIZE); + return 0; +} + +static int adm1031_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct adm1031_data *data; + int err; + + data = kzalloc(sizeof(struct adm1031_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(client, data); + data->chip_type = id->driver_data; mutex_init(&data->update_lock); - /* Tell the I2C layer a new client has arrived */ - if ((err = i2c_attach_client(client))) - goto exit_free; + if (data->chip_type == adm1030) + data->chan_select_table = &auto_channel_select_table_adm1030; + else + data->chan_select_table = &auto_channel_select_table_adm1031; /* Initialize the ADM1031 chip */ adm1031_init_client(client); /* Register sysfs hooks */ if ((err = sysfs_create_group(&client->dev.kobj, &adm1031_group))) - goto exit_detach; + goto exit_free; - if (kind == adm1031) { + if (data->chip_type == adm1031) { if ((err = sysfs_create_group(&client->dev.kobj, &adm1031_group_opt))) goto exit_remove; @@ -847,25 +855,19 @@ static int adm1031_detect(struct i2c_adapter *adapter, int address, int kind) exit_remove: sysfs_remove_group(&client->dev.kobj, &adm1031_group); sysfs_remove_group(&client->dev.kobj, &adm1031_group_opt); -exit_detach: - i2c_detach_client(client); exit_free: kfree(data); exit: return err; } -static int adm1031_detach_client(struct i2c_client *client) +static int adm1031_remove(struct i2c_client *client) { struct adm1031_data *data = i2c_get_clientdata(client); - int ret; hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &adm1031_group); sysfs_remove_group(&client->dev.kobj, &adm1031_group_opt); - if ((ret = i2c_detach_client(client)) != 0) { - return ret; - } kfree(data); return 0; } diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c index 149ef25252e7..2444b15f2e9d 100644 --- a/drivers/hwmon/adm9240.c +++ b/drivers/hwmon/adm9240.c @@ -130,25 +130,37 @@ static inline unsigned int AOUT_FROM_REG(u8 reg) return SCALE(reg, 1250, 255); } -static int adm9240_attach_adapter(struct i2c_adapter *adapter); -static int adm9240_detect(struct i2c_adapter *adapter, int address, int kind); +static int adm9240_probe(struct i2c_client *client, + const struct i2c_device_id *id); +static int adm9240_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info); static void adm9240_init_client(struct i2c_client *client); -static int adm9240_detach_client(struct i2c_client *client); +static int adm9240_remove(struct i2c_client *client); static struct adm9240_data *adm9240_update_device(struct device *dev); /* driver data */ +static const struct i2c_device_id adm9240_id[] = { + { "adm9240", adm9240 }, + { "ds1780", ds1780 }, + { "lm81", lm81 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, adm9240_id); + static struct i2c_driver adm9240_driver = { + .class = I2C_CLASS_HWMON, .driver = { .name = "adm9240", }, - .attach_adapter = adm9240_attach_adapter, - .detach_client = adm9240_detach_client, + .probe = adm9240_probe, + .remove = adm9240_remove, + .id_table = adm9240_id, + .detect = adm9240_detect, + .address_data = &addr_data, }; /* per client data */ struct adm9240_data { - enum chips type; - struct i2c_client client; struct device 
*hwmon_dev; struct mutex update_lock; char valid; @@ -532,28 +544,17 @@ static const struct attribute_group adm9240_group = { /*** sensor chip detect and driver install ***/ -static int adm9240_detect(struct i2c_adapter *adapter, int address, int kind) +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int adm9240_detect(struct i2c_client *new_client, int kind, + struct i2c_board_info *info) { - struct i2c_client *new_client; - struct adm9240_data *data; - int err = 0; + struct i2c_adapter *adapter = new_client->adapter; const char *name = ""; + int address = new_client->addr; u8 man_id, die_rev; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) - goto exit; - - if (!(data = kzalloc(sizeof(*data), GFP_KERNEL))) { - err = -ENOMEM; - goto exit; - } - - new_client = &data->client; - i2c_set_clientdata(new_client, data); - new_client->addr = address; - new_client->adapter = adapter; - new_client->driver = &adm9240_driver; - new_client->flags = 0; + return -ENODEV; if (kind == 0) { kind = adm9240; @@ -566,7 +567,7 @@ static int adm9240_detect(struct i2c_adapter *adapter, int address, int kind) != address) { dev_err(&adapter->dev, "detect fail: address match, " "0x%02x\n", address); - goto exit_free; + return -ENODEV; } /* check known chip manufacturer */ @@ -581,7 +582,7 @@ static int adm9240_detect(struct i2c_adapter *adapter, int address, int kind) } else { dev_err(&adapter->dev, "detect fail: unknown manuf, " "0x%02x\n", man_id); - goto exit_free; + return -ENODEV; } /* successful detect, print chip info */ @@ -600,20 +601,31 @@ static int adm9240_detect(struct i2c_adapter *adapter, int address, int kind) } else if (kind == lm81) { name = "lm81"; } + strlcpy(info->type, name, I2C_NAME_SIZE); - /* fill in the remaining client fields and attach */ - strlcpy(new_client->name, name, I2C_NAME_SIZE); - data->type = kind; - mutex_init(&data->update_lock); + return 0; +} - if ((err = i2c_attach_client(new_client))) - goto exit_free; +static int adm9240_probe(struct i2c_client *new_client, + const struct i2c_device_id *id) +{ + struct adm9240_data *data; + int err; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(new_client, data); + mutex_init(&data->update_lock); adm9240_init_client(new_client); /* populate sysfs filesystem */ if ((err = sysfs_create_group(&new_client->dev.kobj, &adm9240_group))) - goto exit_detach; + goto exit_free; data->hwmon_dev = hwmon_device_register(&new_client->dev); if (IS_ERR(data->hwmon_dev)) { @@ -625,32 +637,19 @@ static int adm9240_detect(struct i2c_adapter *adapter, int address, int kind) exit_remove: sysfs_remove_group(&new_client->dev.kobj, &adm9240_group); -exit_detach: - i2c_detach_client(new_client); exit_free: kfree(data); exit: return err; } -static int adm9240_attach_adapter(struct i2c_adapter *adapter) -{ - if (!(adapter->class & I2C_CLASS_HWMON)) - return 0; - return i2c_probe(adapter, &addr_data, adm9240_detect); -} - -static int adm9240_detach_client(struct i2c_client *client) +static int adm9240_remove(struct i2c_client *client) { struct adm9240_data *data = i2c_get_clientdata(client); - int err; hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &adm9240_group); - if ((err = i2c_detach_client(client))) - return err; - kfree(data); return 0; } diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c index 5c8b6e0ff47c..5c39b4af1b23 100644 --- a/drivers/hwmon/ads7828.c +++ b/drivers/hwmon/ads7828.c @@ -64,7 +64,6 @@ 
static unsigned int ads7828_lsb_resol; /* resolution of the ADC sample lsb */ /* Each client has this additional data */ struct ads7828_data { - struct i2c_client client; struct device *hwmon_dev; struct mutex update_lock; /* mutex protect updates */ char valid; /* !=0 if following fields are valid */ @@ -73,7 +72,10 @@ struct ads7828_data { }; /* Function declaration - necessary due to function dependencies */ -static int ads7828_detect(struct i2c_adapter *adapter, int address, int kind); +static int ads7828_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info); +static int ads7828_probe(struct i2c_client *client, + const struct i2c_device_id *id); /* The ADS7828 returns the 12-bit sample in two bytes, these are read as a word then byte-swapped */ @@ -156,58 +158,43 @@ static const struct attribute_group ads7828_group = { .attrs = ads7828_attributes, }; -static int ads7828_attach_adapter(struct i2c_adapter *adapter) -{ - if (!(adapter->class & I2C_CLASS_HWMON)) - return 0; - return i2c_probe(adapter, &addr_data, ads7828_detect); -} - -static int ads7828_detach_client(struct i2c_client *client) +static int ads7828_remove(struct i2c_client *client) { struct ads7828_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &ads7828_group); - i2c_detach_client(client); kfree(i2c_get_clientdata(client)); return 0; } +static const struct i2c_device_id ads7828_id[] = { + { "ads7828", ads7828 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, ads7828_id); + /* This is the driver that will be inserted */ static struct i2c_driver ads7828_driver = { + .class = I2C_CLASS_HWMON, .driver = { .name = "ads7828", }, - .attach_adapter = ads7828_attach_adapter, - .detach_client = ads7828_detach_client, + .probe = ads7828_probe, + .remove = ads7828_remove, + .id_table = ads7828_id, + .detect = ads7828_detect, + .address_data = &addr_data, }; -/* This function is called by i2c_probe */ -static int ads7828_detect(struct i2c_adapter *adapter, int address, int kind) +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int ads7828_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info) { - struct i2c_client *client; - struct ads7828_data *data; - int err = 0; - const char *name = ""; + struct i2c_adapter *adapter = client->adapter; /* Check we have a valid client */ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_READ_WORD_DATA)) - goto exit; - - /* OK. For now, we presume we have a valid client. We now create the - client structure, even though we cannot fill it completely yet. - But it allows us to access ads7828_read_value. */ - data = kzalloc(sizeof(struct ads7828_data), GFP_KERNEL); - if (!data) { - err = -ENOMEM; - goto exit; - } - - client = &data->client; - i2c_set_clientdata(client, data); - client->addr = address; - client->adapter = adapter; - client->driver = &ads7828_driver; + return -ENODEV; /* Now, we do the remaining detection. There is no identification dedicated register so attempt to sanity check using knowledge of @@ -225,32 +212,34 @@ static int ads7828_detect(struct i2c_adapter *adapter, int address, int kind) printk(KERN_DEBUG "%s : Doesn't look like an ads7828 device\n", __func__); - goto exit_free; + return -ENODEV; } } } + strlcpy(info->type, "ads7828", I2C_NAME_SIZE); - /* Determine the chip type - only one kind supported! 
*/ - if (kind <= 0) - kind = ads7828; + return 0; +} - if (kind == ads7828) - name = "ads7828"; +static int ads7828_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct ads7828_data *data; + int err; - /* Fill in the remaining client fields, put it into the global list */ - strlcpy(client->name, name, I2C_NAME_SIZE); + data = kzalloc(sizeof(struct ads7828_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto exit; + } + i2c_set_clientdata(client, data); mutex_init(&data->update_lock); - /* Tell the I2C layer a new client has arrived */ - err = i2c_attach_client(client); - if (err) - goto exit_free; - /* Register sysfs hooks */ err = sysfs_create_group(&client->dev.kobj, &ads7828_group); if (err) - goto exit_detach; + goto exit_free; data->hwmon_dev = hwmon_device_register(&client->dev); if (IS_ERR(data->hwmon_dev)) { @@ -262,8 +251,6 @@ static int ads7828_detect(struct i2c_adapter *adapter, int address, int kind) exit_remove: sysfs_remove_group(&client->dev.kobj, &ads7828_group); -exit_detach: - i2c_detach_client(client); exit_free: kfree(data); exit: diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c index 6b5325f33a2c..d368d8f845e1 100644 --- a/drivers/hwmon/adt7470.c +++ b/drivers/hwmon/adt7470.c @@ -138,7 +138,6 @@ I2C_CLIENT_INSMOD_1(adt7470); #define FAN_DATA_VALID(x) ((x) && (x) != FAN_PERIOD_INVALID) struct adt7470_data { - struct i2c_client client; struct device *hwmon_dev; struct attribute_group attrs; struct mutex lock; @@ -164,16 +163,28 @@ struct adt7470_data { u8 pwm_auto_temp[ADT7470_PWM_COUNT]; }; -static int adt7470_attach_adapter(struct i2c_adapter *adapter); -static int adt7470_detect(struct i2c_adapter *adapter, int address, int kind); -static int adt7470_detach_client(struct i2c_client *client); +static int adt7470_probe(struct i2c_client *client, + const struct i2c_device_id *id); +static int adt7470_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info); +static int adt7470_remove(struct i2c_client *client); + +static const struct i2c_device_id adt7470_id[] = { + { "adt7470", adt7470 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, adt7470_id); static struct i2c_driver adt7470_driver = { + .class = I2C_CLASS_HWMON, .driver = { .name = "adt7470", }, - .attach_adapter = adt7470_attach_adapter, - .detach_client = adt7470_detach_client, + .probe = adt7470_probe, + .remove = adt7470_remove, + .id_table = adt7470_id, + .detect = adt7470_detect, + .address_data = &addr_data, }; /* @@ -1004,64 +1015,52 @@ static struct attribute *adt7470_attr[] = NULL }; -static int adt7470_attach_adapter(struct i2c_adapter *adapter) -{ - if (!(adapter->class & I2C_CLASS_HWMON)) - return 0; - return i2c_probe(adapter, &addr_data, adt7470_detect); -} - -static int adt7470_detect(struct i2c_adapter *adapter, int address, int kind) +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int adt7470_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info) { - struct i2c_client *client; - struct adt7470_data *data; - int err = 0; + struct i2c_adapter *adapter = client->adapter; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) - goto exit; - - if (!(data = kzalloc(sizeof(struct adt7470_data), GFP_KERNEL))) { - err = -ENOMEM; - goto exit; - } - - client = &data->client; - client->addr = address; - client->adapter = adapter; - client->driver = &adt7470_driver; - - i2c_set_clientdata(client, data); - - mutex_init(&data->lock); + return -ENODEV; if (kind <= 0) { int vendor, device, revision; vendor = 
i2c_smbus_read_byte_data(client, ADT7470_REG_VENDOR); - if (vendor != ADT7470_VENDOR) { - err = -ENODEV; - goto exit_free; - } + if (vendor != ADT7470_VENDOR) + return -ENODEV; device = i2c_smbus_read_byte_data(client, ADT7470_REG_DEVICE); - if (device != ADT7470_DEVICE) { - err = -ENODEV; - goto exit_free; - } + if (device != ADT7470_DEVICE) + return -ENODEV; revision = i2c_smbus_read_byte_data(client, ADT7470_REG_REVISION); - if (revision != ADT7470_REVISION) { - err = -ENODEV; - goto exit_free; - } + if (revision != ADT7470_REVISION) + return -ENODEV; } else dev_dbg(&adapter->dev, "detection forced\n"); - strlcpy(client->name, "adt7470", I2C_NAME_SIZE); + strlcpy(info->type, "adt7470", I2C_NAME_SIZE); - if ((err = i2c_attach_client(client))) - goto exit_free; + return 0; +} + +static int adt7470_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct adt7470_data *data; + int err; + + data = kzalloc(sizeof(struct adt7470_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(client, data); + mutex_init(&data->lock); dev_info(&client->dev, "%s chip found\n", client->name); @@ -1071,7 +1070,7 @@ static int adt7470_detect(struct i2c_adapter *adapter, int address, int kind) /* Register sysfs hooks */ data->attrs.attrs = adt7470_attr; if ((err = sysfs_create_group(&client->dev.kobj, &data->attrs))) - goto exit_detach; + goto exit_free; data->hwmon_dev = hwmon_device_register(&client->dev); if (IS_ERR(data->hwmon_dev)) { @@ -1083,21 +1082,18 @@ static int adt7470_detect(struct i2c_adapter *adapter, int address, int kind) exit_remove: sysfs_remove_group(&client->dev.kobj, &data->attrs); -exit_detach: - i2c_detach_client(client); exit_free: kfree(data); exit: return err; } -static int adt7470_detach_client(struct i2c_client *client) +static int adt7470_remove(struct i2c_client *client) { struct adt7470_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &data->attrs); - i2c_detach_client(client); kfree(data); return 0; } diff --git a/drivers/hwmon/adt7473.c b/drivers/hwmon/adt7473.c index 93dbf5e7ff8a..ce4a7cb5a116 100644 --- a/drivers/hwmon/adt7473.c +++ b/drivers/hwmon/adt7473.c @@ -143,7 +143,6 @@ I2C_CLIENT_INSMOD_1(adt7473); #define FAN_DATA_VALID(x) ((x) && (x) != FAN_PERIOD_INVALID) struct adt7473_data { - struct i2c_client client; struct device *hwmon_dev; struct attribute_group attrs; struct mutex lock; @@ -178,16 +177,28 @@ struct adt7473_data { u8 max_duty_at_overheat; }; -static int adt7473_attach_adapter(struct i2c_adapter *adapter); -static int adt7473_detect(struct i2c_adapter *adapter, int address, int kind); -static int adt7473_detach_client(struct i2c_client *client); +static int adt7473_probe(struct i2c_client *client, + const struct i2c_device_id *id); +static int adt7473_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info); +static int adt7473_remove(struct i2c_client *client); + +static const struct i2c_device_id adt7473_id[] = { + { "adt7473", adt7473 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, adt7473_id); static struct i2c_driver adt7473_driver = { + .class = I2C_CLASS_HWMON, .driver = { .name = "adt7473", }, - .attach_adapter = adt7473_attach_adapter, - .detach_client = adt7473_detach_client, + .probe = adt7473_probe, + .remove = adt7473_remove, + .id_table = adt7473_id, + .detect = adt7473_detect, + .address_data = &addr_data, }; /* @@ -1042,66 +1053,52 @@ static struct attribute *adt7473_attr[] = NULL }; -static int 
adt7473_attach_adapter(struct i2c_adapter *adapter) +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int adt7473_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info) { - if (!(adapter->class & I2C_CLASS_HWMON)) - return 0; - return i2c_probe(adapter, &addr_data, adt7473_detect); -} - -static int adt7473_detect(struct i2c_adapter *adapter, int address, int kind) -{ - struct i2c_client *client; - struct adt7473_data *data; - int err = 0; + struct i2c_adapter *adapter = client->adapter; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) - goto exit; - - data = kzalloc(sizeof(struct adt7473_data), GFP_KERNEL); - if (!data) { - err = -ENOMEM; - goto exit; - } - - client = &data->client; - client->addr = address; - client->adapter = adapter; - client->driver = &adt7473_driver; - - i2c_set_clientdata(client, data); - - mutex_init(&data->lock); + return -ENODEV; if (kind <= 0) { int vendor, device, revision; vendor = i2c_smbus_read_byte_data(client, ADT7473_REG_VENDOR); - if (vendor != ADT7473_VENDOR) { - err = -ENODEV; - goto exit_free; - } + if (vendor != ADT7473_VENDOR) + return -ENODEV; device = i2c_smbus_read_byte_data(client, ADT7473_REG_DEVICE); - if (device != ADT7473_DEVICE) { - err = -ENODEV; - goto exit_free; - } + if (device != ADT7473_DEVICE) + return -ENODEV; revision = i2c_smbus_read_byte_data(client, ADT7473_REG_REVISION); - if (revision != ADT7473_REV_68 && revision != ADT7473_REV_69) { - err = -ENODEV; - goto exit_free; - } + if (revision != ADT7473_REV_68 && revision != ADT7473_REV_69) + return -ENODEV; } else dev_dbg(&adapter->dev, "detection forced\n"); - strlcpy(client->name, "adt7473", I2C_NAME_SIZE); + strlcpy(info->type, "adt7473", I2C_NAME_SIZE); - err = i2c_attach_client(client); - if (err) - goto exit_free; + return 0; +} + +static int adt7473_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct adt7473_data *data; + int err; + + data = kzalloc(sizeof(struct adt7473_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(client, data); + mutex_init(&data->lock); dev_info(&client->dev, "%s chip found\n", client->name); @@ -1112,7 +1109,7 @@ static int adt7473_detect(struct i2c_adapter *adapter, int address, int kind) data->attrs.attrs = adt7473_attr; err = sysfs_create_group(&client->dev.kobj, &data->attrs); if (err) - goto exit_detach; + goto exit_free; data->hwmon_dev = hwmon_device_register(&client->dev); if (IS_ERR(data->hwmon_dev)) { @@ -1124,21 +1121,18 @@ static int adt7473_detect(struct i2c_adapter *adapter, int address, int kind) exit_remove: sysfs_remove_group(&client->dev.kobj, &data->attrs); -exit_detach: - i2c_detach_client(client); exit_free: kfree(data); exit: return err; } -static int adt7473_detach_client(struct i2c_client *client) +static int adt7473_remove(struct i2c_client *client) { struct adt7473_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &data->attrs); - i2c_detach_client(client); kfree(data); return 0; } diff --git a/drivers/hwmon/ams/ams-core.c b/drivers/hwmon/ams/ams-core.c index a112a03e8f29..fbefa82a015c 100644 --- a/drivers/hwmon/ams/ams-core.c +++ b/drivers/hwmon/ams/ams-core.c @@ -23,8 +23,8 @@ #include <linux/types.h> #include <linux/errno.h> #include <linux/init.h> +#include <linux/of_platform.h> #include <asm/pmac_pfunc.h> -#include <asm/of_platform.h> #include "ams.h" diff --git a/drivers/hwmon/asb100.c b/drivers/hwmon/asb100.c index 
fe2eea4d799b..8a45a2e6ba8a 100644 --- a/drivers/hwmon/asb100.c +++ b/drivers/hwmon/asb100.c @@ -176,10 +176,8 @@ static u8 DIV_TO_REG(long val) data is pointed to by client->data. The structure itself is dynamically allocated, at the same time the client itself is allocated. */ struct asb100_data { - struct i2c_client client; struct device *hwmon_dev; struct mutex lock; - enum chips type; struct mutex update_lock; unsigned long last_updated; /* In jiffies */ @@ -206,18 +204,30 @@ struct asb100_data { static int asb100_read_value(struct i2c_client *client, u16 reg); static void asb100_write_value(struct i2c_client *client, u16 reg, u16 val); -static int asb100_attach_adapter(struct i2c_adapter *adapter); -static int asb100_detect(struct i2c_adapter *adapter, int address, int kind); -static int asb100_detach_client(struct i2c_client *client); +static int asb100_probe(struct i2c_client *client, + const struct i2c_device_id *id); +static int asb100_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info); +static int asb100_remove(struct i2c_client *client); static struct asb100_data *asb100_update_device(struct device *dev); static void asb100_init_client(struct i2c_client *client); +static const struct i2c_device_id asb100_id[] = { + { "asb100", asb100 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, asb100_id); + static struct i2c_driver asb100_driver = { + .class = I2C_CLASS_HWMON, .driver = { .name = "asb100", }, - .attach_adapter = asb100_attach_adapter, - .detach_client = asb100_detach_client, + .probe = asb100_probe, + .remove = asb100_remove, + .id_table = asb100_id, + .detect = asb100_detect, + .address_data = &addr_data, }; /* 7 Voltages */ @@ -619,35 +629,13 @@ static const struct attribute_group asb100_group = { .attrs = asb100_attributes, }; -/* This function is called when: - asb100_driver is inserted (when this module is loaded), for each - available adapter - when a new adapter is inserted (and asb100_driver is still present) - */ -static int asb100_attach_adapter(struct i2c_adapter *adapter) -{ - if (!(adapter->class & I2C_CLASS_HWMON)) - return 0; - return i2c_probe(adapter, &addr_data, asb100_detect); -} - -static int asb100_detect_subclients(struct i2c_adapter *adapter, int address, - int kind, struct i2c_client *client) +static int asb100_detect_subclients(struct i2c_client *client) { int i, id, err; + int address = client->addr; + unsigned short sc_addr[2]; struct asb100_data *data = i2c_get_clientdata(client); - - data->lm75[0] = kzalloc(sizeof(struct i2c_client), GFP_KERNEL); - if (!(data->lm75[0])) { - err = -ENOMEM; - goto ERROR_SC_0; - } - - data->lm75[1] = kzalloc(sizeof(struct i2c_client), GFP_KERNEL); - if (!(data->lm75[1])) { - err = -ENOMEM; - goto ERROR_SC_1; - } + struct i2c_adapter *adapter = client->adapter; id = i2c_adapter_id(adapter); @@ -665,37 +653,34 @@ static int asb100_detect_subclients(struct i2c_adapter *adapter, int address, asb100_write_value(client, ASB100_REG_I2C_SUBADDR, (force_subclients[2] & 0x07) | ((force_subclients[3] & 0x07) << 4)); - data->lm75[0]->addr = force_subclients[2]; - data->lm75[1]->addr = force_subclients[3]; + sc_addr[0] = force_subclients[2]; + sc_addr[1] = force_subclients[3]; } else { int val = asb100_read_value(client, ASB100_REG_I2C_SUBADDR); - data->lm75[0]->addr = 0x48 + (val & 0x07); - data->lm75[1]->addr = 0x48 + ((val >> 4) & 0x07); + sc_addr[0] = 0x48 + (val & 0x07); + sc_addr[1] = 0x48 + ((val >> 4) & 0x07); } - if (data->lm75[0]->addr == data->lm75[1]->addr) { + if (sc_addr[0] == sc_addr[1]) { 
dev_err(&client->dev, "duplicate addresses 0x%x " - "for subclients\n", data->lm75[0]->addr); + "for subclients\n", sc_addr[0]); err = -ENODEV; goto ERROR_SC_2; } - for (i = 0; i <= 1; i++) { - i2c_set_clientdata(data->lm75[i], NULL); - data->lm75[i]->adapter = adapter; - data->lm75[i]->driver = &asb100_driver; - strlcpy(data->lm75[i]->name, "asb100 subclient", I2C_NAME_SIZE); - } - - if ((err = i2c_attach_client(data->lm75[0]))) { + data->lm75[0] = i2c_new_dummy(adapter, sc_addr[0]); + if (!data->lm75[0]) { dev_err(&client->dev, "subclient %d registration " - "at address 0x%x failed.\n", i, data->lm75[0]->addr); + "at address 0x%x failed.\n", 1, sc_addr[0]); + err = -ENOMEM; goto ERROR_SC_2; } - if ((err = i2c_attach_client(data->lm75[1]))) { + data->lm75[1] = i2c_new_dummy(adapter, sc_addr[1]); + if (!data->lm75[1]) { dev_err(&client->dev, "subclient %d registration " - "at address 0x%x failed.\n", i, data->lm75[1]->addr); + "at address 0x%x failed.\n", 2, sc_addr[1]); + err = -ENOMEM; goto ERROR_SC_3; } @@ -703,55 +688,31 @@ static int asb100_detect_subclients(struct i2c_adapter *adapter, int address, /* Undo inits in case of errors */ ERROR_SC_3: - i2c_detach_client(data->lm75[0]); + i2c_unregister_device(data->lm75[0]); ERROR_SC_2: - kfree(data->lm75[1]); -ERROR_SC_1: - kfree(data->lm75[0]); -ERROR_SC_0: return err; } -static int asb100_detect(struct i2c_adapter *adapter, int address, int kind) +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int asb100_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info) { - int err; - struct i2c_client *client; - struct asb100_data *data; + struct i2c_adapter *adapter = client->adapter; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { pr_debug("asb100.o: detect failed, " "smbus byte data not supported!\n"); - err = -ENODEV; - goto ERROR0; + return -ENODEV; } - /* OK. For now, we presume we have a valid client. We now create the - client structure, even though we cannot fill it completely yet. - But it allows us to access asb100_{read,write}_value. */ - - if (!(data = kzalloc(sizeof(struct asb100_data), GFP_KERNEL))) { - pr_debug("asb100.o: detect failed, kzalloc failed!\n"); - err = -ENOMEM; - goto ERROR0; - } - - client = &data->client; - mutex_init(&data->lock); - i2c_set_clientdata(client, data); - client->addr = address; - client->adapter = adapter; - client->driver = &asb100_driver; - - /* Now, we do the remaining detection. */ - /* The chip may be stuck in some other bank than bank 0. This may make reading other information impossible. Specify a force=... or force_*=... parameter, and the chip will be reset to the right bank. */ if (kind < 0) { - int val1 = asb100_read_value(client, ASB100_REG_BANK); - int val2 = asb100_read_value(client, ASB100_REG_CHIPMAN); + int val1 = i2c_smbus_read_byte_data(client, ASB100_REG_BANK); + int val2 = i2c_smbus_read_byte_data(client, ASB100_REG_CHIPMAN); /* If we're in bank 0 */ if ((!(val1 & 0x07)) && @@ -761,48 +722,60 @@ static int asb100_detect(struct i2c_adapter *adapter, int address, int kind) ((val1 & 0x80) && (val2 != 0x06)))) { pr_debug("asb100.o: detect failed, " "bad chip id 0x%02x!\n", val2); - err = -ENODEV; - goto ERROR1; + return -ENODEV; } } /* kind < 0 */ /* We have either had a force parameter, or we have already detected Winbond. 
Put it now into bank 0 and Vendor ID High Byte */ - asb100_write_value(client, ASB100_REG_BANK, - (asb100_read_value(client, ASB100_REG_BANK) & 0x78) | 0x80); + i2c_smbus_write_byte_data(client, ASB100_REG_BANK, + (i2c_smbus_read_byte_data(client, ASB100_REG_BANK) & 0x78) + | 0x80); /* Determine the chip type. */ if (kind <= 0) { - int val1 = asb100_read_value(client, ASB100_REG_WCHIPID); - int val2 = asb100_read_value(client, ASB100_REG_CHIPMAN); + int val1 = i2c_smbus_read_byte_data(client, ASB100_REG_WCHIPID); + int val2 = i2c_smbus_read_byte_data(client, ASB100_REG_CHIPMAN); if ((val1 == 0x31) && (val2 == 0x06)) kind = asb100; else { if (kind == 0) - dev_warn(&client->dev, "ignoring " + dev_warn(&adapter->dev, "ignoring " "'force' parameter for unknown chip " "at adapter %d, address 0x%02x.\n", - i2c_adapter_id(adapter), address); - err = -ENODEV; - goto ERROR1; + i2c_adapter_id(adapter), client->addr); + return -ENODEV; } } - /* Fill in remaining client fields and put it into the global list */ - strlcpy(client->name, "asb100", I2C_NAME_SIZE); - data->type = kind; - mutex_init(&data->update_lock); + strlcpy(info->type, "asb100", I2C_NAME_SIZE); - /* Tell the I2C layer a new client has arrived */ - if ((err = i2c_attach_client(client))) - goto ERROR1; + return 0; +} + +static int asb100_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int err; + struct asb100_data *data; + + data = kzalloc(sizeof(struct asb100_data), GFP_KERNEL); + if (!data) { + pr_debug("asb100.o: probe failed, kzalloc failed!\n"); + err = -ENOMEM; + goto ERROR0; + } + + i2c_set_clientdata(client, data); + mutex_init(&data->lock); + mutex_init(&data->update_lock); /* Attach secondary lm75 clients */ - if ((err = asb100_detect_subclients(adapter, address, kind, - client))) - goto ERROR2; + err = asb100_detect_subclients(client); + if (err) + goto ERROR1; /* Initialize the chip */ asb100_init_client(client); @@ -827,39 +800,25 @@ static int asb100_detect(struct i2c_adapter *adapter, int address, int kind) ERROR4: sysfs_remove_group(&client->dev.kobj, &asb100_group); ERROR3: - i2c_detach_client(data->lm75[1]); - i2c_detach_client(data->lm75[0]); - kfree(data->lm75[1]); - kfree(data->lm75[0]); -ERROR2: - i2c_detach_client(client); + i2c_unregister_device(data->lm75[1]); + i2c_unregister_device(data->lm75[0]); ERROR1: kfree(data); ERROR0: return err; } -static int asb100_detach_client(struct i2c_client *client) +static int asb100_remove(struct i2c_client *client) { struct asb100_data *data = i2c_get_clientdata(client); - int err; - - /* main client */ - if (data) { - hwmon_device_unregister(data->hwmon_dev); - sysfs_remove_group(&client->dev.kobj, &asb100_group); - } - if ((err = i2c_detach_client(client))) - return err; + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&client->dev.kobj, &asb100_group); - /* main client */ - if (data) - kfree(data); + i2c_unregister_device(data->lm75[1]); + i2c_unregister_device(data->lm75[0]); - /* subclient */ - else - kfree(client); + kfree(data); return 0; } diff --git a/drivers/hwmon/atxp1.c b/drivers/hwmon/atxp1.c index 01c17e387f03..d191118ba0cb 100644 --- a/drivers/hwmon/atxp1.c +++ b/drivers/hwmon/atxp1.c @@ -46,21 +46,32 @@ static const unsigned short normal_i2c[] = { 0x37, 0x4e, I2C_CLIENT_END }; I2C_CLIENT_INSMOD_1(atxp1); -static int atxp1_attach_adapter(struct i2c_adapter * adapter); -static int atxp1_detach_client(struct i2c_client * client); +static int atxp1_probe(struct i2c_client *client, + const struct i2c_device_id *id); +static 
int atxp1_remove(struct i2c_client *client); static struct atxp1_data * atxp1_update_device(struct device *dev); -static int atxp1_detect(struct i2c_adapter *adapter, int address, int kind); +static int atxp1_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info); + +static const struct i2c_device_id atxp1_id[] = { + { "atxp1", atxp1 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, atxp1_id); static struct i2c_driver atxp1_driver = { + .class = I2C_CLASS_HWMON, .driver = { .name = "atxp1", }, - .attach_adapter = atxp1_attach_adapter, - .detach_client = atxp1_detach_client, + .probe = atxp1_probe, + .remove = atxp1_remove, + .id_table = atxp1_id, + .detect = atxp1_detect, + .address_data = &addr_data, }; struct atxp1_data { - struct i2c_client client; struct device *hwmon_dev; struct mutex update_lock; unsigned long last_updated; @@ -263,35 +274,16 @@ static const struct attribute_group atxp1_group = { }; -static int atxp1_attach_adapter(struct i2c_adapter *adapter) +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int atxp1_detect(struct i2c_client *new_client, int kind, + struct i2c_board_info *info) { - if (!(adapter->class & I2C_CLASS_HWMON)) - return 0; - return i2c_probe(adapter, &addr_data, &atxp1_detect); -}; + struct i2c_adapter *adapter = new_client->adapter; -static int atxp1_detect(struct i2c_adapter *adapter, int address, int kind) -{ - struct i2c_client * new_client; - struct atxp1_data * data; - int err = 0; u8 temp; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) - goto exit; - - if (!(data = kzalloc(sizeof(struct atxp1_data), GFP_KERNEL))) { - err = -ENOMEM; - goto exit; - } - - new_client = &data->client; - i2c_set_clientdata(new_client, data); - - new_client->addr = address; - new_client->adapter = adapter; - new_client->driver = &atxp1_driver; - new_client->flags = 0; + return -ENODEV; /* Detect ATXP1, checking if vendor ID registers are all zero */ if (!((i2c_smbus_read_byte_data(new_client, 0x3e) == 0) && @@ -305,35 +297,46 @@ static int atxp1_detect(struct i2c_adapter *adapter, int address, int kind) if (!((i2c_smbus_read_byte_data(new_client, 0x10) == temp) && (i2c_smbus_read_byte_data(new_client, 0x11) == temp) )) - goto exit_free; + return -ENODEV; } /* Get VRM */ - data->vrm = vid_which_vrm(); + temp = vid_which_vrm(); - if ((data->vrm != 90) && (data->vrm != 91)) { - dev_err(&new_client->dev, "Not supporting VRM %d.%d\n", - data->vrm / 10, data->vrm % 10); - goto exit_free; + if ((temp != 90) && (temp != 91)) { + dev_err(&adapter->dev, "atxp1: Not supporting VRM %d.%d\n", + temp / 10, temp % 10); + return -ENODEV; } - strncpy(new_client->name, "atxp1", I2C_NAME_SIZE); - - data->valid = 0; + strlcpy(info->type, "atxp1", I2C_NAME_SIZE); - mutex_init(&data->update_lock); + return 0; +} - err = i2c_attach_client(new_client); +static int atxp1_probe(struct i2c_client *new_client, + const struct i2c_device_id *id) +{ + struct atxp1_data *data; + int err; - if (err) - { - dev_err(&new_client->dev, "Attach client error.\n"); - goto exit_free; + data = kzalloc(sizeof(struct atxp1_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto exit; } + /* Get VRM */ + data->vrm = vid_which_vrm(); + + i2c_set_clientdata(new_client, data); + data->valid = 0; + + mutex_init(&data->update_lock); + /* Register sysfs hooks */ if ((err = sysfs_create_group(&new_client->dev.kobj, &atxp1_group))) - goto exit_detach; + goto exit_free; data->hwmon_dev = hwmon_device_register(&new_client->dev); if (IS_ERR(data->hwmon_dev)) { @@ -348,30 
+351,22 @@ static int atxp1_detect(struct i2c_adapter *adapter, int address, int kind) exit_remove_files: sysfs_remove_group(&new_client->dev.kobj, &atxp1_group); -exit_detach: - i2c_detach_client(new_client); exit_free: kfree(data); exit: return err; }; -static int atxp1_detach_client(struct i2c_client * client) +static int atxp1_remove(struct i2c_client *client) { struct atxp1_data * data = i2c_get_clientdata(client); - int err; hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &atxp1_group); - err = i2c_detach_client(client); - - if (err) - dev_err(&client->dev, "Failed to detach client.\n"); - else - kfree(data); + kfree(data); - return err; + return 0; }; static int __init atxp1_init(void) diff --git a/drivers/hwmon/ds1621.c b/drivers/hwmon/ds1621.c index 5f300ffed657..7415381601c3 100644 --- a/drivers/hwmon/ds1621.c +++ b/drivers/hwmon/ds1621.c @@ -72,7 +72,6 @@ static const u8 DS1621_REG_TEMP[3] = { /* Each client has this additional data */ struct ds1621_data { - struct i2c_client client; struct device *hwmon_dev; struct mutex update_lock; char valid; /* !=0 if following fields are valid */ @@ -82,20 +81,32 @@ struct ds1621_data { u8 conf; /* Register encoding, combined */ }; -static int ds1621_attach_adapter(struct i2c_adapter *adapter); -static int ds1621_detect(struct i2c_adapter *adapter, int address, - int kind); +static int ds1621_probe(struct i2c_client *client, + const struct i2c_device_id *id); +static int ds1621_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info); static void ds1621_init_client(struct i2c_client *client); -static int ds1621_detach_client(struct i2c_client *client); +static int ds1621_remove(struct i2c_client *client); static struct ds1621_data *ds1621_update_client(struct device *dev); +static const struct i2c_device_id ds1621_id[] = { + { "ds1621", ds1621 }, + { "ds1625", ds1621 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, ds1621_id); + /* This is the driver that will be inserted */ static struct i2c_driver ds1621_driver = { + .class = I2C_CLASS_HWMON, .driver = { .name = "ds1621", }, - .attach_adapter = ds1621_attach_adapter, - .detach_client = ds1621_detach_client, + .probe = ds1621_probe, + .remove = ds1621_remove, + .id_table = ds1621_id, + .detect = ds1621_detect, + .address_data = &addr_data, }; /* All registers are word-sized, except for the configuration register. @@ -199,40 +210,18 @@ static const struct attribute_group ds1621_group = { }; -static int ds1621_attach_adapter(struct i2c_adapter *adapter) -{ - if (!(adapter->class & I2C_CLASS_HWMON)) - return 0; - return i2c_probe(adapter, &addr_data, ds1621_detect); -} - -/* This function is called by i2c_probe */ -static int ds1621_detect(struct i2c_adapter *adapter, int address, - int kind) +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int ds1621_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info) { + struct i2c_adapter *adapter = client->adapter; int conf, temp; - struct i2c_client *client; - struct ds1621_data *data; - int i, err = 0; + int i; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_WRITE_BYTE)) - goto exit; - - /* OK. For now, we presume we have a valid client. We now create the - client structure, even though we cannot fill it completely yet. - But it allows us to access ds1621_{read,write}_value. 
*/ - if (!(data = kzalloc(sizeof(struct ds1621_data), GFP_KERNEL))) { - err = -ENOMEM; - goto exit; - } - - client = &data->client; - i2c_set_clientdata(client, data); - client->addr = address; - client->adapter = adapter; - client->driver = &ds1621_driver; + return -ENODEV; /* Now, we do the remaining detection. It is lousy. */ if (kind < 0) { @@ -241,29 +230,41 @@ static int ds1621_detect(struct i2c_adapter *adapter, int address, improbable in our case. */ conf = ds1621_read_value(client, DS1621_REG_CONF); if (conf & DS1621_REG_CONFIG_NVB) - goto exit_free; + return -ENODEV; /* The 7 lowest bits of a temperature should always be 0. */ - for (i = 0; i < ARRAY_SIZE(data->temp); i++) { + for (i = 0; i < ARRAY_SIZE(DS1621_REG_TEMP); i++) { temp = ds1621_read_value(client, DS1621_REG_TEMP[i]); if (temp & 0x007f) - goto exit_free; + return -ENODEV; } } - /* Fill in remaining client fields and put it into the global list */ - strlcpy(client->name, "ds1621", I2C_NAME_SIZE); - mutex_init(&data->update_lock); + strlcpy(info->type, "ds1621", I2C_NAME_SIZE); - /* Tell the I2C layer a new client has arrived */ - if ((err = i2c_attach_client(client))) - goto exit_free; + return 0; +} + +static int ds1621_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct ds1621_data *data; + int err; + + data = kzalloc(sizeof(struct ds1621_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(client, data); + mutex_init(&data->update_lock); /* Initialize the DS1621 chip */ ds1621_init_client(client); /* Register sysfs hooks */ if ((err = sysfs_create_group(&client->dev.kobj, &ds1621_group))) - goto exit_detach; + goto exit_free; data->hwmon_dev = hwmon_device_register(&client->dev); if (IS_ERR(data->hwmon_dev)) { @@ -275,25 +276,19 @@ static int ds1621_detect(struct i2c_adapter *adapter, int address, exit_remove_files: sysfs_remove_group(&client->dev.kobj, &ds1621_group); - exit_detach: - i2c_detach_client(client); exit_free: kfree(data); exit: return err; } -static int ds1621_detach_client(struct i2c_client *client) +static int ds1621_remove(struct i2c_client *client) { struct ds1621_data *data = i2c_get_clientdata(client); - int err; hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &ds1621_group); - if ((err = i2c_detach_client(client))) - return err; - kfree(data); return 0; diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c index dc1f30e432ea..1692de369969 100644 --- a/drivers/hwmon/f75375s.c +++ b/drivers/hwmon/f75375s.c @@ -87,7 +87,6 @@ I2C_CLIENT_INSMOD_2(f75373, f75375); struct f75375_data { unsigned short addr; - struct i2c_client *client; struct device *hwmon_dev; const char *name; @@ -114,21 +113,12 @@ struct f75375_data { s8 temp_max_hyst[2]; }; -static int f75375_attach_adapter(struct i2c_adapter *adapter); -static int f75375_detect(struct i2c_adapter *adapter, int address, int kind); -static int f75375_detach_client(struct i2c_client *client); +static int f75375_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info); static int f75375_probe(struct i2c_client *client, const struct i2c_device_id *id); static int f75375_remove(struct i2c_client *client); -static struct i2c_driver f75375_legacy_driver = { - .driver = { - .name = "f75375_legacy", - }, - .attach_adapter = f75375_attach_adapter, - .detach_client = f75375_detach_client, -}; - static const struct i2c_device_id f75375_id[] = { { "f75373", f75373 }, { "f75375", f75375 }, @@ -137,12 +127,15 @@ static const struct 
i2c_device_id f75375_id[] = { MODULE_DEVICE_TABLE(i2c, f75375_id); static struct i2c_driver f75375_driver = { + .class = I2C_CLASS_HWMON, .driver = { .name = "f75375", }, .probe = f75375_probe, .remove = f75375_remove, .id_table = f75375_id, + .detect = f75375_detect, + .address_data = &addr_data, }; static inline int f75375_read8(struct i2c_client *client, u8 reg) @@ -607,22 +600,6 @@ static const struct attribute_group f75375_group = { .attrs = f75375_attributes, }; -static int f75375_detach_client(struct i2c_client *client) -{ - int err; - - f75375_remove(client); - err = i2c_detach_client(client); - if (err) { - dev_err(&client->dev, - "Client deregistration failed, " - "client not detached.\n"); - return err; - } - kfree(client); - return 0; -} - static void f75375_init(struct i2c_client *client, struct f75375_data *data, struct f75375s_platform_data *f75375s_pdata) { @@ -651,7 +628,6 @@ static int f75375_probe(struct i2c_client *client, return -ENOMEM; i2c_set_clientdata(client, data); - data->client = client; mutex_init(&data->update_lock); data->kind = id->driver_data; @@ -700,29 +676,13 @@ static int f75375_remove(struct i2c_client *client) return 0; } -static int f75375_attach_adapter(struct i2c_adapter *adapter) -{ - if (!(adapter->class & I2C_CLASS_HWMON)) - return 0; - return i2c_probe(adapter, &addr_data, f75375_detect); -} - -/* This function is called by i2c_probe */ -static int f75375_detect(struct i2c_adapter *adapter, int address, int kind) +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int f75375_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info) { - struct i2c_client *client; + struct i2c_adapter *adapter = client->adapter; u8 version = 0; - int err = 0; const char *name = ""; - struct i2c_device_id id; - - if (!(client = kzalloc(sizeof(*client), GFP_KERNEL))) { - err = -ENOMEM; - goto exit; - } - client->addr = address; - client->adapter = adapter; - client->driver = &f75375_legacy_driver; if (kind < 0) { u16 vendid = f75375_read16(client, F75375_REG_VENDOR); @@ -736,7 +696,7 @@ static int f75375_detect(struct i2c_adapter *adapter, int address, int kind) dev_err(&adapter->dev, "failed,%02X,%02X,%02X\n", chipid, version, vendid); - goto exit_free; + return -ENODEV; } } @@ -746,43 +706,18 @@ static int f75375_detect(struct i2c_adapter *adapter, int address, int kind) name = "f75373"; } dev_info(&adapter->dev, "found %s version: %02X\n", name, version); - strlcpy(client->name, name, I2C_NAME_SIZE); - - if ((err = i2c_attach_client(client))) - goto exit_free; - - strlcpy(id.name, name, I2C_NAME_SIZE); - id.driver_data = kind; - if ((err = f75375_probe(client, &id)) < 0) - goto exit_detach; + strlcpy(info->type, name, I2C_NAME_SIZE); return 0; - -exit_detach: - i2c_detach_client(client); -exit_free: - kfree(client); -exit: - return err; } static int __init sensors_f75375_init(void) { - int status; - status = i2c_add_driver(&f75375_driver); - if (status) - return status; - - status = i2c_add_driver(&f75375_legacy_driver); - if (status) - i2c_del_driver(&f75375_driver); - - return status; + return i2c_add_driver(&f75375_driver); } static void __exit sensors_f75375_exit(void) { - i2c_del_driver(&f75375_legacy_driver); i2c_del_driver(&f75375_driver); } diff --git a/drivers/hwmon/fscher.c b/drivers/hwmon/fscher.c index ed26b66e0831..12c70e402cb2 100644 --- a/drivers/hwmon/fscher.c +++ b/drivers/hwmon/fscher.c @@ -106,9 +106,11 @@ I2C_CLIENT_INSMOD_1(fscher); * Functions declaration */ -static int fscher_attach_adapter(struct 
i2c_adapter *adapter); -static int fscher_detect(struct i2c_adapter *adapter, int address, int kind); -static int fscher_detach_client(struct i2c_client *client); +static int fscher_probe(struct i2c_client *client, + const struct i2c_device_id *id); +static int fscher_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info); +static int fscher_remove(struct i2c_client *client); static struct fscher_data *fscher_update_device(struct device *dev); static void fscher_init_client(struct i2c_client *client); @@ -119,12 +121,21 @@ static int fscher_write_value(struct i2c_client *client, u8 reg, u8 value); * Driver data (common to all clients) */ +static const struct i2c_device_id fscher_id[] = { + { "fscher", fscher }, + { } +}; + static struct i2c_driver fscher_driver = { + .class = I2C_CLASS_HWMON, .driver = { .name = "fscher", }, - .attach_adapter = fscher_attach_adapter, - .detach_client = fscher_detach_client, + .probe = fscher_probe, + .remove = fscher_remove, + .id_table = fscher_id, + .detect = fscher_detect, + .address_data = &addr_data, }; /* @@ -132,7 +143,6 @@ static struct i2c_driver fscher_driver = { */ struct fscher_data { - struct i2c_client client; struct device *hwmon_dev; struct mutex update_lock; char valid; /* zero until following fields are valid */ @@ -283,38 +293,14 @@ static const struct attribute_group fscher_group = { * Real code */ -static int fscher_attach_adapter(struct i2c_adapter *adapter) -{ - if (!(adapter->class & I2C_CLASS_HWMON)) - return 0; - return i2c_probe(adapter, &addr_data, fscher_detect); -} - -static int fscher_detect(struct i2c_adapter *adapter, int address, int kind) +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int fscher_detect(struct i2c_client *new_client, int kind, + struct i2c_board_info *info) { - struct i2c_client *new_client; - struct fscher_data *data; - int err = 0; + struct i2c_adapter *adapter = new_client->adapter; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) - goto exit; - - /* OK. For now, we presume we have a valid client. We now create the - * client structure, even though we cannot fill it completely yet. - * But it allows us to access i2c_smbus_read_byte_data. */ - if (!(data = kzalloc(sizeof(struct fscher_data), GFP_KERNEL))) { - err = -ENOMEM; - goto exit; - } - - /* The common I2C client data is placed right before the - * Hermes-specific data. 
*/ - new_client = &data->client; - i2c_set_clientdata(new_client, data); - new_client->addr = address; - new_client->adapter = adapter; - new_client->driver = &fscher_driver; - new_client->flags = 0; + return -ENODEV; /* Do the remaining detection unless force or force_fscher parameter */ if (kind < 0) { @@ -324,24 +310,35 @@ static int fscher_detect(struct i2c_adapter *adapter, int address, int kind) FSCHER_REG_IDENT_1) != 0x45) /* 'E' */ || (i2c_smbus_read_byte_data(new_client, FSCHER_REG_IDENT_2) != 0x52)) /* 'R' */ - goto exit_free; + return -ENODEV; + } + + strlcpy(info->type, "fscher", I2C_NAME_SIZE); + + return 0; +} + +static int fscher_probe(struct i2c_client *new_client, + const struct i2c_device_id *id) +{ + struct fscher_data *data; + int err; + + data = kzalloc(sizeof(struct fscher_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto exit; } - /* Fill in the remaining client fields and put it into the - * global list */ - strlcpy(new_client->name, "fscher", I2C_NAME_SIZE); + i2c_set_clientdata(new_client, data); data->valid = 0; mutex_init(&data->update_lock); - /* Tell the I2C layer a new client has arrived */ - if ((err = i2c_attach_client(new_client))) - goto exit_free; - fscher_init_client(new_client); /* Register sysfs hooks */ if ((err = sysfs_create_group(&new_client->dev.kobj, &fscher_group))) - goto exit_detach; + goto exit_free; data->hwmon_dev = hwmon_device_register(&new_client->dev); if (IS_ERR(data->hwmon_dev)) { @@ -353,25 +350,19 @@ static int fscher_detect(struct i2c_adapter *adapter, int address, int kind) exit_remove_files: sysfs_remove_group(&new_client->dev.kobj, &fscher_group); -exit_detach: - i2c_detach_client(new_client); exit_free: kfree(data); exit: return err; } -static int fscher_detach_client(struct i2c_client *client) +static int fscher_remove(struct i2c_client *client) { struct fscher_data *data = i2c_get_clientdata(client); - int err; hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &fscher_group); - if ((err = i2c_detach_client(client))) - return err; - kfree(data); return 0; } diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c index bd89d270a5ed..967170368933 100644 --- a/drivers/hwmon/fschmd.c +++ b/drivers/hwmon/fschmd.c @@ -171,20 +171,37 @@ static const int FSCHMD_NO_TEMP_SENSORS[5] = { 3, 3, 4, 3, 5 }; * Functions declarations */ -static int fschmd_attach_adapter(struct i2c_adapter *adapter); -static int fschmd_detach_client(struct i2c_client *client); +static int fschmd_probe(struct i2c_client *client, + const struct i2c_device_id *id); +static int fschmd_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info); +static int fschmd_remove(struct i2c_client *client); static struct fschmd_data *fschmd_update_device(struct device *dev); /* * Driver data (common to all clients) */ +static const struct i2c_device_id fschmd_id[] = { + { "fscpos", fscpos }, + { "fscher", fscher }, + { "fscscy", fscscy }, + { "fschrc", fschrc }, + { "fschmd", fschmd }, + { } +}; +MODULE_DEVICE_TABLE(i2c, fschmd_id); + static struct i2c_driver fschmd_driver = { + .class = I2C_CLASS_HWMON, .driver = { .name = FSCHMD_NAME, }, - .attach_adapter = fschmd_attach_adapter, - .detach_client = fschmd_detach_client, + .probe = fschmd_probe, + .remove = fschmd_remove, + .id_table = fschmd_id, + .detect = fschmd_detect, + .address_data = &addr_data, }; /* @@ -192,7 +209,6 @@ static struct i2c_driver fschmd_driver = { */ struct fschmd_data { - struct i2c_client client; struct device *hwmon_dev; struct mutex 
update_lock; int kind; @@ -269,7 +285,7 @@ static ssize_t store_temp_max(struct device *dev, struct device_attribute v = SENSORS_LIMIT(v, -128, 127) + 128; mutex_lock(&data->update_lock); - i2c_smbus_write_byte_data(&data->client, + i2c_smbus_write_byte_data(to_i2c_client(dev), FSCHMD_REG_TEMP_LIMIT[data->kind][index], v); data->temp_max[index] = v; mutex_unlock(&data->update_lock); @@ -346,14 +362,14 @@ static ssize_t store_fan_div(struct device *dev, struct device_attribute mutex_lock(&data->update_lock); - reg = i2c_smbus_read_byte_data(&data->client, + reg = i2c_smbus_read_byte_data(to_i2c_client(dev), FSCHMD_REG_FAN_RIPPLE[data->kind][index]); /* bits 2..7 reserved => mask with 0x03 */ reg &= ~0x03; reg |= v; - i2c_smbus_write_byte_data(&data->client, + i2c_smbus_write_byte_data(to_i2c_client(dev), FSCHMD_REG_FAN_RIPPLE[data->kind][index], reg); data->fan_ripple[index] = reg; @@ -416,7 +432,7 @@ static ssize_t store_pwm_auto_point1_pwm(struct device *dev, mutex_lock(&data->update_lock); - i2c_smbus_write_byte_data(&data->client, + i2c_smbus_write_byte_data(to_i2c_client(dev), FSCHMD_REG_FAN_MIN[data->kind][index], v); data->fan_min[index] = v; @@ -448,14 +464,14 @@ static ssize_t store_alert_led(struct device *dev, mutex_lock(&data->update_lock); - reg = i2c_smbus_read_byte_data(&data->client, FSCHMD_REG_CONTROL); + reg = i2c_smbus_read_byte_data(to_i2c_client(dev), FSCHMD_REG_CONTROL); if (v) reg |= FSCHMD_CONTROL_ALERT_LED_MASK; else reg &= ~FSCHMD_CONTROL_ALERT_LED_MASK; - i2c_smbus_write_byte_data(&data->client, FSCHMD_REG_CONTROL, reg); + i2c_smbus_write_byte_data(to_i2c_client(dev), FSCHMD_REG_CONTROL, reg); data->global_control = reg; @@ -600,32 +616,15 @@ static void fschmd_dmi_decode(const struct dmi_header *header) } } -static int fschmd_detect(struct i2c_adapter *adapter, int address, int kind) +static int fschmd_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info) { - struct i2c_client *client; - struct fschmd_data *data; - u8 revision; - const char * const names[5] = { "Poseidon", "Hermes", "Scylla", - "Heracles", "Heimdall" }; + struct i2c_adapter *adapter = client->adapter; const char * const client_names[5] = { "fscpos", "fscher", "fscscy", "fschrc", "fschmd" }; - int i, err = 0; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) - return 0; - - /* OK. For now, we presume we have a valid client. We now create the - * client structure, even though we cannot fill it completely yet. - * But it allows us to access i2c_smbus_read_byte_data. 
*/ - if (!(data = kzalloc(sizeof(struct fschmd_data), GFP_KERNEL))) - return -ENOMEM; - - client = &data->client; - i2c_set_clientdata(client, data); - client->addr = address; - client->adapter = adapter; - client->driver = &fschmd_driver; - mutex_init(&data->update_lock); + return -ENODEV; /* Detect & Identify the chip */ if (kind <= 0) { @@ -650,9 +649,31 @@ static int fschmd_detect(struct i2c_adapter *adapter, int address, int kind) else if (!strcmp(id, "HMD")) kind = fschmd; else - goto exit_free; + return -ENODEV; } + strlcpy(info->type, client_names[kind - 1], I2C_NAME_SIZE); + + return 0; +} + +static int fschmd_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct fschmd_data *data; + u8 revision; + const char * const names[5] = { "Poseidon", "Hermes", "Scylla", + "Heracles", "Heimdall" }; + int i, err; + enum chips kind = id->driver_data; + + data = kzalloc(sizeof(struct fschmd_data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + i2c_set_clientdata(client, data); + mutex_init(&data->update_lock); + if (kind == fscpos) { /* The Poseidon has hardwired temp limits, fill these in for the alarm resetting code */ @@ -674,11 +695,6 @@ static int fschmd_detect(struct i2c_adapter *adapter, int address, int kind) /* i2c kind goes from 1-5, we want from 0-4 to address arrays */ data->kind = kind - 1; - strlcpy(client->name, client_names[data->kind], I2C_NAME_SIZE); - - /* Tell the I2C layer a new client has arrived */ - if ((err = i2c_attach_client(client))) - goto exit_free; for (i = 0; i < ARRAY_SIZE(fschmd_attr); i++) { err = device_create_file(&client->dev, @@ -726,25 +742,14 @@ static int fschmd_detect(struct i2c_adapter *adapter, int address, int kind) return 0; exit_detach: - fschmd_detach_client(client); /* will also free data for us */ - return err; - -exit_free: - kfree(data); + fschmd_remove(client); /* will also free data for us */ return err; } -static int fschmd_attach_adapter(struct i2c_adapter *adapter) -{ - if (!(adapter->class & I2C_CLASS_HWMON)) - return 0; - return i2c_probe(adapter, &addr_data, fschmd_detect); -} - -static int fschmd_detach_client(struct i2c_client *client) +static int fschmd_remove(struct i2c_client *client) { struct fschmd_data *data = i2c_get_clientdata(client); - int i, err; + int i; /* Check if registered in case we're called from fschmd_detect to cleanup after an error */ @@ -760,9 +765,6 @@ static int fschmd_detach_client(struct i2c_client *client) device_remove_file(&client->dev, &fschmd_fan_attr[i].dev_attr); - if ((err = i2c_detach_client(client))) - return err; - kfree(data); return 0; } diff --git a/drivers/hwmon/fscpos.c b/drivers/hwmon/fscpos.c index 00f48484e54b..8a7bcf500b4e 100644 --- a/drivers/hwmon/fscpos.c +++ b/drivers/hwmon/fscpos.c @@ -87,9 +87,11 @@ static u8 FSCPOS_REG_TEMP_STATE[] = { 0x71, 0x81, 0x91 }; /* * Functions declaration */ -static int fscpos_attach_adapter(struct i2c_adapter *adapter); -static int fscpos_detect(struct i2c_adapter *adapter, int address, int kind); -static int fscpos_detach_client(struct i2c_client *client); +static int fscpos_probe(struct i2c_client *client, + const struct i2c_device_id *id); +static int fscpos_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info); +static int fscpos_remove(struct i2c_client *client); static int fscpos_read_value(struct i2c_client *client, u8 reg); static int fscpos_write_value(struct i2c_client *client, u8 reg, u8 value); @@ -101,19 +103,27 @@ static void reset_fan_alarm(struct i2c_client *client, int nr); /* * Driver 
data (common to all clients) */ +static const struct i2c_device_id fscpos_id[] = { + { "fscpos", fscpos }, + { } +}; + static struct i2c_driver fscpos_driver = { + .class = I2C_CLASS_HWMON, .driver = { .name = "fscpos", }, - .attach_adapter = fscpos_attach_adapter, - .detach_client = fscpos_detach_client, + .probe = fscpos_probe, + .remove = fscpos_remove, + .id_table = fscpos_id, + .detect = fscpos_detect, + .address_data = &addr_data, }; /* * Client data (each client gets its own) */ struct fscpos_data { - struct i2c_client client; struct device *hwmon_dev; struct mutex update_lock; char valid; /* 0 until following fields are valid */ @@ -470,39 +480,14 @@ static const struct attribute_group fscpos_group = { .attrs = fscpos_attributes, }; -static int fscpos_attach_adapter(struct i2c_adapter *adapter) -{ - if (!(adapter->class & I2C_CLASS_HWMON)) - return 0; - return i2c_probe(adapter, &addr_data, fscpos_detect); -} - -static int fscpos_detect(struct i2c_adapter *adapter, int address, int kind) +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int fscpos_detect(struct i2c_client *new_client, int kind, + struct i2c_board_info *info) { - struct i2c_client *new_client; - struct fscpos_data *data; - int err = 0; + struct i2c_adapter *adapter = new_client->adapter; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) - goto exit; - - /* - * OK. For now, we presume we have a valid client. We now create the - * client structure, even though we cannot fill it completely yet. - * But it allows us to access fscpos_{read,write}_value. - */ - - if (!(data = kzalloc(sizeof(struct fscpos_data), GFP_KERNEL))) { - err = -ENOMEM; - goto exit; - } - - new_client = &data->client; - i2c_set_clientdata(new_client, data); - new_client->addr = address; - new_client->adapter = adapter; - new_client->driver = &fscpos_driver; - new_client->flags = 0; + return -ENODEV; /* Do the remaining detection unless force or force_fscpos parameter */ if (kind < 0) { @@ -512,22 +497,30 @@ static int fscpos_detect(struct i2c_adapter *adapter, int address, int kind) != 0x45) /* 'E' */ || (fscpos_read_value(new_client, FSCPOS_REG_IDENT_2) != 0x47))/* 'G' */ - { - dev_dbg(&new_client->dev, "fscpos detection failed\n"); - goto exit_free; - } + return -ENODEV; } - /* Fill in the remaining client fields and put it in the global list */ - strlcpy(new_client->name, "fscpos", I2C_NAME_SIZE); + strlcpy(info->type, "fscpos", I2C_NAME_SIZE); + return 0; +} + +static int fscpos_probe(struct i2c_client *new_client, + const struct i2c_device_id *id) +{ + struct fscpos_data *data; + int err; + + data = kzalloc(sizeof(struct fscpos_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(new_client, data); data->valid = 0; mutex_init(&data->update_lock); - /* Tell the I2C layer a new client has arrived */ - if ((err = i2c_attach_client(new_client))) - goto exit_free; - /* Inizialize the fscpos chip */ fscpos_init_client(new_client); @@ -536,7 +529,7 @@ static int fscpos_detect(struct i2c_adapter *adapter, int address, int kind) /* Register sysfs hooks */ if ((err = sysfs_create_group(&new_client->dev.kobj, &fscpos_group))) - goto exit_detach; + goto exit_free; data->hwmon_dev = hwmon_device_register(&new_client->dev); if (IS_ERR(data->hwmon_dev)) { @@ -548,24 +541,19 @@ static int fscpos_detect(struct i2c_adapter *adapter, int address, int kind) exit_remove_files: sysfs_remove_group(&new_client->dev.kobj, &fscpos_group); -exit_detach: - i2c_detach_client(new_client); exit_free: 
kfree(data); exit: return err; } -static int fscpos_detach_client(struct i2c_client *client) +static int fscpos_remove(struct i2c_client *client) { struct fscpos_data *data = i2c_get_clientdata(client); - int err; hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &fscpos_group); - if ((err = i2c_detach_client(client))) - return err; kfree(data); return 0; } diff --git a/drivers/hwmon/gl518sm.c b/drivers/hwmon/gl518sm.c index 33e9e8a8d1ce..7820df45d77a 100644 --- a/drivers/hwmon/gl518sm.c +++ b/drivers/hwmon/gl518sm.c @@ -114,7 +114,6 @@ static inline u8 FAN_TO_REG(long rpm, int div) /* Each client has this additional data */ struct gl518_data { - struct i2c_client client; struct device *hwmon_dev; enum chips type; @@ -138,21 +137,33 @@ struct gl518_data { u8 beep_enable; /* Boolean */ }; -static int gl518_attach_adapter(struct i2c_adapter *adapter); -static int gl518_detect(struct i2c_adapter *adapter, int address, int kind); +static int gl518_probe(struct i2c_client *client, + const struct i2c_device_id *id); +static int gl518_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info); static void gl518_init_client(struct i2c_client *client); -static int gl518_detach_client(struct i2c_client *client); +static int gl518_remove(struct i2c_client *client); static int gl518_read_value(struct i2c_client *client, u8 reg); static int gl518_write_value(struct i2c_client *client, u8 reg, u16 value); static struct gl518_data *gl518_update_device(struct device *dev); +static const struct i2c_device_id gl518_id[] = { + { "gl518sm", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, gl518_id); + /* This is the driver that will be inserted */ static struct i2c_driver gl518_driver = { + .class = I2C_CLASS_HWMON, .driver = { .name = "gl518sm", }, - .attach_adapter = gl518_attach_adapter, - .detach_client = gl518_detach_client, + .probe = gl518_probe, + .remove = gl518_remove, + .id_table = gl518_id, + .detect = gl518_detect, + .address_data = &addr_data, }; /* @@ -472,46 +483,23 @@ static const struct attribute_group gl518_group_r80 = { * Real code */ -static int gl518_attach_adapter(struct i2c_adapter *adapter) -{ - if (!(adapter->class & I2C_CLASS_HWMON)) - return 0; - return i2c_probe(adapter, &addr_data, gl518_detect); -} - -static int gl518_detect(struct i2c_adapter *adapter, int address, int kind) +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int gl518_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info) { + struct i2c_adapter *adapter = client->adapter; int i; - struct i2c_client *client; - struct gl518_data *data; - int err = 0; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA)) - goto exit; - - /* OK. For now, we presume we have a valid client. We now create the - client structure, even though we cannot fill it completely yet. - But it allows us to access gl518_{read,write}_value. */ - - if (!(data = kzalloc(sizeof(struct gl518_data), GFP_KERNEL))) { - err = -ENOMEM; - goto exit; - } - - client = &data->client; - i2c_set_clientdata(client, data); - - client->addr = address; - client->adapter = adapter; - client->driver = &gl518_driver; + return -ENODEV; /* Now, we do the remaining detection. */ if (kind < 0) { if ((gl518_read_value(client, GL518_REG_CHIP_ID) != 0x80) || (gl518_read_value(client, GL518_REG_CONF) & 0x80)) - goto exit_free; + return -ENODEV; } /* Determine the chip type. 
*/ @@ -526,19 +514,32 @@ static int gl518_detect(struct i2c_adapter *adapter, int address, int kind) dev_info(&adapter->dev, "Ignoring 'force' parameter for unknown " "chip at adapter %d, address 0x%02x\n", - i2c_adapter_id(adapter), address); - goto exit_free; + i2c_adapter_id(adapter), client->addr); + return -ENODEV; } } - /* Fill in the remaining client fields */ - strlcpy(client->name, "gl518sm", I2C_NAME_SIZE); - data->type = kind; - mutex_init(&data->update_lock); + strlcpy(info->type, "gl518sm", I2C_NAME_SIZE); - /* Tell the I2C layer a new client has arrived */ - if ((err = i2c_attach_client(client))) - goto exit_free; + return 0; +} + +static int gl518_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct gl518_data *data; + int err, revision; + + data = kzalloc(sizeof(struct gl518_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(client, data); + revision = gl518_read_value(client, GL518_REG_REVISION); + data->type = revision == 0x80 ? gl518sm_r80 : gl518sm_r00; + mutex_init(&data->update_lock); /* Initialize the GL518SM chip */ data->alarm_mask = 0xff; @@ -546,7 +547,7 @@ static int gl518_detect(struct i2c_adapter *adapter, int address, int kind) /* Register sysfs hooks */ if ((err = sysfs_create_group(&client->dev.kobj, &gl518_group))) - goto exit_detach; + goto exit_free; if (data->type == gl518sm_r80) if ((err = sysfs_create_group(&client->dev.kobj, &gl518_group_r80))) @@ -564,8 +565,6 @@ exit_remove_files: sysfs_remove_group(&client->dev.kobj, &gl518_group); if (data->type == gl518sm_r80) sysfs_remove_group(&client->dev.kobj, &gl518_group_r80); -exit_detach: - i2c_detach_client(client); exit_free: kfree(data); exit: @@ -591,19 +590,15 @@ static void gl518_init_client(struct i2c_client *client) gl518_write_value(client, GL518_REG_CONF, 0x40 | regvalue); } -static int gl518_detach_client(struct i2c_client *client) +static int gl518_remove(struct i2c_client *client) { struct gl518_data *data = i2c_get_clientdata(client); - int err; hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &gl518_group); if (data->type == gl518sm_r80) sysfs_remove_group(&client->dev.kobj, &gl518_group_r80); - if ((err = i2c_detach_client(client))) - return err; - kfree(data); return 0; } diff --git a/drivers/hwmon/gl520sm.c b/drivers/hwmon/gl520sm.c index 8984ef141627..19616f2242b0 100644 --- a/drivers/hwmon/gl520sm.c +++ b/drivers/hwmon/gl520sm.c @@ -79,26 +79,37 @@ static const u8 GL520_REG_TEMP_MAX_HYST[] = { 0x06, 0x18 }; * Function declarations */ -static int gl520_attach_adapter(struct i2c_adapter *adapter); -static int gl520_detect(struct i2c_adapter *adapter, int address, int kind); +static int gl520_probe(struct i2c_client *client, + const struct i2c_device_id *id); +static int gl520_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info); static void gl520_init_client(struct i2c_client *client); -static int gl520_detach_client(struct i2c_client *client); +static int gl520_remove(struct i2c_client *client); static int gl520_read_value(struct i2c_client *client, u8 reg); static int gl520_write_value(struct i2c_client *client, u8 reg, u16 value); static struct gl520_data *gl520_update_device(struct device *dev); /* Driver data */ +static const struct i2c_device_id gl520_id[] = { + { "gl520sm", gl520sm }, + { } +}; +MODULE_DEVICE_TABLE(i2c, gl520_id); + static struct i2c_driver gl520_driver = { + .class = I2C_CLASS_HWMON, .driver = { .name = "gl520sm", }, - .attach_adapter = 
gl520_attach_adapter, - .detach_client = gl520_detach_client, + .probe = gl520_probe, + .remove = gl520_remove, + .id_table = gl520_id, + .detect = gl520_detect, + .address_data = &addr_data, }; /* Client data */ struct gl520_data { - struct i2c_client client; struct device *hwmon_dev; struct mutex update_lock; char valid; /* zero until the following fields are valid */ @@ -669,37 +680,15 @@ static const struct attribute_group gl520_group_opt = { * Real code */ -static int gl520_attach_adapter(struct i2c_adapter *adapter) -{ - if (!(adapter->class & I2C_CLASS_HWMON)) - return 0; - return i2c_probe(adapter, &addr_data, gl520_detect); -} - -static int gl520_detect(struct i2c_adapter *adapter, int address, int kind) +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int gl520_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info) { - struct i2c_client *client; - struct gl520_data *data; - int err = 0; + struct i2c_adapter *adapter = client->adapter; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA)) - goto exit; - - /* OK. For now, we presume we have a valid client. We now create the - client structure, even though we cannot fill it completely yet. - But it allows us to access gl520_{read,write}_value. */ - - if (!(data = kzalloc(sizeof(struct gl520_data), GFP_KERNEL))) { - err = -ENOMEM; - goto exit; - } - - client = &data->client; - i2c_set_clientdata(client, data); - client->addr = address; - client->adapter = adapter; - client->driver = &gl520_driver; + return -ENODEV; /* Determine the chip type. */ if (kind < 0) { @@ -707,24 +696,36 @@ static int gl520_detect(struct i2c_adapter *adapter, int address, int kind) ((gl520_read_value(client, GL520_REG_REVISION) & 0x7f) != 0x00) || ((gl520_read_value(client, GL520_REG_CONF) & 0x80) != 0x00)) { dev_dbg(&client->dev, "Unknown chip type, skipping\n"); - goto exit_free; + return -ENODEV; } } - /* Fill in the remaining client fields */ - strlcpy(client->name, "gl520sm", I2C_NAME_SIZE); - mutex_init(&data->update_lock); + strlcpy(info->type, "gl520sm", I2C_NAME_SIZE); - /* Tell the I2C layer a new client has arrived */ - if ((err = i2c_attach_client(client))) - goto exit_free; + return 0; +} + +static int gl520_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct gl520_data *data; + int err; + + data = kzalloc(sizeof(struct gl520_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(client, data); + mutex_init(&data->update_lock); /* Initialize the GL520SM chip */ gl520_init_client(client); /* Register sysfs hooks */ if ((err = sysfs_create_group(&client->dev.kobj, &gl520_group))) - goto exit_detach; + goto exit_free; if (data->two_temps) { if ((err = device_create_file(&client->dev, @@ -764,8 +765,6 @@ static int gl520_detect(struct i2c_adapter *adapter, int address, int kind) exit_remove_files: sysfs_remove_group(&client->dev.kobj, &gl520_group); sysfs_remove_group(&client->dev.kobj, &gl520_group_opt); -exit_detach: - i2c_detach_client(client); exit_free: kfree(data); exit: @@ -811,18 +810,14 @@ static void gl520_init_client(struct i2c_client *client) gl520_write_value(client, GL520_REG_BEEP_MASK, data->beep_mask); } -static int gl520_detach_client(struct i2c_client *client) +static int gl520_remove(struct i2c_client *client) { struct gl520_data *data = i2c_get_clientdata(client); - int err; hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &gl520_group); 
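By this point the same registration diff has appeared in fschmd, fscpos, gl518sm and gl520sm: attach_adapter/detach_client go away, probe/remove and an id table come in, and legacy address scanning survives through the class, detect and address_data fields. Below is a generic skeleton of the shape each driver ends up with, assuming a hypothetical "foo" chip at address 0x2d; the callbacks are only declared here, and their usual bodies are sketched further down.

#include <linux/module.h>
#include <linux/init.h>
#include <linux/i2c.h>

/* Addresses to scan; I2C_CLIENT_INSMOD_1() generates addr_data, the
 * force/probe/ignore module parameters and an "enum chips" from this. */
static const unsigned short normal_i2c[] = { 0x2d, I2C_CLIENT_END };
I2C_CLIENT_INSMOD_1(foo);

static int foo_probe(struct i2c_client *client,
                     const struct i2c_device_id *id);
static int foo_remove(struct i2c_client *client);
static int foo_detect(struct i2c_client *client, int kind,
                      struct i2c_board_info *info);

static const struct i2c_device_id foo_id[] = {
        { "foo", foo },
        { }
};
MODULE_DEVICE_TABLE(i2c, foo_id);       /* lets userspace autoload the module */

static struct i2c_driver foo_driver = {
        .class          = I2C_CLASS_HWMON,      /* only scan hwmon-class adapters */
        .driver = {
                .name   = "foo",
        },
        .probe          = foo_probe,
        .remove         = foo_remove,
        .id_table       = foo_id,
        .detect         = foo_detect,           /* legacy-style address scanning */
        .address_data   = &addr_data,           /* from I2C_CLIENT_INSMOD_1() above */
};

static int __init foo_init(void)
{
        return i2c_add_driver(&foo_driver);
}

static void __exit foo_exit(void)
{
        i2c_del_driver(&foo_driver);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");

The id table name is what board code and explicit instantiation bind against; detect is only a fallback for adapters that still rely on address probing.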
sysfs_remove_group(&client->dev.kobj, &gl520_group_opt); - if ((err = i2c_detach_client(client))) - return err; - kfree(data); return 0; } diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c index 116287008083..3195a265f0e9 100644 --- a/drivers/hwmon/lm63.c +++ b/drivers/hwmon/lm63.c @@ -1,7 +1,7 @@ /* * lm63.c - driver for the National Semiconductor LM63 temperature sensor * with integrated fan control - * Copyright (C) 2004-2006 Jean Delvare <khali@linux-fr.org> + * Copyright (C) 2004-2008 Jean Delvare <khali@linux-fr.org> * Based on the lm90 driver. * * The LM63 is a sensor chip made by National Semiconductor. It measures @@ -128,24 +128,36 @@ I2C_CLIENT_INSMOD_1(lm63); * Functions declaration */ -static int lm63_attach_adapter(struct i2c_adapter *adapter); -static int lm63_detach_client(struct i2c_client *client); +static int lm63_probe(struct i2c_client *client, + const struct i2c_device_id *id); +static int lm63_remove(struct i2c_client *client); static struct lm63_data *lm63_update_device(struct device *dev); -static int lm63_detect(struct i2c_adapter *adapter, int address, int kind); +static int lm63_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info); static void lm63_init_client(struct i2c_client *client); /* * Driver data (common to all clients) */ +static const struct i2c_device_id lm63_id[] = { + { "lm63", lm63 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, lm63_id); + static struct i2c_driver lm63_driver = { + .class = I2C_CLASS_HWMON, .driver = { .name = "lm63", }, - .attach_adapter = lm63_attach_adapter, - .detach_client = lm63_detach_client, + .probe = lm63_probe, + .remove = lm63_remove, + .id_table = lm63_id, + .detect = lm63_detect, + .address_data = &addr_data, }; /* @@ -153,7 +165,6 @@ static struct i2c_driver lm63_driver = { */ struct lm63_data { - struct i2c_client client; struct device *hwmon_dev; struct mutex update_lock; char valid; /* zero until following fields are valid */ @@ -411,43 +422,14 @@ static const struct attribute_group lm63_group_fan1 = { * Real code */ -static int lm63_attach_adapter(struct i2c_adapter *adapter) +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int lm63_detect(struct i2c_client *new_client, int kind, + struct i2c_board_info *info) { - if (!(adapter->class & I2C_CLASS_HWMON)) - return 0; - return i2c_probe(adapter, &addr_data, lm63_detect); -} - -/* - * The following function does more than just detection. If detection - * succeeds, it also registers the new chip. - */ -static int lm63_detect(struct i2c_adapter *adapter, int address, int kind) -{ - struct i2c_client *new_client; - struct lm63_data *data; - int err = 0; + struct i2c_adapter *adapter = new_client->adapter; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) - goto exit; - - if (!(data = kzalloc(sizeof(struct lm63_data), GFP_KERNEL))) { - err = -ENOMEM; - goto exit; - } - - /* The common I2C client data is placed right before the - LM63-specific data. 
*/ - new_client = &data->client; - i2c_set_clientdata(new_client, data); - new_client->addr = address; - new_client->adapter = adapter; - new_client->driver = &lm63_driver; - new_client->flags = 0; - - /* Default to an LM63 if forced */ - if (kind == 0) - kind = lm63; + return -ENODEV; if (kind < 0) { /* must identify */ u8 man_id, chip_id, reg_config1, reg_config2; @@ -477,25 +459,38 @@ static int lm63_detect(struct i2c_adapter *adapter, int address, int kind) dev_dbg(&adapter->dev, "Unsupported chip " "(man_id=0x%02X, chip_id=0x%02X).\n", man_id, chip_id); - goto exit_free; + return -ENODEV; } } - strlcpy(new_client->name, "lm63", I2C_NAME_SIZE); + strlcpy(info->type, "lm63", I2C_NAME_SIZE); + + return 0; +} + +static int lm63_probe(struct i2c_client *new_client, + const struct i2c_device_id *id) +{ + struct lm63_data *data; + int err; + + data = kzalloc(sizeof(struct lm63_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(new_client, data); data->valid = 0; mutex_init(&data->update_lock); - /* Tell the I2C layer a new client has arrived */ - if ((err = i2c_attach_client(new_client))) - goto exit_free; - /* Initialize the LM63 chip */ lm63_init_client(new_client); /* Register sysfs hooks */ if ((err = sysfs_create_group(&new_client->dev.kobj, &lm63_group))) - goto exit_detach; + goto exit_free; if (data->config & 0x04) { /* tachometer enabled */ if ((err = sysfs_create_group(&new_client->dev.kobj, &lm63_group_fan1))) @@ -513,8 +508,6 @@ static int lm63_detect(struct i2c_adapter *adapter, int address, int kind) exit_remove_files: sysfs_remove_group(&new_client->dev.kobj, &lm63_group); sysfs_remove_group(&new_client->dev.kobj, &lm63_group_fan1); -exit_detach: - i2c_detach_client(new_client); exit_free: kfree(data); exit: @@ -556,18 +549,14 @@ static void lm63_init_client(struct i2c_client *client) (data->config_fan & 0x20) ? 
"manual" : "auto"); } -static int lm63_detach_client(struct i2c_client *client) +static int lm63_remove(struct i2c_client *client) { struct lm63_data *data = i2c_get_clientdata(client); - int err; hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &lm63_group); sysfs_remove_group(&client->dev.kobj, &lm63_group_fan1); - if ((err = i2c_detach_client(client))) - return err; - kfree(data); return 0; } diff --git a/drivers/hwmon/lm77.c b/drivers/hwmon/lm77.c index 36d5a8c3ad8c..866b401ab6e8 100644 --- a/drivers/hwmon/lm77.c +++ b/drivers/hwmon/lm77.c @@ -52,7 +52,6 @@ I2C_CLIENT_INSMOD_1(lm77); /* Each client has this additional data */ struct lm77_data { - struct i2c_client client; struct device *hwmon_dev; struct mutex update_lock; char valid; @@ -65,23 +64,35 @@ struct lm77_data { u8 alarms; }; -static int lm77_attach_adapter(struct i2c_adapter *adapter); -static int lm77_detect(struct i2c_adapter *adapter, int address, int kind); +static int lm77_probe(struct i2c_client *client, + const struct i2c_device_id *id); +static int lm77_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info); static void lm77_init_client(struct i2c_client *client); -static int lm77_detach_client(struct i2c_client *client); +static int lm77_remove(struct i2c_client *client); static u16 lm77_read_value(struct i2c_client *client, u8 reg); static int lm77_write_value(struct i2c_client *client, u8 reg, u16 value); static struct lm77_data *lm77_update_device(struct device *dev); +static const struct i2c_device_id lm77_id[] = { + { "lm77", lm77 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, lm77_id); + /* This is the driver that will be inserted */ static struct i2c_driver lm77_driver = { + .class = I2C_CLASS_HWMON, .driver = { .name = "lm77", }, - .attach_adapter = lm77_attach_adapter, - .detach_client = lm77_detach_client, + .probe = lm77_probe, + .remove = lm77_remove, + .id_table = lm77_id, + .detect = lm77_detect, + .address_data = &addr_data, }; /* straight from the datasheet */ @@ -215,13 +226,6 @@ static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 2); static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL, 0); static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 1); -static int lm77_attach_adapter(struct i2c_adapter *adapter) -{ - if (!(adapter->class & I2C_CLASS_HWMON)) - return 0; - return i2c_probe(adapter, &addr_data, lm77_detect); -} - static struct attribute *lm77_attributes[] = { &dev_attr_temp1_input.attr, &dev_attr_temp1_crit.attr, @@ -240,32 +244,15 @@ static const struct attribute_group lm77_group = { .attrs = lm77_attributes, }; -/* This function is called by i2c_probe */ -static int lm77_detect(struct i2c_adapter *adapter, int address, int kind) +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int lm77_detect(struct i2c_client *new_client, int kind, + struct i2c_board_info *info) { - struct i2c_client *new_client; - struct lm77_data *data; - int err = 0; - const char *name = ""; + struct i2c_adapter *adapter = new_client->adapter; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA)) - goto exit; - - /* OK. For now, we presume we have a valid client. We now create the - client structure, even though we cannot fill it completely yet. - But it allows us to access lm77_{read,write}_value. 
*/ - if (!(data = kzalloc(sizeof(struct lm77_data), GFP_KERNEL))) { - err = -ENOMEM; - goto exit; - } - - new_client = &data->client; - i2c_set_clientdata(new_client, data); - new_client->addr = address; - new_client->adapter = adapter; - new_client->driver = &lm77_driver; - new_client->flags = 0; + return -ENODEV; /* Here comes the remaining detection. Since the LM77 has no register dedicated to identification, we have to rely on the @@ -294,7 +281,7 @@ static int lm77_detect(struct i2c_adapter *adapter, int address, int kind) || i2c_smbus_read_word_data(new_client, i + 3) != crit || i2c_smbus_read_word_data(new_client, i + 4) != min || i2c_smbus_read_word_data(new_client, i + 5) != max) - goto exit_free; + return -ENODEV; /* sign bits */ if (((cur & 0x00f0) != 0xf0 && (cur & 0x00f0) != 0x0) @@ -302,51 +289,55 @@ static int lm77_detect(struct i2c_adapter *adapter, int address, int kind) || ((crit & 0x00f0) != 0xf0 && (crit & 0x00f0) != 0x0) || ((min & 0x00f0) != 0xf0 && (min & 0x00f0) != 0x0) || ((max & 0x00f0) != 0xf0 && (max & 0x00f0) != 0x0)) - goto exit_free; + return -ENODEV; /* unused bits */ if (conf & 0xe0) - goto exit_free; + return -ENODEV; /* 0x06 and 0x07 return the last read value */ cur = i2c_smbus_read_word_data(new_client, 0); if (i2c_smbus_read_word_data(new_client, 6) != cur || i2c_smbus_read_word_data(new_client, 7) != cur) - goto exit_free; + return -ENODEV; hyst = i2c_smbus_read_word_data(new_client, 2); if (i2c_smbus_read_word_data(new_client, 6) != hyst || i2c_smbus_read_word_data(new_client, 7) != hyst) - goto exit_free; + return -ENODEV; min = i2c_smbus_read_word_data(new_client, 4); if (i2c_smbus_read_word_data(new_client, 6) != min || i2c_smbus_read_word_data(new_client, 7) != min) - goto exit_free; + return -ENODEV; } - /* Determine the chip type - only one kind supported! 
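For context on what the new probe functions are wiring up: these hwmon drivers publish their readings through sysfs attribute groups built from SENSOR_DEVICE_ATTR entries (the lm77 ones are visible a little above), and after the conversion the group is created in probe and removed in remove rather than in the old combined detect-and-register path. An illustrative attribute pair and group, not taken from any driver in this patch:

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/hwmon-sysfs.h>

/* Hypothetical show routine; the index distinguishes otherwise identical
 * channels, exactly as the SENSOR_DEVICE_ATTR users above do. */
static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
                         char *buf)
{
        struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);

        return sprintf(buf, "%d\n", 1000 * attr->index);  /* placeholder value */
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1);

static struct attribute *example_attributes[] = {
        &sensor_dev_attr_temp1_input.dev_attr.attr,
        &sensor_dev_attr_temp2_input.dev_attr.attr,
        NULL
};

static const struct attribute_group example_group = {
        .attrs = example_attributes,
};

/* probe:  sysfs_create_group(&client->dev.kobj, &example_group);
 * remove: sysfs_remove_group(&client->dev.kobj, &example_group); */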
*/ - if (kind <= 0) - kind = lm77; + strlcpy(info->type, "lm77", I2C_NAME_SIZE); - if (kind == lm77) { - name = "lm77"; + return 0; +} + +static int lm77_probe(struct i2c_client *new_client, + const struct i2c_device_id *id) +{ + struct lm77_data *data; + int err; + + data = kzalloc(sizeof(struct lm77_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto exit; } - /* Fill in the remaining client fields and put it into the global list */ - strlcpy(new_client->name, name, I2C_NAME_SIZE); + i2c_set_clientdata(new_client, data); data->valid = 0; mutex_init(&data->update_lock); - /* Tell the I2C layer a new client has arrived */ - if ((err = i2c_attach_client(new_client))) - goto exit_free; - /* Initialize the LM77 chip */ lm77_init_client(new_client); /* Register sysfs hooks */ if ((err = sysfs_create_group(&new_client->dev.kobj, &lm77_group))) - goto exit_detach; + goto exit_free; data->hwmon_dev = hwmon_device_register(&new_client->dev); if (IS_ERR(data->hwmon_dev)) { @@ -358,20 +349,17 @@ static int lm77_detect(struct i2c_adapter *adapter, int address, int kind) exit_remove: sysfs_remove_group(&new_client->dev.kobj, &lm77_group); -exit_detach: - i2c_detach_client(new_client); exit_free: kfree(data); exit: return err; } -static int lm77_detach_client(struct i2c_client *client) +static int lm77_remove(struct i2c_client *client) { struct lm77_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &lm77_group); - i2c_detach_client(client); kfree(data); return 0; } diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c index 26c91c9d4769..bcffc1899403 100644 --- a/drivers/hwmon/lm80.c +++ b/drivers/hwmon/lm80.c @@ -108,7 +108,6 @@ static inline long TEMP_FROM_REG(u16 temp) */ struct lm80_data { - struct i2c_client client; struct device *hwmon_dev; struct mutex update_lock; char valid; /* !=0 if following fields are valid */ @@ -132,10 +131,12 @@ struct lm80_data { * Functions declaration */ -static int lm80_attach_adapter(struct i2c_adapter *adapter); -static int lm80_detect(struct i2c_adapter *adapter, int address, int kind); +static int lm80_probe(struct i2c_client *client, + const struct i2c_device_id *id); +static int lm80_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info); static void lm80_init_client(struct i2c_client *client); -static int lm80_detach_client(struct i2c_client *client); +static int lm80_remove(struct i2c_client *client); static struct lm80_data *lm80_update_device(struct device *dev); static int lm80_read_value(struct i2c_client *client, u8 reg); static int lm80_write_value(struct i2c_client *client, u8 reg, u8 value); @@ -144,12 +145,22 @@ static int lm80_write_value(struct i2c_client *client, u8 reg, u8 value); * Driver data (common to all clients) */ +static const struct i2c_device_id lm80_id[] = { + { "lm80", lm80 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, lm80_id); + static struct i2c_driver lm80_driver = { + .class = I2C_CLASS_HWMON, .driver = { .name = "lm80", }, - .attach_adapter = lm80_attach_adapter, - .detach_client = lm80_detach_client, + .probe = lm80_probe, + .remove = lm80_remove, + .id_table = lm80_id, + .detect = lm80_detect, + .address_data = &addr_data, }; /* @@ -383,13 +394,6 @@ static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 13); * Real code */ -static int lm80_attach_adapter(struct i2c_adapter *adapter) -{ - if (!(adapter->class & I2C_CLASS_HWMON)) - return 0; - return i2c_probe(adapter, &addr_data, lm80_detect); -} - static struct 
attribute *lm80_attributes[] = { &sensor_dev_attr_in0_min.dev_attr.attr, &sensor_dev_attr_in1_min.dev_attr.attr, @@ -442,53 +446,46 @@ static const struct attribute_group lm80_group = { .attrs = lm80_attributes, }; -static int lm80_detect(struct i2c_adapter *adapter, int address, int kind) +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int lm80_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info) { + struct i2c_adapter *adapter = client->adapter; int i, cur; - struct i2c_client *client; - struct lm80_data *data; - int err = 0; - const char *name; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) - goto exit; - - /* OK. For now, we presume we have a valid client. We now create the - client structure, even though we cannot fill it completely yet. - But it allows us to access lm80_{read,write}_value. */ - if (!(data = kzalloc(sizeof(struct lm80_data), GFP_KERNEL))) { - err = -ENOMEM; - goto exit; - } - - client = &data->client; - i2c_set_clientdata(client, data); - client->addr = address; - client->adapter = adapter; - client->driver = &lm80_driver; + return -ENODEV; /* Now, we do the remaining detection. It is lousy. */ if (lm80_read_value(client, LM80_REG_ALARM2) & 0xc0) - goto error_free; + return -ENODEV; for (i = 0x2a; i <= 0x3d; i++) { cur = i2c_smbus_read_byte_data(client, i); if ((i2c_smbus_read_byte_data(client, i + 0x40) != cur) || (i2c_smbus_read_byte_data(client, i + 0x80) != cur) || (i2c_smbus_read_byte_data(client, i + 0xc0) != cur)) - goto error_free; + return -ENODEV; } - /* Determine the chip type - only one kind supported! */ - kind = lm80; - name = "lm80"; + strlcpy(info->type, "lm80", I2C_NAME_SIZE); - /* Fill in the remaining client fields */ - strlcpy(client->name, name, I2C_NAME_SIZE); - mutex_init(&data->update_lock); + return 0; +} - /* Tell the I2C layer a new client has arrived */ - if ((err = i2c_attach_client(client))) - goto error_free; +static int lm80_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct lm80_data *data; + int err; + + data = kzalloc(sizeof(struct lm80_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(client, data); + mutex_init(&data->update_lock); /* Initialize the LM80 chip */ lm80_init_client(client); @@ -499,7 +496,7 @@ static int lm80_detect(struct i2c_adapter *adapter, int address, int kind) /* Register sysfs hooks */ if ((err = sysfs_create_group(&client->dev.kobj, &lm80_group))) - goto error_detach; + goto error_free; data->hwmon_dev = hwmon_device_register(&client->dev); if (IS_ERR(data->hwmon_dev)) { @@ -511,23 +508,18 @@ static int lm80_detect(struct i2c_adapter *adapter, int address, int kind) error_remove: sysfs_remove_group(&client->dev.kobj, &lm80_group); -error_detach: - i2c_detach_client(client); error_free: kfree(data); exit: return err; } -static int lm80_detach_client(struct i2c_client *client) +static int lm80_remove(struct i2c_client *client) { struct lm80_data *data = i2c_get_clientdata(client); - int err; hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &lm80_group); - if ((err = i2c_detach_client(client))) - return err; kfree(data); return 0; diff --git a/drivers/hwmon/lm83.c b/drivers/hwmon/lm83.c index 6a8642fa25fb..e59e2d1f080c 100644 --- a/drivers/hwmon/lm83.c +++ b/drivers/hwmon/lm83.c @@ -1,7 +1,7 @@ /* * lm83.c - Part of lm_sensors, Linux kernel modules for hardware * monitoring - * Copyright (C) 2003-2006 Jean Delvare <khali@linux-fr.org> + * Copyright (C) 
2003-2008 Jean Delvare <khali@linux-fr.org> * * Heavily inspired from the lm78, lm75 and adm1021 drivers. The LM83 is * a sensor chip made by National Semiconductor. It reports up to four @@ -118,21 +118,34 @@ static const u8 LM83_REG_W_HIGH[] = { * Functions declaration */ -static int lm83_attach_adapter(struct i2c_adapter *adapter); -static int lm83_detect(struct i2c_adapter *adapter, int address, int kind); -static int lm83_detach_client(struct i2c_client *client); +static int lm83_detect(struct i2c_client *new_client, int kind, + struct i2c_board_info *info); +static int lm83_probe(struct i2c_client *client, + const struct i2c_device_id *id); +static int lm83_remove(struct i2c_client *client); static struct lm83_data *lm83_update_device(struct device *dev); /* * Driver data (common to all clients) */ +static const struct i2c_device_id lm83_id[] = { + { "lm83", lm83 }, + { "lm82", lm82 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, lm83_id); + static struct i2c_driver lm83_driver = { + .class = I2C_CLASS_HWMON, .driver = { .name = "lm83", }, - .attach_adapter = lm83_attach_adapter, - .detach_client = lm83_detach_client, + .probe = lm83_probe, + .remove = lm83_remove, + .id_table = lm83_id, + .detect = lm83_detect, + .address_data = &addr_data, }; /* @@ -140,7 +153,6 @@ static struct i2c_driver lm83_driver = { */ struct lm83_data { - struct i2c_client client; struct device *hwmon_dev; struct mutex update_lock; char valid; /* zero until following fields are valid */ @@ -278,40 +290,15 @@ static const struct attribute_group lm83_group_opt = { * Real code */ -static int lm83_attach_adapter(struct i2c_adapter *adapter) +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int lm83_detect(struct i2c_client *new_client, int kind, + struct i2c_board_info *info) { - if (!(adapter->class & I2C_CLASS_HWMON)) - return 0; - return i2c_probe(adapter, &addr_data, lm83_detect); -} - -/* - * The following function does more than just detection. If detection - * succeeds, it also registers the new chip. - */ -static int lm83_detect(struct i2c_adapter *adapter, int address, int kind) -{ - struct i2c_client *new_client; - struct lm83_data *data; - int err = 0; + struct i2c_adapter *adapter = new_client->adapter; const char *name = ""; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) - goto exit; - - if (!(data = kzalloc(sizeof(struct lm83_data), GFP_KERNEL))) { - err = -ENOMEM; - goto exit; - } - - /* The common I2C client data is placed right after the - * LM83-specific data. */ - new_client = &data->client; - i2c_set_clientdata(new_client, data); - new_client->addr = address; - new_client->adapter = adapter; - new_client->driver = &lm83_driver; - new_client->flags = 0; + return -ENODEV; /* Now we do the detection and identification. 
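Every .detect callback in this series follows the same contract: check adapter functionality, poke identification registers only when no force parameter was given (kind < 0), write the chip name into info->type, and return 0 or -ENODEV. It must not allocate memory or register anything; on success the i2c core instantiates a client from info and then calls probe. A sketch of that shape for a hypothetical chip with two ID registers (all names and values below are invented):

#include <linux/string.h>
#include <linux/i2c.h>

#define FOO_REG_MAN_ID          0xfe    /* hypothetical identification registers */
#define FOO_REG_CHIP_ID         0xff
#define FOO_MAN_ID              0x5c    /* hypothetical expected values */
#define FOO_CHIP_ID             0x21

/* Return 0 if detection is successful, -ENODEV otherwise */
static int foo_detect(struct i2c_client *client, int kind,
                      struct i2c_board_info *info)
{
        struct i2c_adapter *adapter = client->adapter;

        if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
                return -ENODEV;

        if (kind < 0) {         /* no "force" module parameter, really identify */
                if (i2c_smbus_read_byte_data(client, FOO_REG_MAN_ID) != FOO_MAN_ID
                 || i2c_smbus_read_byte_data(client, FOO_REG_CHIP_ID) != FOO_CHIP_ID)
                        return -ENODEV;
        }

        /* The core copies info into a new client and binds it through .probe */
        strlcpy(info->type, "foo", I2C_NAME_SIZE);

        return 0;
}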
A negative kind * means that the driver was loaded with no force parameter @@ -335,8 +322,9 @@ static int lm83_detect(struct i2c_adapter *adapter, int address, int kind) ((i2c_smbus_read_byte_data(new_client, LM83_REG_R_CONFIG) & 0x41) != 0x00)) { dev_dbg(&adapter->dev, - "LM83 detection failed at 0x%02x.\n", address); - goto exit_free; + "LM83 detection failed at 0x%02x.\n", + new_client->addr); + return -ENODEV; } } @@ -361,7 +349,7 @@ static int lm83_detect(struct i2c_adapter *adapter, int address, int kind) dev_info(&adapter->dev, "Unsupported chip (man_id=0x%02X, " "chip_id=0x%02X).\n", man_id, chip_id); - goto exit_free; + return -ENODEV; } } @@ -372,15 +360,27 @@ static int lm83_detect(struct i2c_adapter *adapter, int address, int kind) name = "lm82"; } - /* We can fill in the remaining client fields */ - strlcpy(new_client->name, name, I2C_NAME_SIZE); + strlcpy(info->type, name, I2C_NAME_SIZE); + + return 0; +} + +static int lm83_probe(struct i2c_client *new_client, + const struct i2c_device_id *id) +{ + struct lm83_data *data; + int err; + + data = kzalloc(sizeof(struct lm83_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(new_client, data); data->valid = 0; mutex_init(&data->update_lock); - /* Tell the I2C layer a new client has arrived */ - if ((err = i2c_attach_client(new_client))) - goto exit_free; - /* * Register sysfs hooks * The LM82 can only monitor one external diode which is @@ -389,9 +389,9 @@ static int lm83_detect(struct i2c_adapter *adapter, int address, int kind) */ if ((err = sysfs_create_group(&new_client->dev.kobj, &lm83_group))) - goto exit_detach; + goto exit_free; - if (kind == lm83) { + if (id->driver_data == lm83) { if ((err = sysfs_create_group(&new_client->dev.kobj, &lm83_group_opt))) goto exit_remove_files; @@ -408,26 +408,20 @@ static int lm83_detect(struct i2c_adapter *adapter, int address, int kind) exit_remove_files: sysfs_remove_group(&new_client->dev.kobj, &lm83_group); sysfs_remove_group(&new_client->dev.kobj, &lm83_group_opt); -exit_detach: - i2c_detach_client(new_client); exit_free: kfree(data); exit: return err; } -static int lm83_detach_client(struct i2c_client *client) +static int lm83_remove(struct i2c_client *client) { struct lm83_data *data = i2c_get_clientdata(client); - int err; hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &lm83_group); sysfs_remove_group(&client->dev.kobj, &lm83_group_opt); - if ((err = i2c_detach_client(client))) - return err; - kfree(data); return 0; } diff --git a/drivers/hwmon/lm87.c b/drivers/hwmon/lm87.c index e1c183f0aae0..21970f0d53a1 100644 --- a/drivers/hwmon/lm87.c +++ b/drivers/hwmon/lm87.c @@ -5,7 +5,7 @@ * Philip Edelbrock <phil@netroedge.com> * Stephen Rousset <stephen.rousset@rocketlogix.com> * Dan Eaton <dan.eaton@rocketlogix.com> - * Copyright (C) 2004,2007 Jean Delvare <khali@linux-fr.org> + * Copyright (C) 2004-2008 Jean Delvare <khali@linux-fr.org> * * Original port to Linux 2.6 by Jeff Oliver. 
* @@ -157,22 +157,35 @@ static u8 LM87_REG_TEMP_LOW[3] = { 0x3A, 0x38, 0x2C }; * Functions declaration */ -static int lm87_attach_adapter(struct i2c_adapter *adapter); -static int lm87_detect(struct i2c_adapter *adapter, int address, int kind); +static int lm87_probe(struct i2c_client *client, + const struct i2c_device_id *id); +static int lm87_detect(struct i2c_client *new_client, int kind, + struct i2c_board_info *info); static void lm87_init_client(struct i2c_client *client); -static int lm87_detach_client(struct i2c_client *client); +static int lm87_remove(struct i2c_client *client); static struct lm87_data *lm87_update_device(struct device *dev); /* * Driver data (common to all clients) */ +static const struct i2c_device_id lm87_id[] = { + { "lm87", lm87 }, + { "adm1024", adm1024 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, lm87_id); + static struct i2c_driver lm87_driver = { + .class = I2C_CLASS_HWMON, .driver = { .name = "lm87", }, - .attach_adapter = lm87_attach_adapter, - .detach_client = lm87_detach_client, + .probe = lm87_probe, + .remove = lm87_remove, + .id_table = lm87_id, + .detect = lm87_detect, + .address_data = &addr_data, }; /* @@ -180,7 +193,6 @@ static struct i2c_driver lm87_driver = { */ struct lm87_data { - struct i2c_client client; struct device *hwmon_dev; struct mutex update_lock; char valid; /* zero until following fields are valid */ @@ -562,13 +574,6 @@ static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 15); * Real code */ -static int lm87_attach_adapter(struct i2c_adapter *adapter) -{ - if (!(adapter->class & I2C_CLASS_HWMON)) - return 0; - return i2c_probe(adapter, &addr_data, lm87_detect); -} - static struct attribute *lm87_attributes[] = { &dev_attr_in1_input.attr, &dev_attr_in1_min.attr, @@ -656,33 +661,15 @@ static const struct attribute_group lm87_group_opt = { .attrs = lm87_attributes_opt, }; -/* - * The following function does more than just detection. If detection - * succeeds, it also registers the new chip. - */ -static int lm87_detect(struct i2c_adapter *adapter, int address, int kind) +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int lm87_detect(struct i2c_client *new_client, int kind, + struct i2c_board_info *info) { - struct i2c_client *new_client; - struct lm87_data *data; - int err = 0; + struct i2c_adapter *adapter = new_client->adapter; static const char *names[] = { "lm87", "adm1024" }; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) - goto exit; - - if (!(data = kzalloc(sizeof(struct lm87_data), GFP_KERNEL))) { - err = -ENOMEM; - goto exit; - } - - /* The common I2C client data is placed right before the - LM87-specific data. 
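The probe/remove halves are just as uniform: probe allocates the private data, attaches it with i2c_set_clientdata(), initialises the chip, creates the sysfs group and registers the hwmon class device, while remove unwinds in reverse; neither end calls i2c_attach_client() or i2c_detach_client() any more because the core owns the client. A condensed sketch with hypothetical names (chip setup and attributes trimmed to keep it short):

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/sysfs.h>

struct foo_data {
        struct device *hwmon_dev;
        struct mutex update_lock;
};

static struct attribute *foo_attributes[] = {
        NULL                            /* real attributes would go here */
};

static const struct attribute_group foo_group = {
        .attrs = foo_attributes,
};

static int foo_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
        struct foo_data *data;
        int err;

        data = kzalloc(sizeof(struct foo_data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        i2c_set_clientdata(client, data);
        mutex_init(&data->update_lock);

        /* chip initialisation would go here */

        err = sysfs_create_group(&client->dev.kobj, &foo_group);
        if (err)
                goto exit_free;

        data->hwmon_dev = hwmon_device_register(&client->dev);
        if (IS_ERR(data->hwmon_dev)) {
                err = PTR_ERR(data->hwmon_dev);
                goto exit_remove;
        }

        return 0;

exit_remove:
        sysfs_remove_group(&client->dev.kobj, &foo_group);
exit_free:
        kfree(data);
        return err;
}

static int foo_remove(struct i2c_client *client)
{
        struct foo_data *data = i2c_get_clientdata(client);

        hwmon_device_unregister(data->hwmon_dev);
        sysfs_remove_group(&client->dev.kobj, &foo_group);
        kfree(data);

        return 0;
}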
*/ - new_client = &data->client; - i2c_set_clientdata(new_client, data); - new_client->addr = address; - new_client->adapter = adapter; - new_client->driver = &lm87_driver; - new_client->flags = 0; + return -ENODEV; /* Default to an LM87 if forced */ if (kind == 0) @@ -704,20 +691,32 @@ static int lm87_detect(struct i2c_adapter *adapter, int address, int kind) || (lm87_read_value(new_client, LM87_REG_CONFIG) & 0x80)) { dev_dbg(&adapter->dev, "LM87 detection failed at 0x%02x.\n", - address); - goto exit_free; + new_client->addr); + return -ENODEV; } } - /* We can fill in the remaining client fields */ - strlcpy(new_client->name, names[kind - 1], I2C_NAME_SIZE); + strlcpy(info->type, names[kind - 1], I2C_NAME_SIZE); + + return 0; +} + +static int lm87_probe(struct i2c_client *new_client, + const struct i2c_device_id *id) +{ + struct lm87_data *data; + int err; + + data = kzalloc(sizeof(struct lm87_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(new_client, data); data->valid = 0; mutex_init(&data->update_lock); - /* Tell the I2C layer a new client has arrived */ - if ((err = i2c_attach_client(new_client))) - goto exit_free; - /* Initialize the LM87 chip */ lm87_init_client(new_client); @@ -732,7 +731,7 @@ static int lm87_detect(struct i2c_adapter *adapter, int address, int kind) /* Register sysfs hooks */ if ((err = sysfs_create_group(&new_client->dev.kobj, &lm87_group))) - goto exit_detach; + goto exit_free; if (data->channel & CHAN_NO_FAN(0)) { if ((err = device_create_file(&new_client->dev, @@ -832,8 +831,6 @@ static int lm87_detect(struct i2c_adapter *adapter, int address, int kind) exit_remove: sysfs_remove_group(&new_client->dev.kobj, &lm87_group); sysfs_remove_group(&new_client->dev.kobj, &lm87_group_opt); -exit_detach: - i2c_detach_client(new_client); exit_free: kfree(data); exit: @@ -877,18 +874,14 @@ static void lm87_init_client(struct i2c_client *client) } } -static int lm87_detach_client(struct i2c_client *client) +static int lm87_remove(struct i2c_client *client) { struct lm87_data *data = i2c_get_clientdata(client); - int err; hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &lm87_group); sysfs_remove_group(&client->dev.kobj, &lm87_group_opt); - if ((err = i2c_detach_client(client))) - return err; - kfree(data); return 0; } diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c index d1a3da3dd8e0..c24fe36ac787 100644 --- a/drivers/hwmon/lm90.c +++ b/drivers/hwmon/lm90.c @@ -187,23 +187,44 @@ I2C_CLIENT_INSMOD_7(lm90, adm1032, lm99, lm86, max6657, adt7461, max6680); * Functions declaration */ -static int lm90_attach_adapter(struct i2c_adapter *adapter); -static int lm90_detect(struct i2c_adapter *adapter, int address, - int kind); +static int lm90_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info); +static int lm90_probe(struct i2c_client *client, + const struct i2c_device_id *id); static void lm90_init_client(struct i2c_client *client); -static int lm90_detach_client(struct i2c_client *client); +static int lm90_remove(struct i2c_client *client); static struct lm90_data *lm90_update_device(struct device *dev); /* * Driver data (common to all clients) */ +static const struct i2c_device_id lm90_id[] = { + { "adm1032", adm1032 }, + { "adt7461", adt7461 }, + { "lm90", lm90 }, + { "lm86", lm86 }, + { "lm89", lm99 }, + { "lm99", lm99 }, /* Missing temperature offset */ + { "max6657", max6657 }, + { "max6658", max6657 }, + { "max6659", max6657 }, + { "max6680", max6680 }, + { "max6681", 
max6680 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, lm90_id); + static struct i2c_driver lm90_driver = { + .class = I2C_CLASS_HWMON, .driver = { .name = "lm90", }, - .attach_adapter = lm90_attach_adapter, - .detach_client = lm90_detach_client, + .probe = lm90_probe, + .remove = lm90_remove, + .id_table = lm90_id, + .detect = lm90_detect, + .address_data = &addr_data, }; /* @@ -211,7 +232,6 @@ static struct i2c_driver lm90_driver = { */ struct lm90_data { - struct i2c_client client; struct device *hwmon_dev; struct mutex update_lock; char valid; /* zero until following fields are valid */ @@ -477,40 +497,16 @@ static int lm90_read_reg(struct i2c_client* client, u8 reg, u8 *value) return 0; } -static int lm90_attach_adapter(struct i2c_adapter *adapter) -{ - if (!(adapter->class & I2C_CLASS_HWMON)) - return 0; - return i2c_probe(adapter, &addr_data, lm90_detect); -} - -/* - * The following function does more than just detection. If detection - * succeeds, it also registers the new chip. - */ -static int lm90_detect(struct i2c_adapter *adapter, int address, int kind) +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int lm90_detect(struct i2c_client *new_client, int kind, + struct i2c_board_info *info) { - struct i2c_client *new_client; - struct lm90_data *data; - int err = 0; + struct i2c_adapter *adapter = new_client->adapter; + int address = new_client->addr; const char *name = ""; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) - goto exit; - - if (!(data = kzalloc(sizeof(struct lm90_data), GFP_KERNEL))) { - err = -ENOMEM; - goto exit; - } - - /* The common I2C client data is placed right before the - LM90-specific data. */ - new_client = &data->client; - i2c_set_clientdata(new_client, data); - new_client->addr = address; - new_client->adapter = adapter; - new_client->driver = &lm90_driver; - new_client->flags = 0; + return -ENODEV; /* * Now we do the remaining detection. A negative kind means that @@ -538,7 +534,7 @@ static int lm90_detect(struct i2c_adapter *adapter, int address, int kind) LM90_REG_R_CONFIG1)) < 0 || (reg_convrate = i2c_smbus_read_byte_data(new_client, LM90_REG_R_CONVRATE)) < 0) - goto exit_free; + return -ENODEV; if ((address == 0x4C || address == 0x4D) && man_id == 0x01) { /* National Semiconductor */ @@ -546,7 +542,7 @@ static int lm90_detect(struct i2c_adapter *adapter, int address, int kind) if ((reg_config2 = i2c_smbus_read_byte_data(new_client, LM90_REG_R_CONFIG2)) < 0) - goto exit_free; + return -ENODEV; if ((reg_config1 & 0x2A) == 0x00 && (reg_config2 & 0xF8) == 0x00 @@ -610,10 +606,11 @@ static int lm90_detect(struct i2c_adapter *adapter, int address, int kind) dev_info(&adapter->dev, "Unsupported chip (man_id=0x%02X, " "chip_id=0x%02X).\n", man_id, chip_id); - goto exit_free; + return -ENODEV; } } + /* Fill the i2c board info */ if (kind == lm90) { name = "lm90"; } else if (kind == adm1032) { @@ -621,7 +618,7 @@ static int lm90_detect(struct i2c_adapter *adapter, int address, int kind) /* The ADM1032 supports PEC, but only if combined transactions are not used. 
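One thing the lm90 id table above shows, and lm83 and lm87 repeat, is that several chip names may map to a single driver_data value, and probe no longer re-derives the variant from detection: it simply reads id->driver_data from whichever entry matched. Sketched with invented names:

#include <linux/module.h>
#include <linux/i2c.h>

enum bar_chips { bar, bar_plus };       /* hypothetical chip variants */

static const struct i2c_device_id bar_id[] = {
        { "bar", bar },
        { "bara", bar },                /* alias: same register layout */
        { "barplus", bar_plus },
        { }
};
MODULE_DEVICE_TABLE(i2c, bar_id);

static int bar_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
        int kind = id->driver_data;     /* variant chosen by the matched name */

        if (kind == bar_plus) {
                /* enable the extra features only this variant has */
        }

        /* ... rest of probe as in the sketch above ... */
        return 0;
}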
*/ if (i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE)) - new_client->flags |= I2C_CLIENT_PEC; + info->flags |= I2C_CLIENT_PEC; } else if (kind == lm99) { name = "lm99"; } else if (kind == lm86) { @@ -633,23 +630,39 @@ static int lm90_detect(struct i2c_adapter *adapter, int address, int kind) } else if (kind == adt7461) { name = "adt7461"; } + strlcpy(info->type, name, I2C_NAME_SIZE); + + return 0; +} + +static int lm90_probe(struct i2c_client *new_client, + const struct i2c_device_id *id) +{ + struct i2c_adapter *adapter = to_i2c_adapter(new_client->dev.parent); + struct lm90_data *data; + int err; - /* We can fill in the remaining client fields */ - strlcpy(new_client->name, name, I2C_NAME_SIZE); - data->valid = 0; - data->kind = kind; + data = kzalloc(sizeof(struct lm90_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto exit; + } + i2c_set_clientdata(new_client, data); mutex_init(&data->update_lock); - /* Tell the I2C layer a new client has arrived */ - if ((err = i2c_attach_client(new_client))) - goto exit_free; + /* Set the device type */ + data->kind = id->driver_data; + if (data->kind == adm1032) { + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE)) + new_client->flags &= ~I2C_CLIENT_PEC; + } /* Initialize the LM90 chip */ lm90_init_client(new_client); /* Register sysfs hooks */ if ((err = sysfs_create_group(&new_client->dev.kobj, &lm90_group))) - goto exit_detach; + goto exit_free; if (new_client->flags & I2C_CLIENT_PEC) { if ((err = device_create_file(&new_client->dev, &dev_attr_pec))) @@ -672,8 +685,6 @@ static int lm90_detect(struct i2c_adapter *adapter, int address, int kind) exit_remove_files: sysfs_remove_group(&new_client->dev.kobj, &lm90_group); device_remove_file(&new_client->dev, &dev_attr_pec); -exit_detach: - i2c_detach_client(new_client); exit_free: kfree(data); exit: @@ -710,10 +721,9 @@ static void lm90_init_client(struct i2c_client *client) i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1, config); } -static int lm90_detach_client(struct i2c_client *client) +static int lm90_remove(struct i2c_client *client) { struct lm90_data *data = i2c_get_clientdata(client); - int err; hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &lm90_group); @@ -722,9 +732,6 @@ static int lm90_detach_client(struct i2c_client *client) device_remove_file(&client->dev, &sensor_dev_attr_temp2_offset.dev_attr); - if ((err = i2c_detach_client(client))) - return err; - kfree(data); return 0; } diff --git a/drivers/hwmon/lm92.c b/drivers/hwmon/lm92.c index c31942e08246..b2e00c5a7eec 100644 --- a/drivers/hwmon/lm92.c +++ b/drivers/hwmon/lm92.c @@ -1,6 +1,6 @@ /* * lm92 - Hardware monitoring driver - * Copyright (C) 2005 Jean Delvare <khali@linux-fr.org> + * Copyright (C) 2005-2008 Jean Delvare <khali@linux-fr.org> * * Based on the lm90 driver, with some ideas taken from the lm_sensors * lm92 driver as well. @@ -96,7 +96,6 @@ static struct i2c_driver lm92_driver; /* Client data (each client gets its own) */ struct lm92_data { - struct i2c_client client; struct device *hwmon_dev; struct mutex update_lock; char valid; /* zero until following fields are valid */ @@ -319,32 +318,15 @@ static const struct attribute_group lm92_group = { .attrs = lm92_attributes, }; -/* The following function does more than just detection. If detection - succeeds, it also registers the new chip. 
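The ADM1032 PEC handling in the lm90 hunks just above also explains a recurring pattern in these conversions: probe sometimes re-checks a condition that detect already verified. A client can be instantiated without ever going through detect (from board info, for instance), so a constraint such as "PEC needs SMBus byte transfers" has to be enforced in probe as well. A small, hypothetical helper expressing that check:

#include <linux/i2c.h>

/* Called from probe. A client may have been instantiated without going
 * through .detect at all, so the adapter capability is re-checked here. */
static void foo_check_pec(struct i2c_client *client)
{
        if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE))
                client->flags &= ~I2C_CLIENT_PEC;
}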
*/ -static int lm92_detect(struct i2c_adapter *adapter, int address, int kind) +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int lm92_detect(struct i2c_client *new_client, int kind, + struct i2c_board_info *info) { - struct i2c_client *new_client; - struct lm92_data *data; - int err = 0; - char *name; + struct i2c_adapter *adapter = new_client->adapter; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA)) - goto exit; - - if (!(data = kzalloc(sizeof(struct lm92_data), GFP_KERNEL))) { - err = -ENOMEM; - goto exit; - } - - /* Fill in enough client fields so that we can read from the chip, - which is required for identication */ - new_client = &data->client; - i2c_set_clientdata(new_client, data); - new_client->addr = address; - new_client->adapter = adapter; - new_client->driver = &lm92_driver; - new_client->flags = 0; + return -ENODEV; /* A negative kind means that the driver was loaded with no force parameter (default), so we must identify the chip. */ @@ -364,34 +346,36 @@ static int lm92_detect(struct i2c_adapter *adapter, int address, int kind) kind = lm92; /* No separate prefix */ } else - goto exit_free; - } else - if (kind == 0) /* Default to an LM92 if forced */ - kind = lm92; - - /* Give it the proper name */ - if (kind == lm92) { - name = "lm92"; - } else { /* Supposedly cannot happen */ - dev_dbg(&new_client->dev, "Kind out of range?\n"); - goto exit_free; + return -ENODEV; } - /* Fill in the remaining client fields */ - strlcpy(new_client->name, name, I2C_NAME_SIZE); + strlcpy(info->type, "lm92", I2C_NAME_SIZE); + + return 0; +} + +static int lm92_probe(struct i2c_client *new_client, + const struct i2c_device_id *id) +{ + struct lm92_data *data; + int err; + + data = kzalloc(sizeof(struct lm92_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(new_client, data); data->valid = 0; mutex_init(&data->update_lock); - /* Tell the i2c subsystem a new client has arrived */ - if ((err = i2c_attach_client(new_client))) - goto exit_free; - /* Initialize the chipset */ lm92_init_client(new_client); /* Register sysfs hooks */ if ((err = sysfs_create_group(&new_client->dev.kobj, &lm92_group))) - goto exit_detach; + goto exit_free; data->hwmon_dev = hwmon_device_register(&new_client->dev); if (IS_ERR(data->hwmon_dev)) { @@ -403,32 +387,19 @@ static int lm92_detect(struct i2c_adapter *adapter, int address, int kind) exit_remove: sysfs_remove_group(&new_client->dev.kobj, &lm92_group); -exit_detach: - i2c_detach_client(new_client); exit_free: kfree(data); exit: return err; } -static int lm92_attach_adapter(struct i2c_adapter *adapter) -{ - if (!(adapter->class & I2C_CLASS_HWMON)) - return 0; - return i2c_probe(adapter, &addr_data, lm92_detect); -} - -static int lm92_detach_client(struct i2c_client *client) +static int lm92_remove(struct i2c_client *client) { struct lm92_data *data = i2c_get_clientdata(client); - int err; hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &lm92_group); - if ((err = i2c_detach_client(client))) - return err; - kfree(data); return 0; } @@ -438,12 +409,23 @@ static int lm92_detach_client(struct i2c_client *client) * Module and driver stuff */ +static const struct i2c_device_id lm92_id[] = { + { "lm92", lm92 }, + /* max6635 could be added here */ + { } +}; +MODULE_DEVICE_TABLE(i2c, lm92_id); + static struct i2c_driver lm92_driver = { + .class = I2C_CLASS_HWMON, .driver = { .name = "lm92", }, - .attach_adapter = lm92_attach_adapter, - 
.detach_client = lm92_detach_client, + .probe = lm92_probe, + .remove = lm92_remove, + .id_table = lm92_id, + .detect = lm92_detect, + .address_data = &addr_data, }; static int __init sensors_lm92_init(void) diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c index 5e678f5c883d..fc36cadf36fb 100644 --- a/drivers/hwmon/lm93.c +++ b/drivers/hwmon/lm93.c @@ -200,7 +200,6 @@ struct block1_t { * Client-specific data */ struct lm93_data { - struct i2c_client client; struct device *hwmon_dev; struct mutex update_lock; @@ -2501,45 +2500,14 @@ static void lm93_init_client(struct i2c_client *client) "chip to signal ready!\n"); } -static int lm93_detect(struct i2c_adapter *adapter, int address, int kind) +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int lm93_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info) { - struct lm93_data *data; - struct i2c_client *client; - - int err = -ENODEV, func; - void (*update)(struct lm93_data *, struct i2c_client *); - - /* choose update routine based on bus capabilities */ - func = i2c_get_functionality(adapter); - if ( ((LM93_SMBUS_FUNC_FULL & func) == LM93_SMBUS_FUNC_FULL) && - (!disable_block) ) { - dev_dbg(&adapter->dev,"using SMBus block data transactions\n"); - update = lm93_update_client_full; - } else if ((LM93_SMBUS_FUNC_MIN & func) == LM93_SMBUS_FUNC_MIN) { - dev_dbg(&adapter->dev,"disabled SMBus block data " - "transactions\n"); - update = lm93_update_client_min; - } else { - dev_dbg(&adapter->dev,"detect failed, " - "smbus byte and/or word data not supported!\n"); - goto err_out; - } + struct i2c_adapter *adapter = client->adapter; - /* OK. For now, we presume we have a valid client. We now create the - client structure, even though we cannot fill it completely yet. - But it allows us to access lm78_{read,write}_value. 
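lm93 is the one driver here that chooses between two register-update strategies: its detect callback only insists on the minimal byte/word functionality, and the probe that follows in the next hunk queries the full functionality mask and switches to SMBus block transfers when the adapter offers them. A reduced sketch of that decision, with invented names and stub update routines:

#include <linux/i2c.h>

#define BAZ_SMBUS_FUNC_MIN      (I2C_FUNC_SMBUS_BYTE_DATA | \
                                 I2C_FUNC_SMBUS_WORD_DATA)
#define BAZ_SMBUS_FUNC_FULL     (BAZ_SMBUS_FUNC_MIN | \
                                 I2C_FUNC_SMBUS_READ_BLOCK_DATA)

struct baz_data;                        /* private data, contents not shown */

static void baz_update_full(struct baz_data *data, struct i2c_client *client)
{
        /* read the whole register file using SMBus block transactions */
}

static void baz_update_min(struct baz_data *data, struct i2c_client *client)
{
        /* read registers one byte/word at a time */
}

typedef void (*baz_update_fn)(struct baz_data *, struct i2c_client *);

/* Called from probe: pick the fastest update routine the adapter supports. */
static baz_update_fn baz_pick_update(struct i2c_client *client)
{
        u32 func = i2c_get_functionality(client->adapter);

        if ((func & BAZ_SMBUS_FUNC_FULL) == BAZ_SMBUS_FUNC_FULL)
                return baz_update_full;
        return baz_update_min;
}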
*/ - - if ( !(data = kzalloc(sizeof(struct lm93_data), GFP_KERNEL))) { - dev_dbg(&adapter->dev,"out of memory!\n"); - err = -ENOMEM; - goto err_out; - } - - client = &data->client; - i2c_set_clientdata(client, data); - client->addr = address; - client->adapter = adapter; - client->driver = &lm93_driver; + if (!i2c_check_functionality(adapter, LM93_SMBUS_FUNC_MIN)) + return -ENODEV; /* detection */ if (kind < 0) { @@ -2548,7 +2516,7 @@ static int lm93_detect(struct i2c_adapter *adapter, int address, int kind) if (mfr != 0x01) { dev_dbg(&adapter->dev,"detect failed, " "bad manufacturer id 0x%02x!\n", mfr); - goto err_free; + return -ENODEV; } } @@ -2563,31 +2531,61 @@ static int lm93_detect(struct i2c_adapter *adapter, int address, int kind) if (kind == 0) dev_dbg(&adapter->dev, "(ignored 'force' parameter)\n"); - goto err_free; + return -ENODEV; } } - /* fill in remaining client fields */ - strlcpy(client->name, "lm93", I2C_NAME_SIZE); + strlcpy(info->type, "lm93", I2C_NAME_SIZE); dev_dbg(&adapter->dev,"loading %s at %d,0x%02x\n", client->name, i2c_adapter_id(client->adapter), client->addr); + return 0; +} + +static int lm93_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct lm93_data *data; + int err, func; + void (*update)(struct lm93_data *, struct i2c_client *); + + /* choose update routine based on bus capabilities */ + func = i2c_get_functionality(client->adapter); + if (((LM93_SMBUS_FUNC_FULL & func) == LM93_SMBUS_FUNC_FULL) && + (!disable_block)) { + dev_dbg(&client->dev, "using SMBus block data transactions\n"); + update = lm93_update_client_full; + } else if ((LM93_SMBUS_FUNC_MIN & func) == LM93_SMBUS_FUNC_MIN) { + dev_dbg(&client->dev, "disabled SMBus block data " + "transactions\n"); + update = lm93_update_client_min; + } else { + dev_dbg(&client->dev, "detect failed, " + "smbus byte and/or word data not supported!\n"); + err = -ENODEV; + goto err_out; + } + + data = kzalloc(sizeof(struct lm93_data), GFP_KERNEL); + if (!data) { + dev_dbg(&client->dev, "out of memory!\n"); + err = -ENOMEM; + goto err_out; + } + i2c_set_clientdata(client, data); + /* housekeeping */ data->valid = 0; data->update = update; mutex_init(&data->update_lock); - /* tell the I2C layer a new client has arrived */ - if ((err = i2c_attach_client(client))) - goto err_free; - /* initialize the chip */ lm93_init_client(client); err = sysfs_create_group(&client->dev.kobj, &lm93_attr_grp); if (err) - goto err_detach; + goto err_free; /* Register hwmon driver class */ data->hwmon_dev = hwmon_device_register(&client->dev); @@ -2597,43 +2595,39 @@ static int lm93_detect(struct i2c_adapter *adapter, int address, int kind) err = PTR_ERR(data->hwmon_dev); dev_err(&client->dev, "error registering hwmon device.\n"); sysfs_remove_group(&client->dev.kobj, &lm93_attr_grp); -err_detach: - i2c_detach_client(client); err_free: kfree(data); err_out: return err; } -/* This function is called when: - * lm93_driver is inserted (when this module is loaded), for each - available adapter - * when a new adapter is inserted (and lm93_driver is still present) */ -static int lm93_attach_adapter(struct i2c_adapter *adapter) -{ - return i2c_probe(adapter, &addr_data, lm93_detect); -} - -static int lm93_detach_client(struct i2c_client *client) +static int lm93_remove(struct i2c_client *client) { struct lm93_data *data = i2c_get_clientdata(client); - int err = 0; hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &lm93_attr_grp); - err = i2c_detach_client(client); - if (!err) - 
kfree(data); - return err; + kfree(data); + return 0; } +static const struct i2c_device_id lm93_id[] = { + { "lm93", lm93 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, lm93_id); + static struct i2c_driver lm93_driver = { + .class = I2C_CLASS_HWMON, .driver = { .name = "lm93", }, - .attach_adapter = lm93_attach_adapter, - .detach_client = lm93_detach_client, + .probe = lm93_probe, + .remove = lm93_remove, + .id_table = lm93_id, + .detect = lm93_detect, + .address_data = &addr_data, }; static int __init lm93_init(void) diff --git a/drivers/hwmon/max1619.c b/drivers/hwmon/max1619.c index 7e7267a04544..1ab1cacad598 100644 --- a/drivers/hwmon/max1619.c +++ b/drivers/hwmon/max1619.c @@ -79,23 +79,34 @@ I2C_CLIENT_INSMOD_1(max1619); * Functions declaration */ -static int max1619_attach_adapter(struct i2c_adapter *adapter); -static int max1619_detect(struct i2c_adapter *adapter, int address, - int kind); +static int max1619_probe(struct i2c_client *client, + const struct i2c_device_id *id); +static int max1619_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info); static void max1619_init_client(struct i2c_client *client); -static int max1619_detach_client(struct i2c_client *client); +static int max1619_remove(struct i2c_client *client); static struct max1619_data *max1619_update_device(struct device *dev); /* * Driver data (common to all clients) */ +static const struct i2c_device_id max1619_id[] = { + { "max1619", max1619 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, max1619_id); + static struct i2c_driver max1619_driver = { + .class = I2C_CLASS_HWMON, .driver = { .name = "max1619", }, - .attach_adapter = max1619_attach_adapter, - .detach_client = max1619_detach_client, + .probe = max1619_probe, + .remove = max1619_remove, + .id_table = max1619_id, + .detect = max1619_detect, + .address_data = &addr_data, }; /* @@ -103,7 +114,6 @@ static struct i2c_driver max1619_driver = { */ struct max1619_data { - struct i2c_client client; struct device *hwmon_dev; struct mutex update_lock; char valid; /* zero until following fields are valid */ @@ -208,41 +218,15 @@ static const struct attribute_group max1619_group = { * Real code */ -static int max1619_attach_adapter(struct i2c_adapter *adapter) -{ - if (!(adapter->class & I2C_CLASS_HWMON)) - return 0; - return i2c_probe(adapter, &addr_data, max1619_detect); -} - -/* - * The following function does more than just detection. If detection - * succeeds, it also registers the new chip. - */ -static int max1619_detect(struct i2c_adapter *adapter, int address, int kind) +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int max1619_detect(struct i2c_client *new_client, int kind, + struct i2c_board_info *info) { - struct i2c_client *new_client; - struct max1619_data *data; - int err = 0; - const char *name = ""; + struct i2c_adapter *adapter = new_client->adapter; u8 reg_config=0, reg_convrate=0, reg_status=0; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) - goto exit; - - if (!(data = kzalloc(sizeof(struct max1619_data), GFP_KERNEL))) { - err = -ENOMEM; - goto exit; - } - - /* The common I2C client data is placed right before the - MAX1619-specific data. */ - new_client = &data->client; - i2c_set_clientdata(new_client, data); - new_client->addr = address; - new_client->adapter = adapter; - new_client->driver = &max1619_driver; - new_client->flags = 0; + return -ENODEV; /* * Now we do the remaining detection. 
A negative kind means that @@ -265,8 +249,8 @@ static int max1619_detect(struct i2c_adapter *adapter, int address, int kind) || reg_convrate > 0x07 || (reg_status & 0x61 ) !=0x00) { dev_dbg(&adapter->dev, "MAX1619 detection failed at 0x%02x.\n", - address); - goto exit_free; + new_client->addr); + return -ENODEV; } } @@ -285,28 +269,37 @@ static int max1619_detect(struct i2c_adapter *adapter, int address, int kind) dev_info(&adapter->dev, "Unsupported chip (man_id=0x%02X, " "chip_id=0x%02X).\n", man_id, chip_id); - goto exit_free; + return -ENODEV; } } - if (kind == max1619) - name = "max1619"; + strlcpy(info->type, "max1619", I2C_NAME_SIZE); + + return 0; +} + +static int max1619_probe(struct i2c_client *new_client, + const struct i2c_device_id *id) +{ + struct max1619_data *data; + int err; + + data = kzalloc(sizeof(struct max1619_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto exit; + } - /* We can fill in the remaining client fields */ - strlcpy(new_client->name, name, I2C_NAME_SIZE); + i2c_set_clientdata(new_client, data); data->valid = 0; mutex_init(&data->update_lock); - /* Tell the I2C layer a new client has arrived */ - if ((err = i2c_attach_client(new_client))) - goto exit_free; - /* Initialize the MAX1619 chip */ max1619_init_client(new_client); /* Register sysfs hooks */ if ((err = sysfs_create_group(&new_client->dev.kobj, &max1619_group))) - goto exit_detach; + goto exit_free; data->hwmon_dev = hwmon_device_register(&new_client->dev); if (IS_ERR(data->hwmon_dev)) { @@ -318,8 +311,6 @@ static int max1619_detect(struct i2c_adapter *adapter, int address, int kind) exit_remove_files: sysfs_remove_group(&new_client->dev.kobj, &max1619_group); -exit_detach: - i2c_detach_client(new_client); exit_free: kfree(data); exit: @@ -341,17 +332,13 @@ static void max1619_init_client(struct i2c_client *client) config & 0xBF); /* run */ } -static int max1619_detach_client(struct i2c_client *client) +static int max1619_remove(struct i2c_client *client) { struct max1619_data *data = i2c_get_clientdata(client); - int err; hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &max1619_group); - if ((err = i2c_detach_client(client))) - return err; - kfree(data); return 0; } diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c index 52d528b76cc3..f27af6a9da41 100644 --- a/drivers/hwmon/max6650.c +++ b/drivers/hwmon/max6650.c @@ -104,22 +104,34 @@ I2C_CLIENT_INSMOD_1(max6650); #define DIV_FROM_REG(reg) (1 << (reg & 7)) -static int max6650_attach_adapter(struct i2c_adapter *adapter); -static int max6650_detect(struct i2c_adapter *adapter, int address, int kind); +static int max6650_probe(struct i2c_client *client, + const struct i2c_device_id *id); +static int max6650_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info); static int max6650_init_client(struct i2c_client *client); -static int max6650_detach_client(struct i2c_client *client); +static int max6650_remove(struct i2c_client *client); static struct max6650_data *max6650_update_device(struct device *dev); /* * Driver data (common to all clients) */ +static const struct i2c_device_id max6650_id[] = { + { "max6650", max6650 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, max6650_id); + static struct i2c_driver max6650_driver = { + .class = I2C_CLASS_HWMON, .driver = { .name = "max6650", }, - .attach_adapter = max6650_attach_adapter, - .detach_client = max6650_detach_client, + .probe = max6650_probe, + .remove = max6650_remove, + .id_table = max6650_id, + .detect = max6650_detect, + 
.address_data = &addr_data, }; /* @@ -128,7 +140,6 @@ static struct i2c_driver max6650_driver = { struct max6650_data { - struct i2c_client client; struct device *hwmon_dev; struct mutex update_lock; char valid; /* zero until following fields are valid */ @@ -437,47 +448,21 @@ static struct attribute_group max6650_attr_grp = { * Real code */ -static int max6650_attach_adapter(struct i2c_adapter *adapter) +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int max6650_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info) { - if (!(adapter->class & I2C_CLASS_HWMON)) { - dev_dbg(&adapter->dev, - "FATAL: max6650_attach_adapter class HWMON not set\n"); - return 0; - } - - return i2c_probe(adapter, &addr_data, max6650_detect); -} - -/* - * The following function does more than just detection. If detection - * succeeds, it also registers the new chip. - */ - -static int max6650_detect(struct i2c_adapter *adapter, int address, int kind) -{ - struct i2c_client *client; - struct max6650_data *data; - int err = -ENODEV; + struct i2c_adapter *adapter = client->adapter; + int address = client->addr; dev_dbg(&adapter->dev, "max6650_detect called, kind = %d\n", kind); if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { dev_dbg(&adapter->dev, "max6650: I2C bus doesn't support " "byte read mode, skipping.\n"); - return 0; - } - - if (!(data = kzalloc(sizeof(struct max6650_data), GFP_KERNEL))) { - dev_err(&adapter->dev, "max6650: out of memory.\n"); - return -ENOMEM; + return -ENODEV; } - client = &data->client; - i2c_set_clientdata(client, data); - client->addr = address; - client->adapter = adapter; - client->driver = &max6650_driver; - /* * Now we do the remaining detection. A negative kind means that * the driver was loaded with no force parameter (default), so we @@ -501,28 +486,40 @@ static int max6650_detect(struct i2c_adapter *adapter, int address, int kind) ||(i2c_smbus_read_byte_data(client, MAX6650_REG_COUNT) & 0xFC))) { dev_dbg(&adapter->dev, "max6650: detection failed at 0x%02x.\n", address); - goto err_free; + return -ENODEV; } dev_info(&adapter->dev, "max6650: chip found at 0x%02x.\n", address); - strlcpy(client->name, "max6650", I2C_NAME_SIZE); - mutex_init(&data->update_lock); + strlcpy(info->type, "max6650", I2C_NAME_SIZE); - if ((err = i2c_attach_client(client))) { - dev_err(&adapter->dev, "max6650: failed to attach client.\n"); - goto err_free; + return 0; +} + +static int max6650_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct max6650_data *data; + int err; + + if (!(data = kzalloc(sizeof(struct max6650_data), GFP_KERNEL))) { + dev_err(&client->dev, "out of memory.\n"); + return -ENOMEM; } + i2c_set_clientdata(client, data); + mutex_init(&data->update_lock); + /* * Initialize the max6650 chip */ - if (max6650_init_client(client)) - goto err_detach; + err = max6650_init_client(client); + if (err) + goto err_free; err = sysfs_create_group(&client->dev.kobj, &max6650_attr_grp); if (err) - goto err_detach; + goto err_free; data->hwmon_dev = hwmon_device_register(&client->dev); if (!IS_ERR(data->hwmon_dev)) @@ -531,24 +528,19 @@ static int max6650_detect(struct i2c_adapter *adapter, int address, int kind) err = PTR_ERR(data->hwmon_dev); dev_err(&client->dev, "error registering hwmon device.\n"); sysfs_remove_group(&client->dev.kobj, &max6650_attr_grp); -err_detach: - i2c_detach_client(client); err_free: kfree(data); return err; } -static int max6650_detach_client(struct i2c_client *client) +static int 
max6650_remove(struct i2c_client *client) { struct max6650_data *data = i2c_get_clientdata(client); - int err; sysfs_remove_group(&client->dev.kobj, &max6650_attr_grp); hwmon_device_unregister(data->hwmon_dev); - err = i2c_detach_client(client); - if (!err) - kfree(data); - return err; + kfree(data); + return 0; } static int max6650_init_client(struct i2c_client *client) diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c index 3c9db6598ba7..8bb5cb532d4d 100644 --- a/drivers/hwmon/smsc47m192.c +++ b/drivers/hwmon/smsc47m192.c @@ -96,7 +96,6 @@ static inline int TEMP_FROM_REG(s8 val) } struct smsc47m192_data { - struct i2c_client client; struct device *hwmon_dev; struct mutex update_lock; char valid; /* !=0 if following fields are valid */ @@ -114,18 +113,29 @@ struct smsc47m192_data { u8 vrm; }; -static int smsc47m192_attach_adapter(struct i2c_adapter *adapter); -static int smsc47m192_detect(struct i2c_adapter *adapter, int address, - int kind); -static int smsc47m192_detach_client(struct i2c_client *client); +static int smsc47m192_probe(struct i2c_client *client, + const struct i2c_device_id *id); +static int smsc47m192_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info); +static int smsc47m192_remove(struct i2c_client *client); static struct smsc47m192_data *smsc47m192_update_device(struct device *dev); +static const struct i2c_device_id smsc47m192_id[] = { + { "smsc47m192", smsc47m192 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, smsc47m192_id); + static struct i2c_driver smsc47m192_driver = { + .class = I2C_CLASS_HWMON, .driver = { .name = "smsc47m192", }, - .attach_adapter = smsc47m192_attach_adapter, - .detach_client = smsc47m192_detach_client, + .probe = smsc47m192_probe, + .remove = smsc47m192_remove, + .id_table = smsc47m192_id, + .detect = smsc47m192_detect, + .address_data = &addr_data, }; /* Voltages */ @@ -440,17 +450,6 @@ static const struct attribute_group smsc47m192_group_in4 = { .attrs = smsc47m192_attributes_in4, }; -/* This function is called when: - * smsc47m192_driver is inserted (when this module is loaded), for each - available adapter - * when a new adapter is inserted (and smsc47m192_driver is still present) */ -static int smsc47m192_attach_adapter(struct i2c_adapter *adapter) -{ - if (!(adapter->class & I2C_CLASS_HWMON)) - return 0; - return i2c_probe(adapter, &addr_data, smsc47m192_detect); -} - static void smsc47m192_init_client(struct i2c_client *client) { int i; @@ -481,31 +480,15 @@ static void smsc47m192_init_client(struct i2c_client *client) } } -/* This function is called by i2c_probe */ -static int smsc47m192_detect(struct i2c_adapter *adapter, int address, - int kind) +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int smsc47m192_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info) { - struct i2c_client *client; - struct smsc47m192_data *data; - int err = 0; - int version, config; + struct i2c_adapter *adapter = client->adapter; + int version; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) - goto exit; - - if (!(data = kzalloc(sizeof(struct smsc47m192_data), GFP_KERNEL))) { - err = -ENOMEM; - goto exit; - } - - client = &data->client; - i2c_set_clientdata(client, data); - client->addr = address; - client->adapter = adapter; - client->driver = &smsc47m192_driver; - - if (kind == 0) - kind = smsc47m192; + return -ENODEV; /* Detection criteria from sensors_detect script */ if (kind < 0) { @@ -523,26 +506,39 @@ static int smsc47m192_detect(struct i2c_adapter 
*adapter, int address, } else { dev_dbg(&adapter->dev, "SMSC47M192 detection failed at 0x%02x\n", - address); - goto exit_free; + client->addr); + return -ENODEV; } } - /* Fill in the remaining client fields and put into the global list */ - strlcpy(client->name, "smsc47m192", I2C_NAME_SIZE); + strlcpy(info->type, "smsc47m192", I2C_NAME_SIZE); + + return 0; +} + +static int smsc47m192_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct smsc47m192_data *data; + int config; + int err; + + data = kzalloc(sizeof(struct smsc47m192_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(client, data); data->vrm = vid_which_vrm(); mutex_init(&data->update_lock); - /* Tell the I2C layer a new client has arrived */ - if ((err = i2c_attach_client(client))) - goto exit_free; - /* Initialize the SMSC47M192 chip */ smsc47m192_init_client(client); /* Register sysfs hooks */ if ((err = sysfs_create_group(&client->dev.kobj, &smsc47m192_group))) - goto exit_detach; + goto exit_free; /* Pin 110 is either in4 (+12V) or VID4 */ config = i2c_smbus_read_byte_data(client, SMSC47M192_REG_CONFIG); @@ -563,26 +559,20 @@ static int smsc47m192_detect(struct i2c_adapter *adapter, int address, exit_remove_files: sysfs_remove_group(&client->dev.kobj, &smsc47m192_group); sysfs_remove_group(&client->dev.kobj, &smsc47m192_group_in4); -exit_detach: - i2c_detach_client(client); exit_free: kfree(data); exit: return err; } -static int smsc47m192_detach_client(struct i2c_client *client) +static int smsc47m192_remove(struct i2c_client *client) { struct smsc47m192_data *data = i2c_get_clientdata(client); - int err; hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &smsc47m192_group); sysfs_remove_group(&client->dev.kobj, &smsc47m192_group_in4); - if ((err = i2c_detach_client(client))) - return err; - kfree(data); return 0; diff --git a/drivers/hwmon/thmc50.c b/drivers/hwmon/thmc50.c index 76a3859c3fbe..3b01001108c1 100644 --- a/drivers/hwmon/thmc50.c +++ b/drivers/hwmon/thmc50.c @@ -60,7 +60,6 @@ static const u8 THMC50_REG_TEMP_MAX[] = { 0x39, 0x37, 0x2B }; /* Each client has this additional data */ struct thmc50_data { - struct i2c_client client; struct device *hwmon_dev; struct mutex update_lock; @@ -77,17 +76,31 @@ struct thmc50_data { u8 alarms; }; -static int thmc50_attach_adapter(struct i2c_adapter *adapter); -static int thmc50_detach_client(struct i2c_client *client); +static int thmc50_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info); +static int thmc50_probe(struct i2c_client *client, + const struct i2c_device_id *id); +static int thmc50_remove(struct i2c_client *client); static void thmc50_init_client(struct i2c_client *client); static struct thmc50_data *thmc50_update_device(struct device *dev); +static const struct i2c_device_id thmc50_id[] = { + { "adm1022", adm1022 }, + { "thmc50", thmc50 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, thmc50_id); + static struct i2c_driver thmc50_driver = { + .class = I2C_CLASS_HWMON, .driver = { .name = "thmc50", }, - .attach_adapter = thmc50_attach_adapter, - .detach_client = thmc50_detach_client, + .probe = thmc50_probe, + .remove = thmc50_remove, + .id_table = thmc50_id, + .detect = thmc50_detect, + .address_data = &addr_data, }; static ssize_t show_analog_out(struct device *dev, @@ -250,39 +263,23 @@ static const struct attribute_group temp3_group = { .attrs = temp3_attributes, }; -static int thmc50_detect(struct i2c_adapter *adapter, int address, int kind) +/* Return 
0 if detection is successful, -ENODEV otherwise */ +static int thmc50_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info) { unsigned company; unsigned revision; unsigned config; - struct i2c_client *client; - struct thmc50_data *data; - struct device *dev; + struct i2c_adapter *adapter = client->adapter; int err = 0; const char *type_name; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { pr_debug("thmc50: detect failed, " "smbus byte data not supported!\n"); - goto exit; - } - - /* OK. For now, we presume we have a valid client. We now create the - client structure, even though we cannot fill it completely yet. - But it allows us to access thmc50 registers. */ - if (!(data = kzalloc(sizeof(struct thmc50_data), GFP_KERNEL))) { - pr_debug("thmc50: detect failed, kzalloc failed!\n"); - err = -ENOMEM; - goto exit; + return -ENODEV; } - client = &data->client; - i2c_set_clientdata(client, data); - client->addr = address; - client->adapter = adapter; - client->driver = &thmc50_driver; - dev = &client->dev; - pr_debug("thmc50: Probing for THMC50 at 0x%2X on bus %d\n", client->addr, i2c_adapter_id(client->adapter)); @@ -307,21 +304,22 @@ static int thmc50_detect(struct i2c_adapter *adapter, int address, int kind) } if (err == -ENODEV) { pr_debug("thmc50: Detection of THMC50/ADM1022 failed\n"); - goto exit_free; + return err; } - data->type = kind; if (kind == adm1022) { int id = i2c_adapter_id(client->adapter); int i; type_name = "adm1022"; - data->has_temp3 = (config >> 7) & 1; /* config MSB */ for (i = 0; i + 1 < adm1022_temp3_num; i += 2) if (adm1022_temp3[i] == id && - adm1022_temp3[i + 1] == address) { + adm1022_temp3[i + 1] == client->addr) { /* enable 2nd remote temp */ - data->has_temp3 = 1; + config |= (1 << 7); + i2c_smbus_write_byte_data(client, + THMC50_REG_CONF, + config); break; } } else { @@ -330,19 +328,33 @@ static int thmc50_detect(struct i2c_adapter *adapter, int address, int kind) pr_debug("thmc50: Detected %s (version %x, revision %x)\n", type_name, (revision >> 4) - 0xc, revision & 0xf); - /* Fill in the remaining client fields & put it into the global list */ - strlcpy(client->name, type_name, I2C_NAME_SIZE); - mutex_init(&data->update_lock); + strlcpy(info->type, type_name, I2C_NAME_SIZE); - /* Tell the I2C layer a new client has arrived */ - if ((err = i2c_attach_client(client))) - goto exit_free; + return 0; +} + +static int thmc50_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct thmc50_data *data; + int err; + + data = kzalloc(sizeof(struct thmc50_data), GFP_KERNEL); + if (!data) { + pr_debug("thmc50: detect failed, kzalloc failed!\n"); + err = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(client, data); + data->type = id->driver_data; + mutex_init(&data->update_lock); thmc50_init_client(client); /* Register sysfs hooks */ if ((err = sysfs_create_group(&client->dev.kobj, &thmc50_group))) - goto exit_detach; + goto exit_free; /* Register ADM1022 sysfs hooks */ if (data->has_temp3) @@ -364,34 +376,21 @@ exit_remove_sysfs: sysfs_remove_group(&client->dev.kobj, &temp3_group); exit_remove_sysfs_thmc50: sysfs_remove_group(&client->dev.kobj, &thmc50_group); -exit_detach: - i2c_detach_client(client); exit_free: kfree(data); exit: return err; } -static int thmc50_attach_adapter(struct i2c_adapter *adapter) -{ - if (!(adapter->class & I2C_CLASS_HWMON)) - return 0; - return i2c_probe(adapter, &addr_data, thmc50_detect); -} - -static int thmc50_detach_client(struct i2c_client *client) +static int 
thmc50_remove(struct i2c_client *client) { struct thmc50_data *data = i2c_get_clientdata(client); - int err; hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &thmc50_group); if (data->has_temp3) sysfs_remove_group(&client->dev.kobj, &temp3_group); - if ((err = i2c_detach_client(client))) - return err; - kfree(data); return 0; @@ -412,8 +411,8 @@ static void thmc50_init_client(struct i2c_client *client) } config = i2c_smbus_read_byte_data(client, THMC50_REG_CONF); config |= 0x1; /* start the chip if it is in standby mode */ - if (data->has_temp3) - config |= 0x80; /* enable 2nd remote temp */ + if (data->type == adm1022 && (config & (1 << 7))) + data->has_temp3 = 1; i2c_smbus_write_byte_data(client, THMC50_REG_CONF, config); } diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c index 85077c4c8039..e4e91c9d480a 100644 --- a/drivers/hwmon/w83791d.c +++ b/drivers/hwmon/w83791d.c @@ -247,7 +247,6 @@ static u8 div_to_reg(int nr, long val) } struct w83791d_data { - struct i2c_client client; struct device *hwmon_dev; struct mutex update_lock; @@ -286,9 +285,11 @@ struct w83791d_data { u8 vrm; /* hwmon-vid */ }; -static int w83791d_attach_adapter(struct i2c_adapter *adapter); -static int w83791d_detect(struct i2c_adapter *adapter, int address, int kind); -static int w83791d_detach_client(struct i2c_client *client); +static int w83791d_probe(struct i2c_client *client, + const struct i2c_device_id *id); +static int w83791d_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info); +static int w83791d_remove(struct i2c_client *client); static int w83791d_read(struct i2c_client *client, u8 register); static int w83791d_write(struct i2c_client *client, u8 register, u8 value); @@ -300,12 +301,22 @@ static void w83791d_print_debug(struct w83791d_data *data, struct device *dev); static void w83791d_init_client(struct i2c_client *client); +static const struct i2c_device_id w83791d_id[] = { + { "w83791d", w83791d }, + { } +}; +MODULE_DEVICE_TABLE(i2c, w83791d_id); + static struct i2c_driver w83791d_driver = { + .class = I2C_CLASS_HWMON, .driver = { .name = "w83791d", }, - .attach_adapter = w83791d_attach_adapter, - .detach_client = w83791d_detach_client, + .probe = w83791d_probe, + .remove = w83791d_remove, + .id_table = w83791d_id, + .detect = w83791d_detect, + .address_data = &addr_data, }; /* following are the sysfs callback functions */ @@ -905,49 +916,12 @@ static const struct attribute_group w83791d_group = { .attrs = w83791d_attributes, }; -/* This function is called when: - * w83791d_driver is inserted (when this module is loaded), for each - available adapter - * when a new adapter is inserted (and w83791d_driver is still present) */ -static int w83791d_attach_adapter(struct i2c_adapter *adapter) -{ - if (!(adapter->class & I2C_CLASS_HWMON)) - return 0; - return i2c_probe(adapter, &addr_data, w83791d_detect); -} - -static int w83791d_create_subclient(struct i2c_adapter *adapter, - struct i2c_client *client, int addr, - struct i2c_client **sub_cli) -{ - int err; - struct i2c_client *sub_client; - - (*sub_cli) = sub_client = - kzalloc(sizeof(struct i2c_client), GFP_KERNEL); - if (!(sub_client)) { - return -ENOMEM; - } - sub_client->addr = 0x48 + addr; - i2c_set_clientdata(sub_client, NULL); - sub_client->adapter = adapter; - sub_client->driver = &w83791d_driver; - strlcpy(sub_client->name, "w83791d subclient", I2C_NAME_SIZE); - if ((err = i2c_attach_client(sub_client))) { - dev_err(&client->dev, "subclient registration " - "at address 0x%x 
failed\n", sub_client->addr); - kfree(sub_client); - return err; - } - return 0; -} - - -static int w83791d_detect_subclients(struct i2c_adapter *adapter, int address, - int kind, struct i2c_client *client) +static int w83791d_detect_subclients(struct i2c_client *client) { + struct i2c_adapter *adapter = client->adapter; struct w83791d_data *data = i2c_get_clientdata(client); + int address = client->addr; int i, id, err; u8 val; @@ -971,10 +945,7 @@ static int w83791d_detect_subclients(struct i2c_adapter *adapter, int address, val = w83791d_read(client, W83791D_REG_I2C_SUBADDR); if (!(val & 0x08)) { - err = w83791d_create_subclient(adapter, client, - val & 0x7, &data->lm75[0]); - if (err < 0) - goto error_sc_0; + data->lm75[0] = i2c_new_dummy(adapter, 0x48 + (val & 0x7)); } if (!(val & 0x80)) { if ((data->lm75[0] != NULL) && @@ -986,10 +957,8 @@ static int w83791d_detect_subclients(struct i2c_adapter *adapter, int address, err = -ENODEV; goto error_sc_1; } - err = w83791d_create_subclient(adapter, client, - (val >> 4) & 0x7, &data->lm75[1]); - if (err < 0) - goto error_sc_1; + data->lm75[1] = i2c_new_dummy(adapter, + 0x48 + ((val >> 4) & 0x7)); } return 0; @@ -997,53 +966,31 @@ static int w83791d_detect_subclients(struct i2c_adapter *adapter, int address, /* Undo inits in case of errors */ error_sc_1: - if (data->lm75[0] != NULL) { - i2c_detach_client(data->lm75[0]); - kfree(data->lm75[0]); - } + if (data->lm75[0] != NULL) + i2c_unregister_device(data->lm75[0]); error_sc_0: return err; } -static int w83791d_detect(struct i2c_adapter *adapter, int address, int kind) +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int w83791d_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info) { - struct i2c_client *client; - struct device *dev; - struct w83791d_data *data; - int i, val1, val2; - int err = 0; - const char *client_name = ""; + struct i2c_adapter *adapter = client->adapter; + int val1, val2; + unsigned short address = client->addr; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { - goto error0; + return -ENODEV; } - /* OK. For now, we presume we have a valid client. We now create the - client structure, even though we cannot fill it completely yet. - But it allows us to access w83791d_{read,write}_value. */ - if (!(data = kzalloc(sizeof(struct w83791d_data), GFP_KERNEL))) { - err = -ENOMEM; - goto error0; - } - - client = &data->client; - dev = &client->dev; - i2c_set_clientdata(client, data); - client->addr = address; - client->adapter = adapter; - client->driver = &w83791d_driver; - mutex_init(&data->update_lock); - - /* Now, we do the remaining detection. */ - /* The w83791d may be stuck in some other bank than bank 0. This may make reading other information impossible. Specify a force=... parameter, and the Winbond will be reset to the right bank. 
*/ if (kind < 0) { if (w83791d_read(client, W83791D_REG_CONFIG) & 0x80) { - dev_dbg(dev, "Detection failed at step 1\n"); - goto error1; + return -ENODEV; } val1 = w83791d_read(client, W83791D_REG_BANK); val2 = w83791d_read(client, W83791D_REG_CHIPMAN); @@ -1052,15 +999,13 @@ static int w83791d_detect(struct i2c_adapter *adapter, int address, int kind) /* yes it is Bank0 */ if (((!(val1 & 0x80)) && (val2 != 0xa3)) || ((val1 & 0x80) && (val2 != 0x5c))) { - dev_dbg(dev, "Detection failed at step 2\n"); - goto error1; + return -ENODEV; } } /* If Winbond chip, address of chip and W83791D_REG_I2C_ADDR should match */ if (w83791d_read(client, W83791D_REG_I2C_ADDR) != address) { - dev_dbg(dev, "Detection failed at step 3\n"); - goto error1; + return -ENODEV; } } @@ -1075,30 +1020,33 @@ static int w83791d_detect(struct i2c_adapter *adapter, int address, int kind) /* get vendor ID */ val2 = w83791d_read(client, W83791D_REG_CHIPMAN); if (val2 != 0x5c) { /* the vendor is NOT Winbond */ - dev_dbg(dev, "Detection failed at step 4\n"); - goto error1; + return -ENODEV; } val1 = w83791d_read(client, W83791D_REG_WCHIPID); if (val1 == 0x71) { kind = w83791d; } else { if (kind == 0) - dev_warn(dev, + dev_warn(&adapter->dev, "w83791d: Ignoring 'force' parameter " "for unknown chip at adapter %d, " "address 0x%02x\n", i2c_adapter_id(adapter), address); - goto error1; + return -ENODEV; } } - if (kind == w83791d) { - client_name = "w83791d"; - } else { - dev_err(dev, "w83791d: Internal error: unknown kind (%d)?!?\n", - kind); - goto error1; - } + strlcpy(info->type, "w83791d", I2C_NAME_SIZE); + + return 0; +} + +static int w83791d_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct w83791d_data *data; + struct device *dev = &client->dev; + int i, val1, err; #ifdef DEBUG val1 = w83791d_read(client, W83791D_REG_DID_VID4); @@ -1106,15 +1054,18 @@ static int w83791d_detect(struct i2c_adapter *adapter, int address, int kind) (val1 >> 5) & 0x07, (val1 >> 1) & 0x0f, val1); #endif - /* Fill in the remaining client fields and put into the global list */ - strlcpy(client->name, client_name, I2C_NAME_SIZE); + data = kzalloc(sizeof(struct w83791d_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto error0; + } - /* Tell the I2C layer a new client has arrived */ - if ((err = i2c_attach_client(client))) - goto error1; + i2c_set_clientdata(client, data); + mutex_init(&data->update_lock); - if ((err = w83791d_detect_subclients(adapter, address, kind, client))) - goto error2; + err = w83791d_detect_subclients(client); + if (err) + goto error1; /* Initialize the chip */ w83791d_init_client(client); @@ -1141,43 +1092,29 @@ static int w83791d_detect(struct i2c_adapter *adapter, int address, int kind) error4: sysfs_remove_group(&client->dev.kobj, &w83791d_group); error3: - if (data->lm75[0] != NULL) { - i2c_detach_client(data->lm75[0]); - kfree(data->lm75[0]); - } - if (data->lm75[1] != NULL) { - i2c_detach_client(data->lm75[1]); - kfree(data->lm75[1]); - } -error2: - i2c_detach_client(client); + if (data->lm75[0] != NULL) + i2c_unregister_device(data->lm75[0]); + if (data->lm75[1] != NULL) + i2c_unregister_device(data->lm75[1]); error1: kfree(data); error0: return err; } -static int w83791d_detach_client(struct i2c_client *client) +static int w83791d_remove(struct i2c_client *client) { struct w83791d_data *data = i2c_get_clientdata(client); - int err; - - /* main client */ - if (data) { - hwmon_device_unregister(data->hwmon_dev); - sysfs_remove_group(&client->dev.kobj, &w83791d_group); - } - if 
((err = i2c_detach_client(client))) - return err; + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&client->dev.kobj, &w83791d_group); - /* main client */ - if (data) - kfree(data); - /* subclient */ - else - kfree(client); + if (data->lm75[0] != NULL) + i2c_unregister_device(data->lm75[0]); + if (data->lm75[1] != NULL) + i2c_unregister_device(data->lm75[1]); + kfree(data); return 0; } diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c index 299629d47ed6..cf94c5b0c879 100644 --- a/drivers/hwmon/w83792d.c +++ b/drivers/hwmon/w83792d.c @@ -267,9 +267,7 @@ DIV_TO_REG(long val) } struct w83792d_data { - struct i2c_client client; struct device *hwmon_dev; - enum chips type; struct mutex update_lock; char valid; /* !=0 if following fields are valid */ @@ -299,9 +297,11 @@ struct w83792d_data { u8 sf2_levels[3][4]; /* Smart FanII: Fan1,2,3 duty cycle levels */ }; -static int w83792d_attach_adapter(struct i2c_adapter *adapter); -static int w83792d_detect(struct i2c_adapter *adapter, int address, int kind); -static int w83792d_detach_client(struct i2c_client *client); +static int w83792d_probe(struct i2c_client *client, + const struct i2c_device_id *id); +static int w83792d_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info); +static int w83792d_remove(struct i2c_client *client); static struct w83792d_data *w83792d_update_device(struct device *dev); #ifdef DEBUG @@ -310,12 +310,22 @@ static void w83792d_print_debug(struct w83792d_data *data, struct device *dev); static void w83792d_init_client(struct i2c_client *client); +static const struct i2c_device_id w83792d_id[] = { + { "w83792d", w83792d }, + { } +}; +MODULE_DEVICE_TABLE(i2c, w83792d_id); + static struct i2c_driver w83792d_driver = { + .class = I2C_CLASS_HWMON, .driver = { .name = "w83792d", }, - .attach_adapter = w83792d_attach_adapter, - .detach_client = w83792d_detach_client, + .probe = w83792d_probe, + .remove = w83792d_remove, + .id_table = w83792d_id, + .detect = w83792d_detect, + .address_data = &addr_data, }; static inline long in_count_from_reg(int nr, struct w83792d_data *data) @@ -864,53 +874,14 @@ store_sf2_level(struct device *dev, struct device_attribute *attr, return count; } -/* This function is called when: - * w83792d_driver is inserted (when this module is loaded), for each - available adapter - * when a new adapter is inserted (and w83792d_driver is still present) */ -static int -w83792d_attach_adapter(struct i2c_adapter *adapter) -{ - if (!(adapter->class & I2C_CLASS_HWMON)) - return 0; - return i2c_probe(adapter, &addr_data, w83792d_detect); -} - - -static int -w83792d_create_subclient(struct i2c_adapter *adapter, - struct i2c_client *new_client, int addr, - struct i2c_client **sub_cli) -{ - int err; - struct i2c_client *sub_client; - - (*sub_cli) = sub_client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL); - if (!(sub_client)) { - return -ENOMEM; - } - sub_client->addr = 0x48 + addr; - i2c_set_clientdata(sub_client, NULL); - sub_client->adapter = adapter; - sub_client->driver = &w83792d_driver; - sub_client->flags = 0; - strlcpy(sub_client->name, "w83792d subclient", I2C_NAME_SIZE); - if ((err = i2c_attach_client(sub_client))) { - dev_err(&new_client->dev, "subclient registration " - "at address 0x%x failed\n", sub_client->addr); - kfree(sub_client); - return err; - } - return 0; -} - static int -w83792d_detect_subclients(struct i2c_adapter *adapter, int address, int kind, - struct i2c_client *new_client) +w83792d_detect_subclients(struct i2c_client *new_client) { 
int i, id, err; + int address = new_client->addr; u8 val; + struct i2c_adapter *adapter = new_client->adapter; struct w83792d_data *data = i2c_get_clientdata(new_client); id = i2c_adapter_id(adapter); @@ -932,10 +903,7 @@ w83792d_detect_subclients(struct i2c_adapter *adapter, int address, int kind, val = w83792d_read_value(new_client, W83792D_REG_I2C_SUBADDR); if (!(val & 0x08)) { - err = w83792d_create_subclient(adapter, new_client, val & 0x7, - &data->lm75[0]); - if (err < 0) - goto ERROR_SC_0; + data->lm75[0] = i2c_new_dummy(adapter, 0x48 + (val & 0x7)); } if (!(val & 0x80)) { if ((data->lm75[0] != NULL) && @@ -945,10 +913,8 @@ w83792d_detect_subclients(struct i2c_adapter *adapter, int address, int kind, err = -ENODEV; goto ERROR_SC_1; } - err = w83792d_create_subclient(adapter, new_client, - (val >> 4) & 0x7, &data->lm75[1]); - if (err < 0) - goto ERROR_SC_1; + data->lm75[1] = i2c_new_dummy(adapter, + 0x48 + ((val >> 4) & 0x7)); } return 0; @@ -956,10 +922,8 @@ w83792d_detect_subclients(struct i2c_adapter *adapter, int address, int kind, /* Undo inits in case of errors */ ERROR_SC_1: - if (data->lm75[0] != NULL) { - i2c_detach_client(data->lm75[0]); - kfree(data->lm75[0]); - } + if (data->lm75[0] != NULL) + i2c_unregister_device(data->lm75[0]); ERROR_SC_0: return err; } @@ -1294,47 +1258,25 @@ static const struct attribute_group w83792d_group = { .attrs = w83792d_attributes, }; +/* Return 0 if detection is successful, -ENODEV otherwise */ static int -w83792d_detect(struct i2c_adapter *adapter, int address, int kind) +w83792d_detect(struct i2c_client *client, int kind, struct i2c_board_info *info) { - int i = 0, val1 = 0, val2; - struct i2c_client *client; - struct device *dev; - struct w83792d_data *data; - int err = 0; - const char *client_name = ""; + struct i2c_adapter *adapter = client->adapter; + int val1, val2; + unsigned short address = client->addr; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { - goto ERROR0; - } - - /* OK. For now, we presume we have a valid client. We now create the - client structure, even though we cannot fill it completely yet. - But it allows us to access w83792d_{read,write}_value. */ - - if (!(data = kzalloc(sizeof(struct w83792d_data), GFP_KERNEL))) { - err = -ENOMEM; - goto ERROR0; + return -ENODEV; } - client = &data->client; - dev = &client->dev; - i2c_set_clientdata(client, data); - client->addr = address; - client->adapter = adapter; - client->driver = &w83792d_driver; - client->flags = 0; - - /* Now, we do the remaining detection. */ - /* The w83792d may be stuck in some other bank than bank 0. This may make reading other information impossible. Specify a force=... or force_*=... parameter, and the Winbond will be reset to the right bank. 
*/ if (kind < 0) { if (w83792d_read_value(client, W83792D_REG_CONFIG) & 0x80) { - dev_dbg(dev, "Detection failed at step 1\n"); - goto ERROR1; + return -ENODEV; } val1 = w83792d_read_value(client, W83792D_REG_BANK); val2 = w83792d_read_value(client, W83792D_REG_CHIPMAN); @@ -1342,16 +1284,14 @@ w83792d_detect(struct i2c_adapter *adapter, int address, int kind) if (!(val1 & 0x07)) { /* is Bank0 */ if (((!(val1 & 0x80)) && (val2 != 0xa3)) || ((val1 & 0x80) && (val2 != 0x5c))) { - dev_dbg(dev, "Detection failed at step 2\n"); - goto ERROR1; + return -ENODEV; } } /* If Winbond chip, address of chip and W83792D_REG_I2C_ADDR should match */ if (w83792d_read_value(client, W83792D_REG_I2C_ADDR) != address) { - dev_dbg(dev, "Detection failed at step 3\n"); - goto ERROR1; + return -ENODEV; } } @@ -1367,45 +1307,48 @@ w83792d_detect(struct i2c_adapter *adapter, int address, int kind) /* get vendor ID */ val2 = w83792d_read_value(client, W83792D_REG_CHIPMAN); if (val2 != 0x5c) { /* the vendor is NOT Winbond */ - goto ERROR1; + return -ENODEV; } val1 = w83792d_read_value(client, W83792D_REG_WCHIPID); if (val1 == 0x7a) { kind = w83792d; } else { if (kind == 0) - dev_warn(dev, + dev_warn(&adapter->dev, "w83792d: Ignoring 'force' parameter for" " unknown chip at adapter %d, address" " 0x%02x\n", i2c_adapter_id(adapter), address); - goto ERROR1; + return -ENODEV; } } - if (kind == w83792d) { - client_name = "w83792d"; - } else { - dev_err(dev, "w83792d: Internal error: unknown kind (%d)?!?\n", - kind); - goto ERROR1; - } + strlcpy(info->type, "w83792d", I2C_NAME_SIZE); + + return 0; +} + +static int +w83792d_probe(struct i2c_client *client, const struct i2c_device_id *id) +{ + struct w83792d_data *data; + struct device *dev = &client->dev; + int i, val1, err; - /* Fill in the remaining client fields and put into the global list */ - strlcpy(client->name, client_name, I2C_NAME_SIZE); - data->type = kind; + data = kzalloc(sizeof(struct w83792d_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto ERROR0; + } + i2c_set_clientdata(client, data); data->valid = 0; mutex_init(&data->update_lock); - /* Tell the I2C layer a new client has arrived */ - if ((err = i2c_attach_client(client))) + err = w83792d_detect_subclients(client); + if (err) goto ERROR1; - if ((err = w83792d_detect_subclients(adapter, address, - kind, client))) - goto ERROR2; - /* Initialize the chip */ w83792d_init_client(client); @@ -1457,16 +1400,10 @@ exit_remove_files: for (i = 0; i < ARRAY_SIZE(w83792d_group_fan); i++) sysfs_remove_group(&dev->kobj, &w83792d_group_fan[i]); ERROR3: - if (data->lm75[0] != NULL) { - i2c_detach_client(data->lm75[0]); - kfree(data->lm75[0]); - } - if (data->lm75[1] != NULL) { - i2c_detach_client(data->lm75[1]); - kfree(data->lm75[1]); - } -ERROR2: - i2c_detach_client(client); + if (data->lm75[0] != NULL) + i2c_unregister_device(data->lm75[0]); + if (data->lm75[1] != NULL) + i2c_unregister_device(data->lm75[1]); ERROR1: kfree(data); ERROR0: @@ -1474,30 +1411,23 @@ ERROR0: } static int -w83792d_detach_client(struct i2c_client *client) +w83792d_remove(struct i2c_client *client) { struct w83792d_data *data = i2c_get_clientdata(client); - int err, i; - - /* main client */ - if (data) { - hwmon_device_unregister(data->hwmon_dev); - sysfs_remove_group(&client->dev.kobj, &w83792d_group); - for (i = 0; i < ARRAY_SIZE(w83792d_group_fan); i++) - sysfs_remove_group(&client->dev.kobj, - &w83792d_group_fan[i]); - } + int i; - if ((err = i2c_detach_client(client))) - return err; + hwmon_device_unregister(data->hwmon_dev); 
+ sysfs_remove_group(&client->dev.kobj, &w83792d_group); + for (i = 0; i < ARRAY_SIZE(w83792d_group_fan); i++) + sysfs_remove_group(&client->dev.kobj, + &w83792d_group_fan[i]); - /* main client */ - if (data) - kfree(data); - /* subclient */ - else - kfree(client); + if (data->lm75[0] != NULL) + i2c_unregister_device(data->lm75[0]); + if (data->lm75[1] != NULL) + i2c_unregister_device(data->lm75[1]); + kfree(data); return 0; } diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c index ed3c019b78c7..0a739f1c69be 100644 --- a/drivers/hwmon/w83793.c +++ b/drivers/hwmon/w83793.c @@ -179,7 +179,6 @@ static inline s8 TEMP_TO_REG(long val, s8 min, s8 max) } struct w83793_data { - struct i2c_client client; struct i2c_client *lm75[2]; struct device *hwmon_dev; struct mutex update_lock; @@ -226,19 +225,31 @@ struct w83793_data { static u8 w83793_read_value(struct i2c_client *client, u16 reg); static int w83793_write_value(struct i2c_client *client, u16 reg, u8 value); -static int w83793_attach_adapter(struct i2c_adapter *adapter); -static int w83793_detect(struct i2c_adapter *adapter, int address, int kind); -static int w83793_detach_client(struct i2c_client *client); +static int w83793_probe(struct i2c_client *client, + const struct i2c_device_id *id); +static int w83793_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info); +static int w83793_remove(struct i2c_client *client); static void w83793_init_client(struct i2c_client *client); static void w83793_update_nonvolatile(struct device *dev); static struct w83793_data *w83793_update_device(struct device *dev); +static const struct i2c_device_id w83793_id[] = { + { "w83793", w83793 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, w83793_id); + static struct i2c_driver w83793_driver = { + .class = I2C_CLASS_HWMON, .driver = { .name = "w83793", }, - .attach_adapter = w83793_attach_adapter, - .detach_client = w83793_detach_client, + .probe = w83793_probe, + .remove = w83793_remove, + .id_table = w83793_id, + .detect = w83793_detect, + .address_data = &addr_data, }; static ssize_t @@ -1053,89 +1064,51 @@ static void w83793_init_client(struct i2c_client *client) } -static int w83793_attach_adapter(struct i2c_adapter *adapter) -{ - if (!(adapter->class & I2C_CLASS_HWMON)) - return 0; - return i2c_probe(adapter, &addr_data, w83793_detect); -} - -static int w83793_detach_client(struct i2c_client *client) +static int w83793_remove(struct i2c_client *client) { struct w83793_data *data = i2c_get_clientdata(client); struct device *dev = &client->dev; - int err, i; + int i; - /* main client */ - if (data) { - hwmon_device_unregister(data->hwmon_dev); + hwmon_device_unregister(data->hwmon_dev); - for (i = 0; i < ARRAY_SIZE(w83793_sensor_attr_2); i++) - device_remove_file(dev, - &w83793_sensor_attr_2[i].dev_attr); + for (i = 0; i < ARRAY_SIZE(w83793_sensor_attr_2); i++) + device_remove_file(dev, + &w83793_sensor_attr_2[i].dev_attr); - for (i = 0; i < ARRAY_SIZE(sda_single_files); i++) - device_remove_file(dev, &sda_single_files[i].dev_attr); + for (i = 0; i < ARRAY_SIZE(sda_single_files); i++) + device_remove_file(dev, &sda_single_files[i].dev_attr); - for (i = 0; i < ARRAY_SIZE(w83793_vid); i++) - device_remove_file(dev, &w83793_vid[i].dev_attr); - device_remove_file(dev, &dev_attr_vrm); + for (i = 0; i < ARRAY_SIZE(w83793_vid); i++) + device_remove_file(dev, &w83793_vid[i].dev_attr); + device_remove_file(dev, &dev_attr_vrm); - for (i = 0; i < ARRAY_SIZE(w83793_left_fan); i++) - device_remove_file(dev, &w83793_left_fan[i].dev_attr); + for (i = 
0; i < ARRAY_SIZE(w83793_left_fan); i++) + device_remove_file(dev, &w83793_left_fan[i].dev_attr); - for (i = 0; i < ARRAY_SIZE(w83793_left_pwm); i++) - device_remove_file(dev, &w83793_left_pwm[i].dev_attr); + for (i = 0; i < ARRAY_SIZE(w83793_left_pwm); i++) + device_remove_file(dev, &w83793_left_pwm[i].dev_attr); - for (i = 0; i < ARRAY_SIZE(w83793_temp); i++) - device_remove_file(dev, &w83793_temp[i].dev_attr); - } + for (i = 0; i < ARRAY_SIZE(w83793_temp); i++) + device_remove_file(dev, &w83793_temp[i].dev_attr); - if ((err = i2c_detach_client(client))) - return err; + if (data->lm75[0] != NULL) + i2c_unregister_device(data->lm75[0]); + if (data->lm75[1] != NULL) + i2c_unregister_device(data->lm75[1]); - /* main client */ - if (data) - kfree(data); - /* subclient */ - else - kfree(client); + kfree(data); return 0; } static int -w83793_create_subclient(struct i2c_adapter *adapter, - struct i2c_client *client, int addr, - struct i2c_client **sub_cli) -{ - int err = 0; - struct i2c_client *sub_client; - - (*sub_cli) = sub_client = - kzalloc(sizeof(struct i2c_client), GFP_KERNEL); - if (!(sub_client)) { - return -ENOMEM; - } - sub_client->addr = 0x48 + addr; - i2c_set_clientdata(sub_client, NULL); - sub_client->adapter = adapter; - sub_client->driver = &w83793_driver; - strlcpy(sub_client->name, "w83793 subclient", I2C_NAME_SIZE); - if ((err = i2c_attach_client(sub_client))) { - dev_err(&client->dev, "subclient registration " - "at address 0x%x failed\n", sub_client->addr); - kfree(sub_client); - } - return err; -} - -static int -w83793_detect_subclients(struct i2c_adapter *adapter, int address, - int kind, struct i2c_client *client) +w83793_detect_subclients(struct i2c_client *client) { int i, id, err; + int address = client->addr; u8 tmp; + struct i2c_adapter *adapter = client->adapter; struct w83793_data *data = i2c_get_clientdata(client); id = i2c_adapter_id(adapter); @@ -1158,11 +1131,7 @@ w83793_detect_subclients(struct i2c_adapter *adapter, int address, tmp = w83793_read_value(client, W83793_REG_I2C_SUBADDR); if (!(tmp & 0x08)) { - err = - w83793_create_subclient(adapter, client, tmp & 0x7, - &data->lm75[0]); - if (err < 0) - goto ERROR_SC_0; + data->lm75[0] = i2c_new_dummy(adapter, 0x48 + (tmp & 0x7)); } if (!(tmp & 0x80)) { if ((data->lm75[0] != NULL) @@ -1173,10 +1142,8 @@ w83793_detect_subclients(struct i2c_adapter *adapter, int address, err = -ENODEV; goto ERROR_SC_1; } - err = w83793_create_subclient(adapter, client, - (tmp >> 4) & 0x7, &data->lm75[1]); - if (err < 0) - goto ERROR_SC_1; + data->lm75[1] = i2c_new_dummy(adapter, + 0x48 + ((tmp >> 4) & 0x7)); } return 0; @@ -1184,69 +1151,44 @@ w83793_detect_subclients(struct i2c_adapter *adapter, int address, /* Undo inits in case of errors */ ERROR_SC_1: - if (data->lm75[0] != NULL) { - i2c_detach_client(data->lm75[0]); - kfree(data->lm75[0]); - } + if (data->lm75[0] != NULL) + i2c_unregister_device(data->lm75[0]); ERROR_SC_0: return err; } -static int w83793_detect(struct i2c_adapter *adapter, int address, int kind) +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int w83793_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info) { - int i; - u8 tmp, val; - struct i2c_client *client; - struct device *dev; - struct w83793_data *data; - int files_fan = ARRAY_SIZE(w83793_left_fan) / 7; - int files_pwm = ARRAY_SIZE(w83793_left_pwm) / 5; - int files_temp = ARRAY_SIZE(w83793_temp) / 6; - int err = 0; + u8 tmp, bank; + struct i2c_adapter *adapter = client->adapter; + unsigned short address = 
client->addr; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { - goto exit; + return -ENODEV; } - /* OK. For now, we presume we have a valid client. We now create the - client structure, even though we cannot fill it completely yet. - But it allows us to access w83793_{read,write}_value. */ - - if (!(data = kzalloc(sizeof(struct w83793_data), GFP_KERNEL))) { - err = -ENOMEM; - goto exit; - } - - client = &data->client; - dev = &client->dev; - i2c_set_clientdata(client, data); - client->addr = address; - client->adapter = adapter; - client->driver = &w83793_driver; + bank = i2c_smbus_read_byte_data(client, W83793_REG_BANKSEL); - data->bank = i2c_smbus_read_byte_data(client, W83793_REG_BANKSEL); - - /* Now, we do the remaining detection. */ if (kind < 0) { - tmp = data->bank & 0x80 ? 0x5c : 0xa3; + tmp = bank & 0x80 ? 0x5c : 0xa3; /* Check Winbond vendor ID */ if (tmp != i2c_smbus_read_byte_data(client, W83793_REG_VENDORID)) { pr_debug("w83793: Detection failed at check " "vendor id\n"); - err = -ENODEV; - goto free_mem; + return -ENODEV; } /* If Winbond chip, address of chip and W83793_REG_I2C_ADDR should match */ - if ((data->bank & 0x07) == 0 + if ((bank & 0x07) == 0 && i2c_smbus_read_byte_data(client, W83793_REG_I2C_ADDR) != (address << 1)) { pr_debug("w83793: Detection failed at check " "i2c addr\n"); - err = -ENODEV; - goto free_mem; + return -ENODEV; } } @@ -1255,30 +1197,47 @@ static int w83793_detect(struct i2c_adapter *adapter, int address, int kind) Winbond. Determine the chip type now */ if (kind <= 0) { - if (0x7b == w83793_read_value(client, W83793_REG_CHIPID)) { + if (0x7b == i2c_smbus_read_byte_data(client, + W83793_REG_CHIPID)) { kind = w83793; } else { if (kind == 0) dev_warn(&adapter->dev, "w83793: Ignoring " "'force' parameter for unknown chip " "at address 0x%02x\n", address); - err = -ENODEV; - goto free_mem; + return -ENODEV; } } - /* Fill in the remaining client fields and put into the global list */ - strlcpy(client->name, "w83793", I2C_NAME_SIZE); + strlcpy(info->type, "w83793", I2C_NAME_SIZE); + + return 0; +} +static int w83793_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + struct w83793_data *data; + int i, tmp, val, err; + int files_fan = ARRAY_SIZE(w83793_left_fan) / 7; + int files_pwm = ARRAY_SIZE(w83793_left_pwm) / 5; + int files_temp = ARRAY_SIZE(w83793_temp) / 6; + + data = kzalloc(sizeof(struct w83793_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(client, data); + data->bank = i2c_smbus_read_byte_data(client, W83793_REG_BANKSEL); mutex_init(&data->update_lock); - /* Tell the I2C layer a new client has arrived */ - if ((err = i2c_attach_client(client))) + err = w83793_detect_subclients(client); + if (err) goto free_mem; - if ((err = w83793_detect_subclients(adapter, address, kind, client))) - goto detach_client; - /* Initialize the chip */ w83793_init_client(client); @@ -1459,16 +1418,10 @@ exit_remove: for (i = 0; i < ARRAY_SIZE(w83793_temp); i++) device_remove_file(dev, &w83793_temp[i].dev_attr); - if (data->lm75[0] != NULL) { - i2c_detach_client(data->lm75[0]); - kfree(data->lm75[0]); - } - if (data->lm75[1] != NULL) { - i2c_detach_client(data->lm75[1]); - kfree(data->lm75[1]); - } -detach_client: - i2c_detach_client(client); + if (data->lm75[0] != NULL) + i2c_unregister_device(data->lm75[0]); + if (data->lm75[1] != NULL) + i2c_unregister_device(data->lm75[1]); free_mem: kfree(data); exit: diff --git a/drivers/hwmon/w83l785ts.c 
b/drivers/hwmon/w83l785ts.c index 52e268e25dab..ea295b9fc4f4 100644 --- a/drivers/hwmon/w83l785ts.c +++ b/drivers/hwmon/w83l785ts.c @@ -81,10 +81,11 @@ I2C_CLIENT_INSMOD_1(w83l785ts); * Functions declaration */ -static int w83l785ts_attach_adapter(struct i2c_adapter *adapter); -static int w83l785ts_detect(struct i2c_adapter *adapter, int address, - int kind); -static int w83l785ts_detach_client(struct i2c_client *client); +static int w83l785ts_probe(struct i2c_client *client, + const struct i2c_device_id *id); +static int w83l785ts_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info); +static int w83l785ts_remove(struct i2c_client *client); static u8 w83l785ts_read_value(struct i2c_client *client, u8 reg, u8 defval); static struct w83l785ts_data *w83l785ts_update_device(struct device *dev); @@ -92,12 +93,22 @@ static struct w83l785ts_data *w83l785ts_update_device(struct device *dev); * Driver data (common to all clients) */ +static const struct i2c_device_id w83l785ts_id[] = { + { "w83l785ts", w83l785ts }, + { } +}; +MODULE_DEVICE_TABLE(i2c, w83l785ts_id); + static struct i2c_driver w83l785ts_driver = { + .class = I2C_CLASS_HWMON, .driver = { .name = "w83l785ts", }, - .attach_adapter = w83l785ts_attach_adapter, - .detach_client = w83l785ts_detach_client, + .probe = w83l785ts_probe, + .remove = w83l785ts_remove, + .id_table = w83l785ts_id, + .detect = w83l785ts_detect, + .address_data = &addr_data, }; /* @@ -105,7 +116,6 @@ static struct i2c_driver w83l785ts_driver = { */ struct w83l785ts_data { - struct i2c_client client; struct device *hwmon_dev; struct mutex update_lock; char valid; /* zero until following fields are valid */ @@ -135,40 +145,14 @@ static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_temp, NULL, 1); * Real code */ -static int w83l785ts_attach_adapter(struct i2c_adapter *adapter) -{ - if (!(adapter->class & I2C_CLASS_HWMON)) - return 0; - return i2c_probe(adapter, &addr_data, w83l785ts_detect); -} - -/* - * The following function does more than just detection. If detection - * succeeds, it also registers the new chip. - */ -static int w83l785ts_detect(struct i2c_adapter *adapter, int address, int kind) +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int w83l785ts_detect(struct i2c_client *new_client, int kind, + struct i2c_board_info *info) { - struct i2c_client *new_client; - struct w83l785ts_data *data; - int err = 0; - + struct i2c_adapter *adapter = new_client->adapter; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) - goto exit; - - if (!(data = kzalloc(sizeof(struct w83l785ts_data), GFP_KERNEL))) { - err = -ENOMEM; - goto exit; - } - - /* The common I2C client data is placed right before the - * W83L785TS-specific data. */ - new_client = &data->client; - i2c_set_clientdata(new_client, data); - new_client->addr = address; - new_client->adapter = adapter; - new_client->driver = &w83l785ts_driver; - new_client->flags = 0; + return -ENODEV; /* * Now we do the remaining detection. 
A negative kind means that @@ -188,8 +172,8 @@ static int w83l785ts_detect(struct i2c_adapter *adapter, int address, int kind) W83L785TS_REG_TYPE, 0) & 0xFC) != 0x00)) { dev_dbg(&adapter->dev, "W83L785TS-S detection failed at 0x%02x.\n", - address); - goto exit_free; + new_client->addr); + return -ENODEV; } } @@ -214,22 +198,34 @@ static int w83l785ts_detect(struct i2c_adapter *adapter, int address, int kind) dev_info(&adapter->dev, "Unsupported chip (man_id=0x%04X, " "chip_id=0x%02X).\n", man_id, chip_id); - goto exit_free; + return -ENODEV; } } - /* We can fill in the remaining client fields. */ - strlcpy(new_client->name, "w83l785ts", I2C_NAME_SIZE); + strlcpy(info->type, "w83l785ts", I2C_NAME_SIZE); + + return 0; +} + +static int w83l785ts_probe(struct i2c_client *new_client, + const struct i2c_device_id *id) +{ + struct w83l785ts_data *data; + int err = 0; + + data = kzalloc(sizeof(struct w83l785ts_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(new_client, data); data->valid = 0; mutex_init(&data->update_lock); /* Default values in case the first read fails (unlikely). */ data->temp[1] = data->temp[0] = 0; - /* Tell the I2C layer a new client has arrived. */ - if ((err = i2c_attach_client(new_client))) - goto exit_free; - /* * Initialize the W83L785TS chip * Nothing yet, assume it is already started. @@ -259,25 +255,20 @@ exit_remove: &sensor_dev_attr_temp1_input.dev_attr); device_remove_file(&new_client->dev, &sensor_dev_attr_temp1_max.dev_attr); - i2c_detach_client(new_client); -exit_free: kfree(data); exit: return err; } -static int w83l785ts_detach_client(struct i2c_client *client) +static int w83l785ts_remove(struct i2c_client *client) { struct w83l785ts_data *data = i2c_get_clientdata(client); - int err; hwmon_device_unregister(data->hwmon_dev); device_remove_file(&client->dev, &sensor_dev_attr_temp1_input.dev_attr); device_remove_file(&client->dev, &sensor_dev_attr_temp1_max.dev_attr); - if ((err = i2c_detach_client(client))) - return err; kfree(data); return 0; @@ -286,6 +277,18 @@ static int w83l785ts_detach_client(struct i2c_client *client) static u8 w83l785ts_read_value(struct i2c_client *client, u8 reg, u8 defval) { int value, i; + struct device *dev; + const char *prefix; + + /* We might be called during detection, at which point the client + isn't yet fully initialized, so we can't use dev_dbg on it */ + if (i2c_get_clientdata(client)) { + dev = &client->dev; + prefix = ""; + } else { + dev = &client->adapter->dev; + prefix = "w83l785ts: "; + } /* Frequent read errors have been reported on Asus boards, so we * retry on read errors. 
If it still fails (unlikely), return the @@ -293,15 +296,15 @@ static u8 w83l785ts_read_value(struct i2c_client *client, u8 reg, u8 defval) for (i = 1; i <= MAX_RETRIES; i++) { value = i2c_smbus_read_byte_data(client, reg); if (value >= 0) { - dev_dbg(&client->dev, "Read 0x%02x from register " - "0x%02x.\n", value, reg); + dev_dbg(dev, "%sRead 0x%02x from register 0x%02x.\n", + prefix, value, reg); return value; } - dev_dbg(&client->dev, "Read failed, will retry in %d.\n", i); + dev_dbg(dev, "%sRead failed, will retry in %d.\n", prefix, i); msleep(i); } - dev_err(&client->dev, "Couldn't read value from register 0x%02x.\n", + dev_err(dev, "%sCouldn't read value from register 0x%02x.\n", prefix, reg); return defval; } diff --git a/drivers/hwmon/w83l786ng.c b/drivers/hwmon/w83l786ng.c index 41e22ddb568a..badca769f350 100644 --- a/drivers/hwmon/w83l786ng.c +++ b/drivers/hwmon/w83l786ng.c @@ -121,7 +121,6 @@ DIV_TO_REG(long val) } struct w83l786ng_data { - struct i2c_client client; struct device *hwmon_dev; struct mutex update_lock; char valid; /* !=0 if following fields are valid */ @@ -146,18 +145,30 @@ struct w83l786ng_data { u8 tolerance[2]; }; -static int w83l786ng_attach_adapter(struct i2c_adapter *adapter); -static int w83l786ng_detect(struct i2c_adapter *adapter, int address, int kind); -static int w83l786ng_detach_client(struct i2c_client *client); +static int w83l786ng_probe(struct i2c_client *client, + const struct i2c_device_id *id); +static int w83l786ng_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info); +static int w83l786ng_remove(struct i2c_client *client); static void w83l786ng_init_client(struct i2c_client *client); static struct w83l786ng_data *w83l786ng_update_device(struct device *dev); +static const struct i2c_device_id w83l786ng_id[] = { + { "w83l786ng", w83l786ng }, + { } +}; +MODULE_DEVICE_TABLE(i2c, w83l786ng_id); + static struct i2c_driver w83l786ng_driver = { + .class = I2C_CLASS_HWMON, .driver = { .name = "w83l786ng", }, - .attach_adapter = w83l786ng_attach_adapter, - .detach_client = w83l786ng_detach_client, + .probe = w83l786ng_probe, + .remove = w83l786ng_remove, + .id_table = w83l786ng_id, + .detect = w83l786ng_detect, + .address_data = &addr_data, }; static u8 @@ -575,42 +586,15 @@ static const struct attribute_group w83l786ng_group = { }; static int -w83l786ng_attach_adapter(struct i2c_adapter *adapter) +w83l786ng_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info) { - if (!(adapter->class & I2C_CLASS_HWMON)) - return 0; - return i2c_probe(adapter, &addr_data, w83l786ng_detect); -} - -static int -w83l786ng_detect(struct i2c_adapter *adapter, int address, int kind) -{ - struct i2c_client *client; - struct device *dev; - struct w83l786ng_data *data; - int i, err = 0; - u8 reg_tmp; + struct i2c_adapter *adapter = client->adapter; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { - goto exit; - } - - /* OK. For now, we presume we have a valid client. We now create the - client structure, even though we cannot fill it completely yet. - But it allows us to access w83l786ng_{read,write}_value. */ - - if (!(data = kzalloc(sizeof(struct w83l786ng_data), GFP_KERNEL))) { - err = -ENOMEM; - goto exit; + return -ENODEV; } - client = &data->client; - dev = &client->dev; - i2c_set_clientdata(client, data); - client->addr = address; - client->adapter = adapter; - client->driver = &w83l786ng_driver; - /* * Now we do the remaining detection. 
A negative kind means that * the driver was loaded with no force parameter (default), so we @@ -627,8 +611,8 @@ w83l786ng_detect(struct i2c_adapter *adapter, int address, int kind) W83L786NG_REG_CONFIG) & 0x80) != 0x00)) { dev_dbg(&adapter->dev, "W83L786NG detection failed at 0x%02x.\n", - address); - goto exit_free; + client->addr); + return -ENODEV; } } @@ -651,17 +635,31 @@ w83l786ng_detect(struct i2c_adapter *adapter, int address, int kind) dev_info(&adapter->dev, "Unsupported chip (man_id=0x%04X, " "chip_id=0x%02X).\n", man_id, chip_id); - goto exit_free; + return -ENODEV; } } - /* Fill in the remaining client fields and put into the global list */ - strlcpy(client->name, "w83l786ng", I2C_NAME_SIZE); - mutex_init(&data->update_lock); + strlcpy(info->type, "w83l786ng", I2C_NAME_SIZE); - /* Tell the I2C layer a new client has arrived */ - if ((err = i2c_attach_client(client))) - goto exit_free; + return 0; +} + +static int +w83l786ng_probe(struct i2c_client *client, const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + struct w83l786ng_data *data; + int i, err = 0; + u8 reg_tmp; + + data = kzalloc(sizeof(struct w83l786ng_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(client, data); + mutex_init(&data->update_lock); /* Initialize the chip */ w83l786ng_init_client(client); @@ -693,25 +691,19 @@ w83l786ng_detect(struct i2c_adapter *adapter, int address, int kind) exit_remove: sysfs_remove_group(&client->dev.kobj, &w83l786ng_group); - i2c_detach_client(client); -exit_free: kfree(data); exit: return err; } static int -w83l786ng_detach_client(struct i2c_client *client) +w83l786ng_remove(struct i2c_client *client) { struct w83l786ng_data *data = i2c_get_clientdata(client); - int err; hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &w83l786ng_group); - if ((err = i2c_detach_client(client))) - return err; - kfree(data); return 0; diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c index 2f150e33c74c..72872d1e63ef 100644 --- a/drivers/i2c/busses/i2c-amd756-s4882.c +++ b/drivers/i2c/busses/i2c-amd756-s4882.c @@ -155,6 +155,16 @@ static int __init amd756_s4882_init(void) int i, error; union i2c_smbus_data ioconfig; + /* Configure the PCA9556 multiplexer */ + ioconfig.byte = 0x00; /* All I/O to output mode */ + error = i2c_smbus_xfer(&amd756_smbus, 0x18, 0, I2C_SMBUS_WRITE, 0x03, + I2C_SMBUS_BYTE_DATA, &ioconfig); + if (error) { + dev_err(&amd756_smbus.dev, "PCA9556 configuration failed\n"); + error = -EIO; + goto ERROR0; + } + /* Unregister physical bus */ error = i2c_del_adapter(&amd756_smbus); if (error) { @@ -198,22 +208,11 @@ static int __init amd756_s4882_init(void) s4882_algo[3].smbus_xfer = amd756_access_virt3; s4882_algo[4].smbus_xfer = amd756_access_virt4; - /* Configure the PCA9556 multiplexer */ - ioconfig.byte = 0x00; /* All I/O to output mode */ - error = amd756_smbus.algo->smbus_xfer(&amd756_smbus, 0x18, 0, - I2C_SMBUS_WRITE, 0x03, - I2C_SMBUS_BYTE_DATA, &ioconfig); - if (error) { - dev_err(&amd756_smbus.dev, "PCA9556 configuration failed\n"); - error = -EIO; - goto ERROR3; - } - /* Register virtual adapters */ for (i = 0; i < 5; i++) { error = i2c_add_adapter(s4882_adapter+i); if (error) { - dev_err(&amd756_smbus.dev, + printk(KERN_ERR "i2c-amd756-s4882: " "Virtual adapter %d registration " "failed, module not inserted\n", i); for (i--; i >= 0; i--) @@ -252,8 +251,8 @@ static void __exit amd756_s4882_exit(void) /* Restore physical bus */ if 
(i2c_add_adapter(&amd756_smbus)) - dev_err(&amd756_smbus.dev, "Physical bus restoration " - "failed\n"); + printk(KERN_ERR "i2c-amd756-s4882: " + "Physical bus restoration failed\n"); } MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>"); diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c index 10b9342a36c2..27443f073bc9 100644 --- a/drivers/i2c/busses/i2c-mpc.c +++ b/drivers/i2c/busses/i2c-mpc.c @@ -17,7 +17,8 @@ #include <linux/module.h> #include <linux/sched.h> #include <linux/init.h> -#include <linux/platform_device.h> +#include <linux/of_platform.h> +#include <linux/of_i2c.h> #include <asm/io.h> #include <linux/fsl_devices.h> @@ -25,13 +26,13 @@ #include <linux/interrupt.h> #include <linux/delay.h> -#define MPC_I2C_ADDR 0x00 +#define DRV_NAME "mpc-i2c" + #define MPC_I2C_FDR 0x04 #define MPC_I2C_CR 0x08 #define MPC_I2C_SR 0x0c #define MPC_I2C_DR 0x10 #define MPC_I2C_DFSRR 0x14 -#define MPC_I2C_REGION 0x20 #define CCR_MEN 0x80 #define CCR_MIEN 0x40 @@ -315,102 +316,117 @@ static struct i2c_adapter mpc_ops = { .timeout = 1, }; -static int fsl_i2c_probe(struct platform_device *pdev) +static int __devinit fsl_i2c_probe(struct of_device *op, const struct of_device_id *match) { int result = 0; struct mpc_i2c *i2c; - struct fsl_i2c_platform_data *pdata; - struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - - pdata = (struct fsl_i2c_platform_data *) pdev->dev.platform_data; i2c = kzalloc(sizeof(*i2c), GFP_KERNEL); if (!i2c) return -ENOMEM; - i2c->irq = platform_get_irq(pdev, 0); - if (i2c->irq < 0) - i2c->irq = NO_IRQ; /* Use polling */ + if (of_get_property(op->node, "dfsrr", NULL)) + i2c->flags |= FSL_I2C_DEV_SEPARATE_DFSRR; - i2c->flags = pdata->device_flags; - init_waitqueue_head(&i2c->queue); + if (of_device_is_compatible(op->node, "fsl,mpc5200-i2c") || + of_device_is_compatible(op->node, "mpc5200-i2c")) + i2c->flags |= FSL_I2C_DEV_CLOCK_5200; - i2c->base = ioremap((phys_addr_t)r->start, MPC_I2C_REGION); + init_waitqueue_head(&i2c->queue); + i2c->base = of_iomap(op->node, 0); if (!i2c->base) { printk(KERN_ERR "i2c-mpc - failed to map controller\n"); result = -ENOMEM; goto fail_map; } - if (i2c->irq != NO_IRQ) - if ((result = request_irq(i2c->irq, mpc_i2c_isr, - IRQF_SHARED, "i2c-mpc", i2c)) < 0) { - printk(KERN_ERR - "i2c-mpc - failed to attach interrupt\n"); - goto fail_irq; + i2c->irq = irq_of_parse_and_map(op->node, 0); + if (i2c->irq != NO_IRQ) { /* i2c->irq = NO_IRQ implies polling */ + result = request_irq(i2c->irq, mpc_i2c_isr, + IRQF_SHARED, "i2c-mpc", i2c); + if (result < 0) { + printk(KERN_ERR "i2c-mpc - failed to attach interrupt\n"); + goto fail_request; } - + } + mpc_i2c_setclock(i2c); - platform_set_drvdata(pdev, i2c); + + dev_set_drvdata(&op->dev, i2c); i2c->adap = mpc_ops; - i2c->adap.nr = pdev->id; i2c_set_adapdata(&i2c->adap, i2c); - i2c->adap.dev.parent = &pdev->dev; - if ((result = i2c_add_numbered_adapter(&i2c->adap)) < 0) { + i2c->adap.dev.parent = &op->dev; + + result = i2c_add_adapter(&i2c->adap); + if (result < 0) { printk(KERN_ERR "i2c-mpc - failed to add adapter\n"); goto fail_add; } + of_register_i2c_devices(&i2c->adap, op->node); return result; - fail_add: - if (i2c->irq != NO_IRQ) - free_irq(i2c->irq, i2c); - fail_irq: - iounmap(i2c->base); - fail_map: + fail_add: + dev_set_drvdata(&op->dev, NULL); + free_irq(i2c->irq, i2c); + fail_request: + irq_dispose_mapping(i2c->irq); + iounmap(i2c->base); + fail_map: kfree(i2c); return result; }; -static int fsl_i2c_remove(struct platform_device *pdev) +static int __devexit 
fsl_i2c_remove(struct of_device *op) { - struct mpc_i2c *i2c = platform_get_drvdata(pdev); + struct mpc_i2c *i2c = dev_get_drvdata(&op->dev); i2c_del_adapter(&i2c->adap); - platform_set_drvdata(pdev, NULL); + dev_set_drvdata(&op->dev, NULL); if (i2c->irq != NO_IRQ) free_irq(i2c->irq, i2c); + irq_dispose_mapping(i2c->irq); iounmap(i2c->base); kfree(i2c); return 0; }; -/* work with hotplug and coldplug */ -MODULE_ALIAS("platform:fsl-i2c"); +static const struct of_device_id mpc_i2c_of_match[] = { + {.compatible = "fsl-i2c",}, + {}, +}; +MODULE_DEVICE_TABLE(of, mpc_i2c_of_match); + /* Structure for a device driver */ -static struct platform_driver fsl_i2c_driver = { - .probe = fsl_i2c_probe, - .remove = fsl_i2c_remove, - .driver = { - .owner = THIS_MODULE, - .name = "fsl-i2c", +static struct of_platform_driver mpc_i2c_driver = { + .match_table = mpc_i2c_of_match, + .probe = fsl_i2c_probe, + .remove = __devexit_p(fsl_i2c_remove), + .driver = { + .owner = THIS_MODULE, + .name = DRV_NAME, }, }; static int __init fsl_i2c_init(void) { - return platform_driver_register(&fsl_i2c_driver); + int rv; + + rv = of_register_platform_driver(&mpc_i2c_driver); + if (rv) + printk(KERN_ERR DRV_NAME + " of_register_platform_driver failed (%i)\n", rv); + return rv; } static void __exit fsl_i2c_exit(void) { - platform_driver_unregister(&fsl_i2c_driver); + of_unregister_platform_driver(&mpc_i2c_driver); } module_init(fsl_i2c_init); diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c index 6a8995dfd0bb..d1a4cbcf2aa4 100644 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c @@ -150,6 +150,16 @@ static int __init nforce2_s4985_init(void) int i, error; union i2c_smbus_data ioconfig; + /* Configure the PCA9556 multiplexer */ + ioconfig.byte = 0x00; /* All I/O to output mode */ + error = i2c_smbus_xfer(nforce2_smbus, 0x18, 0, I2C_SMBUS_WRITE, 0x03, + I2C_SMBUS_BYTE_DATA, &ioconfig); + if (error) { + dev_err(&nforce2_smbus->dev, "PCA9556 configuration failed\n"); + error = -EIO; + goto ERROR0; + } + /* Unregister physical bus */ if (!nforce2_smbus) return -ENODEV; @@ -191,24 +201,13 @@ static int __init nforce2_s4985_init(void) s4985_algo[3].smbus_xfer = nforce2_access_virt3; s4985_algo[4].smbus_xfer = nforce2_access_virt4; - /* Configure the PCA9556 multiplexer */ - ioconfig.byte = 0x00; /* All I/O to output mode */ - error = nforce2_smbus->algo->smbus_xfer(nforce2_smbus, 0x18, 0, - I2C_SMBUS_WRITE, 0x03, - I2C_SMBUS_BYTE_DATA, &ioconfig); - if (error) { - dev_err(&nforce2_smbus->dev, "PCA9556 configuration failed\n"); - error = -EIO; - goto ERROR3; - } - /* Register virtual adapters */ for (i = 0; i < 5; i++) { error = i2c_add_adapter(s4985_adapter + i); if (error) { - dev_err(&nforce2_smbus->dev, - "Virtual adapter %d registration " - "failed, module not inserted\n", i); + printk(KERN_ERR "i2c-nforce2-s4985: " + "Virtual adapter %d registration " + "failed, module not inserted\n", i); for (i--; i >= 0; i--) i2c_del_adapter(s4985_adapter + i); goto ERROR3; @@ -245,8 +244,8 @@ static void __exit nforce2_s4985_exit(void) /* Restore physical bus */ if (i2c_add_adapter(nforce2_smbus)) - dev_err(&nforce2_smbus->dev, "Physical bus restoration " - "failed\n"); + printk(KERN_ERR "i2c-nforce2-s4985: " + "Physical bus restoration failed\n"); } MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>"); diff --git a/drivers/i2c/chips/eeprom.c b/drivers/i2c/chips/eeprom.c index 373ea8d8fe8f..2c27193aeaa0 100644 --- a/drivers/i2c/chips/eeprom.c +++ 
b/drivers/i2c/chips/eeprom.c @@ -47,7 +47,6 @@ enum eeprom_nature { /* Each client has this additional data */ struct eeprom_data { - struct i2c_client client; struct mutex update_lock; u8 valid; /* bitfield, bit!=0 if slice is valid */ unsigned long last_updated[8]; /* In jiffies, 8 slices */ @@ -56,19 +55,6 @@ struct eeprom_data { }; -static int eeprom_attach_adapter(struct i2c_adapter *adapter); -static int eeprom_detect(struct i2c_adapter *adapter, int address, int kind); -static int eeprom_detach_client(struct i2c_client *client); - -/* This is the driver that will be inserted */ -static struct i2c_driver eeprom_driver = { - .driver = { - .name = "eeprom", - }, - .attach_adapter = eeprom_attach_adapter, - .detach_client = eeprom_detach_client, -}; - static void eeprom_update_client(struct i2c_client *client, u8 slice) { struct eeprom_data *data = i2c_get_clientdata(client); @@ -148,25 +134,17 @@ static struct bin_attribute eeprom_attr = { .read = eeprom_read, }; -static int eeprom_attach_adapter(struct i2c_adapter *adapter) -{ - if (!(adapter->class & (I2C_CLASS_DDC | I2C_CLASS_SPD))) - return 0; - return i2c_probe(adapter, &addr_data, eeprom_detect); -} - -/* This function is called by i2c_probe */ -static int eeprom_detect(struct i2c_adapter *adapter, int address, int kind) +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int eeprom_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info) { - struct i2c_client *client; - struct eeprom_data *data; - int err = 0; + struct i2c_adapter *adapter = client->adapter; /* EDID EEPROMs are often 24C00 EEPROMs, which answer to all addresses 0x50-0x57, but we only care about 0x50. So decline attaching to addresses >= 0x51 on DDC buses */ - if (!(adapter->class & I2C_CLASS_SPD) && address >= 0x51) - goto exit; + if (!(adapter->class & I2C_CLASS_SPD) && client->addr >= 0x51) + return -ENODEV; /* There are four ways we can read the EEPROM data: (1) I2C block reads (faster, but unsupported by most adapters) @@ -177,32 +155,33 @@ static int eeprom_detect(struct i2c_adapter *adapter, int address, int kind) because all known adapters support one of the first two. */ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_READ_WORD_DATA) && !i2c_check_functionality(adapter, I2C_FUNC_SMBUS_READ_I2C_BLOCK)) - goto exit; + return -ENODEV; + + strlcpy(info->type, "eeprom", I2C_NAME_SIZE); + + return 0; +} + +static int eeprom_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct i2c_adapter *adapter = client->adapter; + struct eeprom_data *data; + int err; if (!(data = kzalloc(sizeof(struct eeprom_data), GFP_KERNEL))) { err = -ENOMEM; goto exit; } - client = &data->client; memset(data->data, 0xff, EEPROM_SIZE); i2c_set_clientdata(client, data); - client->addr = address; - client->adapter = adapter; - client->driver = &eeprom_driver; - - /* Fill in the remaining client fields */ - strlcpy(client->name, "eeprom", I2C_NAME_SIZE); mutex_init(&data->update_lock); data->nature = UNKNOWN; - /* Tell the I2C layer a new client has arrived */ - if ((err = i2c_attach_client(client))) - goto exit_kfree; - /* Detect the Vaio nature of EEPROMs. We use the "PCG-" or "VGN-" prefix as the signature. 
*/ - if (address == 0x57 + if (client->addr == 0x57 && i2c_check_functionality(adapter, I2C_FUNC_SMBUS_READ_BYTE_DATA)) { char name[4]; @@ -221,33 +200,42 @@ static int eeprom_detect(struct i2c_adapter *adapter, int address, int kind) /* create the sysfs eeprom file */ err = sysfs_create_bin_file(&client->dev.kobj, &eeprom_attr); if (err) - goto exit_detach; + goto exit_kfree; return 0; -exit_detach: - i2c_detach_client(client); exit_kfree: kfree(data); exit: return err; } -static int eeprom_detach_client(struct i2c_client *client) +static int eeprom_remove(struct i2c_client *client) { - int err; - sysfs_remove_bin_file(&client->dev.kobj, &eeprom_attr); - - err = i2c_detach_client(client); - if (err) - return err; - kfree(i2c_get_clientdata(client)); return 0; } +static const struct i2c_device_id eeprom_id[] = { + { "eeprom", 0 }, + { } +}; + +static struct i2c_driver eeprom_driver = { + .driver = { + .name = "eeprom", + }, + .probe = eeprom_probe, + .remove = eeprom_remove, + .id_table = eeprom_id, + + .class = I2C_CLASS_DDC | I2C_CLASS_SPD, + .detect = eeprom_detect, + .address_data = &addr_data, +}; + static int __init eeprom_init(void) { return i2c_add_driver(&eeprom_driver); diff --git a/drivers/i2c/chips/max6875.c b/drivers/i2c/chips/max6875.c index 5a0285d8b6f9..033d9d81ec8a 100644 --- a/drivers/i2c/chips/max6875.c +++ b/drivers/i2c/chips/max6875.c @@ -53,7 +53,7 @@ I2C_CLIENT_INSMOD_1(max6875); /* Each client has this additional data */ struct max6875_data { - struct i2c_client client; + struct i2c_client *fake_client; struct mutex update_lock; u32 valid; @@ -61,19 +61,6 @@ struct max6875_data { unsigned long last_updated[USER_EEPROM_SLICES]; }; -static int max6875_attach_adapter(struct i2c_adapter *adapter); -static int max6875_detect(struct i2c_adapter *adapter, int address, int kind); -static int max6875_detach_client(struct i2c_client *client); - -/* This is the driver that will be inserted */ -static struct i2c_driver max6875_driver = { - .driver = { - .name = "max6875", - }, - .attach_adapter = max6875_attach_adapter, - .detach_client = max6875_detach_client, -}; - static void max6875_update_slice(struct i2c_client *client, int slice) { struct max6875_data *data = i2c_get_clientdata(client); @@ -159,96 +146,87 @@ static struct bin_attribute user_eeprom_attr = { .read = max6875_read, }; -static int max6875_attach_adapter(struct i2c_adapter *adapter) +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int max6875_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info) { - return i2c_probe(adapter, &addr_data, max6875_detect); -} - -/* This function is called by i2c_probe */ -static int max6875_detect(struct i2c_adapter *adapter, int address, int kind) -{ - struct i2c_client *real_client; - struct i2c_client *fake_client; - struct max6875_data *data; - int err; + struct i2c_adapter *adapter = client->adapter; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_BYTE_DATA | I2C_FUNC_SMBUS_READ_BYTE)) - return 0; + return -ENODEV; /* Only check even addresses */ - if (address & 1) - return 0; + if (client->addr & 1) + return -ENODEV; + + strlcpy(info->type, "max6875", I2C_NAME_SIZE); + + return 0; +} + +static int max6875_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct max6875_data *data; + int err; if (!(data = kzalloc(sizeof(struct max6875_data), GFP_KERNEL))) return -ENOMEM; /* A fake client is created on the odd address */ - if (!(fake_client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL))) { + 
data->fake_client = i2c_new_dummy(client->adapter, client->addr + 1); + if (!data->fake_client) { err = -ENOMEM; - goto exit_kfree1; + goto exit_kfree; } /* Init real i2c_client */ - real_client = &data->client; - i2c_set_clientdata(real_client, data); - real_client->addr = address; - real_client->adapter = adapter; - real_client->driver = &max6875_driver; - strlcpy(real_client->name, "max6875", I2C_NAME_SIZE); + i2c_set_clientdata(client, data); mutex_init(&data->update_lock); - /* Init fake client data */ - i2c_set_clientdata(fake_client, NULL); - fake_client->addr = address | 1; - fake_client->adapter = adapter; - fake_client->driver = &max6875_driver; - strlcpy(fake_client->name, "max6875 subclient", I2C_NAME_SIZE); - - if ((err = i2c_attach_client(real_client)) != 0) - goto exit_kfree2; - - if ((err = i2c_attach_client(fake_client)) != 0) - goto exit_detach1; - - err = sysfs_create_bin_file(&real_client->dev.kobj, &user_eeprom_attr); + err = sysfs_create_bin_file(&client->dev.kobj, &user_eeprom_attr); if (err) - goto exit_detach2; + goto exit_remove_fake; return 0; -exit_detach2: - i2c_detach_client(fake_client); -exit_detach1: - i2c_detach_client(real_client); -exit_kfree2: - kfree(fake_client); -exit_kfree1: +exit_remove_fake: + i2c_unregister_device(data->fake_client); +exit_kfree: kfree(data); return err; } -/* Will be called for both the real client and the fake client */ -static int max6875_detach_client(struct i2c_client *client) +static int max6875_remove(struct i2c_client *client) { - int err; struct max6875_data *data = i2c_get_clientdata(client); - /* data is NULL for the fake client */ - if (data) - sysfs_remove_bin_file(&client->dev.kobj, &user_eeprom_attr); + i2c_unregister_device(data->fake_client); - err = i2c_detach_client(client); - if (err) - return err; + sysfs_remove_bin_file(&client->dev.kobj, &user_eeprom_attr); + kfree(data); - if (data) /* real client */ - kfree(data); - else /* fake client */ - kfree(client); return 0; } +static const struct i2c_device_id max6875_id[] = { + { "max6875", 0 }, + { } +}; + +static struct i2c_driver max6875_driver = { + .driver = { + .name = "max6875", + }, + .probe = max6875_probe, + .remove = max6875_remove, + .id_table = max6875_id, + + .detect = max6875_detect, + .address_data = &addr_data, +}; + static int __init max6875_init(void) { return i2c_add_driver(&max6875_driver); diff --git a/drivers/i2c/chips/pca9539.c b/drivers/i2c/chips/pca9539.c index 58ab7f26be26..270de4e56a81 100644 --- a/drivers/i2c/chips/pca9539.c +++ b/drivers/i2c/chips/pca9539.c @@ -14,8 +14,8 @@ #include <linux/i2c.h> #include <linux/hwmon-sysfs.h> -/* Addresses to scan */ -static unsigned short normal_i2c[] = {0x74, 0x75, 0x76, 0x77, I2C_CLIENT_END}; +/* Addresses to scan: none, device is not autodetected */ +static const unsigned short normal_i2c[] = { I2C_CLIENT_END }; /* Insmod parameters */ I2C_CLIENT_INSMOD_1(pca9539); @@ -32,23 +32,6 @@ enum pca9539_cmd PCA9539_DIRECTION_1 = 7, }; -static int pca9539_attach_adapter(struct i2c_adapter *adapter); -static int pca9539_detect(struct i2c_adapter *adapter, int address, int kind); -static int pca9539_detach_client(struct i2c_client *client); - -/* This is the driver that will be inserted */ -static struct i2c_driver pca9539_driver = { - .driver = { - .name = "pca9539", - }, - .attach_adapter = pca9539_attach_adapter, - .detach_client = pca9539_detach_client, -}; - -struct pca9539_data { - struct i2c_client client; -}; - /* following are the sysfs callback functions */ static ssize_t pca9539_show(struct 
device *dev, struct device_attribute *attr, char *buf) @@ -105,77 +88,51 @@ static struct attribute_group pca9539_defattr_group = { .attrs = pca9539_attributes, }; -static int pca9539_attach_adapter(struct i2c_adapter *adapter) +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int pca9539_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info) { - return i2c_probe(adapter, &addr_data, pca9539_detect); -} - -/* This function is called by i2c_probe */ -static int pca9539_detect(struct i2c_adapter *adapter, int address, int kind) -{ - struct i2c_client *client; - struct pca9539_data *data; - int err = 0; + struct i2c_adapter *adapter = client->adapter; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) - goto exit; - - /* OK. For now, we presume we have a valid client. We now create the - client structure, even though we cannot fill it completely yet. */ - if (!(data = kzalloc(sizeof(struct pca9539_data), GFP_KERNEL))) { - err = -ENOMEM; - goto exit; - } - - client = &data->client; - i2c_set_clientdata(client, data); - client->addr = address; - client->adapter = adapter; - client->driver = &pca9539_driver; - - if (kind < 0) { - /* Detection: the pca9539 only has 8 registers (0-7). - A read of 7 should succeed, but a read of 8 should fail. */ - if ((i2c_smbus_read_byte_data(client, 7) < 0) || - (i2c_smbus_read_byte_data(client, 8) >= 0)) - goto exit_kfree; - } - - strlcpy(client->name, "pca9539", I2C_NAME_SIZE); - - /* Tell the I2C layer a new client has arrived */ - if ((err = i2c_attach_client(client))) - goto exit_kfree; + return -ENODEV; - /* Register sysfs hooks */ - err = sysfs_create_group(&client->dev.kobj, - &pca9539_defattr_group); - if (err) - goto exit_detach; + strlcpy(info->type, "pca9539", I2C_NAME_SIZE); return 0; - -exit_detach: - i2c_detach_client(client); -exit_kfree: - kfree(data); -exit: - return err; } -static int pca9539_detach_client(struct i2c_client *client) +static int pca9539_probe(struct i2c_client *client, + const struct i2c_device_id *id) { - int err; + /* Register sysfs hooks */ + return sysfs_create_group(&client->dev.kobj, + &pca9539_defattr_group); +} +static int pca9539_remove(struct i2c_client *client) +{ sysfs_remove_group(&client->dev.kobj, &pca9539_defattr_group); - - if ((err = i2c_detach_client(client))) - return err; - - kfree(i2c_get_clientdata(client)); return 0; } +static const struct i2c_device_id pca9539_id[] = { + { "pca9539", 0 }, + { } +}; + +static struct i2c_driver pca9539_driver = { + .driver = { + .name = "pca9539", + }, + .probe = pca9539_probe, + .remove = pca9539_remove, + .id_table = pca9539_id, + + .detect = pca9539_detect, + .address_data = &addr_data, +}; + static int __init pca9539_init(void) { return i2c_add_driver(&pca9539_driver); diff --git a/drivers/i2c/chips/pcf8574.c b/drivers/i2c/chips/pcf8574.c index 1b3db2b3ada9..6ec309894c88 100644 --- a/drivers/i2c/chips/pcf8574.c +++ b/drivers/i2c/chips/pcf8574.c @@ -38,37 +38,19 @@ #include <linux/slab.h> #include <linux/i2c.h> -/* Addresses to scan */ -static const unsigned short normal_i2c[] = { - 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, - 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, - I2C_CLIENT_END -}; +/* Addresses to scan: none, device can't be detected */ +static const unsigned short normal_i2c[] = { I2C_CLIENT_END }; /* Insmod parameters */ I2C_CLIENT_INSMOD_2(pcf8574, pcf8574a); /* Each client has this additional data */ struct pcf8574_data { - struct i2c_client client; - int write; /* Remember last written value 
*/ }; -static int pcf8574_attach_adapter(struct i2c_adapter *adapter); -static int pcf8574_detect(struct i2c_adapter *adapter, int address, int kind); -static int pcf8574_detach_client(struct i2c_client *client); static void pcf8574_init_client(struct i2c_client *client); -/* This is the driver that will be inserted */ -static struct i2c_driver pcf8574_driver = { - .driver = { - .name = "pcf8574", - }, - .attach_adapter = pcf8574_attach_adapter, - .detach_client = pcf8574_detach_client, -}; - /* following are the sysfs callback functions */ static ssize_t show_read(struct device *dev, struct device_attribute *attr, char *buf) { @@ -119,41 +101,22 @@ static const struct attribute_group pcf8574_attr_group = { * Real code */ -static int pcf8574_attach_adapter(struct i2c_adapter *adapter) -{ - return i2c_probe(adapter, &addr_data, pcf8574_detect); -} - -/* This function is called by i2c_probe */ -static int pcf8574_detect(struct i2c_adapter *adapter, int address, int kind) +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int pcf8574_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info) { - struct i2c_client *client; - struct pcf8574_data *data; - int err = 0; - const char *client_name = ""; + struct i2c_adapter *adapter = client->adapter; + const char *client_name; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE)) - goto exit; - - /* OK. For now, we presume we have a valid client. We now create the - client structure, even though we cannot fill it completely yet. */ - if (!(data = kzalloc(sizeof(struct pcf8574_data), GFP_KERNEL))) { - err = -ENOMEM; - goto exit; - } - - client = &data->client; - i2c_set_clientdata(client, data); - client->addr = address; - client->adapter = adapter; - client->driver = &pcf8574_driver; + return -ENODEV; /* Now, we would do the remaining detection. But the PCF8574 is plainly impossible to detect! Stupid chip. 
*/ /* Determine the chip type */ if (kind <= 0) { - if (address >= 0x38 && address <= 0x3f) + if (client->addr >= 0x38 && client->addr <= 0x3f) kind = pcf8574a; else kind = pcf8574; @@ -163,40 +126,43 @@ static int pcf8574_detect(struct i2c_adapter *adapter, int address, int kind) client_name = "pcf8574a"; else client_name = "pcf8574"; + strlcpy(info->type, client_name, I2C_NAME_SIZE); - /* Fill in the remaining client fields and put it into the global list */ - strlcpy(client->name, client_name, I2C_NAME_SIZE); + return 0; +} + +static int pcf8574_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct pcf8574_data *data; + int err; + + data = kzalloc(sizeof(struct pcf8574_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(client, data); - /* Tell the I2C layer a new client has arrived */ - if ((err = i2c_attach_client(client))) - goto exit_free; - /* Initialize the PCF8574 chip */ pcf8574_init_client(client); /* Register sysfs hooks */ err = sysfs_create_group(&client->dev.kobj, &pcf8574_attr_group); if (err) - goto exit_detach; + goto exit_free; return 0; - exit_detach: - i2c_detach_client(client); exit_free: kfree(data); exit: return err; } -static int pcf8574_detach_client(struct i2c_client *client) +static int pcf8574_remove(struct i2c_client *client) { - int err; - sysfs_remove_group(&client->dev.kobj, &pcf8574_attr_group); - - if ((err = i2c_detach_client(client))) - return err; - kfree(i2c_get_clientdata(client)); return 0; } @@ -208,6 +174,24 @@ static void pcf8574_init_client(struct i2c_client *client) data->write = -EAGAIN; } +static const struct i2c_device_id pcf8574_id[] = { + { "pcf8574", 0 }, + { "pcf8574a", 0 }, + { } +}; + +static struct i2c_driver pcf8574_driver = { + .driver = { + .name = "pcf8574", + }, + .probe = pcf8574_probe, + .remove = pcf8574_remove, + .id_table = pcf8574_id, + + .detect = pcf8574_detect, + .address_data = &addr_data, +}; + static int __init pcf8574_init(void) { return i2c_add_driver(&pcf8574_driver); diff --git a/drivers/i2c/chips/pcf8575.c b/drivers/i2c/chips/pcf8575.c index 3ea08ac0bfa3..07fd7cb3c57d 100644 --- a/drivers/i2c/chips/pcf8575.c +++ b/drivers/i2c/chips/pcf8575.c @@ -32,11 +32,8 @@ #include <linux/slab.h> /* kzalloc() */ #include <linux/sysfs.h> /* sysfs_create_group() */ -/* Addresses to scan */ -static const unsigned short normal_i2c[] = { - 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, - I2C_CLIENT_END -}; +/* Addresses to scan: none, device can't be detected */ +static const unsigned short normal_i2c[] = { I2C_CLIENT_END }; /* Insmod parameters */ I2C_CLIENT_INSMOD; @@ -44,24 +41,9 @@ I2C_CLIENT_INSMOD; /* Each client has this additional data */ struct pcf8575_data { - struct i2c_client client; int write; /* last written value, or error code */ }; -static int pcf8575_attach_adapter(struct i2c_adapter *adapter); -static int pcf8575_detect(struct i2c_adapter *adapter, int address, int kind); -static int pcf8575_detach_client(struct i2c_client *client); - -/* This is the driver that will be inserted */ -static struct i2c_driver pcf8575_driver = { - .driver = { - .owner = THIS_MODULE, - .name = "pcf8575", - }, - .attach_adapter = pcf8575_attach_adapter, - .detach_client = pcf8575_detach_client, -}; - /* following are the sysfs callback functions */ static ssize_t show_read(struct device *dev, struct device_attribute *attr, char *buf) @@ -126,75 +108,77 @@ static const struct attribute_group pcf8575_attr_group = { * Real code */ -static int 
pcf8575_attach_adapter(struct i2c_adapter *adapter) +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int pcf8575_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info) { - return i2c_probe(adapter, &addr_data, pcf8575_detect); + struct i2c_adapter *adapter = client->adapter; + + if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) + return -ENODEV; + + /* This is the place to detect whether the chip at the specified + address really is a PCF8575 chip. However, there is no method known + to detect whether an I2C chip is a PCF8575 or any other I2C chip. */ + + strlcpy(info->type, "pcf8575", I2C_NAME_SIZE); + + return 0; } -/* This function is called by i2c_probe */ -static int pcf8575_detect(struct i2c_adapter *adapter, int address, int kind) +static int pcf8575_probe(struct i2c_client *client, + const struct i2c_device_id *id) { - struct i2c_client *client; struct pcf8575_data *data; - int err = 0; - - if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) - goto exit; + int err; - /* OK. For now, we presume we have a valid client. We now create the - client structure, even though we cannot fill it completely yet. */ data = kzalloc(sizeof(struct pcf8575_data), GFP_KERNEL); if (!data) { err = -ENOMEM; goto exit; } - client = &data->client; i2c_set_clientdata(client, data); - client->addr = address; - client->adapter = adapter; - client->driver = &pcf8575_driver; - strlcpy(client->name, "pcf8575", I2C_NAME_SIZE); data->write = -EAGAIN; - /* This is the place to detect whether the chip at the specified - address really is a PCF8575 chip. However, there is no method known - to detect whether an I2C chip is a PCF8575 or any other I2C chip. */ - - /* Tell the I2C layer a new client has arrived */ - err = i2c_attach_client(client); - if (err) - goto exit_free; - /* Register sysfs hooks */ err = sysfs_create_group(&client->dev.kobj, &pcf8575_attr_group); if (err) - goto exit_detach; + goto exit_free; return 0; -exit_detach: - i2c_detach_client(client); exit_free: kfree(data); exit: return err; } -static int pcf8575_detach_client(struct i2c_client *client) +static int pcf8575_remove(struct i2c_client *client) { - int err; - sysfs_remove_group(&client->dev.kobj, &pcf8575_attr_group); - - err = i2c_detach_client(client); - if (err) - return err; - kfree(i2c_get_clientdata(client)); return 0; } +static const struct i2c_device_id pcf8575_id[] = { + { "pcf8575", 0 }, + { } +}; + +static struct i2c_driver pcf8575_driver = { + .driver = { + .owner = THIS_MODULE, + .name = "pcf8575", + }, + .probe = pcf8575_probe, + .remove = pcf8575_remove, + .id_table = pcf8575_id, + + .detect = pcf8575_detect, + .address_data = &addr_data, +}; + static int __init pcf8575_init(void) { return i2c_add_driver(&pcf8575_driver); diff --git a/drivers/i2c/chips/pcf8591.c b/drivers/i2c/chips/pcf8591.c index db735379f22f..16ce3e193776 100644 --- a/drivers/i2c/chips/pcf8591.c +++ b/drivers/i2c/chips/pcf8591.c @@ -72,28 +72,15 @@ MODULE_PARM_DESC(input_mode, #define REG_TO_SIGNED(reg) (((reg) & 0x80)?((reg) - 256):(reg)) struct pcf8591_data { - struct i2c_client client; struct mutex update_lock; u8 control; u8 aout; }; -static int pcf8591_attach_adapter(struct i2c_adapter *adapter); -static int pcf8591_detect(struct i2c_adapter *adapter, int address, int kind); -static int pcf8591_detach_client(struct i2c_client *client); static void pcf8591_init_client(struct i2c_client *client); static int pcf8591_read_channel(struct device *dev, int channel); -/* This is the driver that will be inserted */ 
-static struct i2c_driver pcf8591_driver = { - .driver = { - .name = "pcf8591", - }, - .attach_adapter = pcf8591_attach_adapter, - .detach_client = pcf8591_detach_client, -}; - /* following are the sysfs callback functions */ #define show_in_channel(channel) \ static ssize_t show_in##channel##_input(struct device *dev, struct device_attribute *attr, char *buf) \ @@ -180,58 +167,46 @@ static const struct attribute_group pcf8591_attr_group_opt = { /* * Real code */ -static int pcf8591_attach_adapter(struct i2c_adapter *adapter) -{ - return i2c_probe(adapter, &addr_data, pcf8591_detect); -} -/* This function is called by i2c_probe */ -static int pcf8591_detect(struct i2c_adapter *adapter, int address, int kind) +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int pcf8591_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info) { - struct i2c_client *client; - struct pcf8591_data *data; - int err = 0; + struct i2c_adapter *adapter = client->adapter; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_WRITE_BYTE_DATA)) - goto exit; + return -ENODEV; + + /* Now, we would do the remaining detection. But the PCF8591 is plainly + impossible to detect! Stupid chip. */ + + strlcpy(info->type, "pcf8591", I2C_NAME_SIZE); + + return 0; +} + +static int pcf8591_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct pcf8591_data *data; + int err; - /* OK. For now, we presume we have a valid client. We now create the - client structure, even though we cannot fill it completely yet. */ if (!(data = kzalloc(sizeof(struct pcf8591_data), GFP_KERNEL))) { err = -ENOMEM; goto exit; } - client = &data->client; i2c_set_clientdata(client, data); - client->addr = address; - client->adapter = adapter; - client->driver = &pcf8591_driver; - - /* Now, we would do the remaining detection. But the PCF8591 is plainly - impossible to detect! Stupid chip. */ - - /* Determine the chip type - only one kind supported! 
*/ - if (kind <= 0) - kind = pcf8591; - - /* Fill in the remaining client fields and put it into the global - list */ - strlcpy(client->name, "pcf8591", I2C_NAME_SIZE); mutex_init(&data->update_lock); - /* Tell the I2C layer a new client has arrived */ - if ((err = i2c_attach_client(client))) - goto exit_kfree; - /* Initialize the PCF8591 chip */ pcf8591_init_client(client); /* Register sysfs hooks */ err = sysfs_create_group(&client->dev.kobj, &pcf8591_attr_group); if (err) - goto exit_detach; + goto exit_kfree; /* Register input2 if not in "two differential inputs" mode */ if (input_mode != 3) { @@ -252,24 +227,16 @@ static int pcf8591_detect(struct i2c_adapter *adapter, int address, int kind) exit_sysfs_remove: sysfs_remove_group(&client->dev.kobj, &pcf8591_attr_group_opt); sysfs_remove_group(&client->dev.kobj, &pcf8591_attr_group); -exit_detach: - i2c_detach_client(client); exit_kfree: kfree(data); exit: return err; } -static int pcf8591_detach_client(struct i2c_client *client) +static int pcf8591_remove(struct i2c_client *client) { - int err; - sysfs_remove_group(&client->dev.kobj, &pcf8591_attr_group_opt); sysfs_remove_group(&client->dev.kobj, &pcf8591_attr_group); - - if ((err = i2c_detach_client(client))) - return err; - kfree(i2c_get_clientdata(client)); return 0; } @@ -316,6 +283,25 @@ static int pcf8591_read_channel(struct device *dev, int channel) return (10 * value); } +static const struct i2c_device_id pcf8591_id[] = { + { "pcf8591", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, pcf8591_id); + +static struct i2c_driver pcf8591_driver = { + .driver = { + .name = "pcf8591", + }, + .probe = pcf8591_probe, + .remove = pcf8591_remove, + .id_table = pcf8591_id, + + .class = I2C_CLASS_HWMON, /* Nearest choice */ + .detect = pcf8591_detect, + .address_data = &addr_data, +}; + static int __init pcf8591_init(void) { if (input_mode < 0 || input_mode > 3) { diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 0a79f7661017..7608df83d6d1 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -654,6 +654,10 @@ int i2c_del_adapter(struct i2c_adapter *adap) dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name); + /* Clear the device structure in case this adapter is ever going to be + added again */ + memset(&adap->dev, 0, sizeof(adap->dev)); + out_unlock: mutex_unlock(&core_lock); return res; diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig index cf707c8f08d4..15b09b89588a 100644 --- a/drivers/ide/Kconfig +++ b/drivers/ide/Kconfig @@ -98,6 +98,9 @@ if BLK_DEV_IDE comment "Please see Documentation/ide/ide.txt for help/info on IDE drives" +config IDE_TIMINGS + bool + config IDE_ATAPI bool @@ -326,6 +329,7 @@ config BLK_DEV_PLATFORM config BLK_DEV_CMD640 tristate "CMD640 chipset bugfix/support" depends on X86 + select IDE_TIMINGS ---help--- The CMD-Technologies CMD640 IDE chip is used on many common 486 and Pentium motherboards, usually in combination with a "Neptune" or @@ -455,6 +459,7 @@ config BLK_DEV_AEC62XX config BLK_DEV_ALI15X3 tristate "ALI M15x3 chipset support" + select IDE_TIMINGS select BLK_DEV_IDEDMA_PCI help This driver ensures (U)DMA support for ALI 1533, 1543 and 1543C @@ -469,6 +474,7 @@ config BLK_DEV_ALI15X3 config BLK_DEV_AMD74XX tristate "AMD and nVidia IDE support" depends on !ARM + select IDE_TIMINGS select BLK_DEV_IDEDMA_PCI help This driver adds explicit support for AMD-7xx and AMD-8111 chips @@ -489,6 +495,7 @@ config BLK_DEV_ATIIXP config BLK_DEV_CMD64X tristate "CMD64{3|6|8|9} chipset support" + select IDE_TIMINGS select 
BLK_DEV_IDEDMA_PCI help Say Y here if you have an IDE controller which uses any of these @@ -503,6 +510,7 @@ config BLK_DEV_TRIFLEX config BLK_DEV_CY82C693 tristate "CY82C693 chipset support" + select IDE_TIMINGS select BLK_DEV_IDEDMA_PCI help This driver adds detection and support for the CY82C693 chipset @@ -695,6 +703,7 @@ config BLK_DEV_SIS5513 config BLK_DEV_SL82C105 tristate "Winbond SL82c105 support" depends on (PPC || ARM) + select IDE_TIMINGS select BLK_DEV_IDEDMA_PCI help If you have a Winbond SL82c105 IDE controller, say Y here to enable @@ -725,6 +734,7 @@ config BLK_DEV_TRM290 config BLK_DEV_VIA82CXXX tristate "VIA82CXXX chipset support" + select IDE_TIMINGS select BLK_DEV_IDEDMA_PCI help This driver adds explicit support for VIA BusMastering IDE chips. @@ -751,6 +761,7 @@ endif config BLK_DEV_IDE_PMAC tristate "PowerMac on-board IDE support" depends on PPC_PMAC && IDE=y && BLK_DEV_IDE=y + select IDE_TIMINGS help This driver provides support for the on-board IDE controller on most of the recent Apple Power Macintoshes and PowerBooks. @@ -829,13 +840,6 @@ config BLK_DEV_IDE_RAPIDE Say Y here if you want to support the Yellowstone RapIDE controller manufactured for use with Acorn computers. -config BLK_DEV_IDE_BAST - tristate "Simtec BAST / Thorcom VR1000 IDE support" - depends on ARM && (ARCH_BAST || MACH_VR1000) - help - Say Y here if you want to support the onboard IDE channels on the - Simtec BAST or the Thorcom VR1000 - config IDE_H8300 tristate "H8300 IDE support" depends on H8300 @@ -919,51 +923,12 @@ config BLK_DEV_Q40IDE config BLK_DEV_PALMCHIP_BK3710 tristate "Palmchip bk3710 IDE controller support" depends on ARCH_DAVINCI + select IDE_TIMINGS select BLK_DEV_IDEDMA_SFF help Say Y here if you want to support the onchip IDE controller on the TI DaVinci SoC - -config BLK_DEV_MPC8xx_IDE - tristate "MPC8xx IDE support" - depends on 8xx && (LWMON || IVMS8 || IVML24 || TQM8xxL) && IDE=y && BLK_DEV_IDE=y && !PPC_MERGE - help - This option provides support for IDE on Motorola MPC8xx Systems. - Please see 'Type of MPC8xx IDE interface' for details. - - If unsure, say N. - -choice - prompt "Type of MPC8xx IDE interface" - depends on BLK_DEV_MPC8xx_IDE - default IDE_8xx_PCCARD - -config IDE_8xx_PCCARD - bool "8xx_PCCARD" - ---help--- - Select how the IDE devices are connected to the MPC8xx system: - - 8xx_PCCARD uses the 8xx internal PCMCIA interface in combination - with a PC Card (e.g. ARGOSY portable Hard Disk Adapter), - ATA PC Card HDDs or ATA PC Flash Cards (example: TQM8xxL - systems) - - 8xx_DIRECT is used for directly connected IDE devices using the 8xx - internal PCMCIA interface (example: IVMS8 systems) - - EXT_DIRECT is used for IDE devices directly connected to the 8xx - bus using some glue logic, but _not_ the 8xx internal - PCMCIA interface (example: IDIF860 systems) - -config IDE_8xx_DIRECT - bool "8xx_DIRECT" - -config IDE_EXT_DIRECT - bool "EXT_DIRECT" - -endchoice - # no isa -> no vlb if ISA && (ALPHA || X86 || MIPS) @@ -981,6 +946,7 @@ config BLK_DEV_4DRIVES config BLK_DEV_ALI14XX tristate "ALI M14xx support" + select IDE_TIMINGS help This driver is enabled at runtime using the "ali14xx.probe" kernel boot parameter. It enables support for the secondary IDE interface @@ -1000,6 +966,7 @@ config BLK_DEV_DTC2278 config BLK_DEV_HT6560B tristate "Holtek HT6560B support" + select IDE_TIMINGS help This driver is enabled at runtime using the "ht6560b.probe" kernel boot parameter. 
It enables support for the secondary IDE interface @@ -1009,6 +976,7 @@ config BLK_DEV_HT6560B config BLK_DEV_QD65XX tristate "QDI QD65xx support" + select IDE_TIMINGS help This driver is enabled at runtime using the "qd65xx.probe" kernel boot parameter. It permits faster I/O speeds to be set. See the @@ -1032,30 +1000,4 @@ config BLK_DEV_IDEDMA endif -config BLK_DEV_HD_ONLY - bool "Old hard disk (MFM/RLL/IDE) driver" - depends on !ARM || ARCH_RPC || ARCH_SHARK || BROKEN - help - There are two drivers for MFM/RLL/IDE hard disks. Most people use - the newer enhanced driver, but this old one is still around for two - reasons. Some older systems have strange timing problems and seem to - work only with the old driver (which itself does not work with some - newer systems). The other reason is that the old driver is smaller, - since it lacks the enhanced functionality of the new one. This makes - it a good choice for systems with very tight memory restrictions, or - for systems with only older MFM/RLL/ESDI drives. Choosing the old - driver can save 13 KB or so of kernel memory. - - If you want to use this driver together with the new one you have - to use "hda=noprobe hdb=noprobe" kernel parameters to prevent the new - driver from probing the primary interface. - - If you are unsure, then just choose the Enhanced IDE/MFM/RLL driver - instead of this one. For more detailed information, read the - Disk-HOWTO, available from - <http://www.tldp.org/docs.html#howto>. - -config BLK_DEV_HD - def_bool BLK_DEV_HD_ONLY - endif # IDE diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile index a2b3f84d710d..5d414e301a5a 100644 --- a/drivers/ide/Makefile +++ b/drivers/ide/Makefile @@ -11,9 +11,11 @@ EXTRA_CFLAGS += -Idrivers/ide -ide-core-y += ide.o ide-io.o ide-iops.o ide-lib.o ide-probe.o ide-taskfile.o +ide-core-y += ide.o ide-io.o ide-iops.o ide-lib.o ide-probe.o ide-taskfile.o \ + ide-pio-blacklist.o # core IDE code +ide-core-$(CONFIG_IDE_TIMINGS) += ide-timings.o ide-core-$(CONFIG_IDE_ATAPI) += ide-atapi.o ide-core-$(CONFIG_BLK_DEV_IDEPCI) += setup-pci.o ide-core-$(CONFIG_BLK_DEV_IDEDMA) += ide-dma.o @@ -59,9 +61,3 @@ ifeq ($(CONFIG_BLK_DEV_PLATFORM), y) endif obj-$(CONFIG_BLK_DEV_IDE) += arm/ mips/ - -# old hd driver must be last -ifeq ($(CONFIG_BLK_DEV_HD), y) - hd-core-y += legacy/hd.o - obj-y += hd-core.o -endif diff --git a/drivers/ide/arm/Makefile b/drivers/ide/arm/Makefile index 936e7b0237f5..5bc26053afa6 100644 --- a/drivers/ide/arm/Makefile +++ b/drivers/ide/arm/Makefile @@ -1,7 +1,6 @@ obj-$(CONFIG_BLK_DEV_IDE_ICSIDE) += icside.o obj-$(CONFIG_BLK_DEV_IDE_RAPIDE) += rapide.o -obj-$(CONFIG_BLK_DEV_IDE_BAST) += bast-ide.o obj-$(CONFIG_BLK_DEV_PALMCHIP_BK3710) += palm_bk3710.o ifeq ($(CONFIG_IDE_ARM), m) diff --git a/drivers/ide/arm/bast-ide.c b/drivers/ide/arm/bast-ide.c deleted file mode 100644 index 8e8c28104b45..000000000000 --- a/drivers/ide/arm/bast-ide.c +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright (c) 2003-2004 Simtec Electronics - * Ben Dooks <ben@simtec.co.uk> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * -*/ - -#include <linux/module.h> -#include <linux/errno.h> -#include <linux/ide.h> -#include <linux/init.h> - -#include <asm/mach-types.h> - -#include <asm/io.h> -#include <asm/irq.h> -#include <asm/arch/map.h> -#include <asm/arch/bast-map.h> -#include <asm/arch/bast-irq.h> - -#define DRV_NAME "bast-ide" - -static int __init bastide_register(unsigned int base, unsigned int aux, int irq) -{ - ide_hwif_t *hwif; - hw_regs_t hw; - int i; - u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; - - memset(&hw, 0, sizeof(hw)); - - base += BAST_IDE_CS; - aux += BAST_IDE_CS; - - for (i = 0; i <= 7; i++) { - hw.io_ports_array[i] = (unsigned long)base; - base += 0x20; - } - - hw.io_ports.ctl_addr = aux + (6 * 0x20); - hw.irq = irq; - hw.chipset = ide_generic; - - hwif = ide_find_port(); - if (hwif == NULL) - goto out; - - i = hwif->index; - - ide_init_port_data(hwif, i); - ide_init_port_hw(hwif, &hw); - hwif->port_ops = NULL; - - idx[0] = i; - - ide_device_add(idx, NULL); -out: - return 0; -} - -static int __init bastide_init(void) -{ - unsigned long base = BAST_VA_IDEPRI + BAST_IDE_CS; - - /* we can treat the VR1000 and the BAST the same */ - - if (!(machine_is_bast() || machine_is_vr1000())) - return 0; - - printk("BAST: IDE driver, (c) 2003-2004 Simtec Electronics\n"); - - if (!request_mem_region(base, 0x400000, DRV_NAME)) { - printk(KERN_ERR "%s: resources busy\n", DRV_NAME); - return -EBUSY; - } - - bastide_register(BAST_VA_IDEPRI, BAST_VA_IDEPRIAUX, IRQ_IDE0); - bastide_register(BAST_VA_IDESEC, BAST_VA_IDESECAUX, IRQ_IDE1); - - return 0; -} - -module_init(bastide_init); - -MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Simtec BAST / Thorcom VR1000 IDE driver"); diff --git a/drivers/ide/arm/icside.c b/drivers/ide/arm/icside.c index 061456914ca3..52f58c885783 100644 --- a/drivers/ide/arm/icside.c +++ b/drivers/ide/arm/icside.c @@ -21,6 +21,8 @@ #include <asm/dma.h> #include <asm/ecard.h> +#define DRV_NAME "icside" + #define ICS_IDENT_OFFSET 0x2280 #define ICS_ARCIN_V5_INTRSTAT 0x0000 @@ -68,6 +70,7 @@ struct icside_state { unsigned int enabled; void __iomem *irq_port; void __iomem *ioc_base; + unsigned int sel; unsigned int type; ide_hwif_t *hwif[2]; }; @@ -165,7 +168,8 @@ static const expansioncard_ops_t icside_ops_arcin_v6 = { static void icside_maskproc(ide_drive_t *drive, int mask) { ide_hwif_t *hwif = HWIF(drive); - struct icside_state *state = hwif->hwif_data; + struct expansion_card *ec = ECARD_DEV(hwif->dev); + struct icside_state *state = ecard_get_drvdata(ec); unsigned long flags; local_irq_save(flags); @@ -308,6 +312,7 @@ static int icside_dma_setup(ide_drive_t *drive) { ide_hwif_t *hwif = HWIF(drive); struct expansion_card *ec = ECARD_DEV(hwif->dev); + struct icside_state *state = ecard_get_drvdata(ec); struct request *rq = hwif->hwgroup->rq; unsigned int dma_mode; @@ -331,7 +336,7 @@ static int icside_dma_setup(ide_drive_t *drive) /* * Route the DMA signals to the correct interface. */ - writeb(hwif->select_data, hwif->config_data); + writeb(state->sel | hwif->channel, state->ioc_base); /* * Select the correct timing for this drive. @@ -359,7 +364,8 @@ static void icside_dma_exec_cmd(ide_drive_t *drive, u8 cmd) static int icside_dma_test_irq(ide_drive_t *drive) { ide_hwif_t *hwif = HWIF(drive); - struct icside_state *state = hwif->hwif_data; + struct expansion_card *ec = ECARD_DEV(hwif->dev); + struct icside_state *state = ecard_get_drvdata(ec); return readb(state->irq_port + (hwif->channel ? 
@@ -411,36 +417,24 @@ static int icside_dma_off_init(ide_hwif_t *hwif, const struct ide_port_info *d) return -EOPNOTSUPP; } -static ide_hwif_t * -icside_setup(void __iomem *base, struct cardinfo *info, struct expansion_card *ec) +static void icside_setup_ports(hw_regs_t *hw, void __iomem *base, + struct cardinfo *info, struct expansion_card *ec) { unsigned long port = (unsigned long)base + info->dataoffset; - ide_hwif_t *hwif; - hwif = ide_find_port(); - if (hwif) { - /* - * Ensure we're using MMIO - */ - default_hwif_mmiops(hwif); - - hwif->io_ports.data_addr = port; - hwif->io_ports.error_addr = port + (1 << info->stepping); - hwif->io_ports.nsect_addr = port + (2 << info->stepping); - hwif->io_ports.lbal_addr = port + (3 << info->stepping); - hwif->io_ports.lbam_addr = port + (4 << info->stepping); - hwif->io_ports.lbah_addr = port + (5 << info->stepping); - hwif->io_ports.device_addr = port + (6 << info->stepping); - hwif->io_ports.status_addr = port + (7 << info->stepping); - hwif->io_ports.ctl_addr = - (unsigned long)base + info->ctrloffset; - hwif->irq = ec->irq; - hwif->chipset = ide_acorn; - hwif->gendev.parent = &ec->dev; - hwif->dev = &ec->dev; - } - - return hwif; + hw->io_ports.data_addr = port; + hw->io_ports.error_addr = port + (1 << info->stepping); + hw->io_ports.nsect_addr = port + (2 << info->stepping); + hw->io_ports.lbal_addr = port + (3 << info->stepping); + hw->io_ports.lbam_addr = port + (4 << info->stepping); + hw->io_ports.lbah_addr = port + (5 << info->stepping); + hw->io_ports.device_addr = port + (6 << info->stepping); + hw->io_ports.status_addr = port + (7 << info->stepping); + hw->io_ports.ctl_addr = (unsigned long)base + info->ctrloffset; + + hw->irq = ec->irq; + hw->dev = &ec->dev; + hw->chipset = ide_acorn; } static int __init @@ -449,6 +443,7 @@ icside_register_v5(struct icside_state *state, struct expansion_card *ec) ide_hwif_t *hwif; void __iomem *base; u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; + hw_regs_t hw; base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0); if (!base) @@ -466,12 +461,19 @@ icside_register_v5(struct icside_state *state, struct expansion_card *ec) */ icside_irqdisable_arcin_v5(ec, 0); - hwif = icside_setup(base, &icside_cardinfo_v5, ec); + icside_setup_ports(&hw, base, &icside_cardinfo_v5, ec); + + hwif = ide_find_port(); if (!hwif) return -ENODEV; + ide_init_port_hw(hwif, &hw); + default_hwif_mmiops(hwif); + state->hwif[0] = hwif; + ecard_set_drvdata(ec, state); + idx[0] = hwif->index; ide_device_add(idx, NULL); @@ -497,6 +499,7 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec) int ret; u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; struct ide_port_info d = icside_v6_port_info; + hw_regs_t hw[2]; ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0); if (!ioc_base) { @@ -525,43 +528,47 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec) state->irq_port = easi_base; state->ioc_base = ioc_base; + state->sel = sel; /* * Be on the safe side - disable interrupts */ icside_irqdisable_arcin_v6(ec, 0); + icside_setup_ports(&hw[0], easi_base, &icside_cardinfo_v6_1, ec); + icside_setup_ports(&hw[1], easi_base, &icside_cardinfo_v6_2, ec); + /* * Find and register the interfaces. 
*/ - hwif = icside_setup(easi_base, &icside_cardinfo_v6_1, ec); - mate = icside_setup(easi_base, &icside_cardinfo_v6_2, ec); + hwif = ide_find_port(); + if (hwif == NULL) + return -ENODEV; - if (!hwif || !mate) { - ret = -ENODEV; - goto out; + ide_init_port_hw(hwif, &hw[0]); + default_hwif_mmiops(hwif); + + idx[0] = hwif->index; + + mate = ide_find_port(); + if (mate) { + ide_init_port_hw(mate, &hw[1]); + default_hwif_mmiops(mate); + + idx[1] = mate->index; } state->hwif[0] = hwif; state->hwif[1] = mate; - hwif->hwif_data = state; - hwif->config_data = (unsigned long)ioc_base; - hwif->select_data = sel; - - mate->hwif_data = state; - mate->config_data = (unsigned long)ioc_base; - mate->select_data = sel | 1; + ecard_set_drvdata(ec, state); - if (ec->dma != NO_DMA && !request_dma(ec->dma, hwif->name)) { + if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) { d.init_dma = icside_dma_init; d.port_ops = &icside_v6_port_ops; d.dma_ops = NULL; } - idx[0] = hwif->index; - idx[1] = mate->index; - ide_device_add(idx, &d); return 0; @@ -627,10 +634,8 @@ icside_probe(struct expansion_card *ec, const struct ecard_id *id) break; } - if (ret == 0) { - ecard_set_drvdata(ec, state); + if (ret == 0) goto out; - } kfree(state); release: diff --git a/drivers/ide/arm/palm_bk3710.c b/drivers/ide/arm/palm_bk3710.c index 3839f5722985..c79b85b6e4a3 100644 --- a/drivers/ide/arm/palm_bk3710.c +++ b/drivers/ide/arm/palm_bk3710.c @@ -74,8 +74,6 @@ struct palm_bk3710_udmatiming { #define BK3710_IORDYTMP 0x78 #define BK3710_IORDYTMS 0x7C -#include "../ide-timing.h" - static unsigned ideclk_period; /* in nanoseconds */ static const struct palm_bk3710_udmatiming palm_bk3710_udmatimings[6] = { @@ -402,7 +400,6 @@ static int __devinit palm_bk3710_probe(struct platform_device *pdev) i = hwif->index; - ide_init_port_data(hwif, i); ide_init_port_hw(hwif, &hw); default_hwif_mmiops(hwif); diff --git a/drivers/ide/arm/rapide.c b/drivers/ide/arm/rapide.c index 1747b2358775..43057e0303c8 100644 --- a/drivers/ide/arm/rapide.c +++ b/drivers/ide/arm/rapide.c @@ -11,6 +11,10 @@ #include <asm/ecard.h> +static struct const ide_port_info rapide_port_info = { + .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA, +}; + static void rapide_setup_ports(hw_regs_t *hw, void __iomem *base, void __iomem *ctrl, unsigned int sz, int irq) { @@ -44,25 +48,26 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id) goto release; } - hwif = ide_find_port(); - if (hwif) { - memset(&hw, 0, sizeof(hw)); - rapide_setup_ports(&hw, base, base + 0x818, 1 << 6, ec->irq); - hw.chipset = ide_generic; - hw.dev = &ec->dev; + memset(&hw, 0, sizeof(hw)); + rapide_setup_ports(&hw, base, base + 0x818, 1 << 6, ec->irq); + hw.chipset = ide_generic; + hw.dev = &ec->dev; - ide_init_port_hw(hwif, &hw); + hwif = ide_find_port(); + if (hwif == NULL) { + ret = -ENOENT; + goto release; + } - hwif->host_flags = IDE_HFLAG_MMIO; - default_hwif_mmiops(hwif); + ide_init_port_hw(hwif, &hw); + default_hwif_mmiops(hwif); - idx[0] = hwif->index; + idx[0] = hwif->index; - ide_device_add(idx, NULL); + ide_device_add(idx, &rapide_port_info); - ecard_set_drvdata(ec, hwif); - goto out; - } + ecard_set_drvdata(ec, hwif); + goto out; release: ecard_release_resources(ec); diff --git a/drivers/ide/h8300/ide-h8300.c b/drivers/ide/h8300/ide-h8300.c index ae37ee58bae2..20fad6d542cc 100644 --- a/drivers/ide/h8300/ide-h8300.c +++ b/drivers/ide/h8300/ide-h8300.c @@ -8,6 +8,8 @@ #include <asm/io.h> #include <asm/irq.h> +#define DRV_NAME "ide-h8300" + #define bswap(d) \ ({ \ u16 r; \ @@ 
-176,6 +178,10 @@ static inline void hwif_setup(ide_hwif_t *hwif) hwif->output_data = h8300_output_data; } +static const struct ide_port_info h8300_port_info = { + .host_flags = IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_NO_DMA, +}; + static int __init h8300_ide_init(void) { hw_regs_t hw; @@ -183,6 +189,8 @@ static int __init h8300_ide_init(void) int index; u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; + printk(KERN_INFO DRV_NAME ": H8/300 generic IDE interface\n"); + if (!request_region(CONFIG_H8300_IDE_BASE, H8300_IDE_GAP*8, "ide-h8300")) goto out_busy; if (!request_region(CONFIG_H8300_IDE_ALT, H8300_IDE_GAP, "ide-h8300")) { @@ -192,22 +200,17 @@ static int __init h8300_ide_init(void) hw_setup(&hw); - hwif = ide_find_port(); - if (hwif == NULL) { - printk(KERN_ERR "ide-h8300: IDE I/F register failed\n"); + hwif = ide_find_port_slot(&h8300_port_info); + if (hwif == NULL) return -ENOENT; - } index = hwif->index; - ide_init_port_data(hwif, index); ide_init_port_hw(hwif, &hw); hwif_setup(hwif); - hwif->host_flags = IDE_HFLAG_NO_IO_32BIT; - printk(KERN_INFO "ide%d: H8/300 generic IDE interface\n", index); idx[0] = index; - ide_device_add(idx, NULL); + ide_device_add(idx, &h8300_port_info); return 0; diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index d99847157186..6e29dd532090 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c @@ -517,14 +517,9 @@ static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive, int xferlen, ide_handler_t *handler) { - ide_startstop_t startstop; struct cdrom_info *info = drive->driver_data; ide_hwif_t *hwif = drive->hwif; - /* wait for the controller to be idle */ - if (ide_wait_stat(&startstop, drive, 0, BUSY_STAT, WAIT_READY)) - return startstop; - /* FIXME: for Virtual DMA we must check harder */ if (info->dma) info->dma = !hwif->dma_ops->dma_setup(drive); @@ -604,28 +599,6 @@ static ide_startstop_t cdrom_transfer_packet_command(ide_drive_t *drive, } /* - * Block read functions. - */ -static void ide_cd_pad_transfer(ide_drive_t *drive, xfer_func_t *xf, int len) -{ - while (len > 0) { - int dum = 0; - xf(drive, NULL, &dum, sizeof(dum)); - len -= sizeof(dum); - } -} - -static void ide_cd_drain_data(ide_drive_t *drive, int nsects) -{ - while (nsects > 0) { - static char dum[SECTOR_SIZE]; - - drive->hwif->input_data(drive, NULL, dum, sizeof(dum)); - nsects--; - } -} - -/* * Check the contents of the interrupt reason register from the cdrom * and attempt to recover if there are problems. Returns 0 if everything's * ok; nonzero if the request has been terminated. @@ -640,15 +613,12 @@ static int ide_cd_check_ireason(ide_drive_t *drive, struct request *rq, if (ireason == (!rw << 1)) return 0; else if (ireason == (rw << 1)) { - ide_hwif_t *hwif = drive->hwif; - xfer_func_t *xf; /* whoops... */ printk(KERN_ERR "%s: %s: wrong transfer direction!\n", drive->name, __func__); - xf = rw ? hwif->output_data : hwif->input_data; - ide_cd_pad_transfer(drive, xf, len); + ide_pad_transfer(drive, rw, len); } else if (rw == 0 && ireason == 1) { /* * Some drives (ASUS) seem to tell us that status info is @@ -696,16 +666,9 @@ static int ide_cd_check_transfer_size(ide_drive_t *drive, int len) static ide_startstop_t cdrom_newpc_intr(ide_drive_t *); -/* - * Routine to send a read/write packet command to the drive. This is usually - * called directly from cdrom_start_{read,write}(). However, for drq_interrupt - * devices, it is called from an interrupt when the drive is ready to accept - * the command. 
- */ -static ide_startstop_t cdrom_start_rw_cont(ide_drive_t *drive) +static ide_startstop_t ide_cd_prepare_rw_request(ide_drive_t *drive, + struct request *rq) { - struct request *rq = HWGROUP(drive)->rq; - if (rq_data_dir(rq) == READ) { unsigned short sectors_per_frame = queue_hardsect_size(drive->queue) >> SECTOR_BITS; @@ -742,6 +705,19 @@ static ide_startstop_t cdrom_start_rw_cont(ide_drive_t *drive) /* set up the command */ rq->timeout = ATAPI_WAIT_PC; + return ide_started; +} + +/* + * Routine to send a read/write packet command to the drive. This is usually + * called directly from cdrom_start_{read,write}(). However, for drq_interrupt + * devices, it is called from an interrupt when the drive is ready to accept + * the command. + */ +static ide_startstop_t cdrom_start_rw_cont(ide_drive_t *drive) +{ + struct request *rq = drive->hwif->hwgroup->rq; + /* send the command to the drive and return */ return cdrom_transfer_packet_command(drive, rq, cdrom_newpc_intr); } @@ -768,9 +744,8 @@ static ide_startstop_t cdrom_seek_intr(ide_drive_t *drive) return ide_stopped; } -static ide_startstop_t cdrom_start_seek_continuation(ide_drive_t *drive) +static void ide_cd_prepare_seek_request(ide_drive_t *drive, struct request *rq) { - struct request *rq = HWGROUP(drive)->rq; sector_t frame = rq->sector; sector_div(frame, queue_hardsect_size(drive->queue) >> SECTOR_BITS); @@ -780,17 +755,13 @@ static ide_startstop_t cdrom_start_seek_continuation(ide_drive_t *drive) put_unaligned(cpu_to_be32(frame), (unsigned int *) &rq->cmd[2]); rq->timeout = ATAPI_WAIT_PC; - return cdrom_transfer_packet_command(drive, rq, &cdrom_seek_intr); } -static ide_startstop_t cdrom_start_seek(ide_drive_t *drive, unsigned int block) +static ide_startstop_t cdrom_start_seek_continuation(ide_drive_t *drive) { - struct cdrom_info *info = drive->driver_data; + struct request *rq = drive->hwif->hwgroup->rq; - info->dma = 0; - info->start_seek = jiffies; - return cdrom_start_packet_command(drive, 0, - cdrom_start_seek_continuation); + return cdrom_transfer_packet_command(drive, rq, &cdrom_seek_intr); } /* @@ -1011,7 +982,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) - bio_cur_sectors(rq->bio), thislen >> 9); if (nskip > 0) { - ide_cd_drain_data(drive, nskip); + ide_pad_transfer(drive, write, nskip << 9); rq->current_nr_sectors -= nskip; thislen -= (nskip << 9); } @@ -1048,7 +1019,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) * If the buffers are full, pipe the rest into * oblivion. 
*/ - ide_cd_drain_data(drive, thislen >> 9); + ide_pad_transfer(drive, 0, thislen); else { printk(KERN_ERR "%s: confused, missing data\n", drive->name); @@ -1096,7 +1067,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) /* pad, if necessary */ if (!blk_fs_request(rq) && len > 0) - ide_cd_pad_transfer(drive, xferfunc, len); + ide_pad_transfer(drive, write, len); if (blk_pc_request(rq)) { timeout = rq->timeout; @@ -1165,21 +1136,17 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq) if (write) cd->devinfo.media_written = 1; - /* start sending the read/write request to the drive */ - return cdrom_start_packet_command(drive, 32768, cdrom_start_rw_cont); + return ide_started; } static ide_startstop_t cdrom_do_newpc_cont(ide_drive_t *drive) { struct request *rq = HWGROUP(drive)->rq; - if (!rq->timeout) - rq->timeout = ATAPI_WAIT_PC; - return cdrom_transfer_packet_command(drive, rq, cdrom_newpc_intr); } -static ide_startstop_t cdrom_do_block_pc(ide_drive_t *drive, struct request *rq) +static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq) { struct cdrom_info *info = drive->driver_data; @@ -1191,10 +1158,16 @@ static ide_startstop_t cdrom_do_block_pc(ide_drive_t *drive, struct request *rq) info->dma = 0; /* sg request */ - if (rq->bio) { - int mask = drive->queue->dma_alignment; - unsigned long addr = - (unsigned long)page_address(bio_page(rq->bio)); + if (rq->bio || ((rq->cmd_type == REQ_TYPE_ATA_PC) && rq->data_len)) { + struct request_queue *q = drive->queue; + unsigned int alignment; + unsigned long addr; + unsigned long stack_mask = ~(THREAD_SIZE - 1); + + if (rq->bio) + addr = (unsigned long)bio_data(rq->bio); + else + addr = (unsigned long)rq->data; info->dma = drive->using_dma; @@ -1204,23 +1177,25 @@ static ide_startstop_t cdrom_do_block_pc(ide_drive_t *drive, struct request *rq) * NOTE! The "len" and "addr" checks should possibly have * separate masks. */ - if ((rq->data_len & 15) || (addr & mask)) + alignment = queue_dma_alignment(q) | q->dma_pad_mask; + if (addr & alignment || rq->data_len & alignment) info->dma = 0; - } - /* start sending the command to the drive */ - return cdrom_start_packet_command(drive, rq->data_len, - cdrom_do_newpc_cont); + if (!((addr & stack_mask) ^ + ((unsigned long)current->stack & stack_mask))) + info->dma = 0; + } } /* * cdrom driver request routine. 
*/ -static ide_startstop_t ide_do_rw_cdrom(ide_drive_t *drive, struct request *rq, +static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq, sector_t block) { - ide_startstop_t action; struct cdrom_info *info = drive->driver_data; + ide_handler_t *fn; + int xferlen; if (blk_fs_request(rq)) { if (info->cd_flags & IDE_CD_FLAG_SEEKING) { @@ -1240,29 +1215,48 @@ static ide_startstop_t ide_do_rw_cdrom(ide_drive_t *drive, struct request *rq, } if (rq_data_dir(rq) == READ && IDE_LARGE_SEEK(info->last_block, block, - IDECD_SEEK_THRESHOLD) && - drive->dsc_overlap) - action = cdrom_start_seek(drive, block); - else - action = cdrom_start_rw(drive, rq); + IDECD_SEEK_THRESHOLD) && + drive->dsc_overlap) { + xferlen = 0; + fn = cdrom_start_seek_continuation; + + info->dma = 0; + info->start_seek = jiffies; + + ide_cd_prepare_seek_request(drive, rq); + } else { + xferlen = 32768; + fn = cdrom_start_rw_cont; + + if (cdrom_start_rw(drive, rq) == ide_stopped) + return ide_stopped; + + if (ide_cd_prepare_rw_request(drive, rq) == ide_stopped) + return ide_stopped; + } info->last_block = block; - return action; } else if (blk_sense_request(rq) || blk_pc_request(rq) || rq->cmd_type == REQ_TYPE_ATA_PC) { - return cdrom_do_block_pc(drive, rq); + xferlen = rq->data_len; + fn = cdrom_do_newpc_cont; + + if (!rq->timeout) + rq->timeout = ATAPI_WAIT_PC; + + cdrom_do_block_pc(drive, rq); } else if (blk_special_request(rq)) { /* right now this can only be a reset... */ cdrom_end_request(drive, 1); return ide_stopped; + } else { + blk_dump_rq_flags(rq, "ide-cd bad flags"); + cdrom_end_request(drive, 0); + return ide_stopped; } - blk_dump_rq_flags(rq, "ide-cd bad flags"); - cdrom_end_request(drive, 0); - return ide_stopped; + return cdrom_start_packet_command(drive, xferlen, fn); } - - /* * Ioctl handling. 
* @@ -1872,6 +1866,7 @@ static int ide_cdrom_setup(ide_drive_t *drive) blk_queue_prep_rq(drive->queue, ide_cdrom_prep_fn); blk_queue_dma_alignment(drive->queue, 31); + blk_queue_update_dma_pad(drive->queue, 15); drive->queue->unplug_delay = (1 * HZ) / 1000; if (!drive->queue->unplug_delay) drive->queue->unplug_delay = 1; @@ -1954,10 +1949,9 @@ static ide_driver_t ide_cdrom_driver = { .version = IDECD_VERSION, .media = ide_cdrom, .supports_dsc_overlap = 1, - .do_request = ide_do_rw_cdrom, + .do_request = ide_cd_do_request, .end_request = ide_end_request, .error = __ide_error, - .abort = __ide_abort, #ifdef CONFIG_IDE_PROC_FS .proc = idecd_proc, #endif diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index 5f49a4ae9dd8..3a2e80237c10 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c @@ -985,7 +985,6 @@ static ide_driver_t idedisk_driver = { .do_request = ide_do_rw_disk, .end_request = ide_end_request, .error = __ide_error, - .abort = __ide_abort, #ifdef CONFIG_IDE_PROC_FS .proc = idedisk_proc, #endif diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c index b3689437269f..011d72011cc4 100644 --- a/drivers/ide/ide-floppy.c +++ b/drivers/ide/ide-floppy.c @@ -351,10 +351,7 @@ static void ide_floppy_callback(ide_drive_t *drive) static void idefloppy_init_pc(struct ide_atapi_pc *pc) { - memset(pc->c, 0, 12); - pc->retries = 0; - pc->flags = 0; - pc->req_xfer = 0; + memset(pc, 0, sizeof(*pc)); pc->buf = pc->pc_buf; pc->buf_size = IDEFLOPPY_PC_BUFFER_SIZE; pc->callback = ide_floppy_callback; @@ -561,12 +558,6 @@ static void idefloppy_create_start_stop_cmd(struct ide_atapi_pc *pc, int start) pc->c[4] = start; } -static void idefloppy_create_test_unit_ready_cmd(struct ide_atapi_pc *pc) -{ - idefloppy_init_pc(pc); - pc->c[0] = GPCMD_TEST_UNIT_READY; -} - static void idefloppy_create_rw_cmd(idefloppy_floppy_t *floppy, struct ide_atapi_pc *pc, struct request *rq, unsigned long sector) @@ -711,10 +702,10 @@ static int ide_floppy_get_flexible_disk_page(ide_drive_t *drive) set_disk_ro(floppy->disk, floppy->wp); page = &pc.buf[8]; - transfer_rate = be16_to_cpu(*(u16 *)&pc.buf[8 + 2]); - sector_size = be16_to_cpu(*(u16 *)&pc.buf[8 + 6]); - cyls = be16_to_cpu(*(u16 *)&pc.buf[8 + 8]); - rpm = be16_to_cpu(*(u16 *)&pc.buf[8 + 28]); + transfer_rate = be16_to_cpup((__be16 *)&pc.buf[8 + 2]); + sector_size = be16_to_cpup((__be16 *)&pc.buf[8 + 6]); + cyls = be16_to_cpup((__be16 *)&pc.buf[8 + 8]); + rpm = be16_to_cpup((__be16 *)&pc.buf[8 + 28]); heads = pc.buf[8 + 4]; sectors = pc.buf[8 + 5]; @@ -789,8 +780,8 @@ static int ide_floppy_get_capacity(ide_drive_t *drive) for (i = 0; i < desc_cnt; i++) { unsigned int desc_start = 4 + i*8; - blocks = be32_to_cpu(*(u32 *)&pc.buf[desc_start]); - length = be16_to_cpu(*(u16 *)&pc.buf[desc_start + 6]); + blocks = be32_to_cpup((__be32 *)&pc.buf[desc_start]); + length = be16_to_cpup((__be16 *)&pc.buf[desc_start + 6]); debug_log("Descriptor %d: %dkB, %d blocks, %d sector size\n", i, blocks * length / 1024, blocks, length); @@ -911,8 +902,8 @@ static int ide_floppy_get_format_capacities(ide_drive_t *drive, int __user *arg) if (u_index >= u_array_size) break; /* User-supplied buffer too small */ - blocks = be32_to_cpu(*(u32 *)&pc.buf[desc_start]); - length = be16_to_cpu(*(u16 *)&pc.buf[desc_start + 6]); + blocks = be32_to_cpup((__be32 *)&pc.buf[desc_start]); + length = be16_to_cpup((__be16 *)&pc.buf[desc_start + 6]); if (put_user(blocks, argp)) return(-EFAULT); @@ -1138,7 +1129,6 @@ static ide_driver_t idefloppy_driver = { .do_request = 
idefloppy_do_request, .end_request = idefloppy_end_request, .error = __ide_error, - .abort = __ide_abort, #ifdef CONFIG_IDE_PROC_FS .proc = idefloppy_proc, #endif @@ -1166,7 +1156,9 @@ static int idefloppy_open(struct inode *inode, struct file *filp) floppy->flags &= ~IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS; /* Just in case */ - idefloppy_create_test_unit_ready_cmd(&pc); + idefloppy_init_pc(&pc); + pc.c[0] = GPCMD_TEST_UNIT_READY; + if (idefloppy_queue_pc_tail(drive, &pc)) { idefloppy_create_start_stop_cmd(&pc, 1); (void) idefloppy_queue_pc_tail(drive, &pc); diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index 28057747c1f8..661b75a89d4d 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c @@ -504,55 +504,6 @@ ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, u8 stat) EXPORT_SYMBOL_GPL(ide_error); -ide_startstop_t __ide_abort(ide_drive_t *drive, struct request *rq) -{ - if (drive->media != ide_disk) - rq->errors |= ERROR_RESET; - - ide_kill_rq(drive, rq); - - return ide_stopped; -} - -EXPORT_SYMBOL_GPL(__ide_abort); - -/** - * ide_abort - abort pending IDE operations - * @drive: drive the error occurred on - * @msg: message to report - * - * ide_abort kills and cleans up when we are about to do a - * host initiated reset on active commands. Longer term we - * want handlers to have sensible abort handling themselves - * - * This differs fundamentally from ide_error because in - * this case the command is doing just fine when we - * blow it away. - */ - -ide_startstop_t ide_abort(ide_drive_t *drive, const char *msg) -{ - struct request *rq; - - if (drive == NULL || (rq = HWGROUP(drive)->rq) == NULL) - return ide_stopped; - - /* retry only "normal" I/O: */ - if (!blk_fs_request(rq)) { - rq->errors = 1; - ide_end_drive_cmd(drive, BUSY_STAT, 0); - return ide_stopped; - } - - if (rq->rq_disk) { - ide_driver_t *drv; - - drv = *(ide_driver_t **)rq->rq_disk->private_data; - return drv->abort(drive, rq); - } else - return __ide_abort(drive, rq); -} - static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf) { tf->nsect = drive->sect; @@ -766,6 +717,18 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive, return ide_stopped; } +static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq) +{ + switch (rq->cmd[0]) { + case REQ_DRIVE_RESET: + return ide_do_reset(drive); + default: + blk_dump_rq_flags(rq, "ide_special_rq - bad request"); + ide_end_request(drive, 0, 0); + return ide_stopped; + } +} + static void ide_check_pm_state(ide_drive_t *drive, struct request *rq) { struct request_pm_state *pm = rq->data; @@ -869,7 +832,16 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq) pm->pm_step == ide_pm_state_completed) ide_complete_pm_request(drive, rq); return startstop; - } + } else if (!rq->rq_disk && blk_special_request(rq)) + /* + * TODO: Once all ULDs have been modified to + * check for specific op codes rather than + * blindly accepting any special request, the + * check for ->rq_disk above may be replaced + * by a more suitable mechanism or even + * dropped entirely. 
+ */ + return ide_special_rq(drive, rq); drv = *(ide_driver_t **)rq->rq_disk->private_data; return drv->do_request(drive, rq, block); diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c index 80ad4f234f3f..44aaec256a30 100644 --- a/drivers/ide/ide-iops.c +++ b/drivers/ide/ide-iops.c @@ -905,6 +905,14 @@ void ide_execute_pkt_cmd(ide_drive_t *drive) } EXPORT_SYMBOL_GPL(ide_execute_pkt_cmd); +static inline void ide_complete_drive_reset(ide_drive_t *drive, int err) +{ + struct request *rq = drive->hwif->hwgroup->rq; + + if (rq && blk_special_request(rq) && rq->cmd[0] == REQ_DRIVE_RESET) + ide_end_request(drive, err ? err : 1, 0); +} + /* needed below */ static ide_startstop_t do_reset1 (ide_drive_t *, int); @@ -940,7 +948,7 @@ static ide_startstop_t atapi_reset_pollfunc (ide_drive_t *drive) } /* done polling */ hwgroup->polling = 0; - hwgroup->resetting = 0; + ide_complete_drive_reset(drive, 0); return ide_stopped; } @@ -956,12 +964,14 @@ static ide_startstop_t reset_pollfunc (ide_drive_t *drive) ide_hwif_t *hwif = HWIF(drive); const struct ide_port_ops *port_ops = hwif->port_ops; u8 tmp; + int err = 0; if (port_ops && port_ops->reset_poll) { - if (port_ops->reset_poll(drive)) { + err = port_ops->reset_poll(drive); + if (err) { printk(KERN_ERR "%s: host reset_poll failure for %s.\n", hwif->name, drive->name); - return ide_stopped; + goto out; } } @@ -975,6 +985,7 @@ static ide_startstop_t reset_pollfunc (ide_drive_t *drive) } printk("%s: reset timed-out, status=0x%02x\n", hwif->name, tmp); drive->failures++; + err = -EIO; } else { printk("%s: reset: ", hwif->name); tmp = ide_read_error(drive); @@ -1001,10 +1012,12 @@ static ide_startstop_t reset_pollfunc (ide_drive_t *drive) if (tmp & 0x80) printk("; slave: failed"); printk("\n"); + err = -EIO; } } +out: hwgroup->polling = 0; /* done polling */ - hwgroup->resetting = 0; /* done reset attempt */ + ide_complete_drive_reset(drive, err); return ide_stopped; } @@ -1090,7 +1103,6 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi) /* For an ATAPI device, first try an ATAPI SRST. */ if (drive->media != ide_disk && !do_not_try_atapi) { - hwgroup->resetting = 1; pre_reset(drive); SELECT_DRIVE(drive); udelay (20); @@ -1112,10 +1124,10 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi) if (io_ports->ctl_addr == 0) { spin_unlock_irqrestore(&ide_lock, flags); + ide_complete_drive_reset(drive, -ENXIO); return ide_stopped; } - hwgroup->resetting = 1; /* * Note that we also set nIEN while resetting the device, * to mask unwanted interrupts from the interface during the reset. 
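
[Editor's note] The ide-io.c and ide-iops.c hunks above drop the old ide_abort()/__ide_abort() path and instead complete host-initiated resets through an ordinary block-layer special request: ide_special_rq() dispatches REQ_DRIVE_RESET to ide_do_reset(), and ide_complete_drive_reset() ends that request once the reset poll functions finish. The user-visible entry point stays the same - the HDIO_DRIVE_RESET ioctl (see the generic_ide_ioctl()/generic_drive_reset() hunk in ide.c further down) now builds and executes that special request rather than aborting the active command. The sketch below is only an illustration of exercising that ioctl from user space; the device node path is an example and the call requires CAP_SYS_ADMIN.

/* Illustrative sketch: trigger HDIO_DRIVE_RESET on an IDE block device.
 * With the changes above the kernel services this by queueing a
 * REQ_DRIVE_RESET special request and waiting for its completion,
 * instead of calling the removed ide_abort() path.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>

int main(void)
{
	const char *dev = "/dev/hda";	/* example device node */
	int fd = open(dev, O_RDONLY | O_NONBLOCK);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Needs CAP_SYS_ADMIN; the ioctl takes no argument. */
	if (ioctl(fd, HDIO_DRIVE_RESET, NULL) < 0)
		perror("HDIO_DRIVE_RESET");
	else
		printf("%s: drive reset request completed\n", dev);

	close(fd);
	return 0;
}
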
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c index 47af80df6872..13af72f09ec4 100644 --- a/drivers/ide/ide-lib.c +++ b/drivers/ide/ide-lib.c @@ -1,26 +1,11 @@ -#include <linux/module.h> #include <linux/types.h> #include <linux/string.h> #include <linux/kernel.h> -#include <linux/timer.h> -#include <linux/mm.h> #include <linux/interrupt.h> -#include <linux/major.h> -#include <linux/errno.h> -#include <linux/genhd.h> -#include <linux/blkpg.h> -#include <linux/slab.h> -#include <linux/pci.h> -#include <linux/delay.h> #include <linux/hdreg.h> #include <linux/ide.h> #include <linux/bitops.h> -#include <asm/byteorder.h> -#include <asm/irq.h> -#include <asm/uaccess.h> -#include <asm/io.h> - static const char *udma_str[] = { "UDMA/16", "UDMA/25", "UDMA/33", "UDMA/44", "UDMA/66", "UDMA/100", "UDMA/133", "UDMA7" }; @@ -90,142 +75,6 @@ static u8 ide_rate_filter(ide_drive_t *drive, u8 speed) return min(speed, mode); } -/* - * Standard (generic) timings for PIO modes, from ATA2 specification. - * These timings are for access to the IDE data port register *only*. - * Some drives may specify a mode, while also specifying a different - * value for cycle_time (from drive identification data). - */ -const ide_pio_timings_t ide_pio_timings[6] = { - { 70, 165, 600 }, /* PIO Mode 0 */ - { 50, 125, 383 }, /* PIO Mode 1 */ - { 30, 100, 240 }, /* PIO Mode 2 */ - { 30, 80, 180 }, /* PIO Mode 3 with IORDY */ - { 25, 70, 120 }, /* PIO Mode 4 with IORDY */ - { 20, 50, 100 } /* PIO Mode 5 with IORDY (nonstandard) */ -}; - -EXPORT_SYMBOL_GPL(ide_pio_timings); - -/* - * Shared data/functions for determining best PIO mode for an IDE drive. - * Most of this stuff originally lived in cmd640.c, and changes to the - * ide_pio_blacklist[] table should be made with EXTREME CAUTION to avoid - * breaking the fragile cmd640.c support. - */ - -/* - * Black list. Some drives incorrectly report their maximal PIO mode, - * at least in respect to CMD640. Here we keep info on some known drives. 
- */ -static struct ide_pio_info { - const char *name; - int pio; -} ide_pio_blacklist [] = { - { "Conner Peripherals 540MB - CFS540A", 3 }, - - { "WDC AC2700", 3 }, - { "WDC AC2540", 3 }, - { "WDC AC2420", 3 }, - { "WDC AC2340", 3 }, - { "WDC AC2250", 0 }, - { "WDC AC2200", 0 }, - { "WDC AC21200", 4 }, - { "WDC AC2120", 0 }, - { "WDC AC2850", 3 }, - { "WDC AC1270", 3 }, - { "WDC AC1170", 1 }, - { "WDC AC1210", 1 }, - { "WDC AC280", 0 }, - { "WDC AC31000", 3 }, - { "WDC AC31200", 3 }, - - { "Maxtor 7131 AT", 1 }, - { "Maxtor 7171 AT", 1 }, - { "Maxtor 7213 AT", 1 }, - { "Maxtor 7245 AT", 1 }, - { "Maxtor 7345 AT", 1 }, - { "Maxtor 7546 AT", 3 }, - { "Maxtor 7540 AV", 3 }, - - { "SAMSUNG SHD-3121A", 1 }, - { "SAMSUNG SHD-3122A", 1 }, - { "SAMSUNG SHD-3172A", 1 }, - - { "ST5660A", 3 }, - { "ST3660A", 3 }, - { "ST3630A", 3 }, - { "ST3655A", 3 }, - { "ST3391A", 3 }, - { "ST3390A", 1 }, - { "ST3600A", 1 }, - { "ST3290A", 0 }, - { "ST3144A", 0 }, - { "ST3491A", 1 }, /* reports 3, should be 1 or 2 (depending on */ - /* drive) according to Seagates FIND-ATA program */ - - { "QUANTUM ELS127A", 0 }, - { "QUANTUM ELS170A", 0 }, - { "QUANTUM LPS240A", 0 }, - { "QUANTUM LPS210A", 3 }, - { "QUANTUM LPS270A", 3 }, - { "QUANTUM LPS365A", 3 }, - { "QUANTUM LPS540A", 3 }, - { "QUANTUM LIGHTNING 540A", 3 }, - { "QUANTUM LIGHTNING 730A", 3 }, - - { "QUANTUM FIREBALL_540", 3 }, /* Older Quantum Fireballs don't work */ - { "QUANTUM FIREBALL_640", 3 }, - { "QUANTUM FIREBALL_1080", 3 }, - { "QUANTUM FIREBALL_1280", 3 }, - { NULL, 0 } -}; - -/** - * ide_scan_pio_blacklist - check for a blacklisted drive - * @model: Drive model string - * - * This routine searches the ide_pio_blacklist for an entry - * matching the start/whole of the supplied model name. - * - * Returns -1 if no match found. - * Otherwise returns the recommended PIO mode from ide_pio_blacklist[]. - */ - -static int ide_scan_pio_blacklist (char *model) -{ - struct ide_pio_info *p; - - for (p = ide_pio_blacklist; p->name != NULL; p++) { - if (strncmp(p->name, model, strlen(p->name)) == 0) - return p->pio; - } - return -1; -} - -unsigned int ide_pio_cycle_time(ide_drive_t *drive, u8 pio) -{ - struct hd_driveid *id = drive->id; - int cycle_time = 0; - - if (id->field_valid & 2) { - if (id->capability & 8) - cycle_time = id->eide_pio_iordy; - else - cycle_time = id->eide_pio; - } - - /* conservative "downgrade" for all pre-ATA2 drives */ - if (pio < 3) { - if (cycle_time && cycle_time < ide_pio_timings[pio].cycle_time) - cycle_time = 0; /* use standard timing */ - } - - return cycle_time ? cycle_time : ide_pio_timings[pio].cycle_time; -} - -EXPORT_SYMBOL_GPL(ide_pio_cycle_time); - /** * ide_get_best_pio_mode - get PIO mode from drive * @drive: drive to consider diff --git a/drivers/ide/ide-pio-blacklist.c b/drivers/ide/ide-pio-blacklist.c new file mode 100644 index 000000000000..a8c2c8f8660a --- /dev/null +++ b/drivers/ide/ide-pio-blacklist.c @@ -0,0 +1,94 @@ +/* + * PIO blacklist. Some drives incorrectly report their maximal PIO mode, + * at least in respect to CMD640. Here we keep info on some known drives. + * + * Changes to the ide_pio_blacklist[] should be made with EXTREME CAUTION + * to avoid breaking the fragile cmd640.c support. 
+ */ + +#include <linux/string.h> + +static struct ide_pio_info { + const char *name; + int pio; +} ide_pio_blacklist [] = { + { "Conner Peripherals 540MB - CFS540A", 3 }, + + { "WDC AC2700", 3 }, + { "WDC AC2540", 3 }, + { "WDC AC2420", 3 }, + { "WDC AC2340", 3 }, + { "WDC AC2250", 0 }, + { "WDC AC2200", 0 }, + { "WDC AC21200", 4 }, + { "WDC AC2120", 0 }, + { "WDC AC2850", 3 }, + { "WDC AC1270", 3 }, + { "WDC AC1170", 1 }, + { "WDC AC1210", 1 }, + { "WDC AC280", 0 }, + { "WDC AC31000", 3 }, + { "WDC AC31200", 3 }, + + { "Maxtor 7131 AT", 1 }, + { "Maxtor 7171 AT", 1 }, + { "Maxtor 7213 AT", 1 }, + { "Maxtor 7245 AT", 1 }, + { "Maxtor 7345 AT", 1 }, + { "Maxtor 7546 AT", 3 }, + { "Maxtor 7540 AV", 3 }, + + { "SAMSUNG SHD-3121A", 1 }, + { "SAMSUNG SHD-3122A", 1 }, + { "SAMSUNG SHD-3172A", 1 }, + + { "ST5660A", 3 }, + { "ST3660A", 3 }, + { "ST3630A", 3 }, + { "ST3655A", 3 }, + { "ST3391A", 3 }, + { "ST3390A", 1 }, + { "ST3600A", 1 }, + { "ST3290A", 0 }, + { "ST3144A", 0 }, + { "ST3491A", 1 }, /* reports 3, should be 1 or 2 (depending on drive) + according to Seagate's FIND-ATA program */ + + { "QUANTUM ELS127A", 0 }, + { "QUANTUM ELS170A", 0 }, + { "QUANTUM LPS240A", 0 }, + { "QUANTUM LPS210A", 3 }, + { "QUANTUM LPS270A", 3 }, + { "QUANTUM LPS365A", 3 }, + { "QUANTUM LPS540A", 3 }, + { "QUANTUM LIGHTNING 540A", 3 }, + { "QUANTUM LIGHTNING 730A", 3 }, + + { "QUANTUM FIREBALL_540", 3 }, /* Older Quantum Fireballs don't work */ + { "QUANTUM FIREBALL_640", 3 }, + { "QUANTUM FIREBALL_1080", 3 }, + { "QUANTUM FIREBALL_1280", 3 }, + { NULL, 0 } +}; + +/** + * ide_scan_pio_blacklist - check for a blacklisted drive + * @model: Drive model string + * + * This routine searches the ide_pio_blacklist for an entry + * matching the start/whole of the supplied model name. + * + * Returns -1 if no match found. + * Otherwise returns the recommended PIO mode from ide_pio_blacklist[]. 
+ */ + +int ide_scan_pio_blacklist(char *model) +{ + struct ide_pio_info *p; + + for (p = ide_pio_blacklist; p->name != NULL; p++) { + if (strncmp(p->name, model, strlen(p->name)) == 0) + return p->pio; + } + return -1; +} diff --git a/drivers/ide/ide-pnp.c b/drivers/ide/ide-pnp.c index adbd01784162..03f2ef5470a3 100644 --- a/drivers/ide/ide-pnp.c +++ b/drivers/ide/ide-pnp.c @@ -33,6 +33,8 @@ static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id) ide_hwif_t *hwif; unsigned long base, ctl; + printk(KERN_INFO DRV_NAME ": generic PnP IDE interface\n"); + if (!(pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) && pnp_irq_valid(dev, 0))) return -1; @@ -62,10 +64,8 @@ static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id) u8 index = hwif->index; u8 idx[4] = { index, 0xff, 0xff, 0xff }; - ide_init_port_data(hwif, index); ide_init_port_hw(hwif, &hw); - printk(KERN_INFO "ide%d: generic PnP IDE interface\n", index); pnp_set_drvdata(dev, hwif); ide_device_add(idx, NULL); diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index d21e51a02c3e..235ebdb29b28 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c @@ -39,6 +39,8 @@ #include <asm/uaccess.h> #include <asm/io.h> +static ide_hwif_t ide_hwifs[MAX_HWIFS]; /* master data repository */ + /** * generic_id - add a generic drive id * @drive: drive to make an ID block for @@ -1318,10 +1320,10 @@ static void ide_port_init_devices(ide_hwif_t *hwif) drive->unmask = 1; if (hwif->host_flags & IDE_HFLAG_NO_UNMASK_IRQS) drive->no_unmask = 1; - } - if (port_ops && port_ops->port_init_devs) - port_ops->port_init_devs(hwif); + if (port_ops && port_ops->init_dev) + port_ops->init_dev(drive); + } } static void ide_init_port(ide_hwif_t *hwif, unsigned int port, @@ -1473,22 +1475,29 @@ ide_hwif_t *ide_find_port_slot(const struct ide_port_info *d) for (; i < MAX_HWIFS; i++) { hwif = &ide_hwifs[i]; if (hwif->chipset == ide_unknown) - return hwif; + goto out_found; } } else { for (i = 2; i < MAX_HWIFS; i++) { hwif = &ide_hwifs[i]; if (hwif->chipset == ide_unknown) - return hwif; + goto out_found; } for (i = 0; i < 2 && i < MAX_HWIFS; i++) { hwif = &ide_hwifs[i]; if (hwif->chipset == ide_unknown) - return hwif; + goto out_found; } } + printk(KERN_ERR "%s: no free slot for interface\n", + d ? d->name : "ide"); + return NULL; + +out_found: + ide_init_port_data(hwif, i); + return hwif; } EXPORT_SYMBOL_GPL(ide_find_port_slot); diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index f9cf1670e4e1..b711ab96e287 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c @@ -2591,7 +2591,6 @@ static ide_driver_t idetape_driver = { .do_request = idetape_do_request, .end_request = idetape_end_request, .error = __ide_error, - .abort = __ide_abort, #ifdef CONFIG_IDE_PROC_FS .proc = idetape_proc, #endif diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c index cf55a48a7dd2..1fbdb746dc88 100644 --- a/drivers/ide/ide-taskfile.c +++ b/drivers/ide/ide-taskfile.c @@ -8,28 +8,18 @@ * The big the bad and the ugly. 
*/ -#include <linux/module.h> #include <linux/types.h> #include <linux/string.h> #include <linux/kernel.h> -#include <linux/timer.h> -#include <linux/mm.h> #include <linux/sched.h> #include <linux/interrupt.h> -#include <linux/major.h> #include <linux/errno.h> -#include <linux/genhd.h> -#include <linux/blkpg.h> #include <linux/slab.h> -#include <linux/pci.h> #include <linux/delay.h> #include <linux/hdreg.h> #include <linux/ide.h> -#include <linux/bitops.h> #include <linux/scatterlist.h> -#include <asm/byteorder.h> -#include <asm/irq.h> #include <asm/uaccess.h> #include <asm/io.h> @@ -62,25 +52,6 @@ int taskfile_lib_get_identify (ide_drive_t *drive, u8 *buf) return ide_raw_taskfile(drive, &args, buf, 1); } -static int inline task_dma_ok(ide_task_t *task) -{ - if (blk_fs_request(task->rq) || (task->tf_flags & IDE_TFLAG_FLAGGED)) - return 1; - - switch (task->tf.command) { - case WIN_WRITEDMA_ONCE: - case WIN_WRITEDMA: - case WIN_WRITEDMA_EXT: - case WIN_READDMA_ONCE: - case WIN_READDMA: - case WIN_READDMA_EXT: - case WIN_IDENTIFY_DMA: - return 1; - } - - return 0; -} - static ide_startstop_t task_no_data_intr(ide_drive_t *); static ide_startstop_t set_geometry_intr(ide_drive_t *); static ide_startstop_t recal_intr(ide_drive_t *); @@ -139,8 +110,7 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task) WAIT_WORSTCASE, NULL); return ide_started; default: - if (task_dma_ok(task) == 0 || drive->using_dma == 0 || - dma_ops->dma_setup(drive)) + if (drive->using_dma == 0 || dma_ops->dma_setup(drive)) return ide_stopped; dma_ops->dma_exec_cmd(drive, tf->command); dma_ops->dma_start(drive); @@ -183,7 +153,6 @@ static ide_startstop_t set_geometry_intr(ide_drive_t *drive) if (stat & (ERR_STAT|DRQ_STAT)) return ide_error(drive, "set_geometry_intr", stat); - BUG_ON(HWGROUP(drive)->handler != NULL); ide_set_handler(drive, &set_geometry_intr, WAIT_WORSTCASE, NULL); return ide_started; } diff --git a/drivers/ide/ide-timing.h b/drivers/ide/ide-timings.c index 2e91c5870b4c..8c2f8327f487 100644 --- a/drivers/ide/ide-timing.h +++ b/drivers/ide/ide-timings.c @@ -1,11 +1,7 @@ -#ifndef _IDE_TIMING_H -#define _IDE_TIMING_H - /* * Copyright (c) 1999-2001 Vojtech Pavlik - */ - -/* + * Copyright (c) 2007-2008 Bartlomiej Zolnierkiewicz + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or @@ -27,27 +23,14 @@ #include <linux/kernel.h> #include <linux/hdreg.h> - -#define XFER_PIO_5 0x0d -#define XFER_UDMA_SLOW 0x4f - -struct ide_timing { - short mode; - short setup; /* t1 */ - short act8b; /* t2 for 8-bit io */ - short rec8b; /* t2i for 8-bit io */ - short cyc8b; /* t0 for 8-bit io */ - short active; /* t2 or tD */ - short recover; /* t2i or tK */ - short cycle; /* t0 */ - short udma; /* t2CYCTYP/2 */ -}; +#include <linux/ide.h> +#include <linux/module.h> /* * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds). * These were taken from ATA/ATAPI-6 standard, rev 0a, except * for PIO 5, which is a nonstandard extension and UDMA6, which - * is currently supported only by Maxtor drives. + * is currently supported only by Maxtor drives. 
*/ static struct ide_timing ide_timing[] = { @@ -61,12 +44,10 @@ static struct ide_timing ide_timing[] = { { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 }, { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 }, - { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, - { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 }, { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 }, { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 }, - + { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 }, { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 }, { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 }, @@ -81,29 +62,46 @@ static struct ide_timing ide_timing[] = { { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, - { -1 } + { 0xff } }; -#define IDE_TIMING_SETUP 0x01 -#define IDE_TIMING_ACT8B 0x02 -#define IDE_TIMING_REC8B 0x04 -#define IDE_TIMING_CYC8B 0x08 -#define IDE_TIMING_8BIT 0x0e -#define IDE_TIMING_ACTIVE 0x10 -#define IDE_TIMING_RECOVER 0x20 -#define IDE_TIMING_CYCLE 0x40 -#define IDE_TIMING_UDMA 0x80 -#define IDE_TIMING_ALL 0xff - -#define ENOUGH(v,unit) (((v)-1)/(unit)+1) -#define EZ(v,unit) ((v)?ENOUGH(v,unit):0) - -#define XFER_MODE 0xf0 -#define XFER_MWDMA 0x20 -#define XFER_EPIO 0x01 -#define XFER_PIO 0x00 - -static void ide_timing_quantize(struct ide_timing *t, struct ide_timing *q, int T, int UT) +struct ide_timing *ide_timing_find_mode(u8 speed) +{ + struct ide_timing *t; + + for (t = ide_timing; t->mode != speed; t++) + if (t->mode == 0xff) + return NULL; + return t; +} +EXPORT_SYMBOL_GPL(ide_timing_find_mode); + +u16 ide_pio_cycle_time(ide_drive_t *drive, u8 pio) +{ + struct hd_driveid *id = drive->id; + struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio); + u16 cycle = 0; + + if (id->field_valid & 2) { + if (id->capability & 8) + cycle = id->eide_pio_iordy; + else + cycle = id->eide_pio; + + /* conservative "downgrade" for all pre-ATA2 drives */ + if (pio < 3 && cycle < t->cycle) + cycle = 0; /* use standard timing */ + } + + return cycle ? cycle : t->cycle; +} +EXPORT_SYMBOL_GPL(ide_pio_cycle_time); + +#define ENOUGH(v, unit) (((v) - 1) / (unit) + 1) +#define EZ(v, unit) ((v) ? 
ENOUGH(v, unit) : 0) + +static void ide_timing_quantize(struct ide_timing *t, struct ide_timing *q, + int T, int UT) { q->setup = EZ(t->setup * 1000, T); q->act8b = EZ(t->act8b * 1000, T); @@ -115,92 +113,83 @@ static void ide_timing_quantize(struct ide_timing *t, struct ide_timing *q, int q->udma = EZ(t->udma * 1000, UT); } -static void ide_timing_merge(struct ide_timing *a, struct ide_timing *b, struct ide_timing *m, unsigned int what) -{ - if (what & IDE_TIMING_SETUP ) m->setup = max(a->setup, b->setup); - if (what & IDE_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b); - if (what & IDE_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b); - if (what & IDE_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b); - if (what & IDE_TIMING_ACTIVE ) m->active = max(a->active, b->active); - if (what & IDE_TIMING_RECOVER) m->recover = max(a->recover, b->recover); - if (what & IDE_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle); - if (what & IDE_TIMING_UDMA ) m->udma = max(a->udma, b->udma); -} - -static struct ide_timing* ide_timing_find_mode(short speed) +void ide_timing_merge(struct ide_timing *a, struct ide_timing *b, + struct ide_timing *m, unsigned int what) { - struct ide_timing *t; - - for (t = ide_timing; t->mode != speed; t++) - if (t->mode < 0) - return NULL; - return t; + if (what & IDE_TIMING_SETUP) + m->setup = max(a->setup, b->setup); + if (what & IDE_TIMING_ACT8B) + m->act8b = max(a->act8b, b->act8b); + if (what & IDE_TIMING_REC8B) + m->rec8b = max(a->rec8b, b->rec8b); + if (what & IDE_TIMING_CYC8B) + m->cyc8b = max(a->cyc8b, b->cyc8b); + if (what & IDE_TIMING_ACTIVE) + m->active = max(a->active, b->active); + if (what & IDE_TIMING_RECOVER) + m->recover = max(a->recover, b->recover); + if (what & IDE_TIMING_CYCLE) + m->cycle = max(a->cycle, b->cycle); + if (what & IDE_TIMING_UDMA) + m->udma = max(a->udma, b->udma); } +EXPORT_SYMBOL_GPL(ide_timing_merge); -static int ide_timing_compute(ide_drive_t *drive, short speed, struct ide_timing *t, int T, int UT) +int ide_timing_compute(ide_drive_t *drive, u8 speed, + struct ide_timing *t, int T, int UT) { struct hd_driveid *id = drive->id; struct ide_timing *s, p; -/* - * Find the mode. - */ - - if (!(s = ide_timing_find_mode(speed))) + /* + * Find the mode. + */ + s = ide_timing_find_mode(speed); + if (s == NULL) return -EINVAL; -/* - * Copy the timing from the table. - */ - + /* + * Copy the timing from the table. + */ *t = *s; -/* - * If the drive is an EIDE drive, it can tell us it needs extended - * PIO/MWDMA cycle timing. - */ - + /* + * If the drive is an EIDE drive, it can tell us it needs extended + * PIO/MWDMA cycle timing. + */ if (id && id->field_valid & 2) { /* EIDE drive */ memset(&p, 0, sizeof(p)); - switch (speed & XFER_MODE) { - - case XFER_PIO: - if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = id->eide_pio; - else p.cycle = p.cyc8b = id->eide_pio_iordy; - break; - - case XFER_MWDMA: - p.cycle = id->eide_dma_min; - break; - } + if (speed <= XFER_PIO_2) + p.cycle = p.cyc8b = id->eide_pio; + else if (speed <= XFER_PIO_5) + p.cycle = p.cyc8b = id->eide_pio_iordy; + else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) + p.cycle = id->eide_dma_min; ide_timing_merge(&p, t, t, IDE_TIMING_CYCLE | IDE_TIMING_CYC8B); } -/* - * Convert the timing to bus clock counts. - */ - + /* + * Convert the timing to bus clock counts. + */ ide_timing_quantize(t, t, T, UT); -/* - * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, S.M.A.R.T - * and some other commands. 
We have to ensure that the DMA cycle timing is - * slower/equal than the fastest PIO timing. - */ - - if ((speed & XFER_MODE) != XFER_PIO) { + /* + * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, + * S.M.A.R.T and some other commands. We have to ensure that the + * DMA cycle timing is slower/equal than the fastest PIO timing. + */ + if (speed >= XFER_SW_DMA_0) { u8 pio = ide_get_best_pio_mode(drive, 255, 5); ide_timing_compute(drive, XFER_PIO_0 + pio, &p, T, UT); ide_timing_merge(&p, t, t, IDE_TIMING_ALL); } -/* - * Lengthen active & recovery time so that cycle time is correct. - */ - + /* + * Lengthen active & recovery time so that cycle time is correct. + */ if (t->act8b + t->rec8b < t->cyc8b) { t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2; t->rec8b = t->cyc8b - t->act8b; @@ -213,5 +202,4 @@ static int ide_timing_compute(ide_drive_t *drive, short speed, struct ide_timing return 0; } - -#endif +EXPORT_SYMBOL_GPL(ide_timing_compute); diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c index 2b8453510e09..d4a6b102a772 100644 --- a/drivers/ide/ide.c +++ b/drivers/ide/ide.c @@ -50,29 +50,16 @@ #include <linux/types.h> #include <linux/string.h> #include <linux/kernel.h> -#include <linux/timer.h> -#include <linux/mm.h> #include <linux/interrupt.h> #include <linux/major.h> #include <linux/errno.h> #include <linux/genhd.h> -#include <linux/blkpg.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/pci.h> -#include <linux/delay.h> #include <linux/ide.h> #include <linux/completion.h> -#include <linux/reboot.h> -#include <linux/cdrom.h> -#include <linux/seq_file.h> #include <linux/device.h> -#include <linux/bitops.h> - -#include <asm/byteorder.h> -#include <asm/irq.h> -#include <asm/uaccess.h> -#include <asm/io.h> /* default maximum number of failures */ @@ -91,8 +78,6 @@ DEFINE_MUTEX(ide_cfg_mtx); __cacheline_aligned_in_smp DEFINE_SPINLOCK(ide_lock); EXPORT_SYMBOL(ide_lock); -ide_hwif_t ide_hwifs[MAX_HWIFS]; /* master data repository */ - static void ide_port_init_devices_data(ide_hwif_t *); /* @@ -121,7 +106,6 @@ void ide_init_port_data(ide_hwif_t *hwif, unsigned int index) ide_port_init_devices_data(hwif); } -EXPORT_SYMBOL_GPL(ide_init_port_data); static void ide_port_init_devices_data(ide_hwif_t *hwif) { @@ -150,18 +134,6 @@ static void ide_port_init_devices_data(ide_hwif_t *hwif) } } -static void __init init_ide_data (void) -{ - unsigned int index; - - /* Initialise all interface structures */ - for (index = 0; index < MAX_HWIFS; ++index) { - ide_hwif_t *hwif = &ide_hwifs[index]; - - ide_init_port_data(hwif, index); - } -} - void ide_remove_port_from_hwgroup(ide_hwif_t *hwif) { ide_hwgroup_t *hwgroup = hwif->hwgroup; @@ -312,7 +284,8 @@ void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw) memcpy(&hwif->io_ports, &hw->io_ports, sizeof(hwif->io_ports)); hwif->irq = hw->irq; hwif->chipset = hw->chipset; - hwif->gendev.parent = hw->dev; + hwif->dev = hw->dev; + hwif->gendev.parent = hw->parent ? 
hw->parent : hw->dev; hwif->ack_intr = hw->ack_intr; } EXPORT_SYMBOL_GPL(ide_init_port_hw); @@ -556,6 +529,22 @@ static int generic_ide_resume(struct device *dev) return err; } +static int generic_drive_reset(ide_drive_t *drive) +{ + struct request *rq; + int ret = 0; + + rq = blk_get_request(drive->queue, READ, __GFP_WAIT); + rq->cmd_type = REQ_TYPE_SPECIAL; + rq->cmd_len = 1; + rq->cmd[0] = REQ_DRIVE_RESET; + rq->cmd_flags |= REQ_SOFTBARRIER; + if (blk_execute_rq(drive->queue, NULL, rq, 1)) + ret = rq->errors; + blk_put_request(rq); + return ret; +} + int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device *bdev, unsigned int cmd, unsigned long arg) { @@ -630,33 +619,8 @@ int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device if (!capable(CAP_SYS_ADMIN)) return -EACCES; - /* - * Abort the current command on the - * group if there is one, taking - * care not to allow anything else - * to be queued and to die on the - * spot if we miss one somehow - */ - - spin_lock_irqsave(&ide_lock, flags); - - if (HWGROUP(drive)->resetting) { - spin_unlock_irqrestore(&ide_lock, flags); - return -EBUSY; - } + return generic_drive_reset(drive); - ide_abort(drive, "drive reset"); - - BUG_ON(HWGROUP(drive)->handler); - - /* Ensure nothing gets queued after we - drop the lock. Reset will clear the busy */ - - HWGROUP(drive)->busy = 1; - spin_unlock_irqrestore(&ide_lock, flags); - (void) ide_do_reset(drive); - - return 0; case HDIO_GET_BUSSTATE: if (!capable(CAP_SYS_ADMIN)) return -EACCES; @@ -1021,8 +985,6 @@ static int __init ide_init(void) goto out_port_class; } - init_ide_data(); - proc_ide_create(); return 0; diff --git a/drivers/ide/legacy/ali14xx.c b/drivers/ide/legacy/ali14xx.c index 052125fafcfa..4ec19737f3c5 100644 --- a/drivers/ide/legacy/ali14xx.c +++ b/drivers/ide/legacy/ali14xx.c @@ -117,10 +117,11 @@ static void ali14xx_set_pio_mode(ide_drive_t *drive, const u8 pio) u8 param1, param2, param3, param4; unsigned long flags; int bus_speed = ide_vlb_clk ? 
ide_vlb_clk : 50; + struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio); /* calculate timing, according to PIO mode */ time1 = ide_pio_cycle_time(drive, pio); - time2 = ide_pio_timings[pio].active_time; + time2 = t->active; param3 = param1 = (time2 * bus_speed + 999) / 1000; param4 = param2 = (time1 * bus_speed + 999) / 1000 - param1; if (pio < 3) { diff --git a/drivers/ide/legacy/buddha.c b/drivers/ide/legacy/buddha.c index 9a1d27ef3f8a..0497e7f85b09 100644 --- a/drivers/ide/legacy/buddha.c +++ b/drivers/ide/legacy/buddha.c @@ -227,7 +227,6 @@ fail_base2: if (hwif) { u8 index = hwif->index; - ide_init_port_data(hwif, index); ide_init_port_hw(hwif, &hw); idx[i] = index; diff --git a/drivers/ide/legacy/falconide.c b/drivers/ide/legacy/falconide.c index af11028b4794..129a812bb57f 100644 --- a/drivers/ide/legacy/falconide.c +++ b/drivers/ide/legacy/falconide.c @@ -111,7 +111,6 @@ static int __init falconide_init(void) u8 index = hwif->index; u8 idx[4] = { index, 0xff, 0xff, 0xff }; - ide_init_port_data(hwif, index); ide_init_port_hw(hwif, &hw); /* Atari has a byte-swapped IDE interface */ diff --git a/drivers/ide/legacy/gayle.c b/drivers/ide/legacy/gayle.c index b78941680c32..7e74b20202df 100644 --- a/drivers/ide/legacy/gayle.c +++ b/drivers/ide/legacy/gayle.c @@ -185,7 +185,6 @@ found: if (hwif) { u8 index = hwif->index; - ide_init_port_data(hwif, index); ide_init_port_hw(hwif, &hw); idx[i] = index; diff --git a/drivers/ide/legacy/ht6560b.c b/drivers/ide/legacy/ht6560b.c index dd6dfb32e853..7bc8fd59ea9e 100644 --- a/drivers/ide/legacy/ht6560b.c +++ b/drivers/ide/legacy/ht6560b.c @@ -216,6 +216,7 @@ static u8 ht_pio2timings(ide_drive_t *drive, const u8 pio) if (pio) { unsigned int cycle_time; + struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio); cycle_time = ide_pio_cycle_time(drive, pio); @@ -224,10 +225,8 @@ static u8 ht_pio2timings(ide_drive_t *drive, const u8 pio) * actual cycle time for recovery and activity * according system bus speed. */ - active_time = ide_pio_timings[pio].active_time; - recovery_time = cycle_time - - active_time - - ide_pio_timings[pio].setup_time; + active_time = t->active; + recovery_time = cycle_time - active_time - t->setup; /* * Cycle times should be Vesa bus cycles */ @@ -311,16 +310,16 @@ static void ht6560b_set_pio_mode(ide_drive_t *drive, const u8 pio) #endif } -static void __init ht6560b_port_init_devs(ide_hwif_t *hwif) +static void __init ht6560b_init_dev(ide_drive_t *drive) { + ide_hwif_t *hwif = drive->hwif; /* Setting default configurations for drives. 
*/ int t = (HT_CONFIG_DEFAULT << 8) | HT_TIMING_DEFAULT; if (hwif->channel) t |= (HT_SECONDARY_IF << 8); - hwif->drives[0].drive_data = t; - hwif->drives[1].drive_data = t; + drive->drive_data = t; } static int probe_ht6560b; @@ -329,7 +328,7 @@ module_param_named(probe, probe_ht6560b, bool, 0); MODULE_PARM_DESC(probe, "probe for HT6560B chipset"); static const struct ide_port_ops ht6560b_port_ops = { - .port_init_devs = ht6560b_port_init_devs, + .init_dev = ht6560b_init_dev, .set_pio_mode = ht6560b_set_pio_mode, .selectproc = ht6560b_selectproc, }; diff --git a/drivers/ide/legacy/ide-4drives.c b/drivers/ide/legacy/ide-4drives.c index ecae916a3385..89c8ff0a4d08 100644 --- a/drivers/ide/legacy/ide-4drives.c +++ b/drivers/ide/legacy/ide-4drives.c @@ -11,6 +11,21 @@ static int probe_4drives; module_param_named(probe, probe_4drives, bool, 0); MODULE_PARM_DESC(probe, "probe for generic IDE chipset with 4 drives/port"); +static void ide_4drives_init_dev(ide_drive_t *drive) +{ + if (drive->hwif->channel) + drive->select.all ^= 0x20; +} + +static const struct ide_port_ops ide_4drives_port_ops = { + .init_dev = ide_4drives_init_dev, +}; + +static const struct ide_port_info ide_4drives_port_info = { + .port_ops = &ide_4drives_port_ops, + .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_NO_DMA, +}; + static int __init ide_4drives_init(void) { ide_hwif_t *hwif, *mate; @@ -49,18 +64,10 @@ static int __init ide_4drives_init(void) mate = ide_find_port(); if (mate) { ide_init_port_hw(mate, &hw); - mate->drives[0].select.all ^= 0x20; - mate->drives[1].select.all ^= 0x20; idx[1] = mate->index; - - if (hwif) { - hwif->mate = mate; - mate->mate = hwif; - hwif->serialized = mate->serialized = 1; - } } - ide_device_add(idx, NULL); + ide_device_add(idx, &ide_4drives_port_info); return 0; } diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c index 8dbf4d9b6447..27b1e0b7ecb4 100644 --- a/drivers/ide/legacy/ide-cs.c +++ b/drivers/ide/legacy/ide-cs.c @@ -66,8 +66,6 @@ MODULE_LICENSE("Dual MPL/GPL"); #ifdef CONFIG_PCMCIA_DEBUG INT_MODULE_PARM(pc_debug, 0); #define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args) -/*static char *version = -"ide-cs.c 1.3 2002/10/26 05:45:31 (David Hinds)";*/ #else #define DEBUG(n, args...) 
#endif @@ -154,6 +152,11 @@ static const struct ide_port_ops idecs_port_ops = { .quirkproc = ide_undecoded_slave, }; +static const struct ide_port_info idecs_port_info = { + .port_ops = &idecs_port_ops, + .host_flags = IDE_HFLAG_NO_DMA, +}; + static ide_hwif_t *idecs_register(unsigned long io, unsigned long ctl, unsigned long irq, struct pcmcia_device *handle) { @@ -187,13 +190,11 @@ static ide_hwif_t *idecs_register(unsigned long io, unsigned long ctl, i = hwif->index; - ide_init_port_data(hwif, i); ide_init_port_hw(hwif, &hw); - hwif->port_ops = &idecs_port_ops; idx[0] = i; - ide_device_add(idx, NULL); + ide_device_add(idx, &idecs_port_info); if (hwif->present) return hwif; diff --git a/drivers/ide/legacy/ide_platform.c b/drivers/ide/legacy/ide_platform.c index d3bc3f24e05d..a249562b34b5 100644 --- a/drivers/ide/legacy/ide_platform.c +++ b/drivers/ide/legacy/ide_platform.c @@ -44,6 +44,10 @@ static void __devinit plat_ide_setup_ports(hw_regs_t *hw, hw->chipset = ide_generic; } +static const struct ide_port_info platform_ide_port_info = { + .host_flags = IDE_HFLAG_NO_DMA, +}; + static int __devinit plat_ide_probe(struct platform_device *pdev) { struct resource *res_base, *res_alt, *res_irq; @@ -54,6 +58,7 @@ static int __devinit plat_ide_probe(struct platform_device *pdev) int ret = 0; int mmio = 0; hw_regs_t hw; + struct ide_port_info d = platform_ide_port_info; pdata = pdev->dev.platform_data; @@ -102,13 +107,13 @@ static int __devinit plat_ide_probe(struct platform_device *pdev) ide_init_port_hw(hwif, &hw); if (mmio) { - hwif->host_flags = IDE_HFLAG_MMIO; + d.host_flags |= IDE_HFLAG_MMIO; default_hwif_mmiops(hwif); } idx[0] = hwif->index; - ide_device_add(idx, NULL); + ide_device_add(idx, &d); platform_set_drvdata(pdev, hwif); diff --git a/drivers/ide/legacy/macide.c b/drivers/ide/legacy/macide.c index 2e84290d0bcc..0a6195bcfeda 100644 --- a/drivers/ide/legacy/macide.c +++ b/drivers/ide/legacy/macide.c @@ -130,7 +130,6 @@ static int __init macide_init(void) u8 index = hwif->index; u8 idx[4] = { index, 0xff, 0xff, 0xff }; - ide_init_port_data(hwif, index); ide_init_port_hw(hwif, &hw); ide_device_add(idx, NULL); diff --git a/drivers/ide/legacy/q40ide.c b/drivers/ide/legacy/q40ide.c index 8ff6e2d20834..9c2b9d078f69 100644 --- a/drivers/ide/legacy/q40ide.c +++ b/drivers/ide/legacy/q40ide.c @@ -142,7 +142,6 @@ static int __init q40ide_init(void) hwif = ide_find_port(); if (hwif) { - ide_init_port_data(hwif, hwif->index); ide_init_port_hw(hwif, &hw); /* Q40 has a byte-swapped IDE interface */ diff --git a/drivers/ide/legacy/qd65xx.c b/drivers/ide/legacy/qd65xx.c index 51dba82f8812..2338f344ea24 100644 --- a/drivers/ide/legacy/qd65xx.c +++ b/drivers/ide/legacy/qd65xx.c @@ -207,6 +207,7 @@ static void qd6500_set_pio_mode(ide_drive_t *drive, const u8 pio) static void qd6580_set_pio_mode(ide_drive_t *drive, const u8 pio) { ide_hwif_t *hwif = drive->hwif; + struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio); unsigned int cycle_time; int active_time = 175; int recovery_time = 415; /* worst case values from the dos driver */ @@ -236,7 +237,7 @@ static void qd6580_set_pio_mode(ide_drive_t *drive, const u8 pio) active_time = 110; recovery_time = cycle_time - 120; } else { - active_time = ide_pio_timings[pio].active_time; + active_time = t->active; recovery_time = cycle_time - active_time; } } @@ -281,17 +282,18 @@ static int __init qd_testreg(int port) return (readreg != QD_TESTVAL); } -static void __init qd6500_port_init_devs(ide_hwif_t *hwif) +static void __init 
qd6500_init_dev(ide_drive_t *drive) { + ide_hwif_t *hwif = drive->hwif; u8 base = (hwif->config_data & 0xff00) >> 8; u8 config = QD_CONFIG(hwif); - hwif->drives[0].drive_data = QD6500_DEF_DATA; - hwif->drives[1].drive_data = QD6500_DEF_DATA; + drive->drive_data = QD6500_DEF_DATA; } -static void __init qd6580_port_init_devs(ide_hwif_t *hwif) +static void __init qd6580_init_dev(ide_drive_t *drive) { + ide_hwif_t *hwif = drive->hwif; u16 t1, t2; u8 base = (hwif->config_data & 0xff00) >> 8; u8 config = QD_CONFIG(hwif); @@ -302,18 +304,17 @@ static void __init qd6580_port_init_devs(ide_hwif_t *hwif) } else t2 = t1 = hwif->channel ? QD6580_DEF_DATA2 : QD6580_DEF_DATA; - hwif->drives[0].drive_data = t1; - hwif->drives[1].drive_data = t2; + drive->drive_data = drive->select.b.unit ? t2 : t1; } static const struct ide_port_ops qd6500_port_ops = { - .port_init_devs = qd6500_port_init_devs, + .init_dev = qd6500_init_dev, .set_pio_mode = qd6500_set_pio_mode, .selectproc = qd65xx_select, }; static const struct ide_port_ops qd6580_port_ops = { - .port_init_devs = qd6580_port_init_devs, + .init_dev = qd6580_init_dev, .set_pio_mode = qd6580_set_pio_mode, .selectproc = qd65xx_select, }; diff --git a/drivers/ide/mips/au1xxx-ide.c b/drivers/ide/mips/au1xxx-ide.c index 1a6c27b32498..48d57cae63c6 100644 --- a/drivers/ide/mips/au1xxx-ide.c +++ b/drivers/ide/mips/au1xxx-ide.c @@ -213,10 +213,8 @@ static int auide_build_dmatable(ide_drive_t *drive) { int i, iswrite, count = 0; ide_hwif_t *hwif = HWIF(drive); - struct request *rq = HWGROUP(drive)->rq; - - _auide_hwif *ahwif = (_auide_hwif*)hwif->hwif_data; + _auide_hwif *ahwif = &auide_hwif; struct scatterlist *sg; iswrite = (rq_data_dir(rq) == WRITE); @@ -402,7 +400,7 @@ static const struct ide_dma_ops au1xxx_dma_ops = { static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d) { - _auide_hwif *auide = (_auide_hwif *)hwif->hwif_data; + _auide_hwif *auide = &auide_hwif; dbdev_tab_t source_dev_tab, target_dev_tab; u32 dev_id, tsize, devwidth, flags; @@ -463,7 +461,7 @@ static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d) #else static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d) { - _auide_hwif *auide = (_auide_hwif *)hwif->hwif_data; + _auide_hwif *auide = &auide_hwif; dbdev_tab_t source_dev_tab; int flags; @@ -600,8 +598,6 @@ static int au_ide_probe(struct device *dev) ide_init_port_hw(hwif, &hw); - hwif->dev = dev; - /* If the user has selected DDMA assisted copies, then set up a few local I/O function entry points */ @@ -610,11 +606,8 @@ static int au_ide_probe(struct device *dev) hwif->input_data = au1xxx_input_data; hwif->output_data = au1xxx_output_data; #endif - hwif->select_data = 0; /* no chipset-specific code */ - hwif->config_data = 0; /* no chipset-specific code */ auide_hwif.hwif = hwif; - hwif->hwif_data = &auide_hwif; idx[0] = hwif->index; diff --git a/drivers/ide/mips/swarm.c b/drivers/ide/mips/swarm.c index 52fee3d2771a..9f1212cc4aed 100644 --- a/drivers/ide/mips/swarm.c +++ b/drivers/ide/mips/swarm.c @@ -61,6 +61,11 @@ static struct resource swarm_ide_resource = { static struct platform_device *swarm_ide_dev; +static const struct ide_port_info swarm_port_info = { + .name = DRV_NAME, + .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA, +}; + /* * swarm_ide_probe - if the board header indicates the existence of * Generic Bus IDE, allocate a HWIF for it. 
@@ -77,12 +82,6 @@ static int __devinit swarm_ide_probe(struct device *dev) if (!SIBYTE_HAVE_IDE) return -ENODEV; - hwif = ide_find_port(); - if (hwif == NULL) { - printk(KERN_ERR DRV_NAME ": no free slot for interface\n"); - return -ENOMEM; - } - base = ioremap(A_IO_EXT_BASE, 0x800); offset = __raw_readq(base + R_IO_EXT_REG(R_IO_EXT_START_ADDR, IDE_CS)); size = __raw_readq(base + R_IO_EXT_REG(R_IO_EXT_MULT_SIZE, IDE_CS)); @@ -109,10 +108,6 @@ static int __devinit swarm_ide_probe(struct device *dev) base = ioremap(offset, size); - /* Setup MMIO ops. */ - hwif->host_flags = IDE_HFLAG_MMIO; - default_hwif_mmiops(hwif); - for (i = 0; i <= 7; i++) hw.io_ports_array[i] = (unsigned long)(base + ((0x1f0 + i) << 5)); @@ -121,15 +116,26 @@ static int __devinit swarm_ide_probe(struct device *dev) hw.irq = K_INT_GB_IDE; hw.chipset = ide_generic; + hwif = ide_find_port_slot(&swarm_port_info); + if (hwif == NULL) + goto err; + ide_init_port_hw(hwif, &hw); + /* Setup MMIO ops. */ + default_hwif_mmiops(hwif); + idx[0] = hwif->index; - ide_device_add(idx, NULL); + ide_device_add(idx, &swarm_port_info); dev_set_drvdata(dev, hwif); return 0; +err: + release_resource(&swarm_ide_resource); + iounmap(base); + return -ENOMEM; } static struct device_driver swarm_ide_driver = { diff --git a/drivers/ide/pci/alim15x3.c b/drivers/ide/pci/alim15x3.c index f2de00adf147..80d19c0eb780 100644 --- a/drivers/ide/pci/alim15x3.c +++ b/drivers/ide/pci/alim15x3.c @@ -69,7 +69,8 @@ static void ali_set_pio_mode(ide_drive_t *drive, const u8 pio) { ide_hwif_t *hwif = HWIF(drive); struct pci_dev *dev = to_pci_dev(hwif->dev); - int s_time, a_time, c_time; + struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio); + int s_time = t->setup, a_time = t->active, c_time = t->cycle; u8 s_clc, a_clc, r_clc; unsigned long flags; int bus_speed = ide_pci_clk ? 
ide_pci_clk : 33; @@ -78,13 +79,10 @@ static void ali_set_pio_mode(ide_drive_t *drive, const u8 pio) u8 cd_dma_fifo = 0; int unit = drive->select.b.unit & 1; - s_time = ide_pio_timings[pio].setup_time; - a_time = ide_pio_timings[pio].active_time; if ((s_clc = (s_time * bus_speed + 999) / 1000) >= 8) s_clc = 0; if ((a_clc = (a_time * bus_speed + 999) / 1000) >= 8) a_clc = 0; - c_time = ide_pio_timings[pio].cycle_time; if (!(r_clc = (c_time * bus_speed + 999) / 1000 - a_clc - s_clc)) { r_clc = 1; diff --git a/drivers/ide/pci/amd74xx.c b/drivers/ide/pci/amd74xx.c index ad222206a429..0bfcdd0e77b3 100644 --- a/drivers/ide/pci/amd74xx.c +++ b/drivers/ide/pci/amd74xx.c @@ -21,8 +21,6 @@ #include <linux/init.h> #include <linux/ide.h> -#include "ide-timing.h" - enum { AMD_IDE_CONFIG = 0x41, AMD_CABLE_DETECT = 0x42, diff --git a/drivers/ide/pci/cmd640.c b/drivers/ide/pci/cmd640.c index cd1ba14984ab..1ad1e23e3105 100644 --- a/drivers/ide/pci/cmd640.c +++ b/drivers/ide/pci/cmd640.c @@ -521,6 +521,7 @@ static void program_drive_counts(ide_drive_t *drive, unsigned int index) static void cmd640_set_mode(ide_drive_t *drive, unsigned int index, u8 pio_mode, unsigned int cycle_time) { + struct ide_timing *t; int setup_time, active_time, recovery_time, clock_time; u8 setup_count, active_count, recovery_count, recovery_count2, cycle_count; int bus_speed; @@ -532,8 +533,11 @@ static void cmd640_set_mode(ide_drive_t *drive, unsigned int index, if (pio_mode > 5) pio_mode = 5; - setup_time = ide_pio_timings[pio_mode].setup_time; - active_time = ide_pio_timings[pio_mode].active_time; + + t = ide_timing_find_mode(XFER_PIO_0 + pio_mode); + setup_time = t->setup; + active_time = t->active; + recovery_time = cycle_time - (setup_time + active_time); clock_time = 1000 / bus_speed; cycle_count = DIV_ROUND_UP(cycle_time, clock_time); @@ -607,11 +611,40 @@ static void cmd640_set_pio_mode(ide_drive_t *drive, const u8 pio) display_clocks(index); } +#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */ + +static void cmd640_init_dev(ide_drive_t *drive) +{ + unsigned int i = drive->hwif->channel * 2 + drive->select.b.unit; + +#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED + /* + * Reset timing to the slowest speed and turn off prefetch. + * This way, the drive identify code has a better chance. + */ + setup_counts[i] = 4; /* max possible */ + active_counts[i] = 16; /* max possible */ + recovery_counts[i] = 16; /* max possible */ + program_drive_counts(drive, i); + set_prefetch_mode(drive, i, 0); + printk(KERN_INFO DRV_NAME ": drive%d timings/prefetch cleared\n", i); +#else + /* + * Set the drive unmask flags to match the prefetch setting. + */ + check_prefetch(drive, i); + printk(KERN_INFO DRV_NAME ": drive%d timings/prefetch(%s) preserved\n", + i, drive->no_io_32bit ? 
"off" : "on"); +#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */ +} + static const struct ide_port_ops cmd640_port_ops = { + .init_dev = cmd640_init_dev, +#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED .set_pio_mode = cmd640_set_pio_mode, +#endif }; -#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */ static int pci_conf1(void) { @@ -654,10 +687,8 @@ static const struct ide_port_info cmd640_port_info __initdata = { IDE_HFLAG_NO_DMA | IDE_HFLAG_ABUSE_PREFETCH | IDE_HFLAG_ABUSE_FAST_DEVSEL, -#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED .port_ops = &cmd640_port_ops, .pio_mask = ATA_PIO5, -#endif }; static int cmd640x_init_one(unsigned long base, unsigned long ctl) @@ -683,12 +714,8 @@ static int cmd640x_init_one(unsigned long base, unsigned long ctl) */ static int __init cmd640x_init(void) { -#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED - int second_port_toggled = 0; -#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */ int second_port_cmd640 = 0, rc; const char *bus_type, *port2; - unsigned int index; u8 b, cfr; u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; hw_regs_t hw[2]; @@ -774,88 +801,44 @@ static int __init cmd640x_init(void) put_cmd640_reg(CMDTIM, 0); put_cmd640_reg(BRST, 0x40); - cmd_hwif1 = ide_find_port(); + b = get_cmd640_reg(CNTRL); /* * Try to enable the secondary interface, if not already enabled */ - if (cmd_hwif1 && - cmd_hwif1->drives[0].noprobe && cmd_hwif1->drives[1].noprobe) { - port2 = "not probed"; + if (secondary_port_responding()) { + if ((b & CNTRL_ENA_2ND)) { + second_port_cmd640 = 1; + port2 = "okay"; + } else if (cmd640_vlb) { + second_port_cmd640 = 1; + port2 = "alive"; + } else + port2 = "not cmd640"; } else { - b = get_cmd640_reg(CNTRL); + put_cmd640_reg(CNTRL, b ^ CNTRL_ENA_2ND); /* toggle the bit */ if (secondary_port_responding()) { - if ((b & CNTRL_ENA_2ND)) { - second_port_cmd640 = 1; - port2 = "okay"; - } else if (cmd640_vlb) { - second_port_cmd640 = 1; - port2 = "alive"; - } else - port2 = "not cmd640"; + second_port_cmd640 = 1; + port2 = "enabled"; } else { - put_cmd640_reg(CNTRL, b ^ CNTRL_ENA_2ND); /* toggle the bit */ - if (secondary_port_responding()) { - second_port_cmd640 = 1; -#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED - second_port_toggled = 1; -#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */ - port2 = "enabled"; - } else { - put_cmd640_reg(CNTRL, b); /* restore original setting */ - port2 = "not responding"; - } + put_cmd640_reg(CNTRL, b); /* restore original setting */ + port2 = "not responding"; } } /* * Initialize data for secondary cmd640 port, if enabled */ - if (second_port_cmd640 && cmd_hwif1) { - ide_init_port_hw(cmd_hwif1, &hw[1]); - idx[1] = cmd_hwif1->index; + if (second_port_cmd640) { + cmd_hwif1 = ide_find_port(); + if (cmd_hwif1) { + ide_init_port_hw(cmd_hwif1, &hw[1]); + idx[1] = cmd_hwif1->index; + } } printk(KERN_INFO "cmd640: %sserialized, secondary interface %s\n", second_port_cmd640 ? "" : "not ", port2); - /* - * Establish initial timings/prefetch for all drives. - * Do not unnecessarily disturb any prior BIOS setup of these. - */ - for (index = 0; index < (2 + (second_port_cmd640 << 1)); index++) { - ide_drive_t *drive; - - if (index > 1) { - if (cmd_hwif1 == NULL) - continue; - drive = &cmd_hwif1->drives[index & 1]; - } else { - if (cmd_hwif0 == NULL) - continue; - drive = &cmd_hwif0->drives[index & 1]; - } - -#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED - /* - * Reset timing to the slowest speed and turn off prefetch. - * This way, the drive identify code has a better chance. 
- */ - setup_counts [index] = 4; /* max possible */ - active_counts [index] = 16; /* max possible */ - recovery_counts [index] = 16; /* max possible */ - program_drive_counts(drive, index); - set_prefetch_mode(drive, index, 0); - printk("cmd640: drive%d timings/prefetch cleared\n", index); -#else - /* - * Set the drive unmask flags to match the prefetch setting - */ - check_prefetch(drive, index); - printk("cmd640: drive%d timings/prefetch(%s) preserved\n", - index, drive->no_io_32bit ? "off" : "on"); -#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */ - } - #ifdef CMD640_DUMP_REGS cmd640_dump_regs(); #endif diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/pci/cmd64x.c index ca4774aa27ee..cfa784bacf48 100644 --- a/drivers/ide/pci/cmd64x.c +++ b/drivers/ide/pci/cmd64x.c @@ -116,6 +116,7 @@ static void cmd64x_tune_pio(ide_drive_t *drive, const u8 pio) { ide_hwif_t *hwif = HWIF(drive); struct pci_dev *dev = to_pci_dev(hwif->dev); + struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio); unsigned int cycle_time; u8 setup_count, arttim = 0; @@ -124,10 +125,9 @@ static void cmd64x_tune_pio(ide_drive_t *drive, const u8 pio) cycle_time = ide_pio_cycle_time(drive, pio); - program_cycle_times(drive, cycle_time, - ide_pio_timings[pio].active_time); + program_cycle_times(drive, cycle_time, t->active); - setup_count = quantize_timing(ide_pio_timings[pio].setup_time, + setup_count = quantize_timing(t->setup, 1000 / (ide_pci_clk ? ide_pci_clk : 33)); /* diff --git a/drivers/ide/pci/cs5535.c b/drivers/ide/pci/cs5535.c index 99fe91a191b8..dc97c48623f3 100644 --- a/drivers/ide/pci/cs5535.c +++ b/drivers/ide/pci/cs5535.c @@ -26,8 +26,6 @@ #include <linux/pci.h> #include <linux/ide.h> -#include "ide-timing.h" - #define MSR_ATAC_BASE 0x51300000 #define ATAC_GLD_MSR_CAP (MSR_ATAC_BASE+0) #define ATAC_GLD_MSR_CONFIG (MSR_ATAC_BASE+0x01) @@ -75,13 +73,11 @@ static unsigned int cs5535_udma_timings[5] = */ static void cs5535_set_speed(ide_drive_t *drive, const u8 speed) { - u32 reg = 0, dummy; int unit = drive->select.b.unit; - /* Set the PIO timings */ - if ((speed & XFER_MODE) == XFER_PIO) { + if (speed < XFER_SW_DMA_0) { ide_drive_t *pair = ide_get_paired_drive(drive); u8 cmd, pioa; diff --git a/drivers/ide/pci/cy82c693.c b/drivers/ide/pci/cy82c693.c index 8c534afcb6c8..e14ad5530fa4 100644 --- a/drivers/ide/pci/cy82c693.c +++ b/drivers/ide/pci/cy82c693.c @@ -133,6 +133,7 @@ static int calc_clk(int time, int bus_speed) */ static void compute_clocks(u8 pio, pio_clocks_t *p_pclk) { + struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio); int clk1, clk2; int bus_speed = ide_pci_clk ? 
ide_pci_clk : 33; @@ -141,15 +142,13 @@ static void compute_clocks(u8 pio, pio_clocks_t *p_pclk) */ /* let's calc the address setup time clocks */ - p_pclk->address_time = (u8)calc_clk(ide_pio_timings[pio].setup_time, bus_speed); + p_pclk->address_time = (u8)calc_clk(t->setup, bus_speed); /* let's calc the active and recovery time clocks */ - clk1 = calc_clk(ide_pio_timings[pio].active_time, bus_speed); + clk1 = calc_clk(t->active, bus_speed); /* calc recovery timing */ - clk2 = ide_pio_timings[pio].cycle_time - - ide_pio_timings[pio].active_time - - ide_pio_timings[pio].setup_time; + clk2 = t->cycle - t->active - t->setup; clk2 = calc_clk(clk2, bus_speed); diff --git a/drivers/ide/pci/delkin_cb.c b/drivers/ide/pci/delkin_cb.c index af0f30051d5a..0106e2a2df77 100644 --- a/drivers/ide/pci/delkin_cb.c +++ b/drivers/ide/pci/delkin_cb.c @@ -93,7 +93,6 @@ delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id) i = hwif->index; - ide_init_port_data(hwif, i); ide_init_port_hw(hwif, &hw); idx[0] = i; diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/pci/it821x.c index 6ab04115286b..cbf647202994 100644 --- a/drivers/ide/pci/it821x.c +++ b/drivers/ide/pci/it821x.c @@ -512,8 +512,14 @@ static void __devinit it821x_quirkproc(ide_drive_t *drive) } static struct ide_dma_ops it821x_pass_through_dma_ops = { + .dma_host_set = ide_dma_host_set, + .dma_setup = ide_dma_setup, + .dma_exec_cmd = ide_dma_exec_cmd, .dma_start = it821x_dma_start, .dma_end = it821x_dma_end, + .dma_test_irq = ide_dma_test_irq, + .dma_timeout = ide_dma_timeout, + .dma_lost_irq = ide_dma_lost_irq, }; /** diff --git a/drivers/ide/pci/scc_pata.c b/drivers/ide/pci/scc_pata.c index 1584ebb6a185..789c66dfbde5 100644 --- a/drivers/ide/pci/scc_pata.c +++ b/drivers/ide/pci/scc_pata.c @@ -558,12 +558,9 @@ static int scc_ide_setup_pci_device(struct pci_dev *dev, u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; int i; - hwif = ide_find_port(); - if (hwif == NULL) { - printk(KERN_ERR "%s: too many IDE interfaces, " - "no room in table\n", SCC_PATA_NAME); + hwif = ide_find_port_slot(d); + if (hwif == NULL) return -ENOMEM; - } memset(&hw, 0, sizeof(hw)); for (i = 0; i <= 8; i++) @@ -572,7 +569,6 @@ static int scc_ide_setup_pci_device(struct pci_dev *dev, hw.dev = &dev->dev; hw.chipset = ide_pci; ide_init_port_hw(hwif, &hw); - hwif->dev = &dev->dev; idx[0] = hwif->index; diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c index 24513e3dcd6b..c79ff5b41088 100644 --- a/drivers/ide/pci/sgiioc4.c +++ b/drivers/ide/pci/sgiioc4.c @@ -568,6 +568,7 @@ static const struct ide_dma_ops sgiioc4_dma_ops = { }; static const struct ide_port_info sgiioc4_port_info __devinitdata = { + .name = DRV_NAME, .chipset = ide_pci, .init_dma = ide_dma_sgiioc4, .port_ops = &sgiioc4_port_ops, @@ -587,13 +588,6 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev) hw_regs_t hw; struct ide_port_info d = sgiioc4_port_info; - hwif = ide_find_port(); - if (hwif == NULL) { - printk(KERN_ERR "%s: too many IDE interfaces, no room in table\n", - DRV_NAME); - return -ENOMEM; - } - /* Get the CmdBlk and CtrlBlk Base Registers */ bar0 = pci_resource_start(dev, 0); virt_base = ioremap(bar0, pci_resource_len(dev, 0)); @@ -608,11 +602,11 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev) cmd_phys_base = bar0 + IOC4_CMD_OFFSET; if (!request_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE, - hwif->name)) { + DRV_NAME)) { printk(KERN_ERR "%s : %s -- ERROR, Addresses " "0x%p to 0x%p ALREADY in use\n", - __func__, hwif->name, (void *) cmd_phys_base, + __func__, DRV_NAME, 
(void *) cmd_phys_base, (void *) cmd_phys_base + IOC4_CMD_CTL_BLK_SIZE); return -ENOMEM; } @@ -623,9 +617,12 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev) hw.irq = dev->irq; hw.chipset = ide_pci; hw.dev = &dev->dev; - ide_init_port_hw(hwif, &hw); - hwif->dev = &dev->dev; + hwif = ide_find_port_slot(&d); + if (hwif == NULL) + goto err; + + ide_init_port_hw(hwif, &hw); /* The IOC4 uses MMIO rather than Port IO. */ default_hwif_mmiops(hwif); @@ -641,6 +638,10 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev) return -EIO; return 0; +err: + release_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE); + iounmap(virt_base); + return -ENOMEM; } static unsigned int __devinit diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c index b75e9bb390a7..6e9d7655d89c 100644 --- a/drivers/ide/pci/siimage.c +++ b/drivers/ide/pci/siimage.c @@ -421,8 +421,7 @@ static int sil_sata_reset_poll(ide_drive_t *drive) if ((sata_stat & 0x03) != 0x03) { printk(KERN_WARNING "%s: reset phy dead, status=0x%08x\n", hwif->name, sata_stat); - HWGROUP(drive)->polling = 0; - return ide_started; + return -ENXIO; } } diff --git a/drivers/ide/pci/sis5513.c b/drivers/ide/pci/sis5513.c index e127eb25ab63..2389945ca95d 100644 --- a/drivers/ide/pci/sis5513.c +++ b/drivers/ide/pci/sis5513.c @@ -52,8 +52,6 @@ #include <linux/init.h> #include <linux/ide.h> -#include "ide-timing.h" - /* registers layout and init values are chipset family dependant */ #define ATA_16 0x01 @@ -616,7 +614,6 @@ MODULE_LICENSE("GPL"); /* * TODO: * - CLEANUP - * - Use drivers/ide/ide-timing.h ! * - More checks in the config registers (force values instead of * relying on the BIOS setting them correctly). * - Further optimisations ? diff --git a/drivers/ide/pci/sl82c105.c b/drivers/ide/pci/sl82c105.c index ce84fa045d39..6efbde297174 100644 --- a/drivers/ide/pci/sl82c105.c +++ b/drivers/ide/pci/sl82c105.c @@ -47,10 +47,11 @@ */ static unsigned int get_pio_timings(ide_drive_t *drive, u8 pio) { + struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio); unsigned int cmd_on, cmd_off; u8 iordy = 0; - cmd_on = (ide_pio_timings[pio].active_time + 29) / 30; + cmd_on = (t->active + 29) / 30; cmd_off = (ide_pio_cycle_time(drive, pio) - 30 * cmd_on + 29) / 30; if (cmd_on == 0) diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/pci/via82cxxx.c index 3ed9728abd24..e47384c70c40 100644 --- a/drivers/ide/pci/via82cxxx.c +++ b/drivers/ide/pci/via82cxxx.c @@ -35,8 +35,6 @@ #include <asm/processor.h> #endif -#include "ide-timing.h" - #define VIA_IDE_ENABLE 0x40 #define VIA_IDE_CONFIG 0x41 #define VIA_FIFO_CONFIG 0x43 diff --git a/drivers/ide/ppc/Makefile b/drivers/ide/ppc/Makefile index 65af5848b28c..74e52adcdf4b 100644 --- a/drivers/ide/ppc/Makefile +++ b/drivers/ide/ppc/Makefile @@ -1,3 +1,2 @@ obj-$(CONFIG_BLK_DEV_IDE_PMAC) += pmac.o -obj-$(CONFIG_BLK_DEV_MPC8xx_IDE) += mpc8xx.o diff --git a/drivers/ide/ppc/mpc8xx.c b/drivers/ide/ppc/mpc8xx.c deleted file mode 100644 index 236f9c38e519..000000000000 --- a/drivers/ide/ppc/mpc8xx.c +++ /dev/null @@ -1,851 +0,0 @@ -/* - * Copyright (C) 2000, 2001 Wolfgang Denk, wd@denx.de - * Modified for direct IDE interface - * by Thomas Lange, thomas@corelatus.com - * Modified for direct IDE interface on 8xx without using the PCMCIA - * controller - * by Steven.Scholz@imc-berlin.de - * Moved out of arch/ppc/kernel/m8xx_setup.c, other minor cleanups - * by Mathew Locke <mattl@mvista.com> - */ - -#include <linux/errno.h> -#include <linux/kernel.h> -#include <linux/mm.h> -#include <linux/stddef.h> -#include 
<linux/unistd.h> -#include <linux/ptrace.h> -#include <linux/slab.h> -#include <linux/user.h> -#include <linux/tty.h> -#include <linux/major.h> -#include <linux/interrupt.h> -#include <linux/reboot.h> -#include <linux/init.h> -#include <linux/ioport.h> -#include <linux/ide.h> -#include <linux/bootmem.h> - -#include <asm/mpc8xx.h> -#include <asm/mmu.h> -#include <asm/processor.h> -#include <asm/io.h> -#include <asm/pgtable.h> -#include <asm/ide.h> -#include <asm/8xx_immap.h> -#include <asm/machdep.h> -#include <asm/irq.h> - -#define DRV_NAME "ide-mpc8xx" - -static int identify (volatile u8 *p); -static void print_fixed (volatile u8 *p); -static void print_funcid (int func); -static int check_ide_device (unsigned long base); - -static void ide_interrupt_ack (void *dev); -static void m8xx_ide_set_pio_mode(ide_drive_t *drive, const u8 pio); - -typedef struct ide_ioport_desc { - unsigned long base_off; /* Offset to PCMCIA memory */ - unsigned long reg_off[IDE_NR_PORTS]; /* controller register offsets */ - int irq; /* IRQ */ -} ide_ioport_desc_t; - -ide_ioport_desc_t ioport_dsc[MAX_HWIFS] = { -#ifdef IDE0_BASE_OFFSET - { IDE0_BASE_OFFSET, - { - IDE0_DATA_REG_OFFSET, - IDE0_ERROR_REG_OFFSET, - IDE0_NSECTOR_REG_OFFSET, - IDE0_SECTOR_REG_OFFSET, - IDE0_LCYL_REG_OFFSET, - IDE0_HCYL_REG_OFFSET, - IDE0_SELECT_REG_OFFSET, - IDE0_STATUS_REG_OFFSET, - IDE0_CONTROL_REG_OFFSET, - IDE0_IRQ_REG_OFFSET, - }, - IDE0_INTERRUPT, - }, -#ifdef IDE1_BASE_OFFSET - { IDE1_BASE_OFFSET, - { - IDE1_DATA_REG_OFFSET, - IDE1_ERROR_REG_OFFSET, - IDE1_NSECTOR_REG_OFFSET, - IDE1_SECTOR_REG_OFFSET, - IDE1_LCYL_REG_OFFSET, - IDE1_HCYL_REG_OFFSET, - IDE1_SELECT_REG_OFFSET, - IDE1_STATUS_REG_OFFSET, - IDE1_CONTROL_REG_OFFSET, - IDE1_IRQ_REG_OFFSET, - }, - IDE1_INTERRUPT, - }, -#endif /* IDE1_BASE_OFFSET */ -#endif /* IDE0_BASE_OFFSET */ -}; - -ide_pio_timings_t ide_pio_clocks[6]; -int hold_time[6] = {30, 20, 15, 10, 10, 10 }; /* PIO Mode 5 with IORDY (nonstandard) */ - -/* - * Warning: only 1 (ONE) PCMCIA slot supported here, - * which must be correctly initialized by the firmware (PPCBoot). - */ -static int _slot_ = -1; /* will be read from PCMCIA registers */ - -/* Make clock cycles and always round up */ -#define PCMCIA_MK_CLKS( t, T ) (( (t) * ((T)/1000000) + 999U ) / 1000U ) - -#define M8XX_PCMCIA_CD2(slot) (0x10000000 >> (slot << 4)) -#define M8XX_PCMCIA_CD1(slot) (0x08000000 >> (slot << 4)) - -/* - * The TQM850L hardware has two pins swapped! Grrrrgh! 
- */ -#ifdef CONFIG_TQM850L -#define __MY_PCMCIA_GCRX_CXRESET PCMCIA_GCRX_CXOE -#define __MY_PCMCIA_GCRX_CXOE PCMCIA_GCRX_CXRESET -#else -#define __MY_PCMCIA_GCRX_CXRESET PCMCIA_GCRX_CXRESET -#define __MY_PCMCIA_GCRX_CXOE PCMCIA_GCRX_CXOE -#endif - -#if defined(CONFIG_BLK_DEV_MPC8xx_IDE) && defined(CONFIG_IDE_8xx_PCCARD) -#define PCMCIA_SCHLVL IDE0_INTERRUPT /* Status Change Interrupt Level */ -static int pcmcia_schlvl = PCMCIA_SCHLVL; -#endif - -/* - * See include/linux/ide.h for definition of hw_regs_t (p, base) - */ - -/* - * m8xx_ide_init_ports() for a direct IDE interface _using_ - * MPC8xx's internal PCMCIA interface - */ -#if defined(CONFIG_IDE_8xx_PCCARD) || defined(CONFIG_IDE_8xx_DIRECT) -static int __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port) -{ - unsigned long *p = hw->io_ports_array; - int i; - - typedef struct { - ulong br; - ulong or; - } pcmcia_win_t; - volatile pcmcia_win_t *win; - volatile pcmconf8xx_t *pcmp; - - uint *pgcrx; - u32 pcmcia_phy_base; - u32 pcmcia_phy_end; - static unsigned long pcmcia_base = 0; - unsigned long base; - - *p = 0; - - pcmp = (pcmconf8xx_t *)(&(((immap_t *)IMAP_ADDR)->im_pcmcia)); - - if (!pcmcia_base) { - /* - * Read out PCMCIA registers. Since the reset values - * are undefined, we sure hope that they have been - * set up by firmware - */ - - /* Scan all registers for valid settings */ - pcmcia_phy_base = 0xFFFFFFFF; - pcmcia_phy_end = 0; - /* br0 is start of brX and orX regs */ - win = (pcmcia_win_t *) \ - (&(((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pbr0)); - for (i = 0; i < 8; i++) { - if (win->or & 1) { /* This bank is marked as valid */ - if (win->br < pcmcia_phy_base) { - pcmcia_phy_base = win->br; - } - if ((win->br + PCMCIA_MEM_SIZE) > pcmcia_phy_end) { - pcmcia_phy_end = win->br + PCMCIA_MEM_SIZE; - } - /* Check which slot that has been defined */ - _slot_ = (win->or >> 2) & 1; - - } /* Valid bank */ - win++; - } /* for */ - - printk ("PCMCIA slot %c: phys mem %08x...%08x (size %08x)\n", - 'A' + _slot_, - pcmcia_phy_base, pcmcia_phy_end, - pcmcia_phy_end - pcmcia_phy_base); - - if (!request_mem_region(pcmcia_phy_base, - pcmcia_phy_end - pcmcia_phy_base, - DRV_NAME)) { - printk(KERN_ERR "%s: resources busy\n", DRV_NAME); - return -EBUSY; - } - - pcmcia_base=(unsigned long)ioremap(pcmcia_phy_base, - pcmcia_phy_end-pcmcia_phy_base); - -#ifdef DEBUG - printk ("PCMCIA virt base: %08lx\n", pcmcia_base); -#endif - /* Compute clock cycles for PIO timings */ - for (i=0; i<6; ++i) { - bd_t *binfo = (bd_t *)__res; - - hold_time[i] = - PCMCIA_MK_CLKS (hold_time[i], - binfo->bi_busfreq); - ide_pio_clocks[i].setup_time = - PCMCIA_MK_CLKS (ide_pio_timings[i].setup_time, - binfo->bi_busfreq); - ide_pio_clocks[i].active_time = - PCMCIA_MK_CLKS (ide_pio_timings[i].active_time, - binfo->bi_busfreq); - ide_pio_clocks[i].cycle_time = - PCMCIA_MK_CLKS (ide_pio_timings[i].cycle_time, - binfo->bi_busfreq); -#if 0 - printk ("PIO mode %d timings: %d/%d/%d => %d/%d/%d\n", - i, - ide_pio_clocks[i].setup_time, - ide_pio_clocks[i].active_time, - ide_pio_clocks[i].hold_time, - ide_pio_clocks[i].cycle_time, - ide_pio_timings[i].setup_time, - ide_pio_timings[i].active_time, - ide_pio_timings[i].hold_time, - ide_pio_timings[i].cycle_time); -#endif - } - } - - if (_slot_ == -1) { - printk ("PCMCIA slot has not been defined! 
Using A as default\n"); - _slot_ = 0; - } - -#ifdef CONFIG_IDE_8xx_PCCARD - -#ifdef DEBUG - printk ("PIPR = 0x%08X slot %c ==> mask = 0x%X\n", - pcmp->pcmc_pipr, - 'A' + _slot_, - M8XX_PCMCIA_CD1(_slot_) | M8XX_PCMCIA_CD2(_slot_) ); -#endif /* DEBUG */ - - if (pcmp->pcmc_pipr & (M8XX_PCMCIA_CD1(_slot_)|M8XX_PCMCIA_CD2(_slot_))) { - printk ("No card in slot %c: PIPR=%08x\n", - 'A' + _slot_, (u32) pcmp->pcmc_pipr); - return -ENODEV; /* No card in slot */ - } - - check_ide_device (pcmcia_base); - -#endif /* CONFIG_IDE_8xx_PCCARD */ - - base = pcmcia_base + ioport_dsc[data_port].base_off; -#ifdef DEBUG - printk ("base: %08x + %08x = %08x\n", - pcmcia_base, ioport_dsc[data_port].base_off, base); -#endif - - for (i = 0; i < IDE_NR_PORTS; ++i) { -#ifdef DEBUG - printk ("port[%d]: %08x + %08x = %08x\n", - i, - base, - ioport_dsc[data_port].reg_off[i], - i, base + ioport_dsc[data_port].reg_off[i]); -#endif - *p++ = base + ioport_dsc[data_port].reg_off[i]; - } - - hw->irq = ioport_dsc[data_port].irq; - hw->ack_intr = (ide_ack_intr_t *)ide_interrupt_ack; - -#ifdef CONFIG_IDE_8xx_PCCARD - { - unsigned int reg; - - if (_slot_) - pgcrx = &((immap_t *) IMAP_ADDR)->im_pcmcia.pcmc_pgcrb; - else - pgcrx = &((immap_t *) IMAP_ADDR)->im_pcmcia.pcmc_pgcra; - - reg = *pgcrx; - reg |= mk_int_int_mask (pcmcia_schlvl) << 24; - reg |= mk_int_int_mask (pcmcia_schlvl) << 16; - *pgcrx = reg; - } -#endif /* CONFIG_IDE_8xx_PCCARD */ - - /* Enable Harddisk Interrupt, - * and make it edge sensitive - */ - /* (11-18) Set edge detect for irq, no wakeup from low power mode */ - ((immap_t *)IMAP_ADDR)->im_siu_conf.sc_siel |= - (0x80000000 >> ioport_dsc[data_port].irq); - -#ifdef CONFIG_IDE_8xx_PCCARD - /* Make sure we don't get garbage irq */ - ((immap_t *) IMAP_ADDR)->im_pcmcia.pcmc_pscr = 0xFFFF; - - /* Enable falling edge irq */ - pcmp->pcmc_per = 0x100000 >> (16 * _slot_); -#endif /* CONFIG_IDE_8xx_PCCARD */ - - hw->chipset = ide_generic; - - return 0; -} -#endif /* CONFIG_IDE_8xx_PCCARD || CONFIG_IDE_8xx_DIRECT */ - -/* - * m8xx_ide_init_ports() for a direct IDE interface _not_ using - * MPC8xx's internal PCMCIA interface - */ -#if defined(CONFIG_IDE_EXT_DIRECT) -static int __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port) -{ - unsigned long *p = hw->io_ports_array; - int i; - - u32 ide_phy_base; - u32 ide_phy_end; - static unsigned long ide_base = 0; - unsigned long base; - - *p = 0; - - if (!ide_base) { - - /* TODO: - * - add code to read ORx, BRx - */ - ide_phy_base = CFG_ATA_BASE_ADDR; - ide_phy_end = CFG_ATA_BASE_ADDR + 0x200; - - printk ("IDE phys mem : %08x...%08x (size %08x)\n", - ide_phy_base, ide_phy_end, - ide_phy_end - ide_phy_base); - - if (!request_mem_region(ide_phy_base, 0x200, DRV_NAME)) { - printk(KERN_ERR "%s: resources busy\n", DRV_NAME); - return -EBUSY; - } - - ide_base=(unsigned long)ioremap(ide_phy_base, - ide_phy_end-ide_phy_base); - -#ifdef DEBUG - printk ("IDE virt base: %08lx\n", ide_base); -#endif - } - - base = ide_base + ioport_dsc[data_port].base_off; -#ifdef DEBUG - printk ("base: %08x + %08x = %08x\n", - ide_base, ioport_dsc[data_port].base_off, base); -#endif - - for (i = 0; i < IDE_NR_PORTS; ++i) { -#ifdef DEBUG - printk ("port[%d]: %08x + %08x = %08x\n", - i, - base, - ioport_dsc[data_port].reg_off[i], - i, base + ioport_dsc[data_port].reg_off[i]); -#endif - *p++ = base + ioport_dsc[data_port].reg_off[i]; - } - - /* direct connected IDE drive, i.e. 
external IRQ */ - hw->irq = ioport_dsc[data_port].irq; - hw->ack_intr = (ide_ack_intr_t *)ide_interrupt_ack; - - /* Enable Harddisk Interrupt, - * and make it edge sensitive - */ - /* (11-18) Set edge detect for irq, no wakeup from low power mode */ - ((immap_t *) IMAP_ADDR)->im_siu_conf.sc_siel |= - (0x80000000 >> ioport_dsc[data_port].irq); - - hw->chipset = ide_generic; - - return 0; -} -#endif /* CONFIG_IDE_8xx_DIRECT */ - - -/* -------------------------------------------------------------------- */ - - -/* PCMCIA Timing */ -#ifndef PCMCIA_SHT -#define PCMCIA_SHT(t) ((t & 0x0F)<<16) /* Strobe Hold Time */ -#define PCMCIA_SST(t) ((t & 0x0F)<<12) /* Strobe Setup Time */ -#define PCMCIA_SL(t) ((t==32) ? 0 : ((t & 0x1F)<<7)) /* Strobe Length */ -#endif - -/* Calculate PIO timings */ -static void m8xx_ide_set_pio_mode(ide_drive_t *drive, const u8 pio) -{ -#if defined(CONFIG_IDE_8xx_PCCARD) || defined(CONFIG_IDE_8xx_DIRECT) - volatile pcmconf8xx_t *pcmp; - ulong timing, mask, reg; - - pcmp = (pcmconf8xx_t *)(&(((immap_t *)IMAP_ADDR)->im_pcmcia)); - - mask = ~(PCMCIA_SHT(0xFF) | PCMCIA_SST(0xFF) | PCMCIA_SL(0xFF)); - - timing = PCMCIA_SHT(hold_time[pio] ) - | PCMCIA_SST(ide_pio_clocks[pio].setup_time ) - | PCMCIA_SL (ide_pio_clocks[pio].active_time) - ; - -#if 1 - printk ("Setting timing bits 0x%08lx in PCMCIA controller\n", timing); -#endif - if ((reg = pcmp->pcmc_por0 & mask) != 0) - pcmp->pcmc_por0 = reg | timing; - - if ((reg = pcmp->pcmc_por1 & mask) != 0) - pcmp->pcmc_por1 = reg | timing; - - if ((reg = pcmp->pcmc_por2 & mask) != 0) - pcmp->pcmc_por2 = reg | timing; - - if ((reg = pcmp->pcmc_por3 & mask) != 0) - pcmp->pcmc_por3 = reg | timing; - - if ((reg = pcmp->pcmc_por4 & mask) != 0) - pcmp->pcmc_por4 = reg | timing; - - if ((reg = pcmp->pcmc_por5 & mask) != 0) - pcmp->pcmc_por5 = reg | timing; - - if ((reg = pcmp->pcmc_por6 & mask) != 0) - pcmp->pcmc_por6 = reg | timing; - - if ((reg = pcmp->pcmc_por7 & mask) != 0) - pcmp->pcmc_por7 = reg | timing; - -#elif defined(CONFIG_IDE_EXT_DIRECT) - - printk("%s[%d] %s: not implemented yet!\n", - __FILE__, __LINE__, __func__); -#endif /* defined(CONFIG_IDE_8xx_PCCARD) || defined(CONFIG_IDE_8xx_PCMCIA */ -} - -static const struct ide_port_ops m8xx_port_ops = { - .set_pio_mode = m8xx_ide_set_pio_mode, -}; - -static void -ide_interrupt_ack (void *dev) -{ -#ifdef CONFIG_IDE_8xx_PCCARD - u_int pscr, pipr; - -#if (PCMCIA_SOCKETS_NO == 2) - u_int _slot_; -#endif - - /* get interrupt sources */ - - pscr = ((volatile immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pscr; - pipr = ((volatile immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pipr; - - /* - * report only if both card detect signals are the same - * not too nice done, - * we depend on that CD2 is the bit to the left of CD1... - */ - - if(_slot_==-1){ - printk("PCMCIA slot has not been defined! Using A as default\n"); - _slot_=0; - } - - if(((pipr & M8XX_PCMCIA_CD2(_slot_)) >> 1) ^ - (pipr & M8XX_PCMCIA_CD1(_slot_)) ) { - printk ("card detect interrupt\n"); - } - /* clear the interrupt sources */ - ((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pscr = pscr; - -#else /* ! CONFIG_IDE_8xx_PCCARD */ - /* - * Only CONFIG_IDE_8xx_PCCARD is using the interrupt of the - * MPC8xx's PCMCIA controller, so there is nothing to be done here - * for CONFIG_IDE_8xx_DIRECT and CONFIG_IDE_EXT_DIRECT. - * The interrupt is handled somewhere else. 
-- Steven - */ -#endif /* CONFIG_IDE_8xx_PCCARD */ -} - - - -/* - * CIS Tupel codes - */ -#define CISTPL_NULL 0x00 -#define CISTPL_DEVICE 0x01 -#define CISTPL_LONGLINK_CB 0x02 -#define CISTPL_INDIRECT 0x03 -#define CISTPL_CONFIG_CB 0x04 -#define CISTPL_CFTABLE_ENTRY_CB 0x05 -#define CISTPL_LONGLINK_MFC 0x06 -#define CISTPL_BAR 0x07 -#define CISTPL_PWR_MGMNT 0x08 -#define CISTPL_EXTDEVICE 0x09 -#define CISTPL_CHECKSUM 0x10 -#define CISTPL_LONGLINK_A 0x11 -#define CISTPL_LONGLINK_C 0x12 -#define CISTPL_LINKTARGET 0x13 -#define CISTPL_NO_LINK 0x14 -#define CISTPL_VERS_1 0x15 -#define CISTPL_ALTSTR 0x16 -#define CISTPL_DEVICE_A 0x17 -#define CISTPL_JEDEC_C 0x18 -#define CISTPL_JEDEC_A 0x19 -#define CISTPL_CONFIG 0x1a -#define CISTPL_CFTABLE_ENTRY 0x1b -#define CISTPL_DEVICE_OC 0x1c -#define CISTPL_DEVICE_OA 0x1d -#define CISTPL_DEVICE_GEO 0x1e -#define CISTPL_DEVICE_GEO_A 0x1f -#define CISTPL_MANFID 0x20 -#define CISTPL_FUNCID 0x21 -#define CISTPL_FUNCE 0x22 -#define CISTPL_SWIL 0x23 -#define CISTPL_END 0xff - -/* - * CIS Function ID codes - */ -#define CISTPL_FUNCID_MULTI 0x00 -#define CISTPL_FUNCID_MEMORY 0x01 -#define CISTPL_FUNCID_SERIAL 0x02 -#define CISTPL_FUNCID_PARALLEL 0x03 -#define CISTPL_FUNCID_FIXED 0x04 -#define CISTPL_FUNCID_VIDEO 0x05 -#define CISTPL_FUNCID_NETWORK 0x06 -#define CISTPL_FUNCID_AIMS 0x07 -#define CISTPL_FUNCID_SCSI 0x08 - -/* - * Fixed Disk FUNCE codes - */ -#define CISTPL_IDE_INTERFACE 0x01 - -#define CISTPL_FUNCE_IDE_IFACE 0x01 -#define CISTPL_FUNCE_IDE_MASTER 0x02 -#define CISTPL_FUNCE_IDE_SLAVE 0x03 - -/* First feature byte */ -#define CISTPL_IDE_SILICON 0x04 -#define CISTPL_IDE_UNIQUE 0x08 -#define CISTPL_IDE_DUAL 0x10 - -/* Second feature byte */ -#define CISTPL_IDE_HAS_SLEEP 0x01 -#define CISTPL_IDE_HAS_STANDBY 0x02 -#define CISTPL_IDE_HAS_IDLE 0x04 -#define CISTPL_IDE_LOW_POWER 0x08 -#define CISTPL_IDE_REG_INHIBIT 0x10 -#define CISTPL_IDE_HAS_INDEX 0x20 -#define CISTPL_IDE_IOIS16 0x40 - - -/* -------------------------------------------------------------------- */ - - -#define MAX_TUPEL_SZ 512 -#define MAX_FEATURES 4 - -static int check_ide_device (unsigned long base) -{ - volatile u8 *ident = NULL; - volatile u8 *feature_p[MAX_FEATURES]; - volatile u8 *p, *start; - int n_features = 0; - u8 func_id = ~0; - u8 code, len; - unsigned short config_base = 0; - int found = 0; - int i; - -#ifdef DEBUG - printk ("PCMCIA MEM: %08lX\n", base); -#endif - start = p = (volatile u8 *) base; - - while ((p - start) < MAX_TUPEL_SZ) { - - code = *p; p += 2; - - if (code == 0xFF) { /* End of chain */ - break; - } - - len = *p; p += 2; -#ifdef DEBUG_PCMCIA - { volatile u8 *q = p; - printk ("\nTuple code %02x length %d\n\tData:", - code, len); - - for (i = 0; i < len; ++i) { - printk (" %02x", *q); - q+= 2; - } - } -#endif /* DEBUG_PCMCIA */ - switch (code) { - case CISTPL_VERS_1: - ident = p + 4; - break; - case CISTPL_FUNCID: - func_id = *p; - break; - case CISTPL_FUNCE: - if (n_features < MAX_FEATURES) - feature_p[n_features++] = p; - break; - case CISTPL_CONFIG: - config_base = (*(p+6) << 8) + (*(p+4)); - default: - break; - } - p += 2 * len; - } - - found = identify (ident); - - if (func_id != ((u8)~0)) { - print_funcid (func_id); - - if (func_id == CISTPL_FUNCID_FIXED) - found = 1; - else - return (1); /* no disk drive */ - } - - for (i=0; i<n_features; ++i) { - print_fixed (feature_p[i]); - } - - if (!found) { - printk ("unknown card type\n"); - return (1); - } - - /* set level mode irq and I/O mapped device in config reg*/ - *((u8 *)(base + config_base)) = 0x41; - - 
return (0); -} - -/* ------------------------------------------------------------------------- */ - -static void print_funcid (int func) -{ - switch (func) { - case CISTPL_FUNCID_MULTI: - printk (" Multi-Function"); - break; - case CISTPL_FUNCID_MEMORY: - printk (" Memory"); - break; - case CISTPL_FUNCID_SERIAL: - printk (" Serial Port"); - break; - case CISTPL_FUNCID_PARALLEL: - printk (" Parallel Port"); - break; - case CISTPL_FUNCID_FIXED: - printk (" Fixed Disk"); - break; - case CISTPL_FUNCID_VIDEO: - printk (" Video Adapter"); - break; - case CISTPL_FUNCID_NETWORK: - printk (" Network Adapter"); - break; - case CISTPL_FUNCID_AIMS: - printk (" AIMS Card"); - break; - case CISTPL_FUNCID_SCSI: - printk (" SCSI Adapter"); - break; - default: - printk (" Unknown"); - break; - } - printk (" Card\n"); -} - -/* ------------------------------------------------------------------------- */ - -static void print_fixed (volatile u8 *p) -{ - if (p == NULL) - return; - - switch (*p) { - case CISTPL_FUNCE_IDE_IFACE: - { u8 iface = *(p+2); - - printk ((iface == CISTPL_IDE_INTERFACE) ? " IDE" : " unknown"); - printk (" interface "); - break; - } - case CISTPL_FUNCE_IDE_MASTER: - case CISTPL_FUNCE_IDE_SLAVE: - { u8 f1 = *(p+2); - u8 f2 = *(p+4); - - printk ((f1 & CISTPL_IDE_SILICON) ? " [silicon]" : " [rotating]"); - - if (f1 & CISTPL_IDE_UNIQUE) - printk (" [unique]"); - - printk ((f1 & CISTPL_IDE_DUAL) ? " [dual]" : " [single]"); - - if (f2 & CISTPL_IDE_HAS_SLEEP) - printk (" [sleep]"); - - if (f2 & CISTPL_IDE_HAS_STANDBY) - printk (" [standby]"); - - if (f2 & CISTPL_IDE_HAS_IDLE) - printk (" [idle]"); - - if (f2 & CISTPL_IDE_LOW_POWER) - printk (" [low power]"); - - if (f2 & CISTPL_IDE_REG_INHIBIT) - printk (" [reg inhibit]"); - - if (f2 & CISTPL_IDE_HAS_INDEX) - printk (" [index]"); - - if (f2 & CISTPL_IDE_IOIS16) - printk (" [IOis16]"); - - break; - } - } - printk ("\n"); -} - -/* ------------------------------------------------------------------------- */ - - -#define MAX_IDENT_CHARS 64 -#define MAX_IDENT_FIELDS 4 - -static u8 *known_cards[] = { - "ARGOSY PnPIDE D5", - NULL -}; - -static int identify (volatile u8 *p) -{ - u8 id_str[MAX_IDENT_CHARS]; - u8 data; - u8 *t; - u8 **card; - int i, done; - - if (p == NULL) - return (0); /* Don't know */ - - t = id_str; - done =0; - - for (i=0; i<=4 && !done; ++i, p+=2) { - while ((data = *p) != '\0') { - if (data == 0xFF) { - done = 1; - break; - } - *t++ = data; - if (t == &id_str[MAX_IDENT_CHARS-1]) { - done = 1; - break; - } - p += 2; - } - if (!done) - *t++ = ' '; - } - *t = '\0'; - while (--t > id_str) { - if (*t == ' ') - *t = '\0'; - else - break; - } - printk ("Card ID: %s\n", id_str); - - for (card=known_cards; *card; ++card) { - if (strcmp(*card, id_str) == 0) { /* found! 
*/ - return (1); - } - } - - return (0); /* don't know */ -} - -static int __init mpc8xx_ide_probe(void) -{ - hw_regs_t hw; - u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; - -#ifdef IDE0_BASE_OFFSET - memset(&hw, 0, sizeof(hw)); - if (!m8xx_ide_init_ports(&hw, 0)) { - ide_hwif_t *hwif = ide_find_port(); - - if (hwif) { - ide_init_port_hw(hwif, &hw); - hwif->pio_mask = ATA_PIO4; - hwif->port_ops = &m8xx_port_ops; - - idx[0] = hwif->index; - } - } -#ifdef IDE1_BASE_OFFSET - memset(&hw, 0, sizeof(hw)); - if (!m8xx_ide_init_ports(&hw, 1)) { - ide_hwif_t *mate = ide_find_port(); - - if (mate) { - ide_init_port_hw(mate, &hw); - mate->pio_mask = ATA_PIO4; - mate->port_ops = &m8xx_port_ops; - - idx[1] = mate->index; - } - } -#endif -#endif - - ide_device_add(idx, NULL); - - return 0; -} - -module_init(mpc8xx_ide_probe); - -MODULE_LICENSE("GPL"); diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c index dcb2c466bb97..93fb9067c043 100644 --- a/drivers/ide/ppc/pmac.c +++ b/drivers/ide/ppc/pmac.c @@ -5,7 +5,7 @@ * for doing DMA. * * Copyright (C) 1998-2003 Paul Mackerras & Ben. Herrenschmidt - * Copyright (C) 2007 Bartlomiej Zolnierkiewicz + * Copyright (C) 2007-2008 Bartlomiej Zolnierkiewicz * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -48,8 +48,6 @@ #include <asm/mediabay.h> #endif -#include "../ide-timing.h" - #undef IDE_PMAC_DEBUG #define DMA_WAIT_TIMEOUT 50 @@ -495,6 +493,7 @@ static void pmac_outbsync(ide_hwif_t *hwif, u8 value, unsigned long port) static void pmac_ide_set_pio_mode(ide_drive_t *drive, const u8 pio) { + struct ide_timing *tim = ide_timing_find_mode(XFER_PIO_0 + pio); u32 *timings, t; unsigned accessTicks, recTicks; unsigned accessTime, recTime; @@ -526,10 +525,9 @@ pmac_ide_set_pio_mode(ide_drive_t *drive, const u8 pio) } case controller_kl_ata4: /* 66Mhz cell */ - recTime = cycle_time - ide_pio_timings[pio].active_time - - ide_pio_timings[pio].setup_time; + recTime = cycle_time - tim->active - tim->setup; recTime = max(recTime, 150U); - accessTime = ide_pio_timings[pio].active_time; + accessTime = tim->active; accessTime = max(accessTime, 150U); accessTicks = SYSCLK_TICKS_66(accessTime); accessTicks = min(accessTicks, 0x1fU); @@ -542,10 +540,9 @@ pmac_ide_set_pio_mode(ide_drive_t *drive, const u8 pio) default: { /* 33Mhz cell */ int ebit = 0; - recTime = cycle_time - ide_pio_timings[pio].active_time - - ide_pio_timings[pio].setup_time; + recTime = cycle_time - tim->active - tim->setup; recTime = max(recTime, 150U); - accessTime = ide_pio_timings[pio].active_time; + accessTime = tim->active; accessTime = max(accessTime, 150U); accessTicks = SYSCLK_TICKS(accessTime); accessTicks = min(accessTicks, 0x1fU); @@ -1151,8 +1148,6 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match) base = ioremap(macio_resource_start(mdev, 0), 0x400); regbase = (unsigned long) base; - hwif->dev = &mdev->bus->pdev->dev; - pmif->mdev = mdev; pmif->node = mdev->ofdev.node; pmif->regbase = regbase; @@ -1174,7 +1169,8 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match) memset(&hw, 0, sizeof(hw)); pmac_ide_init_ports(&hw, pmif->regbase); hw.irq = irq; - hw.dev = &mdev->ofdev.dev; + hw.dev = &mdev->bus->pdev->dev; + hw.parent = &mdev->ofdev.dev; rc = pmac_ide_setup_device(pmif, hwif, &hw); if (rc != 0) { @@ -1274,7 +1270,6 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id) goto out_free_pmif; } - hwif->dev = &pdev->dev; pmif->mdev = 
NULL; pmif->node = np; diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c index abcfb1739d4d..65fc08b6b6d0 100644 --- a/drivers/ide/setup-pci.c +++ b/drivers/ide/setup-pci.c @@ -6,19 +6,15 @@ * May be copied or modified under the terms of the GNU General Public License */ -#include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/init.h> -#include <linux/timer.h> -#include <linux/mm.h> #include <linux/interrupt.h> #include <linux/ide.h> #include <linux/dma-mapping.h> #include <asm/io.h> -#include <asm/irq.h> /** * ide_setup_pci_baseregs - place a PCI IDE controller native @@ -319,25 +315,22 @@ static ide_hwif_t *ide_hwif_configure(struct pci_dev *dev, ctl = pci_resource_start(dev, 2*port+1); base = pci_resource_start(dev, 2*port); - if ((ctl && !base) || (base && !ctl)) { - printk(KERN_ERR "%s: inconsistent baseregs (BIOS) " - "for port %d, skipping\n", d->name, port); - return NULL; - } - } - if (!ctl) { + } else { /* Use default values */ ctl = port ? 0x374 : 0x3f4; base = port ? 0x170 : 0x1f0; } - hwif = ide_find_port_slot(d); - if (hwif == NULL) { - printk(KERN_ERR "%s: too many IDE interfaces, no room in " - "table\n", d->name); + if (!base || !ctl) { + printk(KERN_ERR "%s: bad PCI BARs for port %d, skipping\n", + d->name, port); return NULL; } + hwif = ide_find_port_slot(d); + if (hwif == NULL) + return NULL; + memset(&hw, 0, sizeof(hw)); hw.irq = irq; hw.dev = &dev->dev; @@ -346,8 +339,6 @@ static ide_hwif_t *ide_hwif_configure(struct pci_dev *dev, ide_init_port_hw(hwif, &hw); - hwif->dev = &dev->dev; - return hwif; } diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 356fac6d105a..5a1cf2580e16 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -71,6 +71,10 @@ #include "iscsi_iser.h" +static struct scsi_host_template iscsi_iser_sht; +static struct iscsi_transport iscsi_iser_transport; +static struct scsi_transport_template *iscsi_iser_scsi_transport; + static unsigned int iscsi_max_lun = 512; module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO); @@ -91,7 +95,6 @@ iscsi_iser_recv(struct iscsi_conn *conn, struct iscsi_hdr *hdr, char *rx_data, int rx_data_len) { int rc = 0; - uint32_t ret_itt; int datalen; int ahslen; @@ -107,12 +110,7 @@ iscsi_iser_recv(struct iscsi_conn *conn, /* read AHS */ ahslen = hdr->hlength * 4; - /* verify itt (itt encoding: age+cid+itt) */ - rc = iscsi_verify_itt(conn, hdr, &ret_itt); - - if (!rc) - rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len); - + rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len); if (rc && rc != ISCSI_ERR_NO_SCSI_CMD) goto error; @@ -123,25 +121,33 @@ error: /** - * iscsi_iser_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands + * iscsi_iser_task_init - Initialize task + * @task: iscsi task * - **/ + * Initialize the task for the scsi command or mgmt command. 
+ */ static int -iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask) +iscsi_iser_task_init(struct iscsi_task *task) { - struct iscsi_iser_conn *iser_conn = ctask->conn->dd_data; - struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data; + struct iscsi_iser_conn *iser_conn = task->conn->dd_data; + struct iscsi_iser_task *iser_task = task->dd_data; + + /* mgmt task */ + if (!task->sc) { + iser_task->desc.data = task->data; + return 0; + } - iser_ctask->command_sent = 0; - iser_ctask->iser_conn = iser_conn; - iser_ctask_rdma_init(iser_ctask); + iser_task->command_sent = 0; + iser_task->iser_conn = iser_conn; + iser_task_rdma_init(iser_task); return 0; } /** - * iscsi_mtask_xmit - xmit management(immediate) task + * iscsi_iser_mtask_xmit - xmit management(immediate) task * @conn: iscsi connection - * @mtask: task management task + * @task: task management task * * Notes: * The function can return -EAGAIN in which case caller must @@ -150,20 +156,19 @@ iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask) * **/ static int -iscsi_iser_mtask_xmit(struct iscsi_conn *conn, - struct iscsi_mgmt_task *mtask) +iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task) { int error = 0; - debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id, mtask->itt); + debug_scsi("task deq [cid %d itt 0x%x]\n", conn->id, task->itt); - error = iser_send_control(conn, mtask); + error = iser_send_control(conn, task); - /* since iser xmits control with zero copy, mtasks can not be recycled + /* since iser xmits control with zero copy, tasks can not be recycled * right after sending them. * The recycling scheme is based on whether a response is expected - * - if yes, the mtask is recycled at iscsi_complete_pdu - * - if no, the mtask is recycled at iser_snd_completion + * - if yes, the task is recycled at iscsi_complete_pdu + * - if no, the task is recycled at iser_snd_completion */ if (error && error != -ENOBUFS) iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); @@ -172,97 +177,86 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn, } static int -iscsi_iser_ctask_xmit_unsol_data(struct iscsi_conn *conn, - struct iscsi_cmd_task *ctask) +iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn, + struct iscsi_task *task) { struct iscsi_data hdr; int error = 0; /* Send data-out PDUs while there's still unsolicited data to send */ - while (ctask->unsol_count > 0) { - iscsi_prep_unsolicit_data_pdu(ctask, &hdr); + while (task->unsol_count > 0) { + iscsi_prep_unsolicit_data_pdu(task, &hdr); debug_scsi("Sending data-out: itt 0x%x, data count %d\n", - hdr.itt, ctask->data_count); + hdr.itt, task->data_count); /* the buffer description has been passed with the command */ /* Send the command */ - error = iser_send_data_out(conn, ctask, &hdr); + error = iser_send_data_out(conn, task, &hdr); if (error) { - ctask->unsol_datasn--; - goto iscsi_iser_ctask_xmit_unsol_data_exit; + task->unsol_datasn--; + goto iscsi_iser_task_xmit_unsol_data_exit; } - ctask->unsol_count -= ctask->data_count; + task->unsol_count -= task->data_count; debug_scsi("Need to send %d more as data-out PDUs\n", - ctask->unsol_count); + task->unsol_count); } -iscsi_iser_ctask_xmit_unsol_data_exit: +iscsi_iser_task_xmit_unsol_data_exit: return error; } static int -iscsi_iser_ctask_xmit(struct iscsi_conn *conn, - struct iscsi_cmd_task *ctask) +iscsi_iser_task_xmit(struct iscsi_task *task) { - struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data; + struct iscsi_conn *conn = task->conn; + struct iscsi_iser_task *iser_task = task->dd_data; int error = 0; - if 
(ctask->sc->sc_data_direction == DMA_TO_DEVICE) { - BUG_ON(scsi_bufflen(ctask->sc) == 0); + if (!task->sc) + return iscsi_iser_mtask_xmit(conn, task); + + if (task->sc->sc_data_direction == DMA_TO_DEVICE) { + BUG_ON(scsi_bufflen(task->sc) == 0); debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n", - ctask->itt, scsi_bufflen(ctask->sc), - ctask->imm_count, ctask->unsol_count); + task->itt, scsi_bufflen(task->sc), + task->imm_count, task->unsol_count); } - debug_scsi("ctask deq [cid %d itt 0x%x]\n", - conn->id, ctask->itt); + debug_scsi("task deq [cid %d itt 0x%x]\n", + conn->id, task->itt); /* Send the cmd PDU */ - if (!iser_ctask->command_sent) { - error = iser_send_command(conn, ctask); + if (!iser_task->command_sent) { + error = iser_send_command(conn, task); if (error) - goto iscsi_iser_ctask_xmit_exit; - iser_ctask->command_sent = 1; + goto iscsi_iser_task_xmit_exit; + iser_task->command_sent = 1; } /* Send unsolicited data-out PDU(s) if necessary */ - if (ctask->unsol_count) - error = iscsi_iser_ctask_xmit_unsol_data(conn, ctask); + if (task->unsol_count) + error = iscsi_iser_task_xmit_unsol_data(conn, task); - iscsi_iser_ctask_xmit_exit: + iscsi_iser_task_xmit_exit: if (error && error != -ENOBUFS) iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); return error; } static void -iscsi_iser_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) +iscsi_iser_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task) { - struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data; + struct iscsi_iser_task *iser_task = task->dd_data; - if (iser_ctask->status == ISER_TASK_STATUS_STARTED) { - iser_ctask->status = ISER_TASK_STATUS_COMPLETED; - iser_ctask_rdma_finalize(iser_ctask); - } -} - -static struct iser_conn * -iscsi_iser_ib_conn_lookup(__u64 ep_handle) -{ - struct iser_conn *ib_conn; - struct iser_conn *uib_conn = (struct iser_conn *)(unsigned long)ep_handle; + /* mgmt tasks do not need special cleanup */ + if (!task->sc) + return; - mutex_lock(&ig.connlist_mutex); - list_for_each_entry(ib_conn, &ig.connlist, conn_list) { - if (ib_conn == uib_conn) { - mutex_unlock(&ig.connlist_mutex); - return ib_conn; - } + if (iser_task->status == ISER_TASK_STATUS_STARTED) { + iser_task->status = ISER_TASK_STATUS_COMPLETED; + iser_task_rdma_finalize(iser_task); } - mutex_unlock(&ig.connlist_mutex); - iser_err("no conn exists for eph %llx\n",(unsigned long long)ep_handle); - return NULL; } static struct iscsi_cls_conn * @@ -272,7 +266,7 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx) struct iscsi_cls_conn *cls_conn; struct iscsi_iser_conn *iser_conn; - cls_conn = iscsi_conn_setup(cls_session, conn_idx); + cls_conn = iscsi_conn_setup(cls_session, sizeof(*iser_conn), conn_idx); if (!cls_conn) return NULL; conn = cls_conn->dd_data; @@ -283,21 +277,11 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx) */ conn->max_recv_dlength = 128; - iser_conn = kzalloc(sizeof(*iser_conn), GFP_KERNEL); - if (!iser_conn) - goto conn_alloc_fail; - - /* currently this is the only field which need to be initiated */ - rwlock_init(&iser_conn->lock); - + iser_conn = conn->dd_data; conn->dd_data = iser_conn; iser_conn->iscsi_conn = conn; return cls_conn; - -conn_alloc_fail: - iscsi_conn_teardown(cls_conn); - return NULL; } static void @@ -305,11 +289,18 @@ iscsi_iser_conn_destroy(struct iscsi_cls_conn *cls_conn) { struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_iser_conn *iser_conn = conn->dd_data; + struct iser_conn *ib_conn = 
iser_conn->ib_conn; iscsi_conn_teardown(cls_conn); - if (iser_conn->ib_conn) - iser_conn->ib_conn->iser_conn = NULL; - kfree(iser_conn); + /* + * Userspace will normally call the stop callback and + * already have freed the ib_conn, but if it goofed up then + * we free it here. + */ + if (ib_conn) { + ib_conn->iser_conn = NULL; + iser_conn_put(ib_conn); + } } static int @@ -320,6 +311,7 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session, struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_iser_conn *iser_conn; struct iser_conn *ib_conn; + struct iscsi_endpoint *ep; int error; error = iscsi_conn_bind(cls_session, cls_conn, is_leading); @@ -328,12 +320,14 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session, /* the transport ep handle comes from user space so it must be * verified against the global ib connections list */ - ib_conn = iscsi_iser_ib_conn_lookup(transport_eph); - if (!ib_conn) { + ep = iscsi_lookup_endpoint(transport_eph); + if (!ep) { iser_err("can't bind eph %llx\n", (unsigned long long)transport_eph); return -EINVAL; } + ib_conn = ep->dd_data; + /* binds the iSER connection retrieved from the previously * connected ep_handle to the iSCSI layer connection. exchanges * connection pointers */ @@ -341,10 +335,30 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session, iser_conn = conn->dd_data; ib_conn->iser_conn = iser_conn; iser_conn->ib_conn = ib_conn; + iser_conn_get(ib_conn); + return 0; +} - conn->recv_lock = &iser_conn->lock; +static void +iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + struct iscsi_iser_conn *iser_conn = conn->dd_data; + struct iser_conn *ib_conn = iser_conn->ib_conn; - return 0; + /* + * Userspace may have goofed up and not bound the connection or + * might have only partially setup the connection. + */ + if (ib_conn) { + iscsi_conn_stop(cls_conn, flag); + /* + * There is no unbind event so the stop callback + * must release the ref from the bind. 
+ */ + iser_conn_put(ib_conn); + } + iser_conn->ib_conn = NULL; } static int @@ -360,55 +374,75 @@ iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn) return iscsi_conn_start(cls_conn); } -static struct iscsi_transport iscsi_iser_transport; +static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session) +{ + struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); + + iscsi_host_remove(shost); + iscsi_host_free(shost); +} static struct iscsi_cls_session * -iscsi_iser_session_create(struct iscsi_transport *iscsit, - struct scsi_transport_template *scsit, - uint16_t cmds_max, uint16_t qdepth, - uint32_t initial_cmdsn, uint32_t *hostno) +iscsi_iser_session_create(struct iscsi_endpoint *ep, + uint16_t cmds_max, uint16_t qdepth, + uint32_t initial_cmdsn, uint32_t *hostno) { struct iscsi_cls_session *cls_session; struct iscsi_session *session; + struct Scsi_Host *shost; int i; - uint32_t hn; - struct iscsi_cmd_task *ctask; - struct iscsi_mgmt_task *mtask; - struct iscsi_iser_cmd_task *iser_ctask; - struct iser_desc *desc; + struct iscsi_task *task; + struct iscsi_iser_task *iser_task; + struct iser_conn *ib_conn; + + shost = iscsi_host_alloc(&iscsi_iser_sht, 0, ISCSI_MAX_CMD_PER_LUN); + if (!shost) + return NULL; + shost->transportt = iscsi_iser_scsi_transport; + shost->max_lun = iscsi_max_lun; + shost->max_id = 0; + shost->max_channel = 0; + shost->max_cmd_len = 16; + + /* + * older userspace tools (before 2.0-870) did not pass us + * the leading conn's ep so this will be NULL; + */ + if (ep) + ib_conn = ep->dd_data; + + if (iscsi_host_add(shost, + ep ? ib_conn->device->ib_device->dma_device : NULL)) + goto free_host; + *hostno = shost->host_no; /* * we do not support setting can_queue cmd_per_lun from userspace yet * because we preallocate so many resources */ - cls_session = iscsi_session_setup(iscsit, scsit, + cls_session = iscsi_session_setup(&iscsi_iser_transport, shost, ISCSI_DEF_XMIT_CMDS_MAX, - ISCSI_MAX_CMD_PER_LUN, - sizeof(struct iscsi_iser_cmd_task), - sizeof(struct iser_desc), - initial_cmdsn, &hn); + sizeof(struct iscsi_iser_task), + initial_cmdsn, 0); if (!cls_session) - return NULL; - - *hostno = hn; - session = class_to_transport_session(cls_session); + goto remove_host; + session = cls_session->dd_data; + shost->can_queue = session->scsi_cmds_max; /* libiscsi setup itts, data and pool so just set desc fields */ for (i = 0; i < session->cmds_max; i++) { - ctask = session->cmds[i]; - iser_ctask = ctask->dd_data; - ctask->hdr = (struct iscsi_cmd *)&iser_ctask->desc.iscsi_header; - ctask->hdr_max = sizeof(iser_ctask->desc.iscsi_header); - } - - for (i = 0; i < session->mgmtpool_max; i++) { - mtask = session->mgmt_cmds[i]; - desc = mtask->dd_data; - mtask->hdr = &desc->iscsi_header; - desc->data = mtask->data; + task = session->cmds[i]; + iser_task = task->dd_data; + task->hdr = (struct iscsi_cmd *)&iser_task->desc.iscsi_header; + task->hdr_max = sizeof(iser_task->desc.iscsi_header); } - return cls_session; + +remove_host: + iscsi_host_remove(shost); +free_host: + iscsi_host_free(shost); + return NULL; } static int @@ -481,34 +515,37 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s stats->custom[3].value = conn->fmr_unalign_cnt; } -static int -iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking, - __u64 *ep_handle) +static struct iscsi_endpoint * +iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking) { int err; struct iser_conn *ib_conn; + struct iscsi_endpoint *ep; - err = 
iser_conn_init(&ib_conn); - if (err) - goto out; + ep = iscsi_create_endpoint(sizeof(*ib_conn)); + if (!ep) + return ERR_PTR(-ENOMEM); - err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr, non_blocking); - if (!err) - *ep_handle = (__u64)(unsigned long)ib_conn; + ib_conn = ep->dd_data; + ib_conn->ep = ep; + iser_conn_init(ib_conn); -out: - return err; + err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr, + non_blocking); + if (err) { + iscsi_destroy_endpoint(ep); + return ERR_PTR(err); + } + return ep; } static int -iscsi_iser_ep_poll(__u64 ep_handle, int timeout_ms) +iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) { - struct iser_conn *ib_conn = iscsi_iser_ib_conn_lookup(ep_handle); + struct iser_conn *ib_conn; int rc; - if (!ib_conn) - return -EINVAL; - + ib_conn = ep->dd_data; rc = wait_event_interruptible_timeout(ib_conn->wait, ib_conn->state == ISER_CONN_UP, msecs_to_jiffies(timeout_ms)); @@ -530,13 +567,21 @@ iscsi_iser_ep_poll(__u64 ep_handle, int timeout_ms) } static void -iscsi_iser_ep_disconnect(__u64 ep_handle) +iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep) { struct iser_conn *ib_conn; - ib_conn = iscsi_iser_ib_conn_lookup(ep_handle); - if (!ib_conn) - return; + ib_conn = ep->dd_data; + if (ib_conn->iser_conn) + /* + * Must suspend xmit path if the ep is bound to the + * iscsi_conn, so we know we are not accessing the ib_conn + * when we free it. + * + * This may not be bound if the ep poll failed. + */ + iscsi_suspend_tx(ib_conn->iser_conn->iscsi_conn); + iser_err("ib conn %p state %d\n",ib_conn, ib_conn->state); iser_conn_terminate(ib_conn); @@ -547,7 +592,6 @@ static struct scsi_host_template iscsi_iser_sht = { .name = "iSCSI Initiator over iSER, v." DRV_VER, .queuecommand = iscsi_queuecommand, .change_queue_depth = iscsi_change_queue_depth, - .can_queue = ISCSI_DEF_XMIT_CMDS_MAX - 1, .sg_tablesize = ISCSI_ISER_SG_TABLESIZE, .max_sectors = 1024, .cmd_per_lun = ISCSI_MAX_CMD_PER_LUN, @@ -581,17 +625,14 @@ static struct iscsi_transport iscsi_iser_transport = { ISCSI_USERNAME | ISCSI_PASSWORD | ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN | ISCSI_FAST_ABORT | ISCSI_ABORT_TMO | - ISCSI_PING_TMO | ISCSI_RECV_TMO, + ISCSI_PING_TMO | ISCSI_RECV_TMO | + ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME, .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_NETDEV_NAME | ISCSI_HOST_INITIATOR_NAME, - .host_template = &iscsi_iser_sht, - .conndata_size = sizeof(struct iscsi_conn), - .max_lun = ISCSI_ISER_MAX_LUN, - .max_cmd_len = ISCSI_ISER_MAX_CMD_LEN, /* session management */ .create_session = iscsi_iser_session_create, - .destroy_session = iscsi_session_teardown, + .destroy_session = iscsi_iser_session_destroy, /* connection management */ .create_conn = iscsi_iser_conn_create, .bind_conn = iscsi_iser_conn_bind, @@ -600,17 +641,16 @@ static struct iscsi_transport iscsi_iser_transport = { .get_conn_param = iscsi_conn_get_param, .get_session_param = iscsi_session_get_param, .start_conn = iscsi_iser_conn_start, - .stop_conn = iscsi_conn_stop, + .stop_conn = iscsi_iser_conn_stop, /* iscsi host params */ .get_host_param = iscsi_host_get_param, .set_host_param = iscsi_host_set_param, /* IO */ .send_pdu = iscsi_conn_send_pdu, .get_stats = iscsi_iser_conn_get_stats, - .init_cmd_task = iscsi_iser_cmd_init, - .xmit_cmd_task = iscsi_iser_ctask_xmit, - .xmit_mgmt_task = iscsi_iser_mtask_xmit, - .cleanup_cmd_task = iscsi_iser_cleanup_ctask, + .init_task = iscsi_iser_task_init, + .xmit_task = iscsi_iser_task_xmit, + .cleanup_task = iscsi_iser_cleanup_task, /* recovery 
*/ .session_recovery_timedout = iscsi_session_recovery_timedout, @@ -630,8 +670,6 @@ static int __init iser_init(void) return -EINVAL; } - iscsi_iser_transport.max_lun = iscsi_max_lun; - memset(&ig, 0, sizeof(struct iser_global)); ig.desc_cache = kmem_cache_create("iser_descriptors", @@ -647,7 +685,9 @@ static int __init iser_init(void) mutex_init(&ig.connlist_mutex); INIT_LIST_HEAD(&ig.connlist); - if (!iscsi_register_transport(&iscsi_iser_transport)) { + iscsi_iser_scsi_transport = iscsi_register_transport( + &iscsi_iser_transport); + if (!iscsi_iser_scsi_transport) { iser_err("iscsi_register_transport failed\n"); err = -EINVAL; goto register_transport_failure; diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 0e10703cf59e..81a82628a5f1 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -94,7 +94,6 @@ /* support upto 512KB in one RDMA */ #define ISCSI_ISER_SG_TABLESIZE (0x80000 >> SHIFT_4K) #define ISCSI_ISER_MAX_LUN 256 -#define ISCSI_ISER_MAX_CMD_LEN 16 /* QP settings */ /* Maximal bounds on received asynchronous PDUs */ @@ -172,7 +171,8 @@ struct iser_data_buf { /* fwd declarations */ struct iser_device; struct iscsi_iser_conn; -struct iscsi_iser_cmd_task; +struct iscsi_iser_task; +struct iscsi_endpoint; struct iser_mem_reg { u32 lkey; @@ -196,7 +196,7 @@ struct iser_regd_buf { #define MAX_REGD_BUF_VECTOR_LEN 2 struct iser_dto { - struct iscsi_iser_cmd_task *ctask; + struct iscsi_iser_task *task; struct iser_conn *ib_conn; int notify_enable; @@ -240,7 +240,9 @@ struct iser_device { struct iser_conn { struct iscsi_iser_conn *iser_conn; /* iser conn for upcalls */ + struct iscsi_endpoint *ep; enum iser_ib_conn_state state; /* rdma connection state */ + atomic_t refcount; spinlock_t lock; /* used for state changes */ struct iser_device *device; /* device context */ struct rdma_cm_id *cma_id; /* CMA ID */ @@ -259,11 +261,9 @@ struct iser_conn { struct iscsi_iser_conn { struct iscsi_conn *iscsi_conn;/* ptr to iscsi conn */ struct iser_conn *ib_conn; /* iSER IB conn */ - - rwlock_t lock; }; -struct iscsi_iser_cmd_task { +struct iscsi_iser_task { struct iser_desc desc; struct iscsi_iser_conn *iser_conn; enum iser_task_status status; @@ -296,22 +296,26 @@ extern int iser_debug_level; /* allocate connection resources needed for rdma functionality */ int iser_conn_set_full_featured_mode(struct iscsi_conn *conn); -int iser_send_control(struct iscsi_conn *conn, - struct iscsi_mgmt_task *mtask); +int iser_send_control(struct iscsi_conn *conn, + struct iscsi_task *task); -int iser_send_command(struct iscsi_conn *conn, - struct iscsi_cmd_task *ctask); +int iser_send_command(struct iscsi_conn *conn, + struct iscsi_task *task); -int iser_send_data_out(struct iscsi_conn *conn, - struct iscsi_cmd_task *ctask, - struct iscsi_data *hdr); +int iser_send_data_out(struct iscsi_conn *conn, + struct iscsi_task *task, + struct iscsi_data *hdr); void iscsi_iser_recv(struct iscsi_conn *conn, struct iscsi_hdr *hdr, char *rx_data, int rx_data_len); -int iser_conn_init(struct iser_conn **ib_conn); +void iser_conn_init(struct iser_conn *ib_conn); + +void iser_conn_get(struct iser_conn *ib_conn); + +void iser_conn_put(struct iser_conn *ib_conn); void iser_conn_terminate(struct iser_conn *ib_conn); @@ -320,9 +324,9 @@ void iser_rcv_completion(struct iser_desc *desc, void iser_snd_completion(struct iser_desc *desc); -void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *ctask); +void iser_task_rdma_init(struct 
iscsi_iser_task *task); -void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *ctask); +void iser_task_rdma_finalize(struct iscsi_iser_task *task); void iser_dto_buffs_release(struct iser_dto *dto); @@ -332,10 +336,10 @@ void iser_reg_single(struct iser_device *device, struct iser_regd_buf *regd_buf, enum dma_data_direction direction); -void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *ctask, +void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *task, enum iser_data_dir cmd_dir); -int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *ctask, +int iser_reg_rdma_mem(struct iscsi_iser_task *task, enum iser_data_dir cmd_dir); int iser_connect(struct iser_conn *ib_conn, @@ -355,10 +359,10 @@ int iser_post_send(struct iser_desc *tx_desc); int iser_conn_state_comp(struct iser_conn *ib_conn, enum iser_ib_conn_state comp); -int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask, +int iser_dma_map_task_data(struct iscsi_iser_task *iser_task, struct iser_data_buf *data, enum iser_data_dir iser_dir, enum dma_data_direction dma_dir); -void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask); +void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task); #endif diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c index 31ad498bdc51..cdd283189047 100644 --- a/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/drivers/infiniband/ulp/iser/iser_initiator.c @@ -64,46 +64,46 @@ static void iser_dto_add_regd_buff(struct iser_dto *dto, /* Register user buffer memory and initialize passive rdma * dto descriptor. Total data size is stored in - * iser_ctask->data[ISER_DIR_IN].data_len + * iser_task->data[ISER_DIR_IN].data_len */ -static int iser_prepare_read_cmd(struct iscsi_cmd_task *ctask, +static int iser_prepare_read_cmd(struct iscsi_task *task, unsigned int edtl) { - struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data; + struct iscsi_iser_task *iser_task = task->dd_data; struct iser_regd_buf *regd_buf; int err; - struct iser_hdr *hdr = &iser_ctask->desc.iser_header; - struct iser_data_buf *buf_in = &iser_ctask->data[ISER_DIR_IN]; + struct iser_hdr *hdr = &iser_task->desc.iser_header; + struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN]; - err = iser_dma_map_task_data(iser_ctask, + err = iser_dma_map_task_data(iser_task, buf_in, ISER_DIR_IN, DMA_FROM_DEVICE); if (err) return err; - if (edtl > iser_ctask->data[ISER_DIR_IN].data_len) { + if (edtl > iser_task->data[ISER_DIR_IN].data_len) { iser_err("Total data length: %ld, less than EDTL: " "%d, in READ cmd BHS itt: %d, conn: 0x%p\n", - iser_ctask->data[ISER_DIR_IN].data_len, edtl, - ctask->itt, iser_ctask->iser_conn); + iser_task->data[ISER_DIR_IN].data_len, edtl, + task->itt, iser_task->iser_conn); return -EINVAL; } - err = iser_reg_rdma_mem(iser_ctask,ISER_DIR_IN); + err = iser_reg_rdma_mem(iser_task,ISER_DIR_IN); if (err) { iser_err("Failed to set up Data-IN RDMA\n"); return err; } - regd_buf = &iser_ctask->rdma_regd[ISER_DIR_IN]; + regd_buf = &iser_task->rdma_regd[ISER_DIR_IN]; hdr->flags |= ISER_RSV; hdr->read_stag = cpu_to_be32(regd_buf->reg.rkey); hdr->read_va = cpu_to_be64(regd_buf->reg.va); iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n", - ctask->itt, regd_buf->reg.rkey, + task->itt, regd_buf->reg.rkey, (unsigned long long)regd_buf->reg.va); return 0; @@ -111,43 +111,43 @@ static int iser_prepare_read_cmd(struct iscsi_cmd_task *ctask, /* Register user buffer memory and initialize passive rdma * dto descriptor. 
Total data size is stored in - * ctask->data[ISER_DIR_OUT].data_len + * task->data[ISER_DIR_OUT].data_len */ static int -iser_prepare_write_cmd(struct iscsi_cmd_task *ctask, +iser_prepare_write_cmd(struct iscsi_task *task, unsigned int imm_sz, unsigned int unsol_sz, unsigned int edtl) { - struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data; + struct iscsi_iser_task *iser_task = task->dd_data; struct iser_regd_buf *regd_buf; int err; - struct iser_dto *send_dto = &iser_ctask->desc.dto; - struct iser_hdr *hdr = &iser_ctask->desc.iser_header; - struct iser_data_buf *buf_out = &iser_ctask->data[ISER_DIR_OUT]; + struct iser_dto *send_dto = &iser_task->desc.dto; + struct iser_hdr *hdr = &iser_task->desc.iser_header; + struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT]; - err = iser_dma_map_task_data(iser_ctask, + err = iser_dma_map_task_data(iser_task, buf_out, ISER_DIR_OUT, DMA_TO_DEVICE); if (err) return err; - if (edtl > iser_ctask->data[ISER_DIR_OUT].data_len) { + if (edtl > iser_task->data[ISER_DIR_OUT].data_len) { iser_err("Total data length: %ld, less than EDTL: %d, " "in WRITE cmd BHS itt: %d, conn: 0x%p\n", - iser_ctask->data[ISER_DIR_OUT].data_len, - edtl, ctask->itt, ctask->conn); + iser_task->data[ISER_DIR_OUT].data_len, + edtl, task->itt, task->conn); return -EINVAL; } - err = iser_reg_rdma_mem(iser_ctask,ISER_DIR_OUT); + err = iser_reg_rdma_mem(iser_task,ISER_DIR_OUT); if (err != 0) { iser_err("Failed to register write cmd RDMA mem\n"); return err; } - regd_buf = &iser_ctask->rdma_regd[ISER_DIR_OUT]; + regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT]; if (unsol_sz < edtl) { hdr->flags |= ISER_WSV; @@ -156,13 +156,13 @@ iser_prepare_write_cmd(struct iscsi_cmd_task *ctask, iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X " "VA:%#llX + unsol:%d\n", - ctask->itt, regd_buf->reg.rkey, + task->itt, regd_buf->reg.rkey, (unsigned long long)regd_buf->reg.va, unsol_sz); } if (imm_sz > 0) { iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n", - ctask->itt, imm_sz); + task->itt, imm_sz); iser_dto_add_regd_buff(send_dto, regd_buf, 0, @@ -314,38 +314,38 @@ iser_check_xmit(struct iscsi_conn *conn, void *task) /** * iser_send_command - send command PDU */ -int iser_send_command(struct iscsi_conn *conn, - struct iscsi_cmd_task *ctask) +int iser_send_command(struct iscsi_conn *conn, + struct iscsi_task *task) { struct iscsi_iser_conn *iser_conn = conn->dd_data; - struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data; + struct iscsi_iser_task *iser_task = task->dd_data; struct iser_dto *send_dto = NULL; unsigned long edtl; int err = 0; struct iser_data_buf *data_buf; - struct iscsi_cmd *hdr = ctask->hdr; - struct scsi_cmnd *sc = ctask->sc; + struct iscsi_cmd *hdr = task->hdr; + struct scsi_cmnd *sc = task->sc; if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) { iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn); return -EPERM; } - if (iser_check_xmit(conn, ctask)) + if (iser_check_xmit(conn, task)) return -ENOBUFS; edtl = ntohl(hdr->data_length); /* build the tx desc regd header and add it to the tx desc dto */ - iser_ctask->desc.type = ISCSI_TX_SCSI_COMMAND; - send_dto = &iser_ctask->desc.dto; - send_dto->ctask = iser_ctask; - iser_create_send_desc(iser_conn, &iser_ctask->desc); + iser_task->desc.type = ISCSI_TX_SCSI_COMMAND; + send_dto = &iser_task->desc.dto; + send_dto->task = iser_task; + iser_create_send_desc(iser_conn, &iser_task->desc); if (hdr->flags & ISCSI_FLAG_CMD_READ) - data_buf = &iser_ctask->data[ISER_DIR_IN]; + data_buf = 
&iser_task->data[ISER_DIR_IN]; else - data_buf = &iser_ctask->data[ISER_DIR_OUT]; + data_buf = &iser_task->data[ISER_DIR_OUT]; if (scsi_sg_count(sc)) { /* using a scatter list */ data_buf->buf = scsi_sglist(sc); @@ -355,15 +355,15 @@ int iser_send_command(struct iscsi_conn *conn, data_buf->data_len = scsi_bufflen(sc); if (hdr->flags & ISCSI_FLAG_CMD_READ) { - err = iser_prepare_read_cmd(ctask, edtl); + err = iser_prepare_read_cmd(task, edtl); if (err) goto send_command_error; } if (hdr->flags & ISCSI_FLAG_CMD_WRITE) { - err = iser_prepare_write_cmd(ctask, - ctask->imm_count, - ctask->imm_count + - ctask->unsol_count, + err = iser_prepare_write_cmd(task, + task->imm_count, + task->imm_count + + task->unsol_count, edtl); if (err) goto send_command_error; @@ -378,27 +378,27 @@ int iser_send_command(struct iscsi_conn *conn, goto send_command_error; } - iser_ctask->status = ISER_TASK_STATUS_STARTED; + iser_task->status = ISER_TASK_STATUS_STARTED; - err = iser_post_send(&iser_ctask->desc); + err = iser_post_send(&iser_task->desc); if (!err) return 0; send_command_error: iser_dto_buffs_release(send_dto); - iser_err("conn %p failed ctask->itt %d err %d\n",conn, ctask->itt, err); + iser_err("conn %p failed task->itt %d err %d\n",conn, task->itt, err); return err; } /** * iser_send_data_out - send data out PDU */ -int iser_send_data_out(struct iscsi_conn *conn, - struct iscsi_cmd_task *ctask, +int iser_send_data_out(struct iscsi_conn *conn, + struct iscsi_task *task, struct iscsi_data *hdr) { struct iscsi_iser_conn *iser_conn = conn->dd_data; - struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data; + struct iscsi_iser_task *iser_task = task->dd_data; struct iser_desc *tx_desc = NULL; struct iser_dto *send_dto = NULL; unsigned long buf_offset; @@ -411,7 +411,7 @@ int iser_send_data_out(struct iscsi_conn *conn, return -EPERM; } - if (iser_check_xmit(conn, ctask)) + if (iser_check_xmit(conn, task)) return -ENOBUFS; itt = (__force uint32_t)hdr->itt; @@ -432,7 +432,7 @@ int iser_send_data_out(struct iscsi_conn *conn, /* build the tx desc regd header and add it to the tx desc dto */ send_dto = &tx_desc->dto; - send_dto->ctask = iser_ctask; + send_dto->task = iser_task; iser_create_send_desc(iser_conn, tx_desc); iser_reg_single(iser_conn->ib_conn->device, @@ -440,15 +440,15 @@ int iser_send_data_out(struct iscsi_conn *conn, /* all data was registered for RDMA, we can use the lkey */ iser_dto_add_regd_buff(send_dto, - &iser_ctask->rdma_regd[ISER_DIR_OUT], + &iser_task->rdma_regd[ISER_DIR_OUT], buf_offset, data_seg_len); - if (buf_offset + data_seg_len > iser_ctask->data[ISER_DIR_OUT].data_len) { + if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) { iser_err("Offset:%ld & DSL:%ld in Data-Out " "inconsistent with total len:%ld, itt:%d\n", buf_offset, data_seg_len, - iser_ctask->data[ISER_DIR_OUT].data_len, itt); + iser_task->data[ISER_DIR_OUT].data_len, itt); err = -EINVAL; goto send_data_out_error; } @@ -468,10 +468,11 @@ send_data_out_error: } int iser_send_control(struct iscsi_conn *conn, - struct iscsi_mgmt_task *mtask) + struct iscsi_task *task) { struct iscsi_iser_conn *iser_conn = conn->dd_data; - struct iser_desc *mdesc = mtask->dd_data; + struct iscsi_iser_task *iser_task = task->dd_data; + struct iser_desc *mdesc = &iser_task->desc; struct iser_dto *send_dto = NULL; unsigned long data_seg_len; int err = 0; @@ -483,27 +484,27 @@ int iser_send_control(struct iscsi_conn *conn, return -EPERM; } - if (iser_check_xmit(conn,mtask)) + if (iser_check_xmit(conn, task)) return -ENOBUFS; 
/* build the tx desc regd header and add it to the tx desc dto */ mdesc->type = ISCSI_TX_CONTROL; send_dto = &mdesc->dto; - send_dto->ctask = NULL; + send_dto->task = NULL; iser_create_send_desc(iser_conn, mdesc); device = iser_conn->ib_conn->device; iser_reg_single(device, send_dto->regd[0], DMA_TO_DEVICE); - data_seg_len = ntoh24(mtask->hdr->dlength); + data_seg_len = ntoh24(task->hdr->dlength); if (data_seg_len > 0) { regd_buf = &mdesc->data_regd_buf; memset(regd_buf, 0, sizeof(struct iser_regd_buf)); regd_buf->device = device; - regd_buf->virt_addr = mtask->data; - regd_buf->data_size = mtask->data_count; + regd_buf->virt_addr = task->data; + regd_buf->data_size = task->data_count; iser_reg_single(device, regd_buf, DMA_TO_DEVICE); iser_dto_add_regd_buff(send_dto, regd_buf, @@ -533,15 +534,13 @@ send_control_error: void iser_rcv_completion(struct iser_desc *rx_desc, unsigned long dto_xfer_len) { - struct iser_dto *dto = &rx_desc->dto; + struct iser_dto *dto = &rx_desc->dto; struct iscsi_iser_conn *conn = dto->ib_conn->iser_conn; - struct iscsi_session *session = conn->iscsi_conn->session; - struct iscsi_cmd_task *ctask; - struct iscsi_iser_cmd_task *iser_ctask; + struct iscsi_task *task; + struct iscsi_iser_task *iser_task; struct iscsi_hdr *hdr; char *rx_data = NULL; int rx_data_len = 0; - unsigned int itt; unsigned char opcode; hdr = &rx_desc->iscsi_header; @@ -557,19 +556,24 @@ void iser_rcv_completion(struct iser_desc *rx_desc, opcode = hdr->opcode & ISCSI_OPCODE_MASK; if (opcode == ISCSI_OP_SCSI_CMD_RSP) { - itt = get_itt(hdr->itt); /* mask out cid and age bits */ - if (!(itt < session->cmds_max)) + spin_lock(&conn->iscsi_conn->session->lock); + task = iscsi_itt_to_ctask(conn->iscsi_conn, hdr->itt); + if (task) + __iscsi_get_task(task); + spin_unlock(&conn->iscsi_conn->session->lock); + + if (!task) iser_err("itt can't be matched to task!!! 
" - "conn %p opcode %d cmds_max %d itt %d\n", - conn->iscsi_conn,opcode,session->cmds_max,itt); - /* use the mapping given with the cmds array indexed by itt */ - ctask = (struct iscsi_cmd_task *)session->cmds[itt]; - iser_ctask = ctask->dd_data; - iser_dbg("itt %d ctask %p\n",itt,ctask); - iser_ctask->status = ISER_TASK_STATUS_COMPLETED; - iser_ctask_rdma_finalize(iser_ctask); + "conn %p opcode %d itt %d\n", + conn->iscsi_conn, opcode, hdr->itt); + else { + iser_task = task->dd_data; + iser_dbg("itt %d task %p\n",hdr->itt, task); + iser_task->status = ISER_TASK_STATUS_COMPLETED; + iser_task_rdma_finalize(iser_task); + iscsi_put_task(task); + } } - iser_dto_buffs_release(dto); iscsi_iser_recv(conn->iscsi_conn, hdr, rx_data, rx_data_len); @@ -590,7 +594,7 @@ void iser_snd_completion(struct iser_desc *tx_desc) struct iser_conn *ib_conn = dto->ib_conn; struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn; struct iscsi_conn *conn = iser_conn->iscsi_conn; - struct iscsi_mgmt_task *mtask; + struct iscsi_task *task; int resume_tx = 0; iser_dbg("Initiator, Data sent dto=0x%p\n", dto); @@ -613,36 +617,31 @@ void iser_snd_completion(struct iser_desc *tx_desc) if (tx_desc->type == ISCSI_TX_CONTROL) { /* this arithmetic is legal by libiscsi dd_data allocation */ - mtask = (void *) ((long)(void *)tx_desc - - sizeof(struct iscsi_mgmt_task)); - if (mtask->hdr->itt == RESERVED_ITT) { - struct iscsi_session *session = conn->session; - - spin_lock(&conn->session->lock); - iscsi_free_mgmt_task(conn, mtask); - spin_unlock(&session->lock); - } + task = (void *) ((long)(void *)tx_desc - + sizeof(struct iscsi_task)); + if (task->hdr->itt == RESERVED_ITT) + iscsi_put_task(task); } } -void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *iser_ctask) +void iser_task_rdma_init(struct iscsi_iser_task *iser_task) { - iser_ctask->status = ISER_TASK_STATUS_INIT; + iser_task->status = ISER_TASK_STATUS_INIT; - iser_ctask->dir[ISER_DIR_IN] = 0; - iser_ctask->dir[ISER_DIR_OUT] = 0; + iser_task->dir[ISER_DIR_IN] = 0; + iser_task->dir[ISER_DIR_OUT] = 0; - iser_ctask->data[ISER_DIR_IN].data_len = 0; - iser_ctask->data[ISER_DIR_OUT].data_len = 0; + iser_task->data[ISER_DIR_IN].data_len = 0; + iser_task->data[ISER_DIR_OUT].data_len = 0; - memset(&iser_ctask->rdma_regd[ISER_DIR_IN], 0, + memset(&iser_task->rdma_regd[ISER_DIR_IN], 0, sizeof(struct iser_regd_buf)); - memset(&iser_ctask->rdma_regd[ISER_DIR_OUT], 0, + memset(&iser_task->rdma_regd[ISER_DIR_OUT], 0, sizeof(struct iser_regd_buf)); } -void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask) +void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task) { int deferred; int is_rdma_aligned = 1; @@ -651,17 +650,17 @@ void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask) /* if we were reading, copy back to unaligned sglist, * anyway dma_unmap and free the copy */ - if (iser_ctask->data_copy[ISER_DIR_IN].copy_buf != NULL) { + if (iser_task->data_copy[ISER_DIR_IN].copy_buf != NULL) { is_rdma_aligned = 0; - iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_IN); + iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_IN); } - if (iser_ctask->data_copy[ISER_DIR_OUT].copy_buf != NULL) { + if (iser_task->data_copy[ISER_DIR_OUT].copy_buf != NULL) { is_rdma_aligned = 0; - iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_OUT); + iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_OUT); } - if (iser_ctask->dir[ISER_DIR_IN]) { - regd = &iser_ctask->rdma_regd[ISER_DIR_IN]; + if (iser_task->dir[ISER_DIR_IN]) { + regd = 
&iser_task->rdma_regd[ISER_DIR_IN]; deferred = iser_regd_buff_release(regd); if (deferred) { iser_err("%d references remain for BUF-IN rdma reg\n", @@ -669,8 +668,8 @@ void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask) } } - if (iser_ctask->dir[ISER_DIR_OUT]) { - regd = &iser_ctask->rdma_regd[ISER_DIR_OUT]; + if (iser_task->dir[ISER_DIR_OUT]) { + regd = &iser_task->rdma_regd[ISER_DIR_OUT]; deferred = iser_regd_buff_release(regd); if (deferred) { iser_err("%d references remain for BUF-OUT rdma reg\n", @@ -680,7 +679,7 @@ void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask) /* if the data was unaligned, it was already unmapped and then copied */ if (is_rdma_aligned) - iser_dma_unmap_task_data(iser_ctask); + iser_dma_unmap_task_data(iser_task); } void iser_dto_buffs_release(struct iser_dto *dto) diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index 81e49cb10ed3..b9453d068e9d 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c @@ -99,13 +99,13 @@ void iser_reg_single(struct iser_device *device, /** * iser_start_rdma_unaligned_sg */ -static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask, +static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, enum iser_data_dir cmd_dir) { int dma_nents; struct ib_device *dev; char *mem = NULL; - struct iser_data_buf *data = &iser_ctask->data[cmd_dir]; + struct iser_data_buf *data = &iser_task->data[cmd_dir]; unsigned long cmd_data_len = data->data_len; if (cmd_data_len > ISER_KMALLOC_THRESHOLD) @@ -138,37 +138,37 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask, } } - sg_init_one(&iser_ctask->data_copy[cmd_dir].sg_single, mem, cmd_data_len); - iser_ctask->data_copy[cmd_dir].buf = - &iser_ctask->data_copy[cmd_dir].sg_single; - iser_ctask->data_copy[cmd_dir].size = 1; + sg_init_one(&iser_task->data_copy[cmd_dir].sg_single, mem, cmd_data_len); + iser_task->data_copy[cmd_dir].buf = + &iser_task->data_copy[cmd_dir].sg_single; + iser_task->data_copy[cmd_dir].size = 1; - iser_ctask->data_copy[cmd_dir].copy_buf = mem; + iser_task->data_copy[cmd_dir].copy_buf = mem; - dev = iser_ctask->iser_conn->ib_conn->device->ib_device; + dev = iser_task->iser_conn->ib_conn->device->ib_device; dma_nents = ib_dma_map_sg(dev, - &iser_ctask->data_copy[cmd_dir].sg_single, + &iser_task->data_copy[cmd_dir].sg_single, 1, (cmd_dir == ISER_DIR_OUT) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); BUG_ON(dma_nents == 0); - iser_ctask->data_copy[cmd_dir].dma_nents = dma_nents; + iser_task->data_copy[cmd_dir].dma_nents = dma_nents; return 0; } /** * iser_finalize_rdma_unaligned_sg */ -void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask, +void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, enum iser_data_dir cmd_dir) { struct ib_device *dev; struct iser_data_buf *mem_copy; unsigned long cmd_data_len; - dev = iser_ctask->iser_conn->ib_conn->device->ib_device; - mem_copy = &iser_ctask->data_copy[cmd_dir]; + dev = iser_task->iser_conn->ib_conn->device->ib_device; + mem_copy = &iser_task->data_copy[cmd_dir]; ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1, (cmd_dir == ISER_DIR_OUT) ? 
@@ -184,8 +184,8 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask, /* copy back read RDMA to unaligned sg */ mem = mem_copy->copy_buf; - sgl = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf; - sg_size = iser_ctask->data[ISER_DIR_IN].size; + sgl = (struct scatterlist *)iser_task->data[ISER_DIR_IN].buf; + sg_size = iser_task->data[ISER_DIR_IN].size; p = mem; for_each_sg(sgl, sg, sg_size, i) { @@ -198,7 +198,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask, } } - cmd_data_len = iser_ctask->data[cmd_dir].data_len; + cmd_data_len = iser_task->data[cmd_dir].data_len; if (cmd_data_len > ISER_KMALLOC_THRESHOLD) free_pages((unsigned long)mem_copy->copy_buf, @@ -376,15 +376,15 @@ static void iser_page_vec_build(struct iser_data_buf *data, } } -int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask, - struct iser_data_buf *data, - enum iser_data_dir iser_dir, - enum dma_data_direction dma_dir) +int iser_dma_map_task_data(struct iscsi_iser_task *iser_task, + struct iser_data_buf *data, + enum iser_data_dir iser_dir, + enum dma_data_direction dma_dir) { struct ib_device *dev; - iser_ctask->dir[iser_dir] = 1; - dev = iser_ctask->iser_conn->ib_conn->device->ib_device; + iser_task->dir[iser_dir] = 1; + dev = iser_task->iser_conn->ib_conn->device->ib_device; data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir); if (data->dma_nents == 0) { @@ -394,20 +394,20 @@ int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask, return 0; } -void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask) +void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task) { struct ib_device *dev; struct iser_data_buf *data; - dev = iser_ctask->iser_conn->ib_conn->device->ib_device; + dev = iser_task->iser_conn->ib_conn->device->ib_device; - if (iser_ctask->dir[ISER_DIR_IN]) { - data = &iser_ctask->data[ISER_DIR_IN]; + if (iser_task->dir[ISER_DIR_IN]) { + data = &iser_task->data[ISER_DIR_IN]; ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE); } - if (iser_ctask->dir[ISER_DIR_OUT]) { - data = &iser_ctask->data[ISER_DIR_OUT]; + if (iser_task->dir[ISER_DIR_OUT]) { + data = &iser_task->data[ISER_DIR_OUT]; ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE); } } @@ -418,21 +418,21 @@ void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask) * * returns 0 on success, errno code on failure */ -int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask, +int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task, enum iser_data_dir cmd_dir) { - struct iscsi_conn *iscsi_conn = iser_ctask->iser_conn->iscsi_conn; - struct iser_conn *ib_conn = iser_ctask->iser_conn->ib_conn; + struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn; + struct iser_conn *ib_conn = iser_task->iser_conn->ib_conn; struct iser_device *device = ib_conn->device; struct ib_device *ibdev = device->ib_device; - struct iser_data_buf *mem = &iser_ctask->data[cmd_dir]; + struct iser_data_buf *mem = &iser_task->data[cmd_dir]; struct iser_regd_buf *regd_buf; int aligned_len; int err; int i; struct scatterlist *sg; - regd_buf = &iser_ctask->rdma_regd[cmd_dir]; + regd_buf = &iser_task->rdma_regd[cmd_dir]; aligned_len = iser_data_buf_aligned_len(mem, ibdev); if (aligned_len != mem->dma_nents) { @@ -442,13 +442,13 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask, iser_data_buf_dump(mem, ibdev); /* unmap the command data before accessing it */ - iser_dma_unmap_task_data(iser_ctask); + 
iser_dma_unmap_task_data(iser_task); /* allocate copy buf, if we are writing, copy the */ /* unaligned scatterlist, dma map the copy */ - if (iser_start_rdma_unaligned_sg(iser_ctask, cmd_dir) != 0) + if (iser_start_rdma_unaligned_sg(iser_task, cmd_dir) != 0) return -ENOMEM; - mem = &iser_ctask->data_copy[cmd_dir]; + mem = &iser_task->data_copy[cmd_dir]; } /* if there a single dma entry, FMR is not needed */ @@ -472,8 +472,9 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask, err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, ®d_buf->reg); if (err) { iser_data_buf_dump(mem, ibdev); - iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents, - ntoh24(iser_ctask->desc.iscsi_header.dlength)); + iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", + mem->dma_nents, + ntoh24(iser_task->desc.iscsi_header.dlength)); iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n", ib_conn->page_vec->data_size, ib_conn->page_vec->length, ib_conn->page_vec->offset); diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index 77cabee7cc08..3a917c1f796f 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -323,7 +323,18 @@ static void iser_conn_release(struct iser_conn *ib_conn) iser_device_try_release(device); if (ib_conn->iser_conn) ib_conn->iser_conn->ib_conn = NULL; - kfree(ib_conn); + iscsi_destroy_endpoint(ib_conn->ep); +} + +void iser_conn_get(struct iser_conn *ib_conn) +{ + atomic_inc(&ib_conn->refcount); +} + +void iser_conn_put(struct iser_conn *ib_conn) +{ + if (atomic_dec_and_test(&ib_conn->refcount)) + iser_conn_release(ib_conn); } /** @@ -347,7 +358,7 @@ void iser_conn_terminate(struct iser_conn *ib_conn) wait_event_interruptible(ib_conn->wait, ib_conn->state == ISER_CONN_DOWN); - iser_conn_release(ib_conn); + iser_conn_put(ib_conn); } static void iser_connect_error(struct rdma_cm_id *cma_id) @@ -481,24 +492,15 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve return ret; } -int iser_conn_init(struct iser_conn **ibconn) +void iser_conn_init(struct iser_conn *ib_conn) { - struct iser_conn *ib_conn; - - ib_conn = kzalloc(sizeof *ib_conn, GFP_KERNEL); - if (!ib_conn) { - iser_err("can't alloc memory for struct iser_conn\n"); - return -ENOMEM; - } ib_conn->state = ISER_CONN_INIT; init_waitqueue_head(&ib_conn->wait); atomic_set(&ib_conn->post_recv_buf_count, 0); atomic_set(&ib_conn->post_send_buf_count, 0); + atomic_set(&ib_conn->refcount, 1); INIT_LIST_HEAD(&ib_conn->conn_list); spin_lock_init(&ib_conn->lock); - - *ibconn = ib_conn; - return 0; } /** diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c index 40c70ba62bf0..e5d446804d32 100644 --- a/drivers/macintosh/adb.c +++ b/drivers/macintosh/adb.c @@ -46,7 +46,6 @@ #endif -EXPORT_SYMBOL(adb_controller); EXPORT_SYMBOL(adb_client_list); extern struct adb_driver via_macii_driver; @@ -80,7 +79,7 @@ static struct adb_driver *adb_driver_list[] = { static struct class *adb_dev_class; -struct adb_driver *adb_controller; +static struct adb_driver *adb_controller; BLOCKING_NOTIFIER_HEAD(adb_client_list); static int adb_got_sleep; static int adb_inited; @@ -290,7 +289,7 @@ static int adb_resume(struct platform_device *dev) } #endif /* CONFIG_PM */ -int __init adb_init(void) +static int __init adb_init(void) { struct adb_driver *driver; int i; diff --git a/drivers/macintosh/adbhid.c b/drivers/macintosh/adbhid.c index ef4c117ea35f..59ea520a5d7a 100644 --- a/drivers/macintosh/adbhid.c +++ 
b/drivers/macintosh/adbhid.c @@ -75,7 +75,7 @@ static struct notifier_block adbhid_adb_notifier = { #define ADB_KEY_POWER_OLD 0x7e #define ADB_KEY_POWER 0x7f -u16 adb_to_linux_keycodes[128] = { +static const u16 adb_to_linux_keycodes[128] = { /* 0x00 */ KEY_A, /* 30 */ /* 0x01 */ KEY_S, /* 31 */ /* 0x02 */ KEY_D, /* 32 */ diff --git a/drivers/macintosh/macio_sysfs.c b/drivers/macintosh/macio_sysfs.c index 112e5ef728f1..9e9453b58425 100644 --- a/drivers/macintosh/macio_sysfs.c +++ b/drivers/macintosh/macio_sysfs.c @@ -44,7 +44,7 @@ static ssize_t modalias_show (struct device *dev, struct device_attribute *attr, struct of_device *ofdev = to_of_device(dev); int len; - len = of_device_get_modalias(ofdev, buf, PAGE_SIZE); + len = of_device_get_modalias(ofdev, buf, PAGE_SIZE - 2); buf[len] = '\n'; buf[len+1] = 0; @@ -52,6 +52,15 @@ static ssize_t modalias_show (struct device *dev, struct device_attribute *attr, return len+1; } +static ssize_t devspec_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct of_device *ofdev; + + ofdev = to_of_device(dev); + return sprintf(buf, "%s\n", ofdev->node->full_name); +} + macio_config_of_attr (name, "%s\n"); macio_config_of_attr (type, "%s\n"); @@ -60,5 +69,6 @@ struct device_attribute macio_dev_attrs[] = { __ATTR_RO(type), __ATTR_RO(compatible), __ATTR_RO(modalias), + __ATTR_RO(devspec), __ATTR_NULL }; diff --git a/drivers/macintosh/mediabay.c b/drivers/macintosh/mediabay.c index 818aba368541..b1e5b4705250 100644 --- a/drivers/macintosh/mediabay.c +++ b/drivers/macintosh/mediabay.c @@ -21,6 +21,7 @@ #include <linux/init.h> #include <linux/ide.h> #include <linux/kthread.h> +#include <linux/mutex.h> #include <asm/prom.h> #include <asm/pgtable.h> #include <asm/io.h> @@ -77,7 +78,7 @@ struct media_bay_info { int index; int cached_gpio; int sleeping; - struct semaphore lock; + struct mutex lock; #ifdef CONFIG_BLK_DEV_IDE_PMAC ide_hwif_t *cd_port; void __iomem *cd_base; @@ -459,27 +460,27 @@ int media_bay_set_ide_infos(struct device_node* which_bay, unsigned long base, if (bay->mdev && which_bay == bay->mdev->ofdev.node) { int timeout = 5000, index = hwif->index; - down(&bay->lock); + mutex_lock(&bay->lock); bay->cd_port = hwif; bay->cd_base = (void __iomem *) base; bay->cd_irq = irq; if ((MB_CD != bay->content_id) || bay->state != mb_up) { - up(&bay->lock); + mutex_unlock(&bay->lock); return 0; } printk(KERN_DEBUG "Registered ide%d for media bay %d\n", index, i); do { if (MB_IDE_READY(i)) { bay->cd_index = index; - up(&bay->lock); + mutex_unlock(&bay->lock); return 0; } mdelay(1); } while(--timeout); printk(KERN_DEBUG "Timeount waiting IDE in bay %d\n", i); - up(&bay->lock); + mutex_unlock(&bay->lock); return -ENODEV; } } @@ -617,10 +618,10 @@ static int media_bay_task(void *x) while (!kthread_should_stop()) { for (i = 0; i < media_bay_count; ++i) { - down(&media_bays[i].lock); + mutex_lock(&media_bays[i].lock); if (!media_bays[i].sleeping) media_bay_step(i); - up(&media_bays[i].lock); + mutex_unlock(&media_bays[i].lock); } msleep_interruptible(MB_POLL_DELAY); @@ -660,7 +661,7 @@ static int __devinit media_bay_attach(struct macio_dev *mdev, const struct of_de bay->index = i; bay->ops = match->data; bay->sleeping = 0; - init_MUTEX(&bay->lock); + mutex_init(&bay->lock); /* Init HW probing */ if (bay->ops->init) @@ -698,10 +699,10 @@ static int media_bay_suspend(struct macio_dev *mdev, pm_message_t state) if (state.event != mdev->ofdev.dev.power.power_state.event && (state.event & PM_EVENT_SLEEP)) { - down(&bay->lock); + 
mutex_lock(&bay->lock); bay->sleeping = 1; set_mb_power(bay, 0); - up(&bay->lock); + mutex_unlock(&bay->lock); msleep(MB_POLL_DELAY); mdev->ofdev.dev.power.power_state = state; } @@ -720,12 +721,12 @@ static int media_bay_resume(struct macio_dev *mdev) they seem to help the 3400 get it right. */ /* Force MB power to 0 */ - down(&bay->lock); + mutex_lock(&bay->lock); set_mb_power(bay, 0); msleep(MB_POWER_DELAY); if (bay->ops->content(bay) != bay->content_id) { printk("mediabay%d: content changed during sleep...\n", bay->index); - up(&bay->lock); + mutex_unlock(&bay->lock); return 0; } set_mb_power(bay, 1); @@ -741,7 +742,7 @@ static int media_bay_resume(struct macio_dev *mdev) } while((bay->state != mb_empty) && (bay->state != mb_up)); bay->sleeping = 0; - up(&bay->lock); + mutex_unlock(&bay->lock); } return 0; } diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c index 32cb0298f88e..96faa799b82a 100644 --- a/drivers/macintosh/smu.c +++ b/drivers/macintosh/smu.c @@ -36,6 +36,8 @@ #include <linux/sysdev.h> #include <linux/poll.h> #include <linux/mutex.h> +#include <linux/of_device.h> +#include <linux/of_platform.h> #include <asm/byteorder.h> #include <asm/io.h> @@ -46,8 +48,6 @@ #include <asm/sections.h> #include <asm/abs_addr.h> #include <asm/uaccess.h> -#include <asm/of_device.h> -#include <asm/of_platform.h> #define VERSION "0.7" #define AUTHOR "(c) 2005 Benjamin Herrenschmidt, IBM Corp." @@ -475,6 +475,7 @@ int __init smu_init (void) { struct device_node *np; const u32 *data; + int ret = 0; np = of_find_node_by_type(NULL, "smu"); if (np == NULL) @@ -484,16 +485,11 @@ int __init smu_init (void) if (smu_cmdbuf_abs == 0) { printk(KERN_ERR "SMU: Command buffer not allocated !\n"); - of_node_put(np); - return -EINVAL; + ret = -EINVAL; + goto fail_np; } smu = alloc_bootmem(sizeof(struct smu_device)); - if (smu == NULL) { - of_node_put(np); - return -ENOMEM; - } - memset(smu, 0, sizeof(*smu)); spin_lock_init(&smu->lock); INIT_LIST_HEAD(&smu->cmd_list); @@ -511,14 +507,14 @@ int __init smu_init (void) smu->db_node = of_find_node_by_name(NULL, "smu-doorbell"); if (smu->db_node == NULL) { printk(KERN_ERR "SMU: Can't find doorbell GPIO !\n"); - goto fail; + ret = -ENXIO; + goto fail_bootmem; } data = of_get_property(smu->db_node, "reg", NULL); if (data == NULL) { - of_node_put(smu->db_node); - smu->db_node = NULL; printk(KERN_ERR "SMU: Can't find doorbell GPIO address !\n"); - goto fail; + ret = -ENXIO; + goto fail_db_node; } /* Current setup has one doorbell GPIO that does both doorbell @@ -552,7 +548,8 @@ int __init smu_init (void) smu->db_buf = ioremap(0x8000860c, 0x1000); if (smu->db_buf == NULL) { printk(KERN_ERR "SMU: Can't map doorbell buffer pointer !\n"); - goto fail; + ret = -ENXIO; + goto fail_msg_node; } /* U3 has an issue with NAP mode when issuing SMU commands */ @@ -563,10 +560,17 @@ int __init smu_init (void) sys_ctrler = SYS_CTRLER_SMU; return 0; - fail: +fail_msg_node: + if (smu->msg_node) + of_node_put(smu->msg_node); +fail_db_node: + of_node_put(smu->db_node); +fail_bootmem: + free_bootmem((unsigned long)smu, sizeof(struct smu_device)); smu = NULL; - return -ENXIO; - +fail_np: + of_node_put(np); + return ret; } diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c index 5366dc93fb38..22bf981d393b 100644 --- a/drivers/macintosh/therm_adt746x.c +++ b/drivers/macintosh/therm_adt746x.c @@ -24,13 +24,13 @@ #include <linux/kthread.h> #include <linux/moduleparam.h> #include <linux/freezer.h> +#include <linux/of_platform.h> #include <asm/prom.h> 
#include <asm/machdep.h> #include <asm/io.h> #include <asm/system.h> #include <asm/sections.h> -#include <asm/of_platform.h> #undef DEBUG diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c index ddfb426a9abd..817607e2af6a 100644 --- a/drivers/macintosh/therm_pm72.c +++ b/drivers/macintosh/therm_pm72.c @@ -123,14 +123,14 @@ #include <linux/i2c.h> #include <linux/kthread.h> #include <linux/mutex.h> +#include <linux/of_device.h> +#include <linux/of_platform.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/io.h> #include <asm/system.h> #include <asm/sections.h> -#include <asm/of_device.h> #include <asm/macio.h> -#include <asm/of_platform.h> #include "therm_pm72.h" diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c index d11821af3b8d..3da0a02efd76 100644 --- a/drivers/macintosh/therm_windtunnel.c +++ b/drivers/macintosh/therm_windtunnel.c @@ -37,13 +37,13 @@ #include <linux/slab.h> #include <linux/init.h> #include <linux/kthread.h> +#include <linux/of_platform.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/io.h> #include <asm/system.h> #include <asm/sections.h> -#include <asm/of_platform.h> #include <asm/macio.h> #define LOG_TEMP 0 /* continously log temperature */ @@ -62,7 +62,7 @@ static struct { volatile int running; struct task_struct *poll_task; - struct semaphore lock; + struct mutex lock; struct of_device *of_dev; struct i2c_client *thermostat; @@ -286,23 +286,23 @@ restore_regs( void ) static int control_loop(void *dummy) { - down(&x.lock); + mutex_lock(&x.lock); setup_hardware(); - up(&x.lock); + mutex_unlock(&x.lock); for (;;) { msleep_interruptible(8000); if (kthread_should_stop()) break; - down(&x.lock); + mutex_lock(&x.lock); poll_temp(); - up(&x.lock); + mutex_unlock(&x.lock); } - down(&x.lock); + mutex_lock(&x.lock); restore_regs(); - up(&x.lock); + mutex_unlock(&x.lock); return 0; } @@ -489,7 +489,7 @@ g4fan_init( void ) const struct apple_thermal_info *info; struct device_node *np; - init_MUTEX( &x.lock ); + mutex_init(&x.lock); if( !(np=of_find_node_by_name(NULL, "power-mgt")) ) return -ENODEV; diff --git a/drivers/macintosh/via-pmu68k.c b/drivers/macintosh/via-pmu68k.c index e2f84da09e7c..b64741c95ac4 100644 --- a/drivers/macintosh/via-pmu68k.c +++ b/drivers/macintosh/via-pmu68k.c @@ -101,7 +101,6 @@ static int pmu_kind = PMU_UNKNOWN; static int pmu_fully_inited; int asleep; -BLOCKING_NOTIFIER_HEAD(sleep_notifier_list); static int pmu_probe(void); static int pmu_init(void); @@ -741,8 +740,8 @@ pmu_handle_data(unsigned char *data, int len) } } -int backlight_level = -1; -int backlight_enabled = 0; +static int backlight_level = -1; +static int backlight_enabled = 0; #define LEVEL_TO_BRIGHT(lev) ((lev) < 1? 0x7f: 0x4a - ((lev) << 1)) diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index 610af916891e..07d92c11b5d8 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -252,27 +252,14 @@ config DM_ZERO config DM_MULTIPATH tristate "Multipath target" depends on BLK_DEV_DM + # nasty syntax but means make DM_MULTIPATH independent + # of SCSI_DH if the latter isn't defined but if + # it is, DM_MULTIPATH must depend on it. We get a build + # error if SCSI_DH=m and DM_MULTIPATH=y + depends on SCSI_DH || !SCSI_DH ---help--- Allow volume managers to support multipath hardware. -config DM_MULTIPATH_EMC - tristate "EMC CX/AX multipath support" - depends on DM_MULTIPATH && BLK_DEV_DM - ---help--- - Multipath support for EMC CX/AX series hardware. 
- -config DM_MULTIPATH_RDAC - tristate "LSI/Engenio RDAC multipath support (EXPERIMENTAL)" - depends on DM_MULTIPATH && BLK_DEV_DM && SCSI && EXPERIMENTAL - ---help--- - Multipath support for LSI/Engenio RDAC. - -config DM_MULTIPATH_HP - tristate "HP MSA multipath support (EXPERIMENTAL)" - depends on DM_MULTIPATH && BLK_DEV_DM && SCSI && EXPERIMENTAL - ---help--- - Multipath support for HP MSA (Active/Passive) series hardware. - config DM_DELAY tristate "I/O delaying target (EXPERIMENTAL)" depends on BLK_DEV_DM && EXPERIMENTAL diff --git a/drivers/md/Makefile b/drivers/md/Makefile index 7be09eeea293..f1ef33dfd8cf 100644 --- a/drivers/md/Makefile +++ b/drivers/md/Makefile @@ -4,11 +4,9 @@ dm-mod-objs := dm.o dm-table.o dm-target.o dm-linear.o dm-stripe.o \ dm-ioctl.o dm-io.o dm-kcopyd.o -dm-multipath-objs := dm-hw-handler.o dm-path-selector.o dm-mpath.o +dm-multipath-objs := dm-path-selector.o dm-mpath.o dm-snapshot-objs := dm-snap.o dm-exception-store.o dm-mirror-objs := dm-raid1.o -dm-rdac-objs := dm-mpath-rdac.o -dm-hp-sw-objs := dm-mpath-hp-sw.o md-mod-objs := md.o bitmap.o raid456-objs := raid5.o raid6algos.o raid6recov.o raid6tables.o \ raid6int1.o raid6int2.o raid6int4.o \ @@ -35,9 +33,6 @@ obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o obj-$(CONFIG_DM_CRYPT) += dm-crypt.o obj-$(CONFIG_DM_DELAY) += dm-delay.o obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o -obj-$(CONFIG_DM_MULTIPATH_EMC) += dm-emc.o -obj-$(CONFIG_DM_MULTIPATH_HP) += dm-hp-sw.o -obj-$(CONFIG_DM_MULTIPATH_RDAC) += dm-rdac.o obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o obj-$(CONFIG_DM_MIRROR) += dm-mirror.o dm-log.o obj-$(CONFIG_DM_ZERO) += dm-zero.o diff --git a/drivers/md/dm-emc.c b/drivers/md/dm-emc.c deleted file mode 100644 index 3ea5ad4b7805..000000000000 --- a/drivers/md/dm-emc.c +++ /dev/null @@ -1,345 +0,0 @@ -/* - * Copyright (C) 2004 SUSE LINUX Products GmbH. All rights reserved. - * Copyright (C) 2004 Red Hat, Inc. All rights reserved. - * - * This file is released under the GPL. - * - * Multipath support for EMC CLARiiON AX/CX-series hardware. - */ - -#include "dm.h" -#include "dm-hw-handler.h" -#include <scsi/scsi.h> -#include <scsi/scsi_cmnd.h> - -#define DM_MSG_PREFIX "multipath emc" - -struct emc_handler { - spinlock_t lock; - - /* Whether we should send the short trespass command (FC-series) - * or the long version (default for AX/CX CLARiiON arrays). */ - unsigned short_trespass; - /* Whether or not to honor SCSI reservations when initiating a - * switch-over. Default: Don't. */ - unsigned hr; - - unsigned char sense[SCSI_SENSE_BUFFERSIZE]; -}; - -#define TRESPASS_PAGE 0x22 -#define EMC_FAILOVER_TIMEOUT (60 * HZ) - -/* Code borrowed from dm-lsi-rdac by Mike Christie */ - -static inline void free_bio(struct bio *bio) -{ - __free_page(bio->bi_io_vec[0].bv_page); - bio_put(bio); -} - -static void emc_endio(struct bio *bio, int error) -{ - struct dm_path *path = bio->bi_private; - - /* We also need to look at the sense keys here whether or not to - * switch to the next PG etc. - * - * For now simple logic: either it works or it doesn't. 
- */ - if (error) - dm_pg_init_complete(path, MP_FAIL_PATH); - else - dm_pg_init_complete(path, 0); - - /* request is freed in block layer */ - free_bio(bio); -} - -static struct bio *get_failover_bio(struct dm_path *path, unsigned data_size) -{ - struct bio *bio; - struct page *page; - - bio = bio_alloc(GFP_ATOMIC, 1); - if (!bio) { - DMERR("get_failover_bio: bio_alloc() failed."); - return NULL; - } - - bio->bi_rw |= (1 << BIO_RW); - bio->bi_bdev = path->dev->bdev; - bio->bi_sector = 0; - bio->bi_private = path; - bio->bi_end_io = emc_endio; - - page = alloc_page(GFP_ATOMIC); - if (!page) { - DMERR("get_failover_bio: alloc_page() failed."); - bio_put(bio); - return NULL; - } - - if (bio_add_page(bio, page, data_size, 0) != data_size) { - DMERR("get_failover_bio: bio_add_page() failed."); - __free_page(page); - bio_put(bio); - return NULL; - } - - return bio; -} - -static struct request *get_failover_req(struct emc_handler *h, - struct bio *bio, struct dm_path *path) -{ - struct request *rq; - struct block_device *bdev = bio->bi_bdev; - struct request_queue *q = bdev_get_queue(bdev); - - /* FIXME: Figure out why it fails with GFP_ATOMIC. */ - rq = blk_get_request(q, WRITE, __GFP_WAIT); - if (!rq) { - DMERR("get_failover_req: blk_get_request failed"); - return NULL; - } - - blk_rq_append_bio(q, rq, bio); - - rq->sense = h->sense; - memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE); - rq->sense_len = 0; - - rq->timeout = EMC_FAILOVER_TIMEOUT; - rq->cmd_type = REQ_TYPE_BLOCK_PC; - rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE; - - return rq; -} - -static struct request *emc_trespass_get(struct emc_handler *h, - struct dm_path *path) -{ - struct bio *bio; - struct request *rq; - unsigned char *page22; - unsigned char long_trespass_pg[] = { - 0, 0, 0, 0, - TRESPASS_PAGE, /* Page code */ - 0x09, /* Page length - 2 */ - h->hr ? 0x01 : 0x81, /* Trespass code + Honor reservation bit */ - 0xff, 0xff, /* Trespass target */ - 0, 0, 0, 0, 0, 0 /* Reserved bytes / unknown */ - }; - unsigned char short_trespass_pg[] = { - 0, 0, 0, 0, - TRESPASS_PAGE, /* Page code */ - 0x02, /* Page length - 2 */ - h->hr ? 0x01 : 0x81, /* Trespass code + Honor reservation bit */ - 0xff, /* Trespass target */ - }; - unsigned data_size = h->short_trespass ? sizeof(short_trespass_pg) : - sizeof(long_trespass_pg); - - /* get bio backing */ - if (data_size > PAGE_SIZE) - /* this should never happen */ - return NULL; - - bio = get_failover_bio(path, data_size); - if (!bio) { - DMERR("emc_trespass_get: no bio"); - return NULL; - } - - page22 = (unsigned char *)bio_data(bio); - memset(page22, 0, data_size); - - memcpy(page22, h->short_trespass ? - short_trespass_pg : long_trespass_pg, data_size); - - /* get request for block layer packet command */ - rq = get_failover_req(h, bio, path); - if (!rq) { - DMERR("emc_trespass_get: no rq"); - free_bio(bio); - return NULL; - } - - /* Prepare the command. */ - rq->cmd[0] = MODE_SELECT; - rq->cmd[1] = 0x10; - rq->cmd[4] = data_size; - rq->cmd_len = COMMAND_SIZE(rq->cmd[0]); - - return rq; -} - -static void emc_pg_init(struct hw_handler *hwh, unsigned bypassed, - struct dm_path *path) -{ - struct request *rq; - struct request_queue *q = bdev_get_queue(path->dev->bdev); - - /* - * We can either blindly init the pg (then look at the sense), - * or we can send some commands to get the state here (then - * possibly send the fo cmnd), or we can also have the - * initial state passed into us and then get an update here. 
- */ - if (!q) { - DMINFO("emc_pg_init: no queue"); - goto fail_path; - } - - /* FIXME: The request should be pre-allocated. */ - rq = emc_trespass_get(hwh->context, path); - if (!rq) { - DMERR("emc_pg_init: no rq"); - goto fail_path; - } - - DMINFO("emc_pg_init: sending switch-over command"); - elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 1); - return; - -fail_path: - dm_pg_init_complete(path, MP_FAIL_PATH); -} - -static struct emc_handler *alloc_emc_handler(void) -{ - struct emc_handler *h = kzalloc(sizeof(*h), GFP_KERNEL); - - if (h) - spin_lock_init(&h->lock); - - return h; -} - -static int emc_create(struct hw_handler *hwh, unsigned argc, char **argv) -{ - struct emc_handler *h; - unsigned hr, short_trespass; - - if (argc == 0) { - /* No arguments: use defaults */ - hr = 0; - short_trespass = 0; - } else if (argc != 2) { - DMWARN("incorrect number of arguments"); - return -EINVAL; - } else { - if ((sscanf(argv[0], "%u", &short_trespass) != 1) - || (short_trespass > 1)) { - DMWARN("invalid trespass mode selected"); - return -EINVAL; - } - - if ((sscanf(argv[1], "%u", &hr) != 1) - || (hr > 1)) { - DMWARN("invalid honor reservation flag selected"); - return -EINVAL; - } - } - - h = alloc_emc_handler(); - if (!h) - return -ENOMEM; - - hwh->context = h; - - if ((h->short_trespass = short_trespass)) - DMWARN("short trespass command will be send"); - else - DMWARN("long trespass command will be send"); - - if ((h->hr = hr)) - DMWARN("honor reservation bit will be set"); - else - DMWARN("honor reservation bit will not be set (default)"); - - return 0; -} - -static void emc_destroy(struct hw_handler *hwh) -{ - struct emc_handler *h = (struct emc_handler *) hwh->context; - - kfree(h); - hwh->context = NULL; -} - -static unsigned emc_error(struct hw_handler *hwh, struct bio *bio) -{ - /* FIXME: Patch from axboe still missing */ -#if 0 - int sense; - - if (bio->bi_error & BIO_SENSE) { - sense = bio->bi_error & 0xffffff; /* sense key / asc / ascq */ - - if (sense == 0x020403) { - /* LUN Not Ready - Manual Intervention Required - * indicates this is a passive path. - * - * FIXME: However, if this is seen and EVPD C0 - * indicates that this is due to a NDU in - * progress, we should set FAIL_PATH too. - * This indicates we might have to do a SCSI - * inquiry in the end_io path. Ugh. */ - return MP_BYPASS_PG | MP_RETRY_IO; - } else if (sense == 0x052501) { - /* An array based copy is in progress. Do not - * fail the path, do not bypass to another PG, - * do not retry. Fail the IO immediately. - * (Actually this is the same conclusion as in - * the default handler, but lets make sure.) */ - return 0; - } else if (sense == 0x062900) { - /* Unit Attention Code. This is the first IO - * to the new path, so just retry. 
*/ - return MP_RETRY_IO; - } - } -#endif - - /* Try default handler */ - return dm_scsi_err_handler(hwh, bio); -} - -static struct hw_handler_type emc_hwh = { - .name = "emc", - .module = THIS_MODULE, - .create = emc_create, - .destroy = emc_destroy, - .pg_init = emc_pg_init, - .error = emc_error, -}; - -static int __init dm_emc_init(void) -{ - int r = dm_register_hw_handler(&emc_hwh); - - if (r < 0) - DMERR("register failed %d", r); - - DMINFO("version 0.0.3 loaded"); - - return r; -} - -static void __exit dm_emc_exit(void) -{ - int r = dm_unregister_hw_handler(&emc_hwh); - - if (r < 0) - DMERR("unregister failed %d", r); -} - -module_init(dm_emc_init); -module_exit(dm_emc_exit); - -MODULE_DESCRIPTION(DM_NAME " EMC CX/AX/FC-family multipath"); -MODULE_AUTHOR("Lars Marowsky-Bree <lmb@suse.de>"); -MODULE_LICENSE("GPL"); diff --git a/drivers/md/dm-hw-handler.c b/drivers/md/dm-hw-handler.c deleted file mode 100644 index 2ee84d8aa0bf..000000000000 --- a/drivers/md/dm-hw-handler.c +++ /dev/null @@ -1,213 +0,0 @@ -/* - * Copyright (C) 2004 Red Hat, Inc. All rights reserved. - * - * This file is released under the GPL. - * - * Multipath hardware handler registration. - */ - -#include "dm.h" -#include "dm-hw-handler.h" - -#include <linux/slab.h> - -struct hwh_internal { - struct hw_handler_type hwht; - - struct list_head list; - long use; -}; - -#define hwht_to_hwhi(__hwht) container_of((__hwht), struct hwh_internal, hwht) - -static LIST_HEAD(_hw_handlers); -static DECLARE_RWSEM(_hwh_lock); - -static struct hwh_internal *__find_hw_handler_type(const char *name) -{ - struct hwh_internal *hwhi; - - list_for_each_entry(hwhi, &_hw_handlers, list) { - if (!strcmp(name, hwhi->hwht.name)) - return hwhi; - } - - return NULL; -} - -static struct hwh_internal *get_hw_handler(const char *name) -{ - struct hwh_internal *hwhi; - - down_read(&_hwh_lock); - hwhi = __find_hw_handler_type(name); - if (hwhi) { - if ((hwhi->use == 0) && !try_module_get(hwhi->hwht.module)) - hwhi = NULL; - else - hwhi->use++; - } - up_read(&_hwh_lock); - - return hwhi; -} - -struct hw_handler_type *dm_get_hw_handler(const char *name) -{ - struct hwh_internal *hwhi; - - if (!name) - return NULL; - - hwhi = get_hw_handler(name); - if (!hwhi) { - request_module("dm-%s", name); - hwhi = get_hw_handler(name); - } - - return hwhi ? 
&hwhi->hwht : NULL; -} - -void dm_put_hw_handler(struct hw_handler_type *hwht) -{ - struct hwh_internal *hwhi; - - if (!hwht) - return; - - down_read(&_hwh_lock); - hwhi = __find_hw_handler_type(hwht->name); - if (!hwhi) - goto out; - - if (--hwhi->use == 0) - module_put(hwhi->hwht.module); - - BUG_ON(hwhi->use < 0); - - out: - up_read(&_hwh_lock); -} - -static struct hwh_internal *_alloc_hw_handler(struct hw_handler_type *hwht) -{ - struct hwh_internal *hwhi = kzalloc(sizeof(*hwhi), GFP_KERNEL); - - if (hwhi) - hwhi->hwht = *hwht; - - return hwhi; -} - -int dm_register_hw_handler(struct hw_handler_type *hwht) -{ - int r = 0; - struct hwh_internal *hwhi = _alloc_hw_handler(hwht); - - if (!hwhi) - return -ENOMEM; - - down_write(&_hwh_lock); - - if (__find_hw_handler_type(hwht->name)) { - kfree(hwhi); - r = -EEXIST; - } else - list_add(&hwhi->list, &_hw_handlers); - - up_write(&_hwh_lock); - - return r; -} - -int dm_unregister_hw_handler(struct hw_handler_type *hwht) -{ - struct hwh_internal *hwhi; - - down_write(&_hwh_lock); - - hwhi = __find_hw_handler_type(hwht->name); - if (!hwhi) { - up_write(&_hwh_lock); - return -EINVAL; - } - - if (hwhi->use) { - up_write(&_hwh_lock); - return -ETXTBSY; - } - - list_del(&hwhi->list); - - up_write(&_hwh_lock); - - kfree(hwhi); - - return 0; -} - -unsigned dm_scsi_err_handler(struct hw_handler *hwh, struct bio *bio) -{ -#if 0 - int sense_key, asc, ascq; - - if (bio->bi_error & BIO_SENSE) { - /* FIXME: This is just an initial guess. */ - /* key / asc / ascq */ - sense_key = (bio->bi_error >> 16) & 0xff; - asc = (bio->bi_error >> 8) & 0xff; - ascq = bio->bi_error & 0xff; - - switch (sense_key) { - /* This block as a whole comes from the device. - * So no point retrying on another path. */ - case 0x03: /* Medium error */ - case 0x05: /* Illegal request */ - case 0x07: /* Data protect */ - case 0x08: /* Blank check */ - case 0x0a: /* copy aborted */ - case 0x0c: /* obsolete - no clue ;-) */ - case 0x0d: /* volume overflow */ - case 0x0e: /* data miscompare */ - case 0x0f: /* reserved - no idea either. */ - return MP_ERROR_IO; - - /* For these errors it's unclear whether they - * come from the device or the controller. - * So just lets try a different path, and if - * it eventually succeeds, user-space will clear - * the paths again... */ - case 0x02: /* Not ready */ - case 0x04: /* Hardware error */ - case 0x09: /* vendor specific */ - case 0x0b: /* Aborted command */ - return MP_FAIL_PATH; - - case 0x06: /* Unit attention - might want to decode */ - if (asc == 0x04 && ascq == 0x01) - /* "Unit in the process of - * becoming ready" */ - return 0; - return MP_FAIL_PATH; - - /* FIXME: For Unit Not Ready we may want - * to have a generic pg activation - * feature (START_UNIT). */ - - /* Should these two ever end up in the - * error path? I don't think so. */ - case 0x00: /* No sense */ - case 0x01: /* Recovered error */ - return 0; - } - } -#endif - - /* We got no idea how to decode the other kinds of errors -> - * assume generic error condition. */ - return MP_FAIL_PATH; -} - -EXPORT_SYMBOL_GPL(dm_register_hw_handler); -EXPORT_SYMBOL_GPL(dm_unregister_hw_handler); -EXPORT_SYMBOL_GPL(dm_scsi_err_handler); diff --git a/drivers/md/dm-hw-handler.h b/drivers/md/dm-hw-handler.h deleted file mode 100644 index 46809dcb121a..000000000000 --- a/drivers/md/dm-hw-handler.h +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright (C) 2004 Red Hat, Inc. All rights reserved. - * - * This file is released under the GPL. - * - * Multipath hardware handler registration. 
- */ - -#ifndef DM_HW_HANDLER_H -#define DM_HW_HANDLER_H - -#include <linux/device-mapper.h> - -#include "dm-mpath.h" - -struct hw_handler_type; -struct hw_handler { - struct hw_handler_type *type; - struct mapped_device *md; - void *context; -}; - -/* - * Constructs a hardware handler object, takes custom arguments - */ -/* Information about a hardware handler type */ -struct hw_handler_type { - char *name; - struct module *module; - - int (*create) (struct hw_handler *handler, unsigned int argc, - char **argv); - void (*destroy) (struct hw_handler *hwh); - - void (*pg_init) (struct hw_handler *hwh, unsigned bypassed, - struct dm_path *path); - unsigned (*error) (struct hw_handler *hwh, struct bio *bio); - int (*status) (struct hw_handler *hwh, status_type_t type, - char *result, unsigned int maxlen); -}; - -/* Register a hardware handler */ -int dm_register_hw_handler(struct hw_handler_type *type); - -/* Unregister a hardware handler */ -int dm_unregister_hw_handler(struct hw_handler_type *type); - -/* Returns a registered hardware handler type */ -struct hw_handler_type *dm_get_hw_handler(const char *name); - -/* Releases a hardware handler */ -void dm_put_hw_handler(struct hw_handler_type *hwht); - -/* Default err function */ -unsigned dm_scsi_err_handler(struct hw_handler *hwh, struct bio *bio); - -/* Error flags for err and dm_pg_init_complete */ -#define MP_FAIL_PATH 1 -#define MP_BYPASS_PG 2 -#define MP_ERROR_IO 4 /* Don't retry this I/O */ -#define MP_RETRY 8 - -#endif diff --git a/drivers/md/dm-mpath-hp-sw.c b/drivers/md/dm-mpath-hp-sw.c deleted file mode 100644 index b63a0ab37c53..000000000000 --- a/drivers/md/dm-mpath-hp-sw.c +++ /dev/null @@ -1,247 +0,0 @@ -/* - * Copyright (C) 2005 Mike Christie, All rights reserved. - * Copyright (C) 2007 Red Hat, Inc. All rights reserved. - * Authors: Mike Christie - * Dave Wysochanski - * - * This file is released under the GPL. - * - * This module implements the specific path activation code for - * HP StorageWorks and FSC FibreCat Asymmetric (Active/Passive) - * storage arrays. - * These storage arrays have controller-based failover, not - * LUN-based failover. However, LUN-based failover is the design - * of dm-multipath. Thus, this module is written for LUN-based failover. - */ -#include <linux/blkdev.h> -#include <linux/list.h> -#include <linux/types.h> -#include <scsi/scsi.h> -#include <scsi/scsi_cmnd.h> -#include <scsi/scsi_dbg.h> - -#include "dm.h" -#include "dm-hw-handler.h" - -#define DM_MSG_PREFIX "multipath hp-sw" -#define DM_HP_HWH_NAME "hp-sw" -#define DM_HP_HWH_VER "1.0.0" - -struct hp_sw_context { - unsigned char sense[SCSI_SENSE_BUFFERSIZE]; -}; - -/* - * hp_sw_error_is_retryable - Is an HP-specific check condition retryable? - * @req: path activation request - * - * Examine error codes of request and determine whether the error is retryable. - * Some error codes are already retried by scsi-ml (see - * scsi_decide_disposition), but some HP specific codes are not. - * The intent of this routine is to supply the logic for the HP specific - * check conditions. - * - * Returns: - * 1 - command completed with retryable error - * 0 - command completed with non-retryable error - * - * Possible optimizations - * 1. 
More hardware-specific error codes - */ -static int hp_sw_error_is_retryable(struct request *req) -{ - /* - * NOT_READY is known to be retryable - * For now we just dump out the sense data and call it retryable - */ - if (status_byte(req->errors) == CHECK_CONDITION) - __scsi_print_sense(DM_HP_HWH_NAME, req->sense, req->sense_len); - - /* - * At this point we don't have complete information about all the error - * codes from this hardware, so we are just conservative and retry - * when in doubt. - */ - return 1; -} - -/* - * hp_sw_end_io - Completion handler for HP path activation. - * @req: path activation request - * @error: scsi-ml error - * - * Check sense data, free request structure, and notify dm that - * pg initialization has completed. - * - * Context: scsi-ml softirq - * - */ -static void hp_sw_end_io(struct request *req, int error) -{ - struct dm_path *path = req->end_io_data; - unsigned err_flags = 0; - - if (!error) { - DMDEBUG("%s path activation command - success", - path->dev->name); - goto out; - } - - if (hp_sw_error_is_retryable(req)) { - DMDEBUG("%s path activation command - retry", - path->dev->name); - err_flags = MP_RETRY; - goto out; - } - - DMWARN("%s path activation fail - error=0x%x", - path->dev->name, error); - err_flags = MP_FAIL_PATH; - -out: - req->end_io_data = NULL; - __blk_put_request(req->q, req); - dm_pg_init_complete(path, err_flags); -} - -/* - * hp_sw_get_request - Allocate an HP specific path activation request - * @path: path on which request will be sent (needed for request queue) - * - * The START command is used for path activation request. - * These arrays are controller-based failover, not LUN based. - * One START command issued to a single path will fail over all - * LUNs for the same controller. - * - * Possible optimizations - * 1. Make timeout configurable - * 2. Preallocate request - */ -static struct request *hp_sw_get_request(struct dm_path *path) -{ - struct request *req; - struct block_device *bdev = path->dev->bdev; - struct request_queue *q = bdev_get_queue(bdev); - struct hp_sw_context *h = path->hwhcontext; - - req = blk_get_request(q, WRITE, GFP_NOIO); - if (!req) - goto out; - - req->timeout = 60 * HZ; - - req->errors = 0; - req->cmd_type = REQ_TYPE_BLOCK_PC; - req->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE; - req->end_io_data = path; - req->sense = h->sense; - memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE); - - req->cmd[0] = START_STOP; - req->cmd[4] = 1; - req->cmd_len = COMMAND_SIZE(req->cmd[0]); - -out: - return req; -} - -/* - * hp_sw_pg_init - HP path activation implementation. - * @hwh: hardware handler specific data - * @bypassed: unused; is the path group bypassed? (see dm-mpath.c) - * @path: path to send initialization command - * - * Send an HP-specific path activation command on 'path'. - * Do not try to optimize in any way, just send the activation command. - * More than one path activation command may be sent to the same controller. - * This seems to work fine for basic failover support. - * - * Possible optimizations - * 1. Detect an in-progress activation request and avoid submitting another one - * 2. Model the controller and only send a single activation request at a time - * 3. 
Determine the state of a path before sending an activation request - * - * Context: kmpathd (see process_queued_ios() in dm-mpath.c) - */ -static void hp_sw_pg_init(struct hw_handler *hwh, unsigned bypassed, - struct dm_path *path) -{ - struct request *req; - struct hp_sw_context *h; - - path->hwhcontext = hwh->context; - h = hwh->context; - - req = hp_sw_get_request(path); - if (!req) { - DMERR("%s path activation command - allocation fail", - path->dev->name); - goto retry; - } - - DMDEBUG("%s path activation command - sent", path->dev->name); - - blk_execute_rq_nowait(req->q, NULL, req, 1, hp_sw_end_io); - return; - -retry: - dm_pg_init_complete(path, MP_RETRY); -} - -static int hp_sw_create(struct hw_handler *hwh, unsigned argc, char **argv) -{ - struct hp_sw_context *h; - - h = kmalloc(sizeof(*h), GFP_KERNEL); - if (!h) - return -ENOMEM; - - hwh->context = h; - - return 0; -} - -static void hp_sw_destroy(struct hw_handler *hwh) -{ - struct hp_sw_context *h = hwh->context; - - kfree(h); -} - -static struct hw_handler_type hp_sw_hwh = { - .name = DM_HP_HWH_NAME, - .module = THIS_MODULE, - .create = hp_sw_create, - .destroy = hp_sw_destroy, - .pg_init = hp_sw_pg_init, -}; - -static int __init hp_sw_init(void) -{ - int r; - - r = dm_register_hw_handler(&hp_sw_hwh); - if (r < 0) - DMERR("register failed %d", r); - else - DMINFO("version " DM_HP_HWH_VER " loaded"); - - return r; -} - -static void __exit hp_sw_exit(void) -{ - int r; - - r = dm_unregister_hw_handler(&hp_sw_hwh); - if (r < 0) - DMERR("unregister failed %d", r); -} - -module_init(hp_sw_init); -module_exit(hp_sw_exit); - -MODULE_DESCRIPTION("DM Multipath HP StorageWorks / FSC FibreCat (A/P) support"); -MODULE_AUTHOR("Mike Christie, Dave Wysochanski <dm-devel@redhat.com>"); -MODULE_LICENSE("GPL"); -MODULE_VERSION(DM_HP_HWH_VER); diff --git a/drivers/md/dm-mpath-rdac.c b/drivers/md/dm-mpath-rdac.c deleted file mode 100644 index 95e77734880a..000000000000 --- a/drivers/md/dm-mpath-rdac.c +++ /dev/null @@ -1,700 +0,0 @@ -/* - * Engenio/LSI RDAC DM HW handler - * - * Copyright (C) 2005 Mike Christie. All rights reserved. - * Copyright (C) Chandra Seetharaman, IBM Corp. 2007 - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - */ -#include <scsi/scsi.h> -#include <scsi/scsi_cmnd.h> -#include <scsi/scsi_eh.h> - -#define DM_MSG_PREFIX "multipath rdac" - -#include "dm.h" -#include "dm-hw-handler.h" - -#define RDAC_DM_HWH_NAME "rdac" -#define RDAC_DM_HWH_VER "0.4" - -/* - * LSI mode page stuff - * - * These struct definitions and the forming of the - * mode page were taken from the LSI RDAC 2.4 GPL'd - * driver, and then converted to Linux conventions. 
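/*
 * In the handler below, failover itself is a single MODE SELECT of the
 * vendor-specific redundant-controller page (0x2c).  The essential fields,
 * as they are set up in rdac_failover_get(), appear to be:
 *
 *	common->rdac_mode[1]    = RDAC_MODE_TRANSFER_SPECIFIED_LUNS;
 *	common->rdac_options    = RDAC_FORCED_QUIESENCE;
 *	rdac_pg->lun_table[lun] = 0x81;    -- LUN whose ownership should move
 *
 * The legacy (mode-6) and expanded (mode-10) page layouts defined next
 * differ mainly in the size of lun_table; which one is used is decided
 * later from the 0xC2 inquiry data.
 */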
- */ -#define RDAC_QUIESCENCE_TIME 20; -/* - * Page Codes - */ -#define RDAC_PAGE_CODE_REDUNDANT_CONTROLLER 0x2c - -/* - * Controller modes definitions - */ -#define RDAC_MODE_TRANSFER_ALL_LUNS 0x01 -#define RDAC_MODE_TRANSFER_SPECIFIED_LUNS 0x02 - -/* - * RDAC Options field - */ -#define RDAC_FORCED_QUIESENCE 0x02 - -#define RDAC_FAILOVER_TIMEOUT (60 * HZ) - -struct rdac_mode_6_hdr { - u8 data_len; - u8 medium_type; - u8 device_params; - u8 block_desc_len; -}; - -struct rdac_mode_10_hdr { - u16 data_len; - u8 medium_type; - u8 device_params; - u16 reserved; - u16 block_desc_len; -}; - -struct rdac_mode_common { - u8 controller_serial[16]; - u8 alt_controller_serial[16]; - u8 rdac_mode[2]; - u8 alt_rdac_mode[2]; - u8 quiescence_timeout; - u8 rdac_options; -}; - -struct rdac_pg_legacy { - struct rdac_mode_6_hdr hdr; - u8 page_code; - u8 page_len; - struct rdac_mode_common common; -#define MODE6_MAX_LUN 32 - u8 lun_table[MODE6_MAX_LUN]; - u8 reserved2[32]; - u8 reserved3; - u8 reserved4; -}; - -struct rdac_pg_expanded { - struct rdac_mode_10_hdr hdr; - u8 page_code; - u8 subpage_code; - u8 page_len[2]; - struct rdac_mode_common common; - u8 lun_table[256]; - u8 reserved3; - u8 reserved4; -}; - -struct c9_inquiry { - u8 peripheral_info; - u8 page_code; /* 0xC9 */ - u8 reserved1; - u8 page_len; - u8 page_id[4]; /* "vace" */ - u8 avte_cvp; - u8 path_prio; - u8 reserved2[38]; -}; - -#define SUBSYS_ID_LEN 16 -#define SLOT_ID_LEN 2 - -struct c4_inquiry { - u8 peripheral_info; - u8 page_code; /* 0xC4 */ - u8 reserved1; - u8 page_len; - u8 page_id[4]; /* "subs" */ - u8 subsys_id[SUBSYS_ID_LEN]; - u8 revision[4]; - u8 slot_id[SLOT_ID_LEN]; - u8 reserved[2]; -}; - -struct rdac_controller { - u8 subsys_id[SUBSYS_ID_LEN]; - u8 slot_id[SLOT_ID_LEN]; - int use_10_ms; - struct kref kref; - struct list_head node; /* list of all controllers */ - spinlock_t lock; - int submitted; - struct list_head cmd_list; /* list of commands to be submitted */ - union { - struct rdac_pg_legacy legacy; - struct rdac_pg_expanded expanded; - } mode_select; -}; -struct c8_inquiry { - u8 peripheral_info; - u8 page_code; /* 0xC8 */ - u8 reserved1; - u8 page_len; - u8 page_id[4]; /* "edid" */ - u8 reserved2[3]; - u8 vol_uniq_id_len; - u8 vol_uniq_id[16]; - u8 vol_user_label_len; - u8 vol_user_label[60]; - u8 array_uniq_id_len; - u8 array_unique_id[16]; - u8 array_user_label_len; - u8 array_user_label[60]; - u8 lun[8]; -}; - -struct c2_inquiry { - u8 peripheral_info; - u8 page_code; /* 0xC2 */ - u8 reserved1; - u8 page_len; - u8 page_id[4]; /* "swr4" */ - u8 sw_version[3]; - u8 sw_date[3]; - u8 features_enabled; - u8 max_lun_supported; - u8 partitions[239]; /* Total allocation length should be 0xFF */ -}; - -struct rdac_handler { - struct list_head entry; /* list waiting to submit MODE SELECT */ - unsigned timeout; - struct rdac_controller *ctlr; -#define UNINITIALIZED_LUN (1 << 8) - unsigned lun; - unsigned char sense[SCSI_SENSE_BUFFERSIZE]; - struct dm_path *path; - struct work_struct work; -#define SEND_C2_INQUIRY 1 -#define SEND_C4_INQUIRY 2 -#define SEND_C8_INQUIRY 3 -#define SEND_C9_INQUIRY 4 -#define SEND_MODE_SELECT 5 - int cmd_to_send; - union { - struct c2_inquiry c2; - struct c4_inquiry c4; - struct c8_inquiry c8; - struct c9_inquiry c9; - } inq; -}; - -static LIST_HEAD(ctlr_list); -static DEFINE_SPINLOCK(list_lock); -static struct workqueue_struct *rdac_wkqd; - -static inline int had_failures(struct request *req, int error) -{ - return (error || host_byte(req->errors) != DID_OK || - msg_byte(req->errors) != 
COMMAND_COMPLETE); -} - -static void rdac_resubmit_all(struct rdac_handler *h) -{ - struct rdac_controller *ctlr = h->ctlr; - struct rdac_handler *tmp, *h1; - - spin_lock(&ctlr->lock); - list_for_each_entry_safe(h1, tmp, &ctlr->cmd_list, entry) { - h1->cmd_to_send = SEND_C9_INQUIRY; - queue_work(rdac_wkqd, &h1->work); - list_del(&h1->entry); - } - ctlr->submitted = 0; - spin_unlock(&ctlr->lock); -} - -static void mode_select_endio(struct request *req, int error) -{ - struct rdac_handler *h = req->end_io_data; - struct scsi_sense_hdr sense_hdr; - int sense = 0, fail = 0; - - if (had_failures(req, error)) { - fail = 1; - goto failed; - } - - if (status_byte(req->errors) == CHECK_CONDITION) { - scsi_normalize_sense(req->sense, SCSI_SENSE_BUFFERSIZE, - &sense_hdr); - sense = (sense_hdr.sense_key << 16) | (sense_hdr.asc << 8) | - sense_hdr.ascq; - /* If it is retryable failure, submit the c9 inquiry again */ - if (sense == 0x59136 || sense == 0x68b02 || sense == 0xb8b02 || - sense == 0x62900) { - /* 0x59136 - Command lock contention - * 0x[6b]8b02 - Quiesense in progress or achieved - * 0x62900 - Power On, Reset, or Bus Device Reset - */ - h->cmd_to_send = SEND_C9_INQUIRY; - queue_work(rdac_wkqd, &h->work); - goto done; - } - if (sense) - DMINFO("MODE_SELECT failed on %s with sense 0x%x", - h->path->dev->name, sense); - } -failed: - if (fail || sense) - dm_pg_init_complete(h->path, MP_FAIL_PATH); - else - dm_pg_init_complete(h->path, 0); - -done: - rdac_resubmit_all(h); - __blk_put_request(req->q, req); -} - -static struct request *get_rdac_req(struct rdac_handler *h, - void *buffer, unsigned buflen, int rw) -{ - struct request *rq; - struct request_queue *q = bdev_get_queue(h->path->dev->bdev); - - rq = blk_get_request(q, rw, GFP_KERNEL); - - if (!rq) { - DMINFO("get_rdac_req: blk_get_request failed"); - return NULL; - } - - if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_KERNEL)) { - blk_put_request(rq); - DMINFO("get_rdac_req: blk_rq_map_kern failed"); - return NULL; - } - - rq->sense = h->sense; - memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE); - rq->sense_len = 0; - - rq->end_io_data = h; - rq->timeout = h->timeout; - rq->cmd_type = REQ_TYPE_BLOCK_PC; - rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE; - return rq; -} - -static struct request *rdac_failover_get(struct rdac_handler *h) -{ - struct request *rq; - struct rdac_mode_common *common; - unsigned data_size; - - if (h->ctlr->use_10_ms) { - struct rdac_pg_expanded *rdac_pg; - - data_size = sizeof(struct rdac_pg_expanded); - rdac_pg = &h->ctlr->mode_select.expanded; - memset(rdac_pg, 0, data_size); - common = &rdac_pg->common; - rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40; - rdac_pg->subpage_code = 0x1; - rdac_pg->page_len[0] = 0x01; - rdac_pg->page_len[1] = 0x28; - rdac_pg->lun_table[h->lun] = 0x81; - } else { - struct rdac_pg_legacy *rdac_pg; - - data_size = sizeof(struct rdac_pg_legacy); - rdac_pg = &h->ctlr->mode_select.legacy; - memset(rdac_pg, 0, data_size); - common = &rdac_pg->common; - rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER; - rdac_pg->page_len = 0x68; - rdac_pg->lun_table[h->lun] = 0x81; - } - common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS; - common->quiescence_timeout = RDAC_QUIESCENCE_TIME; - common->rdac_options = RDAC_FORCED_QUIESENCE; - - /* get request for block layer packet command */ - rq = get_rdac_req(h, &h->ctlr->mode_select, data_size, WRITE); - if (!rq) { - DMERR("rdac_failover_get: no rq"); - return NULL; - } - - /* Prepare the command. 
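/*
 * Two CDB formats are prepared below: MODE SELECT(10) when the expanded
 * page is in use and MODE SELECT(6) otherwise.  The choice is recorded in
 * ctlr->use_10_ms, which c2_endio() derives from the inquiry page 0xC2
 * field max_lun_supported; roughly:
 *
 *	ctlr->use_10_ms = (sp->max_lun_supported >= MODE6_MAX_LUN);
 *
 * since the legacy mode-6 page can only describe MODE6_MAX_LUN (32) LUNs
 * in its lun_table.
 */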
*/ - if (h->ctlr->use_10_ms) { - rq->cmd[0] = MODE_SELECT_10; - rq->cmd[7] = data_size >> 8; - rq->cmd[8] = data_size & 0xff; - } else { - rq->cmd[0] = MODE_SELECT; - rq->cmd[4] = data_size; - } - rq->cmd_len = COMMAND_SIZE(rq->cmd[0]); - - return rq; -} - -/* Acquires h->ctlr->lock */ -static void submit_mode_select(struct rdac_handler *h) -{ - struct request *rq; - struct request_queue *q = bdev_get_queue(h->path->dev->bdev); - - spin_lock(&h->ctlr->lock); - if (h->ctlr->submitted) { - list_add(&h->entry, &h->ctlr->cmd_list); - goto drop_lock; - } - - if (!q) { - DMINFO("submit_mode_select: no queue"); - goto fail_path; - } - - rq = rdac_failover_get(h); - if (!rq) { - DMERR("submit_mode_select: no rq"); - goto fail_path; - } - - DMINFO("queueing MODE_SELECT command on %s", h->path->dev->name); - - blk_execute_rq_nowait(q, NULL, rq, 1, mode_select_endio); - h->ctlr->submitted = 1; - goto drop_lock; -fail_path: - dm_pg_init_complete(h->path, MP_FAIL_PATH); -drop_lock: - spin_unlock(&h->ctlr->lock); -} - -static void release_ctlr(struct kref *kref) -{ - struct rdac_controller *ctlr; - ctlr = container_of(kref, struct rdac_controller, kref); - - spin_lock(&list_lock); - list_del(&ctlr->node); - spin_unlock(&list_lock); - kfree(ctlr); -} - -static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id) -{ - struct rdac_controller *ctlr, *tmp; - - spin_lock(&list_lock); - - list_for_each_entry(tmp, &ctlr_list, node) { - if ((memcmp(tmp->subsys_id, subsys_id, SUBSYS_ID_LEN) == 0) && - (memcmp(tmp->slot_id, slot_id, SLOT_ID_LEN) == 0)) { - kref_get(&tmp->kref); - spin_unlock(&list_lock); - return tmp; - } - } - ctlr = kmalloc(sizeof(*ctlr), GFP_ATOMIC); - if (!ctlr) - goto done; - - /* initialize fields of controller */ - memcpy(ctlr->subsys_id, subsys_id, SUBSYS_ID_LEN); - memcpy(ctlr->slot_id, slot_id, SLOT_ID_LEN); - kref_init(&ctlr->kref); - spin_lock_init(&ctlr->lock); - ctlr->submitted = 0; - ctlr->use_10_ms = -1; - INIT_LIST_HEAD(&ctlr->cmd_list); - list_add(&ctlr->node, &ctlr_list); -done: - spin_unlock(&list_lock); - return ctlr; -} - -static void c4_endio(struct request *req, int error) -{ - struct rdac_handler *h = req->end_io_data; - struct c4_inquiry *sp; - - if (had_failures(req, error)) { - dm_pg_init_complete(h->path, MP_FAIL_PATH); - goto done; - } - - sp = &h->inq.c4; - - h->ctlr = get_controller(sp->subsys_id, sp->slot_id); - - if (h->ctlr) { - h->cmd_to_send = SEND_C9_INQUIRY; - queue_work(rdac_wkqd, &h->work); - } else - dm_pg_init_complete(h->path, MP_FAIL_PATH); -done: - __blk_put_request(req->q, req); -} - -static void c2_endio(struct request *req, int error) -{ - struct rdac_handler *h = req->end_io_data; - struct c2_inquiry *sp; - - if (had_failures(req, error)) { - dm_pg_init_complete(h->path, MP_FAIL_PATH); - goto done; - } - - sp = &h->inq.c2; - - /* If more than MODE6_MAX_LUN luns are supported, use mode select 10 */ - if (sp->max_lun_supported >= MODE6_MAX_LUN) - h->ctlr->use_10_ms = 1; - else - h->ctlr->use_10_ms = 0; - - h->cmd_to_send = SEND_MODE_SELECT; - queue_work(rdac_wkqd, &h->work); -done: - __blk_put_request(req->q, req); -} - -static void c9_endio(struct request *req, int error) -{ - struct rdac_handler *h = req->end_io_data; - struct c9_inquiry *sp; - - if (had_failures(req, error)) { - dm_pg_init_complete(h->path, MP_FAIL_PATH); - goto done; - } - - /* We need to look at the sense keys here to take clear action. 
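/*
 * The two avte_cvp tests below can be read as bit masks on the 0xC9
 * inquiry byte (a sketch with the same semantics as the code):
 *
 *	sp->avte_cvp & 0x80    -- host is in AVT mode, nothing to do
 *	sp->avte_cvp & 0x01    -- the controller on this path already owns
 *	                          the LUN, nothing to do
 *
 * Only when neither bit is set does the handler go on to issue the
 * MODE SELECT, possibly after the 0xC4/0xC2 inquiries that identify the
 * controller and pick the mode-page format.
 */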
- * For now simple logic: If the host is in AVT mode or if controller - * owns the lun, return dm_pg_init_complete(), otherwise submit - * MODE SELECT. - */ - sp = &h->inq.c9; - - /* If in AVT mode, return success */ - if ((sp->avte_cvp >> 7) == 0x1) { - dm_pg_init_complete(h->path, 0); - goto done; - } - - /* If the controller on this path owns the LUN, return success */ - if (sp->avte_cvp & 0x1) { - dm_pg_init_complete(h->path, 0); - goto done; - } - - if (h->ctlr) { - if (h->ctlr->use_10_ms == -1) - h->cmd_to_send = SEND_C2_INQUIRY; - else - h->cmd_to_send = SEND_MODE_SELECT; - } else - h->cmd_to_send = SEND_C4_INQUIRY; - queue_work(rdac_wkqd, &h->work); -done: - __blk_put_request(req->q, req); -} - -static void c8_endio(struct request *req, int error) -{ - struct rdac_handler *h = req->end_io_data; - struct c8_inquiry *sp; - - if (had_failures(req, error)) { - dm_pg_init_complete(h->path, MP_FAIL_PATH); - goto done; - } - - /* We need to look at the sense keys here to take clear action. - * For now simple logic: Get the lun from the inquiry page. - */ - sp = &h->inq.c8; - h->lun = sp->lun[7]; /* currently it uses only one byte */ - h->cmd_to_send = SEND_C9_INQUIRY; - queue_work(rdac_wkqd, &h->work); -done: - __blk_put_request(req->q, req); -} - -static void submit_inquiry(struct rdac_handler *h, int page_code, - unsigned int len, rq_end_io_fn endio) -{ - struct request *rq; - struct request_queue *q = bdev_get_queue(h->path->dev->bdev); - - if (!q) - goto fail_path; - - rq = get_rdac_req(h, &h->inq, len, READ); - if (!rq) - goto fail_path; - - /* Prepare the command. */ - rq->cmd[0] = INQUIRY; - rq->cmd[1] = 1; - rq->cmd[2] = page_code; - rq->cmd[4] = len; - rq->cmd_len = COMMAND_SIZE(INQUIRY); - blk_execute_rq_nowait(q, NULL, rq, 1, endio); - return; - -fail_path: - dm_pg_init_complete(h->path, MP_FAIL_PATH); -} - -static void service_wkq(struct work_struct *work) -{ - struct rdac_handler *h = container_of(work, struct rdac_handler, work); - - switch (h->cmd_to_send) { - case SEND_C2_INQUIRY: - submit_inquiry(h, 0xC2, sizeof(struct c2_inquiry), c2_endio); - break; - case SEND_C4_INQUIRY: - submit_inquiry(h, 0xC4, sizeof(struct c4_inquiry), c4_endio); - break; - case SEND_C8_INQUIRY: - submit_inquiry(h, 0xC8, sizeof(struct c8_inquiry), c8_endio); - break; - case SEND_C9_INQUIRY: - submit_inquiry(h, 0xC9, sizeof(struct c9_inquiry), c9_endio); - break; - case SEND_MODE_SELECT: - submit_mode_select(h); - break; - default: - BUG(); - } -} -/* - * only support subpage2c until we confirm that this is just a matter of - * of updating firmware or not, and RDAC (basic AVT works already) for now - * but we can add these in in when we get time and testers - */ -static int rdac_create(struct hw_handler *hwh, unsigned argc, char **argv) -{ - struct rdac_handler *h; - unsigned timeout; - - if (argc == 0) { - /* No arguments: use defaults */ - timeout = RDAC_FAILOVER_TIMEOUT; - } else if (argc != 1) { - DMWARN("incorrect number of arguments"); - return -EINVAL; - } else { - if (sscanf(argv[1], "%u", &timeout) != 1) { - DMWARN("invalid timeout value"); - return -EINVAL; - } - } - - h = kzalloc(sizeof(*h), GFP_KERNEL); - if (!h) - return -ENOMEM; - - hwh->context = h; - h->timeout = timeout; - h->lun = UNINITIALIZED_LUN; - INIT_WORK(&h->work, service_wkq); - DMWARN("using RDAC command with timeout %u", h->timeout); - - return 0; -} - -static void rdac_destroy(struct hw_handler *hwh) -{ - struct rdac_handler *h = hwh->context; - - if (h->ctlr) - kref_put(&h->ctlr->kref, release_ctlr); - kfree(h); - 
hwh->context = NULL; -} - -static unsigned rdac_error(struct hw_handler *hwh, struct bio *bio) -{ - /* Try default handler */ - return dm_scsi_err_handler(hwh, bio); -} - -static void rdac_pg_init(struct hw_handler *hwh, unsigned bypassed, - struct dm_path *path) -{ - struct rdac_handler *h = hwh->context; - - h->path = path; - switch (h->lun) { - case UNINITIALIZED_LUN: - submit_inquiry(h, 0xC8, sizeof(struct c8_inquiry), c8_endio); - break; - default: - submit_inquiry(h, 0xC9, sizeof(struct c9_inquiry), c9_endio); - } -} - -static struct hw_handler_type rdac_handler = { - .name = RDAC_DM_HWH_NAME, - .module = THIS_MODULE, - .create = rdac_create, - .destroy = rdac_destroy, - .pg_init = rdac_pg_init, - .error = rdac_error, -}; - -static int __init rdac_init(void) -{ - int r; - - rdac_wkqd = create_singlethread_workqueue("rdac_wkqd"); - if (!rdac_wkqd) { - DMERR("Failed to create workqueue rdac_wkqd."); - return -ENOMEM; - } - - r = dm_register_hw_handler(&rdac_handler); - if (r < 0) { - DMERR("%s: register failed %d", RDAC_DM_HWH_NAME, r); - destroy_workqueue(rdac_wkqd); - return r; - } - - DMINFO("%s: version %s loaded", RDAC_DM_HWH_NAME, RDAC_DM_HWH_VER); - return 0; -} - -static void __exit rdac_exit(void) -{ - int r = dm_unregister_hw_handler(&rdac_handler); - - destroy_workqueue(rdac_wkqd); - if (r < 0) - DMERR("%s: unregister failed %d", RDAC_DM_HWH_NAME, r); -} - -module_init(rdac_init); -module_exit(rdac_exit); - -MODULE_DESCRIPTION("DM Multipath LSI/Engenio RDAC support"); -MODULE_AUTHOR("Mike Christie, Chandra Seetharaman"); -MODULE_LICENSE("GPL"); -MODULE_VERSION(RDAC_DM_HWH_VER); diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index e7ee59e655d5..9f7302d4878d 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -7,7 +7,6 @@ #include "dm.h" #include "dm-path-selector.h" -#include "dm-hw-handler.h" #include "dm-bio-list.h" #include "dm-bio-record.h" #include "dm-uevent.h" @@ -20,6 +19,7 @@ #include <linux/slab.h> #include <linux/time.h> #include <linux/workqueue.h> +#include <scsi/scsi_dh.h> #include <asm/atomic.h> #define DM_MSG_PREFIX "multipath" @@ -61,7 +61,8 @@ struct multipath { spinlock_t lock; - struct hw_handler hw_handler; + const char *hw_handler_name; + struct work_struct activate_path; unsigned nr_priority_groups; struct list_head priority_groups; unsigned pg_init_required; /* pg_init needs calling? 
*/ @@ -106,9 +107,10 @@ typedef int (*action_fn) (struct pgpath *pgpath); static struct kmem_cache *_mpio_cache; -static struct workqueue_struct *kmultipathd; +static struct workqueue_struct *kmultipathd, *kmpath_handlerd; static void process_queued_ios(struct work_struct *work); static void trigger_event(struct work_struct *work); +static void activate_path(struct work_struct *work); /*----------------------------------------------- @@ -178,6 +180,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti) m->queue_io = 1; INIT_WORK(&m->process_queued_ios, process_queued_ios); INIT_WORK(&m->trigger_event, trigger_event); + INIT_WORK(&m->activate_path, activate_path); m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache); if (!m->mpio_pool) { kfree(m); @@ -193,18 +196,13 @@ static struct multipath *alloc_multipath(struct dm_target *ti) static void free_multipath(struct multipath *m) { struct priority_group *pg, *tmp; - struct hw_handler *hwh = &m->hw_handler; list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) { list_del(&pg->list); free_priority_group(pg, m->ti); } - if (hwh->type) { - hwh->type->destroy(hwh); - dm_put_hw_handler(hwh->type); - } - + kfree(m->hw_handler_name); mempool_destroy(m->mpio_pool); kfree(m); } @@ -216,12 +214,10 @@ static void free_multipath(struct multipath *m) static void __switch_pg(struct multipath *m, struct pgpath *pgpath) { - struct hw_handler *hwh = &m->hw_handler; - m->current_pg = pgpath->pg; /* Must we initialise the PG first, and queue I/O till it's ready? */ - if (hwh->type && hwh->type->pg_init) { + if (m->hw_handler_name) { m->pg_init_required = 1; m->queue_io = 1; } else { @@ -409,7 +405,6 @@ static void process_queued_ios(struct work_struct *work) { struct multipath *m = container_of(work, struct multipath, process_queued_ios); - struct hw_handler *hwh = &m->hw_handler; struct pgpath *pgpath = NULL; unsigned init_required = 0, must_queue = 1; unsigned long flags; @@ -439,7 +434,7 @@ out: spin_unlock_irqrestore(&m->lock, flags); if (init_required) - hwh->type->pg_init(hwh, pgpath->pg->bypassed, &pgpath->path); + queue_work(kmpath_handlerd, &m->activate_path); if (!must_queue) dispatch_queued_ios(m); @@ -652,8 +647,6 @@ static struct priority_group *parse_priority_group(struct arg_set *as, static int parse_hw_handler(struct arg_set *as, struct multipath *m) { - int r; - struct hw_handler_type *hwht; unsigned hw_argc; struct dm_target *ti = m->ti; @@ -661,30 +654,20 @@ static int parse_hw_handler(struct arg_set *as, struct multipath *m) {0, 1024, "invalid number of hardware handler args"}, }; - r = read_param(_params, shift(as), &hw_argc, &ti->error); - if (r) + if (read_param(_params, shift(as), &hw_argc, &ti->error)) return -EINVAL; if (!hw_argc) return 0; - hwht = dm_get_hw_handler(shift(as)); - if (!hwht) { + m->hw_handler_name = kstrdup(shift(as), GFP_KERNEL); + request_module("scsi_dh_%s", m->hw_handler_name); + if (scsi_dh_handler_exist(m->hw_handler_name) == 0) { ti->error = "unknown hardware handler type"; + kfree(m->hw_handler_name); + m->hw_handler_name = NULL; return -EINVAL; } - - m->hw_handler.md = dm_table_get_md(ti->table); - dm_put(m->hw_handler.md); - - r = hwht->create(&m->hw_handler, hw_argc - 1, as->argv); - if (r) { - dm_put_hw_handler(hwht); - ti->error = "hardware handler constructor failed"; - return r; - } - - m->hw_handler.type = hwht; consume(as, hw_argc - 1); return 0; @@ -808,6 +791,7 @@ static void multipath_dtr(struct dm_target *ti) { struct multipath *m = (struct multipath *) ti->private; + 
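/*
 * Tear-down ordering in multipath_dtr() matters here, presumably so that
 * any activate_path() work still referencing this multipath has finished
 * before free_multipath() runs; condensed, the new sequence is:
 *
 *	flush_workqueue(kmpath_handlerd);    -- pending scsi_dh activations
 *	flush_workqueue(kmultipathd);        -- pending queued-I/O work
 *	free_multipath(m);
 */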
flush_workqueue(kmpath_handlerd); flush_workqueue(kmultipathd); free_multipath(m); } @@ -1025,52 +1009,85 @@ static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath) return limit_reached; } -/* - * pg_init must call this when it has completed its initialisation - */ -void dm_pg_init_complete(struct dm_path *path, unsigned err_flags) +static void pg_init_done(struct dm_path *path, int errors) { struct pgpath *pgpath = path_to_pgpath(path); struct priority_group *pg = pgpath->pg; struct multipath *m = pg->m; unsigned long flags; - /* - * If requested, retry pg_init until maximum number of retries exceeded. - * If retry not requested and PG already bypassed, always fail the path. - */ - if (err_flags & MP_RETRY) { - if (pg_init_limit_reached(m, pgpath)) - err_flags |= MP_FAIL_PATH; - } else if (err_flags && pg->bypassed) - err_flags |= MP_FAIL_PATH; - - if (err_flags & MP_FAIL_PATH) + /* device or driver problems */ + switch (errors) { + case SCSI_DH_OK: + break; + case SCSI_DH_NOSYS: + if (!m->hw_handler_name) { + errors = 0; + break; + } + DMERR("Cannot failover device because scsi_dh_%s was not " + "loaded.", m->hw_handler_name); + /* + * Fail path for now, so we do not ping pong + */ fail_path(pgpath); - - if (err_flags & MP_BYPASS_PG) + break; + case SCSI_DH_DEV_TEMP_BUSY: + /* + * Probably doing something like FW upgrade on the + * controller so try the other pg. + */ bypass_pg(m, pg, 1); + break; + /* TODO: For SCSI_DH_RETRY we should wait a couple seconds */ + case SCSI_DH_RETRY: + case SCSI_DH_IMM_RETRY: + case SCSI_DH_RES_TEMP_UNAVAIL: + if (pg_init_limit_reached(m, pgpath)) + fail_path(pgpath); + errors = 0; + break; + default: + /* + * We probably do not want to fail the path for a device + * error, but this is what the old dm did. In future + * patches we can do more advanced handling. + */ + fail_path(pgpath); + } spin_lock_irqsave(&m->lock, flags); - if (err_flags & ~MP_RETRY) { + if (errors) { + DMERR("Could not failover device. 
Error %d.", errors); m->current_pgpath = NULL; m->current_pg = NULL; - } else if (!m->pg_init_required) + } else if (!m->pg_init_required) { m->queue_io = 0; + pg->bypassed = 0; + } m->pg_init_in_progress = 0; queue_work(kmultipathd, &m->process_queued_ios); spin_unlock_irqrestore(&m->lock, flags); } +static void activate_path(struct work_struct *work) +{ + int ret; + struct multipath *m = + container_of(work, struct multipath, activate_path); + struct dm_path *path = &m->current_pgpath->path; + + ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev)); + pg_init_done(path, ret); +} + /* * end_io handling */ static int do_end_io(struct multipath *m, struct bio *bio, int error, struct dm_mpath_io *mpio) { - struct hw_handler *hwh = &m->hw_handler; - unsigned err_flags = MP_FAIL_PATH; /* Default behavior */ unsigned long flags; if (!error) @@ -1097,19 +1114,8 @@ static int do_end_io(struct multipath *m, struct bio *bio, } spin_unlock_irqrestore(&m->lock, flags); - if (hwh->type && hwh->type->error) - err_flags = hwh->type->error(hwh, bio); - - if (mpio->pgpath) { - if (err_flags & MP_FAIL_PATH) - fail_path(mpio->pgpath); - - if (err_flags & MP_BYPASS_PG) - bypass_pg(m, mpio->pgpath->pg, 1); - } - - if (err_flags & MP_ERROR_IO) - return -EIO; + if (mpio->pgpath) + fail_path(mpio->pgpath); requeue: dm_bio_restore(&mpio->details, bio); @@ -1194,7 +1200,6 @@ static int multipath_status(struct dm_target *ti, status_type_t type, int sz = 0; unsigned long flags; struct multipath *m = (struct multipath *) ti->private; - struct hw_handler *hwh = &m->hw_handler; struct priority_group *pg; struct pgpath *p; unsigned pg_num; @@ -1214,12 +1219,10 @@ static int multipath_status(struct dm_target *ti, status_type_t type, DMEMIT("pg_init_retries %u ", m->pg_init_retries); } - if (hwh->type && hwh->type->status) - sz += hwh->type->status(hwh, type, result + sz, maxlen - sz); - else if (!hwh->type || type == STATUSTYPE_INFO) + if (!m->hw_handler_name || type == STATUSTYPE_INFO) DMEMIT("0 "); else - DMEMIT("1 %s ", hwh->type->name); + DMEMIT("1 %s ", m->hw_handler_name); DMEMIT("%u ", m->nr_priority_groups); @@ -1422,6 +1425,21 @@ static int __init dm_multipath_init(void) return -ENOMEM; } + /* + * A separate workqueue is used to handle the device handlers + * to avoid overloading existing workqueue. Overloading the + * old workqueue would also create a bottleneck in the + * path of the storage hardware device activation. 
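/*
 * With the switch to the scsi_dh framework, path activation now runs end
 * to end roughly as follows (all of it visible in this patch):
 *
 *	parse_hw_handler()
 *		m->hw_handler_name = kstrdup(name, GFP_KERNEL);
 *		request_module("scsi_dh_%s", m->hw_handler_name);
 *	__switch_pg()        -- sets pg_init_required when a handler
 *	                        name is present
 *	process_queued_ios() -- queue_work(kmpath_handlerd,
 *	                        &m->activate_path)
 *	activate_path()      -- scsi_dh_activate(bdev_get_queue(
 *	                        path->dev->bdev))
 *	pg_init_done()       -- maps SCSI_DH_* results onto
 *	                        fail_path()/bypass_pg()/retry
 *
 * kmpath_handlerd, created below, exists so that a slow scsi_dh_activate()
 * cannot stall kmultipathd, which still services queued I/O.
 */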
+ */ + kmpath_handlerd = create_singlethread_workqueue("kmpath_handlerd"); + if (!kmpath_handlerd) { + DMERR("failed to create workqueue kmpath_handlerd"); + destroy_workqueue(kmultipathd); + dm_unregister_target(&multipath_target); + kmem_cache_destroy(_mpio_cache); + return -ENOMEM; + } + DMINFO("version %u.%u.%u loaded", multipath_target.version[0], multipath_target.version[1], multipath_target.version[2]); @@ -1433,6 +1451,7 @@ static void __exit dm_multipath_exit(void) { int r; + destroy_workqueue(kmpath_handlerd); destroy_workqueue(kmultipathd); r = dm_unregister_target(&multipath_target); @@ -1441,8 +1460,6 @@ static void __exit dm_multipath_exit(void) kmem_cache_destroy(_mpio_cache); } -EXPORT_SYMBOL_GPL(dm_pg_init_complete); - module_init(dm_multipath_init); module_exit(dm_multipath_exit); diff --git a/drivers/md/dm-mpath.h b/drivers/md/dm-mpath.h index b9cdcbb3ed59..c198b856a452 100644 --- a/drivers/md/dm-mpath.h +++ b/drivers/md/dm-mpath.h @@ -16,7 +16,6 @@ struct dm_path { unsigned is_active; /* Read-only */ void *pscontext; /* For path-selector use */ - void *hwhcontext; /* For hw-handler use */ }; /* Callback for hwh_pg_init_fn to use when complete */ diff --git a/drivers/message/fusion/lsi/mpi.h b/drivers/message/fusion/lsi/mpi.h index 1acbdd61b670..10b6ef758725 100644 --- a/drivers/message/fusion/lsi/mpi.h +++ b/drivers/message/fusion/lsi/mpi.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2007 LSI Corporation. + * Copyright (c) 2000-2008 LSI Corporation. * * * Name: mpi.h diff --git a/drivers/message/fusion/lsi/mpi_cnfg.h b/drivers/message/fusion/lsi/mpi_cnfg.h index 2bd8adae0f00..b2db3330c591 100644 --- a/drivers/message/fusion/lsi/mpi_cnfg.h +++ b/drivers/message/fusion/lsi/mpi_cnfg.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2007 LSI Corporation. + * Copyright (c) 2000-2008 LSI Corporation. * * * Name: mpi_cnfg.h diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index d40d6d15ae20..75e599b85b64 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c @@ -5,7 +5,7 @@ * For use with LSI PCI chip/adapter(s) * running LSI Fusion MPT (Message Passing Technology) firmware. * - * Copyright (c) 1999-2007 LSI Corporation + * Copyright (c) 1999-2008 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * */ @@ -103,7 +103,7 @@ static int mfcounter = 0; * Public data... */ -struct proc_dir_entry *mpt_proc_root_dir; +static struct proc_dir_entry *mpt_proc_root_dir; #define WHOINIT_UNKNOWN 0xAA @@ -253,6 +253,55 @@ mpt_get_cb_idx(MPT_DRIVER_CLASS dclass) return 0; } +/** + * mpt_fault_reset_work - work performed on workq after ioc fault + * @work: input argument, used to derive ioc + * +**/ +static void +mpt_fault_reset_work(struct work_struct *work) +{ + MPT_ADAPTER *ioc = + container_of(work, MPT_ADAPTER, fault_reset_work.work); + u32 ioc_raw_state; + int rc; + unsigned long flags; + + if (ioc->diagPending || !ioc->active) + goto out; + + ioc_raw_state = mpt_GetIocState(ioc, 0); + if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT) { + printk(MYIOC_s_WARN_FMT "IOC is in FAULT state (%04xh)!!!\n", + ioc->name, ioc_raw_state & MPI_DOORBELL_DATA_MASK); + printk(MYIOC_s_WARN_FMT "Issuing HardReset from %s!!\n", + ioc->name, __FUNCTION__); + rc = mpt_HardResetHandler(ioc, CAN_SLEEP); + printk(MYIOC_s_WARN_FMT "%s: HardReset: %s\n", ioc->name, + __FUNCTION__, (rc == 0) ? 
"success" : "failed"); + ioc_raw_state = mpt_GetIocState(ioc, 0); + if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT) + printk(MYIOC_s_WARN_FMT "IOC is in FAULT state after " + "reset (%04xh)\n", ioc->name, ioc_raw_state & + MPI_DOORBELL_DATA_MASK); + } + + out: + /* + * Take turns polling alternate controller + */ + if (ioc->alt_ioc) + ioc = ioc->alt_ioc; + + /* rearm the timer */ + spin_lock_irqsave(&ioc->fault_reset_work_lock, flags); + if (ioc->reset_work_q) + queue_delayed_work(ioc->reset_work_q, &ioc->fault_reset_work, + msecs_to_jiffies(MPT_POLLING_INTERVAL)); + spin_unlock_irqrestore(&ioc->fault_reset_work_lock, flags); +} + + /* * Process turbo (context) reply... */ @@ -1616,6 +1665,22 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id) /* Find lookup slot. */ INIT_LIST_HEAD(&ioc->list); + + /* Initialize workqueue */ + INIT_DELAYED_WORK(&ioc->fault_reset_work, mpt_fault_reset_work); + spin_lock_init(&ioc->fault_reset_work_lock); + + snprintf(ioc->reset_work_q_name, KOBJ_NAME_LEN, "mpt_poll_%d", ioc->id); + ioc->reset_work_q = + create_singlethread_workqueue(ioc->reset_work_q_name); + if (!ioc->reset_work_q) { + printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n", + ioc->name); + pci_release_selected_regions(pdev, ioc->bars); + kfree(ioc); + return -ENOMEM; + } + dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "facts @ %p, pfacts[0] @ %p\n", ioc->name, &ioc->facts, &ioc->pfacts[0])); @@ -1727,6 +1792,10 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id) iounmap(ioc->memmap); if (r != -5) pci_release_selected_regions(pdev, ioc->bars); + + destroy_workqueue(ioc->reset_work_q); + ioc->reset_work_q = NULL; + kfree(ioc); pci_set_drvdata(pdev, NULL); return r; @@ -1759,6 +1828,10 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id) } #endif + if (!ioc->alt_ioc) + queue_delayed_work(ioc->reset_work_q, &ioc->fault_reset_work, + msecs_to_jiffies(MPT_POLLING_INTERVAL)); + return 0; } @@ -1774,6 +1847,19 @@ mpt_detach(struct pci_dev *pdev) MPT_ADAPTER *ioc = pci_get_drvdata(pdev); char pname[32]; u8 cb_idx; + unsigned long flags; + struct workqueue_struct *wq; + + /* + * Stop polling ioc for fault condition + */ + spin_lock_irqsave(&ioc->fault_reset_work_lock, flags); + wq = ioc->reset_work_q; + ioc->reset_work_q = NULL; + spin_unlock_irqrestore(&ioc->fault_reset_work_lock, flags); + cancel_delayed_work(&ioc->fault_reset_work); + destroy_workqueue(wq); + sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s/summary", ioc->name); remove_proc_entry(pname, NULL); @@ -7456,7 +7542,6 @@ EXPORT_SYMBOL(mpt_resume); EXPORT_SYMBOL(mpt_suspend); #endif EXPORT_SYMBOL(ioc_list); -EXPORT_SYMBOL(mpt_proc_root_dir); EXPORT_SYMBOL(mpt_register); EXPORT_SYMBOL(mpt_deregister); EXPORT_SYMBOL(mpt_event_register); diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h index a8f617447d22..6adab648dbb9 100644 --- a/drivers/message/fusion/mptbase.h +++ b/drivers/message/fusion/mptbase.h @@ -5,7 +5,7 @@ * LSIFC9xx/LSI409xx Fibre Channel * running LSI Fusion MPT (Message Passing Technology) firmware. 
* - * Copyright (c) 1999-2007 LSI Corporation + * Copyright (c) 1999-2008 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * */ @@ -73,11 +73,11 @@ #endif #ifndef COPYRIGHT -#define COPYRIGHT "Copyright (c) 1999-2007 " MODULEAUTHOR +#define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR #endif -#define MPT_LINUX_VERSION_COMMON "3.04.06" -#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.06" +#define MPT_LINUX_VERSION_COMMON "3.04.07" +#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.07" #define WHAT_MAGIC_STRING "@" "(" "#" ")" #define show_mptmod_ver(s,ver) \ @@ -176,6 +176,8 @@ /* debug print string length used for events and iocstatus */ # define EVENT_DESCR_STR_SZ 100 +#define MPT_POLLING_INTERVAL 1000 /* in milliseconds */ + #ifdef __KERNEL__ /* { */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ @@ -709,6 +711,12 @@ typedef struct _MPT_ADAPTER struct workqueue_struct *fc_rescan_work_q; struct scsi_cmnd **ScsiLookup; spinlock_t scsi_lookup_lock; + + char reset_work_q_name[KOBJ_NAME_LEN]; + struct workqueue_struct *reset_work_q; + struct delayed_work fault_reset_work; + spinlock_t fault_reset_work_lock; + } MPT_ADAPTER; /* @@ -919,7 +927,6 @@ extern int mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhys * Public data decl's... */ extern struct list_head ioc_list; -extern struct proc_dir_entry *mpt_proc_root_dir; /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ #endif /* } __KERNEL__ */ diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c index c5946560c4e2..a5920423e2b2 100644 --- a/drivers/message/fusion/mptctl.c +++ b/drivers/message/fusion/mptctl.c @@ -4,7 +4,7 @@ * For use with LSI PCI chip/adapters * running LSI Fusion MPT (Message Passing Technology) firmware. * - * Copyright (c) 1999-2007 LSI Corporation + * Copyright (c) 1999-2008 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * */ @@ -66,7 +66,7 @@ #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> -#define COPYRIGHT "Copyright (c) 1999-2007 LSI Corporation" +#define COPYRIGHT "Copyright (c) 1999-2008 LSI Corporation" #define MODULEAUTHOR "LSI Corporation" #include "mptbase.h" #include "mptctl.h" diff --git a/drivers/message/fusion/mptctl.h b/drivers/message/fusion/mptctl.h index 2c1890127e15..d564cc9ada6a 100644 --- a/drivers/message/fusion/mptctl.h +++ b/drivers/message/fusion/mptctl.h @@ -5,7 +5,7 @@ * LSIFC9xx/LSI409xx Fibre Channel * running LSI Fusion MPT (Message Passing Technology) firmware. * - * Copyright (c) 1999-2007 LSI Corporation + * Copyright (c) 1999-2008 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * */ diff --git a/drivers/message/fusion/mptdebug.h b/drivers/message/fusion/mptdebug.h index ffdb0a6191b4..510b9f492093 100644 --- a/drivers/message/fusion/mptdebug.h +++ b/drivers/message/fusion/mptdebug.h @@ -3,7 +3,7 @@ * For use with LSI PCI chip/adapter(s) * running LSI Fusion MPT (Message Passing Technology) firmware. * - * Copyright (c) 1999-2007 LSI Corporation + * Copyright (c) 1999-2008 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * */ diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c index 1e24ab4ac38c..fc31ca6829d8 100644 --- a/drivers/message/fusion/mptfc.c +++ b/drivers/message/fusion/mptfc.c @@ -3,7 +3,7 @@ * For use with LSI PCI chip/adapter(s) * running LSI Fusion MPT (Message Passing Technology) firmware. 
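/*
 * The fault_reset_work added in this patch polls each IOC once per
 * MPT_POLLING_INTERVAL (1000 ms) and issues a hard reset if
 * mpt_GetIocState() reports MPI_IOC_STATE_FAULT; adapters that share an
 * alt_ioc take turns, since the work requeues itself on the alternate
 * controller.  Teardown in mpt_detach() clears ioc->reset_work_q under
 * fault_reset_work_lock before cancelling and destroying the queue, which
 * appears to be what keeps the self-rearming work from racing with
 * destroy_workqueue(); a condensed sketch of that ordering:
 *
 *	spin_lock_irqsave(&ioc->fault_reset_work_lock, flags);
 *	wq = ioc->reset_work_q;
 *	ioc->reset_work_q = NULL;    -- the rearm path checks this
 *	spin_unlock_irqrestore(&ioc->fault_reset_work_lock, flags);
 *	cancel_delayed_work(&ioc->fault_reset_work);
 *	destroy_workqueue(wq);
 */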
* - * Copyright (c) 1999-2007 LSI Corporation + * Copyright (c) 1999-2008 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * */ diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c index 7950fc678ed1..d709d92b7b30 100644 --- a/drivers/message/fusion/mptlan.c +++ b/drivers/message/fusion/mptlan.c @@ -4,7 +4,7 @@ * For use with LSI Fibre Channel PCI chip/adapters * running LSI Fusion MPT (Message Passing Technology) firmware. * - * Copyright (c) 2000-2007 LSI Corporation + * Copyright (c) 2000-2008 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * */ diff --git a/drivers/message/fusion/mptlan.h b/drivers/message/fusion/mptlan.h index bafb67fc8181..33927ee7dc3b 100644 --- a/drivers/message/fusion/mptlan.h +++ b/drivers/message/fusion/mptlan.h @@ -4,7 +4,7 @@ * For use with LSI Fibre Channel PCI chip/adapters * running LSI Fusion MPT (Message Passing Technology) firmware. * - * Copyright (c) 2000-2007 LSI Corporation + * Copyright (c) 2000-2008 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * */ diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index 4d492ba232b0..b1147aa7afde 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -3,7 +3,7 @@ * For use with LSI PCI chip/adapter(s) * running LSI Fusion MPT (Message Passing Technology) firmware. * - * Copyright (c) 1999-2007 LSI Corporation + * Copyright (c) 1999-2008 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ diff --git a/drivers/message/fusion/mptsas.h b/drivers/message/fusion/mptsas.h index 7c150f50629a..2b544e0877e6 100644 --- a/drivers/message/fusion/mptsas.h +++ b/drivers/message/fusion/mptsas.h @@ -5,7 +5,7 @@ * LSIFC9xx/LSI409xx Fibre Channel * running LSI MPT (Message Passing Technology) firmware. * - * Copyright (c) 1999-2007 LSI Corporation + * Copyright (c) 1999-2008 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * */ diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index c68ef00c2f92..d142b6b4b976 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c @@ -3,7 +3,7 @@ * For use with LSI PCI chip/adapter(s) * running LSI Fusion MPT (Message Passing Technology) firmware. * - * Copyright (c) 1999-2007 LSI Corporation + * Copyright (c) 1999-2008 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * */ diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h index 7ea7da0e090c..319aa3033371 100644 --- a/drivers/message/fusion/mptscsih.h +++ b/drivers/message/fusion/mptscsih.h @@ -5,7 +5,7 @@ * LSIFC9xx/LSI409xx Fibre Channel * running LSI Fusion MPT (Message Passing Technology) firmware. * - * Copyright (c) 1999-2007 LSI Corporation + * Copyright (c) 1999-2008 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * */ diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c index 1effca4e40e1..61620144e49c 100644 --- a/drivers/message/fusion/mptspi.c +++ b/drivers/message/fusion/mptspi.c @@ -3,7 +3,7 @@ * For use with LSI PCI chip/adapter(s) * running LSI Fusion MPT (Message Passing Technology) firmware. 
* - * Copyright (c) 1999-2007 LSI Corporation + * Copyright (c) 1999-2008 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * */ @@ -447,6 +447,7 @@ static int mptspi_target_alloc(struct scsi_target *starget) spi_max_offset(starget) = ioc->spi_data.maxSyncOffset; spi_offset(starget) = 0; + spi_period(starget) = 0xFF; mptspi_write_width(starget, 0); return 0; diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 636af2862308..1921b8dbb242 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -179,17 +179,29 @@ config FUJITSU_LAPTOP tristate "Fujitsu Laptop Extras" depends on X86 depends on ACPI + depends on INPUT depends on BACKLIGHT_CLASS_DEVICE ---help--- This is a driver for laptops built by Fujitsu: * P2xxx/P5xxx/S6xxx/S7xxx series Lifebooks * Possibly other Fujitsu laptop models + * Tested with S6410 and S7020 - It adds support for LCD brightness control. + It adds support for LCD brightness control and some hotkeys. If you have a Fujitsu laptop, say Y or M here. +config FUJITSU_LAPTOP_DEBUG + bool "Verbose debug mode for Fujitsu Laptop Extras" + depends on FUJITSU_LAPTOP + default n + ---help--- + Enables extra debug output from the fujitsu extras driver, at the + expense of a slight increase in driver size. + + If you are not sure, say N here. + config TC1100_WMI tristate "HP Compaq TC1100 Tablet WMI Extras (EXPERIMENTAL)" depends on X86 && !X86_64 @@ -219,6 +231,23 @@ config MSI_LAPTOP If you have an MSI S270 laptop, say Y or M here. +config COMPAL_LAPTOP + tristate "Compal Laptop Extras" + depends on X86 + depends on ACPI_EC + depends on BACKLIGHT_CLASS_DEVICE + ---help--- + This is a driver for laptops built by Compal: + + Compal FL90/IFL90 + Compal FL91/IFL91 + Compal FL92/JFL92 + Compal FT00/IFT00 + + It adds support for Bluetooth, WLAN and LCD brightness control. + + If you have an Compal FL9x/IFL9x/FT00 laptop, say Y or M here. 
+ config SONY_LAPTOP tristate "Sony Laptop Extras" depends on X86 && ACPI diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index 1952875a272e..a6dac6a2e7e5 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -5,10 +5,11 @@ obj- := misc.o # Dummy rule to force built-in.o to be made obj-$(CONFIG_IBM_ASM) += ibmasm/ obj-$(CONFIG_HDPU_FEATURES) += hdpuftrs/ -obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o -obj-$(CONFIG_ACER_WMI) += acer-wmi.o obj-$(CONFIG_ASUS_LAPTOP) += asus-laptop.o obj-$(CONFIG_EEEPC_LAPTOP) += eeepc-laptop.o +obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o +obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o +obj-$(CONFIG_ACER_WMI) += acer-wmi.o obj-$(CONFIG_ATMEL_PWM) += atmel_pwm.o obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o diff --git a/drivers/misc/acer-wmi.c b/drivers/misc/acer-wmi.c index dd13a3749927..e7a3fe508dff 100644 --- a/drivers/misc/acer-wmi.c +++ b/drivers/misc/acer-wmi.c @@ -22,18 +22,18 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#define ACER_WMI_VERSION "0.1" - #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/dmi.h> +#include <linux/fb.h> #include <linux/backlight.h> #include <linux/leds.h> #include <linux/platform_device.h> #include <linux/acpi.h> #include <linux/i8042.h> +#include <linux/debugfs.h> #include <acpi/acpi_drivers.h> @@ -87,6 +87,7 @@ struct acer_quirks { * Acer ACPI method GUIDs */ #define AMW0_GUID1 "67C3371D-95A3-4C37-BB61-DD47B491DAAB" +#define AMW0_GUID2 "431F16ED-0C2B-444C-B267-27DEB140CF9C" #define WMID_GUID1 "6AF4F258-B401-42fd-BE91-3D4AC2D7C0D3" #define WMID_GUID2 "95764E09-FB56-4e83-B31A-37761F60994A" @@ -150,6 +151,12 @@ struct acer_data { int brightness; }; +struct acer_debug { + struct dentry *root; + struct dentry *devices; + u32 wmid_devices; +}; + /* Each low-level interface must define at least some of the following */ struct wmi_interface { /* The WMI device type */ @@ -160,6 +167,9 @@ struct wmi_interface { /* Private data for the current interface */ struct acer_data data; + + /* debugfs entries associated with this interface */ + struct acer_debug debug; }; /* The static interface pointer, points to the currently detected interface */ @@ -174,7 +184,7 @@ static struct wmi_interface *interface; struct quirk_entry { u8 wireless; u8 mailled; - u8 brightness; + s8 brightness; u8 bluetooth; }; @@ -198,6 +208,10 @@ static int dmi_matched(const struct dmi_system_id *dmi) static struct quirk_entry quirk_unknown = { }; +static struct quirk_entry quirk_acer_aspire_1520 = { + .brightness = -1, +}; + static struct quirk_entry quirk_acer_travelmate_2490 = { .mailled = 1, }; @@ -207,9 +221,31 @@ static struct quirk_entry quirk_medion_md_98300 = { .wireless = 1, }; +static struct quirk_entry quirk_fujitsu_amilo_li_1718 = { + .wireless = 2, +}; + static struct dmi_system_id acer_quirks[] = { { .callback = dmi_matched, + .ident = "Acer Aspire 1360", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"), + }, + .driver_data = &quirk_acer_aspire_1520, + }, + { + .callback = dmi_matched, + .ident = "Acer Aspire 1520", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1520"), + }, + .driver_data = &quirk_acer_aspire_1520, + }, + { + .callback = dmi_matched, .ident = "Acer Aspire 3100", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), @@ -300,6 +336,15 @@ static struct dmi_system_id acer_quirks[] = { }, { .callback = 
dmi_matched, + .ident = "Fujitsu Siemens Amilo Li 1718", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), + DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Li 1718"), + }, + .driver_data = &quirk_fujitsu_amilo_li_1718, + }, + { + .callback = dmi_matched, .ident = "Medion MD 98300", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "MEDION"), @@ -393,6 +438,12 @@ struct wmi_interface *iface) return AE_ERROR; *value = result & 0x1; return AE_OK; + case 2: + err = ec_read(0x71, &result); + if (err) + return AE_ERROR; + *value = result & 0x1; + return AE_OK; default: err = ec_read(0xA, &result); if (err) @@ -506,6 +557,15 @@ static acpi_status AMW0_set_capabilities(void) struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; + /* + * On laptops with this strange GUID (non Acer), normal probing doesn't + * work. + */ + if (wmi_has_guid(AMW0_GUID2)) { + interface->capability |= ACER_CAP_WIRELESS; + return AE_OK; + } + args.eax = ACER_AMW0_WRITE; args.ecx = args.edx = 0; @@ -552,7 +612,8 @@ static acpi_status AMW0_set_capabilities(void) * appear to use the same EC register for brightness, even if they * differ for wireless, etc */ - interface->capability |= ACER_CAP_BRIGHTNESS; + if (quirks->brightness >= 0) + interface->capability |= ACER_CAP_BRIGHTNESS; return AE_OK; } @@ -807,7 +868,15 @@ static int read_brightness(struct backlight_device *bd) static int update_bl_status(struct backlight_device *bd) { - set_u32(bd->props.brightness, ACER_CAP_BRIGHTNESS); + int intensity = bd->props.brightness; + + if (bd->props.power != FB_BLANK_UNBLANK) + intensity = 0; + if (bd->props.fb_blank != FB_BLANK_UNBLANK) + intensity = 0; + + set_u32(intensity, ACER_CAP_BRIGHTNESS); + return 0; } @@ -829,8 +898,9 @@ static int __devinit acer_backlight_init(struct device *dev) acer_backlight_device = bd; + bd->props.power = FB_BLANK_UNBLANK; + bd->props.brightness = max_brightness; bd->props.max_brightness = max_brightness; - bd->props.brightness = read_brightness(NULL); backlight_update_status(bd); return 0; } @@ -894,6 +964,28 @@ static DEVICE_ATTR(interface, S_IWUGO | S_IRUGO | S_IWUSR, show_interface, NULL); /* + * debugfs functions + */ +static u32 get_wmid_devices(void) +{ + struct acpi_buffer out = {ACPI_ALLOCATE_BUFFER, NULL}; + union acpi_object *obj; + acpi_status status; + + status = wmi_query_block(WMID_GUID2, 1, &out); + if (ACPI_FAILURE(status)) + return 0; + + obj = (union acpi_object *) out.pointer; + if (obj && obj->type == ACPI_TYPE_BUFFER && + obj->buffer.length == sizeof(u32)) { + return *((u32 *) obj->buffer.pointer); + } else { + return 0; + } +} + +/* * Platform device */ static int __devinit acer_platform_probe(struct platform_device *device) @@ -1052,12 +1144,40 @@ error_sysfs: return retval; } +static void remove_debugfs(void) +{ + debugfs_remove(interface->debug.devices); + debugfs_remove(interface->debug.root); +} + +static int create_debugfs(void) +{ + interface->debug.root = debugfs_create_dir("acer-wmi", NULL); + if (!interface->debug.root) { + printk(ACER_ERR "Failed to create debugfs directory"); + return -ENOMEM; + } + + interface->debug.devices = debugfs_create_u32("devices", S_IRUGO, + interface->debug.root, + &interface->debug.wmid_devices); + if (!interface->debug.devices) + goto error_debugfs; + + return 0; + +error_debugfs: + remove_debugfs(); + return -ENOMEM; +} + static int __init acer_wmi_init(void) { int err; - printk(ACER_INFO "Acer Laptop ACPI-WMI Extras version %s\n", - ACER_WMI_VERSION); + printk(ACER_INFO "Acer Laptop ACPI-WMI Extras\n"); + + 
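/*
 * find_quirks() now runs before the interface is probed, presumably so
 * that quirk data such as quirks->brightness is already valid when
 * AMW0_set_capabilities() decides whether to advertise ACER_CAP_BRIGHTNESS.
 * Once a WMID interface is found, the device bitmask read by
 * get_wmid_devices() is exposed read-only through debugfs; assuming
 * debugfs is mounted in the usual place, it can be inspected with
 * something like:
 *
 *	cat /sys/kernel/debug/acer-wmi/devices
 */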
find_quirks(); /* * Detect which ACPI-WMI interface we're using. @@ -1092,8 +1212,6 @@ static int __init acer_wmi_init(void) if (wmi_has_guid(AMW0_GUID1)) AMW0_find_mailled(); - find_quirks(); - if (!interface) { printk(ACER_ERR "No or unsupported WMI interface, unable to " "load\n"); @@ -1111,6 +1229,13 @@ static int __init acer_wmi_init(void) if (err) return err; + if (wmi_has_guid(WMID_GUID2)) { + interface->debug.wmid_devices = get_wmid_devices(); + err = create_debugfs(); + if (err) + return err; + } + /* Override any initial settings with values from the commandline */ acer_commandline_init(); diff --git a/drivers/misc/compal-laptop.c b/drivers/misc/compal-laptop.c new file mode 100644 index 000000000000..344b790a6253 --- /dev/null +++ b/drivers/misc/compal-laptop.c @@ -0,0 +1,404 @@ +/*-*-linux-c-*-*/ + +/* + Copyright (C) 2008 Cezary Jackiewicz <cezary.jackiewicz (at) gmail.com> + + based on MSI driver + + Copyright (C) 2006 Lennart Poettering <mzxreary (at) 0pointer (dot) de> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + */ + +/* + * comapl-laptop.c - Compal laptop support. + * + * This driver exports a few files in /sys/devices/platform/compal-laptop/: + * + * wlan - wlan subsystem state: contains 0 or 1 (rw) + * + * bluetooth - Bluetooth subsystem state: contains 0 or 1 (rw) + * + * raw - raw value taken from embedded controller register (ro) + * + * In addition to these platform device attributes the driver + * registers itself in the Linux backlight control subsystem and is + * available to userspace under /sys/class/backlight/compal-laptop/. + * + * This driver might work on other laptops produced by Compal. If you + * want to try it you can pass force=1 as argument to the module which + * will force it to load even when the DMI data doesn't identify the + * laptop as FL9x. 
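/*
 * All three sysfs files are backed by a single embedded-controller
 * register (COMPAL_EC_COMMAND_WIRELESS, 0xBB).  Going by the masks
 * defined below, its layout is:
 *
 *	bit 4 (0x10)  hardware kill switch -- when clear, wlan and
 *	              bluetooth always read back 0 and cannot be set
 *	bit 1 (0x02)  bluetooth enabled
 *	bit 0 (0x01)  wlan enabled
 *
 * so a state change from userspace is simply, for example:
 *
 *	echo 1 > /sys/devices/platform/compal-laptop/wlan
 *
 * which ends up in set_wlan_state() as a read-modify-write of that
 * register.
 */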
+ */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/acpi.h> +#include <linux/dmi.h> +#include <linux/backlight.h> +#include <linux/platform_device.h> +#include <linux/autoconf.h> + +#define COMPAL_DRIVER_VERSION "0.2.6" + +#define COMPAL_LCD_LEVEL_MAX 8 + +#define COMPAL_EC_COMMAND_WIRELESS 0xBB +#define COMPAL_EC_COMMAND_LCD_LEVEL 0xB9 + +#define KILLSWITCH_MASK 0x10 +#define WLAN_MASK 0x01 +#define BT_MASK 0x02 + +static int force; +module_param(force, bool, 0); +MODULE_PARM_DESC(force, "Force driver load, ignore DMI data"); + +/* Hardware access */ + +static int set_lcd_level(int level) +{ + if (level < 0 || level >= COMPAL_LCD_LEVEL_MAX) + return -EINVAL; + + ec_write(COMPAL_EC_COMMAND_LCD_LEVEL, level); + + return 0; +} + +static int get_lcd_level(void) +{ + u8 result; + + ec_read(COMPAL_EC_COMMAND_LCD_LEVEL, &result); + + return (int) result; +} + +static int set_wlan_state(int state) +{ + u8 result, value; + + ec_read(COMPAL_EC_COMMAND_WIRELESS, &result); + + if ((result & KILLSWITCH_MASK) == 0) + return -EINVAL; + else { + if (state) + value = (u8) (result | WLAN_MASK); + else + value = (u8) (result & ~WLAN_MASK); + ec_write(COMPAL_EC_COMMAND_WIRELESS, value); + } + + return 0; +} + +static int set_bluetooth_state(int state) +{ + u8 result, value; + + ec_read(COMPAL_EC_COMMAND_WIRELESS, &result); + + if ((result & KILLSWITCH_MASK) == 0) + return -EINVAL; + else { + if (state) + value = (u8) (result | BT_MASK); + else + value = (u8) (result & ~BT_MASK); + ec_write(COMPAL_EC_COMMAND_WIRELESS, value); + } + + return 0; +} + +static int get_wireless_state(int *wlan, int *bluetooth) +{ + u8 result; + + ec_read(COMPAL_EC_COMMAND_WIRELESS, &result); + + if (wlan) { + if ((result & KILLSWITCH_MASK) == 0) + *wlan = 0; + else + *wlan = result & WLAN_MASK; + } + + if (bluetooth) { + if ((result & KILLSWITCH_MASK) == 0) + *bluetooth = 0; + else + *bluetooth = (result & BT_MASK) >> 1; + } + + return 0; +} + +/* Backlight device stuff */ + +static int bl_get_brightness(struct backlight_device *b) +{ + return get_lcd_level(); +} + + +static int bl_update_status(struct backlight_device *b) +{ + return set_lcd_level(b->props.brightness); +} + +static struct backlight_ops compalbl_ops = { + .get_brightness = bl_get_brightness, + .update_status = bl_update_status, +}; + +static struct backlight_device *compalbl_device; + +/* Platform device */ + +static ssize_t show_wlan(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int ret, enabled; + + ret = get_wireless_state(&enabled, NULL); + if (ret < 0) + return ret; + + return sprintf(buf, "%i\n", enabled); +} + +static ssize_t show_raw(struct device *dev, + struct device_attribute *attr, char *buf) +{ + u8 result; + + ec_read(COMPAL_EC_COMMAND_WIRELESS, &result); + + return sprintf(buf, "%i\n", result); +} + +static ssize_t show_bluetooth(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int ret, enabled; + + ret = get_wireless_state(NULL, &enabled); + if (ret < 0) + return ret; + + return sprintf(buf, "%i\n", enabled); +} + +static ssize_t store_wlan_state(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int state, ret; + + if (sscanf(buf, "%i", &state) != 1 || (state < 0 || state > 1)) + return -EINVAL; + + ret = set_wlan_state(state); + if (ret < 0) + return ret; + + return count; +} + +static ssize_t store_bluetooth_state(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int state, 
ret; + + if (sscanf(buf, "%i", &state) != 1 || (state < 0 || state > 1)) + return -EINVAL; + + ret = set_bluetooth_state(state); + if (ret < 0) + return ret; + + return count; +} + +static DEVICE_ATTR(bluetooth, 0644, show_bluetooth, store_bluetooth_state); +static DEVICE_ATTR(wlan, 0644, show_wlan, store_wlan_state); +static DEVICE_ATTR(raw, 0444, show_raw, NULL); + +static struct attribute *compal_attributes[] = { + &dev_attr_bluetooth.attr, + &dev_attr_wlan.attr, + &dev_attr_raw.attr, + NULL +}; + +static struct attribute_group compal_attribute_group = { + .attrs = compal_attributes +}; + +static struct platform_driver compal_driver = { + .driver = { + .name = "compal-laptop", + .owner = THIS_MODULE, + } +}; + +static struct platform_device *compal_device; + +/* Initialization */ + +static int dmi_check_cb(const struct dmi_system_id *id) +{ + printk(KERN_INFO "compal-laptop: Identified laptop model '%s'.\n", + id->ident); + + return 0; +} + +static struct dmi_system_id __initdata compal_dmi_table[] = { + { + .ident = "FL90/IFL90", + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "IFL90"), + DMI_MATCH(DMI_BOARD_VERSION, "IFT00"), + }, + .callback = dmi_check_cb + }, + { + .ident = "FL90/IFL90", + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "IFL90"), + DMI_MATCH(DMI_BOARD_VERSION, "REFERENCE"), + }, + .callback = dmi_check_cb + }, + { + .ident = "FL91/IFL91", + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "IFL91"), + DMI_MATCH(DMI_BOARD_VERSION, "IFT00"), + }, + .callback = dmi_check_cb + }, + { + .ident = "FL92/JFL92", + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "JFL92"), + DMI_MATCH(DMI_BOARD_VERSION, "IFT00"), + }, + .callback = dmi_check_cb + }, + { + .ident = "FT00/IFT00", + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "IFT00"), + DMI_MATCH(DMI_BOARD_VERSION, "IFT00"), + }, + .callback = dmi_check_cb + }, + { } +}; + +static int __init compal_init(void) +{ + int ret; + + if (acpi_disabled) + return -ENODEV; + + if (!force && !dmi_check_system(compal_dmi_table)) + return -ENODEV; + + /* Register backlight stuff */ + + compalbl_device = backlight_device_register("compal-laptop", NULL, NULL, + &compalbl_ops); + if (IS_ERR(compalbl_device)) + return PTR_ERR(compalbl_device); + + compalbl_device->props.max_brightness = COMPAL_LCD_LEVEL_MAX-1; + + ret = platform_driver_register(&compal_driver); + if (ret) + goto fail_backlight; + + /* Register platform stuff */ + + compal_device = platform_device_alloc("compal-laptop", -1); + if (!compal_device) { + ret = -ENOMEM; + goto fail_platform_driver; + } + + ret = platform_device_add(compal_device); + if (ret) + goto fail_platform_device1; + + ret = sysfs_create_group(&compal_device->dev.kobj, + &compal_attribute_group); + if (ret) + goto fail_platform_device2; + + printk(KERN_INFO "compal-laptop: driver "COMPAL_DRIVER_VERSION + " successfully loaded.\n"); + + return 0; + +fail_platform_device2: + + platform_device_del(compal_device); + +fail_platform_device1: + + platform_device_put(compal_device); + +fail_platform_driver: + + platform_driver_unregister(&compal_driver); + +fail_backlight: + + backlight_device_unregister(compalbl_device); + + return ret; +} + +static void __exit compal_cleanup(void) +{ + + sysfs_remove_group(&compal_device->dev.kobj, &compal_attribute_group); + platform_device_unregister(compal_device); + platform_driver_unregister(&compal_driver); + backlight_device_unregister(compalbl_device); + + printk(KERN_INFO "compal-laptop: driver unloaded.\n"); +} + +module_init(compal_init); +module_exit(compal_cleanup); + +MODULE_AUTHOR("Cezary 
Jackiewicz"); +MODULE_DESCRIPTION("Compal Laptop Support"); +MODULE_VERSION(COMPAL_DRIVER_VERSION); +MODULE_LICENSE("GPL"); + +MODULE_ALIAS("dmi:*:rnIFL90:rvrIFT00:*"); +MODULE_ALIAS("dmi:*:rnIFL90:rvrREFERENCE:*"); +MODULE_ALIAS("dmi:*:rnIFL91:rvrIFT00:*"); +MODULE_ALIAS("dmi:*:rnJFL92:rvrIFT00:*"); +MODULE_ALIAS("dmi:*:rnIFT00:rvrIFT00:*"); diff --git a/drivers/misc/eeepc-laptop.c b/drivers/misc/eeepc-laptop.c index 6d727609097f..9e8d79e7e9f4 100644 --- a/drivers/misc/eeepc-laptop.c +++ b/drivers/misc/eeepc-laptop.c @@ -87,7 +87,7 @@ enum { CM_ASL_LID }; -const char *cm_getv[] = { +static const char *cm_getv[] = { "WLDG", NULL, NULL, NULL, "CAMG", NULL, NULL, NULL, NULL, "PBLG", NULL, NULL, @@ -96,7 +96,7 @@ const char *cm_getv[] = { "CRDG", "LIDG" }; -const char *cm_setv[] = { +static const char *cm_setv[] = { "WLDS", NULL, NULL, NULL, "CAMS", NULL, NULL, NULL, "SDSP", "PBLS", "HDPS", NULL, diff --git a/drivers/misc/fujitsu-laptop.c b/drivers/misc/fujitsu-laptop.c index 6d14e8fe1537..7a1ef6c262de 100644 --- a/drivers/misc/fujitsu-laptop.c +++ b/drivers/misc/fujitsu-laptop.c @@ -1,12 +1,14 @@ /*-*-linux-c-*-*/ /* - Copyright (C) 2007 Jonathan Woithe <jwoithe@physics.adelaide.edu.au> + Copyright (C) 2007,2008 Jonathan Woithe <jwoithe@physics.adelaide.edu.au> + Copyright (C) 2008 Peter Gruber <nokos@gmx.net> Based on earlier work: Copyright (C) 2003 Shane Spencer <shane@bogomip.com> Adrian Yee <brewt-fujitsu@brewt.org> - Templated from msi-laptop.c which is copyright by its respective authors. + Templated from msi-laptop.c and thinkpad_acpi.c which is copyright + by its respective authors. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -39,8 +41,17 @@ * registers itself in the Linux backlight control subsystem and is * available to userspace under /sys/class/backlight/fujitsu-laptop/. * - * This driver has been tested on a Fujitsu Lifebook S7020. It should - * work on most P-series and S-series Lifebooks, but YMMV. + * Hotkeys present on certain Fujitsu laptops (eg: the S6xxx series) are + * also supported by this driver. + * + * This driver has been tested on a Fujitsu Lifebook S6410 and S7020. It + * should work on most P-series and S-series Lifebooks, but YMMV. + * + * The module parameter use_alt_lcd_levels switches between different ACPI + * brightness controls which are used by different Fujitsu laptops. In most + * cases the correct method is automatically detected. "use_alt_lcd_levels=1" + * is applicable for a Fujitsu Lifebook S6410 if autodetection fails. 
+ * */ #include <linux/module.h> @@ -49,30 +60,105 @@ #include <linux/acpi.h> #include <linux/dmi.h> #include <linux/backlight.h> +#include <linux/input.h> +#include <linux/kfifo.h> +#include <linux/video_output.h> #include <linux/platform_device.h> -#define FUJITSU_DRIVER_VERSION "0.3" +#define FUJITSU_DRIVER_VERSION "0.4.2" #define FUJITSU_LCD_N_LEVELS 8 #define ACPI_FUJITSU_CLASS "fujitsu" #define ACPI_FUJITSU_HID "FUJ02B1" -#define ACPI_FUJITSU_DRIVER_NAME "Fujitsu laptop FUJ02B1 ACPI extras driver" +#define ACPI_FUJITSU_DRIVER_NAME "Fujitsu laptop FUJ02B1 ACPI brightness driver" #define ACPI_FUJITSU_DEVICE_NAME "Fujitsu FUJ02B1" - +#define ACPI_FUJITSU_HOTKEY_HID "FUJ02E3" +#define ACPI_FUJITSU_HOTKEY_DRIVER_NAME "Fujitsu laptop FUJ02E3 ACPI hotkeys driver" +#define ACPI_FUJITSU_HOTKEY_DEVICE_NAME "Fujitsu FUJ02E3" + +#define ACPI_FUJITSU_NOTIFY_CODE1 0x80 + +#define ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS 0x86 +#define ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS 0x87 + +/* Hotkey details */ +#define LOCK_KEY 0x410 /* codes for the keys in the GIRB register */ +#define DISPLAY_KEY 0x411 /* keys are mapped to KEY_SCREENLOCK (the key with the key symbol) */ +#define ENERGY_KEY 0x412 /* KEY_MEDIA (the key with the laptop symbol, KEY_EMAIL (E key)) */ +#define REST_KEY 0x413 /* KEY_SUSPEND (R key) */ + +#define MAX_HOTKEY_RINGBUFFER_SIZE 100 +#define RINGBUFFERSIZE 40 + +/* Debugging */ +#define FUJLAPTOP_LOG ACPI_FUJITSU_HID ": " +#define FUJLAPTOP_ERR KERN_ERR FUJLAPTOP_LOG +#define FUJLAPTOP_NOTICE KERN_NOTICE FUJLAPTOP_LOG +#define FUJLAPTOP_INFO KERN_INFO FUJLAPTOP_LOG +#define FUJLAPTOP_DEBUG KERN_DEBUG FUJLAPTOP_LOG + +#define FUJLAPTOP_DBG_ALL 0xffff +#define FUJLAPTOP_DBG_ERROR 0x0001 +#define FUJLAPTOP_DBG_WARN 0x0002 +#define FUJLAPTOP_DBG_INFO 0x0004 +#define FUJLAPTOP_DBG_TRACE 0x0008 + +#define dbg_printk(a_dbg_level, format, arg...) \ + do { if (dbg_level & a_dbg_level) \ + printk(FUJLAPTOP_DEBUG "%s: " format, __func__ , ## arg); \ + } while (0) +#ifdef CONFIG_FUJITSU_LAPTOP_DEBUG +#define vdbg_printk(a_dbg_level, format, arg...) \ + dbg_printk(a_dbg_level, format, ## arg) +#else +#define vdbg_printk(a_dbg_level, format, arg...) 
+#endif + +/* Device controlling the backlight and associated keys */ struct fujitsu_t { acpi_handle acpi_handle; + struct acpi_device *dev; + struct input_dev *input; + char phys[32]; struct backlight_device *bl_device; struct platform_device *pf_device; - unsigned long fuj02b1_state; + unsigned int max_brightness; unsigned int brightness_changed; unsigned int brightness_level; }; static struct fujitsu_t *fujitsu; +static int use_alt_lcd_levels = -1; +static int disable_brightness_keys = -1; +static int disable_brightness_adjust = -1; + +/* Device used to access other hotkeys on the laptop */ +struct fujitsu_hotkey_t { + acpi_handle acpi_handle; + struct acpi_device *dev; + struct input_dev *input; + char phys[32]; + struct platform_device *pf_device; + struct kfifo *fifo; + spinlock_t fifo_lock; + + unsigned int irb; /* info about the pressed buttons */ +}; -/* Hardware access */ +static struct fujitsu_hotkey_t *fujitsu_hotkey; + +static void acpi_fujitsu_hotkey_notify(acpi_handle handle, u32 event, + void *data); + +#ifdef CONFIG_FUJITSU_LAPTOP_DEBUG +static u32 dbg_level = 0x03; +#endif + +static void acpi_fujitsu_notify(acpi_handle handle, u32 event, void *data); + +/* Hardware access for LCD brightness control */ static int set_lcd_level(int level) { @@ -81,7 +167,10 @@ static int set_lcd_level(int level) struct acpi_object_list arg_list = { 1, &arg0 }; acpi_handle handle = NULL; - if (level < 0 || level >= FUJITSU_LCD_N_LEVELS) + vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBLL [%d]\n", + level); + + if (level < 0 || level >= fujitsu->max_brightness) return -EINVAL; if (!fujitsu) @@ -89,7 +178,38 @@ static int set_lcd_level(int level) status = acpi_get_handle(fujitsu->acpi_handle, "SBLL", &handle); if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "SBLL not present\n")); + vdbg_printk(FUJLAPTOP_DBG_ERROR, "SBLL not present\n"); + return -ENODEV; + } + + arg0.integer.value = level; + + status = acpi_evaluate_object(handle, NULL, &arg_list, NULL); + if (ACPI_FAILURE(status)) + return -ENODEV; + + return 0; +} + +static int set_lcd_level_alt(int level) +{ + acpi_status status = AE_OK; + union acpi_object arg0 = { ACPI_TYPE_INTEGER }; + struct acpi_object_list arg_list = { 1, &arg0 }; + acpi_handle handle = NULL; + + vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBL2 [%d]\n", + level); + + if (level < 0 || level >= fujitsu->max_brightness) + return -EINVAL; + + if (!fujitsu) + return -EINVAL; + + status = acpi_get_handle(fujitsu->acpi_handle, "SBL2", &handle); + if (ACPI_FAILURE(status)) { + vdbg_printk(FUJLAPTOP_DBG_ERROR, "SBL2 not present\n"); return -ENODEV; } @@ -107,13 +227,52 @@ static int get_lcd_level(void) unsigned long state = 0; acpi_status status = AE_OK; - // Get the Brightness + vdbg_printk(FUJLAPTOP_DBG_TRACE, "get lcd level via GBLL\n"); + status = acpi_evaluate_integer(fujitsu->acpi_handle, "GBLL", NULL, &state); if (status < 0) return status; - fujitsu->fuj02b1_state = state; + fujitsu->brightness_level = state & 0x0fffffff; + + if (state & 0x80000000) + fujitsu->brightness_changed = 1; + else + fujitsu->brightness_changed = 0; + + return fujitsu->brightness_level; +} + +static int get_max_brightness(void) +{ + unsigned long state = 0; + acpi_status status = AE_OK; + + vdbg_printk(FUJLAPTOP_DBG_TRACE, "get max lcd level via RBLL\n"); + + status = + acpi_evaluate_integer(fujitsu->acpi_handle, "RBLL", NULL, &state); + if (status < 0) + return status; + + fujitsu->max_brightness = state; + + return fujitsu->max_brightness; +} + +static int 
get_lcd_level_alt(void) +{ + unsigned long state = 0; + acpi_status status = AE_OK; + + vdbg_printk(FUJLAPTOP_DBG_TRACE, "get lcd level via GBLS\n"); + + status = + acpi_evaluate_integer(fujitsu->acpi_handle, "GBLS", NULL, &state); + if (status < 0) + return status; + fujitsu->brightness_level = state & 0x0fffffff; if (state & 0x80000000) @@ -128,12 +287,18 @@ static int get_lcd_level(void) static int bl_get_brightness(struct backlight_device *b) { - return get_lcd_level(); + if (use_alt_lcd_levels) + return get_lcd_level_alt(); + else + return get_lcd_level(); } static int bl_update_status(struct backlight_device *b) { - return set_lcd_level(b->props.brightness); + if (use_alt_lcd_levels) + return set_lcd_level_alt(b->props.brightness); + else + return set_lcd_level(b->props.brightness); } static struct backlight_ops fujitsubl_ops = { @@ -141,7 +306,35 @@ static struct backlight_ops fujitsubl_ops = { .update_status = bl_update_status, }; -/* Platform device */ +/* Platform LCD brightness device */ + +static ssize_t +show_max_brightness(struct device *dev, + struct device_attribute *attr, char *buf) +{ + + int ret; + + ret = get_max_brightness(); + if (ret < 0) + return ret; + + return sprintf(buf, "%i\n", ret); +} + +static ssize_t +show_brightness_changed(struct device *dev, + struct device_attribute *attr, char *buf) +{ + + int ret; + + ret = fujitsu->brightness_changed; + if (ret < 0) + return ret; + + return sprintf(buf, "%i\n", ret); +} static ssize_t show_lcd_level(struct device *dev, struct device_attribute *attr, char *buf) @@ -149,7 +342,10 @@ static ssize_t show_lcd_level(struct device *dev, int ret; - ret = get_lcd_level(); + if (use_alt_lcd_levels) + ret = get_lcd_level_alt(); + else + ret = get_lcd_level(); if (ret < 0) return ret; @@ -164,19 +360,61 @@ static ssize_t store_lcd_level(struct device *dev, int level, ret; if (sscanf(buf, "%i", &level) != 1 - || (level < 0 || level >= FUJITSU_LCD_N_LEVELS)) + || (level < 0 || level >= fujitsu->max_brightness)) return -EINVAL; - ret = set_lcd_level(level); + if (use_alt_lcd_levels) + ret = set_lcd_level_alt(level); + else + ret = set_lcd_level(level); + if (ret < 0) + return ret; + + if (use_alt_lcd_levels) + ret = get_lcd_level_alt(); + else + ret = get_lcd_level(); if (ret < 0) return ret; return count; } +/* Hardware access for hotkey device */ + +static int get_irb(void) +{ + unsigned long state = 0; + acpi_status status = AE_OK; + + vdbg_printk(FUJLAPTOP_DBG_TRACE, "Get irb\n"); + + status = + acpi_evaluate_integer(fujitsu_hotkey->acpi_handle, "GIRB", NULL, + &state); + if (status < 0) + return status; + + fujitsu_hotkey->irb = state; + + return fujitsu_hotkey->irb; +} + +static ssize_t +ignore_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + return count; +} + +static DEVICE_ATTR(max_brightness, 0444, show_max_brightness, ignore_store); +static DEVICE_ATTR(brightness_changed, 0444, show_brightness_changed, + ignore_store); static DEVICE_ATTR(lcd_level, 0644, show_lcd_level, store_lcd_level); static struct attribute *fujitsupf_attributes[] = { + &dev_attr_brightness_changed.attr, + &dev_attr_max_brightness.attr, &dev_attr_lcd_level.attr, NULL }; @@ -192,14 +430,52 @@ static struct platform_driver fujitsupf_driver = { } }; -/* ACPI device */ +static int dmi_check_cb_s6410(const struct dmi_system_id *id) +{ + acpi_handle handle; + int have_blnf; + printk(KERN_INFO "fujitsu-laptop: Identified laptop model '%s'.\n", + id->ident); + have_blnf = ACPI_SUCCESS + (acpi_get_handle(NULL, 
"\\_SB.PCI0.GFX0.LCD.BLNF", &handle)); + if (use_alt_lcd_levels == -1) { + vdbg_printk(FUJLAPTOP_DBG_TRACE, "auto-detecting usealt\n"); + use_alt_lcd_levels = 1; + } + if (disable_brightness_keys == -1) { + vdbg_printk(FUJLAPTOP_DBG_TRACE, + "auto-detecting disable_keys\n"); + disable_brightness_keys = have_blnf ? 1 : 0; + } + if (disable_brightness_adjust == -1) { + vdbg_printk(FUJLAPTOP_DBG_TRACE, + "auto-detecting disable_adjust\n"); + disable_brightness_adjust = have_blnf ? 0 : 1; + } + return 0; +} + +static struct dmi_system_id __initdata fujitsu_dmi_table[] = { + { + .ident = "Fujitsu Siemens", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK S6410"), + }, + .callback = dmi_check_cb_s6410}, + {} +}; + +/* ACPI device for LCD brightness control */ static int acpi_fujitsu_add(struct acpi_device *device) { + acpi_status status; + acpi_handle handle; int result = 0; int state = 0; - - ACPI_FUNCTION_TRACE("acpi_fujitsu_add"); + struct input_dev *input; + int error; if (!device) return -EINVAL; @@ -209,10 +485,42 @@ static int acpi_fujitsu_add(struct acpi_device *device) sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS); acpi_driver_data(device) = fujitsu; + status = acpi_install_notify_handler(device->handle, + ACPI_DEVICE_NOTIFY, + acpi_fujitsu_notify, fujitsu); + + if (ACPI_FAILURE(status)) { + printk(KERN_ERR "Error installing notify handler\n"); + error = -ENODEV; + goto err_stop; + } + + fujitsu->input = input = input_allocate_device(); + if (!input) { + error = -ENOMEM; + goto err_uninstall_notify; + } + + snprintf(fujitsu->phys, sizeof(fujitsu->phys), + "%s/video/input0", acpi_device_hid(device)); + + input->name = acpi_device_name(device); + input->phys = fujitsu->phys; + input->id.bustype = BUS_HOST; + input->id.product = 0x06; + input->dev.parent = &device->dev; + input->evbit[0] = BIT(EV_KEY); + set_bit(KEY_BRIGHTNESSUP, input->keybit); + set_bit(KEY_BRIGHTNESSDOWN, input->keybit); + set_bit(KEY_UNKNOWN, input->keybit); + + error = input_register_device(input); + if (error) + goto err_free_input_dev; + result = acpi_bus_get_power(fujitsu->acpi_handle, &state); if (result) { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, - "Error reading power state\n")); + printk(KERN_ERR "Error reading power state\n"); goto end; } @@ -220,22 +528,373 @@ static int acpi_fujitsu_add(struct acpi_device *device) acpi_device_name(device), acpi_device_bid(device), !device->power.state ? "on" : "off"); - end: + fujitsu->dev = device; + + if (ACPI_SUCCESS + (acpi_get_handle(device->handle, METHOD_NAME__INI, &handle))) { + vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n"); + if (ACPI_FAILURE + (acpi_evaluate_object + (device->handle, METHOD_NAME__INI, NULL, NULL))) + printk(KERN_ERR "_INI Method failed\n"); + } + + /* do config (detect defaults) */ + dmi_check_system(fujitsu_dmi_table); + use_alt_lcd_levels = use_alt_lcd_levels == 1 ? 1 : 0; + disable_brightness_keys = disable_brightness_keys == 1 ? 1 : 0; + disable_brightness_adjust = disable_brightness_adjust == 1 ? 
1 : 0; + vdbg_printk(FUJLAPTOP_DBG_INFO, + "config: [alt interface: %d], [key disable: %d], [adjust disable: %d]\n", + use_alt_lcd_levels, disable_brightness_keys, + disable_brightness_adjust); + + if (get_max_brightness() <= 0) + fujitsu->max_brightness = FUJITSU_LCD_N_LEVELS; + if (use_alt_lcd_levels) + get_lcd_level_alt(); + else + get_lcd_level(); + + return result; + +end: +err_free_input_dev: + input_free_device(input); +err_uninstall_notify: + acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY, + acpi_fujitsu_notify); +err_stop: return result; } static int acpi_fujitsu_remove(struct acpi_device *device, int type) { - ACPI_FUNCTION_TRACE("acpi_fujitsu_remove"); + acpi_status status; + struct fujitsu_t *fujitsu = NULL; if (!device || !acpi_driver_data(device)) return -EINVAL; + + fujitsu = acpi_driver_data(device); + + status = acpi_remove_notify_handler(fujitsu->acpi_handle, + ACPI_DEVICE_NOTIFY, + acpi_fujitsu_notify); + + if (!device || !acpi_driver_data(device)) + return -EINVAL; + fujitsu->acpi_handle = NULL; return 0; } +/* Brightness notify */ + +static void acpi_fujitsu_notify(acpi_handle handle, u32 event, void *data) +{ + struct input_dev *input; + int keycode; + int oldb, newb; + + input = fujitsu->input; + + switch (event) { + case ACPI_FUJITSU_NOTIFY_CODE1: + keycode = 0; + oldb = fujitsu->brightness_level; + get_lcd_level(); /* the alt version always yields changed */ + newb = fujitsu->brightness_level; + + vdbg_printk(FUJLAPTOP_DBG_TRACE, + "brightness button event [%i -> %i (%i)]\n", + oldb, newb, fujitsu->brightness_changed); + + if (oldb == newb && fujitsu->brightness_changed) { + keycode = 0; + if (disable_brightness_keys != 1) { + if (oldb == 0) { + acpi_bus_generate_proc_event(fujitsu-> + dev, + ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS, + 0); + keycode = KEY_BRIGHTNESSDOWN; + } else if (oldb == + (fujitsu->max_brightness) - 1) { + acpi_bus_generate_proc_event(fujitsu-> + dev, + ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS, + 0); + keycode = KEY_BRIGHTNESSUP; + } + } + } else if (oldb < newb) { + if (disable_brightness_adjust != 1) { + if (use_alt_lcd_levels) + set_lcd_level_alt(newb); + else + set_lcd_level(newb); + } + if (disable_brightness_keys != 1) { + acpi_bus_generate_proc_event(fujitsu->dev, + ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS, + 0); + keycode = KEY_BRIGHTNESSUP; + } + } else if (oldb > newb) { + if (disable_brightness_adjust != 1) { + if (use_alt_lcd_levels) + set_lcd_level_alt(newb); + else + set_lcd_level(newb); + } + if (disable_brightness_keys != 1) { + acpi_bus_generate_proc_event(fujitsu->dev, + ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS, + 0); + keycode = KEY_BRIGHTNESSDOWN; + } + } else { + keycode = KEY_UNKNOWN; + } + break; + default: + keycode = KEY_UNKNOWN; + vdbg_printk(FUJLAPTOP_DBG_WARN, + "unsupported event [0x%x]\n", event); + break; + } + + if (keycode != 0) { + input_report_key(input, keycode, 1); + input_sync(input); + input_report_key(input, keycode, 0); + input_sync(input); + } + + return; +} + +/* ACPI device for hotkey handling */ + +static int acpi_fujitsu_hotkey_add(struct acpi_device *device) +{ + acpi_status status; + acpi_handle handle; + int result = 0; + int state = 0; + struct input_dev *input; + int error; + int i; + + if (!device) + return -EINVAL; + + fujitsu_hotkey->acpi_handle = device->handle; + sprintf(acpi_device_name(device), "%s", + ACPI_FUJITSU_HOTKEY_DEVICE_NAME); + sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS); + acpi_driver_data(device) = fujitsu_hotkey; + + status = acpi_install_notify_handler(device->handle, + 
ACPI_DEVICE_NOTIFY, + acpi_fujitsu_hotkey_notify, + fujitsu_hotkey); + + if (ACPI_FAILURE(status)) { + printk(KERN_ERR "Error installing notify handler\n"); + error = -ENODEV; + goto err_stop; + } + + /* kfifo */ + spin_lock_init(&fujitsu_hotkey->fifo_lock); + fujitsu_hotkey->fifo = + kfifo_alloc(RINGBUFFERSIZE * sizeof(int), GFP_KERNEL, + &fujitsu_hotkey->fifo_lock); + if (IS_ERR(fujitsu_hotkey->fifo)) { + printk(KERN_ERR "kfifo_alloc failed\n"); + error = PTR_ERR(fujitsu_hotkey->fifo); + goto err_stop; + } + + fujitsu_hotkey->input = input = input_allocate_device(); + if (!input) { + error = -ENOMEM; + goto err_uninstall_notify; + } + + snprintf(fujitsu_hotkey->phys, sizeof(fujitsu_hotkey->phys), + "%s/video/input0", acpi_device_hid(device)); + + input->name = acpi_device_name(device); + input->phys = fujitsu_hotkey->phys; + input->id.bustype = BUS_HOST; + input->id.product = 0x06; + input->dev.parent = &device->dev; + input->evbit[0] = BIT(EV_KEY); + set_bit(KEY_SCREENLOCK, input->keybit); + set_bit(KEY_MEDIA, input->keybit); + set_bit(KEY_EMAIL, input->keybit); + set_bit(KEY_SUSPEND, input->keybit); + set_bit(KEY_UNKNOWN, input->keybit); + + error = input_register_device(input); + if (error) + goto err_free_input_dev; + + result = acpi_bus_get_power(fujitsu_hotkey->acpi_handle, &state); + if (result) { + printk(KERN_ERR "Error reading power state\n"); + goto end; + } + + printk(KERN_INFO PREFIX "%s [%s] (%s)\n", + acpi_device_name(device), acpi_device_bid(device), + !device->power.state ? "on" : "off"); + + fujitsu_hotkey->dev = device; + + if (ACPI_SUCCESS + (acpi_get_handle(device->handle, METHOD_NAME__INI, &handle))) { + vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n"); + if (ACPI_FAILURE + (acpi_evaluate_object + (device->handle, METHOD_NAME__INI, NULL, NULL))) + printk(KERN_ERR "_INI Method failed\n"); + } + + i = 0; /* Discard hotkey ringbuffer */ + while (get_irb() != 0 && (i++) < MAX_HOTKEY_RINGBUFFER_SIZE) ; + vdbg_printk(FUJLAPTOP_DBG_INFO, "Discarded %i ringbuffer entries\n", i); + + return result; + +end: +err_free_input_dev: + input_free_device(input); +err_uninstall_notify: + acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY, + acpi_fujitsu_hotkey_notify); + kfifo_free(fujitsu_hotkey->fifo); +err_stop: + + return result; +} + +static int acpi_fujitsu_hotkey_remove(struct acpi_device *device, int type) +{ + acpi_status status; + struct fujitsu_hotkey_t *fujitsu_hotkey = NULL; + + if (!device || !acpi_driver_data(device)) + return -EINVAL; + + fujitsu_hotkey = acpi_driver_data(device); + + status = acpi_remove_notify_handler(fujitsu_hotkey->acpi_handle, + ACPI_DEVICE_NOTIFY, + acpi_fujitsu_hotkey_notify); + + fujitsu_hotkey->acpi_handle = NULL; + + kfifo_free(fujitsu_hotkey->fifo); + + return 0; +} + +static void acpi_fujitsu_hotkey_notify(acpi_handle handle, u32 event, + void *data) +{ + struct input_dev *input; + int keycode, keycode_r; + unsigned int irb = 1; + int i, status; + + input = fujitsu_hotkey->input; + + vdbg_printk(FUJLAPTOP_DBG_TRACE, "Hotkey event\n"); + + switch (event) { + case ACPI_FUJITSU_NOTIFY_CODE1: + i = 0; + while ((irb = get_irb()) != 0 + && (i++) < MAX_HOTKEY_RINGBUFFER_SIZE) { + vdbg_printk(FUJLAPTOP_DBG_TRACE, "GIRB result [%x]\n", + irb); + + switch (irb & 0x4ff) { + case LOCK_KEY: + keycode = KEY_SCREENLOCK; + break; + case DISPLAY_KEY: + keycode = KEY_MEDIA; + break; + case ENERGY_KEY: + keycode = KEY_EMAIL; + break; + case REST_KEY: + keycode = KEY_SUSPEND; + break; + case 0: + keycode = 0; + break; + default: + 
vdbg_printk(FUJLAPTOP_DBG_WARN, + "Unknown GIRB result [%x]\n", irb); + keycode = -1; + break; + } + if (keycode > 0) { + vdbg_printk(FUJLAPTOP_DBG_TRACE, + "Push keycode into ringbuffer [%d]\n", + keycode); + status = kfifo_put(fujitsu_hotkey->fifo, + (unsigned char *)&keycode, + sizeof(keycode)); + if (status != sizeof(keycode)) { + vdbg_printk(FUJLAPTOP_DBG_WARN, + "Could not push keycode [0x%x]\n", + keycode); + } else { + input_report_key(input, keycode, 1); + input_sync(input); + } + } else if (keycode == 0) { + while ((status = + kfifo_get + (fujitsu_hotkey->fifo, (unsigned char *) + &keycode_r, + sizeof + (keycode_r))) == sizeof(keycode_r)) { + input_report_key(input, keycode_r, 0); + input_sync(input); + vdbg_printk(FUJLAPTOP_DBG_TRACE, + "Pop keycode from ringbuffer [%d]\n", + keycode_r); + } + } + } + + break; + default: + keycode = KEY_UNKNOWN; + vdbg_printk(FUJLAPTOP_DBG_WARN, + "Unsupported event [0x%x]\n", event); + input_report_key(input, keycode, 1); + input_sync(input); + input_report_key(input, keycode, 0); + input_sync(input); + break; + } + + return; +} + +/* Initialization */ + static const struct acpi_device_id fujitsu_device_ids[] = { {ACPI_FUJITSU_HID, 0}, {"", 0}, @@ -251,11 +910,24 @@ static struct acpi_driver acpi_fujitsu_driver = { }, }; -/* Initialization */ +static const struct acpi_device_id fujitsu_hotkey_device_ids[] = { + {ACPI_FUJITSU_HOTKEY_HID, 0}, + {"", 0}, +}; + +static struct acpi_driver acpi_fujitsu_hotkey_driver = { + .name = ACPI_FUJITSU_HOTKEY_DRIVER_NAME, + .class = ACPI_FUJITSU_CLASS, + .ids = fujitsu_hotkey_device_ids, + .ops = { + .add = acpi_fujitsu_hotkey_add, + .remove = acpi_fujitsu_hotkey_remove, + }, +}; static int __init fujitsu_init(void) { - int ret, result; + int ret, result, max_brightness; if (acpi_disabled) return -ENODEV; @@ -271,19 +943,6 @@ static int __init fujitsu_init(void) goto fail_acpi; } - /* Register backlight stuff */ - - fujitsu->bl_device = - backlight_device_register("fujitsu-laptop", NULL, NULL, - &fujitsubl_ops); - if (IS_ERR(fujitsu->bl_device)) - return PTR_ERR(fujitsu->bl_device); - - fujitsu->bl_device->props.max_brightness = FUJITSU_LCD_N_LEVELS - 1; - ret = platform_driver_register(&fujitsupf_driver); - if (ret) - goto fail_backlight; - /* Register platform stuff */ fujitsu->pf_device = platform_device_alloc("fujitsu-laptop", -1); @@ -302,28 +961,68 @@ static int __init fujitsu_init(void) if (ret) goto fail_platform_device2; + /* Register backlight stuff */ + + fujitsu->bl_device = + backlight_device_register("fujitsu-laptop", NULL, NULL, + &fujitsubl_ops); + if (IS_ERR(fujitsu->bl_device)) + return PTR_ERR(fujitsu->bl_device); + + max_brightness = fujitsu->max_brightness; + + fujitsu->bl_device->props.max_brightness = max_brightness - 1; + fujitsu->bl_device->props.brightness = fujitsu->brightness_level; + + ret = platform_driver_register(&fujitsupf_driver); + if (ret) + goto fail_backlight; + + /* Register hotkey driver */ + + fujitsu_hotkey = kmalloc(sizeof(struct fujitsu_hotkey_t), GFP_KERNEL); + if (!fujitsu_hotkey) { + ret = -ENOMEM; + goto fail_hotkey; + } + memset(fujitsu_hotkey, 0, sizeof(struct fujitsu_hotkey_t)); + + result = acpi_bus_register_driver(&acpi_fujitsu_hotkey_driver); + if (result < 0) { + ret = -ENODEV; + goto fail_hotkey1; + } + printk(KERN_INFO "fujitsu-laptop: driver " FUJITSU_DRIVER_VERSION " successfully loaded.\n"); return 0; - fail_platform_device2: +fail_hotkey1: - platform_device_del(fujitsu->pf_device); - - fail_platform_device1: - - 
platform_device_put(fujitsu->pf_device); + kfree(fujitsu_hotkey); - fail_platform_driver: +fail_hotkey: platform_driver_unregister(&fujitsupf_driver); - fail_backlight: +fail_backlight: backlight_device_unregister(fujitsu->bl_device); - fail_acpi: +fail_platform_device2: + + platform_device_del(fujitsu->pf_device); + +fail_platform_device1: + + platform_device_put(fujitsu->pf_device); + +fail_platform_driver: + + acpi_bus_unregister_driver(&acpi_fujitsu_driver); + +fail_acpi: kfree(fujitsu); @@ -342,19 +1041,43 @@ static void __exit fujitsu_cleanup(void) kfree(fujitsu); + acpi_bus_unregister_driver(&acpi_fujitsu_hotkey_driver); + + kfree(fujitsu_hotkey); + printk(KERN_INFO "fujitsu-laptop: driver unloaded.\n"); } module_init(fujitsu_init); module_exit(fujitsu_cleanup); -MODULE_AUTHOR("Jonathan Woithe"); +module_param(use_alt_lcd_levels, uint, 0644); +MODULE_PARM_DESC(use_alt_lcd_levels, + "Use alternative interface for lcd_levels (needed for Lifebook s6410)."); +module_param(disable_brightness_keys, uint, 0644); +MODULE_PARM_DESC(disable_brightness_keys, + "Disable brightness keys (eg. if they are already handled by the generic ACPI_VIDEO device)."); +module_param(disable_brightness_adjust, uint, 0644); +MODULE_PARM_DESC(disable_brightness_adjust, "Disable brightness adjustment ."); +#ifdef CONFIG_FUJITSU_LAPTOP_DEBUG +module_param_named(debug, dbg_level, uint, 0644); +MODULE_PARM_DESC(debug, "Sets debug level bit-mask"); +#endif + +MODULE_AUTHOR("Jonathan Woithe, Peter Gruber"); MODULE_DESCRIPTION("Fujitsu laptop extras support"); MODULE_VERSION(FUJITSU_DRIVER_VERSION); MODULE_LICENSE("GPL"); +MODULE_ALIAS + ("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1D3:*:cvrS6410:*"); +MODULE_ALIAS + ("dmi:*:svnFUJITSU:*:pvr:rvnFUJITSU:rnFJNB19C:*:cvrS7020:*"); + static struct pnp_device_id pnp_ids[] = { { .id = "FUJ02bf" }, + { .id = "FUJ02B1" }, + { .id = "FUJ02E3" }, { .id = "" } }; MODULE_DEVICE_TABLE(pnp, pnp_ids); diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index f9ad960d7c1a..66e5a5487c20 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -2,7 +2,7 @@ * Block driver for media (i.e., flash cards) * * Copyright 2002 Hewlett-Packard Company - * Copyright 2005-2007 Pierre Ossman + * Copyright 2005-2008 Pierre Ossman * * Use consistent with the GNU GPL is permitted, * provided that this copyright notice is @@ -237,17 +237,6 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) if (brq.data.blocks > card->host->max_blk_count) brq.data.blocks = card->host->max_blk_count; - /* - * If the host doesn't support multiple block writes, force - * block writes to single block. SD cards are excepted from - * this rule as they support querying the number of - * successfully written sectors. - */ - if (rq_data_dir(req) != READ && - !(card->host->caps & MMC_CAP_MULTIWRITE) && - !mmc_card_sd(card)) - brq.data.blocks = 1; - if (brq.data.blocks > 1) { /* SPI multiblock writes terminate using a special * token, not a STOP_TRANSMISSION request. @@ -296,22 +285,24 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) mmc_queue_bounce_post(mq); + /* + * Check for errors here, but don't jump to cmd_err + * until later as we need to wait for the card to leave + * programming mode even when things go wrong. 
+ */ if (brq.cmd.error) { printk(KERN_ERR "%s: error %d sending read/write command\n", req->rq_disk->disk_name, brq.cmd.error); - goto cmd_err; } if (brq.data.error) { printk(KERN_ERR "%s: error %d transferring data\n", req->rq_disk->disk_name, brq.data.error); - goto cmd_err; } if (brq.stop.error) { printk(KERN_ERR "%s: error %d sending stop command\n", req->rq_disk->disk_name, brq.stop.error); - goto cmd_err; } if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) { @@ -344,6 +335,9 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) #endif } + if (brq.cmd.error || brq.data.error || brq.stop.error) + goto cmd_err; + /* * A block was successfully transferred. */ @@ -362,30 +356,32 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) * mark the known good sectors as ok. * * If the card is not SD, we can still ok written sectors - * if the controller can do proper error reporting. + * as reported by the controller (which might be less than + * the real number of written sectors, but never more). * * For reads we just fail the entire chunk as that should * be safe in all cases. */ - if (rq_data_dir(req) != READ && mmc_card_sd(card)) { - u32 blocks; - unsigned int bytes; - - blocks = mmc_sd_num_wr_blocks(card); - if (blocks != (u32)-1) { - if (card->csd.write_partial) - bytes = blocks << md->block_bits; - else - bytes = blocks << 9; + if (rq_data_dir(req) != READ) { + if (mmc_card_sd(card)) { + u32 blocks; + unsigned int bytes; + + blocks = mmc_sd_num_wr_blocks(card); + if (blocks != (u32)-1) { + if (card->csd.write_partial) + bytes = blocks << md->block_bits; + else + bytes = blocks << 9; + spin_lock_irq(&md->lock); + ret = __blk_end_request(req, 0, bytes); + spin_unlock_irq(&md->lock); + } + } else { spin_lock_irq(&md->lock); - ret = __blk_end_request(req, 0, bytes); + ret = __blk_end_request(req, 0, brq.data.bytes_xfered); spin_unlock_irq(&md->lock); } - } else if (rq_data_dir(req) != READ && - (card->host->caps & MMC_CAP_MULTIWRITE)) { - spin_lock_irq(&md->lock); - ret = __blk_end_request(req, 0, brq.data.bytes_xfered); - spin_unlock_irq(&md->lock); } mmc_release_host(card->host); diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c index ffadee549a41..d6b9b486417c 100644 --- a/drivers/mmc/card/mmc_test.c +++ b/drivers/mmc/card/mmc_test.c @@ -1,7 +1,7 @@ /* * linux/drivers/mmc/card/mmc_test.c * - * Copyright 2007 Pierre Ossman + * Copyright 2007-2008 Pierre Ossman * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -26,13 +26,17 @@ struct mmc_test_card { struct mmc_card *card; + u8 scratch[BUFFER_SIZE]; u8 *buffer; }; /*******************************************************************/ -/* Helper functions */ +/* General helper functions */ /*******************************************************************/ +/* + * Configure correct block size in card + */ static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size) { struct mmc_command cmd; @@ -48,117 +52,61 @@ static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size) return 0; } -static int __mmc_test_transfer(struct mmc_test_card *test, int write, - unsigned broken_xfer, u8 *buffer, unsigned addr, - unsigned blocks, unsigned blksz) +/* + * Fill in the mmc_request structure given a set of transfer parameters. 
+ */ +static void mmc_test_prepare_mrq(struct mmc_test_card *test, + struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len, + unsigned dev_addr, unsigned blocks, unsigned blksz, int write) { - int ret, busy; - - struct mmc_request mrq; - struct mmc_command cmd; - struct mmc_command stop; - struct mmc_data data; - - struct scatterlist sg; - - memset(&mrq, 0, sizeof(struct mmc_request)); - - mrq.cmd = &cmd; - mrq.data = &data; - - memset(&cmd, 0, sizeof(struct mmc_command)); + BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop); - if (broken_xfer) { - if (blocks > 1) { - cmd.opcode = write ? - MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK; - } else { - cmd.opcode = MMC_SEND_STATUS; - } + if (blocks > 1) { + mrq->cmd->opcode = write ? + MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK; } else { - if (blocks > 1) { - cmd.opcode = write ? - MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK; - } else { - cmd.opcode = write ? - MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK; - } + mrq->cmd->opcode = write ? + MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK; } - if (broken_xfer && blocks == 1) - cmd.arg = test->card->rca << 16; - else - cmd.arg = addr; - cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; + mrq->cmd->arg = dev_addr; + mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC; - memset(&stop, 0, sizeof(struct mmc_command)); - - if (!broken_xfer && (blocks > 1)) { - stop.opcode = MMC_STOP_TRANSMISSION; - stop.arg = 0; - stop.flags = MMC_RSP_R1B | MMC_CMD_AC; - - mrq.stop = &stop; + if (blocks == 1) + mrq->stop = NULL; + else { + mrq->stop->opcode = MMC_STOP_TRANSMISSION; + mrq->stop->arg = 0; + mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC; } - memset(&data, 0, sizeof(struct mmc_data)); - - data.blksz = blksz; - data.blocks = blocks; - data.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ; - data.sg = &sg; - data.sg_len = 1; - - sg_init_one(&sg, buffer, blocks * blksz); - - mmc_set_data_timeout(&data, test->card); + mrq->data->blksz = blksz; + mrq->data->blocks = blocks; + mrq->data->flags = write ? 
MMC_DATA_WRITE : MMC_DATA_READ; + mrq->data->sg = sg; + mrq->data->sg_len = sg_len; - mmc_wait_for_req(test->card->host, &mrq); - - ret = 0; - - if (broken_xfer) { - if (!ret && cmd.error) - ret = cmd.error; - if (!ret && data.error == 0) - ret = RESULT_FAIL; - if (!ret && data.error != -ETIMEDOUT) - ret = data.error; - if (!ret && stop.error) - ret = stop.error; - if (blocks > 1) { - if (!ret && data.bytes_xfered > blksz) - ret = RESULT_FAIL; - } else { - if (!ret && data.bytes_xfered > 0) - ret = RESULT_FAIL; - } - } else { - if (!ret && cmd.error) - ret = cmd.error; - if (!ret && data.error) - ret = data.error; - if (!ret && stop.error) - ret = stop.error; - if (!ret && data.bytes_xfered != blocks * blksz) - ret = RESULT_FAIL; - } + mmc_set_data_timeout(mrq->data, test->card); +} - if (ret == -EINVAL) - ret = RESULT_UNSUP_HOST; +/* + * Wait for the card to finish the busy state + */ +static int mmc_test_wait_busy(struct mmc_test_card *test) +{ + int ret, busy; + struct mmc_command cmd; busy = 0; do { - int ret2; - memset(&cmd, 0, sizeof(struct mmc_command)); cmd.opcode = MMC_SEND_STATUS; cmd.arg = test->card->rca << 16; cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; - ret2 = mmc_wait_for_cmd(test->card->host, &cmd, 0); - if (ret2) + ret = mmc_wait_for_cmd(test->card->host, &cmd, 0); + if (ret) break; if (!busy && !(cmd.resp[0] & R1_READY_FOR_DATA)) { @@ -172,14 +120,57 @@ static int __mmc_test_transfer(struct mmc_test_card *test, int write, return ret; } -static int mmc_test_transfer(struct mmc_test_card *test, int write, - u8 *buffer, unsigned addr, unsigned blocks, unsigned blksz) +/* + * Transfer a single sector of kernel addressable data + */ +static int mmc_test_buffer_transfer(struct mmc_test_card *test, + u8 *buffer, unsigned addr, unsigned blksz, int write) { - return __mmc_test_transfer(test, write, 0, buffer, - addr, blocks, blksz); + int ret; + + struct mmc_request mrq; + struct mmc_command cmd; + struct mmc_command stop; + struct mmc_data data; + + struct scatterlist sg; + + memset(&mrq, 0, sizeof(struct mmc_request)); + memset(&cmd, 0, sizeof(struct mmc_command)); + memset(&data, 0, sizeof(struct mmc_data)); + memset(&stop, 0, sizeof(struct mmc_command)); + + mrq.cmd = &cmd; + mrq.data = &data; + mrq.stop = &stop; + + sg_init_one(&sg, buffer, blksz); + + mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write); + + mmc_wait_for_req(test->card->host, &mrq); + + if (cmd.error) + return cmd.error; + if (data.error) + return data.error; + + ret = mmc_test_wait_busy(test); + if (ret) + return ret; + + return 0; } -static int mmc_test_prepare_verify(struct mmc_test_card *test, int write) +/*******************************************************************/ +/* Test preparation and cleanup */ +/*******************************************************************/ + +/* + * Fill the first couple of sectors of the card with known data + * so that bad reads/writes can be detected + */ +static int __mmc_test_prepare(struct mmc_test_card *test, int write) { int ret, i; @@ -188,15 +179,14 @@ static int mmc_test_prepare_verify(struct mmc_test_card *test, int write) return ret; if (write) - memset(test->buffer, 0xDF, BUFFER_SIZE); + memset(test->buffer, 0xDF, 512); else { - for (i = 0;i < BUFFER_SIZE;i++) + for (i = 0;i < 512;i++) test->buffer[i] = i; } for (i = 0;i < BUFFER_SIZE / 512;i++) { - ret = mmc_test_transfer(test, 1, test->buffer + i * 512, - i * 512, 1, 512); + ret = mmc_test_buffer_transfer(test, test->buffer, i * 512, 512, 1); if (ret) return ret; } @@ -204,41 +194,218 @@ static 
int mmc_test_prepare_verify(struct mmc_test_card *test, int write) return 0; } -static int mmc_test_prepare_verify_write(struct mmc_test_card *test) +static int mmc_test_prepare_write(struct mmc_test_card *test) +{ + return __mmc_test_prepare(test, 1); +} + +static int mmc_test_prepare_read(struct mmc_test_card *test) +{ + return __mmc_test_prepare(test, 0); +} + +static int mmc_test_cleanup(struct mmc_test_card *test) { - return mmc_test_prepare_verify(test, 1); + int ret, i; + + ret = mmc_test_set_blksize(test, 512); + if (ret) + return ret; + + memset(test->buffer, 0, 512); + + for (i = 0;i < BUFFER_SIZE / 512;i++) { + ret = mmc_test_buffer_transfer(test, test->buffer, i * 512, 512, 1); + if (ret) + return ret; + } + + return 0; } -static int mmc_test_prepare_verify_read(struct mmc_test_card *test) +/*******************************************************************/ +/* Test execution helpers */ +/*******************************************************************/ + +/* + * Modifies the mmc_request to perform the "short transfer" tests + */ +static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test, + struct mmc_request *mrq, int write) { - return mmc_test_prepare_verify(test, 0); + BUG_ON(!mrq || !mrq->cmd || !mrq->data); + + if (mrq->data->blocks > 1) { + mrq->cmd->opcode = write ? + MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK; + mrq->stop = NULL; + } else { + mrq->cmd->opcode = MMC_SEND_STATUS; + mrq->cmd->arg = test->card->rca << 16; + } } -static int mmc_test_verified_transfer(struct mmc_test_card *test, int write, - u8 *buffer, unsigned addr, unsigned blocks, unsigned blksz) +/* + * Checks that a normal transfer didn't have any errors + */ +static int mmc_test_check_result(struct mmc_test_card *test, + struct mmc_request *mrq) { - int ret, i, sectors; + int ret; - /* - * It is assumed that the above preparation has been done. 
- */ + BUG_ON(!mrq || !mrq->cmd || !mrq->data); + + ret = 0; - memset(test->buffer, 0, BUFFER_SIZE); + if (!ret && mrq->cmd->error) + ret = mrq->cmd->error; + if (!ret && mrq->data->error) + ret = mrq->data->error; + if (!ret && mrq->stop && mrq->stop->error) + ret = mrq->stop->error; + if (!ret && mrq->data->bytes_xfered != + mrq->data->blocks * mrq->data->blksz) + ret = RESULT_FAIL; + + if (ret == -EINVAL) + ret = RESULT_UNSUP_HOST; + + return ret; +} + +/* + * Checks that a "short transfer" behaved as expected + */ +static int mmc_test_check_broken_result(struct mmc_test_card *test, + struct mmc_request *mrq) +{ + int ret; + + BUG_ON(!mrq || !mrq->cmd || !mrq->data); + + ret = 0; + + if (!ret && mrq->cmd->error) + ret = mrq->cmd->error; + if (!ret && mrq->data->error == 0) + ret = RESULT_FAIL; + if (!ret && mrq->data->error != -ETIMEDOUT) + ret = mrq->data->error; + if (!ret && mrq->stop && mrq->stop->error) + ret = mrq->stop->error; + if (mrq->data->blocks > 1) { + if (!ret && mrq->data->bytes_xfered > mrq->data->blksz) + ret = RESULT_FAIL; + } else { + if (!ret && mrq->data->bytes_xfered > 0) + ret = RESULT_FAIL; + } + + if (ret == -EINVAL) + ret = RESULT_UNSUP_HOST; + + return ret; +} + +/* + * Tests a basic transfer with certain parameters + */ +static int mmc_test_simple_transfer(struct mmc_test_card *test, + struct scatterlist *sg, unsigned sg_len, unsigned dev_addr, + unsigned blocks, unsigned blksz, int write) +{ + struct mmc_request mrq; + struct mmc_command cmd; + struct mmc_command stop; + struct mmc_data data; + + memset(&mrq, 0, sizeof(struct mmc_request)); + memset(&cmd, 0, sizeof(struct mmc_command)); + memset(&data, 0, sizeof(struct mmc_data)); + memset(&stop, 0, sizeof(struct mmc_command)); + + mrq.cmd = &cmd; + mrq.data = &data; + mrq.stop = &stop; + + mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr, + blocks, blksz, write); + + mmc_wait_for_req(test->card->host, &mrq); + + mmc_test_wait_busy(test); + + return mmc_test_check_result(test, &mrq); +} + +/* + * Tests a transfer where the card will fail completely or partly + */ +static int mmc_test_broken_transfer(struct mmc_test_card *test, + unsigned blocks, unsigned blksz, int write) +{ + struct mmc_request mrq; + struct mmc_command cmd; + struct mmc_command stop; + struct mmc_data data; + + struct scatterlist sg; + + memset(&mrq, 0, sizeof(struct mmc_request)); + memset(&cmd, 0, sizeof(struct mmc_command)); + memset(&data, 0, sizeof(struct mmc_data)); + memset(&stop, 0, sizeof(struct mmc_command)); + + mrq.cmd = &cmd; + mrq.data = &data; + mrq.stop = &stop; + + sg_init_one(&sg, test->buffer, blocks * blksz); + + mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write); + mmc_test_prepare_broken_mrq(test, &mrq, write); + + mmc_wait_for_req(test->card->host, &mrq); + + mmc_test_wait_busy(test); + + return mmc_test_check_broken_result(test, &mrq); +} + +/* + * Does a complete transfer test where data is also validated + * + * Note: mmc_test_prepare() must have been done before this call + */ +static int mmc_test_transfer(struct mmc_test_card *test, + struct scatterlist *sg, unsigned sg_len, unsigned dev_addr, + unsigned blocks, unsigned blksz, int write) +{ + int ret, i; + unsigned long flags; if (write) { for (i = 0;i < blocks * blksz;i++) - buffer[i] = i; + test->scratch[i] = i; + } else { + memset(test->scratch, 0, BUFFER_SIZE); } + local_irq_save(flags); + sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE); + local_irq_restore(flags); ret = mmc_test_set_blksize(test, blksz); if (ret) return 
ret; - ret = mmc_test_transfer(test, write, buffer, addr, blocks, blksz); + ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr, + blocks, blksz, write); if (ret) return ret; if (write) { + int sectors; + ret = mmc_test_set_blksize(test, 512); if (ret) return ret; @@ -253,9 +420,9 @@ static int mmc_test_verified_transfer(struct mmc_test_card *test, int write, memset(test->buffer, 0, sectors * 512); for (i = 0;i < sectors;i++) { - ret = mmc_test_transfer(test, 0, + ret = mmc_test_buffer_transfer(test, test->buffer + i * 512, - addr + i * 512, 1, 512); + dev_addr + i * 512, 512, 0); if (ret) return ret; } @@ -270,8 +437,11 @@ static int mmc_test_verified_transfer(struct mmc_test_card *test, int write, return RESULT_FAIL; } } else { + local_irq_save(flags); + sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE); + local_irq_restore(flags); for (i = 0;i < blocks * blksz;i++) { - if (buffer[i] != (u8)i) + if (test->scratch[i] != (u8)i) return RESULT_FAIL; } } @@ -279,26 +449,6 @@ static int mmc_test_verified_transfer(struct mmc_test_card *test, int write, return 0; } -static int mmc_test_cleanup_verify(struct mmc_test_card *test) -{ - int ret, i; - - ret = mmc_test_set_blksize(test, 512); - if (ret) - return ret; - - memset(test->buffer, 0, BUFFER_SIZE); - - for (i = 0;i < BUFFER_SIZE / 512;i++) { - ret = mmc_test_transfer(test, 1, test->buffer + i * 512, - i * 512, 1, 512); - if (ret) - return ret; - } - - return 0; -} - /*******************************************************************/ /* Tests */ /*******************************************************************/ @@ -314,12 +464,15 @@ struct mmc_test_case { static int mmc_test_basic_write(struct mmc_test_card *test) { int ret; + struct scatterlist sg; ret = mmc_test_set_blksize(test, 512); if (ret) return ret; - ret = mmc_test_transfer(test, 1, test->buffer, 0, 1, 512); + sg_init_one(&sg, test->buffer, 512); + + ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1); if (ret) return ret; @@ -329,12 +482,15 @@ static int mmc_test_basic_write(struct mmc_test_card *test) static int mmc_test_basic_read(struct mmc_test_card *test) { int ret; + struct scatterlist sg; ret = mmc_test_set_blksize(test, 512); if (ret) return ret; - ret = mmc_test_transfer(test, 0, test->buffer, 0, 1, 512); + sg_init_one(&sg, test->buffer, 512); + + ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1); if (ret) return ret; @@ -344,8 +500,11 @@ static int mmc_test_basic_read(struct mmc_test_card *test) static int mmc_test_verify_write(struct mmc_test_card *test) { int ret; + struct scatterlist sg; + + sg_init_one(&sg, test->buffer, 512); - ret = mmc_test_verified_transfer(test, 1, test->buffer, 0, 1, 512); + ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1); if (ret) return ret; @@ -355,8 +514,11 @@ static int mmc_test_verify_write(struct mmc_test_card *test) static int mmc_test_verify_read(struct mmc_test_card *test) { int ret; + struct scatterlist sg; + + sg_init_one(&sg, test->buffer, 512); - ret = mmc_test_verified_transfer(test, 0, test->buffer, 0, 1, 512); + ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0); if (ret) return ret; @@ -367,6 +529,7 @@ static int mmc_test_multi_write(struct mmc_test_card *test) { int ret; unsigned int size; + struct scatterlist sg; if (test->card->host->max_blk_count == 1) return RESULT_UNSUP_HOST; @@ -379,8 +542,9 @@ static int mmc_test_multi_write(struct mmc_test_card *test) if (size < 1024) return RESULT_UNSUP_HOST; - ret = mmc_test_verified_transfer(test, 1, test->buffer, 0, - size / 512, 512); + 
sg_init_one(&sg, test->buffer, size); + + ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1); if (ret) return ret; @@ -391,6 +555,7 @@ static int mmc_test_multi_read(struct mmc_test_card *test) { int ret; unsigned int size; + struct scatterlist sg; if (test->card->host->max_blk_count == 1) return RESULT_UNSUP_HOST; @@ -403,8 +568,9 @@ static int mmc_test_multi_read(struct mmc_test_card *test) if (size < 1024) return RESULT_UNSUP_HOST; - ret = mmc_test_verified_transfer(test, 0, test->buffer, 0, - size / 512, 512); + sg_init_one(&sg, test->buffer, size); + + ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0); if (ret) return ret; @@ -414,13 +580,14 @@ static int mmc_test_multi_read(struct mmc_test_card *test) static int mmc_test_pow2_write(struct mmc_test_card *test) { int ret, i; + struct scatterlist sg; if (!test->card->csd.write_partial) return RESULT_UNSUP_CARD; for (i = 1; i < 512;i <<= 1) { - ret = mmc_test_verified_transfer(test, 1, - test->buffer, 0, 1, i); + sg_init_one(&sg, test->buffer, i); + ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1); if (ret) return ret; } @@ -431,13 +598,14 @@ static int mmc_test_pow2_write(struct mmc_test_card *test) static int mmc_test_pow2_read(struct mmc_test_card *test) { int ret, i; + struct scatterlist sg; if (!test->card->csd.read_partial) return RESULT_UNSUP_CARD; for (i = 1; i < 512;i <<= 1) { - ret = mmc_test_verified_transfer(test, 0, - test->buffer, 0, 1, i); + sg_init_one(&sg, test->buffer, i); + ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0); if (ret) return ret; } @@ -448,13 +616,14 @@ static int mmc_test_pow2_read(struct mmc_test_card *test) static int mmc_test_weird_write(struct mmc_test_card *test) { int ret, i; + struct scatterlist sg; if (!test->card->csd.write_partial) return RESULT_UNSUP_CARD; for (i = 3; i < 512;i += 7) { - ret = mmc_test_verified_transfer(test, 1, - test->buffer, 0, 1, i); + sg_init_one(&sg, test->buffer, i); + ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1); if (ret) return ret; } @@ -465,13 +634,14 @@ static int mmc_test_weird_write(struct mmc_test_card *test) static int mmc_test_weird_read(struct mmc_test_card *test) { int ret, i; + struct scatterlist sg; if (!test->card->csd.read_partial) return RESULT_UNSUP_CARD; for (i = 3; i < 512;i += 7) { - ret = mmc_test_verified_transfer(test, 0, - test->buffer, 0, 1, i); + sg_init_one(&sg, test->buffer, i); + ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0); if (ret) return ret; } @@ -482,10 +652,11 @@ static int mmc_test_weird_read(struct mmc_test_card *test) static int mmc_test_align_write(struct mmc_test_card *test) { int ret, i; + struct scatterlist sg; for (i = 1;i < 4;i++) { - ret = mmc_test_verified_transfer(test, 1, test->buffer + i, - 0, 1, 512); + sg_init_one(&sg, test->buffer + i, 512); + ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1); if (ret) return ret; } @@ -496,10 +667,11 @@ static int mmc_test_align_write(struct mmc_test_card *test) static int mmc_test_align_read(struct mmc_test_card *test) { int ret, i; + struct scatterlist sg; for (i = 1;i < 4;i++) { - ret = mmc_test_verified_transfer(test, 0, test->buffer + i, - 0, 1, 512); + sg_init_one(&sg, test->buffer + i, 512); + ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0); if (ret) return ret; } @@ -511,6 +683,7 @@ static int mmc_test_align_multi_write(struct mmc_test_card *test) { int ret, i; unsigned int size; + struct scatterlist sg; if (test->card->host->max_blk_count == 1) return RESULT_UNSUP_HOST; @@ -524,8 +697,8 @@ static int mmc_test_align_multi_write(struct 
mmc_test_card *test) return RESULT_UNSUP_HOST; for (i = 1;i < 4;i++) { - ret = mmc_test_verified_transfer(test, 1, test->buffer + i, - 0, size / 512, 512); + sg_init_one(&sg, test->buffer + i, size); + ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1); if (ret) return ret; } @@ -537,6 +710,7 @@ static int mmc_test_align_multi_read(struct mmc_test_card *test) { int ret, i; unsigned int size; + struct scatterlist sg; if (test->card->host->max_blk_count == 1) return RESULT_UNSUP_HOST; @@ -550,8 +724,8 @@ static int mmc_test_align_multi_read(struct mmc_test_card *test) return RESULT_UNSUP_HOST; for (i = 1;i < 4;i++) { - ret = mmc_test_verified_transfer(test, 0, test->buffer + i, - 0, size / 512, 512); + sg_init_one(&sg, test->buffer + i, size); + ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0); if (ret) return ret; } @@ -567,7 +741,7 @@ static int mmc_test_xfersize_write(struct mmc_test_card *test) if (ret) return ret; - ret = __mmc_test_transfer(test, 1, 1, test->buffer, 0, 1, 512); + ret = mmc_test_broken_transfer(test, 1, 512, 1); if (ret) return ret; @@ -582,7 +756,7 @@ static int mmc_test_xfersize_read(struct mmc_test_card *test) if (ret) return ret; - ret = __mmc_test_transfer(test, 0, 1, test->buffer, 0, 1, 512); + ret = mmc_test_broken_transfer(test, 1, 512, 0); if (ret) return ret; @@ -600,7 +774,7 @@ static int mmc_test_multi_xfersize_write(struct mmc_test_card *test) if (ret) return ret; - ret = __mmc_test_transfer(test, 1, 1, test->buffer, 0, 2, 512); + ret = mmc_test_broken_transfer(test, 2, 512, 1); if (ret) return ret; @@ -618,7 +792,7 @@ static int mmc_test_multi_xfersize_read(struct mmc_test_card *test) if (ret) return ret; - ret = __mmc_test_transfer(test, 0, 1, test->buffer, 0, 2, 512); + ret = mmc_test_broken_transfer(test, 2, 512, 0); if (ret) return ret; @@ -638,86 +812,86 @@ static const struct mmc_test_case mmc_test_cases[] = { { .name = "Basic write (with data verification)", - .prepare = mmc_test_prepare_verify_write, + .prepare = mmc_test_prepare_write, .run = mmc_test_verify_write, - .cleanup = mmc_test_cleanup_verify, + .cleanup = mmc_test_cleanup, }, { .name = "Basic read (with data verification)", - .prepare = mmc_test_prepare_verify_read, + .prepare = mmc_test_prepare_read, .run = mmc_test_verify_read, - .cleanup = mmc_test_cleanup_verify, + .cleanup = mmc_test_cleanup, }, { .name = "Multi-block write", - .prepare = mmc_test_prepare_verify_write, + .prepare = mmc_test_prepare_write, .run = mmc_test_multi_write, - .cleanup = mmc_test_cleanup_verify, + .cleanup = mmc_test_cleanup, }, { .name = "Multi-block read", - .prepare = mmc_test_prepare_verify_read, + .prepare = mmc_test_prepare_read, .run = mmc_test_multi_read, - .cleanup = mmc_test_cleanup_verify, + .cleanup = mmc_test_cleanup, }, { .name = "Power of two block writes", - .prepare = mmc_test_prepare_verify_write, + .prepare = mmc_test_prepare_write, .run = mmc_test_pow2_write, - .cleanup = mmc_test_cleanup_verify, + .cleanup = mmc_test_cleanup, }, { .name = "Power of two block reads", - .prepare = mmc_test_prepare_verify_read, + .prepare = mmc_test_prepare_read, .run = mmc_test_pow2_read, - .cleanup = mmc_test_cleanup_verify, + .cleanup = mmc_test_cleanup, }, { .name = "Weird sized block writes", - .prepare = mmc_test_prepare_verify_write, + .prepare = mmc_test_prepare_write, .run = mmc_test_weird_write, - .cleanup = mmc_test_cleanup_verify, + .cleanup = mmc_test_cleanup, }, { .name = "Weird sized block reads", - .prepare = mmc_test_prepare_verify_read, + .prepare = 
mmc_test_prepare_read, .run = mmc_test_weird_read, - .cleanup = mmc_test_cleanup_verify, + .cleanup = mmc_test_cleanup, }, { .name = "Badly aligned write", - .prepare = mmc_test_prepare_verify_write, + .prepare = mmc_test_prepare_write, .run = mmc_test_align_write, - .cleanup = mmc_test_cleanup_verify, + .cleanup = mmc_test_cleanup, }, { .name = "Badly aligned read", - .prepare = mmc_test_prepare_verify_read, + .prepare = mmc_test_prepare_read, .run = mmc_test_align_read, - .cleanup = mmc_test_cleanup_verify, + .cleanup = mmc_test_cleanup, }, { .name = "Badly aligned multi-block write", - .prepare = mmc_test_prepare_verify_write, + .prepare = mmc_test_prepare_write, .run = mmc_test_align_multi_write, - .cleanup = mmc_test_cleanup_verify, + .cleanup = mmc_test_cleanup, }, { .name = "Badly aligned multi-block read", - .prepare = mmc_test_prepare_verify_read, + .prepare = mmc_test_prepare_read, .run = mmc_test_align_multi_read, - .cleanup = mmc_test_cleanup_verify, + .cleanup = mmc_test_cleanup, }, { @@ -743,7 +917,7 @@ static const struct mmc_test_case mmc_test_cases[] = { static struct mutex mmc_test_lock; -static void mmc_test_run(struct mmc_test_card *test) +static void mmc_test_run(struct mmc_test_card *test, int testcase) { int i, ret; @@ -753,6 +927,9 @@ static void mmc_test_run(struct mmc_test_card *test) mmc_claim_host(test->card->host); for (i = 0;i < ARRAY_SIZE(mmc_test_cases);i++) { + if (testcase && ((i + 1) != testcase)) + continue; + printk(KERN_INFO "%s: Test case %d. %s...\n", mmc_hostname(test->card->host), i + 1, mmc_test_cases[i].name); @@ -824,9 +1001,12 @@ static ssize_t mmc_test_store(struct device *dev, { struct mmc_card *card; struct mmc_test_card *test; + int testcase; card = container_of(dev, struct mmc_card, dev); + testcase = simple_strtol(buf, NULL, 10); + test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL); if (!test) return -ENOMEM; @@ -836,7 +1016,7 @@ static ssize_t mmc_test_store(struct device *dev, test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL); if (test->buffer) { mutex_lock(&mmc_test_lock); - mmc_test_run(test); + mmc_test_run(test, testcase); mutex_unlock(&mmc_test_lock); } @@ -852,6 +1032,9 @@ static int mmc_test_probe(struct mmc_card *card) { int ret; + if ((card->type != MMC_TYPE_MMC) && (card->type != MMC_TYPE_SD)) + return -ENODEV; + mutex_init(&mmc_test_lock); ret = device_create_file(&card->dev, &dev_attr_test); diff --git a/drivers/mmc/card/sdio_uart.c b/drivers/mmc/card/sdio_uart.c index eeea84c309e6..78ad48718ab0 100644 --- a/drivers/mmc/card/sdio_uart.c +++ b/drivers/mmc/card/sdio_uart.c @@ -885,12 +885,14 @@ static void sdio_uart_set_termios(struct tty_struct *tty, struct ktermios *old_t sdio_uart_release_func(port); } -static void sdio_uart_break_ctl(struct tty_struct *tty, int break_state) +static int sdio_uart_break_ctl(struct tty_struct *tty, int break_state) { struct sdio_uart_port *port = tty->driver_data; + int result; - if (sdio_uart_claim_func(port) != 0) - return; + result = sdio_uart_claim_func(port); + if (result != 0) + return result; if (break_state == -1) port->lcr |= UART_LCR_SBC; @@ -899,6 +901,7 @@ static void sdio_uart_break_ctl(struct tty_struct *tty, int break_state) sdio_out(port, UART_LCR, port->lcr); sdio_uart_release_func(port); + return 0; } static int sdio_uart_tiocmget(struct tty_struct *tty, struct file *file) diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 01ced4c5a61d..3ee5b8c3b5ce 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -3,7 +3,7 @@ * * Copyright (C) 
2003-2004 Russell King, All Rights Reserved. * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved. - * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved. + * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved. * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved. * * This program is free software; you can redistribute it and/or modify @@ -295,6 +295,33 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card) EXPORT_SYMBOL(mmc_set_data_timeout); /** + * mmc_align_data_size - pads a transfer size to a more optimal value + * @card: the MMC card associated with the data transfer + * @sz: original transfer size + * + * Pads the original data size with a number of extra bytes in + * order to avoid controller bugs and/or performance hits + * (e.g. some controllers revert to PIO for certain sizes). + * + * Returns the improved size, which might be unmodified. + * + * Note that this function is only relevant when issuing a + * single scatter gather entry. + */ +unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz) +{ + /* + * FIXME: We don't have a system for the controller to tell + * the core about its problems yet, so for now we just 32-bit + * align the size. + */ + sz = ((sz + 3) / 4) * 4; + + return sz; +} +EXPORT_SYMBOL(mmc_align_data_size); + +/** * __mmc_claim_host - exclusively claim a host * @host: mmc host to claim * @abort: whether or not the operation should be aborted @@ -638,6 +665,9 @@ void mmc_rescan(struct work_struct *work) */ mmc_bus_put(host); + if (host->ops->get_cd && host->ops->get_cd(host) == 0) + goto out; + mmc_claim_host(host); mmc_power_up(host); @@ -652,7 +682,7 @@ void mmc_rescan(struct work_struct *work) if (!err) { if (mmc_attach_sdio(host, ocr)) mmc_power_off(host); - return; + goto out; } /* @@ -662,7 +692,7 @@ void mmc_rescan(struct work_struct *work) if (!err) { if (mmc_attach_sd(host, ocr)) mmc_power_off(host); - return; + goto out; } /* @@ -672,7 +702,7 @@ void mmc_rescan(struct work_struct *work) if (!err) { if (mmc_attach_mmc(host, ocr)) mmc_power_off(host); - return; + goto out; } mmc_release_host(host); @@ -683,6 +713,9 @@ void mmc_rescan(struct work_struct *work) mmc_bus_put(host); } +out: + if (host->caps & MMC_CAP_NEEDS_POLL) + mmc_schedule_delayed_work(&host->detect, HZ); } void mmc_start_host(struct mmc_host *host) diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 3da29eef8f7d..fdd7c760be8c 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -288,7 +288,7 @@ static struct device_type mmc_type = { /* * Handle the detection and initialisation of a card. * - * In the case of a resume, "curcard" will contain the card + * In the case of a resume, "oldcard" will contain the card * we're trying to reinitialise. */ static int mmc_init_card(struct mmc_host *host, u32 ocr, diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index 7ef3b15c5e3d..26fc098d77cd 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -326,7 +326,7 @@ static struct device_type sd_type = { /* * Handle the detection and initialisation of a card. * - * In the case of a resume, "curcard" will contain the card + * In the case of a resume, "oldcard" will contain the card * we're trying to reinitialise. */ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr, @@ -494,13 +494,13 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr, * Check if read-only switch is active. 
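 * (With this change a host driver's get_ro() callback returns a negative
 * errno when it cannot read the write-protect switch, zero for a
 * read-write card and a positive value for a read-only one.)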
*/ if (!oldcard) { - if (!host->ops->get_ro) { + if (!host->ops->get_ro || host->ops->get_ro(host) < 0) { printk(KERN_WARNING "%s: host does not " "support reading read-only " "switch. assuming write-enable.\n", mmc_hostname(host)); } else { - if (host->ops->get_ro(host)) + if (host->ops->get_ro(host) > 0) mmc_card_set_readonly(card); } } diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c index d5e51b1c7b3f..956bd7677502 100644 --- a/drivers/mmc/core/sdio_cis.c +++ b/drivers/mmc/core/sdio_cis.c @@ -129,6 +129,12 @@ static int cistpl_funce_func(struct sdio_func *func, /* TPLFE_MAX_BLK_SIZE */ func->max_blksize = buf[12] | (buf[13] << 8); + /* TPLFE_ENABLE_TIMEOUT_VAL, present in ver 1.1 and above */ + if (vsn > SDIO_SDIO_REV_1_00) + func->enable_timeout = (buf[28] | (buf[29] << 8)) * 10; + else + func->enable_timeout = jiffies_to_msecs(HZ); + return 0; } diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c index 625b92ce9cef..f61fc2d4cd0a 100644 --- a/drivers/mmc/core/sdio_io.c +++ b/drivers/mmc/core/sdio_io.c @@ -1,7 +1,7 @@ /* * linux/drivers/mmc/core/sdio_io.c * - * Copyright 2007 Pierre Ossman + * Copyright 2007-2008 Pierre Ossman * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -76,11 +76,7 @@ int sdio_enable_func(struct sdio_func *func) if (ret) goto err; - /* - * FIXME: This should timeout based on information in the CIS, - * but we don't have card to parse that yet. - */ - timeout = jiffies + HZ; + timeout = jiffies + msecs_to_jiffies(func->enable_timeout); while (1) { ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IORx, 0, ®); @@ -167,10 +163,8 @@ int sdio_set_block_size(struct sdio_func *func, unsigned blksz) return -EINVAL; if (blksz == 0) { - blksz = min(min( - func->max_blksize, - func->card->host->max_blk_size), - 512u); + blksz = min(func->max_blksize, func->card->host->max_blk_size); + blksz = min(blksz, 512u); } ret = mmc_io_rw_direct(func->card, 1, 0, @@ -186,9 +180,116 @@ int sdio_set_block_size(struct sdio_func *func, unsigned blksz) func->cur_blksize = blksz; return 0; } - EXPORT_SYMBOL_GPL(sdio_set_block_size); +/* + * Calculate the maximum byte mode transfer size + */ +static inline unsigned int sdio_max_byte_size(struct sdio_func *func) +{ + unsigned mval = min(func->card->host->max_seg_size, + func->card->host->max_blk_size); + mval = min(mval, func->max_blksize); + return min(mval, 512u); /* maximum size for byte mode */ +} + +/** + * sdio_align_size - pads a transfer size to a more optimal value + * @func: SDIO function + * @sz: original transfer size + * + * Pads the original data size with a number of extra bytes in + * order to avoid controller bugs and/or performance hits + * (e.g. some controllers revert to PIO for certain sizes). + * + * If possible, it will also adjust the size so that it can be + * handled in just a single request. + * + * Returns the improved size, which might be unmodified. + */ +unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz) +{ + unsigned int orig_sz; + unsigned int blk_sz, byte_sz; + unsigned chunk_sz; + + orig_sz = sz; + + /* + * Do a first check with the controller, in case it + * wants to increase the size up to a point where it + * might need more than one block. + */ + sz = mmc_align_data_size(func->card, sz); + + /* + * If we can still do this with just a byte transfer, then + * we're done. 
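+ * A 5 byte request, for instance, has already been padded to 8 bytes by the
+ * mmc_align_data_size() call above and, being well below 512 bytes, stays in
+ * byte mode.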
+ */ + if (sz <= sdio_max_byte_size(func)) + return sz; + + if (func->card->cccr.multi_block) { + /* + * Check if the transfer is already block aligned + */ + if ((sz % func->cur_blksize) == 0) + return sz; + + /* + * Realign it so that it can be done with one request, + * and recheck if the controller still likes it. + */ + blk_sz = ((sz + func->cur_blksize - 1) / + func->cur_blksize) * func->cur_blksize; + blk_sz = mmc_align_data_size(func->card, blk_sz); + + /* + * This value is only good if it is still just + * one request. + */ + if ((blk_sz % func->cur_blksize) == 0) + return blk_sz; + + /* + * We failed to do one request, but at least try to + * pad the remainder properly. + */ + byte_sz = mmc_align_data_size(func->card, + sz % func->cur_blksize); + if (byte_sz <= sdio_max_byte_size(func)) { + blk_sz = sz / func->cur_blksize; + return blk_sz * func->cur_blksize + byte_sz; + } + } else { + /* + * We need multiple requests, so first check that the + * controller can handle the chunk size; + */ + chunk_sz = mmc_align_data_size(func->card, + sdio_max_byte_size(func)); + if (chunk_sz == sdio_max_byte_size(func)) { + /* + * Fix up the size of the remainder (if any) + */ + byte_sz = orig_sz % chunk_sz; + if (byte_sz) { + byte_sz = mmc_align_data_size(func->card, + byte_sz); + } + + return (orig_sz / chunk_sz) * chunk_sz + byte_sz; + } + } + + /* + * The controller is simply incapable of transferring the size + * we want in decent manner, so just return the original size. + */ + return orig_sz; +} +EXPORT_SYMBOL_GPL(sdio_align_size); + /* Split an arbitrarily sized data transfer into several * IO_RW_EXTENDED commands. */ static int sdio_io_rw_ext_helper(struct sdio_func *func, int write, @@ -199,14 +300,13 @@ static int sdio_io_rw_ext_helper(struct sdio_func *func, int write, int ret; /* Do the bulk of the transfer using block mode (if supported). */ - if (func->card->cccr.multi_block) { + if (func->card->cccr.multi_block && (size > sdio_max_byte_size(func))) { /* Blocks per command is limited by host count, host transfer * size (we only use a single sg entry) and the maximum for * IO_RW_EXTENDED of 511 blocks. */ - max_blocks = min(min( - func->card->host->max_blk_count, - func->card->host->max_seg_size / func->cur_blksize), - 511u); + max_blocks = min(func->card->host->max_blk_count, + func->card->host->max_seg_size / func->cur_blksize); + max_blocks = min(max_blocks, 511u); while (remainder > func->cur_blksize) { unsigned blocks; @@ -231,11 +331,7 @@ static int sdio_io_rw_ext_helper(struct sdio_func *func, int write, /* Write the remainder using byte mode. */ while (remainder > 0) { - size = remainder; - if (size > func->cur_blksize) - size = func->cur_blksize; - if (size > 512) - size = 512; /* maximum size for byte mode */ + size = min(remainder, sdio_max_byte_size(func)); ret = mmc_io_rw_extended(func->card, write, func->num, addr, incr_addr, buf, 1, size); @@ -260,11 +356,10 @@ static int sdio_io_rw_ext_helper(struct sdio_func *func, int write, * function. If there is a problem reading the address, 0xff * is returned and @err_ret will contain the error code. */ -unsigned char sdio_readb(struct sdio_func *func, unsigned int addr, - int *err_ret) +u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret) { int ret; - unsigned char val; + u8 val; BUG_ON(!func); @@ -293,8 +388,7 @@ EXPORT_SYMBOL_GPL(sdio_readb); * function. @err_ret will contain the status of the actual * transfer. 
*/ -void sdio_writeb(struct sdio_func *func, unsigned char b, unsigned int addr, - int *err_ret) +void sdio_writeb(struct sdio_func *func, u8 b, unsigned int addr, int *err_ret) { int ret; @@ -355,7 +449,6 @@ int sdio_readsb(struct sdio_func *func, void *dst, unsigned int addr, { return sdio_io_rw_ext_helper(func, 0, addr, 0, dst, count); } - EXPORT_SYMBOL_GPL(sdio_readsb); /** @@ -385,8 +478,7 @@ EXPORT_SYMBOL_GPL(sdio_writesb); * function. If there is a problem reading the address, 0xffff * is returned and @err_ret will contain the error code. */ -unsigned short sdio_readw(struct sdio_func *func, unsigned int addr, - int *err_ret) +u16 sdio_readw(struct sdio_func *func, unsigned int addr, int *err_ret) { int ret; @@ -400,7 +492,7 @@ unsigned short sdio_readw(struct sdio_func *func, unsigned int addr, return 0xFFFF; } - return le16_to_cpu(*(u16*)func->tmpbuf); + return le16_to_cpup((__le16 *)func->tmpbuf); } EXPORT_SYMBOL_GPL(sdio_readw); @@ -415,12 +507,11 @@ EXPORT_SYMBOL_GPL(sdio_readw); * function. @err_ret will contain the status of the actual * transfer. */ -void sdio_writew(struct sdio_func *func, unsigned short b, unsigned int addr, - int *err_ret) +void sdio_writew(struct sdio_func *func, u16 b, unsigned int addr, int *err_ret) { int ret; - *(u16*)func->tmpbuf = cpu_to_le16(b); + *(__le16 *)func->tmpbuf = cpu_to_le16(b); ret = sdio_memcpy_toio(func, addr, func->tmpbuf, 2); if (err_ret) @@ -439,8 +530,7 @@ EXPORT_SYMBOL_GPL(sdio_writew); * 0xffffffff is returned and @err_ret will contain the error * code. */ -unsigned long sdio_readl(struct sdio_func *func, unsigned int addr, - int *err_ret) +u32 sdio_readl(struct sdio_func *func, unsigned int addr, int *err_ret) { int ret; @@ -454,7 +544,7 @@ unsigned long sdio_readl(struct sdio_func *func, unsigned int addr, return 0xFFFFFFFF; } - return le32_to_cpu(*(u32*)func->tmpbuf); + return le32_to_cpup((__le32 *)func->tmpbuf); } EXPORT_SYMBOL_GPL(sdio_readl); @@ -469,12 +559,11 @@ EXPORT_SYMBOL_GPL(sdio_readl); * function. @err_ret will contain the status of the actual * transfer. */ -void sdio_writel(struct sdio_func *func, unsigned long b, unsigned int addr, - int *err_ret) +void sdio_writel(struct sdio_func *func, u32 b, unsigned int addr, int *err_ret) { int ret; - *(u32*)func->tmpbuf = cpu_to_le32(b); + *(__le32 *)func->tmpbuf = cpu_to_le32(b); ret = sdio_memcpy_toio(func, addr, func->tmpbuf, 4); if (err_ret) diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index dead61754ad7..dc6f2579f85c 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -26,18 +26,31 @@ config MMC_PXA config MMC_SDHCI tristate "Secure Digital Host Controller Interface support" - depends on PCI + depends on HAS_DMA help - This select the generic Secure Digital Host Controller Interface. + This selects the generic Secure Digital Host Controller Interface. It is used by manufacturers such as Texas Instruments(R), Ricoh(R) and Toshiba(R). Most controllers found in laptops are of this type. + + If you have a controller with this interface, say Y or M here. You + also need to enable an appropriate bus interface. + + If unsure, say N. + +config MMC_SDHCI_PCI + tristate "SDHCI support on PCI bus" + depends on MMC_SDHCI && PCI + help + This selects the PCI Secure Digital Host Controller Interface. + Most controllers found today are PCI devices. + If you have a controller with this interface, say Y or M here. If unsure, say N. 
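A minimal usage sketch (not part of the patch) of the sdio_align_size() helper added in the sdio_io.c hunks above, as a hypothetical SDIO function driver might call it; example_read_frame() and EXAMPLE_FRAME_REG are made-up names, while sdio_align_size(), sdio_claim_host(), sdio_release_host() and sdio_readsb() are the interfaces relied on here:

#include <linux/mmc/sdio_func.h>

#define EXAMPLE_FRAME_REG	0x1000	/* made-up function register */

/*
 * Read one frame from the card.  The length is padded with
 * sdio_align_size() so the host controller gets a transfer size it can
 * handle efficiently (in a single request when possible); "buf" must be
 * large enough to hold the padded length.
 */
static int example_read_frame(struct sdio_func *func, void *buf,
			      unsigned int len)
{
	unsigned int blen = sdio_align_size(func, len);
	int ret;

	sdio_claim_host(func);
	ret = sdio_readsb(func, buf, EXAMPLE_FRAME_REG, blen);
	sdio_release_host(func);

	return ret;
}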
config MMC_RICOH_MMC tristate "Ricoh MMC Controller Disabler (EXPERIMENTAL)" - depends on PCI && EXPERIMENTAL && MMC_SDHCI + depends on MMC_SDHCI_PCI help This selects the disabler for the Ricoh MMC Controller. This proprietary controller is unnecessary because the SDHCI driver @@ -91,6 +104,16 @@ config MMC_AT91 If unsure, say N. +config MMC_ATMELMCI + tristate "Atmel Multimedia Card Interface support" + depends on AVR32 + help + This selects the Atmel Multimedia Card Interface driver. If + you have an AT32 (AVR32) platform with a Multimedia Card + slot, say Y or M here. + + If unsure, say N. + config MMC_IMX tristate "Motorola i.MX Multimedia Card Interface support" depends on ARCH_IMX @@ -130,3 +153,24 @@ config MMC_SPI If unsure, or if your system has no SPI master driver, say N. +config MMC_S3C + tristate "Samsung S3C SD/MMC Card Interface support" + depends on ARCH_S3C2410 && MMC + help + This selects a driver for the MCI interface found in + Samsung's S3C2410, S3C2412, S3C2440, S3C2442 CPUs. + If you have a board based on one of those and a MMC/SD + slot, say Y or M here. + + If unsure, say N. + +config MMC_SDRICOH_CS + tristate "MMC/SD driver for Ricoh Bay1Controllers (EXPERIMENTAL)" + depends on EXPERIMENTAL && MMC && PCI && PCMCIA + help + Say Y here if your Notebook reports a Ricoh Bay1Controller PCMCIA + card whenever you insert a MMC or SD card into the card slot. + + To compile this driver as a module, choose M here: the + module will be called sdricoh_cs. + diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index 3877c87e6da2..db52eebfb50e 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile @@ -10,11 +10,15 @@ obj-$(CONFIG_MMC_ARMMMCI) += mmci.o obj-$(CONFIG_MMC_PXA) += pxamci.o obj-$(CONFIG_MMC_IMX) += imxmmc.o obj-$(CONFIG_MMC_SDHCI) += sdhci.o +obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o obj-$(CONFIG_MMC_RICOH_MMC) += ricoh_mmc.o obj-$(CONFIG_MMC_WBSD) += wbsd.o obj-$(CONFIG_MMC_AU1X) += au1xmmc.o obj-$(CONFIG_MMC_OMAP) += omap.o obj-$(CONFIG_MMC_AT91) += at91_mci.o +obj-$(CONFIG_MMC_ATMELMCI) += atmel-mci.o obj-$(CONFIG_MMC_TIFM_SD) += tifm_sd.o obj-$(CONFIG_MMC_SPI) += mmc_spi.o +obj-$(CONFIG_MMC_S3C) += s3cmci.o +obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c index 8979ad330a4d..f15e2064305c 100644 --- a/drivers/mmc/host/at91_mci.c +++ b/drivers/mmc/host/at91_mci.c @@ -125,9 +125,72 @@ struct at91mci_host /* Latest in the scatterlist that has been enabled for transfer */ int transfer_index; + + /* Timer for timeouts */ + struct timer_list timer; }; /* + * Reset the controller and restore most of the state + */ +static void at91_reset_host(struct at91mci_host *host) +{ + unsigned long flags; + u32 mr; + u32 sdcr; + u32 dtor; + u32 imr; + + local_irq_save(flags); + imr = at91_mci_read(host, AT91_MCI_IMR); + + at91_mci_write(host, AT91_MCI_IDR, 0xffffffff); + + /* save current state */ + mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff; + sdcr = at91_mci_read(host, AT91_MCI_SDCR); + dtor = at91_mci_read(host, AT91_MCI_DTOR); + + /* reset the controller */ + at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST); + + /* restore state */ + at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN); + at91_mci_write(host, AT91_MCI_MR, mr); + at91_mci_write(host, AT91_MCI_SDCR, sdcr); + at91_mci_write(host, AT91_MCI_DTOR, dtor); + at91_mci_write(host, AT91_MCI_IER, imr); + + /* make sure sdio interrupts will fire */ + at91_mci_read(host, AT91_MCI_SR); + + 
local_irq_restore(flags); +} + +static void at91_timeout_timer(unsigned long data) +{ + struct at91mci_host *host; + + host = (struct at91mci_host *)data; + + if (host->request) { + dev_err(host->mmc->parent, "Timeout waiting end of packet\n"); + + if (host->cmd && host->cmd->data) { + host->cmd->data->error = -ETIMEDOUT; + } else { + if (host->cmd) + host->cmd->error = -ETIMEDOUT; + else + host->request->cmd->error = -ETIMEDOUT; + } + + at91_reset_host(host); + mmc_request_done(host->mmc, host->request); + } +} + +/* * Copy from sg to a dma block - used for transfers */ static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data *data) @@ -135,9 +198,14 @@ static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data unsigned int len, i, size; unsigned *dmabuf = host->buffer; - size = host->total_length; + size = data->blksz * data->blocks; len = data->sg_len; + /* AT91SAM926[0/3] Data Write Operation and number of bytes erratum */ + if (cpu_is_at91sam9260() || cpu_is_at91sam9263()) + if (host->total_length == 12) + memset(dmabuf, 0, 12); + /* * Just loop through all entries. Size might not * be the entire list though so make sure that @@ -159,9 +227,10 @@ static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data for (index = 0; index < (amount / 4); index++) *dmabuf++ = swab32(sgbuffer[index]); - } - else + } else { memcpy(dmabuf, sgbuffer, amount); + dmabuf += amount; + } kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ); @@ -233,11 +302,11 @@ static void at91_mci_pre_dma_read(struct at91mci_host *host) if (i == 0) { at91_mci_write(host, ATMEL_PDC_RPR, sg->dma_address); - at91_mci_write(host, ATMEL_PDC_RCR, sg->length / 4); + at91_mci_write(host, ATMEL_PDC_RCR, (data->blksz & 0x3) ? sg->length : sg->length / 4); } else { at91_mci_write(host, ATMEL_PDC_RNPR, sg->dma_address); - at91_mci_write(host, ATMEL_PDC_RNCR, sg->length / 4); + at91_mci_write(host, ATMEL_PDC_RNCR, (data->blksz & 0x3) ? sg->length : sg->length / 4); } } @@ -277,8 +346,6 @@ static void at91_mci_post_dma_read(struct at91mci_host *host) dma_unmap_page(NULL, sg->dma_address, sg->length, DMA_FROM_DEVICE); - data->bytes_xfered += sg->length; - if (cpu_is_at91rm9200()) { /* AT91RM9200 errata */ unsigned int *buffer; int index; @@ -294,6 +361,8 @@ static void at91_mci_post_dma_read(struct at91mci_host *host) } flush_dcache_page(sg_page(sg)); + + data->bytes_xfered += sg->length; } /* Is there another transfer to trigger? 
*/ @@ -334,10 +403,32 @@ static void at91_mci_handle_transmitted(struct at91mci_host *host) at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE); } else at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY); +} + +/* + * Update bytes tranfered count during a write operation + */ +static void at91_mci_update_bytes_xfered(struct at91mci_host *host) +{ + struct mmc_data *data; + + /* always deal with the effective request (and not the current cmd) */ + + if (host->request->cmd && host->request->cmd->error != 0) + return; - data->bytes_xfered = host->total_length; + if (host->request->data) { + data = host->request->data; + if (data->flags & MMC_DATA_WRITE) { + /* card is in IDLE mode now */ + pr_debug("-> bytes_xfered %d, total_length = %d\n", + data->bytes_xfered, host->total_length); + data->bytes_xfered = data->blksz * data->blocks; + } + } } + /*Handle after command sent ready*/ static int at91_mci_handle_cmdrdy(struct at91mci_host *host) { @@ -350,8 +441,7 @@ static int at91_mci_handle_cmdrdy(struct at91mci_host *host) } else return 1; } else if (host->cmd->data->flags & MMC_DATA_WRITE) { /*After sendding multi-block-write command, start DMA transfer*/ - at91_mci_write(host, AT91_MCI_IER, AT91_MCI_TXBUFE); - at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE); + at91_mci_write(host, AT91_MCI_IER, AT91_MCI_TXBUFE | AT91_MCI_BLKE); at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN); } @@ -430,11 +520,19 @@ static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command if (data) { - if ( data->blksz & 0x3 ) { - pr_debug("Unsupported block size\n"); - cmd->error = -EINVAL; - mmc_request_done(host->mmc, host->request); - return; + if (cpu_is_at91rm9200() || cpu_is_at91sam9261()) { + if (data->blksz & 0x3) { + pr_debug("Unsupported block size\n"); + cmd->error = -EINVAL; + mmc_request_done(host->mmc, host->request); + return; + } + if (data->flags & MMC_DATA_STREAM) { + pr_debug("Stream commands not supported\n"); + cmd->error = -EINVAL; + mmc_request_done(host->mmc, host->request); + return; + } } block_length = data->blksz; @@ -481,8 +579,16 @@ static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command ier = AT91_MCI_CMDRDY; } else { /* zero block length and PDC mode */ - mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff; - at91_mci_write(host, AT91_MCI_MR, mr | (block_length << 16) | AT91_MCI_PDCMODE); + mr = at91_mci_read(host, AT91_MCI_MR) & 0x5fff; + mr |= (data->blksz & 0x3) ? 
AT91_MCI_PDCFBYTE : 0; + mr |= (block_length << 16); + mr |= AT91_MCI_PDCMODE; + at91_mci_write(host, AT91_MCI_MR, mr); + + if (!(cpu_is_at91rm9200() || cpu_is_at91sam9261())) + at91_mci_write(host, AT91_MCI_BLKR, + AT91_MCI_BLKR_BCNT(blocks) | + AT91_MCI_BLKR_BLKLEN(block_length)); /* * Disable the PDC controller @@ -508,6 +614,13 @@ static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command * Handle a write */ host->total_length = block_length * blocks; + /* + * AT91SAM926[0/3] Data Write Operation and + * number of bytes erratum + */ + if (cpu_is_at91sam9260 () || cpu_is_at91sam9263()) + if (host->total_length < 12) + host->total_length = 12; host->buffer = dma_alloc_coherent(NULL, host->total_length, &host->physical_address, GFP_KERNEL); @@ -517,7 +630,9 @@ static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command pr_debug("Transmitting %d bytes\n", host->total_length); at91_mci_write(host, ATMEL_PDC_TPR, host->physical_address); - at91_mci_write(host, ATMEL_PDC_TCR, host->total_length / 4); + at91_mci_write(host, ATMEL_PDC_TCR, (data->blksz & 0x3) ? + host->total_length : host->total_length / 4); + ier = AT91_MCI_CMDRDY; } } @@ -552,20 +667,26 @@ static void at91_mci_process_next(struct at91mci_host *host) else if ((!(host->flags & FL_SENT_STOP)) && host->request->stop) { host->flags |= FL_SENT_STOP; at91_mci_send_command(host, host->request->stop); - } - else + } else { + del_timer(&host->timer); + /* the at91rm9200 mci controller hangs after some transfers, + * and the workaround is to reset it after each transfer. + */ + if (cpu_is_at91rm9200()) + at91_reset_host(host); mmc_request_done(host->mmc, host->request); + } } /* * Handle a command that has been completed */ -static void at91_mci_completed_command(struct at91mci_host *host) +static void at91_mci_completed_command(struct at91mci_host *host, unsigned int status) { struct mmc_command *cmd = host->cmd; - unsigned int status; + struct mmc_data *data = cmd->data; - at91_mci_write(host, AT91_MCI_IDR, 0xffffffff); + at91_mci_write(host, AT91_MCI_IDR, 0xffffffff & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB)); cmd->resp[0] = at91_mci_read(host, AT91_MCI_RSPR(0)); cmd->resp[1] = at91_mci_read(host, AT91_MCI_RSPR(1)); @@ -577,25 +698,34 @@ static void at91_mci_completed_command(struct at91mci_host *host) host->buffer = NULL; } - status = at91_mci_read(host, AT91_MCI_SR); - - pr_debug("Status = %08X [%08X %08X %08X %08X]\n", - status, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]); + pr_debug("Status = %08X/%08x [%08X %08X %08X %08X]\n", + status, at91_mci_read(host, AT91_MCI_SR), + cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]); if (status & AT91_MCI_ERRORS) { if ((status & AT91_MCI_RCRCE) && !(mmc_resp_type(cmd) & MMC_RSP_CRC)) { cmd->error = 0; } else { - if (status & (AT91_MCI_RTOE | AT91_MCI_DTOE)) - cmd->error = -ETIMEDOUT; - else if (status & (AT91_MCI_RCRCE | AT91_MCI_DCRCE)) - cmd->error = -EILSEQ; - else - cmd->error = -EIO; + if (status & (AT91_MCI_DTOE | AT91_MCI_DCRCE)) { + if (data) { + if (status & AT91_MCI_DTOE) + data->error = -ETIMEDOUT; + else if (status & AT91_MCI_DCRCE) + data->error = -EILSEQ; + } + } else { + if (status & AT91_MCI_RTOE) + cmd->error = -ETIMEDOUT; + else if (status & AT91_MCI_RCRCE) + cmd->error = -EILSEQ; + else + cmd->error = -EIO; + } - pr_debug("Error detected and set to %d (cmd = %d, retries = %d)\n", - cmd->error, cmd->opcode, cmd->retries); + pr_debug("Error detected and set to %d/%d (cmd = %d, retries = %d)\n", + cmd->error, data ? 
data->error : 0, + cmd->opcode, cmd->retries); } } else @@ -613,6 +743,8 @@ static void at91_mci_request(struct mmc_host *mmc, struct mmc_request *mrq) host->request = mrq; host->flags = 0; + mod_timer(&host->timer, jiffies + HZ); + at91_mci_process_next(host); } @@ -736,6 +868,7 @@ static irqreturn_t at91_mci_irq(int irq, void *devid) if (int_status & AT91_MCI_NOTBUSY) { pr_debug("Card is ready\n"); + at91_mci_update_bytes_xfered(host); completed = 1; } @@ -744,9 +877,21 @@ static irqreturn_t at91_mci_irq(int irq, void *devid) if (int_status & AT91_MCI_BLKE) { pr_debug("Block transfer has ended\n"); - completed = 1; + if (host->request->data && host->request->data->blocks > 1) { + /* multi block write : complete multi write + * command and send stop */ + completed = 1; + } else { + at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY); + } } + if (int_status & AT91_MCI_SDIOIRQA) + mmc_signal_sdio_irq(host->mmc); + + if (int_status & AT91_MCI_SDIOIRQB) + mmc_signal_sdio_irq(host->mmc); + if (int_status & AT91_MCI_TXRDY) pr_debug("Ready to transmit\n"); @@ -761,10 +906,10 @@ static irqreturn_t at91_mci_irq(int irq, void *devid) if (completed) { pr_debug("Completed command\n"); - at91_mci_write(host, AT91_MCI_IDR, 0xffffffff); - at91_mci_completed_command(host); + at91_mci_write(host, AT91_MCI_IDR, 0xffffffff & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB)); + at91_mci_completed_command(host, int_status); } else - at91_mci_write(host, AT91_MCI_IDR, int_status); + at91_mci_write(host, AT91_MCI_IDR, int_status & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB)); return IRQ_HANDLED; } @@ -793,25 +938,33 @@ static irqreturn_t at91_mmc_det_irq(int irq, void *_host) static int at91_mci_get_ro(struct mmc_host *mmc) { - int read_only = 0; struct at91mci_host *host = mmc_priv(mmc); - if (host->board->wp_pin) { - read_only = gpio_get_value(host->board->wp_pin); - printk(KERN_WARNING "%s: card is %s\n", mmc_hostname(mmc), - (read_only ? "read-only" : "read-write") ); - } - else { - printk(KERN_WARNING "%s: host does not support reading read-only " - "switch. Assuming write-enable.\n", mmc_hostname(mmc)); - } - return read_only; + if (host->board->wp_pin) + return !!gpio_get_value(host->board->wp_pin); + /* + * Board doesn't support read only detection; let the mmc core + * decide what to do. + */ + return -ENOSYS; +} + +static void at91_mci_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ + struct at91mci_host *host = mmc_priv(mmc); + + pr_debug("%s: sdio_irq %c : %s\n", mmc_hostname(host->mmc), + host->board->slot_b ? 'B':'A', enable ? "enable" : "disable"); + at91_mci_write(host, enable ? AT91_MCI_IER : AT91_MCI_IDR, + host->board->slot_b ? 
AT91_MCI_SDIOIRQB : AT91_MCI_SDIOIRQA); + } static const struct mmc_host_ops at91_mci_ops = { .request = at91_mci_request, .set_ios = at91_mci_set_ios, .get_ro = at91_mci_get_ro, + .enable_sdio_irq = at91_mci_enable_sdio_irq, }; /* @@ -842,6 +995,7 @@ static int __init at91_mci_probe(struct platform_device *pdev) mmc->f_min = 375000; mmc->f_max = 25000000; mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + mmc->caps = MMC_CAP_SDIO_IRQ; mmc->max_blk_size = 4095; mmc->max_blk_count = mmc->max_req_size; @@ -935,6 +1089,8 @@ static int __init at91_mci_probe(struct platform_device *pdev) mmc_add_host(mmc); + setup_timer(&host->timer, at91_timeout_timer, (unsigned long)host); + /* * monitor card insertion/removal if we can */ @@ -995,6 +1151,7 @@ static int __exit at91_mci_remove(struct platform_device *pdev) } at91_mci_disable(host); + del_timer_sync(&host->timer); mmc_remove_host(mmc); free_irq(host->irq, host); diff --git a/drivers/mmc/host/atmel-mci-regs.h b/drivers/mmc/host/atmel-mci-regs.h new file mode 100644 index 000000000000..a9a5657706c6 --- /dev/null +++ b/drivers/mmc/host/atmel-mci-regs.h @@ -0,0 +1,91 @@ +/* + * Atmel MultiMedia Card Interface driver + * + * Copyright (C) 2004-2006 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __DRIVERS_MMC_ATMEL_MCI_H__ +#define __DRIVERS_MMC_ATMEL_MCI_H__ + +/* MCI Register Definitions */ +#define MCI_CR 0x0000 /* Control */ +# define MCI_CR_MCIEN ( 1 << 0) /* MCI Enable */ +# define MCI_CR_MCIDIS ( 1 << 1) /* MCI Disable */ +# define MCI_CR_SWRST ( 1 << 7) /* Software Reset */ +#define MCI_MR 0x0004 /* Mode */ +# define MCI_MR_CLKDIV(x) ((x) << 0) /* Clock Divider */ +# define MCI_MR_RDPROOF ( 1 << 11) /* Read Proof */ +# define MCI_MR_WRPROOF ( 1 << 12) /* Write Proof */ +#define MCI_DTOR 0x0008 /* Data Timeout */ +# define MCI_DTOCYC(x) ((x) << 0) /* Data Timeout Cycles */ +# define MCI_DTOMUL(x) ((x) << 4) /* Data Timeout Multiplier */ +#define MCI_SDCR 0x000c /* SD Card / SDIO */ +# define MCI_SDCSEL_SLOT_A ( 0 << 0) /* Select SD slot A */ +# define MCI_SDCSEL_SLOT_B ( 1 << 0) /* Select SD slot A */ +# define MCI_SDCBUS_1BIT ( 0 << 7) /* 1-bit data bus */ +# define MCI_SDCBUS_4BIT ( 1 << 7) /* 4-bit data bus */ +#define MCI_ARGR 0x0010 /* Command Argument */ +#define MCI_CMDR 0x0014 /* Command */ +# define MCI_CMDR_CMDNB(x) ((x) << 0) /* Command Opcode */ +# define MCI_CMDR_RSPTYP_NONE ( 0 << 6) /* No response */ +# define MCI_CMDR_RSPTYP_48BIT ( 1 << 6) /* 48-bit response */ +# define MCI_CMDR_RSPTYP_136BIT ( 2 << 6) /* 136-bit response */ +# define MCI_CMDR_SPCMD_INIT ( 1 << 8) /* Initialization command */ +# define MCI_CMDR_SPCMD_SYNC ( 2 << 8) /* Synchronized command */ +# define MCI_CMDR_SPCMD_INT ( 4 << 8) /* Interrupt command */ +# define MCI_CMDR_SPCMD_INTRESP ( 5 << 8) /* Interrupt response */ +# define MCI_CMDR_OPDCMD ( 1 << 11) /* Open Drain */ +# define MCI_CMDR_MAXLAT_5CYC ( 0 << 12) /* Max latency 5 cycles */ +# define MCI_CMDR_MAXLAT_64CYC ( 1 << 12) /* Max latency 64 cycles */ +# define MCI_CMDR_START_XFER ( 1 << 16) /* Start data transfer */ +# define MCI_CMDR_STOP_XFER ( 2 << 16) /* Stop data transfer */ +# define MCI_CMDR_TRDIR_WRITE ( 0 << 18) /* Write data */ +# define MCI_CMDR_TRDIR_READ ( 1 << 18) /* Read data */ +# define MCI_CMDR_BLOCK ( 0 << 19) /* Single-block transfer */ +# define MCI_CMDR_MULTI_BLOCK ( 1 << 19) /* Multi-block transfer */ 
+# define MCI_CMDR_STREAM ( 2 << 19) /* MMC Stream transfer */ +# define MCI_CMDR_SDIO_BYTE ( 4 << 19) /* SDIO Byte transfer */ +# define MCI_CMDR_SDIO_BLOCK ( 5 << 19) /* SDIO Block transfer */ +# define MCI_CMDR_SDIO_SUSPEND ( 1 << 24) /* SDIO Suspend Command */ +# define MCI_CMDR_SDIO_RESUME ( 2 << 24) /* SDIO Resume Command */ +#define MCI_BLKR 0x0018 /* Block */ +# define MCI_BCNT(x) ((x) << 0) /* Data Block Count */ +# define MCI_BLKLEN(x) ((x) << 16) /* Data Block Length */ +#define MCI_RSPR 0x0020 /* Response 0 */ +#define MCI_RSPR1 0x0024 /* Response 1 */ +#define MCI_RSPR2 0x0028 /* Response 2 */ +#define MCI_RSPR3 0x002c /* Response 3 */ +#define MCI_RDR 0x0030 /* Receive Data */ +#define MCI_TDR 0x0034 /* Transmit Data */ +#define MCI_SR 0x0040 /* Status */ +#define MCI_IER 0x0044 /* Interrupt Enable */ +#define MCI_IDR 0x0048 /* Interrupt Disable */ +#define MCI_IMR 0x004c /* Interrupt Mask */ +# define MCI_CMDRDY ( 1 << 0) /* Command Ready */ +# define MCI_RXRDY ( 1 << 1) /* Receiver Ready */ +# define MCI_TXRDY ( 1 << 2) /* Transmitter Ready */ +# define MCI_BLKE ( 1 << 3) /* Data Block Ended */ +# define MCI_DTIP ( 1 << 4) /* Data Transfer In Progress */ +# define MCI_NOTBUSY ( 1 << 5) /* Data Not Busy */ +# define MCI_SDIOIRQA ( 1 << 8) /* SDIO IRQ in slot A */ +# define MCI_SDIOIRQB ( 1 << 9) /* SDIO IRQ in slot B */ +# define MCI_RINDE ( 1 << 16) /* Response Index Error */ +# define MCI_RDIRE ( 1 << 17) /* Response Direction Error */ +# define MCI_RCRCE ( 1 << 18) /* Response CRC Error */ +# define MCI_RENDE ( 1 << 19) /* Response End Bit Error */ +# define MCI_RTOE ( 1 << 20) /* Response Time-Out Error */ +# define MCI_DCRCE ( 1 << 21) /* Data CRC Error */ +# define MCI_DTOE ( 1 << 22) /* Data Time-Out Error */ +# define MCI_OVRE ( 1 << 30) /* RX Overrun Error */ +# define MCI_UNRE ( 1 << 31) /* TX Underrun Error */ + +/* Register access macros */ +#define mci_readl(port,reg) \ + __raw_readl((port)->regs + MCI_##reg) +#define mci_writel(port,reg,value) \ + __raw_writel((value), (port)->regs + MCI_##reg) + +#endif /* __DRIVERS_MMC_ATMEL_MCI_H__ */ diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c new file mode 100644 index 000000000000..cce873c5a149 --- /dev/null +++ b/drivers/mmc/host/atmel-mci.c @@ -0,0 +1,981 @@ +/* + * Atmel MultiMedia Card Interface driver + * + * Copyright (C) 2004-2008 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include <linux/blkdev.h> +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/ioport.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/scatterlist.h> + +#include <linux/mmc/host.h> + +#include <asm/atmel-mci.h> +#include <asm/io.h> +#include <asm/unaligned.h> + +#include <asm/arch/board.h> +#include <asm/arch/gpio.h> + +#include "atmel-mci-regs.h" + +#define ATMCI_DATA_ERROR_FLAGS (MCI_DCRCE | MCI_DTOE | MCI_OVRE | MCI_UNRE) + +enum { + EVENT_CMD_COMPLETE = 0, + EVENT_DATA_ERROR, + EVENT_DATA_COMPLETE, + EVENT_STOP_SENT, + EVENT_STOP_COMPLETE, + EVENT_XFER_COMPLETE, +}; + +struct atmel_mci { + struct mmc_host *mmc; + void __iomem *regs; + + struct scatterlist *sg; + unsigned int pio_offset; + + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + + u32 cmd_status; + u32 data_status; + u32 stop_status; + u32 stop_cmdr; + + u32 mode_reg; + u32 sdc_reg; + + struct tasklet_struct tasklet; + unsigned long pending_events; + unsigned long completed_events; + + int present; + int detect_pin; + int wp_pin; + + /* For detect pin debouncing */ + struct timer_list detect_timer; + + unsigned long bus_hz; + unsigned long mapbase; + struct clk *mck; + struct platform_device *pdev; +}; + +#define atmci_is_completed(host, event) \ + test_bit(event, &host->completed_events) +#define atmci_test_and_clear_pending(host, event) \ + test_and_clear_bit(event, &host->pending_events) +#define atmci_test_and_set_completed(host, event) \ + test_and_set_bit(event, &host->completed_events) +#define atmci_set_completed(host, event) \ + set_bit(event, &host->completed_events) +#define atmci_set_pending(host, event) \ + set_bit(event, &host->pending_events) +#define atmci_clear_pending(host, event) \ + clear_bit(event, &host->pending_events) + + +static void atmci_enable(struct atmel_mci *host) +{ + clk_enable(host->mck); + mci_writel(host, CR, MCI_CR_MCIEN); + mci_writel(host, MR, host->mode_reg); + mci_writel(host, SDCR, host->sdc_reg); +} + +static void atmci_disable(struct atmel_mci *host) +{ + mci_writel(host, CR, MCI_CR_SWRST); + + /* Stall until write is complete, then disable the bus clock */ + mci_readl(host, SR); + clk_disable(host->mck); +} + +static inline unsigned int ns_to_clocks(struct atmel_mci *host, + unsigned int ns) +{ + return (ns * (host->bus_hz / 1000000) + 999) / 1000; +} + +static void atmci_set_timeout(struct atmel_mci *host, + struct mmc_data *data) +{ + static unsigned dtomul_to_shift[] = { + 0, 4, 7, 8, 10, 12, 16, 20 + }; + unsigned timeout; + unsigned dtocyc; + unsigned dtomul; + + timeout = ns_to_clocks(host, data->timeout_ns) + data->timeout_clks; + + for (dtomul = 0; dtomul < 8; dtomul++) { + unsigned shift = dtomul_to_shift[dtomul]; + dtocyc = (timeout + (1 << shift) - 1) >> shift; + if (dtocyc < 15) + break; + } + + if (dtomul >= 8) { + dtomul = 7; + dtocyc = 15; + } + + dev_vdbg(&host->mmc->class_dev, "setting timeout to %u cycles\n", + dtocyc << dtomul_to_shift[dtomul]); + mci_writel(host, DTOR, (MCI_DTOMUL(dtomul) | MCI_DTOCYC(dtocyc))); +} + +/* + * Return mask with command flags to be enabled for this command. 
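+ * A 512 byte single-block read (CMD17, R1 i.e. 48-bit response), for
+ * example, ends up as MCI_CMDR_CMDNB(17) | MCI_CMDR_RSPTYP_48BIT |
+ * MCI_CMDR_MAXLAT_64CYC | MCI_CMDR_START_XFER | MCI_CMDR_BLOCK |
+ * MCI_CMDR_TRDIR_READ.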
+ */ +static u32 atmci_prepare_command(struct mmc_host *mmc, + struct mmc_command *cmd) +{ + struct mmc_data *data; + u32 cmdr; + + cmd->error = -EINPROGRESS; + + cmdr = MCI_CMDR_CMDNB(cmd->opcode); + + if (cmd->flags & MMC_RSP_PRESENT) { + if (cmd->flags & MMC_RSP_136) + cmdr |= MCI_CMDR_RSPTYP_136BIT; + else + cmdr |= MCI_CMDR_RSPTYP_48BIT; + } + + /* + * This should really be MAXLAT_5 for CMD2 and ACMD41, but + * it's too difficult to determine whether this is an ACMD or + * not. Better make it 64. + */ + cmdr |= MCI_CMDR_MAXLAT_64CYC; + + if (mmc->ios.bus_mode == MMC_BUSMODE_OPENDRAIN) + cmdr |= MCI_CMDR_OPDCMD; + + data = cmd->data; + if (data) { + cmdr |= MCI_CMDR_START_XFER; + if (data->flags & MMC_DATA_STREAM) + cmdr |= MCI_CMDR_STREAM; + else if (data->blocks > 1) + cmdr |= MCI_CMDR_MULTI_BLOCK; + else + cmdr |= MCI_CMDR_BLOCK; + + if (data->flags & MMC_DATA_READ) + cmdr |= MCI_CMDR_TRDIR_READ; + } + + return cmdr; +} + +static void atmci_start_command(struct atmel_mci *host, + struct mmc_command *cmd, + u32 cmd_flags) +{ + /* Must read host->cmd after testing event flags */ + smp_rmb(); + WARN_ON(host->cmd); + host->cmd = cmd; + + dev_vdbg(&host->mmc->class_dev, + "start command: ARGR=0x%08x CMDR=0x%08x\n", + cmd->arg, cmd_flags); + + mci_writel(host, ARGR, cmd->arg); + mci_writel(host, CMDR, cmd_flags); +} + +static void send_stop_cmd(struct mmc_host *mmc, struct mmc_data *data) +{ + struct atmel_mci *host = mmc_priv(mmc); + + atmci_start_command(host, data->stop, host->stop_cmdr); + mci_writel(host, IER, MCI_CMDRDY); +} + +static void atmci_request_end(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct atmel_mci *host = mmc_priv(mmc); + + WARN_ON(host->cmd || host->data); + host->mrq = NULL; + + atmci_disable(host); + + mmc_request_done(mmc, mrq); +} + +/* + * Returns a mask of interrupt flags to be enabled after the whole + * request has been prepared. + */ +static u32 atmci_submit_data(struct mmc_host *mmc, struct mmc_data *data) +{ + struct atmel_mci *host = mmc_priv(mmc); + u32 iflags; + + data->error = -EINPROGRESS; + + WARN_ON(host->data); + host->sg = NULL; + host->data = data; + + mci_writel(host, BLKR, MCI_BCNT(data->blocks) + | MCI_BLKLEN(data->blksz)); + dev_vdbg(&mmc->class_dev, "BLKR=0x%08x\n", + MCI_BCNT(data->blocks) | MCI_BLKLEN(data->blksz)); + + iflags = ATMCI_DATA_ERROR_FLAGS; + host->sg = data->sg; + host->pio_offset = 0; + if (data->flags & MMC_DATA_READ) + iflags |= MCI_RXRDY; + else + iflags |= MCI_TXRDY; + + return iflags; +} + +static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct atmel_mci *host = mmc_priv(mmc); + struct mmc_data *data; + struct mmc_command *cmd; + u32 iflags; + u32 cmdflags = 0; + + iflags = mci_readl(host, IMR); + if (iflags) + dev_warn(&mmc->class_dev, "WARNING: IMR=0x%08x\n", + mci_readl(host, IMR)); + + WARN_ON(host->mrq != NULL); + + /* + * We may "know" the card is gone even though there's still an + * electrical connection. If so, we really need to communicate + * this to the MMC core since there won't be any more + * interrupts as the card is completely removed. Otherwise, + * the MMC core might believe the card is still there even + * though the card was just removed very slowly. + */ + if (!host->present) { + mrq->cmd->error = -ENOMEDIUM; + mmc_request_done(mmc, mrq); + return; + } + + host->mrq = mrq; + host->pending_events = 0; + host->completed_events = 0; + + atmci_enable(host); + + /* We don't support multiple blocks of weird lengths. 
*/ + data = mrq->data; + if (data) { + if (data->blocks > 1 && data->blksz & 3) + goto fail; + atmci_set_timeout(host, data); + } + + iflags = MCI_CMDRDY; + cmd = mrq->cmd; + cmdflags = atmci_prepare_command(mmc, cmd); + atmci_start_command(host, cmd, cmdflags); + + if (data) + iflags |= atmci_submit_data(mmc, data); + + if (mrq->stop) { + host->stop_cmdr = atmci_prepare_command(mmc, mrq->stop); + host->stop_cmdr |= MCI_CMDR_STOP_XFER; + if (!(data->flags & MMC_DATA_WRITE)) + host->stop_cmdr |= MCI_CMDR_TRDIR_READ; + if (data->flags & MMC_DATA_STREAM) + host->stop_cmdr |= MCI_CMDR_STREAM; + else + host->stop_cmdr |= MCI_CMDR_MULTI_BLOCK; + } + + /* + * We could have enabled interrupts earlier, but I suspect + * that would open up a nice can of interesting race + * conditions (e.g. command and data complete, but stop not + * prepared yet.) + */ + mci_writel(host, IER, iflags); + + return; + +fail: + atmci_disable(host); + host->mrq = NULL; + mrq->cmd->error = -EINVAL; + mmc_request_done(mmc, mrq); +} + +static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct atmel_mci *host = mmc_priv(mmc); + + if (ios->clock) { + u32 clkdiv; + + /* Set clock rate */ + clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * ios->clock) - 1; + if (clkdiv > 255) { + dev_warn(&mmc->class_dev, + "clock %u too slow; using %lu\n", + ios->clock, host->bus_hz / (2 * 256)); + clkdiv = 255; + } + + host->mode_reg = MCI_MR_CLKDIV(clkdiv) | MCI_MR_WRPROOF + | MCI_MR_RDPROOF; + } + + switch (ios->bus_width) { + case MMC_BUS_WIDTH_1: + host->sdc_reg = 0; + break; + case MMC_BUS_WIDTH_4: + host->sdc_reg = MCI_SDCBUS_4BIT; + break; + } + + switch (ios->power_mode) { + case MMC_POWER_ON: + /* Send init sequence (74 clock cycles) */ + atmci_enable(host); + mci_writel(host, CMDR, MCI_CMDR_SPCMD_INIT); + while (!(mci_readl(host, SR) & MCI_CMDRDY)) + cpu_relax(); + atmci_disable(host); + break; + default: + /* + * TODO: None of the currently available AVR32-based + * boards allow MMC power to be turned off. Implement + * power control when this can be tested properly. + */ + break; + } +} + +static int atmci_get_ro(struct mmc_host *mmc) +{ + int read_only = 0; + struct atmel_mci *host = mmc_priv(mmc); + + if (host->wp_pin >= 0) { + read_only = gpio_get_value(host->wp_pin); + dev_dbg(&mmc->class_dev, "card is %s\n", + read_only ? "read-only" : "read-write"); + } else { + dev_dbg(&mmc->class_dev, + "no pin for checking read-only switch." 
+ " Assuming write-enable.\n"); + } + + return read_only; +} + +static struct mmc_host_ops atmci_ops = { + .request = atmci_request, + .set_ios = atmci_set_ios, + .get_ro = atmci_get_ro, +}; + +static void atmci_command_complete(struct atmel_mci *host, + struct mmc_command *cmd, u32 status) +{ + /* Read the response from the card (up to 16 bytes) */ + cmd->resp[0] = mci_readl(host, RSPR); + cmd->resp[1] = mci_readl(host, RSPR); + cmd->resp[2] = mci_readl(host, RSPR); + cmd->resp[3] = mci_readl(host, RSPR); + + if (status & MCI_RTOE) + cmd->error = -ETIMEDOUT; + else if ((cmd->flags & MMC_RSP_CRC) && (status & MCI_RCRCE)) + cmd->error = -EILSEQ; + else if (status & (MCI_RINDE | MCI_RDIRE | MCI_RENDE)) + cmd->error = -EIO; + else + cmd->error = 0; + + if (cmd->error) { + dev_dbg(&host->mmc->class_dev, + "command error: status=0x%08x\n", status); + + if (cmd->data) { + host->data = NULL; + mci_writel(host, IDR, MCI_NOTBUSY + | MCI_TXRDY | MCI_RXRDY + | ATMCI_DATA_ERROR_FLAGS); + } + } +} + +static void atmci_detect_change(unsigned long data) +{ + struct atmel_mci *host = (struct atmel_mci *)data; + struct mmc_request *mrq = host->mrq; + int present; + + /* + * atmci_remove() sets detect_pin to -1 before freeing the + * interrupt. We must not re-enable the interrupt if it has + * been freed. + */ + smp_rmb(); + if (host->detect_pin < 0) + return; + + enable_irq(gpio_to_irq(host->detect_pin)); + present = !gpio_get_value(host->detect_pin); + + dev_vdbg(&host->pdev->dev, "detect change: %d (was %d)\n", + present, host->present); + + if (present != host->present) { + dev_dbg(&host->mmc->class_dev, "card %s\n", + present ? "inserted" : "removed"); + host->present = present; + + /* Reset controller if card is gone */ + if (!present) { + mci_writel(host, CR, MCI_CR_SWRST); + mci_writel(host, IDR, ~0UL); + mci_writel(host, CR, MCI_CR_MCIEN); + } + + /* Clean up queue if present */ + if (mrq) { + /* + * Reset controller to terminate any ongoing + * commands or data transfers. 
+ */ + mci_writel(host, CR, MCI_CR_SWRST); + + if (!atmci_is_completed(host, EVENT_CMD_COMPLETE)) + mrq->cmd->error = -ENOMEDIUM; + + if (mrq->data && !atmci_is_completed(host, + EVENT_DATA_COMPLETE)) { + host->data = NULL; + mrq->data->error = -ENOMEDIUM; + } + if (mrq->stop && !atmci_is_completed(host, + EVENT_STOP_COMPLETE)) + mrq->stop->error = -ENOMEDIUM; + + host->cmd = NULL; + atmci_request_end(host->mmc, mrq); + } + + mmc_detect_change(host->mmc, 0); + } +} + +static void atmci_tasklet_func(unsigned long priv) +{ + struct mmc_host *mmc = (struct mmc_host *)priv; + struct atmel_mci *host = mmc_priv(mmc); + struct mmc_request *mrq = host->mrq; + struct mmc_data *data = host->data; + + dev_vdbg(&mmc->class_dev, + "tasklet: pending/completed/mask %lx/%lx/%x\n", + host->pending_events, host->completed_events, + mci_readl(host, IMR)); + + if (atmci_test_and_clear_pending(host, EVENT_CMD_COMPLETE)) { + /* + * host->cmd must be set to NULL before the interrupt + * handler sees EVENT_CMD_COMPLETE + */ + host->cmd = NULL; + smp_wmb(); + atmci_set_completed(host, EVENT_CMD_COMPLETE); + atmci_command_complete(host, mrq->cmd, host->cmd_status); + + if (!mrq->cmd->error && mrq->stop + && atmci_is_completed(host, EVENT_XFER_COMPLETE) + && !atmci_test_and_set_completed(host, + EVENT_STOP_SENT)) + send_stop_cmd(host->mmc, mrq->data); + } + if (atmci_test_and_clear_pending(host, EVENT_STOP_COMPLETE)) { + /* + * host->cmd must be set to NULL before the interrupt + * handler sees EVENT_STOP_COMPLETE + */ + host->cmd = NULL; + smp_wmb(); + atmci_set_completed(host, EVENT_STOP_COMPLETE); + atmci_command_complete(host, mrq->stop, host->stop_status); + } + if (atmci_test_and_clear_pending(host, EVENT_DATA_ERROR)) { + u32 status = host->data_status; + + dev_vdbg(&mmc->class_dev, "data error: status=%08x\n", status); + + atmci_set_completed(host, EVENT_DATA_ERROR); + atmci_set_completed(host, EVENT_DATA_COMPLETE); + + if (status & MCI_DTOE) { + dev_dbg(&mmc->class_dev, + "data timeout error\n"); + data->error = -ETIMEDOUT; + } else if (status & MCI_DCRCE) { + dev_dbg(&mmc->class_dev, "data CRC error\n"); + data->error = -EILSEQ; + } else { + dev_dbg(&mmc->class_dev, + "data FIFO error (status=%08x)\n", + status); + data->error = -EIO; + } + + if (host->present && data->stop + && atmci_is_completed(host, EVENT_CMD_COMPLETE) + && !atmci_test_and_set_completed( + host, EVENT_STOP_SENT)) + send_stop_cmd(host->mmc, data); + + host->data = NULL; + } + if (atmci_test_and_clear_pending(host, EVENT_DATA_COMPLETE)) { + atmci_set_completed(host, EVENT_DATA_COMPLETE); + + if (!atmci_is_completed(host, EVENT_DATA_ERROR)) { + data->bytes_xfered = data->blocks * data->blksz; + data->error = 0; + } + + host->data = NULL; + } + + if (host->mrq && !host->cmd && !host->data) + atmci_request_end(mmc, host->mrq); +} + +static void atmci_read_data_pio(struct atmel_mci *host) +{ + struct scatterlist *sg = host->sg; + void *buf = sg_virt(sg); + unsigned int offset = host->pio_offset; + struct mmc_data *data = host->data; + u32 value; + u32 status; + unsigned int nbytes = 0; + + do { + value = mci_readl(host, RDR); + if (likely(offset + 4 <= sg->length)) { + put_unaligned(value, (u32 *)(buf + offset)); + + offset += 4; + nbytes += 4; + + if (offset == sg->length) { + host->sg = sg = sg_next(sg); + if (!sg) + goto done; + + offset = 0; + buf = sg_virt(sg); + } + } else { + unsigned int remaining = sg->length - offset; + memcpy(buf + offset, &value, remaining); + nbytes += remaining; + + flush_dcache_page(sg_page(sg)); + host->sg = 
sg = sg_next(sg); + if (!sg) + goto done; + + offset = 4 - remaining; + buf = sg_virt(sg); + memcpy(buf, (u8 *)&value + remaining, offset); + nbytes += offset; + } + + status = mci_readl(host, SR); + if (status & ATMCI_DATA_ERROR_FLAGS) { + mci_writel(host, IDR, (MCI_NOTBUSY | MCI_RXRDY + | ATMCI_DATA_ERROR_FLAGS)); + host->data_status = status; + atmci_set_pending(host, EVENT_DATA_ERROR); + tasklet_schedule(&host->tasklet); + break; + } + } while (status & MCI_RXRDY); + + host->pio_offset = offset; + data->bytes_xfered += nbytes; + + return; + +done: + mci_writel(host, IDR, MCI_RXRDY); + mci_writel(host, IER, MCI_NOTBUSY); + data->bytes_xfered += nbytes; + atmci_set_completed(host, EVENT_XFER_COMPLETE); + if (data->stop && atmci_is_completed(host, EVENT_CMD_COMPLETE) + && !atmci_test_and_set_completed(host, EVENT_STOP_SENT)) + send_stop_cmd(host->mmc, data); +} + +static void atmci_write_data_pio(struct atmel_mci *host) +{ + struct scatterlist *sg = host->sg; + void *buf = sg_virt(sg); + unsigned int offset = host->pio_offset; + struct mmc_data *data = host->data; + u32 value; + u32 status; + unsigned int nbytes = 0; + + do { + if (likely(offset + 4 <= sg->length)) { + value = get_unaligned((u32 *)(buf + offset)); + mci_writel(host, TDR, value); + + offset += 4; + nbytes += 4; + if (offset == sg->length) { + host->sg = sg = sg_next(sg); + if (!sg) + goto done; + + offset = 0; + buf = sg_virt(sg); + } + } else { + unsigned int remaining = sg->length - offset; + + value = 0; + memcpy(&value, buf + offset, remaining); + nbytes += remaining; + + host->sg = sg = sg_next(sg); + if (!sg) { + mci_writel(host, TDR, value); + goto done; + } + + offset = 4 - remaining; + buf = sg_virt(sg); + memcpy((u8 *)&value + remaining, buf, offset); + mci_writel(host, TDR, value); + nbytes += offset; + } + + status = mci_readl(host, SR); + if (status & ATMCI_DATA_ERROR_FLAGS) { + mci_writel(host, IDR, (MCI_NOTBUSY | MCI_TXRDY + | ATMCI_DATA_ERROR_FLAGS)); + host->data_status = status; + atmci_set_pending(host, EVENT_DATA_ERROR); + tasklet_schedule(&host->tasklet); + break; + } + } while (status & MCI_TXRDY); + + host->pio_offset = offset; + data->bytes_xfered += nbytes; + + return; + +done: + mci_writel(host, IDR, MCI_TXRDY); + mci_writel(host, IER, MCI_NOTBUSY); + data->bytes_xfered += nbytes; + atmci_set_completed(host, EVENT_XFER_COMPLETE); + if (data->stop && atmci_is_completed(host, EVENT_CMD_COMPLETE) + && !atmci_test_and_set_completed(host, EVENT_STOP_SENT)) + send_stop_cmd(host->mmc, data); +} + +static void atmci_cmd_interrupt(struct mmc_host *mmc, u32 status) +{ + struct atmel_mci *host = mmc_priv(mmc); + + mci_writel(host, IDR, MCI_CMDRDY); + + if (atmci_is_completed(host, EVENT_STOP_SENT)) { + host->stop_status = status; + atmci_set_pending(host, EVENT_STOP_COMPLETE); + } else { + host->cmd_status = status; + atmci_set_pending(host, EVENT_CMD_COMPLETE); + } + + tasklet_schedule(&host->tasklet); +} + +static irqreturn_t atmci_interrupt(int irq, void *dev_id) +{ + struct mmc_host *mmc = dev_id; + struct atmel_mci *host = mmc_priv(mmc); + u32 status, mask, pending; + unsigned int pass_count = 0; + + spin_lock(&mmc->lock); + + do { + status = mci_readl(host, SR); + mask = mci_readl(host, IMR); + pending = status & mask; + if (!pending) + break; + + if (pending & ATMCI_DATA_ERROR_FLAGS) { + mci_writel(host, IDR, ATMCI_DATA_ERROR_FLAGS + | MCI_RXRDY | MCI_TXRDY); + pending &= mci_readl(host, IMR); + host->data_status = status; + atmci_set_pending(host, EVENT_DATA_ERROR); + 
tasklet_schedule(&host->tasklet); + } + if (pending & MCI_NOTBUSY) { + mci_writel(host, IDR, (MCI_NOTBUSY + | ATMCI_DATA_ERROR_FLAGS)); + atmci_set_pending(host, EVENT_DATA_COMPLETE); + tasklet_schedule(&host->tasklet); + } + if (pending & MCI_RXRDY) + atmci_read_data_pio(host); + if (pending & MCI_TXRDY) + atmci_write_data_pio(host); + + if (pending & MCI_CMDRDY) + atmci_cmd_interrupt(mmc, status); + } while (pass_count++ < 5); + + spin_unlock(&mmc->lock); + + return pass_count ? IRQ_HANDLED : IRQ_NONE; +} + +static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id) +{ + struct mmc_host *mmc = dev_id; + struct atmel_mci *host = mmc_priv(mmc); + + /* + * Disable interrupts until the pin has stabilized and check + * the state then. Use mod_timer() since we may be in the + * middle of the timer routine when this interrupt triggers. + */ + disable_irq_nosync(irq); + mod_timer(&host->detect_timer, jiffies + msecs_to_jiffies(20)); + + return IRQ_HANDLED; +} + +static int __init atmci_probe(struct platform_device *pdev) +{ + struct mci_platform_data *pdata; + struct atmel_mci *host; + struct mmc_host *mmc; + struct resource *regs; + int irq; + int ret; + + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!regs) + return -ENXIO; + pdata = pdev->dev.platform_data; + if (!pdata) + return -ENXIO; + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + mmc = mmc_alloc_host(sizeof(struct atmel_mci), &pdev->dev); + if (!mmc) + return -ENOMEM; + + host = mmc_priv(mmc); + host->pdev = pdev; + host->mmc = mmc; + host->detect_pin = pdata->detect_pin; + host->wp_pin = pdata->wp_pin; + + host->mck = clk_get(&pdev->dev, "mci_clk"); + if (IS_ERR(host->mck)) { + ret = PTR_ERR(host->mck); + goto err_clk_get; + } + + ret = -ENOMEM; + host->regs = ioremap(regs->start, regs->end - regs->start + 1); + if (!host->regs) + goto err_ioremap; + + clk_enable(host->mck); + mci_writel(host, CR, MCI_CR_SWRST); + host->bus_hz = clk_get_rate(host->mck); + clk_disable(host->mck); + + host->mapbase = regs->start; + + mmc->ops = &atmci_ops; + mmc->f_min = (host->bus_hz + 511) / 512; + mmc->f_max = host->bus_hz / 2; + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + mmc->caps |= MMC_CAP_4_BIT_DATA; + + mmc->max_hw_segs = 64; + mmc->max_phys_segs = 64; + mmc->max_req_size = 32768 * 512; + mmc->max_blk_size = 32768; + mmc->max_blk_count = 512; + + tasklet_init(&host->tasklet, atmci_tasklet_func, (unsigned long)mmc); + + ret = request_irq(irq, atmci_interrupt, 0, pdev->dev.bus_id, mmc); + if (ret) + goto err_request_irq; + + /* Assume card is present if we don't have a detect pin */ + host->present = 1; + if (host->detect_pin >= 0) { + if (gpio_request(host->detect_pin, "mmc_detect")) { + dev_dbg(&mmc->class_dev, "no detect pin available\n"); + host->detect_pin = -1; + } else { + host->present = !gpio_get_value(host->detect_pin); + } + } + if (host->wp_pin >= 0) { + if (gpio_request(host->wp_pin, "mmc_wp")) { + dev_dbg(&mmc->class_dev, "no WP pin available\n"); + host->wp_pin = -1; + } + } + + platform_set_drvdata(pdev, host); + + mmc_add_host(mmc); + + if (host->detect_pin >= 0) { + setup_timer(&host->detect_timer, atmci_detect_change, + (unsigned long)host); + + ret = request_irq(gpio_to_irq(host->detect_pin), + atmci_detect_interrupt, + IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, + "mmc-detect", mmc); + if (ret) { + dev_dbg(&mmc->class_dev, + "could not request IRQ %d for detect pin\n", + gpio_to_irq(host->detect_pin)); + gpio_free(host->detect_pin); + host->detect_pin = -1; + } + } + + 
dev_info(&mmc->class_dev, + "Atmel MCI controller at 0x%08lx irq %d\n", + host->mapbase, irq); + + return 0; + +err_request_irq: + iounmap(host->regs); +err_ioremap: + clk_put(host->mck); +err_clk_get: + mmc_free_host(mmc); + return ret; +} + +static int __exit atmci_remove(struct platform_device *pdev) +{ + struct atmel_mci *host = platform_get_drvdata(pdev); + + platform_set_drvdata(pdev, NULL); + + if (host) { + if (host->detect_pin >= 0) { + int pin = host->detect_pin; + + /* Make sure the timer doesn't enable the interrupt */ + host->detect_pin = -1; + smp_wmb(); + + free_irq(gpio_to_irq(pin), host->mmc); + del_timer_sync(&host->detect_timer); + gpio_free(pin); + } + + mmc_remove_host(host->mmc); + + clk_enable(host->mck); + mci_writel(host, IDR, ~0UL); + mci_writel(host, CR, MCI_CR_MCIDIS); + mci_readl(host, SR); + clk_disable(host->mck); + + if (host->wp_pin >= 0) + gpio_free(host->wp_pin); + + free_irq(platform_get_irq(pdev, 0), host->mmc); + iounmap(host->regs); + + clk_put(host->mck); + + mmc_free_host(host->mmc); + } + return 0; +} + +static struct platform_driver atmci_driver = { + .remove = __exit_p(atmci_remove), + .driver = { + .name = "atmel_mci", + }, +}; + +static int __init atmci_init(void) +{ + return platform_driver_probe(&atmci_driver, atmci_probe); +} + +static void __exit atmci_exit(void) +{ + platform_driver_unregister(&atmci_driver); +} + +module_init(atmci_init); +module_exit(atmci_exit); + +MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver"); +MODULE_AUTHOR("Haavard Skinnemoen <haavard.skinnemoen@atmel.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c index cc5f7bc546af..3f15eb204895 100644 --- a/drivers/mmc/host/au1xmmc.c +++ b/drivers/mmc/host/au1xmmc.c @@ -21,7 +21,7 @@ * published by the Free Software Foundation. */ -/* Why is a timer used to detect insert events? +/* Why don't we use the SD controllers' carddetect feature? * * From the AU1100 MMC application guide: * If the Au1100-based design is intended to support both MultiMediaCards @@ -30,8 +30,6 @@ * In doing so, a MMC card never enters SPI-mode communications, * but now the SecureDigital card-detect feature of CD/DAT3 is ineffective * (the low to high transition will not occur). - * - * So we use the timer to check the status manually. */ #include <linux/module.h> @@ -41,51 +39,110 @@ #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/scatterlist.h> - +#include <linux/leds.h> #include <linux/mmc/host.h> + #include <asm/io.h> #include <asm/mach-au1x00/au1000.h> #include <asm/mach-au1x00/au1xxx_dbdma.h> #include <asm/mach-au1x00/au1100_mmc.h> -#include <au1xxx.h> -#include "au1xmmc.h" - #define DRIVER_NAME "au1xxx-mmc" /* Set this to enable special debugging macros */ +/* #define DEBUG */ #ifdef DEBUG -#define DBG(fmt, idx, args...) printk("au1xx(%d): DEBUG: " fmt, idx, ##args) +#define DBG(fmt, idx, args...) \ + printk(KERN_DEBUG "au1xmmc(%d): DEBUG: " fmt, idx, ##args) #else -#define DBG(fmt, idx, args...) +#define DBG(fmt, idx, args...) do {} while (0) #endif -const struct { +/* Hardware definitions */ +#define AU1XMMC_DESCRIPTOR_COUNT 1 +#define AU1XMMC_DESCRIPTOR_SIZE 2048 + +#define AU1XMMC_OCR (MMC_VDD_27_28 | MMC_VDD_28_29 | MMC_VDD_29_30 | \ + MMC_VDD_30_31 | MMC_VDD_31_32 | MMC_VDD_32_33 | \ + MMC_VDD_33_34 | MMC_VDD_34_35 | MMC_VDD_35_36) + +/* This gives us a hard value for the stop command that we can write directly + * to the command register. 
+ */ +#define STOP_CMD \ + (SD_CMD_RT_1B | SD_CMD_CT_7 | (0xC << SD_CMD_CI_SHIFT) | SD_CMD_GO) + +/* This is the set of interrupts that we configure by default. */ +#define AU1XMMC_INTERRUPTS \ + (SD_CONFIG_SC | SD_CONFIG_DT | SD_CONFIG_RAT | \ + SD_CONFIG_CR | SD_CONFIG_I) + +/* The poll event (looking for insert/remove events runs twice a second. */ +#define AU1XMMC_DETECT_TIMEOUT (HZ/2) + +struct au1xmmc_host { + struct mmc_host *mmc; + struct mmc_request *mrq; + + u32 flags; u32 iobase; - u32 tx_devid, rx_devid; - u16 bcsrpwr; - u16 bcsrstatus; - u16 wpstatus; -} au1xmmc_card_table[] = { - { SD0_BASE, DSCR_CMD0_SDMS_TX0, DSCR_CMD0_SDMS_RX0, - BCSR_BOARD_SD0PWR, BCSR_INT_SD0INSERT, BCSR_STATUS_SD0WP }, -#ifndef CONFIG_MIPS_DB1200 - { SD1_BASE, DSCR_CMD0_SDMS_TX1, DSCR_CMD0_SDMS_RX1, - BCSR_BOARD_DS1PWR, BCSR_INT_SD1INSERT, BCSR_STATUS_SD1WP } -#endif -}; + u32 clock; + u32 bus_width; + u32 power_mode; -#define AU1XMMC_CONTROLLER_COUNT (ARRAY_SIZE(au1xmmc_card_table)) + int status; -/* This array stores pointers for the hosts (used by the IRQ handler) */ -struct au1xmmc_host *au1xmmc_hosts[AU1XMMC_CONTROLLER_COUNT]; -static int dma = 1; + struct { + int len; + int dir; + } dma; -#ifdef MODULE -module_param(dma, bool, 0); -MODULE_PARM_DESC(dma, "Use DMA engine for data transfers (0 = disabled)"); -#endif + struct { + int index; + int offset; + int len; + } pio; + + u32 tx_chan; + u32 rx_chan; + + int irq; + + struct tasklet_struct finish_task; + struct tasklet_struct data_task; + struct au1xmmc_platform_data *platdata; + struct platform_device *pdev; + struct resource *ioarea; +}; + +/* Status flags used by the host structure */ +#define HOST_F_XMIT 0x0001 +#define HOST_F_RECV 0x0002 +#define HOST_F_DMA 0x0010 +#define HOST_F_ACTIVE 0x0100 +#define HOST_F_STOP 0x1000 + +#define HOST_S_IDLE 0x0001 +#define HOST_S_CMD 0x0002 +#define HOST_S_DATA 0x0003 +#define HOST_S_STOP 0x0004 + +/* Easy access macros */ +#define HOST_STATUS(h) ((h)->iobase + SD_STATUS) +#define HOST_CONFIG(h) ((h)->iobase + SD_CONFIG) +#define HOST_ENABLE(h) ((h)->iobase + SD_ENABLE) +#define HOST_TXPORT(h) ((h)->iobase + SD_TXPORT) +#define HOST_RXPORT(h) ((h)->iobase + SD_RXPORT) +#define HOST_CMDARG(h) ((h)->iobase + SD_CMDARG) +#define HOST_BLKSIZE(h) ((h)->iobase + SD_BLKSIZE) +#define HOST_CMD(h) ((h)->iobase + SD_CMD) +#define HOST_CONFIG2(h) ((h)->iobase + SD_CONFIG2) +#define HOST_TIMEOUT(h) ((h)->iobase + SD_TIMEOUT) +#define HOST_DEBUG(h) ((h)->iobase + SD_DEBUG) + +#define DMA_CHANNEL(h) \ + (((h)->flags & HOST_F_XMIT) ? 
(h)->tx_chan : (h)->rx_chan) static inline void IRQ_ON(struct au1xmmc_host *host, u32 mask) { @@ -119,14 +176,13 @@ static inline void IRQ_OFF(struct au1xmmc_host *host, u32 mask) static inline void SEND_STOP(struct au1xmmc_host *host) { - - /* We know the value of CONFIG2, so avoid a read we don't need */ - u32 mask = SD_CONFIG2_EN; + u32 config2; WARN_ON(host->status != HOST_S_DATA); host->status = HOST_S_STOP; - au_writel(mask | SD_CONFIG2_DF, HOST_CONFIG2(host)); + config2 = au_readl(HOST_CONFIG2(host)); + au_writel(config2 | SD_CONFIG2_DF, HOST_CONFIG2(host)); au_sync(); /* Send the stop commmand */ @@ -135,35 +191,36 @@ static inline void SEND_STOP(struct au1xmmc_host *host) static void au1xmmc_set_power(struct au1xmmc_host *host, int state) { - - u32 val = au1xmmc_card_table[host->id].bcsrpwr; - - bcsr->board &= ~val; - if (state) bcsr->board |= val; - - au_sync_delay(1); + if (host->platdata && host->platdata->set_power) + host->platdata->set_power(host->mmc, state); } -static inline int au1xmmc_card_inserted(struct au1xmmc_host *host) +static int au1xmmc_card_inserted(struct mmc_host *mmc) { - return (bcsr->sig_status & au1xmmc_card_table[host->id].bcsrstatus) - ? 1 : 0; + struct au1xmmc_host *host = mmc_priv(mmc); + + if (host->platdata && host->platdata->card_inserted) + return !!host->platdata->card_inserted(host->mmc); + + return -ENOSYS; } static int au1xmmc_card_readonly(struct mmc_host *mmc) { struct au1xmmc_host *host = mmc_priv(mmc); - return (bcsr->status & au1xmmc_card_table[host->id].wpstatus) - ? 1 : 0; + + if (host->platdata && host->platdata->card_readonly) + return !!host->platdata->card_readonly(mmc); + + return -ENOSYS; } static void au1xmmc_finish_request(struct au1xmmc_host *host) { - struct mmc_request *mrq = host->mrq; host->mrq = NULL; - host->flags &= HOST_F_ACTIVE; + host->flags &= HOST_F_ACTIVE | HOST_F_DMA; host->dma.len = 0; host->dma.dir = 0; @@ -174,8 +231,6 @@ static void au1xmmc_finish_request(struct au1xmmc_host *host) host->status = HOST_S_IDLE; - bcsr->disk_leds |= (1 << 8); - mmc_request_done(host->mmc, mrq); } @@ -235,18 +290,14 @@ static int au1xmmc_send_command(struct au1xmmc_host *host, int wait, au_sync(); /* Wait for the command to go on the line */ - - while(1) { - if (!(au_readl(HOST_CMD(host)) & SD_CMD_GO)) - break; - } + while (au_readl(HOST_CMD(host)) & SD_CMD_GO) + /* nop */; /* Wait for the command to come back */ - if (wait) { u32 status = au_readl(HOST_STATUS(host)); - while(!(status & SD_STATUS_CR)) + while (!(status & SD_STATUS_CR)) status = au_readl(HOST_STATUS(host)); /* Clear the CR status */ @@ -260,12 +311,11 @@ static int au1xmmc_send_command(struct au1xmmc_host *host, int wait, static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status) { - struct mmc_request *mrq = host->mrq; struct mmc_data *data; u32 crc; - WARN_ON(host->status != HOST_S_DATA && host->status != HOST_S_STOP); + WARN_ON((host->status != HOST_S_DATA) && (host->status != HOST_S_STOP)); if (host->mrq == NULL) return; @@ -276,15 +326,13 @@ static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status) status = au_readl(HOST_STATUS(host)); /* The transaction is really over when the SD_STATUS_DB bit is clear */ - - while((host->flags & HOST_F_XMIT) && (status & SD_STATUS_DB)) + while ((host->flags & HOST_F_XMIT) && (status & SD_STATUS_DB)) status = au_readl(HOST_STATUS(host)); data->error = 0; dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma.dir); /* Process any errors */ - crc = (status & (SD_STATUS_WC | 
SD_STATUS_RC)); if (host->flags & HOST_F_XMIT) crc |= ((status & 0x07) == 0x02) ? 0 : 1; @@ -299,16 +347,16 @@ static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status) if (!data->error) { if (host->flags & HOST_F_DMA) { +#ifdef CONFIG_SOC_AU1200 /* DBDMA */ u32 chan = DMA_CHANNEL(host); - chan_tab_t *c = *((chan_tab_t **) chan); + chan_tab_t *c = *((chan_tab_t **)chan); au1x_dma_chan_t *cp = c->chan_ptr; data->bytes_xfered = cp->ddma_bytecnt; - } - else +#endif + } else data->bytes_xfered = - (data->blocks * data->blksz) - - host->pio.len; + (data->blocks * data->blksz) - host->pio.len; } au1xmmc_finish_request(host); @@ -316,7 +364,7 @@ static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status) static void au1xmmc_tasklet_data(unsigned long param) { - struct au1xmmc_host *host = (struct au1xmmc_host *) param; + struct au1xmmc_host *host = (struct au1xmmc_host *)param; u32 status = au_readl(HOST_STATUS(host)); au1xmmc_data_complete(host, status); @@ -326,11 +374,10 @@ static void au1xmmc_tasklet_data(unsigned long param) static void au1xmmc_send_pio(struct au1xmmc_host *host) { - - struct mmc_data *data = 0; - int sg_len, max, count = 0; - unsigned char *sg_ptr; - u32 status = 0; + struct mmc_data *data; + int sg_len, max, count; + unsigned char *sg_ptr, val; + u32 status; struct scatterlist *sg; data = host->mrq->data; @@ -345,14 +392,12 @@ static void au1xmmc_send_pio(struct au1xmmc_host *host) /* This is the space left inside the buffer */ sg_len = data->sg[host->pio.index].length - host->pio.offset; - /* Check to if we need less then the size of the sg_buffer */ - + /* Check if we need less than the size of the sg_buffer */ max = (sg_len > host->pio.len) ? host->pio.len : sg_len; - if (max > AU1XMMC_MAX_TRANSFER) max = AU1XMMC_MAX_TRANSFER; - - for(count = 0; count < max; count++ ) { - unsigned char val; + if (max > AU1XMMC_MAX_TRANSFER) + max = AU1XMMC_MAX_TRANSFER; + for (count = 0; count < max; count++) { status = au_readl(HOST_STATUS(host)); if (!(status & SD_STATUS_TH)) @@ -360,7 +405,7 @@ static void au1xmmc_send_pio(struct au1xmmc_host *host) val = *sg_ptr++; - au_writel((unsigned long) val, HOST_TXPORT(host)); + au_writel((unsigned long)val, HOST_TXPORT(host)); au_sync(); } @@ -384,11 +429,10 @@ static void au1xmmc_send_pio(struct au1xmmc_host *host) static void au1xmmc_receive_pio(struct au1xmmc_host *host) { - - struct mmc_data *data = 0; - int sg_len = 0, max = 0, count = 0; - unsigned char *sg_ptr = 0; - u32 status = 0; + struct mmc_data *data; + int max, count, sg_len = 0; + unsigned char *sg_ptr = NULL; + u32 status, val; struct scatterlist *sg; data = host->mrq->data; @@ -405,33 +449,33 @@ static void au1xmmc_receive_pio(struct au1xmmc_host *host) /* This is the space left inside the buffer */ sg_len = sg_dma_len(&data->sg[host->pio.index]) - host->pio.offset; - /* Check to if we need less then the size of the sg_buffer */ - if (sg_len < max) max = sg_len; + /* Check if we need less than the size of the sg_buffer */ + if (sg_len < max) + max = sg_len; } if (max > AU1XMMC_MAX_TRANSFER) max = AU1XMMC_MAX_TRANSFER; - for(count = 0; count < max; count++ ) { - u32 val; + for (count = 0; count < max; count++) { status = au_readl(HOST_STATUS(host)); if (!(status & SD_STATUS_NE)) break; if (status & SD_STATUS_RC) { - DBG("RX CRC Error [%d + %d].\n", host->id, + DBG("RX CRC Error [%d + %d].\n", host->pdev->id, host->pio.len, count); break; } if (status & SD_STATUS_RO) { - DBG("RX Overrun [%d + %d]\n", host->id, + DBG("RX Overrun [%d + %d]\n", 
host->pdev->id, host->pio.len, count); break; } else if (status & SD_STATUS_RU) { - DBG("RX Underrun [%d + %d]\n", host->id, + DBG("RX Underrun [%d + %d]\n", host->pdev->id, host->pio.len, count); break; } @@ -439,7 +483,7 @@ static void au1xmmc_receive_pio(struct au1xmmc_host *host) val = au_readl(HOST_RXPORT(host)); if (sg_ptr) - *sg_ptr++ = (unsigned char) (val & 0xFF); + *sg_ptr++ = (unsigned char)(val & 0xFF); } host->pio.len -= count; @@ -451,7 +495,7 @@ static void au1xmmc_receive_pio(struct au1xmmc_host *host) } if (host->pio.len == 0) { - //IRQ_OFF(host, SD_CONFIG_RA | SD_CONFIG_RF); + /* IRQ_OFF(host, SD_CONFIG_RA | SD_CONFIG_RF); */ IRQ_OFF(host, SD_CONFIG_NE); if (host->flags & HOST_F_STOP) @@ -461,17 +505,15 @@ static void au1xmmc_receive_pio(struct au1xmmc_host *host) } } -/* static void au1xmmc_cmd_complete - This is called when a command has been completed - grab the response - and check for errors. Then start the data transfer if it is indicated. -*/ - +/* This is called when a command has been completed - grab the response + * and check for errors. Then start the data transfer if it is indicated. + */ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status) { - struct mmc_request *mrq = host->mrq; struct mmc_command *cmd; - int trans; + u32 r[4]; + int i, trans; if (!host->mrq) return; @@ -481,9 +523,6 @@ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status) if (cmd->flags & MMC_RSP_PRESENT) { if (cmd->flags & MMC_RSP_136) { - u32 r[4]; - int i; - r[0] = au_readl(host->iobase + SD_RESP3); r[1] = au_readl(host->iobase + SD_RESP2); r[2] = au_readl(host->iobase + SD_RESP1); @@ -491,10 +530,9 @@ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status) /* The CRC is omitted from the response, so really * we only got 120 bytes, but the engine expects - * 128 bits, so we have to shift things up + * 128 bits, so we have to shift things up. */ - - for(i = 0; i < 4; i++) { + for (i = 0; i < 4; i++) { cmd->resp[i] = (r[i] & 0x00FFFFFF) << 8; if (i != 3) cmd->resp[i] |= (r[i + 1] & 0xFF000000) >> 24; @@ -505,22 +543,20 @@ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status) * our response omits the CRC, our data ends up * being shifted 8 bits to the right. In this case, * that means that the OSR data starts at bit 31, - * so we can just read RESP0 and return that + * so we can just read RESP0 and return that. 
*/ cmd->resp[0] = au_readl(host->iobase + SD_RESP0); } } /* Figure out errors */ - if (status & (SD_STATUS_SC | SD_STATUS_WC | SD_STATUS_RC)) cmd->error = -EILSEQ; trans = host->flags & (HOST_F_XMIT | HOST_F_RECV); if (!trans || cmd->error) { - - IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA|SD_CONFIG_RF); + IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF); tasklet_schedule(&host->finish_task); return; } @@ -528,6 +564,7 @@ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status) host->status = HOST_S_DATA; if (host->flags & HOST_F_DMA) { +#ifdef CONFIG_SOC_AU1200 /* DBDMA */ u32 channel = DMA_CHANNEL(host); /* Start the DMA as soon as the buffer gets something in it */ @@ -540,23 +577,21 @@ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status) } au1xxx_dbdma_start(channel); +#endif } } static void au1xmmc_set_clock(struct au1xmmc_host *host, int rate) { - unsigned int pbus = get_au1x00_speed(); unsigned int divisor; u32 config; /* From databook: - divisor = ((((cpuclock / sbus_divisor) / 2) / mmcclock) / 2) - 1 - */ - + * divisor = ((((cpuclock / sbus_divisor) / 2) / mmcclock) / 2) - 1 + */ pbus /= ((au_readl(SYS_POWERCTRL) & 0x3) + 2); pbus /= 2; - divisor = ((pbus / rate) / 2) - 1; config = au_readl(HOST_CONFIG(host)); @@ -568,15 +603,11 @@ static void au1xmmc_set_clock(struct au1xmmc_host *host, int rate) au_sync(); } -static int -au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data) +static int au1xmmc_prepare_data(struct au1xmmc_host *host, + struct mmc_data *data) { - int datalen = data->blocks * data->blksz; - if (dma != 0) - host->flags |= HOST_F_DMA; - if (data->flags & MMC_DATA_READ) host->flags |= HOST_F_RECV; else @@ -596,12 +627,13 @@ au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data) au_writel(data->blksz - 1, HOST_BLKSIZE(host)); if (host->flags & HOST_F_DMA) { +#ifdef CONFIG_SOC_AU1200 /* DBDMA */ int i; u32 channel = DMA_CHANNEL(host); au1xxx_dbdma_stop(channel); - for(i = 0; i < host->dma.len; i++) { + for (i = 0; i < host->dma.len; i++) { u32 ret = 0, flags = DDMA_FLAGS_NOIE; struct scatterlist *sg = &data->sg[i]; int sg_len = sg->length; @@ -611,23 +643,21 @@ au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data) if (i == host->dma.len - 1) flags = DDMA_FLAGS_IE; - if (host->flags & HOST_F_XMIT){ - ret = au1xxx_dbdma_put_source_flags(channel, - (void *) sg_virt(sg), len, flags); - } - else { - ret = au1xxx_dbdma_put_dest_flags(channel, - (void *) sg_virt(sg), - len, flags); + if (host->flags & HOST_F_XMIT) { + ret = au1xxx_dbdma_put_source_flags(channel, + (void *)sg_virt(sg), len, flags); + } else { + ret = au1xxx_dbdma_put_dest_flags(channel, + (void *)sg_virt(sg), len, flags); } - if (!ret) + if (!ret) goto dataerr; datalen -= len; } - } - else { +#endif + } else { host->pio.index = 0; host->pio.offset = 0; host->pio.len = datalen; @@ -636,25 +666,21 @@ au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data) IRQ_ON(host, SD_CONFIG_TH); else IRQ_ON(host, SD_CONFIG_NE); - //IRQ_ON(host, SD_CONFIG_RA|SD_CONFIG_RF); + /* IRQ_ON(host, SD_CONFIG_RA | SD_CONFIG_RF); */ } return 0; - dataerr: - dma_unmap_sg(mmc_dev(host->mmc),data->sg,data->sg_len,host->dma.dir); +dataerr: + dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, + host->dma.dir); return -ETIMEDOUT; } -/* static void au1xmmc_request - This actually starts a command or data transaction -*/ - +/* This actually starts a command or data transaction */ static void au1xmmc_request(struct mmc_host* mmc, 
struct mmc_request* mrq) { - struct au1xmmc_host *host = mmc_priv(mmc); - unsigned int flags = 0; int ret = 0; WARN_ON(irqs_disabled()); @@ -663,11 +689,15 @@ static void au1xmmc_request(struct mmc_host* mmc, struct mmc_request* mrq) host->mrq = mrq; host->status = HOST_S_CMD; - bcsr->disk_leds &= ~(1 << 8); + /* fail request immediately if no card is present */ + if (0 == au1xmmc_card_inserted(mmc)) { + mrq->cmd->error = -ENOMEDIUM; + au1xmmc_finish_request(host); + return; + } if (mrq->data) { FLUSH_FIFO(host); - flags = mrq->data->flags; ret = au1xmmc_prepare_data(host, mrq->data); } @@ -682,7 +712,6 @@ static void au1xmmc_request(struct mmc_host* mmc, struct mmc_request* mrq) static void au1xmmc_reset_controller(struct au1xmmc_host *host) { - /* Apply the clock */ au_writel(SD_ENABLE_CE, HOST_ENABLE(host)); au_sync_delay(1); @@ -712,9 +741,10 @@ static void au1xmmc_reset_controller(struct au1xmmc_host *host) } -static void au1xmmc_set_ios(struct mmc_host* mmc, struct mmc_ios* ios) +static void au1xmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct au1xmmc_host *host = mmc_priv(mmc); + u32 config2; if (ios->power_mode == MMC_POWER_OFF) au1xmmc_set_power(host, 0); @@ -726,21 +756,18 @@ static void au1xmmc_set_ios(struct mmc_host* mmc, struct mmc_ios* ios) au1xmmc_set_clock(host, ios->clock); host->clock = ios->clock; } -} - -static void au1xmmc_dma_callback(int irq, void *dev_id) -{ - struct au1xmmc_host *host = (struct au1xmmc_host *) dev_id; - - /* Avoid spurious interrupts */ - if (!host->mrq) - return; - - if (host->flags & HOST_F_STOP) - SEND_STOP(host); - - tasklet_schedule(&host->data_task); + config2 = au_readl(HOST_CONFIG2(host)); + switch (ios->bus_width) { + case MMC_BUS_WIDTH_4: + config2 |= SD_CONFIG2_WB; + break; + case MMC_BUS_WIDTH_1: + config2 &= ~SD_CONFIG2_WB; + break; + } + au_writel(config2, HOST_CONFIG2(host)); + au_sync(); } #define STATUS_TIMEOUT (SD_STATUS_RAT | SD_STATUS_DT) @@ -749,245 +776,354 @@ static void au1xmmc_dma_callback(int irq, void *dev_id) static irqreturn_t au1xmmc_irq(int irq, void *dev_id) { - + struct au1xmmc_host *host = dev_id; u32 status; - int i, ret = 0; - - disable_irq(AU1100_SD_IRQ); - for(i = 0; i < AU1XMMC_CONTROLLER_COUNT; i++) { - struct au1xmmc_host * host = au1xmmc_hosts[i]; - u32 handled = 1; + status = au_readl(HOST_STATUS(host)); - status = au_readl(HOST_STATUS(host)); + if (!(status & SD_STATUS_I)) + return IRQ_NONE; /* not ours */ - if (host->mrq && (status & STATUS_TIMEOUT)) { - if (status & SD_STATUS_RAT) - host->mrq->cmd->error = -ETIMEDOUT; + if (status & SD_STATUS_SI) /* SDIO */ + mmc_signal_sdio_irq(host->mmc); - else if (status & SD_STATUS_DT) - host->mrq->data->error = -ETIMEDOUT; + if (host->mrq && (status & STATUS_TIMEOUT)) { + if (status & SD_STATUS_RAT) + host->mrq->cmd->error = -ETIMEDOUT; + else if (status & SD_STATUS_DT) + host->mrq->data->error = -ETIMEDOUT; - /* In PIO mode, interrupts might still be enabled */ - IRQ_OFF(host, SD_CONFIG_NE | SD_CONFIG_TH); + /* In PIO mode, interrupts might still be enabled */ + IRQ_OFF(host, SD_CONFIG_NE | SD_CONFIG_TH); - //IRQ_OFF(host, SD_CONFIG_TH|SD_CONFIG_RA|SD_CONFIG_RF); - tasklet_schedule(&host->finish_task); - } + /* IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF); */ + tasklet_schedule(&host->finish_task); + } #if 0 - else if (status & SD_STATUS_DD) { - - /* Sometimes we get a DD before a NE in PIO mode */ - - if (!(host->flags & HOST_F_DMA) && - (status & SD_STATUS_NE)) - au1xmmc_receive_pio(host); - else { - au1xmmc_data_complete(host, 
status); - //tasklet_schedule(&host->data_task); - } + else if (status & SD_STATUS_DD) { + /* Sometimes we get a DD before a NE in PIO mode */ + if (!(host->flags & HOST_F_DMA) && (status & SD_STATUS_NE)) + au1xmmc_receive_pio(host); + else { + au1xmmc_data_complete(host, status); + /* tasklet_schedule(&host->data_task); */ } + } #endif - else if (status & (SD_STATUS_CR)) { - if (host->status == HOST_S_CMD) - au1xmmc_cmd_complete(host,status); - } - else if (!(host->flags & HOST_F_DMA)) { - if ((host->flags & HOST_F_XMIT) && - (status & STATUS_DATA_OUT)) - au1xmmc_send_pio(host); - else if ((host->flags & HOST_F_RECV) && - (status & STATUS_DATA_IN)) - au1xmmc_receive_pio(host); - } - else if (status & 0x203FBC70) { - DBG("Unhandled status %8.8x\n", host->id, status); - handled = 0; - } - - au_writel(status, HOST_STATUS(host)); - au_sync(); - - ret |= handled; + else if (status & SD_STATUS_CR) { + if (host->status == HOST_S_CMD) + au1xmmc_cmd_complete(host, status); + + } else if (!(host->flags & HOST_F_DMA)) { + if ((host->flags & HOST_F_XMIT) && (status & STATUS_DATA_OUT)) + au1xmmc_send_pio(host); + else if ((host->flags & HOST_F_RECV) && (status & STATUS_DATA_IN)) + au1xmmc_receive_pio(host); + + } else if (status & 0x203F3C70) { + DBG("Unhandled status %8.8x\n", host->pdev->id, + status); } - enable_irq(AU1100_SD_IRQ); - return ret; + au_writel(status, HOST_STATUS(host)); + au_sync(); + + return IRQ_HANDLED; } -static void au1xmmc_poll_event(unsigned long arg) -{ - struct au1xmmc_host *host = (struct au1xmmc_host *) arg; +#ifdef CONFIG_SOC_AU1200 +/* 8bit memory DMA device */ +static dbdev_tab_t au1xmmc_mem_dbdev = { + .dev_id = DSCR_CMD0_ALWAYS, + .dev_flags = DEV_FLAGS_ANYUSE, + .dev_tsize = 0, + .dev_devwidth = 8, + .dev_physaddr = 0x00000000, + .dev_intlevel = 0, + .dev_intpolarity = 0, +}; +static int memid; - int card = au1xmmc_card_inserted(host); - int controller = (host->flags & HOST_F_ACTIVE) ? 
1 : 0; +static void au1xmmc_dbdma_callback(int irq, void *dev_id) +{ + struct au1xmmc_host *host = (struct au1xmmc_host *)dev_id; - if (card != controller) { - host->flags &= ~HOST_F_ACTIVE; - if (card) host->flags |= HOST_F_ACTIVE; - mmc_detect_change(host->mmc, 0); - } + /* Avoid spurious interrupts */ + if (!host->mrq) + return; - if (host->mrq != NULL) { - u32 status = au_readl(HOST_STATUS(host)); - DBG("PENDING - %8.8x\n", host->id, status); - } + if (host->flags & HOST_F_STOP) + SEND_STOP(host); - mod_timer(&host->timer, jiffies + AU1XMMC_DETECT_TIMEOUT); + tasklet_schedule(&host->data_task); } -static dbdev_tab_t au1xmmc_mem_dbdev = -{ - DSCR_CMD0_ALWAYS, DEV_FLAGS_ANYUSE, 0, 8, 0x00000000, 0, 0 -}; - -static void au1xmmc_init_dma(struct au1xmmc_host *host) +static int au1xmmc_dbdma_init(struct au1xmmc_host *host) { + struct resource *res; + int txid, rxid; + + res = platform_get_resource(host->pdev, IORESOURCE_DMA, 0); + if (!res) + return -ENODEV; + txid = res->start; + + res = platform_get_resource(host->pdev, IORESOURCE_DMA, 1); + if (!res) + return -ENODEV; + rxid = res->start; + + if (!memid) + return -ENODEV; + + host->tx_chan = au1xxx_dbdma_chan_alloc(memid, txid, + au1xmmc_dbdma_callback, (void *)host); + if (!host->tx_chan) { + dev_err(&host->pdev->dev, "cannot allocate TX DMA\n"); + return -ENODEV; + } - u32 rxchan, txchan; - - int txid = au1xmmc_card_table[host->id].tx_devid; - int rxid = au1xmmc_card_table[host->id].rx_devid; + host->rx_chan = au1xxx_dbdma_chan_alloc(rxid, memid, + au1xmmc_dbdma_callback, (void *)host); + if (!host->rx_chan) { + dev_err(&host->pdev->dev, "cannot allocate RX DMA\n"); + au1xxx_dbdma_chan_free(host->tx_chan); + return -ENODEV; + } - /* DSCR_CMD0_ALWAYS has a stride of 32 bits, we need a stride - of 8 bits. 
And since devices are shared, we need to create - our own to avoid freaking out other devices - */ + au1xxx_dbdma_set_devwidth(host->tx_chan, 8); + au1xxx_dbdma_set_devwidth(host->rx_chan, 8); - int memid = au1xxx_ddma_add_device(&au1xmmc_mem_dbdev); + au1xxx_dbdma_ring_alloc(host->tx_chan, AU1XMMC_DESCRIPTOR_COUNT); + au1xxx_dbdma_ring_alloc(host->rx_chan, AU1XMMC_DESCRIPTOR_COUNT); - txchan = au1xxx_dbdma_chan_alloc(memid, txid, - au1xmmc_dma_callback, (void *) host); + /* DBDMA is good to go */ + host->flags |= HOST_F_DMA; - rxchan = au1xxx_dbdma_chan_alloc(rxid, memid, - au1xmmc_dma_callback, (void *) host); + return 0; +} - au1xxx_dbdma_set_devwidth(txchan, 8); - au1xxx_dbdma_set_devwidth(rxchan, 8); +static void au1xmmc_dbdma_shutdown(struct au1xmmc_host *host) +{ + if (host->flags & HOST_F_DMA) { + host->flags &= ~HOST_F_DMA; + au1xxx_dbdma_chan_free(host->tx_chan); + au1xxx_dbdma_chan_free(host->rx_chan); + } +} +#endif - au1xxx_dbdma_ring_alloc(txchan, AU1XMMC_DESCRIPTOR_COUNT); - au1xxx_dbdma_ring_alloc(rxchan, AU1XMMC_DESCRIPTOR_COUNT); +static void au1xmmc_enable_sdio_irq(struct mmc_host *mmc, int en) +{ + struct au1xmmc_host *host = mmc_priv(mmc); - host->tx_chan = txchan; - host->rx_chan = rxchan; + if (en) + IRQ_ON(host, SD_CONFIG_SI); + else + IRQ_OFF(host, SD_CONFIG_SI); } static const struct mmc_host_ops au1xmmc_ops = { .request = au1xmmc_request, .set_ios = au1xmmc_set_ios, .get_ro = au1xmmc_card_readonly, + .get_cd = au1xmmc_card_inserted, + .enable_sdio_irq = au1xmmc_enable_sdio_irq, }; static int __devinit au1xmmc_probe(struct platform_device *pdev) { + struct mmc_host *mmc; + struct au1xmmc_host *host; + struct resource *r; + int ret; + + mmc = mmc_alloc_host(sizeof(struct au1xmmc_host), &pdev->dev); + if (!mmc) { + dev_err(&pdev->dev, "no memory for mmc_host\n"); + ret = -ENOMEM; + goto out0; + } - int i, ret = 0; - - /* THe interrupt is shared among all controllers */ - ret = request_irq(AU1100_SD_IRQ, au1xmmc_irq, IRQF_DISABLED, "MMC", 0); + host = mmc_priv(mmc); + host->mmc = mmc; + host->platdata = pdev->dev.platform_data; + host->pdev = pdev; - if (ret) { - printk(DRIVER_NAME "ERROR: Couldn't get int %d: %d\n", - AU1100_SD_IRQ, ret); - return -ENXIO; + ret = -ENODEV; + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r) { + dev_err(&pdev->dev, "no mmio defined\n"); + goto out1; } - disable_irq(AU1100_SD_IRQ); + host->ioarea = request_mem_region(r->start, r->end - r->start + 1, + pdev->name); + if (!host->ioarea) { + dev_err(&pdev->dev, "mmio already in use\n"); + goto out1; + } - for(i = 0; i < AU1XMMC_CONTROLLER_COUNT; i++) { - struct mmc_host *mmc = mmc_alloc_host(sizeof(struct au1xmmc_host), &pdev->dev); - struct au1xmmc_host *host = 0; + host->iobase = (unsigned long)ioremap(r->start, 0x3c); + if (!host->iobase) { + dev_err(&pdev->dev, "cannot remap mmio\n"); + goto out2; + } - if (!mmc) { - printk(DRIVER_NAME "ERROR: no mem for host %d\n", i); - au1xmmc_hosts[i] = 0; - continue; - } + r = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!r) { + dev_err(&pdev->dev, "no IRQ defined\n"); + goto out3; + } - mmc->ops = &au1xmmc_ops; + host->irq = r->start; + /* IRQ is shared among both SD controllers */ + ret = request_irq(host->irq, au1xmmc_irq, IRQF_SHARED, + DRIVER_NAME, host); + if (ret) { + dev_err(&pdev->dev, "cannot grab IRQ\n"); + goto out3; + } - mmc->f_min = 450000; - mmc->f_max = 24000000; + mmc->ops = &au1xmmc_ops; - mmc->max_seg_size = AU1XMMC_DESCRIPTOR_SIZE; - mmc->max_phys_segs = AU1XMMC_DESCRIPTOR_COUNT; + mmc->f_min = 450000; + 
mmc->f_max = 24000000; - mmc->max_blk_size = 2048; - mmc->max_blk_count = 512; + mmc->max_seg_size = AU1XMMC_DESCRIPTOR_SIZE; + mmc->max_phys_segs = AU1XMMC_DESCRIPTOR_COUNT; - mmc->ocr_avail = AU1XMMC_OCR; + mmc->max_blk_size = 2048; + mmc->max_blk_count = 512; - host = mmc_priv(mmc); - host->mmc = mmc; + mmc->ocr_avail = AU1XMMC_OCR; + mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ; - host->id = i; - host->iobase = au1xmmc_card_table[host->id].iobase; - host->clock = 0; - host->power_mode = MMC_POWER_OFF; + host->status = HOST_S_IDLE; - host->flags = au1xmmc_card_inserted(host) ? HOST_F_ACTIVE : 0; - host->status = HOST_S_IDLE; + /* board-specific carddetect setup, if any */ + if (host->platdata && host->platdata->cd_setup) { + ret = host->platdata->cd_setup(mmc, 1); + if (ret) { + dev_warn(&pdev->dev, "board CD setup failed\n"); + mmc->caps |= MMC_CAP_NEEDS_POLL; + } + } else + mmc->caps |= MMC_CAP_NEEDS_POLL; - init_timer(&host->timer); + tasklet_init(&host->data_task, au1xmmc_tasklet_data, + (unsigned long)host); - host->timer.function = au1xmmc_poll_event; - host->timer.data = (unsigned long) host; - host->timer.expires = jiffies + AU1XMMC_DETECT_TIMEOUT; + tasklet_init(&host->finish_task, au1xmmc_tasklet_finish, + (unsigned long)host); - tasklet_init(&host->data_task, au1xmmc_tasklet_data, - (unsigned long) host); +#ifdef CONFIG_SOC_AU1200 + ret = au1xmmc_dbdma_init(host); + if (ret) + printk(KERN_INFO DRIVER_NAME ": DBDMA init failed; using PIO\n"); +#endif - tasklet_init(&host->finish_task, au1xmmc_tasklet_finish, - (unsigned long) host); +#ifdef CONFIG_LEDS_CLASS + if (host->platdata && host->platdata->led) { + struct led_classdev *led = host->platdata->led; + led->name = mmc_hostname(mmc); + led->brightness = LED_OFF; + led->default_trigger = mmc_hostname(mmc); + ret = led_classdev_register(mmc_dev(mmc), led); + if (ret) + goto out5; + } +#endif - spin_lock_init(&host->lock); + au1xmmc_reset_controller(host); - if (dma != 0) - au1xmmc_init_dma(host); + ret = mmc_add_host(mmc); + if (ret) { + dev_err(&pdev->dev, "cannot add mmc host\n"); + goto out6; + } - au1xmmc_reset_controller(host); + platform_set_drvdata(pdev, mmc); - mmc_add_host(mmc); - au1xmmc_hosts[i] = host; + printk(KERN_INFO DRIVER_NAME ": MMC Controller %d set up at %8.8X" + " (mode=%s)\n", pdev->id, host->iobase, + host->flags & HOST_F_DMA ? "dma" : "pio"); - add_timer(&host->timer); + return 0; /* all ok */ - printk(KERN_INFO DRIVER_NAME ": MMC Controller %d set up at %8.8X (mode=%s)\n", - host->id, host->iobase, dma ? 
"dma" : "pio"); - } +out6: +#ifdef CONFIG_LEDS_CLASS + if (host->platdata && host->platdata->led) + led_classdev_unregister(host->platdata->led); +out5: +#endif + au_writel(0, HOST_ENABLE(host)); + au_writel(0, HOST_CONFIG(host)); + au_writel(0, HOST_CONFIG2(host)); + au_sync(); - enable_irq(AU1100_SD_IRQ); +#ifdef CONFIG_SOC_AU1200 + au1xmmc_dbdma_shutdown(host); +#endif - return 0; + tasklet_kill(&host->data_task); + tasklet_kill(&host->finish_task); + + if (host->platdata && host->platdata->cd_setup && + !(mmc->caps & MMC_CAP_NEEDS_POLL)) + host->platdata->cd_setup(mmc, 0); + + free_irq(host->irq, host); +out3: + iounmap((void *)host->iobase); +out2: + release_resource(host->ioarea); + kfree(host->ioarea); +out1: + mmc_free_host(mmc); +out0: + return ret; } static int __devexit au1xmmc_remove(struct platform_device *pdev) { + struct mmc_host *mmc = platform_get_drvdata(pdev); + struct au1xmmc_host *host; + + if (mmc) { + host = mmc_priv(mmc); - int i; + mmc_remove_host(mmc); - disable_irq(AU1100_SD_IRQ); +#ifdef CONFIG_LEDS_CLASS + if (host->platdata && host->platdata->led) + led_classdev_unregister(host->platdata->led); +#endif - for(i = 0; i < AU1XMMC_CONTROLLER_COUNT; i++) { - struct au1xmmc_host *host = au1xmmc_hosts[i]; - if (!host) continue; + if (host->platdata && host->platdata->cd_setup && + !(mmc->caps & MMC_CAP_NEEDS_POLL)) + host->platdata->cd_setup(mmc, 0); + + au_writel(0, HOST_ENABLE(host)); + au_writel(0, HOST_CONFIG(host)); + au_writel(0, HOST_CONFIG2(host)); + au_sync(); tasklet_kill(&host->data_task); tasklet_kill(&host->finish_task); - del_timer_sync(&host->timer); +#ifdef CONFIG_SOC_AU1200 + au1xmmc_dbdma_shutdown(host); +#endif au1xmmc_set_power(host, 0); - mmc_remove_host(host->mmc); - - au1xxx_dbdma_chan_free(host->tx_chan); - au1xxx_dbdma_chan_free(host->rx_chan); + free_irq(host->irq, host); + iounmap((void *)host->iobase); + release_resource(host->ioarea); + kfree(host->ioarea); - au_writel(0x0, HOST_ENABLE(host)); - au_sync(); + mmc_free_host(mmc); } - - free_irq(AU1100_SD_IRQ, 0); return 0; } @@ -1004,21 +1140,31 @@ static struct platform_driver au1xmmc_driver = { static int __init au1xmmc_init(void) { +#ifdef CONFIG_SOC_AU1200 + /* DSCR_CMD0_ALWAYS has a stride of 32 bits, we need a stride + * of 8 bits. And since devices are shared, we need to create + * our own to avoid freaking out other devices. 
+ */ + memid = au1xxx_ddma_add_device(&au1xmmc_mem_dbdev); + if (!memid) + printk(KERN_ERR "au1xmmc: cannot add memory dbdma dev\n"); +#endif return platform_driver_register(&au1xmmc_driver); } static void __exit au1xmmc_exit(void) { +#ifdef CONFIG_SOC_AU1200 + if (memid) + au1xxx_ddma_del_device(memid); +#endif platform_driver_unregister(&au1xmmc_driver); } module_init(au1xmmc_init); module_exit(au1xmmc_exit); -#ifdef MODULE MODULE_AUTHOR("Advanced Micro Devices, Inc"); MODULE_DESCRIPTION("MMC/SD driver for the Alchemy Au1XXX"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:au1xxx-mmc"); -#endif - diff --git a/drivers/mmc/host/au1xmmc.h b/drivers/mmc/host/au1xmmc.h deleted file mode 100644 index 341cbdf0baca..000000000000 --- a/drivers/mmc/host/au1xmmc.h +++ /dev/null @@ -1,96 +0,0 @@ -#ifndef _AU1XMMC_H_ -#define _AU1XMMC_H_ - -/* Hardware definitions */ - -#define AU1XMMC_DESCRIPTOR_COUNT 1 -#define AU1XMMC_DESCRIPTOR_SIZE 2048 - -#define AU1XMMC_OCR ( MMC_VDD_27_28 | MMC_VDD_28_29 | MMC_VDD_29_30 | \ - MMC_VDD_30_31 | MMC_VDD_31_32 | MMC_VDD_32_33 | \ - MMC_VDD_33_34 | MMC_VDD_34_35 | MMC_VDD_35_36) - -/* Easy access macros */ - -#define HOST_STATUS(h) ((h)->iobase + SD_STATUS) -#define HOST_CONFIG(h) ((h)->iobase + SD_CONFIG) -#define HOST_ENABLE(h) ((h)->iobase + SD_ENABLE) -#define HOST_TXPORT(h) ((h)->iobase + SD_TXPORT) -#define HOST_RXPORT(h) ((h)->iobase + SD_RXPORT) -#define HOST_CMDARG(h) ((h)->iobase + SD_CMDARG) -#define HOST_BLKSIZE(h) ((h)->iobase + SD_BLKSIZE) -#define HOST_CMD(h) ((h)->iobase + SD_CMD) -#define HOST_CONFIG2(h) ((h)->iobase + SD_CONFIG2) -#define HOST_TIMEOUT(h) ((h)->iobase + SD_TIMEOUT) -#define HOST_DEBUG(h) ((h)->iobase + SD_DEBUG) - -#define DMA_CHANNEL(h) \ - ( ((h)->flags & HOST_F_XMIT) ? (h)->tx_chan : (h)->rx_chan) - -/* This gives us a hard value for the stop command that we can write directly - * to the command register - */ - -#define STOP_CMD (SD_CMD_RT_1B|SD_CMD_CT_7|(0xC << SD_CMD_CI_SHIFT)|SD_CMD_GO) - -/* This is the set of interrupts that we configure by default */ - -#if 0 -#define AU1XMMC_INTERRUPTS (SD_CONFIG_SC | SD_CONFIG_DT | SD_CONFIG_DD | \ - SD_CONFIG_RAT | SD_CONFIG_CR | SD_CONFIG_I) -#endif - -#define AU1XMMC_INTERRUPTS (SD_CONFIG_SC | SD_CONFIG_DT | \ - SD_CONFIG_RAT | SD_CONFIG_CR | SD_CONFIG_I) -/* The poll event (looking for insert/remove events runs twice a second */ -#define AU1XMMC_DETECT_TIMEOUT (HZ/2) - -struct au1xmmc_host { - struct mmc_host *mmc; - struct mmc_request *mrq; - - u32 id; - - u32 flags; - u32 iobase; - u32 clock; - u32 bus_width; - u32 power_mode; - - int status; - - struct { - int len; - int dir; - } dma; - - struct { - int index; - int offset; - int len; - } pio; - - u32 tx_chan; - u32 rx_chan; - - struct timer_list timer; - struct tasklet_struct finish_task; - struct tasklet_struct data_task; - - spinlock_t lock; -}; - -/* Status flags used by the host structure */ - -#define HOST_F_XMIT 0x0001 -#define HOST_F_RECV 0x0002 -#define HOST_F_DMA 0x0010 -#define HOST_F_ACTIVE 0x0100 -#define HOST_F_STOP 0x1000 - -#define HOST_S_IDLE 0x0001 -#define HOST_S_CMD 0x0002 -#define HOST_S_DATA 0x0003 -#define HOST_S_STOP 0x0004 - -#endif diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c index eed211b2ac70..5e880c0f1349 100644 --- a/drivers/mmc/host/imxmmc.c +++ b/drivers/mmc/host/imxmmc.c @@ -892,9 +892,12 @@ static int imxmci_get_ro(struct mmc_host *mmc) struct imxmci_host *host = mmc_priv(mmc); if (host->pdata && host->pdata->get_ro) - return host->pdata->get_ro(mmc_dev(mmc)); - /* Host 
doesn't support read only detection so assume writeable */ - return 0; + return !!host->pdata->get_ro(mmc_dev(mmc)); + /* + * Board doesn't support read only detection; let the mmc core + * decide what to do. + */ + return -ENOSYS; } diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index 35508584ac2a..41cc63360e43 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -1126,16 +1126,28 @@ static int mmc_spi_get_ro(struct mmc_host *mmc) struct mmc_spi_host *host = mmc_priv(mmc); if (host->pdata && host->pdata->get_ro) - return host->pdata->get_ro(mmc->parent); - /* board doesn't support read only detection; assume writeable */ - return 0; + return !!host->pdata->get_ro(mmc->parent); + /* + * Board doesn't support read only detection; let the mmc core + * decide what to do. + */ + return -ENOSYS; } +static int mmc_spi_get_cd(struct mmc_host *mmc) +{ + struct mmc_spi_host *host = mmc_priv(mmc); + + if (host->pdata && host->pdata->get_cd) + return !!host->pdata->get_cd(mmc->parent); + return -ENOSYS; +} static const struct mmc_host_ops mmc_spi_ops = { .request = mmc_spi_request, .set_ios = mmc_spi_set_ios, .get_ro = mmc_spi_get_ro, + .get_cd = mmc_spi_get_cd, }; @@ -1240,10 +1252,7 @@ static int mmc_spi_probe(struct spi_device *spi) mmc->ops = &mmc_spi_ops; mmc->max_blk_size = MMC_SPI_BLOCKSIZE; - /* As long as we keep track of the number of successfully - * transmitted blocks, we're good for multiwrite. - */ - mmc->caps = MMC_CAP_SPI | MMC_CAP_MULTIWRITE; + mmc->caps = MMC_CAP_SPI; /* SPI doesn't need the lowspeed device identification thing for * MMC or SD cards, since it never comes up in open drain mode. @@ -1319,17 +1328,23 @@ static int mmc_spi_probe(struct spi_device *spi) goto fail_glue_init; } + /* pass platform capabilities, if any */ + if (host->pdata) + mmc->caps |= host->pdata->caps; + status = mmc_add_host(mmc); if (status != 0) goto fail_add_host; - dev_info(&spi->dev, "SD/MMC host %s%s%s%s\n", + dev_info(&spi->dev, "SD/MMC host %s%s%s%s%s\n", mmc->class_dev.bus_id, host->dma_dev ? "" : ", no DMA", (host->pdata && host->pdata->get_ro) ? "" : ", no WP", (host->pdata && host->pdata->setpower) - ? "" : ", no poweroff"); + ? "" : ", no poweroff", + (mmc->caps & MMC_CAP_NEEDS_POLL) + ? 
", cd polling" : ""); return 0; fail_add_host: diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index da5fecad74d9..696cf3647ceb 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -535,7 +535,6 @@ static int mmci_probe(struct amba_device *dev, void *id) mmc->f_min = (host->mclk + 511) / 512; mmc->f_max = min(host->mclk, fmax); mmc->ocr_avail = plat->ocr_mask; - mmc->caps = MMC_CAP_MULTIWRITE; /* * We can do SGIO diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c index 549517c35675..dbc26eb6a89e 100644 --- a/drivers/mmc/host/omap.c +++ b/drivers/mmc/host/omap.c @@ -1317,7 +1317,7 @@ static int __init mmc_omap_new_slot(struct mmc_omap_host *host, int id) host->slots[id] = slot; - mmc->caps = MMC_CAP_MULTIWRITE; + mmc->caps = 0; if (host->pdata->conf.wire4) mmc->caps |= MMC_CAP_4_BIT_DATA; diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index d89475d36988..d39f59738866 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c @@ -374,9 +374,12 @@ static int pxamci_get_ro(struct mmc_host *mmc) struct pxamci_host *host = mmc_priv(mmc); if (host->pdata && host->pdata->get_ro) - return host->pdata->get_ro(mmc_dev(mmc)); - /* Host doesn't support read only detection so assume writeable */ - return 0; + return !!host->pdata->get_ro(mmc_dev(mmc)); + /* + * Board doesn't support read only detection; let the mmc core + * decide what to do. + */ + return -ENOSYS; } static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c new file mode 100644 index 000000000000..6a1e4994b724 --- /dev/null +++ b/drivers/mmc/host/s3cmci.c @@ -0,0 +1,1446 @@ +/* + * linux/drivers/mmc/s3cmci.h - Samsung S3C MCI driver + * + * Copyright (C) 2004-2006 maintech GmbH, Thomas Kleffel <tk@maintech.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/dma-mapping.h> +#include <linux/clk.h> +#include <linux/mmc/host.h> +#include <linux/platform_device.h> +#include <linux/irq.h> +#include <linux/io.h> + +#include <asm/dma.h> + +#include <asm/arch/regs-sdi.h> +#include <asm/arch/regs-gpio.h> + +#include <asm/plat-s3c24xx/mci.h> + +#include "s3cmci.h" + +#define DRIVER_NAME "s3c-mci" + +enum dbg_channels { + dbg_err = (1 << 0), + dbg_debug = (1 << 1), + dbg_info = (1 << 2), + dbg_irq = (1 << 3), + dbg_sg = (1 << 4), + dbg_dma = (1 << 5), + dbg_pio = (1 << 6), + dbg_fail = (1 << 7), + dbg_conf = (1 << 8), +}; + +static const int dbgmap_err = dbg_err | dbg_fail; +static const int dbgmap_info = dbg_info | dbg_conf; +static const int dbgmap_debug = dbg_debug; + +#define dbg(host, channels, args...) 
\ + do { \ + if (dbgmap_err & channels) \ + dev_err(&host->pdev->dev, args); \ + else if (dbgmap_info & channels) \ + dev_info(&host->pdev->dev, args); \ + else if (dbgmap_debug & channels) \ + dev_dbg(&host->pdev->dev, args); \ + } while (0) + +#define RESSIZE(ressource) (((ressource)->end - (ressource)->start)+1) + +static struct s3c2410_dma_client s3cmci_dma_client = { + .name = "s3c-mci", +}; + +static void finalize_request(struct s3cmci_host *host); +static void s3cmci_send_request(struct mmc_host *mmc); +static void s3cmci_reset(struct s3cmci_host *host); + +#ifdef CONFIG_MMC_DEBUG + +static void dbg_dumpregs(struct s3cmci_host *host, char *prefix) +{ + u32 con, pre, cmdarg, cmdcon, cmdsta, r0, r1, r2, r3, timer, bsize; + u32 datcon, datcnt, datsta, fsta, imask; + + con = readl(host->base + S3C2410_SDICON); + pre = readl(host->base + S3C2410_SDIPRE); + cmdarg = readl(host->base + S3C2410_SDICMDARG); + cmdcon = readl(host->base + S3C2410_SDICMDCON); + cmdsta = readl(host->base + S3C2410_SDICMDSTAT); + r0 = readl(host->base + S3C2410_SDIRSP0); + r1 = readl(host->base + S3C2410_SDIRSP1); + r2 = readl(host->base + S3C2410_SDIRSP2); + r3 = readl(host->base + S3C2410_SDIRSP3); + timer = readl(host->base + S3C2410_SDITIMER); + bsize = readl(host->base + S3C2410_SDIBSIZE); + datcon = readl(host->base + S3C2410_SDIDCON); + datcnt = readl(host->base + S3C2410_SDIDCNT); + datsta = readl(host->base + S3C2410_SDIDSTA); + fsta = readl(host->base + S3C2410_SDIFSTA); + imask = readl(host->base + host->sdiimsk); + + dbg(host, dbg_debug, "%s CON:[%08x] PRE:[%08x] TMR:[%08x]\n", + prefix, con, pre, timer); + + dbg(host, dbg_debug, "%s CCON:[%08x] CARG:[%08x] CSTA:[%08x]\n", + prefix, cmdcon, cmdarg, cmdsta); + + dbg(host, dbg_debug, "%s DCON:[%08x] FSTA:[%08x]" + " DSTA:[%08x] DCNT:[%08x]\n", + prefix, datcon, fsta, datsta, datcnt); + + dbg(host, dbg_debug, "%s R0:[%08x] R1:[%08x]" + " R2:[%08x] R3:[%08x]\n", + prefix, r0, r1, r2, r3); +} + +static void prepare_dbgmsg(struct s3cmci_host *host, struct mmc_command *cmd, + int stop) +{ + snprintf(host->dbgmsg_cmd, 300, + "#%u%s op:%i arg:0x%08x flags:0x08%x retries:%u", + host->ccnt, (stop ? " (STOP)" : ""), + cmd->opcode, cmd->arg, cmd->flags, cmd->retries); + + if (cmd->data) { + snprintf(host->dbgmsg_dat, 300, + "#%u bsize:%u blocks:%u bytes:%u", + host->dcnt, cmd->data->blksz, + cmd->data->blocks, + cmd->data->blocks * cmd->data->blksz); + } else { + host->dbgmsg_dat[0] = '\0'; + } +} + +static void dbg_dumpcmd(struct s3cmci_host *host, struct mmc_command *cmd, + int fail) +{ + unsigned int dbglvl = fail ? 
dbg_fail : dbg_debug; + + if (!cmd) + return; + + if (cmd->error == 0) { + dbg(host, dbglvl, "CMD[OK] %s R0:0x%08x\n", + host->dbgmsg_cmd, cmd->resp[0]); + } else { + dbg(host, dbglvl, "CMD[ERR %i] %s Status:%s\n", + cmd->error, host->dbgmsg_cmd, host->status); + } + + if (!cmd->data) + return; + + if (cmd->data->error == 0) { + dbg(host, dbglvl, "DAT[OK] %s\n", host->dbgmsg_dat); + } else { + dbg(host, dbglvl, "DAT[ERR %i] %s DCNT:0x%08x\n", + cmd->data->error, host->dbgmsg_dat, + readl(host->base + S3C2410_SDIDCNT)); + } +} +#else +static void dbg_dumpcmd(struct s3cmci_host *host, + struct mmc_command *cmd, int fail) { } + +static void prepare_dbgmsg(struct s3cmci_host *host, struct mmc_command *cmd, + int stop) { } + +static void dbg_dumpregs(struct s3cmci_host *host, char *prefix) { } + +#endif /* CONFIG_MMC_DEBUG */ + +static inline u32 enable_imask(struct s3cmci_host *host, u32 imask) +{ + u32 newmask; + + newmask = readl(host->base + host->sdiimsk); + newmask |= imask; + + writel(newmask, host->base + host->sdiimsk); + + return newmask; +} + +static inline u32 disable_imask(struct s3cmci_host *host, u32 imask) +{ + u32 newmask; + + newmask = readl(host->base + host->sdiimsk); + newmask &= ~imask; + + writel(newmask, host->base + host->sdiimsk); + + return newmask; +} + +static inline void clear_imask(struct s3cmci_host *host) +{ + writel(0, host->base + host->sdiimsk); +} + +static inline int get_data_buffer(struct s3cmci_host *host, + u32 *words, u32 **pointer) +{ + struct scatterlist *sg; + + if (host->pio_active == XFER_NONE) + return -EINVAL; + + if ((!host->mrq) || (!host->mrq->data)) + return -EINVAL; + + if (host->pio_sgptr >= host->mrq->data->sg_len) { + dbg(host, dbg_debug, "no more buffers (%i/%i)\n", + host->pio_sgptr, host->mrq->data->sg_len); + return -EBUSY; + } + sg = &host->mrq->data->sg[host->pio_sgptr]; + + *words = sg->length >> 2; + *pointer = sg_virt(sg); + + host->pio_sgptr++; + + dbg(host, dbg_sg, "new buffer (%i/%i)\n", + host->pio_sgptr, host->mrq->data->sg_len); + + return 0; +} + +static inline u32 fifo_count(struct s3cmci_host *host) +{ + u32 fifostat = readl(host->base + S3C2410_SDIFSTA); + + fifostat &= S3C2410_SDIFSTA_COUNTMASK; + return fifostat >> 2; +} + +static inline u32 fifo_free(struct s3cmci_host *host) +{ + u32 fifostat = readl(host->base + S3C2410_SDIFSTA); + + fifostat &= S3C2410_SDIFSTA_COUNTMASK; + return (63 - fifostat) >> 2; +} + +static void do_pio_read(struct s3cmci_host *host) +{ + int res; + u32 fifo; + void __iomem *from_ptr; + + /* write real prescaler to host, it might be set slow to fix */ + writel(host->prescaler, host->base + S3C2410_SDIPRE); + + from_ptr = host->base + host->sdidata; + + while ((fifo = fifo_count(host))) { + if (!host->pio_words) { + res = get_data_buffer(host, &host->pio_words, + &host->pio_ptr); + if (res) { + host->pio_active = XFER_NONE; + host->complete_what = COMPLETION_FINALIZE; + + dbg(host, dbg_pio, "pio_read(): " + "complete (no more data).\n"); + return; + } + + dbg(host, dbg_pio, + "pio_read(): new target: [%i]@[%p]\n", + host->pio_words, host->pio_ptr); + } + + dbg(host, dbg_pio, + "pio_read(): fifo:[%02i] buffer:[%03i] dcnt:[%08X]\n", + fifo, host->pio_words, + readl(host->base + S3C2410_SDIDCNT)); + + if (fifo > host->pio_words) + fifo = host->pio_words; + + host->pio_words -= fifo; + host->pio_count += fifo; + + while (fifo--) + *(host->pio_ptr++) = readl(from_ptr); + } + + if (!host->pio_words) { + res = get_data_buffer(host, &host->pio_words, &host->pio_ptr); + if (res) { + dbg(host, dbg_pio, 
+ "pio_read(): complete (no more buffers).\n"); + host->pio_active = XFER_NONE; + host->complete_what = COMPLETION_FINALIZE; + + return; + } + } + + enable_imask(host, + S3C2410_SDIIMSK_RXFIFOHALF | S3C2410_SDIIMSK_RXFIFOLAST); +} + +static void do_pio_write(struct s3cmci_host *host) +{ + void __iomem *to_ptr; + int res; + u32 fifo; + + to_ptr = host->base + host->sdidata; + + while ((fifo = fifo_free(host))) { + if (!host->pio_words) { + res = get_data_buffer(host, &host->pio_words, + &host->pio_ptr); + if (res) { + dbg(host, dbg_pio, + "pio_write(): complete (no more data).\n"); + host->pio_active = XFER_NONE; + + return; + } + + dbg(host, dbg_pio, + "pio_write(): new source: [%i]@[%p]\n", + host->pio_words, host->pio_ptr); + + } + + if (fifo > host->pio_words) + fifo = host->pio_words; + + host->pio_words -= fifo; + host->pio_count += fifo; + + while (fifo--) + writel(*(host->pio_ptr++), to_ptr); + } + + enable_imask(host, S3C2410_SDIIMSK_TXFIFOHALF); +} + +static void pio_tasklet(unsigned long data) +{ + struct s3cmci_host *host = (struct s3cmci_host *) data; + + + disable_irq(host->irq); + + if (host->pio_active == XFER_WRITE) + do_pio_write(host); + + if (host->pio_active == XFER_READ) + do_pio_read(host); + + if (host->complete_what == COMPLETION_FINALIZE) { + clear_imask(host); + if (host->pio_active != XFER_NONE) { + dbg(host, dbg_err, "unfinished %s " + "- pio_count:[%u] pio_words:[%u]\n", + (host->pio_active == XFER_READ) ? "read" : "write", + host->pio_count, host->pio_words); + + if (host->mrq->data) + host->mrq->data->error = -EINVAL; + } + + finalize_request(host); + } else + enable_irq(host->irq); +} + +/* + * ISR for SDI Interface IRQ + * Communication between driver and ISR works as follows: + * host->mrq points to current request + * host->complete_what Indicates when the request is considered done + * COMPLETION_CMDSENT when the command was sent + * COMPLETION_RSPFIN when a response was received + * COMPLETION_XFERFINISH when the data transfer is finished + * COMPLETION_XFERFINISH_RSPFIN both of the above. 
+ * host->complete_request is the completion-object the driver waits for + * + * 1) Driver sets up host->mrq and host->complete_what + * 2) Driver prepares the transfer + * 3) Driver enables interrupts + * 4) Driver starts transfer + * 5) Driver waits for host->complete_rquest + * 6) ISR checks for request status (errors and success) + * 6) ISR sets host->mrq->cmd->error and host->mrq->data->error + * 7) ISR completes host->complete_request + * 8) ISR disables interrupts + * 9) Driver wakes up and takes care of the request + * + * Note: "->error"-fields are expected to be set to 0 before the request + * was issued by mmc.c - therefore they are only set, when an error + * contition comes up + */ + +static irqreturn_t s3cmci_irq(int irq, void *dev_id) +{ + struct s3cmci_host *host = dev_id; + struct mmc_command *cmd; + u32 mci_csta, mci_dsta, mci_fsta, mci_dcnt, mci_imsk; + u32 mci_cclear, mci_dclear; + unsigned long iflags; + + spin_lock_irqsave(&host->complete_lock, iflags); + + mci_csta = readl(host->base + S3C2410_SDICMDSTAT); + mci_dsta = readl(host->base + S3C2410_SDIDSTA); + mci_dcnt = readl(host->base + S3C2410_SDIDCNT); + mci_fsta = readl(host->base + S3C2410_SDIFSTA); + mci_imsk = readl(host->base + host->sdiimsk); + mci_cclear = 0; + mci_dclear = 0; + + if ((host->complete_what == COMPLETION_NONE) || + (host->complete_what == COMPLETION_FINALIZE)) { + host->status = "nothing to complete"; + clear_imask(host); + goto irq_out; + } + + if (!host->mrq) { + host->status = "no active mrq"; + clear_imask(host); + goto irq_out; + } + + cmd = host->cmd_is_stop ? host->mrq->stop : host->mrq->cmd; + + if (!cmd) { + host->status = "no active cmd"; + clear_imask(host); + goto irq_out; + } + + if (!host->dodma) { + if ((host->pio_active == XFER_WRITE) && + (mci_fsta & S3C2410_SDIFSTA_TFDET)) { + + disable_imask(host, S3C2410_SDIIMSK_TXFIFOHALF); + tasklet_schedule(&host->pio_tasklet); + host->status = "pio tx"; + } + + if ((host->pio_active == XFER_READ) && + (mci_fsta & S3C2410_SDIFSTA_RFDET)) { + + disable_imask(host, + S3C2410_SDIIMSK_RXFIFOHALF | + S3C2410_SDIIMSK_RXFIFOLAST); + + tasklet_schedule(&host->pio_tasklet); + host->status = "pio rx"; + } + } + + if (mci_csta & S3C2410_SDICMDSTAT_CMDTIMEOUT) { + dbg(host, dbg_err, "CMDSTAT: error CMDTIMEOUT\n"); + cmd->error = -ETIMEDOUT; + host->status = "error: command timeout"; + goto fail_transfer; + } + + if (mci_csta & S3C2410_SDICMDSTAT_CMDSENT) { + if (host->complete_what == COMPLETION_CMDSENT) { + host->status = "ok: command sent"; + goto close_transfer; + } + + mci_cclear |= S3C2410_SDICMDSTAT_CMDSENT; + } + + if (mci_csta & S3C2410_SDICMDSTAT_CRCFAIL) { + if (cmd->flags & MMC_RSP_CRC) { + if (host->mrq->cmd->flags & MMC_RSP_136) { + dbg(host, dbg_irq, + "fixup: ignore CRC fail with long rsp\n"); + } else { + /* note, we used to fail the transfer + * here, but it seems that this is just + * the hardware getting it wrong. 
+				 *
+				 * cmd->error = -EILSEQ;
+				 * host->status = "error: bad command crc";
+				 * goto fail_transfer;
+				*/
+			}
+		}
+
+		mci_cclear |= S3C2410_SDICMDSTAT_CRCFAIL;
+	}
+
+	if (mci_csta & S3C2410_SDICMDSTAT_RSPFIN) {
+		if (host->complete_what == COMPLETION_RSPFIN) {
+			host->status = "ok: command response received";
+			goto close_transfer;
+		}
+
+		if (host->complete_what == COMPLETION_XFERFINISH_RSPFIN)
+			host->complete_what = COMPLETION_XFERFINISH;
+
+		mci_cclear |= S3C2410_SDICMDSTAT_RSPFIN;
+	}
+
+	/* errors handled after this point are only relevant
+	   when a data transfer is in progress */
+
+	if (!cmd->data)
+		goto clear_status_bits;
+
+	/* Check for FIFO failure */
+	if (host->is2440) {
+		if (mci_fsta & S3C2440_SDIFSTA_FIFOFAIL) {
+			dbg(host, dbg_err, "FIFO failure\n");
+			host->mrq->data->error = -EILSEQ;
+			host->status = "error: 2440 fifo failure";
+			goto fail_transfer;
+		}
+	} else {
+		if (mci_dsta & S3C2410_SDIDSTA_FIFOFAIL) {
+			dbg(host, dbg_err, "FIFO failure\n");
+			cmd->data->error = -EILSEQ;
+			host->status = "error: fifo failure";
+			goto fail_transfer;
+		}
+	}
+
+	if (mci_dsta & S3C2410_SDIDSTA_RXCRCFAIL) {
+		dbg(host, dbg_err, "bad data crc (outgoing)\n");
+		cmd->data->error = -EILSEQ;
+		host->status = "error: bad data crc (outgoing)";
+		goto fail_transfer;
+	}
+
+	if (mci_dsta & S3C2410_SDIDSTA_CRCFAIL) {
+		dbg(host, dbg_err, "bad data crc (incoming)\n");
+		cmd->data->error = -EILSEQ;
+		host->status = "error: bad data crc (incoming)";
+		goto fail_transfer;
+	}
+
+	if (mci_dsta & S3C2410_SDIDSTA_DATATIMEOUT) {
+		dbg(host, dbg_err, "data timeout\n");
+		cmd->data->error = -ETIMEDOUT;
+		host->status = "error: data timeout";
+		goto fail_transfer;
+	}
+
+	if (mci_dsta & S3C2410_SDIDSTA_XFERFINISH) {
+		if (host->complete_what == COMPLETION_XFERFINISH) {
+			host->status = "ok: data transfer completed";
+			goto close_transfer;
+		}
+
+		if (host->complete_what == COMPLETION_XFERFINISH_RSPFIN)
+			host->complete_what = COMPLETION_RSPFIN;
+
+		mci_dclear |= S3C2410_SDIDSTA_XFERFINISH;
+	}
+
+clear_status_bits:
+	writel(mci_cclear, host->base + S3C2410_SDICMDSTAT);
+	writel(mci_dclear, host->base + S3C2410_SDIDSTA);
+
+	goto irq_out;
+
+fail_transfer:
+	host->pio_active = XFER_NONE;
+
+close_transfer:
+	host->complete_what = COMPLETION_FINALIZE;
+
+	clear_imask(host);
+	tasklet_schedule(&host->pio_tasklet);
+
+	goto irq_out;
+
+irq_out:
+	dbg(host, dbg_irq,
+	    "csta:0x%08x dsta:0x%08x fsta:0x%08x dcnt:0x%08x status:%s.\n",
+	    mci_csta, mci_dsta, mci_fsta, mci_dcnt, host->status);
+
+	spin_unlock_irqrestore(&host->complete_lock, iflags);
+	return IRQ_HANDLED;
+
+}
+
+/*
+ * ISR for the CardDetect Pin
+*/
+
+static irqreturn_t s3cmci_irq_cd(int irq, void *dev_id)
+{
+	struct s3cmci_host *host = (struct s3cmci_host *)dev_id;
+
+	dbg(host, dbg_irq, "card detect\n");
+
+	mmc_detect_change(host->mmc, msecs_to_jiffies(500));
+
+	return IRQ_HANDLED;
+}
+
+void s3cmci_dma_done_callback(struct s3c2410_dma_chan *dma_ch, void *buf_id,
+	int size, enum s3c2410_dma_buffresult result)
+{
+	struct s3cmci_host *host = buf_id;
+	unsigned long iflags;
+	u32 mci_csta, mci_dsta, mci_fsta, mci_dcnt;
+
+	mci_csta = readl(host->base + S3C2410_SDICMDSTAT);
+	mci_dsta = readl(host->base + S3C2410_SDIDSTA);
+	mci_fsta = readl(host->base + S3C2410_SDIFSTA);
+	mci_dcnt = readl(host->base + S3C2410_SDIDCNT);
+
+	BUG_ON(!host->mrq);
+	BUG_ON(!host->mrq->data);
+	BUG_ON(!host->dmatogo);
+
+	spin_lock_irqsave(&host->complete_lock, iflags);
+
+	if (result != S3C2410_RES_OK) {
+		dbg(host, dbg_fail, "DMA FAILED: csta=0x%08x dsta=0x%08x "
+			"fsta=0x%08x dcnt:0x%08x result:0x%08x toGo:%u\n",
+			mci_csta, mci_dsta, mci_fsta,
+			mci_dcnt, result, host->dmatogo);
+
+		goto fail_request;
+	}
+
+	host->dmatogo--;
+	if (host->dmatogo) {
+		dbg(host, dbg_dma, "DMA DONE Size:%i DSTA:[%08x] "
+			"DCNT:[%08x] toGo:%u\n",
+			size, mci_dsta, mci_dcnt, host->dmatogo);
+
+		goto out;
+	}
+
+	dbg(host, dbg_dma, "DMA FINISHED Size:%i DSTA:%08x DCNT:%08x\n",
+		size, mci_dsta, mci_dcnt);
+
+	host->complete_what = COMPLETION_FINALIZE;
+
+out:
+	tasklet_schedule(&host->pio_tasklet);
+	spin_unlock_irqrestore(&host->complete_lock, iflags);
+	return;
+
+fail_request:
+	host->mrq->data->error = -EINVAL;
+	host->complete_what = COMPLETION_FINALIZE;
+	writel(0, host->base + host->sdiimsk);
+	goto out;
+
+}
+
+static void finalize_request(struct s3cmci_host *host)
+{
+	struct mmc_request *mrq = host->mrq;
+	struct mmc_command *cmd = host->cmd_is_stop ? mrq->stop : mrq->cmd;
+	int debug_as_failure = 0;
+
+	if (host->complete_what != COMPLETION_FINALIZE)
+		return;
+
+	if (!mrq)
+		return;
+
+	if (cmd->data && (cmd->error == 0) &&
+	    (cmd->data->error == 0)) {
+		if (host->dodma && (!host->dma_complete)) {
+			dbg(host, dbg_dma, "DMA Missing!\n");
+			return;
+		}
+	}
+
+	/* Read response from controller. */
+	cmd->resp[0] = readl(host->base + S3C2410_SDIRSP0);
+	cmd->resp[1] = readl(host->base + S3C2410_SDIRSP1);
+	cmd->resp[2] = readl(host->base + S3C2410_SDIRSP2);
+	cmd->resp[3] = readl(host->base + S3C2410_SDIRSP3);
+
+	writel(host->prescaler, host->base + S3C2410_SDIPRE);
+
+	if (cmd->error)
+		debug_as_failure = 1;
+
+	if (cmd->data && cmd->data->error)
+		debug_as_failure = 1;
+
+	dbg_dumpcmd(host, cmd, debug_as_failure);
+
+	/* Cleanup controller */
+	writel(0, host->base + S3C2410_SDICMDARG);
+	writel(S3C2410_SDIDCON_STOP, host->base + S3C2410_SDIDCON);
+	writel(0, host->base + S3C2410_SDICMDCON);
+	writel(0, host->base + host->sdiimsk);
+
+	if (cmd->data && cmd->error)
+		cmd->data->error = cmd->error;
+
+	if (cmd->data && cmd->data->stop && (!host->cmd_is_stop)) {
+		host->cmd_is_stop = 1;
+		s3cmci_send_request(host->mmc);
+		return;
+	}
+
+	/* If we have no data transfer we are finished here */
+	if (!mrq->data)
+		goto request_done;
+
+	/* Calculate the amount of bytes transferred if there was no error */
+	if (mrq->data->error == 0) {
+		mrq->data->bytes_xfered =
+			(mrq->data->blocks * mrq->data->blksz);
+	} else {
+		mrq->data->bytes_xfered = 0;
+	}
+
+	/* If we had an error while transferring data we flush the
+	 * DMA channel and the fifo to clear out any garbage. */
+	if (mrq->data->error != 0) {
+		if (host->dodma)
+			s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH);
+
+		if (host->is2440) {
+			/* Clear failure register and reset fifo. */
+			writel(S3C2440_SDIFSTA_FIFORESET |
+			       S3C2440_SDIFSTA_FIFOFAIL,
+			       host->base + S3C2410_SDIFSTA);
+		} else {
+			u32 mci_con;
+
+			/* reset fifo */
+			mci_con = readl(host->base + S3C2410_SDICON);
+			mci_con |= S3C2410_SDICON_FIFORESET;
+
+			writel(mci_con, host->base + S3C2410_SDICON);
+		}
+	}
+
+request_done:
+	host->complete_what = COMPLETION_NONE;
+	host->mrq = NULL;
+	mmc_request_done(host->mmc, mrq);
+}
+
+
+void s3cmci_dma_setup(struct s3cmci_host *host, enum s3c2410_dmasrc source)
+{
+	static enum s3c2410_dmasrc last_source = -1;
+	static int setup_ok;
+
+	if (last_source == source)
+		return;
+
+	last_source = source;
+
+	s3c2410_dma_devconfig(host->dma, source, 3,
+			      host->mem->start + host->sdidata);
+
+	if (!setup_ok) {
+		s3c2410_dma_config(host->dma, 4,
+			(S3C2410_DCON_HWTRIG | S3C2410_DCON_CH0_SDI));
+		s3c2410_dma_set_buffdone_fn(host->dma,
+					    s3cmci_dma_done_callback);
+		s3c2410_dma_setflags(host->dma, S3C2410_DMAF_AUTOSTART);
+		setup_ok = 1;
+	}
+}
+
+static void s3cmci_send_command(struct s3cmci_host *host,
+					struct mmc_command *cmd)
+{
+	u32 ccon, imsk;
+
+	imsk = S3C2410_SDIIMSK_CRCSTATUS | S3C2410_SDIIMSK_CMDTIMEOUT |
+		S3C2410_SDIIMSK_RESPONSEND | S3C2410_SDIIMSK_CMDSENT |
+		S3C2410_SDIIMSK_RESPONSECRC;
+
+	enable_imask(host, imsk);
+
+	if (cmd->data)
+		host->complete_what = COMPLETION_XFERFINISH_RSPFIN;
+	else if (cmd->flags & MMC_RSP_PRESENT)
+		host->complete_what = COMPLETION_RSPFIN;
+	else
+		host->complete_what = COMPLETION_CMDSENT;
+
+	writel(cmd->arg, host->base + S3C2410_SDICMDARG);
+
+	ccon = cmd->opcode & S3C2410_SDICMDCON_INDEX;
+	ccon |= S3C2410_SDICMDCON_SENDERHOST | S3C2410_SDICMDCON_CMDSTART;
+
+	if (cmd->flags & MMC_RSP_PRESENT)
+		ccon |= S3C2410_SDICMDCON_WAITRSP;
+
+	if (cmd->flags & MMC_RSP_136)
+		ccon |= S3C2410_SDICMDCON_LONGRSP;
+
+	writel(ccon, host->base + S3C2410_SDICMDCON);
+}
+
+static int s3cmci_setup_data(struct s3cmci_host *host, struct mmc_data *data)
+{
+	u32 dcon, imsk, stoptries = 3;
+
+	/* write DCON register */
+
+	if (!data) {
+		writel(0, host->base + S3C2410_SDIDCON);
+		return 0;
+	}
+
+	if ((data->blksz & 3) != 0) {
+		/* We cannot deal with unaligned blocks with more than
+		 * one block being transferred. */
+
+		if (data->blocks > 1)
+			return -EINVAL;
+
+		/* No support yet for non-word block transfers.
*/ + return -EINVAL; + } + + while (readl(host->base + S3C2410_SDIDSTA) & + (S3C2410_SDIDSTA_TXDATAON | S3C2410_SDIDSTA_RXDATAON)) { + + dbg(host, dbg_err, + "mci_setup_data() transfer stillin progress.\n"); + + writel(S3C2410_SDIDCON_STOP, host->base + S3C2410_SDIDCON); + s3cmci_reset(host); + + if ((stoptries--) == 0) { + dbg_dumpregs(host, "DRF"); + return -EINVAL; + } + } + + dcon = data->blocks & S3C2410_SDIDCON_BLKNUM_MASK; + + if (host->dodma) + dcon |= S3C2410_SDIDCON_DMAEN; + + if (host->bus_width == MMC_BUS_WIDTH_4) + dcon |= S3C2410_SDIDCON_WIDEBUS; + + if (!(data->flags & MMC_DATA_STREAM)) + dcon |= S3C2410_SDIDCON_BLOCKMODE; + + if (data->flags & MMC_DATA_WRITE) { + dcon |= S3C2410_SDIDCON_TXAFTERRESP; + dcon |= S3C2410_SDIDCON_XFER_TXSTART; + } + + if (data->flags & MMC_DATA_READ) { + dcon |= S3C2410_SDIDCON_RXAFTERCMD; + dcon |= S3C2410_SDIDCON_XFER_RXSTART; + } + + if (host->is2440) { + dcon |= S3C2440_SDIDCON_DS_WORD; + dcon |= S3C2440_SDIDCON_DATSTART; + } + + writel(dcon, host->base + S3C2410_SDIDCON); + + /* write BSIZE register */ + + writel(data->blksz, host->base + S3C2410_SDIBSIZE); + + /* add to IMASK register */ + imsk = S3C2410_SDIIMSK_FIFOFAIL | S3C2410_SDIIMSK_DATACRC | + S3C2410_SDIIMSK_DATATIMEOUT | S3C2410_SDIIMSK_DATAFINISH; + + enable_imask(host, imsk); + + /* write TIMER register */ + + if (host->is2440) { + writel(0x007FFFFF, host->base + S3C2410_SDITIMER); + } else { + writel(0x0000FFFF, host->base + S3C2410_SDITIMER); + + /* FIX: set slow clock to prevent timeouts on read */ + if (data->flags & MMC_DATA_READ) + writel(0xFF, host->base + S3C2410_SDIPRE); + } + + return 0; +} + +#define BOTH_DIR (MMC_DATA_WRITE | MMC_DATA_READ) + +static int s3cmci_prepare_pio(struct s3cmci_host *host, struct mmc_data *data) +{ + int rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0; + + BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR); + + host->pio_sgptr = 0; + host->pio_words = 0; + host->pio_count = 0; + host->pio_active = rw ? XFER_WRITE : XFER_READ; + + if (rw) { + do_pio_write(host); + enable_imask(host, S3C2410_SDIIMSK_TXFIFOHALF); + } else { + enable_imask(host, S3C2410_SDIIMSK_RXFIFOHALF + | S3C2410_SDIIMSK_RXFIFOLAST); + } + + return 0; +} + +static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data) +{ + int dma_len, i; + int rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0; + + BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR); + + s3cmci_dma_setup(host, rw ? S3C2410_DMASRC_MEM : S3C2410_DMASRC_HW); + s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH); + + dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, + (rw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); + + if (dma_len == 0) + return -ENOMEM; + + host->dma_complete = 0; + host->dmatogo = dma_len; + + for (i = 0; i < dma_len; i++) { + int res; + + dbg(host, dbg_dma, "enqueue %i:%u@%u\n", i, + sg_dma_address(&data->sg[i]), + sg_dma_len(&data->sg[i])); + + res = s3c2410_dma_enqueue(host->dma, (void *) host, + sg_dma_address(&data->sg[i]), + sg_dma_len(&data->sg[i])); + + if (res) { + s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH); + return -EBUSY; + } + } + + s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_START); + + return 0; +} + +static void s3cmci_send_request(struct mmc_host *mmc) +{ + struct s3cmci_host *host = mmc_priv(mmc); + struct mmc_request *mrq = host->mrq; + struct mmc_command *cmd = host->cmd_is_stop ? 
mrq->stop : mrq->cmd; + + host->ccnt++; + prepare_dbgmsg(host, cmd, host->cmd_is_stop); + + /* Clear command, data and fifo status registers + Fifo clear only necessary on 2440, but doesn't hurt on 2410 + */ + writel(0xFFFFFFFF, host->base + S3C2410_SDICMDSTAT); + writel(0xFFFFFFFF, host->base + S3C2410_SDIDSTA); + writel(0xFFFFFFFF, host->base + S3C2410_SDIFSTA); + + if (cmd->data) { + int res = s3cmci_setup_data(host, cmd->data); + + host->dcnt++; + + if (res) { + dbg(host, dbg_err, "setup data error %d\n", res); + cmd->error = res; + cmd->data->error = res; + + mmc_request_done(mmc, mrq); + return; + } + + if (host->dodma) + res = s3cmci_prepare_dma(host, cmd->data); + else + res = s3cmci_prepare_pio(host, cmd->data); + + if (res) { + dbg(host, dbg_err, "data prepare error %d\n", res); + cmd->error = res; + cmd->data->error = res; + + mmc_request_done(mmc, mrq); + return; + } + } + + /* Send command */ + s3cmci_send_command(host, cmd); + + /* Enable Interrupt */ + enable_irq(host->irq); +} + +static int s3cmci_card_present(struct s3cmci_host *host) +{ + struct s3c24xx_mci_pdata *pdata = host->pdata; + int ret; + + if (pdata->gpio_detect == 0) + return -ENOSYS; + + ret = s3c2410_gpio_getpin(pdata->gpio_detect) ? 0 : 1; + return ret ^ pdata->detect_invert; +} + +static void s3cmci_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct s3cmci_host *host = mmc_priv(mmc); + + host->status = "mmc request"; + host->cmd_is_stop = 0; + host->mrq = mrq; + + if (s3cmci_card_present(host) == 0) { + dbg(host, dbg_err, "%s: no medium present\n", __func__); + host->mrq->cmd->error = -ENOMEDIUM; + mmc_request_done(mmc, mrq); + } else + s3cmci_send_request(mmc); +} + +static void s3cmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct s3cmci_host *host = mmc_priv(mmc); + u32 mci_psc, mci_con; + + /* Set the power state */ + + mci_con = readl(host->base + S3C2410_SDICON); + + switch (ios->power_mode) { + case MMC_POWER_ON: + case MMC_POWER_UP: + s3c2410_gpio_cfgpin(S3C2410_GPE5, S3C2410_GPE5_SDCLK); + s3c2410_gpio_cfgpin(S3C2410_GPE6, S3C2410_GPE6_SDCMD); + s3c2410_gpio_cfgpin(S3C2410_GPE7, S3C2410_GPE7_SDDAT0); + s3c2410_gpio_cfgpin(S3C2410_GPE8, S3C2410_GPE8_SDDAT1); + s3c2410_gpio_cfgpin(S3C2410_GPE9, S3C2410_GPE9_SDDAT2); + s3c2410_gpio_cfgpin(S3C2410_GPE10, S3C2410_GPE10_SDDAT3); + + if (host->pdata->set_power) + host->pdata->set_power(ios->power_mode, ios->vdd); + + if (!host->is2440) + mci_con |= S3C2410_SDICON_FIFORESET; + + break; + + case MMC_POWER_OFF: + default: + s3c2410_gpio_setpin(S3C2410_GPE5, 0); + s3c2410_gpio_cfgpin(S3C2410_GPE5, S3C2410_GPE5_OUTP); + + if (host->is2440) + mci_con |= S3C2440_SDICON_SDRESET; + + if (host->pdata->set_power) + host->pdata->set_power(ios->power_mode, ios->vdd); + + break; + } + + /* Set clock */ + for (mci_psc = 0; mci_psc < 255; mci_psc++) { + host->real_rate = host->clk_rate / (host->clk_div*(mci_psc+1)); + + if (host->real_rate <= ios->clock) + break; + } + + if (mci_psc > 255) + mci_psc = 255; + + host->prescaler = mci_psc; + writel(host->prescaler, host->base + S3C2410_SDIPRE); + + /* If requested clock is 0, real_rate will be 0, too */ + if (ios->clock == 0) + host->real_rate = 0; + + /* Set CLOCK_ENABLE */ + if (ios->clock) + mci_con |= S3C2410_SDICON_CLOCKTYPE; + else + mci_con &= ~S3C2410_SDICON_CLOCKTYPE; + + writel(mci_con, host->base + S3C2410_SDICON); + + if ((ios->power_mode == MMC_POWER_ON) || + (ios->power_mode == MMC_POWER_UP)) { + dbg(host, dbg_conf, "running at %lukHz (requested: %ukHz).\n", + 
		host->real_rate/1000, ios->clock/1000);
+	} else {
+		dbg(host, dbg_conf, "powered down.\n");
+	}
+
+	host->bus_width = ios->bus_width;
+}
+
+static void s3cmci_reset(struct s3cmci_host *host)
+{
+	u32 con = readl(host->base + S3C2410_SDICON);
+
+	con |= S3C2440_SDICON_SDRESET;
+	writel(con, host->base + S3C2410_SDICON);
+}
+
+static int s3cmci_get_ro(struct mmc_host *mmc)
+{
+	struct s3cmci_host *host = mmc_priv(mmc);
+	struct s3c24xx_mci_pdata *pdata = host->pdata;
+	int ret;
+
+	if (pdata->gpio_wprotect == 0)
+		return 0;
+
+	ret = s3c2410_gpio_getpin(pdata->gpio_wprotect);
+
+	if (pdata->wprotect_invert)
+		ret = !ret;
+
+	return ret;
+}
+
+static struct mmc_host_ops s3cmci_ops = {
+	.request	= s3cmci_request,
+	.set_ios	= s3cmci_set_ios,
+	.get_ro		= s3cmci_get_ro,
+};
+
+static struct s3c24xx_mci_pdata s3cmci_def_pdata = {
+	/* This is currently here to avoid a number of if (host->pdata)
+	 * checks. Any zero fields to ensure reasonable defaults are picked. */
+};
+
+static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
+{
+	struct s3cmci_host *host;
+	struct mmc_host	*mmc;
+	int ret;
+
+	mmc = mmc_alloc_host(sizeof(struct s3cmci_host), &pdev->dev);
+	if (!mmc) {
+		ret = -ENOMEM;
+		goto probe_out;
+	}
+
+	host = mmc_priv(mmc);
+	host->mmc 	= mmc;
+	host->pdev	= pdev;
+	host->is2440	= is2440;
+
+	host->pdata = pdev->dev.platform_data;
+	if (!host->pdata) {
+		pdev->dev.platform_data = &s3cmci_def_pdata;
+		host->pdata = &s3cmci_def_pdata;
+	}
+
+	spin_lock_init(&host->complete_lock);
+	tasklet_init(&host->pio_tasklet, pio_tasklet, (unsigned long) host);
+
+	if (is2440) {
+		host->sdiimsk	= S3C2440_SDIIMSK;
+		host->sdidata	= S3C2440_SDIDATA;
+		host->clk_div	= 1;
+	} else {
+		host->sdiimsk	= S3C2410_SDIIMSK;
+		host->sdidata	= S3C2410_SDIDATA;
+		host->clk_div	= 2;
+	}
+
+	host->dodma		= 0;
+	host->complete_what 	= COMPLETION_NONE;
+	host->pio_active 	= XFER_NONE;
+
+	host->dma		= S3CMCI_DMA;
+
+	host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!host->mem) {
+		dev_err(&pdev->dev,
+			"failed to get io memory region resource.\n");
+
+		ret = -ENOENT;
+		goto probe_free_host;
+	}
+
+	host->mem = request_mem_region(host->mem->start,
+				       RESSIZE(host->mem), pdev->name);
+
+	if (!host->mem) {
+		dev_err(&pdev->dev, "failed to request io memory region.\n");
+		ret = -ENOENT;
+		goto probe_free_host;
+	}
+
+	host->base = ioremap(host->mem->start, RESSIZE(host->mem));
+	if (host->base == 0) {
+		dev_err(&pdev->dev, "failed to ioremap() io memory region.\n");
+		ret = -EINVAL;
+		goto probe_free_mem_region;
+	}
+
+	host->irq = platform_get_irq(pdev, 0);
+	if (host->irq == 0) {
+		dev_err(&pdev->dev, "failed to get interrupt resource.\n");
+		ret = -EINVAL;
+		goto probe_iounmap;
+	}
+
+	if (request_irq(host->irq, s3cmci_irq, 0, DRIVER_NAME, host)) {
+		dev_err(&pdev->dev, "failed to request mci interrupt.\n");
+		ret = -ENOENT;
+		goto probe_iounmap;
+	}
+
+	/* We get spurious interrupts even when we have set the IMSK
+	 * register to ignore everything, so use disable_irq() to make
+	 * sure we don't lock the system with un-serviceable requests.
*/ + + disable_irq(host->irq); + + host->irq_cd = s3c2410_gpio_getirq(host->pdata->gpio_detect); + + if (host->irq_cd >= 0) { + if (request_irq(host->irq_cd, s3cmci_irq_cd, + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, + DRIVER_NAME, host)) { + dev_err(&pdev->dev, "can't get card detect irq.\n"); + ret = -ENOENT; + goto probe_free_irq; + } + } else { + dev_warn(&pdev->dev, "host detect has no irq available\n"); + s3c2410_gpio_cfgpin(host->pdata->gpio_detect, + S3C2410_GPIO_INPUT); + } + + if (host->pdata->gpio_wprotect) + s3c2410_gpio_cfgpin(host->pdata->gpio_wprotect, + S3C2410_GPIO_INPUT); + + if (s3c2410_dma_request(S3CMCI_DMA, &s3cmci_dma_client, NULL) < 0) { + dev_err(&pdev->dev, "unable to get DMA channel.\n"); + ret = -EBUSY; + goto probe_free_irq_cd; + } + + host->clk = clk_get(&pdev->dev, "sdi"); + if (IS_ERR(host->clk)) { + dev_err(&pdev->dev, "failed to find clock source.\n"); + ret = PTR_ERR(host->clk); + host->clk = NULL; + goto probe_free_host; + } + + ret = clk_enable(host->clk); + if (ret) { + dev_err(&pdev->dev, "failed to enable clock source.\n"); + goto clk_free; + } + + host->clk_rate = clk_get_rate(host->clk); + + mmc->ops = &s3cmci_ops; + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + mmc->caps = MMC_CAP_4_BIT_DATA; + mmc->f_min = host->clk_rate / (host->clk_div * 256); + mmc->f_max = host->clk_rate / host->clk_div; + + if (host->pdata->ocr_avail) + mmc->ocr_avail = host->pdata->ocr_avail; + + mmc->max_blk_count = 4095; + mmc->max_blk_size = 4095; + mmc->max_req_size = 4095 * 512; + mmc->max_seg_size = mmc->max_req_size; + + mmc->max_phys_segs = 128; + mmc->max_hw_segs = 128; + + dbg(host, dbg_debug, + "probe: mode:%s mapped mci_base:%p irq:%u irq_cd:%u dma:%u.\n", + (host->is2440?"2440":""), + host->base, host->irq, host->irq_cd, host->dma); + + ret = mmc_add_host(mmc); + if (ret) { + dev_err(&pdev->dev, "failed to add mmc host.\n"); + goto free_dmabuf; + } + + platform_set_drvdata(pdev, mmc); + dev_info(&pdev->dev, "initialisation done.\n"); + + return 0; + + free_dmabuf: + clk_disable(host->clk); + + clk_free: + clk_put(host->clk); + + probe_free_irq_cd: + if (host->irq_cd >= 0) + free_irq(host->irq_cd, host); + + probe_free_irq: + free_irq(host->irq, host); + + probe_iounmap: + iounmap(host->base); + + probe_free_mem_region: + release_mem_region(host->mem->start, RESSIZE(host->mem)); + + probe_free_host: + mmc_free_host(mmc); + probe_out: + return ret; +} + +static int __devexit s3cmci_remove(struct platform_device *pdev) +{ + struct mmc_host *mmc = platform_get_drvdata(pdev); + struct s3cmci_host *host = mmc_priv(mmc); + + mmc_remove_host(mmc); + + clk_disable(host->clk); + clk_put(host->clk); + + tasklet_disable(&host->pio_tasklet); + s3c2410_dma_free(S3CMCI_DMA, &s3cmci_dma_client); + + if (host->irq_cd >= 0) + free_irq(host->irq_cd, host); + free_irq(host->irq, host); + + iounmap(host->base); + release_mem_region(host->mem->start, RESSIZE(host->mem)); + + mmc_free_host(mmc); + return 0; +} + +static int __devinit s3cmci_probe_2410(struct platform_device *dev) +{ + return s3cmci_probe(dev, 0); +} + +static int __devinit s3cmci_probe_2412(struct platform_device *dev) +{ + return s3cmci_probe(dev, 1); +} + +static int __devinit s3cmci_probe_2440(struct platform_device *dev) +{ + return s3cmci_probe(dev, 1); +} + +#ifdef CONFIG_PM + +static int s3cmci_suspend(struct platform_device *dev, pm_message_t state) +{ + struct mmc_host *mmc = platform_get_drvdata(dev); + + return mmc_suspend_host(mmc, state); +} + +static int s3cmci_resume(struct platform_device *dev) 
+{ + struct mmc_host *mmc = platform_get_drvdata(dev); + + return mmc_resume_host(mmc); +} + +#else /* CONFIG_PM */ +#define s3cmci_suspend NULL +#define s3cmci_resume NULL +#endif /* CONFIG_PM */ + + +static struct platform_driver s3cmci_driver_2410 = { + .driver.name = "s3c2410-sdi", + .driver.owner = THIS_MODULE, + .probe = s3cmci_probe_2410, + .remove = __devexit_p(s3cmci_remove), + .suspend = s3cmci_suspend, + .resume = s3cmci_resume, +}; + +static struct platform_driver s3cmci_driver_2412 = { + .driver.name = "s3c2412-sdi", + .driver.owner = THIS_MODULE, + .probe = s3cmci_probe_2412, + .remove = __devexit_p(s3cmci_remove), + .suspend = s3cmci_suspend, + .resume = s3cmci_resume, +}; + +static struct platform_driver s3cmci_driver_2440 = { + .driver.name = "s3c2440-sdi", + .driver.owner = THIS_MODULE, + .probe = s3cmci_probe_2440, + .remove = __devexit_p(s3cmci_remove), + .suspend = s3cmci_suspend, + .resume = s3cmci_resume, +}; + + +static int __init s3cmci_init(void) +{ + platform_driver_register(&s3cmci_driver_2410); + platform_driver_register(&s3cmci_driver_2412); + platform_driver_register(&s3cmci_driver_2440); + return 0; +} + +static void __exit s3cmci_exit(void) +{ + platform_driver_unregister(&s3cmci_driver_2410); + platform_driver_unregister(&s3cmci_driver_2412); + platform_driver_unregister(&s3cmci_driver_2440); +} + +module_init(s3cmci_init); +module_exit(s3cmci_exit); + +MODULE_DESCRIPTION("Samsung S3C MMC/SD Card Interface driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Thomas Kleffel <tk@maintech.de>"); +MODULE_ALIAS("platform:s3c2410-sdi"); +MODULE_ALIAS("platform:s3c2412-sdi"); +MODULE_ALIAS("platform:s3c2440-sdi"); diff --git a/drivers/mmc/host/s3cmci.h b/drivers/mmc/host/s3cmci.h new file mode 100644 index 000000000000..37d9c60010c9 --- /dev/null +++ b/drivers/mmc/host/s3cmci.h @@ -0,0 +1,70 @@ +/* + * linux/drivers/mmc/s3cmci.h - Samsung S3C MCI driver + * + * Copyright (C) 2004-2006 Thomas Kleffel, All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +/* FIXME: DMA Resource management ?! 
*/ +#define S3CMCI_DMA 0 + +enum s3cmci_waitfor { + COMPLETION_NONE, + COMPLETION_FINALIZE, + COMPLETION_CMDSENT, + COMPLETION_RSPFIN, + COMPLETION_XFERFINISH, + COMPLETION_XFERFINISH_RSPFIN, +}; + +struct s3cmci_host { + struct platform_device *pdev; + struct s3c24xx_mci_pdata *pdata; + struct mmc_host *mmc; + struct resource *mem; + struct clk *clk; + void __iomem *base; + int irq; + int irq_cd; + int dma; + + unsigned long clk_rate; + unsigned long clk_div; + unsigned long real_rate; + u8 prescaler; + + int is2440; + unsigned sdiimsk; + unsigned sdidata; + int dodma; + int dmatogo; + + struct mmc_request *mrq; + int cmd_is_stop; + + spinlock_t complete_lock; + enum s3cmci_waitfor complete_what; + + int dma_complete; + + u32 pio_sgptr; + u32 pio_words; + u32 pio_count; + u32 *pio_ptr; +#define XFER_NONE 0 +#define XFER_READ 1 +#define XFER_WRITE 2 + u32 pio_active; + + int bus_width; + + char dbgmsg_cmd[301]; + char dbgmsg_dat[301]; + char *status; + + unsigned int ccnt, dcnt; + struct tasklet_struct pio_tasklet; +}; diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c new file mode 100644 index 000000000000..deb607c52c0d --- /dev/null +++ b/drivers/mmc/host/sdhci-pci.c @@ -0,0 +1,732 @@ +/* linux/drivers/mmc/host/sdhci-pci.c - SDHCI on PCI bus interface + * + * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * Thanks to the following companies for their support: + * + * - JMicron (hardware and technical support) + */ + +#include <linux/delay.h> +#include <linux/highmem.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> + +#include <linux/mmc/host.h> + +#include <asm/scatterlist.h> +#include <asm/io.h> + +#include "sdhci.h" + +/* + * PCI registers + */ + +#define PCI_SDHCI_IFPIO 0x00 +#define PCI_SDHCI_IFDMA 0x01 +#define PCI_SDHCI_IFVENDOR 0x02 + +#define PCI_SLOT_INFO 0x40 /* 8 bits */ +#define PCI_SLOT_INFO_SLOTS(x) ((x >> 4) & 7) +#define PCI_SLOT_INFO_FIRST_BAR_MASK 0x07 + +#define MAX_SLOTS 8 + +struct sdhci_pci_chip; +struct sdhci_pci_slot; + +struct sdhci_pci_fixes { + unsigned int quirks; + + int (*probe)(struct sdhci_pci_chip*); + + int (*probe_slot)(struct sdhci_pci_slot*); + void (*remove_slot)(struct sdhci_pci_slot*, int); + + int (*suspend)(struct sdhci_pci_chip*, + pm_message_t); + int (*resume)(struct sdhci_pci_chip*); +}; + +struct sdhci_pci_slot { + struct sdhci_pci_chip *chip; + struct sdhci_host *host; + + int pci_bar; +}; + +struct sdhci_pci_chip { + struct pci_dev *pdev; + + unsigned int quirks; + const struct sdhci_pci_fixes *fixes; + + int num_slots; /* Slots on controller */ + struct sdhci_pci_slot *slots[MAX_SLOTS]; /* Pointers to host slots */ +}; + + +/*****************************************************************************\ + * * + * Hardware specific quirk handling * + * * +\*****************************************************************************/ + +static int ricoh_probe(struct sdhci_pci_chip *chip) +{ + if (chip->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) + chip->quirks |= SDHCI_QUIRK_CLOCK_BEFORE_RESET; + + if (chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG) + chip->quirks |= SDHCI_QUIRK_NO_CARD_NO_RESET; + + return 0; +} + +static const struct sdhci_pci_fixes sdhci_ricoh = { + .probe = ricoh_probe, + .quirks = 
SDHCI_QUIRK_32BIT_DMA_ADDR, +}; + +static const struct sdhci_pci_fixes sdhci_ene_712 = { + .quirks = SDHCI_QUIRK_SINGLE_POWER_WRITE | + SDHCI_QUIRK_BROKEN_DMA, +}; + +static const struct sdhci_pci_fixes sdhci_ene_714 = { + .quirks = SDHCI_QUIRK_SINGLE_POWER_WRITE | + SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS | + SDHCI_QUIRK_BROKEN_DMA, +}; + +static const struct sdhci_pci_fixes sdhci_cafe = { + .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER | + SDHCI_QUIRK_BROKEN_TIMEOUT_VAL, +}; + +static int jmicron_pmos(struct sdhci_pci_chip *chip, int on) +{ + u8 scratch; + int ret; + + ret = pci_read_config_byte(chip->pdev, 0xAE, &scratch); + if (ret) + return ret; + + /* + * Turn PMOS on [bit 0], set over current detection to 2.4 V + * [bit 1:2] and enable over current debouncing [bit 6]. + */ + if (on) + scratch |= 0x47; + else + scratch &= ~0x47; + + ret = pci_write_config_byte(chip->pdev, 0xAE, scratch); + if (ret) + return ret; + + return 0; +} + +static int jmicron_probe(struct sdhci_pci_chip *chip) +{ + int ret; + + if (chip->pdev->revision == 0) { + chip->quirks |= SDHCI_QUIRK_32BIT_DMA_ADDR | + SDHCI_QUIRK_32BIT_DMA_SIZE | + SDHCI_QUIRK_32BIT_ADMA_SIZE | + SDHCI_QUIRK_RESET_AFTER_REQUEST; + } + + /* + * JMicron chips can have two interfaces to the same hardware + * in order to work around limitations in Microsoft's driver. + * We need to make sure we only bind to one of them. + * + * This code assumes two things: + * + * 1. The PCI code adds subfunctions in order. + * + * 2. The MMC interface has a lower subfunction number + * than the SD interface. + */ + if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD) { + struct pci_dev *sd_dev; + + sd_dev = NULL; + while ((sd_dev = pci_get_device(PCI_VENDOR_ID_JMICRON, + PCI_DEVICE_ID_JMICRON_JMB38X_MMC, sd_dev)) != NULL) { + if ((PCI_SLOT(chip->pdev->devfn) == + PCI_SLOT(sd_dev->devfn)) && + (chip->pdev->bus == sd_dev->bus)) + break; + } + + if (sd_dev) { + pci_dev_put(sd_dev); + dev_info(&chip->pdev->dev, "Refusing to bind to " + "secondary interface.\n"); + return -ENODEV; + } + } + + /* + * JMicron chips need a bit of a nudge to enable the power + * output pins. + */ + ret = jmicron_pmos(chip, 1); + if (ret) { + dev_err(&chip->pdev->dev, "Failure enabling card power\n"); + return ret; + } + + return 0; +} + +static void jmicron_enable_mmc(struct sdhci_host *host, int on) +{ + u8 scratch; + + scratch = readb(host->ioaddr + 0xC0); + + if (on) + scratch |= 0x01; + else + scratch &= ~0x01; + + writeb(scratch, host->ioaddr + 0xC0); +} + +static int jmicron_probe_slot(struct sdhci_pci_slot *slot) +{ + if (slot->chip->pdev->revision == 0) { + u16 version; + + version = readl(slot->host->ioaddr + SDHCI_HOST_VERSION); + version = (version & SDHCI_VENDOR_VER_MASK) >> + SDHCI_VENDOR_VER_SHIFT; + + /* + * Older versions of the chip have lots of nasty glitches + * in the ADMA engine. It's best just to avoid it + * completely. + */ + if (version < 0xAC) + slot->host->quirks |= SDHCI_QUIRK_BROKEN_ADMA; + } + + /* + * The secondary interface requires a bit set to get the + * interrupts. 
+ */ + if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) + jmicron_enable_mmc(slot->host, 1); + + return 0; +} + +static void jmicron_remove_slot(struct sdhci_pci_slot *slot, int dead) +{ + if (dead) + return; + + if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) + jmicron_enable_mmc(slot->host, 0); +} + +static int jmicron_suspend(struct sdhci_pci_chip *chip, pm_message_t state) +{ + int i; + + if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) { + for (i = 0;i < chip->num_slots;i++) + jmicron_enable_mmc(chip->slots[i]->host, 0); + } + + return 0; +} + +static int jmicron_resume(struct sdhci_pci_chip *chip) +{ + int ret, i; + + if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) { + for (i = 0;i < chip->num_slots;i++) + jmicron_enable_mmc(chip->slots[i]->host, 1); + } + + ret = jmicron_pmos(chip, 1); + if (ret) { + dev_err(&chip->pdev->dev, "Failure enabling card power\n"); + return ret; + } + + return 0; +} + +static const struct sdhci_pci_fixes sdhci_jmicron = { + .probe = jmicron_probe, + + .probe_slot = jmicron_probe_slot, + .remove_slot = jmicron_remove_slot, + + .suspend = jmicron_suspend, + .resume = jmicron_resume, +}; + +static const struct pci_device_id pci_ids[] __devinitdata = { + { + .vendor = PCI_VENDOR_ID_RICOH, + .device = PCI_DEVICE_ID_RICOH_R5C822, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_ricoh, + }, + + { + .vendor = PCI_VENDOR_ID_ENE, + .device = PCI_DEVICE_ID_ENE_CB712_SD, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_ene_712, + }, + + { + .vendor = PCI_VENDOR_ID_ENE, + .device = PCI_DEVICE_ID_ENE_CB712_SD_2, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_ene_712, + }, + + { + .vendor = PCI_VENDOR_ID_ENE, + .device = PCI_DEVICE_ID_ENE_CB714_SD, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_ene_714, + }, + + { + .vendor = PCI_VENDOR_ID_ENE, + .device = PCI_DEVICE_ID_ENE_CB714_SD_2, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_ene_714, + }, + + { + .vendor = PCI_VENDOR_ID_MARVELL, + .device = PCI_DEVICE_ID_MARVELL_CAFE_SD, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_cafe, + }, + + { + .vendor = PCI_VENDOR_ID_JMICRON, + .device = PCI_DEVICE_ID_JMICRON_JMB38X_SD, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_jmicron, + }, + + { + .vendor = PCI_VENDOR_ID_JMICRON, + .device = PCI_DEVICE_ID_JMICRON_JMB38X_MMC, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_jmicron, + }, + + { /* Generic SD host controller */ + PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00) + }, + + { /* end: all zeroes */ }, +}; + +MODULE_DEVICE_TABLE(pci, pci_ids); + +/*****************************************************************************\ + * * + * SDHCI core callbacks * + * * +\*****************************************************************************/ + +static int sdhci_pci_enable_dma(struct sdhci_host *host) +{ + struct sdhci_pci_slot *slot; + struct pci_dev *pdev; + int ret; + + slot = sdhci_priv(host); + pdev = slot->chip->pdev; + + if (((pdev->class & 0xFFFF00) == (PCI_CLASS_SYSTEM_SDHCI << 8)) && + ((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA) && + (host->flags & SDHCI_USE_DMA)) { + dev_warn(&pdev->dev, "Will use DMA mode even though HW " + 
"doesn't fully claim to support it.\n"); + } + + ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + if (ret) + return ret; + + pci_set_master(pdev); + + return 0; +} + +static struct sdhci_ops sdhci_pci_ops = { + .enable_dma = sdhci_pci_enable_dma, +}; + +/*****************************************************************************\ + * * + * Suspend/resume * + * * +\*****************************************************************************/ + +#ifdef CONFIG_PM + +static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state) +{ + struct sdhci_pci_chip *chip; + struct sdhci_pci_slot *slot; + int i, ret; + + chip = pci_get_drvdata(pdev); + if (!chip) + return 0; + + for (i = 0;i < chip->num_slots;i++) { + slot = chip->slots[i]; + if (!slot) + continue; + + ret = sdhci_suspend_host(slot->host, state); + + if (ret) { + for (i--;i >= 0;i--) + sdhci_resume_host(chip->slots[i]->host); + return ret; + } + } + + if (chip->fixes && chip->fixes->suspend) { + ret = chip->fixes->suspend(chip, state); + if (ret) { + for (i = chip->num_slots - 1;i >= 0;i--) + sdhci_resume_host(chip->slots[i]->host); + return ret; + } + } + + pci_save_state(pdev); + pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); + pci_disable_device(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + + return 0; +} + +static int sdhci_pci_resume (struct pci_dev *pdev) +{ + struct sdhci_pci_chip *chip; + struct sdhci_pci_slot *slot; + int i, ret; + + chip = pci_get_drvdata(pdev); + if (!chip) + return 0; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + ret = pci_enable_device(pdev); + if (ret) + return ret; + + if (chip->fixes && chip->fixes->resume) { + ret = chip->fixes->resume(chip); + if (ret) + return ret; + } + + for (i = 0;i < chip->num_slots;i++) { + slot = chip->slots[i]; + if (!slot) + continue; + + ret = sdhci_resume_host(slot->host); + if (ret) + return ret; + } + + return 0; +} + +#else /* CONFIG_PM */ + +#define sdhci_pci_suspend NULL +#define sdhci_pci_resume NULL + +#endif /* CONFIG_PM */ + +/*****************************************************************************\ + * * + * Device probing/removal * + * * +\*****************************************************************************/ + +static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot( + struct pci_dev *pdev, struct sdhci_pci_chip *chip, int bar) +{ + struct sdhci_pci_slot *slot; + struct sdhci_host *host; + + resource_size_t addr; + + int ret; + + if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) { + dev_err(&pdev->dev, "BAR %d is not iomem. Aborting.\n", bar); + return ERR_PTR(-ENODEV); + } + + if (pci_resource_len(pdev, bar) != 0x100) { + dev_err(&pdev->dev, "Invalid iomem size. You may " + "experience problems.\n"); + } + + if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) { + dev_err(&pdev->dev, "Vendor specific interface. Aborting.\n"); + return ERR_PTR(-ENODEV); + } + + if ((pdev->class & 0x0000FF) > PCI_SDHCI_IFVENDOR) { + dev_err(&pdev->dev, "Unknown interface. 
Aborting.\n"); + return ERR_PTR(-ENODEV); + } + + host = sdhci_alloc_host(&pdev->dev, sizeof(struct sdhci_pci_slot)); + if (IS_ERR(host)) { + ret = PTR_ERR(host); + goto unmap; + } + + slot = sdhci_priv(host); + + slot->chip = chip; + slot->host = host; + slot->pci_bar = bar; + + host->hw_name = "PCI"; + host->ops = &sdhci_pci_ops; + host->quirks = chip->quirks; + + host->irq = pdev->irq; + + ret = pci_request_region(pdev, bar, mmc_hostname(host->mmc)); + if (ret) { + dev_err(&pdev->dev, "cannot request region\n"); + return ERR_PTR(ret); + } + + addr = pci_resource_start(pdev, bar); + host->ioaddr = ioremap_nocache(addr, pci_resource_len(pdev, bar)); + if (!host->ioaddr) { + dev_err(&pdev->dev, "failed to remap registers\n"); + goto release; + } + + if (chip->fixes && chip->fixes->probe_slot) { + ret = chip->fixes->probe_slot(slot); + if (ret) + goto unmap; + } + + ret = sdhci_add_host(host); + if (ret) + goto remove; + + return slot; + +remove: + if (chip->fixes && chip->fixes->remove_slot) + chip->fixes->remove_slot(slot, 0); + +unmap: + iounmap(host->ioaddr); + +release: + pci_release_region(pdev, bar); + sdhci_free_host(host); + + return ERR_PTR(ret); +} + +static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot) +{ + int dead; + u32 scratch; + + dead = 0; + scratch = readl(slot->host->ioaddr + SDHCI_INT_STATUS); + if (scratch == (u32)-1) + dead = 1; + + sdhci_remove_host(slot->host, dead); + + if (slot->chip->fixes && slot->chip->fixes->remove_slot) + slot->chip->fixes->remove_slot(slot, dead); + + pci_release_region(slot->chip->pdev, slot->pci_bar); + + sdhci_free_host(slot->host); +} + +static int __devinit sdhci_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct sdhci_pci_chip *chip; + struct sdhci_pci_slot *slot; + + u8 slots, rev, first_bar; + int ret, i; + + BUG_ON(pdev == NULL); + BUG_ON(ent == NULL); + + pci_read_config_byte(pdev, PCI_CLASS_REVISION, &rev); + + dev_info(&pdev->dev, "SDHCI controller found [%04x:%04x] (rev %x)\n", + (int)pdev->vendor, (int)pdev->device, (int)rev); + + ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots); + if (ret) + return ret; + + slots = PCI_SLOT_INFO_SLOTS(slots) + 1; + dev_dbg(&pdev->dev, "found %d slot(s)\n", slots); + if (slots == 0) + return -ENODEV; + + BUG_ON(slots > MAX_SLOTS); + + ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar); + if (ret) + return ret; + + first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK; + + if (first_bar > 5) { + dev_err(&pdev->dev, "Invalid first BAR. 
Aborting.\n"); + return -ENODEV; + } + + ret = pci_enable_device(pdev); + if (ret) + return ret; + + chip = kzalloc(sizeof(struct sdhci_pci_chip), GFP_KERNEL); + if (!chip) { + ret = -ENOMEM; + goto err; + } + + chip->pdev = pdev; + chip->fixes = (const struct sdhci_pci_fixes*)ent->driver_data; + if (chip->fixes) + chip->quirks = chip->fixes->quirks; + chip->num_slots = slots; + + pci_set_drvdata(pdev, chip); + + if (chip->fixes && chip->fixes->probe) { + ret = chip->fixes->probe(chip); + if (ret) + goto free; + } + + for (i = 0;i < slots;i++) { + slot = sdhci_pci_probe_slot(pdev, chip, first_bar + i); + if (IS_ERR(slot)) { + for (i--;i >= 0;i--) + sdhci_pci_remove_slot(chip->slots[i]); + ret = PTR_ERR(slot); + goto free; + } + + chip->slots[i] = slot; + } + + return 0; + +free: + pci_set_drvdata(pdev, NULL); + kfree(chip); + +err: + pci_disable_device(pdev); + return ret; +} + +static void __devexit sdhci_pci_remove(struct pci_dev *pdev) +{ + int i; + struct sdhci_pci_chip *chip; + + chip = pci_get_drvdata(pdev); + + if (chip) { + for (i = 0;i < chip->num_slots; i++) + sdhci_pci_remove_slot(chip->slots[i]); + + pci_set_drvdata(pdev, NULL); + kfree(chip); + } + + pci_disable_device(pdev); +} + +static struct pci_driver sdhci_driver = { + .name = "sdhci-pci", + .id_table = pci_ids, + .probe = sdhci_pci_probe, + .remove = __devexit_p(sdhci_pci_remove), + .suspend = sdhci_pci_suspend, + .resume = sdhci_pci_resume, +}; + +/*****************************************************************************\ + * * + * Driver init/exit * + * * +\*****************************************************************************/ + +static int __init sdhci_drv_init(void) +{ + return pci_register_driver(&sdhci_driver); +} + +static void __exit sdhci_drv_exit(void) +{ + pci_unregister_driver(&sdhci_driver); +} + +module_init(sdhci_drv_init); +module_exit(sdhci_drv_exit); + +MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>"); +MODULE_DESCRIPTION("Secure Digital Host Controller Interface PCI driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index b413aa6c246b..17701c3da733 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -15,7 +15,7 @@ #include <linux/delay.h> #include <linux/highmem.h> -#include <linux/pci.h> +#include <linux/io.h> #include <linux/dma-mapping.h> #include <linux/scatterlist.h> @@ -32,135 +32,6 @@ static unsigned int debug_quirks = 0; -/* - * Different quirks to handle when the hardware deviates from a strict - * interpretation of the SDHCI specification. - */ - -/* Controller doesn't honor resets unless we touch the clock register */ -#define SDHCI_QUIRK_CLOCK_BEFORE_RESET (1<<0) -/* Controller has bad caps bits, but really supports DMA */ -#define SDHCI_QUIRK_FORCE_DMA (1<<1) -/* Controller doesn't like to be reset when there is no card inserted. 
*/ -#define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2) -/* Controller doesn't like clearing the power reg before a change */ -#define SDHCI_QUIRK_SINGLE_POWER_WRITE (1<<3) -/* Controller has flaky internal state so reset it on each ios change */ -#define SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS (1<<4) -/* Controller has an unusable DMA engine */ -#define SDHCI_QUIRK_BROKEN_DMA (1<<5) -/* Controller can only DMA from 32-bit aligned addresses */ -#define SDHCI_QUIRK_32BIT_DMA_ADDR (1<<6) -/* Controller can only DMA chunk sizes that are a multiple of 32 bits */ -#define SDHCI_QUIRK_32BIT_DMA_SIZE (1<<7) -/* Controller needs to be reset after each request to stay stable */ -#define SDHCI_QUIRK_RESET_AFTER_REQUEST (1<<8) -/* Controller needs voltage and power writes to happen separately */ -#define SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER (1<<9) -/* Controller has an off-by-one issue with timeout value */ -#define SDHCI_QUIRK_INCR_TIMEOUT_CONTROL (1<<10) - -static const struct pci_device_id pci_ids[] __devinitdata = { - { - .vendor = PCI_VENDOR_ID_RICOH, - .device = PCI_DEVICE_ID_RICOH_R5C822, - .subvendor = PCI_VENDOR_ID_IBM, - .subdevice = PCI_ANY_ID, - .driver_data = SDHCI_QUIRK_CLOCK_BEFORE_RESET | - SDHCI_QUIRK_FORCE_DMA, - }, - - { - .vendor = PCI_VENDOR_ID_RICOH, - .device = PCI_DEVICE_ID_RICOH_R5C822, - .subvendor = PCI_VENDOR_ID_SAMSUNG, - .subdevice = PCI_ANY_ID, - .driver_data = SDHCI_QUIRK_FORCE_DMA | - SDHCI_QUIRK_NO_CARD_NO_RESET, - }, - - { - .vendor = PCI_VENDOR_ID_RICOH, - .device = PCI_DEVICE_ID_RICOH_R5C822, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .driver_data = SDHCI_QUIRK_FORCE_DMA, - }, - - { - .vendor = PCI_VENDOR_ID_TI, - .device = PCI_DEVICE_ID_TI_XX21_XX11_SD, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .driver_data = SDHCI_QUIRK_FORCE_DMA, - }, - - { - .vendor = PCI_VENDOR_ID_ENE, - .device = PCI_DEVICE_ID_ENE_CB712_SD, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE | - SDHCI_QUIRK_BROKEN_DMA, - }, - - { - .vendor = PCI_VENDOR_ID_ENE, - .device = PCI_DEVICE_ID_ENE_CB712_SD_2, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE | - SDHCI_QUIRK_BROKEN_DMA, - }, - - { - .vendor = PCI_VENDOR_ID_ENE, - .device = PCI_DEVICE_ID_ENE_CB714_SD, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE | - SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS | - SDHCI_QUIRK_BROKEN_DMA, - }, - - { - .vendor = PCI_VENDOR_ID_ENE, - .device = PCI_DEVICE_ID_ENE_CB714_SD_2, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE | - SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS | - SDHCI_QUIRK_BROKEN_DMA, - }, - - { - .vendor = PCI_VENDOR_ID_MARVELL, - .device = PCI_DEVICE_ID_MARVELL_CAFE_SD, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .driver_data = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER | - SDHCI_QUIRK_INCR_TIMEOUT_CONTROL, - }, - - { - .vendor = PCI_VENDOR_ID_JMICRON, - .device = PCI_DEVICE_ID_JMICRON_JMB38X_SD, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .driver_data = SDHCI_QUIRK_32BIT_DMA_ADDR | - SDHCI_QUIRK_32BIT_DMA_SIZE | - SDHCI_QUIRK_RESET_AFTER_REQUEST, - }, - - { /* Generic SD host controller */ - PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00) - }, - - { /* end: all zeroes */ }, -}; - -MODULE_DEVICE_TABLE(pci, pci_ids); - static void sdhci_prepare_data(struct sdhci_host *, struct mmc_data *); static void sdhci_finish_data(struct sdhci_host *); @@ -215,7 +86,7 @@ static 
void sdhci_reset(struct sdhci_host *host, u8 mask) { unsigned long timeout; - if (host->chip->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) { + if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) { if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) return; @@ -253,7 +124,8 @@ static void sdhci_init(struct sdhci_host *host) SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT | SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT | SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | - SDHCI_INT_DMA_END | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE; + SDHCI_INT_DMA_END | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE | + SDHCI_INT_ADMA_ERROR; writel(intmask, host->ioaddr + SDHCI_INT_ENABLE); writel(intmask, host->ioaddr + SDHCI_SIGNAL_ENABLE); @@ -443,23 +315,226 @@ static void sdhci_transfer_pio(struct sdhci_host *host) DBG("PIO transfer complete.\n"); } -static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data) +static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags) { - u8 count; - unsigned target_timeout, current_timeout; + local_irq_save(*flags); + return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; +} - WARN_ON(host->data); +static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags) +{ + kunmap_atomic(buffer, KM_BIO_SRC_IRQ); + local_irq_restore(*flags); +} - if (data == NULL) - return; +static int sdhci_adma_table_pre(struct sdhci_host *host, + struct mmc_data *data) +{ + int direction; - /* Sanity checks */ - BUG_ON(data->blksz * data->blocks > 524288); - BUG_ON(data->blksz > host->mmc->max_blk_size); - BUG_ON(data->blocks > 65535); + u8 *desc; + u8 *align; + dma_addr_t addr; + dma_addr_t align_addr; + int len, offset; - host->data = data; - host->data_early = 0; + struct scatterlist *sg; + int i; + char *buffer; + unsigned long flags; + + /* + * The spec does not specify endianness of descriptor table. + * We currently guess that it is LE. + */ + + if (data->flags & MMC_DATA_READ) + direction = DMA_FROM_DEVICE; + else + direction = DMA_TO_DEVICE; + + /* + * The ADMA descriptor table is mapped further down as we + * need to fill it with data first. + */ + + host->align_addr = dma_map_single(mmc_dev(host->mmc), + host->align_buffer, 128 * 4, direction); + if (dma_mapping_error(host->align_addr)) + goto fail; + BUG_ON(host->align_addr & 0x3); + + host->sg_count = dma_map_sg(mmc_dev(host->mmc), + data->sg, data->sg_len, direction); + if (host->sg_count == 0) + goto unmap_align; + + desc = host->adma_desc; + align = host->align_buffer; + + align_addr = host->align_addr; + + for_each_sg(data->sg, sg, host->sg_count, i) { + addr = sg_dma_address(sg); + len = sg_dma_len(sg); + + /* + * The SDHCI specification states that ADMA + * addresses must be 32-bit aligned. If they + * aren't, then we use a bounce buffer for + * the (up to three) bytes that screw up the + * alignment. 
+ */ + offset = (4 - (addr & 0x3)) & 0x3; + if (offset) { + if (data->flags & MMC_DATA_WRITE) { + buffer = sdhci_kmap_atomic(sg, &flags); + memcpy(align, buffer, offset); + sdhci_kunmap_atomic(buffer, &flags); + } + + desc[7] = (align_addr >> 24) & 0xff; + desc[6] = (align_addr >> 16) & 0xff; + desc[5] = (align_addr >> 8) & 0xff; + desc[4] = (align_addr >> 0) & 0xff; + + BUG_ON(offset > 65536); + + desc[3] = (offset >> 8) & 0xff; + desc[2] = (offset >> 0) & 0xff; + + desc[1] = 0x00; + desc[0] = 0x21; /* tran, valid */ + + align += 4; + align_addr += 4; + + desc += 8; + + addr += offset; + len -= offset; + } + + desc[7] = (addr >> 24) & 0xff; + desc[6] = (addr >> 16) & 0xff; + desc[5] = (addr >> 8) & 0xff; + desc[4] = (addr >> 0) & 0xff; + + BUG_ON(len > 65536); + + desc[3] = (len >> 8) & 0xff; + desc[2] = (len >> 0) & 0xff; + + desc[1] = 0x00; + desc[0] = 0x21; /* tran, valid */ + + desc += 8; + + /* + * If this triggers then we have a calculation bug + * somewhere. :/ + */ + WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4); + } + + /* + * Add a terminating entry. + */ + desc[7] = 0; + desc[6] = 0; + desc[5] = 0; + desc[4] = 0; + + desc[3] = 0; + desc[2] = 0; + + desc[1] = 0x00; + desc[0] = 0x03; /* nop, end, valid */ + + /* + * Resync align buffer as we might have changed it. + */ + if (data->flags & MMC_DATA_WRITE) { + dma_sync_single_for_device(mmc_dev(host->mmc), + host->align_addr, 128 * 4, direction); + } + + host->adma_addr = dma_map_single(mmc_dev(host->mmc), + host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE); + if (dma_mapping_error(host->align_addr)) + goto unmap_entries; + BUG_ON(host->adma_addr & 0x3); + + return 0; + +unmap_entries: + dma_unmap_sg(mmc_dev(host->mmc), data->sg, + data->sg_len, direction); +unmap_align: + dma_unmap_single(mmc_dev(host->mmc), host->align_addr, + 128 * 4, direction); +fail: + return -EINVAL; +} + +static void sdhci_adma_table_post(struct sdhci_host *host, + struct mmc_data *data) +{ + int direction; + + struct scatterlist *sg; + int i, size; + u8 *align; + char *buffer; + unsigned long flags; + + if (data->flags & MMC_DATA_READ) + direction = DMA_FROM_DEVICE; + else + direction = DMA_TO_DEVICE; + + dma_unmap_single(mmc_dev(host->mmc), host->adma_addr, + (128 * 2 + 1) * 4, DMA_TO_DEVICE); + + dma_unmap_single(mmc_dev(host->mmc), host->align_addr, + 128 * 4, direction); + + if (data->flags & MMC_DATA_READ) { + dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg, + data->sg_len, direction); + + align = host->align_buffer; + + for_each_sg(data->sg, sg, host->sg_count, i) { + if (sg_dma_address(sg) & 0x3) { + size = 4 - (sg_dma_address(sg) & 0x3); + + buffer = sdhci_kmap_atomic(sg, &flags); + memcpy(buffer, align, size); + sdhci_kunmap_atomic(buffer, &flags); + + align += 4; + } + } + } + + dma_unmap_sg(mmc_dev(host->mmc), data->sg, + data->sg_len, direction); +} + +static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data) +{ + u8 count; + unsigned target_timeout, current_timeout; + + /* + * If the host controller provides us with an incorrect timeout + * value, just skip the check and use 0xE. The hardware may take + * longer to time out, but that's much better than having a too-short + * timeout value. 
+ */ + if ((host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)) + return 0xE; /* timeout in us */ target_timeout = data->timeout_ns / 1000 + @@ -484,52 +559,158 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data) break; } - /* - * Compensate for an off-by-one error in the CaFe hardware; otherwise, - * a too-small count gives us interrupt timeouts. - */ - if ((host->chip->quirks & SDHCI_QUIRK_INCR_TIMEOUT_CONTROL)) - count++; - if (count >= 0xF) { printk(KERN_WARNING "%s: Too large timeout requested!\n", mmc_hostname(host->mmc)); count = 0xE; } + return count; +} + +static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data) +{ + u8 count; + u8 ctrl; + int ret; + + WARN_ON(host->data); + + if (data == NULL) + return; + + /* Sanity checks */ + BUG_ON(data->blksz * data->blocks > 524288); + BUG_ON(data->blksz > host->mmc->max_blk_size); + BUG_ON(data->blocks > 65535); + + host->data = data; + host->data_early = 0; + + count = sdhci_calc_timeout(host, data); writeb(count, host->ioaddr + SDHCI_TIMEOUT_CONTROL); if (host->flags & SDHCI_USE_DMA) host->flags |= SDHCI_REQ_USE_DMA; - if (unlikely((host->flags & SDHCI_REQ_USE_DMA) && - (host->chip->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE) && - ((data->blksz * data->blocks) & 0x3))) { - DBG("Reverting to PIO because of transfer size (%d)\n", - data->blksz * data->blocks); - host->flags &= ~SDHCI_REQ_USE_DMA; + /* + * FIXME: This doesn't account for merging when mapping the + * scatterlist. + */ + if (host->flags & SDHCI_REQ_USE_DMA) { + int broken, i; + struct scatterlist *sg; + + broken = 0; + if (host->flags & SDHCI_USE_ADMA) { + if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) + broken = 1; + } else { + if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE) + broken = 1; + } + + if (unlikely(broken)) { + for_each_sg(data->sg, sg, data->sg_len, i) { + if (sg->length & 0x3) { + DBG("Reverting to PIO because of " + "transfer size (%d)\n", + sg->length); + host->flags &= ~SDHCI_REQ_USE_DMA; + break; + } + } + } } /* * The assumption here being that alignment is the same after * translation to device address space. */ - if (unlikely((host->flags & SDHCI_REQ_USE_DMA) && - (host->chip->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) && - (data->sg->offset & 0x3))) { - DBG("Reverting to PIO because of bad alignment\n"); - host->flags &= ~SDHCI_REQ_USE_DMA; + if (host->flags & SDHCI_REQ_USE_DMA) { + int broken, i; + struct scatterlist *sg; + + broken = 0; + if (host->flags & SDHCI_USE_ADMA) { + /* + * As we use 3 byte chunks to work around + * alignment problems, we need to check this + * quirk. + */ + if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) + broken = 1; + } else { + if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) + broken = 1; + } + + if (unlikely(broken)) { + for_each_sg(data->sg, sg, data->sg_len, i) { + if (sg->offset & 0x3) { + DBG("Reverting to PIO because of " + "bad alignment\n"); + host->flags &= ~SDHCI_REQ_USE_DMA; + break; + } + } + } } if (host->flags & SDHCI_REQ_USE_DMA) { - int count; + if (host->flags & SDHCI_USE_ADMA) { + ret = sdhci_adma_table_pre(host, data); + if (ret) { + /* + * This only happens when someone fed + * us an invalid request. + */ + WARN_ON(1); + host->flags &= ~SDHCI_USE_DMA; + } else { + writel(host->adma_addr, + host->ioaddr + SDHCI_ADMA_ADDRESS); + } + } else { + int sg_cnt; + + sg_cnt = dma_map_sg(mmc_dev(host->mmc), + data->sg, data->sg_len, + (data->flags & MMC_DATA_READ) ? 
+ DMA_FROM_DEVICE : + DMA_TO_DEVICE); + if (sg_cnt == 0) { + /* + * This only happens when someone fed + * us an invalid request. + */ + WARN_ON(1); + host->flags &= ~SDHCI_USE_DMA; + } else { + WARN_ON(count != 1); + writel(sg_dma_address(data->sg), + host->ioaddr + SDHCI_DMA_ADDRESS); + } + } + } - count = pci_map_sg(host->chip->pdev, data->sg, data->sg_len, - (data->flags & MMC_DATA_READ)?PCI_DMA_FROMDEVICE:PCI_DMA_TODEVICE); - BUG_ON(count != 1); + /* + * Always adjust the DMA selection as some controllers + * (e.g. JMicron) can't do PIO properly when the selection + * is ADMA. + */ + if (host->version >= SDHCI_SPEC_200) { + ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL); + ctrl &= ~SDHCI_CTRL_DMA_MASK; + if ((host->flags & SDHCI_REQ_USE_DMA) && + (host->flags & SDHCI_USE_ADMA)) + ctrl |= SDHCI_CTRL_ADMA32; + else + ctrl |= SDHCI_CTRL_SDMA; + writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL); + } - writel(sg_dma_address(data->sg), host->ioaddr + SDHCI_DMA_ADDRESS); - } else { + if (!(host->flags & SDHCI_REQ_USE_DMA)) { host->cur_sg = data->sg; host->num_sg = data->sg_len; @@ -567,7 +748,6 @@ static void sdhci_set_transfer_mode(struct sdhci_host *host, static void sdhci_finish_data(struct sdhci_host *host) { struct mmc_data *data; - u16 blocks; BUG_ON(!host->data); @@ -575,25 +755,26 @@ static void sdhci_finish_data(struct sdhci_host *host) host->data = NULL; if (host->flags & SDHCI_REQ_USE_DMA) { - pci_unmap_sg(host->chip->pdev, data->sg, data->sg_len, - (data->flags & MMC_DATA_READ)?PCI_DMA_FROMDEVICE:PCI_DMA_TODEVICE); + if (host->flags & SDHCI_USE_ADMA) + sdhci_adma_table_post(host, data); + else { + dma_unmap_sg(mmc_dev(host->mmc), data->sg, + data->sg_len, (data->flags & MMC_DATA_READ) ? + DMA_FROM_DEVICE : DMA_TO_DEVICE); + } } /* - * Controller doesn't count down when in single block mode. + * The specification states that the block count register must + * be updated, but it does not specify at what point in the + * data flow. That makes the register entirely useless to read + * back so we have to assume that nothing made it to the card + * in the event of an error. */ - if (data->blocks == 1) - blocks = (data->error == 0) ? 0 : 1; + if (data->error) + data->bytes_xfered = 0; else - blocks = readw(host->ioaddr + SDHCI_BLOCK_COUNT); - data->bytes_xfered = data->blksz * (data->blocks - blocks); - - if (!data->error && blocks) { - printk(KERN_ERR "%s: Controller signalled completion even " - "though there were blocks left.\n", - mmc_hostname(host->mmc)); - data->error = -EIO; - } + data->bytes_xfered = data->blksz * data->blocks; if (data->stop) { /* @@ -775,7 +956,7 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned short power) * Spec says that we should clear the power reg before setting * a new value. Some controllers don't seem to like this though. */ - if (!(host->chip->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) + if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) writeb(0, host->ioaddr + SDHCI_POWER_CONTROL); pwr = SDHCI_POWER_ON; @@ -797,10 +978,10 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned short power) } /* - * At least the CaFe chip gets confused if we set the voltage + * At least the Marvell CaFe chip gets confused if we set the voltage * and set turn on power at the same time, so set the voltage first. 
*/ - if ((host->chip->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)) + if ((host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)) writeb(pwr & ~SDHCI_POWER_ON, host->ioaddr + SDHCI_POWER_CONTROL); @@ -833,7 +1014,8 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) host->mrq = mrq; - if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) { + if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT) + || (host->flags & SDHCI_DEVICE_DEAD)) { host->mrq->cmd->error = -ENOMEDIUM; tasklet_schedule(&host->finish_tasklet); } else @@ -853,6 +1035,9 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) spin_lock_irqsave(&host->lock, flags); + if (host->flags & SDHCI_DEVICE_DEAD) + goto out; + /* * Reset the chip on each power off. * Should clear out any weird states. @@ -888,9 +1073,10 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) * signalling timeout and CRC errors even on CMD0. Resetting * it on each ios seems to solve the problem. */ - if(host->chip->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS) + if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS) sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); +out: mmiowb(); spin_unlock_irqrestore(&host->lock, flags); } @@ -905,7 +1091,10 @@ static int sdhci_get_ro(struct mmc_host *mmc) spin_lock_irqsave(&host->lock, flags); - present = readl(host->ioaddr + SDHCI_PRESENT_STATE); + if (host->flags & SDHCI_DEVICE_DEAD) + present = 0; + else + present = readl(host->ioaddr + SDHCI_PRESENT_STATE); spin_unlock_irqrestore(&host->lock, flags); @@ -922,6 +1111,9 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) spin_lock_irqsave(&host->lock, flags); + if (host->flags & SDHCI_DEVICE_DEAD) + goto out; + ier = readl(host->ioaddr + SDHCI_INT_ENABLE); ier &= ~SDHCI_INT_CARD_INT; @@ -931,6 +1123,7 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) writel(ier, host->ioaddr + SDHCI_INT_ENABLE); writel(ier, host->ioaddr + SDHCI_SIGNAL_ENABLE); +out: mmiowb(); spin_unlock_irqrestore(&host->lock, flags); @@ -996,13 +1189,14 @@ static void sdhci_tasklet_finish(unsigned long param) * The controller needs a reset of internal state machines * upon error conditions. 
*/ - if (mrq->cmd->error || - (mrq->data && (mrq->data->error || - (mrq->data->stop && mrq->data->stop->error))) || - (host->chip->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)) { + if (!(host->flags & SDHCI_DEVICE_DEAD) && + (mrq->cmd->error || + (mrq->data && (mrq->data->error || + (mrq->data->stop && mrq->data->stop->error))) || + (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) { /* Some controllers need this kick or reset won't work here */ - if (host->chip->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) { + if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) { unsigned int clock; /* This is to force an update */ @@ -1116,6 +1310,8 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) host->data->error = -ETIMEDOUT; else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT)) host->data->error = -EILSEQ; + else if (intmask & SDHCI_INT_ADMA_ERROR) + host->data->error = -EIO; if (host->data->error) sdhci_finish_data(host); @@ -1234,218 +1430,167 @@ out: #ifdef CONFIG_PM -static int sdhci_suspend (struct pci_dev *pdev, pm_message_t state) +int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state) { - struct sdhci_chip *chip; - int i, ret; - - chip = pci_get_drvdata(pdev); - if (!chip) - return 0; - - DBG("Suspending...\n"); - - for (i = 0;i < chip->num_slots;i++) { - if (!chip->hosts[i]) - continue; - ret = mmc_suspend_host(chip->hosts[i]->mmc, state); - if (ret) { - for (i--;i >= 0;i--) - mmc_resume_host(chip->hosts[i]->mmc); - return ret; - } - } - - pci_save_state(pdev); - pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); + int ret; - for (i = 0;i < chip->num_slots;i++) { - if (!chip->hosts[i]) - continue; - free_irq(chip->hosts[i]->irq, chip->hosts[i]); - } + ret = mmc_suspend_host(host->mmc, state); + if (ret) + return ret; - pci_disable_device(pdev); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); + free_irq(host->irq, host); return 0; } -static int sdhci_resume (struct pci_dev *pdev) -{ - struct sdhci_chip *chip; - int i, ret; +EXPORT_SYMBOL_GPL(sdhci_suspend_host); - chip = pci_get_drvdata(pdev); - if (!chip) - return 0; +int sdhci_resume_host(struct sdhci_host *host) +{ + int ret; - DBG("Resuming...\n"); + if (host->flags & SDHCI_USE_DMA) { + if (host->ops->enable_dma) + host->ops->enable_dma(host); + } - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - ret = pci_enable_device(pdev); + ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED, + mmc_hostname(host->mmc), host); if (ret) return ret; - for (i = 0;i < chip->num_slots;i++) { - if (!chip->hosts[i]) - continue; - if (chip->hosts[i]->flags & SDHCI_USE_DMA) - pci_set_master(pdev); - ret = request_irq(chip->hosts[i]->irq, sdhci_irq, - IRQF_SHARED, mmc_hostname(chip->hosts[i]->mmc), - chip->hosts[i]); - if (ret) - return ret; - sdhci_init(chip->hosts[i]); - mmiowb(); - ret = mmc_resume_host(chip->hosts[i]->mmc); - if (ret) - return ret; - } + sdhci_init(host); + mmiowb(); + + ret = mmc_resume_host(host->mmc); + if (ret) + return ret; return 0; } -#else /* CONFIG_PM */ - -#define sdhci_suspend NULL -#define sdhci_resume NULL +EXPORT_SYMBOL_GPL(sdhci_resume_host); #endif /* CONFIG_PM */ /*****************************************************************************\ * * - * Device probing/removal * + * Device allocation/registration * * * \*****************************************************************************/ -static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot) +struct sdhci_host *sdhci_alloc_host(struct device *dev, + size_t priv_size) { - int ret; - 
unsigned int version; - struct sdhci_chip *chip; struct mmc_host *mmc; struct sdhci_host *host; - u8 first_bar; - unsigned int caps; - - chip = pci_get_drvdata(pdev); - BUG_ON(!chip); - - ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar); - if (ret) - return ret; - - first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK; - - if (first_bar > 5) { - printk(KERN_ERR DRIVER_NAME ": Invalid first BAR. Aborting.\n"); - return -ENODEV; - } - - if (!(pci_resource_flags(pdev, first_bar + slot) & IORESOURCE_MEM)) { - printk(KERN_ERR DRIVER_NAME ": BAR is not iomem. Aborting.\n"); - return -ENODEV; - } - - if (pci_resource_len(pdev, first_bar + slot) != 0x100) { - printk(KERN_ERR DRIVER_NAME ": Invalid iomem size. " - "You may experience problems.\n"); - } - - if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) { - printk(KERN_ERR DRIVER_NAME ": Vendor specific interface. Aborting.\n"); - return -ENODEV; - } - - if ((pdev->class & 0x0000FF) > PCI_SDHCI_IFVENDOR) { - printk(KERN_ERR DRIVER_NAME ": Unknown interface. Aborting.\n"); - return -ENODEV; - } + WARN_ON(dev == NULL); - mmc = mmc_alloc_host(sizeof(struct sdhci_host), &pdev->dev); + mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev); if (!mmc) - return -ENOMEM; + return ERR_PTR(-ENOMEM); host = mmc_priv(mmc); host->mmc = mmc; - host->chip = chip; - chip->hosts[slot] = host; + return host; +} - host->bar = first_bar + slot; +EXPORT_SYMBOL_GPL(sdhci_alloc_host); - host->addr = pci_resource_start(pdev, host->bar); - host->irq = pdev->irq; +int sdhci_add_host(struct sdhci_host *host) +{ + struct mmc_host *mmc; + unsigned int caps; + int ret; - DBG("slot %d at 0x%08lx, irq %d\n", slot, host->addr, host->irq); + WARN_ON(host == NULL); + if (host == NULL) + return -EINVAL; - ret = pci_request_region(pdev, host->bar, mmc_hostname(mmc)); - if (ret) - goto free; + mmc = host->mmc; - host->ioaddr = ioremap_nocache(host->addr, - pci_resource_len(pdev, host->bar)); - if (!host->ioaddr) { - ret = -ENOMEM; - goto release; - } + if (debug_quirks) + host->quirks = debug_quirks; sdhci_reset(host, SDHCI_RESET_ALL); - version = readw(host->ioaddr + SDHCI_HOST_VERSION); - version = (version & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT; - if (version > 1) { + host->version = readw(host->ioaddr + SDHCI_HOST_VERSION); + host->version = (host->version & SDHCI_SPEC_VER_MASK) + >> SDHCI_SPEC_VER_SHIFT; + if (host->version > SDHCI_SPEC_200) { printk(KERN_ERR "%s: Unknown controller version (%d). 
" "You may experience problems.\n", mmc_hostname(mmc), - version); + host->version); } caps = readl(host->ioaddr + SDHCI_CAPABILITIES); - if (chip->quirks & SDHCI_QUIRK_FORCE_DMA) + if (host->quirks & SDHCI_QUIRK_FORCE_DMA) host->flags |= SDHCI_USE_DMA; else if (!(caps & SDHCI_CAN_DO_DMA)) DBG("Controller doesn't have DMA capability\n"); else host->flags |= SDHCI_USE_DMA; - if ((chip->quirks & SDHCI_QUIRK_BROKEN_DMA) && + if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) && (host->flags & SDHCI_USE_DMA)) { DBG("Disabling DMA as it is marked broken\n"); host->flags &= ~SDHCI_USE_DMA; } - if (((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA) && - (host->flags & SDHCI_USE_DMA)) { - printk(KERN_WARNING "%s: Will use DMA " - "mode even though HW doesn't fully " - "claim to support it.\n", mmc_hostname(mmc)); + if (host->flags & SDHCI_USE_DMA) { + if ((host->version >= SDHCI_SPEC_200) && + (caps & SDHCI_CAN_DO_ADMA2)) + host->flags |= SDHCI_USE_ADMA; + } + + if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) && + (host->flags & SDHCI_USE_ADMA)) { + DBG("Disabling ADMA as it is marked broken\n"); + host->flags &= ~SDHCI_USE_ADMA; } if (host->flags & SDHCI_USE_DMA) { - if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) { - printk(KERN_WARNING "%s: No suitable DMA available. " - "Falling back to PIO.\n", mmc_hostname(mmc)); - host->flags &= ~SDHCI_USE_DMA; + if (host->ops->enable_dma) { + if (host->ops->enable_dma(host)) { + printk(KERN_WARNING "%s: No suitable DMA " + "available. Falling back to PIO.\n", + mmc_hostname(mmc)); + host->flags &= ~(SDHCI_USE_DMA | SDHCI_USE_ADMA); + } } } - if (host->flags & SDHCI_USE_DMA) - pci_set_master(pdev); - else /* XXX: Hack to get MMC layer to avoid highmem */ - pdev->dma_mask = 0; + if (host->flags & SDHCI_USE_ADMA) { + /* + * We need to allocate descriptors for all sg entries + * (128) and potentially one alignment transfer for + * each of those entries. + */ + host->adma_desc = kmalloc((128 * 2 + 1) * 4, GFP_KERNEL); + host->align_buffer = kmalloc(128 * 4, GFP_KERNEL); + if (!host->adma_desc || !host->align_buffer) { + kfree(host->adma_desc); + kfree(host->align_buffer); + printk(KERN_WARNING "%s: Unable to allocate ADMA " + "buffers. 
Falling back to standard DMA.\n", + mmc_hostname(mmc)); + host->flags &= ~SDHCI_USE_ADMA; + } + } + + /* XXX: Hack to get MMC layer to avoid highmem */ + if (!(host->flags & SDHCI_USE_DMA)) + mmc_dev(host->mmc)->dma_mask = NULL; host->max_clk = (caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT; if (host->max_clk == 0) { printk(KERN_ERR "%s: Hardware doesn't specify base clock " "frequency.\n", mmc_hostname(mmc)); - ret = -ENODEV; - goto unmap; + return -ENODEV; } host->max_clk *= 1000000; @@ -1454,8 +1599,7 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot) if (host->timeout_clk == 0) { printk(KERN_ERR "%s: Hardware doesn't specify timeout clock " "frequency.\n", mmc_hostname(mmc)); - ret = -ENODEV; - goto unmap; + return -ENODEV; } if (caps & SDHCI_TIMEOUT_CLK_UNIT) host->timeout_clk *= 1000; @@ -1466,7 +1610,7 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot) mmc->ops = &sdhci_ops; mmc->f_min = host->max_clk / 256; mmc->f_max = host->max_clk; - mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE | MMC_CAP_SDIO_IRQ; + mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ; if (caps & SDHCI_CAN_DO_HISPD) mmc->caps |= MMC_CAP_SD_HIGHSPEED; @@ -1482,20 +1626,22 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot) if (mmc->ocr_avail == 0) { printk(KERN_ERR "%s: Hardware doesn't report any " "support voltages.\n", mmc_hostname(mmc)); - ret = -ENODEV; - goto unmap; + return -ENODEV; } spin_lock_init(&host->lock); /* - * Maximum number of segments. Hardware cannot do scatter lists. + * Maximum number of segments. Depends on if the hardware + * can do scatter/gather or not. */ - if (host->flags & SDHCI_USE_DMA) + if (host->flags & SDHCI_USE_ADMA) + mmc->max_hw_segs = 128; + else if (host->flags & SDHCI_USE_DMA) mmc->max_hw_segs = 1; - else - mmc->max_hw_segs = 16; - mmc->max_phys_segs = 16; + else /* PIO */ + mmc->max_hw_segs = 128; + mmc->max_phys_segs = 128; /* * Maximum number of sectors in one transfer. Limited by DMA boundary @@ -1505,9 +1651,13 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot) /* * Maximum segment size. Could be one segment with the maximum number - * of bytes. + * of bytes. When doing hardware scatter/gather, each entry cannot + * be larger than 64 KiB though. */ - mmc->max_seg_size = mmc->max_req_size; + if (host->flags & SDHCI_USE_ADMA) + mmc->max_seg_size = 65536; + else + mmc->max_seg_size = mmc->max_req_size; /* * Maximum block size. 
This varies from controller to controller and @@ -1553,7 +1703,7 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot) host->led.default_trigger = mmc_hostname(mmc); host->led.brightness_set = sdhci_led_control; - ret = led_classdev_register(&pdev->dev, &host->led); + ret = led_classdev_register(mmc_dev(mmc), &host->led); if (ret) goto reset; #endif @@ -1562,8 +1712,9 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot) mmc_add_host(mmc); - printk(KERN_INFO "%s: SDHCI at 0x%08lx irq %d %s\n", - mmc_hostname(mmc), host->addr, host->irq, + printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s%s\n", + mmc_hostname(mmc), host->hw_name, mmc_dev(mmc)->bus_id, + (host->flags & SDHCI_USE_ADMA)?"A":"", (host->flags & SDHCI_USE_DMA)?"DMA":"PIO"); return 0; @@ -1576,35 +1727,40 @@ reset: untasklet: tasklet_kill(&host->card_tasklet); tasklet_kill(&host->finish_tasklet); -unmap: - iounmap(host->ioaddr); -release: - pci_release_region(pdev, host->bar); -free: - mmc_free_host(mmc); return ret; } -static void sdhci_remove_slot(struct pci_dev *pdev, int slot) +EXPORT_SYMBOL_GPL(sdhci_add_host); + +void sdhci_remove_host(struct sdhci_host *host, int dead) { - struct sdhci_chip *chip; - struct mmc_host *mmc; - struct sdhci_host *host; + unsigned long flags; - chip = pci_get_drvdata(pdev); - host = chip->hosts[slot]; - mmc = host->mmc; + if (dead) { + spin_lock_irqsave(&host->lock, flags); + + host->flags |= SDHCI_DEVICE_DEAD; + + if (host->mrq) { + printk(KERN_ERR "%s: Controller removed during " + " transfer!\n", mmc_hostname(host->mmc)); - chip->hosts[slot] = NULL; + host->mrq->cmd->error = -ENOMEDIUM; + tasklet_schedule(&host->finish_tasklet); + } + + spin_unlock_irqrestore(&host->lock, flags); + } - mmc_remove_host(mmc); + mmc_remove_host(host->mmc); #ifdef CONFIG_LEDS_CLASS led_classdev_unregister(&host->led); #endif - sdhci_reset(host, SDHCI_RESET_ALL); + if (!dead) + sdhci_reset(host, SDHCI_RESET_ALL); free_irq(host->irq, host); @@ -1613,106 +1769,21 @@ static void sdhci_remove_slot(struct pci_dev *pdev, int slot) tasklet_kill(&host->card_tasklet); tasklet_kill(&host->finish_tasklet); - iounmap(host->ioaddr); - - pci_release_region(pdev, host->bar); + kfree(host->adma_desc); + kfree(host->align_buffer); - mmc_free_host(mmc); + host->adma_desc = NULL; + host->align_buffer = NULL; } -static int __devinit sdhci_probe(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - int ret, i; - u8 slots, rev; - struct sdhci_chip *chip; - - BUG_ON(pdev == NULL); - BUG_ON(ent == NULL); +EXPORT_SYMBOL_GPL(sdhci_remove_host); - pci_read_config_byte(pdev, PCI_CLASS_REVISION, &rev); - - printk(KERN_INFO DRIVER_NAME - ": SDHCI controller found at %s [%04x:%04x] (rev %x)\n", - pci_name(pdev), (int)pdev->vendor, (int)pdev->device, - (int)rev); - - ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots); - if (ret) - return ret; - - slots = PCI_SLOT_INFO_SLOTS(slots) + 1; - DBG("found %d slot(s)\n", slots); - if (slots == 0) - return -ENODEV; - - ret = pci_enable_device(pdev); - if (ret) - return ret; - - chip = kzalloc(sizeof(struct sdhci_chip) + - sizeof(struct sdhci_host*) * slots, GFP_KERNEL); - if (!chip) { - ret = -ENOMEM; - goto err; - } - - chip->pdev = pdev; - chip->quirks = ent->driver_data; - - if (debug_quirks) - chip->quirks = debug_quirks; - - chip->num_slots = slots; - pci_set_drvdata(pdev, chip); - - for (i = 0;i < slots;i++) { - ret = sdhci_probe_slot(pdev, i); - if (ret) { - for (i--;i >= 0;i--) - sdhci_remove_slot(pdev, i); - goto free; - } - } - - return 
0; - -free: - pci_set_drvdata(pdev, NULL); - kfree(chip); - -err: - pci_disable_device(pdev); - return ret; -} - -static void __devexit sdhci_remove(struct pci_dev *pdev) +void sdhci_free_host(struct sdhci_host *host) { - int i; - struct sdhci_chip *chip; - - chip = pci_get_drvdata(pdev); - - if (chip) { - for (i = 0;i < chip->num_slots;i++) - sdhci_remove_slot(pdev, i); - - pci_set_drvdata(pdev, NULL); - - kfree(chip); - } - - pci_disable_device(pdev); + mmc_free_host(host->mmc); } -static struct pci_driver sdhci_driver = { - .name = DRIVER_NAME, - .id_table = pci_ids, - .probe = sdhci_probe, - .remove = __devexit_p(sdhci_remove), - .suspend = sdhci_suspend, - .resume = sdhci_resume, -}; +EXPORT_SYMBOL_GPL(sdhci_free_host); /*****************************************************************************\ * * @@ -1726,14 +1797,11 @@ static int __init sdhci_drv_init(void) ": Secure Digital Host Controller Interface driver\n"); printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); - return pci_register_driver(&sdhci_driver); + return 0; } static void __exit sdhci_drv_exit(void) { - DBG("Exiting\n"); - - pci_unregister_driver(&sdhci_driver); } module_init(sdhci_drv_init); @@ -1742,7 +1810,7 @@ module_exit(sdhci_drv_exit); module_param(debug_quirks, uint, 0444); MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>"); -MODULE_DESCRIPTION("Secure Digital Host Controller Interface driver"); +MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver"); MODULE_LICENSE("GPL"); MODULE_PARM_DESC(debug_quirks, "Force certain quirks."); diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 299118de8933..5bb355281765 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -10,18 +10,6 @@ */ /* - * PCI registers - */ - -#define PCI_SDHCI_IFPIO 0x00 -#define PCI_SDHCI_IFDMA 0x01 -#define PCI_SDHCI_IFVENDOR 0x02 - -#define PCI_SLOT_INFO 0x40 /* 8 bits */ -#define PCI_SLOT_INFO_SLOTS(x) ((x >> 4) & 7) -#define PCI_SLOT_INFO_FIRST_BAR_MASK 0x07 - -/* * Controller registers */ @@ -72,6 +60,11 @@ #define SDHCI_CTRL_LED 0x01 #define SDHCI_CTRL_4BITBUS 0x02 #define SDHCI_CTRL_HISPD 0x04 +#define SDHCI_CTRL_DMA_MASK 0x18 +#define SDHCI_CTRL_SDMA 0x00 +#define SDHCI_CTRL_ADMA1 0x08 +#define SDHCI_CTRL_ADMA32 0x10 +#define SDHCI_CTRL_ADMA64 0x18 #define SDHCI_POWER_CONTROL 0x29 #define SDHCI_POWER_ON 0x01 @@ -117,6 +110,7 @@ #define SDHCI_INT_DATA_END_BIT 0x00400000 #define SDHCI_INT_BUS_POWER 0x00800000 #define SDHCI_INT_ACMD12ERR 0x01000000 +#define SDHCI_INT_ADMA_ERROR 0x02000000 #define SDHCI_INT_NORMAL_MASK 0x00007FFF #define SDHCI_INT_ERROR_MASK 0xFFFF8000 @@ -140,11 +134,14 @@ #define SDHCI_CLOCK_BASE_SHIFT 8 #define SDHCI_MAX_BLOCK_MASK 0x00030000 #define SDHCI_MAX_BLOCK_SHIFT 16 +#define SDHCI_CAN_DO_ADMA2 0x00080000 +#define SDHCI_CAN_DO_ADMA1 0x00100000 #define SDHCI_CAN_DO_HISPD 0x00200000 #define SDHCI_CAN_DO_DMA 0x00400000 #define SDHCI_CAN_VDD_330 0x01000000 #define SDHCI_CAN_VDD_300 0x02000000 #define SDHCI_CAN_VDD_180 0x04000000 +#define SDHCI_CAN_64BIT 0x10000000 /* 44-47 reserved for more caps */ @@ -152,7 +149,16 @@ /* 4C-4F reserved for more max current */ -/* 50-FB reserved */ +#define SDHCI_SET_ACMD12_ERROR 0x50 +#define SDHCI_SET_INT_ERROR 0x52 + +#define SDHCI_ADMA_ERROR 0x54 + +/* 55-57 reserved */ + +#define SDHCI_ADMA_ADDRESS 0x58 + +/* 60-FB reserved */ #define SDHCI_SLOT_INT_STATUS 0xFC @@ -161,11 +167,50 @@ #define SDHCI_VENDOR_VER_SHIFT 8 #define SDHCI_SPEC_VER_MASK 0x00FF #define SDHCI_SPEC_VER_SHIFT 0 +#define SDHCI_SPEC_100 0 
+#define SDHCI_SPEC_200 1 -struct sdhci_chip; +struct sdhci_ops; struct sdhci_host { - struct sdhci_chip *chip; + /* Data set by hardware interface driver */ + const char *hw_name; /* Hardware bus name */ + + unsigned int quirks; /* Deviations from spec. */ + +/* Controller doesn't honor resets unless we touch the clock register */ +#define SDHCI_QUIRK_CLOCK_BEFORE_RESET (1<<0) +/* Controller has bad caps bits, but really supports DMA */ +#define SDHCI_QUIRK_FORCE_DMA (1<<1) +/* Controller doesn't like to be reset when there is no card inserted. */ +#define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2) +/* Controller doesn't like clearing the power reg before a change */ +#define SDHCI_QUIRK_SINGLE_POWER_WRITE (1<<3) +/* Controller has flaky internal state so reset it on each ios change */ +#define SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS (1<<4) +/* Controller has an unusable DMA engine */ +#define SDHCI_QUIRK_BROKEN_DMA (1<<5) +/* Controller has an unusable ADMA engine */ +#define SDHCI_QUIRK_BROKEN_ADMA (1<<6) +/* Controller can only DMA from 32-bit aligned addresses */ +#define SDHCI_QUIRK_32BIT_DMA_ADDR (1<<7) +/* Controller can only DMA chunk sizes that are a multiple of 32 bits */ +#define SDHCI_QUIRK_32BIT_DMA_SIZE (1<<8) +/* Controller can only ADMA chunks that are a multiple of 32 bits */ +#define SDHCI_QUIRK_32BIT_ADMA_SIZE (1<<9) +/* Controller needs to be reset after each request to stay stable */ +#define SDHCI_QUIRK_RESET_AFTER_REQUEST (1<<10) +/* Controller needs voltage and power writes to happen separately */ +#define SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER (1<<11) +/* Controller provides an incorrect timeout value for transfers */ +#define SDHCI_QUIRK_BROKEN_TIMEOUT_VAL (1<<12) + + int irq; /* Device IRQ */ + void __iomem * ioaddr; /* Mapped address */ + + const struct sdhci_ops *ops; /* Low level hw interface */ + + /* Internal data */ struct mmc_host *mmc; /* MMC structure */ #ifdef CONFIG_LEDS_CLASS @@ -176,7 +221,11 @@ struct sdhci_host { int flags; /* Host attributes */ #define SDHCI_USE_DMA (1<<0) /* Host is DMA capable */ -#define SDHCI_REQ_USE_DMA (1<<1) /* Use DMA for this req. */ +#define SDHCI_USE_ADMA (1<<1) /* Host is ADMA capable */ +#define SDHCI_REQ_USE_DMA (1<<2) /* Use DMA for this req. */ +#define SDHCI_DEVICE_DEAD (1<<3) /* Device unresponsive */ + + unsigned int version; /* SDHCI spec. version */ unsigned int max_clk; /* Max possible freq (MHz) */ unsigned int timeout_clk; /* Timeout freq (KHz) */ @@ -194,22 +243,41 @@ struct sdhci_host { int offset; /* Offset into current sg */ int remain; /* Bytes left in current */ - int irq; /* Device IRQ */ - int bar; /* PCI BAR index */ - unsigned long addr; /* Bus address */ - void __iomem * ioaddr; /* Mapped address */ + int sg_count; /* Mapped sg entries */ + + u8 *adma_desc; /* ADMA descriptor table */ + u8 *align_buffer; /* Bounce buffer */ + + dma_addr_t adma_addr; /* Mapped ADMA descr. 
table */ + dma_addr_t align_addr; /* Mapped bounce buffer */ struct tasklet_struct card_tasklet; /* Tasklet structures */ struct tasklet_struct finish_tasklet; struct timer_list timer; /* Timer for timeouts */ -}; -struct sdhci_chip { - struct pci_dev *pdev; + unsigned long private[0] ____cacheline_aligned; +}; - unsigned long quirks; - int num_slots; /* Slots on controller */ - struct sdhci_host *hosts[0]; /* Pointers to hosts */ +struct sdhci_ops { + int (*enable_dma)(struct sdhci_host *host); }; + + +extern struct sdhci_host *sdhci_alloc_host(struct device *dev, + size_t priv_size); +extern void sdhci_free_host(struct sdhci_host *host); + +static inline void *sdhci_priv(struct sdhci_host *host) +{ + return (void *)host->private; +} + +extern int sdhci_add_host(struct sdhci_host *host); +extern void sdhci_remove_host(struct sdhci_host *host, int dead); + +#ifdef CONFIG_PM +extern int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state); +extern int sdhci_resume_host(struct sdhci_host *host); +#endif diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c new file mode 100644 index 000000000000..f99e9f721629 --- /dev/null +++ b/drivers/mmc/host/sdricoh_cs.c @@ -0,0 +1,575 @@ +/* + * sdricoh_cs.c - driver for Ricoh Secure Digital Card Readers that can be + * found on some Ricoh RL5c476 II cardbus bridge + * + * Copyright (C) 2006 - 2008 Sascha Sommer <saschasommer@freenet.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + */ + +/* +#define DEBUG +#define VERBOSE_DEBUG +*/ +#include <linux/delay.h> +#include <linux/highmem.h> +#include <linux/pci.h> +#include <linux/ioport.h> +#include <linux/scatterlist.h> +#include <linux/version.h> + +#include <pcmcia/cs_types.h> +#include <pcmcia/cs.h> +#include <pcmcia/cistpl.h> +#include <pcmcia/ds.h> +#include <linux/io.h> + +#include <linux/mmc/host.h> + +#define DRIVER_NAME "sdricoh_cs" + +static unsigned int switchlocked; + +/* i/o region */ +#define SDRICOH_PCI_REGION 0 +#define SDRICOH_PCI_REGION_SIZE 0x1000 + +/* registers */ +#define R104_VERSION 0x104 +#define R200_CMD 0x200 +#define R204_CMD_ARG 0x204 +#define R208_DATAIO 0x208 +#define R20C_RESP 0x20c +#define R21C_STATUS 0x21c +#define R2E0_INIT 0x2e0 +#define R2E4_STATUS_RESP 0x2e4 +#define R2F0_RESET 0x2f0 +#define R224_MODE 0x224 +#define R226_BLOCKSIZE 0x226 +#define R228_POWER 0x228 +#define R230_DATA 0x230 + +/* flags for the R21C_STATUS register */ +#define STATUS_CMD_FINISHED 0x00000001 +#define STATUS_TRANSFER_FINISHED 0x00000004 +#define STATUS_CARD_INSERTED 0x00000020 +#define STATUS_CARD_LOCKED 0x00000080 +#define STATUS_CMD_TIMEOUT 0x00400000 +#define STATUS_READY_TO_READ 0x01000000 +#define STATUS_READY_TO_WRITE 0x02000000 +#define STATUS_BUSY 0x40000000 + +/* timeouts */ +#define INIT_TIMEOUT 100 +#define CMD_TIMEOUT 100000 +#define TRANSFER_TIMEOUT 100000 +#define BUSY_TIMEOUT 32767 + +/* list of supported pcmcia devices */ +static struct pcmcia_device_id pcmcia_ids[] = { + /* vendor and device strings followed by their crc32 hashes */ + PCMCIA_DEVICE_PROD_ID12("RICOH", "Bay1Controller", 0xd9f522ed, + 0xc3901202), + PCMCIA_DEVICE_NULL, +}; + +MODULE_DEVICE_TABLE(pcmcia, pcmcia_ids); + +/* mmc privdata */ +struct sdricoh_host { + struct device *dev; + struct mmc_host *mmc; /* MMC structure */ + unsigned char __iomem *iobase; + struct pci_dev *pci_dev; + int app_cmd; +}; + +/***************** register i/o helper functions *****************************/ + +static inline unsigned int sdricoh_readl(struct sdricoh_host *host, + unsigned int reg) +{ + unsigned int value = readl(host->iobase + reg); + dev_vdbg(host->dev, "rl %x 0x%x\n", reg, value); + return value; +} + +static inline void sdricoh_writel(struct sdricoh_host *host, unsigned int reg, + unsigned int value) +{ + writel(value, host->iobase + reg); + dev_vdbg(host->dev, "wl %x 0x%x\n", reg, value); + +} + +static inline unsigned int sdricoh_readw(struct sdricoh_host *host, + unsigned int reg) +{ + unsigned int value = readw(host->iobase + reg); + dev_vdbg(host->dev, "rb %x 0x%x\n", reg, value); + return value; +} + +static inline void sdricoh_writew(struct sdricoh_host *host, unsigned int reg, + unsigned short value) +{ + writew(value, host->iobase + reg); + dev_vdbg(host->dev, "ww %x 0x%x\n", reg, value); +} + +static inline unsigned int sdricoh_readb(struct sdricoh_host *host, + unsigned int reg) +{ + unsigned int value = readb(host->iobase + reg); + dev_vdbg(host->dev, "rb %x 0x%x\n", reg, value); + return value; +} + +static int sdricoh_query_status(struct sdricoh_host *host, unsigned int wanted, + unsigned int timeout){ + unsigned int loop; + unsigned int status = 0; + struct device *dev = host->dev; + for (loop = 0; loop < timeout; loop++) { + status = sdricoh_readl(host, R21C_STATUS); + sdricoh_writel(host, R2E4_STATUS_RESP, status); + if (status & wanted) + break; + } + + if (loop == timeout) { + dev_err(dev, "query_status: timeout waiting for %x\n", wanted); + return -ETIMEDOUT; + } + + /* do not do this check in the loop 
as some commands fail otherwise */ + if (status & 0x7F0000) { + dev_err(dev, "waiting for status bit %x failed\n", wanted); + return -EINVAL; + } + return 0; + +} + +static int sdricoh_mmc_cmd(struct sdricoh_host *host, unsigned char opcode, + unsigned int arg) +{ + unsigned int status; + int result = 0; + unsigned int loop = 0; + /* reset status reg? */ + sdricoh_writel(host, R21C_STATUS, 0x18); + /* fill parameters */ + sdricoh_writel(host, R204_CMD_ARG, arg); + sdricoh_writel(host, R200_CMD, (0x10000 << 8) | opcode); + /* wait for command completion */ + if (opcode) { + for (loop = 0; loop < CMD_TIMEOUT; loop++) { + status = sdricoh_readl(host, R21C_STATUS); + sdricoh_writel(host, R2E4_STATUS_RESP, status); + if (status & STATUS_CMD_FINISHED) + break; + } + /* don't check for timeout in the loop it is not always + reset correctly + */ + if (loop == CMD_TIMEOUT || status & STATUS_CMD_TIMEOUT) + result = -ETIMEDOUT; + + } + + return result; + +} + +static int sdricoh_reset(struct sdricoh_host *host) +{ + dev_dbg(host->dev, "reset\n"); + sdricoh_writel(host, R2F0_RESET, 0x10001); + sdricoh_writel(host, R2E0_INIT, 0x10000); + if (sdricoh_readl(host, R2E0_INIT) != 0x10000) + return -EIO; + sdricoh_writel(host, R2E0_INIT, 0x10007); + + sdricoh_writel(host, R224_MODE, 0x2000000); + sdricoh_writel(host, R228_POWER, 0xe0); + + + /* status register ? */ + sdricoh_writel(host, R21C_STATUS, 0x18); + + return 0; +} + +static int sdricoh_blockio(struct sdricoh_host *host, int read, + u8 *buf, int len) +{ + int size; + u32 data = 0; + /* wait until the data is available */ + if (read) { + if (sdricoh_query_status(host, STATUS_READY_TO_READ, + TRANSFER_TIMEOUT)) + return -ETIMEDOUT; + sdricoh_writel(host, R21C_STATUS, 0x18); + /* read data */ + while (len) { + data = sdricoh_readl(host, R230_DATA); + size = min(len, 4); + len -= size; + while (size) { + *buf = data & 0xFF; + buf++; + data >>= 8; + size--; + } + } + } else { + if (sdricoh_query_status(host, STATUS_READY_TO_WRITE, + TRANSFER_TIMEOUT)) + return -ETIMEDOUT; + sdricoh_writel(host, R21C_STATUS, 0x18); + /* write data */ + while (len) { + size = min(len, 4); + len -= size; + while (size) { + data >>= 8; + data |= (u32)*buf << 24; + buf++; + size--; + } + sdricoh_writel(host, R230_DATA, data); + } + } + + if (len) + return -EIO; + + return 0; +} + +static void sdricoh_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct sdricoh_host *host = mmc_priv(mmc); + struct mmc_command *cmd = mrq->cmd; + struct mmc_data *data = cmd->data; + struct device *dev = host->dev; + unsigned char opcode = cmd->opcode; + int i; + + dev_dbg(dev, "=============================\n"); + dev_dbg(dev, "sdricoh_request opcode=%i\n", opcode); + + sdricoh_writel(host, R21C_STATUS, 0x18); + + /* MMC_APP_CMDs need some special handling */ + if (host->app_cmd) { + opcode |= 64; + host->app_cmd = 0; + } else if (opcode == 55) + host->app_cmd = 1; + + /* read/write commands seem to require this */ + if (data) { + sdricoh_writew(host, R226_BLOCKSIZE, data->blksz); + sdricoh_writel(host, R208_DATAIO, 0); + } + + cmd->error = sdricoh_mmc_cmd(host, opcode, cmd->arg); + + /* read response buffer */ + if (cmd->flags & MMC_RSP_PRESENT) { + if (cmd->flags & MMC_RSP_136) { + /* CRC is stripped so we need to do some shifting. 
*/ + for (i = 0; i < 4; i++) { + cmd->resp[i] = + sdricoh_readl(host, + R20C_RESP + (3 - i) * 4) << 8; + if (i != 3) + cmd->resp[i] |= + sdricoh_readb(host, R20C_RESP + + (3 - i) * 4 - 1); + } + } else + cmd->resp[0] = sdricoh_readl(host, R20C_RESP); + } + + /* transfer data */ + if (data && cmd->error == 0) { + dev_dbg(dev, "transfer: blksz %i blocks %i sg_len %i " + "sg length %i\n", data->blksz, data->blocks, + data->sg_len, data->sg->length); + + /* enter data reading mode */ + sdricoh_writel(host, R21C_STATUS, 0x837f031e); + for (i = 0; i < data->blocks; i++) { + size_t len = data->blksz; + u8 *buf; + struct page *page; + int result; + page = sg_page(data->sg); + + buf = kmap(page) + data->sg->offset + (len * i); + result = + sdricoh_blockio(host, + data->flags & MMC_DATA_READ, buf, len); + kunmap(page); + flush_dcache_page(page); + if (result) { + dev_err(dev, "sdricoh_request: cmd %i " + "block transfer failed\n", cmd->opcode); + cmd->error = result; + break; + } else + data->bytes_xfered += len; + } + + sdricoh_writel(host, R208_DATAIO, 1); + + if (sdricoh_query_status(host, STATUS_TRANSFER_FINISHED, + TRANSFER_TIMEOUT)) { + dev_err(dev, "sdricoh_request: transfer end error\n"); + cmd->error = -EINVAL; + } + } + /* FIXME check busy flag */ + + mmc_request_done(mmc, mrq); + dev_dbg(dev, "=============================\n"); +} + +static void sdricoh_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct sdricoh_host *host = mmc_priv(mmc); + dev_dbg(host->dev, "set_ios\n"); + + if (ios->power_mode == MMC_POWER_ON) { + sdricoh_writel(host, R228_POWER, 0xc0e0); + + if (ios->bus_width == MMC_BUS_WIDTH_4) { + sdricoh_writel(host, R224_MODE, 0x2000300); + sdricoh_writel(host, R228_POWER, 0x40e0); + } else { + sdricoh_writel(host, R224_MODE, 0x2000340); + } + + } else if (ios->power_mode == MMC_POWER_UP) { + sdricoh_writel(host, R224_MODE, 0x2000320); + sdricoh_writel(host, R228_POWER, 0xe0); + } +} + +static int sdricoh_get_ro(struct mmc_host *mmc) +{ + struct sdricoh_host *host = mmc_priv(mmc); + unsigned int status; + + status = sdricoh_readl(host, R21C_STATUS); + sdricoh_writel(host, R2E4_STATUS_RESP, status); + + /* some notebooks seem to have the locked flag switched */ + if (switchlocked) + return !(status & STATUS_CARD_LOCKED); + + return (status & STATUS_CARD_LOCKED); +} + +static struct mmc_host_ops sdricoh_ops = { + .request = sdricoh_request, + .set_ios = sdricoh_set_ios, + .get_ro = sdricoh_get_ro, +}; + +/* initialize the control and register it to the mmc framework */ +static int sdricoh_init_mmc(struct pci_dev *pci_dev, + struct pcmcia_device *pcmcia_dev) +{ + int result = 0; + void __iomem *iobase = NULL; + struct mmc_host *mmc = NULL; + struct sdricoh_host *host = NULL; + struct device *dev = &pcmcia_dev->dev; + /* map iomem */ + if (pci_resource_len(pci_dev, SDRICOH_PCI_REGION) != + SDRICOH_PCI_REGION_SIZE) { + dev_dbg(dev, "unexpected pci resource len\n"); + return -ENODEV; + } + iobase = + pci_iomap(pci_dev, SDRICOH_PCI_REGION, SDRICOH_PCI_REGION_SIZE); + if (!iobase) { + dev_err(dev, "unable to map iobase\n"); + return -ENODEV; + } + /* check version? 
*/ + if (readl(iobase + R104_VERSION) != 0x4000) { + dev_dbg(dev, "no supported mmc controller found\n"); + result = -ENODEV; + goto err; + } + /* allocate privdata */ + mmc = pcmcia_dev->priv = + mmc_alloc_host(sizeof(struct sdricoh_host), &pcmcia_dev->dev); + if (!mmc) { + dev_err(dev, "mmc_alloc_host failed\n"); + result = -ENOMEM; + goto err; + } + host = mmc_priv(mmc); + + host->iobase = iobase; + host->dev = dev; + host->pci_dev = pci_dev; + + mmc->ops = &sdricoh_ops; + + /* FIXME: frequency and voltage handling is done by the controller + */ + mmc->f_min = 450000; + mmc->f_max = 24000000; + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + mmc->caps |= MMC_CAP_4_BIT_DATA; + + mmc->max_seg_size = 1024 * 512; + mmc->max_blk_size = 512; + + /* reset the controler */ + if (sdricoh_reset(host)) { + dev_dbg(dev, "could not reset\n"); + result = -EIO; + goto err; + + } + + result = mmc_add_host(mmc); + + if (!result) { + dev_dbg(dev, "mmc host registered\n"); + return 0; + } + +err: + if (iobase) + iounmap(iobase); + if (mmc) + mmc_free_host(mmc); + + return result; +} + +/* search for supported mmc controllers */ +static int sdricoh_pcmcia_probe(struct pcmcia_device *pcmcia_dev) +{ + struct pci_dev *pci_dev = NULL; + + dev_info(&pcmcia_dev->dev, "Searching MMC controller for pcmcia device" + " %s %s ...\n", pcmcia_dev->prod_id[0], pcmcia_dev->prod_id[1]); + + /* search pci cardbus bridge that contains the mmc controler */ + /* the io region is already claimed by yenta_socket... */ + while ((pci_dev = + pci_get_device(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, + pci_dev))) { + /* try to init the device */ + if (!sdricoh_init_mmc(pci_dev, pcmcia_dev)) { + dev_info(&pcmcia_dev->dev, "MMC controller found\n"); + return 0; + } + + } + dev_err(&pcmcia_dev->dev, "No MMC controller was found.\n"); + return -ENODEV; +} + +static void sdricoh_pcmcia_detach(struct pcmcia_device *link) +{ + struct mmc_host *mmc = link->priv; + + dev_dbg(&link->dev, "detach\n"); + + /* remove mmc host */ + if (mmc) { + struct sdricoh_host *host = mmc_priv(mmc); + mmc_remove_host(mmc); + pci_iounmap(host->pci_dev, host->iobase); + pci_dev_put(host->pci_dev); + mmc_free_host(mmc); + } + pcmcia_disable_device(link); + +} + +#ifdef CONFIG_PM +static int sdricoh_pcmcia_suspend(struct pcmcia_device *link) +{ + struct mmc_host *mmc = link->priv; + dev_dbg(&link->dev, "suspend\n"); + mmc_suspend_host(mmc, PMSG_SUSPEND); + return 0; +} + +static int sdricoh_pcmcia_resume(struct pcmcia_device *link) +{ + struct mmc_host *mmc = link->priv; + dev_dbg(&link->dev, "resume\n"); + sdricoh_reset(mmc_priv(mmc)); + mmc_resume_host(mmc); + return 0; +} +#else +#define sdricoh_pcmcia_suspend NULL +#define sdricoh_pcmcia_resume NULL +#endif + +static struct pcmcia_driver sdricoh_driver = { + .drv = { + .name = DRIVER_NAME, + }, + .probe = sdricoh_pcmcia_probe, + .remove = sdricoh_pcmcia_detach, + .id_table = pcmcia_ids, + .suspend = sdricoh_pcmcia_suspend, + .resume = sdricoh_pcmcia_resume, +}; + +/*****************************************************************************\ + * * + * Driver init/exit * + * * +\*****************************************************************************/ + +static int __init sdricoh_drv_init(void) +{ + return pcmcia_register_driver(&sdricoh_driver); +} + +static void __exit sdricoh_drv_exit(void) +{ + pcmcia_unregister_driver(&sdricoh_driver); +} + +module_init(sdricoh_drv_init); +module_exit(sdricoh_drv_exit); + +module_param(switchlocked, uint, 0444); + +MODULE_AUTHOR("Sascha Sommer 
<saschasommer@freenet.de>"); +MODULE_DESCRIPTION("Ricoh PCMCIA Secure Digital Interface driver"); +MODULE_LICENSE("GPL"); + +MODULE_PARM_DESC(switchlocked, "Switch the cards locked status." + "Use this when unlocked cards are shown readonly (default 0)"); diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c index 1c14a186f000..13844843e8de 100644 --- a/drivers/mmc/host/tifm_sd.c +++ b/drivers/mmc/host/tifm_sd.c @@ -973,7 +973,7 @@ static int tifm_sd_probe(struct tifm_dev *sock) mmc->ops = &tifm_sd_ops; mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; - mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE; + mmc->caps = MMC_CAP_4_BIT_DATA; mmc->f_min = 20000000 / 60; mmc->f_max = 24000000; diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c index c303e7f57ab4..adda37952032 100644 --- a/drivers/mmc/host/wbsd.c +++ b/drivers/mmc/host/wbsd.c @@ -68,16 +68,16 @@ static const int unlock_codes[] = { 0x83, 0x87 }; static const int valid_ids[] = { 0x7112, - }; +}; #ifdef CONFIG_PNP -static unsigned int nopnp = 0; +static unsigned int param_nopnp = 0; #else -static const unsigned int nopnp = 1; +static const unsigned int param_nopnp = 1; #endif -static unsigned int io = 0x248; -static unsigned int irq = 6; -static int dma = 2; +static unsigned int param_io = 0x248; +static unsigned int param_irq = 6; +static int param_dma = 2; /* * Basic functions @@ -939,7 +939,7 @@ static int wbsd_get_ro(struct mmc_host *mmc) spin_unlock_bh(&host->lock); - return csr & WBSD_WRPT; + return !!(csr & WBSD_WRPT); } static const struct mmc_host_ops wbsd_ops = { @@ -1219,7 +1219,7 @@ static int __devinit wbsd_alloc_mmc(struct device *dev) mmc->f_min = 375000; mmc->f_max = 24000000; mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; - mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE; + mmc->caps = MMC_CAP_4_BIT_DATA; spin_lock_init(&host->lock); @@ -1420,7 +1420,7 @@ kfree: dma_unmap_single(mmc_dev(host->mmc), host->dma_addr, WBSD_DMA_SIZE, DMA_BIDIRECTIONAL); - host->dma_addr = (dma_addr_t)NULL; + host->dma_addr = 0; kfree(host->dma_buffer); host->dma_buffer = NULL; @@ -1445,7 +1445,7 @@ static void wbsd_release_dma(struct wbsd_host *host) host->dma = -1; host->dma_buffer = NULL; - host->dma_addr = (dma_addr_t)NULL; + host->dma_addr = 0; } /* @@ -1765,7 +1765,7 @@ static void __devexit wbsd_shutdown(struct device *dev, int pnp) static int __devinit wbsd_probe(struct platform_device *dev) { /* Use the module parameters for resources */ - return wbsd_init(&dev->dev, io, irq, dma, 0); + return wbsd_init(&dev->dev, param_io, param_irq, param_dma, 0); } static int __devexit wbsd_remove(struct platform_device *dev) @@ -1979,14 +1979,14 @@ static int __init wbsd_drv_init(void) #ifdef CONFIG_PNP - if (!nopnp) { + if (!param_nopnp) { result = pnp_register_driver(&wbsd_pnp_driver); if (result < 0) return result; } #endif /* CONFIG_PNP */ - if (nopnp) { + if (param_nopnp) { result = platform_driver_register(&wbsd_driver); if (result < 0) return result; @@ -2012,12 +2012,12 @@ static void __exit wbsd_drv_exit(void) { #ifdef CONFIG_PNP - if (!nopnp) + if (!param_nopnp) pnp_unregister_driver(&wbsd_pnp_driver); #endif /* CONFIG_PNP */ - if (nopnp) { + if (param_nopnp) { platform_device_unregister(wbsd_device); platform_driver_unregister(&wbsd_driver); @@ -2029,11 +2029,11 @@ static void __exit wbsd_drv_exit(void) module_init(wbsd_drv_init); module_exit(wbsd_drv_exit); #ifdef CONFIG_PNP -module_param(nopnp, uint, 0444); +module_param_named(nopnp, param_nopnp, uint, 0444); #endif -module_param(io, uint, 0444); 
-module_param(irq, uint, 0444); -module_param(dma, int, 0444); +module_param_named(io, param_io, uint, 0444); +module_param_named(irq, param_irq, uint, 0444); +module_param_named(dma, param_dma, int, 0444); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>"); diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig index 17bc87a43ff4..d2fbc2964523 100644 --- a/drivers/mtd/maps/Kconfig +++ b/drivers/mtd/maps/Kconfig @@ -258,13 +258,6 @@ config MTD_ALCHEMY help Flash memory access on AMD Alchemy Pb/Db/RDK Reference Boards -config MTD_MTX1 - tristate "4G Systems MTX-1 Flash device" - depends on MIPS_MTX1 && MTD_CFI - help - Flash memory access on 4G Systems MTX-1 Board. If you have one of - these boards and would like to use the flash chips on it, say 'Y'. - config MTD_DILNETPC tristate "CFI Flash device mapped on DIL/Net PC" depends on X86 && MTD_CONCAT && MTD_PARTITIONS && MTD_CFI_INTELEXT diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile index 957fb5f70f5e..c6ce8673dab2 100644 --- a/drivers/mtd/maps/Makefile +++ b/drivers/mtd/maps/Makefile @@ -65,5 +65,4 @@ obj-$(CONFIG_MTD_DMV182) += dmv182.o obj-$(CONFIG_MTD_SHARP_SL) += sharpsl-flash.o obj-$(CONFIG_MTD_PLATRAM) += plat-ram.o obj-$(CONFIG_MTD_OMAP_NOR) += omap_nor.o -obj-$(CONFIG_MTD_MTX1) += mtx-1_flash.o obj-$(CONFIG_MTD_INTEL_VR_NOR) += intel_vr_nor.o diff --git a/drivers/mtd/maps/mtx-1_flash.c b/drivers/mtd/maps/mtx-1_flash.c deleted file mode 100644 index 2a8fde9b92f0..000000000000 --- a/drivers/mtd/maps/mtx-1_flash.c +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Flash memory access on 4G Systems MTX-1 boards - * - * $Id: mtx-1_flash.c,v 1.2 2005/11/07 11:14:27 gleixner Exp $ - * - * (C) 2005 Bruno Randolf <bruno.randolf@4g-systems.biz> - * (C) 2005 Joern Engel <joern@wohnheim.fh-wedel.de> - * - */ - -#include <linux/module.h> -#include <linux/types.h> -#include <linux/init.h> -#include <linux/kernel.h> - -#include <linux/mtd/mtd.h> -#include <linux/mtd/map.h> -#include <linux/mtd/partitions.h> - -#include <asm/io.h> - -static struct map_info mtx1_map = { - .name = "MTX-1 flash", - .bankwidth = 4, - .size = 0x2000000, - .phys = 0x1E000000, -}; - -static struct mtd_partition mtx1_partitions[] = { - { - .name = "filesystem", - .size = 0x01C00000, - .offset = 0, - },{ - .name = "yamon", - .size = 0x00100000, - .offset = MTDPART_OFS_APPEND, - .mask_flags = MTD_WRITEABLE, - },{ - .name = "kernel", - .size = 0x002c0000, - .offset = MTDPART_OFS_APPEND, - },{ - .name = "yamon env", - .size = 0x00040000, - .offset = MTDPART_OFS_APPEND, - } -}; - -static struct mtd_info *mtx1_mtd; - -int __init mtx1_mtd_init(void) -{ - int ret = -ENXIO; - - simple_map_init(&mtx1_map); - - mtx1_map.virt = ioremap(mtx1_map.phys, mtx1_map.size); - if (!mtx1_map.virt) - return -EIO; - - mtx1_mtd = do_map_probe("cfi_probe", &mtx1_map); - if (!mtx1_mtd) - goto err; - - mtx1_mtd->owner = THIS_MODULE; - - ret = add_mtd_partitions(mtx1_mtd, mtx1_partitions, - ARRAY_SIZE(mtx1_partitions)); - if (ret) - goto err; - - return 0; - -err: - iounmap(mtx1_map.virt); - return ret; -} - -static void __exit mtx1_mtd_cleanup(void) -{ - if (mtx1_mtd) { - del_mtd_partitions(mtx1_mtd); - map_destroy(mtx1_mtd); - } - if (mtx1_map.virt) - iounmap(mtx1_map.virt); -} - -module_init(mtx1_mtd_init); -module_exit(mtx1_mtd_cleanup); - -MODULE_AUTHOR("Bruno Randolf <bruno.randolf@4g-systems.biz>"); -MODULE_DESCRIPTION("MTX-1 flash map"); -MODULE_LICENSE("GPL"); diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 45a41b597da9..2683ee32fc11 
100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -1884,7 +1884,6 @@ config NE_H8300 Say Y here if you want to use the NE2000 compatible controller on the Renesas H8/300 processor. -source "drivers/net/fec_8xx/Kconfig" source "drivers/net/fs_enet/Kconfig" endif # NET_ETHERNET diff --git a/drivers/net/Makefile b/drivers/net/Makefile index dcbfe8421154..9010e58da0f2 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -217,7 +217,6 @@ obj-$(CONFIG_SMC91X) += smc91x.o obj-$(CONFIG_SMC911X) += smc911x.o obj-$(CONFIG_BFIN_MAC) += bfin_mac.o obj-$(CONFIG_DM9000) += dm9000.o -obj-$(CONFIG_FEC_8XX) += fec_8xx/ obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o obj-$(CONFIG_MLX4_CORE) += mlx4/ diff --git a/drivers/net/fec_8xx/Kconfig b/drivers/net/fec_8xx/Kconfig deleted file mode 100644 index afb34ded26ee..000000000000 --- a/drivers/net/fec_8xx/Kconfig +++ /dev/null @@ -1,20 +0,0 @@ -config FEC_8XX - tristate "Motorola 8xx FEC driver" - depends on 8XX - select MII - -config FEC_8XX_GENERIC_PHY - bool "Support any generic PHY" - depends on FEC_8XX - default y - -config FEC_8XX_DM9161_PHY - bool "Support DM9161 PHY" - depends on FEC_8XX - default n - -config FEC_8XX_LXT971_PHY - bool "Support LXT971/LXT972 PHY" - depends on FEC_8XX - default n - diff --git a/drivers/net/fec_8xx/Makefile b/drivers/net/fec_8xx/Makefile deleted file mode 100644 index 70c54f8c48e5..000000000000 --- a/drivers/net/fec_8xx/Makefile +++ /dev/null @@ -1,12 +0,0 @@ -# -# Makefile for the Motorola 8xx FEC ethernet controller -# - -obj-$(CONFIG_FEC_8XX) += fec_8xx.o - -fec_8xx-objs := fec_main.o fec_mii.o - -# the platform instantatiation objects -ifeq ($(CONFIG_NETTA),y) -fec_8xx-objs += fec_8xx-netta.o -endif diff --git a/drivers/net/fec_8xx/fec_8xx-netta.c b/drivers/net/fec_8xx/fec_8xx-netta.c deleted file mode 100644 index 79deee222e28..000000000000 --- a/drivers/net/fec_8xx/fec_8xx-netta.c +++ /dev/null @@ -1,151 +0,0 @@ -/* - * FEC instantatiation file for NETTA - */ - -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/string.h> -#include <linux/ptrace.h> -#include <linux/errno.h> -#include <linux/ioport.h> -#include <linux/slab.h> -#include <linux/interrupt.h> -#include <linux/pci.h> -#include <linux/init.h> -#include <linux/delay.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/spinlock.h> -#include <linux/mii.h> -#include <linux/ethtool.h> -#include <linux/bitops.h> - -#include <asm/8xx_immap.h> -#include <asm/pgtable.h> -#include <asm/mpc8xx.h> -#include <asm/irq.h> -#include <asm/uaccess.h> -#include <asm/cpm1.h> - -#include "fec_8xx.h" - -/*************************************************/ - -static struct fec_platform_info fec1_info = { - .fec_no = 0, - .use_mdio = 1, - .phy_addr = 8, - .fec_irq = SIU_LEVEL1, - .phy_irq = CPM_IRQ_OFFSET + CPMVEC_PIO_PC6, - .rx_ring = 128, - .tx_ring = 16, - .rx_copybreak = 240, - .use_napi = 1, - .napi_weight = 17, -}; - -static struct fec_platform_info fec2_info = { - .fec_no = 1, - .use_mdio = 1, - .phy_addr = 2, - .fec_irq = SIU_LEVEL3, - .phy_irq = CPM_IRQ_OFFSET + CPMVEC_PIO_PC7, - .rx_ring = 128, - .tx_ring = 16, - .rx_copybreak = 240, - .use_napi = 1, - .napi_weight = 17, -}; - -static struct net_device *fec1_dev; -static struct net_device *fec2_dev; - -/* XXX custom u-boot & Linux startup needed */ -extern const char *__fw_getenv(const char *var); - -/* access ports */ -#define setbits32(_addr, _v) 
__fec_out32(&(_addr), __fec_in32(&(_addr)) | (_v)) -#define clrbits32(_addr, _v) __fec_out32(&(_addr), __fec_in32(&(_addr)) & ~(_v)) - -#define setbits16(_addr, _v) __fec_out16(&(_addr), __fec_in16(&(_addr)) | (_v)) -#define clrbits16(_addr, _v) __fec_out16(&(_addr), __fec_in16(&(_addr)) & ~(_v)) - -int fec_8xx_platform_init(void) -{ - immap_t *immap = (immap_t *)IMAP_ADDR; - bd_t *bd = (bd_t *) __res; - const char *s; - char *e; - int i; - - /* use MDC for MII */ - setbits16(immap->im_ioport.iop_pdpar, 0x0080); - clrbits16(immap->im_ioport.iop_pddir, 0x0080); - - /* configure FEC1 pins */ - setbits16(immap->im_ioport.iop_papar, 0xe810); - setbits16(immap->im_ioport.iop_padir, 0x0810); - clrbits16(immap->im_ioport.iop_padir, 0xe000); - - setbits32(immap->im_cpm.cp_pbpar, 0x00000001); - clrbits32(immap->im_cpm.cp_pbdir, 0x00000001); - - setbits32(immap->im_cpm.cp_cptr, 0x00000100); - clrbits32(immap->im_cpm.cp_cptr, 0x00000050); - - clrbits16(immap->im_ioport.iop_pcpar, 0x0200); - clrbits16(immap->im_ioport.iop_pcdir, 0x0200); - clrbits16(immap->im_ioport.iop_pcso, 0x0200); - setbits16(immap->im_ioport.iop_pcint, 0x0200); - - /* configure FEC2 pins */ - setbits32(immap->im_cpm.cp_pepar, 0x00039620); - setbits32(immap->im_cpm.cp_pedir, 0x00039620); - setbits32(immap->im_cpm.cp_peso, 0x00031000); - clrbits32(immap->im_cpm.cp_peso, 0x00008620); - - setbits32(immap->im_cpm.cp_cptr, 0x00000080); - clrbits32(immap->im_cpm.cp_cptr, 0x00000028); - - clrbits16(immap->im_ioport.iop_pcpar, 0x0200); - clrbits16(immap->im_ioport.iop_pcdir, 0x0200); - clrbits16(immap->im_ioport.iop_pcso, 0x0200); - setbits16(immap->im_ioport.iop_pcint, 0x0200); - - /* fill up */ - fec1_info.sys_clk = bd->bi_intfreq; - fec2_info.sys_clk = bd->bi_intfreq; - - s = __fw_getenv("ethaddr"); - if (s != NULL) { - for (i = 0; i < 6; i++) { - fec1_info.macaddr[i] = simple_strtoul(s, &e, 16); - if (*e) - s = e + 1; - } - } - - s = __fw_getenv("eth1addr"); - if (s != NULL) { - for (i = 0; i < 6; i++) { - fec2_info.macaddr[i] = simple_strtoul(s, &e, 16); - if (*e) - s = e + 1; - } - } - - fec_8xx_init_one(&fec1_info, &fec1_dev); - fec_8xx_init_one(&fec2_info, &fec2_dev); - - return fec1_dev != NULL && fec2_dev != NULL ? 0 : -1; -} - -void fec_8xx_platform_cleanup(void) -{ - if (fec2_dev != NULL) - fec_8xx_cleanup_one(fec2_dev); - - if (fec1_dev != NULL) - fec_8xx_cleanup_one(fec1_dev); -} diff --git a/drivers/net/fec_8xx/fec_8xx.h b/drivers/net/fec_8xx/fec_8xx.h deleted file mode 100644 index f3b1c6fbba8b..000000000000 --- a/drivers/net/fec_8xx/fec_8xx.h +++ /dev/null @@ -1,220 +0,0 @@ -#ifndef FEC_8XX_H -#define FEC_8XX_H - -#include <linux/mii.h> -#include <linux/netdevice.h> - -#include <linux/types.h> - -/* HW info */ - -/* CRC polynomium used by the FEC for the multicast group filtering */ -#define FEC_CRC_POLY 0x04C11DB7 - -#define MII_ADVERTISE_HALF (ADVERTISE_100HALF | \ - ADVERTISE_10HALF | ADVERTISE_CSMA) -#define MII_ADVERTISE_ALL (ADVERTISE_100FULL | \ - ADVERTISE_10FULL | MII_ADVERTISE_HALF) - -/* Interrupt events/masks. 
-*/ -#define FEC_ENET_HBERR 0x80000000U /* Heartbeat error */ -#define FEC_ENET_BABR 0x40000000U /* Babbling receiver */ -#define FEC_ENET_BABT 0x20000000U /* Babbling transmitter */ -#define FEC_ENET_GRA 0x10000000U /* Graceful stop complete */ -#define FEC_ENET_TXF 0x08000000U /* Full frame transmitted */ -#define FEC_ENET_TXB 0x04000000U /* A buffer was transmitted */ -#define FEC_ENET_RXF 0x02000000U /* Full frame received */ -#define FEC_ENET_RXB 0x01000000U /* A buffer was received */ -#define FEC_ENET_MII 0x00800000U /* MII interrupt */ -#define FEC_ENET_EBERR 0x00400000U /* SDMA bus error */ - -#define FEC_ECNTRL_PINMUX 0x00000004 -#define FEC_ECNTRL_ETHER_EN 0x00000002 -#define FEC_ECNTRL_RESET 0x00000001 - -#define FEC_RCNTRL_BC_REJ 0x00000010 -#define FEC_RCNTRL_PROM 0x00000008 -#define FEC_RCNTRL_MII_MODE 0x00000004 -#define FEC_RCNTRL_DRT 0x00000002 -#define FEC_RCNTRL_LOOP 0x00000001 - -#define FEC_TCNTRL_FDEN 0x00000004 -#define FEC_TCNTRL_HBC 0x00000002 -#define FEC_TCNTRL_GTS 0x00000001 - -/* values for MII phy_status */ - -#define PHY_CONF_ANE 0x0001 /* 1 auto-negotiation enabled */ -#define PHY_CONF_LOOP 0x0002 /* 1 loopback mode enabled */ -#define PHY_CONF_SPMASK 0x00f0 /* mask for speed */ -#define PHY_CONF_10HDX 0x0010 /* 10 Mbit half duplex supported */ -#define PHY_CONF_10FDX 0x0020 /* 10 Mbit full duplex supported */ -#define PHY_CONF_100HDX 0x0040 /* 100 Mbit half duplex supported */ -#define PHY_CONF_100FDX 0x0080 /* 100 Mbit full duplex supported */ - -#define PHY_STAT_LINK 0x0100 /* 1 up - 0 down */ -#define PHY_STAT_FAULT 0x0200 /* 1 remote fault */ -#define PHY_STAT_ANC 0x0400 /* 1 auto-negotiation complete */ -#define PHY_STAT_SPMASK 0xf000 /* mask for speed */ -#define PHY_STAT_10HDX 0x1000 /* 10 Mbit half duplex selected */ -#define PHY_STAT_10FDX 0x2000 /* 10 Mbit full duplex selected */ -#define PHY_STAT_100HDX 0x4000 /* 100 Mbit half duplex selected */ -#define PHY_STAT_100FDX 0x8000 /* 100 Mbit full duplex selected */ - -typedef struct phy_info { - unsigned int id; - const char *name; - void (*startup) (struct net_device * dev); - void (*shutdown) (struct net_device * dev); - void (*ack_int) (struct net_device * dev); -} phy_info_t; - -/* The FEC stores dest/src/type, data, and checksum for receive packets. 
- */ -#define MAX_MTU 1508 /* Allow fullsized pppoe packets over VLAN */ -#define MIN_MTU 46 /* this is data size */ -#define CRC_LEN 4 - -#define PKT_MAXBUF_SIZE (MAX_MTU+ETH_HLEN+CRC_LEN) -#define PKT_MINBUF_SIZE (MIN_MTU+ETH_HLEN+CRC_LEN) - -/* Must be a multiple of 4 */ -#define PKT_MAXBLR_SIZE ((PKT_MAXBUF_SIZE+3) & ~3) -/* This is needed so that invalidate_xxx wont invalidate too much */ -#define ENET_RX_FRSIZE L1_CACHE_ALIGN(PKT_MAXBUF_SIZE) - -/* platform interface */ - -struct fec_platform_info { - int fec_no; /* FEC index */ - int use_mdio; /* use external MII */ - int phy_addr; /* the phy address */ - int fec_irq, phy_irq; /* the irq for the controller */ - int rx_ring, tx_ring; /* number of buffers on rx */ - int sys_clk; /* system clock */ - __u8 macaddr[6]; /* mac address */ - int rx_copybreak; /* limit we copy small frames */ - int use_napi; /* use NAPI */ - int napi_weight; /* NAPI weight */ -}; - -/* forward declaration */ -struct fec; - -struct fec_enet_private { - spinlock_t lock; /* during all ops except TX pckt processing */ - spinlock_t tx_lock; /* during fec_start_xmit and fec_tx */ - struct net_device *dev; - struct napi_struct napi; - int fecno; - struct fec *fecp; - const struct fec_platform_info *fpi; - int rx_ring, tx_ring; - dma_addr_t ring_mem_addr; - void *ring_base; - struct sk_buff **rx_skbuff; - struct sk_buff **tx_skbuff; - cbd_t *rx_bd_base; /* Address of Rx and Tx buffers. */ - cbd_t *tx_bd_base; - cbd_t *dirty_tx; /* ring entries to be free()ed. */ - cbd_t *cur_rx; - cbd_t *cur_tx; - int tx_free; - struct net_device_stats stats; - struct timer_list phy_timer_list; - const struct phy_info *phy; - unsigned int fec_phy_speed; - __u32 msg_enable; - struct mii_if_info mii_if; -}; - -/***************************************************************************/ - -void fec_restart(struct net_device *dev, int duplex, int speed); -void fec_stop(struct net_device *dev); - -/***************************************************************************/ - -int fec_mii_read(struct net_device *dev, int phy_id, int location); -void fec_mii_write(struct net_device *dev, int phy_id, int location, int value); - -int fec_mii_phy_id_detect(struct net_device *dev); -void fec_mii_startup(struct net_device *dev); -void fec_mii_shutdown(struct net_device *dev); -void fec_mii_ack_int(struct net_device *dev); - -void fec_mii_link_status_change_check(struct net_device *dev, int init_media); - -/***************************************************************************/ - -#define FEC1_NO 0x00 -#define FEC2_NO 0x01 -#define FEC3_NO 0x02 - -int fec_8xx_init_one(const struct fec_platform_info *fpi, - struct net_device **devp); -int fec_8xx_cleanup_one(struct net_device *dev); - -/***************************************************************************/ - -#define DRV_MODULE_NAME "fec_8xx" -#define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "0.1" -#define DRV_MODULE_RELDATE "May 6, 2004" - -/***************************************************************************/ - -int fec_8xx_platform_init(void); -void fec_8xx_platform_cleanup(void); - -/***************************************************************************/ - -/* FEC access macros */ -#if defined(CONFIG_8xx) -/* for a 8xx __raw_xxx's are sufficient */ -#define __fec_out32(addr, x) __raw_writel(x, addr) -#define __fec_out16(addr, x) __raw_writew(x, addr) -#define __fec_in32(addr) __raw_readl(addr) -#define __fec_in16(addr) __raw_readw(addr) -#else -/* for others play it safe */ -#define __fec_out32(addr, x) 
out_be32(addr, x) -#define __fec_out16(addr, x) out_be16(addr, x) -#define __fec_in32(addr) in_be32(addr) -#define __fec_in16(addr) in_be16(addr) -#endif - -/* write */ -#define FW(_fecp, _reg, _v) __fec_out32(&(_fecp)->fec_ ## _reg, (_v)) - -/* read */ -#define FR(_fecp, _reg) __fec_in32(&(_fecp)->fec_ ## _reg) - -/* set bits */ -#define FS(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) | (_v)) - -/* clear bits */ -#define FC(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) & ~(_v)) - -/* buffer descriptor access macros */ - -/* write */ -#define CBDW_SC(_cbd, _sc) __fec_out16(&(_cbd)->cbd_sc, (_sc)) -#define CBDW_DATLEN(_cbd, _datlen) __fec_out16(&(_cbd)->cbd_datlen, (_datlen)) -#define CBDW_BUFADDR(_cbd, _bufaddr) __fec_out32(&(_cbd)->cbd_bufaddr, (_bufaddr)) - -/* read */ -#define CBDR_SC(_cbd) __fec_in16(&(_cbd)->cbd_sc) -#define CBDR_DATLEN(_cbd) __fec_in16(&(_cbd)->cbd_datlen) -#define CBDR_BUFADDR(_cbd) __fec_in32(&(_cbd)->cbd_bufaddr) - -/* set bits */ -#define CBDS_SC(_cbd, _sc) CBDW_SC(_cbd, CBDR_SC(_cbd) | (_sc)) - -/* clear bits */ -#define CBDC_SC(_cbd, _sc) CBDW_SC(_cbd, CBDR_SC(_cbd) & ~(_sc)) - -/***************************************************************************/ - -#endif diff --git a/drivers/net/fec_8xx/fec_main.c b/drivers/net/fec_8xx/fec_main.c deleted file mode 100644 index ca8d2e83ab03..000000000000 --- a/drivers/net/fec_8xx/fec_main.c +++ /dev/null @@ -1,1264 +0,0 @@ -/* - * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx. - * - * Copyright (c) 2003 Intracom S.A. - * by Pantelis Antoniou <panto@intracom.gr> - * - * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com> - * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se> - * - * Released under the GPL - */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/string.h> -#include <linux/ptrace.h> -#include <linux/errno.h> -#include <linux/ioport.h> -#include <linux/slab.h> -#include <linux/interrupt.h> -#include <linux/init.h> -#include <linux/delay.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/spinlock.h> -#include <linux/mii.h> -#include <linux/ethtool.h> -#include <linux/bitops.h> -#include <linux/dma-mapping.h> - -#include <asm/8xx_immap.h> -#include <asm/pgtable.h> -#include <asm/mpc8xx.h> -#include <asm/irq.h> -#include <asm/uaccess.h> -#include <asm/cpm1.h> - -#include "fec_8xx.h" - -/*************************************************/ - -#define FEC_MAX_MULTICAST_ADDRS 64 - -/*************************************************/ - -static char version[] __devinitdata = - DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")" "\n"; - -MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>"); -MODULE_DESCRIPTION("Motorola 8xx FEC ethernet driver"); -MODULE_LICENSE("GPL"); - -int fec_8xx_debug = -1; /* -1 == use FEC_8XX_DEF_MSG_ENABLE as value */ -module_param(fec_8xx_debug, int, 0); -MODULE_PARM_DESC(fec_8xx_debug, - "FEC 8xx bitmapped debugging message enable value"); - - -/*************************************************/ - -/* - * Delay to wait for FEC reset command to complete (in us) - */ -#define FEC_RESET_DELAY 50 - -/*****************************************************************************************/ - -static void fec_whack_reset(fec_t * fecp) -{ - int i; - - /* - * Whack a reset. We should wait for this. 
- */ - FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_RESET); - for (i = 0; - (FR(fecp, ecntrl) & FEC_ECNTRL_RESET) != 0 && i < FEC_RESET_DELAY; - i++) - udelay(1); - - if (i == FEC_RESET_DELAY) - printk(KERN_WARNING "FEC Reset timeout!\n"); - -} - -/****************************************************************************/ - -/* - * Transmitter timeout. - */ -#define TX_TIMEOUT (2*HZ) - -/****************************************************************************/ - -/* - * Returns the CRC needed when filling in the hash table for - * multicast group filtering - * pAddr must point to a MAC address (6 bytes) - */ -static __u32 fec_mulicast_calc_crc(char *pAddr) -{ - u8 byte; - int byte_count; - int bit_count; - __u32 crc = 0xffffffff; - u8 msb; - - for (byte_count = 0; byte_count < 6; byte_count++) { - byte = pAddr[byte_count]; - for (bit_count = 0; bit_count < 8; bit_count++) { - msb = crc >> 31; - crc <<= 1; - if (msb ^ (byte & 0x1)) { - crc ^= FEC_CRC_POLY; - } - byte >>= 1; - } - } - return (crc); -} - -/* - * Set or clear the multicast filter for this adaptor. - * Skeleton taken from sunlance driver. - * The CPM Ethernet implementation allows Multicast as well as individual - * MAC address filtering. Some of the drivers check to make sure it is - * a group multicast address, and discard those that are not. I guess I - * will do the same for now, but just remove the test if you want - * individual filtering as well (do the upper net layers want or support - * this kind of feature?). - */ -static void fec_set_multicast_list(struct net_device *dev) -{ - struct fec_enet_private *fep = netdev_priv(dev); - fec_t *fecp = fep->fecp; - struct dev_mc_list *pmc; - __u32 crc; - int temp; - __u32 csrVal; - int hash_index; - __u32 hthi, htlo; - unsigned long flags; - - - if ((dev->flags & IFF_PROMISC) != 0) { - - spin_lock_irqsave(&fep->lock, flags); - FS(fecp, r_cntrl, FEC_RCNTRL_PROM); - spin_unlock_irqrestore(&fep->lock, flags); - - /* - * Log any net taps. - */ - printk(KERN_WARNING DRV_MODULE_NAME - ": %s: Promiscuous mode enabled.\n", dev->name); - return; - - } - - if ((dev->flags & IFF_ALLMULTI) != 0 || - dev->mc_count > FEC_MAX_MULTICAST_ADDRS) { - /* - * Catch all multicast addresses, set the filter to all 1's. - */ - hthi = 0xffffffffU; - htlo = 0xffffffffU; - } else { - hthi = 0; - htlo = 0; - - /* - * Now populate the hash table - */ - for (pmc = dev->mc_list; pmc != NULL; pmc = pmc->next) { - crc = fec_mulicast_calc_crc(pmc->dmi_addr); - temp = (crc & 0x3f) >> 1; - hash_index = ((temp & 0x01) << 4) | - ((temp & 0x02) << 2) | - ((temp & 0x04)) | - ((temp & 0x08) >> 2) | - ((temp & 0x10) >> 4); - csrVal = (1 << hash_index); - if (crc & 1) - hthi |= csrVal; - else - htlo |= csrVal; - } - } - - spin_lock_irqsave(&fep->lock, flags); - FC(fecp, r_cntrl, FEC_RCNTRL_PROM); - FW(fecp, hash_table_high, hthi); - FW(fecp, hash_table_low, htlo); - spin_unlock_irqrestore(&fep->lock, flags); -} - -static int fec_set_mac_address(struct net_device *dev, void *addr) -{ - struct sockaddr *mac = addr; - struct fec_enet_private *fep = netdev_priv(dev); - struct fec *fecp = fep->fecp; - int i; - __u32 addrhi, addrlo; - unsigned long flags; - - /* Get pointer to SCC area in parameter RAM. */ - for (i = 0; i < 6; i++) - dev->dev_addr[i] = mac->sa_data[i]; - - /* - * Set station address. 
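The deleted fec_set_multicast_list() above folds a 32-bit CRC of each multicast address into one bit of hash_table_high/low via a 5-bit reversal of the CRC's low bits. Below is a minimal user-space sketch of that arithmetic only; the FEC_CRC_POLY value (taken as the usual Ethernet polynomial) and the sample MAC address are assumptions, not part of this patch.

#include <stdio.h>
#include <stdint.h>

#define FEC_CRC_POLY 0x04C11DB7u	/* assumed value of the header's FEC_CRC_POLY */

static uint32_t multicast_crc(const unsigned char *addr)
{
	uint32_t crc = 0xffffffffu;

	for (int i = 0; i < 6; i++) {
		unsigned char byte = addr[i];

		for (int bit = 0; bit < 8; bit++) {
			uint32_t msb = crc >> 31;

			crc <<= 1;
			if (msb ^ (byte & 0x1))
				crc ^= FEC_CRC_POLY;
			byte >>= 1;
		}
	}
	return crc;
}

int main(void)
{
	const unsigned char mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t crc = multicast_crc(mac);
	unsigned int temp = (crc & 0x3f) >> 1;
	/* same 5-bit reversal as the deleted fec_set_multicast_list() */
	unsigned int hash_index = ((temp & 0x01) << 4) |
				  ((temp & 0x02) << 2) |
				   (temp & 0x04)       |
				  ((temp & 0x08) >> 2) |
				  ((temp & 0x10) >> 4);

	printf("crc      = 0x%08x\n", (unsigned int)crc);
	printf("register = hash_table_%s\n", (crc & 1) ? "high" : "low");
	printf("bit mask = 0x%08x (bit %u)\n", 1u << hash_index, hash_index);
	return 0;
}

Compiling this with cc and running it shows which of the two hash registers, and which bit in it, the controller would have matched for that address.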
- */ - addrhi = ((__u32) dev->dev_addr[0] << 24) | - ((__u32) dev->dev_addr[1] << 16) | - ((__u32) dev->dev_addr[2] << 8) | - (__u32) dev->dev_addr[3]; - addrlo = ((__u32) dev->dev_addr[4] << 24) | - ((__u32) dev->dev_addr[5] << 16); - - spin_lock_irqsave(&fep->lock, flags); - FW(fecp, addr_low, addrhi); - FW(fecp, addr_high, addrlo); - spin_unlock_irqrestore(&fep->lock, flags); - - return 0; -} - -/* - * This function is called to start or restart the FEC during a link - * change. This only happens when switching between half and full - * duplex. - */ -void fec_restart(struct net_device *dev, int duplex, int speed) -{ -#ifdef CONFIG_DUET - immap_t *immap = (immap_t *) IMAP_ADDR; - __u32 cptr; -#endif - struct fec_enet_private *fep = netdev_priv(dev); - struct fec *fecp = fep->fecp; - const struct fec_platform_info *fpi = fep->fpi; - cbd_t *bdp; - struct sk_buff *skb; - int i; - __u32 addrhi, addrlo; - - fec_whack_reset(fep->fecp); - - /* - * Set station address. - */ - addrhi = ((__u32) dev->dev_addr[0] << 24) | - ((__u32) dev->dev_addr[1] << 16) | - ((__u32) dev->dev_addr[2] << 8) | - (__u32) dev->dev_addr[3]; - addrlo = ((__u32) dev->dev_addr[4] << 24) | - ((__u32) dev->dev_addr[5] << 16); - FW(fecp, addr_low, addrhi); - FW(fecp, addr_high, addrlo); - - /* - * Reset all multicast. - */ - FW(fecp, hash_table_high, 0); - FW(fecp, hash_table_low, 0); - - /* - * Set maximum receive buffer size. - */ - FW(fecp, r_buff_size, PKT_MAXBLR_SIZE); - FW(fecp, r_hash, PKT_MAXBUF_SIZE); - - /* - * Set receive and transmit descriptor base. - */ - FW(fecp, r_des_start, iopa((__u32) (fep->rx_bd_base))); - FW(fecp, x_des_start, iopa((__u32) (fep->tx_bd_base))); - - fep->dirty_tx = fep->cur_tx = fep->tx_bd_base; - fep->tx_free = fep->tx_ring; - fep->cur_rx = fep->rx_bd_base; - - /* - * Reset SKB receive buffers - */ - for (i = 0; i < fep->rx_ring; i++) { - if ((skb = fep->rx_skbuff[i]) == NULL) - continue; - fep->rx_skbuff[i] = NULL; - dev_kfree_skb(skb); - } - - /* - * Initialize the receive buffer descriptors. - */ - for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) { - skb = dev_alloc_skb(ENET_RX_FRSIZE); - if (skb == NULL) { - printk(KERN_WARNING DRV_MODULE_NAME - ": %s Memory squeeze, unable to allocate skb\n", - dev->name); - fep->stats.rx_dropped++; - break; - } - fep->rx_skbuff[i] = skb; - skb->dev = dev; - CBDW_BUFADDR(bdp, dma_map_single(NULL, skb->data, - L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), - DMA_FROM_DEVICE)); - CBDW_DATLEN(bdp, 0); /* zero */ - CBDW_SC(bdp, BD_ENET_RX_EMPTY | - ((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP)); - } - /* - * if we failed, fillup remainder - */ - for (; i < fep->rx_ring; i++, bdp++) { - fep->rx_skbuff[i] = NULL; - CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP); - } - - /* - * Reset SKB transmit buffers. - */ - for (i = 0; i < fep->tx_ring; i++) { - if ((skb = fep->tx_skbuff[i]) == NULL) - continue; - fep->tx_skbuff[i] = NULL; - dev_kfree_skb(skb); - } - - /* - * ...and the same for transmit. - */ - for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) { - fep->tx_skbuff[i] = NULL; - CBDW_BUFADDR(bdp, virt_to_bus(NULL)); - CBDW_DATLEN(bdp, 0); - CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP); - } - - /* - * Enable big endian and don't care about SDMA FC. - */ - FW(fecp, fun_code, 0x78000000); - - /* - * Set MII speed. - */ - FW(fecp, mii_speed, fep->fec_phy_speed); - - /* - * Clear any outstanding interrupt. 
- */ - FW(fecp, ievent, 0xffc0); - FW(fecp, ivec, (fpi->fec_irq / 2) << 29); - - /* - * adjust to speed (only for DUET & RMII) - */ -#ifdef CONFIG_DUET - cptr = in_be32(&immap->im_cpm.cp_cptr); - switch (fpi->fec_no) { - case 0: - /* - * check if in RMII mode - */ - if ((cptr & 0x100) == 0) - break; - - if (speed == 10) - cptr |= 0x0000010; - else if (speed == 100) - cptr &= ~0x0000010; - break; - case 1: - /* - * check if in RMII mode - */ - if ((cptr & 0x80) == 0) - break; - - if (speed == 10) - cptr |= 0x0000008; - else if (speed == 100) - cptr &= ~0x0000008; - break; - default: - break; - } - out_be32(&immap->im_cpm.cp_cptr, cptr); -#endif - - FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ - /* - * adjust to duplex mode - */ - if (duplex) { - FC(fecp, r_cntrl, FEC_RCNTRL_DRT); - FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */ - } else { - FS(fecp, r_cntrl, FEC_RCNTRL_DRT); - FC(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD disable */ - } - - /* - * Enable interrupts we wish to service. - */ - FW(fecp, imask, FEC_ENET_TXF | FEC_ENET_TXB | - FEC_ENET_RXF | FEC_ENET_RXB); - - /* - * And last, enable the transmit and receive processing. - */ - FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN); - FW(fecp, r_des_active, 0x01000000); -} - -void fec_stop(struct net_device *dev) -{ - struct fec_enet_private *fep = netdev_priv(dev); - fec_t *fecp = fep->fecp; - struct sk_buff *skb; - int i; - - if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0) - return; /* already down */ - - FW(fecp, x_cntrl, 0x01); /* Graceful transmit stop */ - for (i = 0; ((FR(fecp, ievent) & 0x10000000) == 0) && - i < FEC_RESET_DELAY; i++) - udelay(1); - - if (i == FEC_RESET_DELAY) - printk(KERN_WARNING DRV_MODULE_NAME - ": %s FEC timeout on graceful transmit stop\n", - dev->name); - /* - * Disable FEC. Let only MII interrupts. - */ - FW(fecp, imask, 0); - FW(fecp, ecntrl, ~FEC_ECNTRL_ETHER_EN); - - /* - * Reset SKB transmit buffers. - */ - for (i = 0; i < fep->tx_ring; i++) { - if ((skb = fep->tx_skbuff[i]) == NULL) - continue; - fep->tx_skbuff[i] = NULL; - dev_kfree_skb(skb); - } - - /* - * Reset SKB receive buffers - */ - for (i = 0; i < fep->rx_ring; i++) { - if ((skb = fep->rx_skbuff[i]) == NULL) - continue; - fep->rx_skbuff[i] = NULL; - dev_kfree_skb(skb); - } -} - -/* common receive function */ -static int fec_enet_rx_common(struct fec_enet_private *ep, - struct net_device *dev, int budget) -{ - fec_t *fecp = fep->fecp; - const struct fec_platform_info *fpi = fep->fpi; - cbd_t *bdp; - struct sk_buff *skb, *skbn, *skbt; - int received = 0; - __u16 pkt_len, sc; - int curidx; - - /* - * First, grab all of the stats for the incoming packet. - * These get messed up if we get called due to a busy condition. - */ - bdp = fep->cur_rx; - - /* clear RX status bits for napi*/ - if (fpi->use_napi) - FW(fecp, ievent, FEC_ENET_RXF | FEC_ENET_RXB); - - while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) { - - curidx = bdp - fep->rx_bd_base; - - /* - * Since we have allocated space to hold a complete frame, - * the last indicator should be set. - */ - if ((sc & BD_ENET_RX_LAST) == 0) - printk(KERN_WARNING DRV_MODULE_NAME - ": %s rcv is not +last\n", - dev->name); - - /* - * Check for errors. - */ - if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL | - BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) { - fep->stats.rx_errors++; - /* Frame too long or too short. 
*/ - if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH)) - fep->stats.rx_length_errors++; - /* Frame alignment */ - if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL)) - fep->stats.rx_frame_errors++; - /* CRC Error */ - if (sc & BD_ENET_RX_CR) - fep->stats.rx_crc_errors++; - /* FIFO overrun */ - if (sc & BD_ENET_RX_OV) - fep->stats.rx_crc_errors++; - - skbn = fep->rx_skbuff[curidx]; - BUG_ON(skbn == NULL); - - } else { - skb = fep->rx_skbuff[curidx]; - BUG_ON(skb == NULL); - - /* - * Process the incoming frame. - */ - fep->stats.rx_packets++; - pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */ - fep->stats.rx_bytes += pkt_len + 4; - - if (pkt_len <= fpi->rx_copybreak) { - /* +2 to make IP header L1 cache aligned */ - skbn = dev_alloc_skb(pkt_len + 2); - if (skbn != NULL) { - skb_reserve(skbn, 2); /* align IP header */ - skb_copy_from_linear_data(skb, - skbn->data, - pkt_len); - /* swap */ - skbt = skb; - skb = skbn; - skbn = skbt; - } - } else - skbn = dev_alloc_skb(ENET_RX_FRSIZE); - - if (skbn != NULL) { - skb_put(skb, pkt_len); /* Make room */ - skb->protocol = eth_type_trans(skb, dev); - received++; - if (!fpi->use_napi) - netif_rx(skb); - else - netif_receive_skb(skb); - } else { - printk(KERN_WARNING DRV_MODULE_NAME - ": %s Memory squeeze, dropping packet.\n", - dev->name); - fep->stats.rx_dropped++; - skbn = skb; - } - } - - fep->rx_skbuff[curidx] = skbn; - CBDW_BUFADDR(bdp, dma_map_single(NULL, skbn->data, - L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), - DMA_FROM_DEVICE)); - CBDW_DATLEN(bdp, 0); - CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY); - - /* - * Update BD pointer to next entry. - */ - if ((sc & BD_ENET_RX_WRAP) == 0) - bdp++; - else - bdp = fep->rx_bd_base; - - /* - * Doing this here will keep the FEC running while we process - * incoming frames. On a heavily loaded network, we should be - * able to keep up at the expense of system resources. - */ - FW(fecp, r_des_active, 0x01000000); - - if (received >= budget) - break; - - } - - fep->cur_rx = bdp; - - if (fpi->use_napi) { - if (received < budget) { - netif_rx_complete(dev, &fep->napi); - - /* enable RX interrupt bits */ - FS(fecp, imask, FEC_ENET_RXF | FEC_ENET_RXB); - } - } - - return received; -} - -static void fec_enet_tx(struct net_device *dev) -{ - struct fec_enet_private *fep = netdev_priv(dev); - cbd_t *bdp; - struct sk_buff *skb; - int dirtyidx, do_wake; - __u16 sc; - - spin_lock(&fep->lock); - bdp = fep->dirty_tx; - - do_wake = 0; - while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) { - - dirtyidx = bdp - fep->tx_bd_base; - - if (fep->tx_free == fep->tx_ring) - break; - - skb = fep->tx_skbuff[dirtyidx]; - - /* - * Check for errors. - */ - if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC | - BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) { - fep->stats.tx_errors++; - if (sc & BD_ENET_TX_HB) /* No heartbeat */ - fep->stats.tx_heartbeat_errors++; - if (sc & BD_ENET_TX_LC) /* Late collision */ - fep->stats.tx_window_errors++; - if (sc & BD_ENET_TX_RL) /* Retrans limit */ - fep->stats.tx_aborted_errors++; - if (sc & BD_ENET_TX_UN) /* Underrun */ - fep->stats.tx_fifo_errors++; - if (sc & BD_ENET_TX_CSL) /* Carrier lost */ - fep->stats.tx_carrier_errors++; - } else - fep->stats.tx_packets++; - - if (sc & BD_ENET_TX_READY) - printk(KERN_WARNING DRV_MODULE_NAME - ": %s HEY! Enet xmit interrupt and TX_READY.\n", - dev->name); - - /* - * Deferred means some collisions occurred during transmit, - * but we eventually sent the packet OK. 
- */ - if (sc & BD_ENET_TX_DEF) - fep->stats.collisions++; - - /* - * Free the sk buffer associated with this last transmit. - */ - dev_kfree_skb_irq(skb); - fep->tx_skbuff[dirtyidx] = NULL; - - /* - * Update pointer to next buffer descriptor to be transmitted. - */ - if ((sc & BD_ENET_TX_WRAP) == 0) - bdp++; - else - bdp = fep->tx_bd_base; - - /* - * Since we have freed up a buffer, the ring is no longer - * full. - */ - if (!fep->tx_free++) - do_wake = 1; - } - - fep->dirty_tx = bdp; - - spin_unlock(&fep->lock); - - if (do_wake && netif_queue_stopped(dev)) - netif_wake_queue(dev); -} - -/* - * The interrupt handler. - * This is called from the MPC core interrupt. - */ -static irqreturn_t -fec_enet_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct fec_enet_private *fep; - const struct fec_platform_info *fpi; - fec_t *fecp; - __u32 int_events; - __u32 int_events_napi; - - if (unlikely(dev == NULL)) - return IRQ_NONE; - - fep = netdev_priv(dev); - fecp = fep->fecp; - fpi = fep->fpi; - - /* - * Get the interrupt events that caused us to be here. - */ - while ((int_events = FR(fecp, ievent) & FR(fecp, imask)) != 0) { - - if (!fpi->use_napi) - FW(fecp, ievent, int_events); - else { - int_events_napi = int_events & ~(FEC_ENET_RXF | FEC_ENET_RXB); - FW(fecp, ievent, int_events_napi); - } - - if ((int_events & (FEC_ENET_HBERR | FEC_ENET_BABR | - FEC_ENET_BABT | FEC_ENET_EBERR)) != 0) - printk(KERN_WARNING DRV_MODULE_NAME - ": %s FEC ERROR(s) 0x%x\n", - dev->name, int_events); - - if ((int_events & FEC_ENET_RXF) != 0) { - if (!fpi->use_napi) - fec_enet_rx_common(fep, dev, ~0); - else { - if (netif_rx_schedule_prep(dev, &fep->napi)) { - /* disable rx interrupts */ - FC(fecp, imask, FEC_ENET_RXF | FEC_ENET_RXB); - __netif_rx_schedule(dev, &fep->napi); - } else { - printk(KERN_ERR DRV_MODULE_NAME - ": %s driver bug! interrupt while in poll!\n", - dev->name); - FC(fecp, imask, FEC_ENET_RXF | FEC_ENET_RXB); - } - } - } - - if ((int_events & FEC_ENET_TXF) != 0) - fec_enet_tx(dev); - } - - return IRQ_HANDLED; -} - -/* This interrupt occurs when the PHY detects a link change. */ -static irqreturn_t -fec_mii_link_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct fec_enet_private *fep; - const struct fec_platform_info *fpi; - - if (unlikely(dev == NULL)) - return IRQ_NONE; - - fep = netdev_priv(dev); - fpi = fep->fpi; - - if (!fpi->use_mdio) - return IRQ_NONE; - - /* - * Acknowledge the interrupt if possible. If we have not - * found the PHY yet we can't process or acknowledge the - * interrupt now. Instead we ignore this interrupt for now, - * which we can do since it is edge triggered. It will be - * acknowledged later by fec_enet_open(). - */ - if (!fep->phy) - return IRQ_NONE; - - fec_mii_ack_int(dev); - fec_mii_link_status_change_check(dev, 0); - - return IRQ_HANDLED; -} - - -/**********************************************************************************/ - -static int fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct fec_enet_private *fep = netdev_priv(dev); - fec_t *fecp = fep->fecp; - cbd_t *bdp; - int curidx; - unsigned long flags; - - spin_lock_irqsave(&fep->tx_lock, flags); - - /* - * Fill in a Tx ring entry - */ - bdp = fep->cur_tx; - - if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) { - netif_stop_queue(dev); - spin_unlock_irqrestore(&fep->tx_lock, flags); - - /* - * Ooops. All transmit buffers are full. Bail out. - * This should not happen, since the tx queue should be stopped. 
- */ - printk(KERN_WARNING DRV_MODULE_NAME - ": %s tx queue full!.\n", dev->name); - return 1; - } - - curidx = bdp - fep->tx_bd_base; - /* - * Clear all of the status flags. - */ - CBDC_SC(bdp, BD_ENET_TX_STATS); - - /* - * Save skb pointer. - */ - fep->tx_skbuff[curidx] = skb; - - fep->stats.tx_bytes += skb->len; - - /* - * Push the data cache so the CPM does not get stale memory data. - */ - CBDW_BUFADDR(bdp, dma_map_single(NULL, skb->data, - skb->len, DMA_TO_DEVICE)); - CBDW_DATLEN(bdp, skb->len); - - dev->trans_start = jiffies; - - /* - * If this was the last BD in the ring, start at the beginning again. - */ - if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0) - fep->cur_tx++; - else - fep->cur_tx = fep->tx_bd_base; - - if (!--fep->tx_free) - netif_stop_queue(dev); - - /* - * Trigger transmission start - */ - CBDS_SC(bdp, BD_ENET_TX_READY | BD_ENET_TX_INTR | - BD_ENET_TX_LAST | BD_ENET_TX_TC); - FW(fecp, x_des_active, 0x01000000); - - spin_unlock_irqrestore(&fep->tx_lock, flags); - - return 0; -} - -static void fec_timeout(struct net_device *dev) -{ - struct fec_enet_private *fep = netdev_priv(dev); - - fep->stats.tx_errors++; - - if (fep->tx_free) - netif_wake_queue(dev); - - /* check link status again */ - fec_mii_link_status_change_check(dev, 0); -} - -static int fec_enet_open(struct net_device *dev) -{ - struct fec_enet_private *fep = netdev_priv(dev); - const struct fec_platform_info *fpi = fep->fpi; - unsigned long flags; - - napi_enable(&fep->napi); - - /* Install our interrupt handler. */ - if (request_irq(fpi->fec_irq, fec_enet_interrupt, 0, "fec", dev) != 0) { - printk(KERN_ERR DRV_MODULE_NAME - ": %s Could not allocate FEC IRQ!", dev->name); - napi_disable(&fep->napi); - return -EINVAL; - } - - /* Install our phy interrupt handler */ - if (fpi->phy_irq != -1 && - request_irq(fpi->phy_irq, fec_mii_link_interrupt, 0, "fec-phy", - dev) != 0) { - printk(KERN_ERR DRV_MODULE_NAME - ": %s Could not allocate PHY IRQ!", dev->name); - free_irq(fpi->fec_irq, dev); - napi_disable(&fep->napi); - return -EINVAL; - } - - if (fpi->use_mdio) { - fec_mii_startup(dev); - netif_carrier_off(dev); - fec_mii_link_status_change_check(dev, 1); - } else { - spin_lock_irqsave(&fep->lock, flags); - fec_restart(dev, 1, 100); /* XXX this sucks */ - spin_unlock_irqrestore(&fep->lock, flags); - - netif_carrier_on(dev); - netif_start_queue(dev); - } - return 0; -} - -static int fec_enet_close(struct net_device *dev) -{ - struct fec_enet_private *fep = netdev_priv(dev); - const struct fec_platform_info *fpi = fep->fpi; - unsigned long flags; - - netif_stop_queue(dev); - napi_disable(&fep->napi); - netif_carrier_off(dev); - - if (fpi->use_mdio) - fec_mii_shutdown(dev); - - spin_lock_irqsave(&fep->lock, flags); - fec_stop(dev); - spin_unlock_irqrestore(&fep->lock, flags); - - /* release any irqs */ - if (fpi->phy_irq != -1) - free_irq(fpi->phy_irq, dev); - free_irq(fpi->fec_irq, dev); - - return 0; -} - -static struct net_device_stats *fec_enet_get_stats(struct net_device *dev) -{ - struct fec_enet_private *fep = netdev_priv(dev); - return &fep->stats; -} - -static int fec_enet_poll(struct napi_struct *napi, int budget) -{ - struct fec_enet_private *fep = container_of(napi, struct fec_enet_private, napi); - struct net_device *dev = fep->dev; - - return fec_enet_rx_common(fep, dev, budget); -} - -/*************************************************************************/ - -static void fec_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) -{ - strcpy(info->driver, DRV_MODULE_NAME); - 
strcpy(info->version, DRV_MODULE_VERSION); -} - -static int fec_get_regs_len(struct net_device *dev) -{ - return sizeof(fec_t); -} - -static void fec_get_regs(struct net_device *dev, struct ethtool_regs *regs, - void *p) -{ - struct fec_enet_private *fep = netdev_priv(dev); - unsigned long flags; - - if (regs->len < sizeof(fec_t)) - return; - - regs->version = 0; - spin_lock_irqsave(&fep->lock, flags); - memcpy_fromio(p, fep->fecp, sizeof(fec_t)); - spin_unlock_irqrestore(&fep->lock, flags); -} - -static int fec_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct fec_enet_private *fep = netdev_priv(dev); - unsigned long flags; - int rc; - - spin_lock_irqsave(&fep->lock, flags); - rc = mii_ethtool_gset(&fep->mii_if, cmd); - spin_unlock_irqrestore(&fep->lock, flags); - - return rc; -} - -static int fec_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct fec_enet_private *fep = netdev_priv(dev); - unsigned long flags; - int rc; - - spin_lock_irqsave(&fep->lock, flags); - rc = mii_ethtool_sset(&fep->mii_if, cmd); - spin_unlock_irqrestore(&fep->lock, flags); - - return rc; -} - -static int fec_nway_reset(struct net_device *dev) -{ - struct fec_enet_private *fep = netdev_priv(dev); - return mii_nway_restart(&fep->mii_if); -} - -static __u32 fec_get_msglevel(struct net_device *dev) -{ - struct fec_enet_private *fep = netdev_priv(dev); - return fep->msg_enable; -} - -static void fec_set_msglevel(struct net_device *dev, __u32 value) -{ - struct fec_enet_private *fep = netdev_priv(dev); - fep->msg_enable = value; -} - -static const struct ethtool_ops fec_ethtool_ops = { - .get_drvinfo = fec_get_drvinfo, - .get_regs_len = fec_get_regs_len, - .get_settings = fec_get_settings, - .set_settings = fec_set_settings, - .nway_reset = fec_nway_reset, - .get_link = ethtool_op_get_link, - .get_msglevel = fec_get_msglevel, - .set_msglevel = fec_set_msglevel, - .set_tx_csum = ethtool_op_set_tx_csum, /* local! 
*/ - .set_sg = ethtool_op_set_sg, - .get_regs = fec_get_regs, -}; - -static int fec_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) -{ - struct fec_enet_private *fep = netdev_priv(dev); - struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&rq->ifr_data; - unsigned long flags; - int rc; - - if (!netif_running(dev)) - return -EINVAL; - - spin_lock_irqsave(&fep->lock, flags); - rc = generic_mii_ioctl(&fep->mii_if, mii, cmd, NULL); - spin_unlock_irqrestore(&fep->lock, flags); - return rc; -} - -int fec_8xx_init_one(const struct fec_platform_info *fpi, - struct net_device **devp) -{ - immap_t *immap = (immap_t *) IMAP_ADDR; - static int fec_8xx_version_printed = 0; - struct net_device *dev = NULL; - struct fec_enet_private *fep = NULL; - fec_t *fecp = NULL; - int i; - int err = 0; - int registered = 0; - __u32 siel; - - *devp = NULL; - - switch (fpi->fec_no) { - case 0: - fecp = &((immap_t *) IMAP_ADDR)->im_cpm.cp_fec; - break; -#ifdef CONFIG_DUET - case 1: - fecp = &((immap_t *) IMAP_ADDR)->im_cpm.cp_fec2; - break; -#endif - default: - return -EINVAL; - } - - if (fec_8xx_version_printed++ == 0) - printk(KERN_INFO "%s", version); - - i = sizeof(*fep) + (sizeof(struct sk_buff **) * - (fpi->rx_ring + fpi->tx_ring)); - - dev = alloc_etherdev(i); - if (!dev) { - err = -ENOMEM; - goto err; - } - - fep = netdev_priv(dev); - fep->dev = dev; - - /* partial reset of FEC */ - fec_whack_reset(fecp); - - /* point rx_skbuff, tx_skbuff */ - fep->rx_skbuff = (struct sk_buff **)&fep[1]; - fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring; - - fep->fecp = fecp; - fep->fpi = fpi; - - /* init locks */ - spin_lock_init(&fep->lock); - spin_lock_init(&fep->tx_lock); - - /* - * Set the Ethernet address. - */ - for (i = 0; i < 6; i++) - dev->dev_addr[i] = fpi->macaddr[i]; - - fep->ring_base = dma_alloc_coherent(NULL, - (fpi->tx_ring + fpi->rx_ring) * - sizeof(cbd_t), &fep->ring_mem_addr, - GFP_KERNEL); - if (fep->ring_base == NULL) { - printk(KERN_ERR DRV_MODULE_NAME - ": %s dma alloc failed.\n", dev->name); - err = -ENOMEM; - goto err; - } - - /* - * Set receive and transmit descriptor base. - */ - fep->rx_bd_base = fep->ring_base; - fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring; - - /* initialize ring size variables */ - fep->tx_ring = fpi->tx_ring; - fep->rx_ring = fpi->rx_ring; - - /* SIU interrupt */ - if (fpi->phy_irq != -1 && - (fpi->phy_irq >= SIU_IRQ0 && fpi->phy_irq < SIU_LEVEL7)) { - - siel = in_be32(&immap->im_siu_conf.sc_siel); - if ((fpi->phy_irq & 1) == 0) - siel |= (0x80000000 >> fpi->phy_irq); - else - siel &= ~(0x80000000 >> (fpi->phy_irq & ~1)); - out_be32(&immap->im_siu_conf.sc_siel, siel); - } - - /* - * The FEC Ethernet specific entries in the device structure. 
- */ - dev->open = fec_enet_open; - dev->hard_start_xmit = fec_enet_start_xmit; - dev->tx_timeout = fec_timeout; - dev->watchdog_timeo = TX_TIMEOUT; - dev->stop = fec_enet_close; - dev->get_stats = fec_enet_get_stats; - dev->set_multicast_list = fec_set_multicast_list; - dev->set_mac_address = fec_set_mac_address; - netif_napi_add(dev, &fec->napi, - fec_enet_poll, fpi->napi_weight); - - dev->ethtool_ops = &fec_ethtool_ops; - dev->do_ioctl = fec_ioctl; - - fep->fec_phy_speed = - ((((fpi->sys_clk + 4999999) / 2500000) / 2) & 0x3F) << 1; - - init_timer(&fep->phy_timer_list); - - /* partial reset of FEC so that only MII works */ - FW(fecp, mii_speed, fep->fec_phy_speed); - FW(fecp, ievent, 0xffc0); - FW(fecp, ivec, (fpi->fec_irq / 2) << 29); - FW(fecp, imask, 0); - FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ - FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN); - - netif_carrier_off(dev); - - err = register_netdev(dev); - if (err != 0) - goto err; - registered = 1; - - if (fpi->use_mdio) { - fep->mii_if.dev = dev; - fep->mii_if.mdio_read = fec_mii_read; - fep->mii_if.mdio_write = fec_mii_write; - fep->mii_if.phy_id_mask = 0x1f; - fep->mii_if.reg_num_mask = 0x1f; - fep->mii_if.phy_id = fec_mii_phy_id_detect(dev); - } - - *devp = dev; - - return 0; - - err: - if (dev != NULL) { - if (fecp != NULL) - fec_whack_reset(fecp); - - if (registered) - unregister_netdev(dev); - - if (fep != NULL) { - if (fep->ring_base) - dma_free_coherent(NULL, - (fpi->tx_ring + - fpi->rx_ring) * - sizeof(cbd_t), fep->ring_base, - fep->ring_mem_addr); - } - free_netdev(dev); - } - return err; -} - -int fec_8xx_cleanup_one(struct net_device *dev) -{ - struct fec_enet_private *fep = netdev_priv(dev); - fec_t *fecp = fep->fecp; - const struct fec_platform_info *fpi = fep->fpi; - - fec_whack_reset(fecp); - - unregister_netdev(dev); - - dma_free_coherent(NULL, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t), - fep->ring_base, fep->ring_mem_addr); - - free_netdev(dev); - - return 0; -} - -/**************************************************************************************/ -/**************************************************************************************/ -/**************************************************************************************/ - -static int __init fec_8xx_init(void) -{ - return fec_8xx_platform_init(); -} - -static void __exit fec_8xx_cleanup(void) -{ - fec_8xx_platform_cleanup(); -} - -/**************************************************************************************/ -/**************************************************************************************/ -/**************************************************************************************/ - -module_init(fec_8xx_init); -module_exit(fec_8xx_cleanup); diff --git a/drivers/net/fec_8xx/fec_mii.c b/drivers/net/fec_8xx/fec_mii.c deleted file mode 100644 index 3b6ca29d31f2..000000000000 --- a/drivers/net/fec_8xx/fec_mii.c +++ /dev/null @@ -1,418 +0,0 @@ -/* - * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx. - * - * Copyright (c) 2003 Intracom S.A. 
- * by Pantelis Antoniou <panto@intracom.gr> - * - * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com> - * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se> - * - * Released under the GPL - */ - -#include <linux/module.h> -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/string.h> -#include <linux/ptrace.h> -#include <linux/errno.h> -#include <linux/ioport.h> -#include <linux/slab.h> -#include <linux/interrupt.h> -#include <linux/init.h> -#include <linux/delay.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/spinlock.h> -#include <linux/mii.h> -#include <linux/ethtool.h> -#include <linux/bitops.h> - -#include <asm/8xx_immap.h> -#include <asm/pgtable.h> -#include <asm/mpc8xx.h> -#include <asm/irq.h> -#include <asm/uaccess.h> -#include <asm/cpm1.h> - -/*************************************************/ - -#include "fec_8xx.h" - -/*************************************************/ - -/* Make MII read/write commands for the FEC. -*/ -#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18)) -#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff)) -#define mk_mii_end 0 - -/*************************************************/ - -/* XXX both FECs use the MII interface of FEC1 */ -static DEFINE_SPINLOCK(fec_mii_lock); - -#define FEC_MII_LOOPS 10000 - -int fec_mii_read(struct net_device *dev, int phy_id, int location) -{ - struct fec_enet_private *fep = netdev_priv(dev); - fec_t *fecp; - int i, ret = -1; - unsigned long flags; - - /* XXX MII interface is only connected to FEC1 */ - fecp = &((immap_t *) IMAP_ADDR)->im_cpm.cp_fec; - - spin_lock_irqsave(&fec_mii_lock, flags); - - if ((FR(fecp, r_cntrl) & FEC_RCNTRL_MII_MODE) == 0) { - FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ - FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN); - FW(fecp, ievent, FEC_ENET_MII); - } - - /* Add PHY address to register command. */ - FW(fecp, mii_speed, fep->fec_phy_speed); - FW(fecp, mii_data, (phy_id << 23) | mk_mii_read(location)); - - for (i = 0; i < FEC_MII_LOOPS; i++) - if ((FR(fecp, ievent) & FEC_ENET_MII) != 0) - break; - - if (i < FEC_MII_LOOPS) { - FW(fecp, ievent, FEC_ENET_MII); - ret = FR(fecp, mii_data) & 0xffff; - } - - spin_unlock_irqrestore(&fec_mii_lock, flags); - - return ret; -} - -void fec_mii_write(struct net_device *dev, int phy_id, int location, int value) -{ - struct fec_enet_private *fep = netdev_priv(dev); - fec_t *fecp; - unsigned long flags; - int i; - - /* XXX MII interface is only connected to FEC1 */ - fecp = &((immap_t *) IMAP_ADDR)->im_cpm.cp_fec; - - spin_lock_irqsave(&fec_mii_lock, flags); - - if ((FR(fecp, r_cntrl) & FEC_RCNTRL_MII_MODE) == 0) { - FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ - FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN); - FW(fecp, ievent, FEC_ENET_MII); - } - - /* Add PHY address to register command. */ - FW(fecp, mii_speed, fep->fec_phy_speed); /* always adapt mii speed */ - FW(fecp, mii_data, (phy_id << 23) | mk_mii_write(location, value)); - - for (i = 0; i < FEC_MII_LOOPS; i++) - if ((FR(fecp, ievent) & FEC_ENET_MII) != 0) - break; - - if (i < FEC_MII_LOOPS) - FW(fecp, ievent, FEC_ENET_MII); - - spin_unlock_irqrestore(&fec_mii_lock, flags); -} - -/*************************************************/ - -#ifdef CONFIG_FEC_8XX_GENERIC_PHY - -/* - * Generic PHY support. 
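The mk_mii_read()/mk_mii_write() macros in the deleted fec_mii.c assemble a clause-22 MDIO management frame in the layout the FEC's mii_data register expects. The sketch below decodes such a word for an arbitrary PHY address and register number; it is only an illustration of the bit layout implied by the macros, not driver code, and the field names follow the standard MDIO frame format.

#include <stdio.h>
#include <stdint.h>

#define MK_MII_READ(reg)	(0x60020000u | (((reg) & 0x1f) << 18))
#define MK_MII_WRITE(reg, val)	(0x50020000u | (((reg) & 0x1f) << 18) | ((val) & 0xffff))

int main(void)
{
	unsigned int phy_id = 0, reg = 1;	/* MII_BMSR, arbitrary PHY at address 0 */
	uint32_t cmd = ((uint32_t)phy_id << 23) | MK_MII_READ(reg);

	/* ST (01), OP (10 = read / 01 = write), PHY address, register
	 * address, turnaround (10), then 16 data bits.
	 */
	printf("mii_data = 0x%08x\n", (unsigned int)cmd);
	printf("ST=%u OP=%u PA=%u RA=%u TA=%u DATA=0x%04x\n",
	       (unsigned int)((cmd >> 30) & 0x3),
	       (unsigned int)((cmd >> 28) & 0x3),
	       (unsigned int)((cmd >> 23) & 0x1f),
	       (unsigned int)((cmd >> 18) & 0x1f),
	       (unsigned int)((cmd >> 16) & 0x3),
	       (unsigned int)(cmd & 0xffff));
	return 0;
}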
- * Should work for all PHYs, but link change is detected by polling - */ - -static void generic_timer_callback(unsigned long data) -{ - struct net_device *dev = (struct net_device *)data; - struct fec_enet_private *fep = netdev_priv(dev); - - fep->phy_timer_list.expires = jiffies + HZ / 2; - - add_timer(&fep->phy_timer_list); - - fec_mii_link_status_change_check(dev, 0); -} - -static void generic_startup(struct net_device *dev) -{ - struct fec_enet_private *fep = netdev_priv(dev); - - fep->phy_timer_list.expires = jiffies + HZ / 2; /* every 500ms */ - fep->phy_timer_list.data = (unsigned long)dev; - fep->phy_timer_list.function = generic_timer_callback; - add_timer(&fep->phy_timer_list); -} - -static void generic_shutdown(struct net_device *dev) -{ - struct fec_enet_private *fep = netdev_priv(dev); - - del_timer_sync(&fep->phy_timer_list); -} - -#endif - -#ifdef CONFIG_FEC_8XX_DM9161_PHY - -/* ------------------------------------------------------------------------- */ -/* The Davicom DM9161 is used on the NETTA board */ - -/* register definitions */ - -#define MII_DM9161_ACR 16 /* Aux. Config Register */ -#define MII_DM9161_ACSR 17 /* Aux. Config/Status Register */ -#define MII_DM9161_10TCSR 18 /* 10BaseT Config/Status Reg. */ -#define MII_DM9161_INTR 21 /* Interrupt Register */ -#define MII_DM9161_RECR 22 /* Receive Error Counter Reg. */ -#define MII_DM9161_DISCR 23 /* Disconnect Counter Register */ - -static void dm9161_startup(struct net_device *dev) -{ - struct fec_enet_private *fep = netdev_priv(dev); - - fec_mii_write(dev, fep->mii_if.phy_id, MII_DM9161_INTR, 0x0000); -} - -static void dm9161_ack_int(struct net_device *dev) -{ - struct fec_enet_private *fep = netdev_priv(dev); - - fec_mii_read(dev, fep->mii_if.phy_id, MII_DM9161_INTR); -} - -static void dm9161_shutdown(struct net_device *dev) -{ - struct fec_enet_private *fep = netdev_priv(dev); - - fec_mii_write(dev, fep->mii_if.phy_id, MII_DM9161_INTR, 0x0f00); -} - -#endif - -#ifdef CONFIG_FEC_8XX_LXT971_PHY - -/* Support for LXT971/972 PHY */ - -#define MII_LXT971_PCR 16 /* Port Control Register */ -#define MII_LXT971_SR2 17 /* Status Register 2 */ -#define MII_LXT971_IER 18 /* Interrupt Enable Register */ -#define MII_LXT971_ISR 19 /* Interrupt Status Register */ -#define MII_LXT971_LCR 20 /* LED Control Register */ -#define MII_LXT971_TCR 30 /* Transmit Control Register */ - -static void lxt971_startup(struct net_device *dev) -{ - struct fec_enet_private *fep = netdev_priv(dev); - - fec_mii_write(dev, fep->mii_if.phy_id, MII_LXT971_IER, 0x00F2); -} - -static void lxt971_ack_int(struct net_device *dev) -{ - struct fec_enet_private *fep = netdev_priv(dev); - - fec_mii_read(dev, fep->mii_if.phy_id, MII_LXT971_ISR); -} - -static void lxt971_shutdown(struct net_device *dev) -{ - struct fec_enet_private *fep = netdev_priv(dev); - - fec_mii_write(dev, fep->mii_if.phy_id, MII_LXT971_IER, 0x0000); -} -#endif - -/**********************************************************************************/ - -static const struct phy_info phy_info[] = { -#ifdef CONFIG_FEC_8XX_DM9161_PHY - { - .id = 0x00181b88, - .name = "DM9161", - .startup = dm9161_startup, - .ack_int = dm9161_ack_int, - .shutdown = dm9161_shutdown, - }, -#endif -#ifdef CONFIG_FEC_8XX_LXT971_PHY - { - .id = 0x0001378e, - .name = "LXT971/972", - .startup = lxt971_startup, - .ack_int = lxt971_ack_int, - .shutdown = lxt971_shutdown, - }, -#endif -#ifdef CONFIG_FEC_8XX_GENERIC_PHY - { - .id = 0, - .name = "GENERIC", - .startup = generic_startup, - .shutdown = generic_shutdown, - }, 
-#endif -}; - -/**********************************************************************************/ - -int fec_mii_phy_id_detect(struct net_device *dev) -{ - struct fec_enet_private *fep = netdev_priv(dev); - const struct fec_platform_info *fpi = fep->fpi; - int i, r, start, end, phytype, physubtype; - const struct phy_info *phy; - int phy_hwid, phy_id; - - /* if no MDIO */ - if (fpi->use_mdio == 0) - return -1; - - phy_hwid = -1; - fep->phy = NULL; - - /* auto-detect? */ - if (fpi->phy_addr == -1) { - start = 0; - end = 32; - } else { /* direct */ - start = fpi->phy_addr; - end = start + 1; - } - - for (phy_id = start; phy_id < end; phy_id++) { - r = fec_mii_read(dev, phy_id, MII_PHYSID1); - if (r == -1 || (phytype = (r & 0xffff)) == 0xffff) - continue; - r = fec_mii_read(dev, phy_id, MII_PHYSID2); - if (r == -1 || (physubtype = (r & 0xffff)) == 0xffff) - continue; - phy_hwid = (phytype << 16) | physubtype; - if (phy_hwid != -1) - break; - } - - if (phy_hwid == -1) { - printk(KERN_ERR DRV_MODULE_NAME - ": %s No PHY detected!\n", dev->name); - return -1; - } - - for (i = 0, phy = phy_info; i < ARRAY_SIZE(phy_info); i++, phy++) - if (phy->id == (phy_hwid >> 4) || phy->id == 0) - break; - - if (i >= ARRAY_SIZE(phy_info)) { - printk(KERN_ERR DRV_MODULE_NAME - ": %s PHY id 0x%08x is not supported!\n", - dev->name, phy_hwid); - return -1; - } - - fep->phy = phy; - - printk(KERN_INFO DRV_MODULE_NAME - ": %s Phy @ 0x%x, type %s (0x%08x)\n", - dev->name, phy_id, fep->phy->name, phy_hwid); - - return phy_id; -} - -void fec_mii_startup(struct net_device *dev) -{ - struct fec_enet_private *fep = netdev_priv(dev); - const struct fec_platform_info *fpi = fep->fpi; - - if (!fpi->use_mdio || fep->phy == NULL) - return; - - if (fep->phy->startup == NULL) - return; - - (*fep->phy->startup) (dev); -} - -void fec_mii_shutdown(struct net_device *dev) -{ - struct fec_enet_private *fep = netdev_priv(dev); - const struct fec_platform_info *fpi = fep->fpi; - - if (!fpi->use_mdio || fep->phy == NULL) - return; - - if (fep->phy->shutdown == NULL) - return; - - (*fep->phy->shutdown) (dev); -} - -void fec_mii_ack_int(struct net_device *dev) -{ - struct fec_enet_private *fep = netdev_priv(dev); - const struct fec_platform_info *fpi = fep->fpi; - - if (!fpi->use_mdio || fep->phy == NULL) - return; - - if (fep->phy->ack_int == NULL) - return; - - (*fep->phy->ack_int) (dev); -} - -/* helper function */ -static int mii_negotiated(struct mii_if_info *mii) -{ - int advert, lpa, val; - - if (!mii_link_ok(mii)) - return 0; - - val = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_BMSR); - if ((val & BMSR_ANEGCOMPLETE) == 0) - return 0; - - advert = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_ADVERTISE); - lpa = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_LPA); - - return mii_nway_result(advert & lpa); -} - -void fec_mii_link_status_change_check(struct net_device *dev, int init_media) -{ - struct fec_enet_private *fep = netdev_priv(dev); - unsigned int media; - unsigned long flags; - - if (mii_check_media(&fep->mii_if, netif_msg_link(fep), init_media) == 0) - return; - - media = mii_negotiated(&fep->mii_if); - - if (netif_carrier_ok(dev)) { - spin_lock_irqsave(&fep->lock, flags); - fec_restart(dev, !!(media & ADVERTISE_FULL), - (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)) ? 
- 100 : 10); - spin_unlock_irqrestore(&fep->lock, flags); - - netif_start_queue(dev); - } else { - netif_stop_queue(dev); - - spin_lock_irqsave(&fep->lock, flags); - fec_stop(dev); - spin_unlock_irqrestore(&fep->lock, flags); - - } -} diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c index a5baaf59ff66..352574a3f056 100644 --- a/drivers/net/fs_enet/fs_enet-main.c +++ b/drivers/net/fs_enet/fs_enet-main.c @@ -43,7 +43,7 @@ #include <asm/uaccess.h> #ifdef CONFIG_PPC_CPM_NEW_BINDING -#include <asm/of_platform.h> +#include <linux/of_platform.h> #endif #include "fs_enet.h" diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c index d7ca31945c82..e3557eca7b6d 100644 --- a/drivers/net/fs_enet/mac-scc.c +++ b/drivers/net/fs_enet/mac-scc.c @@ -44,7 +44,7 @@ #endif #ifdef CONFIG_PPC_CPM_NEW_BINDING -#include <asm/of_platform.h> +#include <linux/of_platform.h> #endif #include "fs_enet.h" diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c index f0014cfbb275..8f6a43b0e0ff 100644 --- a/drivers/net/fs_enet/mii-fec.c +++ b/drivers/net/fs_enet/mii-fec.c @@ -37,7 +37,7 @@ #include <asm/uaccess.h> #ifdef CONFIG_PPC_CPM_NEW_BINDING -#include <asm/of_platform.h> +#include <linux/of_platform.h> #endif #include "fs_enet.h" diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c index babc79ad490b..61af02b4c9d8 100644 --- a/drivers/net/ibm_newemac/core.c +++ b/drivers/net/ibm_newemac/core.c @@ -363,25 +363,31 @@ static int emac_reset(struct emac_instance *dev) static void emac_hash_mc(struct emac_instance *dev) { - struct emac_regs __iomem *p = dev->emacp; - u16 gaht[4] = { 0 }; + const int regs = EMAC_XAHT_REGS(dev); + u32 *gaht_base = emac_gaht_base(dev); + u32 gaht_temp[regs]; struct dev_mc_list *dmi; + int i; DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count); + memset(gaht_temp, 0, sizeof (gaht_temp)); + for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) { - int bit; + int slot, reg, mask; DBG2(dev, "mc %02x:%02x:%02x:%02x:%02x:%02x" NL, dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2], dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]); - bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26); - gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f); + slot = EMAC_XAHT_CRC_TO_SLOT(dev, ether_crc(ETH_ALEN, dmi->dmi_addr)); + reg = EMAC_XAHT_SLOT_TO_REG(dev, slot); + mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot); + + gaht_temp[reg] |= mask; } - out_be32(&p->gaht1, gaht[0]); - out_be32(&p->gaht2, gaht[1]); - out_be32(&p->gaht3, gaht[2]); - out_be32(&p->gaht4, gaht[3]); + + for (i = 0; i < regs; i++) + out_be32(gaht_base + i, gaht_temp[i]); } static inline u32 emac_iff2rmr(struct net_device *ndev) @@ -398,7 +404,8 @@ static inline u32 emac_iff2rmr(struct net_device *ndev) if (ndev->flags & IFF_PROMISC) r |= EMAC_RMR_PME; - else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32) + else if (ndev->flags & IFF_ALLMULTI || + (ndev->mc_count > EMAC_XAHT_SLOTS(dev))) r |= EMAC_RMR_PMME; else if (ndev->mc_count > 0) r |= EMAC_RMR_MAE; @@ -542,7 +549,7 @@ static int emac_configure(struct emac_instance *dev) /* Put some arbitrary OUI, Manuf & Rev IDs so we can * identify this GPCS PHY later. 
*/ - out_be32(&p->ipcr, 0xdeadbeef); + out_be32(&p->u1.emac4.ipcr, 0xdeadbeef); } else mr1 |= EMAC_MR1_MF_1000; @@ -2021,10 +2028,10 @@ static int emac_get_regs_len(struct emac_instance *dev) { if (emac_has_feature(dev, EMAC_FTR_EMAC4)) return sizeof(struct emac_ethtool_regs_subhdr) + - EMAC4_ETHTOOL_REGS_SIZE; + EMAC4_ETHTOOL_REGS_SIZE(dev); else return sizeof(struct emac_ethtool_regs_subhdr) + - EMAC_ETHTOOL_REGS_SIZE; + EMAC_ETHTOOL_REGS_SIZE(dev); } static int emac_ethtool_get_regs_len(struct net_device *ndev) @@ -2051,12 +2058,12 @@ static void *emac_dump_regs(struct emac_instance *dev, void *buf) hdr->index = dev->cell_index; if (emac_has_feature(dev, EMAC_FTR_EMAC4)) { hdr->version = EMAC4_ETHTOOL_REGS_VER; - memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE); - return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE); + memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev)); + return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev)); } else { hdr->version = EMAC_ETHTOOL_REGS_VER; - memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE); - return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE); + memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev)); + return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev)); } } @@ -2546,7 +2553,9 @@ static int __devinit emac_init_config(struct emac_instance *dev) } /* Check EMAC version */ - if (of_device_is_compatible(np, "ibm,emac4")) { + if (of_device_is_compatible(np, "ibm,emac4sync")) { + dev->features |= (EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC); + } else if (of_device_is_compatible(np, "ibm,emac4")) { dev->features |= EMAC_FTR_EMAC4; if (of_device_is_compatible(np, "ibm,emac-440gx")) dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX; @@ -2607,6 +2616,15 @@ static int __devinit emac_init_config(struct emac_instance *dev) } memcpy(dev->ndev->dev_addr, p, 6); + /* IAHT and GAHT filter parameterization */ + if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) { + dev->xaht_slots_shift = EMAC4SYNC_XAHT_SLOTS_SHIFT; + dev->xaht_width_shift = EMAC4SYNC_XAHT_WIDTH_SHIFT; + } else { + dev->xaht_slots_shift = EMAC4_XAHT_SLOTS_SHIFT; + dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT; + } + DBG(dev, "features : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE); DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige); DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige); @@ -2678,7 +2696,8 @@ static int __devinit emac_probe(struct of_device *ofdev, goto err_irq_unmap; } // TODO : request_mem_region - dev->emacp = ioremap(dev->rsrc_regs.start, sizeof(struct emac_regs)); + dev->emacp = ioremap(dev->rsrc_regs.start, + dev->rsrc_regs.end - dev->rsrc_regs.start + 1); if (dev->emacp == NULL) { printk(KERN_ERR "%s: Can't map device registers!\n", np->full_name); @@ -2892,6 +2911,10 @@ static struct of_device_id emac_match[] = .type = "network", .compatible = "ibm,emac4", }, + { + .type = "network", + .compatible = "ibm,emac4sync", + }, {}, }; diff --git a/drivers/net/ibm_newemac/core.h b/drivers/net/ibm_newemac/core.h index 1683db9870a4..6545e69d12c3 100644 --- a/drivers/net/ibm_newemac/core.h +++ b/drivers/net/ibm_newemac/core.h @@ -33,8 +33,8 @@ #include <linux/netdevice.h> #include <linux/dma-mapping.h> #include <linux/spinlock.h> +#include <linux/of_platform.h> -#include <asm/of_platform.h> #include <asm/io.h> #include <asm/dcr.h> @@ -235,6 +235,10 @@ struct emac_instance { u32 fifo_entry_size; u32 mal_burst_size; /* move to MAL ? 
*/ + /* IAHT and GAHT filter parameterization */ + u32 xaht_slots_shift; + u32 xaht_width_shift; + /* Descriptor management */ struct mal_descriptor *tx_desc; @@ -309,6 +313,10 @@ struct emac_instance { * Set if we need phy clock workaround for 440ep or 440gr */ #define EMAC_FTR_440EP_PHY_CLK_FIX 0x00000100 +/* + * The 405EX and 460EX contain the EMAC4SYNC core + */ +#define EMAC_FTR_EMAC4SYNC 0x00000200 /* Right now, we don't quite handle the always/possible masks on the @@ -320,7 +328,8 @@ enum { EMAC_FTRS_POSSIBLE = #ifdef CONFIG_IBM_NEW_EMAC_EMAC4 - EMAC_FTR_EMAC4 | EMAC_FTR_HAS_NEW_STACR | + EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC | + EMAC_FTR_HAS_NEW_STACR | EMAC_FTR_STACR_OC_INVERT | EMAC_FTR_440GX_PHY_CLK_FIX | #endif #ifdef CONFIG_IBM_NEW_EMAC_TAH @@ -342,6 +351,71 @@ static inline int emac_has_feature(struct emac_instance *dev, (EMAC_FTRS_POSSIBLE & dev->features & feature); } +/* + * Various instances of the EMAC core have varying 1) number of + * address match slots, 2) width of the registers for handling address + * match slots, 3) number of registers for handling address match + * slots and 4) base offset for those registers. + * + * These macros and inlines handle these differences based on + * parameters supplied by the device structure which are, in turn, + * initialized based on the "compatible" entry in the device tree. + */ + +#define EMAC4_XAHT_SLOTS_SHIFT 6 +#define EMAC4_XAHT_WIDTH_SHIFT 4 + +#define EMAC4SYNC_XAHT_SLOTS_SHIFT 8 +#define EMAC4SYNC_XAHT_WIDTH_SHIFT 5 + +#define EMAC_XAHT_SLOTS(dev) (1 << (dev)->xaht_slots_shift) +#define EMAC_XAHT_WIDTH(dev) (1 << (dev)->xaht_width_shift) +#define EMAC_XAHT_REGS(dev) (1 << ((dev)->xaht_slots_shift - \ + (dev)->xaht_width_shift)) + +#define EMAC_XAHT_CRC_TO_SLOT(dev, crc) \ + ((EMAC_XAHT_SLOTS(dev) - 1) - \ + ((crc) >> ((sizeof (u32) * BITS_PER_BYTE) - \ + (dev)->xaht_slots_shift))) + +#define EMAC_XAHT_SLOT_TO_REG(dev, slot) \ + ((slot) >> (dev)->xaht_width_shift) + +#define EMAC_XAHT_SLOT_TO_MASK(dev, slot) \ + ((u32)(1 << (EMAC_XAHT_WIDTH(dev) - 1)) >> \ + ((slot) & (u32)(EMAC_XAHT_WIDTH(dev) - 1))) + +static inline u32 *emac_xaht_base(struct emac_instance *dev) +{ + struct emac_regs __iomem *p = dev->emacp; + int offset; + + /* The first IAHT entry always is the base of the block of + * IAHT and GAHT registers. + */ + if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) + offset = offsetof(struct emac_regs, u1.emac4sync.iaht1); + else + offset = offsetof(struct emac_regs, u0.emac4.iaht1); + + return ((u32 *)((ptrdiff_t)p + offset)); +} + +static inline u32 *emac_gaht_base(struct emac_instance *dev) +{ + /* GAHT registers always come after an identical number of + * IAHT registers. + */ + return (emac_xaht_base(dev) + EMAC_XAHT_REGS(dev)); +} + +static inline u32 *emac_iaht_base(struct emac_instance *dev) +{ + /* IAHT registers always come before an identical number of + * GAHT registers. + */ + return (emac_xaht_base(dev)); +} /* Ethtool get_regs complex data. 
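The XAHT macros added above parameterize the hash-table geometry instead of hard-coding four 16-bit GAHT registers. The stand-alone sketch below works the same arithmetic for both variants with an arbitrary CRC value, showing how EMAC4 (64 slots, 4 registers) and EMAC4SYNC (256 slots, 8 registers) map a CRC to a register and bit mask; the CRC value is made up and ether_crc() itself is not reproduced.

#include <stdio.h>
#include <stdint.h>

struct xaht_params {
	unsigned int slots_shift;
	unsigned int width_shift;
};

static void map_crc(const char *name, struct xaht_params p, uint32_t crc)
{
	unsigned int slots = 1u << p.slots_shift;
	unsigned int width = 1u << p.width_shift;
	unsigned int regs  = 1u << (p.slots_shift - p.width_shift);
	unsigned int slot  = (slots - 1) - (crc >> (32 - p.slots_shift));
	unsigned int reg   = slot >> p.width_shift;
	uint32_t mask      = (1u << (width - 1)) >> (slot & (width - 1));

	printf("%-9s: %3u slots in %u regs of %2u bits -> slot %3u = GAHT%u |= 0x%08x\n",
	       name, slots, regs, width, slot, reg + 1, (unsigned int)mask);
}

int main(void)
{
	uint32_t crc = 0x9ab65d5du;	/* arbitrary ether_crc() result */

	map_crc("EMAC4",     (struct xaht_params){ 6, 4 }, crc);
	map_crc("EMAC4SYNC", (struct xaht_params){ 8, 5 }, crc);
	return 0;
}

For the EMAC4 parameters this reproduces the old hard-coded behaviour (gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f)), which is why emac_hash_mc() can now drive both register layouts from the same loop.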
* We want to get not just EMAC registers, but also MAL, ZMII, RGMII, TAH @@ -366,4 +440,11 @@ struct emac_ethtool_regs_subhdr { u32 index; }; +#define EMAC_ETHTOOL_REGS_VER 0 +#define EMAC_ETHTOOL_REGS_SIZE(dev) ((dev)->rsrc_regs.end - \ + (dev)->rsrc_regs.start + 1) +#define EMAC4_ETHTOOL_REGS_VER 1 +#define EMAC4_ETHTOOL_REGS_SIZE(dev) ((dev)->rsrc_regs.end - \ + (dev)->rsrc_regs.start + 1) + #endif /* __IBM_NEWEMAC_CORE_H */ diff --git a/drivers/net/ibm_newemac/debug.c b/drivers/net/ibm_newemac/debug.c index 86b756a30784..775c850a425a 100644 --- a/drivers/net/ibm_newemac/debug.c +++ b/drivers/net/ibm_newemac/debug.c @@ -67,29 +67,55 @@ static void emac_desc_dump(struct emac_instance *p) static void emac_mac_dump(struct emac_instance *dev) { struct emac_regs __iomem *p = dev->emacp; + const int xaht_regs = EMAC_XAHT_REGS(dev); + u32 *gaht_base = emac_gaht_base(dev); + u32 *iaht_base = emac_iaht_base(dev); + int emac4sync = emac_has_feature(dev, EMAC_FTR_EMAC4SYNC); + int n; printk("** EMAC %s registers **\n" "MR0 = 0x%08x MR1 = 0x%08x TMR0 = 0x%08x TMR1 = 0x%08x\n" "RMR = 0x%08x ISR = 0x%08x ISER = 0x%08x\n" - "IAR = %04x%08x VTPID = 0x%04x VTCI = 0x%04x\n" - "IAHT: 0x%04x 0x%04x 0x%04x 0x%04x " - "GAHT: 0x%04x 0x%04x 0x%04x 0x%04x\n" - "LSA = %04x%08x IPGVR = 0x%04x\n" - "STACR = 0x%08x TRTR = 0x%08x RWMR = 0x%08x\n" - "OCTX = 0x%08x OCRX = 0x%08x IPCR = 0x%08x\n", + "IAR = %04x%08x VTPID = 0x%04x VTCI = 0x%04x\n", dev->ofdev->node->full_name, in_be32(&p->mr0), in_be32(&p->mr1), in_be32(&p->tmr0), in_be32(&p->tmr1), in_be32(&p->rmr), in_be32(&p->isr), in_be32(&p->iser), in_be32(&p->iahr), in_be32(&p->ialr), in_be32(&p->vtpid), - in_be32(&p->vtci), - in_be32(&p->iaht1), in_be32(&p->iaht2), in_be32(&p->iaht3), - in_be32(&p->iaht4), - in_be32(&p->gaht1), in_be32(&p->gaht2), in_be32(&p->gaht3), - in_be32(&p->gaht4), + in_be32(&p->vtci) + ); + + if (emac4sync) + printk("MAR = %04x%08x MMAR = %04x%08x\n", + in_be32(&p->u0.emac4sync.mahr), + in_be32(&p->u0.emac4sync.malr), + in_be32(&p->u0.emac4sync.mmahr), + in_be32(&p->u0.emac4sync.mmalr) + ); + + for (n = 0; n < xaht_regs; n++) + printk("IAHT%02d = 0x%08x\n", n + 1, in_be32(iaht_base + n)); + + for (n = 0; n < xaht_regs; n++) + printk("GAHT%02d = 0x%08x\n", n + 1, in_be32(gaht_base + n)); + + printk("LSA = %04x%08x IPGVR = 0x%04x\n" + "STACR = 0x%08x TRTR = 0x%08x RWMR = 0x%08x\n" + "OCTX = 0x%08x OCRX = 0x%08x\n", in_be32(&p->lsah), in_be32(&p->lsal), in_be32(&p->ipgvr), in_be32(&p->stacr), in_be32(&p->trtr), in_be32(&p->rwmr), - in_be32(&p->octx), in_be32(&p->ocrx), in_be32(&p->ipcr) - ); + in_be32(&p->octx), in_be32(&p->ocrx) + ); + + if (!emac4sync) { + printk("IPCR = 0x%08x\n", + in_be32(&p->u1.emac4.ipcr) + ); + } else { + printk("REVID = 0x%08x TPC = 0x%08x\n", + in_be32(&p->u1.emac4sync.revid), + in_be32(&p->u1.emac4sync.tpc) + ); + } emac_desc_dump(dev); } diff --git a/drivers/net/ibm_newemac/emac.h b/drivers/net/ibm_newemac/emac.h index 91cb096ab405..0afc2cf5c52b 100644 --- a/drivers/net/ibm_newemac/emac.h +++ b/drivers/net/ibm_newemac/emac.h @@ -27,37 +27,80 @@ #include <linux/types.h> -/* EMAC registers Write Access rules */ +/* EMAC registers Write Access rules */ struct emac_regs { - u32 mr0; /* special */ - u32 mr1; /* Reset */ - u32 tmr0; /* special */ - u32 tmr1; /* special */ - u32 rmr; /* Reset */ - u32 isr; /* Always */ - u32 iser; /* Reset */ - u32 iahr; /* Reset, R, T */ - u32 ialr; /* Reset, R, T */ - u32 vtpid; /* Reset, R, T */ - u32 vtci; /* Reset, R, T */ - u32 ptr; /* Reset, T */ - u32 iaht1; /* Reset, R */ - u32 
iaht2; /* Reset, R */ - u32 iaht3; /* Reset, R */ - u32 iaht4; /* Reset, R */ - u32 gaht1; /* Reset, R */ - u32 gaht2; /* Reset, R */ - u32 gaht3; /* Reset, R */ - u32 gaht4; /* Reset, R */ + /* Common registers across all EMAC implementations. */ + u32 mr0; /* Special */ + u32 mr1; /* Reset */ + u32 tmr0; /* Special */ + u32 tmr1; /* Special */ + u32 rmr; /* Reset */ + u32 isr; /* Always */ + u32 iser; /* Reset */ + u32 iahr; /* Reset, R, T */ + u32 ialr; /* Reset, R, T */ + u32 vtpid; /* Reset, R, T */ + u32 vtci; /* Reset, R, T */ + u32 ptr; /* Reset, T */ + union { + /* Registers unique to EMAC4 implementations */ + struct { + u32 iaht1; /* Reset, R */ + u32 iaht2; /* Reset, R */ + u32 iaht3; /* Reset, R */ + u32 iaht4; /* Reset, R */ + u32 gaht1; /* Reset, R */ + u32 gaht2; /* Reset, R */ + u32 gaht3; /* Reset, R */ + u32 gaht4; /* Reset, R */ + } emac4; + /* Registers unique to EMAC4SYNC implementations */ + struct { + u32 mahr; /* Reset, R, T */ + u32 malr; /* Reset, R, T */ + u32 mmahr; /* Reset, R, T */ + u32 mmalr; /* Reset, R, T */ + u32 rsvd0[4]; + } emac4sync; + } u0; + /* Common registers across all EMAC implementations. */ u32 lsah; u32 lsal; - u32 ipgvr; /* Reset, T */ - u32 stacr; /* special */ - u32 trtr; /* special */ - u32 rwmr; /* Reset */ + u32 ipgvr; /* Reset, T */ + u32 stacr; /* Special */ + u32 trtr; /* Special */ + u32 rwmr; /* Reset */ u32 octx; u32 ocrx; - u32 ipcr; + union { + /* Registers unique to EMAC4 implementations */ + struct { + u32 ipcr; + } emac4; + /* Registers unique to EMAC4SYNC implementations */ + struct { + u32 rsvd1; + u32 revid; + u32 rsvd2[2]; + u32 iaht1; /* Reset, R */ + u32 iaht2; /* Reset, R */ + u32 iaht3; /* Reset, R */ + u32 iaht4; /* Reset, R */ + u32 iaht5; /* Reset, R */ + u32 iaht6; /* Reset, R */ + u32 iaht7; /* Reset, R */ + u32 iaht8; /* Reset, R */ + u32 gaht1; /* Reset, R */ + u32 gaht2; /* Reset, R */ + u32 gaht3; /* Reset, R */ + u32 gaht4; /* Reset, R */ + u32 gaht5; /* Reset, R */ + u32 gaht6; /* Reset, R */ + u32 gaht7; /* Reset, R */ + u32 gaht8; /* Reset, R */ + u32 tpc; /* Reset, T */ + } emac4sync; + } u1; }; /* @@ -73,12 +116,6 @@ struct emac_regs { #define PHY_MODE_RTBI 7 #define PHY_MODE_SGMII 8 - -#define EMAC_ETHTOOL_REGS_VER 0 -#define EMAC_ETHTOOL_REGS_SIZE (sizeof(struct emac_regs) - sizeof(u32)) -#define EMAC4_ETHTOOL_REGS_VER 1 -#define EMAC4_ETHTOOL_REGS_SIZE sizeof(struct emac_regs) - /* EMACx_MR0 */ #define EMAC_MR0_RXI 0x80000000 #define EMAC_MR0_TXI 0x40000000 diff --git a/drivers/net/ibm_newemac/rgmii.c b/drivers/net/ibm_newemac/rgmii.c index e32da3de2695..1d5379de6900 100644 --- a/drivers/net/ibm_newemac/rgmii.c +++ b/drivers/net/ibm_newemac/rgmii.c @@ -39,6 +39,7 @@ #define RGMII_FER_RGMII(idx) (0x5 << ((idx) * 4)) #define RGMII_FER_TBI(idx) (0x6 << ((idx) * 4)) #define RGMII_FER_GMII(idx) (0x7 << ((idx) * 4)) +#define RGMII_FER_MII(idx) RGMII_FER_GMII(idx) /* RGMIIx_SSR */ #define RGMII_SSR_MASK(idx) (0x7 << ((idx) * 8)) @@ -49,6 +50,7 @@ static inline int rgmii_valid_mode(int phy_mode) { return phy_mode == PHY_MODE_GMII || + phy_mode == PHY_MODE_MII || phy_mode == PHY_MODE_RGMII || phy_mode == PHY_MODE_TBI || phy_mode == PHY_MODE_RTBI; @@ -63,6 +65,8 @@ static inline const char *rgmii_mode_name(int mode) return "TBI"; case PHY_MODE_GMII: return "GMII"; + case PHY_MODE_MII: + return "MII"; case PHY_MODE_RTBI: return "RTBI"; default: @@ -79,6 +83,8 @@ static inline u32 rgmii_mode_mask(int mode, int input) return RGMII_FER_TBI(input); case PHY_MODE_GMII: return RGMII_FER_GMII(input); + case 
PHY_MODE_MII: + return RGMII_FER_MII(input); case PHY_MODE_RTBI: return RGMII_FER_RTBI(input); default: diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index 665341e43055..387a13395015 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -585,8 +585,9 @@ static struct config_item_type netconsole_target_type = { * Group operations and type for netconsole_subsys. */ -static struct config_item *make_netconsole_target(struct config_group *group, - const char *name) +static int make_netconsole_target(struct config_group *group, + const char *name, + struct config_item **new_item) { unsigned long flags; struct netconsole_target *nt; @@ -598,7 +599,7 @@ static struct config_item *make_netconsole_target(struct config_group *group, nt = kzalloc(sizeof(*nt), GFP_KERNEL); if (!nt) { printk(KERN_ERR "netconsole: failed to allocate memory\n"); - return NULL; + return -ENOMEM; } nt->np.name = "netconsole"; @@ -615,7 +616,8 @@ static struct config_item *make_netconsole_target(struct config_group *group, list_add(&nt->list, &target_list); spin_unlock_irqrestore(&target_list_lock, flags); - return &nt->item; + *new_item = &nt->item; + return 0; } static void drop_netconsole_target(struct config_group *group, diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index fb0b918e5ccb..402e81020fb8 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c @@ -28,8 +28,8 @@ #include <linux/mii.h> #include <linux/phy.h> #include <linux/workqueue.h> +#include <linux/of_platform.h> -#include <asm/of_platform.h> #include <asm/uaccess.h> #include <asm/irq.h> #include <asm/io.h> diff --git a/drivers/net/ucc_geth_mii.c b/drivers/net/ucc_geth_mii.c index 940474736922..6d9e7ad9fda9 100644 --- a/drivers/net/ucc_geth_mii.c +++ b/drivers/net/ucc_geth_mii.c @@ -36,8 +36,8 @@ #include <linux/mii.h> #include <linux/phy.h> #include <linux/fsl_devices.h> +#include <linux/of_platform.h> -#include <asm/of_platform.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/uaccess.h> diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c index 3dd537be87d8..b54e2ea8346b 100644 --- a/drivers/net/wireless/libertas/if_sdio.c +++ b/drivers/net/wireless/libertas/if_sdio.c @@ -1,7 +1,7 @@ /* * linux/drivers/net/wireless/libertas/if_sdio.c * - * Copyright 2007 Pierre Ossman + * Copyright 2007-2008 Pierre Ossman * * Inspired by if_cs.c, Copyright 2007 Holger Schurig * @@ -266,13 +266,10 @@ static int if_sdio_card_to_host(struct if_sdio_card *card) /* * The transfer must be in one transaction or the firmware - * goes suicidal. + * goes suicidal. There's no way to guarantee that for all + * controllers, but we can at least try. */ - chunk = size; - if ((chunk > card->func->cur_blksize) || (chunk > 512)) { - chunk = (chunk + card->func->cur_blksize - 1) / - card->func->cur_blksize * card->func->cur_blksize; - } + chunk = sdio_align_size(card->func, size); ret = sdio_readsb(card->func, card->buffer, card->ioport, chunk); if (ret) @@ -696,13 +693,10 @@ static int if_sdio_host_to_card(struct lbs_private *priv, /* * The transfer must be in one transaction or the firmware - * goes suicidal. + * goes suicidal. There's no way to guarantee that for all + * controllers, but we can at least try. 
*/ - size = nb + 4; - if ((size > card->func->cur_blksize) || (size > 512)) { - size = (size + card->func->cur_blksize - 1) / - card->func->cur_blksize * card->func->cur_blksize; - } + size = sdio_align_size(card->func, nb + 4); packet = kzalloc(sizeof(struct if_sdio_packet) + size, GFP_ATOMIC); diff --git a/drivers/of/device.c b/drivers/of/device.c index 29681c4b700b..8a1d93a2bb81 100644 --- a/drivers/of/device.c +++ b/drivers/of/device.c @@ -48,16 +48,32 @@ void of_dev_put(struct of_device *dev) } EXPORT_SYMBOL(of_dev_put); -static ssize_t dev_show_devspec(struct device *dev, +static ssize_t devspec_show(struct device *dev, struct device_attribute *attr, char *buf) { struct of_device *ofdev; ofdev = to_of_device(dev); - return sprintf(buf, "%s", ofdev->node->full_name); + return sprintf(buf, "%s\n", ofdev->node->full_name); } -static DEVICE_ATTR(devspec, S_IRUGO, dev_show_devspec, NULL); +static ssize_t modalias_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct of_device *ofdev = to_of_device(dev); + ssize_t len = 0; + + len = of_device_get_modalias(ofdev, buf, PAGE_SIZE - 2); + buf[len] = '\n'; + buf[len+1] = 0; + return len+1; +} + +struct device_attribute of_platform_device_attrs[] = { + __ATTR_RO(devspec), + __ATTR_RO(modalias), + __ATTR_NULL +}; /** * of_release_dev - free an of device structure when all users of it are finished. @@ -78,25 +94,61 @@ EXPORT_SYMBOL(of_release_dev); int of_device_register(struct of_device *ofdev) { - int rc; - BUG_ON(ofdev->node == NULL); - - rc = device_register(&ofdev->dev); - if (rc) - return rc; - - rc = device_create_file(&ofdev->dev, &dev_attr_devspec); - if (rc) - device_unregister(&ofdev->dev); - - return rc; + return device_register(&ofdev->dev); } EXPORT_SYMBOL(of_device_register); void of_device_unregister(struct of_device *ofdev) { - device_remove_file(&ofdev->dev, &dev_attr_devspec); device_unregister(&ofdev->dev); } EXPORT_SYMBOL(of_device_unregister); + +ssize_t of_device_get_modalias(struct of_device *ofdev, + char *str, ssize_t len) +{ + const char *compat; + int cplen, i; + ssize_t tsize, csize, repend; + + /* Name & Type */ + csize = snprintf(str, len, "of:N%sT%s", + ofdev->node->name, ofdev->node->type); + + /* Get compatible property if any */ + compat = of_get_property(ofdev->node, "compatible", &cplen); + if (!compat) + return csize; + + /* Find true end (we tolerate multiple \0 at the end */ + for (i = (cplen - 1); i >= 0 && !compat[i]; i--) + cplen--; + if (!cplen) + return csize; + cplen++; + + /* Check space (need cplen+1 chars including final \0) */ + tsize = csize + cplen; + repend = tsize; + + if (csize >= len) /* @ the limit, all is already filled */ + return tsize; + + if (tsize >= len) { /* limit compat list */ + cplen = len - csize - 1; + repend = len; + } + + /* Copy and do char replacement */ + memcpy(&str[csize + 1], compat, cplen); + for (i = csize; i < repend; i++) { + char c = str[i]; + if (c == '\0') + str[i] = 'C'; + else if (c == ' ') + str[i] = '_'; + } + + return tsize; +} diff --git a/drivers/of/gpio.c b/drivers/of/gpio.c index 000681e98f2c..1c9cab844f10 100644 --- a/drivers/of/gpio.c +++ b/drivers/of/gpio.c @@ -137,38 +137,6 @@ int of_gpio_simple_xlate(struct of_gpio_chip *of_gc, struct device_node *np, } EXPORT_SYMBOL(of_gpio_simple_xlate); -/* Should be sufficient for now, later we'll use dynamic bases. 
*/ -#if defined(CONFIG_PPC32) || defined(CONFIG_SPARC32) -#define GPIOS_PER_CHIP 32 -#else -#define GPIOS_PER_CHIP 64 -#endif - -static int of_get_gpiochip_base(struct device_node *np) -{ - struct device_node *gc = NULL; - int gpiochip_base = 0; - - while ((gc = of_find_all_nodes(gc))) { - if (!of_get_property(gc, "gpio-controller", NULL)) - continue; - - if (gc != np) { - gpiochip_base += GPIOS_PER_CHIP; - continue; - } - - of_node_put(gc); - - if (gpiochip_base >= ARCH_NR_GPIOS) - return -ENOSPC; - - return gpiochip_base; - } - - return -ENOENT; -} - /** * of_mm_gpiochip_add - Add memory mapped GPIO chip (bank) * @np: device node of the GPIO chip @@ -205,11 +173,7 @@ int of_mm_gpiochip_add(struct device_node *np, if (!mm_gc->regs) goto err1; - gc->base = of_get_gpiochip_base(np); - if (gc->base < 0) { - ret = gc->base; - goto err1; - } + gc->base = -1; if (!of_gc->xlate) of_gc->xlate = of_gpio_simple_xlate; diff --git a/drivers/of/of_i2c.c b/drivers/of/of_i2c.c index b2ccdcbeb896..5c015d310d4a 100644 --- a/drivers/of/of_i2c.c +++ b/drivers/of/of_i2c.c @@ -13,6 +13,7 @@ #include <linux/i2c.h> #include <linux/of.h> +#include <linux/of_i2c.h> #include <linux/module.h> struct i2c_driver_device { diff --git a/drivers/of/platform.c b/drivers/of/platform.c index ca09a63a64db..298de0f95d70 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c @@ -17,6 +17,8 @@ #include <linux/of_device.h> #include <linux/of_platform.h> +extern struct device_attribute of_platform_device_attrs[]; + static int of_platform_bus_match(struct device *dev, struct device_driver *drv) { struct of_device *of_dev = to_of_device(dev); @@ -103,6 +105,7 @@ int of_bus_type_init(struct bus_type *bus, const char *name) bus->suspend = of_platform_device_suspend; bus->resume = of_platform_device_resume; bus->shutdown = of_platform_device_shutdown; + bus->dev_attrs = of_platform_device_attrs; return bus_register(bus); } diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile index 4d1ce2e7361e..7d63f8ced24b 100644 --- a/drivers/pci/Makefile +++ b/drivers/pci/Makefile @@ -2,7 +2,7 @@ # Makefile for the PCI bus specific drivers. # -obj-y += access.o bus.o probe.o remove.o pci.o quirks.o \ +obj-y += access.o bus.o probe.o remove.o pci.o quirks.o slot.o \ pci-driver.o search.o pci-sysfs.o rom.o setup-res.o obj-$(CONFIG_PROC_FS) += proc.o diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c index f8c187a763bd..93e37f0666ab 100644 --- a/drivers/pci/hotplug/acpi_pcihp.c +++ b/drivers/pci/hotplug/acpi_pcihp.c @@ -30,6 +30,7 @@ #include <linux/types.h> #include <linux/pci.h> #include <linux/pci_hotplug.h> +#include <linux/pci-acpi.h> #include <acpi/acpi.h> #include <acpi/acpi_bus.h> #include <acpi/actypes.h> @@ -299,7 +300,7 @@ free_and_return: * * @handle - the handle of the hotplug controller. */ -acpi_status acpi_run_oshp(acpi_handle handle) +static acpi_status acpi_run_oshp(acpi_handle handle) { acpi_status status; struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; @@ -322,9 +323,6 @@ acpi_status acpi_run_oshp(acpi_handle handle) kfree(string.pointer); return status; } -EXPORT_SYMBOL_GPL(acpi_run_oshp); - - /* acpi_get_hp_params_from_firmware * @@ -374,6 +372,85 @@ acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus, } EXPORT_SYMBOL_GPL(acpi_get_hp_params_from_firmware); +/** + * acpi_get_hp_hw_control_from_firmware + * @dev: the pci_dev of the bridge that has a hotplug controller + * @flags: requested control bits for _OSC + * + * Attempt to take hotplug control from firmware. 
+ */ +int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags) +{ + acpi_status status; + acpi_handle chandle, handle = DEVICE_ACPI_HANDLE(&(dev->dev)); + struct pci_dev *pdev = dev; + struct pci_bus *parent; + struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; + + flags &= (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | + OSC_SHPC_NATIVE_HP_CONTROL | + OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); + if (!flags) { + err("Invalid flags %u specified!\n", flags); + return -EINVAL; + } + + /* + * Per PCI firmware specification, we should run the ACPI _OSC + * method to get control of hotplug hardware before using it. If + * an _OSC is missing, we look for an OSHP to do the same thing. + * To handle different BIOS behavior, we look for _OSC and OSHP + * within the scope of the hotplug controller and its parents, + * upto the host bridge under which this controller exists. + */ + while (!handle) { + /* + * This hotplug controller was not listed in the ACPI name + * space at all. Try to get acpi handle of parent pci bus. + */ + if (!pdev || !pdev->bus->parent) + break; + parent = pdev->bus->parent; + dbg("Could not find %s in acpi namespace, trying parent\n", + pci_name(pdev)); + if (!parent->self) + /* Parent must be a host bridge */ + handle = acpi_get_pci_rootbridge_handle( + pci_domain_nr(parent), + parent->number); + else + handle = DEVICE_ACPI_HANDLE(&(parent->self->dev)); + pdev = parent->self; + } + + while (handle) { + acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); + dbg("Trying to get hotplug control for %s \n", + (char *)string.pointer); + status = pci_osc_control_set(handle, flags); + if (status == AE_NOT_FOUND) + status = acpi_run_oshp(handle); + if (ACPI_SUCCESS(status)) { + dbg("Gained control for hotplug HW for pci %s (%s)\n", + pci_name(dev), (char *)string.pointer); + kfree(string.pointer); + return 0; + } + if (acpi_root_bridge(handle)) + break; + chandle = handle; + status = acpi_get_parent(chandle, &handle); + if (ACPI_FAILURE(status)) + break; + } + + dbg("Cannot get control of hotplug hardware for pci %s\n", + pci_name(dev)); + + kfree(string.pointer); + return -ENODEV; +} +EXPORT_SYMBOL(acpi_get_hp_hw_control_from_firmware); /* acpi_root_bridge - check to see if this acpi object is a root bridge * diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h index 7a29164d4b32..eecf7cbf4139 100644 --- a/drivers/pci/hotplug/acpiphp.h +++ b/drivers/pci/hotplug/acpiphp.h @@ -215,7 +215,6 @@ extern u8 acpiphp_get_power_status (struct acpiphp_slot *slot); extern u8 acpiphp_get_attention_status (struct acpiphp_slot *slot); extern u8 acpiphp_get_latch_status (struct acpiphp_slot *slot); extern u8 acpiphp_get_adapter_status (struct acpiphp_slot *slot); -extern u32 acpiphp_get_address (struct acpiphp_slot *slot); /* variables */ extern int acpiphp_debug; diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c index 7af68ba27903..0e496e866a84 100644 --- a/drivers/pci/hotplug/acpiphp_core.c +++ b/drivers/pci/hotplug/acpiphp_core.c @@ -70,7 +70,6 @@ static int disable_slot (struct hotplug_slot *slot); static int set_attention_status (struct hotplug_slot *slot, u8 value); static int get_power_status (struct hotplug_slot *slot, u8 *value); static int get_attention_status (struct hotplug_slot *slot, u8 *value); -static int get_address (struct hotplug_slot *slot, u32 *value); static int get_latch_status (struct hotplug_slot *slot, u8 *value); static int get_adapter_status (struct hotplug_slot *slot, u8 *value); @@ -83,7 +82,6 @@ 
static struct hotplug_slot_ops acpi_hotplug_slot_ops = { .get_attention_status = get_attention_status, .get_latch_status = get_latch_status, .get_adapter_status = get_adapter_status, - .get_address = get_address, }; @@ -274,23 +272,6 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) return 0; } - -/** - * get_address - get pci address of a slot - * @hotplug_slot: slot to get status - * @value: pointer to struct pci_busdev (seg, bus, dev) - */ -static int get_address(struct hotplug_slot *hotplug_slot, u32 *value) -{ - struct slot *slot = hotplug_slot->private; - - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); - - *value = acpiphp_get_address(slot->acpi_slot); - - return 0; -} - static int __init init_acpi(void) { int retval; @@ -357,7 +338,11 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot) acpiphp_slot->slot = slot; snprintf(slot->name, sizeof(slot->name), "%u", slot->acpi_slot->sun); - retval = pci_hp_register(slot->hotplug_slot); + retval = pci_hp_register(slot->hotplug_slot, + acpiphp_slot->bridge->pci_bus, + acpiphp_slot->device); + if (retval == -EBUSY) + goto error_hpslot; if (retval) { err("pci_hp_register failed with error %d\n", retval); goto error_hpslot; diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index 91156f85a926..a3e4705dd8f0 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -258,7 +258,12 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) bridge->pci_bus->number, slot->device); retval = acpiphp_register_hotplug_slot(slot); if (retval) { - warn("acpiphp_register_hotplug_slot failed(err code = 0x%x)\n", retval); + if (retval == -EBUSY) + warn("Slot %d already registered by another " + "hotplug driver\n", slot->sun); + else + warn("acpiphp_register_hotplug_slot failed " + "(err code = 0x%x)\n", retval); goto err_exit; } } @@ -1878,19 +1883,3 @@ u8 acpiphp_get_adapter_status(struct acpiphp_slot *slot) return (sta == 0) ? 
0 : 1; } - - -/* - * pci address (seg/bus/dev) - */ -u32 acpiphp_get_address(struct acpiphp_slot *slot) -{ - u32 address; - struct pci_bus *pci_bus = slot->bridge->pci_bus; - - address = (pci_domain_nr(pci_bus) << 16) | - (pci_bus->number << 8) | - slot->device; - - return address; -} diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c index ede9051fdb5d..2b7c45e39370 100644 --- a/drivers/pci/hotplug/acpiphp_ibm.c +++ b/drivers/pci/hotplug/acpiphp_ibm.c @@ -33,8 +33,10 @@ #include <linux/kobject.h> #include <asm/uaccess.h> #include <linux/moduleparam.h> +#include <linux/pci.h> #include "acpiphp.h" +#include "../pci.h" #define DRIVER_VERSION "1.0.1" #define DRIVER_AUTHOR "Irene Zubarev <zubarev@us.ibm.com>, Vernon Mauery <vernux@us.ibm.com>" @@ -430,7 +432,7 @@ static int __init ibm_acpiphp_init(void) int retval = 0; acpi_status status; struct acpi_device *device; - struct kobject *sysdir = &pci_hotplug_slots_kset->kobj; + struct kobject *sysdir = &pci_slots_kset->kobj; dbg("%s\n", __func__); @@ -477,7 +479,7 @@ init_return: static void __exit ibm_acpiphp_exit(void) { acpi_status status; - struct kobject *sysdir = &pci_hotplug_slots_kset->kobj; + struct kobject *sysdir = &pci_slots_kset->kobj; dbg("%s\n", __func__); diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c index d8a6b80ab42a..935947991dc9 100644 --- a/drivers/pci/hotplug/cpci_hotplug_core.c +++ b/drivers/pci/hotplug/cpci_hotplug_core.c @@ -285,7 +285,7 @@ cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last) info->attention_status = cpci_get_attention_status(slot); dbg("registering slot %s", slot->hotplug_slot->name); - status = pci_hp_register(slot->hotplug_slot); + status = pci_hp_register(slot->hotplug_slot, bus, i); if (status) { err("pci_hp_register failed with error %d", status); goto error_name; diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c index 36b115b27b0b..54defec51d08 100644 --- a/drivers/pci/hotplug/cpqphp_core.c +++ b/drivers/pci/hotplug/cpqphp_core.c @@ -434,7 +434,9 @@ static int ctrl_slot_setup(struct controller *ctrl, slot->bus, slot->device, slot->number, ctrl->slot_device_offset, slot_number); - result = pci_hp_register(hotplug_slot); + result = pci_hp_register(hotplug_slot, + ctrl->pci_dev->subordinate, + slot->device); if (result) { err("pci_hp_register failed with error %d\n", result); goto error_name; diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c index 7e9a827c2687..40337a06c18a 100644 --- a/drivers/pci/hotplug/fakephp.c +++ b/drivers/pci/hotplug/fakephp.c @@ -66,6 +66,7 @@ struct dummy_slot { struct pci_dev *dev; struct work_struct remove_work; unsigned long removed; + char name[8]; }; static int debug; @@ -100,6 +101,7 @@ static int add_slot(struct pci_dev *dev) struct dummy_slot *dslot; struct hotplug_slot *slot; int retval = -ENOMEM; + static int count = 1; slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL); if (!slot) @@ -113,18 +115,18 @@ static int add_slot(struct pci_dev *dev) slot->info->max_bus_speed = PCI_SPEED_UNKNOWN; slot->info->cur_bus_speed = PCI_SPEED_UNKNOWN; - slot->name = &dev->dev.bus_id[0]; - dbg("slot->name = %s\n", slot->name); - dslot = kzalloc(sizeof(struct dummy_slot), GFP_KERNEL); if (!dslot) goto error_info; + slot->name = dslot->name; + snprintf(slot->name, sizeof(dslot->name), "fake%d", count++); + dbg("slot->name = %s\n", slot->name); slot->ops = &dummy_hotplug_slot_ops; slot->release = &dummy_release; slot->private = dslot; 
- retval = pci_hp_register(slot); + retval = pci_hp_register(slot, dev->bus, PCI_SLOT(dev->devfn)); if (retval) { err("pci_hp_register failed with error %d\n", retval); goto error_dslot; @@ -148,17 +150,17 @@ error: static int __init pci_scan_buses(void) { struct pci_dev *dev = NULL; - int retval = 0; + int lastslot = 0; while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { - retval = add_slot(dev); - if (retval) { - pci_dev_put(dev); - break; - } + if (PCI_FUNC(dev->devfn) > 0 && + lastslot == PCI_SLOT(dev->devfn)) + continue; + lastslot = PCI_SLOT(dev->devfn); + add_slot(dev); } - return retval; + return 0; } static void remove_slot(struct dummy_slot *dslot) @@ -296,23 +298,9 @@ static int enable_slot(struct hotplug_slot *hotplug_slot) return 0; } -/* find the hotplug_slot for the pci_dev */ -static struct hotplug_slot *get_slot_from_dev(struct pci_dev *dev) -{ - struct dummy_slot *dslot; - - list_for_each_entry(dslot, &slot_list, node) { - if (dslot->dev == dev) - return dslot->slot; - } - return NULL; -} - - static int disable_slot(struct hotplug_slot *slot) { struct dummy_slot *dslot; - struct hotplug_slot *hslot; struct pci_dev *dev; int func; @@ -322,41 +310,27 @@ static int disable_slot(struct hotplug_slot *slot) dbg("%s - physical_slot = %s\n", __func__, slot->name); - /* don't disable bridged devices just yet, we can't handle them easily... */ - if (dslot->dev->subordinate) { - err("Can't remove PCI devices with other PCI devices behind it yet.\n"); - return -ENODEV; - } - if (test_and_set_bit(0, &dslot->removed)) { - dbg("Slot already scheduled for removal\n"); - return -ENODEV; - } - /* search for subfunctions and disable them first */ - if (!(dslot->dev->devfn & 7)) { - for (func = 1; func < 8; func++) { - dev = pci_get_slot(dslot->dev->bus, - dslot->dev->devfn + func); - if (dev) { - hslot = get_slot_from_dev(dev); - if (hslot) - disable_slot(hslot); - else { - err("Hotplug slot not found for subfunction of PCI device\n"); - return -ENODEV; - } - pci_dev_put(dev); - } else - dbg("No device in slot found\n"); + for (func = 7; func >= 0; func--) { + dev = pci_get_slot(dslot->dev->bus, dslot->dev->devfn + func); + if (!dev) + continue; + + if (test_and_set_bit(0, &dslot->removed)) { + dbg("Slot already scheduled for removal\n"); + return -ENODEV; } - } - /* remove the device from the pci core */ - pci_remove_bus_device(dslot->dev); + /* queue work item to blow away this sysfs entry and other + * parts. + */ + INIT_WORK(&dslot->remove_work, remove_slot_worker); + queue_work(dummyphp_wq, &dslot->remove_work); - /* queue work item to blow away this sysfs entry and other parts. */ - INIT_WORK(&dslot->remove_work, remove_slot_worker); - queue_work(dummyphp_wq, &dslot->remove_work); + /* blow away this sysfs entry and other parts. 
*/ + remove_slot(dslot); + pci_dev_put(dev); + } return 0; } diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c index dca7efc14be2..8467d0287325 100644 --- a/drivers/pci/hotplug/ibmphp_ebda.c +++ b/drivers/pci/hotplug/ibmphp_ebda.c @@ -1001,7 +1001,8 @@ static int __init ebda_rsrc_controller (void) tmp_slot = list_entry (list, struct slot, ibm_slot_list); snprintf (tmp_slot->hotplug_slot->name, 30, "%s", create_file_name (tmp_slot)); - pci_hp_register (tmp_slot->hotplug_slot); + pci_hp_register(tmp_slot->hotplug_slot, + pci_find_bus(0, tmp_slot->bus), tmp_slot->device); } print_ebda_hpc (); diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c index a11021e8ce37..5f85b1b120e3 100644 --- a/drivers/pci/hotplug/pci_hotplug_core.c +++ b/drivers/pci/hotplug/pci_hotplug_core.c @@ -40,6 +40,7 @@ #include <linux/pci.h> #include <linux/pci_hotplug.h> #include <asm/uaccess.h> +#include "../pci.h" #define MY_NAME "pci_hotplug" @@ -60,41 +61,7 @@ static int debug; ////////////////////////////////////////////////////////////////// static LIST_HEAD(pci_hotplug_slot_list); - -struct kset *pci_hotplug_slots_kset; - -static ssize_t hotplug_slot_attr_show(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - struct hotplug_slot *slot = to_hotplug_slot(kobj); - struct hotplug_slot_attribute *attribute = to_hotplug_attr(attr); - return attribute->show ? attribute->show(slot, buf) : -EIO; -} - -static ssize_t hotplug_slot_attr_store(struct kobject *kobj, - struct attribute *attr, const char *buf, size_t len) -{ - struct hotplug_slot *slot = to_hotplug_slot(kobj); - struct hotplug_slot_attribute *attribute = to_hotplug_attr(attr); - return attribute->store ? attribute->store(slot, buf, len) : -EIO; -} - -static struct sysfs_ops hotplug_slot_sysfs_ops = { - .show = hotplug_slot_attr_show, - .store = hotplug_slot_attr_store, -}; - -static void hotplug_slot_release(struct kobject *kobj) -{ - struct hotplug_slot *slot = to_hotplug_slot(kobj); - if (slot->release) - slot->release(slot); -} - -static struct kobj_type hotplug_slot_ktype = { - .sysfs_ops = &hotplug_slot_sysfs_ops, - .release = &hotplug_slot_release, -}; +static DEFINE_SPINLOCK(pci_hotplug_slot_list_lock); /* these strings match up with the values in pci_bus_speed */ static char *pci_bus_speed_strings[] = { @@ -149,16 +116,15 @@ GET_STATUS(power_status, u8) GET_STATUS(attention_status, u8) GET_STATUS(latch_status, u8) GET_STATUS(adapter_status, u8) -GET_STATUS(address, u32) GET_STATUS(max_bus_speed, enum pci_bus_speed) GET_STATUS(cur_bus_speed, enum pci_bus_speed) -static ssize_t power_read_file (struct hotplug_slot *slot, char *buf) +static ssize_t power_read_file(struct pci_slot *slot, char *buf) { int retval; u8 value; - retval = get_power_status (slot, &value); + retval = get_power_status(slot->hotplug, &value); if (retval) goto exit; retval = sprintf (buf, "%d\n", value); @@ -166,9 +132,10 @@ exit: return retval; } -static ssize_t power_write_file (struct hotplug_slot *slot, const char *buf, +static ssize_t power_write_file(struct pci_slot *pci_slot, const char *buf, size_t count) { + struct hotplug_slot *slot = pci_slot->hotplug; unsigned long lpower; u8 power; int retval = 0; @@ -204,29 +171,30 @@ exit: return count; } -static struct hotplug_slot_attribute hotplug_slot_attr_power = { +static struct pci_slot_attribute hotplug_slot_attr_power = { .attr = {.name = "power", .mode = S_IFREG | S_IRUGO | S_IWUSR}, .show = power_read_file, .store = power_write_file }; 
-static ssize_t attention_read_file (struct hotplug_slot *slot, char *buf) +static ssize_t attention_read_file(struct pci_slot *slot, char *buf) { int retval; u8 value; - retval = get_attention_status (slot, &value); + retval = get_attention_status(slot->hotplug, &value); if (retval) goto exit; - retval = sprintf (buf, "%d\n", value); + retval = sprintf(buf, "%d\n", value); exit: return retval; } -static ssize_t attention_write_file (struct hotplug_slot *slot, const char *buf, +static ssize_t attention_write_file(struct pci_slot *slot, const char *buf, size_t count) { + struct hotplug_slot_ops *ops = slot->hotplug->ops; unsigned long lattention; u8 attention; int retval = 0; @@ -235,13 +203,13 @@ static ssize_t attention_write_file (struct hotplug_slot *slot, const char *buf, attention = (u8)(lattention & 0xff); dbg (" - attention = %d\n", attention); - if (!try_module_get(slot->ops->owner)) { + if (!try_module_get(ops->owner)) { retval = -ENODEV; goto exit; } - if (slot->ops->set_attention_status) - retval = slot->ops->set_attention_status(slot, attention); - module_put(slot->ops->owner); + if (ops->set_attention_status) + retval = ops->set_attention_status(slot->hotplug, attention); + module_put(ops->owner); exit: if (retval) @@ -249,18 +217,18 @@ exit: return count; } -static struct hotplug_slot_attribute hotplug_slot_attr_attention = { +static struct pci_slot_attribute hotplug_slot_attr_attention = { .attr = {.name = "attention", .mode = S_IFREG | S_IRUGO | S_IWUSR}, .show = attention_read_file, .store = attention_write_file }; -static ssize_t latch_read_file (struct hotplug_slot *slot, char *buf) +static ssize_t latch_read_file(struct pci_slot *slot, char *buf) { int retval; u8 value; - retval = get_latch_status (slot, &value); + retval = get_latch_status(slot->hotplug, &value); if (retval) goto exit; retval = sprintf (buf, "%d\n", value); @@ -269,17 +237,17 @@ exit: return retval; } -static struct hotplug_slot_attribute hotplug_slot_attr_latch = { +static struct pci_slot_attribute hotplug_slot_attr_latch = { .attr = {.name = "latch", .mode = S_IFREG | S_IRUGO}, .show = latch_read_file, }; -static ssize_t presence_read_file (struct hotplug_slot *slot, char *buf) +static ssize_t presence_read_file(struct pci_slot *slot, char *buf) { int retval; u8 value; - retval = get_adapter_status (slot, &value); + retval = get_adapter_status(slot->hotplug, &value); if (retval) goto exit; retval = sprintf (buf, "%d\n", value); @@ -288,42 +256,20 @@ exit: return retval; } -static struct hotplug_slot_attribute hotplug_slot_attr_presence = { +static struct pci_slot_attribute hotplug_slot_attr_presence = { .attr = {.name = "adapter", .mode = S_IFREG | S_IRUGO}, .show = presence_read_file, }; -static ssize_t address_read_file (struct hotplug_slot *slot, char *buf) -{ - int retval; - u32 address; - - retval = get_address (slot, &address); - if (retval) - goto exit; - retval = sprintf (buf, "%04x:%02x:%02x\n", - (address >> 16) & 0xffff, - (address >> 8) & 0xff, - address & 0xff); - -exit: - return retval; -} - -static struct hotplug_slot_attribute hotplug_slot_attr_address = { - .attr = {.name = "address", .mode = S_IFREG | S_IRUGO}, - .show = address_read_file, -}; - static char *unknown_speed = "Unknown bus speed"; -static ssize_t max_bus_speed_read_file (struct hotplug_slot *slot, char *buf) +static ssize_t max_bus_speed_read_file(struct pci_slot *slot, char *buf) { char *speed_string; int retval; enum pci_bus_speed value; - retval = get_max_bus_speed (slot, &value); + retval = 
get_max_bus_speed(slot->hotplug, &value); if (retval) goto exit; @@ -338,18 +284,18 @@ exit: return retval; } -static struct hotplug_slot_attribute hotplug_slot_attr_max_bus_speed = { +static struct pci_slot_attribute hotplug_slot_attr_max_bus_speed = { .attr = {.name = "max_bus_speed", .mode = S_IFREG | S_IRUGO}, .show = max_bus_speed_read_file, }; -static ssize_t cur_bus_speed_read_file (struct hotplug_slot *slot, char *buf) +static ssize_t cur_bus_speed_read_file(struct pci_slot *slot, char *buf) { char *speed_string; int retval; enum pci_bus_speed value; - retval = get_cur_bus_speed (slot, &value); + retval = get_cur_bus_speed(slot->hotplug, &value); if (retval) goto exit; @@ -364,14 +310,15 @@ exit: return retval; } -static struct hotplug_slot_attribute hotplug_slot_attr_cur_bus_speed = { +static struct pci_slot_attribute hotplug_slot_attr_cur_bus_speed = { .attr = {.name = "cur_bus_speed", .mode = S_IFREG | S_IRUGO}, .show = cur_bus_speed_read_file, }; -static ssize_t test_write_file (struct hotplug_slot *slot, const char *buf, +static ssize_t test_write_file(struct pci_slot *pci_slot, const char *buf, size_t count) { + struct hotplug_slot *slot = pci_slot->hotplug; unsigned long ltest; u32 test; int retval = 0; @@ -394,13 +341,14 @@ exit: return count; } -static struct hotplug_slot_attribute hotplug_slot_attr_test = { +static struct pci_slot_attribute hotplug_slot_attr_test = { .attr = {.name = "test", .mode = S_IFREG | S_IRUGO | S_IWUSR}, .store = test_write_file }; -static int has_power_file (struct hotplug_slot *slot) +static int has_power_file(struct pci_slot *pci_slot) { + struct hotplug_slot *slot = pci_slot->hotplug; if ((!slot) || (!slot->ops)) return -ENODEV; if ((slot->ops->enable_slot) || @@ -410,8 +358,9 @@ static int has_power_file (struct hotplug_slot *slot) return -ENOENT; } -static int has_attention_file (struct hotplug_slot *slot) +static int has_attention_file(struct pci_slot *pci_slot) { + struct hotplug_slot *slot = pci_slot->hotplug; if ((!slot) || (!slot->ops)) return -ENODEV; if ((slot->ops->set_attention_status) || @@ -420,8 +369,9 @@ static int has_attention_file (struct hotplug_slot *slot) return -ENOENT; } -static int has_latch_file (struct hotplug_slot *slot) +static int has_latch_file(struct pci_slot *pci_slot) { + struct hotplug_slot *slot = pci_slot->hotplug; if ((!slot) || (!slot->ops)) return -ENODEV; if (slot->ops->get_latch_status) @@ -429,8 +379,9 @@ static int has_latch_file (struct hotplug_slot *slot) return -ENOENT; } -static int has_adapter_file (struct hotplug_slot *slot) +static int has_adapter_file(struct pci_slot *pci_slot) { + struct hotplug_slot *slot = pci_slot->hotplug; if ((!slot) || (!slot->ops)) return -ENODEV; if (slot->ops->get_adapter_status) @@ -438,17 +389,9 @@ static int has_adapter_file (struct hotplug_slot *slot) return -ENOENT; } -static int has_address_file (struct hotplug_slot *slot) -{ - if ((!slot) || (!slot->ops)) - return -ENODEV; - if (slot->ops->get_address) - return 0; - return -ENOENT; -} - -static int has_max_bus_speed_file (struct hotplug_slot *slot) +static int has_max_bus_speed_file(struct pci_slot *pci_slot) { + struct hotplug_slot *slot = pci_slot->hotplug; if ((!slot) || (!slot->ops)) return -ENODEV; if (slot->ops->get_max_bus_speed) @@ -456,8 +399,9 @@ static int has_max_bus_speed_file (struct hotplug_slot *slot) return -ENOENT; } -static int has_cur_bus_speed_file (struct hotplug_slot *slot) +static int has_cur_bus_speed_file(struct pci_slot *pci_slot) { + struct hotplug_slot *slot = pci_slot->hotplug; if 
((!slot) || (!slot->ops)) return -ENODEV; if (slot->ops->get_cur_bus_speed) @@ -465,8 +409,9 @@ static int has_cur_bus_speed_file (struct hotplug_slot *slot) return -ENOENT; } -static int has_test_file (struct hotplug_slot *slot) +static int has_test_file(struct pci_slot *pci_slot) { + struct hotplug_slot *slot = pci_slot->hotplug; if ((!slot) || (!slot->ops)) return -ENODEV; if (slot->ops->hardware_test) @@ -474,7 +419,7 @@ static int has_test_file (struct hotplug_slot *slot) return -ENOENT; } -static int fs_add_slot (struct hotplug_slot *slot) +static int fs_add_slot(struct pci_slot *slot) { int retval = 0; @@ -505,13 +450,6 @@ static int fs_add_slot (struct hotplug_slot *slot) goto exit_adapter; } - if (has_address_file(slot) == 0) { - retval = sysfs_create_file(&slot->kobj, - &hotplug_slot_attr_address.attr); - if (retval) - goto exit_address; - } - if (has_max_bus_speed_file(slot) == 0) { retval = sysfs_create_file(&slot->kobj, &hotplug_slot_attr_max_bus_speed.attr); @@ -544,10 +482,6 @@ exit_cur_speed: sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_max_bus_speed.attr); exit_max_speed: - if (has_address_file(slot) == 0) - sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_address.attr); - -exit_address: if (has_adapter_file(slot) == 0) sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_presence.attr); @@ -567,7 +501,7 @@ exit: return retval; } -static void fs_remove_slot (struct hotplug_slot *slot) +static void fs_remove_slot(struct pci_slot *slot) { if (has_power_file(slot) == 0) sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_power.attr); @@ -581,9 +515,6 @@ static void fs_remove_slot (struct hotplug_slot *slot) if (has_adapter_file(slot) == 0) sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_presence.attr); - if (has_address_file(slot) == 0) - sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_address.attr); - if (has_max_bus_speed_file(slot) == 0) sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_max_bus_speed.attr); @@ -599,27 +530,33 @@ static struct hotplug_slot *get_slot_from_name (const char *name) struct hotplug_slot *slot; struct list_head *tmp; + spin_lock(&pci_hotplug_slot_list_lock); list_for_each (tmp, &pci_hotplug_slot_list) { slot = list_entry (tmp, struct hotplug_slot, slot_list); if (strcmp(slot->name, name) == 0) - return slot; + goto out; } - return NULL; + slot = NULL; +out: + spin_unlock(&pci_hotplug_slot_list_lock); + return slot; } /** * pci_hp_register - register a hotplug_slot with the PCI hotplug subsystem + * @bus: bus this slot is on * @slot: pointer to the &struct hotplug_slot to register + * @slot_nr: slot number * * Registers a hotplug slot with the pci hotplug subsystem, which will allow * userspace interaction to the slot. * * Returns 0 if successful, anything else for an error. */ -int pci_hp_register (struct hotplug_slot *slot) +int pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus, int slot_nr) { int result; - struct hotplug_slot *tmp; + struct pci_slot *pci_slot; if (slot == NULL) return -ENODEV; @@ -632,57 +569,89 @@ int pci_hp_register (struct hotplug_slot *slot) } /* Check if we have already registered a slot with the same name. 
*/ - tmp = get_slot_from_name(slot->name); - if (tmp) + if (get_slot_from_name(slot->name)) return -EEXIST; - slot->kobj.kset = pci_hotplug_slots_kset; - result = kobject_init_and_add(&slot->kobj, &hotplug_slot_ktype, NULL, - "%s", slot->name); - if (result) { - err("Unable to register kobject '%s'", slot->name); - return -EINVAL; + /* + * No problems if we call this interface from both ACPI_PCI_SLOT + * driver and call it here again. If we've already created the + * pci_slot, the interface will simply bump the refcount. + */ + pci_slot = pci_create_slot(bus, slot_nr, slot->name); + if (IS_ERR(pci_slot)) + return PTR_ERR(pci_slot); + + if (pci_slot->hotplug) { + dbg("%s: already claimed\n", __func__); + pci_destroy_slot(pci_slot); + return -EBUSY; } - list_add (&slot->slot_list, &pci_hotplug_slot_list); + slot->pci_slot = pci_slot; + pci_slot->hotplug = slot; + + /* + * Allow pcihp drivers to override the ACPI_PCI_SLOT name. + */ + if (strcmp(kobject_name(&pci_slot->kobj), slot->name)) { + result = kobject_rename(&pci_slot->kobj, slot->name); + if (result) { + pci_destroy_slot(pci_slot); + return result; + } + } + + spin_lock(&pci_hotplug_slot_list_lock); + list_add(&slot->slot_list, &pci_hotplug_slot_list); + spin_unlock(&pci_hotplug_slot_list_lock); + + result = fs_add_slot(pci_slot); + kobject_uevent(&pci_slot->kobj, KOBJ_ADD); + dbg("Added slot %s to the list\n", slot->name); + - result = fs_add_slot (slot); - kobject_uevent(&slot->kobj, KOBJ_ADD); - dbg ("Added slot %s to the list\n", slot->name); return result; } /** * pci_hp_deregister - deregister a hotplug_slot with the PCI hotplug subsystem - * @slot: pointer to the &struct hotplug_slot to deregister + * @hotplug: pointer to the &struct hotplug_slot to deregister * * The @slot must have been registered with the pci hotplug subsystem * previously with a call to pci_hp_register(). * * Returns 0 if successful, anything else for an error. */ -int pci_hp_deregister (struct hotplug_slot *slot) +int pci_hp_deregister(struct hotplug_slot *hotplug) { struct hotplug_slot *temp; + struct pci_slot *slot; - if (slot == NULL) + if (!hotplug) return -ENODEV; - temp = get_slot_from_name (slot->name); - if (temp != slot) { + temp = get_slot_from_name(hotplug->name); + if (temp != hotplug) return -ENODEV; - } - list_del (&slot->slot_list); - fs_remove_slot (slot); - dbg ("Removed slot %s from the list\n", slot->name); - kobject_put(&slot->kobj); + spin_lock(&pci_hotplug_slot_list_lock); + list_del(&hotplug->slot_list); + spin_unlock(&pci_hotplug_slot_list_lock); + + slot = hotplug->pci_slot; + fs_remove_slot(slot); + dbg("Removed slot %s from the list\n", hotplug->name); + + hotplug->release(hotplug); + slot->hotplug = NULL; + pci_destroy_slot(slot); + return 0; } /** * pci_hp_change_slot_info - changes the slot's information structure in the core - * @slot: pointer to the slot whose info has changed + * @hotplug: pointer to the slot whose info has changed * @info: pointer to the info copy into the slot's info structure * * @slot must have been registered with the pci @@ -690,13 +659,15 @@ int pci_hp_deregister (struct hotplug_slot *slot) * * Returns 0 if successful, anything else for an error. 
*/ -int __must_check pci_hp_change_slot_info(struct hotplug_slot *slot, +int __must_check pci_hp_change_slot_info(struct hotplug_slot *hotplug, struct hotplug_slot_info *info) { - if ((slot == NULL) || (info == NULL)) + struct pci_slot *slot; + if (!hotplug || !info) return -ENODEV; + slot = hotplug->pci_slot; - memcpy (slot->info, info, sizeof (struct hotplug_slot_info)); + memcpy(hotplug->info, info, sizeof(struct hotplug_slot_info)); return 0; } @@ -704,36 +675,22 @@ int __must_check pci_hp_change_slot_info(struct hotplug_slot *slot, static int __init pci_hotplug_init (void) { int result; - struct kset *pci_bus_kset; - pci_bus_kset = bus_get_kset(&pci_bus_type); - - pci_hotplug_slots_kset = kset_create_and_add("slots", NULL, - &pci_bus_kset->kobj); - if (!pci_hotplug_slots_kset) { - result = -ENOMEM; - err("Register subsys error\n"); - goto exit; - } result = cpci_hotplug_init(debug); if (result) { err ("cpci_hotplug_init with error %d\n", result); - goto err_subsys; + goto err_cpci; } info (DRIVER_DESC " version: " DRIVER_VERSION "\n"); - goto exit; -err_subsys: - kset_unregister(pci_hotplug_slots_kset); -exit: +err_cpci: return result; } static void __exit pci_hotplug_exit (void) { cpci_hotplug_exit(); - kset_unregister(pci_hotplug_slots_kset); } module_init(pci_hotplug_init); @@ -745,7 +702,6 @@ MODULE_LICENSE("GPL"); module_param(debug, bool, 0644); MODULE_PARM_DESC(debug, "Debugging mode enabled or not"); -EXPORT_SYMBOL_GPL(pci_hotplug_slots_kset); EXPORT_SYMBOL_GPL(pci_hp_register); EXPORT_SYMBOL_GPL(pci_hp_deregister); EXPORT_SYMBOL_GPL(pci_hp_change_slot_info); diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index 79c9ddaad3fb..e3a1e7e7dba2 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h @@ -43,6 +43,7 @@ extern int pciehp_poll_mode; extern int pciehp_poll_time; extern int pciehp_debug; extern int pciehp_force; +extern int pciehp_slot_with_bus; extern struct workqueue_struct *pciehp_wq; #define dbg(format, arg...) 
\ @@ -96,7 +97,7 @@ struct controller { u32 slot_cap; u8 cap_base; struct timer_list poll_timer; - volatile int cmd_busy; + int cmd_busy; unsigned int no_cmd_complete:1; }; @@ -156,10 +157,10 @@ extern u8 pciehp_handle_power_fault(struct slot *p_slot); extern int pciehp_configure_device(struct slot *p_slot); extern int pciehp_unconfigure_device(struct slot *p_slot); extern void pciehp_queue_pushbutton_work(struct work_struct *work); -int pcie_init(struct controller *ctrl, struct pcie_device *dev); +struct controller *pcie_init(struct pcie_device *dev); int pciehp_enable_slot(struct slot *p_slot); int pciehp_disable_slot(struct slot *p_slot); -int pcie_init_hardware_part2(struct controller *ctrl, struct pcie_device *dev); +int pcie_enable_notification(struct controller *ctrl); static inline struct slot *pciehp_find_slot(struct controller *ctrl, u8 device) { @@ -202,8 +203,13 @@ struct hpc_ops { #include <acpi/actypes.h> #include <linux/pci-acpi.h> -#define pciehp_get_hp_hw_control_from_firmware(dev) \ - pciehp_acpi_get_hp_hw_control_from_firmware(dev) +static inline int pciehp_get_hp_hw_control_from_firmware(struct pci_dev *dev) +{ + u32 flags = (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | + OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); + return acpi_get_hp_hw_control_from_firmware(dev, flags); +} + static inline int pciehp_get_hp_params_from_firmware(struct pci_dev *dev, struct hotplug_params *hpp) { diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index 48a2ed378914..3677495c4f91 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c @@ -72,7 +72,6 @@ static int get_power_status (struct hotplug_slot *slot, u8 *value); static int get_attention_status (struct hotplug_slot *slot, u8 *value); static int get_latch_status (struct hotplug_slot *slot, u8 *value); static int get_adapter_status (struct hotplug_slot *slot, u8 *value); -static int get_address (struct hotplug_slot *slot, u32 *value); static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); @@ -85,7 +84,6 @@ static struct hotplug_slot_ops pciehp_hotplug_slot_ops = { .get_attention_status = get_attention_status, .get_latch_status = get_latch_status, .get_adapter_status = get_adapter_status, - .get_address = get_address, .get_max_bus_speed = get_max_bus_speed, .get_cur_bus_speed = get_cur_bus_speed, }; @@ -185,23 +183,10 @@ static struct hotplug_slot_attribute hotplug_slot_attr_lock = { */ static void release_slot(struct hotplug_slot *hotplug_slot) { - struct slot *slot = hotplug_slot->private; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); - kfree(slot->hotplug_slot->info); - kfree(slot->hotplug_slot); - kfree(slot); -} - -static void make_slot_name(struct slot *slot) -{ - if (pciehp_slot_with_bus) - snprintf(slot->hotplug_slot->name, SLOT_NAME_SIZE, "%04d_%04d", - slot->bus, slot->number); - else - snprintf(slot->hotplug_slot->name, SLOT_NAME_SIZE, "%d", - slot->number); + kfree(hotplug_slot->info); + kfree(hotplug_slot); } static int init_slots(struct controller *ctrl) @@ -210,49 +195,34 @@ static int init_slots(struct controller *ctrl) struct hotplug_slot *hotplug_slot; struct hotplug_slot_info *info; int retval = -ENOMEM; - int i; - - for (i = 0; i < ctrl->num_slots; i++) { - slot = kzalloc(sizeof(*slot), GFP_KERNEL); - if (!slot) - goto error; + list_for_each_entry(slot, &ctrl->slot_list, slot_list) { hotplug_slot = kzalloc(sizeof(*hotplug_slot), 
GFP_KERNEL); if (!hotplug_slot) - goto error_slot; - slot->hotplug_slot = hotplug_slot; + goto error; info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) goto error_hpslot; - hotplug_slot->info = info; - - hotplug_slot->name = slot->name; - - slot->hp_slot = i; - slot->ctrl = ctrl; - slot->bus = ctrl->pci_dev->subordinate->number; - slot->device = ctrl->slot_device_offset + i; - slot->hpc_ops = ctrl->hpc_ops; - slot->number = ctrl->first_slot; - mutex_init(&slot->lock); - INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work); /* register this slot with the hotplug pci core */ + hotplug_slot->info = info; + hotplug_slot->name = slot->name; hotplug_slot->private = slot; hotplug_slot->release = &release_slot; - make_slot_name(slot); hotplug_slot->ops = &pciehp_hotplug_slot_ops; - get_power_status(hotplug_slot, &info->power_status); get_attention_status(hotplug_slot, &info->attention_status); get_latch_status(hotplug_slot, &info->latch_status); get_adapter_status(hotplug_slot, &info->adapter_status); + slot->hotplug_slot = hotplug_slot; dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x " "slot_device_offset=%x\n", slot->bus, slot->device, slot->hp_slot, slot->number, ctrl->slot_device_offset); - retval = pci_hp_register(hotplug_slot); + retval = pci_hp_register(hotplug_slot, + ctrl->pci_dev->subordinate, + slot->device); if (retval) { err("pci_hp_register failed with error %d\n", retval); if (retval == -EEXIST) @@ -263,7 +233,7 @@ static int init_slots(struct controller *ctrl) } /* create additional sysfs entries */ if (EMI(ctrl)) { - retval = sysfs_create_file(&hotplug_slot->kobj, + retval = sysfs_create_file(&hotplug_slot->pci_slot->kobj, &hotplug_slot_attr_lock.attr); if (retval) { pci_hp_deregister(hotplug_slot); @@ -271,8 +241,6 @@ static int init_slots(struct controller *ctrl) goto error_info; } } - - list_add(&slot->slot_list, &ctrl->slot_list); } return 0; @@ -280,27 +248,18 @@ error_info: kfree(info); error_hpslot: kfree(hotplug_slot); -error_slot: - kfree(slot); error: return retval; } static void cleanup_slots(struct controller *ctrl) { - struct list_head *tmp; - struct list_head *next; struct slot *slot; - list_for_each_safe(tmp, next, &ctrl->slot_list) { - slot = list_entry(tmp, struct slot, slot_list); - list_del(&slot->slot_list); + list_for_each_entry(slot, &ctrl->slot_list, slot_list) { if (EMI(ctrl)) - sysfs_remove_file(&slot->hotplug_slot->kobj, + sysfs_remove_file(&slot->hotplug_slot->pci_slot->kobj, &hotplug_slot_attr_lock.attr); - cancel_delayed_work(&slot->work); - flush_scheduled_work(); - flush_workqueue(pciehp_wq); pci_hp_deregister(slot->hotplug_slot); } } @@ -398,19 +357,8 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) return 0; } -static int get_address(struct hotplug_slot *hotplug_slot, u32 *value) -{ - struct slot *slot = hotplug_slot->private; - struct pci_bus *bus = slot->ctrl->pci_dev->subordinate; - - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); - - *value = (pci_domain_nr(bus) << 16) | (slot->bus << 8) | slot->device; - - return 0; -} - -static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value) +static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, + enum pci_bus_speed *value) { struct slot *slot = hotplug_slot->private; int retval; @@ -444,34 +392,30 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_ struct controller *ctrl; struct slot *t_slot; u8 value; - struct pci_dev *pdev; + struct pci_dev *pdev = dev->port; - ctrl = 
kzalloc(sizeof(*ctrl), GFP_KERNEL); - if (!ctrl) { - err("%s : out of memory\n", __func__); + if (pciehp_force) + dbg("Bypassing BIOS check for pciehp use on %s\n", + pci_name(pdev)); + else if (pciehp_get_hp_hw_control_from_firmware(pdev)) goto err_out_none; - } - INIT_LIST_HEAD(&ctrl->slot_list); - - pdev = dev->port; - ctrl->pci_dev = pdev; - rc = pcie_init(ctrl, dev); - if (rc) { + ctrl = pcie_init(dev); + if (!ctrl) { dbg("%s: controller initialization failed\n", PCIE_MODULE_NAME); - goto err_out_free_ctrl; + goto err_out_none; } - - pci_set_drvdata(pdev, ctrl); - - dbg("%s: ctrl bus=0x%x, device=%x, function=%x, irq=%x\n", - __func__, pdev->bus->number, PCI_SLOT(pdev->devfn), - PCI_FUNC(pdev->devfn), pdev->irq); + set_service_data(dev, ctrl); /* Setup the slot information structures */ rc = init_slots(ctrl); if (rc) { - err("%s: slot initialization failed\n", PCIE_MODULE_NAME); + if (rc == -EBUSY) + warn("%s: slot already registered by another " + "hotplug driver\n", PCIE_MODULE_NAME); + else + err("%s: slot initialization failed\n", + PCIE_MODULE_NAME); goto err_out_release_ctlr; } @@ -495,20 +439,16 @@ err_out_free_ctrl_slot: cleanup_slots(ctrl); err_out_release_ctlr: ctrl->hpc_ops->release_ctlr(ctrl); -err_out_free_ctrl: - kfree(ctrl); err_out_none: return -ENODEV; } static void pciehp_remove (struct pcie_device *dev) { - struct pci_dev *pdev = dev->port; - struct controller *ctrl = pci_get_drvdata(pdev); + struct controller *ctrl = get_service_data(dev); cleanup_slots(ctrl); ctrl->hpc_ops->release_ctlr(ctrl); - kfree(ctrl); } #ifdef CONFIG_PM @@ -522,13 +462,12 @@ static int pciehp_resume (struct pcie_device *dev) { printk("%s ENTRY\n", __func__); if (pciehp_force) { - struct pci_dev *pdev = dev->port; - struct controller *ctrl = pci_get_drvdata(pdev); + struct controller *ctrl = get_service_data(dev); struct slot *t_slot; u8 status; /* reinitialize the chipset's event detection logic */ - pcie_init_hardware_part2(ctrl, dev); + pcie_enable_notification(ctrl); t_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset); diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 79f104963166..1323a43285d7 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -247,30 +247,32 @@ static inline void pciehp_free_irq(struct controller *ctrl) free_irq(ctrl->pci_dev->irq, ctrl); } -static inline int pcie_poll_cmd(struct controller *ctrl) +static int pcie_poll_cmd(struct controller *ctrl) { u16 slot_status; int timeout = 1000; - if (!pciehp_readw(ctrl, SLOTSTATUS, &slot_status)) - if (slot_status & CMD_COMPLETED) - goto completed; - for (timeout = 1000; timeout > 0; timeout -= 100) { - msleep(100); - if (!pciehp_readw(ctrl, SLOTSTATUS, &slot_status)) - if (slot_status & CMD_COMPLETED) - goto completed; + if (!pciehp_readw(ctrl, SLOTSTATUS, &slot_status)) { + if (slot_status & CMD_COMPLETED) { + pciehp_writew(ctrl, SLOTSTATUS, CMD_COMPLETED); + return 1; + } + } + while (timeout > 1000) { + msleep(10); + timeout -= 10; + if (!pciehp_readw(ctrl, SLOTSTATUS, &slot_status)) { + if (slot_status & CMD_COMPLETED) { + pciehp_writew(ctrl, SLOTSTATUS, CMD_COMPLETED); + return 1; + } + } } return 0; /* timeout */ - -completed: - pciehp_writew(ctrl, SLOTSTATUS, CMD_COMPLETED); - return timeout; } -static inline int pcie_wait_cmd(struct controller *ctrl, int poll) +static void pcie_wait_cmd(struct controller *ctrl, int poll) { - int retval = 0; unsigned int msecs = pciehp_poll_mode ? 
2500 : 1000; unsigned long timeout = msecs_to_jiffies(msecs); int rc; @@ -278,16 +280,9 @@ static inline int pcie_wait_cmd(struct controller *ctrl, int poll) if (poll) rc = pcie_poll_cmd(ctrl); else - rc = wait_event_interruptible_timeout(ctrl->queue, - !ctrl->cmd_busy, timeout); + rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout); if (!rc) dbg("Command not completed in 1000 msec\n"); - else if (rc < 0) { - retval = -EINTR; - info("Command was interrupted by a signal\n"); - } - - return retval; } /** @@ -342,10 +337,6 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask) slot_ctrl &= ~mask; slot_ctrl |= (cmd & mask); - /* Don't enable command completed if caller is changing it. */ - if (!(mask & CMD_CMPL_INTR_ENABLE)) - slot_ctrl |= CMD_CMPL_INTR_ENABLE; - ctrl->cmd_busy = 1; smp_mb(); retval = pciehp_writew(ctrl, SLOTCTRL, slot_ctrl); @@ -365,7 +356,7 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask) if (!(slot_ctrl & HP_INTR_ENABLE) || !(slot_ctrl & CMD_CMPL_INTR_ENABLE)) poll = 1; - retval = pcie_wait_cmd(ctrl, poll); + pcie_wait_cmd(ctrl, poll); } out: mutex_unlock(&ctrl->ctrl_lock); @@ -614,23 +605,6 @@ static void hpc_set_green_led_blink(struct slot *slot) __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); } -static void hpc_release_ctlr(struct controller *ctrl) -{ - /* Mask Hot-plug Interrupt Enable */ - if (pcie_write_cmd(ctrl, 0, HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE)) - err("%s: Cannot mask hotplut interrupt enable\n", __func__); - - /* Free interrupt handler or interrupt polling timer */ - pciehp_free_irq(ctrl); - - /* - * If this is the last controller to be released, destroy the - * pciehp work queue - */ - if (atomic_dec_and_test(&pciehp_num_controllers)) - destroy_workqueue(pciehp_wq); -} - static int hpc_power_on_slot(struct slot * slot) { struct controller *ctrl = slot->ctrl; @@ -785,7 +759,7 @@ static irqreturn_t pcie_isr(int irq, void *dev_id) intr_loc |= detected; if (!intr_loc) return IRQ_NONE; - if (pciehp_writew(ctrl, SLOTSTATUS, detected)) { + if (detected && pciehp_writew(ctrl, SLOTSTATUS, detected)) { err("%s: Cannot write to SLOTSTATUS\n", __func__); return IRQ_NONE; } @@ -797,25 +771,13 @@ static irqreturn_t pcie_isr(int irq, void *dev_id) if (intr_loc & CMD_COMPLETED) { ctrl->cmd_busy = 0; smp_mb(); - wake_up_interruptible(&ctrl->queue); + wake_up(&ctrl->queue); } if (!(intr_loc & ~CMD_COMPLETED)) return IRQ_HANDLED; - /* - * Return without handling events if this handler routine is - * called before controller initialization is done. This may - * happen if hotplug event or another interrupt that shares - * the IRQ with pciehp arrives before slot initialization is - * done after interrupt handler is registered. - * - * FIXME - Need more structural fixes. We need to be ready to - * handle the event before installing interrupt handler. 
- */ p_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset); - if (!p_slot || !p_slot->hpc_ops) - return IRQ_HANDLED; /* Check MRL Sensor Changed */ if (intr_loc & MRL_SENS_CHANGED) @@ -992,6 +954,7 @@ static int hpc_get_cur_lnk_width(struct slot *slot, return retval; } +static void pcie_release_ctrl(struct controller *ctrl); static struct hpc_ops pciehp_hpc_ops = { .power_on_slot = hpc_power_on_slot, .power_off_slot = hpc_power_off_slot, @@ -1013,97 +976,11 @@ static struct hpc_ops pciehp_hpc_ops = { .green_led_off = hpc_set_green_led_off, .green_led_blink = hpc_set_green_led_blink, - .release_ctlr = hpc_release_ctlr, + .release_ctlr = pcie_release_ctrl, .check_lnk_status = hpc_check_lnk_status, }; -#ifdef CONFIG_ACPI -static int pciehp_acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev) -{ - acpi_status status; - acpi_handle chandle, handle = DEVICE_ACPI_HANDLE(&(dev->dev)); - struct pci_dev *pdev = dev; - struct pci_bus *parent; - struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; - - /* - * Per PCI firmware specification, we should run the ACPI _OSC - * method to get control of hotplug hardware before using it. - * If an _OSC is missing, we look for an OSHP to do the same thing. - * To handle different BIOS behavior, we look for _OSC and OSHP - * within the scope of the hotplug controller and its parents, upto - * the host bridge under which this controller exists. - */ - while (!handle) { - /* - * This hotplug controller was not listed in the ACPI name - * space at all. Try to get acpi handle of parent pci bus. - */ - if (!pdev || !pdev->bus->parent) - break; - parent = pdev->bus->parent; - dbg("Could not find %s in acpi namespace, trying parent\n", - pci_name(pdev)); - if (!parent->self) - /* Parent must be a host bridge */ - handle = acpi_get_pci_rootbridge_handle( - pci_domain_nr(parent), - parent->number); - else - handle = DEVICE_ACPI_HANDLE( - &(parent->self->dev)); - pdev = parent->self; - } - - while (handle) { - acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); - dbg("Trying to get hotplug control for %s \n", - (char *)string.pointer); - status = pci_osc_control_set(handle, - OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL | - OSC_PCI_EXPRESS_NATIVE_HP_CONTROL); - if (status == AE_NOT_FOUND) - status = acpi_run_oshp(handle); - if (ACPI_SUCCESS(status)) { - dbg("Gained control for hotplug HW for pci %s (%s)\n", - pci_name(dev), (char *)string.pointer); - kfree(string.pointer); - return 0; - } - if (acpi_root_bridge(handle)) - break; - chandle = handle; - status = acpi_get_parent(chandle, &handle); - if (ACPI_FAILURE(status)) - break; - } - - dbg("Cannot get control of hotplug hardware for pci %s\n", - pci_name(dev)); - - kfree(string.pointer); - return -1; -} -#endif - -static int pcie_init_hardware_part1(struct controller *ctrl, - struct pcie_device *dev) -{ - /* Clear all remaining event bits in Slot Status register */ - if (pciehp_writew(ctrl, SLOTSTATUS, 0x1f)) { - err("%s: Cannot write to SLOTSTATUS register\n", __func__); - return -1; - } - - /* Mask Hot-plug Interrupt Enable */ - if (pcie_write_cmd(ctrl, 0, HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE)) { - err("%s: Cannot mask hotplug interrupt enable\n", __func__); - return -1; - } - return 0; -} - -int pcie_init_hardware_part2(struct controller *ctrl, struct pcie_device *dev) +int pcie_enable_notification(struct controller *ctrl) { u16 cmd, mask; @@ -1115,30 +992,83 @@ int pcie_init_hardware_part2(struct controller *ctrl, struct pcie_device *dev) if (MRL_SENS(ctrl)) cmd |= MRL_DETECT_ENABLE; if (!pciehp_poll_mode) 
- cmd |= HP_INTR_ENABLE; + cmd |= HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE; - mask = PRSN_DETECT_ENABLE | ATTN_BUTTN_ENABLE | - PWR_FAULT_DETECT_ENABLE | MRL_DETECT_ENABLE | HP_INTR_ENABLE; + mask = PRSN_DETECT_ENABLE | ATTN_BUTTN_ENABLE | MRL_DETECT_ENABLE | + PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE; if (pcie_write_cmd(ctrl, cmd, mask)) { err("%s: Cannot enable software notification\n", __func__); - goto abort; + return -1; } + return 0; +} - if (pciehp_force) - dbg("Bypassing BIOS check for pciehp use on %s\n", - pci_name(ctrl->pci_dev)); - else if (pciehp_get_hp_hw_control_from_firmware(ctrl->pci_dev)) - goto abort_disable_intr; +static void pcie_disable_notification(struct controller *ctrl) +{ + u16 mask; + mask = PRSN_DETECT_ENABLE | ATTN_BUTTN_ENABLE | MRL_DETECT_ENABLE | + PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE; + if (pcie_write_cmd(ctrl, 0, mask)) + warn("%s: Cannot disable software notification\n", __func__); +} +static int pcie_init_notification(struct controller *ctrl) +{ + if (pciehp_request_irq(ctrl)) + return -1; + if (pcie_enable_notification(ctrl)) { + pciehp_free_irq(ctrl); + return -1; + } return 0; +} - /* We end up here for the many possible ways to fail this API. */ -abort_disable_intr: - if (pcie_write_cmd(ctrl, 0, HP_INTR_ENABLE)) - err("%s : disabling interrupts failed\n", __func__); -abort: - return -1; +static void pcie_shutdown_notification(struct controller *ctrl) +{ + pcie_disable_notification(ctrl); + pciehp_free_irq(ctrl); +} + +static void make_slot_name(struct slot *slot) +{ + if (pciehp_slot_with_bus) + snprintf(slot->name, SLOT_NAME_SIZE, "%04d_%04d", + slot->bus, slot->number); + else + snprintf(slot->name, SLOT_NAME_SIZE, "%d", slot->number); +} + +static int pcie_init_slot(struct controller *ctrl) +{ + struct slot *slot; + + slot = kzalloc(sizeof(*slot), GFP_KERNEL); + if (!slot) + return -ENOMEM; + + slot->hp_slot = 0; + slot->ctrl = ctrl; + slot->bus = ctrl->pci_dev->subordinate->number; + slot->device = ctrl->slot_device_offset + slot->hp_slot; + slot->hpc_ops = ctrl->hpc_ops; + slot->number = ctrl->first_slot; + make_slot_name(slot); + mutex_init(&slot->lock); + INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work); + list_add(&slot->slot_list, &ctrl->slot_list); + return 0; +} + +static void pcie_cleanup_slot(struct controller *ctrl) +{ + struct slot *slot; + slot = list_first_entry(&ctrl->slot_list, struct slot, slot_list); + list_del(&slot->slot_list); + cancel_delayed_work(&slot->work); + flush_scheduled_work(); + flush_workqueue(pciehp_wq); + kfree(slot); } static inline void dbg_ctrl(struct controller *ctrl) @@ -1176,15 +1106,23 @@ static inline void dbg_ctrl(struct controller *ctrl) dbg(" Comamnd Completed : %3s\n", NO_CMD_CMPL(ctrl)? 
"no" : "yes"); pciehp_readw(ctrl, SLOTSTATUS, ®16); dbg("Slot Status : 0x%04x\n", reg16); - pciehp_readw(ctrl, SLOTSTATUS, ®16); + pciehp_readw(ctrl, SLOTCTRL, ®16); dbg("Slot Control : 0x%04x\n", reg16); } -int pcie_init(struct controller *ctrl, struct pcie_device *dev) +struct controller *pcie_init(struct pcie_device *dev) { + struct controller *ctrl; u32 slot_cap; struct pci_dev *pdev = dev->port; + ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); + if (!ctrl) { + err("%s : out of memory\n", __func__); + goto abort; + } + INIT_LIST_HEAD(&ctrl->slot_list); + ctrl->pci_dev = pdev; ctrl->cap_base = pci_find_capability(pdev, PCI_CAP_ID_EXP); if (!ctrl->cap_base) { @@ -1215,15 +1153,12 @@ int pcie_init(struct controller *ctrl, struct pcie_device *dev) !(POWER_CTRL(ctrl) | ATTN_LED(ctrl) | PWR_LED(ctrl) | EMI(ctrl))) ctrl->no_cmd_complete = 1; - info("HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n", - pdev->vendor, pdev->device, - pdev->subsystem_vendor, pdev->subsystem_device); + /* Clear all remaining event bits in Slot Status register */ + if (pciehp_writew(ctrl, SLOTSTATUS, 0x1f)) + goto abort_ctrl; - if (pcie_init_hardware_part1(ctrl, dev)) - goto abort; - - if (pciehp_request_irq(ctrl)) - goto abort; + /* Disable sotfware notification */ + pcie_disable_notification(ctrl); /* * If this is the first controller to be initialized, @@ -1231,18 +1166,39 @@ int pcie_init(struct controller *ctrl, struct pcie_device *dev) */ if (atomic_add_return(1, &pciehp_num_controllers) == 1) { pciehp_wq = create_singlethread_workqueue("pciehpd"); - if (!pciehp_wq) { - goto abort_free_irq; - } + if (!pciehp_wq) + goto abort_ctrl; } - if (pcie_init_hardware_part2(ctrl, dev)) - goto abort_free_irq; + info("HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n", + pdev->vendor, pdev->device, + pdev->subsystem_vendor, pdev->subsystem_device); + + if (pcie_init_slot(ctrl)) + goto abort_ctrl; - return 0; + if (pcie_init_notification(ctrl)) + goto abort_slot; -abort_free_irq: - pciehp_free_irq(ctrl); + return ctrl; + +abort_slot: + pcie_cleanup_slot(ctrl); +abort_ctrl: + kfree(ctrl); abort: - return -1; + return NULL; +} + +void pcie_release_ctrl(struct controller *ctrl) +{ + pcie_shutdown_notification(ctrl); + pcie_cleanup_slot(ctrl); + /* + * If this is the last controller to be released, destroy the + * pciehp work queue + */ + if (atomic_dec_and_test(&pciehp_num_controllers)) + destroy_workqueue(pciehp_wq); + kfree(ctrl); } diff --git a/drivers/pci/hotplug/rpadlpar_sysfs.c b/drivers/pci/hotplug/rpadlpar_sysfs.c index 779c5db71be4..a796301ea03f 100644 --- a/drivers/pci/hotplug/rpadlpar_sysfs.c +++ b/drivers/pci/hotplug/rpadlpar_sysfs.c @@ -14,8 +14,10 @@ */ #include <linux/kobject.h> #include <linux/string.h> +#include <linux/pci.h> #include <linux/pci_hotplug.h> #include "rpadlpar.h" +#include "../pci.h" #define DLPAR_KOBJ_NAME "control" @@ -27,7 +29,6 @@ #define MAX_DRC_NAME_LEN 64 - static ssize_t add_slot_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t nbytes) { @@ -112,7 +113,7 @@ int dlpar_sysfs_init(void) int error; dlpar_kobj = kobject_create_and_add(DLPAR_KOBJ_NAME, - &pci_hotplug_slots_kset->kobj); + &pci_slots_kset->kobj); if (!dlpar_kobj) return -EINVAL; diff --git a/drivers/pci/hotplug/rpaphp_slot.c b/drivers/pci/hotplug/rpaphp_slot.c index 56197b600d36..9b714ea93d20 100644 --- a/drivers/pci/hotplug/rpaphp_slot.c +++ b/drivers/pci/hotplug/rpaphp_slot.c @@ -33,33 +33,6 @@ #include <asm/rtas.h> #include "rpaphp.h" -static ssize_t address_read_file (struct hotplug_slot 
*php_slot, char *buf) -{ - int retval; - struct slot *slot = (struct slot *)php_slot->private; - struct pci_bus *bus; - - if (!slot) - return -ENOENT; - - bus = slot->bus; - if (!bus) - return -ENOENT; - - if (bus->self) - retval = sprintf(buf, pci_name(bus->self)); - else - retval = sprintf(buf, "%04x:%02x:00.0", - pci_domain_nr(bus), bus->number); - - return retval; -} - -static struct hotplug_slot_attribute php_attr_address = { - .attr = {.name = "address", .mode = S_IFREG | S_IRUGO}, - .show = address_read_file, -}; - /* free up the memory used by a slot */ static void rpaphp_release_slot(struct hotplug_slot *hotplug_slot) { @@ -135,9 +108,6 @@ int rpaphp_deregister_slot(struct slot *slot) list_del(&slot->rpaphp_slot_list); - /* remove "address" file */ - sysfs_remove_file(&php_slot->kobj, &php_attr_address.attr); - retval = pci_hp_deregister(php_slot); if (retval) err("Problem unregistering a slot %s\n", slot->name); @@ -151,6 +121,7 @@ int rpaphp_register_slot(struct slot *slot) { struct hotplug_slot *php_slot = slot->hotplug_slot; int retval; + int slotno; dbg("%s registering slot:path[%s] index[%x], name[%s] pdomain[%x] type[%d]\n", __func__, slot->dn->full_name, slot->index, slot->name, @@ -162,19 +133,16 @@ int rpaphp_register_slot(struct slot *slot) return -EAGAIN; } - retval = pci_hp_register(php_slot); + if (slot->dn->child) + slotno = PCI_SLOT(PCI_DN(slot->dn->child)->devfn); + else + slotno = -1; + retval = pci_hp_register(php_slot, slot->bus, slotno); if (retval) { err("pci_hp_register failed with error %d\n", retval); return retval; } - /* create "address" file */ - retval = sysfs_create_file(&php_slot->kobj, &php_attr_address.attr); - if (retval) { - err("sysfs_create_file failed with error %d\n", retval); - goto sysfs_fail; - } - /* add slot to our internal list */ list_add(&slot->rpaphp_slot_list, &rpaphp_slot_head); info("Slot [%s] registered\n", slot->name); diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c index 2fe37cd85b69..410fe0394a8e 100644 --- a/drivers/pci/hotplug/sgi_hotplug.c +++ b/drivers/pci/hotplug/sgi_hotplug.c @@ -197,13 +197,15 @@ static int sn_hp_slot_private_alloc(struct hotplug_slot *bss_hotplug_slot, static struct hotplug_slot * sn_hp_destroy(void) { struct slot *slot; + struct pci_slot *pci_slot; struct hotplug_slot *bss_hotplug_slot = NULL; list_for_each_entry(slot, &sn_hp_list, hp_list) { bss_hotplug_slot = slot->hotplug_slot; + pci_slot = bss_hotplug_slot->pci_slot; list_del(&((struct slot *)bss_hotplug_slot->private)-> hp_list); - sysfs_remove_file(&bss_hotplug_slot->kobj, + sysfs_remove_file(&pci_slot->kobj, &sn_slot_path_attr.attr); break; } @@ -614,6 +616,7 @@ static void sn_release_slot(struct hotplug_slot *bss_hotplug_slot) static int sn_hotplug_slot_register(struct pci_bus *pci_bus) { int device; + struct pci_slot *pci_slot; struct hotplug_slot *bss_hotplug_slot; int rc = 0; @@ -650,11 +653,12 @@ static int sn_hotplug_slot_register(struct pci_bus *pci_bus) bss_hotplug_slot->ops = &sn_hotplug_slot_ops; bss_hotplug_slot->release = &sn_release_slot; - rc = pci_hp_register(bss_hotplug_slot); + rc = pci_hp_register(bss_hotplug_slot, pci_bus, device); if (rc) goto register_err; - rc = sysfs_create_file(&bss_hotplug_slot->kobj, + pci_slot = bss_hotplug_slot->pci_slot; + rc = sysfs_create_file(&pci_slot->kobj, &sn_slot_path_attr.attr); if (rc) goto register_err; @@ -664,7 +668,7 @@ static int sn_hotplug_slot_register(struct pci_bus *pci_bus) register_err: dev_dbg(&pci_bus->self->dev, "bus failed to register with 
err = %d\n", - rc); + rc); alloc_err: if (rc == -ENOMEM) diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h index f66e8d6315ab..8a026f750deb 100644 --- a/drivers/pci/hotplug/shpchp.h +++ b/drivers/pci/hotplug/shpchp.h @@ -170,6 +170,7 @@ extern void shpchp_queue_pushbutton_work(struct work_struct *work); extern int shpc_init( struct controller *ctrl, struct pci_dev *pdev); #ifdef CONFIG_ACPI +#include <linux/pci-acpi.h> static inline int get_hp_params_from_firmware(struct pci_dev *dev, struct hotplug_params *hpp) { @@ -177,14 +178,15 @@ static inline int get_hp_params_from_firmware(struct pci_dev *dev, return -ENODEV; return 0; } -#define get_hp_hw_control_from_firmware(pdev) \ - do { \ - if (DEVICE_ACPI_HANDLE(&(pdev->dev))) \ - acpi_run_oshp(DEVICE_ACPI_HANDLE(&(pdev->dev)));\ - } while (0) + +static inline int get_hp_hw_control_from_firmware(struct pci_dev *dev) +{ + u32 flags = OSC_SHPC_NATIVE_HP_CONTROL; + return acpi_get_hp_hw_control_from_firmware(dev, flags); +} #else #define get_hp_params_from_firmware(dev, hpp) (-ENODEV) -#define get_hp_hw_control_from_firmware(dev) do { } while (0) +#define get_hp_hw_control_from_firmware(dev) (0) #endif struct ctrl_reg { diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c index 97848654652a..a8cbd039b85b 100644 --- a/drivers/pci/hotplug/shpchp_core.c +++ b/drivers/pci/hotplug/shpchp_core.c @@ -39,7 +39,7 @@ int shpchp_debug; int shpchp_poll_mode; int shpchp_poll_time; -int shpchp_slot_with_bus; +static int shpchp_slot_with_bus; struct workqueue_struct *shpchp_wq; #define DRIVER_VERSION "0.4" @@ -68,7 +68,6 @@ static int get_power_status (struct hotplug_slot *slot, u8 *value); static int get_attention_status (struct hotplug_slot *slot, u8 *value); static int get_latch_status (struct hotplug_slot *slot, u8 *value); static int get_adapter_status (struct hotplug_slot *slot, u8 *value); -static int get_address (struct hotplug_slot *slot, u32 *value); static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); @@ -81,7 +80,6 @@ static struct hotplug_slot_ops shpchp_hotplug_slot_ops = { .get_attention_status = get_attention_status, .get_latch_status = get_latch_status, .get_adapter_status = get_adapter_status, - .get_address = get_address, .get_max_bus_speed = get_max_bus_speed, .get_cur_bus_speed = get_cur_bus_speed, }; @@ -159,7 +157,8 @@ static int init_slots(struct controller *ctrl) dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x " "slot_device_offset=%x\n", slot->bus, slot->device, slot->hp_slot, slot->number, ctrl->slot_device_offset); - retval = pci_hp_register(slot->hotplug_slot); + retval = pci_hp_register(slot->hotplug_slot, + ctrl->pci_dev->subordinate, slot->device); if (retval) { err("pci_hp_register failed with error %d\n", retval); if (retval == -EEXIST) @@ -288,19 +287,8 @@ static int get_adapter_status (struct hotplug_slot *hotplug_slot, u8 *value) return 0; } -static int get_address (struct hotplug_slot *hotplug_slot, u32 *value) -{ - struct slot *slot = get_slot(hotplug_slot); - struct pci_bus *bus = slot->ctrl->pci_dev->subordinate; - - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); - - *value = (pci_domain_nr(bus) << 16) | (slot->bus << 8) | slot->device; - - return 0; -} - -static int get_max_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value) +static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, + enum pci_bus_speed 
*value) { struct slot *slot = get_slot(hotplug_slot); int retval; @@ -330,13 +318,14 @@ static int get_cur_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_sp static int is_shpc_capable(struct pci_dev *dev) { - if ((dev->vendor == PCI_VENDOR_ID_AMD) || (dev->device == - PCI_DEVICE_ID_AMD_GOLAM_7450)) - return 1; - if (pci_find_capability(dev, PCI_CAP_ID_SHPC)) - return 1; - - return 0; + if ((dev->vendor == PCI_VENDOR_ID_AMD) || (dev->device == + PCI_DEVICE_ID_AMD_GOLAM_7450)) + return 1; + if (!pci_find_capability(dev, PCI_CAP_ID_SHPC)) + return 0; + if (get_hp_hw_control_from_firmware(dev)) + return 0; + return 1; } static int shpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c index 7d770b2cd889..7a0bff364cd4 100644 --- a/drivers/pci/hotplug/shpchp_hpc.c +++ b/drivers/pci/hotplug/shpchp_hpc.c @@ -1084,7 +1084,6 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev) dbg("%s: HPC at b:d:f:irq=0x%x:%x:%x:%x\n", __func__, pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), pdev->irq); - get_hp_hw_control_from_firmware(pdev); /* * If this is the first controller to be initialized, diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index bb0642318a95..3f7b81c065d2 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c @@ -1748,7 +1748,6 @@ int __init init_dmars(void) deferred_flush = kzalloc(g_num_of_iommus * sizeof(struct deferred_flush_tables), GFP_KERNEL); if (!deferred_flush) { - kfree(g_iommus); ret = -ENOMEM; goto error; } diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 8c61304cbb37..15af618d36e2 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -70,12 +70,10 @@ arch_teardown_msi_irqs(struct pci_dev *dev) } } -static void msi_set_enable(struct pci_dev *dev, int enable) +static void __msi_set_enable(struct pci_dev *dev, int pos, int enable) { - int pos; u16 control; - pos = pci_find_capability(dev, PCI_CAP_ID_MSI); if (pos) { pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control); control &= ~PCI_MSI_FLAGS_ENABLE; @@ -85,6 +83,11 @@ static void msi_set_enable(struct pci_dev *dev, int enable) } } +static void msi_set_enable(struct pci_dev *dev, int enable) +{ + __msi_set_enable(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), enable); +} + static void msix_set_enable(struct pci_dev *dev, int enable) { int pos; @@ -141,7 +144,8 @@ static void msi_set_mask_bits(unsigned int irq, u32 mask, u32 flag) mask_bits |= flag & mask; pci_write_config_dword(entry->dev, pos, mask_bits); } else { - msi_set_enable(entry->dev, !flag); + __msi_set_enable(entry->dev, entry->msi_attrib.pos, + !flag); } break; case PCI_CAP_ID_MSIX: @@ -561,9 +565,8 @@ int pci_enable_msi(struct pci_dev* dev) /* Check whether driver already requested for MSI-X irqs */ if (dev->msix_enabled) { - printk(KERN_INFO "PCI: %s: Can't enable MSI. " - "Device already has MSI-X enabled\n", - pci_name(dev)); + dev_info(&dev->dev, "can't enable MSI " + "(MSI-X already enabled)\n"); return -EINVAL; } status = msi_capability_init(dev); @@ -686,9 +689,8 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) /* Check whether driver already requested for MSI irq */ if (dev->msi_enabled) { - printk(KERN_INFO "PCI: %s: Can't enable MSI-X. 
" - "Device already has an MSI irq assigned\n", - pci_name(dev)); + dev_info(&dev->dev, "can't enable MSI-X " + "(MSI IRQ already assigned)\n"); return -EINVAL; } status = msix_capability_init(dev, entries, nvec); diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 9d6fc8e6285d..7764768b6a0e 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -21,12 +21,19 @@ struct acpi_osc_data { acpi_handle handle; - u32 ctrlset_buf[3]; - u32 global_ctrlsets; + u32 support_set; + u32 control_set; + int is_queried; + u32 query_result; struct list_head sibiling; }; static LIST_HEAD(acpi_osc_data_list); +struct acpi_osc_args { + u32 capbuf[3]; + u32 query_result; +}; + static struct acpi_osc_data *acpi_get_osc_data(acpi_handle handle) { struct acpi_osc_data *data; @@ -44,42 +51,18 @@ static struct acpi_osc_data *acpi_get_osc_data(acpi_handle handle) return data; } -static u8 OSC_UUID[16] = {0x5B, 0x4D, 0xDB, 0x33, 0xF7, 0x1F, 0x1C, 0x40, 0x96, 0x57, 0x74, 0x41, 0xC0, 0x3D, 0xD7, 0x66}; +static u8 OSC_UUID[16] = {0x5B, 0x4D, 0xDB, 0x33, 0xF7, 0x1F, 0x1C, 0x40, + 0x96, 0x57, 0x74, 0x41, 0xC0, 0x3D, 0xD7, 0x66}; -static acpi_status -acpi_query_osc ( - acpi_handle handle, - u32 level, - void *context, - void **retval ) +static acpi_status acpi_run_osc(acpi_handle handle, + struct acpi_osc_args *osc_args) { - acpi_status status; - struct acpi_object_list input; - union acpi_object in_params[4]; - struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL}; - union acpi_object *out_obj; - u32 osc_dw0; - acpi_status *ret_status = (acpi_status *)retval; - struct acpi_osc_data *osc_data; - u32 flags = (unsigned long)context, temp; - acpi_handle tmp; - - status = acpi_get_handle(handle, "_OSC", &tmp); - if (ACPI_FAILURE(status)) - return status; - - osc_data = acpi_get_osc_data(handle); - if (!osc_data) { - printk(KERN_ERR "acpi osc data array is full\n"); - return AE_ERROR; - } - - osc_data->ctrlset_buf[OSC_SUPPORT_TYPE] |= (flags & OSC_SUPPORT_MASKS); - - /* do _OSC query for all possible controls */ - temp = osc_data->ctrlset_buf[OSC_CONTROL_TYPE]; - osc_data->ctrlset_buf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE; - osc_data->ctrlset_buf[OSC_CONTROL_TYPE] = OSC_CONTROL_MASKS; + acpi_status status; + struct acpi_object_list input; + union acpi_object in_params[4]; + struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL}; + union acpi_object *out_obj; + u32 osc_dw0, flags = osc_args->capbuf[OSC_QUERY_TYPE]; /* Setting up input parameters */ input.count = 4; @@ -93,20 +76,19 @@ acpi_query_osc ( in_params[2].integer.value = 3; in_params[3].type = ACPI_TYPE_BUFFER; in_params[3].buffer.length = 12; - in_params[3].buffer.pointer = (u8 *)osc_data->ctrlset_buf; + in_params[3].buffer.pointer = (u8 *)osc_args->capbuf; status = acpi_evaluate_object(handle, "_OSC", &input, &output); if (ACPI_FAILURE(status)) - goto out_nofree; - out_obj = output.pointer; + return status; + out_obj = output.pointer; if (out_obj->type != ACPI_TYPE_BUFFER) { - printk(KERN_DEBUG - "Evaluate _OSC returns wrong type\n"); + printk(KERN_DEBUG "Evaluate _OSC returns wrong type\n"); status = AE_TYPE; - goto query_osc_out; + goto out_kfree; } - osc_dw0 = *((u32 *) out_obj->buffer.pointer); + osc_dw0 = *((u32 *)out_obj->buffer.pointer); if (osc_dw0) { if (osc_dw0 & OSC_REQUEST_ERROR) printk(KERN_DEBUG "_OSC request fails\n"); @@ -115,93 +97,58 @@ acpi_query_osc ( if (osc_dw0 & OSC_INVALID_REVISION_ERROR) printk(KERN_DEBUG "_OSC invalid revision\n"); if (osc_dw0 & OSC_CAPABILITIES_MASK_ERROR) { - /* Update Global Control Set */ - 
osc_data->global_ctrlsets = - *((u32 *)(out_obj->buffer.pointer + 8)); - status = AE_OK; - goto query_osc_out; + if (flags & OSC_QUERY_ENABLE) + goto out_success; + printk(KERN_DEBUG "_OSC FW not grant req. control\n"); + status = AE_SUPPORT; + goto out_kfree; } status = AE_ERROR; - goto query_osc_out; + goto out_kfree; } - - /* Update Global Control Set */ - osc_data->global_ctrlsets = *((u32 *)(out_obj->buffer.pointer + 8)); +out_success: + if (flags & OSC_QUERY_ENABLE) + osc_args->query_result = + *((u32 *)(out_obj->buffer.pointer + 8)); status = AE_OK; -query_osc_out: +out_kfree: kfree(output.pointer); -out_nofree: - *ret_status = status; - - osc_data->ctrlset_buf[OSC_QUERY_TYPE] = !OSC_QUERY_ENABLE; - osc_data->ctrlset_buf[OSC_CONTROL_TYPE] = temp; - if (ACPI_FAILURE(status)) { - /* no osc support at all */ - osc_data->ctrlset_buf[OSC_SUPPORT_TYPE] = 0; - } - return status; } - -static acpi_status -acpi_run_osc ( - acpi_handle handle, - void *context) +static acpi_status acpi_query_osc(acpi_handle handle, + u32 level, void *context, void **retval) { - acpi_status status; - struct acpi_object_list input; - union acpi_object in_params[4]; - struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL}; - union acpi_object *out_obj; - u32 osc_dw0; - - /* Setting up input parameters */ - input.count = 4; - input.pointer = in_params; - in_params[0].type = ACPI_TYPE_BUFFER; - in_params[0].buffer.length = 16; - in_params[0].buffer.pointer = OSC_UUID; - in_params[1].type = ACPI_TYPE_INTEGER; - in_params[1].integer.value = 1; - in_params[2].type = ACPI_TYPE_INTEGER; - in_params[2].integer.value = 3; - in_params[3].type = ACPI_TYPE_BUFFER; - in_params[3].buffer.length = 12; - in_params[3].buffer.pointer = (u8 *)context; + acpi_status status; + struct acpi_osc_data *osc_data; + u32 flags = (unsigned long)context, support_set; + acpi_handle tmp; + struct acpi_osc_args osc_args; - status = acpi_evaluate_object(handle, "_OSC", &input, &output); - if (ACPI_FAILURE (status)) + status = acpi_get_handle(handle, "_OSC", &tmp); + if (ACPI_FAILURE(status)) return status; - out_obj = output.pointer; - if (out_obj->type != ACPI_TYPE_BUFFER) { - printk(KERN_DEBUG - "Evaluate _OSC returns wrong type\n"); - status = AE_TYPE; - goto run_osc_out; + osc_data = acpi_get_osc_data(handle); + if (!osc_data) { + printk(KERN_ERR "acpi osc data array is full\n"); + return AE_ERROR; } - osc_dw0 = *((u32 *) out_obj->buffer.pointer); - if (osc_dw0) { - if (osc_dw0 & OSC_REQUEST_ERROR) - printk(KERN_DEBUG "_OSC request fails\n"); - if (osc_dw0 & OSC_INVALID_UUID_ERROR) - printk(KERN_DEBUG "_OSC invalid UUID\n"); - if (osc_dw0 & OSC_INVALID_REVISION_ERROR) - printk(KERN_DEBUG "_OSC invalid revision\n"); - if (osc_dw0 & OSC_CAPABILITIES_MASK_ERROR) { - printk(KERN_DEBUG "_OSC FW not grant req. 
control\n"); - status = AE_SUPPORT; - goto run_osc_out; - } - status = AE_ERROR; - goto run_osc_out; + + /* do _OSC query for all possible controls */ + support_set = osc_data->support_set | (flags & OSC_SUPPORT_MASKS); + osc_args.capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE; + osc_args.capbuf[OSC_SUPPORT_TYPE] = support_set; + osc_args.capbuf[OSC_CONTROL_TYPE] = OSC_CONTROL_MASKS; + + status = acpi_run_osc(handle, &osc_args); + if (ACPI_SUCCESS(status)) { + osc_data->support_set = support_set; + osc_data->query_result = osc_args.query_result; + osc_data->is_queried = 1; } - status = AE_OK; -run_osc_out: - kfree(output.pointer); return status; } @@ -215,15 +162,11 @@ run_osc_out: **/ acpi_status __pci_osc_support_set(u32 flags, const char *hid) { - acpi_status retval = AE_NOT_FOUND; - - if (!(flags & OSC_SUPPORT_MASKS)) { + if (!(flags & OSC_SUPPORT_MASKS)) return AE_TYPE; - } - acpi_get_devices(hid, - acpi_query_osc, - (void *)(unsigned long)flags, - (void **) &retval ); + + acpi_get_devices(hid, acpi_query_osc, + (void *)(unsigned long)flags, NULL); return AE_OK; } @@ -236,10 +179,11 @@ acpi_status __pci_osc_support_set(u32 flags, const char *hid) **/ acpi_status pci_osc_control_set(acpi_handle handle, u32 flags) { - acpi_status status; - u32 ctrlset; + acpi_status status; + u32 ctrlset, control_set; acpi_handle tmp; struct acpi_osc_data *osc_data; + struct acpi_osc_args osc_args; status = acpi_get_handle(handle, "_OSC", &tmp); if (ACPI_FAILURE(status)) @@ -252,24 +196,25 @@ acpi_status pci_osc_control_set(acpi_handle handle, u32 flags) } ctrlset = (flags & OSC_CONTROL_MASKS); - if (!ctrlset) { + if (!ctrlset) return AE_TYPE; - } - if (osc_data->ctrlset_buf[OSC_SUPPORT_TYPE] && - ((osc_data->global_ctrlsets & ctrlset) != ctrlset)) { + + if (osc_data->is_queried && + ((osc_data->query_result & ctrlset) != ctrlset)) return AE_SUPPORT; - } - osc_data->ctrlset_buf[OSC_CONTROL_TYPE] |= ctrlset; - status = acpi_run_osc(handle, osc_data->ctrlset_buf); - if (ACPI_FAILURE (status)) { - osc_data->ctrlset_buf[OSC_CONTROL_TYPE] &= ~ctrlset; - } - + + control_set = osc_data->control_set | ctrlset; + osc_args.capbuf[OSC_QUERY_TYPE] = 0; + osc_args.capbuf[OSC_SUPPORT_TYPE] = osc_data->support_set; + osc_args.capbuf[OSC_CONTROL_TYPE] = control_set; + status = acpi_run_osc(handle, &osc_args); + if (ACPI_SUCCESS(status)) + osc_data->control_set = control_set; + return status; } EXPORT_SYMBOL(pci_osc_control_set); -#ifdef CONFIG_ACPI_SLEEP /* * _SxD returns the D-state with the highest power * (lowest D-state number) supported in the S-state "x". @@ -293,13 +238,11 @@ EXPORT_SYMBOL(pci_osc_control_set); * choose highest power _SxD or any lower power */ -static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev, - pm_message_t state) +static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev) { int acpi_state; - acpi_state = acpi_pm_device_sleep_state(&pdev->dev, - device_may_wakeup(&pdev->dev), NULL); + acpi_state = acpi_pm_device_sleep_state(&pdev->dev, NULL); if (acpi_state < 0) return PCI_POWER_ERROR; @@ -315,7 +258,13 @@ static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev, } return PCI_POWER_ERROR; } -#endif + +static bool acpi_pci_power_manageable(struct pci_dev *dev) +{ + acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); + + return handle ? 
acpi_bus_power_manageable(handle) : false; +} static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) { @@ -328,12 +277,11 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) [PCI_D3hot] = ACPI_STATE_D3, [PCI_D3cold] = ACPI_STATE_D3 }; + int error = -EINVAL; - if (!handle) - return -ENODEV; /* If the ACPI device has _EJ0, ignore the device */ - if (ACPI_SUCCESS(acpi_get_handle(handle, "_EJ0", &tmp))) - return 0; + if (!handle || ACPI_SUCCESS(acpi_get_handle(handle, "_EJ0", &tmp))) + return -ENODEV; switch (state) { case PCI_D0: @@ -341,11 +289,41 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) case PCI_D2: case PCI_D3hot: case PCI_D3cold: - return acpi_bus_set_power(handle, state_conv[state]); + error = acpi_bus_set_power(handle, state_conv[state]); } - return -EINVAL; + + if (!error) + dev_printk(KERN_INFO, &dev->dev, + "power state changed by ACPI to D%d\n", state); + + return error; +} + +static bool acpi_pci_can_wakeup(struct pci_dev *dev) +{ + acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); + + return handle ? acpi_bus_can_wakeup(handle) : false; +} + +static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable) +{ + int error = acpi_pm_device_sleep_wake(&dev->dev, enable); + + if (!error) + dev_printk(KERN_INFO, &dev->dev, + "wake-up capability %s by ACPI\n", + enable ? "enabled" : "disabled"); + return error; } +static struct pci_platform_pm_ops acpi_pci_platform_pm = { + .is_manageable = acpi_pci_power_manageable, + .set_state = acpi_pci_set_power_state, + .choose_state = acpi_pci_choose_state, + .can_wakeup = acpi_pci_can_wakeup, + .sleep_wake = acpi_pci_sleep_wake, +}; /* ACPI bus type */ static int acpi_pci_find_device(struct device *dev, acpi_handle *handle) @@ -397,10 +375,7 @@ static int __init acpi_pci_init(void) ret = register_acpi_bus_type(&acpi_pci_bus); if (ret) return 0; -#ifdef CONFIG_ACPI_SLEEP - platform_pci_choose_state = acpi_pci_choose_state; -#endif - platform_pci_set_power_state = acpi_pci_set_power_state; + pci_set_platform_pm(&acpi_pci_platform_pm); return 0; } arch_initcall(acpi_pci_init); diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index e1637bd82b8e..a13f53486114 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -274,7 +274,57 @@ static int pci_device_remove(struct device * dev) return 0; } -static int pci_device_suspend(struct device * dev, pm_message_t state) +static void pci_device_shutdown(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct pci_driver *drv = pci_dev->driver; + + if (drv && drv->shutdown) + drv->shutdown(pci_dev); + pci_msi_shutdown(pci_dev); + pci_msix_shutdown(pci_dev); +} + +#ifdef CONFIG_PM_SLEEP + +/* + * Default "suspend" method for devices that have no driver provided suspend, + * or not even a driver at all. + */ +static void pci_default_pm_suspend(struct pci_dev *pci_dev) +{ + pci_save_state(pci_dev); + /* + * mark its power state as "unknown", since we don't know if + * e.g. the BIOS will change its device state when we suspend. + */ + if (pci_dev->current_state == PCI_D0) + pci_dev->current_state = PCI_UNKNOWN; +} + +/* + * Default "resume" method for devices that have no driver provided resume, + * or not even a driver at all. 
+ */ +static int pci_default_pm_resume(struct pci_dev *pci_dev) +{ + int retval = 0; + + /* restore the PCI config space */ + pci_restore_state(pci_dev); + /* if the device was enabled before suspend, reenable */ + retval = pci_reenable_device(pci_dev); + /* + * if the device was busmaster before the suspend, make it busmaster + * again + */ + if (pci_dev->is_busmaster) + pci_set_master(pci_dev); + + return retval; +} + +static int pci_legacy_suspend(struct device *dev, pm_message_t state) { struct pci_dev * pci_dev = to_pci_dev(dev); struct pci_driver * drv = pci_dev->driver; @@ -284,18 +334,12 @@ static int pci_device_suspend(struct device * dev, pm_message_t state) i = drv->suspend(pci_dev, state); suspend_report_result(drv->suspend, i); } else { - pci_save_state(pci_dev); - /* - * mark its power state as "unknown", since we don't know if - * e.g. the BIOS will change its device state when we suspend. - */ - if (pci_dev->current_state == PCI_D0) - pci_dev->current_state = PCI_UNKNOWN; + pci_default_pm_suspend(pci_dev); } return i; } -static int pci_device_suspend_late(struct device * dev, pm_message_t state) +static int pci_legacy_suspend_late(struct device *dev, pm_message_t state) { struct pci_dev * pci_dev = to_pci_dev(dev); struct pci_driver * drv = pci_dev->driver; @@ -308,26 +352,7 @@ static int pci_device_suspend_late(struct device * dev, pm_message_t state) return i; } -/* - * Default resume method for devices that have no driver provided resume, - * or not even a driver at all. - */ -static int pci_default_resume(struct pci_dev *pci_dev) -{ - int retval = 0; - - /* restore the PCI config space */ - pci_restore_state(pci_dev); - /* if the device was enabled before suspend, reenable */ - retval = pci_reenable_device(pci_dev); - /* if the device was busmaster before the suspend, make it busmaster again */ - if (pci_dev->is_busmaster) - pci_set_master(pci_dev); - - return retval; -} - -static int pci_device_resume(struct device * dev) +static int pci_legacy_resume(struct device *dev) { int error; struct pci_dev * pci_dev = to_pci_dev(dev); @@ -336,34 +361,313 @@ static int pci_device_resume(struct device * dev) if (drv && drv->resume) error = drv->resume(pci_dev); else - error = pci_default_resume(pci_dev); + error = pci_default_pm_resume(pci_dev); return error; } -static int pci_device_resume_early(struct device * dev) +static int pci_legacy_resume_early(struct device *dev) { int error = 0; struct pci_dev * pci_dev = to_pci_dev(dev); struct pci_driver * drv = pci_dev->driver; - pci_fixup_device(pci_fixup_resume, pci_dev); - if (drv && drv->resume_early) error = drv->resume_early(pci_dev); return error; } -static void pci_device_shutdown(struct device *dev) +static int pci_pm_prepare(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int error = 0; + + if (drv && drv->pm && drv->pm->prepare) + error = drv->pm->prepare(dev); + + return error; +} + +static void pci_pm_complete(struct device *dev) +{ + struct device_driver *drv = dev->driver; + + if (drv && drv->pm && drv->pm->complete) + drv->pm->complete(dev); +} + +#ifdef CONFIG_SUSPEND + +static int pci_pm_suspend(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct device_driver *drv = dev->driver; + int error = 0; + + if (drv && drv->pm) { + if (drv->pm->suspend) { + error = drv->pm->suspend(dev); + suspend_report_result(drv->pm->suspend, error); + } else { + pci_default_pm_suspend(pci_dev); + } + } else { + error = pci_legacy_suspend(dev, PMSG_SUSPEND); + } + 
pci_fixup_device(pci_fixup_suspend, pci_dev); + + return error; +} + +static int pci_pm_suspend_noirq(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); struct pci_driver *drv = pci_dev->driver; + int error = 0; - if (drv && drv->shutdown) - drv->shutdown(pci_dev); - pci_msi_shutdown(pci_dev); - pci_msix_shutdown(pci_dev); + if (drv && drv->pm) { + if (drv->pm->suspend_noirq) { + error = drv->pm->suspend_noirq(dev); + suspend_report_result(drv->pm->suspend_noirq, error); + } + } else { + error = pci_legacy_suspend_late(dev, PMSG_SUSPEND); + } + + return error; } +static int pci_pm_resume(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct device_driver *drv = dev->driver; + int error; + + pci_fixup_device(pci_fixup_resume, pci_dev); + + if (drv && drv->pm) { + error = drv->pm->resume ? drv->pm->resume(dev) : + pci_default_pm_resume(pci_dev); + } else { + error = pci_legacy_resume(dev); + } + + return error; +} + +static int pci_pm_resume_noirq(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct pci_driver *drv = pci_dev->driver; + int error = 0; + + pci_fixup_device(pci_fixup_resume_early, pci_dev); + + if (drv && drv->pm) { + if (drv->pm->resume_noirq) + error = drv->pm->resume_noirq(dev); + } else { + error = pci_legacy_resume_early(dev); + } + + return error; +} + +#else /* !CONFIG_SUSPEND */ + +#define pci_pm_suspend NULL +#define pci_pm_suspend_noirq NULL +#define pci_pm_resume NULL +#define pci_pm_resume_noirq NULL + +#endif /* !CONFIG_SUSPEND */ + +#ifdef CONFIG_HIBERNATION + +static int pci_pm_freeze(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct device_driver *drv = dev->driver; + int error = 0; + + if (drv && drv->pm) { + if (drv->pm->freeze) { + error = drv->pm->freeze(dev); + suspend_report_result(drv->pm->freeze, error); + } else { + pci_default_pm_suspend(pci_dev); + } + } else { + error = pci_legacy_suspend(dev, PMSG_FREEZE); + pci_fixup_device(pci_fixup_suspend, pci_dev); + } + + return error; +} + +static int pci_pm_freeze_noirq(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct pci_driver *drv = pci_dev->driver; + int error = 0; + + if (drv && drv->pm) { + if (drv->pm->freeze_noirq) { + error = drv->pm->freeze_noirq(dev); + suspend_report_result(drv->pm->freeze_noirq, error); + } + } else { + error = pci_legacy_suspend_late(dev, PMSG_FREEZE); + } + + return error; +} + +static int pci_pm_thaw(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int error = 0; + + if (drv && drv->pm) { + if (drv->pm->thaw) + error = drv->pm->thaw(dev); + } else { + pci_fixup_device(pci_fixup_resume, to_pci_dev(dev)); + error = pci_legacy_resume(dev); + } + + return error; +} + +static int pci_pm_thaw_noirq(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct pci_driver *drv = pci_dev->driver; + int error = 0; + + if (drv && drv->pm) { + if (drv->pm->thaw_noirq) + error = drv->pm->thaw_noirq(dev); + } else { + pci_fixup_device(pci_fixup_resume_early, pci_dev); + error = pci_legacy_resume_early(dev); + } + + return error; +} + +static int pci_pm_poweroff(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int error = 0; + + pci_fixup_device(pci_fixup_suspend, to_pci_dev(dev)); + + if (drv && drv->pm) { + if (drv->pm->poweroff) { + error = drv->pm->poweroff(dev); + suspend_report_result(drv->pm->poweroff, error); + } + } else { + error = pci_legacy_suspend(dev, PMSG_HIBERNATE); + } + + return error; +} + +static int 
pci_pm_poweroff_noirq(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct pci_driver *drv = pci_dev->driver; + int error = 0; + + if (drv && drv->pm) { + if (drv->pm->poweroff_noirq) { + error = drv->pm->poweroff_noirq(dev); + suspend_report_result(drv->pm->poweroff_noirq, error); + } + } else { + error = pci_legacy_suspend_late(dev, PMSG_HIBERNATE); + } + + return error; +} + +static int pci_pm_restore(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct device_driver *drv = dev->driver; + int error; + + if (drv && drv->pm) { + error = drv->pm->restore ? drv->pm->restore(dev) : + pci_default_pm_resume(pci_dev); + } else { + error = pci_legacy_resume(dev); + } + pci_fixup_device(pci_fixup_resume, pci_dev); + + return error; +} + +static int pci_pm_restore_noirq(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct pci_driver *drv = pci_dev->driver; + int error = 0; + + pci_fixup_device(pci_fixup_resume, pci_dev); + + if (drv && drv->pm) { + if (drv->pm->restore_noirq) + error = drv->pm->restore_noirq(dev); + } else { + error = pci_legacy_resume_early(dev); + } + pci_fixup_device(pci_fixup_resume_early, pci_dev); + + return error; +} + +#else /* !CONFIG_HIBERNATION */ + +#define pci_pm_freeze NULL +#define pci_pm_freeze_noirq NULL +#define pci_pm_thaw NULL +#define pci_pm_thaw_noirq NULL +#define pci_pm_poweroff NULL +#define pci_pm_poweroff_noirq NULL +#define pci_pm_restore NULL +#define pci_pm_restore_noirq NULL + +#endif /* !CONFIG_HIBERNATION */ + +struct pm_ext_ops pci_pm_ops = { + .base = { + .prepare = pci_pm_prepare, + .complete = pci_pm_complete, + .suspend = pci_pm_suspend, + .resume = pci_pm_resume, + .freeze = pci_pm_freeze, + .thaw = pci_pm_thaw, + .poweroff = pci_pm_poweroff, + .restore = pci_pm_restore, + }, + .suspend_noirq = pci_pm_suspend_noirq, + .resume_noirq = pci_pm_resume_noirq, + .freeze_noirq = pci_pm_freeze_noirq, + .thaw_noirq = pci_pm_thaw_noirq, + .poweroff_noirq = pci_pm_poweroff_noirq, + .restore_noirq = pci_pm_restore_noirq, +}; + +#define PCI_PM_OPS_PTR &pci_pm_ops + +#else /* !CONFIG_PM_SLEEP */ + +#define PCI_PM_OPS_PTR NULL + +#endif /* !CONFIG_PM_SLEEP */ + /** * __pci_register_driver - register a new pci driver * @drv: the driver structure to register @@ -386,6 +690,9 @@ int __pci_register_driver(struct pci_driver *drv, struct module *owner, drv->driver.owner = owner; drv->driver.mod_name = mod_name; + if (drv->pm) + drv->driver.pm = &drv->pm->base; + spin_lock_init(&drv->dynids.lock); INIT_LIST_HEAD(&drv->dynids.list); @@ -511,12 +818,9 @@ struct bus_type pci_bus_type = { .uevent = pci_uevent, .probe = pci_device_probe, .remove = pci_device_remove, - .suspend = pci_device_suspend, - .suspend_late = pci_device_suspend_late, - .resume_early = pci_device_resume_early, - .resume = pci_device_resume, .shutdown = pci_device_shutdown, .dev_attrs = pci_dev_attrs, + .pm = PCI_PM_OPS_PTR, }; static int __init pci_driver_init(void) diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index e4548ab2a93c..44a46c92b721 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -1,6 +1,4 @@ /* - * $Id: pci.c,v 1.91 1999/01/21 13:34:01 davem Exp $ - * * PCI Bus Services, see include/linux/pci.h for further explanation. 
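/*
 * [Editor's aside, not part of the patch] The pci_pm_* handlers above all
 * follow the same dispatch pattern: use the driver's struct-based PM callback
 * when one exists, otherwise fall back to the legacy suspend/resume entry
 * point (or a default).  This is a stand-alone sketch of that pattern with
 * made-up types; it is not the kernel's PM ops structure.
 */
#include <stdio.h>

struct pm_ops { int (*suspend)(void); };

struct driver {
	struct pm_ops *pm;           /* new-style callbacks, may be NULL */
	int (*legacy_suspend)(void); /* old-style entry point, may be NULL */
};

static int default_suspend(void) { puts("default suspend"); return 0; }

static int do_suspend(struct driver *drv)
{
	if (drv && drv->pm)
		return drv->pm->suspend ? drv->pm->suspend() : default_suspend();
	if (drv && drv->legacy_suspend)
		return drv->legacy_suspend();
	return default_suspend();
}

static int new_style_suspend(void) { puts("new-style suspend"); return 0; }

int main(void)
{
	struct pm_ops ops = { .suspend = new_style_suspend };
	struct driver drv = { .pm = &ops };

	return do_suspend(&drv);
}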
* * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter, @@ -19,6 +17,7 @@ #include <linux/string.h> #include <linux/log2.h> #include <linux/pci-aspm.h> +#include <linux/pm_wakeup.h> #include <asm/dma.h> /* isa_dma_bridge_buggy */ #include "pci.h" @@ -378,74 +377,90 @@ pci_restore_bars(struct pci_dev *dev) pci_update_resource(dev, &dev->resource[i], i); } -int (*platform_pci_set_power_state)(struct pci_dev *dev, pci_power_t t); +static struct pci_platform_pm_ops *pci_platform_pm; -/** - * pci_set_power_state - Set the power state of a PCI device - * @dev: PCI device to be suspended - * @state: PCI power state (D0, D1, D2, D3hot, D3cold) we're entering - * - * Transition a device to a new power state, using the Power Management - * Capabilities in the device's config space. - * - * RETURN VALUE: - * -EINVAL if trying to enter a lower state than we're already in. - * 0 if we're already in the requested state. - * -EIO if device does not support PCI PM. - * 0 if we can successfully change the power state. - */ -int -pci_set_power_state(struct pci_dev *dev, pci_power_t state) +int pci_set_platform_pm(struct pci_platform_pm_ops *ops) { - int pm, need_restore = 0; - u16 pmcsr, pmc; + if (!ops->is_manageable || !ops->set_state || !ops->choose_state + || !ops->sleep_wake || !ops->can_wakeup) + return -EINVAL; + pci_platform_pm = ops; + return 0; +} - /* bound the state we're entering */ - if (state > PCI_D3hot) - state = PCI_D3hot; +static inline bool platform_pci_power_manageable(struct pci_dev *dev) +{ + return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false; +} - /* - * If the device or the parent bridge can't support PCI PM, ignore - * the request if we're doing anything besides putting it into D0 - * (which would only happen on boot). - */ - if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev)) - return 0; +static inline int platform_pci_set_power_state(struct pci_dev *dev, + pci_power_t t) +{ + return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS; +} - /* find PCI PM capability in list */ - pm = pci_find_capability(dev, PCI_CAP_ID_PM); +static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev) +{ + return pci_platform_pm ? + pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR; +} - /* abort if the device doesn't support PM capabilities */ - if (!pm) +static inline bool platform_pci_can_wakeup(struct pci_dev *dev) +{ + return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false; +} + +static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable) +{ + return pci_platform_pm ? + pci_platform_pm->sleep_wake(dev, enable) : -ENODEV; +} + +/** + * pci_raw_set_power_state - Use PCI PM registers to set the power state of + * given PCI device + * @dev: PCI device to handle. + * @state: PCI power state (D0, D1, D2, D3hot) to put the device into. + * + * RETURN VALUE: + * -EINVAL if the requested state is invalid. + * -EIO if device does not support PCI PM or its PM capabilities register has a + * wrong version, or device doesn't support the requested state. + * 0 if device already is in the requested state. + * 0 if device's power state has been successfully changed. 
+ */ +static int +pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state) +{ + u16 pmcsr; + bool need_restore = false; + + if (!dev->pm_cap) return -EIO; + if (state < PCI_D0 || state > PCI_D3hot) + return -EINVAL; + /* Validate current state: * Can enter D0 from any state, but if we can only go deeper * to sleep if we're already in a low power state */ - if (state != PCI_D0 && dev->current_state > state) { - printk(KERN_ERR "%s(): %s: state=%d, current state=%d\n", - __func__, pci_name(dev), state, dev->current_state); + if (dev->current_state == state) { + /* we're already there */ + return 0; + } else if (state != PCI_D0 && dev->current_state <= PCI_D3cold + && dev->current_state > state) { + dev_err(&dev->dev, "invalid power transition " + "(from state %d to %d)\n", dev->current_state, state); return -EINVAL; - } else if (dev->current_state == state) - return 0; /* we're already there */ - - - pci_read_config_word(dev,pm + PCI_PM_PMC,&pmc); - if ((pmc & PCI_PM_CAP_VER_MASK) > 3) { - printk(KERN_DEBUG - "PCI: %s has unsupported PM cap regs version (%u)\n", - pci_name(dev), pmc & PCI_PM_CAP_VER_MASK); - return -EIO; } /* check if this device supports the desired state */ - if (state == PCI_D1 && !(pmc & PCI_PM_CAP_D1)) - return -EIO; - else if (state == PCI_D2 && !(pmc & PCI_PM_CAP_D2)) + if ((state == PCI_D1 && !dev->d1_support) + || (state == PCI_D2 && !dev->d2_support)) return -EIO; - pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr); + pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); /* If we're (effectively) in D3, force entire word to 0. * This doesn't affect PME_Status, disables PME_En, and @@ -461,7 +476,7 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state) case PCI_UNKNOWN: /* Boot-up */ if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET)) - need_restore = 1; + need_restore = true; /* Fall-through: force to D0 */ default: pmcsr = 0; @@ -469,7 +484,7 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state) } /* enter specified state */ - pci_write_config_word(dev, pm + PCI_PM_CTRL, pmcsr); + pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); /* Mandatory power management transition delays */ /* see PCI PM 1.1 5.6.1 table 18 */ @@ -478,13 +493,6 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state) else if (state == PCI_D2 || dev->current_state == PCI_D2) udelay(200); - /* - * Give firmware a chance to be called, such as ACPI _PRx, _PSx - * Firmware method after native method ? - */ - if (platform_pci_set_power_state) - platform_pci_set_power_state(dev, state); - dev->current_state = state; /* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT @@ -508,8 +516,77 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state) return 0; } -pci_power_t (*platform_pci_choose_state)(struct pci_dev *dev, pm_message_t state); - +/** + * pci_update_current_state - Read PCI power state of given device from its + * PCI PM registers and cache it + * @dev: PCI device to handle. + */ +static void pci_update_current_state(struct pci_dev *dev) +{ + if (dev->pm_cap) { + u16 pmcsr; + + pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); + dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK); + } +} + +/** + * pci_set_power_state - Set the power state of a PCI device + * @dev: PCI device to handle. + * @state: PCI power state (D0, D1, D2, D3hot) to put the device into. 
+ * + * Transition a device to a new power state, using the platform formware and/or + * the device's PCI PM registers. + * + * RETURN VALUE: + * -EINVAL if the requested state is invalid. + * -EIO if device does not support PCI PM or its PM capabilities register has a + * wrong version, or device doesn't support the requested state. + * 0 if device already is in the requested state. + * 0 if device's power state has been successfully changed. + */ +int pci_set_power_state(struct pci_dev *dev, pci_power_t state) +{ + int error; + + /* bound the state we're entering */ + if (state > PCI_D3hot) + state = PCI_D3hot; + else if (state < PCI_D0) + state = PCI_D0; + else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev)) + /* + * If the device or the parent bridge do not support PCI PM, + * ignore the request if we're doing anything other than putting + * it into D0 (which would only happen on boot). + */ + return 0; + + if (state == PCI_D0 && platform_pci_power_manageable(dev)) { + /* + * Allow the platform to change the state, for example via ACPI + * _PR0, _PS0 and some such, but do not trust it. + */ + int ret = platform_pci_set_power_state(dev, PCI_D0); + if (!ret) + pci_update_current_state(dev); + } + + error = pci_raw_set_power_state(dev, state); + + if (state > PCI_D0 && platform_pci_power_manageable(dev)) { + /* Allow the platform to finalize the transition */ + int ret = platform_pci_set_power_state(dev, state); + if (!ret) { + pci_update_current_state(dev); + error = 0; + } + } + + return error; +} + /** * pci_choose_state - Choose the power state of a PCI device * @dev: PCI device to be suspended @@ -527,11 +604,9 @@ pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state) if (!pci_find_capability(dev, PCI_CAP_ID_PM)) return PCI_D0; - if (platform_pci_choose_state) { - ret = platform_pci_choose_state(dev, state); - if (ret != PCI_POWER_ERROR) - return ret; - } + ret = platform_pci_choose_state(dev); + if (ret != PCI_POWER_ERROR) + return ret; switch (state.event) { case PM_EVENT_ON: @@ -543,7 +618,8 @@ pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state) case PM_EVENT_HIBERNATE: return PCI_D3hot; default: - printk("Unrecognized suspend event %d\n", state.event); + dev_info(&dev->dev, "unrecognized suspend event %d\n", + state.event); BUG(); } return PCI_D0; @@ -568,7 +644,7 @@ static int pci_save_pcie_state(struct pci_dev *dev) else found = 1; if (!save_state) { - dev_err(&dev->dev, "Out of memory in pci_save_pcie_state\n"); + dev_err(&dev->dev, "out of memory in pci_save_pcie_state\n"); return -ENOMEM; } cap = (u16 *)&save_state->data[0]; @@ -619,7 +695,7 @@ static int pci_save_pcix_state(struct pci_dev *dev) else found = 1; if (!save_state) { - dev_err(&dev->dev, "Out of memory in pci_save_pcie_state\n"); + dev_err(&dev->dev, "out of memory in pci_save_pcie_state\n"); return -ENOMEM; } cap = (u16 *)&save_state->data[0]; @@ -685,10 +761,9 @@ pci_restore_state(struct pci_dev *dev) for (i = 15; i >= 0; i--) { pci_read_config_dword(dev, i * 4, &val); if (val != dev->saved_config_space[i]) { - printk(KERN_DEBUG "PM: Writing back config space on " - "device %s at offset %x (was %x, writing %x)\n", - pci_name(dev), i, - val, (int)dev->saved_config_space[i]); + dev_printk(KERN_DEBUG, &dev->dev, "restoring config " + "space at offset %#x (was %#x, writing %#x)\n", + i, val, (int)dev->saved_config_space[i]); pci_write_config_dword(dev,i * 4, dev->saved_config_space[i]); } @@ -961,6 +1036,46 @@ int pci_set_pcie_reset_state(struct pci_dev *dev, enum 
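/*
 * [Editor's aside, not part of the patch] The clamping at the top of the
 * reworked pci_set_power_state() above: targets deeper than D3hot are bounded
 * to D3hot and targets shallower than D0 to D0.  The numeric encoding
 * (D0=0 .. D3cold=4) follows pci_power_t; this is a stand-alone illustration.
 */
#include <stdio.h>

enum pci_power { PCI_D0 = 0, PCI_D1, PCI_D2, PCI_D3hot, PCI_D3cold };

static int clamp_target_state(int state)
{
	if (state > PCI_D3hot)
		state = PCI_D3hot;
	else if (state < PCI_D0)
		state = PCI_D0;
	return state;
}

int main(void)
{
	printf("D3cold -> D%d\n", clamp_target_state(PCI_D3cold)); /* D3 */
	printf("-1     -> D%d\n", clamp_target_state(-1));         /* D0 */
	return 0;
}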
pcie_reset_state state) } /** + * pci_pme_capable - check the capability of PCI device to generate PME# + * @dev: PCI device to handle. + * @state: PCI state from which device will issue PME#. + */ +static bool pci_pme_capable(struct pci_dev *dev, pci_power_t state) +{ + if (!dev->pm_cap) + return false; + + return !!(dev->pme_support & (1 << state)); +} + +/** + * pci_pme_active - enable or disable PCI device's PME# function + * @dev: PCI device to handle. + * @enable: 'true' to enable PME# generation; 'false' to disable it. + * + * The caller must verify that the device is capable of generating PME# before + * calling this function with @enable equal to 'true'. + */ +static void pci_pme_active(struct pci_dev *dev, bool enable) +{ + u16 pmcsr; + + if (!dev->pm_cap) + return; + + pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); + /* Clear PME_Status by writing 1 to it and enable PME# */ + pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE; + if (!enable) + pmcsr &= ~PCI_PM_CTRL_PME_ENABLE; + + pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); + + dev_printk(KERN_INFO, &dev->dev, "PME# %s\n", + enable ? "enabled" : "disabled"); +} + +/** * pci_enable_wake - enable PCI device as wakeup event source * @dev: PCI device affected * @state: PCI state from which device will issue wakeup events @@ -971,66 +1086,173 @@ int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state) * called automatically by this routine. * * Devices with legacy power management (no standard PCI PM capabilities) - * always require such platform hooks. Depending on the platform, devices - * supporting the standard PCI PME# signal may require such platform hooks; - * they always update bits in config space to allow PME# generation. + * always require such platform hooks. * - * -EIO is returned if the device can't ever be a wakeup event source. - * -EINVAL is returned if the device can't generate wakeup events from - * the specified PCI state. Returns zero if the operation is successful. + * RETURN VALUE: + * 0 is returned on success + * -EINVAL is returned if device is not supposed to wake up the system + * Error code depending on the platform is returned if both the platform and + * the native mechanism fail to enable the generation of wake-up events */ int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable) { - int pm; - int status; - u16 value; - - /* Note that drivers should verify device_may_wakeup(&dev->dev) - * before calling this function. Platform code should report - * errors when drivers try to enable wakeup on devices that - * can't issue wakeups, or on which wakeups were disabled by - * userspace updating the /sys/devices.../power/wakeup file. + int error = 0; + bool pme_done = false; + + if (!device_may_wakeup(&dev->dev)) + return -EINVAL; + + /* + * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don + * Anderson we should be doing PME# wake enable followed by ACPI wake + * enable. To disable wake-up we call the platform first, for symmetry. */ - status = call_platform_enable_wakeup(&dev->dev, enable); + if (!enable && platform_pci_can_wakeup(dev)) + error = platform_pci_sleep_wake(dev, false); - /* find PCI PM capability in list */ - pm = pci_find_capability(dev, PCI_CAP_ID_PM); + if (!enable || pci_pme_capable(dev, state)) { + pci_pme_active(dev, enable); + pme_done = true; + } - /* If device doesn't support PM Capabilities, but caller wants to - * disable wake events, it's a NOP. 
Otherwise fail unless the - * platform hooks handled this legacy device already. - */ - if (!pm) - return enable ? status : 0; + if (enable && platform_pci_can_wakeup(dev)) + error = platform_pci_sleep_wake(dev, true); - /* Check device's ability to generate PME# */ - pci_read_config_word(dev,pm+PCI_PM_PMC,&value); + return pme_done ? 0 : error; +} - value &= PCI_PM_CAP_PME_MASK; - value >>= ffs(PCI_PM_CAP_PME_MASK) - 1; /* First bit of mask */ +/** + * pci_prepare_to_sleep - prepare PCI device for system-wide transition into + * a sleep state + * @dev: Device to handle. + * + * Choose the power state appropriate for the device depending on whether + * it can wake up the system and/or is power manageable by the platform + * (PCI_D3hot is the default) and put the device into that state. + */ +int pci_prepare_to_sleep(struct pci_dev *dev) +{ + pci_power_t target_state = PCI_D3hot; + int error; - /* Check if it can generate PME# from requested state. */ - if (!value || !(value & (1 << state))) { - /* if it can't, revert what the platform hook changed, - * always reporting the base "EINVAL, can't PME#" error + if (platform_pci_power_manageable(dev)) { + /* + * Call the platform to choose the target state of the device + * and enable wake-up from this state if supported. */ - if (enable) - call_platform_enable_wakeup(&dev->dev, 0); - return enable ? -EINVAL : 0; + pci_power_t state = platform_pci_choose_state(dev); + + switch (state) { + case PCI_POWER_ERROR: + case PCI_UNKNOWN: + break; + case PCI_D1: + case PCI_D2: + if (pci_no_d1d2(dev)) + break; + default: + target_state = state; + } + } else if (device_may_wakeup(&dev->dev)) { + /* + * Find the deepest state from which the device can generate + * wake-up events, make it the target state and enable device + * to generate PME#. + */ + if (!dev->pm_cap) + return -EIO; + + if (dev->pme_support) { + while (target_state + && !(dev->pme_support & (1 << target_state))) + target_state--; + } } - pci_read_config_word(dev, pm + PCI_PM_CTRL, &value); + pci_enable_wake(dev, target_state, true); - /* Clear PME_Status by writing 1 to it and enable PME# */ - value |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE; + error = pci_set_power_state(dev, target_state); - if (!enable) - value &= ~PCI_PM_CTRL_PME_ENABLE; + if (error) + pci_enable_wake(dev, target_state, false); - pci_write_config_word(dev, pm + PCI_PM_CTRL, value); + return error; +} - return 0; +/** + * pci_back_from_sleep - turn PCI device on during system-wide transition into + * the working state a sleep state + * @dev: Device to handle. + * + * Disable device's sytem wake-up capability and put it into D0. + */ +int pci_back_from_sleep(struct pci_dev *dev) +{ + pci_enable_wake(dev, PCI_D0, false); + return pci_set_power_state(dev, PCI_D0); +} + +/** + * pci_pm_init - Initialize PM functions of given PCI device + * @dev: PCI device to handle. 
+ */ +void pci_pm_init(struct pci_dev *dev) +{ + int pm; + u16 pmc; + + dev->pm_cap = 0; + + /* find PCI PM capability in list */ + pm = pci_find_capability(dev, PCI_CAP_ID_PM); + if (!pm) + return; + /* Check device's ability to generate PME# */ + pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc); + + if ((pmc & PCI_PM_CAP_VER_MASK) > 3) { + dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n", + pmc & PCI_PM_CAP_VER_MASK); + return; + } + + dev->pm_cap = pm; + + dev->d1_support = false; + dev->d2_support = false; + if (!pci_no_d1d2(dev)) { + if (pmc & PCI_PM_CAP_D1) { + dev_printk(KERN_DEBUG, &dev->dev, "supports D1\n"); + dev->d1_support = true; + } + if (pmc & PCI_PM_CAP_D2) { + dev_printk(KERN_DEBUG, &dev->dev, "supports D2\n"); + dev->d2_support = true; + } + } + + pmc &= PCI_PM_CAP_PME_MASK; + if (pmc) { + dev_printk(KERN_INFO, &dev->dev, + "PME# supported from%s%s%s%s%s\n", + (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "", + (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "", + (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "", + (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "", + (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : ""); + dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT; + /* + * Make device's PM flags reflect the wake-up capability, but + * let the user space enable it to wake up the system as needed. + */ + device_set_wakeup_capable(&dev->dev, true); + device_set_wakeup_enable(&dev->dev, false); + /* Disable the PME# generation functionality */ + pci_pme_active(dev, false); + } else { + dev->pme_support = 0; + } } int @@ -1116,13 +1338,11 @@ int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) return 0; err_out: - printk (KERN_WARNING "PCI: Unable to reserve %s region #%d:%llx@%llx " - "for device %s\n", - pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem", - bar + 1, /* PCI BAR # */ - (unsigned long long)pci_resource_len(pdev, bar), - (unsigned long long)pci_resource_start(pdev, bar), - pci_name(pdev)); + dev_warn(&pdev->dev, "BAR %d: can't reserve %s region [%#llx-%#llx]\n", + bar, + pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem", + (unsigned long long)pci_resource_start(pdev, bar), + (unsigned long long)pci_resource_end(pdev, bar)); return -EBUSY; } @@ -1214,7 +1434,7 @@ pci_set_master(struct pci_dev *dev) pci_read_config_word(dev, PCI_COMMAND, &cmd); if (! (cmd & PCI_COMMAND_MASTER)) { - pr_debug("PCI: Enabling bus mastering for device %s\n", pci_name(dev)); + dev_dbg(&dev->dev, "enabling bus mastering\n"); cmd |= PCI_COMMAND_MASTER; pci_write_config_word(dev, PCI_COMMAND, cmd); } @@ -1279,8 +1499,8 @@ pci_set_cacheline_size(struct pci_dev *dev) if (cacheline_size == pci_cache_line_size) return 0; - printk(KERN_DEBUG "PCI: cache line size of %d is not supported " - "by device %s\n", pci_cache_line_size << 2, pci_name(dev)); + dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not " + "supported\n", pci_cache_line_size << 2); return -EINVAL; } @@ -1305,8 +1525,7 @@ pci_set_mwi(struct pci_dev *dev) pci_read_config_word(dev, PCI_COMMAND, &cmd); if (! 
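/*
 * [Editor's aside, not part of the patch] How the PME# support field read in
 * pci_pm_init() above maps onto the per-state test used by pci_pme_capable():
 * bits 15:11 of the PM Capabilities register advertise PME# support for
 * D3cold, D3hot, D2, D1 and D0, and shifting by PCI_PM_CAP_PME_SHIFT lines
 * them up with the pci_power_t numbering.  Stand-alone sketch, example value.
 */
#include <stdio.h>
#include <stdint.h>

#define PCI_PM_CAP_PME_SHIFT 11

int main(void)
{
	static const char *names[] = { "D0", "D1", "D2", "D3hot", "D3cold" };
	uint16_t pmc = 0xc800; /* example: PME# from D0, D3hot and D3cold */
	uint16_t pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
	int d;

	for (d = 0; d <= 4; d++)
		printf("%s: PME# %ssupported\n", names[d],
		       (pme_support & (1 << d)) ? "" : "not ");
	return 0;
}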
(cmd & PCI_COMMAND_INVALIDATE)) { - pr_debug("PCI: Enabling Mem-Wr-Inval for device %s\n", - pci_name(dev)); + dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n"); cmd |= PCI_COMMAND_INVALIDATE; pci_write_config_word(dev, PCI_COMMAND, cmd); } @@ -1702,5 +1921,7 @@ EXPORT_SYMBOL(pci_set_power_state); EXPORT_SYMBOL(pci_save_state); EXPORT_SYMBOL(pci_restore_state); EXPORT_SYMBOL(pci_enable_wake); +EXPORT_SYMBOL(pci_prepare_to_sleep); +EXPORT_SYMBOL(pci_back_from_sleep); EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state); diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 00408c97e5fc..d807cd786f20 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -5,11 +5,36 @@ extern int pci_create_sysfs_dev_files(struct pci_dev *pdev); extern void pci_remove_sysfs_dev_files(struct pci_dev *pdev); extern void pci_cleanup_rom(struct pci_dev *dev); -/* Firmware callbacks */ -extern pci_power_t (*platform_pci_choose_state)(struct pci_dev *dev, - pm_message_t state); -extern int (*platform_pci_set_power_state)(struct pci_dev *dev, - pci_power_t state); +/** + * Firmware PM callbacks + * + * @is_manageable - returns 'true' if given device is power manageable by the + * platform firmware + * + * @set_state - invokes the platform firmware to set the device's power state + * + * @choose_state - returns PCI power state of given device preferred by the + * platform; to be used during system-wide transitions from a + * sleeping state to the working state and vice versa + * + * @can_wakeup - returns 'true' if given device is capable of waking up the + * system from a sleeping state + * + * @sleep_wake - enables/disables the system wake up capability of given device + * + * If given platform is generally capable of power managing PCI devices, all of + * these callbacks are mandatory. 
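/*
 * A rough sketch, not taken from this patch, of how a platform layer (for
 * example the ACPI glue) might plug into the ops table declared just below
 * and hand it to the PCI core with pci_set_platform_pm(). The acpi_pci_*
 * callbacks are placeholder names assumed to be implemented elsewhere.
 */
#include <linux/pci.h>
#include "pci.h"

/* Assumed to be provided by the platform glue; signatures follow the ops. */
extern bool acpi_pci_power_manageable(struct pci_dev *dev);
extern int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state);
extern pci_power_t acpi_pci_choose_state(struct pci_dev *dev);
extern bool acpi_pci_can_wakeup(struct pci_dev *dev);
extern int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable);

static struct pci_platform_pm_ops acpi_pci_platform_pm = {
	.is_manageable	= acpi_pci_power_manageable,
	.set_state	= acpi_pci_set_power_state,
	.choose_state	= acpi_pci_choose_state,
	.can_wakeup	= acpi_pci_can_wakeup,
	.sleep_wake	= acpi_pci_sleep_wake,
};

static int acpi_pci_pm_register(void)
{
	/* Per the comment above, all five callbacks are mandatory. */
	return pci_set_platform_pm(&acpi_pci_platform_pm);
}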
+ */ +struct pci_platform_pm_ops { + bool (*is_manageable)(struct pci_dev *dev); + int (*set_state)(struct pci_dev *dev, pci_power_t state); + pci_power_t (*choose_state)(struct pci_dev *dev); + bool (*can_wakeup)(struct pci_dev *dev); + int (*sleep_wake)(struct pci_dev *dev, bool enable); +}; + +extern int pci_set_platform_pm(struct pci_platform_pm_ops *ops); +extern void pci_pm_init(struct pci_dev *dev); extern int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val); extern int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val); @@ -106,3 +131,16 @@ pci_match_one_device(const struct pci_device_id *id, const struct pci_dev *dev) } struct pci_dev *pci_find_upstream_pcie_bridge(struct pci_dev *pdev); + +/* PCI slot sysfs helper code */ +#define to_pci_slot(s) container_of(s, struct pci_slot, kobj) + +extern struct kset *pci_slots_kset; + +struct pci_slot_attribute { + struct attribute attr; + ssize_t (*show)(struct pci_slot *, char *); + ssize_t (*store)(struct pci_slot *, const char *, size_t); +}; +#define to_pci_slot_attr(s) container_of(s, struct pci_slot_attribute, attr) + diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c index 07c3bdb6edc2..77036f46acfe 100644 --- a/drivers/pci/pcie/aer/aerdrv.c +++ b/drivers/pci/pcie/aer/aerdrv.c @@ -26,6 +26,7 @@ #include <linux/pcieport_if.h> #include "aerdrv.h" +#include "../../pci.h" /* * Version Information @@ -219,8 +220,7 @@ static int __devinit aer_probe (struct pcie_device *dev, /* Alloc rpc data structure */ if (!(rpc = aer_alloc_rpc(dev))) { - printk(KERN_DEBUG "%s: Alloc rpc fails on PCIE device[%s]\n", - __func__, device->bus_id); + dev_printk(KERN_DEBUG, device, "alloc rpc failed\n"); aer_remove(dev); return -ENOMEM; } @@ -228,8 +228,7 @@ static int __devinit aer_probe (struct pcie_device *dev, /* Request IRQ ISR */ if ((status = request_irq(dev->irq, aer_irq, IRQF_SHARED, "aerdrv", dev))) { - printk(KERN_DEBUG "%s: Request ISR fails on PCIE device[%s]\n", - __func__, device->bus_id); + dev_printk(KERN_DEBUG, device, "request IRQ failed\n"); aer_remove(dev); return status; } @@ -273,7 +272,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev) * to issue Configuration Requests to those devices. */ msleep(200); - printk(KERN_DEBUG "Complete link reset at Root[%s]\n", dev->dev.bus_id); + dev_printk(KERN_DEBUG, &dev->dev, "Root Port link has been reset\n"); /* Enable Root Port's interrupt in response to error messages */ pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &status); diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c index d39a78dbd026..30f581b8791f 100644 --- a/drivers/pci/pcie/aer/aerdrv_acpi.c +++ b/drivers/pci/pcie/aer/aerdrv_acpi.c @@ -50,10 +50,10 @@ int aer_osc_setup(struct pcie_device *pciedev) } if (ACPI_FAILURE(status)) { - printk(KERN_DEBUG "AER service couldn't init device %s - %s\n", - pciedev->device.bus_id, - (status == AE_SUPPORT || status == AE_NOT_FOUND) ? - "no _OSC support" : "Run ACPI _OSC fails"); + dev_printk(KERN_DEBUG, &pciedev->device, "AER service couldn't " + "init device: %s\n", + (status == AE_SUPPORT || status == AE_NOT_FOUND) ? 
+ "no _OSC support" : "_OSC failed"); return -1; } diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c index aaa82392d1dc..ee5e7b5176d0 100644 --- a/drivers/pci/pcie/aer/aerdrv_core.c +++ b/drivers/pci/pcie/aer/aerdrv_core.c @@ -221,9 +221,9 @@ static void report_error_detected(struct pci_dev *dev, void *data) * of a driver for this device is unaware of * its hw state. */ - printk(KERN_DEBUG "Device ID[%s] has %s\n", - dev->dev.bus_id, (dev->driver) ? - "no AER-aware driver" : "no driver"); + dev_printk(KERN_DEBUG, &dev->dev, "device has %s\n", + dev->driver ? + "no AER-aware driver" : "no driver"); } return; } @@ -304,7 +304,7 @@ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev, { struct aer_broadcast_data result_data; - printk(KERN_DEBUG "Broadcast %s message\n", error_mesg); + dev_printk(KERN_DEBUG, &dev->dev, "broadcast %s message\n", error_mesg); result_data.state = state; if (cb == report_error_detected) result_data.result = PCI_ERS_RESULT_CAN_RECOVER; @@ -404,18 +404,16 @@ static pci_ers_result_t reset_link(struct pcie_device *aerdev, data.aer_driver = to_service_driver(aerdev->device.driver); } else { - printk(KERN_DEBUG "No link-reset support to Device ID" - "[%s]\n", - dev->dev.bus_id); + dev_printk(KERN_DEBUG, &dev->dev, "no link-reset " + "support\n"); return PCI_ERS_RESULT_DISCONNECT; } } status = data.aer_driver->reset_link(udev); if (status != PCI_ERS_RESULT_RECOVERED) { - printk(KERN_DEBUG "Link reset at upstream Device ID" - "[%s] failed\n", - udev->dev.bus_id); + dev_printk(KERN_DEBUG, &dev->dev, "link reset at upstream " + "device %s failed\n", pci_name(udev)); return PCI_ERS_RESULT_DISCONNECT; } @@ -511,10 +509,12 @@ static void handle_error_source(struct pcie_device * aerdev, } else { status = do_recovery(aerdev, dev, info.severity); if (status == PCI_ERS_RESULT_RECOVERED) { - printk(KERN_DEBUG "AER driver successfully recovered\n"); + dev_printk(KERN_DEBUG, &dev->dev, "AER driver " + "successfully recovered\n"); } else { /* TODO: Should kernel panic here? 
*/ - printk(KERN_DEBUG "AER driver didn't recover\n"); + dev_printk(KERN_DEBUG, &dev->dev, "AER driver didn't " + "recover\n"); } } } diff --git a/drivers/pci/pcie/portdrv_bus.c b/drivers/pci/pcie/portdrv_bus.c index 3f0976868eda..359fe5568df1 100644 --- a/drivers/pci/pcie/portdrv_bus.c +++ b/drivers/pci/pcie/portdrv_bus.c @@ -13,6 +13,7 @@ #include <linux/pm.h> #include <linux/pcieport_if.h> +#include "portdrv.h" static int pcie_port_bus_match(struct device *dev, struct device_driver *drv); static int pcie_port_bus_suspend(struct device *dev, pm_message_t state); diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index fb0abfa508dc..890f0d2b370a 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c @@ -23,20 +23,20 @@ static int pcie_port_probe_service(struct device *dev) { struct pcie_device *pciedev; struct pcie_port_service_driver *driver; - int status = -ENODEV; + int status; if (!dev || !dev->driver) - return status; + return -ENODEV; driver = to_service_driver(dev->driver); if (!driver || !driver->probe) - return status; + return -ENODEV; pciedev = to_pcie_device(dev); status = driver->probe(pciedev, driver->id_table); if (!status) { - printk(KERN_DEBUG "Load service driver %s on pcie device %s\n", - driver->name, dev->bus_id); + dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n", + driver->name); get_device(dev); } return status; @@ -53,8 +53,8 @@ static int pcie_port_remove_service(struct device *dev) pciedev = to_pcie_device(dev); driver = to_service_driver(dev->driver); if (driver && driver->remove) { - printk(KERN_DEBUG "Unload service driver %s on pcie device %s\n", - driver->name, dev->bus_id); + dev_printk(KERN_DEBUG, dev, "unloading service driver %s\n", + driver->name); driver->remove(pciedev); put_device(dev); } @@ -103,7 +103,7 @@ static int pcie_port_resume_service(struct device *dev) */ static void release_pcie_device(struct device *dev) { - printk(KERN_DEBUG "Free Port Service[%s]\n", dev->bus_id); + dev_printk(KERN_DEBUG, dev, "free port service\n"); kfree(to_pcie_device(dev)); } @@ -150,7 +150,7 @@ static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask) if (pos) { struct msix_entry msix_entries[PCIE_PORT_DEVICE_MAXSERVICES] = {{0, 0}, {0, 1}, {0, 2}, {0, 3}}; - printk("%s Found MSIX capability\n", __func__); + dev_info(&dev->dev, "found MSI-X capability\n"); status = pci_enable_msix(dev, msix_entries, nvec); if (!status) { int j = 0; @@ -165,7 +165,7 @@ static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask) if (status) { pos = pci_find_capability(dev, PCI_CAP_ID_MSI); if (pos) { - printk("%s Found MSI capability\n", __func__); + dev_info(&dev->dev, "found MSI capability\n"); status = pci_enable_msi(dev); if (!status) { interrupt_mode = PCIE_PORT_MSI_MODE; @@ -252,7 +252,7 @@ static struct pcie_device* alloc_pcie_device(struct pci_dev *parent, return NULL; pcie_device_init(parent, device, port_type, service_type, irq,irq_mode); - printk(KERN_DEBUG "Allocate Port Service[%s]\n", device->device.bus_id); + dev_printk(KERN_DEBUG, &device->device, "allocate port service\n"); return device; } diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 51d163238d93..367c9c20000d 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c @@ -91,9 +91,8 @@ static int __devinit pcie_portdrv_probe (struct pci_dev *dev, pci_set_master(dev); if (!dev->irq && dev->pin) { - printk(KERN_WARNING - "%s->Dev[%04x:%04x] has invalid IRQ. 
Check vendor BIOS\n", - __func__, dev->vendor, dev->device); + dev_warn(&dev->dev, "device [%04x/%04x] has invalid IRQ; " + "check vendor BIOS\n", dev->vendor, dev->device); } if (pcie_port_device_register(dev)) { pci_disable_device(dev); diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 3706ce7972dd..b1724cf31b66 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -277,8 +277,8 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) res->end = res->start + sz64; #else if (sz64 > 0x100000000ULL) { - printk(KERN_ERR "PCI: Unable to handle 64-bit " - "BAR for device %s\n", pci_name(dev)); + dev_err(&dev->dev, "BAR %d: can't handle 64-bit" + " BAR\n", pos); res->start = 0; res->flags = 0; } else if (lhi) { @@ -329,7 +329,7 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child) return; if (dev->transparent) { - printk(KERN_INFO "PCI: Transparent bridge - %s\n", pci_name(dev)); + dev_info(&dev->dev, "transparent bridge\n"); for(i = 3; i < PCI_BUS_NUM_RESOURCES; i++) child->resource[i] = child->parent->resource[i - 3]; } @@ -392,7 +392,8 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child) limit |= ((long) mem_limit_hi) << 32; #else if (mem_base_hi || mem_limit_hi) { - printk(KERN_ERR "PCI: Unable to handle 64-bit address space for bridge %s\n", pci_name(dev)); + dev_err(&dev->dev, "can't handle 64-bit " + "address space for bridge\n"); return; } #endif @@ -414,6 +415,7 @@ static struct pci_bus * pci_alloc_bus(void) INIT_LIST_HEAD(&b->node); INIT_LIST_HEAD(&b->children); INIT_LIST_HEAD(&b->devices); + INIT_LIST_HEAD(&b->slots); } return b; } @@ -511,8 +513,8 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses); - pr_debug("PCI: Scanning behind PCI bridge %s, config %06x, pass %d\n", - pci_name(dev), buses & 0xffffff, pass); + dev_dbg(&dev->dev, "scanning behind bridge, config %06x, pass %d\n", + buses & 0xffffff, pass); /* Disable MasterAbortMode during probing to avoid reporting of bus errors (in some architectures) */ @@ -535,8 +537,8 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, * ignore it. This can happen with the i450NX chipset. 
*/ if (pci_find_bus(pci_domain_nr(bus), busnr)) { - printk(KERN_INFO "PCI: Bus %04x:%02x already known\n", - pci_domain_nr(bus), busnr); + dev_info(&dev->dev, "bus %04x:%02x already known\n", + pci_domain_nr(bus), busnr); goto out; } @@ -711,8 +713,9 @@ static int pci_setup_device(struct pci_dev * dev) { u32 class; - sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus), - dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn)); + dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus), + dev->bus->number, PCI_SLOT(dev->devfn), + PCI_FUNC(dev->devfn)); pci_read_config_dword(dev, PCI_CLASS_REVISION, &class); dev->revision = class & 0xff; @@ -720,7 +723,7 @@ static int pci_setup_device(struct pci_dev * dev) dev->class = class; class >>= 8; - pr_debug("PCI: Found %s [%04x/%04x] %06x %02x\n", pci_name(dev), + dev_dbg(&dev->dev, "found [%04x/%04x] class %06x header type %02x\n", dev->vendor, dev->device, class, dev->hdr_type); /* "Unknown power state" */ @@ -788,13 +791,13 @@ static int pci_setup_device(struct pci_dev * dev) break; default: /* unknown header */ - printk(KERN_ERR "PCI: device %s has unknown header type %02x, ignoring.\n", - pci_name(dev), dev->hdr_type); + dev_err(&dev->dev, "unknown header type %02x, " + "ignoring device\n", dev->hdr_type); return -1; bad: - printk(KERN_ERR "PCI: %s: class %x doesn't match header type %02x. Ignoring class.\n", - pci_name(dev), class, dev->hdr_type); + dev_err(&dev->dev, "ignoring class %02x (doesn't match header " + "type %02x)\n", class, dev->hdr_type); dev->class = PCI_CLASS_NOT_DEFINED; } @@ -927,7 +930,7 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn) return NULL; /* Card hasn't responded in 60 seconds? Must be stuck. */ if (delay > 60 * 1000) { - printk(KERN_WARNING "Device %04x:%02x:%02x.%d not " + printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not " "responding\n", pci_domain_nr(bus), bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn)); @@ -984,6 +987,9 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) /* Fix up broken headers */ pci_fixup_device(pci_fixup_header, dev); + /* Initialize power management of the device */ + pci_pm_init(dev); + /* * Add the device to our list of discovered devices * and the bus list for fixup functions, etc. diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c index 963a97642ae9..4400dffbd93a 100644 --- a/drivers/pci/proc.c +++ b/drivers/pci/proc.c @@ -1,6 +1,4 @@ /* - * $Id: proc.c,v 1.13 1998/05/12 07:36:07 mj Exp $ - * * Procfs interface for the PCI bus. * * Copyright (c) 1997--1999 Martin Mares <mj@ucw.cz> @@ -482,5 +480,5 @@ static int __init pci_proc_init(void) return 0; } -__initcall(pci_proc_init); +device_initcall(pci_proc_init); diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 338a3f94b4d4..12d489395fad 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -556,7 +556,7 @@ static void quirk_via_ioapic(struct pci_dev *dev) pci_write_config_byte (dev, 0x58, tmp); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic); /* * VIA 8237: Some BIOSs don't set the 'Bypass APIC De-Assert Message' Bit. 
@@ -576,7 +576,7 @@ static void quirk_via_vt8237_bypass_apic_deassert(struct pci_dev *dev) } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert); /* * The AMD io apic can hang the box when an apic irq is masked. @@ -622,7 +622,7 @@ static void quirk_amd_8131_ioapic(struct pci_dev *dev) } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_ioapic); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_ioapic); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_ioapic); #endif /* CONFIG_X86_IO_APIC */ /* @@ -774,7 +774,7 @@ static void quirk_cardbus_legacy(struct pci_dev *dev) pci_write_config_dword(dev, PCI_CB_LEGACY_MODE_BASE, 0); } DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_cardbus_legacy); -DECLARE_PCI_FIXUP_RESUME(PCI_ANY_ID, PCI_ANY_ID, quirk_cardbus_legacy); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_ANY_ID, PCI_ANY_ID, quirk_cardbus_legacy); /* * Following the PCI ordering rules is optional on the AMD762. I'm not @@ -797,7 +797,7 @@ static void quirk_amd_ordering(struct pci_dev *dev) } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering); /* * DreamWorks provided workaround for Dunord I-3000 problem @@ -865,7 +865,7 @@ static void quirk_disable_pxb(struct pci_dev *pdev) } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb); static void __devinit quirk_amd_ide_mode(struct pci_dev *pdev) { @@ -885,9 +885,9 @@ static void __devinit quirk_amd_ide_mode(struct pci_dev *pdev) } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode); /* * Serverworks CSB5 IDE does not fully support native mode @@ -1054,6 +1054,20 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev) * its on-board VGA controller */ asus_hides_smbus = 1; } + else if (dev->device == PCI_DEVICE_ID_INTEL_82845G_IG) + switch(dev->subsystem_device) { + case 0x00b8: /* Compaq Evo D510 CMT */ + case 0x00b9: /* Compaq Evo D510 SFF */ + asus_hides_smbus = 1; + } + else if (dev->device == PCI_DEVICE_ID_INTEL_82815_CGC) + switch (dev->subsystem_device) { + case 0x001A: /* Compaq Deskpro EN SSF P667 815E */ + /* Motherboard doesn't have host bridge + * subvendor/subdevice IDs, therefore checking + * its on-board VGA 
controller */ + asus_hides_smbus = 1; + } } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845_HB, asus_hides_smbus_hostbridge); @@ -1068,6 +1082,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855GM_HB, as DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82810_IG3, asus_hides_smbus_hostbridge); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845G_IG, asus_hides_smbus_hostbridge); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC, asus_hides_smbus_hostbridge); static void asus_hides_smbus_lpc(struct pci_dev *dev) { @@ -1093,31 +1109,61 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asu DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, asus_hides_smbus_lpc); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, asus_hides_smbus_lpc); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc); -static void asus_hides_smbus_lpc_ich6(struct pci_dev *dev) +/* It appears we just have one such device. 
If not, we have a warning */ +static void __iomem *asus_rcba_base; +static void asus_hides_smbus_lpc_ich6_suspend(struct pci_dev *dev) { - u32 val, rcba; - void __iomem *base; + u32 rcba; if (likely(!asus_hides_smbus)) return; + WARN_ON(asus_rcba_base); + pci_read_config_dword(dev, 0xF0, &rcba); - base = ioremap_nocache(rcba & 0xFFFFC000, 0x4000); /* use bits 31:14, 16 kB aligned */ - if (base == NULL) return; - val=readl(base + 0x3418); /* read the Function Disable register, dword mode only */ - writel(val & 0xFFFFFFF7, base + 0x3418); /* enable the SMBus device */ - iounmap(base); + /* use bits 31:14, 16 kB aligned */ + asus_rcba_base = ioremap_nocache(rcba & 0xFFFFC000, 0x4000); + if (asus_rcba_base == NULL) + return; +} + +static void asus_hides_smbus_lpc_ich6_resume_early(struct pci_dev *dev) +{ + u32 val; + + if (likely(!asus_hides_smbus || !asus_rcba_base)) + return; + /* read the Function Disable register, dword mode only */ + val = readl(asus_rcba_base + 0x3418); + writel(val & 0xFFFFFFF7, asus_rcba_base + 0x3418); /* enable the SMBus device */ +} + +static void asus_hides_smbus_lpc_ich6_resume(struct pci_dev *dev) +{ + if (likely(!asus_hides_smbus || !asus_rcba_base)) + return; + iounmap(asus_rcba_base); + asus_rcba_base = NULL; dev_info(&dev->dev, "Enabled ICH6/i801 SMBus device\n"); } + +static void asus_hides_smbus_lpc_ich6(struct pci_dev *dev) +{ + asus_hides_smbus_lpc_ich6_suspend(dev); + asus_hides_smbus_lpc_ich6_resume_early(dev); + asus_hides_smbus_lpc_ich6_resume(dev); +} DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6); +DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_suspend); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume_early); /* * SiS 96x south bridge: BIOS typically hides SMBus device... @@ -1135,10 +1181,10 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus); /* * ... 
This is further complicated by the fact that some SiS96x south @@ -1172,7 +1218,7 @@ static void quirk_sis_503(struct pci_dev *dev) quirk_sis_96x_smbus(dev); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503); /* @@ -1205,7 +1251,7 @@ static void asus_hides_ac97_lpc(struct pci_dev *dev) } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc); #if defined(CONFIG_ATA) || defined(CONFIG_ATA_MODULE) @@ -1270,12 +1316,12 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, qui DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata); #endif @@ -1521,6 +1567,10 @@ extern struct pci_fixup __start_pci_fixups_enable[]; extern struct pci_fixup __end_pci_fixups_enable[]; extern struct pci_fixup __start_pci_fixups_resume[]; extern struct pci_fixup __end_pci_fixups_resume[]; +extern struct pci_fixup __start_pci_fixups_resume_early[]; +extern struct pci_fixup __end_pci_fixups_resume_early[]; +extern struct pci_fixup __start_pci_fixups_suspend[]; +extern struct pci_fixup __end_pci_fixups_suspend[]; void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) @@ -1553,6 +1603,16 @@ void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) end = __end_pci_fixups_resume; break; + case pci_fixup_resume_early: + start = __start_pci_fixups_resume_early; + end = __end_pci_fixups_resume_early; + break; + + case pci_fixup_suspend: + start = __start_pci_fixups_suspend; + end = __end_pci_fixups_suspend; + break; + default: /* stupid compiler warning, you would think with an enum... 
*/ return; @@ -1629,7 +1689,7 @@ static void quirk_nvidia_ck804_pcie_aer_ext_cap(struct pci_dev *dev) } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE, quirk_nvidia_ck804_pcie_aer_ext_cap); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE, +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE, quirk_nvidia_ck804_pcie_aer_ext_cap); static void __devinit quirk_via_cx700_pci_parking_caching(struct pci_dev *dev) diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 8ddb918f5f57..827c0a520e2b 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -27,13 +27,6 @@ #include <linux/slab.h> -#define DEBUG_CONFIG 1 -#if DEBUG_CONFIG -#define DBG(x...) printk(x) -#else -#define DBG(x...) -#endif - static void pbus_assign_resources_sorted(struct pci_bus *bus) { struct pci_dev *dev; @@ -81,8 +74,8 @@ void pci_setup_cardbus(struct pci_bus *bus) struct pci_dev *bridge = bus->self; struct pci_bus_region region; - printk("PCI: Bus %d, cardbus bridge: %s\n", - bus->number, pci_name(bridge)); + dev_info(&bridge->dev, "CardBus bridge, secondary bus %04x:%02x\n", + pci_domain_nr(bus), bus->number); pcibios_resource_to_bus(bridge, ®ion, bus->resource[0]); if (bus->resource[0]->flags & IORESOURCE_IO) { @@ -90,7 +83,7 @@ void pci_setup_cardbus(struct pci_bus *bus) * The IO resource is allocated a range twice as large as it * would normally need. This allows us to set both IO regs. */ - printk(KERN_INFO " IO window: 0x%08lx-0x%08lx\n", + dev_info(&bridge->dev, " IO window: %#08lx-%#08lx\n", (unsigned long)region.start, (unsigned long)region.end); pci_write_config_dword(bridge, PCI_CB_IO_BASE_0, @@ -101,7 +94,7 @@ void pci_setup_cardbus(struct pci_bus *bus) pcibios_resource_to_bus(bridge, ®ion, bus->resource[1]); if (bus->resource[1]->flags & IORESOURCE_IO) { - printk(KERN_INFO " IO window: 0x%08lx-0x%08lx\n", + dev_info(&bridge->dev, " IO window: %#08lx-%#08lx\n", (unsigned long)region.start, (unsigned long)region.end); pci_write_config_dword(bridge, PCI_CB_IO_BASE_1, @@ -112,7 +105,7 @@ void pci_setup_cardbus(struct pci_bus *bus) pcibios_resource_to_bus(bridge, ®ion, bus->resource[2]); if (bus->resource[2]->flags & IORESOURCE_MEM) { - printk(KERN_INFO " PREFETCH window: 0x%08lx-0x%08lx\n", + dev_info(&bridge->dev, " PREFETCH window: %#08lx-%#08lx\n", (unsigned long)region.start, (unsigned long)region.end); pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0, @@ -123,7 +116,7 @@ void pci_setup_cardbus(struct pci_bus *bus) pcibios_resource_to_bus(bridge, ®ion, bus->resource[3]); if (bus->resource[3]->flags & IORESOURCE_MEM) { - printk(KERN_INFO " MEM window: 0x%08lx-0x%08lx\n", + dev_info(&bridge->dev, " MEM window: %#08lx-%#08lx\n", (unsigned long)region.start, (unsigned long)region.end); pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1, @@ -151,7 +144,8 @@ static void pci_setup_bridge(struct pci_bus *bus) struct pci_bus_region region; u32 l, bu, lu, io_upper16; - DBG(KERN_INFO "PCI: Bridge: %s\n", pci_name(bridge)); + dev_info(&bridge->dev, "PCI bridge, secondary bus %04x:%02x\n", + pci_domain_nr(bus), bus->number); /* Set up the top and bottom of the PCI I/O segment for this bus. */ pcibios_resource_to_bus(bridge, ®ion, bus->resource[0]); @@ -162,7 +156,7 @@ static void pci_setup_bridge(struct pci_bus *bus) l |= region.end & 0xf000; /* Set up upper 16 bits of I/O base/limit. 
*/ io_upper16 = (region.end & 0xffff0000) | (region.start >> 16); - DBG(KERN_INFO " IO window: %04lx-%04lx\n", + dev_info(&bridge->dev, " IO window: %#04lx-%#04lx\n", (unsigned long)region.start, (unsigned long)region.end); } @@ -170,7 +164,7 @@ static void pci_setup_bridge(struct pci_bus *bus) /* Clear upper 16 bits of I/O base/limit. */ io_upper16 = 0; l = 0x00f0; - DBG(KERN_INFO " IO window: disabled.\n"); + dev_info(&bridge->dev, " IO window: disabled\n"); } /* Temporarily disable the I/O range before updating PCI_IO_BASE. */ pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff); @@ -185,13 +179,13 @@ static void pci_setup_bridge(struct pci_bus *bus) if (bus->resource[1]->flags & IORESOURCE_MEM) { l = (region.start >> 16) & 0xfff0; l |= region.end & 0xfff00000; - DBG(KERN_INFO " MEM window: 0x%08lx-0x%08lx\n", + dev_info(&bridge->dev, " MEM window: %#08lx-%#08lx\n", (unsigned long)region.start, (unsigned long)region.end); } else { l = 0x0000fff0; - DBG(KERN_INFO " MEM window: disabled.\n"); + dev_info(&bridge->dev, " MEM window: disabled\n"); } pci_write_config_dword(bridge, PCI_MEMORY_BASE, l); @@ -208,13 +202,13 @@ static void pci_setup_bridge(struct pci_bus *bus) l |= region.end & 0xfff00000; bu = upper_32_bits(region.start); lu = upper_32_bits(region.end); - DBG(KERN_INFO " PREFETCH window: 0x%016llx-0x%016llx\n", + dev_info(&bridge->dev, " PREFETCH window: %#016llx-%#016llx\n", (unsigned long long)region.start, (unsigned long long)region.end); } else { l = 0x0000fff0; - DBG(KERN_INFO " PREFETCH window: disabled.\n"); + dev_info(&bridge->dev, " PREFETCH window: disabled\n"); } pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l); @@ -361,9 +355,8 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long align = (i < PCI_BRIDGE_RESOURCES) ? r_size : r->start; order = __ffs(align) - 20; if (order > 11) { - printk(KERN_WARNING "PCI: region %s/%d " - "too large: 0x%016llx-0x%016llx\n", - pci_name(dev), i, + dev_warn(&dev->dev, "BAR %d too large: " + "%#016llx-%#016llx\n", i, (unsigned long long)r->start, (unsigned long long)r->end); r->flags = 0; @@ -529,8 +522,8 @@ void __ref pci_bus_assign_resources(struct pci_bus *bus) break; default: - printk(KERN_INFO "PCI: not setting up bridge %s " - "for bus %d\n", pci_name(dev), b->number); + dev_info(&dev->dev, "not setting up bridge for bus " + "%04x:%02x\n", pci_domain_nr(b), b->number); break; } } diff --git a/drivers/pci/setup-irq.c b/drivers/pci/setup-irq.c index 05ca2ed9eb51..aa795fd428de 100644 --- a/drivers/pci/setup-irq.c +++ b/drivers/pci/setup-irq.c @@ -47,8 +47,7 @@ pdev_fixup_irq(struct pci_dev *dev, } dev->irq = irq; - pr_debug("PCI: fixup irq: (%s) got %d\n", - kobject_name(&dev->dev.kobj), dev->irq); + dev_dbg(&dev->dev, "fixup irq: got %d\n", dev->irq); /* Always tell the device, so the driver knows what is the real IRQ to use; the device does not use it. */ diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index 7d35cdf4579f..1a5fc83c71b3 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c @@ -26,8 +26,7 @@ #include "pci.h" -void -pci_update_resource(struct pci_dev *dev, struct resource *res, int resno) +void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno) { struct pci_bus_region region; u32 new, check, mask; @@ -43,20 +42,20 @@ pci_update_resource(struct pci_dev *dev, struct resource *res, int resno) /* * Ignore non-moveable resources. 
This might be legacy resources for * which no functional BAR register exists or another important - * system resource we should better not move around in system address - * space. + * system resource we shouldn't move around. */ if (res->flags & IORESOURCE_PCI_FIXED) return; pcibios_resource_to_bus(dev, ®ion, res); - pr_debug(" got res [%llx:%llx] bus [%llx:%llx] flags %lx for " - "BAR %d of %s\n", (unsigned long long)res->start, + dev_dbg(&dev->dev, "BAR %d: got res [%#llx-%#llx] bus [%#llx-%#llx] " + "flags %#lx\n", resno, + (unsigned long long)res->start, (unsigned long long)res->end, (unsigned long long)region.start, (unsigned long long)region.end, - (unsigned long)res->flags, resno, pci_name(dev)); + (unsigned long)res->flags); new = region.start | (res->flags & PCI_REGION_FLAG_MASK); if (res->flags & IORESOURCE_IO) @@ -81,9 +80,8 @@ pci_update_resource(struct pci_dev *dev, struct resource *res, int resno) pci_read_config_dword(dev, reg, &check); if ((new ^ check) & mask) { - printk(KERN_ERR "PCI: Error while updating region " - "%s/%d (%08x != %08x)\n", pci_name(dev), resno, - new, check); + dev_err(&dev->dev, "BAR %d: error updating (%#08x != %#08x)\n", + resno, new, check); } if ((new & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) == @@ -92,15 +90,14 @@ pci_update_resource(struct pci_dev *dev, struct resource *res, int resno) pci_write_config_dword(dev, reg + 4, new); pci_read_config_dword(dev, reg + 4, &check); if (check != new) { - printk(KERN_ERR "PCI: Error updating region " - "%s/%d (high %08x != %08x)\n", - pci_name(dev), resno, new, check); + dev_err(&dev->dev, "BAR %d: error updating " + "(high %#08x != %#08x)\n", resno, new, check); } } res->flags &= ~IORESOURCE_UNSET; - pr_debug("PCI: moved device %s resource %d (%lx) to %x\n", - pci_name(dev), resno, res->flags, - new & ~PCI_REGION_FLAG_MASK); + dev_dbg(&dev->dev, "BAR %d: moved to bus [%#llx-%#llx] flags %#lx\n", + resno, (unsigned long long)region.start, + (unsigned long long)region.end, res->flags); } int pci_claim_resource(struct pci_dev *dev, int resource) @@ -117,10 +114,11 @@ int pci_claim_resource(struct pci_dev *dev, int resource) err = insert_resource(root, res); if (err) { - printk(KERN_ERR "PCI: %s region %d of %s %s [%llx:%llx]\n", - root ? "Address space collision on" : - "No parent found for", - resource, dtype, pci_name(dev), + dev_err(&dev->dev, "BAR %d: %s of %s [%#llx-%#llx]\n", + resource, + root ? "address space collision on" : + "no parent found for", + dtype, (unsigned long long)res->start, (unsigned long long)res->end); } @@ -140,11 +138,10 @@ int pci_assign_resource(struct pci_dev *dev, int resno) align = resource_alignment(res); if (!align) { - printk(KERN_ERR "PCI: Cannot allocate resource (bogus " - "alignment) %d [%llx:%llx] (flags %lx) of %s\n", + dev_err(&dev->dev, "BAR %d: can't allocate resource (bogus " + "alignment) [%#llx-%#llx] flags %#lx\n", resno, (unsigned long long)res->start, - (unsigned long long)res->end, res->flags, - pci_name(dev)); + (unsigned long long)res->end, res->flags); return -EINVAL; } @@ -165,11 +162,11 @@ int pci_assign_resource(struct pci_dev *dev, int resno) } if (ret) { - printk(KERN_ERR "PCI: Failed to allocate %s resource " - "#%d:%llx@%llx for %s\n", + dev_err(&dev->dev, "BAR %d: can't allocate %s resource " + "[%#llx-%#llx]\n", resno, res->flags & IORESOURCE_IO ? 
"I/O" : "mem", - resno, (unsigned long long)size, - (unsigned long long)res->start, pci_name(dev)); + (unsigned long long)res->start, + (unsigned long long)res->end); } else { res->flags &= ~IORESOURCE_STARTALIGN; if (resno < PCI_BRIDGE_RESOURCES) @@ -205,11 +202,11 @@ int pci_assign_resource_fixed(struct pci_dev *dev, int resno) } if (ret) { - printk(KERN_ERR "PCI: Failed to allocate %s resource " - "#%d:%llx@%llx for %s\n", + dev_err(&dev->dev, "BAR %d: can't allocate %s resource " + "[%#llx-%#llx\n]", resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", - resno, (unsigned long long)(res->end - res->start + 1), - (unsigned long long)res->start, pci_name(dev)); + (unsigned long long)res->start, + (unsigned long long)res->end); } else if (resno < PCI_BRIDGE_RESOURCES) { pci_update_resource(dev, res, resno); } @@ -239,11 +236,10 @@ void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head) r_align = resource_alignment(r); if (!r_align) { - printk(KERN_WARNING "PCI: bogus alignment of resource " - "%d [%llx:%llx] (flags %lx) of %s\n", + dev_warn(&dev->dev, "BAR %d: bogus alignment " + "[%#llx-%#llx] flags %#lx\n", i, (unsigned long long)r->start, - (unsigned long long)r->end, r->flags, - pci_name(dev)); + (unsigned long long)r->end, r->flags); continue; } for (list = head; ; list = list->next) { @@ -291,7 +287,7 @@ int pci_enable_resources(struct pci_dev *dev, int mask) if (!r->parent) { dev_err(&dev->dev, "device not available because of " - "BAR %d [%llx:%llx] collisions\n", i, + "BAR %d [%#llx-%#llx] collisions\n", i, (unsigned long long) r->start, (unsigned long long) r->end); return -EINVAL; diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c new file mode 100644 index 000000000000..7e5b85cbd948 --- /dev/null +++ b/drivers/pci/slot.c @@ -0,0 +1,233 @@ +/* + * drivers/pci/slot.c + * Copyright (C) 2006 Matthew Wilcox <matthew@wil.cx> + * Copyright (C) 2006-2008 Hewlett-Packard Development Company, L.P. + * Alex Chiang <achiang@hp.com> + */ + +#include <linux/kobject.h> +#include <linux/pci.h> +#include <linux/err.h> +#include "pci.h" + +struct kset *pci_slots_kset; +EXPORT_SYMBOL_GPL(pci_slots_kset); + +static ssize_t pci_slot_attr_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct pci_slot *slot = to_pci_slot(kobj); + struct pci_slot_attribute *attribute = to_pci_slot_attr(attr); + return attribute->show ? attribute->show(slot, buf) : -EIO; +} + +static ssize_t pci_slot_attr_store(struct kobject *kobj, + struct attribute *attr, const char *buf, size_t len) +{ + struct pci_slot *slot = to_pci_slot(kobj); + struct pci_slot_attribute *attribute = to_pci_slot_attr(attr); + return attribute->store ? 
attribute->store(slot, buf, len) : -EIO; +} + +static struct sysfs_ops pci_slot_sysfs_ops = { + .show = pci_slot_attr_show, + .store = pci_slot_attr_store, +}; + +static ssize_t address_read_file(struct pci_slot *slot, char *buf) +{ + if (slot->number == 0xff) + return sprintf(buf, "%04x:%02x\n", + pci_domain_nr(slot->bus), + slot->bus->number); + else + return sprintf(buf, "%04x:%02x:%02x\n", + pci_domain_nr(slot->bus), + slot->bus->number, + slot->number); +} + +static void pci_slot_release(struct kobject *kobj) +{ + struct pci_slot *slot = to_pci_slot(kobj); + + pr_debug("%s: releasing pci_slot on %x:%d\n", __func__, + slot->bus->number, slot->number); + + list_del(&slot->list); + + kfree(slot); +} + +static struct pci_slot_attribute pci_slot_attr_address = + __ATTR(address, (S_IFREG | S_IRUGO), address_read_file, NULL); + +static struct attribute *pci_slot_default_attrs[] = { + &pci_slot_attr_address.attr, + NULL, +}; + +static struct kobj_type pci_slot_ktype = { + .sysfs_ops = &pci_slot_sysfs_ops, + .release = &pci_slot_release, + .default_attrs = pci_slot_default_attrs, +}; + +/** + * pci_create_slot - create or increment refcount for physical PCI slot + * @parent: struct pci_bus of parent bridge + * @slot_nr: PCI_SLOT(pci_dev->devfn) or -1 for placeholder + * @name: user visible string presented in /sys/bus/pci/slots/<name> + * + * PCI slots have first class attributes such as address, speed, width, + * and a &struct pci_slot is used to manage them. This interface will + * either return a new &struct pci_slot to the caller, or if the pci_slot + * already exists, its refcount will be incremented. + * + * Slots are uniquely identified by a @pci_bus, @slot_nr, @name tuple. + * + * Placeholder slots: + * In most cases, @pci_bus, @slot_nr will be sufficient to uniquely identify + * a slot. There is one notable exception - pSeries (rpaphp), where the + * @slot_nr cannot be determined until a device is actually inserted into + * the slot. In this scenario, the caller may pass -1 for @slot_nr. + * + * The following semantics are imposed when the caller passes @slot_nr == + * -1. First, the check for existing %struct pci_slot is skipped, as the + * caller may know about several unpopulated slots on a given %struct + * pci_bus, and each slot would have a @slot_nr of -1. Uniqueness for + * these slots is then determined by the @name parameter. We expect + * kobject_init_and_add() to warn us if the caller attempts to create + * multiple slots with the same name. The other change in semantics is + * user-visible, which is the 'address' parameter presented in sysfs will + * consist solely of a dddd:bb tuple, where dddd is the PCI domain of the + * %struct pci_bus and bb is the bus number. In other words, the devfn of + * the 'placeholder' slot will not be displayed. + */ + +struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr, + const char *name) +{ + struct pci_slot *slot; + int err; + + down_write(&pci_bus_sem); + + if (slot_nr == -1) + goto placeholder; + + /* If we've already created this slot, bump refcount and return. 
*/ + list_for_each_entry(slot, &parent->slots, list) { + if (slot->number == slot_nr) { + kobject_get(&slot->kobj); + pr_debug("%s: inc refcount to %d on %04x:%02x:%02x\n", + __func__, + atomic_read(&slot->kobj.kref.refcount), + pci_domain_nr(parent), parent->number, + slot_nr); + goto out; + } + } + +placeholder: + slot = kzalloc(sizeof(*slot), GFP_KERNEL); + if (!slot) { + slot = ERR_PTR(-ENOMEM); + goto out; + } + + slot->bus = parent; + slot->number = slot_nr; + + slot->kobj.kset = pci_slots_kset; + err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL, + "%s", name); + if (err) { + printk(KERN_ERR "Unable to register kobject %s\n", name); + goto err; + } + + INIT_LIST_HEAD(&slot->list); + list_add(&slot->list, &parent->slots); + + /* Don't care if debug printk has a -1 for slot_nr */ + pr_debug("%s: created pci_slot on %04x:%02x:%02x\n", + __func__, pci_domain_nr(parent), parent->number, slot_nr); + + out: + up_write(&pci_bus_sem); + return slot; + err: + kfree(slot); + slot = ERR_PTR(err); + goto out; +} +EXPORT_SYMBOL_GPL(pci_create_slot); + +/** + * pci_update_slot_number - update %struct pci_slot -> number + * @slot - %struct pci_slot to update + * @slot_nr - new number for slot + * + * The primary purpose of this interface is to allow callers who earlier + * created a placeholder slot in pci_create_slot() by passing a -1 as + * slot_nr, to update their %struct pci_slot with the correct @slot_nr. + */ + +void pci_update_slot_number(struct pci_slot *slot, int slot_nr) +{ + int name_count = 0; + struct pci_slot *tmp; + + down_write(&pci_bus_sem); + + list_for_each_entry(tmp, &slot->bus->slots, list) { + WARN_ON(tmp->number == slot_nr); + if (!strcmp(kobject_name(&tmp->kobj), kobject_name(&slot->kobj))) + name_count++; + } + + if (name_count > 1) + printk(KERN_WARNING "pci_update_slot_number found %d slots with the same name: %s\n", name_count, kobject_name(&slot->kobj)); + + slot->number = slot_nr; + up_write(&pci_bus_sem); +} +EXPORT_SYMBOL_GPL(pci_update_slot_number); + +/** + * pci_destroy_slot - decrement refcount for physical PCI slot + * @slot: struct pci_slot to decrement + * + * %struct pci_slot is refcounted, so destroying them is really easy; we + * just call kobject_put on its kobj and let our release methods do the + * rest. 
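/*
 * A hypothetical hotplug-driver sketch, not taken from this patch; the
 * example_* names and the "example-slot" label are made up. It shows the
 * create / update-number / destroy life cycle documented above.
 */
#include <linux/pci.h>
#include <linux/err.h>

static struct pci_slot *example_slot;

static int example_register_slot(struct pci_bus *bus)
{
	/* Pass -1 while the physical slot number is still unknown. */
	example_slot = pci_create_slot(bus, -1, "example-slot");
	return IS_ERR(example_slot) ? PTR_ERR(example_slot) : 0;
}

static void example_card_inserted(struct pci_dev *dev)
{
	/* Fill in the real number once a device shows up in the slot. */
	pci_update_slot_number(example_slot, PCI_SLOT(dev->devfn));
}

static void example_unregister_slot(void)
{
	/* Drops the reference taken by pci_create_slot(). */
	pci_destroy_slot(example_slot);
}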
+ */ + +void pci_destroy_slot(struct pci_slot *slot) +{ + pr_debug("%s: dec refcount to %d on %04x:%02x:%02x\n", __func__, + atomic_read(&slot->kobj.kref.refcount) - 1, + pci_domain_nr(slot->bus), slot->bus->number, slot->number); + + down_write(&pci_bus_sem); + kobject_put(&slot->kobj); + up_write(&pci_bus_sem); +} +EXPORT_SYMBOL_GPL(pci_destroy_slot); + +static int pci_slot_init(void) +{ + struct kset *pci_bus_kset; + + pci_bus_kset = bus_get_kset(&pci_bus_type); + pci_slots_kset = kset_create_and_add("slots", NULL, + &pci_bus_kset->kobj); + if (!pci_slots_kset) { + printk(KERN_ERR "PCI: Slot initialization failure\n"); + return -ENOMEM; + } + return 0; +} + +subsys_initcall(pci_slot_init); diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c index 9fcff0c33619..65129b54eb09 100644 --- a/drivers/pcmcia/cistpl.c +++ b/drivers/pcmcia/cistpl.c @@ -1490,7 +1490,7 @@ int pccard_validate_cis(struct pcmcia_socket *s, unsigned int function, unsigned ((tuple->TupleCode > 0x90) && (tuple->TupleCode < 0xff))) reserved++; } - if ((count) || (reserved > 5) || + if ((count == MAX_TUPLES) || (reserved > 5) || ((!dev_ok || !ident_ok) && (count > 10))) count = 0; diff --git a/drivers/pcmcia/electra_cf.c b/drivers/pcmcia/electra_cf.c index 52d0aa8c2e7a..c21f9a9c3e3f 100644 --- a/drivers/pcmcia/electra_cf.c +++ b/drivers/pcmcia/electra_cf.c @@ -29,9 +29,9 @@ #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/vmalloc.h> +#include <linux/of_platform.h> #include <pcmcia/ss.h> -#include <asm/of_platform.h> static const char driver_name[] = "electra-cf"; diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c index 13a5fbd50a07..ff66604e90d4 100644 --- a/drivers/pcmcia/m8xx_pcmcia.c +++ b/drivers/pcmcia/m8xx_pcmcia.c @@ -49,6 +49,8 @@ #include <linux/interrupt.h> #include <linux/fsl_devices.h> #include <linux/bitops.h> +#include <linux/of_device.h> +#include <linux/of_platform.h> #include <asm/io.h> #include <asm/system.h> @@ -57,8 +59,6 @@ #include <asm/8xx_immap.h> #include <asm/irq.h> #include <asm/fs_pd.h> -#include <asm/of_device.h> -#include <asm/of_platform.h> #include <pcmcia/cs_types.h> #include <pcmcia/cs.h> diff --git a/drivers/pnp/base.h b/drivers/pnp/base.h index 886dac823ed6..e3fa9a2d9a3d 100644 --- a/drivers/pnp/base.h +++ b/drivers/pnp/base.h @@ -1,3 +1,8 @@ +/* + * Copyright (C) 2008 Hewlett-Packard Development Company, L.P. 
+ * Bjorn Helgaas <bjorn.helgaas@hp.com> + */ + extern spinlock_t pnp_lock; void *pnp_alloc(long size); @@ -19,22 +24,118 @@ void pnp_remove_card(struct pnp_card *card); int pnp_add_card_device(struct pnp_card *card, struct pnp_dev *dev); void pnp_remove_card_device(struct pnp_dev *dev); -struct pnp_option *pnp_build_option(int priority); -struct pnp_option *pnp_register_independent_option(struct pnp_dev *dev); -struct pnp_option *pnp_register_dependent_option(struct pnp_dev *dev, - int priority); -int pnp_register_irq_resource(struct pnp_dev *dev, struct pnp_option *option, - struct pnp_irq *data); -int pnp_register_dma_resource(struct pnp_dev *dev, struct pnp_option *option, - struct pnp_dma *data); -int pnp_register_port_resource(struct pnp_dev *dev, struct pnp_option *option, - struct pnp_port *data); -int pnp_register_mem_resource(struct pnp_dev *dev, struct pnp_option *option, - struct pnp_mem *data); +struct pnp_port { + resource_size_t min; /* min base number */ + resource_size_t max; /* max base number */ + resource_size_t align; /* align boundary */ + resource_size_t size; /* size of range */ + unsigned char flags; /* port flags */ +}; + +#define PNP_IRQ_NR 256 +typedef struct { DECLARE_BITMAP(bits, PNP_IRQ_NR); } pnp_irq_mask_t; + +struct pnp_irq { + pnp_irq_mask_t map; /* bitmap for IRQ lines */ + unsigned char flags; /* IRQ flags */ +}; + +struct pnp_dma { + unsigned char map; /* bitmask for DMA channels */ + unsigned char flags; /* DMA flags */ +}; + +struct pnp_mem { + resource_size_t min; /* min base number */ + resource_size_t max; /* max base number */ + resource_size_t align; /* align boundary */ + resource_size_t size; /* size of range */ + unsigned char flags; /* memory flags */ +}; + +#define PNP_OPTION_DEPENDENT 0x80000000 +#define PNP_OPTION_SET_MASK 0xffff +#define PNP_OPTION_SET_SHIFT 12 +#define PNP_OPTION_PRIORITY_MASK 0xfff +#define PNP_OPTION_PRIORITY_SHIFT 0 + +#define PNP_RES_PRIORITY_PREFERRED 0 +#define PNP_RES_PRIORITY_ACCEPTABLE 1 +#define PNP_RES_PRIORITY_FUNCTIONAL 2 +#define PNP_RES_PRIORITY_INVALID PNP_OPTION_PRIORITY_MASK + +struct pnp_option { + struct list_head list; + unsigned int flags; /* independent/dependent, set, priority */ + + unsigned long type; /* IORESOURCE_{IO,MEM,IRQ,DMA} */ + union { + struct pnp_port port; + struct pnp_irq irq; + struct pnp_dma dma; + struct pnp_mem mem; + } u; +}; + +int pnp_register_irq_resource(struct pnp_dev *dev, unsigned int option_flags, + pnp_irq_mask_t *map, unsigned char flags); +int pnp_register_dma_resource(struct pnp_dev *dev, unsigned int option_flags, + unsigned char map, unsigned char flags); +int pnp_register_port_resource(struct pnp_dev *dev, unsigned int option_flags, + resource_size_t min, resource_size_t max, + resource_size_t align, resource_size_t size, + unsigned char flags); +int pnp_register_mem_resource(struct pnp_dev *dev, unsigned int option_flags, + resource_size_t min, resource_size_t max, + resource_size_t align, resource_size_t size, + unsigned char flags); + +static inline int pnp_option_is_dependent(struct pnp_option *option) +{ + return option->flags & PNP_OPTION_DEPENDENT ? 
1 : 0; +} + +static inline unsigned int pnp_option_set(struct pnp_option *option) +{ + return (option->flags >> PNP_OPTION_SET_SHIFT) & PNP_OPTION_SET_MASK; +} + +static inline unsigned int pnp_option_priority(struct pnp_option *option) +{ + return (option->flags >> PNP_OPTION_PRIORITY_SHIFT) & + PNP_OPTION_PRIORITY_MASK; +} + +static inline unsigned int pnp_new_dependent_set(struct pnp_dev *dev, + int priority) +{ + unsigned int flags; + + if (priority > PNP_RES_PRIORITY_FUNCTIONAL) { + dev_warn(&dev->dev, "invalid dependent option priority %d " + "clipped to %d", priority, + PNP_RES_PRIORITY_INVALID); + priority = PNP_RES_PRIORITY_INVALID; + } + + flags = PNP_OPTION_DEPENDENT | + ((dev->num_dependent_sets & PNP_OPTION_SET_MASK) << + PNP_OPTION_SET_SHIFT) | + ((priority & PNP_OPTION_PRIORITY_MASK) << + PNP_OPTION_PRIORITY_SHIFT); + + dev->num_dependent_sets++; + + return flags; +} + +char *pnp_option_priority_name(struct pnp_option *option); +void dbg_pnp_show_option(struct pnp_dev *dev, struct pnp_option *option); + void pnp_init_resources(struct pnp_dev *dev); void pnp_fixup_device(struct pnp_dev *dev); -void pnp_free_option(struct pnp_option *option); +void pnp_free_options(struct pnp_dev *dev); int __pnp_add_device(struct pnp_dev *dev); void __pnp_remove_device(struct pnp_dev *dev); @@ -43,29 +144,18 @@ int pnp_check_mem(struct pnp_dev *dev, struct resource *res); int pnp_check_irq(struct pnp_dev *dev, struct resource *res); int pnp_check_dma(struct pnp_dev *dev, struct resource *res); +char *pnp_resource_type_name(struct resource *res); void dbg_pnp_show_resources(struct pnp_dev *dev, char *desc); -void pnp_init_resource(struct resource *res); - -struct pnp_resource *pnp_get_pnp_resource(struct pnp_dev *dev, - unsigned int type, unsigned int num); - -#define PNP_MAX_PORT 40 -#define PNP_MAX_MEM 24 -#define PNP_MAX_IRQ 2 -#define PNP_MAX_DMA 2 +void pnp_free_resources(struct pnp_dev *dev); +int pnp_resource_type(struct resource *res); struct pnp_resource { + struct list_head list; struct resource res; - unsigned int index; /* ISAPNP config register index */ }; -struct pnp_resource_table { - struct pnp_resource port[PNP_MAX_PORT]; - struct pnp_resource mem[PNP_MAX_MEM]; - struct pnp_resource dma[PNP_MAX_DMA]; - struct pnp_resource irq[PNP_MAX_IRQ]; -}; +void pnp_free_resource(struct pnp_resource *pnp_res); struct pnp_resource *pnp_add_irq_resource(struct pnp_dev *dev, int irq, int flags); diff --git a/drivers/pnp/core.c b/drivers/pnp/core.c index 20771b7d4482..a411582bcd72 100644 --- a/drivers/pnp/core.c +++ b/drivers/pnp/core.c @@ -99,14 +99,28 @@ static void pnp_free_ids(struct pnp_dev *dev) } } +void pnp_free_resource(struct pnp_resource *pnp_res) +{ + list_del(&pnp_res->list); + kfree(pnp_res); +} + +void pnp_free_resources(struct pnp_dev *dev) +{ + struct pnp_resource *pnp_res, *tmp; + + list_for_each_entry_safe(pnp_res, tmp, &dev->resources, list) { + pnp_free_resource(pnp_res); + } +} + static void pnp_release_device(struct device *dmdev) { struct pnp_dev *dev = to_pnp_dev(dmdev); - pnp_free_option(dev->independent); - pnp_free_option(dev->dependent); pnp_free_ids(dev); - kfree(dev->res); + pnp_free_resources(dev); + pnp_free_options(dev); kfree(dev); } @@ -119,12 +133,8 @@ struct pnp_dev *pnp_alloc_dev(struct pnp_protocol *protocol, int id, char *pnpid if (!dev) return NULL; - dev->res = kzalloc(sizeof(struct pnp_resource_table), GFP_KERNEL); - if (!dev->res) { - kfree(dev); - return NULL; - } - + INIT_LIST_HEAD(&dev->resources); + INIT_LIST_HEAD(&dev->options); dev->protocol = 
protocol; dev->number = id; dev->dma_mask = DMA_24BIT_MASK; @@ -140,7 +150,6 @@ struct pnp_dev *pnp_alloc_dev(struct pnp_protocol *protocol, int id, char *pnpid dev_id = pnp_add_id(dev, pnpid); if (!dev_id) { - kfree(dev->res); kfree(dev); return NULL; } diff --git a/drivers/pnp/interface.c b/drivers/pnp/interface.c index 5695a79f3a52..a876ecf7028c 100644 --- a/drivers/pnp/interface.c +++ b/drivers/pnp/interface.c @@ -3,6 +3,8 @@ * * Some code, especially possible resource dumping is based on isapnp_proc.c (c) Jaroslav Kysela <perex@perex.cz> * Copyright 2002 Adam Belay <ambx1@neo.rr.com> + * Copyright (C) 2008 Hewlett-Packard Development Company, L.P. + * Bjorn Helgaas <bjorn.helgaas@hp.com> */ #include <linux/pnp.h> @@ -53,11 +55,13 @@ static int pnp_printf(pnp_info_buffer_t * buffer, char *fmt, ...) static void pnp_print_port(pnp_info_buffer_t * buffer, char *space, struct pnp_port *port) { - pnp_printf(buffer, - "%sport 0x%x-0x%x, align 0x%x, size 0x%x, %i-bit address decoding\n", - space, port->min, port->max, - port->align ? (port->align - 1) : 0, port->size, - port->flags & PNP_PORT_FLAG_16BITADDR ? 16 : 10); + pnp_printf(buffer, "%sport %#llx-%#llx, align %#llx, size %#llx, " + "%i-bit address decoding\n", space, + (unsigned long long) port->min, + (unsigned long long) port->max, + port->align ? ((unsigned long long) port->align - 1) : 0, + (unsigned long long) port->size, + port->flags & IORESOURCE_IO_16BIT_ADDR ? 16 : 10); } static void pnp_print_irq(pnp_info_buffer_t * buffer, char *space, @@ -67,7 +71,7 @@ static void pnp_print_irq(pnp_info_buffer_t * buffer, char *space, pnp_printf(buffer, "%sirq ", space); for (i = 0; i < PNP_IRQ_NR; i++) - if (test_bit(i, irq->map)) { + if (test_bit(i, irq->map.bits)) { if (!first) { pnp_printf(buffer, ","); } else { @@ -78,7 +82,7 @@ static void pnp_print_irq(pnp_info_buffer_t * buffer, char *space, else pnp_printf(buffer, "%i", i); } - if (bitmap_empty(irq->map, PNP_IRQ_NR)) + if (bitmap_empty(irq->map.bits, PNP_IRQ_NR)) pnp_printf(buffer, "<none>"); if (irq->flags & IORESOURCE_IRQ_HIGHEDGE) pnp_printf(buffer, " High-Edge"); @@ -88,6 +92,8 @@ static void pnp_print_irq(pnp_info_buffer_t * buffer, char *space, pnp_printf(buffer, " High-Level"); if (irq->flags & IORESOURCE_IRQ_LOWLEVEL) pnp_printf(buffer, " Low-Level"); + if (irq->flags & IORESOURCE_IRQ_OPTIONAL) + pnp_printf(buffer, " (optional)"); pnp_printf(buffer, "\n"); } @@ -148,8 +154,11 @@ static void pnp_print_mem(pnp_info_buffer_t * buffer, char *space, { char *s; - pnp_printf(buffer, "%sMemory 0x%x-0x%x, align 0x%x, size 0x%x", - space, mem->min, mem->max, mem->align, mem->size); + pnp_printf(buffer, "%sMemory %#llx-%#llx, align %#llx, size %#llx", + space, (unsigned long long) mem->min, + (unsigned long long) mem->max, + (unsigned long long) mem->align, + (unsigned long long) mem->size); if (mem->flags & IORESOURCE_MEM_WRITEABLE) pnp_printf(buffer, ", writeable"); if (mem->flags & IORESOURCE_MEM_CACHEABLE) @@ -177,65 +186,58 @@ static void pnp_print_mem(pnp_info_buffer_t * buffer, char *space, } static void pnp_print_option(pnp_info_buffer_t * buffer, char *space, - struct pnp_option *option, int dep) + struct pnp_option *option) { - char *s; - struct pnp_port *port; - struct pnp_irq *irq; - struct pnp_dma *dma; - struct pnp_mem *mem; - - if (dep) { - switch (option->priority) { - case PNP_RES_PRIORITY_PREFERRED: - s = "preferred"; - break; - case PNP_RES_PRIORITY_ACCEPTABLE: - s = "acceptable"; - break; - case PNP_RES_PRIORITY_FUNCTIONAL: - s = "functional"; - break; - default: - 
s = "invalid"; - } - pnp_printf(buffer, "Dependent: %02i - Priority %s\n", dep, s); + switch (option->type) { + case IORESOURCE_IO: + pnp_print_port(buffer, space, &option->u.port); + break; + case IORESOURCE_MEM: + pnp_print_mem(buffer, space, &option->u.mem); + break; + case IORESOURCE_IRQ: + pnp_print_irq(buffer, space, &option->u.irq); + break; + case IORESOURCE_DMA: + pnp_print_dma(buffer, space, &option->u.dma); + break; } - - for (port = option->port; port; port = port->next) - pnp_print_port(buffer, space, port); - for (irq = option->irq; irq; irq = irq->next) - pnp_print_irq(buffer, space, irq); - for (dma = option->dma; dma; dma = dma->next) - pnp_print_dma(buffer, space, dma); - for (mem = option->mem; mem; mem = mem->next) - pnp_print_mem(buffer, space, mem); } static ssize_t pnp_show_options(struct device *dmdev, struct device_attribute *attr, char *buf) { struct pnp_dev *dev = to_pnp_dev(dmdev); - struct pnp_option *independent = dev->independent; - struct pnp_option *dependent = dev->dependent; - int ret, dep = 1; + pnp_info_buffer_t *buffer; + struct pnp_option *option; + int ret, dep = 0, set = 0; + char *indent; - pnp_info_buffer_t *buffer = (pnp_info_buffer_t *) - pnp_alloc(sizeof(pnp_info_buffer_t)); + buffer = pnp_alloc(sizeof(pnp_info_buffer_t)); if (!buffer) return -ENOMEM; buffer->len = PAGE_SIZE; buffer->buffer = buf; buffer->curr = buffer->buffer; - if (independent) - pnp_print_option(buffer, "", independent, 0); - while (dependent) { - pnp_print_option(buffer, " ", dependent, dep); - dependent = dependent->next; - dep++; + list_for_each_entry(option, &dev->options, list) { + if (pnp_option_is_dependent(option)) { + indent = " "; + if (!dep || pnp_option_set(option) != set) { + set = pnp_option_set(option); + dep = 1; + pnp_printf(buffer, "Dependent: %02i - " + "Priority %s\n", set, + pnp_option_priority_name(option)); + } + } else { + dep = 0; + indent = ""; + } + pnp_print_option(buffer, indent, option); } + ret = (buffer->curr - buf); kfree(buffer); return ret; @@ -248,79 +250,59 @@ static ssize_t pnp_show_current_resources(struct device *dmdev, char *buf) { struct pnp_dev *dev = to_pnp_dev(dmdev); - struct resource *res; - int i, ret; pnp_info_buffer_t *buffer; + struct pnp_resource *pnp_res; + struct resource *res; + int ret; if (!dev) return -EINVAL; - buffer = (pnp_info_buffer_t *) pnp_alloc(sizeof(pnp_info_buffer_t)); + buffer = pnp_alloc(sizeof(pnp_info_buffer_t)); if (!buffer) return -ENOMEM; + buffer->len = PAGE_SIZE; buffer->buffer = buf; buffer->curr = buffer->buffer; - pnp_printf(buffer, "state = "); - if (dev->active) - pnp_printf(buffer, "active\n"); - else - pnp_printf(buffer, "disabled\n"); - - for (i = 0; (res = pnp_get_resource(dev, IORESOURCE_IO, i)); i++) { - if (pnp_resource_valid(res)) { - pnp_printf(buffer, "io"); - if (res->flags & IORESOURCE_DISABLED) - pnp_printf(buffer, " disabled\n"); - else - pnp_printf(buffer, " 0x%llx-0x%llx\n", - (unsigned long long) res->start, - (unsigned long long) res->end); - } - } - for (i = 0; (res = pnp_get_resource(dev, IORESOURCE_MEM, i)); i++) { - if (pnp_resource_valid(res)) { - pnp_printf(buffer, "mem"); - if (res->flags & IORESOURCE_DISABLED) - pnp_printf(buffer, " disabled\n"); - else - pnp_printf(buffer, " 0x%llx-0x%llx\n", - (unsigned long long) res->start, - (unsigned long long) res->end); - } - } - for (i = 0; (res = pnp_get_resource(dev, IORESOURCE_IRQ, i)); i++) { - if (pnp_resource_valid(res)) { - pnp_printf(buffer, "irq"); - if (res->flags & IORESOURCE_DISABLED) - pnp_printf(buffer, " 
disabled\n"); - else - pnp_printf(buffer, " %lld\n", - (unsigned long long) res->start); + pnp_printf(buffer, "state = %s\n", dev->active ? "active" : "disabled"); + + list_for_each_entry(pnp_res, &dev->resources, list) { + res = &pnp_res->res; + + pnp_printf(buffer, pnp_resource_type_name(res)); + + if (res->flags & IORESOURCE_DISABLED) { + pnp_printf(buffer, " disabled\n"); + continue; } - } - for (i = 0; (res = pnp_get_resource(dev, IORESOURCE_DMA, i)); i++) { - if (pnp_resource_valid(res)) { - pnp_printf(buffer, "dma"); - if (res->flags & IORESOURCE_DISABLED) - pnp_printf(buffer, " disabled\n"); - else - pnp_printf(buffer, " %lld\n", - (unsigned long long) res->start); + + switch (pnp_resource_type(res)) { + case IORESOURCE_IO: + case IORESOURCE_MEM: + pnp_printf(buffer, " %#llx-%#llx\n", + (unsigned long long) res->start, + (unsigned long long) res->end); + break; + case IORESOURCE_IRQ: + case IORESOURCE_DMA: + pnp_printf(buffer, " %lld\n", + (unsigned long long) res->start); + break; } } + ret = (buffer->curr - buf); kfree(buffer); return ret; } -static ssize_t -pnp_set_current_resources(struct device *dmdev, struct device_attribute *attr, - const char *ubuf, size_t count) +static ssize_t pnp_set_current_resources(struct device *dmdev, + struct device_attribute *attr, + const char *ubuf, size_t count) { struct pnp_dev *dev = to_pnp_dev(dmdev); - struct pnp_resource *pnp_res; char *buf = (void *)ubuf; int retval = 0; resource_size_t start, end; @@ -368,7 +350,6 @@ pnp_set_current_resources(struct device *dmdev, struct device_attribute *attr, goto done; } if (!strnicmp(buf, "set", 3)) { - int nport = 0, nmem = 0, nirq = 0, ndma = 0; if (dev->active) goto done; buf += 3; @@ -391,10 +372,7 @@ pnp_set_current_resources(struct device *dmdev, struct device_attribute *attr, end = simple_strtoul(buf, &buf, 0); } else end = start; - pnp_res = pnp_add_io_resource(dev, start, end, - 0); - if (pnp_res) - pnp_res->index = nport++; + pnp_add_io_resource(dev, start, end, 0); continue; } if (!strnicmp(buf, "mem", 3)) { @@ -411,10 +389,7 @@ pnp_set_current_resources(struct device *dmdev, struct device_attribute *attr, end = simple_strtoul(buf, &buf, 0); } else end = start; - pnp_res = pnp_add_mem_resource(dev, start, end, - 0); - if (pnp_res) - pnp_res->index = nmem++; + pnp_add_mem_resource(dev, start, end, 0); continue; } if (!strnicmp(buf, "irq", 3)) { @@ -422,9 +397,7 @@ pnp_set_current_resources(struct device *dmdev, struct device_attribute *attr, while (isspace(*buf)) ++buf; start = simple_strtoul(buf, &buf, 0); - pnp_res = pnp_add_irq_resource(dev, start, 0); - if (pnp_res) - pnp_res->index = nirq++; + pnp_add_irq_resource(dev, start, 0); continue; } if (!strnicmp(buf, "dma", 3)) { @@ -432,9 +405,7 @@ pnp_set_current_resources(struct device *dmdev, struct device_attribute *attr, while (isspace(*buf)) ++buf; start = simple_strtoul(buf, &buf, 0); - pnp_res = pnp_add_dma_resource(dev, start, 0); - if (pnp_res) - pnp_res->index = ndma++; + pnp_add_dma_resource(dev, start, 0); continue; } break; diff --git a/drivers/pnp/isapnp/core.c b/drivers/pnp/isapnp/core.c index f1bccdbdeb08..101a835e8759 100644 --- a/drivers/pnp/isapnp/core.c +++ b/drivers/pnp/isapnp/core.c @@ -429,154 +429,135 @@ static struct pnp_dev *__init isapnp_parse_device(struct pnp_card *card, * Add IRQ resource to resources list. 
*/ static void __init isapnp_parse_irq_resource(struct pnp_dev *dev, - struct pnp_option *option, + unsigned int option_flags, int size) { unsigned char tmp[3]; - struct pnp_irq *irq; unsigned long bits; + pnp_irq_mask_t map; + unsigned char flags = IORESOURCE_IRQ_HIGHEDGE; isapnp_peek(tmp, size); - irq = kzalloc(sizeof(struct pnp_irq), GFP_KERNEL); - if (!irq) - return; bits = (tmp[1] << 8) | tmp[0]; - bitmap_copy(irq->map, &bits, 16); + + bitmap_zero(map.bits, PNP_IRQ_NR); + bitmap_copy(map.bits, &bits, 16); + if (size > 2) - irq->flags = tmp[2]; - else - irq->flags = IORESOURCE_IRQ_HIGHEDGE; - pnp_register_irq_resource(dev, option, irq); + flags = tmp[2]; + + pnp_register_irq_resource(dev, option_flags, &map, flags); } /* * Add DMA resource to resources list. */ static void __init isapnp_parse_dma_resource(struct pnp_dev *dev, - struct pnp_option *option, + unsigned int option_flags, int size) { unsigned char tmp[2]; - struct pnp_dma *dma; isapnp_peek(tmp, size); - dma = kzalloc(sizeof(struct pnp_dma), GFP_KERNEL); - if (!dma) - return; - dma->map = tmp[0]; - dma->flags = tmp[1]; - pnp_register_dma_resource(dev, option, dma); + pnp_register_dma_resource(dev, option_flags, tmp[0], tmp[1]); } /* * Add port resource to resources list. */ static void __init isapnp_parse_port_resource(struct pnp_dev *dev, - struct pnp_option *option, + unsigned int option_flags, int size) { unsigned char tmp[7]; - struct pnp_port *port; + resource_size_t min, max, align, len; + unsigned char flags; isapnp_peek(tmp, size); - port = kzalloc(sizeof(struct pnp_port), GFP_KERNEL); - if (!port) - return; - port->min = (tmp[2] << 8) | tmp[1]; - port->max = (tmp[4] << 8) | tmp[3]; - port->align = tmp[5]; - port->size = tmp[6]; - port->flags = tmp[0] ? PNP_PORT_FLAG_16BITADDR : 0; - pnp_register_port_resource(dev, option, port); + min = (tmp[2] << 8) | tmp[1]; + max = (tmp[4] << 8) | tmp[3]; + align = tmp[5]; + len = tmp[6]; + flags = tmp[0] ? IORESOURCE_IO_16BIT_ADDR : 0; + pnp_register_port_resource(dev, option_flags, + min, max, align, len, flags); } /* * Add fixed port resource to resources list. */ static void __init isapnp_parse_fixed_port_resource(struct pnp_dev *dev, - struct pnp_option *option, + unsigned int option_flags, int size) { unsigned char tmp[3]; - struct pnp_port *port; + resource_size_t base, len; isapnp_peek(tmp, size); - port = kzalloc(sizeof(struct pnp_port), GFP_KERNEL); - if (!port) - return; - port->min = port->max = (tmp[1] << 8) | tmp[0]; - port->size = tmp[2]; - port->align = 0; - port->flags = PNP_PORT_FLAG_FIXED; - pnp_register_port_resource(dev, option, port); + base = (tmp[1] << 8) | tmp[0]; + len = tmp[2]; + pnp_register_port_resource(dev, option_flags, base, base, 0, len, + IORESOURCE_IO_FIXED); } /* * Add memory resource to resources list. 
*/ static void __init isapnp_parse_mem_resource(struct pnp_dev *dev, - struct pnp_option *option, + unsigned int option_flags, int size) { unsigned char tmp[9]; - struct pnp_mem *mem; + resource_size_t min, max, align, len; + unsigned char flags; isapnp_peek(tmp, size); - mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL); - if (!mem) - return; - mem->min = ((tmp[2] << 8) | tmp[1]) << 8; - mem->max = ((tmp[4] << 8) | tmp[3]) << 8; - mem->align = (tmp[6] << 8) | tmp[5]; - mem->size = ((tmp[8] << 8) | tmp[7]) << 8; - mem->flags = tmp[0]; - pnp_register_mem_resource(dev, option, mem); + min = ((tmp[2] << 8) | tmp[1]) << 8; + max = ((tmp[4] << 8) | tmp[3]) << 8; + align = (tmp[6] << 8) | tmp[5]; + len = ((tmp[8] << 8) | tmp[7]) << 8; + flags = tmp[0]; + pnp_register_mem_resource(dev, option_flags, + min, max, align, len, flags); } /* * Add 32-bit memory resource to resources list. */ static void __init isapnp_parse_mem32_resource(struct pnp_dev *dev, - struct pnp_option *option, + unsigned int option_flags, int size) { unsigned char tmp[17]; - struct pnp_mem *mem; + resource_size_t min, max, align, len; + unsigned char flags; isapnp_peek(tmp, size); - mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL); - if (!mem) - return; - mem->min = (tmp[4] << 24) | (tmp[3] << 16) | (tmp[2] << 8) | tmp[1]; - mem->max = (tmp[8] << 24) | (tmp[7] << 16) | (tmp[6] << 8) | tmp[5]; - mem->align = - (tmp[12] << 24) | (tmp[11] << 16) | (tmp[10] << 8) | tmp[9]; - mem->size = - (tmp[16] << 24) | (tmp[15] << 16) | (tmp[14] << 8) | tmp[13]; - mem->flags = tmp[0]; - pnp_register_mem_resource(dev, option, mem); + min = (tmp[4] << 24) | (tmp[3] << 16) | (tmp[2] << 8) | tmp[1]; + max = (tmp[8] << 24) | (tmp[7] << 16) | (tmp[6] << 8) | tmp[5]; + align = (tmp[12] << 24) | (tmp[11] << 16) | (tmp[10] << 8) | tmp[9]; + len = (tmp[16] << 24) | (tmp[15] << 16) | (tmp[14] << 8) | tmp[13]; + flags = tmp[0]; + pnp_register_mem_resource(dev, option_flags, + min, max, align, len, flags); } /* * Add 32-bit fixed memory resource to resources list. 
*/ static void __init isapnp_parse_fixed_mem32_resource(struct pnp_dev *dev, - struct pnp_option *option, + unsigned int option_flags, int size) { unsigned char tmp[9]; - struct pnp_mem *mem; + resource_size_t base, len; + unsigned char flags; isapnp_peek(tmp, size); - mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL); - if (!mem) - return; - mem->min = mem->max = - (tmp[4] << 24) | (tmp[3] << 16) | (tmp[2] << 8) | tmp[1]; - mem->size = (tmp[8] << 24) | (tmp[7] << 16) | (tmp[6] << 8) | tmp[5]; - mem->align = 0; - mem->flags = tmp[0]; - pnp_register_mem_resource(dev, option, mem); + base = (tmp[4] << 24) | (tmp[3] << 16) | (tmp[2] << 8) | tmp[1]; + len = (tmp[8] << 24) | (tmp[7] << 16) | (tmp[6] << 8) | tmp[5]; + flags = tmp[0]; + pnp_register_mem_resource(dev, option_flags, base, base, 0, len, flags); } /* @@ -604,20 +585,16 @@ isapnp_parse_name(char *name, unsigned int name_max, unsigned short *size) static int __init isapnp_create_device(struct pnp_card *card, unsigned short size) { - int number = 0, skip = 0, priority = 0, compat = 0; + int number = 0, skip = 0, priority, compat = 0; unsigned char type, tmp[17]; - struct pnp_option *option; + unsigned int option_flags; struct pnp_dev *dev; u32 eisa_id; char id[8]; if ((dev = isapnp_parse_device(card, size, number++)) == NULL) return 1; - option = pnp_register_independent_option(dev); - if (!option) { - kfree(dev); - return 1; - } + option_flags = 0; pnp_add_card_device(card, dev); while (1) { @@ -634,16 +611,11 @@ static int __init isapnp_create_device(struct pnp_card *card, return 1; size = 0; skip = 0; - option = pnp_register_independent_option(dev); - if (!option) { - kfree(dev); - return 1; - } + option_flags = 0; pnp_add_card_device(card, dev); } else { skip = 1; } - priority = 0; compat = 0; break; case _STAG_COMPATDEVID: @@ -660,44 +632,42 @@ static int __init isapnp_create_device(struct pnp_card *card, case _STAG_IRQ: if (size < 2 || size > 3) goto __skip; - isapnp_parse_irq_resource(dev, option, size); + isapnp_parse_irq_resource(dev, option_flags, size); size = 0; break; case _STAG_DMA: if (size != 2) goto __skip; - isapnp_parse_dma_resource(dev, option, size); + isapnp_parse_dma_resource(dev, option_flags, size); size = 0; break; case _STAG_STARTDEP: if (size > 1) goto __skip; - priority = 0x100 | PNP_RES_PRIORITY_ACCEPTABLE; + priority = PNP_RES_PRIORITY_ACCEPTABLE; if (size > 0) { isapnp_peek(tmp, size); - priority = 0x100 | tmp[0]; + priority = tmp[0]; size = 0; } - option = pnp_register_dependent_option(dev, priority); - if (!option) - return 1; + option_flags = pnp_new_dependent_set(dev, priority); break; case _STAG_ENDDEP: if (size != 0) goto __skip; - priority = 0; - dev_dbg(&dev->dev, "end dependent options\n"); + option_flags = 0; break; case _STAG_IOPORT: if (size != 7) goto __skip; - isapnp_parse_port_resource(dev, option, size); + isapnp_parse_port_resource(dev, option_flags, size); size = 0; break; case _STAG_FIXEDIO: if (size != 3) goto __skip; - isapnp_parse_fixed_port_resource(dev, option, size); + isapnp_parse_fixed_port_resource(dev, option_flags, + size); size = 0; break; case _STAG_VENDOR: @@ -705,7 +675,7 @@ static int __init isapnp_create_device(struct pnp_card *card, case _LTAG_MEMRANGE: if (size != 9) goto __skip; - isapnp_parse_mem_resource(dev, option, size); + isapnp_parse_mem_resource(dev, option_flags, size); size = 0; break; case _LTAG_ANSISTR: @@ -720,13 +690,14 @@ static int __init isapnp_create_device(struct pnp_card *card, case _LTAG_MEM32RANGE: if (size != 17) goto __skip; - 
isapnp_parse_mem32_resource(dev, option, size); + isapnp_parse_mem32_resource(dev, option_flags, size); size = 0; break; case _LTAG_FIXEDMEM32RANGE: if (size != 9) goto __skip; - isapnp_parse_fixed_mem32_resource(dev, option, size); + isapnp_parse_fixed_mem32_resource(dev, option_flags, + size); size = 0; break; case _STAG_END: @@ -928,7 +899,6 @@ EXPORT_SYMBOL(isapnp_write_byte); static int isapnp_get_resources(struct pnp_dev *dev) { - struct pnp_resource *pnp_res; int i, ret; dev_dbg(&dev->dev, "get resources\n"); @@ -940,35 +910,23 @@ static int isapnp_get_resources(struct pnp_dev *dev) for (i = 0; i < ISAPNP_MAX_PORT; i++) { ret = isapnp_read_word(ISAPNP_CFG_PORT + (i << 1)); - if (ret) { - pnp_res = pnp_add_io_resource(dev, ret, ret, 0); - if (pnp_res) - pnp_res->index = i; - } + pnp_add_io_resource(dev, ret, ret, + ret == 0 ? IORESOURCE_DISABLED : 0); } for (i = 0; i < ISAPNP_MAX_MEM; i++) { ret = isapnp_read_word(ISAPNP_CFG_MEM + (i << 3)) << 8; - if (ret) { - pnp_res = pnp_add_mem_resource(dev, ret, ret, 0); - if (pnp_res) - pnp_res->index = i; - } + pnp_add_mem_resource(dev, ret, ret, + ret == 0 ? IORESOURCE_DISABLED : 0); } for (i = 0; i < ISAPNP_MAX_IRQ; i++) { ret = isapnp_read_word(ISAPNP_CFG_IRQ + (i << 1)) >> 8; - if (ret) { - pnp_res = pnp_add_irq_resource(dev, ret, 0); - if (pnp_res) - pnp_res->index = i; - } + pnp_add_irq_resource(dev, ret, + ret == 0 ? IORESOURCE_DISABLED : 0); } for (i = 0; i < ISAPNP_MAX_DMA; i++) { ret = isapnp_read_byte(ISAPNP_CFG_DMA + i); - if (ret != 4) { - pnp_res = pnp_add_dma_resource(dev, ret, 0); - if (pnp_res) - pnp_res->index = i; - } + pnp_add_dma_resource(dev, ret, + ret == 4 ? IORESOURCE_DISABLED : 0); } __end: @@ -978,62 +936,45 @@ __end: static int isapnp_set_resources(struct pnp_dev *dev) { - struct pnp_resource *pnp_res; struct resource *res; - int tmp, index; + int tmp; dev_dbg(&dev->dev, "set resources\n"); isapnp_cfg_begin(dev->card->number, dev->number); dev->active = 1; for (tmp = 0; tmp < ISAPNP_MAX_PORT; tmp++) { - pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_IO, tmp); - if (!pnp_res) - continue; - res = &pnp_res->res; - if (pnp_resource_valid(res)) { - index = pnp_res->index; + res = pnp_get_resource(dev, IORESOURCE_IO, tmp); + if (pnp_resource_enabled(res)) { dev_dbg(&dev->dev, " set io %d to %#llx\n", - index, (unsigned long long) res->start); - isapnp_write_word(ISAPNP_CFG_PORT + (index << 1), + tmp, (unsigned long long) res->start); + isapnp_write_word(ISAPNP_CFG_PORT + (tmp << 1), res->start); } } for (tmp = 0; tmp < ISAPNP_MAX_IRQ; tmp++) { - pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_IRQ, tmp); - if (!pnp_res) - continue; - res = &pnp_res->res; - if (pnp_resource_valid(res)) { + res = pnp_get_resource(dev, IORESOURCE_IRQ, tmp); + if (pnp_resource_enabled(res)) { int irq = res->start; if (irq == 2) irq = 9; - index = pnp_res->index; - dev_dbg(&dev->dev, " set irq %d to %d\n", index, irq); - isapnp_write_byte(ISAPNP_CFG_IRQ + (index << 1), irq); + dev_dbg(&dev->dev, " set irq %d to %d\n", tmp, irq); + isapnp_write_byte(ISAPNP_CFG_IRQ + (tmp << 1), irq); } } for (tmp = 0; tmp < ISAPNP_MAX_DMA; tmp++) { - pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_DMA, tmp); - if (!pnp_res) - continue; - res = &pnp_res->res; - if (pnp_resource_valid(res)) { - index = pnp_res->index; + res = pnp_get_resource(dev, IORESOURCE_DMA, tmp); + if (pnp_resource_enabled(res)) { dev_dbg(&dev->dev, " set dma %d to %lld\n", - index, (unsigned long long) res->start); - isapnp_write_byte(ISAPNP_CFG_DMA + index, res->start); + tmp, (unsigned 
long long) res->start); + isapnp_write_byte(ISAPNP_CFG_DMA + tmp, res->start); } } for (tmp = 0; tmp < ISAPNP_MAX_MEM; tmp++) { - pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_MEM, tmp); - if (!pnp_res) - continue; - res = &pnp_res->res; - if (pnp_resource_valid(res)) { - index = pnp_res->index; + res = pnp_get_resource(dev, IORESOURCE_MEM, tmp); + if (pnp_resource_enabled(res)) { dev_dbg(&dev->dev, " set mem %d to %#llx\n", - index, (unsigned long long) res->start); - isapnp_write_word(ISAPNP_CFG_MEM + (index << 3), + tmp, (unsigned long long) res->start); + isapnp_write_word(ISAPNP_CFG_MEM + (tmp << 3), (res->start >> 8) & 0xffff); } } diff --git a/drivers/pnp/manager.c b/drivers/pnp/manager.c index bea0914ff947..b526eaad3f6c 100644 --- a/drivers/pnp/manager.c +++ b/drivers/pnp/manager.c @@ -3,6 +3,8 @@ * * based on isapnp.c resource management (c) Jaroslav Kysela <perex@perex.cz> * Copyright 2003 Adam Belay <ambx1@neo.rr.com> + * Copyright (C) 2008 Hewlett-Packard Development Company, L.P. + * Bjorn Helgaas <bjorn.helgaas@hp.com> */ #include <linux/errno.h> @@ -19,82 +21,64 @@ DEFINE_MUTEX(pnp_res_mutex); static int pnp_assign_port(struct pnp_dev *dev, struct pnp_port *rule, int idx) { - struct pnp_resource *pnp_res; - struct resource *res; - - pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_IO, idx); - if (!pnp_res) { - dev_err(&dev->dev, "too many I/O port resources\n"); - /* pretend we were successful so at least the manager won't try again */ - return 1; - } - - res = &pnp_res->res; + struct resource *res, local_res; - /* check if this resource has been manually set, if so skip */ - if (!(res->flags & IORESOURCE_AUTO)) { + res = pnp_get_resource(dev, IORESOURCE_IO, idx); + if (res) { dev_dbg(&dev->dev, " io %d already set to %#llx-%#llx " "flags %#lx\n", idx, (unsigned long long) res->start, (unsigned long long) res->end, res->flags); - return 1; + return 0; } - /* set the initial values */ - pnp_res->index = idx; - res->flags |= rule->flags | IORESOURCE_IO; - res->flags &= ~IORESOURCE_UNSET; + res = &local_res; + res->flags = rule->flags | IORESOURCE_AUTO; + res->start = 0; + res->end = 0; if (!rule->size) { res->flags |= IORESOURCE_DISABLED; dev_dbg(&dev->dev, " io %d disabled\n", idx); - return 1; /* skip disabled resource requests */ + goto __add; } res->start = rule->min; res->end = res->start + rule->size - 1; - /* run through until pnp_check_port is happy */ while (!pnp_check_port(dev, res)) { res->start += rule->align; res->end = res->start + rule->size - 1; if (res->start > rule->max || !rule->align) { - dev_dbg(&dev->dev, " couldn't assign io %d\n", idx); - return 0; + dev_dbg(&dev->dev, " couldn't assign io %d " + "(min %#llx max %#llx)\n", idx, + (unsigned long long) rule->min, + (unsigned long long) rule->max); + return -EBUSY; } } - dev_dbg(&dev->dev, " assign io %d %#llx-%#llx\n", idx, - (unsigned long long) res->start, (unsigned long long) res->end); - return 1; + +__add: + pnp_add_io_resource(dev, res->start, res->end, res->flags); + return 0; } static int pnp_assign_mem(struct pnp_dev *dev, struct pnp_mem *rule, int idx) { - struct pnp_resource *pnp_res; - struct resource *res; - - pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_MEM, idx); - if (!pnp_res) { - dev_err(&dev->dev, "too many memory resources\n"); - /* pretend we were successful so at least the manager won't try again */ - return 1; - } + struct resource *res, local_res; - res = &pnp_res->res; - - /* check if this resource has been manually set, if so skip */ - if (!(res->flags & IORESOURCE_AUTO)) { + 
res = pnp_get_resource(dev, IORESOURCE_MEM, idx); + if (res) { dev_dbg(&dev->dev, " mem %d already set to %#llx-%#llx " "flags %#lx\n", idx, (unsigned long long) res->start, (unsigned long long) res->end, res->flags); - return 1; + return 0; } - /* set the initial values */ - pnp_res->index = idx; - res->flags |= rule->flags | IORESOURCE_MEM; - res->flags &= ~IORESOURCE_UNSET; + res = &local_res; + res->flags = rule->flags | IORESOURCE_AUTO; + res->start = 0; + res->end = 0; - /* convert pnp flags to standard Linux flags */ if (!(rule->flags & IORESOURCE_MEM_WRITEABLE)) res->flags |= IORESOURCE_READONLY; if (rule->flags & IORESOURCE_MEM_CACHEABLE) @@ -107,30 +91,32 @@ static int pnp_assign_mem(struct pnp_dev *dev, struct pnp_mem *rule, int idx) if (!rule->size) { res->flags |= IORESOURCE_DISABLED; dev_dbg(&dev->dev, " mem %d disabled\n", idx); - return 1; /* skip disabled resource requests */ + goto __add; } res->start = rule->min; res->end = res->start + rule->size - 1; - /* run through until pnp_check_mem is happy */ while (!pnp_check_mem(dev, res)) { res->start += rule->align; res->end = res->start + rule->size - 1; if (res->start > rule->max || !rule->align) { - dev_dbg(&dev->dev, " couldn't assign mem %d\n", idx); - return 0; + dev_dbg(&dev->dev, " couldn't assign mem %d " + "(min %#llx max %#llx)\n", idx, + (unsigned long long) rule->min, + (unsigned long long) rule->max); + return -EBUSY; } } - dev_dbg(&dev->dev, " assign mem %d %#llx-%#llx\n", idx, - (unsigned long long) res->start, (unsigned long long) res->end); - return 1; + +__add: + pnp_add_mem_resource(dev, res->start, res->end, res->flags); + return 0; } static int pnp_assign_irq(struct pnp_dev *dev, struct pnp_irq *rule, int idx) { - struct pnp_resource *pnp_res; - struct resource *res; + struct resource *res, local_res; int i; /* IRQ priority: this table is good for i386 */ @@ -138,59 +124,57 @@ static int pnp_assign_irq(struct pnp_dev *dev, struct pnp_irq *rule, int idx) 5, 10, 11, 12, 9, 14, 15, 7, 3, 4, 13, 0, 1, 6, 8, 2 }; - pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_IRQ, idx); - if (!pnp_res) { - dev_err(&dev->dev, "too many IRQ resources\n"); - /* pretend we were successful so at least the manager won't try again */ - return 1; - } - - res = &pnp_res->res; - - /* check if this resource has been manually set, if so skip */ - if (!(res->flags & IORESOURCE_AUTO)) { + res = pnp_get_resource(dev, IORESOURCE_IRQ, idx); + if (res) { dev_dbg(&dev->dev, " irq %d already set to %d flags %#lx\n", idx, (int) res->start, res->flags); - return 1; + return 0; } - /* set the initial values */ - pnp_res->index = idx; - res->flags |= rule->flags | IORESOURCE_IRQ; - res->flags &= ~IORESOURCE_UNSET; + res = &local_res; + res->flags = rule->flags | IORESOURCE_AUTO; + res->start = -1; + res->end = -1; - if (bitmap_empty(rule->map, PNP_IRQ_NR)) { + if (bitmap_empty(rule->map.bits, PNP_IRQ_NR)) { res->flags |= IORESOURCE_DISABLED; dev_dbg(&dev->dev, " irq %d disabled\n", idx); - return 1; /* skip disabled resource requests */ + goto __add; } /* TBD: need check for >16 IRQ */ - res->start = find_next_bit(rule->map, PNP_IRQ_NR, 16); + res->start = find_next_bit(rule->map.bits, PNP_IRQ_NR, 16); if (res->start < PNP_IRQ_NR) { res->end = res->start; - dev_dbg(&dev->dev, " assign irq %d %d\n", idx, - (int) res->start); - return 1; + goto __add; } for (i = 0; i < 16; i++) { - if (test_bit(xtab[i], rule->map)) { + if (test_bit(xtab[i], rule->map.bits)) { res->start = res->end = xtab[i]; - if (pnp_check_irq(dev, res)) { - dev_dbg(&dev->dev, " 
assign irq %d %d\n", idx, - (int) res->start); - return 1; - } + if (pnp_check_irq(dev, res)) + goto __add; } } + + if (rule->flags & IORESOURCE_IRQ_OPTIONAL) { + res->start = -1; + res->end = -1; + res->flags |= IORESOURCE_DISABLED; + dev_dbg(&dev->dev, " irq %d disabled (optional)\n", idx); + goto __add; + } + dev_dbg(&dev->dev, " couldn't assign irq %d\n", idx); + return -EBUSY; + +__add: + pnp_add_irq_resource(dev, res->start, res->flags); return 0; } -static void pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx) +static int pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx) { - struct pnp_resource *pnp_res; - struct resource *res; + struct resource *res, local_res; int i; /* DMA priority: this table is good for i386 */ @@ -198,231 +182,99 @@ static void pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx) 1, 3, 5, 6, 7, 0, 2, 4 }; - pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_DMA, idx); - if (!pnp_res) { - dev_err(&dev->dev, "too many DMA resources\n"); - return; - } - - res = &pnp_res->res; - - /* check if this resource has been manually set, if so skip */ - if (!(res->flags & IORESOURCE_AUTO)) { + res = pnp_get_resource(dev, IORESOURCE_DMA, idx); + if (res) { dev_dbg(&dev->dev, " dma %d already set to %d flags %#lx\n", idx, (int) res->start, res->flags); - return; + return 0; } - /* set the initial values */ - pnp_res->index = idx; - res->flags |= rule->flags | IORESOURCE_DMA; - res->flags &= ~IORESOURCE_UNSET; + res = &local_res; + res->flags = rule->flags | IORESOURCE_AUTO; + res->start = -1; + res->end = -1; for (i = 0; i < 8; i++) { if (rule->map & (1 << xtab[i])) { res->start = res->end = xtab[i]; - if (pnp_check_dma(dev, res)) { - dev_dbg(&dev->dev, " assign dma %d %d\n", idx, - (int) res->start); - return; - } + if (pnp_check_dma(dev, res)) + goto __add; } } #ifdef MAX_DMA_CHANNELS res->start = res->end = MAX_DMA_CHANNELS; #endif - res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED; + res->flags |= IORESOURCE_DISABLED; dev_dbg(&dev->dev, " disable dma %d\n", idx); -} - -void pnp_init_resource(struct resource *res) -{ - unsigned long type; - - type = res->flags & (IORESOURCE_IO | IORESOURCE_MEM | - IORESOURCE_IRQ | IORESOURCE_DMA); - res->name = NULL; - res->flags = type | IORESOURCE_AUTO | IORESOURCE_UNSET; - if (type == IORESOURCE_IRQ || type == IORESOURCE_DMA) { - res->start = -1; - res->end = -1; - } else { - res->start = 0; - res->end = 0; - } +__add: + pnp_add_dma_resource(dev, res->start, res->flags); + return 0; } -/** - * pnp_init_resources - Resets a resource table to default values. 
- * @table: pointer to the desired resource table - */ void pnp_init_resources(struct pnp_dev *dev) { - struct resource *res; - int idx; - - for (idx = 0; idx < PNP_MAX_IRQ; idx++) { - res = &dev->res->irq[idx].res; - res->flags = IORESOURCE_IRQ; - pnp_init_resource(res); - } - for (idx = 0; idx < PNP_MAX_DMA; idx++) { - res = &dev->res->dma[idx].res; - res->flags = IORESOURCE_DMA; - pnp_init_resource(res); - } - for (idx = 0; idx < PNP_MAX_PORT; idx++) { - res = &dev->res->port[idx].res; - res->flags = IORESOURCE_IO; - pnp_init_resource(res); - } - for (idx = 0; idx < PNP_MAX_MEM; idx++) { - res = &dev->res->mem[idx].res; - res->flags = IORESOURCE_MEM; - pnp_init_resource(res); - } + pnp_free_resources(dev); } -/** - * pnp_clean_resources - clears resources that were not manually set - * @res: the resources to clean - */ static void pnp_clean_resource_table(struct pnp_dev *dev) { - struct resource *res; - int idx; - - for (idx = 0; idx < PNP_MAX_IRQ; idx++) { - res = &dev->res->irq[idx].res; - if (res->flags & IORESOURCE_AUTO) { - res->flags = IORESOURCE_IRQ; - pnp_init_resource(res); - } - } - for (idx = 0; idx < PNP_MAX_DMA; idx++) { - res = &dev->res->dma[idx].res; - if (res->flags & IORESOURCE_AUTO) { - res->flags = IORESOURCE_DMA; - pnp_init_resource(res); - } - } - for (idx = 0; idx < PNP_MAX_PORT; idx++) { - res = &dev->res->port[idx].res; - if (res->flags & IORESOURCE_AUTO) { - res->flags = IORESOURCE_IO; - pnp_init_resource(res); - } - } - for (idx = 0; idx < PNP_MAX_MEM; idx++) { - res = &dev->res->mem[idx].res; - if (res->flags & IORESOURCE_AUTO) { - res->flags = IORESOURCE_MEM; - pnp_init_resource(res); - } + struct pnp_resource *pnp_res, *tmp; + + list_for_each_entry_safe(pnp_res, tmp, &dev->resources, list) { + if (pnp_res->res.flags & IORESOURCE_AUTO) + pnp_free_resource(pnp_res); } } /** * pnp_assign_resources - assigns resources to the device based on the specified dependent number * @dev: pointer to the desired device - * @depnum: the dependent function number - * - * Only set depnum to 0 if the device does not have dependent options. 
+ * @set: the dependent function number */ -static int pnp_assign_resources(struct pnp_dev *dev, int depnum) +static int pnp_assign_resources(struct pnp_dev *dev, int set) { - struct pnp_port *port; - struct pnp_mem *mem; - struct pnp_irq *irq; - struct pnp_dma *dma; + struct pnp_option *option; int nport = 0, nmem = 0, nirq = 0, ndma = 0; + int ret = 0; - if (!pnp_can_configure(dev)) - return -ENODEV; - - dbg_pnp_show_resources(dev, "before pnp_assign_resources"); + dev_dbg(&dev->dev, "pnp_assign_resources, try dependent set %d\n", set); mutex_lock(&pnp_res_mutex); pnp_clean_resource_table(dev); - if (dev->independent) { - dev_dbg(&dev->dev, "assigning independent options\n"); - port = dev->independent->port; - mem = dev->independent->mem; - irq = dev->independent->irq; - dma = dev->independent->dma; - while (port) { - if (!pnp_assign_port(dev, port, nport)) - goto fail; - nport++; - port = port->next; - } - while (mem) { - if (!pnp_assign_mem(dev, mem, nmem)) - goto fail; - nmem++; - mem = mem->next; - } - while (irq) { - if (!pnp_assign_irq(dev, irq, nirq)) - goto fail; - nirq++; - irq = irq->next; - } - while (dma) { - pnp_assign_dma(dev, dma, ndma); - ndma++; - dma = dma->next; - } - } - if (depnum) { - struct pnp_option *dep; - int i; - - dev_dbg(&dev->dev, "assigning dependent option %d\n", depnum); - for (i = 1, dep = dev->dependent; i < depnum; - i++, dep = dep->next) - if (!dep) - goto fail; - port = dep->port; - mem = dep->mem; - irq = dep->irq; - dma = dep->dma; - while (port) { - if (!pnp_assign_port(dev, port, nport)) - goto fail; - nport++; - port = port->next; - } - while (mem) { - if (!pnp_assign_mem(dev, mem, nmem)) - goto fail; - nmem++; - mem = mem->next; - } - while (irq) { - if (!pnp_assign_irq(dev, irq, nirq)) - goto fail; - nirq++; - irq = irq->next; + list_for_each_entry(option, &dev->options, list) { + if (pnp_option_is_dependent(option) && + pnp_option_set(option) != set) + continue; + + switch (option->type) { + case IORESOURCE_IO: + ret = pnp_assign_port(dev, &option->u.port, nport++); + break; + case IORESOURCE_MEM: + ret = pnp_assign_mem(dev, &option->u.mem, nmem++); + break; + case IORESOURCE_IRQ: + ret = pnp_assign_irq(dev, &option->u.irq, nirq++); + break; + case IORESOURCE_DMA: + ret = pnp_assign_dma(dev, &option->u.dma, ndma++); + break; + default: + ret = -EINVAL; + break; } - while (dma) { - pnp_assign_dma(dev, dma, ndma); - ndma++; - dma = dma->next; - } - } else if (dev->dependent) - goto fail; - - mutex_unlock(&pnp_res_mutex); - dbg_pnp_show_resources(dev, "after pnp_assign_resources"); - return 1; + if (ret < 0) + break; + } -fail: - pnp_clean_resource_table(dev); mutex_unlock(&pnp_res_mutex); - dbg_pnp_show_resources(dev, "after pnp_assign_resources (failed)"); - return 0; + if (ret < 0) { + dev_dbg(&dev->dev, "pnp_assign_resources failed (%d)\n", ret); + pnp_clean_resource_table(dev); + } else + dbg_pnp_show_resources(dev, "pnp_assign_resources succeeded"); + return ret; } /** @@ -431,29 +283,25 @@ fail: */ int pnp_auto_config_dev(struct pnp_dev *dev) { - struct pnp_option *dep; - int i = 1; + int i, ret; if (!pnp_can_configure(dev)) { dev_dbg(&dev->dev, "configuration not supported\n"); return -ENODEV; } - if (!dev->dependent) { - if (pnp_assign_resources(dev, 0)) + ret = pnp_assign_resources(dev, 0); + if (ret == 0) + return 0; + + for (i = 1; i < dev->num_dependent_sets; i++) { + ret = pnp_assign_resources(dev, i); + if (ret == 0) return 0; - } else { - dep = dev->dependent; - do { - if (pnp_assign_resources(dev, i)) - return 0; - dep = 
dep->next; - i++; - } while (dep); } dev_err(&dev->dev, "unable to assign resources\n"); - return -EBUSY; + return ret; } /** diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c index 50902773beaf..c1b9ea34977b 100644 --- a/drivers/pnp/pnpacpi/core.c +++ b/drivers/pnp/pnpacpi/core.c @@ -117,9 +117,7 @@ static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state) { int power_state; - power_state = acpi_pm_device_sleep_state(&dev->dev, - device_may_wakeup(&dev->dev), - NULL); + power_state = acpi_pm_device_sleep_state(&dev->dev, NULL); if (power_state < 0) power_state = (state.event == PM_EVENT_ON) ? ACPI_STATE_D0 : ACPI_STATE_D3; diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c index 46c791adb894..d7e9f2152df0 100644 --- a/drivers/pnp/pnpacpi/rsparser.c +++ b/drivers/pnp/pnpacpi/rsparser.c @@ -3,6 +3,8 @@ * * Copyright (c) 2004 Matthieu Castet <castet.matthieu@free.fr> * Copyright (c) 2004 Li Shaohua <shaohua.li@intel.com> + * Copyright (C) 2008 Hewlett-Packard Development Company, L.P. + * Bjorn Helgaas <bjorn.helgaas@hp.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -98,8 +100,10 @@ static void pnpacpi_parse_allocated_irqresource(struct pnp_dev *dev, int irq, flags; int p, t; - if (!valid_IRQ(gsi)) + if (!valid_IRQ(gsi)) { + pnp_add_irq_resource(dev, gsi, IORESOURCE_DISABLED); return; + } /* * in IO-APIC mode, use overrided attribute. Two reasons: @@ -178,13 +182,68 @@ static void pnpacpi_parse_allocated_ioresource(struct pnp_dev *dev, u64 start, u64 end = start + len - 1; if (io_decode == ACPI_DECODE_16) - flags |= PNP_PORT_FLAG_16BITADDR; + flags |= IORESOURCE_IO_16BIT_ADDR; if (len == 0 || end >= 0x10003) flags |= IORESOURCE_DISABLED; pnp_add_io_resource(dev, start, end, flags); } +/* + * Device CSRs that do not appear in PCI config space should be described + * via ACPI. This would normally be done with Address Space Descriptors + * marked as "consumer-only," but old versions of Windows and Linux ignore + * the producer/consumer flag, so HP invented a vendor-defined resource to + * describe the location and size of CSR space. 
+ */ +static struct acpi_vendor_uuid hp_ccsr_uuid = { + .subtype = 2, + .data = { 0xf9, 0xad, 0xe9, 0x69, 0x4f, 0x92, 0x5f, 0xab, 0xf6, 0x4a, + 0x24, 0xd2, 0x01, 0x37, 0x0e, 0xad }, +}; + +static int vendor_resource_matches(struct pnp_dev *dev, + struct acpi_resource_vendor_typed *vendor, + struct acpi_vendor_uuid *match, + int expected_len) +{ + int uuid_len = sizeof(vendor->uuid); + u8 uuid_subtype = vendor->uuid_subtype; + u8 *uuid = vendor->uuid; + int actual_len; + + /* byte_length includes uuid_subtype and uuid */ + actual_len = vendor->byte_length - uuid_len - 1; + + if (uuid_subtype == match->subtype && + uuid_len == sizeof(match->data) && + memcmp(uuid, match->data, uuid_len) == 0) { + if (expected_len && expected_len != actual_len) { + dev_err(&dev->dev, "wrong vendor descriptor size; " + "expected %d, found %d bytes\n", + expected_len, actual_len); + return 0; + } + + return 1; + } + + return 0; +} + +static void pnpacpi_parse_allocated_vendor(struct pnp_dev *dev, + struct acpi_resource_vendor_typed *vendor) +{ + if (vendor_resource_matches(dev, vendor, &hp_ccsr_uuid, 16)) { + u64 start, length; + + memcpy(&start, vendor->byte_data, sizeof(start)); + memcpy(&length, vendor->byte_data + 8, sizeof(length)); + + pnp_add_mem_resource(dev, start, start + length - 1, 0); + } +} + static void pnpacpi_parse_allocated_memresource(struct pnp_dev *dev, u64 start, u64 len, int write_protect) @@ -235,6 +294,7 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, struct acpi_resource_dma *dma; struct acpi_resource_io *io; struct acpi_resource_fixed_io *fixed_io; + struct acpi_resource_vendor_typed *vendor_typed; struct acpi_resource_memory24 *memory24; struct acpi_resource_memory32 *memory32; struct acpi_resource_fixed_memory32 *fixed_memory32; @@ -248,24 +308,39 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, * _CRS, but some firmware violates this, so parse them all. */ irq = &res->data.irq; - for (i = 0; i < irq->interrupt_count; i++) { - pnpacpi_parse_allocated_irqresource(dev, - irq->interrupts[i], - irq->triggering, - irq->polarity, - irq->sharable); + if (irq->interrupt_count == 0) + pnp_add_irq_resource(dev, 0, IORESOURCE_DISABLED); + else { + for (i = 0; i < irq->interrupt_count; i++) { + pnpacpi_parse_allocated_irqresource(dev, + irq->interrupts[i], + irq->triggering, + irq->polarity, + irq->sharable); + } + + /* + * The IRQ encoder puts a single interrupt in each + * descriptor, so if a _CRS descriptor has more than + * one interrupt, we won't be able to re-encode it. 
+ */ + if (pnp_can_write(dev) && irq->interrupt_count > 1) { + dev_warn(&dev->dev, "multiple interrupts in " + "_CRS descriptor; configuration can't " + "be changed\n"); + dev->capabilities &= ~PNP_WRITE; + } } break; case ACPI_RESOURCE_TYPE_DMA: dma = &res->data.dma; - if (dma->channel_count > 0) { + if (dma->channel_count > 0 && dma->channels[0] != (u8) -1) flags = dma_flags(dma->type, dma->bus_master, dma->transfer); - if (dma->channels[0] == (u8) -1) - flags |= IORESOURCE_DISABLED; - pnp_add_dma_resource(dev, dma->channels[0], flags); - } + else + flags = IORESOURCE_DISABLED; + pnp_add_dma_resource(dev, dma->channels[0], flags); break; case ACPI_RESOURCE_TYPE_IO: @@ -289,6 +364,8 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, break; case ACPI_RESOURCE_TYPE_VENDOR: + vendor_typed = &res->data.vendor_typed; + pnpacpi_parse_allocated_vendor(dev, vendor_typed); break; case ACPI_RESOURCE_TYPE_END_TAG: @@ -331,12 +408,29 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, if (extended_irq->producer_consumer == ACPI_PRODUCER) return AE_OK; - for (i = 0; i < extended_irq->interrupt_count; i++) { - pnpacpi_parse_allocated_irqresource(dev, - extended_irq->interrupts[i], - extended_irq->triggering, - extended_irq->polarity, - extended_irq->sharable); + if (extended_irq->interrupt_count == 0) + pnp_add_irq_resource(dev, 0, IORESOURCE_DISABLED); + else { + for (i = 0; i < extended_irq->interrupt_count; i++) { + pnpacpi_parse_allocated_irqresource(dev, + extended_irq->interrupts[i], + extended_irq->triggering, + extended_irq->polarity, + extended_irq->sharable); + } + + /* + * The IRQ encoder puts a single interrupt in each + * descriptor, so if a _CRS descriptor has more than + * one interrupt, we won't be able to re-encode it. 
+ */ + if (pnp_can_write(dev) && + extended_irq->interrupt_count > 1) { + dev_warn(&dev->dev, "multiple interrupts in " + "_CRS descriptor; configuration can't " + "be changed\n"); + dev->capabilities &= ~PNP_WRITE; + } } break; @@ -373,179 +467,147 @@ int pnpacpi_parse_allocated_resource(struct pnp_dev *dev) } static __init void pnpacpi_parse_dma_option(struct pnp_dev *dev, - struct pnp_option *option, + unsigned int option_flags, struct acpi_resource_dma *p) { int i; - struct pnp_dma *dma; + unsigned char map = 0, flags; if (p->channel_count == 0) return; - dma = kzalloc(sizeof(struct pnp_dma), GFP_KERNEL); - if (!dma) - return; for (i = 0; i < p->channel_count; i++) - dma->map |= 1 << p->channels[i]; - - dma->flags = dma_flags(p->type, p->bus_master, p->transfer); + map |= 1 << p->channels[i]; - pnp_register_dma_resource(dev, option, dma); + flags = dma_flags(p->type, p->bus_master, p->transfer); + pnp_register_dma_resource(dev, option_flags, map, flags); } static __init void pnpacpi_parse_irq_option(struct pnp_dev *dev, - struct pnp_option *option, + unsigned int option_flags, struct acpi_resource_irq *p) { int i; - struct pnp_irq *irq; + pnp_irq_mask_t map; + unsigned char flags; if (p->interrupt_count == 0) return; - irq = kzalloc(sizeof(struct pnp_irq), GFP_KERNEL); - if (!irq) - return; + bitmap_zero(map.bits, PNP_IRQ_NR); for (i = 0; i < p->interrupt_count; i++) if (p->interrupts[i]) - __set_bit(p->interrupts[i], irq->map); - irq->flags = irq_flags(p->triggering, p->polarity, p->sharable); + __set_bit(p->interrupts[i], map.bits); - pnp_register_irq_resource(dev, option, irq); + flags = irq_flags(p->triggering, p->polarity, p->sharable); + pnp_register_irq_resource(dev, option_flags, &map, flags); } static __init void pnpacpi_parse_ext_irq_option(struct pnp_dev *dev, - struct pnp_option *option, + unsigned int option_flags, struct acpi_resource_extended_irq *p) { int i; - struct pnp_irq *irq; + pnp_irq_mask_t map; + unsigned char flags; if (p->interrupt_count == 0) return; - irq = kzalloc(sizeof(struct pnp_irq), GFP_KERNEL); - if (!irq) - return; - for (i = 0; i < p->interrupt_count; i++) - if (p->interrupts[i]) - __set_bit(p->interrupts[i], irq->map); - irq->flags = irq_flags(p->triggering, p->polarity, p->sharable); + bitmap_zero(map.bits, PNP_IRQ_NR); + for (i = 0; i < p->interrupt_count; i++) { + if (p->interrupts[i]) { + if (p->interrupts[i] < PNP_IRQ_NR) + __set_bit(p->interrupts[i], map.bits); + else + dev_err(&dev->dev, "ignoring IRQ %d option " + "(too large for %d entry bitmap)\n", + p->interrupts[i], PNP_IRQ_NR); + } + } - pnp_register_irq_resource(dev, option, irq); + flags = irq_flags(p->triggering, p->polarity, p->sharable); + pnp_register_irq_resource(dev, option_flags, &map, flags); } static __init void pnpacpi_parse_port_option(struct pnp_dev *dev, - struct pnp_option *option, + unsigned int option_flags, struct acpi_resource_io *io) { - struct pnp_port *port; + unsigned char flags = 0; if (io->address_length == 0) return; - port = kzalloc(sizeof(struct pnp_port), GFP_KERNEL); - if (!port) - return; - port->min = io->minimum; - port->max = io->maximum; - port->align = io->alignment; - port->size = io->address_length; - port->flags = ACPI_DECODE_16 == io->io_decode ? 
- PNP_PORT_FLAG_16BITADDR : 0; - pnp_register_port_resource(dev, option, port); + + if (io->io_decode == ACPI_DECODE_16) + flags = IORESOURCE_IO_16BIT_ADDR; + pnp_register_port_resource(dev, option_flags, io->minimum, io->maximum, + io->alignment, io->address_length, flags); } static __init void pnpacpi_parse_fixed_port_option(struct pnp_dev *dev, - struct pnp_option *option, + unsigned int option_flags, struct acpi_resource_fixed_io *io) { - struct pnp_port *port; - if (io->address_length == 0) return; - port = kzalloc(sizeof(struct pnp_port), GFP_KERNEL); - if (!port) - return; - port->min = port->max = io->address; - port->size = io->address_length; - port->align = 0; - port->flags = PNP_PORT_FLAG_FIXED; - pnp_register_port_resource(dev, option, port); + + pnp_register_port_resource(dev, option_flags, io->address, io->address, + 0, io->address_length, IORESOURCE_IO_FIXED); } static __init void pnpacpi_parse_mem24_option(struct pnp_dev *dev, - struct pnp_option *option, + unsigned int option_flags, struct acpi_resource_memory24 *p) { - struct pnp_mem *mem; + unsigned char flags = 0; if (p->address_length == 0) return; - mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL); - if (!mem) - return; - mem->min = p->minimum; - mem->max = p->maximum; - mem->align = p->alignment; - mem->size = p->address_length; - - mem->flags = (ACPI_READ_WRITE_MEMORY == p->write_protect) ? - IORESOURCE_MEM_WRITEABLE : 0; - pnp_register_mem_resource(dev, option, mem); + if (p->write_protect == ACPI_READ_WRITE_MEMORY) + flags = IORESOURCE_MEM_WRITEABLE; + pnp_register_mem_resource(dev, option_flags, p->minimum, p->maximum, + p->alignment, p->address_length, flags); } static __init void pnpacpi_parse_mem32_option(struct pnp_dev *dev, - struct pnp_option *option, + unsigned int option_flags, struct acpi_resource_memory32 *p) { - struct pnp_mem *mem; + unsigned char flags = 0; if (p->address_length == 0) return; - mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL); - if (!mem) - return; - mem->min = p->minimum; - mem->max = p->maximum; - mem->align = p->alignment; - mem->size = p->address_length; - - mem->flags = (ACPI_READ_WRITE_MEMORY == p->write_protect) ? - IORESOURCE_MEM_WRITEABLE : 0; - pnp_register_mem_resource(dev, option, mem); + if (p->write_protect == ACPI_READ_WRITE_MEMORY) + flags = IORESOURCE_MEM_WRITEABLE; + pnp_register_mem_resource(dev, option_flags, p->minimum, p->maximum, + p->alignment, p->address_length, flags); } static __init void pnpacpi_parse_fixed_mem32_option(struct pnp_dev *dev, - struct pnp_option *option, + unsigned int option_flags, struct acpi_resource_fixed_memory32 *p) { - struct pnp_mem *mem; + unsigned char flags = 0; if (p->address_length == 0) return; - mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL); - if (!mem) - return; - mem->min = mem->max = p->address; - mem->size = p->address_length; - mem->align = 0; - - mem->flags = (ACPI_READ_WRITE_MEMORY == p->write_protect) ? 
- IORESOURCE_MEM_WRITEABLE : 0; - pnp_register_mem_resource(dev, option, mem); + if (p->write_protect == ACPI_READ_WRITE_MEMORY) + flags = IORESOURCE_MEM_WRITEABLE; + pnp_register_mem_resource(dev, option_flags, p->address, p->address, + 0, p->address_length, flags); } static __init void pnpacpi_parse_address_option(struct pnp_dev *dev, - struct pnp_option *option, + unsigned int option_flags, struct acpi_resource *r) { struct acpi_resource_address64 addr, *p = &addr; acpi_status status; - struct pnp_mem *mem; - struct pnp_port *port; + unsigned char flags = 0; status = acpi_resource_to_address64(r, p); if (!ACPI_SUCCESS(status)) { @@ -558,49 +620,37 @@ static __init void pnpacpi_parse_address_option(struct pnp_dev *dev, return; if (p->resource_type == ACPI_MEMORY_RANGE) { - mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL); - if (!mem) - return; - mem->min = mem->max = p->minimum; - mem->size = p->address_length; - mem->align = 0; - mem->flags = (p->info.mem.write_protect == - ACPI_READ_WRITE_MEMORY) ? IORESOURCE_MEM_WRITEABLE - : 0; - pnp_register_mem_resource(dev, option, mem); - } else if (p->resource_type == ACPI_IO_RANGE) { - port = kzalloc(sizeof(struct pnp_port), GFP_KERNEL); - if (!port) - return; - port->min = port->max = p->minimum; - port->size = p->address_length; - port->align = 0; - port->flags = PNP_PORT_FLAG_FIXED; - pnp_register_port_resource(dev, option, port); - } + if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY) + flags = IORESOURCE_MEM_WRITEABLE; + pnp_register_mem_resource(dev, option_flags, p->minimum, + p->minimum, 0, p->address_length, + flags); + } else if (p->resource_type == ACPI_IO_RANGE) + pnp_register_port_resource(dev, option_flags, p->minimum, + p->minimum, 0, p->address_length, + IORESOURCE_IO_FIXED); } struct acpipnp_parse_option_s { - struct pnp_option *option; - struct pnp_option *option_independent; struct pnp_dev *dev; + unsigned int option_flags; }; static __init acpi_status pnpacpi_option_resource(struct acpi_resource *res, void *data) { - int priority = 0; + int priority; struct acpipnp_parse_option_s *parse_data = data; struct pnp_dev *dev = parse_data->dev; - struct pnp_option *option = parse_data->option; + unsigned int option_flags = parse_data->option_flags; switch (res->type) { case ACPI_RESOURCE_TYPE_IRQ: - pnpacpi_parse_irq_option(dev, option, &res->data.irq); + pnpacpi_parse_irq_option(dev, option_flags, &res->data.irq); break; case ACPI_RESOURCE_TYPE_DMA: - pnpacpi_parse_dma_option(dev, option, &res->data.dma); + pnpacpi_parse_dma_option(dev, option_flags, &res->data.dma); break; case ACPI_RESOURCE_TYPE_START_DEPENDENT: @@ -620,31 +670,19 @@ static __init acpi_status pnpacpi_option_resource(struct acpi_resource *res, priority = PNP_RES_PRIORITY_INVALID; break; } - /* TBD: Consider performance/robustness bits */ - option = pnp_register_dependent_option(dev, priority); - if (!option) - return AE_ERROR; - parse_data->option = option; + parse_data->option_flags = pnp_new_dependent_set(dev, priority); break; case ACPI_RESOURCE_TYPE_END_DEPENDENT: - /*only one EndDependentFn is allowed */ - if (!parse_data->option_independent) { - dev_warn(&dev->dev, "more than one EndDependentFn " - "in _PRS\n"); - return AE_ERROR; - } - parse_data->option = parse_data->option_independent; - parse_data->option_independent = NULL; - dev_dbg(&dev->dev, "end dependent options\n"); + parse_data->option_flags = 0; break; case ACPI_RESOURCE_TYPE_IO: - pnpacpi_parse_port_option(dev, option, &res->data.io); + pnpacpi_parse_port_option(dev, option_flags, 
&res->data.io); break; case ACPI_RESOURCE_TYPE_FIXED_IO: - pnpacpi_parse_fixed_port_option(dev, option, + pnpacpi_parse_fixed_port_option(dev, option_flags, &res->data.fixed_io); break; @@ -653,29 +691,31 @@ static __init acpi_status pnpacpi_option_resource(struct acpi_resource *res, break; case ACPI_RESOURCE_TYPE_MEMORY24: - pnpacpi_parse_mem24_option(dev, option, &res->data.memory24); + pnpacpi_parse_mem24_option(dev, option_flags, + &res->data.memory24); break; case ACPI_RESOURCE_TYPE_MEMORY32: - pnpacpi_parse_mem32_option(dev, option, &res->data.memory32); + pnpacpi_parse_mem32_option(dev, option_flags, + &res->data.memory32); break; case ACPI_RESOURCE_TYPE_FIXED_MEMORY32: - pnpacpi_parse_fixed_mem32_option(dev, option, + pnpacpi_parse_fixed_mem32_option(dev, option_flags, &res->data.fixed_memory32); break; case ACPI_RESOURCE_TYPE_ADDRESS16: case ACPI_RESOURCE_TYPE_ADDRESS32: case ACPI_RESOURCE_TYPE_ADDRESS64: - pnpacpi_parse_address_option(dev, option, res); + pnpacpi_parse_address_option(dev, option_flags, res); break; case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64: break; case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: - pnpacpi_parse_ext_irq_option(dev, option, + pnpacpi_parse_ext_irq_option(dev, option_flags, &res->data.extended_irq); break; @@ -699,12 +739,9 @@ int __init pnpacpi_parse_resource_option_data(struct pnp_dev *dev) dev_dbg(&dev->dev, "parse resource options\n"); - parse_data.option = pnp_register_independent_option(dev); - if (!parse_data.option) - return -ENOMEM; - - parse_data.option_independent = parse_data.option; parse_data.dev = dev; + parse_data.option_flags = 0; + status = acpi_walk_resources(handle, METHOD_NAME__PRS, pnpacpi_option_resource, &parse_data); @@ -806,6 +843,13 @@ static void pnpacpi_encode_irq(struct pnp_dev *dev, struct acpi_resource_irq *irq = &resource->data.irq; int triggering, polarity, shareable; + if (!pnp_resource_enabled(p)) { + irq->interrupt_count = 0; + dev_dbg(&dev->dev, " encode irq (%s)\n", + p ? "disabled" : "missing"); + return; + } + decode_irq_flags(dev, p->flags, &triggering, &polarity, &shareable); irq->triggering = triggering; irq->polarity = polarity; @@ -828,6 +872,13 @@ static void pnpacpi_encode_ext_irq(struct pnp_dev *dev, struct acpi_resource_extended_irq *extended_irq = &resource->data.extended_irq; int triggering, polarity, shareable; + if (!pnp_resource_enabled(p)) { + extended_irq->interrupt_count = 0; + dev_dbg(&dev->dev, " encode extended irq (%s)\n", + p ? "disabled" : "missing"); + return; + } + decode_irq_flags(dev, p->flags, &triggering, &polarity, &shareable); extended_irq->producer_consumer = ACPI_CONSUMER; extended_irq->triggering = triggering; @@ -848,6 +899,13 @@ static void pnpacpi_encode_dma(struct pnp_dev *dev, { struct acpi_resource_dma *dma = &resource->data.dma; + if (!pnp_resource_enabled(p)) { + dma->channel_count = 0; + dev_dbg(&dev->dev, " encode dma (%s)\n", + p ? "disabled" : "missing"); + return; + } + /* Note: pnp_assign_dma will copy pnp_dma->flags into p->flags */ switch (p->flags & IORESOURCE_DMA_SPEED_MASK) { case IORESOURCE_DMA_TYPEA: @@ -889,17 +947,21 @@ static void pnpacpi_encode_io(struct pnp_dev *dev, { struct acpi_resource_io *io = &resource->data.io; - /* Note: pnp_assign_port will copy pnp_port->flags into p->flags */ - io->io_decode = (p->flags & PNP_PORT_FLAG_16BITADDR) ? - ACPI_DECODE_16 : ACPI_DECODE_10; - io->minimum = p->start; - io->maximum = p->end; - io->alignment = 0; /* Correct? 
*/ - io->address_length = p->end - p->start + 1; - - dev_dbg(&dev->dev, " encode io %#llx-%#llx decode %#x\n", - (unsigned long long) p->start, (unsigned long long) p->end, - io->io_decode); + if (pnp_resource_enabled(p)) { + /* Note: pnp_assign_port copies pnp_port->flags into p->flags */ + io->io_decode = (p->flags & IORESOURCE_IO_16BIT_ADDR) ? + ACPI_DECODE_16 : ACPI_DECODE_10; + io->minimum = p->start; + io->maximum = p->end; + io->alignment = 0; /* Correct? */ + io->address_length = p->end - p->start + 1; + } else { + io->minimum = 0; + io->address_length = 0; + } + + dev_dbg(&dev->dev, " encode io %#x-%#x decode %#x\n", io->minimum, + io->minimum + io->address_length - 1, io->io_decode); } static void pnpacpi_encode_fixed_io(struct pnp_dev *dev, @@ -908,11 +970,16 @@ static void pnpacpi_encode_fixed_io(struct pnp_dev *dev, { struct acpi_resource_fixed_io *fixed_io = &resource->data.fixed_io; - fixed_io->address = p->start; - fixed_io->address_length = p->end - p->start + 1; + if (pnp_resource_enabled(p)) { + fixed_io->address = p->start; + fixed_io->address_length = p->end - p->start + 1; + } else { + fixed_io->address = 0; + fixed_io->address_length = 0; + } - dev_dbg(&dev->dev, " encode fixed_io %#llx-%#llx\n", - (unsigned long long) p->start, (unsigned long long) p->end); + dev_dbg(&dev->dev, " encode fixed_io %#x-%#x\n", fixed_io->address, + fixed_io->address + fixed_io->address_length - 1); } static void pnpacpi_encode_mem24(struct pnp_dev *dev, @@ -921,17 +988,22 @@ static void pnpacpi_encode_mem24(struct pnp_dev *dev, { struct acpi_resource_memory24 *memory24 = &resource->data.memory24; - /* Note: pnp_assign_mem will copy pnp_mem->flags into p->flags */ - memory24->write_protect = - (p->flags & IORESOURCE_MEM_WRITEABLE) ? - ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY; - memory24->minimum = p->start; - memory24->maximum = p->end; - memory24->alignment = 0; - memory24->address_length = p->end - p->start + 1; - - dev_dbg(&dev->dev, " encode mem24 %#llx-%#llx write_protect %#x\n", - (unsigned long long) p->start, (unsigned long long) p->end, + if (pnp_resource_enabled(p)) { + /* Note: pnp_assign_mem copies pnp_mem->flags into p->flags */ + memory24->write_protect = p->flags & IORESOURCE_MEM_WRITEABLE ? + ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY; + memory24->minimum = p->start; + memory24->maximum = p->end; + memory24->alignment = 0; + memory24->address_length = p->end - p->start + 1; + } else { + memory24->minimum = 0; + memory24->address_length = 0; + } + + dev_dbg(&dev->dev, " encode mem24 %#x-%#x write_protect %#x\n", + memory24->minimum, + memory24->minimum + memory24->address_length - 1, memory24->write_protect); } @@ -941,16 +1013,21 @@ static void pnpacpi_encode_mem32(struct pnp_dev *dev, { struct acpi_resource_memory32 *memory32 = &resource->data.memory32; - memory32->write_protect = - (p->flags & IORESOURCE_MEM_WRITEABLE) ? - ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY; - memory32->minimum = p->start; - memory32->maximum = p->end; - memory32->alignment = 0; - memory32->address_length = p->end - p->start + 1; + if (pnp_resource_enabled(p)) { + memory32->write_protect = p->flags & IORESOURCE_MEM_WRITEABLE ? 
+ ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY; + memory32->minimum = p->start; + memory32->maximum = p->end; + memory32->alignment = 0; + memory32->address_length = p->end - p->start + 1; + } else { + memory32->minimum = 0; + memory32->alignment = 0; + } - dev_dbg(&dev->dev, " encode mem32 %#llx-%#llx write_protect %#x\n", - (unsigned long long) p->start, (unsigned long long) p->end, + dev_dbg(&dev->dev, " encode mem32 %#x-%#x write_protect %#x\n", + memory32->minimum, + memory32->minimum + memory32->address_length - 1, memory32->write_protect); } @@ -960,15 +1037,20 @@ static void pnpacpi_encode_fixed_mem32(struct pnp_dev *dev, { struct acpi_resource_fixed_memory32 *fixed_memory32 = &resource->data.fixed_memory32; - fixed_memory32->write_protect = - (p->flags & IORESOURCE_MEM_WRITEABLE) ? - ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY; - fixed_memory32->address = p->start; - fixed_memory32->address_length = p->end - p->start + 1; + if (pnp_resource_enabled(p)) { + fixed_memory32->write_protect = + p->flags & IORESOURCE_MEM_WRITEABLE ? + ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY; + fixed_memory32->address = p->start; + fixed_memory32->address_length = p->end - p->start + 1; + } else { + fixed_memory32->address = 0; + fixed_memory32->address_length = 0; + } - dev_dbg(&dev->dev, " encode fixed_mem32 %#llx-%#llx " - "write_protect %#x\n", - (unsigned long long) p->start, (unsigned long long) p->end, + dev_dbg(&dev->dev, " encode fixed_mem32 %#x-%#x write_protect %#x\n", + fixed_memory32->address, + fixed_memory32->address + fixed_memory32->address_length - 1, fixed_memory32->write_protect); } diff --git a/drivers/pnp/pnpbios/rsparser.c b/drivers/pnp/pnpbios/rsparser.c index 5ff9a4c0447e..ca567671379e 100644 --- a/drivers/pnp/pnpbios/rsparser.c +++ b/drivers/pnp/pnpbios/rsparser.c @@ -216,137 +216,116 @@ len_err: static __init void pnpbios_parse_mem_option(struct pnp_dev *dev, unsigned char *p, int size, - struct pnp_option *option) + unsigned int option_flags) { - struct pnp_mem *mem; - - mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL); - if (!mem) - return; - mem->min = ((p[5] << 8) | p[4]) << 8; - mem->max = ((p[7] << 8) | p[6]) << 8; - mem->align = (p[9] << 8) | p[8]; - mem->size = ((p[11] << 8) | p[10]) << 8; - mem->flags = p[3]; - pnp_register_mem_resource(dev, option, mem); + resource_size_t min, max, align, len; + unsigned char flags; + + min = ((p[5] << 8) | p[4]) << 8; + max = ((p[7] << 8) | p[6]) << 8; + align = (p[9] << 8) | p[8]; + len = ((p[11] << 8) | p[10]) << 8; + flags = p[3]; + pnp_register_mem_resource(dev, option_flags, min, max, align, len, + flags); } static __init void pnpbios_parse_mem32_option(struct pnp_dev *dev, unsigned char *p, int size, - struct pnp_option *option) + unsigned int option_flags) { - struct pnp_mem *mem; - - mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL); - if (!mem) - return; - mem->min = (p[7] << 24) | (p[6] << 16) | (p[5] << 8) | p[4]; - mem->max = (p[11] << 24) | (p[10] << 16) | (p[9] << 8) | p[8]; - mem->align = (p[15] << 24) | (p[14] << 16) | (p[13] << 8) | p[12]; - mem->size = (p[19] << 24) | (p[18] << 16) | (p[17] << 8) | p[16]; - mem->flags = p[3]; - pnp_register_mem_resource(dev, option, mem); + resource_size_t min, max, align, len; + unsigned char flags; + + min = (p[7] << 24) | (p[6] << 16) | (p[5] << 8) | p[4]; + max = (p[11] << 24) | (p[10] << 16) | (p[9] << 8) | p[8]; + align = (p[15] << 24) | (p[14] << 16) | (p[13] << 8) | p[12]; + len = (p[19] << 24) | (p[18] << 16) | (p[17] << 8) | p[16]; + flags = p[3]; + 
pnp_register_mem_resource(dev, option_flags, min, max, align, len, + flags); } static __init void pnpbios_parse_fixed_mem32_option(struct pnp_dev *dev, unsigned char *p, int size, - struct pnp_option *option) + unsigned int option_flags) { - struct pnp_mem *mem; - - mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL); - if (!mem) - return; - mem->min = mem->max = (p[7] << 24) | (p[6] << 16) | (p[5] << 8) | p[4]; - mem->size = (p[11] << 24) | (p[10] << 16) | (p[9] << 8) | p[8]; - mem->align = 0; - mem->flags = p[3]; - pnp_register_mem_resource(dev, option, mem); + resource_size_t base, len; + unsigned char flags; + + base = (p[7] << 24) | (p[6] << 16) | (p[5] << 8) | p[4]; + len = (p[11] << 24) | (p[10] << 16) | (p[9] << 8) | p[8]; + flags = p[3]; + pnp_register_mem_resource(dev, option_flags, base, base, 0, len, flags); } static __init void pnpbios_parse_irq_option(struct pnp_dev *dev, unsigned char *p, int size, - struct pnp_option *option) + unsigned int option_flags) { - struct pnp_irq *irq; unsigned long bits; + pnp_irq_mask_t map; + unsigned char flags = IORESOURCE_IRQ_HIGHEDGE; - irq = kzalloc(sizeof(struct pnp_irq), GFP_KERNEL); - if (!irq) - return; bits = (p[2] << 8) | p[1]; - bitmap_copy(irq->map, &bits, 16); + + bitmap_zero(map.bits, PNP_IRQ_NR); + bitmap_copy(map.bits, &bits, 16); + if (size > 2) - irq->flags = p[3]; - else - irq->flags = IORESOURCE_IRQ_HIGHEDGE; - pnp_register_irq_resource(dev, option, irq); + flags = p[3]; + + pnp_register_irq_resource(dev, option_flags, &map, flags); } static __init void pnpbios_parse_dma_option(struct pnp_dev *dev, unsigned char *p, int size, - struct pnp_option *option) + unsigned int option_flags) { - struct pnp_dma *dma; - - dma = kzalloc(sizeof(struct pnp_dma), GFP_KERNEL); - if (!dma) - return; - dma->map = p[1]; - dma->flags = p[2]; - pnp_register_dma_resource(dev, option, dma); + pnp_register_dma_resource(dev, option_flags, p[1], p[2]); } static __init void pnpbios_parse_port_option(struct pnp_dev *dev, unsigned char *p, int size, - struct pnp_option *option) + unsigned int option_flags) { - struct pnp_port *port; - - port = kzalloc(sizeof(struct pnp_port), GFP_KERNEL); - if (!port) - return; - port->min = (p[3] << 8) | p[2]; - port->max = (p[5] << 8) | p[4]; - port->align = p[6]; - port->size = p[7]; - port->flags = p[1] ? PNP_PORT_FLAG_16BITADDR : 0; - pnp_register_port_resource(dev, option, port); + resource_size_t min, max, align, len; + unsigned char flags; + + min = (p[3] << 8) | p[2]; + max = (p[5] << 8) | p[4]; + align = p[6]; + len = p[7]; + flags = p[1] ? 
IORESOURCE_IO_16BIT_ADDR : 0; + pnp_register_port_resource(dev, option_flags, min, max, align, len, + flags); } static __init void pnpbios_parse_fixed_port_option(struct pnp_dev *dev, unsigned char *p, int size, - struct pnp_option *option) + unsigned int option_flags) { - struct pnp_port *port; - - port = kzalloc(sizeof(struct pnp_port), GFP_KERNEL); - if (!port) - return; - port->min = port->max = (p[2] << 8) | p[1]; - port->size = p[3]; - port->align = 0; - port->flags = PNP_PORT_FLAG_FIXED; - pnp_register_port_resource(dev, option, port); + resource_size_t base, len; + + base = (p[2] << 8) | p[1]; + len = p[3]; + pnp_register_port_resource(dev, option_flags, base, base, 0, len, + IORESOURCE_IO_FIXED); } static __init unsigned char * pnpbios_parse_resource_option_data(unsigned char *p, unsigned char *end, - struct pnp_dev *dev) + struct pnp_dev *dev) { unsigned int len, tag; - int priority = 0; - struct pnp_option *option, *option_independent; + int priority; + unsigned int option_flags; if (!p) return NULL; dev_dbg(&dev->dev, "parse resource options\n"); - - option_independent = option = pnp_register_independent_option(dev); - if (!option) - return NULL; - + option_flags = 0; while ((char *)p < (char *)end) { /* determine the type of tag */ @@ -363,37 +342,38 @@ pnpbios_parse_resource_option_data(unsigned char *p, unsigned char *end, case LARGE_TAG_MEM: if (len != 9) goto len_err; - pnpbios_parse_mem_option(dev, p, len, option); + pnpbios_parse_mem_option(dev, p, len, option_flags); break; case LARGE_TAG_MEM32: if (len != 17) goto len_err; - pnpbios_parse_mem32_option(dev, p, len, option); + pnpbios_parse_mem32_option(dev, p, len, option_flags); break; case LARGE_TAG_FIXEDMEM32: if (len != 9) goto len_err; - pnpbios_parse_fixed_mem32_option(dev, p, len, option); + pnpbios_parse_fixed_mem32_option(dev, p, len, + option_flags); break; case SMALL_TAG_IRQ: if (len < 2 || len > 3) goto len_err; - pnpbios_parse_irq_option(dev, p, len, option); + pnpbios_parse_irq_option(dev, p, len, option_flags); break; case SMALL_TAG_DMA: if (len != 2) goto len_err; - pnpbios_parse_dma_option(dev, p, len, option); + pnpbios_parse_dma_option(dev, p, len, option_flags); break; case SMALL_TAG_PORT: if (len != 7) goto len_err; - pnpbios_parse_port_option(dev, p, len, option); + pnpbios_parse_port_option(dev, p, len, option_flags); break; case SMALL_TAG_VENDOR: @@ -403,28 +383,23 @@ pnpbios_parse_resource_option_data(unsigned char *p, unsigned char *end, case SMALL_TAG_FIXEDPORT: if (len != 3) goto len_err; - pnpbios_parse_fixed_port_option(dev, p, len, option); + pnpbios_parse_fixed_port_option(dev, p, len, + option_flags); break; case SMALL_TAG_STARTDEP: if (len > 1) goto len_err; - priority = 0x100 | PNP_RES_PRIORITY_ACCEPTABLE; + priority = PNP_RES_PRIORITY_ACCEPTABLE; if (len > 0) - priority = 0x100 | p[1]; - option = pnp_register_dependent_option(dev, priority); - if (!option) - return NULL; + priority = p[1]; + option_flags = pnp_new_dependent_set(dev, priority); break; case SMALL_TAG_ENDDEP: if (len != 0) goto len_err; - if (option_independent == option) - dev_warn(&dev->dev, "missing " - "SMALL_TAG_STARTDEP tag\n"); - option = option_independent; - dev_dbg(&dev->dev, "end dependent options\n"); + option_flags = 0; break; case SMALL_TAG_END: @@ -526,8 +501,16 @@ len_err: static void pnpbios_encode_mem(struct pnp_dev *dev, unsigned char *p, struct resource *res) { - unsigned long base = res->start; - unsigned long len = res->end - res->start + 1; + unsigned long base; + unsigned long len; + + if 
(pnp_resource_enabled(res)) { + base = res->start; + len = res->end - res->start + 1; + } else { + base = 0; + len = 0; + } p[4] = (base >> 8) & 0xff; p[5] = ((base >> 8) >> 8) & 0xff; @@ -536,15 +519,22 @@ static void pnpbios_encode_mem(struct pnp_dev *dev, unsigned char *p, p[10] = (len >> 8) & 0xff; p[11] = ((len >> 8) >> 8) & 0xff; - dev_dbg(&dev->dev, " encode mem %#llx-%#llx\n", - (unsigned long long) res->start, (unsigned long long) res->end); + dev_dbg(&dev->dev, " encode mem %#lx-%#lx\n", base, base + len - 1); } static void pnpbios_encode_mem32(struct pnp_dev *dev, unsigned char *p, struct resource *res) { - unsigned long base = res->start; - unsigned long len = res->end - res->start + 1; + unsigned long base; + unsigned long len; + + if (pnp_resource_enabled(res)) { + base = res->start; + len = res->end - res->start + 1; + } else { + base = 0; + len = 0; + } p[4] = base & 0xff; p[5] = (base >> 8) & 0xff; @@ -559,15 +549,22 @@ static void pnpbios_encode_mem32(struct pnp_dev *dev, unsigned char *p, p[18] = (len >> 16) & 0xff; p[19] = (len >> 24) & 0xff; - dev_dbg(&dev->dev, " encode mem32 %#llx-%#llx\n", - (unsigned long long) res->start, (unsigned long long) res->end); + dev_dbg(&dev->dev, " encode mem32 %#lx-%#lx\n", base, base + len - 1); } static void pnpbios_encode_fixed_mem32(struct pnp_dev *dev, unsigned char *p, struct resource *res) { - unsigned long base = res->start; - unsigned long len = res->end - res->start + 1; + unsigned long base; + unsigned long len; + + if (pnp_resource_enabled(res)) { + base = res->start; + len = res->end - res->start + 1; + } else { + base = 0; + len = 0; + } p[4] = base & 0xff; p[5] = (base >> 8) & 0xff; @@ -578,40 +575,54 @@ static void pnpbios_encode_fixed_mem32(struct pnp_dev *dev, unsigned char *p, p[10] = (len >> 16) & 0xff; p[11] = (len >> 24) & 0xff; - dev_dbg(&dev->dev, " encode fixed_mem32 %#llx-%#llx\n", - (unsigned long long) res->start, (unsigned long long) res->end); + dev_dbg(&dev->dev, " encode fixed_mem32 %#lx-%#lx\n", base, + base + len - 1); } static void pnpbios_encode_irq(struct pnp_dev *dev, unsigned char *p, struct resource *res) { - unsigned long map = 0; + unsigned long map; + + if (pnp_resource_enabled(res)) + map = 1 << res->start; + else + map = 0; - map = 1 << res->start; p[1] = map & 0xff; p[2] = (map >> 8) & 0xff; - dev_dbg(&dev->dev, " encode irq %llu\n", - (unsigned long long)res->start); + dev_dbg(&dev->dev, " encode irq mask %#lx\n", map); } static void pnpbios_encode_dma(struct pnp_dev *dev, unsigned char *p, struct resource *res) { - unsigned long map = 0; + unsigned long map; + + if (pnp_resource_enabled(res)) + map = 1 << res->start; + else + map = 0; - map = 1 << res->start; p[1] = map & 0xff; - dev_dbg(&dev->dev, " encode dma %llu\n", - (unsigned long long)res->start); + dev_dbg(&dev->dev, " encode dma mask %#lx\n", map); } static void pnpbios_encode_port(struct pnp_dev *dev, unsigned char *p, struct resource *res) { - unsigned long base = res->start; - unsigned long len = res->end - res->start + 1; + unsigned long base; + unsigned long len; + + if (pnp_resource_enabled(res)) { + base = res->start; + len = res->end - res->start + 1; + } else { + base = 0; + len = 0; + } p[2] = base & 0xff; p[3] = (base >> 8) & 0xff; @@ -619,8 +630,7 @@ static void pnpbios_encode_port(struct pnp_dev *dev, unsigned char *p, p[5] = (base >> 8) & 0xff; p[7] = len & 0xff; - dev_dbg(&dev->dev, " encode io %#llx-%#llx\n", - (unsigned long long) res->start, (unsigned long long) res->end); + dev_dbg(&dev->dev, " encode io 
%#lx-%#lx\n", base, base + len - 1); } static void pnpbios_encode_fixed_port(struct pnp_dev *dev, unsigned char *p, @@ -629,12 +639,20 @@ static void pnpbios_encode_fixed_port(struct pnp_dev *dev, unsigned char *p, unsigned long base = res->start; unsigned long len = res->end - res->start + 1; + if (pnp_resource_enabled(res)) { + base = res->start; + len = res->end - res->start + 1; + } else { + base = 0; + len = 0; + } + p[1] = base & 0xff; p[2] = (base >> 8) & 0xff; p[3] = len & 0xff; - dev_dbg(&dev->dev, " encode fixed_io %#llx-%#llx\n", - (unsigned long long) res->start, (unsigned long long) res->end); + dev_dbg(&dev->dev, " encode fixed_io %#lx-%#lx\n", base, + base + len - 1); } static unsigned char *pnpbios_encode_allocated_resource_data(struct pnp_dev diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c index 1ff3bb585ab2..55f55ed72dc7 100644 --- a/drivers/pnp/quirks.c +++ b/drivers/pnp/quirks.c @@ -5,6 +5,8 @@ * when building up the resource structure for the first time. * * Copyright (c) 2000 Peter Denison <peterd@pnd-pc.demon.co.uk> + * Copyright (C) 2008 Hewlett-Packard Development Company, L.P. + * Bjorn Helgaas <bjorn.helgaas@hp.com> * * Heavily based on PCI quirks handling which is * @@ -20,203 +22,207 @@ #include <linux/kallsyms.h> #include "base.h" +static void quirk_awe32_add_ports(struct pnp_dev *dev, + struct pnp_option *option, + unsigned int offset) +{ + struct pnp_option *new_option; + + new_option = kmalloc(sizeof(struct pnp_option), GFP_KERNEL); + if (!new_option) { + dev_err(&dev->dev, "couldn't add ioport region to option set " + "%d\n", pnp_option_set(option)); + return; + } + + *new_option = *option; + new_option->u.port.min += offset; + new_option->u.port.max += offset; + list_add(&new_option->list, &option->list); + + dev_info(&dev->dev, "added ioport region %#llx-%#llx to set %d\n", + (unsigned long long) new_option->u.port.min, + (unsigned long long) new_option->u.port.max, + pnp_option_set(option)); +} + static void quirk_awe32_resources(struct pnp_dev *dev) { - struct pnp_port *port, *port2, *port3; - struct pnp_option *res = dev->dependent; + struct pnp_option *option; + unsigned int set = ~0; /* - * Unfortunately the isapnp_add_port_resource is too tightly bound - * into the PnP discovery sequence, and cannot be used. Link in the - * two extra ports (at offset 0x400 and 0x800 from the one given) by - * hand. + * Add two extra ioport regions (at offset 0x400 and 0x800 from the + * one given) to every dependent option set. 
*/ - for (; res; res = res->next) { - port2 = pnp_alloc(sizeof(struct pnp_port)); - if (!port2) - return; - port3 = pnp_alloc(sizeof(struct pnp_port)); - if (!port3) { - kfree(port2); - return; + list_for_each_entry(option, &dev->options, list) { + if (pnp_option_is_dependent(option) && + pnp_option_set(option) != set) { + set = pnp_option_set(option); + quirk_awe32_add_ports(dev, option, 0x800); + quirk_awe32_add_ports(dev, option, 0x400); } - port = res->port; - memcpy(port2, port, sizeof(struct pnp_port)); - memcpy(port3, port, sizeof(struct pnp_port)); - port->next = port2; - port2->next = port3; - port2->min += 0x400; - port2->max += 0x400; - port3->min += 0x800; - port3->max += 0x800; - dev_info(&dev->dev, - "AWE32 quirk - added ioports 0x%lx and 0x%lx\n", - (unsigned long)port2->min, - (unsigned long)port3->min); } } static void quirk_cmi8330_resources(struct pnp_dev *dev) { - struct pnp_option *res = dev->dependent; - unsigned long tmp; - - for (; res; res = res->next) { - - struct pnp_irq *irq; - struct pnp_dma *dma; + struct pnp_option *option; + struct pnp_irq *irq; + struct pnp_dma *dma; - for (irq = res->irq; irq; irq = irq->next) { // Valid irqs are 5, 7, 10 - tmp = 0x04A0; - bitmap_copy(irq->map, &tmp, 16); // 0000 0100 1010 0000 - } + list_for_each_entry(option, &dev->options, list) { + if (!pnp_option_is_dependent(option)) + continue; - for (dma = res->dma; dma; dma = dma->next) // Valid 8bit dma channels are 1,3 + if (option->type == IORESOURCE_IRQ) { + irq = &option->u.irq; + bitmap_zero(irq->map.bits, PNP_IRQ_NR); + __set_bit(5, irq->map.bits); + __set_bit(7, irq->map.bits); + __set_bit(10, irq->map.bits); + dev_info(&dev->dev, "set possible IRQs in " + "option set %d to 5, 7, 10\n", + pnp_option_set(option)); + } else if (option->type == IORESOURCE_DMA) { + dma = &option->u.dma; if ((dma->flags & IORESOURCE_DMA_TYPE_MASK) == - IORESOURCE_DMA_8BIT) - dma->map = 0x000A; + IORESOURCE_DMA_8BIT && + dma->map != 0x0A) { + dev_info(&dev->dev, "changing possible " + "DMA channel mask in option set %d " + "from %#02x to 0x0A (1, 3)\n", + pnp_option_set(option), dma->map); + dma->map = 0x0A; + } + } } - dev_info(&dev->dev, "CMI8330 quirk - forced possible IRQs to 5, 7, 10 " - "and DMA channels to 1, 3\n"); } static void quirk_sb16audio_resources(struct pnp_dev *dev) { + struct pnp_option *option; + unsigned int prev_option_flags = ~0, n = 0; struct pnp_port *port; - struct pnp_option *res = dev->dependent; - int changed = 0; /* - * The default range on the mpu port for these devices is 0x388-0x388. + * The default range on the OPL port for these devices is 0x388-0x388. * Here we increase that range so that two such cards can be * auto-configured. 
*/ + list_for_each_entry(option, &dev->options, list) { + if (prev_option_flags != option->flags) { + prev_option_flags = option->flags; + n = 0; + } - for (; res; res = res->next) { - port = res->port; - if (!port) - continue; - port = port->next; - if (!port) - continue; - port = port->next; - if (!port) - continue; - if (port->min != port->max) - continue; - port->max += 0x70; - changed = 1; + if (pnp_option_is_dependent(option) && + option->type == IORESOURCE_IO) { + n++; + port = &option->u.port; + if (n == 3 && port->min == port->max) { + port->max += 0x70; + dev_info(&dev->dev, "increased option port " + "range from %#llx-%#llx to " + "%#llx-%#llx\n", + (unsigned long long) port->min, + (unsigned long long) port->min, + (unsigned long long) port->min, + (unsigned long long) port->max); + } + } } - if (changed) - dev_info(&dev->dev, "SB audio device quirk - increased port range\n"); } -static struct pnp_option *quirk_isapnp_mpu_options(struct pnp_dev *dev) +static struct pnp_option *pnp_clone_dependent_set(struct pnp_dev *dev, + unsigned int set) { - struct pnp_option *head = NULL; - struct pnp_option *prev = NULL; - struct pnp_option *res; - - /* - * Build a functional IRQ-less variant of each MPU option. - */ - - for (res = dev->dependent; res; res = res->next) { - struct pnp_option *curr; - struct pnp_port *port; - struct pnp_port *copy; + struct pnp_option *tail = NULL, *first_new_option = NULL; + struct pnp_option *option, *new_option; + unsigned int flags; - port = res->port; - if (!port || !res->irq) - continue; + list_for_each_entry(option, &dev->options, list) { + if (pnp_option_is_dependent(option)) + tail = option; + } + if (!tail) { + dev_err(&dev->dev, "no dependent option sets\n"); + return NULL; + } - copy = pnp_alloc(sizeof *copy); - if (!copy) - break; + flags = pnp_new_dependent_set(dev, PNP_RES_PRIORITY_FUNCTIONAL); + list_for_each_entry(option, &dev->options, list) { + if (pnp_option_is_dependent(option) && + pnp_option_set(option) == set) { + new_option = kmalloc(sizeof(struct pnp_option), + GFP_KERNEL); + if (!new_option) { + dev_err(&dev->dev, "couldn't clone dependent " + "set %d\n", set); + return NULL; + } - copy->min = port->min; - copy->max = port->max; - copy->align = port->align; - copy->size = port->size; - copy->flags = port->flags; + *new_option = *option; + new_option->flags = flags; + if (!first_new_option) + first_new_option = new_option; - curr = pnp_build_option(PNP_RES_PRIORITY_FUNCTIONAL); - if (!curr) { - kfree(copy); - break; + list_add(&new_option->list, &tail->list); + tail = new_option; } - curr->port = copy; - - if (prev) - prev->next = curr; - else - head = curr; - prev = curr; } - if (head) - dev_info(&dev->dev, "adding IRQ-less MPU options\n"); - return head; + return first_new_option; } -static void quirk_ad1815_mpu_resources(struct pnp_dev *dev) + +static void quirk_add_irq_optional_dependent_sets(struct pnp_dev *dev) { - struct pnp_option *res; + struct pnp_option *new_option; + unsigned int num_sets, i, set; struct pnp_irq *irq; - /* - * Distribute the independent IRQ over the dependent options - */ - - res = dev->independent; - if (!res) - return; - - irq = res->irq; - if (!irq || irq->next) - return; - - res = dev->dependent; - if (!res) - return; - - while (1) { - struct pnp_irq *copy; - - copy = pnp_alloc(sizeof *copy); - if (!copy) - break; - - memcpy(copy->map, irq->map, sizeof copy->map); - copy->flags = irq->flags; + num_sets = dev->num_dependent_sets; + for (i = 0; i < num_sets; i++) { + new_option = 
pnp_clone_dependent_set(dev, i); + if (!new_option) + return; - copy->next = res->irq; /* Yes, this is NULL */ - res->irq = copy; + set = pnp_option_set(new_option); + while (new_option && pnp_option_set(new_option) == set) { + if (new_option->type == IORESOURCE_IRQ) { + irq = &new_option->u.irq; + irq->flags |= IORESOURCE_IRQ_OPTIONAL; + } + dbg_pnp_show_option(dev, new_option); + new_option = list_entry(new_option->list.next, + struct pnp_option, list); + } - if (!res->next) - break; - res = res->next; + dev_info(&dev->dev, "added dependent option set %d (same as " + "set %d except IRQ optional)\n", set, i); } - kfree(irq); - - res->next = quirk_isapnp_mpu_options(dev); - - res = dev->independent; - res->irq = NULL; } -static void quirk_isapnp_mpu_resources(struct pnp_dev *dev) +static void quirk_ad1815_mpu_resources(struct pnp_dev *dev) { - struct pnp_option *res; + struct pnp_option *option; + struct pnp_irq *irq = NULL; + unsigned int independent_irqs = 0; + + list_for_each_entry(option, &dev->options, list) { + if (option->type == IORESOURCE_IRQ && + !pnp_option_is_dependent(option)) { + independent_irqs++; + irq = &option->u.irq; + } + } - res = dev->dependent; - if (!res) + if (independent_irqs != 1) return; - while (res->next) - res = res->next; - - res->next = quirk_isapnp_mpu_options(dev); + irq->flags |= IORESOURCE_IRQ_OPTIONAL; + dev_info(&dev->dev, "made independent IRQ optional\n"); } #include <linux/pci.h> @@ -248,8 +254,7 @@ static void quirk_system_pci_resources(struct pnp_dev *dev) for (j = 0; (res = pnp_get_resource(dev, IORESOURCE_MEM, j)); j++) { - if (res->flags & IORESOURCE_UNSET || - (res->start == 0 && res->end == 0)) + if (res->start == 0 && res->end == 0) continue; pnp_start = res->start; @@ -312,10 +317,10 @@ static struct pnp_fixup pnp_fixups[] = { {"CTL0043", quirk_sb16audio_resources}, {"CTL0044", quirk_sb16audio_resources}, {"CTL0045", quirk_sb16audio_resources}, - /* Add IRQ-less MPU options */ + /* Add IRQ-optional MPU options */ {"ADS7151", quirk_ad1815_mpu_resources}, - {"ADS7181", quirk_isapnp_mpu_resources}, - {"AZT0002", quirk_isapnp_mpu_resources}, + {"ADS7181", quirk_add_irq_optional_dependent_sets}, + {"AZT0002", quirk_add_irq_optional_dependent_sets}, /* PnP resources that might overlap PCI BARs */ {"PNP0c01", quirk_system_pci_resources}, {"PNP0c02", quirk_system_pci_resources}, diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c index 390b50096e30..4cfe3a1efdfb 100644 --- a/drivers/pnp/resource.c +++ b/drivers/pnp/resource.c @@ -3,6 +3,8 @@ * * based on isapnp.c resource management (c) Jaroslav Kysela <perex@perex.cz> * Copyright 2003 Adam Belay <ambx1@neo.rr.com> + * Copyright (C) 2008 Hewlett-Packard Development Company, L.P. + * Bjorn Helgaas <bjorn.helgaas@hp.com> */ #include <linux/module.h> @@ -28,201 +30,121 @@ static int pnp_reserve_mem[16] = {[0 ... 
15] = -1 }; /* reserve (don't use) some * option registration */ -struct pnp_option *pnp_build_option(int priority) +struct pnp_option *pnp_build_option(struct pnp_dev *dev, unsigned long type, + unsigned int option_flags) { - struct pnp_option *option = pnp_alloc(sizeof(struct pnp_option)); + struct pnp_option *option; + option = kzalloc(sizeof(struct pnp_option), GFP_KERNEL); if (!option) return NULL; - option->priority = priority & 0xff; - /* make sure the priority is valid */ - if (option->priority > PNP_RES_PRIORITY_FUNCTIONAL) - option->priority = PNP_RES_PRIORITY_INVALID; - - return option; -} - -struct pnp_option *pnp_register_independent_option(struct pnp_dev *dev) -{ - struct pnp_option *option; - - option = pnp_build_option(PNP_RES_PRIORITY_PREFERRED); - - /* this should never happen but if it does we'll try to continue */ - if (dev->independent) - dev_err(&dev->dev, "independent resource already registered\n"); - dev->independent = option; + option->flags = option_flags; + option->type = type; - dev_dbg(&dev->dev, "new independent option\n"); + list_add_tail(&option->list, &dev->options); return option; } -struct pnp_option *pnp_register_dependent_option(struct pnp_dev *dev, - int priority) +int pnp_register_irq_resource(struct pnp_dev *dev, unsigned int option_flags, + pnp_irq_mask_t *map, unsigned char flags) { struct pnp_option *option; + struct pnp_irq *irq; - option = pnp_build_option(priority); - - if (dev->dependent) { - struct pnp_option *parent = dev->dependent; - while (parent->next) - parent = parent->next; - parent->next = option; - } else - dev->dependent = option; - - dev_dbg(&dev->dev, "new dependent option (priority %#x)\n", priority); - return option; -} - -int pnp_register_irq_resource(struct pnp_dev *dev, struct pnp_option *option, - struct pnp_irq *data) -{ - struct pnp_irq *ptr; -#ifdef DEBUG - char buf[PNP_IRQ_NR]; /* hex-encoded, so this is overkill but safe */ -#endif + option = pnp_build_option(dev, IORESOURCE_IRQ, option_flags); + if (!option) + return -ENOMEM; - ptr = option->irq; - while (ptr && ptr->next) - ptr = ptr->next; - if (ptr) - ptr->next = data; - else - option->irq = data; + irq = &option->u.irq; + irq->map = *map; + irq->flags = flags; #ifdef CONFIG_PCI { int i; for (i = 0; i < 16; i++) - if (test_bit(i, data->map)) + if (test_bit(i, irq->map.bits)) pcibios_penalize_isa_irq(i, 0); } #endif -#ifdef DEBUG - bitmap_scnprintf(buf, sizeof(buf), data->map, PNP_IRQ_NR); - dev_dbg(&dev->dev, " irq bitmask %s flags %#x\n", buf, - data->flags); -#endif + dbg_pnp_show_option(dev, option); return 0; } -int pnp_register_dma_resource(struct pnp_dev *dev, struct pnp_option *option, - struct pnp_dma *data) +int pnp_register_dma_resource(struct pnp_dev *dev, unsigned int option_flags, + unsigned char map, unsigned char flags) { - struct pnp_dma *ptr; - - ptr = option->dma; - while (ptr && ptr->next) - ptr = ptr->next; - if (ptr) - ptr->next = data; - else - option->dma = data; - - dev_dbg(&dev->dev, " dma bitmask %#x flags %#x\n", data->map, - data->flags); - return 0; -} + struct pnp_option *option; + struct pnp_dma *dma; -int pnp_register_port_resource(struct pnp_dev *dev, struct pnp_option *option, - struct pnp_port *data) -{ - struct pnp_port *ptr; - - ptr = option->port; - while (ptr && ptr->next) - ptr = ptr->next; - if (ptr) - ptr->next = data; - else - option->port = data; - - dev_dbg(&dev->dev, " io " - "min %#x max %#x align %d size %d flags %#x\n", - data->min, data->max, data->align, data->size, data->flags); - return 0; -} + option = 
pnp_build_option(dev, IORESOURCE_DMA, option_flags); + if (!option) + return -ENOMEM; -int pnp_register_mem_resource(struct pnp_dev *dev, struct pnp_option *option, - struct pnp_mem *data) -{ - struct pnp_mem *ptr; - - ptr = option->mem; - while (ptr && ptr->next) - ptr = ptr->next; - if (ptr) - ptr->next = data; - else - option->mem = data; - - dev_dbg(&dev->dev, " mem " - "min %#x max %#x align %d size %d flags %#x\n", - data->min, data->max, data->align, data->size, data->flags); + dma = &option->u.dma; + dma->map = map; + dma->flags = flags; + + dbg_pnp_show_option(dev, option); return 0; } -static void pnp_free_port(struct pnp_port *port) +int pnp_register_port_resource(struct pnp_dev *dev, unsigned int option_flags, + resource_size_t min, resource_size_t max, + resource_size_t align, resource_size_t size, + unsigned char flags) { - struct pnp_port *next; + struct pnp_option *option; + struct pnp_port *port; - while (port) { - next = port->next; - kfree(port); - port = next; - } -} + option = pnp_build_option(dev, IORESOURCE_IO, option_flags); + if (!option) + return -ENOMEM; -static void pnp_free_irq(struct pnp_irq *irq) -{ - struct pnp_irq *next; + port = &option->u.port; + port->min = min; + port->max = max; + port->align = align; + port->size = size; + port->flags = flags; - while (irq) { - next = irq->next; - kfree(irq); - irq = next; - } + dbg_pnp_show_option(dev, option); + return 0; } -static void pnp_free_dma(struct pnp_dma *dma) +int pnp_register_mem_resource(struct pnp_dev *dev, unsigned int option_flags, + resource_size_t min, resource_size_t max, + resource_size_t align, resource_size_t size, + unsigned char flags) { - struct pnp_dma *next; + struct pnp_option *option; + struct pnp_mem *mem; - while (dma) { - next = dma->next; - kfree(dma); - dma = next; - } -} + option = pnp_build_option(dev, IORESOURCE_MEM, option_flags); + if (!option) + return -ENOMEM; -static void pnp_free_mem(struct pnp_mem *mem) -{ - struct pnp_mem *next; + mem = &option->u.mem; + mem->min = min; + mem->max = max; + mem->align = align; + mem->size = size; + mem->flags = flags; - while (mem) { - next = mem->next; - kfree(mem); - mem = next; - } + dbg_pnp_show_option(dev, option); + return 0; } -void pnp_free_option(struct pnp_option *option) +void pnp_free_options(struct pnp_dev *dev) { - struct pnp_option *next; - - while (option) { - next = option->next; - pnp_free_port(option->port); - pnp_free_irq(option->irq); - pnp_free_dma(option->dma); - pnp_free_mem(option->mem); + struct pnp_option *option, *tmp; + + list_for_each_entry_safe(option, tmp, &dev->options, list) { + list_del(&option->list); kfree(option); - option = next; } } @@ -237,7 +159,7 @@ void pnp_free_option(struct pnp_option *option) !((*(enda) < *(startb)) || (*(endb) < *(starta))) #define cannot_compare(flags) \ -((flags) & (IORESOURCE_UNSET | IORESOURCE_DISABLED)) +((flags) & IORESOURCE_DISABLED) int pnp_check_port(struct pnp_dev *dev, struct resource *res) { @@ -364,6 +286,61 @@ static irqreturn_t pnp_test_handler(int irq, void *dev_id) return IRQ_HANDLED; } +#ifdef CONFIG_PCI +static int pci_dev_uses_irq(struct pnp_dev *pnp, struct pci_dev *pci, + unsigned int irq) +{ + u32 class; + u8 progif; + + if (pci->irq == irq) { + dev_dbg(&pnp->dev, "device %s using irq %d\n", + pci_name(pci), irq); + return 1; + } + + /* + * See pci_setup_device() and ata_pci_sff_activate_host() for + * similar IDE legacy detection. 
+ */ + pci_read_config_dword(pci, PCI_CLASS_REVISION, &class); + class >>= 8; /* discard revision ID */ + progif = class & 0xff; + class >>= 8; + + if (class == PCI_CLASS_STORAGE_IDE) { + /* + * Unless both channels are native-PCI mode only, + * treat the compatibility IRQs as busy. + */ + if ((progif & 0x5) != 0x5) + if (pci_get_legacy_ide_irq(pci, 0) == irq || + pci_get_legacy_ide_irq(pci, 1) == irq) { + dev_dbg(&pnp->dev, "legacy IDE device %s " + "using irq %d\n", pci_name(pci), irq); + return 1; + } + } + + return 0; +} +#endif + +static int pci_uses_irq(struct pnp_dev *pnp, unsigned int irq) +{ +#ifdef CONFIG_PCI + struct pci_dev *pci = NULL; + + for_each_pci_dev(pci) { + if (pci_dev_uses_irq(pnp, pci, irq)) { + pci_dev_put(pci); + return 1; + } + } +#endif + return 0; +} + int pnp_check_irq(struct pnp_dev *dev, struct resource *res) { int i; @@ -395,18 +372,9 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res) } } -#ifdef CONFIG_PCI /* check if the resource is being used by a pci device */ - { - struct pci_dev *pci = NULL; - for_each_pci_dev(pci) { - if (pci->irq == *irq) { - pci_dev_put(pci); - return 0; - } - } - } -#endif + if (pci_uses_irq(dev, *irq)) + return 0; /* check if the resource is already in use, skip if the * device is active because it itself may be in use */ @@ -499,81 +467,37 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res) #endif } -struct pnp_resource *pnp_get_pnp_resource(struct pnp_dev *dev, - unsigned int type, unsigned int num) +int pnp_resource_type(struct resource *res) { - struct pnp_resource_table *res = dev->res; - - switch (type) { - case IORESOURCE_IO: - if (num >= PNP_MAX_PORT) - return NULL; - return &res->port[num]; - case IORESOURCE_MEM: - if (num >= PNP_MAX_MEM) - return NULL; - return &res->mem[num]; - case IORESOURCE_IRQ: - if (num >= PNP_MAX_IRQ) - return NULL; - return &res->irq[num]; - case IORESOURCE_DMA: - if (num >= PNP_MAX_DMA) - return NULL; - return &res->dma[num]; - } - return NULL; + return res->flags & (IORESOURCE_IO | IORESOURCE_MEM | + IORESOURCE_IRQ | IORESOURCE_DMA); } struct resource *pnp_get_resource(struct pnp_dev *dev, unsigned int type, unsigned int num) { struct pnp_resource *pnp_res; + struct resource *res; - pnp_res = pnp_get_pnp_resource(dev, type, num); - if (pnp_res) - return &pnp_res->res; - + list_for_each_entry(pnp_res, &dev->resources, list) { + res = &pnp_res->res; + if (pnp_resource_type(res) == type && num-- == 0) + return res; + } return NULL; } EXPORT_SYMBOL(pnp_get_resource); -static struct pnp_resource *pnp_new_resource(struct pnp_dev *dev, int type) +static struct pnp_resource *pnp_new_resource(struct pnp_dev *dev) { struct pnp_resource *pnp_res; - int i; - switch (type) { - case IORESOURCE_IO: - for (i = 0; i < PNP_MAX_PORT; i++) { - pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_IO, i); - if (pnp_res && !pnp_resource_valid(&pnp_res->res)) - return pnp_res; - } - break; - case IORESOURCE_MEM: - for (i = 0; i < PNP_MAX_MEM; i++) { - pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_MEM, i); - if (pnp_res && !pnp_resource_valid(&pnp_res->res)) - return pnp_res; - } - break; - case IORESOURCE_IRQ: - for (i = 0; i < PNP_MAX_IRQ; i++) { - pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_IRQ, i); - if (pnp_res && !pnp_resource_valid(&pnp_res->res)) - return pnp_res; - } - break; - case IORESOURCE_DMA: - for (i = 0; i < PNP_MAX_DMA; i++) { - pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_DMA, i); - if (pnp_res && !pnp_resource_valid(&pnp_res->res)) - return pnp_res; - } - break; - } - return 
NULL; + pnp_res = kzalloc(sizeof(struct pnp_resource), GFP_KERNEL); + if (!pnp_res) + return NULL; + + list_add_tail(&pnp_res->list, &dev->resources); + return pnp_res; } struct pnp_resource *pnp_add_irq_resource(struct pnp_dev *dev, int irq, @@ -581,15 +505,10 @@ struct pnp_resource *pnp_add_irq_resource(struct pnp_dev *dev, int irq, { struct pnp_resource *pnp_res; struct resource *res; - static unsigned char warned; - pnp_res = pnp_new_resource(dev, IORESOURCE_IRQ); + pnp_res = pnp_new_resource(dev); if (!pnp_res) { - if (!warned) { - dev_err(&dev->dev, "can't add resource for IRQ %d\n", - irq); - warned = 1; - } + dev_err(&dev->dev, "can't add resource for IRQ %d\n", irq); return NULL; } @@ -607,15 +526,10 @@ struct pnp_resource *pnp_add_dma_resource(struct pnp_dev *dev, int dma, { struct pnp_resource *pnp_res; struct resource *res; - static unsigned char warned; - pnp_res = pnp_new_resource(dev, IORESOURCE_DMA); + pnp_res = pnp_new_resource(dev); if (!pnp_res) { - if (!warned) { - dev_err(&dev->dev, "can't add resource for DMA %d\n", - dma); - warned = 1; - } + dev_err(&dev->dev, "can't add resource for DMA %d\n", dma); return NULL; } @@ -634,16 +548,12 @@ struct pnp_resource *pnp_add_io_resource(struct pnp_dev *dev, { struct pnp_resource *pnp_res; struct resource *res; - static unsigned char warned; - pnp_res = pnp_new_resource(dev, IORESOURCE_IO); + pnp_res = pnp_new_resource(dev); if (!pnp_res) { - if (!warned) { - dev_err(&dev->dev, "can't add resource for IO " - "%#llx-%#llx\n",(unsigned long long) start, - (unsigned long long) end); - warned = 1; - } + dev_err(&dev->dev, "can't add resource for IO %#llx-%#llx\n", + (unsigned long long) start, + (unsigned long long) end); return NULL; } @@ -663,16 +573,12 @@ struct pnp_resource *pnp_add_mem_resource(struct pnp_dev *dev, { struct pnp_resource *pnp_res; struct resource *res; - static unsigned char warned; - pnp_res = pnp_new_resource(dev, IORESOURCE_MEM); + pnp_res = pnp_new_resource(dev); if (!pnp_res) { - if (!warned) { - dev_err(&dev->dev, "can't add resource for MEM " - "%#llx-%#llx\n",(unsigned long long) start, - (unsigned long long) end); - warned = 1; - } + dev_err(&dev->dev, "can't add resource for MEM %#llx-%#llx\n", + (unsigned long long) start, + (unsigned long long) end); return NULL; } @@ -686,6 +592,52 @@ struct pnp_resource *pnp_add_mem_resource(struct pnp_dev *dev, return pnp_res; } +/* + * Determine whether the specified resource is a possible configuration + * for this device. + */ +int pnp_possible_config(struct pnp_dev *dev, int type, resource_size_t start, + resource_size_t size) +{ + struct pnp_option *option; + struct pnp_port *port; + struct pnp_mem *mem; + struct pnp_irq *irq; + struct pnp_dma *dma; + + list_for_each_entry(option, &dev->options, list) { + if (option->type != type) + continue; + + switch (option->type) { + case IORESOURCE_IO: + port = &option->u.port; + if (port->min == start && port->size == size) + return 1; + break; + case IORESOURCE_MEM: + mem = &option->u.mem; + if (mem->min == start && mem->size == size) + return 1; + break; + case IORESOURCE_IRQ: + irq = &option->u.irq; + if (start < PNP_IRQ_NR && + test_bit(start, irq->map.bits)) + return 1; + break; + case IORESOURCE_DMA: + dma = &option->u.dma; + if (dma->map & (1 << start)) + return 1; + break; + } + } + + return 0; +} +EXPORT_SYMBOL(pnp_possible_config); + /* format is: pnp_reserve_irq=irq1[,irq2] .... 
*/ static int __init pnp_setup_reserve_irq(char *str) { diff --git a/drivers/pnp/support.c b/drivers/pnp/support.c index 95b076c18c07..bbf78ef4ba02 100644 --- a/drivers/pnp/support.c +++ b/drivers/pnp/support.c @@ -2,6 +2,8 @@ * support.c - standard functions for the use of pnp protocol drivers * * Copyright 2003 Adam Belay <ambx1@neo.rr.com> + * Copyright (C) 2008 Hewlett-Packard Development Company, L.P. + * Bjorn Helgaas <bjorn.helgaas@hp.com> */ #include <linux/module.h> @@ -16,6 +18,10 @@ */ int pnp_is_active(struct pnp_dev *dev) { + /* + * I don't think this is very reliable because pnp_disable_dev() + * only clears out auto-assigned resources. + */ if (!pnp_port_start(dev, 0) && pnp_port_len(dev, 0) <= 1 && !pnp_mem_start(dev, 0) && pnp_mem_len(dev, 0) <= 1 && pnp_irq(dev, 0) == -1 && pnp_dma(dev, 0) == -1) @@ -52,39 +58,154 @@ void pnp_eisa_id_to_string(u32 id, char *str) str[7] = '\0'; } +char *pnp_resource_type_name(struct resource *res) +{ + switch (pnp_resource_type(res)) { + case IORESOURCE_IO: + return "io"; + case IORESOURCE_MEM: + return "mem"; + case IORESOURCE_IRQ: + return "irq"; + case IORESOURCE_DMA: + return "dma"; + } + return NULL; +} + void dbg_pnp_show_resources(struct pnp_dev *dev, char *desc) { #ifdef DEBUG + char buf[128]; + int len = 0; + struct pnp_resource *pnp_res; struct resource *res; - int i; - dev_dbg(&dev->dev, "current resources: %s\n", desc); - - for (i = 0; i < PNP_MAX_IRQ; i++) { - res = pnp_get_resource(dev, IORESOURCE_IRQ, i); - if (res && !(res->flags & IORESOURCE_UNSET)) - dev_dbg(&dev->dev, " irq %lld flags %#lx\n", - (unsigned long long) res->start, res->flags); + if (list_empty(&dev->resources)) { + dev_dbg(&dev->dev, "%s: no current resources\n", desc); + return; } - for (i = 0; i < PNP_MAX_DMA; i++) { - res = pnp_get_resource(dev, IORESOURCE_DMA, i); - if (res && !(res->flags & IORESOURCE_UNSET)) - dev_dbg(&dev->dev, " dma %lld flags %#lx\n", - (unsigned long long) res->start, res->flags); + + dev_dbg(&dev->dev, "%s: current resources:\n", desc); + list_for_each_entry(pnp_res, &dev->resources, list) { + res = &pnp_res->res; + + len += snprintf(buf + len, sizeof(buf) - len, " %-3s ", + pnp_resource_type_name(res)); + + if (res->flags & IORESOURCE_DISABLED) { + dev_dbg(&dev->dev, "%sdisabled\n", buf); + continue; + } + + switch (pnp_resource_type(res)) { + case IORESOURCE_IO: + case IORESOURCE_MEM: + len += snprintf(buf + len, sizeof(buf) - len, + "%#llx-%#llx flags %#lx", + (unsigned long long) res->start, + (unsigned long long) res->end, + res->flags); + break; + case IORESOURCE_IRQ: + case IORESOURCE_DMA: + len += snprintf(buf + len, sizeof(buf) - len, + "%lld flags %#lx", + (unsigned long long) res->start, + res->flags); + break; + } + dev_dbg(&dev->dev, "%s\n", buf); } - for (i = 0; i < PNP_MAX_PORT; i++) { - res = pnp_get_resource(dev, IORESOURCE_IO, i); - if (res && !(res->flags & IORESOURCE_UNSET)) - dev_dbg(&dev->dev, " io %#llx-%#llx flags %#lx\n", - (unsigned long long) res->start, - (unsigned long long) res->end, res->flags); +#endif +} + +char *pnp_option_priority_name(struct pnp_option *option) +{ + switch (pnp_option_priority(option)) { + case PNP_RES_PRIORITY_PREFERRED: + return "preferred"; + case PNP_RES_PRIORITY_ACCEPTABLE: + return "acceptable"; + case PNP_RES_PRIORITY_FUNCTIONAL: + return "functional"; } - for (i = 0; i < PNP_MAX_MEM; i++) { - res = pnp_get_resource(dev, IORESOURCE_MEM, i); - if (res && !(res->flags & IORESOURCE_UNSET)) - dev_dbg(&dev->dev, " mem %#llx-%#llx flags %#lx\n", - (unsigned long long) 
res->start, - (unsigned long long) res->end, res->flags); + return "invalid"; +} + +void dbg_pnp_show_option(struct pnp_dev *dev, struct pnp_option *option) +{ +#ifdef DEBUG + char buf[128]; + int len = 0, i; + struct pnp_port *port; + struct pnp_mem *mem; + struct pnp_irq *irq; + struct pnp_dma *dma; + + if (pnp_option_is_dependent(option)) + len += snprintf(buf + len, sizeof(buf) - len, + " dependent set %d (%s) ", + pnp_option_set(option), + pnp_option_priority_name(option)); + else + len += snprintf(buf + len, sizeof(buf) - len, " independent "); + + switch (option->type) { + case IORESOURCE_IO: + port = &option->u.port; + len += snprintf(buf + len, sizeof(buf) - len, "io min %#llx " + "max %#llx align %lld size %lld flags %#x", + (unsigned long long) port->min, + (unsigned long long) port->max, + (unsigned long long) port->align, + (unsigned long long) port->size, port->flags); + break; + case IORESOURCE_MEM: + mem = &option->u.mem; + len += snprintf(buf + len, sizeof(buf) - len, "mem min %#llx " + "max %#llx align %lld size %lld flags %#x", + (unsigned long long) mem->min, + (unsigned long long) mem->max, + (unsigned long long) mem->align, + (unsigned long long) mem->size, mem->flags); + break; + case IORESOURCE_IRQ: + irq = &option->u.irq; + len += snprintf(buf + len, sizeof(buf) - len, "irq"); + if (bitmap_empty(irq->map.bits, PNP_IRQ_NR)) + len += snprintf(buf + len, sizeof(buf) - len, + " <none>"); + else { + for (i = 0; i < PNP_IRQ_NR; i++) + if (test_bit(i, irq->map.bits)) + len += snprintf(buf + len, + sizeof(buf) - len, + " %d", i); + } + len += snprintf(buf + len, sizeof(buf) - len, " flags %#x", + irq->flags); + if (irq->flags & IORESOURCE_IRQ_OPTIONAL) + len += snprintf(buf + len, sizeof(buf) - len, + " (optional)"); + break; + case IORESOURCE_DMA: + dma = &option->u.dma; + len += snprintf(buf + len, sizeof(buf) - len, "dma"); + if (!dma->map) + len += snprintf(buf + len, sizeof(buf) - len, + " <none>"); + else { + for (i = 0; i < 8; i++) + if (dma->map & (1 << i)) + len += snprintf(buf + len, + sizeof(buf) - len, + " %d", i); + } + len += snprintf(buf + len, sizeof(buf) - len, " (bitmask %#x) " + "flags %#x", dma->map, dma->flags); + break; } + dev_dbg(&dev->dev, "%s\n", buf); #endif } diff --git a/drivers/pnp/system.c b/drivers/pnp/system.c index cf4e07b01d48..764f3a310685 100644 --- a/drivers/pnp/system.c +++ b/drivers/pnp/system.c @@ -60,7 +60,7 @@ static void reserve_resources_of_dev(struct pnp_dev *dev) int i; for (i = 0; (res = pnp_get_resource(dev, IORESOURCE_IO, i)); i++) { - if (res->flags & IORESOURCE_UNSET) + if (res->flags & IORESOURCE_DISABLED) continue; if (res->start == 0) continue; /* disabled */ @@ -81,7 +81,7 @@ static void reserve_resources_of_dev(struct pnp_dev *dev) } for (i = 0; (res = pnp_get_resource(dev, IORESOURCE_MEM, i)); i++) { - if (res->flags & (IORESOURCE_UNSET | IORESOURCE_DISABLED)) + if (res->flags & IORESOURCE_DISABLED) continue; reserve_range(dev, res->start, res->end, 0); diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c index d91df38ee4f7..85fcb4371054 100644 --- a/drivers/s390/block/dasd_diag.c +++ b/drivers/s390/block/dasd_diag.c @@ -333,7 +333,8 @@ dasd_diag_check_device(struct dasd_device *device) if (IS_ERR(block)) { DEV_MESSAGE(KERN_WARNING, device, "%s", "could not allocate dasd block structure"); - kfree(device->private); + device->private = NULL; + kfree(private); return PTR_ERR(block); } device->block = block; @@ -348,7 +349,8 @@ dasd_diag_check_device(struct dasd_device *device) if (rc) { 
DEV_MESSAGE(KERN_WARNING, device, "failed to retrieve device " "information (rc=%d)", rc); - return -ENOTSUPP; + rc = -EOPNOTSUPP; + goto out; } /* Figure out position of label block */ @@ -362,7 +364,8 @@ dasd_diag_check_device(struct dasd_device *device) default: DEV_MESSAGE(KERN_WARNING, device, "unsupported device class " "(class=%d)", private->rdc_data.vdev_class); - return -ENOTSUPP; + rc = -EOPNOTSUPP; + goto out; } DBF_DEV_EVENT(DBF_INFO, device, @@ -379,7 +382,8 @@ dasd_diag_check_device(struct dasd_device *device) if (label == NULL) { DEV_MESSAGE(KERN_WARNING, device, "%s", "No memory to allocate initialization request"); - return -ENOMEM; + rc = -ENOMEM; + goto out; } rc = 0; end_block = 0; @@ -403,7 +407,7 @@ dasd_diag_check_device(struct dasd_device *device) DEV_MESSAGE(KERN_WARNING, device, "%s", "DIAG call failed"); rc = -EOPNOTSUPP; - goto out; + goto out_label; } mdsk_term_io(device); if (rc == 0) @@ -413,7 +417,7 @@ dasd_diag_check_device(struct dasd_device *device) DEV_MESSAGE(KERN_WARNING, device, "device access failed " "(rc=%d)", rc); rc = -EIO; - goto out; + goto out_label; } /* check for label block */ if (memcmp(label->label_id, DASD_DIAG_CMS1, @@ -439,8 +443,15 @@ dasd_diag_check_device(struct dasd_device *device) (unsigned long) (block->blocks << block->s2b_shift) >> 1); } -out: +out_label: free_page((long) label); +out: + if (rc) { + device->block = NULL; + dasd_free_block(block); + device->private = NULL; + kfree(private); + } return rc; } diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index e0b77210d37a..3590fdb5b2fd 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -1418,8 +1418,10 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device, /* service information message SIM */ - if ((irb->ecw[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE) { + if (irb->esw.esw0.erw.cons && (irb->ecw[27] & DASD_SENSE_BIT_0) && + ((irb->ecw[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) { dasd_3990_erp_handle_sim(device, irb->ecw); + dasd_schedule_device_bh(device); return; } diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index aee4656127f7..aa0c533423a5 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c @@ -139,7 +139,8 @@ dasd_fba_check_characteristics(struct dasd_device *device) if (IS_ERR(block)) { DEV_MESSAGE(KERN_WARNING, device, "%s", "could not allocate dasd block structure"); - kfree(device->private); + device->private = NULL; + kfree(private); return PTR_ERR(block); } device->block = block; @@ -152,6 +153,10 @@ dasd_fba_check_characteristics(struct dasd_device *device) DEV_MESSAGE(KERN_WARNING, device, "Read device characteristics returned error %d", rc); + device->block = NULL; + dasd_free_block(block); + device->private = NULL; + kfree(private); return rc; } diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c index 0a9f1cccbe58..b0ac44b27127 100644 --- a/drivers/s390/char/vmur.c +++ b/drivers/s390/char/vmur.c @@ -345,7 +345,7 @@ static int get_urd_class(struct urdev *urd) cc = diag210(&ur_diag210); switch (cc) { case 0: - return -ENOTSUPP; + return -EOPNOTSUPP; case 2: return ur_diag210.vrdcvcla; /* virtual device class */ case 3: @@ -621,7 +621,7 @@ static int verify_device(struct urdev *urd) case DEV_CLASS_UR_I: return verify_uri_device(urd); default: - return -ENOTSUPP; + return -EOPNOTSUPP; } } @@ -654,7 +654,7 @@ static int get_file_reclen(struct urdev *urd) case DEV_CLASS_UR_I: return get_uri_file_reclen(urd); default: - 
return -ENOTSUPP; + return -EOPNOTSUPP; } } @@ -827,7 +827,7 @@ static int ur_probe(struct ccw_device *cdev) goto fail_remove_attr; } if ((urd->class != DEV_CLASS_UR_I) && (urd->class != DEV_CLASS_UR_O)) { - rc = -ENOTSUPP; + rc = -EOPNOTSUPP; goto fail_remove_attr; } spin_lock_irq(get_ccwdev_lock(cdev)); @@ -892,7 +892,7 @@ static int ur_set_online(struct ccw_device *cdev) } else if (urd->cdev->id.cu_type == PRINTER_DEVTYPE) { sprintf(node_id, "vmprt-%s", cdev->dev.bus_id); } else { - rc = -ENOTSUPP; + rc = -EOPNOTSUPP; goto fail_free_cdev; } diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index 047dd92ae804..7fd84be11931 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c @@ -29,6 +29,7 @@ #define TO_USER 0 #define TO_KERNEL 1 +#define CHUNK_INFO_SIZE 34 /* 2 16-byte char, each followed by blank */ enum arch_id { ARCH_S390 = 0, @@ -51,6 +52,7 @@ static struct debug_info *zcore_dbf; static int hsa_available; static struct dentry *zcore_dir; static struct dentry *zcore_file; +static struct dentry *zcore_memmap_file; /* * Copy memory from HSA to kernel or user memory (not reentrant): @@ -476,6 +478,54 @@ static const struct file_operations zcore_fops = { .release = zcore_release, }; +static ssize_t zcore_memmap_read(struct file *filp, char __user *buf, + size_t count, loff_t *ppos) +{ + return simple_read_from_buffer(buf, count, ppos, filp->private_data, + MEMORY_CHUNKS * CHUNK_INFO_SIZE); +} + +static int zcore_memmap_open(struct inode *inode, struct file *filp) +{ + int i; + char *buf; + struct mem_chunk *chunk_array; + + chunk_array = kzalloc(MEMORY_CHUNKS * sizeof(struct mem_chunk), + GFP_KERNEL); + if (!chunk_array) + return -ENOMEM; + detect_memory_layout(chunk_array); + buf = kzalloc(MEMORY_CHUNKS * CHUNK_INFO_SIZE, GFP_KERNEL); + if (!buf) { + kfree(chunk_array); + return -ENOMEM; + } + for (i = 0; i < MEMORY_CHUNKS; i++) { + sprintf(buf + (i * CHUNK_INFO_SIZE), "%016llx %016llx ", + (unsigned long long) chunk_array[i].addr, + (unsigned long long) chunk_array[i].size); + if (chunk_array[i].size == 0) + break; + } + kfree(chunk_array); + filp->private_data = buf; + return 0; +} + +static int zcore_memmap_release(struct inode *inode, struct file *filp) +{ + kfree(filp->private_data); + return 0; +} + +static const struct file_operations zcore_memmap_fops = { + .owner = THIS_MODULE, + .read = zcore_memmap_read, + .open = zcore_memmap_open, + .release = zcore_memmap_release, +}; + static void __init set_s390_lc_mask(union save_area *map) { @@ -554,18 +604,44 @@ static int __init check_sdias(void) return 0; } -static void __init zcore_header_init(int arch, struct zcore_header *hdr) +static int __init get_mem_size(unsigned long *mem) +{ + int i; + struct mem_chunk *chunk_array; + + chunk_array = kzalloc(MEMORY_CHUNKS * sizeof(struct mem_chunk), + GFP_KERNEL); + if (!chunk_array) + return -ENOMEM; + detect_memory_layout(chunk_array); + for (i = 0; i < MEMORY_CHUNKS; i++) { + if (chunk_array[i].size == 0) + break; + *mem += chunk_array[i].size; + } + kfree(chunk_array); + return 0; +} + +static int __init zcore_header_init(int arch, struct zcore_header *hdr) { + int rc; + unsigned long memory = 0; + if (arch == ARCH_S390X) hdr->arch_id = DUMP_ARCH_S390X; else hdr->arch_id = DUMP_ARCH_S390; - hdr->mem_size = sys_info.mem_size; - hdr->rmem_size = sys_info.mem_size; + rc = get_mem_size(&memory); + if (rc) + return rc; + hdr->mem_size = memory; + hdr->rmem_size = memory; hdr->mem_end = sys_info.mem_size; - hdr->num_pages = sys_info.mem_size / PAGE_SIZE; + 
hdr->num_pages = memory / PAGE_SIZE; hdr->tod = get_clock(); get_cpu_id(&hdr->cpu_id); + return 0; } static int __init zcore_init(void) @@ -608,7 +684,9 @@ static int __init zcore_init(void) if (rc) goto fail; - zcore_header_init(arch, &zcore_header); + rc = zcore_header_init(arch, &zcore_header); + if (rc) + goto fail; zcore_dir = debugfs_create_dir("zcore" , NULL); if (!zcore_dir) { @@ -618,13 +696,22 @@ static int __init zcore_init(void) zcore_file = debugfs_create_file("mem", S_IRUSR, zcore_dir, NULL, &zcore_fops); if (!zcore_file) { - debugfs_remove(zcore_dir); rc = -ENOMEM; - goto fail; + goto fail_dir; + } + zcore_memmap_file = debugfs_create_file("memmap", S_IRUSR, zcore_dir, + NULL, &zcore_memmap_fops); + if (!zcore_memmap_file) { + rc = -ENOMEM; + goto fail_file; } hsa_available = 1; return 0; +fail_file: + debugfs_remove(zcore_file); +fail_dir: + debugfs_remove(zcore_dir); fail: diag308(DIAG308_REL_HSA, NULL); return rc; diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile index 91e9e3f3073a..bd79bd165396 100644 --- a/drivers/s390/cio/Makefile +++ b/drivers/s390/cio/Makefile @@ -9,4 +9,6 @@ ccw_device-objs += device_id.o device_pgid.o device_status.o obj-y += ccw_device.o cmf.o obj-$(CONFIG_CHSC_SCH) += chsc_sch.o obj-$(CONFIG_CCWGROUP) += ccwgroup.o + +qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_perf.o qdio_setup.o obj-$(CONFIG_QDIO) += qdio.o diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 65264a38057d..29826fdd47b8 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -27,7 +27,13 @@ static void *sei_page; -static int chsc_error_from_response(int response) +/** + * chsc_error_from_response() - convert a chsc response to an error + * @response: chsc response code + * + * Returns an appropriate Linux error code for @response. + */ +int chsc_error_from_response(int response) { switch (response) { case 0x0001: @@ -45,6 +51,7 @@ static int chsc_error_from_response(int response) return -EIO; } } +EXPORT_SYMBOL_GPL(chsc_error_from_response); struct chsc_ssd_area { struct chsc_header request; diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index fb6c4d6c45b4..ba59bceace98 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h @@ -101,4 +101,6 @@ void chsc_chp_online(struct chp_id chpid); void chsc_chp_offline(struct chp_id chpid); int chsc_get_channel_measurement_chars(struct channel_path *chp); +int chsc_error_from_response(int response); + #endif diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c deleted file mode 100644 index 2bf36e14b102..000000000000 --- a/drivers/s390/cio/qdio.c +++ /dev/null @@ -1,3929 +0,0 @@ -/* - * - * linux/drivers/s390/cio/qdio.c - * - * Linux for S/390 QDIO base support, Hipersocket base support - * version 2 - * - * Copyright 2000,2002 IBM Corporation - * Author(s): Utz Bacher <utz.bacher@de.ibm.com> - * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com> - * - * Restriction: only 63 iqdio subchannels would have its own indicator, - * after that, subsequent subchannels share one indicator - * - * - * - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ - -#include <linux/module.h> -#include <linux/init.h> -#include <linux/delay.h> -#include <linux/slab.h> -#include <linux/kernel.h> -#include <linux/proc_fs.h> -#include <linux/timer.h> -#include <linux/mempool.h> -#include <linux/semaphore.h> - -#include <asm/ccwdev.h> -#include <asm/io.h> -#include <asm/atomic.h> -#include <asm/timex.h> - -#include <asm/debug.h> -#include <asm/s390_rdev.h> -#include <asm/qdio.h> -#include <asm/airq.h> - -#include "cio.h" -#include "css.h" -#include "device.h" -#include "qdio.h" -#include "ioasm.h" -#include "chsc.h" - -/****************** MODULE PARAMETER VARIABLES ********************/ -MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>"); -MODULE_DESCRIPTION("QDIO base support version 2, " \ - "Copyright 2000 IBM Corporation"); -MODULE_LICENSE("GPL"); - -/******************** HERE WE GO ***********************************/ - -static const char version[] = "QDIO base support version 2"; - -static int qdio_performance_stats = 0; -static int proc_perf_file_registration; -static struct qdio_perf_stats perf_stats; - -static int hydra_thinints; -static int is_passthrough = 0; -static int omit_svs; - -static int indicator_used[INDICATORS_PER_CACHELINE]; -static __u32 * volatile indicators; -static __u32 volatile spare_indicator; -static atomic_t spare_indicator_usecount; -#define QDIO_MEMPOOL_SCSSC_ELEMENTS 2 -static mempool_t *qdio_mempool_scssc; -static struct kmem_cache *qdio_q_cache; - -static debug_info_t *qdio_dbf_setup; -static debug_info_t *qdio_dbf_sbal; -static debug_info_t *qdio_dbf_trace; -static debug_info_t *qdio_dbf_sense; -#ifdef CONFIG_QDIO_DEBUG -static debug_info_t *qdio_dbf_slsb_out; -static debug_info_t *qdio_dbf_slsb_in; -#endif /* CONFIG_QDIO_DEBUG */ - -/* iQDIO stuff: */ -static volatile struct qdio_q *tiq_list=NULL; /* volatile as it could change - during a while loop */ -static DEFINE_SPINLOCK(ttiq_list_lock); -static void *tiqdio_ind; -static void tiqdio_tl(unsigned long); -static DECLARE_TASKLET(tiqdio_tasklet,tiqdio_tl,0); - -/* not a macro, as one of the arguments is atomic_read */ -static inline int -qdio_min(int a,int b) -{ - if (a<b) - return a; - else - return b; -} - -/***************** SCRUBBER HELPER ROUTINES **********************/ -#ifdef CONFIG_64BIT -static inline void qdio_perf_stat_inc(atomic64_t *count) -{ - if (qdio_performance_stats) - atomic64_inc(count); -} - -static inline void qdio_perf_stat_dec(atomic64_t *count) -{ - if (qdio_performance_stats) - atomic64_dec(count); -} -#else /* CONFIG_64BIT */ -static inline void qdio_perf_stat_inc(atomic_t *count) -{ - if (qdio_performance_stats) - atomic_inc(count); -} - -static inline void qdio_perf_stat_dec(atomic_t *count) -{ - if (qdio_performance_stats) - atomic_dec(count); -} -#endif /* CONFIG_64BIT */ - -static inline __u64 -qdio_get_micros(void) -{ - return (get_clock() >> 12); /* time>>12 is microseconds */ -} - -/* - * unfortunately, we can't just xchg the values; in do_QDIO we want to reserve - * the q in any case, so that we'll not be interrupted when we are in - * qdio_mark_tiq... 
shouldn't have a really bad impact, as reserving almost - * ever works (last famous words) - */ -static inline int -qdio_reserve_q(struct qdio_q *q) -{ - return atomic_add_return(1,&q->use_count) - 1; -} - -static inline void -qdio_release_q(struct qdio_q *q) -{ - atomic_dec(&q->use_count); -} - -/*check ccq */ -static int -qdio_check_ccq(struct qdio_q *q, unsigned int ccq) -{ - char dbf_text[15]; - - if (ccq == 0 || ccq == 32) - return 0; - if (ccq == 96 || ccq == 97) - return 1; - /*notify devices immediately*/ - sprintf(dbf_text,"%d", ccq); - QDIO_DBF_TEXT2(1,trace,dbf_text); - return -EIO; -} -/* EQBS: extract buffer states */ -static int -qdio_do_eqbs(struct qdio_q *q, unsigned char *state, - unsigned int *start, unsigned int *cnt) -{ - struct qdio_irq *irq; - unsigned int tmp_cnt, q_no, ccq; - int rc ; - char dbf_text[15]; - - ccq = 0; - tmp_cnt = *cnt; - irq = (struct qdio_irq*)q->irq_ptr; - q_no = q->q_no; - if(!q->is_input_q) - q_no += irq->no_input_qs; -again: - ccq = do_eqbs(irq->sch_token, state, q_no, start, cnt); - rc = qdio_check_ccq(q, ccq); - if ((ccq == 96) && (tmp_cnt != *cnt)) - rc = 0; - if (rc == 1) { - QDIO_DBF_TEXT5(1,trace,"eqAGAIN"); - goto again; - } - if (rc < 0) { - QDIO_DBF_TEXT2(1,trace,"eqberr"); - sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt, *cnt, ccq, q_no); - QDIO_DBF_TEXT2(1,trace,dbf_text); - q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION| - QDIO_STATUS_LOOK_FOR_ERROR, - 0, 0, 0, -1, -1, q->int_parm); - return 0; - } - return (tmp_cnt - *cnt); -} - -/* SQBS: set buffer states */ -static int -qdio_do_sqbs(struct qdio_q *q, unsigned char state, - unsigned int *start, unsigned int *cnt) -{ - struct qdio_irq *irq; - unsigned int tmp_cnt, q_no, ccq; - int rc; - char dbf_text[15]; - - ccq = 0; - tmp_cnt = *cnt; - irq = (struct qdio_irq*)q->irq_ptr; - q_no = q->q_no; - if(!q->is_input_q) - q_no += irq->no_input_qs; -again: - ccq = do_sqbs(irq->sch_token, state, q_no, start, cnt); - rc = qdio_check_ccq(q, ccq); - if (rc == 1) { - QDIO_DBF_TEXT5(1,trace,"sqAGAIN"); - goto again; - } - if (rc < 0) { - QDIO_DBF_TEXT3(1,trace,"sqberr"); - sprintf(dbf_text,"%2x,%2x",tmp_cnt,*cnt); - QDIO_DBF_TEXT3(1,trace,dbf_text); - sprintf(dbf_text,"%d,%d",ccq,q_no); - QDIO_DBF_TEXT3(1,trace,dbf_text); - q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION| - QDIO_STATUS_LOOK_FOR_ERROR, - 0, 0, 0, -1, -1, q->int_parm); - return 0; - } - return (tmp_cnt - *cnt); -} - -static inline int -qdio_set_slsb(struct qdio_q *q, unsigned int *bufno, - unsigned char state, unsigned int *count) -{ - volatile char *slsb; - struct qdio_irq *irq; - - irq = (struct qdio_irq*)q->irq_ptr; - if (!irq->is_qebsm) { - slsb = (char *)&q->slsb.acc.val[(*bufno)]; - xchg(slsb, state); - return 1; - } - return qdio_do_sqbs(q, state, bufno, count); -} - -#ifdef CONFIG_QDIO_DEBUG -static inline void -qdio_trace_slsb(struct qdio_q *q) -{ - if (q->queue_type==QDIO_TRACE_QTYPE) { - if (q->is_input_q) - QDIO_DBF_HEX2(0,slsb_in,&q->slsb, - QDIO_MAX_BUFFERS_PER_Q); - else - QDIO_DBF_HEX2(0,slsb_out,&q->slsb, - QDIO_MAX_BUFFERS_PER_Q); - } -} -#endif - -static inline int -set_slsb(struct qdio_q *q, unsigned int *bufno, - unsigned char state, unsigned int *count) -{ - int rc; -#ifdef CONFIG_QDIO_DEBUG - qdio_trace_slsb(q); -#endif - rc = qdio_set_slsb(q, bufno, state, count); -#ifdef CONFIG_QDIO_DEBUG - qdio_trace_slsb(q); -#endif - return rc; -} -static inline int -qdio_siga_sync(struct qdio_q *q, unsigned int gpr2, - unsigned int gpr3) -{ - int cc; - - QDIO_DBF_TEXT4(0,trace,"sigasync"); - 
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - qdio_perf_stat_inc(&perf_stats.siga_syncs); - - cc = do_siga_sync(q->schid, gpr2, gpr3); - if (cc) - QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*)); - - return cc; -} - -static inline int -qdio_siga_sync_q(struct qdio_q *q) -{ - if (q->is_input_q) - return qdio_siga_sync(q, 0, q->mask); - return qdio_siga_sync(q, q->mask, 0); -} - -static int -__do_siga_output(struct qdio_q *q, unsigned int *busy_bit) -{ - struct qdio_irq *irq; - unsigned int fc = 0; - unsigned long schid; - - irq = (struct qdio_irq *) q->irq_ptr; - if (!irq->is_qebsm) - schid = *((u32 *)&q->schid); - else { - schid = irq->sch_token; - fc |= 0x80; - } - return do_siga_output(schid, q->mask, busy_bit, fc); -} - -/* - * returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns - * an access exception - */ -static int -qdio_siga_output(struct qdio_q *q) -{ - int cc; - __u32 busy_bit; - __u64 start_time=0; - - qdio_perf_stat_inc(&perf_stats.siga_outs); - - QDIO_DBF_TEXT4(0,trace,"sigaout"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - for (;;) { - cc = __do_siga_output(q, &busy_bit); -//QDIO_PRINT_ERR("cc=%x, busy=%x\n",cc,busy_bit); - if ((cc==2) && (busy_bit) && (q->is_iqdio_q)) { - if (!start_time) - start_time=NOW; - if ((NOW-start_time)>QDIO_BUSY_BIT_PATIENCE) - break; - } else - break; - } - - if ((cc==2) && (busy_bit)) - cc |= QDIO_SIGA_ERROR_B_BIT_SET; - - if (cc) - QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*)); - - return cc; -} - -static int -qdio_siga_input(struct qdio_q *q) -{ - int cc; - - QDIO_DBF_TEXT4(0,trace,"sigain"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - qdio_perf_stat_inc(&perf_stats.siga_ins); - - cc = do_siga_input(q->schid, q->mask); - - if (cc) - QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*)); - - return cc; -} - -/* locked by the locks in qdio_activate and qdio_cleanup */ -static __u32 * -qdio_get_indicator(void) -{ - int i; - - for (i = 0; i < INDICATORS_PER_CACHELINE; i++) - if (!indicator_used[i]) { - indicator_used[i]=1; - return indicators+i; - } - atomic_inc(&spare_indicator_usecount); - return (__u32 * volatile) &spare_indicator; -} - -/* locked by the locks in qdio_activate and qdio_cleanup */ -static void -qdio_put_indicator(__u32 *addr) -{ - int i; - - if ( (addr) && (addr!=&spare_indicator) ) { - i=addr-indicators; - indicator_used[i]=0; - } - if (addr == &spare_indicator) - atomic_dec(&spare_indicator_usecount); -} - -static inline void -tiqdio_clear_summary_bit(__u32 *location) -{ - QDIO_DBF_TEXT5(0,trace,"clrsummb"); - QDIO_DBF_HEX5(0,trace,&location,sizeof(void*)); - - xchg(location,0); -} - -static inline void -tiqdio_set_summary_bit(__u32 *location) -{ - QDIO_DBF_TEXT5(0,trace,"setsummb"); - QDIO_DBF_HEX5(0,trace,&location,sizeof(void*)); - - xchg(location,-1); -} - -static inline void -tiqdio_sched_tl(void) -{ - tasklet_hi_schedule(&tiqdio_tasklet); -} - -static void -qdio_mark_tiq(struct qdio_q *q) -{ - unsigned long flags; - - QDIO_DBF_TEXT4(0,trace,"mark iq"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - spin_lock_irqsave(&ttiq_list_lock,flags); - if (unlikely(atomic_read(&q->is_in_shutdown))) - goto out_unlock; - - if (!q->is_input_q) - goto out_unlock; - - if ((q->list_prev) || (q->list_next)) - goto out_unlock; - - if (!tiq_list) { - tiq_list=q; - q->list_prev=q; - q->list_next=q; - } else { - q->list_next=tiq_list; - q->list_prev=tiq_list->list_prev; - tiq_list->list_prev->list_next=q; - tiq_list->list_prev=q; - } - spin_unlock_irqrestore(&ttiq_list_lock,flags); - - tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind); - 
tiqdio_sched_tl(); - return; -out_unlock: - spin_unlock_irqrestore(&ttiq_list_lock,flags); - return; -} - -static inline void -qdio_mark_q(struct qdio_q *q) -{ - QDIO_DBF_TEXT4(0,trace,"mark q"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - if (unlikely(atomic_read(&q->is_in_shutdown))) - return; - - tasklet_schedule(&q->tasklet); -} - -static int -qdio_stop_polling(struct qdio_q *q) -{ -#ifdef QDIO_USE_PROCESSING_STATE - unsigned int tmp, gsf, count = 1; - unsigned char state = 0; - struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr; - - if (!atomic_xchg(&q->polling,0)) - return 1; - - QDIO_DBF_TEXT4(0,trace,"stoppoll"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - /* show the card that we are not polling anymore */ - if (!q->is_input_q) - return 1; - - tmp = gsf = GET_SAVED_FRONTIER(q); - tmp = ((tmp + QDIO_MAX_BUFFERS_PER_Q-1) & (QDIO_MAX_BUFFERS_PER_Q-1) ); - set_slsb(q, &tmp, SLSB_P_INPUT_NOT_INIT, &count); - - /* - * we don't issue this SYNC_MEMORY, as we trust Rick T and - * moreover will not use the PROCESSING state under VM, so - * q->polling was 0 anyway - */ - /*SYNC_MEMORY;*/ - if (irq->is_qebsm) { - count = 1; - qdio_do_eqbs(q, &state, &gsf, &count); - } else - state = q->slsb.acc.val[gsf]; - if (state != SLSB_P_INPUT_PRIMED) - return 1; - /* - * set our summary bit again, as otherwise there is a - * small window we can miss between resetting it and - * checking for PRIMED state - */ - if (q->is_thinint_q) - tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind); - return 0; - -#else /* QDIO_USE_PROCESSING_STATE */ - return 1; -#endif /* QDIO_USE_PROCESSING_STATE */ -} - -/* - * see the comment in do_QDIO and before qdio_reserve_q about the - * sophisticated locking outside of unmark_q, so that we don't need to - * disable the interrupts :-) -*/ -static void -qdio_unmark_q(struct qdio_q *q) -{ - unsigned long flags; - - QDIO_DBF_TEXT4(0,trace,"unmark q"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - if ((!q->list_prev)||(!q->list_next)) - return; - - if ((q->is_thinint_q)&&(q->is_input_q)) { - /* iQDIO */ - spin_lock_irqsave(&ttiq_list_lock,flags); - /* in case cleanup has done this already and simultanously - * qdio_unmark_q is called from the interrupt handler, we've - * got to check this in this specific case again */ - if ((!q->list_prev)||(!q->list_next)) - goto out; - if (q->list_next==q) { - /* q was the only interesting q */ - tiq_list=NULL; - q->list_next=NULL; - q->list_prev=NULL; - } else { - q->list_next->list_prev=q->list_prev; - q->list_prev->list_next=q->list_next; - tiq_list=q->list_next; - q->list_next=NULL; - q->list_prev=NULL; - } -out: - spin_unlock_irqrestore(&ttiq_list_lock,flags); - } -} - -static inline unsigned long -tiqdio_clear_global_summary(void) -{ - unsigned long time; - - QDIO_DBF_TEXT5(0,trace,"clrglobl"); - - time = do_clear_global_summary(); - - QDIO_DBF_HEX5(0,trace,&time,sizeof(unsigned long)); - - return time; -} - - -/************************* OUTBOUND ROUTINES *******************************/ -static int -qdio_qebsm_get_outbound_buffer_frontier(struct qdio_q *q) -{ - struct qdio_irq *irq; - unsigned char state; - unsigned int cnt, count, ftc; - - irq = (struct qdio_irq *) q->irq_ptr; - if ((!q->is_iqdio_q) && (!q->hydra_gives_outbound_pcis)) - SYNC_MEMORY; - - ftc = q->first_to_check; - count = qdio_min(atomic_read(&q->number_of_buffers_used), - (QDIO_MAX_BUFFERS_PER_Q-1)); - if (count == 0) - return q->first_to_check; - cnt = qdio_do_eqbs(q, &state, &ftc, &count); - if (cnt == 0) - return q->first_to_check; - switch (state) { - case 
SLSB_P_OUTPUT_ERROR: - QDIO_DBF_TEXT3(0,trace,"outperr"); - atomic_sub(cnt , &q->number_of_buffers_used); - if (q->qdio_error) - q->error_status_flags |= - QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR; - q->qdio_error = SLSB_P_OUTPUT_ERROR; - q->error_status_flags |= QDIO_STATUS_LOOK_FOR_ERROR; - q->first_to_check = ftc; - break; - case SLSB_P_OUTPUT_EMPTY: - QDIO_DBF_TEXT5(0,trace,"outpempt"); - atomic_sub(cnt, &q->number_of_buffers_used); - q->first_to_check = ftc; - break; - case SLSB_CU_OUTPUT_PRIMED: - /* all buffers primed */ - QDIO_DBF_TEXT5(0,trace,"outpprim"); - break; - default: - break; - } - QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int)); - return q->first_to_check; -} - -static int -qdio_qebsm_get_inbound_buffer_frontier(struct qdio_q *q) -{ - struct qdio_irq *irq; - unsigned char state; - int tmp, ftc, count, cnt; - char dbf_text[15]; - - - irq = (struct qdio_irq *) q->irq_ptr; - ftc = q->first_to_check; - count = qdio_min(atomic_read(&q->number_of_buffers_used), - (QDIO_MAX_BUFFERS_PER_Q-1)); - if (count == 0) - return q->first_to_check; - cnt = qdio_do_eqbs(q, &state, &ftc, &count); - if (cnt == 0) - return q->first_to_check; - switch (state) { - case SLSB_P_INPUT_ERROR : -#ifdef CONFIG_QDIO_DEBUG - QDIO_DBF_TEXT3(1,trace,"inperr"); - sprintf(dbf_text,"%2x,%2x",ftc,count); - QDIO_DBF_TEXT3(1,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - if (q->qdio_error) - q->error_status_flags |= - QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR; - q->qdio_error = SLSB_P_INPUT_ERROR; - q->error_status_flags |= QDIO_STATUS_LOOK_FOR_ERROR; - atomic_sub(cnt, &q->number_of_buffers_used); - q->first_to_check = ftc; - break; - case SLSB_P_INPUT_PRIMED : - QDIO_DBF_TEXT3(0,trace,"inptprim"); - sprintf(dbf_text,"%2x,%2x",ftc,count); - QDIO_DBF_TEXT3(1,trace,dbf_text); - tmp = 0; - ftc = q->first_to_check; -#ifdef QDIO_USE_PROCESSING_STATE - if (cnt > 1) { - cnt -= 1; - tmp = set_slsb(q, &ftc, SLSB_P_INPUT_NOT_INIT, &cnt); - if (!tmp) - break; - } - cnt = 1; - tmp += set_slsb(q, &ftc, - SLSB_P_INPUT_PROCESSING, &cnt); - atomic_set(&q->polling, 1); -#else - tmp = set_slsb(q, &ftc, SLSB_P_INPUT_NOT_INIT, &cnt); -#endif - atomic_sub(tmp, &q->number_of_buffers_used); - q->first_to_check = ftc; - break; - case SLSB_CU_INPUT_EMPTY: - case SLSB_P_INPUT_NOT_INIT: - case SLSB_P_INPUT_PROCESSING: - QDIO_DBF_TEXT5(0,trace,"inpnipro"); - break; - default: - break; - } - QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int)); - return q->first_to_check; -} - -static int -qdio_get_outbound_buffer_frontier(struct qdio_q *q) -{ - struct qdio_irq *irq; - volatile char *slsb; - unsigned int count = 1; - int first_not_to_check, f, f_mod_no; - char dbf_text[15]; - - QDIO_DBF_TEXT4(0,trace,"getobfro"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - irq = (struct qdio_irq *) q->irq_ptr; - if (irq->is_qebsm) - return qdio_qebsm_get_outbound_buffer_frontier(q); - - slsb=&q->slsb.acc.val[0]; - f_mod_no=f=q->first_to_check; - /* - * f points to already processed elements, so f+no_used is correct... - * ... 
but: we don't check 128 buffers, as otherwise - * qdio_has_outbound_q_moved would return 0 - */ - first_not_to_check=f+qdio_min(atomic_read(&q->number_of_buffers_used), - (QDIO_MAX_BUFFERS_PER_Q-1)); - - if (((!q->is_iqdio_q) && (!q->hydra_gives_outbound_pcis)) || - (q->queue_type == QDIO_IQDIO_QFMT_ASYNCH)) - SYNC_MEMORY; - -check_next: - if (f==first_not_to_check) - goto out; - - switch(slsb[f_mod_no]) { - - /* the adapter has not fetched the output yet */ - case SLSB_CU_OUTPUT_PRIMED: - QDIO_DBF_TEXT5(0,trace,"outpprim"); - break; - - /* the adapter got it */ - case SLSB_P_OUTPUT_EMPTY: - atomic_dec(&q->number_of_buffers_used); - f++; - f_mod_no=f&(QDIO_MAX_BUFFERS_PER_Q-1); - QDIO_DBF_TEXT5(0,trace,"outpempt"); - goto check_next; - - case SLSB_P_OUTPUT_ERROR: - QDIO_DBF_TEXT3(0,trace,"outperr"); - sprintf(dbf_text,"%x-%x-%x",f_mod_no, - q->sbal[f_mod_no]->element[14].sbalf.value, - q->sbal[f_mod_no]->element[15].sbalf.value); - QDIO_DBF_TEXT3(1,trace,dbf_text); - QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256); - - /* kind of process the buffer */ - set_slsb(q, &f_mod_no, SLSB_P_OUTPUT_NOT_INIT, &count); - - /* - * we increment the frontier, as this buffer - * was processed obviously - */ - atomic_dec(&q->number_of_buffers_used); - f_mod_no=(f_mod_no+1)&(QDIO_MAX_BUFFERS_PER_Q-1); - - if (q->qdio_error) - q->error_status_flags|= - QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR; - q->qdio_error=SLSB_P_OUTPUT_ERROR; - q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR; - - break; - - /* no new buffers */ - default: - QDIO_DBF_TEXT5(0,trace,"outpni"); - } -out: - return (q->first_to_check=f_mod_no); -} - -/* all buffers are processed */ -static int -qdio_is_outbound_q_done(struct qdio_q *q) -{ - int no_used; -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[15]; -#endif - - no_used=atomic_read(&q->number_of_buffers_used); - -#ifdef CONFIG_QDIO_DEBUG - if (no_used) { - sprintf(dbf_text,"oqisnt%02x",no_used); - QDIO_DBF_TEXT4(0,trace,dbf_text); - } else { - QDIO_DBF_TEXT4(0,trace,"oqisdone"); - } - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); -#endif /* CONFIG_QDIO_DEBUG */ - return (no_used==0); -} - -static int -qdio_has_outbound_q_moved(struct qdio_q *q) -{ - int i; - - i=qdio_get_outbound_buffer_frontier(q); - - if ( (i!=GET_SAVED_FRONTIER(q)) || - (q->error_status_flags&QDIO_STATUS_LOOK_FOR_ERROR) ) { - SAVE_FRONTIER(q,i); - QDIO_DBF_TEXT4(0,trace,"oqhasmvd"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - return 1; - } else { - QDIO_DBF_TEXT4(0,trace,"oqhsntmv"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - return 0; - } -} - -static void -qdio_kick_outbound_q(struct qdio_q *q) -{ - int result; -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[15]; - - QDIO_DBF_TEXT4(0,trace,"kickoutq"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); -#endif /* CONFIG_QDIO_DEBUG */ - - if (!q->siga_out) - return; - - /* here's the story with cc=2 and busy bit set (thanks, Rick): - * VM's CP could present us cc=2 and busy bit set on SIGA-write - * during reconfiguration of their Guest LAN (only in HIPERS mode, - * QDIO mode is asynchronous -- cc=2 and busy bit there will take - * the queues down immediately; and not being under VM we have a - * problem on cc=2 and busy bit set right away). - * - * Therefore qdio_siga_output will try for a short time constantly, - * if such a condition occurs. If it doesn't change, it will - * increase the busy_siga_counter and save the timestamp, and - * schedule the queue for later processing (via mark_q, using the - * queue tasklet). __qdio_outbound_processing will check out the - * counter. 
If non-zero, it will call qdio_kick_outbound_q as often - * as the value of the counter. This will attempt further SIGA - * instructions. For each successful SIGA, the counter is - * decreased, for failing SIGAs the counter remains the same, after - * all. - * After some time of no movement, qdio_kick_outbound_q will - * finally fail and reflect corresponding error codes to call - * the upper layer module and have it take the queues down. - * - * Note that this is a change from the original HiperSockets design - * (saying cc=2 and busy bit means take the queues down), but in - * these days Guest LAN didn't exist... excessive cc=2 with busy bit - * conditions will still take the queues down, but the threshold is - * higher due to the Guest LAN environment. - */ - - - result=qdio_siga_output(q); - - switch (result) { - case 0: - /* went smooth this time, reset timestamp */ -#ifdef CONFIG_QDIO_DEBUG - QDIO_DBF_TEXT3(0,trace,"cc2reslv"); - sprintf(dbf_text,"%4x%2x%2x",q->schid.sch_no,q->q_no, - atomic_read(&q->busy_siga_counter)); - QDIO_DBF_TEXT3(0,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - q->timing.busy_start=0; - break; - case (2|QDIO_SIGA_ERROR_B_BIT_SET): - /* cc=2 and busy bit: */ - atomic_inc(&q->busy_siga_counter); - - /* if the last siga was successful, save - * timestamp here */ - if (!q->timing.busy_start) - q->timing.busy_start=NOW; - - /* if we're in time, don't touch error_status_flags - * and siga_error */ - if (NOW-q->timing.busy_start<QDIO_BUSY_BIT_GIVE_UP) { - qdio_mark_q(q); - break; - } - QDIO_DBF_TEXT2(0,trace,"cc2REPRT"); -#ifdef CONFIG_QDIO_DEBUG - sprintf(dbf_text,"%4x%2x%2x",q->schid.sch_no,q->q_no, - atomic_read(&q->busy_siga_counter)); - QDIO_DBF_TEXT3(0,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - /* else fallthrough and report error */ - default: - /* for plain cc=1, 2 or 3: */ - if (q->siga_error) - q->error_status_flags|= - QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR; - q->error_status_flags|= - QDIO_STATUS_LOOK_FOR_ERROR; - q->siga_error=result; - } -} - -static void -qdio_kick_outbound_handler(struct qdio_q *q) -{ - int start, end, real_end, count; -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[15]; -#endif - - start = q->first_element_to_kick; - /* last_move_ftc was just updated */ - real_end = GET_SAVED_FRONTIER(q); - end = (real_end+QDIO_MAX_BUFFERS_PER_Q-1)& - (QDIO_MAX_BUFFERS_PER_Q-1); - count = (end+QDIO_MAX_BUFFERS_PER_Q+1-start)& - (QDIO_MAX_BUFFERS_PER_Q-1); - -#ifdef CONFIG_QDIO_DEBUG - QDIO_DBF_TEXT4(0,trace,"kickouth"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - sprintf(dbf_text,"s=%2xc=%2x",start,count); - QDIO_DBF_TEXT4(0,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - - if (q->state==QDIO_IRQ_STATE_ACTIVE) - q->handler(q->cdev,QDIO_STATUS_OUTBOUND_INT| - q->error_status_flags, - q->qdio_error,q->siga_error,q->q_no,start,count, - q->int_parm); - - /* for the next time: */ - q->first_element_to_kick=real_end; - q->qdio_error=0; - q->siga_error=0; - q->error_status_flags=0; -} - -static void -__qdio_outbound_processing(struct qdio_q *q) -{ - int siga_attempts; - - QDIO_DBF_TEXT4(0,trace,"qoutproc"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - if (unlikely(qdio_reserve_q(q))) { - qdio_release_q(q); - qdio_perf_stat_inc(&perf_stats.outbound_tl_runs_resched); - /* as we're sissies, we'll check next time */ - if (likely(!atomic_read(&q->is_in_shutdown))) { - qdio_mark_q(q); - QDIO_DBF_TEXT4(0,trace,"busy,agn"); - } - return; - } - qdio_perf_stat_inc(&perf_stats.outbound_tl_runs); - qdio_perf_stat_inc(&perf_stats.tl_runs); - - /* see 
comment in qdio_kick_outbound_q */ - siga_attempts=atomic_read(&q->busy_siga_counter); - while (siga_attempts) { - atomic_dec(&q->busy_siga_counter); - qdio_kick_outbound_q(q); - siga_attempts--; - } - - if (qdio_has_outbound_q_moved(q)) - qdio_kick_outbound_handler(q); - - if (q->queue_type == QDIO_ZFCP_QFMT) { - if ((!q->hydra_gives_outbound_pcis) && - (!qdio_is_outbound_q_done(q))) - qdio_mark_q(q); - } - else if (((!q->is_iqdio_q) && (!q->is_pci_out)) || - (q->queue_type == QDIO_IQDIO_QFMT_ASYNCH)) { - /* - * make sure buffer switch from PRIMED to EMPTY is noticed - * and outbound_handler is called - */ - if (qdio_is_outbound_q_done(q)) { - del_timer(&q->timer); - } else { - if (!timer_pending(&q->timer)) - mod_timer(&q->timer, jiffies + - QDIO_FORCE_CHECK_TIMEOUT); - } - } - - qdio_release_q(q); -} - -static void -qdio_outbound_processing(unsigned long q) -{ - __qdio_outbound_processing((struct qdio_q *) q); -} - -/************************* INBOUND ROUTINES *******************************/ - - -static int -qdio_get_inbound_buffer_frontier(struct qdio_q *q) -{ - struct qdio_irq *irq; - int f,f_mod_no; - volatile char *slsb; - unsigned int count = 1; - int first_not_to_check; -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[15]; -#endif /* CONFIG_QDIO_DEBUG */ -#ifdef QDIO_USE_PROCESSING_STATE - int last_position=-1; -#endif /* QDIO_USE_PROCESSING_STATE */ - - QDIO_DBF_TEXT4(0,trace,"getibfro"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - irq = (struct qdio_irq *) q->irq_ptr; - if (irq->is_qebsm) - return qdio_qebsm_get_inbound_buffer_frontier(q); - - slsb=&q->slsb.acc.val[0]; - f_mod_no=f=q->first_to_check; - /* - * we don't check 128 buffers, as otherwise qdio_has_inbound_q_moved - * would return 0 - */ - first_not_to_check=f+qdio_min(atomic_read(&q->number_of_buffers_used), - (QDIO_MAX_BUFFERS_PER_Q-1)); - - /* - * we don't use this one, as a PCI or we after a thin interrupt - * will sync the queues - */ - /* SYNC_MEMORY;*/ - -check_next: - f_mod_no=f&(QDIO_MAX_BUFFERS_PER_Q-1); - if (f==first_not_to_check) - goto out; - switch (slsb[f_mod_no]) { - - /* CU_EMPTY means frontier is reached */ - case SLSB_CU_INPUT_EMPTY: - QDIO_DBF_TEXT5(0,trace,"inptempt"); - break; - - /* P_PRIMED means set slsb to P_PROCESSING and move on */ - case SLSB_P_INPUT_PRIMED: - QDIO_DBF_TEXT5(0,trace,"inptprim"); - -#ifdef QDIO_USE_PROCESSING_STATE - /* - * as soon as running under VM, polling the input queues will - * kill VM in terms of CP overhead - */ - if (q->siga_sync) { - set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count); - } else { - /* set the previous buffer to NOT_INIT. The current - * buffer will be set to PROCESSING at the end of - * this function to avoid further interrupts. */ - if (last_position>=0) - set_slsb(q, &last_position, - SLSB_P_INPUT_NOT_INIT, &count); - atomic_set(&q->polling,1); - last_position=f_mod_no; - } -#else /* QDIO_USE_PROCESSING_STATE */ - set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count); -#endif /* QDIO_USE_PROCESSING_STATE */ - /* - * not needed, as the inbound queue will be synced on the next - * siga-r, resp. 
tiqdio_is_inbound_q_done will do the siga-s - */ - /*SYNC_MEMORY;*/ - f++; - atomic_dec(&q->number_of_buffers_used); - goto check_next; - - case SLSB_P_INPUT_NOT_INIT: - case SLSB_P_INPUT_PROCESSING: - QDIO_DBF_TEXT5(0,trace,"inpnipro"); - break; - - /* P_ERROR means frontier is reached, break and report error */ - case SLSB_P_INPUT_ERROR: -#ifdef CONFIG_QDIO_DEBUG - sprintf(dbf_text,"inperr%2x",f_mod_no); - QDIO_DBF_TEXT3(1,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256); - - /* kind of process the buffer */ - set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count); - - if (q->qdio_error) - q->error_status_flags|= - QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR; - q->qdio_error=SLSB_P_INPUT_ERROR; - q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR; - - /* we increment the frontier, as this buffer - * was processed obviously */ - f_mod_no=(f_mod_no+1)&(QDIO_MAX_BUFFERS_PER_Q-1); - atomic_dec(&q->number_of_buffers_used); - -#ifdef QDIO_USE_PROCESSING_STATE - last_position=-1; -#endif /* QDIO_USE_PROCESSING_STATE */ - - break; - - /* everything else means frontier not changed (HALTED or so) */ - default: - break; - } -out: - q->first_to_check=f_mod_no; - -#ifdef QDIO_USE_PROCESSING_STATE - if (last_position>=0) - set_slsb(q, &last_position, SLSB_P_INPUT_PROCESSING, &count); -#endif /* QDIO_USE_PROCESSING_STATE */ - - QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int)); - - return q->first_to_check; -} - -static int -qdio_has_inbound_q_moved(struct qdio_q *q) -{ - int i; - - i=qdio_get_inbound_buffer_frontier(q); - if ( (i!=GET_SAVED_FRONTIER(q)) || - (q->error_status_flags&QDIO_STATUS_LOOK_FOR_ERROR) ) { - SAVE_FRONTIER(q,i); - if ((!q->siga_sync)&&(!q->hydra_gives_outbound_pcis)) - SAVE_TIMESTAMP(q); - - QDIO_DBF_TEXT4(0,trace,"inhasmvd"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - return 1; - } else { - QDIO_DBF_TEXT4(0,trace,"inhsntmv"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - return 0; - } -} - -/* means, no more buffers to be filled */ -static int -tiqdio_is_inbound_q_done(struct qdio_q *q) -{ - int no_used; - unsigned int start_buf, count; - unsigned char state = 0; - struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr; - -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[15]; -#endif - - no_used=atomic_read(&q->number_of_buffers_used); - - /* propagate the change from 82 to 80 through VM */ - SYNC_MEMORY; - -#ifdef CONFIG_QDIO_DEBUG - if (no_used) { - sprintf(dbf_text,"iqisnt%02x",no_used); - QDIO_DBF_TEXT4(0,trace,dbf_text); - } else { - QDIO_DBF_TEXT4(0,trace,"iniqisdo"); - } - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); -#endif /* CONFIG_QDIO_DEBUG */ - - if (!no_used) - return 1; - if (irq->is_qebsm) { - count = 1; - start_buf = q->first_to_check; - qdio_do_eqbs(q, &state, &start_buf, &count); - } else - state = q->slsb.acc.val[q->first_to_check]; - if (state != SLSB_P_INPUT_PRIMED) - /* - * nothing more to do, if next buffer is not PRIMED. - * note that we did a SYNC_MEMORY before, that there - * has been a sychnronization. - * we will return 0 below, as there is nothing to do - * (stop_polling not necessary, as we have not been - * using the PROCESSING state - */ - return 0; - - /* - * ok, the next input buffer is primed. that means, that device state - * change indicator and adapter local summary are set, so we will find - * it next time. - * we will return 0 below, as there is nothing to do, except scheduling - * ourselves for the next time. 
- */ - tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind); - tiqdio_sched_tl(); - return 0; -} - -static int -qdio_is_inbound_q_done(struct qdio_q *q) -{ - int no_used; - unsigned int start_buf, count; - unsigned char state = 0; - struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr; - -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[15]; -#endif - - no_used=atomic_read(&q->number_of_buffers_used); - - /* - * we need that one for synchronization with the adapter, as it - * does a kind of PCI avoidance - */ - SYNC_MEMORY; - - if (!no_used) { - QDIO_DBF_TEXT4(0,trace,"inqisdnA"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - return 1; - } - if (irq->is_qebsm) { - count = 1; - start_buf = q->first_to_check; - qdio_do_eqbs(q, &state, &start_buf, &count); - } else - state = q->slsb.acc.val[q->first_to_check]; - if (state == SLSB_P_INPUT_PRIMED) { - /* we got something to do */ - QDIO_DBF_TEXT4(0,trace,"inqisntA"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - return 0; - } - - /* on VM, we don't poll, so the q is always done here */ - if (q->siga_sync) - return 1; - if (q->hydra_gives_outbound_pcis) - return 1; - - /* - * at this point we know, that inbound first_to_check - * has (probably) not moved (see qdio_inbound_processing) - */ - if (NOW>GET_SAVED_TIMESTAMP(q)+q->timing.threshold) { -#ifdef CONFIG_QDIO_DEBUG - QDIO_DBF_TEXT4(0,trace,"inqisdon"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used); - QDIO_DBF_TEXT4(0,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - return 1; - } else { -#ifdef CONFIG_QDIO_DEBUG - QDIO_DBF_TEXT4(0,trace,"inqisntd"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used); - QDIO_DBF_TEXT4(0,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - return 0; - } -} - -static void -qdio_kick_inbound_handler(struct qdio_q *q) -{ - int count, start, end, real_end, i; -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[15]; -#endif - - QDIO_DBF_TEXT4(0,trace,"kickinh"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - start=q->first_element_to_kick; - real_end=q->first_to_check; - end=(real_end+QDIO_MAX_BUFFERS_PER_Q-1)&(QDIO_MAX_BUFFERS_PER_Q-1); - - i=start; - count=0; - while (1) { - count++; - if (i==end) - break; - i=(i+1)&(QDIO_MAX_BUFFERS_PER_Q-1); - } - -#ifdef CONFIG_QDIO_DEBUG - sprintf(dbf_text,"s=%2xc=%2x",start,count); - QDIO_DBF_TEXT4(0,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - - if (likely(q->state==QDIO_IRQ_STATE_ACTIVE)) - q->handler(q->cdev, - QDIO_STATUS_INBOUND_INT|q->error_status_flags, - q->qdio_error,q->siga_error,q->q_no,start,count, - q->int_parm); - - /* for the next time: */ - q->first_element_to_kick=real_end; - q->qdio_error=0; - q->siga_error=0; - q->error_status_flags=0; - - qdio_perf_stat_inc(&perf_stats.inbound_cnt); -} - -static void -__tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set) -{ - struct qdio_irq *irq_ptr; - struct qdio_q *oq; - int i; - - QDIO_DBF_TEXT4(0,trace,"iqinproc"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - /* - * we first want to reserve the q, so that we know, that we don't - * interrupt ourselves and call qdio_unmark_q, as is_in_shutdown might - * be set - */ - if (unlikely(qdio_reserve_q(q))) { - qdio_release_q(q); - qdio_perf_stat_inc(&perf_stats.inbound_thin_tl_runs_resched); - /* - * as we might just be about to stop polling, we make - * sure that we check again at least once more - */ - tiqdio_sched_tl(); - return; - } - qdio_perf_stat_inc(&perf_stats.inbound_thin_tl_runs); - if 
(unlikely(atomic_read(&q->is_in_shutdown))) { - qdio_unmark_q(q); - goto out; - } - - /* - * we reset spare_ind_was_set, when the queue does not use the - * spare indicator - */ - if (spare_ind_was_set) - spare_ind_was_set = (q->dev_st_chg_ind == &spare_indicator); - - if (!(*(q->dev_st_chg_ind)) && !spare_ind_was_set) - goto out; - /* - * q->dev_st_chg_ind is the indicator, be it shared or not. - * only clear it, if indicator is non-shared - */ - if (q->dev_st_chg_ind != &spare_indicator) - tiqdio_clear_summary_bit((__u32*)q->dev_st_chg_ind); - - if (q->hydra_gives_outbound_pcis) { - if (!q->siga_sync_done_on_thinints) { - SYNC_MEMORY_ALL; - } else if (!q->siga_sync_done_on_outb_tis) { - SYNC_MEMORY_ALL_OUTB; - } - } else { - SYNC_MEMORY; - } - /* - * maybe we have to do work on our outbound queues... at least - * we have to check the outbound-int-capable thinint-capable - * queues - */ - if (q->hydra_gives_outbound_pcis) { - irq_ptr = (struct qdio_irq*)q->irq_ptr; - for (i=0;i<irq_ptr->no_output_qs;i++) { - oq = irq_ptr->output_qs[i]; - if (!qdio_is_outbound_q_done(oq)) { - qdio_perf_stat_dec(&perf_stats.tl_runs); - __qdio_outbound_processing(oq); - } - } - } - - if (!qdio_has_inbound_q_moved(q)) - goto out; - - qdio_kick_inbound_handler(q); - if (tiqdio_is_inbound_q_done(q)) - if (!qdio_stop_polling(q)) { - /* - * we set the flags to get into the stuff next time, - * see also comment in qdio_stop_polling - */ - tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind); - tiqdio_sched_tl(); - } -out: - qdio_release_q(q); -} - -static void -tiqdio_inbound_processing(unsigned long q) -{ - __tiqdio_inbound_processing((struct qdio_q *) q, - atomic_read(&spare_indicator_usecount)); -} - -static void -__qdio_inbound_processing(struct qdio_q *q) -{ - int q_laps=0; - - QDIO_DBF_TEXT4(0,trace,"qinproc"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - if (unlikely(qdio_reserve_q(q))) { - qdio_release_q(q); - qdio_perf_stat_inc(&perf_stats.inbound_tl_runs_resched); - /* as we're sissies, we'll check next time */ - if (likely(!atomic_read(&q->is_in_shutdown))) { - qdio_mark_q(q); - QDIO_DBF_TEXT4(0,trace,"busy,agn"); - } - return; - } - qdio_perf_stat_inc(&perf_stats.inbound_tl_runs); - qdio_perf_stat_inc(&perf_stats.tl_runs); - -again: - if (qdio_has_inbound_q_moved(q)) { - qdio_kick_inbound_handler(q); - if (!qdio_stop_polling(q)) { - q_laps++; - if (q_laps<QDIO_Q_LAPS) - goto again; - } - qdio_mark_q(q); - } else { - if (!qdio_is_inbound_q_done(q)) - /* means poll time is not yet over */ - qdio_mark_q(q); - } - - qdio_release_q(q); -} - -static void -qdio_inbound_processing(unsigned long q) -{ - __qdio_inbound_processing((struct qdio_q *) q); -} - -/************************* MAIN ROUTINES *******************************/ - -#ifdef QDIO_USE_PROCESSING_STATE -static int -tiqdio_reset_processing_state(struct qdio_q *q, int q_laps) -{ - if (!q) { - tiqdio_sched_tl(); - return 0; - } - - /* - * under VM, we have not used the PROCESSING state, so no - * need to stop polling - */ - if (q->siga_sync) - return 2; - - if (unlikely(qdio_reserve_q(q))) { - qdio_release_q(q); - qdio_perf_stat_inc(&perf_stats.inbound_thin_tl_runs_resched); - /* - * as we might just be about to stop polling, we make - * sure that we check again at least once more - */ - - /* - * sanity -- we'd get here without setting the - * dev st chg ind - */ - tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind); - tiqdio_sched_tl(); - return 0; - } - if (qdio_stop_polling(q)) { - qdio_release_q(q); - return 2; - } - if (q_laps<QDIO_Q_LAPS-1) { - 
qdio_release_q(q); - return 3; - } - /* - * we set the flags to get into the stuff - * next time, see also comment in qdio_stop_polling - */ - tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind); - tiqdio_sched_tl(); - qdio_release_q(q); - return 1; - -} -#endif /* QDIO_USE_PROCESSING_STATE */ - -static void -tiqdio_inbound_checks(void) -{ - struct qdio_q *q; - int spare_ind_was_set=0; -#ifdef QDIO_USE_PROCESSING_STATE - int q_laps=0; -#endif /* QDIO_USE_PROCESSING_STATE */ - - QDIO_DBF_TEXT4(0,trace,"iqdinbck"); - QDIO_DBF_TEXT5(0,trace,"iqlocsum"); - -#ifdef QDIO_USE_PROCESSING_STATE -again: -#endif /* QDIO_USE_PROCESSING_STATE */ - - /* when the spare indicator is used and set, save that and clear it */ - if ((atomic_read(&spare_indicator_usecount)) && spare_indicator) { - spare_ind_was_set = 1; - tiqdio_clear_summary_bit((__u32*)&spare_indicator); - } - - q=(struct qdio_q*)tiq_list; - do { - if (!q) - break; - __tiqdio_inbound_processing(q, spare_ind_was_set); - q=(struct qdio_q*)q->list_next; - } while (q!=(struct qdio_q*)tiq_list); - -#ifdef QDIO_USE_PROCESSING_STATE - q=(struct qdio_q*)tiq_list; - do { - int ret; - - ret = tiqdio_reset_processing_state(q, q_laps); - switch (ret) { - case 0: - return; - case 1: - q_laps++; - case 2: - q = (struct qdio_q*)q->list_next; - break; - default: - q_laps++; - goto again; - } - } while (q!=(struct qdio_q*)tiq_list); -#endif /* QDIO_USE_PROCESSING_STATE */ -} - -static void -tiqdio_tl(unsigned long data) -{ - QDIO_DBF_TEXT4(0,trace,"iqdio_tl"); - - qdio_perf_stat_inc(&perf_stats.tl_runs); - - tiqdio_inbound_checks(); -} - -/********************* GENERAL HELPER_ROUTINES ***********************/ - -static void -qdio_release_irq_memory(struct qdio_irq *irq_ptr) -{ - int i; - struct qdio_q *q; - - for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) { - q = irq_ptr->input_qs[i]; - if (q) { - free_page((unsigned long) q->slib); - kmem_cache_free(qdio_q_cache, q); - } - q = irq_ptr->output_qs[i]; - if (q) { - free_page((unsigned long) q->slib); - kmem_cache_free(qdio_q_cache, q); - } - } - free_page((unsigned long) irq_ptr->qdr); - free_page((unsigned long) irq_ptr); -} - -static void -qdio_set_impl_params(struct qdio_irq *irq_ptr, - unsigned int qib_param_field_format, - /* pointer to 128 bytes or NULL, if no param field */ - unsigned char *qib_param_field, - /* pointer to no_queues*128 words of data or NULL */ - unsigned int no_input_qs, - unsigned int no_output_qs, - unsigned long *input_slib_elements, - unsigned long *output_slib_elements) -{ - int i,j; - - if (!irq_ptr) - return; - - irq_ptr->qib.pfmt=qib_param_field_format; - if (qib_param_field) - memcpy(irq_ptr->qib.parm,qib_param_field, - QDIO_MAX_BUFFERS_PER_Q); - - if (input_slib_elements) - for (i=0;i<no_input_qs;i++) { - for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) - irq_ptr->input_qs[i]->slib->slibe[j].parms= - input_slib_elements[ - i*QDIO_MAX_BUFFERS_PER_Q+j]; - } - if (output_slib_elements) - for (i=0;i<no_output_qs;i++) { - for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) - irq_ptr->output_qs[i]->slib->slibe[j].parms= - output_slib_elements[ - i*QDIO_MAX_BUFFERS_PER_Q+j]; - } -} - -static int -qdio_alloc_qs(struct qdio_irq *irq_ptr, - int no_input_qs, int no_output_qs) -{ - int i; - struct qdio_q *q; - - for (i = 0; i < no_input_qs; i++) { - q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL); - if (!q) - return -ENOMEM; - memset(q, 0, sizeof(*q)); - - q->slib = (struct slib *) __get_free_page(GFP_KERNEL); - if (!q->slib) { - kmem_cache_free(qdio_q_cache, q); - return -ENOMEM; - } - irq_ptr->input_qs[i]=q; - } 
- - for (i = 0; i < no_output_qs; i++) { - q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL); - if (!q) - return -ENOMEM; - memset(q, 0, sizeof(*q)); - - q->slib = (struct slib *) __get_free_page(GFP_KERNEL); - if (!q->slib) { - kmem_cache_free(qdio_q_cache, q); - return -ENOMEM; - } - irq_ptr->output_qs[i]=q; - } - return 0; -} - -static void -qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev, - int no_input_qs, int no_output_qs, - qdio_handler_t *input_handler, - qdio_handler_t *output_handler, - unsigned long int_parm,int q_format, - unsigned long flags, - void **inbound_sbals_array, - void **outbound_sbals_array) -{ - struct qdio_q *q; - int i,j; - char dbf_text[20]; /* see qdio_initialize */ - void *ptr; - int available; - - sprintf(dbf_text,"qfqs%4x",cdev->private->schid.sch_no); - QDIO_DBF_TEXT0(0,setup,dbf_text); - for (i=0;i<no_input_qs;i++) { - q=irq_ptr->input_qs[i]; - - memset(q,0,((char*)&q->slib)-((char*)q)); - sprintf(dbf_text,"in-q%4x",i); - QDIO_DBF_TEXT0(0,setup,dbf_text); - QDIO_DBF_HEX0(0,setup,&q,sizeof(void*)); - - memset(q->slib,0,PAGE_SIZE); - q->sl=(struct sl*)(((char*)q->slib)+PAGE_SIZE/2); - - available=0; - - for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) - q->sbal[j]=*(inbound_sbals_array++); - - q->queue_type=q_format; - q->int_parm=int_parm; - q->schid = irq_ptr->schid; - q->irq_ptr = irq_ptr; - q->cdev = cdev; - q->mask=1<<(31-i); - q->q_no=i; - q->is_input_q=1; - q->first_to_check=0; - q->last_move_ftc=0; - q->handler=input_handler; - q->dev_st_chg_ind=irq_ptr->dev_st_chg_ind; - - /* q->is_thinint_q isn't valid at this time, but - * irq_ptr->is_thinint_irq is - */ - if (irq_ptr->is_thinint_irq) - tasklet_init(&q->tasklet, tiqdio_inbound_processing, - (unsigned long) q); - else - tasklet_init(&q->tasklet, qdio_inbound_processing, - (unsigned long) q); - - /* actually this is not used for inbound queues. yet. 
*/ - atomic_set(&q->busy_siga_counter,0); - q->timing.busy_start=0; - -/* for (j=0;j<QDIO_STATS_NUMBER;j++) - q->timing.last_transfer_times[j]=(qdio_get_micros()/ - QDIO_STATS_NUMBER)*j; - q->timing.last_transfer_index=QDIO_STATS_NUMBER-1; -*/ - - /* fill in slib */ - if (i>0) irq_ptr->input_qs[i-1]->slib->nsliba= - (unsigned long)(q->slib); - q->slib->sla=(unsigned long)(q->sl); - q->slib->slsba=(unsigned long)(&q->slsb.acc.val[0]); - - /* fill in sl */ - for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) - q->sl->element[j].sbal=(unsigned long)(q->sbal[j]); - - QDIO_DBF_TEXT2(0,setup,"sl-sb-b0"); - ptr=(void*)q->sl; - QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*)); - ptr=(void*)&q->slsb; - QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*)); - ptr=(void*)q->sbal[0]; - QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*)); - - /* fill in slsb */ - if (!irq_ptr->is_qebsm) { - unsigned int count = 1; - for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) - set_slsb(q, &j, SLSB_P_INPUT_NOT_INIT, &count); - } - } - - for (i=0;i<no_output_qs;i++) { - q=irq_ptr->output_qs[i]; - memset(q,0,((char*)&q->slib)-((char*)q)); - - sprintf(dbf_text,"outq%4x",i); - QDIO_DBF_TEXT0(0,setup,dbf_text); - QDIO_DBF_HEX0(0,setup,&q,sizeof(void*)); - - memset(q->slib,0,PAGE_SIZE); - q->sl=(struct sl*)(((char*)q->slib)+PAGE_SIZE/2); - - available=0; - - for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) - q->sbal[j]=*(outbound_sbals_array++); - - q->queue_type=q_format; - if ((q->queue_type == QDIO_IQDIO_QFMT) && - (no_output_qs > 1) && - (i == no_output_qs-1)) - q->queue_type = QDIO_IQDIO_QFMT_ASYNCH; - q->int_parm=int_parm; - q->is_input_q=0; - q->is_pci_out = 0; - q->schid = irq_ptr->schid; - q->cdev = cdev; - q->irq_ptr = irq_ptr; - q->mask=1<<(31-i); - q->q_no=i; - q->first_to_check=0; - q->last_move_ftc=0; - q->handler=output_handler; - - tasklet_init(&q->tasklet, qdio_outbound_processing, - (unsigned long) q); - setup_timer(&q->timer, qdio_outbound_processing, - (unsigned long) q); - - atomic_set(&q->busy_siga_counter,0); - q->timing.busy_start=0; - - /* fill in slib */ - if (i>0) irq_ptr->output_qs[i-1]->slib->nsliba= - (unsigned long)(q->slib); - q->slib->sla=(unsigned long)(q->sl); - q->slib->slsba=(unsigned long)(&q->slsb.acc.val[0]); - - /* fill in sl */ - for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) - q->sl->element[j].sbal=(unsigned long)(q->sbal[j]); - - QDIO_DBF_TEXT2(0,setup,"sl-sb-b0"); - ptr=(void*)q->sl; - QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*)); - ptr=(void*)&q->slsb; - QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*)); - ptr=(void*)q->sbal[0]; - QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*)); - - /* fill in slsb */ - if (!irq_ptr->is_qebsm) { - unsigned int count = 1; - for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) - set_slsb(q, &j, SLSB_P_OUTPUT_NOT_INIT, &count); - } - } -} - -static void -qdio_fill_thresholds(struct qdio_irq *irq_ptr, - unsigned int no_input_qs, - unsigned int no_output_qs, - unsigned int min_input_threshold, - unsigned int max_input_threshold, - unsigned int min_output_threshold, - unsigned int max_output_threshold) -{ - int i; - struct qdio_q *q; - - for (i=0;i<no_input_qs;i++) { - q=irq_ptr->input_qs[i]; - q->timing.threshold=max_input_threshold; -/* for (j=0;j<QDIO_STATS_CLASSES;j++) { - q->threshold_classes[j].threshold= - min_input_threshold+ - (max_input_threshold-min_input_threshold)/ - QDIO_STATS_CLASSES; - } - qdio_use_thresholds(q,QDIO_STATS_CLASSES/2);*/ - } - for (i=0;i<no_output_qs;i++) { - q=irq_ptr->output_qs[i]; - q->timing.threshold=max_output_threshold; -/* for (j=0;j<QDIO_STATS_CLASSES;j++) { - 
q->threshold_classes[j].threshold= - min_output_threshold+ - (max_output_threshold-min_output_threshold)/ - QDIO_STATS_CLASSES; - } - qdio_use_thresholds(q,QDIO_STATS_CLASSES/2);*/ - } -} - -static void tiqdio_thinint_handler(void *ind, void *drv_data) -{ - QDIO_DBF_TEXT4(0,trace,"thin_int"); - - qdio_perf_stat_inc(&perf_stats.thinints); - - /* SVS only when needed: - * issue SVS to benefit from iqdio interrupt avoidance - * (SVS clears AISOI)*/ - if (!omit_svs) - tiqdio_clear_global_summary(); - - tiqdio_inbound_checks(); -} - -static void -qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state) -{ - int i; -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[15]; - - QDIO_DBF_TEXT5(0,trace,"newstate"); - sprintf(dbf_text,"%4x%4x",irq_ptr->schid.sch_no,state); - QDIO_DBF_TEXT5(0,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - - irq_ptr->state=state; - for (i=0;i<irq_ptr->no_input_qs;i++) - irq_ptr->input_qs[i]->state=state; - for (i=0;i<irq_ptr->no_output_qs;i++) - irq_ptr->output_qs[i]->state=state; - mb(); -} - -static void -qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb) -{ - char dbf_text[15]; - - if (irb->esw.esw0.erw.cons) { - sprintf(dbf_text,"sens%4x",schid.sch_no); - QDIO_DBF_TEXT2(1,trace,dbf_text); - QDIO_DBF_HEX0(0,sense,irb,QDIO_DBF_SENSE_LEN); - - QDIO_PRINT_WARN("sense data available on qdio channel.\n"); - QDIO_HEXDUMP16(WARN,"irb: ",irb); - QDIO_HEXDUMP16(WARN,"sense data: ",irb->ecw); - } - -} - -static void -qdio_handle_pci(struct qdio_irq *irq_ptr) -{ - int i; - struct qdio_q *q; - - qdio_perf_stat_inc(&perf_stats.pcis); - for (i=0;i<irq_ptr->no_input_qs;i++) { - q=irq_ptr->input_qs[i]; - if (q->is_input_q&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT) - qdio_mark_q(q); - else { - qdio_perf_stat_dec(&perf_stats.tl_runs); - __qdio_inbound_processing(q); - } - } - if (!irq_ptr->hydra_gives_outbound_pcis) - return; - for (i=0;i<irq_ptr->no_output_qs;i++) { - q=irq_ptr->output_qs[i]; - if (qdio_is_outbound_q_done(q)) - continue; - qdio_perf_stat_dec(&perf_stats.tl_runs); - if (!irq_ptr->sync_done_on_outb_pcis) - SYNC_MEMORY; - __qdio_outbound_processing(q); - } -} - -static void qdio_establish_handle_irq(struct ccw_device*, int, int); - -static void -qdio_handle_activate_check(struct ccw_device *cdev, unsigned long intparm, - int cstat, int dstat) -{ - struct qdio_irq *irq_ptr; - struct qdio_q *q; - char dbf_text[15]; - - irq_ptr = cdev->private->qdio_data; - - QDIO_DBF_TEXT2(1, trace, "ick2"); - sprintf(dbf_text,"%s", cdev->dev.bus_id); - QDIO_DBF_TEXT2(1,trace,dbf_text); - QDIO_DBF_HEX2(0,trace,&intparm,sizeof(int)); - QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int)); - QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int)); - QDIO_PRINT_ERR("received check condition on activate " \ - "queues on device %s (cs=x%x, ds=x%x).\n", - cdev->dev.bus_id, cstat, dstat); - if (irq_ptr->no_input_qs) { - q=irq_ptr->input_qs[0]; - } else if (irq_ptr->no_output_qs) { - q=irq_ptr->output_qs[0]; - } else { - QDIO_PRINT_ERR("oops... 
no queue registered for device %s!?\n", - cdev->dev.bus_id); - goto omit_handler_call; - } - q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION| - QDIO_STATUS_LOOK_FOR_ERROR, - 0,0,0,-1,-1,q->int_parm); -omit_handler_call: - qdio_set_state(irq_ptr,QDIO_IRQ_STATE_STOPPED); - -} - -static void -qdio_call_shutdown(struct work_struct *work) -{ - struct ccw_device_private *priv; - struct ccw_device *cdev; - - priv = container_of(work, struct ccw_device_private, kick_work); - cdev = priv->cdev; - qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); - put_device(&cdev->dev); -} - -static void -qdio_timeout_handler(struct ccw_device *cdev) -{ - struct qdio_irq *irq_ptr; - char dbf_text[15]; - - QDIO_DBF_TEXT2(0, trace, "qtoh"); - sprintf(dbf_text, "%s", cdev->dev.bus_id); - QDIO_DBF_TEXT2(0, trace, dbf_text); - - irq_ptr = cdev->private->qdio_data; - sprintf(dbf_text, "state:%d", irq_ptr->state); - QDIO_DBF_TEXT2(0, trace, dbf_text); - - switch (irq_ptr->state) { - case QDIO_IRQ_STATE_INACTIVE: - QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: timed out\n", - irq_ptr->schid.ssid, irq_ptr->schid.sch_no); - QDIO_DBF_TEXT2(1,setup,"eq:timeo"); - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); - break; - case QDIO_IRQ_STATE_CLEANUP: - QDIO_PRINT_INFO("Did not get interrupt on cleanup, " - "irq=0.%x.%x.\n", - irq_ptr->schid.ssid, irq_ptr->schid.sch_no); - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); - break; - case QDIO_IRQ_STATE_ESTABLISHED: - case QDIO_IRQ_STATE_ACTIVE: - /* I/O has been terminated by common I/O layer. */ - QDIO_PRINT_INFO("Queues on irq 0.%x.%04x killed by cio.\n", - irq_ptr->schid.ssid, irq_ptr->schid.sch_no); - QDIO_DBF_TEXT2(1, trace, "cio:term"); - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); - if (get_device(&cdev->dev)) { - /* Can't call shutdown from interrupt context. */ - PREPARE_WORK(&cdev->private->kick_work, - qdio_call_shutdown); - queue_work(ccw_device_work, &cdev->private->kick_work); - } - break; - default: - BUG(); - } - wake_up(&cdev->private->wait_q); -} - -static void -qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) -{ - struct qdio_irq *irq_ptr; - int cstat,dstat; - char dbf_text[15]; - -#ifdef CONFIG_QDIO_DEBUG - QDIO_DBF_TEXT4(0, trace, "qint"); - sprintf(dbf_text, "%s", cdev->dev.bus_id); - QDIO_DBF_TEXT4(0, trace, dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - - if (!intparm) { - QDIO_PRINT_ERR("got unsolicited interrupt in qdio " \ - "handler, device %s\n", cdev->dev.bus_id); - return; - } - - irq_ptr = cdev->private->qdio_data; - if (!irq_ptr) { - QDIO_DBF_TEXT2(1, trace, "uint"); - sprintf(dbf_text,"%s", cdev->dev.bus_id); - QDIO_DBF_TEXT2(1,trace,dbf_text); - QDIO_PRINT_ERR("received interrupt on unused device %s!\n", - cdev->dev.bus_id); - return; - } - - if (IS_ERR(irb)) { - /* Currently running i/o is in error. 
*/ - switch (PTR_ERR(irb)) { - case -EIO: - QDIO_PRINT_ERR("i/o error on device %s\n", - cdev->dev.bus_id); - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); - wake_up(&cdev->private->wait_q); - return; - case -ETIMEDOUT: - qdio_timeout_handler(cdev); - return; - default: - QDIO_PRINT_ERR("unknown error state %ld on device %s\n", - PTR_ERR(irb), cdev->dev.bus_id); - return; - } - } - - qdio_irq_check_sense(irq_ptr->schid, irb); - -#ifdef CONFIG_QDIO_DEBUG - sprintf(dbf_text, "state:%d", irq_ptr->state); - QDIO_DBF_TEXT4(0, trace, dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - - cstat = irb->scsw.cmd.cstat; - dstat = irb->scsw.cmd.dstat; - - switch (irq_ptr->state) { - case QDIO_IRQ_STATE_INACTIVE: - qdio_establish_handle_irq(cdev, cstat, dstat); - break; - - case QDIO_IRQ_STATE_CLEANUP: - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); - break; - - case QDIO_IRQ_STATE_ESTABLISHED: - case QDIO_IRQ_STATE_ACTIVE: - if (cstat & SCHN_STAT_PCI) { - qdio_handle_pci(irq_ptr); - break; - } - - if ((cstat&~SCHN_STAT_PCI)||dstat) { - qdio_handle_activate_check(cdev, intparm, cstat, dstat); - break; - } - default: - QDIO_PRINT_ERR("got interrupt for queues in state %d on " \ - "device %s?!\n", - irq_ptr->state, cdev->dev.bus_id); - } - wake_up(&cdev->private->wait_q); - -} - -int -qdio_synchronize(struct ccw_device *cdev, unsigned int flags, - unsigned int queue_number) -{ - int cc = 0; - struct qdio_q *q; - struct qdio_irq *irq_ptr; - void *ptr; -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[15]="SyncXXXX"; -#endif - - irq_ptr = cdev->private->qdio_data; - if (!irq_ptr) - return -ENODEV; - -#ifdef CONFIG_QDIO_DEBUG - *((int*)(&dbf_text[4])) = irq_ptr->schid.sch_no; - QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN); - *((int*)(&dbf_text[0]))=flags; - *((int*)(&dbf_text[4]))=queue_number; - QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN); -#endif /* CONFIG_QDIO_DEBUG */ - - if (flags&QDIO_FLAG_SYNC_INPUT) { - q=irq_ptr->input_qs[queue_number]; - if (!q) - return -EINVAL; - if (!(irq_ptr->is_qebsm)) - cc = do_siga_sync(q->schid, 0, q->mask); - } else if (flags&QDIO_FLAG_SYNC_OUTPUT) { - q=irq_ptr->output_qs[queue_number]; - if (!q) - return -EINVAL; - if (!(irq_ptr->is_qebsm)) - cc = do_siga_sync(q->schid, q->mask, 0); - } else - return -EINVAL; - - ptr=&cc; - if (cc) - QDIO_DBF_HEX3(0,trace,&ptr,sizeof(int)); - - return cc; -} - -static int -qdio_get_ssqd_information(struct subchannel_id *schid, - struct qdio_chsc_ssqd **ssqd_area) -{ - int result; - - QDIO_DBF_TEXT0(0, setup, "getssqd"); - *ssqd_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC); - if (!ssqd_area) { - QDIO_PRINT_WARN("Could not get memory for chsc on sch x%x.\n", - schid->sch_no); - return -ENOMEM; - } - - (*ssqd_area)->request = (struct chsc_header) { - .length = 0x0010, - .code = 0x0024, - }; - (*ssqd_area)->first_sch = schid->sch_no; - (*ssqd_area)->last_sch = schid->sch_no; - (*ssqd_area)->ssid = schid->ssid; - result = chsc(*ssqd_area); - - if (result) { - QDIO_PRINT_WARN("CHSC returned cc %i on sch 0.%x.%x.\n", - result, schid->ssid, schid->sch_no); - goto out; - } - - if ((*ssqd_area)->response.code != QDIO_CHSC_RESPONSE_CODE_OK) { - QDIO_PRINT_WARN("CHSC response is 0x%x on sch 0.%x.%x.\n", - (*ssqd_area)->response.code, - schid->ssid, schid->sch_no); - goto out; - } - if (!((*ssqd_area)->flags & CHSC_FLAG_QDIO_CAPABILITY) || - !((*ssqd_area)->flags & CHSC_FLAG_VALIDITY) || - ((*ssqd_area)->sch != schid->sch_no)) { - QDIO_PRINT_WARN("huh? problems checking out sch 0.%x.%x... 
" \ - "using all SIGAs.\n", - schid->ssid, schid->sch_no); - goto out; - } - return 0; -out: - return -EINVAL; -} - -int -qdio_get_ssqd_pct(struct ccw_device *cdev) -{ - struct qdio_chsc_ssqd *ssqd_area; - struct subchannel_id schid; - char dbf_text[15]; - int rc; - int pct = 0; - - QDIO_DBF_TEXT0(0, setup, "getpct"); - schid = ccw_device_get_subchannel_id(cdev); - rc = qdio_get_ssqd_information(&schid, &ssqd_area); - if (!rc) - pct = (int)ssqd_area->pct; - if (rc != -ENOMEM) - mempool_free(ssqd_area, qdio_mempool_scssc); - sprintf(dbf_text, "pct: %d", pct); - QDIO_DBF_TEXT2(0, setup, dbf_text); - return pct; -} -EXPORT_SYMBOL(qdio_get_ssqd_pct); - -static void -qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned long token) -{ - struct qdio_q *q; - int i; - unsigned int count, start_buf; - char dbf_text[15]; - - /*check if QEBSM is disabled */ - if (!(irq_ptr->is_qebsm) || !(irq_ptr->qdioac & 0x01)) { - irq_ptr->is_qebsm = 0; - irq_ptr->sch_token = 0; - irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM; - QDIO_DBF_TEXT0(0,setup,"noV=V"); - return; - } - irq_ptr->sch_token = token; - /*input queue*/ - for (i = 0; i < irq_ptr->no_input_qs;i++) { - q = irq_ptr->input_qs[i]; - count = QDIO_MAX_BUFFERS_PER_Q; - start_buf = 0; - set_slsb(q, &start_buf, SLSB_P_INPUT_NOT_INIT, &count); - } - sprintf(dbf_text,"V=V:%2x",irq_ptr->is_qebsm); - QDIO_DBF_TEXT0(0,setup,dbf_text); - sprintf(dbf_text,"%8lx",irq_ptr->sch_token); - QDIO_DBF_TEXT0(0,setup,dbf_text); - /*output queue*/ - for (i = 0; i < irq_ptr->no_output_qs; i++) { - q = irq_ptr->output_qs[i]; - count = QDIO_MAX_BUFFERS_PER_Q; - start_buf = 0; - set_slsb(q, &start_buf, SLSB_P_OUTPUT_NOT_INIT, &count); - } -} - -static void -qdio_get_ssqd_siga(struct qdio_irq *irq_ptr) -{ - int rc; - struct qdio_chsc_ssqd *ssqd_area; - - QDIO_DBF_TEXT0(0,setup,"getssqd"); - irq_ptr->qdioac = 0; - rc = qdio_get_ssqd_information(&irq_ptr->schid, &ssqd_area); - if (rc) { - QDIO_PRINT_WARN("using all SIGAs for sch x%x.n", - irq_ptr->schid.sch_no); - irq_ptr->qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY | - CHSC_FLAG_SIGA_OUTPUT_NECESSARY | - CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */ - irq_ptr->is_qebsm = 0; - } else - irq_ptr->qdioac = ssqd_area->qdioac1; - - qdio_check_subchannel_qebsm(irq_ptr, ssqd_area->sch_token); - if (rc != -ENOMEM) - mempool_free(ssqd_area, qdio_mempool_scssc); -} - -static unsigned int -tiqdio_check_chsc_availability(void) -{ - char dbf_text[15]; - - /* Check for bit 41. */ - if (!css_general_characteristics.aif) { - QDIO_PRINT_WARN("Adapter interruption facility not " \ - "installed.\n"); - return -ENOENT; - } - - /* Check for bits 107 and 108. */ - if (!css_chsc_characteristics.scssc || - !css_chsc_characteristics.scsscf) { - QDIO_PRINT_WARN("Set Chan Subsys. Char. & Fast-CHSCs " \ - "not available.\n"); - return -ENOENT; - } - - /* Check for OSA/FCP thin interrupts (bit 67). */ - hydra_thinints = css_general_characteristics.aif_osa; - sprintf(dbf_text,"hydrati%1x", hydra_thinints); - QDIO_DBF_TEXT0(0,setup,dbf_text); - -#ifdef CONFIG_64BIT - /* Check for QEBSM support in general (bit 58). */ - is_passthrough = css_general_characteristics.qebsm; -#endif - sprintf(dbf_text,"cssQBS:%1x", is_passthrough); - QDIO_DBF_TEXT0(0,setup,dbf_text); - - /* Check for aif time delay disablement fac (bit 56). 
If installed, - * omit svs even under lpar (good point by rick again) */ - omit_svs = css_general_characteristics.aif_tdd; - sprintf(dbf_text,"omitsvs%1x", omit_svs); - QDIO_DBF_TEXT0(0,setup,dbf_text); - return 0; -} - - -static unsigned int -tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero) -{ - unsigned long real_addr_local_summary_bit; - unsigned long real_addr_dev_st_chg_ind; - void *ptr; - char dbf_text[15]; - - unsigned int resp_code; - int result; - - struct { - struct chsc_header request; - u16 operation_code; - u16 reserved1; - u32 reserved2; - u32 reserved3; - u64 summary_indicator_addr; - u64 subchannel_indicator_addr; - u32 ks:4; - u32 kc:4; - u32 reserved4:21; - u32 isc:3; - u32 word_with_d_bit; - /* set to 0x10000000 to enable - * time delay disablement facility */ - u32 reserved5; - struct subchannel_id schid; - u32 reserved6[1004]; - struct chsc_header response; - u32 reserved7; - } *scssc_area; - - if (!irq_ptr->is_thinint_irq) - return -ENODEV; - - if (reset_to_zero) { - real_addr_local_summary_bit=0; - real_addr_dev_st_chg_ind=0; - } else { - real_addr_local_summary_bit= - virt_to_phys((volatile void *)tiqdio_ind); - real_addr_dev_st_chg_ind= - virt_to_phys((volatile void *)irq_ptr->dev_st_chg_ind); - } - - scssc_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC); - if (!scssc_area) { - QDIO_PRINT_WARN("No memory for setting indicators on " \ - "subchannel 0.%x.%x.\n", - irq_ptr->schid.ssid, irq_ptr->schid.sch_no); - return -ENOMEM; - } - scssc_area->request = (struct chsc_header) { - .length = 0x0fe0, - .code = 0x0021, - }; - scssc_area->operation_code = 0; - - scssc_area->summary_indicator_addr = real_addr_local_summary_bit; - scssc_area->subchannel_indicator_addr = real_addr_dev_st_chg_ind; - scssc_area->ks = QDIO_STORAGE_KEY; - scssc_area->kc = QDIO_STORAGE_KEY; - scssc_area->isc = TIQDIO_THININT_ISC; - scssc_area->schid = irq_ptr->schid; - /* enables the time delay disablement facility. Don't care - * whether it is really there (i.e. 
we haven't checked for - * it) */ - if (css_general_characteristics.aif_tdd) - scssc_area->word_with_d_bit = 0x10000000; - else - QDIO_PRINT_WARN("Time delay disablement facility " \ - "not available\n"); - - result = chsc(scssc_area); - if (result) { - QDIO_PRINT_WARN("could not set indicators on irq 0.%x.%x, " \ - "cc=%i.\n", - irq_ptr->schid.ssid, irq_ptr->schid.sch_no,result); - result = -EIO; - goto out; - } - - resp_code = scssc_area->response.code; - if (resp_code!=QDIO_CHSC_RESPONSE_CODE_OK) { - QDIO_PRINT_WARN("response upon setting indicators " \ - "is 0x%x.\n",resp_code); - sprintf(dbf_text,"sidR%4x",resp_code); - QDIO_DBF_TEXT1(0,trace,dbf_text); - QDIO_DBF_TEXT1(0,setup,dbf_text); - ptr=&scssc_area->response; - QDIO_DBF_HEX2(1,setup,&ptr,QDIO_DBF_SETUP_LEN); - result = -EIO; - goto out; - } - - QDIO_DBF_TEXT2(0,setup,"setscind"); - QDIO_DBF_HEX2(0,setup,&real_addr_local_summary_bit, - sizeof(unsigned long)); - QDIO_DBF_HEX2(0,setup,&real_addr_dev_st_chg_ind,sizeof(unsigned long)); - result = 0; -out: - mempool_free(scssc_area, qdio_mempool_scssc); - return result; - -} - -static unsigned int -tiqdio_set_delay_target(struct qdio_irq *irq_ptr, unsigned long delay_target) -{ - unsigned int resp_code; - int result; - void *ptr; - char dbf_text[15]; - - struct { - struct chsc_header request; - u16 operation_code; - u16 reserved1; - u32 reserved2; - u32 reserved3; - u32 reserved4[2]; - u32 delay_target; - u32 reserved5[1009]; - struct chsc_header response; - u32 reserved6; - } *scsscf_area; - - if (!irq_ptr->is_thinint_irq) - return -ENODEV; - - scsscf_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC); - if (!scsscf_area) { - QDIO_PRINT_WARN("No memory for setting delay target on " \ - "subchannel 0.%x.%x.\n", - irq_ptr->schid.ssid, irq_ptr->schid.sch_no); - return -ENOMEM; - } - scsscf_area->request = (struct chsc_header) { - .length = 0x0fe0, - .code = 0x1027, - }; - - scsscf_area->delay_target = delay_target<<16; - - result=chsc(scsscf_area); - if (result) { - QDIO_PRINT_WARN("could not set delay target on irq 0.%x.%x, " \ - "cc=%i. Continuing.\n", - irq_ptr->schid.ssid, irq_ptr->schid.sch_no, - result); - result = -EIO; - goto out; - } - - resp_code = scsscf_area->response.code; - if (resp_code!=QDIO_CHSC_RESPONSE_CODE_OK) { - QDIO_PRINT_WARN("response upon setting delay target " \ - "is 0x%x. 
Continuing.\n",resp_code); - sprintf(dbf_text,"sdtR%4x",resp_code); - QDIO_DBF_TEXT1(0,trace,dbf_text); - QDIO_DBF_TEXT1(0,setup,dbf_text); - ptr=&scsscf_area->response; - QDIO_DBF_HEX2(1,trace,&ptr,QDIO_DBF_TRACE_LEN); - } - QDIO_DBF_TEXT2(0,trace,"delytrgt"); - QDIO_DBF_HEX2(0,trace,&delay_target,sizeof(unsigned long)); - result = 0; /* not critical */ -out: - mempool_free(scsscf_area, qdio_mempool_scssc); - return result; -} - -int -qdio_cleanup(struct ccw_device *cdev, int how) -{ - struct qdio_irq *irq_ptr; - char dbf_text[15]; - int rc; - - irq_ptr = cdev->private->qdio_data; - if (!irq_ptr) - return -ENODEV; - - sprintf(dbf_text,"qcln%4x",irq_ptr->schid.sch_no); - QDIO_DBF_TEXT1(0,trace,dbf_text); - QDIO_DBF_TEXT0(0,setup,dbf_text); - - rc = qdio_shutdown(cdev, how); - if ((rc == 0) || (rc == -EINPROGRESS)) - rc = qdio_free(cdev); - return rc; -} - -int -qdio_shutdown(struct ccw_device *cdev, int how) -{ - struct qdio_irq *irq_ptr; - int i; - int result = 0; - int rc; - unsigned long flags; - int timeout; - char dbf_text[15]; - - irq_ptr = cdev->private->qdio_data; - if (!irq_ptr) - return -ENODEV; - - down(&irq_ptr->setting_up_sema); - - sprintf(dbf_text,"qsqs%4x",irq_ptr->schid.sch_no); - QDIO_DBF_TEXT1(0,trace,dbf_text); - QDIO_DBF_TEXT0(0,setup,dbf_text); - - /* mark all qs as uninteresting */ - for (i=0;i<irq_ptr->no_input_qs;i++) - atomic_set(&irq_ptr->input_qs[i]->is_in_shutdown,1); - - for (i=0;i<irq_ptr->no_output_qs;i++) - atomic_set(&irq_ptr->output_qs[i]->is_in_shutdown,1); - - tasklet_kill(&tiqdio_tasklet); - - for (i=0;i<irq_ptr->no_input_qs;i++) { - qdio_unmark_q(irq_ptr->input_qs[i]); - tasklet_kill(&irq_ptr->input_qs[i]->tasklet); - wait_event_interruptible_timeout(cdev->private->wait_q, - !atomic_read(&irq_ptr-> - input_qs[i]-> - use_count), - QDIO_NO_USE_COUNT_TIMEOUT); - if (atomic_read(&irq_ptr->input_qs[i]->use_count)) - result=-EINPROGRESS; - } - - for (i=0;i<irq_ptr->no_output_qs;i++) { - tasklet_kill(&irq_ptr->output_qs[i]->tasklet); - del_timer(&irq_ptr->output_qs[i]->timer); - wait_event_interruptible_timeout(cdev->private->wait_q, - !atomic_read(&irq_ptr-> - output_qs[i]-> - use_count), - QDIO_NO_USE_COUNT_TIMEOUT); - if (atomic_read(&irq_ptr->output_qs[i]->use_count)) - result=-EINPROGRESS; - } - - /* cleanup subchannel */ - spin_lock_irqsave(get_ccwdev_lock(cdev),flags); - if (how&QDIO_FLAG_CLEANUP_USING_CLEAR) { - rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP); - timeout=QDIO_CLEANUP_CLEAR_TIMEOUT; - } else if (how&QDIO_FLAG_CLEANUP_USING_HALT) { - rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP); - timeout=QDIO_CLEANUP_HALT_TIMEOUT; - } else { /* default behaviour */ - rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP); - timeout=QDIO_CLEANUP_HALT_TIMEOUT; - } - if (rc == -ENODEV) { - /* No need to wait for device no longer present. */ - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); - spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); - } else if (((void *)cdev->handler != (void *)qdio_handler) && rc == 0) { - /* - * Whoever put another handler there, has to cope with the - * interrupt theirself. Might happen if qdio_shutdown was - * called on already shutdown queues, but this shouldn't have - * bad side effects. 
- */ - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); - spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); - } else if (rc == 0) { - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP); - spin_unlock_irqrestore(get_ccwdev_lock(cdev),flags); - - wait_event_interruptible_timeout(cdev->private->wait_q, - irq_ptr->state == QDIO_IRQ_STATE_INACTIVE || - irq_ptr->state == QDIO_IRQ_STATE_ERR, - timeout); - } else { - QDIO_PRINT_INFO("ccw_device_{halt,clear} returned %d for " - "device %s\n", result, cdev->dev.bus_id); - spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); - result = rc; - goto out; - } - if (irq_ptr->is_thinint_irq) { - qdio_put_indicator((__u32*)irq_ptr->dev_st_chg_ind); - tiqdio_set_subchannel_ind(irq_ptr,1); - /* reset adapter interrupt indicators */ - } - - /* exchange int handlers, if necessary */ - if ((void*)cdev->handler == (void*)qdio_handler) - cdev->handler=irq_ptr->original_int_handler; - - /* Ignore errors. */ - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); -out: - up(&irq_ptr->setting_up_sema); - return result; -} - -int -qdio_free(struct ccw_device *cdev) -{ - struct qdio_irq *irq_ptr; - char dbf_text[15]; - - irq_ptr = cdev->private->qdio_data; - if (!irq_ptr) - return -ENODEV; - - down(&irq_ptr->setting_up_sema); - - sprintf(dbf_text,"qfqs%4x",irq_ptr->schid.sch_no); - QDIO_DBF_TEXT1(0,trace,dbf_text); - QDIO_DBF_TEXT0(0,setup,dbf_text); - - cdev->private->qdio_data = NULL; - - up(&irq_ptr->setting_up_sema); - - qdio_release_irq_memory(irq_ptr); - module_put(THIS_MODULE); - return 0; -} - -static void -qdio_allocate_do_dbf(struct qdio_initialize *init_data) -{ - char dbf_text[20]; /* if a printf printed out more than 8 chars */ - - sprintf(dbf_text,"qfmt:%x",init_data->q_format); - QDIO_DBF_TEXT0(0,setup,dbf_text); - QDIO_DBF_HEX0(0,setup,init_data->adapter_name,8); - sprintf(dbf_text,"qpff%4x",init_data->qib_param_field_format); - QDIO_DBF_TEXT0(0,setup,dbf_text); - QDIO_DBF_HEX0(0,setup,&init_data->qib_param_field,sizeof(char*)); - QDIO_DBF_HEX0(0,setup,&init_data->input_slib_elements,sizeof(long*)); - QDIO_DBF_HEX0(0,setup,&init_data->output_slib_elements,sizeof(long*)); - sprintf(dbf_text,"miit%4x",init_data->min_input_threshold); - QDIO_DBF_TEXT0(0,setup,dbf_text); - sprintf(dbf_text,"mait%4x",init_data->max_input_threshold); - QDIO_DBF_TEXT0(0,setup,dbf_text); - sprintf(dbf_text,"miot%4x",init_data->min_output_threshold); - QDIO_DBF_TEXT0(0,setup,dbf_text); - sprintf(dbf_text,"maot%4x",init_data->max_output_threshold); - QDIO_DBF_TEXT0(0,setup,dbf_text); - sprintf(dbf_text,"niq:%4x",init_data->no_input_qs); - QDIO_DBF_TEXT0(0,setup,dbf_text); - sprintf(dbf_text,"noq:%4x",init_data->no_output_qs); - QDIO_DBF_TEXT0(0,setup,dbf_text); - QDIO_DBF_HEX0(0,setup,&init_data->input_handler,sizeof(void*)); - QDIO_DBF_HEX0(0,setup,&init_data->output_handler,sizeof(void*)); - QDIO_DBF_HEX0(0,setup,&init_data->int_parm,sizeof(long)); - QDIO_DBF_HEX0(0,setup,&init_data->flags,sizeof(long)); - QDIO_DBF_HEX0(0,setup,&init_data->input_sbal_addr_array,sizeof(void*)); - QDIO_DBF_HEX0(0,setup,&init_data->output_sbal_addr_array,sizeof(void*)); -} - -static void -qdio_allocate_fill_input_desc(struct qdio_irq *irq_ptr, int i, int iqfmt) -{ - irq_ptr->input_qs[i]->is_iqdio_q = iqfmt; - irq_ptr->input_qs[i]->is_thinint_q = irq_ptr->is_thinint_irq; - - irq_ptr->qdr->qdf0[i].sliba=(unsigned long)(irq_ptr->input_qs[i]->slib); - - irq_ptr->qdr->qdf0[i].sla=(unsigned long)(irq_ptr->input_qs[i]->sl); - - irq_ptr->qdr->qdf0[i].slsba= - (unsigned 
long)(&irq_ptr->input_qs[i]->slsb.acc.val[0]); - - irq_ptr->qdr->qdf0[i].akey=QDIO_STORAGE_KEY; - irq_ptr->qdr->qdf0[i].bkey=QDIO_STORAGE_KEY; - irq_ptr->qdr->qdf0[i].ckey=QDIO_STORAGE_KEY; - irq_ptr->qdr->qdf0[i].dkey=QDIO_STORAGE_KEY; -} - -static void -qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i, - int j, int iqfmt) -{ - irq_ptr->output_qs[i]->is_iqdio_q = iqfmt; - irq_ptr->output_qs[i]->is_thinint_q = irq_ptr->is_thinint_irq; - - irq_ptr->qdr->qdf0[i+j].sliba=(unsigned long)(irq_ptr->output_qs[i]->slib); - - irq_ptr->qdr->qdf0[i+j].sla=(unsigned long)(irq_ptr->output_qs[i]->sl); - - irq_ptr->qdr->qdf0[i+j].slsba= - (unsigned long)(&irq_ptr->output_qs[i]->slsb.acc.val[0]); - - irq_ptr->qdr->qdf0[i+j].akey=QDIO_STORAGE_KEY; - irq_ptr->qdr->qdf0[i+j].bkey=QDIO_STORAGE_KEY; - irq_ptr->qdr->qdf0[i+j].ckey=QDIO_STORAGE_KEY; - irq_ptr->qdr->qdf0[i+j].dkey=QDIO_STORAGE_KEY; -} - - -static void -qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr) -{ - int i; - - for (i=0;i<irq_ptr->no_input_qs;i++) { - irq_ptr->input_qs[i]->siga_sync= - irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY; - irq_ptr->input_qs[i]->siga_in= - irq_ptr->qdioac&CHSC_FLAG_SIGA_INPUT_NECESSARY; - irq_ptr->input_qs[i]->siga_out= - irq_ptr->qdioac&CHSC_FLAG_SIGA_OUTPUT_NECESSARY; - irq_ptr->input_qs[i]->siga_sync_done_on_thinints= - irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS; - irq_ptr->input_qs[i]->hydra_gives_outbound_pcis= - irq_ptr->hydra_gives_outbound_pcis; - irq_ptr->input_qs[i]->siga_sync_done_on_outb_tis= - ((irq_ptr->qdioac& - (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS| - CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS))== - (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS| - CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS)); - - } -} - -static void -qdio_initialize_set_siga_flags_output(struct qdio_irq *irq_ptr) -{ - int i; - - for (i=0;i<irq_ptr->no_output_qs;i++) { - irq_ptr->output_qs[i]->siga_sync= - irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY; - irq_ptr->output_qs[i]->siga_in= - irq_ptr->qdioac&CHSC_FLAG_SIGA_INPUT_NECESSARY; - irq_ptr->output_qs[i]->siga_out= - irq_ptr->qdioac&CHSC_FLAG_SIGA_OUTPUT_NECESSARY; - irq_ptr->output_qs[i]->siga_sync_done_on_thinints= - irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS; - irq_ptr->output_qs[i]->hydra_gives_outbound_pcis= - irq_ptr->hydra_gives_outbound_pcis; - irq_ptr->output_qs[i]->siga_sync_done_on_outb_tis= - ((irq_ptr->qdioac& - (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS| - CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS))== - (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS| - CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS)); - - } -} - -static int -qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat, - int dstat) -{ - char dbf_text[15]; - struct qdio_irq *irq_ptr; - - irq_ptr = cdev->private->qdio_data; - - if (cstat || (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END))) { - sprintf(dbf_text,"ick1%4x",irq_ptr->schid.sch_no); - QDIO_DBF_TEXT2(1,trace,dbf_text); - QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int)); - QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int)); - QDIO_PRINT_ERR("received check condition on establish " \ - "queues on irq 0.%x.%x (cs=x%x, ds=x%x).\n", - irq_ptr->schid.ssid, irq_ptr->schid.sch_no, - cstat,dstat); - qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ERR); - } - - if (!(dstat & DEV_STAT_DEV_END)) { - QDIO_DBF_TEXT2(1,setup,"eq:no de"); - QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat)); - QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat)); - QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: didn't get " - "device end: dstat=%02x, cstat=%02x\n", - irq_ptr->schid.ssid, 
irq_ptr->schid.sch_no, - dstat, cstat); - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); - return 1; - } - - if (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END)) { - QDIO_DBF_TEXT2(1,setup,"eq:badio"); - QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat)); - QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat)); - QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: got " - "the following devstat: dstat=%02x, " - "cstat=%02x\n", irq_ptr->schid.ssid, - irq_ptr->schid.sch_no, dstat, cstat); - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); - return 1; - } - return 0; -} - -static void -qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, int dstat) -{ - struct qdio_irq *irq_ptr; - char dbf_text[15]; - - irq_ptr = cdev->private->qdio_data; - - sprintf(dbf_text,"qehi%4x",cdev->private->schid.sch_no); - QDIO_DBF_TEXT0(0,setup,dbf_text); - QDIO_DBF_TEXT0(0,trace,dbf_text); - - if (qdio_establish_irq_check_for_errors(cdev, cstat, dstat)) - return; - - qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ESTABLISHED); -} - -int -qdio_initialize(struct qdio_initialize *init_data) -{ - int rc; - char dbf_text[15]; - - sprintf(dbf_text,"qini%4x",init_data->cdev->private->schid.sch_no); - QDIO_DBF_TEXT0(0,setup,dbf_text); - QDIO_DBF_TEXT0(0,trace,dbf_text); - - rc = qdio_allocate(init_data); - if (rc == 0) { - rc = qdio_establish(init_data); - if (rc != 0) - qdio_free(init_data->cdev); - } - - return rc; -} - - -int -qdio_allocate(struct qdio_initialize *init_data) -{ - struct qdio_irq *irq_ptr; - char dbf_text[15]; - - sprintf(dbf_text,"qalc%4x",init_data->cdev->private->schid.sch_no); - QDIO_DBF_TEXT0(0,setup,dbf_text); - QDIO_DBF_TEXT0(0,trace,dbf_text); - if ( (init_data->no_input_qs>QDIO_MAX_QUEUES_PER_IRQ) || - (init_data->no_output_qs>QDIO_MAX_QUEUES_PER_IRQ) || - ((init_data->no_input_qs) && (!init_data->input_handler)) || - ((init_data->no_output_qs) && (!init_data->output_handler)) ) - return -EINVAL; - - if (!init_data->input_sbal_addr_array) - return -EINVAL; - - if (!init_data->output_sbal_addr_array) - return -EINVAL; - - qdio_allocate_do_dbf(init_data); - - /* create irq */ - irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); - - QDIO_DBF_TEXT0(0,setup,"irq_ptr:"); - QDIO_DBF_HEX0(0,setup,&irq_ptr,sizeof(void*)); - - if (!irq_ptr) { - QDIO_PRINT_ERR("allocation of irq_ptr failed!\n"); - return -ENOMEM; - } - - init_MUTEX(&irq_ptr->setting_up_sema); - - /* QDR must be in DMA area since CCW data address is only 32 bit */ - irq_ptr->qdr = (struct qdr *) __get_free_page(GFP_KERNEL | GFP_DMA); - if (!(irq_ptr->qdr)) { - free_page((unsigned long) irq_ptr); - QDIO_PRINT_ERR("allocation of irq_ptr->qdr failed!\n"); - return -ENOMEM; - } - QDIO_DBF_TEXT0(0,setup,"qdr:"); - QDIO_DBF_HEX0(0,setup,&irq_ptr->qdr,sizeof(void*)); - - if (qdio_alloc_qs(irq_ptr, - init_data->no_input_qs, - init_data->no_output_qs)) { - QDIO_PRINT_ERR("queue allocation failed!\n"); - qdio_release_irq_memory(irq_ptr); - return -ENOMEM; - } - - init_data->cdev->private->qdio_data = irq_ptr; - - qdio_set_state(irq_ptr,QDIO_IRQ_STATE_INACTIVE); - - return 0; -} - -static int qdio_fill_irq(struct qdio_initialize *init_data) -{ - int i; - char dbf_text[15]; - struct ciw *ciw; - int is_iqdio; - struct qdio_irq *irq_ptr; - - irq_ptr = init_data->cdev->private->qdio_data; - - memset(irq_ptr,0,((char*)&irq_ptr->qdr)-((char*)irq_ptr)); - - /* wipes qib.ac, required by ar7063 */ - memset(irq_ptr->qdr,0,sizeof(struct qdr)); - - irq_ptr->int_parm=init_data->int_parm; - - irq_ptr->schid = ccw_device_get_subchannel_id(init_data->cdev); - 
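/*
 * Orientation sketch (hypothetical, not taken from any driver): the exported
 * entry points removed in this patch -- qdio_allocate/qdio_establish (or
 * qdio_initialize), qdio_activate, do_QDIO and qdio_shutdown/qdio_free -- are
 * driven by an upper-layer driver such as qeth or zfcp.  The function name
 * example_qdio_bringup, the single-queue setup and the ZFCP queue format are
 * assumptions; the qdio_* calls, flags and struct qdio_initialize fields are
 * the ones referenced in this file.
 */
#include <asm/ccwdev.h>		/* struct ccw_device */
#include <asm/qdio.h>		/* struct qdio_initialize, qdio_* prototypes */

static int example_qdio_bringup(struct ccw_device *cdev,
				void **in_sbals, void **out_sbals,
				qdio_handler_t *in_handler,
				qdio_handler_t *out_handler)
{
	struct qdio_initialize init_data = {
		.cdev			= cdev,
		.q_format		= QDIO_ZFCP_QFMT,	/* assumed format */
		.no_input_qs		= 1,
		.no_output_qs		= 1,
		.input_handler		= in_handler,
		.output_handler		= out_handler,
		.int_parm		= (unsigned long) cdev,
		.input_sbal_addr_array	= in_sbals,
		.output_sbal_addr_array	= out_sbals,
	};
	int rc;

	/* qdio_initialize() is qdio_allocate() followed by qdio_establish() */
	rc = qdio_initialize(&init_data);
	if (rc)
		return rc;

	/* start queue processing; buffers are then exchanged via do_QDIO() */
	rc = qdio_activate(cdev, 0);
	if (rc)
		qdio_cleanup(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
	return rc;
}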
irq_ptr->no_input_qs=init_data->no_input_qs; - irq_ptr->no_output_qs=init_data->no_output_qs; - - if (init_data->q_format==QDIO_IQDIO_QFMT) { - irq_ptr->is_iqdio_irq=1; - irq_ptr->is_thinint_irq=1; - } else { - irq_ptr->is_iqdio_irq=0; - irq_ptr->is_thinint_irq=hydra_thinints; - } - sprintf(dbf_text,"is_i_t%1x%1x", - irq_ptr->is_iqdio_irq,irq_ptr->is_thinint_irq); - QDIO_DBF_TEXT2(0,setup,dbf_text); - - if (irq_ptr->is_thinint_irq) { - irq_ptr->dev_st_chg_ind = qdio_get_indicator(); - QDIO_DBF_HEX1(0,setup,&irq_ptr->dev_st_chg_ind,sizeof(void*)); - if (!irq_ptr->dev_st_chg_ind) { - QDIO_PRINT_WARN("no indicator location available " \ - "for irq 0.%x.%x\n", - irq_ptr->schid.ssid, irq_ptr->schid.sch_no); - qdio_release_irq_memory(irq_ptr); - return -ENOBUFS; - } - } - - /* defaults */ - irq_ptr->equeue.cmd=DEFAULT_ESTABLISH_QS_CMD; - irq_ptr->equeue.count=DEFAULT_ESTABLISH_QS_COUNT; - irq_ptr->aqueue.cmd=DEFAULT_ACTIVATE_QS_CMD; - irq_ptr->aqueue.count=DEFAULT_ACTIVATE_QS_COUNT; - - qdio_fill_qs(irq_ptr, init_data->cdev, - init_data->no_input_qs, - init_data->no_output_qs, - init_data->input_handler, - init_data->output_handler,init_data->int_parm, - init_data->q_format,init_data->flags, - init_data->input_sbal_addr_array, - init_data->output_sbal_addr_array); - - if (!try_module_get(THIS_MODULE)) { - QDIO_PRINT_CRIT("try_module_get() failed!\n"); - qdio_release_irq_memory(irq_ptr); - return -EINVAL; - } - - qdio_fill_thresholds(irq_ptr,init_data->no_input_qs, - init_data->no_output_qs, - init_data->min_input_threshold, - init_data->max_input_threshold, - init_data->min_output_threshold, - init_data->max_output_threshold); - - /* fill in qdr */ - irq_ptr->qdr->qfmt=init_data->q_format; - irq_ptr->qdr->iqdcnt=init_data->no_input_qs; - irq_ptr->qdr->oqdcnt=init_data->no_output_qs; - irq_ptr->qdr->iqdsz=sizeof(struct qdesfmt0)/4; /* size in words */ - irq_ptr->qdr->oqdsz=sizeof(struct qdesfmt0)/4; - - irq_ptr->qdr->qiba=(unsigned long)&irq_ptr->qib; - irq_ptr->qdr->qkey=QDIO_STORAGE_KEY; - - /* fill in qib */ - irq_ptr->is_qebsm = is_passthrough; - if (irq_ptr->is_qebsm) - irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM; - - irq_ptr->qib.qfmt=init_data->q_format; - if (init_data->no_input_qs) - irq_ptr->qib.isliba=(unsigned long)(irq_ptr->input_qs[0]->slib); - if (init_data->no_output_qs) - irq_ptr->qib.osliba=(unsigned long)(irq_ptr->output_qs[0]->slib); - memcpy(irq_ptr->qib.ebcnam,init_data->adapter_name,8); - - qdio_set_impl_params(irq_ptr,init_data->qib_param_field_format, - init_data->qib_param_field, - init_data->no_input_qs, - init_data->no_output_qs, - init_data->input_slib_elements, - init_data->output_slib_elements); - - /* first input descriptors, then output descriptors */ - is_iqdio = (init_data->q_format == QDIO_IQDIO_QFMT) ? 1 : 0; - for (i=0;i<init_data->no_input_qs;i++) - qdio_allocate_fill_input_desc(irq_ptr, i, is_iqdio); - - for (i=0;i<init_data->no_output_qs;i++) - qdio_allocate_fill_output_desc(irq_ptr, i, - init_data->no_input_qs, - is_iqdio); - - /* qdr, qib, sls, slsbs, slibs, sbales filled. */ - - /* get qdio commands */ - ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE); - if (!ciw) { - QDIO_DBF_TEXT2(1,setup,"no eq"); - QDIO_PRINT_INFO("No equeue CIW found for QDIO commands. " - "Trying to use default.\n"); - } else - irq_ptr->equeue = *ciw; - ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE); - if (!ciw) { - QDIO_DBF_TEXT2(1,setup,"no aq"); - QDIO_PRINT_INFO("No aqueue CIW found for QDIO commands. 
" - "Trying to use default.\n"); - } else - irq_ptr->aqueue = *ciw; - - /* Set new interrupt handler. */ - irq_ptr->original_int_handler = init_data->cdev->handler; - init_data->cdev->handler = qdio_handler; - - return 0; -} - -int -qdio_establish(struct qdio_initialize *init_data) -{ - struct qdio_irq *irq_ptr; - unsigned long saveflags; - int result, result2; - struct ccw_device *cdev; - char dbf_text[20]; - - cdev=init_data->cdev; - irq_ptr = cdev->private->qdio_data; - if (!irq_ptr) - return -EINVAL; - - if (cdev->private->state != DEV_STATE_ONLINE) - return -EINVAL; - - down(&irq_ptr->setting_up_sema); - - qdio_fill_irq(init_data); - - /* the thinint CHSC stuff */ - if (irq_ptr->is_thinint_irq) { - - result = tiqdio_set_subchannel_ind(irq_ptr,0); - if (result) { - up(&irq_ptr->setting_up_sema); - qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); - return result; - } - tiqdio_set_delay_target(irq_ptr,TIQDIO_DELAY_TARGET); - } - - sprintf(dbf_text,"qest%4x",cdev->private->schid.sch_no); - QDIO_DBF_TEXT0(0,setup,dbf_text); - QDIO_DBF_TEXT0(0,trace,dbf_text); - - /* establish q */ - irq_ptr->ccw.cmd_code=irq_ptr->equeue.cmd; - irq_ptr->ccw.flags=CCW_FLAG_SLI; - irq_ptr->ccw.count=irq_ptr->equeue.count; - irq_ptr->ccw.cda=QDIO_GET_ADDR(irq_ptr->qdr); - - spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags); - - ccw_device_set_options_mask(cdev, 0); - result = ccw_device_start(cdev, &irq_ptr->ccw, - QDIO_DOING_ESTABLISH, 0, 0); - if (result) { - result2 = ccw_device_start(cdev, &irq_ptr->ccw, - QDIO_DOING_ESTABLISH, 0, 0); - sprintf(dbf_text,"eq:io%4x",result); - QDIO_DBF_TEXT2(1,setup,dbf_text); - if (result2) { - sprintf(dbf_text,"eq:io%4x",result); - QDIO_DBF_TEXT2(1,setup,dbf_text); - } - QDIO_PRINT_WARN("establish queues on irq 0.%x.%04x: do_IO " \ - "returned %i, next try returned %i\n", - irq_ptr->schid.ssid, irq_ptr->schid.sch_no, - result, result2); - result=result2; - } - - spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags); - - if (result) { - up(&irq_ptr->setting_up_sema); - qdio_shutdown(cdev,QDIO_FLAG_CLEANUP_USING_CLEAR); - return result; - } - - wait_event_interruptible_timeout(cdev->private->wait_q, - irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED || - irq_ptr->state == QDIO_IRQ_STATE_ERR, - QDIO_ESTABLISH_TIMEOUT); - - if (irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED) - result = 0; - else { - up(&irq_ptr->setting_up_sema); - qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); - return -EIO; - } - - qdio_get_ssqd_siga(irq_ptr); - /* if this gets set once, we're running under VM and can omit SVSes */ - if (irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY) - omit_svs=1; - - sprintf(dbf_text,"qdioac%2x",irq_ptr->qdioac); - QDIO_DBF_TEXT2(0,setup,dbf_text); - - sprintf(dbf_text,"qib ac%2x",irq_ptr->qib.ac); - QDIO_DBF_TEXT2(0,setup,dbf_text); - - irq_ptr->hydra_gives_outbound_pcis= - irq_ptr->qib.ac&QIB_AC_OUTBOUND_PCI_SUPPORTED; - irq_ptr->sync_done_on_outb_pcis= - irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS; - - qdio_initialize_set_siga_flags_input(irq_ptr); - qdio_initialize_set_siga_flags_output(irq_ptr); - - up(&irq_ptr->setting_up_sema); - - return result; - -} - -int -qdio_activate(struct ccw_device *cdev, int flags) -{ - struct qdio_irq *irq_ptr; - int i,result=0,result2; - unsigned long saveflags; - char dbf_text[20]; /* see qdio_initialize */ - - irq_ptr = cdev->private->qdio_data; - if (!irq_ptr) - return -ENODEV; - - if (cdev->private->state != DEV_STATE_ONLINE) - return -EINVAL; - - down(&irq_ptr->setting_up_sema); - if 
(irq_ptr->state==QDIO_IRQ_STATE_INACTIVE) { - result=-EBUSY; - goto out; - } - - sprintf(dbf_text,"qact%4x", irq_ptr->schid.sch_no); - QDIO_DBF_TEXT2(0,setup,dbf_text); - QDIO_DBF_TEXT2(0,trace,dbf_text); - - /* activate q */ - irq_ptr->ccw.cmd_code=irq_ptr->aqueue.cmd; - irq_ptr->ccw.flags=CCW_FLAG_SLI; - irq_ptr->ccw.count=irq_ptr->aqueue.count; - irq_ptr->ccw.cda=QDIO_GET_ADDR(0); - - spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags); - - ccw_device_set_options(cdev, CCWDEV_REPORT_ALL); - result=ccw_device_start(cdev,&irq_ptr->ccw,QDIO_DOING_ACTIVATE, - 0, DOIO_DENY_PREFETCH); - if (result) { - result2=ccw_device_start(cdev,&irq_ptr->ccw, - QDIO_DOING_ACTIVATE,0,0); - sprintf(dbf_text,"aq:io%4x",result); - QDIO_DBF_TEXT2(1,setup,dbf_text); - if (result2) { - sprintf(dbf_text,"aq:io%4x",result); - QDIO_DBF_TEXT2(1,setup,dbf_text); - } - QDIO_PRINT_WARN("activate queues on irq 0.%x.%04x: do_IO " \ - "returned %i, next try returned %i\n", - irq_ptr->schid.ssid, irq_ptr->schid.sch_no, - result, result2); - result=result2; - } - - spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags); - if (result) - goto out; - - for (i=0;i<irq_ptr->no_input_qs;i++) { - if (irq_ptr->is_thinint_irq) { - /* - * that way we know, that, if we will get interrupted - * by tiqdio_inbound_processing, qdio_unmark_q will - * not be called - */ - qdio_reserve_q(irq_ptr->input_qs[i]); - qdio_mark_tiq(irq_ptr->input_qs[i]); - qdio_release_q(irq_ptr->input_qs[i]); - } - } - - if (flags&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT) { - for (i=0;i<irq_ptr->no_input_qs;i++) { - irq_ptr->input_qs[i]->is_input_q|= - QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT; - } - } - - msleep(QDIO_ACTIVATE_TIMEOUT); - switch (irq_ptr->state) { - case QDIO_IRQ_STATE_STOPPED: - case QDIO_IRQ_STATE_ERR: - up(&irq_ptr->setting_up_sema); - qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); - down(&irq_ptr->setting_up_sema); - result = -EIO; - break; - default: - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE); - result = 0; - } - out: - up(&irq_ptr->setting_up_sema); - - return result; -} - -/* buffers filled forwards again to make Rick happy */ -static void -qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx, - unsigned int count, struct qdio_buffer *buffers) -{ - struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr; - int tmp = 0; - - qidx &= (QDIO_MAX_BUFFERS_PER_Q - 1); - if (irq->is_qebsm) { - while (count) { - tmp = set_slsb(q, &qidx, SLSB_CU_INPUT_EMPTY, &count); - if (!tmp) - return; - } - return; - } - for (;;) { - set_slsb(q, &qidx, SLSB_CU_INPUT_EMPTY, &count); - count--; - if (!count) break; - qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1); - } -} - -static void -qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx, - unsigned int count, struct qdio_buffer *buffers) -{ - struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr; - int tmp = 0; - - qidx &= (QDIO_MAX_BUFFERS_PER_Q - 1); - if (irq->is_qebsm) { - while (count) { - tmp = set_slsb(q, &qidx, SLSB_CU_OUTPUT_PRIMED, &count); - if (!tmp) - return; - } - return; - } - - for (;;) { - set_slsb(q, &qidx, SLSB_CU_OUTPUT_PRIMED, &count); - count--; - if (!count) break; - qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1); - } -} - -static void -do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags, - unsigned int qidx, unsigned int count, - struct qdio_buffer *buffers) -{ - int used_elements; - - /* This is the inbound handling of queues */ - used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count; - - qdio_do_qdio_fill_input(q,qidx,count,buffers); - 
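/*
 * At this point the buffers the upper layer has finished with were handed
 * back to the adapter: qdio_do_qdio_fill_input() set their SLSB entries to
 * SLSB_CU_INPUT_EMPTY, i.e. owned by the control unit again.  The code below
 * then (1) stops input polling once the queue is completely refilled while
 * running under interrupt, (2) issues an input SIGA via qdio_siga_input()
 * only if the queue had run empty, SIGA is not suppressed by
 * QDIO_FLAG_DONT_SIGA and the adapter needs the initiative (q->siga_in),
 * and (3) re-marks the queue for inbound processing.
 */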
- if ((used_elements+count==QDIO_MAX_BUFFERS_PER_Q)&& - (callflags&QDIO_FLAG_UNDER_INTERRUPT)) - atomic_xchg(&q->polling,0); - - if (used_elements) - return; - if (callflags&QDIO_FLAG_DONT_SIGA) - return; - if (q->siga_in) { - int result; - - result=qdio_siga_input(q); - if (result) { - if (q->siga_error) - q->error_status_flags|= - QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR; - q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR; - q->siga_error=result; - } - } - - qdio_mark_q(q); -} - -static void -do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags, - unsigned int qidx, unsigned int count, - struct qdio_buffer *buffers) -{ - int used_elements; - unsigned int cnt, start_buf; - unsigned char state = 0; - struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr; - - /* This is the outbound handling of queues */ - qdio_do_qdio_fill_output(q,qidx,count,buffers); - - used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count; - - if (callflags&QDIO_FLAG_DONT_SIGA) { - qdio_perf_stat_inc(&perf_stats.outbound_cnt); - return; - } - if (callflags & QDIO_FLAG_PCI_OUT) - q->is_pci_out = 1; - else - q->is_pci_out = 0; - if (q->is_iqdio_q) { - /* one siga for every sbal */ - while (count--) - qdio_kick_outbound_q(q); - - __qdio_outbound_processing(q); - } else { - /* under VM, we do a SIGA sync unconditionally */ - SYNC_MEMORY; - else { - /* - * w/o shadow queues (else branch of - * SYNC_MEMORY :-/ ), we try to - * fast-requeue buffers - */ - if (irq->is_qebsm) { - cnt = 1; - start_buf = ((qidx+QDIO_MAX_BUFFERS_PER_Q-1) & - (QDIO_MAX_BUFFERS_PER_Q-1)); - qdio_do_eqbs(q, &state, &start_buf, &cnt); - } else - state = q->slsb.acc.val[(qidx+QDIO_MAX_BUFFERS_PER_Q-1) - &(QDIO_MAX_BUFFERS_PER_Q-1) ]; - if (state != SLSB_CU_OUTPUT_PRIMED) { - qdio_kick_outbound_q(q); - } else { - QDIO_DBF_TEXT3(0,trace, "fast-req"); - qdio_perf_stat_inc(&perf_stats.fast_reqs); - } - } - /* - * only marking the q could take too long, - * the upper layer module could do a lot of - * traffic in that time - */ - __qdio_outbound_processing(q); - } - - qdio_perf_stat_inc(&perf_stats.outbound_cnt); -} - -/* count must be 1 in iqdio */ -int -do_QDIO(struct ccw_device *cdev,unsigned int callflags, - unsigned int queue_number, unsigned int qidx, - unsigned int count,struct qdio_buffer *buffers) -{ - struct qdio_irq *irq_ptr; -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[20]; - - sprintf(dbf_text,"doQD%04x",cdev->private->schid.sch_no); - QDIO_DBF_TEXT3(0,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - - if ( (qidx>QDIO_MAX_BUFFERS_PER_Q) || - (count>QDIO_MAX_BUFFERS_PER_Q) || - (queue_number>QDIO_MAX_QUEUES_PER_IRQ) ) - return -EINVAL; - - if (count==0) - return 0; - - irq_ptr = cdev->private->qdio_data; - if (!irq_ptr) - return -ENODEV; - -#ifdef CONFIG_QDIO_DEBUG - if (callflags&QDIO_FLAG_SYNC_INPUT) - QDIO_DBF_HEX3(0,trace,&irq_ptr->input_qs[queue_number], - sizeof(void*)); - else - QDIO_DBF_HEX3(0,trace,&irq_ptr->output_qs[queue_number], - sizeof(void*)); - sprintf(dbf_text,"flag%04x",callflags); - QDIO_DBF_TEXT3(0,trace,dbf_text); - sprintf(dbf_text,"qi%02xct%02x",qidx,count); - QDIO_DBF_TEXT3(0,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - - if (irq_ptr->state!=QDIO_IRQ_STATE_ACTIVE) - return -EBUSY; - - if (callflags&QDIO_FLAG_SYNC_INPUT) - do_qdio_handle_inbound(irq_ptr->input_qs[queue_number], - callflags, qidx, count, buffers); - else if (callflags&QDIO_FLAG_SYNC_OUTPUT) - do_qdio_handle_outbound(irq_ptr->output_qs[queue_number], - callflags, qidx, count, buffers); - else { - 
QDIO_DBF_TEXT3(1,trace,"doQD:inv"); - return -EINVAL; - } - return 0; -} - -static int -qdio_perf_procfile_read(char *buffer, char **buffer_location, off_t offset, - int buffer_length, int *eof, void *data) -{ - int c=0; - - /* we are always called with buffer_length=4k, so we all - deliver on the first read */ - if (offset>0) - return 0; - -#define _OUTP_IT(x...) c+=sprintf(buffer+c,x) -#ifdef CONFIG_64BIT - _OUTP_IT("Number of tasklet runs (total) : %li\n", - (long)atomic64_read(&perf_stats.tl_runs)); - _OUTP_IT("Inbound tasklet runs tried/retried : %li/%li\n", - (long)atomic64_read(&perf_stats.inbound_tl_runs), - (long)atomic64_read(&perf_stats.inbound_tl_runs_resched)); - _OUTP_IT("Inbound-thin tasklet runs tried/retried : %li/%li\n", - (long)atomic64_read(&perf_stats.inbound_thin_tl_runs), - (long)atomic64_read(&perf_stats.inbound_thin_tl_runs_resched)); - _OUTP_IT("Outbound tasklet runs tried/retried : %li/%li\n", - (long)atomic64_read(&perf_stats.outbound_tl_runs), - (long)atomic64_read(&perf_stats.outbound_tl_runs_resched)); - _OUTP_IT("\n"); - _OUTP_IT("Number of SIGA sync's issued : %li\n", - (long)atomic64_read(&perf_stats.siga_syncs)); - _OUTP_IT("Number of SIGA in's issued : %li\n", - (long)atomic64_read(&perf_stats.siga_ins)); - _OUTP_IT("Number of SIGA out's issued : %li\n", - (long)atomic64_read(&perf_stats.siga_outs)); - _OUTP_IT("Number of PCIs caught : %li\n", - (long)atomic64_read(&perf_stats.pcis)); - _OUTP_IT("Number of adapter interrupts caught : %li\n", - (long)atomic64_read(&perf_stats.thinints)); - _OUTP_IT("Number of fast requeues (outg. SBALs w/o SIGA) : %li\n", - (long)atomic64_read(&perf_stats.fast_reqs)); - _OUTP_IT("\n"); - _OUTP_IT("Number of inbound transfers : %li\n", - (long)atomic64_read(&perf_stats.inbound_cnt)); - _OUTP_IT("Number of do_QDIOs outbound : %li\n", - (long)atomic64_read(&perf_stats.outbound_cnt)); -#else /* CONFIG_64BIT */ - _OUTP_IT("Number of tasklet runs (total) : %i\n", - atomic_read(&perf_stats.tl_runs)); - _OUTP_IT("Inbound tasklet runs tried/retried : %i/%i\n", - atomic_read(&perf_stats.inbound_tl_runs), - atomic_read(&perf_stats.inbound_tl_runs_resched)); - _OUTP_IT("Inbound-thin tasklet runs tried/retried : %i/%i\n", - atomic_read(&perf_stats.inbound_thin_tl_runs), - atomic_read(&perf_stats.inbound_thin_tl_runs_resched)); - _OUTP_IT("Outbound tasklet runs tried/retried : %i/%i\n", - atomic_read(&perf_stats.outbound_tl_runs), - atomic_read(&perf_stats.outbound_tl_runs_resched)); - _OUTP_IT("\n"); - _OUTP_IT("Number of SIGA sync's issued : %i\n", - atomic_read(&perf_stats.siga_syncs)); - _OUTP_IT("Number of SIGA in's issued : %i\n", - atomic_read(&perf_stats.siga_ins)); - _OUTP_IT("Number of SIGA out's issued : %i\n", - atomic_read(&perf_stats.siga_outs)); - _OUTP_IT("Number of PCIs caught : %i\n", - atomic_read(&perf_stats.pcis)); - _OUTP_IT("Number of adapter interrupts caught : %i\n", - atomic_read(&perf_stats.thinints)); - _OUTP_IT("Number of fast requeues (outg. 
SBALs w/o SIGA) : %i\n", - atomic_read(&perf_stats.fast_reqs)); - _OUTP_IT("\n"); - _OUTP_IT("Number of inbound transfers : %i\n", - atomic_read(&perf_stats.inbound_cnt)); - _OUTP_IT("Number of do_QDIOs outbound : %i\n", - atomic_read(&perf_stats.outbound_cnt)); -#endif /* CONFIG_64BIT */ - _OUTP_IT("\n"); - - return c; -} - -static struct proc_dir_entry *qdio_perf_proc_file; - -static void -qdio_add_procfs_entry(void) -{ - proc_perf_file_registration=0; - qdio_perf_proc_file=create_proc_entry(QDIO_PERF, - S_IFREG|0444,NULL); - if (qdio_perf_proc_file) { - qdio_perf_proc_file->read_proc=&qdio_perf_procfile_read; - } else proc_perf_file_registration=-1; - - if (proc_perf_file_registration) - QDIO_PRINT_WARN("was not able to register perf. " \ - "proc-file (%i).\n", - proc_perf_file_registration); -} - -static void -qdio_remove_procfs_entry(void) -{ - if (!proc_perf_file_registration) /* means if it went ok earlier */ - remove_proc_entry(QDIO_PERF,NULL); -} - -/** - * attributes in sysfs - *****************************************************************************/ - -static ssize_t -qdio_performance_stats_show(struct bus_type *bus, char *buf) -{ - return sprintf(buf, "%i\n", qdio_performance_stats ? 1 : 0); -} - -static ssize_t -qdio_performance_stats_store(struct bus_type *bus, const char *buf, size_t count) -{ - unsigned long i; - int ret; - - ret = strict_strtoul(buf, 16, &i); - if (!ret && ((i == 0) || (i == 1))) { - if (i == qdio_performance_stats) - return count; - qdio_performance_stats = i; - if (i==0) { - /* reset perf. stat. info */ -#ifdef CONFIG_64BIT - atomic64_set(&perf_stats.tl_runs, 0); - atomic64_set(&perf_stats.outbound_tl_runs, 0); - atomic64_set(&perf_stats.inbound_tl_runs, 0); - atomic64_set(&perf_stats.inbound_tl_runs_resched, 0); - atomic64_set(&perf_stats.inbound_thin_tl_runs, 0); - atomic64_set(&perf_stats.inbound_thin_tl_runs_resched, - 0); - atomic64_set(&perf_stats.siga_outs, 0); - atomic64_set(&perf_stats.siga_ins, 0); - atomic64_set(&perf_stats.siga_syncs, 0); - atomic64_set(&perf_stats.pcis, 0); - atomic64_set(&perf_stats.thinints, 0); - atomic64_set(&perf_stats.fast_reqs, 0); - atomic64_set(&perf_stats.outbound_cnt, 0); - atomic64_set(&perf_stats.inbound_cnt, 0); -#else /* CONFIG_64BIT */ - atomic_set(&perf_stats.tl_runs, 0); - atomic_set(&perf_stats.outbound_tl_runs, 0); - atomic_set(&perf_stats.inbound_tl_runs, 0); - atomic_set(&perf_stats.inbound_tl_runs_resched, 0); - atomic_set(&perf_stats.inbound_thin_tl_runs, 0); - atomic_set(&perf_stats.inbound_thin_tl_runs_resched, 0); - atomic_set(&perf_stats.siga_outs, 0); - atomic_set(&perf_stats.siga_ins, 0); - atomic_set(&perf_stats.siga_syncs, 0); - atomic_set(&perf_stats.pcis, 0); - atomic_set(&perf_stats.thinints, 0); - atomic_set(&perf_stats.fast_reqs, 0); - atomic_set(&perf_stats.outbound_cnt, 0); - atomic_set(&perf_stats.inbound_cnt, 0); -#endif /* CONFIG_64BIT */ - } - } else { - QDIO_PRINT_ERR("QDIO performance_stats: write 0 or 1 to this file!\n"); - return -EINVAL; - } - return count; -} - -static BUS_ATTR(qdio_performance_stats, 0644, qdio_performance_stats_show, - qdio_performance_stats_store); - -static void -tiqdio_register_thinints(void) -{ - char dbf_text[20]; - - tiqdio_ind = - s390_register_adapter_interrupt(&tiqdio_thinint_handler, NULL, - TIQDIO_THININT_ISC); - if (IS_ERR(tiqdio_ind)) { - sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_ind)); - QDIO_DBF_TEXT0(0,setup,dbf_text); - QDIO_PRINT_ERR("failed to register adapter handler " \ - "(rc=%li).\nAdapter interrupts might " \ - "not work. 
Continuing.\n", - PTR_ERR(tiqdio_ind)); - tiqdio_ind = NULL; - } -} - -static void -tiqdio_unregister_thinints(void) -{ - if (tiqdio_ind) - s390_unregister_adapter_interrupt(tiqdio_ind, - TIQDIO_THININT_ISC); -} - -static int -qdio_get_qdio_memory(void) -{ - int i; - indicator_used[0]=1; - - for (i=1;i<INDICATORS_PER_CACHELINE;i++) - indicator_used[i]=0; - indicators = kzalloc(sizeof(__u32)*(INDICATORS_PER_CACHELINE), - GFP_KERNEL); - if (!indicators) - return -ENOMEM; - return 0; -} - -static void -qdio_release_qdio_memory(void) -{ - kfree(indicators); -} - -static void -qdio_unregister_dbf_views(void) -{ - if (qdio_dbf_setup) - debug_unregister(qdio_dbf_setup); - if (qdio_dbf_sbal) - debug_unregister(qdio_dbf_sbal); - if (qdio_dbf_sense) - debug_unregister(qdio_dbf_sense); - if (qdio_dbf_trace) - debug_unregister(qdio_dbf_trace); -#ifdef CONFIG_QDIO_DEBUG - if (qdio_dbf_slsb_out) - debug_unregister(qdio_dbf_slsb_out); - if (qdio_dbf_slsb_in) - debug_unregister(qdio_dbf_slsb_in); -#endif /* CONFIG_QDIO_DEBUG */ -} - -static int -qdio_register_dbf_views(void) -{ - qdio_dbf_setup=debug_register(QDIO_DBF_SETUP_NAME, - QDIO_DBF_SETUP_PAGES, - QDIO_DBF_SETUP_NR_AREAS, - QDIO_DBF_SETUP_LEN); - if (!qdio_dbf_setup) - goto oom; - debug_register_view(qdio_dbf_setup,&debug_hex_ascii_view); - debug_set_level(qdio_dbf_setup,QDIO_DBF_SETUP_LEVEL); - - qdio_dbf_sbal=debug_register(QDIO_DBF_SBAL_NAME, - QDIO_DBF_SBAL_PAGES, - QDIO_DBF_SBAL_NR_AREAS, - QDIO_DBF_SBAL_LEN); - if (!qdio_dbf_sbal) - goto oom; - - debug_register_view(qdio_dbf_sbal,&debug_hex_ascii_view); - debug_set_level(qdio_dbf_sbal,QDIO_DBF_SBAL_LEVEL); - - qdio_dbf_sense=debug_register(QDIO_DBF_SENSE_NAME, - QDIO_DBF_SENSE_PAGES, - QDIO_DBF_SENSE_NR_AREAS, - QDIO_DBF_SENSE_LEN); - if (!qdio_dbf_sense) - goto oom; - - debug_register_view(qdio_dbf_sense,&debug_hex_ascii_view); - debug_set_level(qdio_dbf_sense,QDIO_DBF_SENSE_LEVEL); - - qdio_dbf_trace=debug_register(QDIO_DBF_TRACE_NAME, - QDIO_DBF_TRACE_PAGES, - QDIO_DBF_TRACE_NR_AREAS, - QDIO_DBF_TRACE_LEN); - if (!qdio_dbf_trace) - goto oom; - - debug_register_view(qdio_dbf_trace,&debug_hex_ascii_view); - debug_set_level(qdio_dbf_trace,QDIO_DBF_TRACE_LEVEL); - -#ifdef CONFIG_QDIO_DEBUG - qdio_dbf_slsb_out=debug_register(QDIO_DBF_SLSB_OUT_NAME, - QDIO_DBF_SLSB_OUT_PAGES, - QDIO_DBF_SLSB_OUT_NR_AREAS, - QDIO_DBF_SLSB_OUT_LEN); - if (!qdio_dbf_slsb_out) - goto oom; - debug_register_view(qdio_dbf_slsb_out,&debug_hex_ascii_view); - debug_set_level(qdio_dbf_slsb_out,QDIO_DBF_SLSB_OUT_LEVEL); - - qdio_dbf_slsb_in=debug_register(QDIO_DBF_SLSB_IN_NAME, - QDIO_DBF_SLSB_IN_PAGES, - QDIO_DBF_SLSB_IN_NR_AREAS, - QDIO_DBF_SLSB_IN_LEN); - if (!qdio_dbf_slsb_in) - goto oom; - debug_register_view(qdio_dbf_slsb_in,&debug_hex_ascii_view); - debug_set_level(qdio_dbf_slsb_in,QDIO_DBF_SLSB_IN_LEVEL); -#endif /* CONFIG_QDIO_DEBUG */ - return 0; -oom: - QDIO_PRINT_ERR("not enough memory for dbf.\n"); - qdio_unregister_dbf_views(); - return -ENOMEM; -} - -static void *qdio_mempool_alloc(gfp_t gfp_mask, void *size) -{ - return (void *) get_zeroed_page(gfp_mask|GFP_DMA); -} - -static void qdio_mempool_free(void *element, void *size) -{ - free_page((unsigned long) element); -} - -static int __init -init_QDIO(void) -{ - int res; - void *ptr; - - printk("qdio: loading %s\n",version); - - res=qdio_get_qdio_memory(); - if (res) - return res; - - qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q), - 256, 0, NULL); - if (!qdio_q_cache) { - qdio_release_qdio_memory(); - return -ENOMEM; - } - - res = 
qdio_register_dbf_views(); - if (res) { - kmem_cache_destroy(qdio_q_cache); - qdio_release_qdio_memory(); - return res; - } - - QDIO_DBF_TEXT0(0,setup,"initQDIO"); - res = bus_create_file(&ccw_bus_type, &bus_attr_qdio_performance_stats); - - memset((void*)&perf_stats,0,sizeof(perf_stats)); - QDIO_DBF_TEXT0(0,setup,"perfstat"); - ptr=&perf_stats; - QDIO_DBF_HEX0(0,setup,&ptr,sizeof(void*)); - - qdio_add_procfs_entry(); - - qdio_mempool_scssc = mempool_create(QDIO_MEMPOOL_SCSSC_ELEMENTS, - qdio_mempool_alloc, - qdio_mempool_free, NULL); - - isc_register(QDIO_AIRQ_ISC); - if (tiqdio_check_chsc_availability()) - QDIO_PRINT_ERR("Not all CHSCs supported. Continuing.\n"); - - tiqdio_register_thinints(); - - return 0; - } - -static void __exit -cleanup_QDIO(void) -{ - tiqdio_unregister_thinints(); - isc_unregister(QDIO_AIRQ_ISC); - qdio_remove_procfs_entry(); - qdio_release_qdio_memory(); - qdio_unregister_dbf_views(); - mempool_destroy(qdio_mempool_scssc); - kmem_cache_destroy(qdio_q_cache); - bus_remove_file(&ccw_bus_type, &bus_attr_qdio_performance_stats); - printk("qdio: %s: module removed\n",version); -} - -module_init(init_QDIO); -module_exit(cleanup_QDIO); - -EXPORT_SYMBOL(qdio_allocate); -EXPORT_SYMBOL(qdio_establish); -EXPORT_SYMBOL(qdio_initialize); -EXPORT_SYMBOL(qdio_activate); -EXPORT_SYMBOL(do_QDIO); -EXPORT_SYMBOL(qdio_shutdown); -EXPORT_SYMBOL(qdio_free); -EXPORT_SYMBOL(qdio_cleanup); -EXPORT_SYMBOL(qdio_synchronize); diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index 7656081a24d2..c1a70985abfa 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h @@ -1,66 +1,20 @@ +/* + * linux/drivers/s390/cio/qdio.h + * + * Copyright 2000,2008 IBM Corp. + * Author(s): Utz Bacher <utz.bacher@de.ibm.com> + * Jan Glauber <jang@linux.vnet.ibm.com> + */ #ifndef _CIO_QDIO_H #define _CIO_QDIO_H #include <asm/page.h> -#include <asm/isc.h> #include <asm/schid.h> +#include "chsc.h" -#ifdef CONFIG_QDIO_DEBUG -#define QDIO_VERBOSE_LEVEL 9 -#else /* CONFIG_QDIO_DEBUG */ -#define QDIO_VERBOSE_LEVEL 5 -#endif /* CONFIG_QDIO_DEBUG */ -#define QDIO_USE_PROCESSING_STATE - -#define QDIO_MINIMAL_BH_RELIEF_TIME 16 -#define QDIO_TIMER_POLL_VALUE 1 -#define IQDIO_TIMER_POLL_VALUE 1 - -/* - * unfortunately this can't be (QDIO_MAX_BUFFERS_PER_Q*4/3) or so -- as - * we never know, whether we'll get initiative again, e.g. to give the - * transmit skb's back to the stack, however the stack may be waiting for - * them... 
therefore we define 4 as threshold to start polling (which - * will stop as soon as the asynchronous queue catches up) - * btw, this only applies to the asynchronous HiperSockets queue - */ -#define IQDIO_FILL_LEVEL_TO_POLL 4 - -#define TIQDIO_THININT_ISC QDIO_AIRQ_ISC -#define TIQDIO_DELAY_TARGET 0 -#define QDIO_BUSY_BIT_PATIENCE 100 /* in microsecs */ -#define QDIO_BUSY_BIT_GIVE_UP 10000000 /* 10 seconds */ -#define IQDIO_GLOBAL_LAPS 2 /* GLOBAL_LAPS are not used as we */ -#define IQDIO_GLOBAL_LAPS_INT 1 /* don't global summary */ -#define IQDIO_LOCAL_LAPS 4 -#define IQDIO_LOCAL_LAPS_INT 1 -#define IQDIO_GLOBAL_SUMMARY_CC_MASK 2 -/*#define IQDIO_IQDC_INT_PARM 0x1234*/ - -#define QDIO_Q_LAPS 5 - -#define QDIO_STORAGE_KEY PAGE_DEFAULT_KEY - -#define L2_CACHELINE_SIZE 256 -#define INDICATORS_PER_CACHELINE (L2_CACHELINE_SIZE/sizeof(__u32)) - -#define QDIO_PERF "qdio_perf" - -/* must be a power of 2 */ -/*#define QDIO_STATS_NUMBER 4 - -#define QDIO_STATS_CLASSES 2 -#define QDIO_STATS_COUNT_NEEDED 2*/ - -#define QDIO_NO_USE_COUNT_TIMEOUT (1*HZ) /* wait for 1 sec on each q before - exiting without having use_count - of the queue to 0 */ - -#define QDIO_ESTABLISH_TIMEOUT (1*HZ) -#define QDIO_CLEANUP_CLEAR_TIMEOUT (20*HZ) -#define QDIO_CLEANUP_HALT_TIMEOUT (10*HZ) -#define QDIO_FORCE_CHECK_TIMEOUT (10*HZ) -#define QDIO_ACTIVATE_TIMEOUT (5) /* 5 ms */ +#define QDIO_BUSY_BIT_PATIENCE 100 /* 100 microseconds */ +#define QDIO_BUSY_BIT_GIVE_UP 2000000 /* 2 seconds = eternity */ +#define QDIO_INPUT_THRESHOLD 500 /* 500 microseconds */ enum qdio_irq_states { QDIO_IRQ_STATE_INACTIVE, @@ -72,565 +26,352 @@ enum qdio_irq_states { NR_QDIO_IRQ_STATES, }; -/* used as intparm in do_IO: */ -#define QDIO_DOING_SENSEID 0 -#define QDIO_DOING_ESTABLISH 1 -#define QDIO_DOING_ACTIVATE 2 -#define QDIO_DOING_CLEANUP 3 - -/************************* DEBUG FACILITY STUFF *********************/ - -#define QDIO_DBF_HEX(ex,name,level,addr,len) \ - do { \ - if (ex) \ - debug_exception(qdio_dbf_##name,level,(void*)(addr),len); \ - else \ - debug_event(qdio_dbf_##name,level,(void*)(addr),len); \ - } while (0) -#define QDIO_DBF_TEXT(ex,name,level,text) \ - do { \ - if (ex) \ - debug_text_exception(qdio_dbf_##name,level,text); \ - else \ - debug_text_event(qdio_dbf_##name,level,text); \ - } while (0) - - -#define QDIO_DBF_HEX0(ex,name,addr,len) QDIO_DBF_HEX(ex,name,0,addr,len) -#define QDIO_DBF_HEX1(ex,name,addr,len) QDIO_DBF_HEX(ex,name,1,addr,len) -#define QDIO_DBF_HEX2(ex,name,addr,len) QDIO_DBF_HEX(ex,name,2,addr,len) -#ifdef CONFIG_QDIO_DEBUG -#define QDIO_DBF_HEX3(ex,name,addr,len) QDIO_DBF_HEX(ex,name,3,addr,len) -#define QDIO_DBF_HEX4(ex,name,addr,len) QDIO_DBF_HEX(ex,name,4,addr,len) -#define QDIO_DBF_HEX5(ex,name,addr,len) QDIO_DBF_HEX(ex,name,5,addr,len) -#define QDIO_DBF_HEX6(ex,name,addr,len) QDIO_DBF_HEX(ex,name,6,addr,len) -#else /* CONFIG_QDIO_DEBUG */ -#define QDIO_DBF_HEX3(ex,name,addr,len) do {} while (0) -#define QDIO_DBF_HEX4(ex,name,addr,len) do {} while (0) -#define QDIO_DBF_HEX5(ex,name,addr,len) do {} while (0) -#define QDIO_DBF_HEX6(ex,name,addr,len) do {} while (0) -#endif /* CONFIG_QDIO_DEBUG */ - -#define QDIO_DBF_TEXT0(ex,name,text) QDIO_DBF_TEXT(ex,name,0,text) -#define QDIO_DBF_TEXT1(ex,name,text) QDIO_DBF_TEXT(ex,name,1,text) -#define QDIO_DBF_TEXT2(ex,name,text) QDIO_DBF_TEXT(ex,name,2,text) -#ifdef CONFIG_QDIO_DEBUG -#define QDIO_DBF_TEXT3(ex,name,text) QDIO_DBF_TEXT(ex,name,3,text) -#define QDIO_DBF_TEXT4(ex,name,text) QDIO_DBF_TEXT(ex,name,4,text) -#define QDIO_DBF_TEXT5(ex,name,text) 
QDIO_DBF_TEXT(ex,name,5,text) -#define QDIO_DBF_TEXT6(ex,name,text) QDIO_DBF_TEXT(ex,name,6,text) -#else /* CONFIG_QDIO_DEBUG */ -#define QDIO_DBF_TEXT3(ex,name,text) do {} while (0) -#define QDIO_DBF_TEXT4(ex,name,text) do {} while (0) -#define QDIO_DBF_TEXT5(ex,name,text) do {} while (0) -#define QDIO_DBF_TEXT6(ex,name,text) do {} while (0) -#endif /* CONFIG_QDIO_DEBUG */ - -#define QDIO_DBF_SETUP_NAME "qdio_setup" -#define QDIO_DBF_SETUP_LEN 8 -#define QDIO_DBF_SETUP_PAGES 4 -#define QDIO_DBF_SETUP_NR_AREAS 1 -#ifdef CONFIG_QDIO_DEBUG -#define QDIO_DBF_SETUP_LEVEL 6 -#else /* CONFIG_QDIO_DEBUG */ -#define QDIO_DBF_SETUP_LEVEL 2 -#endif /* CONFIG_QDIO_DEBUG */ - -#define QDIO_DBF_SBAL_NAME "qdio_labs" /* sbal */ -#define QDIO_DBF_SBAL_LEN 256 -#define QDIO_DBF_SBAL_PAGES 4 -#define QDIO_DBF_SBAL_NR_AREAS 2 -#ifdef CONFIG_QDIO_DEBUG -#define QDIO_DBF_SBAL_LEVEL 6 -#else /* CONFIG_QDIO_DEBUG */ -#define QDIO_DBF_SBAL_LEVEL 2 -#endif /* CONFIG_QDIO_DEBUG */ - -#define QDIO_DBF_TRACE_NAME "qdio_trace" -#define QDIO_DBF_TRACE_LEN 8 -#define QDIO_DBF_TRACE_NR_AREAS 2 -#ifdef CONFIG_QDIO_DEBUG -#define QDIO_DBF_TRACE_PAGES 16 -#define QDIO_DBF_TRACE_LEVEL 4 /* -------- could be even more verbose here */ -#else /* CONFIG_QDIO_DEBUG */ -#define QDIO_DBF_TRACE_PAGES 4 -#define QDIO_DBF_TRACE_LEVEL 2 -#endif /* CONFIG_QDIO_DEBUG */ - -#define QDIO_DBF_SENSE_NAME "qdio_sense" -#define QDIO_DBF_SENSE_LEN 64 -#define QDIO_DBF_SENSE_PAGES 2 -#define QDIO_DBF_SENSE_NR_AREAS 1 -#ifdef CONFIG_QDIO_DEBUG -#define QDIO_DBF_SENSE_LEVEL 6 -#else /* CONFIG_QDIO_DEBUG */ -#define QDIO_DBF_SENSE_LEVEL 2 -#endif /* CONFIG_QDIO_DEBUG */ - -#ifdef CONFIG_QDIO_DEBUG -#define QDIO_TRACE_QTYPE QDIO_ZFCP_QFMT - -#define QDIO_DBF_SLSB_OUT_NAME "qdio_slsb_out" -#define QDIO_DBF_SLSB_OUT_LEN QDIO_MAX_BUFFERS_PER_Q -#define QDIO_DBF_SLSB_OUT_PAGES 256 -#define QDIO_DBF_SLSB_OUT_NR_AREAS 1 -#define QDIO_DBF_SLSB_OUT_LEVEL 6 - -#define QDIO_DBF_SLSB_IN_NAME "qdio_slsb_in" -#define QDIO_DBF_SLSB_IN_LEN QDIO_MAX_BUFFERS_PER_Q -#define QDIO_DBF_SLSB_IN_PAGES 256 -#define QDIO_DBF_SLSB_IN_NR_AREAS 1 -#define QDIO_DBF_SLSB_IN_LEVEL 6 -#endif /* CONFIG_QDIO_DEBUG */ - -#define QDIO_PRINTK_HEADER QDIO_NAME ": " - -#if QDIO_VERBOSE_LEVEL>8 -#define QDIO_PRINT_STUPID(x...) printk( KERN_DEBUG QDIO_PRINTK_HEADER x) -#else -#define QDIO_PRINT_STUPID(x...) 
do { } while (0) -#endif +/* used as intparm in do_IO */ +#define QDIO_DOING_ESTABLISH 1 +#define QDIO_DOING_ACTIVATE 2 +#define QDIO_DOING_CLEANUP 3 + +#define SLSB_STATE_NOT_INIT 0x0 +#define SLSB_STATE_EMPTY 0x1 +#define SLSB_STATE_PRIMED 0x2 +#define SLSB_STATE_HALTED 0xe +#define SLSB_STATE_ERROR 0xf +#define SLSB_TYPE_INPUT 0x0 +#define SLSB_TYPE_OUTPUT 0x20 +#define SLSB_OWNER_PROG 0x80 +#define SLSB_OWNER_CU 0x40 + +#define SLSB_P_INPUT_NOT_INIT \ + (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_NOT_INIT) /* 0x80 */ +#define SLSB_P_INPUT_ACK \ + (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_EMPTY) /* 0x81 */ +#define SLSB_CU_INPUT_EMPTY \ + (SLSB_OWNER_CU | SLSB_TYPE_INPUT | SLSB_STATE_EMPTY) /* 0x41 */ +#define SLSB_P_INPUT_PRIMED \ + (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_PRIMED) /* 0x82 */ +#define SLSB_P_INPUT_HALTED \ + (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_HALTED) /* 0x8e */ +#define SLSB_P_INPUT_ERROR \ + (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_ERROR) /* 0x8f */ +#define SLSB_P_OUTPUT_NOT_INIT \ + (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_NOT_INIT) /* 0xa0 */ +#define SLSB_P_OUTPUT_EMPTY \ + (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_EMPTY) /* 0xa1 */ +#define SLSB_CU_OUTPUT_PRIMED \ + (SLSB_OWNER_CU | SLSB_TYPE_OUTPUT | SLSB_STATE_PRIMED) /* 0x62 */ +#define SLSB_P_OUTPUT_HALTED \ + (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_HALTED) /* 0xae */ +#define SLSB_P_OUTPUT_ERROR \ + (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_ERROR) /* 0xaf */ + +#define SLSB_ERROR_DURING_LOOKUP 0xff + +/* additional CIWs returned by extended Sense-ID */ +#define CIW_TYPE_EQUEUE 0x3 /* establish QDIO queues */ +#define CIW_TYPE_AQUEUE 0x4 /* activate QDIO queues */ -#if QDIO_VERBOSE_LEVEL>7 -#define QDIO_PRINT_ALL(x...) printk( QDIO_PRINTK_HEADER x) -#else -#define QDIO_PRINT_ALL(x...) do { } while (0) -#endif - -#if QDIO_VERBOSE_LEVEL>6 -#define QDIO_PRINT_INFO(x...) printk( QDIO_PRINTK_HEADER x) -#else -#define QDIO_PRINT_INFO(x...) do { } while (0) -#endif - -#if QDIO_VERBOSE_LEVEL>5 -#define QDIO_PRINT_WARN(x...) printk( QDIO_PRINTK_HEADER x) -#else -#define QDIO_PRINT_WARN(x...) do { } while (0) -#endif - -#if QDIO_VERBOSE_LEVEL>4 -#define QDIO_PRINT_ERR(x...) printk( QDIO_PRINTK_HEADER x) -#else -#define QDIO_PRINT_ERR(x...) do { } while (0) -#endif - -#if QDIO_VERBOSE_LEVEL>3 -#define QDIO_PRINT_CRIT(x...) printk( QDIO_PRINTK_HEADER x) -#else -#define QDIO_PRINT_CRIT(x...) do { } while (0) -#endif - -#if QDIO_VERBOSE_LEVEL>2 -#define QDIO_PRINT_ALERT(x...) printk( QDIO_PRINTK_HEADER x) -#else -#define QDIO_PRINT_ALERT(x...) do { } while (0) -#endif +/* flags for st qdio sch data */ +#define CHSC_FLAG_QDIO_CAPABILITY 0x80 +#define CHSC_FLAG_VALIDITY 0x40 + +/* qdio adapter-characteristics-1 flag */ +#define AC1_SIGA_INPUT_NEEDED 0x40 /* process input queues */ +#define AC1_SIGA_OUTPUT_NEEDED 0x20 /* process output queues */ +#define AC1_SIGA_SYNC_NEEDED 0x10 /* ask hypervisor to sync */ +#define AC1_AUTOMATIC_SYNC_ON_THININT 0x08 /* set by hypervisor */ +#define AC1_AUTOMATIC_SYNC_ON_OUT_PCI 0x04 /* set by hypervisor */ +#define AC1_SC_QEBSM_AVAILABLE 0x02 /* available for subchannel */ +#define AC1_SC_QEBSM_ENABLED 0x01 /* enabled for subchannel */ -#if QDIO_VERBOSE_LEVEL>1 -#define QDIO_PRINT_EMERG(x...) printk( QDIO_PRINTK_HEADER x) -#else -#define QDIO_PRINT_EMERG(x...) 
do { } while (0) -#endif - -#define QDIO_HEXDUMP16(importance,header,ptr) \ -QDIO_PRINT_##importance(header "%02x %02x %02x %02x " \ - "%02x %02x %02x %02x %02x %02x %02x %02x " \ - "%02x %02x %02x %02x\n",*(((char*)ptr)), \ - *(((char*)ptr)+1),*(((char*)ptr)+2), \ - *(((char*)ptr)+3),*(((char*)ptr)+4), \ - *(((char*)ptr)+5),*(((char*)ptr)+6), \ - *(((char*)ptr)+7),*(((char*)ptr)+8), \ - *(((char*)ptr)+9),*(((char*)ptr)+10), \ - *(((char*)ptr)+11),*(((char*)ptr)+12), \ - *(((char*)ptr)+13),*(((char*)ptr)+14), \ - *(((char*)ptr)+15)); \ -QDIO_PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \ - "%02x %02x %02x %02x %02x %02x %02x %02x\n", \ - *(((char*)ptr)+16),*(((char*)ptr)+17), \ - *(((char*)ptr)+18),*(((char*)ptr)+19), \ - *(((char*)ptr)+20),*(((char*)ptr)+21), \ - *(((char*)ptr)+22),*(((char*)ptr)+23), \ - *(((char*)ptr)+24),*(((char*)ptr)+25), \ - *(((char*)ptr)+26),*(((char*)ptr)+27), \ - *(((char*)ptr)+28),*(((char*)ptr)+29), \ - *(((char*)ptr)+30),*(((char*)ptr)+31)); - -/****************** END OF DEBUG FACILITY STUFF *********************/ +#ifdef CONFIG_64BIT +static inline int do_sqbs(u64 token, unsigned char state, int queue, + int *start, int *count) +{ + register unsigned long _ccq asm ("0") = *count; + register unsigned long _token asm ("1") = token; + unsigned long _queuestart = ((unsigned long)queue << 32) | *start; -/* - * Some instructions as assembly - */ + asm volatile( + " .insn rsy,0xeb000000008A,%1,0,0(%2)" + : "+d" (_ccq), "+d" (_queuestart) + : "d" ((unsigned long)state), "d" (_token) + : "memory", "cc"); + *count = _ccq & 0xff; + *start = _queuestart & 0xff; -static inline int -do_sqbs(unsigned long sch, unsigned char state, int queue, - unsigned int *start, unsigned int *count) -{ -#ifdef CONFIG_64BIT - register unsigned long _ccq asm ("0") = *count; - register unsigned long _sch asm ("1") = sch; - unsigned long _queuestart = ((unsigned long)queue << 32) | *start; - - asm volatile( - " .insn rsy,0xeb000000008A,%1,0,0(%2)" - : "+d" (_ccq), "+d" (_queuestart) - : "d" ((unsigned long)state), "d" (_sch) - : "memory", "cc"); - *count = _ccq & 0xff; - *start = _queuestart & 0xff; - - return (_ccq >> 32) & 0xff; -#else - return 0; -#endif + return (_ccq >> 32) & 0xff; } -static inline int -do_eqbs(unsigned long sch, unsigned char *state, int queue, - unsigned int *start, unsigned int *count) +static inline int do_eqbs(u64 token, unsigned char *state, int queue, + int *start, int *count) { -#ifdef CONFIG_64BIT register unsigned long _ccq asm ("0") = *count; - register unsigned long _sch asm ("1") = sch; + register unsigned long _token asm ("1") = token; unsigned long _queuestart = ((unsigned long)queue << 32) | *start; unsigned long _state = 0; asm volatile( " .insn rrf,0xB99c0000,%1,%2,0,0" : "+d" (_ccq), "+d" (_queuestart), "+d" (_state) - : "d" (_sch) - : "memory", "cc" ); + : "d" (_token) + : "memory", "cc"); *count = _ccq & 0xff; *start = _queuestart & 0xff; *state = _state & 0xff; return (_ccq >> 32) & 0xff; -#else - return 0; -#endif -} - - -static inline int -do_siga_sync(struct subchannel_id schid, unsigned int mask1, unsigned int mask2) -{ - register unsigned long reg0 asm ("0") = 2; - register struct subchannel_id reg1 asm ("1") = schid; - register unsigned long reg2 asm ("2") = mask1; - register unsigned long reg3 asm ("3") = mask2; - int cc; - - asm volatile( - " siga 0\n" - " ipm %0\n" - " srl %0,28\n" - : "=d" (cc) - : "d" (reg0), "d" (reg1), "d" (reg2), "d" (reg3) : "cc"); - return cc; -} - -static inline int -do_siga_input(struct 
subchannel_id schid, unsigned int mask) -{ - register unsigned long reg0 asm ("0") = 1; - register struct subchannel_id reg1 asm ("1") = schid; - register unsigned long reg2 asm ("2") = mask; - int cc; - - asm volatile( - " siga 0\n" - " ipm %0\n" - " srl %0,28\n" - : "=d" (cc) - : "d" (reg0), "d" (reg1), "d" (reg2) : "cc", "memory"); - return cc; -} - -static inline int -do_siga_output(unsigned long schid, unsigned long mask, __u32 *bb, - unsigned int fc) -{ - register unsigned long __fc asm("0") = fc; - register unsigned long __schid asm("1") = schid; - register unsigned long __mask asm("2") = mask; - int cc; - - asm volatile( - " siga 0\n" - "0: ipm %0\n" - " srl %0,28\n" - "1:\n" - EX_TABLE(0b,1b) - : "=d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask) - : "0" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION) - : "cc", "memory"); - (*bb) = ((unsigned int) __fc) >> 31; - return cc; -} - -static inline unsigned long -do_clear_global_summary(void) -{ - register unsigned long __fn asm("1") = 3; - register unsigned long __tmp asm("2"); - register unsigned long __time asm("3"); - - asm volatile( - " .insn rre,0xb2650000,2,0" - : "+d" (__fn), "=d" (__tmp), "=d" (__time)); - return __time; } - -/* - * QDIO device commands returned by extended Sense-ID - */ -#define DEFAULT_ESTABLISH_QS_CMD 0x1b -#define DEFAULT_ESTABLISH_QS_COUNT 0x1000 -#define DEFAULT_ACTIVATE_QS_CMD 0x1f -#define DEFAULT_ACTIVATE_QS_COUNT 0 - -/* - * additional CIWs returned by extended Sense-ID - */ -#define CIW_TYPE_EQUEUE 0x3 /* establish QDIO queues */ -#define CIW_TYPE_AQUEUE 0x4 /* activate QDIO queues */ +#else +static inline int do_sqbs(u64 token, unsigned char state, int queue, + int *start, int *count) { return 0; } +static inline int do_eqbs(u64 token, unsigned char *state, int queue, + int *start, int *count) { return 0; } +#endif /* CONFIG_64BIT */ -#define QDIO_CHSC_RESPONSE_CODE_OK 1 -/* flags for st qdio sch data */ -#define CHSC_FLAG_QDIO_CAPABILITY 0x80 -#define CHSC_FLAG_VALIDITY 0x40 +struct qdio_irq; -#define CHSC_FLAG_SIGA_INPUT_NECESSARY 0x40 -#define CHSC_FLAG_SIGA_OUTPUT_NECESSARY 0x20 -#define CHSC_FLAG_SIGA_SYNC_NECESSARY 0x10 -#define CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS 0x08 -#define CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS 0x04 +struct siga_flag { + u8 input:1; + u8 output:1; + u8 sync:1; + u8 no_sync_ti:1; + u8 no_sync_out_ti:1; + u8 no_sync_out_pci:1; + u8:2; +} __attribute__ ((packed)); -struct qdio_chsc_ssqd { +struct chsc_ssqd_area { struct chsc_header request; - u16 reserved1:10; - u16 ssid:2; - u16 fmt:4; + u16:10; + u8 ssid:2; + u8 fmt:4; u16 first_sch; - u16 reserved2; + u16:16; u16 last_sch; - u32 reserved3; + u32:32; struct chsc_header response; - u32 reserved4; - u8 flags; - u8 reserved5; - u16 sch; - u8 qfmt; - u8 parm; - u8 qdioac1; - u8 sch_class; - u8 pct; - u8 icnt; - u8 reserved7; - u8 ocnt; - u8 reserved8; - u8 mbccnt; - u16 qdioac2; - u64 sch_token; -}; + u32:32; + struct qdio_ssqd_desc qdio_ssqd; +} __attribute__ ((packed)); -struct qdio_perf_stats { -#ifdef CONFIG_64BIT - atomic64_t tl_runs; - atomic64_t outbound_tl_runs; - atomic64_t outbound_tl_runs_resched; - atomic64_t inbound_tl_runs; - atomic64_t inbound_tl_runs_resched; - atomic64_t inbound_thin_tl_runs; - atomic64_t inbound_thin_tl_runs_resched; - - atomic64_t siga_outs; - atomic64_t siga_ins; - atomic64_t siga_syncs; - atomic64_t pcis; - atomic64_t thinints; - atomic64_t fast_reqs; - - atomic64_t outbound_cnt; - atomic64_t inbound_cnt; -#else /* CONFIG_64BIT */ - atomic_t tl_runs; - atomic_t outbound_tl_runs; - atomic_t 
outbound_tl_runs_resched; - atomic_t inbound_tl_runs; - atomic_t inbound_tl_runs_resched; - atomic_t inbound_thin_tl_runs; - atomic_t inbound_thin_tl_runs_resched; - - atomic_t siga_outs; - atomic_t siga_ins; - atomic_t siga_syncs; - atomic_t pcis; - atomic_t thinints; - atomic_t fast_reqs; - - atomic_t outbound_cnt; - atomic_t inbound_cnt; -#endif /* CONFIG_64BIT */ +struct scssc_area { + struct chsc_header request; + u16 operation_code; + u16:16; + u32:32; + u32:32; + u64 summary_indicator_addr; + u64 subchannel_indicator_addr; + u32 ks:4; + u32 kc:4; + u32:21; + u32 isc:3; + u32 word_with_d_bit; + u32:32; + struct subchannel_id schid; + u32 reserved[1004]; + struct chsc_header response; + u32:32; +} __attribute__ ((packed)); + +struct qdio_input_q { + /* input buffer acknowledgement flag */ + int polling; + + /* last time of noticing incoming data */ + u64 timestamp; + + /* lock for clearing the acknowledgement */ + spinlock_t lock; }; -/* unlikely as the later the better */ -#define SYNC_MEMORY if (unlikely(q->siga_sync)) qdio_siga_sync_q(q) -#define SYNC_MEMORY_ALL if (unlikely(q->siga_sync)) \ - qdio_siga_sync(q,~0U,~0U) -#define SYNC_MEMORY_ALL_OUTB if (unlikely(q->siga_sync)) \ - qdio_siga_sync(q,~0U,0) +struct qdio_output_q { + /* failed siga-w attempts*/ + atomic_t busy_siga_counter; -#define NOW qdio_get_micros() -#define SAVE_TIMESTAMP(q) q->timing.last_transfer_time=NOW -#define GET_SAVED_TIMESTAMP(q) (q->timing.last_transfer_time) -#define SAVE_FRONTIER(q,val) q->last_move_ftc=val -#define GET_SAVED_FRONTIER(q) (q->last_move_ftc) + /* start time of busy condition */ + u64 timestamp; -#define MY_MODULE_STRING(x) #x + /* PCIs are enabled for the queue */ + int pci_out_enabled; -#ifdef CONFIG_64BIT -#define QDIO_GET_ADDR(x) ((__u32)(unsigned long)x) -#else /* CONFIG_64BIT */ -#define QDIO_GET_ADDR(x) ((__u32)(long)x) -#endif /* CONFIG_64BIT */ + /* timer to check for more outbound work */ + struct timer_list timer; +}; struct qdio_q { - volatile struct slsb slsb; + struct slsb slsb; + union { + struct qdio_input_q in; + struct qdio_output_q out; + } u; - char unused[QDIO_MAX_BUFFERS_PER_Q]; + /* queue number */ + int nr; - __u32 * dev_st_chg_ind; + /* bitmask of queue number */ + int mask; + /* input or output queue */ int is_input_q; - struct subchannel_id schid; - struct ccw_device *cdev; - - unsigned int is_iqdio_q; - unsigned int is_thinint_q; - /* bit 0 means queue 0, bit 1 means queue 1, ... 
*/ - unsigned int mask; - unsigned int q_no; + /* list of thinint input queues */ + struct list_head entry; + /* upper-layer program handler */ qdio_handler_t (*handler); - /* points to the next buffer to be checked for having - * been processed by the card (outbound) - * or to the next buffer the program should check for (inbound) */ - volatile int first_to_check; - /* and the last time it was: */ - volatile int last_move_ftc; + /* + * inbound: next buffer the program should check for + * outbound: next buffer to check for having been processed + * by the card + */ + int first_to_check; - atomic_t number_of_buffers_used; - atomic_t polling; + /* first_to_check of the last time */ + int last_move_ftc; - unsigned int siga_in; - unsigned int siga_out; - unsigned int siga_sync; - unsigned int siga_sync_done_on_thinints; - unsigned int siga_sync_done_on_outb_tis; - unsigned int hydra_gives_outbound_pcis; + /* beginning position for calling the program */ + int first_to_kick; - /* used to save beginning position when calling dd_handlers */ - int first_element_to_kick; + /* number of buffers in use by the adapter */ + atomic_t nr_buf_used; - atomic_t use_count; - atomic_t is_in_shutdown; - - void *irq_ptr; - - struct timer_list timer; -#ifdef QDIO_USE_TIMERS_FOR_POLLING - atomic_t timer_already_set; - spinlock_t timer_lock; -#else /* QDIO_USE_TIMERS_FOR_POLLING */ + struct qdio_irq *irq_ptr; struct tasklet_struct tasklet; -#endif /* QDIO_USE_TIMERS_FOR_POLLING */ - - enum qdio_irq_states state; - - /* used to store the error condition during a data transfer */ + /* error condition during a data transfer */ unsigned int qdio_error; - unsigned int siga_error; - unsigned int error_status_flags; - - /* list of interesting queues */ - volatile struct qdio_q *list_next; - volatile struct qdio_q *list_prev; struct sl *sl; - volatile struct sbal *sbal[QDIO_MAX_BUFFERS_PER_Q]; - - struct qdio_buffer *qdio_buffers[QDIO_MAX_BUFFERS_PER_Q]; - - unsigned long int_parm; - - /*struct { - int in_bh_check_limit; - int threshold; - } threshold_classes[QDIO_STATS_CLASSES];*/ - - struct { - /* inbound: the time to stop polling - outbound: the time to kick peer */ - int threshold; /* the real value */ - - /* outbound: last time of do_QDIO - inbound: last time of noticing incoming data */ - /*__u64 last_transfer_times[QDIO_STATS_NUMBER]; - int last_transfer_index; */ - - __u64 last_transfer_time; - __u64 busy_start; - } timing; - atomic_t busy_siga_counter; - unsigned int queue_type; - unsigned int is_pci_out; - - /* leave this member at the end. won't be cleared in qdio_fill_qs */ - struct slib *slib; /* a page is allocated under this pointer, - sl points into this page, offset PAGE_SIZE/2 - (after slib) */ + struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q]; + + /* + * Warning: Leave this member at the end so it won't be cleared in + * qdio_fill_qs. A page is allocated under this pointer and used for + * slib and sl. slib is 2048 bytes big and sl points to offset + * PAGE_SIZE / 2. 
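 *
 * Illustrative layout only (assuming the usual 4 KB s390 page, so
 * PAGE_SIZE / 2 == 2048; the exact wiring is done by the setup code):
 *
 *	q->slib  -> start of the page, 2048 bytes
 *	q->sl    -> (struct sl *)((char *)q->slib + PAGE_SIZE / 2)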
+ */ + struct slib *slib; } __attribute__ ((aligned(256))); struct qdio_irq { - __u32 * volatile dev_st_chg_ind; + struct qib qib; + u32 *dsci; /* address of device state change indicator */ + struct ccw_device *cdev; unsigned long int_parm; struct subchannel_id schid; - - unsigned int is_iqdio_irq; - unsigned int is_thinint_irq; - unsigned int hydra_gives_outbound_pcis; - unsigned int sync_done_on_outb_pcis; - - /* QEBSM facility */ - unsigned int is_qebsm; - unsigned long sch_token; + unsigned long sch_token; /* QEBSM facility */ enum qdio_irq_states state; - unsigned int no_input_qs; - unsigned int no_output_qs; + struct siga_flag siga_flag; /* siga sync information from qdioac */ - unsigned char qdioac; + int nr_input_qs; + int nr_output_qs; struct ccw1 ccw; - struct ciw equeue; struct ciw aqueue; - struct qib qib; - - void (*original_int_handler) (struct ccw_device *, - unsigned long, struct irb *); + struct qdio_ssqd_desc ssqd_desc; + + void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *); - /* leave these four members together at the end. won't be cleared in qdio_fill_irq */ + /* + * Warning: Leave these members together at the end so they won't be + * cleared in qdio_setup_irq. + */ struct qdr *qdr; + unsigned long chsc_page; + struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ]; struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ]; - struct semaphore setting_up_sema; + + struct mutex setup_mutex; }; -#endif + +/* helper functions */ +#define queue_type(q) q->irq_ptr->qib.qfmt + +#define is_thinint_irq(irq) \ + (irq->qib.qfmt == QDIO_IQDIO_QFMT || \ + css_general_characteristics.aif_osa) + +/* the highest iqdio queue is used for multicast */ +static inline int multicast_outbound(struct qdio_q *q) +{ + return (q->irq_ptr->nr_output_qs > 1) && + (q->nr == q->irq_ptr->nr_output_qs - 1); +} + +static inline unsigned long long get_usecs(void) +{ + return monotonic_clock() >> 12; +} + +#define pci_out_supported(q) \ + (q->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) +#define is_qebsm(q) (q->irq_ptr->sch_token != 0) + +#define need_siga_sync_thinint(q) (!q->irq_ptr->siga_flag.no_sync_ti) +#define need_siga_sync_out_thinint(q) (!q->irq_ptr->siga_flag.no_sync_out_ti) +#define need_siga_in(q) (q->irq_ptr->siga_flag.input) +#define need_siga_out(q) (q->irq_ptr->siga_flag.output) +#define need_siga_sync(q) (q->irq_ptr->siga_flag.sync) +#define siga_syncs_out_pci(q) (q->irq_ptr->siga_flag.no_sync_out_pci) + +#define for_each_input_queue(irq_ptr, q, i) \ + for (i = 0, q = irq_ptr->input_qs[0]; \ + i < irq_ptr->nr_input_qs; \ + q = irq_ptr->input_qs[++i]) +#define for_each_output_queue(irq_ptr, q, i) \ + for (i = 0, q = irq_ptr->output_qs[0]; \ + i < irq_ptr->nr_output_qs; \ + q = irq_ptr->output_qs[++i]) + +#define prev_buf(bufnr) \ + ((bufnr + QDIO_MAX_BUFFERS_MASK) & QDIO_MAX_BUFFERS_MASK) +#define next_buf(bufnr) \ + ((bufnr + 1) & QDIO_MAX_BUFFERS_MASK) +#define add_buf(bufnr, inc) \ + ((bufnr + inc) & QDIO_MAX_BUFFERS_MASK) + +/* prototypes for thin interrupt */ +void qdio_sync_after_thinint(struct qdio_q *q); +int get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state); +void qdio_check_outbound_after_thinint(struct qdio_q *q); +int qdio_inbound_q_moved(struct qdio_q *q); +void qdio_kick_inbound_handler(struct qdio_q *q); +void qdio_stop_polling(struct qdio_q *q); +int qdio_siga_sync_q(struct qdio_q *q); + +void qdio_setup_thinint(struct qdio_irq *irq_ptr); +int qdio_establish_thinint(struct qdio_irq *irq_ptr); +void qdio_shutdown_thinint(struct qdio_irq 
*irq_ptr); +void tiqdio_add_input_queues(struct qdio_irq *irq_ptr); +void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr); +void tiqdio_inbound_processing(unsigned long q); +int tiqdio_allocate_memory(void); +void tiqdio_free_memory(void); +int tiqdio_register_thinints(void); +void tiqdio_unregister_thinints(void); + +/* prototypes for setup */ +void qdio_inbound_processing(unsigned long data); +void qdio_outbound_processing(unsigned long data); +void qdio_outbound_timer(unsigned long data); +void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, + struct irb *irb); +int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, + int nr_output_qs); +void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr); +int qdio_setup_irq(struct qdio_initialize *init_data); +void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, + struct ccw_device *cdev); +void qdio_release_memory(struct qdio_irq *irq_ptr); +int qdio_setup_init(void); +void qdio_setup_exit(void); + +#endif /* _CIO_QDIO_H */ diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c new file mode 100644 index 000000000000..337aa3087a78 --- /dev/null +++ b/drivers/s390/cio/qdio_debug.c @@ -0,0 +1,240 @@ +/* + * drivers/s390/cio/qdio_debug.c + * + * Copyright IBM Corp. 2008 + * + * Author: Jan Glauber (jang@linux.vnet.ibm.com) + */ +#include <linux/proc_fs.h> +#include <linux/seq_file.h> +#include <linux/debugfs.h> +#include <asm/qdio.h> +#include <asm/debug.h> +#include "qdio_debug.h" +#include "qdio.h" + +debug_info_t *qdio_dbf_setup; +debug_info_t *qdio_dbf_trace; + +static struct dentry *debugfs_root; +#define MAX_DEBUGFS_QUEUES 32 +static struct dentry *debugfs_queues[MAX_DEBUGFS_QUEUES] = { NULL }; +static DEFINE_MUTEX(debugfs_mutex); + +void qdio_allocate_do_dbf(struct qdio_initialize *init_data) +{ + char dbf_text[20]; + + sprintf(dbf_text, "qfmt:%x", init_data->q_format); + QDIO_DBF_TEXT0(0, setup, dbf_text); + QDIO_DBF_HEX0(0, setup, init_data->adapter_name, 8); + sprintf(dbf_text, "qpff%4x", init_data->qib_param_field_format); + QDIO_DBF_TEXT0(0, setup, dbf_text); + QDIO_DBF_HEX0(0, setup, &init_data->qib_param_field, sizeof(void *)); + QDIO_DBF_HEX0(0, setup, &init_data->input_slib_elements, sizeof(void *)); + QDIO_DBF_HEX0(0, setup, &init_data->output_slib_elements, sizeof(void *)); + sprintf(dbf_text, "niq:%4x", init_data->no_input_qs); + QDIO_DBF_TEXT0(0, setup, dbf_text); + sprintf(dbf_text, "noq:%4x", init_data->no_output_qs); + QDIO_DBF_TEXT0(0, setup, dbf_text); + QDIO_DBF_HEX0(0, setup, &init_data->input_handler, sizeof(void *)); + QDIO_DBF_HEX0(0, setup, &init_data->output_handler, sizeof(void *)); + QDIO_DBF_HEX0(0, setup, &init_data->int_parm, sizeof(long)); + QDIO_DBF_HEX0(0, setup, &init_data->flags, sizeof(long)); + QDIO_DBF_HEX0(0, setup, &init_data->input_sbal_addr_array, sizeof(void *)); + QDIO_DBF_HEX0(0, setup, &init_data->output_sbal_addr_array, sizeof(void *)); +} + +static void qdio_unregister_dbf_views(void) +{ + if (qdio_dbf_setup) + debug_unregister(qdio_dbf_setup); + if (qdio_dbf_trace) + debug_unregister(qdio_dbf_trace); +} + +static int qdio_register_dbf_views(void) +{ + qdio_dbf_setup = debug_register("qdio_setup", QDIO_DBF_SETUP_PAGES, + QDIO_DBF_SETUP_NR_AREAS, + QDIO_DBF_SETUP_LEN); + if (!qdio_dbf_setup) + goto oom; + debug_register_view(qdio_dbf_setup, &debug_hex_ascii_view); + debug_set_level(qdio_dbf_setup, QDIO_DBF_SETUP_LEVEL); + + qdio_dbf_trace = debug_register("qdio_trace", QDIO_DBF_TRACE_PAGES, + QDIO_DBF_TRACE_NR_AREAS, + QDIO_DBF_TRACE_LEN); + if 
(!qdio_dbf_trace) + goto oom; + debug_register_view(qdio_dbf_trace, &debug_hex_ascii_view); + debug_set_level(qdio_dbf_trace, QDIO_DBF_TRACE_LEVEL); + return 0; +oom: + qdio_unregister_dbf_views(); + return -ENOMEM; +} + +static int qstat_show(struct seq_file *m, void *v) +{ + unsigned char state; + struct qdio_q *q = m->private; + int i; + + if (!q) + return 0; + + seq_printf(m, "device state indicator: %d\n", *q->irq_ptr->dsci); + seq_printf(m, "nr_used: %d\n", atomic_read(&q->nr_buf_used)); + seq_printf(m, "ftc: %d\n", q->first_to_check); + seq_printf(m, "last_move_ftc: %d\n", q->last_move_ftc); + seq_printf(m, "polling: %d\n", q->u.in.polling); + seq_printf(m, "slsb buffer states:\n"); + + qdio_siga_sync_q(q); + for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) { + get_buf_state(q, i, &state); + switch (state) { + case SLSB_P_INPUT_NOT_INIT: + case SLSB_P_OUTPUT_NOT_INIT: + seq_printf(m, "N"); + break; + case SLSB_P_INPUT_PRIMED: + case SLSB_CU_OUTPUT_PRIMED: + seq_printf(m, "+"); + break; + case SLSB_P_INPUT_ACK: + seq_printf(m, "A"); + break; + case SLSB_P_INPUT_ERROR: + case SLSB_P_OUTPUT_ERROR: + seq_printf(m, "x"); + break; + case SLSB_CU_INPUT_EMPTY: + case SLSB_P_OUTPUT_EMPTY: + seq_printf(m, "-"); + break; + case SLSB_P_INPUT_HALTED: + case SLSB_P_OUTPUT_HALTED: + seq_printf(m, "."); + break; + default: + seq_printf(m, "?"); + } + if (i == 63) + seq_printf(m, "\n"); + } + seq_printf(m, "\n"); + return 0; +} + +static ssize_t qstat_seq_write(struct file *file, const char __user *buf, + size_t count, loff_t *off) +{ + struct seq_file *seq = file->private_data; + struct qdio_q *q = seq->private; + + if (!q) + return 0; + + if (q->is_input_q) + xchg(q->irq_ptr->dsci, 1); + local_bh_disable(); + tasklet_schedule(&q->tasklet); + local_bh_enable(); + return count; +} + +static int qstat_seq_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, qstat_show, + filp->f_path.dentry->d_inode->i_private); +} + +static void get_queue_name(struct qdio_q *q, struct ccw_device *cdev, char *name) +{ + memset(name, 0, sizeof(name)); + sprintf(name, "%s", cdev->dev.bus_id); + if (q->is_input_q) + sprintf(name + strlen(name), "_input"); + else + sprintf(name + strlen(name), "_output"); + sprintf(name + strlen(name), "_%d", q->nr); +} + +static void remove_debugfs_entry(struct qdio_q *q) +{ + int i; + + for (i = 0; i < MAX_DEBUGFS_QUEUES; i++) { + if (!debugfs_queues[i]) + continue; + if (debugfs_queues[i]->d_inode->i_private == q) { + debugfs_remove(debugfs_queues[i]); + debugfs_queues[i] = NULL; + } + } +} + +static struct file_operations debugfs_fops = { + .owner = THIS_MODULE, + .open = qstat_seq_open, + .read = seq_read, + .write = qstat_seq_write, + .llseek = seq_lseek, + .release = single_release, +}; + +static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev) +{ + int i = 0; + char name[40]; + + while (debugfs_queues[i] != NULL) { + i++; + if (i >= MAX_DEBUGFS_QUEUES) + return; + } + get_queue_name(q, cdev, name); + debugfs_queues[i] = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR, + debugfs_root, q, &debugfs_fops); +} + +void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev) +{ + struct qdio_q *q; + int i; + + mutex_lock(&debugfs_mutex); + for_each_input_queue(irq_ptr, q, i) + setup_debugfs_entry(q, cdev); + for_each_output_queue(irq_ptr, q, i) + setup_debugfs_entry(q, cdev); + mutex_unlock(&debugfs_mutex); +} + +void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev) +{ + struct qdio_q *q; + int 
i; + + mutex_lock(&debugfs_mutex); + for_each_input_queue(irq_ptr, q, i) + remove_debugfs_entry(q); + for_each_output_queue(irq_ptr, q, i) + remove_debugfs_entry(q); + mutex_unlock(&debugfs_mutex); +} + +int __init qdio_debug_init(void) +{ + debugfs_root = debugfs_create_dir("qdio_queues", NULL); + return qdio_register_dbf_views(); +} + +void qdio_debug_exit(void) +{ + debugfs_remove(debugfs_root); + qdio_unregister_dbf_views(); +} diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h new file mode 100644 index 000000000000..8484b83698e1 --- /dev/null +++ b/drivers/s390/cio/qdio_debug.h @@ -0,0 +1,91 @@ +/* + * drivers/s390/cio/qdio_debug.h + * + * Copyright IBM Corp. 2008 + * + * Author: Jan Glauber (jang@linux.vnet.ibm.com) + */ +#ifndef QDIO_DEBUG_H +#define QDIO_DEBUG_H + +#include <asm/debug.h> +#include <asm/qdio.h> +#include "qdio.h" + +#define QDIO_DBF_HEX(ex, name, level, addr, len) \ + do { \ + if (ex) \ + debug_exception(qdio_dbf_##name, level, (void *)(addr), len); \ + else \ + debug_event(qdio_dbf_##name, level, (void *)(addr), len); \ + } while (0) +#define QDIO_DBF_TEXT(ex, name, level, text) \ + do { \ + if (ex) \ + debug_text_exception(qdio_dbf_##name, level, text); \ + else \ + debug_text_event(qdio_dbf_##name, level, text); \ + } while (0) + +#define QDIO_DBF_HEX0(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 0, addr, len) +#define QDIO_DBF_HEX1(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 1, addr, len) +#define QDIO_DBF_HEX2(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 2, addr, len) + +#ifdef CONFIG_QDIO_DEBUG +#define QDIO_DBF_HEX3(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 3, addr, len) +#define QDIO_DBF_HEX4(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 4, addr, len) +#define QDIO_DBF_HEX5(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 5, addr, len) +#define QDIO_DBF_HEX6(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 6, addr, len) +#else +#define QDIO_DBF_HEX3(ex, name, addr, len) do {} while (0) +#define QDIO_DBF_HEX4(ex, name, addr, len) do {} while (0) +#define QDIO_DBF_HEX5(ex, name, addr, len) do {} while (0) +#define QDIO_DBF_HEX6(ex, name, addr, len) do {} while (0) +#endif /* CONFIG_QDIO_DEBUG */ + +#define QDIO_DBF_TEXT0(ex, name, text) QDIO_DBF_TEXT(ex, name, 0, text) +#define QDIO_DBF_TEXT1(ex, name, text) QDIO_DBF_TEXT(ex, name, 1, text) +#define QDIO_DBF_TEXT2(ex, name, text) QDIO_DBF_TEXT(ex, name, 2, text) + +#ifdef CONFIG_QDIO_DEBUG +#define QDIO_DBF_TEXT3(ex, name, text) QDIO_DBF_TEXT(ex, name, 3, text) +#define QDIO_DBF_TEXT4(ex, name, text) QDIO_DBF_TEXT(ex, name, 4, text) +#define QDIO_DBF_TEXT5(ex, name, text) QDIO_DBF_TEXT(ex, name, 5, text) +#define QDIO_DBF_TEXT6(ex, name, text) QDIO_DBF_TEXT(ex, name, 6, text) +#else +#define QDIO_DBF_TEXT3(ex, name, text) do {} while (0) +#define QDIO_DBF_TEXT4(ex, name, text) do {} while (0) +#define QDIO_DBF_TEXT5(ex, name, text) do {} while (0) +#define QDIO_DBF_TEXT6(ex, name, text) do {} while (0) +#endif /* CONFIG_QDIO_DEBUG */ + +/* s390dbf views */ +#define QDIO_DBF_SETUP_LEN 8 +#define QDIO_DBF_SETUP_PAGES 4 +#define QDIO_DBF_SETUP_NR_AREAS 1 + +#define QDIO_DBF_TRACE_LEN 8 +#define QDIO_DBF_TRACE_NR_AREAS 2 + +#ifdef CONFIG_QDIO_DEBUG +#define QDIO_DBF_TRACE_PAGES 16 +#define QDIO_DBF_SETUP_LEVEL 6 +#define QDIO_DBF_TRACE_LEVEL 4 +#else /* !CONFIG_QDIO_DEBUG */ +#define QDIO_DBF_TRACE_PAGES 4 +#define QDIO_DBF_SETUP_LEVEL 2 +#define QDIO_DBF_TRACE_LEVEL 2 +#endif /* CONFIG_QDIO_DEBUG */ + +extern debug_info_t *qdio_dbf_setup; +extern debug_info_t *qdio_dbf_trace; + +void 
qdio_allocate_do_dbf(struct qdio_initialize *init_data); +void debug_print_bstat(struct qdio_q *q); +void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, + struct ccw_device *cdev); +void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, + struct ccw_device *cdev); +int qdio_debug_init(void); +void qdio_debug_exit(void); +#endif diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c new file mode 100644 index 000000000000..d10c73cc1688 --- /dev/null +++ b/drivers/s390/cio/qdio_main.c @@ -0,0 +1,1755 @@ +/* + * linux/drivers/s390/cio/qdio_main.c + * + * Linux for s390 qdio support, buffer handling, qdio API and module support. + * + * Copyright 2000,2008 IBM Corp. + * Author(s): Utz Bacher <utz.bacher@de.ibm.com> + * Jan Glauber <jang@linux.vnet.ibm.com> + * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com> + */ +#include <linux/module.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/timer.h> +#include <linux/delay.h> +#include <asm/atomic.h> +#include <asm/debug.h> +#include <asm/qdio.h> + +#include "cio.h" +#include "css.h" +#include "device.h" +#include "qdio.h" +#include "qdio_debug.h" +#include "qdio_perf.h" + +MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\ + "Jan Glauber <jang@linux.vnet.ibm.com>"); +MODULE_DESCRIPTION("QDIO base support"); +MODULE_LICENSE("GPL"); + +static inline int do_siga_sync(struct subchannel_id schid, + unsigned int out_mask, unsigned int in_mask) +{ + register unsigned long __fc asm ("0") = 2; + register struct subchannel_id __schid asm ("1") = schid; + register unsigned long out asm ("2") = out_mask; + register unsigned long in asm ("3") = in_mask; + int cc; + + asm volatile( + " siga 0\n" + " ipm %0\n" + " srl %0,28\n" + : "=d" (cc) + : "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc"); + return cc; +} + +static inline int do_siga_input(struct subchannel_id schid, unsigned int mask) +{ + register unsigned long __fc asm ("0") = 1; + register struct subchannel_id __schid asm ("1") = schid; + register unsigned long __mask asm ("2") = mask; + int cc; + + asm volatile( + " siga 0\n" + " ipm %0\n" + " srl %0,28\n" + : "=d" (cc) + : "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory"); + return cc; +} + +/** + * do_siga_output - perform SIGA-w/wt function + * @schid: subchannel id or in case of QEBSM the subchannel token + * @mask: which output queues to process + * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer + * @fc: function code to perform + * + * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION. + * Note: For IQDC unicast queues only the highest priority queue is processed. 
+ */ +static inline int do_siga_output(unsigned long schid, unsigned long mask, + u32 *bb, unsigned int fc) +{ + register unsigned long __fc asm("0") = fc; + register unsigned long __schid asm("1") = schid; + register unsigned long __mask asm("2") = mask; + int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION; + + asm volatile( + " siga 0\n" + "0: ipm %0\n" + " srl %0,28\n" + "1:\n" + EX_TABLE(0b, 1b) + : "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask) + : : "cc", "memory"); + *bb = ((unsigned int) __fc) >> 31; + return cc; +} + +static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq) +{ + char dbf_text[15]; + + /* all done or next buffer state different */ + if (ccq == 0 || ccq == 32) + return 0; + /* not all buffers processed */ + if (ccq == 96 || ccq == 97) + return 1; + /* notify devices immediately */ + sprintf(dbf_text, "%d", ccq); + QDIO_DBF_TEXT2(1, trace, dbf_text); + return -EIO; +} + +/** + * qdio_do_eqbs - extract buffer states for QEBSM + * @q: queue to manipulate + * @state: state of the extracted buffers + * @start: buffer number to start at + * @count: count of buffers to examine + * + * Returns the number of successfull extracted equal buffer states. + * Stops processing if a state is different from the last buffers state. + */ +static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, + int start, int count) +{ + unsigned int ccq = 0; + int tmp_count = count, tmp_start = start; + int nr = q->nr; + int rc; + char dbf_text[15]; + + BUG_ON(!q->irq_ptr->sch_token); + + if (!q->is_input_q) + nr += q->irq_ptr->nr_input_qs; +again: + ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count); + rc = qdio_check_ccq(q, ccq); + + /* At least one buffer was processed, return and extract the remaining + * buffers later. + */ + if ((ccq == 96) && (count != tmp_count)) + return (count - tmp_count); + if (rc == 1) { + QDIO_DBF_TEXT5(1, trace, "eqAGAIN"); + goto again; + } + + if (rc < 0) { + QDIO_DBF_TEXT2(1, trace, "eqberr"); + sprintf(dbf_text, "%2x,%2x,%d,%d", count, tmp_count, ccq, nr); + QDIO_DBF_TEXT2(1, trace, dbf_text); + q->handler(q->irq_ptr->cdev, + QDIO_ERROR_ACTIVATE_CHECK_CONDITION, + 0, -1, -1, q->irq_ptr->int_parm); + return 0; + } + return count - tmp_count; +} + +/** + * qdio_do_sqbs - set buffer states for QEBSM + * @q: queue to manipulate + * @state: new state of the buffers + * @start: first buffer number to change + * @count: how many buffers to change + * + * Returns the number of successfully changed buffers. + * Does retrying until the specified count of buffer states is set or an + * error occurs. 
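 *
 * Illustrative example of the in/out convention shared with qdio_do_eqbs
 * (values made up): tmp_start and tmp_count are updated by the
 * instruction, so the number of buffers handled is count - tmp_count:
 *
 *	before:	tmp_start = 10, tmp_count = 8
 *	after:	tmp_start = 14, tmp_count = 4	-> 4 buffers changed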
+ */ +static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start, + int count) +{ + unsigned int ccq = 0; + int tmp_count = count, tmp_start = start; + int nr = q->nr; + int rc; + char dbf_text[15]; + + BUG_ON(!q->irq_ptr->sch_token); + + if (!q->is_input_q) + nr += q->irq_ptr->nr_input_qs; +again: + ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count); + rc = qdio_check_ccq(q, ccq); + if (rc == 1) { + QDIO_DBF_TEXT5(1, trace, "sqAGAIN"); + goto again; + } + if (rc < 0) { + QDIO_DBF_TEXT3(1, trace, "sqberr"); + sprintf(dbf_text, "%2x,%2x", count, tmp_count); + QDIO_DBF_TEXT3(1, trace, dbf_text); + sprintf(dbf_text, "%d,%d", ccq, nr); + QDIO_DBF_TEXT3(1, trace, dbf_text); + + q->handler(q->irq_ptr->cdev, + QDIO_ERROR_ACTIVATE_CHECK_CONDITION, + 0, -1, -1, q->irq_ptr->int_parm); + return 0; + } + WARN_ON(tmp_count); + return count - tmp_count; +} + +/* returns number of examined buffers and their common state in *state */ +static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, + unsigned char *state, unsigned int count) +{ + unsigned char __state = 0; + int i; + + BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK); + BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q); + + if (is_qebsm(q)) + return qdio_do_eqbs(q, state, bufnr, count); + + for (i = 0; i < count; i++) { + if (!__state) + __state = q->slsb.val[bufnr]; + else if (q->slsb.val[bufnr] != __state) + break; + bufnr = next_buf(bufnr); + } + *state = __state; + return i; +} + +inline int get_buf_state(struct qdio_q *q, unsigned int bufnr, + unsigned char *state) +{ + return get_buf_states(q, bufnr, state, 1); +} + +/* wrap-around safe setting of slsb states, returns number of changed buffers */ +static inline int set_buf_states(struct qdio_q *q, int bufnr, + unsigned char state, int count) +{ + int i; + + BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK); + BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q); + + if (is_qebsm(q)) + return qdio_do_sqbs(q, state, bufnr, count); + + for (i = 0; i < count; i++) { + xchg(&q->slsb.val[bufnr], state); + bufnr = next_buf(bufnr); + } + return count; +} + +static inline int set_buf_state(struct qdio_q *q, int bufnr, + unsigned char state) +{ + return set_buf_states(q, bufnr, state, 1); +} + +/* set slsb states to initial state */ +void qdio_init_buf_states(struct qdio_irq *irq_ptr) +{ + struct qdio_q *q; + int i; + + for_each_input_queue(irq_ptr, q, i) + set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT, + QDIO_MAX_BUFFERS_PER_Q); + for_each_output_queue(irq_ptr, q, i) + set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT, + QDIO_MAX_BUFFERS_PER_Q); +} + +static int qdio_siga_sync(struct qdio_q *q, unsigned int output, + unsigned int input) +{ + int cc; + + if (!need_siga_sync(q)) + return 0; + + qdio_perf_stat_inc(&perf_stats.siga_sync); + + cc = do_siga_sync(q->irq_ptr->schid, output, input); + if (cc) { + QDIO_DBF_TEXT4(0, trace, "sigasync"); + QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); + QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *)); + } + return cc; +} + +inline int qdio_siga_sync_q(struct qdio_q *q) +{ + if (q->is_input_q) + return qdio_siga_sync(q, 0, q->mask); + else + return qdio_siga_sync(q, q->mask, 0); +} + +static inline int qdio_siga_sync_out(struct qdio_q *q) +{ + return qdio_siga_sync(q, ~0U, 0); +} + +static inline int qdio_siga_sync_all(struct qdio_q *q) +{ + return qdio_siga_sync(q, ~0U, ~0U); +} + +static inline int qdio_do_siga_output(struct qdio_q *q, unsigned int *busy_bit) +{ + unsigned int fc = 0; + unsigned long schid; + + if (!is_qebsm(q)) + schid = *((u32 *)&q->irq_ptr->schid); + 
else { + schid = q->irq_ptr->sch_token; + fc |= 0x80; + } + return do_siga_output(schid, q->mask, busy_bit, fc); +} + +static int qdio_siga_output(struct qdio_q *q) +{ + int cc; + u32 busy_bit; + u64 start_time = 0; + + QDIO_DBF_TEXT5(0, trace, "sigaout"); + QDIO_DBF_HEX5(0, trace, &q, sizeof(void *)); + + qdio_perf_stat_inc(&perf_stats.siga_out); +again: + cc = qdio_do_siga_output(q, &busy_bit); + if (queue_type(q) == QDIO_IQDIO_QFMT && cc == 2 && busy_bit) { + if (!start_time) + start_time = get_usecs(); + else if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE) + goto again; + } + + if (cc == 2 && busy_bit) + cc |= QDIO_ERROR_SIGA_BUSY; + if (cc) + QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *)); + return cc; +} + +static inline int qdio_siga_input(struct qdio_q *q) +{ + int cc; + + QDIO_DBF_TEXT4(0, trace, "sigain"); + QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); + + qdio_perf_stat_inc(&perf_stats.siga_in); + + cc = do_siga_input(q->irq_ptr->schid, q->mask); + if (cc) + QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *)); + return cc; +} + +/* called from thinint inbound handler */ +void qdio_sync_after_thinint(struct qdio_q *q) +{ + if (pci_out_supported(q)) { + if (need_siga_sync_thinint(q)) + qdio_siga_sync_all(q); + else if (need_siga_sync_out_thinint(q)) + qdio_siga_sync_out(q); + } else + qdio_siga_sync_q(q); +} + +inline void qdio_stop_polling(struct qdio_q *q) +{ + spin_lock_bh(&q->u.in.lock); + if (!q->u.in.polling) { + spin_unlock_bh(&q->u.in.lock); + return; + } + q->u.in.polling = 0; + qdio_perf_stat_inc(&perf_stats.debug_stop_polling); + + /* show the card that we are not polling anymore */ + set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT); + spin_unlock_bh(&q->u.in.lock); +} + +static void announce_buffer_error(struct qdio_q *q) +{ + char dbf_text[15]; + + if (q->is_input_q) + QDIO_DBF_TEXT3(1, trace, "inperr"); + else + QDIO_DBF_TEXT3(0, trace, "outperr"); + + sprintf(dbf_text, "%x-%x-%x", q->first_to_check, + q->sbal[q->first_to_check]->element[14].flags, + q->sbal[q->first_to_check]->element[15].flags); + QDIO_DBF_TEXT3(1, trace, dbf_text); + QDIO_DBF_HEX2(1, trace, q->sbal[q->first_to_check], 256); + + q->qdio_error = QDIO_ERROR_SLSB_STATE; +} + +static int get_inbound_buffer_frontier(struct qdio_q *q) +{ + int count, stop; + unsigned char state; + + /* + * If we still poll don't update last_move_ftc, keep the + * previously ACK buffer there. + */ + if (!q->u.in.polling) + q->last_move_ftc = q->first_to_check; + + /* + * Don't check 128 buffers, as otherwise qdio_inbound_q_moved + * would return 0. + */ + count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK); + stop = add_buf(q->first_to_check, count); + + /* + * No siga sync here, as a PCI or we after a thin interrupt + * will sync the queues. + */ + + /* need to set count to 1 for non-qebsm */ + if (!is_qebsm(q)) + count = 1; + +check_next: + if (q->first_to_check == stop) + goto out; + + count = get_buf_states(q, q->first_to_check, &state, count); + if (!count) + goto out; + + switch (state) { + case SLSB_P_INPUT_PRIMED: + QDIO_DBF_TEXT5(0, trace, "inptprim"); + + /* + * Only ACK the first buffer. The ACK will be removed in + * qdio_stop_polling. + */ + if (q->u.in.polling) + state = SLSB_P_INPUT_NOT_INIT; + else { + q->u.in.polling = 1; + state = SLSB_P_INPUT_ACK; + } + set_buf_state(q, q->first_to_check, state); + + /* + * Need to change all PRIMED buffers to NOT_INIT, otherwise + * we're loosing initiative in the thinint code. 
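 *
 * Worked example (illustrative values): with polling off and buffers
 * 5..7 PRIMED, this path effectively does
 *
 *	set_buf_state(q, 5, SLSB_P_INPUT_ACK);			(done above)
 *	set_buf_states(q, 6, SLSB_P_INPUT_NOT_INIT, 2);		(buffers 6, 7)
 *	q->first_to_check = add_buf(5, 3);			(-> 8)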
+ */ + if (count > 1) + set_buf_states(q, next_buf(q->first_to_check), + SLSB_P_INPUT_NOT_INIT, count - 1); + + /* + * No siga-sync needed for non-qebsm here, as the inbound queue + * will be synced on the next siga-r, resp. + * tiqdio_is_inbound_q_done will do the siga-sync. + */ + q->first_to_check = add_buf(q->first_to_check, count); + atomic_sub(count, &q->nr_buf_used); + goto check_next; + case SLSB_P_INPUT_ERROR: + announce_buffer_error(q); + /* process the buffer, the upper layer will take care of it */ + q->first_to_check = add_buf(q->first_to_check, count); + atomic_sub(count, &q->nr_buf_used); + break; + case SLSB_CU_INPUT_EMPTY: + case SLSB_P_INPUT_NOT_INIT: + case SLSB_P_INPUT_ACK: + QDIO_DBF_TEXT5(0, trace, "inpnipro"); + break; + default: + BUG(); + } +out: + QDIO_DBF_HEX4(0, trace, &q->first_to_check, sizeof(int)); + return q->first_to_check; +} + +int qdio_inbound_q_moved(struct qdio_q *q) +{ + int bufnr; + + bufnr = get_inbound_buffer_frontier(q); + + if ((bufnr != q->last_move_ftc) || q->qdio_error) { + if (!need_siga_sync(q) && !pci_out_supported(q)) + q->u.in.timestamp = get_usecs(); + + QDIO_DBF_TEXT4(0, trace, "inhasmvd"); + QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); + return 1; + } else + return 0; +} + +static int qdio_inbound_q_done(struct qdio_q *q) +{ + unsigned char state; +#ifdef CONFIG_QDIO_DEBUG + char dbf_text[15]; +#endif + + if (!atomic_read(&q->nr_buf_used)) + return 1; + + /* + * We need that one for synchronization with the adapter, as it + * does a kind of PCI avoidance. + */ + qdio_siga_sync_q(q); + + get_buf_state(q, q->first_to_check, &state); + if (state == SLSB_P_INPUT_PRIMED) + /* we got something to do */ + return 0; + + /* on VM, we don't poll, so the q is always done here */ + if (need_siga_sync(q) || pci_out_supported(q)) + return 1; + + /* + * At this point we know, that inbound first_to_check + * has (probably) not moved (see qdio_inbound_processing). 
+ */ + if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) { +#ifdef CONFIG_QDIO_DEBUG + QDIO_DBF_TEXT4(0, trace, "inqisdon"); + QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); + sprintf(dbf_text, "pf%02x", q->first_to_check); + QDIO_DBF_TEXT4(0, trace, dbf_text); +#endif /* CONFIG_QDIO_DEBUG */ + return 1; + } else { +#ifdef CONFIG_QDIO_DEBUG + QDIO_DBF_TEXT4(0, trace, "inqisntd"); + QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); + sprintf(dbf_text, "pf%02x", q->first_to_check); + QDIO_DBF_TEXT4(0, trace, dbf_text); +#endif /* CONFIG_QDIO_DEBUG */ + return 0; + } +} + +void qdio_kick_inbound_handler(struct qdio_q *q) +{ + int count, start, end; +#ifdef CONFIG_QDIO_DEBUG + char dbf_text[15]; +#endif + + qdio_perf_stat_inc(&perf_stats.inbound_handler); + + start = q->first_to_kick; + end = q->first_to_check; + if (end >= start) + count = end - start; + else + count = end + QDIO_MAX_BUFFERS_PER_Q - start; + +#ifdef CONFIG_QDIO_DEBUG + sprintf(dbf_text, "s=%2xc=%2x", start, count); + QDIO_DBF_TEXT4(0, trace, dbf_text); +#endif /* CONFIG_QDIO_DEBUG */ + + if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) + return; + + q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, + start, count, q->irq_ptr->int_parm); + + /* for the next time */ + q->first_to_kick = q->first_to_check; + q->qdio_error = 0; +} + +static void __qdio_inbound_processing(struct qdio_q *q) +{ + qdio_perf_stat_inc(&perf_stats.tasklet_inbound); +again: + if (!qdio_inbound_q_moved(q)) + return; + + qdio_kick_inbound_handler(q); + + if (!qdio_inbound_q_done(q)) + /* means poll time is not yet over */ + goto again; + + qdio_stop_polling(q); + /* + * We need to check again to not lose initiative after + * resetting the ACK state. + */ + if (!qdio_inbound_q_done(q)) + goto again; +} + +/* inbound tasklet */ +void qdio_inbound_processing(unsigned long data) +{ + struct qdio_q *q = (struct qdio_q *)data; + __qdio_inbound_processing(q); +} + +static int get_outbound_buffer_frontier(struct qdio_q *q) +{ + int count, stop; + unsigned char state; + + if (((queue_type(q) != QDIO_IQDIO_QFMT) && !pci_out_supported(q)) || + (queue_type(q) == QDIO_IQDIO_QFMT && multicast_outbound(q))) + qdio_siga_sync_q(q); + + /* + * Don't check 128 buffers, as otherwise qdio_inbound_q_moved + * would return 0. + */ + count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK); + stop = add_buf(q->first_to_check, count); + + /* need to set count to 1 for non-qebsm */ + if (!is_qebsm(q)) + count = 1; + +check_next: + if (q->first_to_check == stop) + return q->first_to_check; + + count = get_buf_states(q, q->first_to_check, &state, count); + if (!count) + return q->first_to_check; + + switch (state) { + case SLSB_P_OUTPUT_EMPTY: + /* the adapter got it */ + QDIO_DBF_TEXT5(0, trace, "outpempt"); + + atomic_sub(count, &q->nr_buf_used); + q->first_to_check = add_buf(q->first_to_check, count); + /* + * We fetch all buffer states at once. get_buf_states may + * return count < stop. For QEBSM we do not loop. 
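 *
 * Buffer numbers wrap at QDIO_MAX_BUFFERS_PER_Q (128 buffers per queue),
 * so the frontier always advances through the mask based helpers, e.g.
 * (illustrative values):
 *
 *	add_buf(126, 5) == (126 + 5) & QDIO_MAX_BUFFERS_MASK == 3
 *	next_buf(127)   == 0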
+ */ + if (is_qebsm(q)) + break; + goto check_next; + case SLSB_P_OUTPUT_ERROR: + announce_buffer_error(q); + /* process the buffer, the upper layer will take care of it */ + q->first_to_check = add_buf(q->first_to_check, count); + atomic_sub(count, &q->nr_buf_used); + break; + case SLSB_CU_OUTPUT_PRIMED: + /* the adapter has not fetched the output yet */ + QDIO_DBF_TEXT5(0, trace, "outpprim"); + break; + case SLSB_P_OUTPUT_NOT_INIT: + case SLSB_P_OUTPUT_HALTED: + break; + default: + BUG(); + } + return q->first_to_check; +} + +/* all buffers processed? */ +static inline int qdio_outbound_q_done(struct qdio_q *q) +{ + return atomic_read(&q->nr_buf_used) == 0; +} + +static inline int qdio_outbound_q_moved(struct qdio_q *q) +{ + int bufnr; + + bufnr = get_outbound_buffer_frontier(q); + + if ((bufnr != q->last_move_ftc) || q->qdio_error) { + q->last_move_ftc = bufnr; + QDIO_DBF_TEXT4(0, trace, "oqhasmvd"); + QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); + return 1; + } else + return 0; +} + +/* + * VM could present us cc=2 and busy bit set on SIGA-write + * during reconfiguration of their Guest LAN (only in iqdio mode, + * otherwise qdio is asynchronous and cc=2 and busy bit there will take + * the queues down immediately). + * + * Therefore qdio_siga_output will try for a short time constantly, + * if such a condition occurs. If it doesn't change, it will + * increase the busy_siga_counter and save the timestamp, and + * schedule the queue for later processing. qdio_outbound_processing + * will check out the counter. If non-zero, it will call qdio_kick_outbound_q + * as often as the value of the counter. This will attempt further SIGA + * instructions. For each successful SIGA, the counter is + * decreased, for failing SIGAs the counter remains the same, after + * all. After some time of no movement, qdio_kick_outbound_q will + * finally fail and reflect corresponding error codes to call + * the upper layer module and have it take the queues down. + * + * Note that this is a change from the original HiperSockets design + * (saying cc=2 and busy bit means take the queues down), but in + * these days Guest LAN didn't exist... excessive cc=2 with busy bit + * conditions will still take the queues down, but the threshold is + * higher due to the Guest LAN environment. + * + * Called from outbound tasklet and do_QDIO handler. 
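 *
 * Condensed into pseudo code (a summary of the above; the thresholds are
 * the QDIO_BUSY_BIT_* constants used by the functions below):
 *
 *	cc = SIGA-w;
 *	if (cc == 2 && busy bit set) {
 *		retry inline until QDIO_BUSY_BIT_PATIENCE is over;
 *		busy_siga_counter++; remember the timestamp;
 *		reschedule the tasklet until QDIO_BUSY_BIT_GIVE_UP is over;
 *		then report the error to the upper-layer module;
 *	}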
+ */ +static void qdio_kick_outbound_q(struct qdio_q *q) +{ + int rc; +#ifdef CONFIG_QDIO_DEBUG + char dbf_text[15]; + + QDIO_DBF_TEXT5(0, trace, "kickoutq"); + QDIO_DBF_HEX5(0, trace, &q, sizeof(void *)); +#endif /* CONFIG_QDIO_DEBUG */ + + if (!need_siga_out(q)) + return; + + rc = qdio_siga_output(q); + switch (rc) { + case 0: + /* went smooth this time, reset timestamp */ + q->u.out.timestamp = 0; + + /* TODO: improve error handling for CC=0 case */ +#ifdef CONFIG_QDIO_DEBUG + QDIO_DBF_TEXT3(0, trace, "cc2reslv"); + sprintf(dbf_text, "%4x%2x%2x", q->irq_ptr->schid.sch_no, q->nr, + atomic_read(&q->u.out.busy_siga_counter)); + QDIO_DBF_TEXT3(0, trace, dbf_text); +#endif /* CONFIG_QDIO_DEBUG */ + break; + /* cc=2 and busy bit */ + case (2 | QDIO_ERROR_SIGA_BUSY): + atomic_inc(&q->u.out.busy_siga_counter); + + /* if the last siga was successful, save timestamp here */ + if (!q->u.out.timestamp) + q->u.out.timestamp = get_usecs(); + + /* if we're in time, don't touch qdio_error */ + if (get_usecs() - q->u.out.timestamp < QDIO_BUSY_BIT_GIVE_UP) { + tasklet_schedule(&q->tasklet); + break; + } + QDIO_DBF_TEXT2(0, trace, "cc2REPRT"); +#ifdef CONFIG_QDIO_DEBUG + sprintf(dbf_text, "%4x%2x%2x", q->irq_ptr->schid.sch_no, q->nr, + atomic_read(&q->u.out.busy_siga_counter)); + QDIO_DBF_TEXT3(0, trace, dbf_text); +#endif /* CONFIG_QDIO_DEBUG */ + default: + /* for plain cc=1, 2 or 3 */ + q->qdio_error = rc; + } +} + +static void qdio_kick_outbound_handler(struct qdio_q *q) +{ + int start, end, count; +#ifdef CONFIG_QDIO_DEBUG + char dbf_text[15]; +#endif + + start = q->first_to_kick; + end = q->last_move_ftc; + if (end >= start) + count = end - start; + else + count = end + QDIO_MAX_BUFFERS_PER_Q - start; + +#ifdef CONFIG_QDIO_DEBUG + QDIO_DBF_TEXT4(0, trace, "kickouth"); + QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); + + sprintf(dbf_text, "s=%2xc=%2x", start, count); + QDIO_DBF_TEXT4(0, trace, dbf_text); +#endif /* CONFIG_QDIO_DEBUG */ + + if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) + return; + + q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count, + q->irq_ptr->int_parm); + + /* for the next time: */ + q->first_to_kick = q->last_move_ftc; + q->qdio_error = 0; +} + +static void __qdio_outbound_processing(struct qdio_q *q) +{ + int siga_attempts; + + qdio_perf_stat_inc(&perf_stats.tasklet_outbound); + + /* see comment in qdio_kick_outbound_q */ + siga_attempts = atomic_read(&q->u.out.busy_siga_counter); + while (siga_attempts--) { + atomic_dec(&q->u.out.busy_siga_counter); + qdio_kick_outbound_q(q); + } + + BUG_ON(atomic_read(&q->nr_buf_used) < 0); + + if (qdio_outbound_q_moved(q)) + qdio_kick_outbound_handler(q); + + if (queue_type(q) == QDIO_ZFCP_QFMT) { + if (!pci_out_supported(q) && !qdio_outbound_q_done(q)) + tasklet_schedule(&q->tasklet); + return; + } + + /* bail out for HiperSockets unicast queues */ + if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q)) + return; + + if (q->u.out.pci_out_enabled) + return; + + /* + * Now we know that queue type is either qeth without pci enabled + * or HiperSockets multicast. Make sure buffer switch from PRIMED to + * EMPTY is noticed and outbound_handler is called after some time. 
+ */ + if (qdio_outbound_q_done(q)) + del_timer(&q->u.out.timer); + else { + if (!timer_pending(&q->u.out.timer)) { + mod_timer(&q->u.out.timer, jiffies + 10 * HZ); + qdio_perf_stat_inc(&perf_stats.debug_tl_out_timer); + } + } +} + +/* outbound tasklet */ +void qdio_outbound_processing(unsigned long data) +{ + struct qdio_q *q = (struct qdio_q *)data; + __qdio_outbound_processing(q); +} + +void qdio_outbound_timer(unsigned long data) +{ + struct qdio_q *q = (struct qdio_q *)data; + tasklet_schedule(&q->tasklet); +} + +/* called from thinint inbound tasklet */ +void qdio_check_outbound_after_thinint(struct qdio_q *q) +{ + struct qdio_q *out; + int i; + + if (!pci_out_supported(q)) + return; + + for_each_output_queue(q->irq_ptr, out, i) + if (!qdio_outbound_q_done(out)) + tasklet_schedule(&out->tasklet); +} + +static inline void qdio_set_state(struct qdio_irq *irq_ptr, + enum qdio_irq_states state) +{ +#ifdef CONFIG_QDIO_DEBUG + char dbf_text[15]; + + QDIO_DBF_TEXT5(0, trace, "newstate"); + sprintf(dbf_text, "%4x%4x", irq_ptr->schid.sch_no, state); + QDIO_DBF_TEXT5(0, trace, dbf_text); +#endif /* CONFIG_QDIO_DEBUG */ + + irq_ptr->state = state; + mb(); +} + +static void qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb) +{ + char dbf_text[15]; + + if (irb->esw.esw0.erw.cons) { + sprintf(dbf_text, "sens%4x", schid.sch_no); + QDIO_DBF_TEXT2(1, trace, dbf_text); + QDIO_DBF_HEX0(0, trace, irb, 64); + QDIO_DBF_HEX0(0, trace, irb->ecw, 64); + } +} + +/* PCI interrupt handler */ +static void qdio_int_handler_pci(struct qdio_irq *irq_ptr) +{ + int i; + struct qdio_q *q; + + qdio_perf_stat_inc(&perf_stats.pci_int); + + for_each_input_queue(irq_ptr, q, i) + tasklet_schedule(&q->tasklet); + + if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)) + return; + + for_each_output_queue(irq_ptr, q, i) { + if (qdio_outbound_q_done(q)) + continue; + + if (!siga_syncs_out_pci(q)) + qdio_siga_sync_q(q); + + tasklet_schedule(&q->tasklet); + } +} + +static void qdio_handle_activate_check(struct ccw_device *cdev, + unsigned long intparm, int cstat, int dstat) +{ + struct qdio_irq *irq_ptr = cdev->private->qdio_data; + struct qdio_q *q; + char dbf_text[15]; + + QDIO_DBF_TEXT2(1, trace, "ick2"); + sprintf(dbf_text, "%s", cdev->dev.bus_id); + QDIO_DBF_TEXT2(1, trace, dbf_text); + QDIO_DBF_HEX2(0, trace, &intparm, sizeof(int)); + QDIO_DBF_HEX2(0, trace, &dstat, sizeof(int)); + QDIO_DBF_HEX2(0, trace, &cstat, sizeof(int)); + + if (irq_ptr->nr_input_qs) { + q = irq_ptr->input_qs[0]; + } else if (irq_ptr->nr_output_qs) { + q = irq_ptr->output_qs[0]; + } else { + dump_stack(); + goto no_handler; + } + q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION, + 0, -1, -1, irq_ptr->int_parm); +no_handler: + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); +} + +static void qdio_call_shutdown(struct work_struct *work) +{ + struct ccw_device_private *priv; + struct ccw_device *cdev; + + priv = container_of(work, struct ccw_device_private, kick_work); + cdev = priv->cdev; + qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); + put_device(&cdev->dev); +} + +static void qdio_int_error(struct ccw_device *cdev) +{ + struct qdio_irq *irq_ptr = cdev->private->qdio_data; + + switch (irq_ptr->state) { + case QDIO_IRQ_STATE_INACTIVE: + case QDIO_IRQ_STATE_CLEANUP: + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); + break; + case QDIO_IRQ_STATE_ESTABLISHED: + case QDIO_IRQ_STATE_ACTIVE: + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); + if (get_device(&cdev->dev)) { + /* Can't call shutdown from interrupt context. 
*/ + PREPARE_WORK(&cdev->private->kick_work, + qdio_call_shutdown); + queue_work(ccw_device_work, &cdev->private->kick_work); + } + break; + default: + WARN_ON(1); + } + wake_up(&cdev->private->wait_q); +} + +static int qdio_establish_check_errors(struct ccw_device *cdev, int cstat, + int dstat) +{ + struct qdio_irq *irq_ptr = cdev->private->qdio_data; + + if (cstat || (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))) { + QDIO_DBF_TEXT2(1, setup, "eq:ckcon"); + goto error; + } + + if (!(dstat & DEV_STAT_DEV_END)) { + QDIO_DBF_TEXT2(1, setup, "eq:no de"); + goto error; + } + + if (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) { + QDIO_DBF_TEXT2(1, setup, "eq:badio"); + goto error; + } + return 0; +error: + QDIO_DBF_HEX2(0, trace, &cstat, sizeof(int)); + QDIO_DBF_HEX2(0, trace, &dstat, sizeof(int)); + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); + return 1; +} + +static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, + int dstat) +{ + struct qdio_irq *irq_ptr = cdev->private->qdio_data; + char dbf_text[15]; + + sprintf(dbf_text, "qehi%4x", cdev->private->schid.sch_no); + QDIO_DBF_TEXT0(0, setup, dbf_text); + QDIO_DBF_TEXT0(0, trace, dbf_text); + + if (!qdio_establish_check_errors(cdev, cstat, dstat)) + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED); +} + +/* qdio interrupt handler */ +void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, + struct irb *irb) +{ + struct qdio_irq *irq_ptr = cdev->private->qdio_data; + int cstat, dstat; + char dbf_text[15]; + + qdio_perf_stat_inc(&perf_stats.qdio_int); + + if (!intparm || !irq_ptr) { + sprintf(dbf_text, "qihd%4x", cdev->private->schid.sch_no); + QDIO_DBF_TEXT2(1, setup, dbf_text); + return; + } + + if (IS_ERR(irb)) { + switch (PTR_ERR(irb)) { + case -EIO: + sprintf(dbf_text, "ierr%4x", + cdev->private->schid.sch_no); + QDIO_DBF_TEXT2(1, setup, dbf_text); + qdio_int_error(cdev); + return; + case -ETIMEDOUT: + sprintf(dbf_text, "qtoh%4x", + cdev->private->schid.sch_no); + QDIO_DBF_TEXT2(1, setup, dbf_text); + qdio_int_error(cdev); + return; + default: + WARN_ON(1); + return; + } + } + qdio_irq_check_sense(irq_ptr->schid, irb); + + cstat = irb->scsw.cmd.cstat; + dstat = irb->scsw.cmd.dstat; + + switch (irq_ptr->state) { + case QDIO_IRQ_STATE_INACTIVE: + qdio_establish_handle_irq(cdev, cstat, dstat); + break; + + case QDIO_IRQ_STATE_CLEANUP: + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); + break; + + case QDIO_IRQ_STATE_ESTABLISHED: + case QDIO_IRQ_STATE_ACTIVE: + if (cstat & SCHN_STAT_PCI) { + qdio_int_handler_pci(irq_ptr); + /* no state change so no need to wake up wait_q */ + return; + } + if ((cstat & ~SCHN_STAT_PCI) || dstat) { + qdio_handle_activate_check(cdev, intparm, cstat, + dstat); + break; + } + default: + WARN_ON(1); + } + wake_up(&cdev->private->wait_q); +} + +/** + * qdio_get_ssqd_desc - get qdio subchannel description + * @cdev: ccw device to get description for + * + * Returns a pointer to the saved qdio subchannel description, + * or NULL for not setup qdio devices. 
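 *
 * Minimal usage sketch (illustrative, not a specific in-tree caller):
 *
 *	struct qdio_ssqd_desc *ssqd = qdio_get_ssqd_desc(cdev);
 *
 *	if (!ssqd)
 *		return -ENODEV;		(qdio is not set up for cdev)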
+ */ +struct qdio_ssqd_desc *qdio_get_ssqd_desc(struct ccw_device *cdev) +{ + struct qdio_irq *irq_ptr; + + QDIO_DBF_TEXT0(0, setup, "getssqd"); + + irq_ptr = cdev->private->qdio_data; + if (!irq_ptr) + return NULL; + + return &irq_ptr->ssqd_desc; +} +EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc); + +/** + * qdio_cleanup - shutdown queues and free data structures + * @cdev: associated ccw device + * @how: use halt or clear to shutdown + * + * This function calls qdio_shutdown() for @cdev with method @how + * and on success qdio_free() for @cdev. + */ +int qdio_cleanup(struct ccw_device *cdev, int how) +{ + struct qdio_irq *irq_ptr; + char dbf_text[15]; + int rc; + + irq_ptr = cdev->private->qdio_data; + if (!irq_ptr) + return -ENODEV; + + sprintf(dbf_text, "qcln%4x", irq_ptr->schid.sch_no); + QDIO_DBF_TEXT1(0, trace, dbf_text); + QDIO_DBF_TEXT0(0, setup, dbf_text); + + rc = qdio_shutdown(cdev, how); + if (rc == 0) + rc = qdio_free(cdev); + return rc; +} +EXPORT_SYMBOL_GPL(qdio_cleanup); + +static void qdio_shutdown_queues(struct ccw_device *cdev) +{ + struct qdio_irq *irq_ptr = cdev->private->qdio_data; + struct qdio_q *q; + int i; + + for_each_input_queue(irq_ptr, q, i) + tasklet_disable(&q->tasklet); + + for_each_output_queue(irq_ptr, q, i) { + tasklet_disable(&q->tasklet); + del_timer(&q->u.out.timer); + } +} + +/** + * qdio_shutdown - shut down a qdio subchannel + * @cdev: associated ccw device + * @how: use halt or clear to shutdown + */ +int qdio_shutdown(struct ccw_device *cdev, int how) +{ + struct qdio_irq *irq_ptr; + int rc; + unsigned long flags; + char dbf_text[15]; + + irq_ptr = cdev->private->qdio_data; + if (!irq_ptr) + return -ENODEV; + + mutex_lock(&irq_ptr->setup_mutex); + /* + * Subchannel was already shot down. We cannot prevent being called + * twice since cio may trigger a shutdown asynchronously. 
+ */ + if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) { + mutex_unlock(&irq_ptr->setup_mutex); + return 0; + } + + sprintf(dbf_text, "qsqs%4x", irq_ptr->schid.sch_no); + QDIO_DBF_TEXT1(0, trace, dbf_text); + QDIO_DBF_TEXT0(0, setup, dbf_text); + + tiqdio_remove_input_queues(irq_ptr); + qdio_shutdown_queues(cdev); + qdio_shutdown_debug_entries(irq_ptr, cdev); + + /* cleanup subchannel */ + spin_lock_irqsave(get_ccwdev_lock(cdev), flags); + + if (how & QDIO_FLAG_CLEANUP_USING_CLEAR) + rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP); + else + /* default behaviour is halt */ + rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP); + if (rc) { + sprintf(dbf_text, "sher%4x", irq_ptr->schid.sch_no); + QDIO_DBF_TEXT0(0, setup, dbf_text); + sprintf(dbf_text, "rc=%d", rc); + QDIO_DBF_TEXT0(0, setup, dbf_text); + goto no_cleanup; + } + + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP); + spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); + wait_event_interruptible_timeout(cdev->private->wait_q, + irq_ptr->state == QDIO_IRQ_STATE_INACTIVE || + irq_ptr->state == QDIO_IRQ_STATE_ERR, + 10 * HZ); + spin_lock_irqsave(get_ccwdev_lock(cdev), flags); + +no_cleanup: + qdio_shutdown_thinint(irq_ptr); + + /* restore interrupt handler */ + if ((void *)cdev->handler == (void *)qdio_int_handler) + cdev->handler = irq_ptr->orig_handler; + spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); + + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); + mutex_unlock(&irq_ptr->setup_mutex); + module_put(THIS_MODULE); + if (rc) + return rc; + return 0; +} +EXPORT_SYMBOL_GPL(qdio_shutdown); + +/** + * qdio_free - free data structures for a qdio subchannel + * @cdev: associated ccw device + */ +int qdio_free(struct ccw_device *cdev) +{ + struct qdio_irq *irq_ptr; + char dbf_text[15]; + + irq_ptr = cdev->private->qdio_data; + if (!irq_ptr) + return -ENODEV; + + mutex_lock(&irq_ptr->setup_mutex); + + sprintf(dbf_text, "qfqs%4x", irq_ptr->schid.sch_no); + QDIO_DBF_TEXT1(0, trace, dbf_text); + QDIO_DBF_TEXT0(0, setup, dbf_text); + + cdev->private->qdio_data = NULL; + mutex_unlock(&irq_ptr->setup_mutex); + + qdio_release_memory(irq_ptr); + return 0; +} +EXPORT_SYMBOL_GPL(qdio_free); + +/** + * qdio_initialize - allocate and establish queues for a qdio subchannel + * @init_data: initialization data + * + * This function first allocates queues via qdio_allocate() and on success + * establishes them via qdio_establish(). 
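 *
 * Illustrative bring-up sequence built on this API (error handling
 * omitted; "init_data" stands for a filled in struct qdio_initialize):
 *
 *	rc = qdio_initialize(&init_data);	(allocate + establish)
 *	rc = qdio_activate(init_data.cdev);
 *		... do_QDIO() traffic ...
 *	rc = qdio_shutdown(init_data.cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
 *	rc = qdio_free(init_data.cdev);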
+ */ +int qdio_initialize(struct qdio_initialize *init_data) +{ + int rc; + char dbf_text[15]; + + sprintf(dbf_text, "qini%4x", init_data->cdev->private->schid.sch_no); + QDIO_DBF_TEXT0(0, setup, dbf_text); + QDIO_DBF_TEXT0(0, trace, dbf_text); + + rc = qdio_allocate(init_data); + if (rc) + return rc; + + rc = qdio_establish(init_data); + if (rc) + qdio_free(init_data->cdev); + return rc; +} +EXPORT_SYMBOL_GPL(qdio_initialize); + +/** + * qdio_allocate - allocate qdio queues and associated data + * @init_data: initialization data + */ +int qdio_allocate(struct qdio_initialize *init_data) +{ + struct qdio_irq *irq_ptr; + char dbf_text[15]; + + sprintf(dbf_text, "qalc%4x", init_data->cdev->private->schid.sch_no); + QDIO_DBF_TEXT0(0, setup, dbf_text); + QDIO_DBF_TEXT0(0, trace, dbf_text); + + if ((init_data->no_input_qs && !init_data->input_handler) || + (init_data->no_output_qs && !init_data->output_handler)) + return -EINVAL; + + if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) || + (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ)) + return -EINVAL; + + if ((!init_data->input_sbal_addr_array) || + (!init_data->output_sbal_addr_array)) + return -EINVAL; + + qdio_allocate_do_dbf(init_data); + + /* irq_ptr must be in GFP_DMA since it contains ccw1.cda */ + irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!irq_ptr) + goto out_err; + QDIO_DBF_TEXT0(0, setup, "irq_ptr:"); + QDIO_DBF_HEX0(0, setup, &irq_ptr, sizeof(void *)); + + mutex_init(&irq_ptr->setup_mutex); + + /* + * Allocate a page for the chsc calls in qdio_establish. + * Must be pre-allocated since a zfcp recovery will call + * qdio_establish. In case of low memory and swap on a zfcp disk + * we may not be able to allocate memory otherwise. + */ + irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL); + if (!irq_ptr->chsc_page) + goto out_rel; + + /* qdr is used in ccw1.cda which is u32 */ + irq_ptr->qdr = kzalloc(sizeof(struct qdr), GFP_KERNEL | GFP_DMA); + if (!irq_ptr->qdr) + goto out_rel; + WARN_ON((unsigned long)irq_ptr->qdr & 0xfff); + + QDIO_DBF_TEXT0(0, setup, "qdr:"); + QDIO_DBF_HEX0(0, setup, &irq_ptr->qdr, sizeof(void *)); + + if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs, + init_data->no_output_qs)) + goto out_rel; + + init_data->cdev->private->qdio_data = irq_ptr; + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); + return 0; +out_rel: + qdio_release_memory(irq_ptr); +out_err: + return -ENOMEM; +} +EXPORT_SYMBOL_GPL(qdio_allocate); + +/** + * qdio_establish - establish queues on a qdio subchannel + * @init_data: initialization data + */ +int qdio_establish(struct qdio_initialize *init_data) +{ + char dbf_text[20]; + struct qdio_irq *irq_ptr; + struct ccw_device *cdev = init_data->cdev; + unsigned long saveflags; + int rc; + + irq_ptr = cdev->private->qdio_data; + if (!irq_ptr) + return -ENODEV; + + if (cdev->private->state != DEV_STATE_ONLINE) + return -EINVAL; + + if (!try_module_get(THIS_MODULE)) + return -EINVAL; + + sprintf(dbf_text, "qest%4x", cdev->private->schid.sch_no); + QDIO_DBF_TEXT0(0, setup, dbf_text); + QDIO_DBF_TEXT0(0, trace, dbf_text); + + mutex_lock(&irq_ptr->setup_mutex); + qdio_setup_irq(init_data); + + rc = qdio_establish_thinint(irq_ptr); + if (rc) { + mutex_unlock(&irq_ptr->setup_mutex); + qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); + return rc; + } + + /* establish q */ + irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd; + irq_ptr->ccw.flags = CCW_FLAG_SLI; + irq_ptr->ccw.count = irq_ptr->equeue.count; + irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr); + + 
spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags); + ccw_device_set_options_mask(cdev, 0); + + rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0); + if (rc) { + sprintf(dbf_text, "eq:io%4x", irq_ptr->schid.sch_no); + QDIO_DBF_TEXT2(1, setup, dbf_text); + sprintf(dbf_text, "eq:rc%4x", rc); + QDIO_DBF_TEXT2(1, setup, dbf_text); + } + spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); + + if (rc) { + mutex_unlock(&irq_ptr->setup_mutex); + qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); + return rc; + } + + wait_event_interruptible_timeout(cdev->private->wait_q, + irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED || + irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ); + + if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) { + mutex_unlock(&irq_ptr->setup_mutex); + qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); + return -EIO; + } + + qdio_setup_ssqd_info(irq_ptr); + sprintf(dbf_text, "qib ac%2x", irq_ptr->qib.ac); + QDIO_DBF_TEXT2(0, setup, dbf_text); + + /* qebsm is now setup if available, initialize buffer states */ + qdio_init_buf_states(irq_ptr); + + mutex_unlock(&irq_ptr->setup_mutex); + qdio_print_subchannel_info(irq_ptr, cdev); + qdio_setup_debug_entries(irq_ptr, cdev); + return 0; +} +EXPORT_SYMBOL_GPL(qdio_establish); + +/** + * qdio_activate - activate queues on a qdio subchannel + * @cdev: associated cdev + */ +int qdio_activate(struct ccw_device *cdev) +{ + struct qdio_irq *irq_ptr; + int rc; + unsigned long saveflags; + char dbf_text[20]; + + irq_ptr = cdev->private->qdio_data; + if (!irq_ptr) + return -ENODEV; + + if (cdev->private->state != DEV_STATE_ONLINE) + return -EINVAL; + + mutex_lock(&irq_ptr->setup_mutex); + if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) { + rc = -EBUSY; + goto out; + } + + sprintf(dbf_text, "qact%4x", irq_ptr->schid.sch_no); + QDIO_DBF_TEXT2(0, setup, dbf_text); + QDIO_DBF_TEXT2(0, trace, dbf_text); + + irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd; + irq_ptr->ccw.flags = CCW_FLAG_SLI; + irq_ptr->ccw.count = irq_ptr->aqueue.count; + irq_ptr->ccw.cda = 0; + + spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags); + ccw_device_set_options(cdev, CCWDEV_REPORT_ALL); + + rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE, + 0, DOIO_DENY_PREFETCH); + if (rc) { + sprintf(dbf_text, "aq:io%4x", irq_ptr->schid.sch_no); + QDIO_DBF_TEXT2(1, setup, dbf_text); + sprintf(dbf_text, "aq:rc%4x", rc); + QDIO_DBF_TEXT2(1, setup, dbf_text); + } + spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); + + if (rc) + goto out; + + if (is_thinint_irq(irq_ptr)) + tiqdio_add_input_queues(irq_ptr); + + /* wait for subchannel to become active */ + msleep(5); + + switch (irq_ptr->state) { + case QDIO_IRQ_STATE_STOPPED: + case QDIO_IRQ_STATE_ERR: + mutex_unlock(&irq_ptr->setup_mutex); + qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); + return -EIO; + default: + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE); + rc = 0; + } +out: + mutex_unlock(&irq_ptr->setup_mutex); + return rc; +} +EXPORT_SYMBOL_GPL(qdio_activate); + +static inline int buf_in_between(int bufnr, int start, int count) +{ + int end = add_buf(start, count); + + if (end > start) { + if (bufnr >= start && bufnr < end) + return 1; + else + return 0; + } + + /* wrap-around case */ + if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) || + (bufnr < end)) + return 1; + else + return 0; +} + +/** + * handle_inbound - reset processed input buffers + * @q: queue containing the buffers + * @callflags: flags + * @bufnr: first buffer to process + * @count: how many buffers are emptied + */ 
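/*
 * Both handlers below are only reached through do_QDIO(). A driver
 * returning emptied input buffers would, for example, issue something
 * like (illustrative call only):
 *
 *	do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, bufnr, count);
 *
 * which ends up in handle_inbound() for input queue 0; the corresponding
 * QDIO_FLAG_SYNC_OUTPUT call ends up in handle_outbound().
 */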
+static void handle_inbound(struct qdio_q *q, unsigned int callflags, + int bufnr, int count) +{ + unsigned long flags; + int used, rc; + + /* + * do_QDIO could run in parallel with the queue tasklet so the + * upper-layer program could empty the ACK'ed buffer here. + * If that happens we must clear the polling flag, otherwise + * qdio_stop_polling() could set the buffer to NOT_INIT after + * it was set to EMPTY which would kill us. + */ + spin_lock_irqsave(&q->u.in.lock, flags); + if (q->u.in.polling) + if (buf_in_between(q->last_move_ftc, bufnr, count)) + q->u.in.polling = 0; + + count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count); + spin_unlock_irqrestore(&q->u.in.lock, flags); + + used = atomic_add_return(count, &q->nr_buf_used) - count; + BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q); + + /* no need to signal as long as the adapter had free buffers */ + if (used) + return; + + if (need_siga_in(q)) { + rc = qdio_siga_input(q); + if (rc) + q->qdio_error = rc; + } +} + +/** + * handle_outbound - process filled outbound buffers + * @q: queue containing the buffers + * @callflags: flags + * @bufnr: first buffer to process + * @count: how many buffers are filled + */ +static void handle_outbound(struct qdio_q *q, unsigned int callflags, + int bufnr, int count) +{ + unsigned char state; + int used; + + qdio_perf_stat_inc(&perf_stats.outbound_handler); + + count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count); + used = atomic_add_return(count, &q->nr_buf_used); + BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q); + + if (callflags & QDIO_FLAG_PCI_OUT) + q->u.out.pci_out_enabled = 1; + else + q->u.out.pci_out_enabled = 0; + + if (queue_type(q) == QDIO_IQDIO_QFMT) { + if (multicast_outbound(q)) + qdio_kick_outbound_q(q); + else + /* + * One siga-w per buffer required for unicast + * HiperSockets.
+ */ + while (count--) + qdio_kick_outbound_q(q); + goto out; + } + + if (need_siga_sync(q)) { + qdio_siga_sync_q(q); + goto out; + } + + /* try to fast requeue buffers */ + get_buf_state(q, prev_buf(bufnr), &state); + if (state != SLSB_CU_OUTPUT_PRIMED) + qdio_kick_outbound_q(q); + else { + QDIO_DBF_TEXT5(0, trace, "fast-req"); + qdio_perf_stat_inc(&perf_stats.fast_requeue); + } +out: + /* Fixme: could wait forever if called from process context */ + tasklet_schedule(&q->tasklet); +} + +/** + * do_QDIO - process input or output buffers + * @cdev: associated ccw_device for the qdio subchannel + * @callflags: input or output and special flags from the program + * @q_nr: queue number + * @bufnr: buffer number + * @count: how many buffers to process + */ +int do_QDIO(struct ccw_device *cdev, unsigned int callflags, + int q_nr, int bufnr, int count) +{ + struct qdio_irq *irq_ptr; +#ifdef CONFIG_QDIO_DEBUG + char dbf_text[20]; + + sprintf(dbf_text, "doQD%04x", cdev->private->schid.sch_no); + QDIO_DBF_TEXT3(0, trace, dbf_text); +#endif /* CONFIG_QDIO_DEBUG */ + + if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) || + (count > QDIO_MAX_BUFFERS_PER_Q) || + (q_nr > QDIO_MAX_QUEUES_PER_IRQ)) + return -EINVAL; + + if (!count) + return 0; + + irq_ptr = cdev->private->qdio_data; + if (!irq_ptr) + return -ENODEV; + +#ifdef CONFIG_QDIO_DEBUG + if (callflags & QDIO_FLAG_SYNC_INPUT) + QDIO_DBF_HEX3(0, trace, &irq_ptr->input_qs[q_nr], + sizeof(void *)); + else + QDIO_DBF_HEX3(0, trace, &irq_ptr->output_qs[q_nr], + sizeof(void *)); + + sprintf(dbf_text, "flag%04x", callflags); + QDIO_DBF_TEXT3(0, trace, dbf_text); + sprintf(dbf_text, "qi%02xct%02x", bufnr, count); + QDIO_DBF_TEXT3(0, trace, dbf_text); +#endif /* CONFIG_QDIO_DEBUG */ + + if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE) + return -EBUSY; + + if (callflags & QDIO_FLAG_SYNC_INPUT) + handle_inbound(irq_ptr->input_qs[q_nr], + callflags, bufnr, count); + else if (callflags & QDIO_FLAG_SYNC_OUTPUT) + handle_outbound(irq_ptr->output_qs[q_nr], + callflags, bufnr, count); + else { + QDIO_DBF_TEXT3(1, trace, "doQD:inv"); + return -EINVAL; + } + return 0; +} +EXPORT_SYMBOL_GPL(do_QDIO); + +static int __init init_QDIO(void) +{ + int rc; + + rc = qdio_setup_init(); + if (rc) + return rc; + rc = tiqdio_allocate_memory(); + if (rc) + goto out_cache; + rc = qdio_debug_init(); + if (rc) + goto out_ti; + rc = qdio_setup_perf_stats(); + if (rc) + goto out_debug; + rc = tiqdio_register_thinints(); + if (rc) + goto out_perf; + return 0; + +out_perf: + qdio_remove_perf_stats(); +out_debug: + qdio_debug_exit(); +out_ti: + tiqdio_free_memory(); +out_cache: + qdio_setup_exit(); + return rc; +} + +static void __exit exit_QDIO(void) +{ + tiqdio_unregister_thinints(); + tiqdio_free_memory(); + qdio_remove_perf_stats(); + qdio_debug_exit(); + qdio_setup_exit(); +} + +module_init(init_QDIO); +module_exit(exit_QDIO); diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c new file mode 100644 index 000000000000..ea01b85b1cc9 --- /dev/null +++ b/drivers/s390/cio/qdio_perf.c @@ -0,0 +1,151 @@ +/* + * drivers/s390/cio/qdio_perf.c + * + * Copyright IBM Corp. 
2008 + * + * Author: Jan Glauber (jang@linux.vnet.ibm.com) + */ +#include <linux/kernel.h> +#include <linux/proc_fs.h> +#include <linux/seq_file.h> +#include <asm/ccwdev.h> + +#include "cio.h" +#include "css.h" +#include "device.h" +#include "ioasm.h" +#include "chsc.h" +#include "qdio_debug.h" +#include "qdio_perf.h" + +int qdio_performance_stats; +struct qdio_perf_stats perf_stats; + +#ifdef CONFIG_PROC_FS +static struct proc_dir_entry *qdio_perf_pde; +#endif + +inline void qdio_perf_stat_inc(atomic_long_t *count) +{ + if (qdio_performance_stats) + atomic_long_inc(count); +} + +inline void qdio_perf_stat_dec(atomic_long_t *count) +{ + if (qdio_performance_stats) + atomic_long_dec(count); +} + +/* + * procfs functions + */ +static int qdio_perf_proc_show(struct seq_file *m, void *v) +{ + seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n", + (long)atomic_long_read(&perf_stats.qdio_int)); + seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n", + (long)atomic_long_read(&perf_stats.pci_int)); + seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n", + (long)atomic_long_read(&perf_stats.thin_int)); + seq_printf(m, "\n"); + seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n", + (long)atomic_long_read(&perf_stats.tasklet_inbound)); + seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n", + (long)atomic_long_read(&perf_stats.tasklet_outbound)); + seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n", + (long)atomic_long_read(&perf_stats.tasklet_thinint), + (long)atomic_long_read(&perf_stats.tasklet_thinint_loop)); + seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n", + (long)atomic_long_read(&perf_stats.thinint_inbound), + (long)atomic_long_read(&perf_stats.thinint_inbound_loop)); + seq_printf(m, "\n"); + seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n", + (long)atomic_long_read(&perf_stats.siga_in)); + seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n", + (long)atomic_long_read(&perf_stats.siga_out)); + seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n", + (long)atomic_long_read(&perf_stats.siga_sync)); + seq_printf(m, "\n"); + seq_printf(m, "Number of inbound transfers\t\t\t: %li\n", + (long)atomic_long_read(&perf_stats.inbound_handler)); + seq_printf(m, "Number of outbound transfers\t\t\t: %li\n", + (long)atomic_long_read(&perf_stats.outbound_handler)); + seq_printf(m, "\n"); + seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n", + (long)atomic_long_read(&perf_stats.fast_requeue)); + seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n", + (long)atomic_long_read(&perf_stats.debug_tl_out_timer)); + seq_printf(m, "Number of stop polling calls\t\t\t: %li\n", + (long)atomic_long_read(&perf_stats.debug_stop_polling)); + seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n", + (long)atomic_long_read(&perf_stats.thinint_inbound_loop2)); + seq_printf(m, "\n"); + return 0; +} +static int qdio_perf_seq_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, qdio_perf_proc_show, NULL); +} + +static struct file_operations qdio_perf_proc_fops = { + .owner = THIS_MODULE, + .open = qdio_perf_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +/* + * sysfs functions + */ +static ssize_t qdio_perf_stats_show(struct bus_type *bus, char *buf) +{ + return sprintf(buf, "%i\n", qdio_performance_stats ? 
1 : 0); +} + +static ssize_t qdio_perf_stats_store(struct bus_type *bus, + const char *buf, size_t count) +{ + unsigned long i; + + if (strict_strtoul(buf, 16, &i) != 0) + return -EINVAL; + if ((i != 0) && (i != 1)) + return -EINVAL; + if (i == qdio_performance_stats) + return count; + + qdio_performance_stats = i; + /* reset performance statistics */ + if (i == 0) + memset(&perf_stats, 0, sizeof(struct qdio_perf_stats)); + return count; +} + +static BUS_ATTR(qdio_performance_stats, 0644, qdio_perf_stats_show, + qdio_perf_stats_store); + +int __init qdio_setup_perf_stats(void) +{ + int rc; + + rc = bus_create_file(&ccw_bus_type, &bus_attr_qdio_performance_stats); + if (rc) + return rc; + +#ifdef CONFIG_PROC_FS + memset(&perf_stats, 0, sizeof(struct qdio_perf_stats)); + qdio_perf_pde = proc_create("qdio_perf", S_IFREG | S_IRUGO, + NULL, &qdio_perf_proc_fops); +#endif + return 0; +} + +void __exit qdio_remove_perf_stats(void) +{ +#ifdef CONFIG_PROC_FS + remove_proc_entry("qdio_perf", NULL); +#endif + bus_remove_file(&ccw_bus_type, &bus_attr_qdio_performance_stats); +} diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h new file mode 100644 index 000000000000..5c406a8b7387 --- /dev/null +++ b/drivers/s390/cio/qdio_perf.h @@ -0,0 +1,54 @@ +/* + * drivers/s390/cio/qdio_perf.h + * + * Copyright IBM Corp. 2008 + * + * Author: Jan Glauber (jang@linux.vnet.ibm.com) + */ +#ifndef QDIO_PERF_H +#define QDIO_PERF_H + +#include <linux/types.h> +#include <linux/device.h> +#include <asm/atomic.h> + +struct qdio_perf_stats { + /* interrupt handler calls */ + atomic_long_t qdio_int; + atomic_long_t pci_int; + atomic_long_t thin_int; + + /* tasklet runs */ + atomic_long_t tasklet_inbound; + atomic_long_t tasklet_outbound; + atomic_long_t tasklet_thinint; + atomic_long_t tasklet_thinint_loop; + atomic_long_t thinint_inbound; + atomic_long_t thinint_inbound_loop; + atomic_long_t thinint_inbound_loop2; + + /* signal adapter calls */ + atomic_long_t siga_out; + atomic_long_t siga_in; + atomic_long_t siga_sync; + + /* misc */ + atomic_long_t inbound_handler; + atomic_long_t outbound_handler; + atomic_long_t fast_requeue; + + /* for debugging */ + atomic_long_t debug_tl_out_timer; + atomic_long_t debug_stop_polling; +}; + +extern struct qdio_perf_stats perf_stats; +extern int qdio_performance_stats; + +int qdio_setup_perf_stats(void); +void qdio_remove_perf_stats(void); + +extern void qdio_perf_stat_inc(atomic_long_t *count); +extern void qdio_perf_stat_dec(atomic_long_t *count); + +#endif diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c new file mode 100644 index 000000000000..f0923a8aceda --- /dev/null +++ b/drivers/s390/cio/qdio_setup.c @@ -0,0 +1,521 @@ +/* + * driver/s390/cio/qdio_setup.c + * + * qdio queue initialization + * + * Copyright (C) IBM Corp. 2008 + * Author(s): Jan Glauber <jang@linux.vnet.ibm.com> + */ +#include <linux/kernel.h> +#include <linux/slab.h> +#include <asm/qdio.h> + +#include "cio.h" +#include "css.h" +#include "device.h" +#include "ioasm.h" +#include "chsc.h" +#include "qdio.h" +#include "qdio_debug.h" + +static struct kmem_cache *qdio_q_cache; + +/* + * qebsm is only available under 64bit but the adapter sets the feature + * flag anyway, so we manually override it. 
+ */ +static inline int qebsm_possible(void) +{ +#ifdef CONFIG_64BIT + return css_general_characteristics.qebsm; +#endif + return 0; +} + +/* + * qib_param_field: pointer to 128 bytes or NULL, if no param field + * nr_input_qs: pointer to nr_queues*128 words of data or NULL + */ +static void set_impl_params(struct qdio_irq *irq_ptr, + unsigned int qib_param_field_format, + unsigned char *qib_param_field, + unsigned long *input_slib_elements, + unsigned long *output_slib_elements) +{ + struct qdio_q *q; + int i, j; + + if (!irq_ptr) + return; + + WARN_ON((unsigned long)&irq_ptr->qib & 0xff); + irq_ptr->qib.pfmt = qib_param_field_format; + if (qib_param_field) + memcpy(irq_ptr->qib.parm, qib_param_field, + QDIO_MAX_BUFFERS_PER_Q); + + if (!input_slib_elements) + goto output; + + for_each_input_queue(irq_ptr, q, i) { + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) + q->slib->slibe[j].parms = + input_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j]; + } +output: + if (!output_slib_elements) + return; + + for_each_output_queue(irq_ptr, q, i) { + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) + q->slib->slibe[j].parms = + output_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j]; + } +} + +static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues) +{ + struct qdio_q *q; + int i; + + for (i = 0; i < nr_queues; i++) { + q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL); + if (!q) + return -ENOMEM; + WARN_ON((unsigned long)q & 0xff); + + q->slib = (struct slib *) __get_free_page(GFP_KERNEL); + if (!q->slib) { + kmem_cache_free(qdio_q_cache, q); + return -ENOMEM; + } + WARN_ON((unsigned long)q->slib & 0x7ff); + irq_ptr_qs[i] = q; + } + return 0; +} + +int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs) +{ + int rc; + + rc = __qdio_allocate_qs(irq_ptr->input_qs, nr_input_qs); + if (rc) + return rc; + rc = __qdio_allocate_qs(irq_ptr->output_qs, nr_output_qs); + return rc; +} + +static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr, + qdio_handler_t *handler, int i) +{ + /* must be cleared by every qdio_establish */ + memset(q, 0, ((char *)&q->slib) - ((char *)q)); + memset(q->slib, 0, PAGE_SIZE); + + q->irq_ptr = irq_ptr; + q->mask = 1 << (31 - i); + q->nr = i; + q->handler = handler; +} + +static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr, + void **sbals_array, char *dbf_text, int i) +{ + struct qdio_q *prev; + int j; + + QDIO_DBF_TEXT0(0, setup, dbf_text); + QDIO_DBF_HEX0(0, setup, &q, sizeof(void *)); + + q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2); + + /* fill in sbal */ + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) { + q->sbal[j] = *sbals_array++; + WARN_ON((unsigned long)q->sbal[j] & 0xff); + } + + /* fill in slib */ + if (i > 0) { + prev = (q->is_input_q) ? 
irq_ptr->input_qs[i - 1] + : irq_ptr->output_qs[i - 1]; + prev->slib->nsliba = (unsigned long)q->slib; + } + + q->slib->sla = (unsigned long)q->sl; + q->slib->slsba = (unsigned long)&q->slsb.val[0]; + + /* fill in sl */ + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) + q->sl->element[j].sbal = (unsigned long)q->sbal[j]; + + QDIO_DBF_TEXT2(0, setup, "sl-sb-b0"); + QDIO_DBF_HEX2(0, setup, q->sl, sizeof(void *)); + QDIO_DBF_HEX2(0, setup, &q->slsb, sizeof(void *)); + QDIO_DBF_HEX2(0, setup, q->sbal, sizeof(void *)); +} + +static void setup_queues(struct qdio_irq *irq_ptr, + struct qdio_initialize *qdio_init) +{ + char dbf_text[20]; + struct qdio_q *q; + void **input_sbal_array = qdio_init->input_sbal_addr_array; + void **output_sbal_array = qdio_init->output_sbal_addr_array; + int i; + + sprintf(dbf_text, "qfqs%4x", qdio_init->cdev->private->schid.sch_no); + QDIO_DBF_TEXT0(0, setup, dbf_text); + + for_each_input_queue(irq_ptr, q, i) { + sprintf(dbf_text, "in-q%4x", i); + setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i); + + q->is_input_q = 1; + spin_lock_init(&q->u.in.lock); + setup_storage_lists(q, irq_ptr, input_sbal_array, dbf_text, i); + input_sbal_array += QDIO_MAX_BUFFERS_PER_Q; + + if (is_thinint_irq(irq_ptr)) + tasklet_init(&q->tasklet, tiqdio_inbound_processing, + (unsigned long) q); + else + tasklet_init(&q->tasklet, qdio_inbound_processing, + (unsigned long) q); + } + + for_each_output_queue(irq_ptr, q, i) { + sprintf(dbf_text, "outq%4x", i); + setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i); + + q->is_input_q = 0; + setup_storage_lists(q, irq_ptr, output_sbal_array, + dbf_text, i); + output_sbal_array += QDIO_MAX_BUFFERS_PER_Q; + + tasklet_init(&q->tasklet, qdio_outbound_processing, + (unsigned long) q); + setup_timer(&q->u.out.timer, (void(*)(unsigned long)) + &qdio_outbound_timer, (unsigned long)q); + } +} + +static void process_ac_flags(struct qdio_irq *irq_ptr, unsigned char qdioac) +{ + if (qdioac & AC1_SIGA_INPUT_NEEDED) + irq_ptr->siga_flag.input = 1; + if (qdioac & AC1_SIGA_OUTPUT_NEEDED) + irq_ptr->siga_flag.output = 1; + if (qdioac & AC1_SIGA_SYNC_NEEDED) + irq_ptr->siga_flag.sync = 1; + if (qdioac & AC1_AUTOMATIC_SYNC_ON_THININT) + irq_ptr->siga_flag.no_sync_ti = 1; + if (qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI) + irq_ptr->siga_flag.no_sync_out_pci = 1; + + if (irq_ptr->siga_flag.no_sync_out_pci && + irq_ptr->siga_flag.no_sync_ti) + irq_ptr->siga_flag.no_sync_out_ti = 1; +} + +static void check_and_setup_qebsm(struct qdio_irq *irq_ptr, + unsigned char qdioac, unsigned long token) +{ + char dbf_text[15]; + + if (!(irq_ptr->qib.rflags & QIB_RFLAGS_ENABLE_QEBSM)) + goto no_qebsm; + if (!(qdioac & AC1_SC_QEBSM_AVAILABLE) || + (!(qdioac & AC1_SC_QEBSM_ENABLED))) + goto no_qebsm; + + irq_ptr->sch_token = token; + + QDIO_DBF_TEXT0(0, setup, "V=V:1"); + sprintf(dbf_text, "%8lx", irq_ptr->sch_token); + QDIO_DBF_TEXT0(0, setup, dbf_text); + return; + +no_qebsm: + irq_ptr->sch_token = 0; + irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM; + QDIO_DBF_TEXT0(0, setup, "noV=V"); +} + +static int __get_ssqd_info(struct qdio_irq *irq_ptr) +{ + struct chsc_ssqd_area *ssqd; + int rc; + + QDIO_DBF_TEXT0(0, setup, "getssqd"); + ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page; + memset(ssqd, 0, PAGE_SIZE); + + ssqd->request = (struct chsc_header) { + .length = 0x0010, + .code = 0x0024, + }; + ssqd->first_sch = irq_ptr->schid.sch_no; + ssqd->last_sch = irq_ptr->schid.sch_no; + ssqd->ssid = irq_ptr->schid.ssid; + + if (chsc(ssqd)) + return -EIO; + rc = 
chsc_error_from_response(ssqd->response.code); + if (rc) + return rc; + + if (!(ssqd->qdio_ssqd.flags & CHSC_FLAG_QDIO_CAPABILITY) || + !(ssqd->qdio_ssqd.flags & CHSC_FLAG_VALIDITY) || + (ssqd->qdio_ssqd.sch != irq_ptr->schid.sch_no)) + return -EINVAL; + + memcpy(&irq_ptr->ssqd_desc, &ssqd->qdio_ssqd, + sizeof(struct qdio_ssqd_desc)); + return 0; +} + +void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr) +{ + unsigned char qdioac; + char dbf_text[15]; + int rc; + + rc = __get_ssqd_info(irq_ptr); + if (rc) { + QDIO_DBF_TEXT2(0, setup, "ssqdasig"); + sprintf(dbf_text, "schno%x", irq_ptr->schid.sch_no); + QDIO_DBF_TEXT2(0, setup, dbf_text); + sprintf(dbf_text, "rc:%d", rc); + QDIO_DBF_TEXT2(0, setup, dbf_text); + /* all flags set, worst case */ + qdioac = AC1_SIGA_INPUT_NEEDED | AC1_SIGA_OUTPUT_NEEDED | + AC1_SIGA_SYNC_NEEDED; + } else + qdioac = irq_ptr->ssqd_desc.qdioac1; + + check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token); + process_ac_flags(irq_ptr, qdioac); + + sprintf(dbf_text, "qdioac%2x", qdioac); + QDIO_DBF_TEXT2(0, setup, dbf_text); +} + +void qdio_release_memory(struct qdio_irq *irq_ptr) +{ + struct qdio_q *q; + int i; + + /* + * Must check queue array manually since irq_ptr->nr_input_qs / + * irq_ptr->nr_output_qs may not yet be set. + */ + for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) { + q = irq_ptr->input_qs[i]; + if (q) { + free_page((unsigned long) q->slib); + kmem_cache_free(qdio_q_cache, q); + } + } + for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) { + q = irq_ptr->output_qs[i]; + if (q) { + free_page((unsigned long) q->slib); + kmem_cache_free(qdio_q_cache, q); + } + } + kfree(irq_ptr->qdr); + free_page(irq_ptr->chsc_page); + free_page((unsigned long) irq_ptr); +} + +static void __qdio_allocate_fill_qdr(struct qdio_irq *irq_ptr, + struct qdio_q **irq_ptr_qs, + int i, int nr) +{ + irq_ptr->qdr->qdf0[i + nr].sliba = + (unsigned long)irq_ptr_qs[i]->slib; + + irq_ptr->qdr->qdf0[i + nr].sla = + (unsigned long)irq_ptr_qs[i]->sl; + + irq_ptr->qdr->qdf0[i + nr].slsba = + (unsigned long)&irq_ptr_qs[i]->slsb.val[0]; + + irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY; + irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY; + irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY; + irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY; +} + +static void setup_qdr(struct qdio_irq *irq_ptr, + struct qdio_initialize *qdio_init) +{ + int i; + + irq_ptr->qdr->qfmt = qdio_init->q_format; + irq_ptr->qdr->iqdcnt = qdio_init->no_input_qs; + irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs; + irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */ + irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4; + irq_ptr->qdr->qiba = (unsigned long)&irq_ptr->qib; + irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY; + + for (i = 0; i < qdio_init->no_input_qs; i++) + __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->input_qs, i, 0); + + for (i = 0; i < qdio_init->no_output_qs; i++) + __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->output_qs, i, + qdio_init->no_input_qs); +} + +static void setup_qib(struct qdio_irq *irq_ptr, + struct qdio_initialize *init_data) +{ + if (qebsm_possible()) + irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM; + + irq_ptr->qib.qfmt = init_data->q_format; + if (init_data->no_input_qs) + irq_ptr->qib.isliba = + (unsigned long)(irq_ptr->input_qs[0]->slib); + if (init_data->no_output_qs) + irq_ptr->qib.osliba = + (unsigned long)(irq_ptr->output_qs[0]->slib); + memcpy(irq_ptr->qib.ebcnam, init_data->adapter_name, 8); +} + +int qdio_setup_irq(struct qdio_initialize *init_data)
+{ + struct ciw *ciw; + struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data; + int rc; + + memset(irq_ptr, 0, ((char *)&irq_ptr->qdr) - ((char *)irq_ptr)); + /* wipes qib.ac, required by ar7063 */ + memset(irq_ptr->qdr, 0, sizeof(struct qdr)); + + irq_ptr->int_parm = init_data->int_parm; + irq_ptr->nr_input_qs = init_data->no_input_qs; + irq_ptr->nr_output_qs = init_data->no_output_qs; + + irq_ptr->schid = ccw_device_get_subchannel_id(init_data->cdev); + irq_ptr->cdev = init_data->cdev; + setup_queues(irq_ptr, init_data); + + setup_qib(irq_ptr, init_data); + qdio_setup_thinint(irq_ptr); + set_impl_params(irq_ptr, init_data->qib_param_field_format, + init_data->qib_param_field, + init_data->input_slib_elements, + init_data->output_slib_elements); + + /* fill input and output descriptors */ + setup_qdr(irq_ptr, init_data); + + /* qdr, qib, sls, slsbs, slibs, sbales are filled now */ + + /* get qdio commands */ + ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE); + if (!ciw) { + QDIO_DBF_TEXT2(1, setup, "no eq"); + rc = -EINVAL; + goto out_err; + } + irq_ptr->equeue = *ciw; + + ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE); + if (!ciw) { + QDIO_DBF_TEXT2(1, setup, "no aq"); + rc = -EINVAL; + goto out_err; + } + irq_ptr->aqueue = *ciw; + + /* set new interrupt handler */ + irq_ptr->orig_handler = init_data->cdev->handler; + init_data->cdev->handler = qdio_int_handler; + return 0; +out_err: + qdio_release_memory(irq_ptr); + return rc; +} + +void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, + struct ccw_device *cdev) +{ + char s[80]; + + sprintf(s, "%s ", cdev->dev.bus_id); + + switch (irq_ptr->qib.qfmt) { + case QDIO_QETH_QFMT: + sprintf(s + strlen(s), "OSADE "); + break; + case QDIO_ZFCP_QFMT: + sprintf(s + strlen(s), "ZFCP "); + break; + case QDIO_IQDIO_QFMT: + sprintf(s + strlen(s), "HiperSockets "); + break; + } + sprintf(s + strlen(s), "using: "); + + if (!is_thinint_irq(irq_ptr)) + sprintf(s + strlen(s), "no"); + sprintf(s + strlen(s), "AdapterInterrupts "); + if (!(irq_ptr->sch_token != 0)) + sprintf(s + strlen(s), "no"); + sprintf(s + strlen(s), "QEBSM "); + if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)) + sprintf(s + strlen(s), "no"); + sprintf(s + strlen(s), "OutboundPCI "); + if (!css_general_characteristics.aif_tdd) + sprintf(s + strlen(s), "no"); + sprintf(s + strlen(s), "TDD\n"); + printk(KERN_INFO "qdio: %s", s); + + memset(s, 0, sizeof(s)); + sprintf(s, "%s SIGA required: ", cdev->dev.bus_id); + if (irq_ptr->siga_flag.input) + sprintf(s + strlen(s), "Read "); + if (irq_ptr->siga_flag.output) + sprintf(s + strlen(s), "Write "); + if (irq_ptr->siga_flag.sync) + sprintf(s + strlen(s), "Sync "); + if (!irq_ptr->siga_flag.no_sync_ti) + sprintf(s + strlen(s), "SyncAI "); + if (!irq_ptr->siga_flag.no_sync_out_ti) + sprintf(s + strlen(s), "SyncOutAI "); + if (!irq_ptr->siga_flag.no_sync_out_pci) + sprintf(s + strlen(s), "SyncOutPCI"); + sprintf(s + strlen(s), "\n"); + printk(KERN_INFO "qdio: %s", s); +} + +int __init qdio_setup_init(void) +{ + char dbf_text[15]; + + qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q), + 256, 0, NULL); + if (!qdio_q_cache) + return -ENOMEM; + + /* Check for OSA/FCP thin interrupts (bit 67). */ + sprintf(dbf_text, "thini%1x", + (css_general_characteristics.aif_osa) ? 1 : 0); + QDIO_DBF_TEXT0(0, setup, dbf_text); + + /* Check for QEBSM support in general (bit 58). */ + sprintf(dbf_text, "cssQBS:%1x", + (qebsm_possible()) ? 
1 : 0); + QDIO_DBF_TEXT0(0, setup, dbf_text); + return 0; +} + +void __exit qdio_setup_exit(void) +{ + kmem_cache_destroy(qdio_q_cache); +} diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c new file mode 100644 index 000000000000..9291a771d812 --- /dev/null +++ b/drivers/s390/cio/qdio_thinint.c @@ -0,0 +1,380 @@ +/* + * linux/drivers/s390/cio/thinint_qdio.c + * + * thin interrupt support for qdio + * + * Copyright 2000-2008 IBM Corp. + * Author(s): Utz Bacher <utz.bacher@de.ibm.com> + * Cornelia Huck <cornelia.huck@de.ibm.com> + * Jan Glauber <jang@linux.vnet.ibm.com> + */ +#include <linux/io.h> +#include <asm/atomic.h> +#include <asm/debug.h> +#include <asm/qdio.h> +#include <asm/airq.h> +#include <asm/isc.h> + +#include "cio.h" +#include "ioasm.h" +#include "qdio.h" +#include "qdio_debug.h" +#include "qdio_perf.h" + +/* + * Restriction: only 63 iqdio subchannels would have its own indicator, + * after that, subsequent subchannels share one indicator + */ +#define TIQDIO_NR_NONSHARED_IND 63 +#define TIQDIO_NR_INDICATORS (TIQDIO_NR_NONSHARED_IND + 1) +#define TIQDIO_SHARED_IND 63 + +/* list of thin interrupt input queues */ +static LIST_HEAD(tiq_list); + +/* adapter local summary indicator */ +static unsigned char *tiqdio_alsi; + +/* device state change indicators */ +struct indicator_t { + u32 ind; /* u32 because of compare-and-swap performance */ + atomic_t count; /* use count, 0 or 1 for non-shared indicators */ +}; +static struct indicator_t *q_indicators; + +static void tiqdio_tasklet_fn(unsigned long data); +static DECLARE_TASKLET(tiqdio_tasklet, tiqdio_tasklet_fn, 0); + +static int css_qdio_omit_svs; + +static inline unsigned long do_clear_global_summary(void) +{ + register unsigned long __fn asm("1") = 3; + register unsigned long __tmp asm("2"); + register unsigned long __time asm("3"); + + asm volatile( + " .insn rre,0xb2650000,2,0" + : "+d" (__fn), "=d" (__tmp), "=d" (__time)); + return __time; +} + +/* returns addr for the device state change indicator */ +static u32 *get_indicator(void) +{ + int i; + + for (i = 0; i < TIQDIO_NR_NONSHARED_IND; i++) + if (!atomic_read(&q_indicators[i].count)) { + atomic_set(&q_indicators[i].count, 1); + return &q_indicators[i].ind; + } + + /* use the shared indicator */ + atomic_inc(&q_indicators[TIQDIO_SHARED_IND].count); + return &q_indicators[TIQDIO_SHARED_IND].ind; +} + +static void put_indicator(u32 *addr) +{ + int i; + + if (!addr) + return; + i = ((unsigned long)addr - (unsigned long)q_indicators) / + sizeof(struct indicator_t); + atomic_dec(&q_indicators[i].count); +} + +void tiqdio_add_input_queues(struct qdio_irq *irq_ptr) +{ + struct qdio_q *q; + int i; + + /* No TDD facility? If we must use SIGA-s we can also omit SVS. 
*/ + if (!css_qdio_omit_svs && irq_ptr->siga_flag.sync) + css_qdio_omit_svs = 1; + + for_each_input_queue(irq_ptr, q, i) { + list_add_rcu(&q->entry, &tiq_list); + synchronize_rcu(); + } + xchg(irq_ptr->dsci, 1); + tasklet_schedule(&tiqdio_tasklet); +} + +/* + * we cannot stop the tiqdio tasklet here since it is for all + * thinint qdio devices and it must run as long as there is a + * thinint device left + */ +void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr) +{ + struct qdio_q *q; + int i; + + for_each_input_queue(irq_ptr, q, i) { + list_del_rcu(&q->entry); + synchronize_rcu(); + } +} + +static inline int tiqdio_inbound_q_done(struct qdio_q *q) +{ + unsigned char state; + + if (!atomic_read(&q->nr_buf_used)) + return 1; + + qdio_siga_sync_q(q); + get_buf_state(q, q->first_to_check, &state); + + if (state == SLSB_P_INPUT_PRIMED) + /* more work coming */ + return 0; + return 1; +} + +static inline int shared_ind(struct qdio_irq *irq_ptr) +{ + return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind; +} + +static void __tiqdio_inbound_processing(struct qdio_q *q) +{ + qdio_perf_stat_inc(&perf_stats.thinint_inbound); + qdio_sync_after_thinint(q); + + /* + * Maybe we have work on our outbound queues... at least + * we have to check the PCI capable queues. + */ + qdio_check_outbound_after_thinint(q); + +again: + if (!qdio_inbound_q_moved(q)) + return; + + qdio_kick_inbound_handler(q); + + if (!tiqdio_inbound_q_done(q)) { + qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop); + goto again; + } + + qdio_stop_polling(q); + /* + * We need to check again to not lose initiative after + * resetting the ACK state. + */ + if (!tiqdio_inbound_q_done(q)) { + qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2); + goto again; + } +} + +void tiqdio_inbound_processing(unsigned long data) +{ + struct qdio_q *q = (struct qdio_q *)data; + + __tiqdio_inbound_processing(q); +} + +/* check for work on all inbound thinint queues */ +static void tiqdio_tasklet_fn(unsigned long data) +{ + struct qdio_q *q; + + qdio_perf_stat_inc(&perf_stats.tasklet_thinint); +again: + + /* protect tiq_list entries, only changed in activate or shutdown */ + rcu_read_lock(); + + list_for_each_entry_rcu(q, &tiq_list, entry) + /* only process queues from changed sets */ + if (*q->irq_ptr->dsci) { + + /* only clear it if the indicator is non-shared */ + if (!shared_ind(q->irq_ptr)) + xchg(q->irq_ptr->dsci, 0); + /* + * don't call inbound processing directly since + * that could starve other thinint queues + */ + tasklet_schedule(&q->tasklet); + } + + rcu_read_unlock(); + + /* + * if we used the shared indicator clear it now after all queues + * were processed + */ + if (atomic_read(&q_indicators[TIQDIO_SHARED_IND].count)) { + xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0); + + /* prevent racing */ + if (*tiqdio_alsi) + xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1); + } + + /* check for more work */ + if (*tiqdio_alsi) { + xchg(tiqdio_alsi, 0); + qdio_perf_stat_inc(&perf_stats.tasklet_thinint_loop); + goto again; + } +} + +/** + * tiqdio_thinint_handler - thin interrupt handler for qdio + * @ind: pointer to adapter local summary indicator + * @drv_data: NULL + */ +static void tiqdio_thinint_handler(void *ind, void *drv_data) +{ + qdio_perf_stat_inc(&perf_stats.thin_int); + + /* + * SVS only when needed: issue SVS to benefit from iqdio interrupt + * avoidance (SVS clears adapter interrupt suppression overwrite) + */ + if (!css_qdio_omit_svs) + do_clear_global_summary(); + + /* + * reset local summary indicator (tiqdio_alsi) to 
stop adapter + * interrupts for now, the tasklet will clean all dsci's + */ + xchg((u8 *)ind, 0); + tasklet_hi_schedule(&tiqdio_tasklet); +} + +static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset) +{ + struct scssc_area *scssc_area; + char dbf_text[15]; + void *ptr; + int rc; + + scssc_area = (struct scssc_area *)irq_ptr->chsc_page; + memset(scssc_area, 0, PAGE_SIZE); + + if (reset) { + scssc_area->summary_indicator_addr = 0; + scssc_area->subchannel_indicator_addr = 0; + } else { + scssc_area->summary_indicator_addr = virt_to_phys(tiqdio_alsi); + scssc_area->subchannel_indicator_addr = + virt_to_phys(irq_ptr->dsci); + } + + scssc_area->request = (struct chsc_header) { + .length = 0x0fe0, + .code = 0x0021, + }; + scssc_area->operation_code = 0; + scssc_area->ks = PAGE_DEFAULT_KEY; + scssc_area->kc = PAGE_DEFAULT_KEY; + scssc_area->isc = QDIO_AIRQ_ISC; + scssc_area->schid = irq_ptr->schid; + + /* enable the time delay disablement facility */ + if (css_general_characteristics.aif_tdd) + scssc_area->word_with_d_bit = 0x10000000; + + rc = chsc(scssc_area); + if (rc) + return -EIO; + + rc = chsc_error_from_response(scssc_area->response.code); + if (rc) { + sprintf(dbf_text, "sidR%4x", scssc_area->response.code); + QDIO_DBF_TEXT1(0, trace, dbf_text); + QDIO_DBF_TEXT1(0, setup, dbf_text); + ptr = &scssc_area->response; + QDIO_DBF_HEX2(1, setup, &ptr, QDIO_DBF_SETUP_LEN); + return rc; + } + + QDIO_DBF_TEXT2(0, setup, "setscind"); + QDIO_DBF_HEX2(0, setup, &scssc_area->summary_indicator_addr, + sizeof(unsigned long)); + QDIO_DBF_HEX2(0, setup, &scssc_area->subchannel_indicator_addr, + sizeof(unsigned long)); + return 0; +} + +/* allocate non-shared indicators and shared indicator */ +int __init tiqdio_allocate_memory(void) +{ + q_indicators = kzalloc(sizeof(struct indicator_t) * TIQDIO_NR_INDICATORS, + GFP_KERNEL); + if (!q_indicators) + return -ENOMEM; + return 0; +} + +void tiqdio_free_memory(void) +{ + kfree(q_indicators); +} + +int __init tiqdio_register_thinints(void) +{ + char dbf_text[20]; + + isc_register(QDIO_AIRQ_ISC); + tiqdio_alsi = s390_register_adapter_interrupt(&tiqdio_thinint_handler, + NULL, QDIO_AIRQ_ISC); + if (IS_ERR(tiqdio_alsi)) { + sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_alsi)); + QDIO_DBF_TEXT0(0, setup, dbf_text); + tiqdio_alsi = NULL; + isc_unregister(QDIO_AIRQ_ISC); + return -ENOMEM; + } + return 0; +} + +int qdio_establish_thinint(struct qdio_irq *irq_ptr) +{ + if (!is_thinint_irq(irq_ptr)) + return 0; + + /* Check for aif time delay disablement. 
If installed, + * omit SVS even under LPAR + */ + if (css_general_characteristics.aif_tdd) + css_qdio_omit_svs = 1; + return set_subchannel_ind(irq_ptr, 0); +} + +void qdio_setup_thinint(struct qdio_irq *irq_ptr) +{ + if (!is_thinint_irq(irq_ptr)) + return; + irq_ptr->dsci = get_indicator(); + QDIO_DBF_HEX1(0, setup, &irq_ptr->dsci, sizeof(void *)); +} + +void qdio_shutdown_thinint(struct qdio_irq *irq_ptr) +{ + if (!is_thinint_irq(irq_ptr)) + return; + + /* reset adapter interrupt indicators */ + put_indicator(irq_ptr->dsci); + set_subchannel_ind(irq_ptr, 1); +} + +void __exit tiqdio_unregister_thinints(void) +{ + tasklet_disable(&tiqdio_tasklet); + + if (tiqdio_alsi) { + s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC); + isc_unregister(QDIO_AIRQ_ISC); + } +} diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 699ac11debd8..1895dbb553cd 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -239,11 +239,6 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, /*not used unless the microcode gets patched*/ #define QETH_PCI_TIMER_VALUE(card) 3 -#define QETH_MIN_INPUT_THRESHOLD 1 -#define QETH_MAX_INPUT_THRESHOLD 500 -#define QETH_MIN_OUTPUT_THRESHOLD 1 -#define QETH_MAX_OUTPUT_THRESHOLD 300 - /* priority queing */ #define QETH_PRIOQ_DEFAULT QETH_NO_PRIO_QUEUEING #define QETH_DEFAULT_QUEUE 2 @@ -811,17 +806,14 @@ int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *, enum qeth_ipa_cmds, enum qeth_prot_versions); int qeth_query_setadapterparms(struct qeth_card *); -int qeth_check_qdio_errors(struct qdio_buffer *, unsigned int, - unsigned int, const char *); +int qeth_check_qdio_errors(struct qdio_buffer *, unsigned int, const char *); void qeth_queue_input_buffer(struct qeth_card *, int); struct sk_buff *qeth_core_get_next_skb(struct qeth_card *, struct qdio_buffer *, struct qdio_buffer_element **, int *, struct qeth_hdr **); void qeth_schedule_recovery(struct qeth_card *); void qeth_qdio_output_handler(struct ccw_device *, unsigned int, - unsigned int, unsigned int, - unsigned int, int, int, - unsigned long); + int, int, int, unsigned long); void qeth_clear_ipacmd_list(struct qeth_card *); int qeth_qdio_clear_card(struct qeth_card *, int); void qeth_clear_working_pool_list(struct qeth_card *); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 0ac54dc638c2..c3ad89e302bd 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -2073,7 +2073,7 @@ static void qeth_create_qib_param_field_blkt(struct qeth_card *card, static int qeth_qdio_activate(struct qeth_card *card) { QETH_DBF_TEXT(SETUP, 3, "qdioact"); - return qdio_activate(CARD_DDEV(card), 0); + return qdio_activate(CARD_DDEV(card)); } static int qeth_dm_act(struct qeth_card *card) @@ -2349,16 +2349,11 @@ int qeth_init_qdio_queues(struct qeth_card *card) card->qdio.in_q->next_buf_to_init = card->qdio.in_buf_pool.buf_count - 1; rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, - card->qdio.in_buf_pool.buf_count - 1, NULL); + card->qdio.in_buf_pool.buf_count - 1); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); return rc; } - rc = qdio_synchronize(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0); - if (rc) { - QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); - return rc; - } /* outbound queue */ for (i = 0; i < card->qdio.no_out_queues; ++i) { memset(card->qdio.out_qs[i]->qdio_bufs, 0, @@ -2559,9 +2554,9 @@ int 
qeth_query_setadapterparms(struct qeth_card *card) EXPORT_SYMBOL_GPL(qeth_query_setadapterparms); int qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error, - unsigned int siga_error, const char *dbftext) + const char *dbftext) { - if (qdio_error || siga_error) { + if (qdio_error) { QETH_DBF_TEXT(TRACE, 2, dbftext); QETH_DBF_TEXT(QERR, 2, dbftext); QETH_DBF_TEXT_(QERR, 2, " F15=%02X", @@ -2569,7 +2564,6 @@ int qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error, QETH_DBF_TEXT_(QERR, 2, " F14=%02X", buf->element[14].flags & 0xff); QETH_DBF_TEXT_(QERR, 2, " qerr=%X", qdio_error); - QETH_DBF_TEXT_(QERR, 2, " serr=%X", siga_error); return 1; } return 0; @@ -2622,9 +2616,8 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index) card->perf_stats.inbound_do_qdio_start_time = qeth_get_micros(); } - rc = do_QDIO(CARD_DDEV(card), - QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT, - 0, queue->next_buf_to_init, count, NULL); + rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, + queue->next_buf_to_init, count); if (card->options.performance_stats) card->perf_stats.inbound_do_qdio_time += qeth_get_micros() - @@ -2643,14 +2636,13 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index) EXPORT_SYMBOL_GPL(qeth_queue_input_buffer); static int qeth_handle_send_error(struct qeth_card *card, - struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err, - unsigned int siga_err) + struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err) { int sbalf15 = buffer->buffer->element[15].flags & 0xff; - int cc = siga_err & 3; + int cc = qdio_err & 3; QETH_DBF_TEXT(TRACE, 6, "hdsnderr"); - qeth_check_qdio_errors(buffer->buffer, qdio_err, siga_err, "qouterr"); + qeth_check_qdio_errors(buffer->buffer, qdio_err, "qouterr"); switch (cc) { case 0: if (qdio_err) { @@ -2662,7 +2654,7 @@ static int qeth_handle_send_error(struct qeth_card *card, } return QETH_SEND_ERROR_NONE; case 2: - if (siga_err & QDIO_SIGA_ERROR_B_BIT_SET) { + if (qdio_err & QDIO_ERROR_SIGA_BUSY) { QETH_DBF_TEXT(TRACE, 1, "SIGAcc2B"); QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card)); return QETH_SEND_ERROR_KICK_IT; @@ -2758,8 +2750,8 @@ static int qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue) return 0; } -static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int, - int index, int count) +static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, + int count) { struct qeth_qdio_out_buffer *buf; int rc; @@ -2807,12 +2799,10 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int, qeth_get_micros(); } qdio_flags = QDIO_FLAG_SYNC_OUTPUT; - if (under_int) - qdio_flags |= QDIO_FLAG_UNDER_INTERRUPT; if (atomic_read(&queue->set_pci_flags_count)) qdio_flags |= QDIO_FLAG_PCI_OUT; rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags, - queue->queue_no, index, count, NULL); + queue->queue_no, index, count); if (queue->card->options.performance_stats) queue->card->perf_stats.outbound_do_qdio_time += qeth_get_micros() - @@ -2866,16 +2856,15 @@ static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) queue->card->perf_stats.bufs_sent_pack += flush_cnt; if (flush_cnt) - qeth_flush_buffers(queue, 1, index, flush_cnt); + qeth_flush_buffers(queue, index, flush_cnt); atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); } } } -void qeth_qdio_output_handler(struct ccw_device *ccwdev, unsigned int status, - unsigned int qdio_error, unsigned int siga_error, - unsigned int __queue, int first_element, int count, - unsigned long card_ptr) +void 
qeth_qdio_output_handler(struct ccw_device *ccwdev, + unsigned int qdio_error, int __queue, int first_element, + int count, unsigned long card_ptr) { struct qeth_card *card = (struct qeth_card *) card_ptr; struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue]; @@ -2883,15 +2872,12 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev, unsigned int status, int i; QETH_DBF_TEXT(TRACE, 6, "qdouhdl"); - if (status & QDIO_STATUS_LOOK_FOR_ERROR) { - if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) { - QETH_DBF_TEXT(TRACE, 2, "achkcond"); - QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card)); - QETH_DBF_TEXT_(TRACE, 2, "%08x", status); - netif_stop_queue(card->dev); - qeth_schedule_recovery(card); - return; - } + if (qdio_error & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) { + QETH_DBF_TEXT(TRACE, 2, "achkcond"); + QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card)); + netif_stop_queue(card->dev); + qeth_schedule_recovery(card); + return; } if (card->options.performance_stats) { card->perf_stats.outbound_handler_cnt++; @@ -2901,8 +2887,7 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev, unsigned int status, for (i = first_element; i < (first_element + count); ++i) { buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]; /*we only handle the KICK_IT error by doing a recovery */ - if (qeth_handle_send_error(card, buffer, - qdio_error, siga_error) + if (qeth_handle_send_error(card, buffer, qdio_error) == QETH_SEND_ERROR_KICK_IT){ netif_stop_queue(card->dev); qeth_schedule_recovery(card); @@ -3164,11 +3149,11 @@ int qeth_do_send_packet_fast(struct qeth_card *card, atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); if (ctx == NULL) { qeth_fill_buffer(queue, buffer, skb); - qeth_flush_buffers(queue, 0, index, 1); + qeth_flush_buffers(queue, index, 1); } else { flush_cnt = qeth_eddp_fill_buffer(queue, ctx, index); WARN_ON(buffers_needed != flush_cnt); - qeth_flush_buffers(queue, 0, index, flush_cnt); + qeth_flush_buffers(queue, index, flush_cnt); } return 0; out: @@ -3221,8 +3206,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, * again */ if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY){ - qeth_flush_buffers(queue, 0, - start_index, flush_count); + qeth_flush_buffers(queue, start_index, + flush_count); atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); return -EBUSY; @@ -3253,7 +3238,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, flush_count += tmp; out: if (flush_count) - qeth_flush_buffers(queue, 0, start_index, flush_count); + qeth_flush_buffers(queue, start_index, flush_count); else if (!atomic_read(&queue->set_pci_flags_count)) atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH); /* @@ -3274,7 +3259,7 @@ out: if (!flush_count && !atomic_read(&queue->set_pci_flags_count)) flush_count += qeth_flush_buffers_on_no_pci(queue); if (flush_count) - qeth_flush_buffers(queue, 0, start_index, flush_count); + qeth_flush_buffers(queue, start_index, flush_count); } /* at this point the queue is UNLOCKED again */ if (queue->card->options.performance_stats && do_pack) @@ -3686,10 +3671,6 @@ static int qeth_qdio_establish(struct qeth_card *card) init_data.q_format = qeth_get_qdio_q_format(card); init_data.qib_param_field_format = 0; init_data.qib_param_field = qib_param_field; - init_data.min_input_threshold = QETH_MIN_INPUT_THRESHOLD; - init_data.max_input_threshold = QETH_MAX_INPUT_THRESHOLD; - init_data.min_output_threshold = QETH_MIN_OUTPUT_THRESHOLD; - init_data.max_output_threshold = QETH_MAX_OUTPUT_THRESHOLD; 
init_data.no_input_qs = 1; init_data.no_output_qs = card->qdio.no_out_queues; init_data.input_handler = card->discipline.input_handler; @@ -3751,8 +3732,9 @@ static int qeth_core_driver_group(const char *buf, struct device *root_dev, int qeth_core_hardsetup_card(struct qeth_card *card) { + struct qdio_ssqd_desc *qdio_ssqd; int retries = 3; - int mpno; + int mpno = 0; int rc; QETH_DBF_TEXT(SETUP, 2, "hrdsetup"); @@ -3784,7 +3766,10 @@ retry: QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); return rc; } - mpno = qdio_get_ssqd_pct(CARD_DDEV(card)); + + qdio_ssqd = qdio_get_ssqd_desc(CARD_DDEV(card)); + if (qdio_ssqd) + mpno = qdio_ssqd->pcnt; if (mpno) mpno = min(mpno - 1, QETH_MAX_PORTNO); if (card->info.portno > mpno) { diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index f682f7b14480..3fbc3bdec0c5 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -726,8 +726,7 @@ tx_drop: } static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev, - unsigned int status, unsigned int qdio_err, - unsigned int siga_err, unsigned int queue, + unsigned int qdio_err, unsigned int queue, int first_element, int count, unsigned long card_ptr) { struct net_device *net_dev; @@ -742,23 +741,20 @@ static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev, card->perf_stats.inbound_cnt++; card->perf_stats.inbound_start_time = qeth_get_micros(); } - if (status & QDIO_STATUS_LOOK_FOR_ERROR) { - if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) { - QETH_DBF_TEXT(TRACE, 1, "qdinchk"); - QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card)); - QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", first_element, - count); - QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", queue, status); - qeth_schedule_recovery(card); - return; - } + if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) { + QETH_DBF_TEXT(TRACE, 1, "qdinchk"); + QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card)); + QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", first_element, + count); + QETH_DBF_TEXT_(TRACE, 1, "%04X", queue); + qeth_schedule_recovery(card); + return; } for (i = first_element; i < (first_element + count); ++i) { index = i % QDIO_MAX_BUFFERS_PER_Q; buffer = &card->qdio.in_q->bufs[index]; - if (!((status & QDIO_STATUS_LOOK_FOR_ERROR) && - qeth_check_qdio_errors(buffer->buffer, - qdio_err, siga_err, "qinerr"))) + if (!(qdio_err && + qeth_check_qdio_errors(buffer->buffer, qdio_err, "qinerr"))) qeth_l2_process_inbound_buffer(card, buffer, index); /* clear buffer and give back to hardware */ qeth_put_buffer_pool_entry(card, buffer->pool_entry); diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 06deaee50f6d..22f64aa6dd1f 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -2939,8 +2939,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) } static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev, - unsigned int status, unsigned int qdio_err, - unsigned int siga_err, unsigned int queue, int first_element, + unsigned int qdio_err, unsigned int queue, int first_element, int count, unsigned long card_ptr) { struct net_device *net_dev; @@ -2955,23 +2954,21 @@ static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev, card->perf_stats.inbound_cnt++; card->perf_stats.inbound_start_time = qeth_get_micros(); } - if (status & QDIO_STATUS_LOOK_FOR_ERROR) { - if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) { - QETH_DBF_TEXT(TRACE, 1, "qdinchk"); - QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card)); - QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", - 
first_element, count); - QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", queue, status); - qeth_schedule_recovery(card); - return; - } + if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) { + QETH_DBF_TEXT(TRACE, 1, "qdinchk"); + QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card)); + QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", + first_element, count); + QETH_DBF_TEXT_(TRACE, 1, "%04X", queue); + qeth_schedule_recovery(card); + return; } for (i = first_element; i < (first_element + count); ++i) { index = i % QDIO_MAX_BUFFERS_PER_Q; buffer = &card->qdio.in_q->bufs[index]; - if (!((status & QDIO_STATUS_LOOK_FOR_ERROR) && + if (!(qdio_err && qeth_check_qdio_errors(buffer->buffer, - qdio_err, siga_err, "qinerr"))) + qdio_err, "qinerr"))) qeth_l3_process_inbound_buffer(card, buffer, index); /* clear buffer and give back to hardware */ qeth_put_buffer_pool_entry(card, buffer->pool_entry); diff --git a/drivers/s390/scsi/Makefile b/drivers/s390/scsi/Makefile index d6a78f1a2f16..cb301cc6178c 100644 --- a/drivers/s390/scsi/Makefile +++ b/drivers/s390/scsi/Makefile @@ -3,7 +3,6 @@ # zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_scsi.o zfcp_erp.o zfcp_qdio.o \ - zfcp_fsf.o zfcp_dbf.o zfcp_sysfs_adapter.o zfcp_sysfs_port.o \ - zfcp_sysfs_unit.o zfcp_sysfs_driver.o + zfcp_fsf.o zfcp_dbf.o zfcp_sysfs.o zfcp_fc.o zfcp_cfdc.o obj-$(CONFIG_ZFCP) += zfcp.o diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 8c7e2b778ef1..90abfd06ed55 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -1,22 +1,9 @@ /* - * This file is part of the zfcp device driver for - * FCP adapters for IBM System z9 and zSeries. + * zfcp device driver * - * (C) Copyright IBM Corp. 2002, 2006 + * Module interface and handling of zfcp data structures. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * Copyright IBM Corporation 2002, 2008 */ /* @@ -31,93 +18,25 @@ * Maxim Shchetynin * Volker Sameske * Ralph Wuerthner + * Michael Loehr + * Swen Schillig + * Christof Schmitt + * Martin Petermann + * Sven Schuetz */ +#include <linux/miscdevice.h> #include "zfcp_ext.h" -/* accumulated log level (module parameter) */ -static u32 loglevel = ZFCP_LOG_LEVEL_DEFAULTS; static char *device; -/*********************** FUNCTION PROTOTYPES *********************************/ - -/* written against the module interface */ -static int __init zfcp_module_init(void); - -/* FCP related */ -static void zfcp_ns_gid_pn_handler(unsigned long); - -/* miscellaneous */ -static int zfcp_sg_list_alloc(struct zfcp_sg_list *, size_t); -static void zfcp_sg_list_free(struct zfcp_sg_list *); -static int zfcp_sg_list_copy_from_user(struct zfcp_sg_list *, - void __user *, size_t); -static int zfcp_sg_list_copy_to_user(void __user *, - struct zfcp_sg_list *, size_t); -static long zfcp_cfdc_dev_ioctl(struct file *, unsigned int, unsigned long); - -#define ZFCP_CFDC_IOC_MAGIC 0xDD -#define ZFCP_CFDC_IOC \ - _IOWR(ZFCP_CFDC_IOC_MAGIC, 0, struct zfcp_cfdc_sense_data) - - -static const struct file_operations zfcp_cfdc_fops = { - .unlocked_ioctl = zfcp_cfdc_dev_ioctl, -#ifdef CONFIG_COMPAT - .compat_ioctl = zfcp_cfdc_dev_ioctl -#endif -}; - -static struct miscdevice zfcp_cfdc_misc = { - .minor = ZFCP_CFDC_DEV_MINOR, - .name = ZFCP_CFDC_DEV_NAME, - .fops = &zfcp_cfdc_fops -}; - -/*********************** KERNEL/MODULE PARAMETERS ***************************/ - -/* declare driver module init/cleanup functions */ -module_init(zfcp_module_init); MODULE_AUTHOR("IBM Deutschland Entwicklung GmbH - linux390@de.ibm.com"); -MODULE_DESCRIPTION - ("FCP (SCSI over Fibre Channel) HBA driver for IBM System z9 and zSeries"); +MODULE_DESCRIPTION("FCP HBA driver"); MODULE_LICENSE("GPL"); module_param(device, charp, 0400); MODULE_PARM_DESC(device, "specify initial device"); -module_param(loglevel, uint, 0400); -MODULE_PARM_DESC(loglevel, - "log levels, 8 nibbles: " - "FC ERP QDIO CIO Config FSF SCSI Other, " - "levels: 0=none 1=normal 2=devel 3=trace"); - -/****************************************************************/ -/************** Functions without logging ***********************/ -/****************************************************************/ - -void -_zfcp_hex_dump(char *addr, int count) -{ - int i; - for (i = 0; i < count; i++) { - printk("%02x", addr[i]); - if ((i % 4) == 3) - printk(" "); - if ((i % 32) == 31) - printk("\n"); - } - if (((i-1) % 32) != 31) - printk("\n"); -} - - -/****************************************************************/ -/****** Functions to handle the request ID hash table ********/ -/****************************************************************/ - -#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF - static int zfcp_reqlist_alloc(struct zfcp_adapter *adapter) { int idx; @@ -132,11 +51,12 @@ static int zfcp_reqlist_alloc(struct zfcp_adapter *adapter) return 0; } -static void zfcp_reqlist_free(struct zfcp_adapter *adapter) -{ - kfree(adapter->req_list); -} - +/** + * zfcp_reqlist_isempty - is the request list empty + * @adapter: pointer to struct zfcp_adapter + * + * Returns: true if list is empty, false otherwise + */ int zfcp_reqlist_isempty(struct zfcp_adapter *adapter) { unsigned int idx; @@ -147,62 +67,58 @@ int zfcp_reqlist_isempty(struct zfcp_adapter *adapter) return 1; } -#undef ZFCP_LOG_AREA - -/****************************************************************/ -/************** Uncategorised Functions 
*************************/ -/****************************************************************/ - -#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER - -/** - * zfcp_device_setup - setup function - * @str: pointer to parameter string - * - * Parse "device=..." parameter string. - */ -static int __init -zfcp_device_setup(char *devstr) +static int __init zfcp_device_setup(char *devstr) { - char *tmp, *str; - size_t len; + char *token; + char *str; if (!devstr) return 0; - len = strlen(devstr) + 1; - str = kmalloc(len, GFP_KERNEL); + /* duplicate devstr and keep the original for sysfs presentation*/ + str = kmalloc(strlen(devstr) + 1, GFP_KERNEL); if (!str) - goto err_out; - memcpy(str, devstr, len); + return 0; - tmp = strchr(str, ','); - if (!tmp) - goto err_out; - *tmp++ = '\0'; - strncpy(zfcp_data.init_busid, str, BUS_ID_SIZE); - zfcp_data.init_busid[BUS_ID_SIZE-1] = '\0'; + strcpy(str, devstr); - zfcp_data.init_wwpn = simple_strtoull(tmp, &tmp, 0); - if (*tmp++ != ',') + token = strsep(&str, ","); + if (!token || strlen(token) >= BUS_ID_SIZE) goto err_out; - if (*tmp == '\0') + strncpy(zfcp_data.init_busid, token, BUS_ID_SIZE); + + token = strsep(&str, ","); + if (!token || strict_strtoull(token, 0, &zfcp_data.init_wwpn)) goto err_out; - zfcp_data.init_fcp_lun = simple_strtoull(tmp, &tmp, 0); - if (*tmp != '\0') + token = strsep(&str, ","); + if (!token || strict_strtoull(token, 0, &zfcp_data.init_fcp_lun)) goto err_out; + kfree(str); return 1; err_out: - ZFCP_LOG_NORMAL("Parse error for device parameter string %s\n", str); kfree(str); + pr_err("zfcp: Parse error for device parameter string %s, " + "device not attached.\n", devstr); return 0; } -static void __init -zfcp_init_device_configure(void) +static struct zfcp_adapter *zfcp_get_adapter_by_busid(char *bus_id) +{ + struct zfcp_adapter *adapter; + + list_for_each_entry(adapter, &zfcp_data.adapter_list_head, list) + if ((strncmp(bus_id, adapter->ccw_device->dev.bus_id, + BUS_ID_SIZE) == 0) && + !(atomic_read(&adapter->status) & + ZFCP_STATUS_COMMON_REMOVE)) + return adapter; + return NULL; +} + +static void __init zfcp_init_device_configure(void) { struct zfcp_adapter *adapter; struct zfcp_port *port; @@ -215,101 +131,75 @@ zfcp_init_device_configure(void) zfcp_adapter_get(adapter); read_unlock_irq(&zfcp_data.config_lock); - if (adapter == NULL) + if (!adapter) goto out_adapter; port = zfcp_port_enqueue(adapter, zfcp_data.init_wwpn, 0, 0); - if (!port) + if (IS_ERR(port)) goto out_port; unit = zfcp_unit_enqueue(port, zfcp_data.init_fcp_lun); - if (!unit) + if (IS_ERR(unit)) goto out_unit; up(&zfcp_data.config_sema); ccw_device_set_online(adapter->ccw_device); zfcp_erp_wait(adapter); down(&zfcp_data.config_sema); zfcp_unit_put(unit); - out_unit: +out_unit: zfcp_port_put(port); - out_port: +out_port: zfcp_adapter_put(adapter); - out_adapter: +out_adapter: up(&zfcp_data.config_sema); return; } -static int calc_alignment(int size) +static struct kmem_cache *zfcp_cache_create(int size, char *name) { int align = 1; - - if (!size) - return 0; - while ((size - align) > 0) align <<= 1; - - return align; + return kmem_cache_create(name , size, align, 0, NULL); } -static int __init -zfcp_module_init(void) +static int __init zfcp_module_init(void) { int retval = -ENOMEM; - int size, align; - size = sizeof(struct zfcp_fsf_req_qtcb); - align = calc_alignment(size); - zfcp_data.fsf_req_qtcb_cache = - kmem_cache_create("zfcp_fsf", size, align, 0, NULL); + zfcp_data.fsf_req_qtcb_cache = zfcp_cache_create( + sizeof(struct zfcp_fsf_req_qtcb), "zfcp_fsf"); if 
(!zfcp_data.fsf_req_qtcb_cache) goto out; - size = sizeof(struct fsf_status_read_buffer); - align = calc_alignment(size); - zfcp_data.sr_buffer_cache = - kmem_cache_create("zfcp_sr", size, align, 0, NULL); + zfcp_data.sr_buffer_cache = zfcp_cache_create( + sizeof(struct fsf_status_read_buffer), "zfcp_sr"); if (!zfcp_data.sr_buffer_cache) goto out_sr_cache; - size = sizeof(struct zfcp_gid_pn_data); - align = calc_alignment(size); - zfcp_data.gid_pn_cache = - kmem_cache_create("zfcp_gid", size, align, 0, NULL); + zfcp_data.gid_pn_cache = zfcp_cache_create( + sizeof(struct zfcp_gid_pn_data), "zfcp_gid"); if (!zfcp_data.gid_pn_cache) goto out_gid_cache; - atomic_set(&zfcp_data.loglevel, loglevel); - - /* initialize adapter list */ INIT_LIST_HEAD(&zfcp_data.adapter_list_head); - - /* initialize adapters to be removed list head */ INIT_LIST_HEAD(&zfcp_data.adapter_remove_lh); + sema_init(&zfcp_data.config_sema, 1); + rwlock_init(&zfcp_data.config_lock); + zfcp_data.scsi_transport_template = fc_attach_transport(&zfcp_transport_functions); if (!zfcp_data.scsi_transport_template) goto out_transport; retval = misc_register(&zfcp_cfdc_misc); - if (retval != 0) { - ZFCP_LOG_INFO("registration of misc device " - "zfcp_cfdc failed\n"); + if (retval) { + pr_err("zfcp: registration of misc device zfcp_cfdc failed\n"); goto out_misc; } - ZFCP_LOG_TRACE("major/minor for zfcp_cfdc: %d/%d\n", - ZFCP_CFDC_DEV_MAJOR, zfcp_cfdc_misc.minor); - - /* Initialise proc semaphores */ - sema_init(&zfcp_data.config_sema, 1); - - /* initialise configuration rw lock */ - rwlock_init(&zfcp_data.config_lock); - - /* setup dynamic I/O */ retval = zfcp_ccw_register(); if (retval) { - ZFCP_LOG_NORMAL("registration with common I/O layer failed\n"); + pr_err("zfcp: Registration with common I/O layer failed.\n"); goto out_ccw_register; } @@ -318,527 +208,88 @@ zfcp_module_init(void) goto out; - out_ccw_register: +out_ccw_register: misc_deregister(&zfcp_cfdc_misc); - out_misc: +out_misc: fc_release_transport(zfcp_data.scsi_transport_template); - out_transport: +out_transport: kmem_cache_destroy(zfcp_data.gid_pn_cache); - out_gid_cache: +out_gid_cache: kmem_cache_destroy(zfcp_data.sr_buffer_cache); - out_sr_cache: +out_sr_cache: kmem_cache_destroy(zfcp_data.fsf_req_qtcb_cache); - out: +out: return retval; } -/* - * function: zfcp_cfdc_dev_ioctl - * - * purpose: Handle control file upload/download transaction via IOCTL - * interface - * - * returns: 0 - Operation completed successfuly - * -ENOTTY - Unknown IOCTL command - * -EINVAL - Invalid sense data record - * -ENXIO - The FCP adapter is not available - * -EOPNOTSUPP - The FCP adapter does not have CFDC support - * -ENOMEM - Insufficient memory - * -EFAULT - User space memory I/O operation fault - * -EPERM - Cannot create or queue FSF request or create SBALs - * -ERESTARTSYS- Received signal (is mapped to EAGAIN by VFS) - */ -static long -zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command, - unsigned long buffer) -{ - struct zfcp_cfdc_sense_data *sense_data, __user *sense_data_user; - struct zfcp_adapter *adapter = NULL; - struct zfcp_fsf_req *fsf_req = NULL; - struct zfcp_sg_list *sg_list = NULL; - u32 fsf_command, option; - char *bus_id = NULL; - int retval = 0; - - sense_data = kmalloc(sizeof(struct zfcp_cfdc_sense_data), GFP_KERNEL); - if (sense_data == NULL) { - retval = -ENOMEM; - goto out; - } - - sg_list = kzalloc(sizeof(struct zfcp_sg_list), GFP_KERNEL); - if (sg_list == NULL) { - retval = -ENOMEM; - goto out; - } - - if (command != ZFCP_CFDC_IOC) { - 
ZFCP_LOG_INFO("IOC request code 0x%x invalid\n", command); - retval = -ENOTTY; - goto out; - } - - if ((sense_data_user = (void __user *) buffer) == NULL) { - ZFCP_LOG_INFO("sense data record is required\n"); - retval = -EINVAL; - goto out; - } - - retval = copy_from_user(sense_data, sense_data_user, - sizeof(struct zfcp_cfdc_sense_data)); - if (retval) { - retval = -EFAULT; - goto out; - } - - if (sense_data->signature != ZFCP_CFDC_SIGNATURE) { - ZFCP_LOG_INFO("invalid sense data request signature 0x%08x\n", - ZFCP_CFDC_SIGNATURE); - retval = -EINVAL; - goto out; - } - - switch (sense_data->command) { - - case ZFCP_CFDC_CMND_DOWNLOAD_NORMAL: - fsf_command = FSF_QTCB_DOWNLOAD_CONTROL_FILE; - option = FSF_CFDC_OPTION_NORMAL_MODE; - break; - - case ZFCP_CFDC_CMND_DOWNLOAD_FORCE: - fsf_command = FSF_QTCB_DOWNLOAD_CONTROL_FILE; - option = FSF_CFDC_OPTION_FORCE; - break; - - case ZFCP_CFDC_CMND_FULL_ACCESS: - fsf_command = FSF_QTCB_DOWNLOAD_CONTROL_FILE; - option = FSF_CFDC_OPTION_FULL_ACCESS; - break; - - case ZFCP_CFDC_CMND_RESTRICTED_ACCESS: - fsf_command = FSF_QTCB_DOWNLOAD_CONTROL_FILE; - option = FSF_CFDC_OPTION_RESTRICTED_ACCESS; - break; - - case ZFCP_CFDC_CMND_UPLOAD: - fsf_command = FSF_QTCB_UPLOAD_CONTROL_FILE; - option = 0; - break; - - default: - ZFCP_LOG_INFO("invalid command code 0x%08x\n", - sense_data->command); - retval = -EINVAL; - goto out; - } - - bus_id = kmalloc(BUS_ID_SIZE, GFP_KERNEL); - if (bus_id == NULL) { - retval = -ENOMEM; - goto out; - } - snprintf(bus_id, BUS_ID_SIZE, "%d.%d.%04x", - (sense_data->devno >> 24), - (sense_data->devno >> 16) & 0xFF, - (sense_data->devno & 0xFFFF)); - - read_lock_irq(&zfcp_data.config_lock); - adapter = zfcp_get_adapter_by_busid(bus_id); - if (adapter) - zfcp_adapter_get(adapter); - read_unlock_irq(&zfcp_data.config_lock); - - kfree(bus_id); - - if (adapter == NULL) { - ZFCP_LOG_INFO("invalid adapter\n"); - retval = -ENXIO; - goto out; - } - - if (sense_data->command & ZFCP_CFDC_WITH_CONTROL_FILE) { - retval = zfcp_sg_list_alloc(sg_list, - ZFCP_CFDC_MAX_CONTROL_FILE_SIZE); - if (retval) { - retval = -ENOMEM; - goto out; - } - } - - if ((sense_data->command & ZFCP_CFDC_DOWNLOAD) && - (sense_data->command & ZFCP_CFDC_WITH_CONTROL_FILE)) { - retval = zfcp_sg_list_copy_from_user( - sg_list, &sense_data_user->control_file, - ZFCP_CFDC_MAX_CONTROL_FILE_SIZE); - if (retval) { - retval = -EFAULT; - goto out; - } - } - - retval = zfcp_fsf_control_file(adapter, &fsf_req, fsf_command, - option, sg_list); - if (retval) - goto out; - - if ((fsf_req->qtcb->prefix.prot_status != FSF_PROT_GOOD) && - (fsf_req->qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) { - retval = -ENXIO; - goto out; - } - - sense_data->fsf_status = fsf_req->qtcb->header.fsf_status; - memcpy(&sense_data->fsf_status_qual, - &fsf_req->qtcb->header.fsf_status_qual, - sizeof(union fsf_status_qual)); - memcpy(&sense_data->payloads, &fsf_req->qtcb->bottom.support.els, 256); - - retval = copy_to_user(sense_data_user, sense_data, - sizeof(struct zfcp_cfdc_sense_data)); - if (retval) { - retval = -EFAULT; - goto out; - } - - if (sense_data->command & ZFCP_CFDC_UPLOAD) { - retval = zfcp_sg_list_copy_to_user( - &sense_data_user->control_file, sg_list, - ZFCP_CFDC_MAX_CONTROL_FILE_SIZE); - if (retval) { - retval = -EFAULT; - goto out; - } - } - - out: - if (fsf_req != NULL) - zfcp_fsf_req_free(fsf_req); - - if ((adapter != NULL) && (retval != -ENXIO)) - zfcp_adapter_put(adapter); - - if (sg_list != NULL) { - zfcp_sg_list_free(sg_list); - kfree(sg_list); - } - - 
kfree(sense_data); - - return retval; -} - - -/** - * zfcp_sg_list_alloc - create a scatter-gather list of the specified size - * @sg_list: structure describing a scatter gather list - * @size: size of scatter-gather list - * Return: 0 on success, else -ENOMEM - * - * In sg_list->sg a pointer to the created scatter-gather list is returned, - * or NULL if we run out of memory. sg_list->count specifies the number of - * elements of the scatter-gather list. The maximum size of a single element - * in the scatter-gather list is PAGE_SIZE. - */ -static int -zfcp_sg_list_alloc(struct zfcp_sg_list *sg_list, size_t size) -{ - struct scatterlist *sg; - unsigned int i; - int retval = 0; - void *address; - - BUG_ON(sg_list == NULL); - - sg_list->count = size >> PAGE_SHIFT; - if (size & ~PAGE_MASK) - sg_list->count++; - sg_list->sg = kcalloc(sg_list->count, sizeof(struct scatterlist), - GFP_KERNEL); - if (sg_list->sg == NULL) { - sg_list->count = 0; - retval = -ENOMEM; - goto out; - } - sg_init_table(sg_list->sg, sg_list->count); - - for (i = 0, sg = sg_list->sg; i < sg_list->count; i++, sg++) { - address = (void *) get_zeroed_page(GFP_KERNEL); - if (address == NULL) { - sg_list->count = i; - zfcp_sg_list_free(sg_list); - retval = -ENOMEM; - goto out; - } - zfcp_address_to_sg(address, sg, min(size, PAGE_SIZE)); - size -= sg->length; - } - - out: - return retval; -} - - -/** - * zfcp_sg_list_free - free memory of a scatter-gather list - * @sg_list: structure describing a scatter-gather list - * - * Memory for each element in the scatter-gather list is freed. - * Finally sg_list->sg is freed itself and sg_list->count is reset. - */ -static void -zfcp_sg_list_free(struct zfcp_sg_list *sg_list) -{ - struct scatterlist *sg; - unsigned int i; - - BUG_ON(sg_list == NULL); - - for (i = 0, sg = sg_list->sg; i < sg_list->count; i++, sg++) - free_page((unsigned long) zfcp_sg_to_address(sg)); - - sg_list->count = 0; - kfree(sg_list->sg); -} - -/** - * zfcp_sg_size - determine size of a scatter-gather list - * @sg: array of (struct scatterlist) - * @sg_count: elements in array - * Return: size of entire scatter-gather list - */ -static size_t zfcp_sg_size(struct scatterlist *sg, unsigned int sg_count) -{ - unsigned int i; - struct scatterlist *p; - size_t size; - - size = 0; - for (i = 0, p = sg; i < sg_count; i++, p++) { - BUG_ON(p == NULL); - size += p->length; - } - - return size; -} - - -/** - * zfcp_sg_list_copy_from_user -copy data from user space to scatter-gather list - * @sg_list: structure describing a scatter-gather list - * @user_buffer: pointer to buffer in user space - * @size: number of bytes to be copied - * Return: 0 on success, -EFAULT if copy_from_user fails. 
- */ -static int -zfcp_sg_list_copy_from_user(struct zfcp_sg_list *sg_list, - void __user *user_buffer, - size_t size) -{ - struct scatterlist *sg; - unsigned int length; - void *zfcp_buffer; - int retval = 0; - - BUG_ON(sg_list == NULL); - - if (zfcp_sg_size(sg_list->sg, sg_list->count) < size) - return -EFAULT; - - for (sg = sg_list->sg; size > 0; sg++) { - length = min((unsigned int)size, sg->length); - zfcp_buffer = zfcp_sg_to_address(sg); - if (copy_from_user(zfcp_buffer, user_buffer, length)) { - retval = -EFAULT; - goto out; - } - user_buffer += length; - size -= length; - } - - out: - return retval; -} - - -/** - * zfcp_sg_list_copy_to_user - copy data from scatter-gather list to user space - * @user_buffer: pointer to buffer in user space - * @sg_list: structure describing a scatter-gather list - * @size: number of bytes to be copied - * Return: 0 on success, -EFAULT if copy_to_user fails - */ -static int -zfcp_sg_list_copy_to_user(void __user *user_buffer, - struct zfcp_sg_list *sg_list, - size_t size) -{ - struct scatterlist *sg; - unsigned int length; - void *zfcp_buffer; - int retval = 0; - - BUG_ON(sg_list == NULL); - - if (zfcp_sg_size(sg_list->sg, sg_list->count) < size) - return -EFAULT; - - for (sg = sg_list->sg; size > 0; sg++) { - length = min((unsigned int) size, sg->length); - zfcp_buffer = zfcp_sg_to_address(sg); - if (copy_to_user(user_buffer, zfcp_buffer, length)) { - retval = -EFAULT; - goto out; - } - user_buffer += length; - size -= length; - } - - out: - return retval; -} - - -#undef ZFCP_LOG_AREA - -/****************************************************************/ -/****** Functions for configuration/set-up of structures ********/ -/****************************************************************/ - -#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG +module_init(zfcp_module_init); /** * zfcp_get_unit_by_lun - find unit in unit list of port by FCP LUN * @port: pointer to port to search for unit * @fcp_lun: FCP LUN to search for - * Traverse list of all units of a port and return pointer to a unit - * with the given FCP LUN. + * + * Returns: pointer to zfcp_unit or NULL */ -struct zfcp_unit * -zfcp_get_unit_by_lun(struct zfcp_port *port, fcp_lun_t fcp_lun) +struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port, + fcp_lun_t fcp_lun) { struct zfcp_unit *unit; - int found = 0; - list_for_each_entry(unit, &port->unit_list_head, list) { + list_for_each_entry(unit, &port->unit_list_head, list) if ((unit->fcp_lun == fcp_lun) && - !atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status)) - { - found = 1; - break; - } - } - return found ? unit : NULL; + !(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_REMOVE)) + return unit; + return NULL; } /** * zfcp_get_port_by_wwpn - find port in port list of adapter by wwpn * @adapter: pointer to adapter to search for port * @wwpn: wwpn to search for - * Traverse list of all ports of an adapter and return pointer to a port - * with the given wwpn. - */ -struct zfcp_port * -zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter, wwn_t wwpn) -{ - struct zfcp_port *port; - int found = 0; - - list_for_each_entry(port, &adapter->port_list_head, list) { - if ((port->wwpn == wwpn) && - !(atomic_read(&port->status) & - (ZFCP_STATUS_PORT_NO_WWPN | ZFCP_STATUS_COMMON_REMOVE))) { - found = 1; - break; - } - } - return found ? 
port : NULL; -} - -/** - * zfcp_get_port_by_did - find port in port list of adapter by d_id - * @adapter: pointer to adapter to search for port - * @d_id: d_id to search for - * Traverse list of all ports of an adapter and return pointer to a port - * with the given d_id. + * + * Returns: pointer to zfcp_port or NULL */ -struct zfcp_port * -zfcp_get_port_by_did(struct zfcp_adapter *adapter, u32 d_id) +struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter, + wwn_t wwpn) { struct zfcp_port *port; - int found = 0; - list_for_each_entry(port, &adapter->port_list_head, list) { - if ((port->d_id == d_id) && - !atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status)) - { - found = 1; - break; - } - } - return found ? port : NULL; + list_for_each_entry(port, &adapter->port_list_head, list) + if ((port->wwpn == wwpn) && !(atomic_read(&port->status) & + (ZFCP_STATUS_PORT_NO_WWPN | ZFCP_STATUS_COMMON_REMOVE))) + return port; + return NULL; } -/** - * zfcp_get_adapter_by_busid - find adpater in adapter list by bus_id - * @bus_id: bus_id to search for - * Traverse list of all adapters and return pointer to an adapter - * with the given bus_id. - */ -struct zfcp_adapter * -zfcp_get_adapter_by_busid(char *bus_id) +static void zfcp_sysfs_unit_release(struct device *dev) { - struct zfcp_adapter *adapter; - int found = 0; - - list_for_each_entry(adapter, &zfcp_data.adapter_list_head, list) { - if ((strncmp(bus_id, zfcp_get_busid_by_adapter(adapter), - BUS_ID_SIZE) == 0) && - !atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, - &adapter->status)){ - found = 1; - break; - } - } - return found ? adapter : NULL; + kfree(container_of(dev, struct zfcp_unit, sysfs_device)); } /** * zfcp_unit_enqueue - enqueue unit to unit list of a port. * @port: pointer to port where unit is added * @fcp_lun: FCP LUN of unit to be enqueued - * Return: pointer to enqueued unit on success, NULL on error + * Returns: pointer to enqueued unit on success, ERR_PTR on error * Locks: config_sema must be held to serialize changes to the unit list * * Sets up some unit internal structures and creates sysfs entry. */ -struct zfcp_unit * -zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun) +struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun) { struct zfcp_unit *unit; - /* - * check that there is no unit with this FCP_LUN already in list - * and enqueue it. 
- * Note: Unlike for the adapter and the port, this is an error - */ - read_lock_irq(&zfcp_data.config_lock); - unit = zfcp_get_unit_by_lun(port, fcp_lun); - read_unlock_irq(&zfcp_data.config_lock); - if (unit) - return NULL; - - unit = kzalloc(sizeof (struct zfcp_unit), GFP_KERNEL); + unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL); if (!unit) - return NULL; + return ERR_PTR(-ENOMEM); - /* initialise reference count stuff */ atomic_set(&unit->refcount, 0); init_waitqueue_head(&unit->remove_wq); unit->port = port; unit->fcp_lun = fcp_lun; - /* setup for sysfs registration */ snprintf(unit->sysfs_device.bus_id, BUS_ID_SIZE, "0x%016llx", fcp_lun); unit->sysfs_device.parent = &port->sysfs_device; unit->sysfs_device.release = zfcp_sysfs_unit_release; @@ -847,14 +298,28 @@ zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun) /* mark unit unusable as long as sysfs registration is not complete */ atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status); - if (device_register(&unit->sysfs_device)) { - kfree(unit); - return NULL; + spin_lock_init(&unit->latencies.lock); + unit->latencies.write.channel.min = 0xFFFFFFFF; + unit->latencies.write.fabric.min = 0xFFFFFFFF; + unit->latencies.read.channel.min = 0xFFFFFFFF; + unit->latencies.read.fabric.min = 0xFFFFFFFF; + unit->latencies.cmd.channel.min = 0xFFFFFFFF; + unit->latencies.cmd.fabric.min = 0xFFFFFFFF; + + read_lock_irq(&zfcp_data.config_lock); + if (zfcp_get_unit_by_lun(port, fcp_lun)) { + read_unlock_irq(&zfcp_data.config_lock); + goto err_out_free; } + read_unlock_irq(&zfcp_data.config_lock); - if (zfcp_sysfs_unit_create_files(&unit->sysfs_device)) { + if (device_register(&unit->sysfs_device)) + goto err_out_free; + + if (sysfs_create_group(&unit->sysfs_device.kobj, + &zfcp_sysfs_unit_attrs)) { device_unregister(&unit->sysfs_device); - return NULL; + return ERR_PTR(-EIO); } zfcp_unit_get(unit); @@ -864,16 +329,27 @@ zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun) list_add_tail(&unit->list, &port->unit_list_head); atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status); atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status); + write_unlock_irq(&zfcp_data.config_lock); port->units++; zfcp_port_get(port); return unit; + +err_out_free: + kfree(unit); + return ERR_PTR(-EINVAL); } -void -zfcp_unit_dequeue(struct zfcp_unit *unit) +/** + * zfcp_unit_dequeue - dequeue unit + * @unit: pointer to zfcp_unit + * + * waits until all work is done on unit and removes it then from the unit->list + * of the associated port. + */ +void zfcp_unit_dequeue(struct zfcp_unit *unit) { zfcp_unit_wait(unit); write_lock_irq(&zfcp_data.config_lock); @@ -881,68 +357,51 @@ zfcp_unit_dequeue(struct zfcp_unit *unit) write_unlock_irq(&zfcp_data.config_lock); unit->port->units--; zfcp_port_put(unit->port); - zfcp_sysfs_unit_remove_files(&unit->sysfs_device); + sysfs_remove_group(&unit->sysfs_device.kobj, &zfcp_sysfs_unit_attrs); device_unregister(&unit->sysfs_device); } -/* - * Allocates a combined QTCB/fsf_req buffer for erp actions and fcp/SCSI - * commands. - * It also genrates fcp-nameserver request/response buffer and unsolicited - * status read fsf_req buffers. 
- * - * locks: must only be called with zfcp_data.config_sema taken - */ -static int -zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) +static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) { + /* must only be called with zfcp_data.config_sema taken */ adapter->pool.fsf_req_erp = - mempool_create_slab_pool(ZFCP_POOL_FSF_REQ_ERP_NR, - zfcp_data.fsf_req_qtcb_cache); + mempool_create_slab_pool(1, zfcp_data.fsf_req_qtcb_cache); if (!adapter->pool.fsf_req_erp) return -ENOMEM; adapter->pool.fsf_req_scsi = - mempool_create_slab_pool(ZFCP_POOL_FSF_REQ_SCSI_NR, - zfcp_data.fsf_req_qtcb_cache); + mempool_create_slab_pool(1, zfcp_data.fsf_req_qtcb_cache); if (!adapter->pool.fsf_req_scsi) return -ENOMEM; adapter->pool.fsf_req_abort = - mempool_create_slab_pool(ZFCP_POOL_FSF_REQ_ABORT_NR, - zfcp_data.fsf_req_qtcb_cache); + mempool_create_slab_pool(1, zfcp_data.fsf_req_qtcb_cache); if (!adapter->pool.fsf_req_abort) return -ENOMEM; adapter->pool.fsf_req_status_read = - mempool_create_kmalloc_pool(ZFCP_POOL_STATUS_READ_NR, + mempool_create_kmalloc_pool(FSF_STATUS_READS_RECOM, sizeof(struct zfcp_fsf_req)); if (!adapter->pool.fsf_req_status_read) return -ENOMEM; adapter->pool.data_status_read = - mempool_create_slab_pool(ZFCP_POOL_STATUS_READ_NR, + mempool_create_slab_pool(FSF_STATUS_READS_RECOM, zfcp_data.sr_buffer_cache); if (!adapter->pool.data_status_read) return -ENOMEM; adapter->pool.data_gid_pn = - mempool_create_slab_pool(ZFCP_POOL_DATA_GID_PN_NR, - zfcp_data.gid_pn_cache); + mempool_create_slab_pool(1, zfcp_data.gid_pn_cache); if (!adapter->pool.data_gid_pn) return -ENOMEM; return 0; } -/** - * zfcp_free_low_mem_buffers - free memory pools of an adapter - * @adapter: pointer to zfcp_adapter for which memory pools should be freed - * locking: zfcp_data.config_sema must be held - */ -static void -zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter) +static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter) { + /* zfcp_data.config_sema must be held */ if (adapter->pool.fsf_req_erp) mempool_destroy(adapter->pool.fsf_req_erp); if (adapter->pool.fsf_req_scsi) @@ -962,20 +421,61 @@ static void zfcp_dummy_release(struct device *dev) return; } -/* +/** + * zfcp_status_read_refill - refill the long running status_read_requests + * @adapter: ptr to struct zfcp_adapter for which the buffers should be refilled + * + * Returns: 0 on success, 1 otherwise + * + * if there are 16 or more status_read requests missing an adapter_reopen + * is triggered + */ +int zfcp_status_read_refill(struct zfcp_adapter *adapter) +{ + while (atomic_read(&adapter->stat_miss) > 0) + if (zfcp_fsf_status_read(adapter)) { + if (atomic_read(&adapter->stat_miss) >= 16) { + zfcp_erp_adapter_reopen(adapter, 0, 103, NULL); + return 1; + } + break; + } else + atomic_dec(&adapter->stat_miss); + return 0; +} + +static void _zfcp_status_read_scheduler(struct work_struct *work) +{ + zfcp_status_read_refill(container_of(work, struct zfcp_adapter, + stat_work)); +} + +static int zfcp_nameserver_enqueue(struct zfcp_adapter *adapter) +{ + struct zfcp_port *port; + + port = zfcp_port_enqueue(adapter, 0, ZFCP_STATUS_PORT_WKA, + ZFCP_DID_DIRECTORY_SERVICE); + if (IS_ERR(port)) + return PTR_ERR(port); + zfcp_port_put(port); + + return 0; +} + +/** + * zfcp_adapter_enqueue - enqueue a new adapter to the list + * @ccw_device: pointer to the struct cc_device + * + * Returns: 0 if a new adapter was successfully enqueued + * -ENOMEM if alloc failed * Enqueues an adapter at the end of the adapter list in the driver 
data. * All adapter internal structures are set up. * Proc-fs entries are also created. - * - * returns: 0 if a new adapter was successfully enqueued - * ZFCP_KNOWN if an adapter with this devno was already present - * -ENOMEM if alloc failed * locks: config_sema must be held to serialise changes to the adapter list */ -struct zfcp_adapter * -zfcp_adapter_enqueue(struct ccw_device *ccw_device) +int zfcp_adapter_enqueue(struct ccw_device *ccw_device) { - int retval = 0; struct zfcp_adapter *adapter; /* @@ -983,85 +483,58 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device) * are protected by the config_sema, which must be held to get here */ - /* try to allocate new adapter data structure (zeroed) */ - adapter = kzalloc(sizeof (struct zfcp_adapter), GFP_KERNEL); - if (!adapter) { - ZFCP_LOG_INFO("error: allocation of base adapter " - "structure failed\n"); - goto out; - } + adapter = kzalloc(sizeof(struct zfcp_adapter), GFP_KERNEL); + if (!adapter) + return -ENOMEM; ccw_device->handler = NULL; - - /* save ccw_device pointer */ adapter->ccw_device = ccw_device; + atomic_set(&adapter->refcount, 0); - retval = zfcp_qdio_allocate_queues(adapter); - if (retval) - goto queues_alloc_failed; - - retval = zfcp_qdio_allocate(adapter); - if (retval) + if (zfcp_qdio_allocate(adapter)) goto qdio_allocate_failed; - retval = zfcp_allocate_low_mem_buffers(adapter); - if (retval) { - ZFCP_LOG_INFO("error: pool allocation failed\n"); + if (zfcp_allocate_low_mem_buffers(adapter)) goto failed_low_mem_buffers; - } - /* initialise reference count stuff */ - atomic_set(&adapter->refcount, 0); + if (zfcp_reqlist_alloc(adapter)) + goto failed_low_mem_buffers; + + if (zfcp_adapter_debug_register(adapter)) + goto debug_register_failed; + init_waitqueue_head(&adapter->remove_wq); + init_waitqueue_head(&adapter->erp_thread_wqh); + init_waitqueue_head(&adapter->erp_done_wqh); - /* initialise list of ports */ INIT_LIST_HEAD(&adapter->port_list_head); - - /* initialise list of ports to be removed */ INIT_LIST_HEAD(&adapter->port_remove_lh); + INIT_LIST_HEAD(&adapter->erp_ready_head); + INIT_LIST_HEAD(&adapter->erp_running_head); - /* initialize list of fsf requests */ spin_lock_init(&adapter->req_list_lock); - retval = zfcp_reqlist_alloc(adapter); - if (retval) { - ZFCP_LOG_INFO("request list initialization failed\n"); - goto failed_low_mem_buffers; - } - - /* initialize debug locks */ spin_lock_init(&adapter->hba_dbf_lock); spin_lock_init(&adapter->san_dbf_lock); spin_lock_init(&adapter->scsi_dbf_lock); spin_lock_init(&adapter->rec_dbf_lock); - - retval = zfcp_adapter_debug_register(adapter); - if (retval) - goto debug_register_failed; - - /* initialize error recovery stuff */ + spin_lock_init(&adapter->req_q.lock); rwlock_init(&adapter->erp_lock); - sema_init(&adapter->erp_ready_sem, 0); - INIT_LIST_HEAD(&adapter->erp_ready_head); - INIT_LIST_HEAD(&adapter->erp_running_head); - - /* initialize abort lock */ rwlock_init(&adapter->abort_lock); - /* initialise some erp stuff */ - init_waitqueue_head(&adapter->erp_thread_wqh); - init_waitqueue_head(&adapter->erp_done_wqh); + sema_init(&adapter->erp_ready_sem, 0); - /* initialize lock of associated request queue */ - rwlock_init(&adapter->request_queue.queue_lock); + INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler); + INIT_WORK(&adapter->scan_work, _zfcp_scan_ports_later); /* mark adapter unusable as long as sysfs registration is not complete */ atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status); dev_set_drvdata(&ccw_device->dev, adapter); - if 
(zfcp_sysfs_adapter_create_files(&ccw_device->dev)) + if (sysfs_create_group(&ccw_device->dev.kobj, + &zfcp_sysfs_adapter_attrs)) goto sysfs_failed; adapter->generic_services.parent = &adapter->ccw_device->dev; @@ -1072,7 +545,6 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device) if (device_register(&adapter->generic_services)) goto generic_services_failed; - /* put allocated adapter at list tail */ write_lock_irq(&zfcp_data.config_lock); atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status); list_add_tail(&adapter->list, &zfcp_data.adapter_list_head); @@ -1080,57 +552,49 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device) zfcp_data.adapters++; - goto out; + zfcp_nameserver_enqueue(adapter); + + return 0; - generic_services_failed: - zfcp_sysfs_adapter_remove_files(&adapter->ccw_device->dev); - sysfs_failed: +generic_services_failed: + sysfs_remove_group(&ccw_device->dev.kobj, + &zfcp_sysfs_adapter_attrs); +sysfs_failed: zfcp_adapter_debug_unregister(adapter); - debug_register_failed: +debug_register_failed: dev_set_drvdata(&ccw_device->dev, NULL); - zfcp_reqlist_free(adapter); - failed_low_mem_buffers: + kfree(adapter->req_list); +failed_low_mem_buffers: zfcp_free_low_mem_buffers(adapter); - if (qdio_free(ccw_device) != 0) - ZFCP_LOG_NORMAL("bug: qdio_free for adapter %s failed\n", - zfcp_get_busid_by_adapter(adapter)); - qdio_allocate_failed: - zfcp_qdio_free_queues(adapter); - queues_alloc_failed: +qdio_allocate_failed: + zfcp_qdio_free(adapter); kfree(adapter); - adapter = NULL; - out: - return adapter; + return -ENOMEM; } -/* - * returns: 0 - struct zfcp_adapter data structure successfully removed - * !0 - struct zfcp_adapter data structure could not be removed - * (e.g. still used) +/** + * zfcp_adapter_dequeue - remove the adapter from the resource list + * @adapter: pointer to struct zfcp_adapter which should be removed * locks: adapter list write lock is assumed to be held by caller */ -void -zfcp_adapter_dequeue(struct zfcp_adapter *adapter) +void zfcp_adapter_dequeue(struct zfcp_adapter *adapter) { int retval = 0; unsigned long flags; + cancel_work_sync(&adapter->scan_work); + cancel_work_sync(&adapter->stat_work); zfcp_adapter_scsi_unregister(adapter); device_unregister(&adapter->generic_services); - zfcp_sysfs_adapter_remove_files(&adapter->ccw_device->dev); + sysfs_remove_group(&adapter->ccw_device->dev.kobj, + &zfcp_sysfs_adapter_attrs); dev_set_drvdata(&adapter->ccw_device->dev, NULL); /* sanity check: no pending FSF requests */ spin_lock_irqsave(&adapter->req_list_lock, flags); retval = zfcp_reqlist_isempty(adapter); spin_unlock_irqrestore(&adapter->req_list_lock, flags); - if (!retval) { - ZFCP_LOG_NORMAL("bug: adapter %s (%p) still in use, " - "%i requests outstanding\n", - zfcp_get_busid_by_adapter(adapter), adapter, - atomic_read(&adapter->reqs_active)); - retval = -EBUSY; - goto out; - } + if (!retval) + return; zfcp_adapter_debug_unregister(adapter); @@ -1142,26 +606,18 @@ zfcp_adapter_dequeue(struct zfcp_adapter *adapter) /* decrease number of adapters in list */ zfcp_data.adapters--; - ZFCP_LOG_TRACE("adapter %s (%p) removed from list, " - "%i adapters still in list\n", - zfcp_get_busid_by_adapter(adapter), - adapter, zfcp_data.adapters); - - retval = qdio_free(adapter->ccw_device); - if (retval) - ZFCP_LOG_NORMAL("bug: qdio_free for adapter %s failed\n", - zfcp_get_busid_by_adapter(adapter)); + zfcp_qdio_free(adapter); zfcp_free_low_mem_buffers(adapter); - /* free memory of adapter data structure and queues */ - zfcp_qdio_free_queues(adapter); - 
zfcp_reqlist_free(adapter); + kfree(adapter->req_list); kfree(adapter->fc_stats); kfree(adapter->stats_reset_data); - ZFCP_LOG_TRACE("freeing adapter structure\n"); kfree(adapter); - out: - return; +} + +static void zfcp_sysfs_port_release(struct device *dev) +{ + kfree(container_of(dev, struct zfcp_port, sysfs_device)); } /** @@ -1170,98 +626,90 @@ zfcp_adapter_dequeue(struct zfcp_adapter *adapter) * @wwpn: WWPN of the remote port to be enqueued * @status: initial status for the port * @d_id: destination id of the remote port to be enqueued - * Return: pointer to enqueued port on success, NULL on error + * Returns: pointer to enqueued port on success, ERR_PTR on error * Locks: config_sema must be held to serialize changes to the port list * * All port internal structures are set up and the sysfs entry is generated. * d_id is used to enqueue ports with a well known address like the Directory * Service for nameserver lookup. */ -struct zfcp_port * -zfcp_port_enqueue(struct zfcp_adapter *adapter, wwn_t wwpn, u32 status, - u32 d_id) +struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, wwn_t wwpn, + u32 status, u32 d_id) { struct zfcp_port *port; - int check_wwpn; - - check_wwpn = !(status & ZFCP_STATUS_PORT_NO_WWPN); - /* - * check that there is no port with this WWPN already in list - */ - if (check_wwpn) { - read_lock_irq(&zfcp_data.config_lock); - port = zfcp_get_port_by_wwpn(adapter, wwpn); - read_unlock_irq(&zfcp_data.config_lock); - if (port) - return NULL; - } + int retval; + char *bus_id; - port = kzalloc(sizeof (struct zfcp_port), GFP_KERNEL); + port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL); if (!port) - return NULL; + return ERR_PTR(-ENOMEM); - /* initialise reference count stuff */ - atomic_set(&port->refcount, 0); init_waitqueue_head(&port->remove_wq); INIT_LIST_HEAD(&port->unit_list_head); INIT_LIST_HEAD(&port->unit_remove_lh); port->adapter = adapter; + port->d_id = d_id; + port->wwpn = wwpn; - if (check_wwpn) - port->wwpn = wwpn; - - atomic_set_mask(status, &port->status); + /* mark port unusable as long as sysfs registration is not complete */ + atomic_set_mask(status | ZFCP_STATUS_COMMON_REMOVE, &port->status); + atomic_set(&port->refcount, 0); - /* setup for sysfs registration */ if (status & ZFCP_STATUS_PORT_WKA) { switch (d_id) { case ZFCP_DID_DIRECTORY_SERVICE: - snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE, - "directory"); + bus_id = "directory"; break; case ZFCP_DID_MANAGEMENT_SERVICE: - snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE, - "management"); + bus_id = "management"; break; case ZFCP_DID_KEY_DISTRIBUTION_SERVICE: - snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE, - "key_distribution"); + bus_id = "key_distribution"; break; case ZFCP_DID_ALIAS_SERVICE: - snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE, - "alias"); + bus_id = "alias"; break; case ZFCP_DID_TIME_SERVICE: - snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE, - "time"); + bus_id = "time"; break; default: kfree(port); - return NULL; + return ERR_PTR(-EINVAL); } - port->d_id = d_id; + snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE, "%s", bus_id); port->sysfs_device.parent = &adapter->generic_services; } else { snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE, "0x%016llx", wwpn); port->sysfs_device.parent = &adapter->ccw_device->dev; } + port->sysfs_device.release = zfcp_sysfs_port_release; dev_set_drvdata(&port->sysfs_device, port); - /* mark port unusable as long as sysfs registration is not complete */ - atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status); + 
read_lock_irq(&zfcp_data.config_lock); + if (!(status & ZFCP_STATUS_PORT_NO_WWPN)) + if (zfcp_get_port_by_wwpn(adapter, wwpn)) { + read_unlock_irq(&zfcp_data.config_lock); + goto err_out_free; + } + read_unlock_irq(&zfcp_data.config_lock); - if (device_register(&port->sysfs_device)) { - kfree(port); - return NULL; - } + if (device_register(&port->sysfs_device)) + goto err_out_free; + + if (status & ZFCP_STATUS_PORT_WKA) + retval = sysfs_create_group(&port->sysfs_device.kobj, + &zfcp_sysfs_ns_port_attrs); + else + retval = sysfs_create_group(&port->sysfs_device.kobj, + &zfcp_sysfs_port_attrs); - if (zfcp_sysfs_port_create_files(&port->sysfs_device, status)) { + if (retval) { device_unregister(&port->sysfs_device); - return NULL; + goto err_out; } zfcp_port_get(port); @@ -1274,15 +722,23 @@ zfcp_port_enqueue(struct zfcp_adapter *adapter, wwn_t wwpn, u32 status, if (!adapter->nameserver_port) adapter->nameserver_port = port; adapter->ports++; + write_unlock_irq(&zfcp_data.config_lock); zfcp_adapter_get(adapter); - return port; + +err_out_free: + kfree(port); +err_out: + return ERR_PTR(-EINVAL); } -void -zfcp_port_dequeue(struct zfcp_port *port) +/** + * zfcp_port_dequeue - dequeues a port from the port list of the adapter + * @port: pointer to struct zfcp_port which should be removed + */ +void zfcp_port_dequeue(struct zfcp_port *port) { zfcp_port_wait(port); write_lock_irq(&zfcp_data.config_lock); @@ -1293,546 +749,53 @@ zfcp_port_dequeue(struct zfcp_port *port) fc_remote_port_delete(port->rport); port->rport = NULL; zfcp_adapter_put(port->adapter); - zfcp_sysfs_port_remove_files(&port->sysfs_device, - atomic_read(&port->status)); - device_unregister(&port->sysfs_device); -} - -/* Enqueues a nameserver port */ -int -zfcp_nameserver_enqueue(struct zfcp_adapter *adapter) -{ - struct zfcp_port *port; - - port = zfcp_port_enqueue(adapter, 0, ZFCP_STATUS_PORT_WKA, - ZFCP_DID_DIRECTORY_SERVICE); - if (!port) { - ZFCP_LOG_INFO("error: enqueue of nameserver port for " - "adapter %s failed\n", - zfcp_get_busid_by_adapter(adapter)); - return -ENXIO; - } - zfcp_port_put(port); - - return 0; -} - -#undef ZFCP_LOG_AREA - -/****************************************************************/ -/******* Fibre Channel Standard related Functions **************/ -/****************************************************************/ - -#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FC - -static void zfcp_fsf_incoming_els_rscn(struct zfcp_fsf_req *fsf_req) -{ - struct fsf_status_read_buffer *status_buffer = (void*)fsf_req->data; - struct zfcp_adapter *adapter = fsf_req->adapter; - struct fcp_rscn_head *fcp_rscn_head; - struct fcp_rscn_element *fcp_rscn_element; - struct zfcp_port *port; - u16 i; - u16 no_entries; - u32 range_mask; - unsigned long flags; - - fcp_rscn_head = (struct fcp_rscn_head *) status_buffer->payload; - fcp_rscn_element = (struct fcp_rscn_element *) status_buffer->payload; - - /* see FC-FS */ - no_entries = (fcp_rscn_head->payload_len / 4); - - for (i = 1; i < no_entries; i++) { - /* skip head and start with 1st element */ - fcp_rscn_element++; - switch (fcp_rscn_element->addr_format) { - case ZFCP_PORT_ADDRESS: - range_mask = ZFCP_PORTS_RANGE_PORT; - break; - case ZFCP_AREA_ADDRESS: - range_mask = ZFCP_PORTS_RANGE_AREA; - break; - case ZFCP_DOMAIN_ADDRESS: - range_mask = ZFCP_PORTS_RANGE_DOMAIN; - break; - case ZFCP_FABRIC_ADDRESS: - range_mask = ZFCP_PORTS_RANGE_FABRIC; - break; - default: - ZFCP_LOG_INFO("incoming RSCN with unknown " - "address format\n"); - continue; - } - 
read_lock_irqsave(&zfcp_data.config_lock, flags); - list_for_each_entry(port, &adapter->port_list_head, list) { - if (atomic_test_mask - (ZFCP_STATUS_PORT_WKA, &port->status)) - continue; - /* Do we know this port? If not skip it. */ - if (!atomic_test_mask - (ZFCP_STATUS_PORT_DID_DID, &port->status)) { - ZFCP_LOG_INFO("incoming RSCN, trying to open " - "port 0x%016Lx\n", port->wwpn); - zfcp_erp_port_reopen(port, - ZFCP_STATUS_COMMON_ERP_FAILED, - 82, fsf_req); - continue; - } - - /* - * FIXME: race: d_id might being invalidated - * (...DID_DID reset) - */ - if ((port->d_id & range_mask) - == (fcp_rscn_element->nport_did & range_mask)) { - ZFCP_LOG_TRACE("reopen did 0x%08x\n", - fcp_rscn_element->nport_did); - /* - * Unfortunately, an RSCN does not specify the - * type of change a target underwent. We assume - * that it makes sense to reopen the link. - * FIXME: Shall we try to find out more about - * the target and link state before closing it? - * How to accomplish this? (nameserver?) - * Where would such code be put in? - * (inside or outside erp) - */ - ZFCP_LOG_INFO("incoming RSCN, trying to open " - "port 0x%016Lx\n", port->wwpn); - zfcp_test_link(port); - } - } - read_unlock_irqrestore(&zfcp_data.config_lock, flags); - } -} - -static void zfcp_fsf_incoming_els_plogi(struct zfcp_fsf_req *fsf_req) -{ - struct fsf_status_read_buffer *status_buffer = (void*)fsf_req->data; - struct zfcp_adapter *adapter = fsf_req->adapter; - struct fsf_plogi *els_plogi; - struct zfcp_port *port; - unsigned long flags; - - els_plogi = (struct fsf_plogi *) status_buffer->payload; - read_lock_irqsave(&zfcp_data.config_lock, flags); - list_for_each_entry(port, &adapter->port_list_head, list) { - if (port->wwpn == (*(wwn_t *) &els_plogi->serv_param.wwpn)) - break; - } - read_unlock_irqrestore(&zfcp_data.config_lock, flags); - - if (!port || (port->wwpn != (*(wwn_t *) &els_plogi->serv_param.wwpn))) { - ZFCP_LOG_DEBUG("ignored incoming PLOGI for nonexisting port " - "with d_id 0x%06x on adapter %s\n", - status_buffer->d_id, - zfcp_get_busid_by_adapter(adapter)); - } else { - zfcp_erp_port_forced_reopen(port, 0, 83, fsf_req); - } -} - -static void zfcp_fsf_incoming_els_logo(struct zfcp_fsf_req *fsf_req) -{ - struct fsf_status_read_buffer *status_buffer = (void*)fsf_req->data; - struct zfcp_adapter *adapter = fsf_req->adapter; - struct fcp_logo *els_logo = (struct fcp_logo *) status_buffer->payload; - struct zfcp_port *port; - unsigned long flags; - - read_lock_irqsave(&zfcp_data.config_lock, flags); - list_for_each_entry(port, &adapter->port_list_head, list) { - if (port->wwpn == els_logo->nport_wwpn) - break; - } - read_unlock_irqrestore(&zfcp_data.config_lock, flags); - - if (!port || (port->wwpn != els_logo->nport_wwpn)) { - ZFCP_LOG_DEBUG("ignored incoming LOGO for nonexisting port " - "with d_id 0x%06x on adapter %s\n", - status_buffer->d_id, - zfcp_get_busid_by_adapter(adapter)); - } else { - zfcp_erp_port_forced_reopen(port, 0, 84, fsf_req); - } -} - -static void -zfcp_fsf_incoming_els_unknown(struct zfcp_adapter *adapter, - struct fsf_status_read_buffer *status_buffer) -{ - ZFCP_LOG_NORMAL("warning: unknown incoming ELS 0x%08x " - "for adapter %s\n", *(u32 *) (status_buffer->payload), - zfcp_get_busid_by_adapter(adapter)); - -} - -void -zfcp_fsf_incoming_els(struct zfcp_fsf_req *fsf_req) -{ - struct fsf_status_read_buffer *status_buffer; - u32 els_type; - struct zfcp_adapter *adapter; - - status_buffer = (struct fsf_status_read_buffer *) fsf_req->data; - els_type = *(u32 *) (status_buffer->payload); - 
adapter = fsf_req->adapter; - - zfcp_san_dbf_event_incoming_els(fsf_req); - if (els_type == LS_PLOGI) - zfcp_fsf_incoming_els_plogi(fsf_req); - else if (els_type == LS_LOGO) - zfcp_fsf_incoming_els_logo(fsf_req); - else if ((els_type & 0xffff0000) == LS_RSCN) - /* we are only concerned with the command, not the length */ - zfcp_fsf_incoming_els_rscn(fsf_req); - else - zfcp_fsf_incoming_els_unknown(adapter, status_buffer); -} - - -/** - * zfcp_gid_pn_buffers_alloc - allocate buffers for GID_PN nameserver request - * @gid_pn: pointer to return pointer to struct zfcp_gid_pn_data - * @pool: pointer to mempool_t if non-null memory pool is used for allocation - */ -static int -zfcp_gid_pn_buffers_alloc(struct zfcp_gid_pn_data **gid_pn, mempool_t *pool) -{ - struct zfcp_gid_pn_data *data; - - if (pool != NULL) { - data = mempool_alloc(pool, GFP_ATOMIC); - if (likely(data != NULL)) { - data->ct.pool = pool; - } - } else { - data = kmem_cache_alloc(zfcp_data.gid_pn_cache, GFP_ATOMIC); - } - - if (NULL == data) - return -ENOMEM; - - memset(data, 0, sizeof(*data)); - sg_init_table(&data->req , 1); - sg_init_table(&data->resp , 1); - data->ct.req = &data->req; - data->ct.resp = &data->resp; - data->ct.req_count = data->ct.resp_count = 1; - zfcp_address_to_sg(&data->ct_iu_req, &data->req, sizeof(struct ct_iu_gid_pn_req)); - zfcp_address_to_sg(&data->ct_iu_resp, &data->resp, sizeof(struct ct_iu_gid_pn_resp)); - - *gid_pn = data; - return 0; -} - -/** - * zfcp_gid_pn_buffers_free - free buffers for GID_PN nameserver request - * @gid_pn: pointer to struct zfcp_gid_pn_data which has to be freed - */ -static void zfcp_gid_pn_buffers_free(struct zfcp_gid_pn_data *gid_pn) -{ - if (gid_pn->ct.pool) - mempool_free(gid_pn, gid_pn->ct.pool); + if (atomic_read(&port->status) & ZFCP_STATUS_PORT_WKA) + sysfs_remove_group(&port->sysfs_device.kobj, + &zfcp_sysfs_ns_port_attrs); else - kmem_cache_free(zfcp_data.gid_pn_cache, gid_pn); -} - -/** - * zfcp_ns_gid_pn_request - initiate GID_PN nameserver request - * @erp_action: pointer to zfcp_erp_action where GID_PN request is needed - */ -int -zfcp_ns_gid_pn_request(struct zfcp_erp_action *erp_action) -{ - int ret; - struct ct_iu_gid_pn_req *ct_iu_req; - struct zfcp_gid_pn_data *gid_pn; - struct zfcp_adapter *adapter = erp_action->adapter; - - ret = zfcp_gid_pn_buffers_alloc(&gid_pn, adapter->pool.data_gid_pn); - if (ret < 0) { - ZFCP_LOG_INFO("error: buffer allocation for gid_pn nameserver " - "request failed for adapter %s\n", - zfcp_get_busid_by_adapter(adapter)); - goto out; - } - - /* setup nameserver request */ - ct_iu_req = zfcp_sg_to_address(gid_pn->ct.req); - ct_iu_req->header.revision = ZFCP_CT_REVISION; - ct_iu_req->header.gs_type = ZFCP_CT_DIRECTORY_SERVICE; - ct_iu_req->header.gs_subtype = ZFCP_CT_NAME_SERVER; - ct_iu_req->header.options = ZFCP_CT_SYNCHRONOUS; - ct_iu_req->header.cmd_rsp_code = ZFCP_CT_GID_PN; - ct_iu_req->header.max_res_size = ZFCP_CT_MAX_SIZE; - ct_iu_req->wwpn = erp_action->port->wwpn; - - /* setup parameters for send generic command */ - gid_pn->ct.port = adapter->nameserver_port; - gid_pn->ct.handler = zfcp_ns_gid_pn_handler; - gid_pn->ct.handler_data = (unsigned long) gid_pn; - gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT; - gid_pn->port = erp_action->port; - - ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.fsf_req_erp, - erp_action); - if (ret) { - ZFCP_LOG_INFO("error: initiation of gid_pn nameserver request " - "failed for adapter %s\n", - zfcp_get_busid_by_adapter(adapter)); - - zfcp_gid_pn_buffers_free(gid_pn); - } - - out: - 
return ret; -} - -/** - * zfcp_ns_gid_pn_handler - handler for GID_PN nameserver request - * @data: unsigned long, contains pointer to struct zfcp_gid_pn_data - */ -static void zfcp_ns_gid_pn_handler(unsigned long data) -{ - struct zfcp_port *port; - struct zfcp_send_ct *ct; - struct ct_iu_gid_pn_req *ct_iu_req; - struct ct_iu_gid_pn_resp *ct_iu_resp; - struct zfcp_gid_pn_data *gid_pn; - - - gid_pn = (struct zfcp_gid_pn_data *) data; - port = gid_pn->port; - ct = &gid_pn->ct; - ct_iu_req = zfcp_sg_to_address(ct->req); - ct_iu_resp = zfcp_sg_to_address(ct->resp); - - if (ct->status != 0) - goto failed; - - if (zfcp_check_ct_response(&ct_iu_resp->header)) { - /* FIXME: do we need some specific erp entry points */ - atomic_set_mask(ZFCP_STATUS_PORT_INVALID_WWPN, &port->status); - goto failed; - } - /* paranoia */ - if (ct_iu_req->wwpn != port->wwpn) { - ZFCP_LOG_NORMAL("bug: wwpn 0x%016Lx returned by nameserver " - "lookup does not match expected wwpn 0x%016Lx " - "for adapter %s\n", ct_iu_req->wwpn, port->wwpn, - zfcp_get_busid_by_port(port)); - goto mismatch; - } - - /* looks like a valid d_id */ - port->d_id = ct_iu_resp->d_id & ZFCP_DID_MASK; - atomic_set_mask(ZFCP_STATUS_PORT_DID_DID, &port->status); - ZFCP_LOG_DEBUG("adapter %s: wwpn=0x%016Lx ---> d_id=0x%06x\n", - zfcp_get_busid_by_port(port), port->wwpn, port->d_id); - goto out; - - mismatch: - ZFCP_LOG_DEBUG("CT IUs do not match:\n"); - ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, (char *) ct_iu_req, - sizeof(struct ct_iu_gid_pn_req)); - ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, (char *) ct_iu_resp, - sizeof(struct ct_iu_gid_pn_resp)); - - failed: - ZFCP_LOG_NORMAL("warning: failed gid_pn nameserver request for wwpn " - "0x%016Lx for adapter %s\n", - port->wwpn, zfcp_get_busid_by_port(port)); - out: - zfcp_gid_pn_buffers_free(gid_pn); - return; + sysfs_remove_group(&port->sysfs_device.kobj, + &zfcp_sysfs_port_attrs); + device_unregister(&port->sysfs_device); } -/* reject CT_IU reason codes acc. to FC-GS-4 */ -static const struct zfcp_rc_entry zfcp_ct_rc[] = { - {0x01, "invalid command code"}, - {0x02, "invalid version level"}, - {0x03, "logical error"}, - {0x04, "invalid CT_IU size"}, - {0x05, "logical busy"}, - {0x07, "protocol error"}, - {0x09, "unable to perform command request"}, - {0x0b, "command not supported"}, - {0x0d, "server not available"}, - {0x0e, "session could not be established"}, - {0xff, "vendor specific error"}, - {0, NULL}, -}; - -/* LS_RJT reason codes acc. 
to FC-FS */ -static const struct zfcp_rc_entry zfcp_ls_rjt_rc[] = { - {0x01, "invalid LS_Command code"}, - {0x03, "logical error"}, - {0x05, "logical busy"}, - {0x07, "protocol error"}, - {0x09, "unable to perform command request"}, - {0x0b, "command not supported"}, - {0x0e, "command already in progress"}, - {0xff, "vendor specific error"}, - {0, NULL}, -}; - -/* reject reason codes according to FC-PH/FC-FS */ -static const struct zfcp_rc_entry zfcp_p_rjt_rc[] = { - {0x01, "invalid D_ID"}, - {0x02, "invalid S_ID"}, - {0x03, "Nx_Port not available, temporary"}, - {0x04, "Nx_Port not available, permament"}, - {0x05, "class not supported"}, - {0x06, "delimiter usage error"}, - {0x07, "TYPE not supported"}, - {0x08, "invalid Link_Control"}, - {0x09, "invalid R_CTL field"}, - {0x0a, "invalid F_CTL field"}, - {0x0b, "invalid OX_ID"}, - {0x0c, "invalid RX_ID"}, - {0x0d, "invalid SEQ_ID"}, - {0x0e, "invalid DF_CTL"}, - {0x0f, "invalid SEQ_CNT"}, - {0x10, "invalid parameter field"}, - {0x11, "exchange error"}, - {0x12, "protocol error"}, - {0x13, "incorrect length"}, - {0x14, "unsupported ACK"}, - {0x15, "class of service not supported by entity at FFFFFE"}, - {0x16, "login required"}, - {0x17, "excessive sequences attempted"}, - {0x18, "unable to establish exchange"}, - {0x1a, "fabric path not available"}, - {0x1b, "invalid VC_ID (class 4)"}, - {0x1c, "invalid CS_CTL field"}, - {0x1d, "insufficient resources for VC (class 4)"}, - {0x1f, "invalid class of service"}, - {0x20, "preemption request rejected"}, - {0x21, "preemption not enabled"}, - {0x22, "multicast error"}, - {0x23, "multicast error terminate"}, - {0x24, "process login required"}, - {0xff, "vendor specific reject"}, - {0, NULL}, -}; - /** - * zfcp_rc_description - return description for given reaon code - * @code: reason code - * @rc_table: table of reason codes and descriptions + * zfcp_sg_free_table - free memory used by scatterlists + * @sg: pointer to scatterlist + * @count: number of scatterlist which are to be free'ed + * the scatterlist are expected to reference pages always */ -static const char * -zfcp_rc_description(u8 code, const struct zfcp_rc_entry *rc_table) +void zfcp_sg_free_table(struct scatterlist *sg, int count) { - const char *descr = "unknown reason code"; + int i; - do { - if (code == rc_table->code) { - descr = rc_table->description; + for (i = 0; i < count; i++, sg++) + if (sg) + free_page((unsigned long) sg_virt(sg)); + else break; - } - rc_table++; - } while (rc_table->code && rc_table->description); - - return descr; } /** - * zfcp_check_ct_response - evaluate reason code for CT_IU - * @rjt: response payload to an CT_IU request - * Return: 0 for accept CT_IU, 1 for reject CT_IU or invlid response code + * zfcp_sg_setup_table - init scatterlist and allocate, assign buffers + * @sg: pointer to struct scatterlist + * @count: number of scatterlists which should be assigned with buffers + * of size page + * + * Returns: 0 on success, -ENOMEM otherwise */ -int -zfcp_check_ct_response(struct ct_hdr *rjt) +int zfcp_sg_setup_table(struct scatterlist *sg, int count) { - if (rjt->cmd_rsp_code == ZFCP_CT_ACCEPT) - return 0; + void *addr; + int i; - if (rjt->cmd_rsp_code != ZFCP_CT_REJECT) { - ZFCP_LOG_NORMAL("error: invalid Generic Service command/" - "response code (0x%04hx)\n", - rjt->cmd_rsp_code); - return 1; + sg_init_table(sg, count); + for (i = 0; i < count; i++, sg++) { + addr = (void *) get_zeroed_page(GFP_KERNEL); + if (!addr) { + zfcp_sg_free_table(sg, i); + return -ENOMEM; + } + sg_set_buf(sg, addr, 
PAGE_SIZE); } - - ZFCP_LOG_INFO("Generic Service command rejected\n"); - ZFCP_LOG_INFO("%s (0x%02x, 0x%02x, 0x%02x)\n", - zfcp_rc_description(rjt->reason_code, zfcp_ct_rc), - (u32) rjt->reason_code, (u32) rjt->reason_code_expl, - (u32) rjt->vendor_unique); - - return 1; -} - -/** - * zfcp_print_els_rjt - print reject parameter and description for ELS reject - * @rjt_par: reject parameter acc. to FC-PH/FC-FS - * @rc_table: table of reason codes and descriptions - */ -static void -zfcp_print_els_rjt(struct zfcp_ls_rjt_par *rjt_par, - const struct zfcp_rc_entry *rc_table) -{ - ZFCP_LOG_INFO("%s (%02x %02x %02x %02x)\n", - zfcp_rc_description(rjt_par->reason_code, rc_table), - (u32) rjt_par->action, (u32) rjt_par->reason_code, - (u32) rjt_par->reason_expl, (u32) rjt_par->vendor_unique); -} - -/** - * zfcp_fsf_handle_els_rjt - evaluate status qualifier/reason code on ELS reject - * @sq: status qualifier word - * @rjt_par: reject parameter as described in FC-PH and FC-FS - * Return: -EROMTEIO for LS_RJT, -EREMCHG for invalid D_ID, -EIO else - */ -int -zfcp_handle_els_rjt(u32 sq, struct zfcp_ls_rjt_par *rjt_par) -{ - int ret = -EIO; - - if (sq == FSF_IOSTAT_NPORT_RJT) { - ZFCP_LOG_INFO("ELS rejected (P_RJT)\n"); - zfcp_print_els_rjt(rjt_par, zfcp_p_rjt_rc); - /* invalid d_id */ - if (rjt_par->reason_code == 0x01) - ret = -EREMCHG; - } else if (sq == FSF_IOSTAT_FABRIC_RJT) { - ZFCP_LOG_INFO("ELS rejected (F_RJT)\n"); - zfcp_print_els_rjt(rjt_par, zfcp_p_rjt_rc); - /* invalid d_id */ - if (rjt_par->reason_code == 0x01) - ret = -EREMCHG; - } else if (sq == FSF_IOSTAT_LS_RJT) { - ZFCP_LOG_INFO("ELS rejected (LS_RJT)\n"); - zfcp_print_els_rjt(rjt_par, zfcp_ls_rjt_rc); - ret = -EREMOTEIO; - } else - ZFCP_LOG_INFO("unexpected SQ: 0x%02x\n", sq); - - return ret; -} - -/** - * zfcp_plogi_evaluate - evaluate PLOGI playload and copy important fields - * into zfcp_port structure - * @port: zfcp_port structure - * @plogi: plogi payload - */ -void -zfcp_plogi_evaluate(struct zfcp_port *port, struct fsf_plogi *plogi) -{ - port->maxframe_size = plogi->serv_param.common_serv_param[7] | - ((plogi->serv_param.common_serv_param[6] & 0x0F) << 8); - if (plogi->serv_param.class1_serv_param[0] & 0x80) - port->supported_classes |= FC_COS_CLASS1; - if (plogi->serv_param.class2_serv_param[0] & 0x80) - port->supported_classes |= FC_COS_CLASS2; - if (plogi->serv_param.class3_serv_param[0] & 0x80) - port->supported_classes |= FC_COS_CLASS3; - if (plogi->serv_param.class4_serv_param[0] & 0x80) - port->supported_classes |= FC_COS_CLASS4; + return 0; } - -#undef ZFCP_LOG_AREA diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c index 66d3b88844b0..391dd29749f8 100644 --- a/drivers/s390/scsi/zfcp_ccw.c +++ b/drivers/s390/scsi/zfcp_ccw.c @@ -1,64 +1,13 @@ /* - * This file is part of the zfcp device driver for - * FCP adapters for IBM System z9 and zSeries. + * zfcp device driver * - * (C) Copyright IBM Corp. 2002, 2006 + * Registration and callback for the s390 common I/O layer. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * Copyright IBM Corporation 2002, 2008 */ #include "zfcp_ext.h" -#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG - -static int zfcp_ccw_probe(struct ccw_device *); -static void zfcp_ccw_remove(struct ccw_device *); -static int zfcp_ccw_set_online(struct ccw_device *); -static int zfcp_ccw_set_offline(struct ccw_device *); -static int zfcp_ccw_notify(struct ccw_device *, int); -static void zfcp_ccw_shutdown(struct ccw_device *); - -static struct ccw_device_id zfcp_ccw_device_id[] = { - {CCW_DEVICE_DEVTYPE(ZFCP_CONTROL_UNIT_TYPE, - ZFCP_CONTROL_UNIT_MODEL, - ZFCP_DEVICE_TYPE, - ZFCP_DEVICE_MODEL)}, - {CCW_DEVICE_DEVTYPE(ZFCP_CONTROL_UNIT_TYPE, - ZFCP_CONTROL_UNIT_MODEL, - ZFCP_DEVICE_TYPE, - ZFCP_DEVICE_MODEL_PRIV)}, - {}, -}; - -static struct ccw_driver zfcp_ccw_driver = { - .owner = THIS_MODULE, - .name = ZFCP_NAME, - .ids = zfcp_ccw_device_id, - .probe = zfcp_ccw_probe, - .remove = zfcp_ccw_remove, - .set_online = zfcp_ccw_set_online, - .set_offline = zfcp_ccw_set_offline, - .notify = zfcp_ccw_notify, - .shutdown = zfcp_ccw_shutdown, - .driver = { - .groups = zfcp_driver_attr_groups, - }, -}; - -MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id); - /** * zfcp_ccw_probe - probe function of zfcp driver * @ccw_device: pointer to belonging ccw device @@ -69,19 +18,16 @@ MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id); * In addition the nameserver port will be added to the ports of the adapter * and its sysfs representation will be created too. */ -static int -zfcp_ccw_probe(struct ccw_device *ccw_device) +static int zfcp_ccw_probe(struct ccw_device *ccw_device) { - struct zfcp_adapter *adapter; int retval = 0; down(&zfcp_data.config_sema); - adapter = zfcp_adapter_enqueue(ccw_device); - if (!adapter) + if (zfcp_adapter_enqueue(ccw_device)) { + dev_err(&ccw_device->dev, + "Setup of data structures failed.\n"); retval = -EINVAL; - else - ZFCP_LOG_DEBUG("Probed adapter %s\n", - zfcp_get_busid_by_adapter(adapter)); + } up(&zfcp_data.config_sema); return retval; } @@ -95,8 +41,7 @@ zfcp_ccw_probe(struct ccw_device *ccw_device) * ports that belong to this adapter. And in addition all resources of this * adapter will be freed too. */ -static void -zfcp_ccw_remove(struct ccw_device *ccw_device) +static void zfcp_ccw_remove(struct ccw_device *ccw_device) { struct zfcp_adapter *adapter; struct zfcp_port *port, *p; @@ -106,8 +51,6 @@ zfcp_ccw_remove(struct ccw_device *ccw_device) down(&zfcp_data.config_sema); adapter = dev_get_drvdata(&ccw_device->dev); - ZFCP_LOG_DEBUG("Removing adapter %s\n", - zfcp_get_busid_by_adapter(adapter)); write_lock_irq(&zfcp_data.config_lock); list_for_each_entry_safe(port, p, &adapter->port_list_head, list) { list_for_each_entry_safe(unit, u, &port->unit_list_head, list) { @@ -145,8 +88,7 @@ zfcp_ccw_remove(struct ccw_device *ccw_device) * registered with the SCSI stack, that the QDIO queues will be set up * and that the adapter will be opened (asynchronously). 
*/ -static int -zfcp_ccw_set_online(struct ccw_device *ccw_device) +static int zfcp_ccw_set_online(struct ccw_device *ccw_device) { struct zfcp_adapter *adapter; int retval; @@ -155,12 +97,8 @@ zfcp_ccw_set_online(struct ccw_device *ccw_device) adapter = dev_get_drvdata(&ccw_device->dev); retval = zfcp_erp_thread_setup(adapter); - if (retval) { - ZFCP_LOG_INFO("error: start of error recovery thread for " - "adapter %s failed\n", - zfcp_get_busid_by_adapter(adapter)); + if (retval) goto out; - } retval = zfcp_adapter_scsi_register(adapter); if (retval) @@ -191,8 +129,7 @@ zfcp_ccw_set_online(struct ccw_device *ccw_device) * This function gets called by the common i/o layer and sets an adapter * into state offline. */ -static int -zfcp_ccw_set_offline(struct ccw_device *ccw_device) +static int zfcp_ccw_set_offline(struct ccw_device *ccw_device) { struct zfcp_adapter *adapter; @@ -206,15 +143,14 @@ zfcp_ccw_set_offline(struct ccw_device *ccw_device) } /** - * zfcp_ccw_notify + * zfcp_ccw_notify - ccw notify function * @ccw_device: pointer to belonging ccw device * @event: indicates if adapter was detached or attached * * This function gets called by the common i/o layer if an adapter has gone * or reappeared. */ -static int -zfcp_ccw_notify(struct ccw_device *ccw_device, int event) +static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event) { struct zfcp_adapter *adapter; @@ -222,18 +158,15 @@ zfcp_ccw_notify(struct ccw_device *ccw_device, int event) adapter = dev_get_drvdata(&ccw_device->dev); switch (event) { case CIO_GONE: - ZFCP_LOG_NORMAL("adapter %s: device gone\n", - zfcp_get_busid_by_adapter(adapter)); + dev_warn(&adapter->ccw_device->dev, "device gone\n"); zfcp_erp_adapter_shutdown(adapter, 0, 87, NULL); break; case CIO_NO_PATH: - ZFCP_LOG_NORMAL("adapter %s: no path\n", - zfcp_get_busid_by_adapter(adapter)); + dev_warn(&adapter->ccw_device->dev, "no path\n"); zfcp_erp_adapter_shutdown(adapter, 0, 88, NULL); break; case CIO_OPER: - ZFCP_LOG_NORMAL("adapter %s: operational again\n", - zfcp_get_busid_by_adapter(adapter)); + dev_info(&adapter->ccw_device->dev, "operational again\n"); zfcp_erp_modify_adapter_status(adapter, 11, NULL, ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET); @@ -247,24 +180,10 @@ zfcp_ccw_notify(struct ccw_device *ccw_device, int event) } /** - * zfcp_ccw_register - ccw register function - * - * Registers the driver at the common i/o layer. This function will be called - * at module load time/system start. - */ -int __init -zfcp_ccw_register(void) -{ - return ccw_driver_register(&zfcp_ccw_driver); -} - -/** - * zfcp_ccw_shutdown - gets called on reboot/shutdown - * - * Makes sure that QDIO queues are down when the system gets stopped. + * zfcp_ccw_shutdown - handle shutdown from cio + * @cdev: device for adapter to shutdown. */ -static void -zfcp_ccw_shutdown(struct ccw_device *cdev) +static void zfcp_ccw_shutdown(struct ccw_device *cdev) { struct zfcp_adapter *adapter; @@ -275,4 +194,33 @@ zfcp_ccw_shutdown(struct ccw_device *cdev) up(&zfcp_data.config_sema); } -#undef ZFCP_LOG_AREA +static struct ccw_device_id zfcp_ccw_device_id[] = { + { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x3) }, + { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x4) }, /* priv. 
*/ + {}, +}; + +MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id); + +static struct ccw_driver zfcp_ccw_driver = { + .owner = THIS_MODULE, + .name = "zfcp", + .ids = zfcp_ccw_device_id, + .probe = zfcp_ccw_probe, + .remove = zfcp_ccw_remove, + .set_online = zfcp_ccw_set_online, + .set_offline = zfcp_ccw_set_offline, + .notify = zfcp_ccw_notify, + .shutdown = zfcp_ccw_shutdown, +}; + +/** + * zfcp_ccw_register - ccw register function + * + * Registers the driver at the common i/o layer. This function will be called + * at module load time/system start. + */ +int __init zfcp_ccw_register(void) +{ + return ccw_driver_register(&zfcp_ccw_driver); +} diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c new file mode 100644 index 000000000000..ec2abceca6dc --- /dev/null +++ b/drivers/s390/scsi/zfcp_cfdc.c @@ -0,0 +1,259 @@ +/* + * zfcp device driver + * + * Userspace interface for accessing the + * Access Control Lists / Control File Data Channel + * + * Copyright IBM Corporation 2008 + */ + +#include <linux/types.h> +#include <linux/miscdevice.h> +#include <asm/ccwdev.h> +#include "zfcp_def.h" +#include "zfcp_ext.h" +#include "zfcp_fsf.h" + +#define ZFCP_CFDC_CMND_DOWNLOAD_NORMAL 0x00010001 +#define ZFCP_CFDC_CMND_DOWNLOAD_FORCE 0x00010101 +#define ZFCP_CFDC_CMND_FULL_ACCESS 0x00000201 +#define ZFCP_CFDC_CMND_RESTRICTED_ACCESS 0x00000401 +#define ZFCP_CFDC_CMND_UPLOAD 0x00010002 + +#define ZFCP_CFDC_DOWNLOAD 0x00000001 +#define ZFCP_CFDC_UPLOAD 0x00000002 +#define ZFCP_CFDC_WITH_CONTROL_FILE 0x00010000 + +#define ZFCP_CFDC_IOC_MAGIC 0xDD +#define ZFCP_CFDC_IOC \ + _IOWR(ZFCP_CFDC_IOC_MAGIC, 0, struct zfcp_cfdc_data) + +/** + * struct zfcp_cfdc_data - data for ioctl cfdc interface + * @signature: request signature + * @devno: FCP adapter device number + * @command: command code + * @fsf_status: returns status of FSF command to userspace + * @fsf_status_qual: returned to userspace + * @payloads: access conflicts list + * @control_file: access control table + */ +struct zfcp_cfdc_data { + u32 signature; + u32 devno; + u32 command; + u32 fsf_status; + u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE]; + u8 payloads[256]; + u8 control_file[0]; +}; + +static int zfcp_cfdc_copy_from_user(struct scatterlist *sg, + void __user *user_buffer) +{ + unsigned int length; + unsigned int size = ZFCP_CFDC_MAX_SIZE; + + while (size) { + length = min((unsigned int)size, sg->length); + if (copy_from_user(sg_virt(sg++), user_buffer, length)) + return -EFAULT; + user_buffer += length; + size -= length; + } + return 0; +} + +static int zfcp_cfdc_copy_to_user(void __user *user_buffer, + struct scatterlist *sg) +{ + unsigned int length; + unsigned int size = ZFCP_CFDC_MAX_SIZE; + + while (size) { + length = min((unsigned int) size, sg->length); + if (copy_to_user(user_buffer, sg_virt(sg++), length)) + return -EFAULT; + user_buffer += length; + size -= length; + } + return 0; +} + +static struct zfcp_adapter *zfcp_cfdc_get_adapter(u32 devno) +{ + struct zfcp_adapter *adapter = NULL, *cur_adapter; + struct ccw_dev_id dev_id; + + read_lock_irq(&zfcp_data.config_lock); + list_for_each_entry(cur_adapter, &zfcp_data.adapter_list_head, list) { + ccw_device_get_id(cur_adapter->ccw_device, &dev_id); + if (dev_id.devno == devno) { + adapter = cur_adapter; + zfcp_adapter_get(adapter); + break; + } + } + read_unlock_irq(&zfcp_data.config_lock); + return adapter; +} + +static int zfcp_cfdc_set_fsf(struct zfcp_fsf_cfdc *fsf_cfdc, int command) +{ + switch (command) { + case ZFCP_CFDC_CMND_DOWNLOAD_NORMAL: + 
fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE; + fsf_cfdc->option = FSF_CFDC_OPTION_NORMAL_MODE; + break; + case ZFCP_CFDC_CMND_DOWNLOAD_FORCE: + fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE; + fsf_cfdc->option = FSF_CFDC_OPTION_FORCE; + break; + case ZFCP_CFDC_CMND_FULL_ACCESS: + fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE; + fsf_cfdc->option = FSF_CFDC_OPTION_FULL_ACCESS; + break; + case ZFCP_CFDC_CMND_RESTRICTED_ACCESS: + fsf_cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE; + fsf_cfdc->option = FSF_CFDC_OPTION_RESTRICTED_ACCESS; + break; + case ZFCP_CFDC_CMND_UPLOAD: + fsf_cfdc->command = FSF_QTCB_UPLOAD_CONTROL_FILE; + fsf_cfdc->option = 0; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int zfcp_cfdc_sg_setup(int command, struct scatterlist *sg, + u8 __user *control_file) +{ + int retval; + retval = zfcp_sg_setup_table(sg, ZFCP_CFDC_PAGES); + if (retval) + return retval; + + sg[ZFCP_CFDC_PAGES - 1].length = ZFCP_CFDC_MAX_SIZE % PAGE_SIZE; + + if (command & ZFCP_CFDC_WITH_CONTROL_FILE && + command & ZFCP_CFDC_DOWNLOAD) { + retval = zfcp_cfdc_copy_from_user(sg, control_file); + if (retval) { + zfcp_sg_free_table(sg, ZFCP_CFDC_PAGES); + return -EFAULT; + } + } + + return 0; +} + +static void zfcp_cfdc_req_to_sense(struct zfcp_cfdc_data *data, + struct zfcp_fsf_req *req) +{ + data->fsf_status = req->qtcb->header.fsf_status; + memcpy(&data->fsf_status_qual, &req->qtcb->header.fsf_status_qual, + sizeof(union fsf_status_qual)); + memcpy(&data->payloads, &req->qtcb->bottom.support.els, + sizeof(req->qtcb->bottom.support.els)); +} + +static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command, + unsigned long buffer) +{ + struct zfcp_cfdc_data *data; + struct zfcp_cfdc_data __user *data_user; + struct zfcp_adapter *adapter; + struct zfcp_fsf_req *req; + struct zfcp_fsf_cfdc *fsf_cfdc; + int retval; + + if (command != ZFCP_CFDC_IOC) + return -ENOTTY; + + data_user = (void __user *) buffer; + if (!data_user) + return -EINVAL; + + fsf_cfdc = kmalloc(sizeof(struct zfcp_fsf_cfdc), GFP_KERNEL); + if (!fsf_cfdc) + return -ENOMEM; + + data = kmalloc(sizeof(struct zfcp_cfdc_data), GFP_KERNEL); + if (!data) { + retval = -ENOMEM; + goto no_mem_sense; + } + + retval = copy_from_user(data, data_user, sizeof(*data)); + if (retval) { + retval = -EFAULT; + goto free_buffer; + } + + if (data->signature != 0xCFDCACDF) { + retval = -EINVAL; + goto free_buffer; + } + + retval = zfcp_cfdc_set_fsf(fsf_cfdc, data->command); + + adapter = zfcp_cfdc_get_adapter(data->devno); + if (!adapter) { + retval = -ENXIO; + goto free_buffer; + } + + retval = zfcp_cfdc_sg_setup(data->command, fsf_cfdc->sg, + data_user->control_file); + if (retval) + goto adapter_put; + req = zfcp_fsf_control_file(adapter, fsf_cfdc); + if (IS_ERR(req)) { + retval = PTR_ERR(req); + goto free_sg; + } + + if (req->status & ZFCP_STATUS_FSFREQ_ERROR) { + retval = -ENXIO; + goto free_fsf; + } + + zfcp_cfdc_req_to_sense(data, req); + retval = copy_to_user(data_user, data, sizeof(*data_user)); + if (retval) { + retval = -EFAULT; + goto free_fsf; + } + + if (data->command & ZFCP_CFDC_UPLOAD) + retval = zfcp_cfdc_copy_to_user(&data_user->control_file, + fsf_cfdc->sg); + + free_fsf: + zfcp_fsf_req_free(req); + free_sg: + zfcp_sg_free_table(fsf_cfdc->sg, ZFCP_CFDC_PAGES); + adapter_put: + zfcp_adapter_put(adapter); + free_buffer: + kfree(data); + no_mem_sense: + kfree(fsf_cfdc); + return retval; +} + +static const struct file_operations zfcp_cfdc_fops = { + .unlocked_ioctl = zfcp_cfdc_dev_ioctl, +#ifdef 
CONFIG_COMPAT + .compat_ioctl = zfcp_cfdc_dev_ioctl +#endif +}; + +struct miscdevice zfcp_cfdc_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "zfcp_cfdc", + .fops = &zfcp_cfdc_fops, +}; diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index c8bad675dbd1..fca48b88fc53 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -1,22 +1,9 @@ /* - * This file is part of the zfcp device driver for - * FCP adapters for IBM System z9 and zSeries. + * zfcp device driver * - * (C) Copyright IBM Corp. 2002, 2006 + * Debug traces for zfcp. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * Copyright IBM Corporation 2002, 2008 */ #include <linux/ctype.h> @@ -29,8 +16,6 @@ module_param(dbfsize, uint, 0400); MODULE_PARM_DESC(dbfsize, "number of pages for each debug feature area (default 4)"); -#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER - static void zfcp_dbf_hexdump(debug_info_t *dbf, void *to, int to_len, int level, char *from, int from_len) { @@ -186,8 +171,8 @@ void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) fsf_status_qual, FSF_STATUS_QUALIFIER_SIZE); response->fsf_req_status = fsf_req->status; response->sbal_first = fsf_req->sbal_first; - response->sbal_curr = fsf_req->sbal_curr; response->sbal_last = fsf_req->sbal_last; + response->sbal_response = fsf_req->sbal_response; response->pool = fsf_req->pool != NULL; response->erp_action = (unsigned long)fsf_req->erp_action; @@ -268,7 +253,7 @@ void zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter, strncpy(rec->tag, "stat", ZFCP_DBF_TAG_SIZE); strncpy(rec->tag2, tag, ZFCP_DBF_TAG_SIZE); - rec->u.status.failed = adapter->status_read_failed; + rec->u.status.failed = atomic_read(&adapter->stat_miss); if (status_buffer != NULL) { rec->u.status.status_type = status_buffer->status_type; rec->u.status.status_subtype = status_buffer->status_subtype; @@ -312,15 +297,13 @@ void zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter, /** * zfcp_hba_dbf_event_qdio - trace event for QDIO related failure * @adapter: adapter affected by this QDIO related event - * @status: as passed by qdio module * @qdio_error: as passed by qdio module - * @siga_error: as passed by qdio module * @sbal_index: first buffer with error condition, as passed by qdio module * @sbal_count: number of buffers affected, as passed by qdio module */ -void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status, - unsigned int qdio_error, unsigned int siga_error, - int sbal_index, int sbal_count) +void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, + unsigned int qdio_error, int sbal_index, + int sbal_count) { struct zfcp_hba_dbf_record *r = &adapter->hba_dbf_buf; unsigned long flags; @@ -328,9 +311,7 @@ void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status, spin_lock_irqsave(&adapter->hba_dbf_lock, 
flags); memset(r, 0, sizeof(*r)); strncpy(r->tag, "qdio", ZFCP_DBF_TAG_SIZE); - r->u.qdio.status = status; r->u.qdio.qdio_error = qdio_error; - r->u.qdio.siga_error = siga_error; r->u.qdio.sbal_index = sbal_index; r->u.qdio.sbal_count = sbal_count; debug_event(adapter->hba_dbf, 0, r, sizeof(*r)); @@ -355,8 +336,8 @@ static void zfcp_hba_dbf_view_response(char **p, FSF_STATUS_QUALIFIER_SIZE, 0, FSF_STATUS_QUALIFIER_SIZE); zfcp_dbf_out(p, "fsf_req_status", "0x%08x", r->fsf_req_status); zfcp_dbf_out(p, "sbal_first", "0x%02x", r->sbal_first); - zfcp_dbf_out(p, "sbal_curr", "0x%02x", r->sbal_curr); zfcp_dbf_out(p, "sbal_last", "0x%02x", r->sbal_last); + zfcp_dbf_out(p, "sbal_response", "0x%02x", r->sbal_response); zfcp_dbf_out(p, "pool", "0x%02x", r->pool); switch (r->fsf_command) { @@ -413,9 +394,7 @@ static void zfcp_hba_dbf_view_status(char **p, static void zfcp_hba_dbf_view_qdio(char **p, struct zfcp_hba_dbf_record_qdio *r) { - zfcp_dbf_out(p, "status", "0x%08x", r->status); zfcp_dbf_out(p, "qdio_error", "0x%08x", r->qdio_error); - zfcp_dbf_out(p, "siga_error", "0x%08x", r->siga_error); zfcp_dbf_out(p, "sbal_index", "0x%02x", r->sbal_index); zfcp_dbf_out(p, "sbal_count", "0x%02x", r->sbal_count); } @@ -515,13 +494,13 @@ static const char *zfcp_rec_dbf_ids[] = { [52] = "port boxed close unit", [53] = "port boxed fcp", [54] = "unit boxed fcp", - [55] = "port access denied ct", - [56] = "port access denied els", - [57] = "port access denied open port", - [58] = "port access denied close physical", - [59] = "unit access denied open unit", + [55] = "port access denied", + [56] = "", + [57] = "", + [58] = "", + [59] = "unit access denied", [60] = "shared unit access denied open unit", - [61] = "unit access denied fcp", + [61] = "", [62] = "request timeout", [63] = "adisc link test reject or timeout", [64] = "adisc link test d_id changed", @@ -546,8 +525,8 @@ static const char *zfcp_rec_dbf_ids[] = { [80] = "exclusive read-only unit access unsupported", [81] = "shared read-write unit access unsupported", [82] = "incoming rscn", - [83] = "incoming plogi", - [84] = "incoming logo", + [83] = "incoming wwpn", + [84] = "", [85] = "online", [86] = "offline", [87] = "ccw device gone", @@ -586,8 +565,8 @@ static const char *zfcp_rec_dbf_ids[] = { [120] = "unknown fsf command", [121] = "no recommendation for status qualifier", [122] = "status read physical port closed in error", - [123] = "fc service class not supported ct", - [124] = "fc service class not supported els", + [123] = "fc service class not supported", + [124] = "", [125] = "need newer zfcp", [126] = "need newer microcode", [127] = "arbitrated loop not supported", @@ -595,7 +574,7 @@ static const char *zfcp_rec_dbf_ids[] = { [129] = "qtcb size mismatch", [130] = "unknown fsf status ecd", [131] = "fcp request too big", - [132] = "fc service class not supported fcp", + [132] = "", [133] = "data direction not valid fcp", [134] = "command length not valid fcp", [135] = "status read act update", @@ -603,13 +582,18 @@ static const char *zfcp_rec_dbf_ids[] = { [137] = "hbaapi port open", [138] = "hbaapi unit open", [139] = "hbaapi unit shutdown", - [140] = "qdio error", + [140] = "qdio error outbound", [141] = "scsi host reset", [142] = "dismissing fsf request for recovery action", [143] = "recovery action timed out", [144] = "recovery action gone", [145] = "recovery action being processed", [146] = "recovery action ready for next step", + [147] = "qdio error inbound", + [148] = "nameserver needed for port scan", + [149] = "port scan", + [150] = "ptp 
attach", + [151] = "port validation failed", }; static int zfcp_rec_dbf_view_format(debug_info_t *id, struct debug_view *view, @@ -670,24 +654,20 @@ static struct debug_view zfcp_rec_dbf_view = { * zfcp_rec_dbf_event_thread - trace event related to recovery thread operation * @id2: identifier for event * @adapter: adapter - * @lock: non-zero value indicates that erp_lock has not yet been acquired + * This function assumes that the caller is holding erp_lock. */ -void zfcp_rec_dbf_event_thread(u8 id2, struct zfcp_adapter *adapter, int lock) +void zfcp_rec_dbf_event_thread(u8 id2, struct zfcp_adapter *adapter) { struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf; unsigned long flags = 0; struct list_head *entry; unsigned ready = 0, running = 0, total; - if (lock) - read_lock_irqsave(&adapter->erp_lock, flags); list_for_each(entry, &adapter->erp_ready_head) ready++; list_for_each(entry, &adapter->erp_running_head) running++; total = adapter->erp_total_count; - if (lock) - read_unlock_irqrestore(&adapter->erp_lock, flags); spin_lock_irqsave(&adapter->rec_dbf_lock, flags); memset(r, 0, sizeof(*r)); @@ -696,10 +676,25 @@ void zfcp_rec_dbf_event_thread(u8 id2, struct zfcp_adapter *adapter, int lock) r->u.thread.total = total; r->u.thread.ready = ready; r->u.thread.running = running; - debug_event(adapter->rec_dbf, 5, r, sizeof(*r)); + debug_event(adapter->rec_dbf, 6, r, sizeof(*r)); spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags); } +/** + * zfcp_rec_dbf_event_thread - trace event related to recovery thread operation + * @id2: identifier for event + * @adapter: adapter + * This function assumes that the caller does not hold erp_lock. + */ +void zfcp_rec_dbf_event_thread_lock(u8 id2, struct zfcp_adapter *adapter) +{ + unsigned long flags; + + read_lock_irqsave(&adapter->erp_lock, flags); + zfcp_rec_dbf_event_thread(id2, adapter); + read_unlock_irqrestore(&adapter->erp_lock, flags); +} + static void zfcp_rec_dbf_event_target(u8 id2, void *ref, struct zfcp_adapter *adapter, atomic_t *status, atomic_t *erp_count, @@ -823,7 +818,7 @@ void zfcp_rec_dbf_event_action(u8 id2, struct zfcp_erp_action *erp_action) r->u.action.status = erp_action->status; r->u.action.step = erp_action->step; r->u.action.fsf_req = (unsigned long)erp_action->fsf_req; - debug_event(adapter->rec_dbf, 4, r, sizeof(*r)); + debug_event(adapter->rec_dbf, 5, r, sizeof(*r)); spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags); } @@ -960,7 +955,7 @@ void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req) zfcp_san_dbf_event_els("iels", 1, fsf_req, buf->d_id, fc_host_port_id(adapter->scsi_host), - *(u8 *)buf->payload, (void *)buf->payload, + buf->payload.data[0], (void *)buf->payload.data, length); } @@ -1064,8 +1059,7 @@ static void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level, if (fsf_req != NULL) { fcp_rsp = (struct fcp_rsp_iu *) &(fsf_req->qtcb->bottom.io.fcp_rsp); - fcp_rsp_info = - zfcp_get_fcp_rsp_info_ptr(fcp_rsp); + fcp_rsp_info = (unsigned char *) &fcp_rsp[1]; fcp_sns_info = zfcp_get_fcp_sns_info_ptr(fcp_rsp); @@ -1279,5 +1273,3 @@ void zfcp_adapter_debug_unregister(struct zfcp_adapter *adapter) adapter->hba_dbf = NULL; adapter->rec_dbf = NULL; } - -#undef ZFCP_LOG_AREA diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h index 54c34e483457..0ddb18449d11 100644 --- a/drivers/s390/scsi/zfcp_dbf.h +++ b/drivers/s390/scsi/zfcp_dbf.h @@ -38,7 +38,7 @@ struct zfcp_rec_dbf_record_thread { u32 total; u32 ready; u32 running; -} __attribute__ ((packed)); +}; struct 
zfcp_rec_dbf_record_target { u64 ref; @@ -47,7 +47,7 @@ struct zfcp_rec_dbf_record_target { u64 wwpn; u64 fcp_lun; u32 erp_count; -} __attribute__ ((packed)); +}; struct zfcp_rec_dbf_record_trigger { u8 want; @@ -59,14 +59,14 @@ struct zfcp_rec_dbf_record_trigger { u64 action; u64 wwpn; u64 fcp_lun; -} __attribute__ ((packed)); +}; struct zfcp_rec_dbf_record_action { u32 status; u32 step; u64 action; u64 fsf_req; -} __attribute__ ((packed)); +}; struct zfcp_rec_dbf_record { u8 id; @@ -77,7 +77,7 @@ struct zfcp_rec_dbf_record { struct zfcp_rec_dbf_record_target target; struct zfcp_rec_dbf_record_trigger trigger; } u; -} __attribute__ ((packed)); +}; enum { ZFCP_REC_DBF_ID_ACTION, @@ -97,8 +97,8 @@ struct zfcp_hba_dbf_record_response { u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE]; u32 fsf_req_status; u8 sbal_first; - u8 sbal_curr; u8 sbal_last; + u8 sbal_response; u8 pool; u64 erp_action; union { @@ -139,9 +139,7 @@ struct zfcp_hba_dbf_record_status { } __attribute__ ((packed)); struct zfcp_hba_dbf_record_qdio { - u32 status; u32 qdio_error; - u32 siga_error; u8 sbal_index; u8 sbal_count; } __attribute__ ((packed)); diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index bda8c77b22da..67f45fc62f53 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -1,22 +1,9 @@ /* - * This file is part of the zfcp device driver for - * FCP adapters for IBM System z9 and zSeries. + * zfcp device driver * - * (C) Copyright IBM Corp. 2002, 2006 + * Global definitions for the zfcp device driver. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * Copyright IBM Corporation 2002, 2008 */ #ifndef ZFCP_DEF_H @@ -26,7 +13,6 @@ #include <linux/init.h> #include <linux/moduleparam.h> -#include <linux/miscdevice.h> #include <linux/major.h> #include <linux/blkdev.h> #include <linux/delay.h> @@ -53,9 +39,6 @@ /********************* GENERAL DEFINES *********************************/ -/* zfcp version number, it consists of major, minor, and patch-level number */ -#define ZFCP_VERSION "4.8.0" - /** * zfcp_sg_to_address - determine kernel address from struct scatterlist * @list: struct scatterlist @@ -93,11 +76,6 @@ zfcp_address_to_sg(void *address, struct scatterlist *list, unsigned int size) #define ZFCP_DEVICE_MODEL 0x03 #define ZFCP_DEVICE_MODEL_PRIV 0x04 -/* allow as many chained SBALs as are supported by hardware */ -#define ZFCP_MAX_SBALS_PER_REQ FSF_MAX_SBALS_PER_REQ -#define ZFCP_MAX_SBALS_PER_CT_REQ FSF_MAX_SBALS_PER_REQ -#define ZFCP_MAX_SBALS_PER_ELS_REQ FSF_MAX_SBALS_PER_ELS_REQ - /* DMQ bug workaround: don't use last SBALE */ #define ZFCP_MAX_SBALES_PER_SBAL (QDIO_MAX_ELEMENTS_PER_BUFFER - 1) @@ -106,42 +84,17 @@ zfcp_address_to_sg(void *address, struct scatterlist *list, unsigned int size) /* max. 
number of (data buffer) SBALEs in largest SBAL chain */ #define ZFCP_MAX_SBALES_PER_REQ \ - (ZFCP_MAX_SBALS_PER_REQ * ZFCP_MAX_SBALES_PER_SBAL - 2) + (FSF_MAX_SBALS_PER_REQ * ZFCP_MAX_SBALES_PER_SBAL - 2) /* request ID + QTCB in SBALE 0 + 1 of first SBAL in chain */ #define ZFCP_MAX_SECTORS (ZFCP_MAX_SBALES_PER_REQ * 8) /* max. number of (data buffer) SBALEs in largest SBAL chain multiplied with number of sectors per 4k block */ -/* FIXME(tune): free space should be one max. SBAL chain plus what? */ -#define ZFCP_QDIO_PCI_INTERVAL (QDIO_MAX_BUFFERS_PER_Q \ - - (ZFCP_MAX_SBALS_PER_REQ + 4)) - -#define ZFCP_SBAL_TIMEOUT (5*HZ) - -#define ZFCP_TYPE2_RECOVERY_TIME 8 /* seconds */ - -/* queue polling (values in microseconds) */ -#define ZFCP_MAX_INPUT_THRESHOLD 5000 /* FIXME: tune */ -#define ZFCP_MAX_OUTPUT_THRESHOLD 1000 /* FIXME: tune */ -#define ZFCP_MIN_INPUT_THRESHOLD 1 /* ignored by QDIO layer */ -#define ZFCP_MIN_OUTPUT_THRESHOLD 1 /* ignored by QDIO layer */ - -#define QDIO_SCSI_QFMT 1 /* 1 for FSF */ -#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer)) - /********************* FSF SPECIFIC DEFINES *********************************/ -#define ZFCP_ULP_INFO_VERSION 26 -#define ZFCP_QTCB_VERSION FSF_QTCB_CURRENT_VERSION /* ATTENTION: value must not be used by hardware */ #define FSF_QTCB_UNSOLICITED_STATUS 0x6305 -#define ZFCP_STATUS_READ_FAILED_THRESHOLD 3 -#define ZFCP_STATUS_READS_RECOM FSF_STATUS_READS_RECOM - -/* Do 1st retry in 1 second, then double the timeout for each following retry */ -#define ZFCP_EXCHANGE_CONFIG_DATA_FIRST_SLEEP 1 -#define ZFCP_EXCHANGE_CONFIG_DATA_RETRIES 7 /* timeout value for "default timer" for fsf requests */ #define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ) @@ -153,17 +106,9 @@ typedef unsigned long long fcp_lun_t; /* data length field may be at variable position in FCP-2 FCP_CMND IU */ typedef unsigned int fcp_dl_t; -#define ZFCP_FC_SERVICE_CLASS_DEFAULT FSF_CLASS_3 - /* timeout for name-server lookup (in seconds) */ #define ZFCP_NS_GID_PN_TIMEOUT 10 -/* largest SCSI command we can process */ -/* FCP-2 (FCP_CMND IU) allows up to (255-3+16) */ -#define ZFCP_MAX_SCSI_CMND_LENGTH 255 -/* maximum number of commands in LUN queue (tagged queueing) */ -#define ZFCP_CMND_PER_LUN 32 - /* task attribute values in FCP-2 FCP_CMND IU */ #define SIMPLE_Q 0 #define HEAD_OF_Q 1 @@ -224,9 +169,9 @@ struct fcp_rsp_iu { #define RSP_CODE_TASKMAN_FAILED 5 /* see fc-fs */ -#define LS_RSCN 0x61040000 -#define LS_LOGO 0x05000000 -#define LS_PLOGI 0x03000000 +#define LS_RSCN 0x61 +#define LS_LOGO 0x05 +#define LS_PLOGI 0x03 struct fcp_rscn_head { u8 command; @@ -266,7 +211,6 @@ struct fcp_logo { * FC-FS stuff */ #define R_A_TOV 10 /* seconds */ -#define ZFCP_ELS_TIMEOUT (2 * R_A_TOV) #define ZFCP_LS_RLS 0x0f #define ZFCP_LS_ADISC 0x52 @@ -311,7 +255,10 @@ struct zfcp_rc_entry { #define ZFCP_CT_DIRECTORY_SERVICE 0xFC #define ZFCP_CT_NAME_SERVER 0x02 #define ZFCP_CT_SYNCHRONOUS 0x00 +#define ZFCP_CT_SCSI_FCP 0x08 +#define ZFCP_CT_UNABLE_TO_PERFORM_CMD 0x09 #define ZFCP_CT_GID_PN 0x0121 +#define ZFCP_CT_GPN_FT 0x0172 #define ZFCP_CT_MAX_SIZE 0x1020 #define ZFCP_CT_ACCEPT 0x8002 #define ZFCP_CT_REJECT 0x8001 @@ -321,107 +268,6 @@ struct zfcp_rc_entry { */ #define ZFCP_CT_TIMEOUT (3 * R_A_TOV) -/******************** LOGGING MACROS AND DEFINES *****************************/ - -/* - * Logging may be applied on certain kinds of driver operations - * independently. Additionally, different log-levels are supported for - * each of these areas. 
- */ - -#define ZFCP_NAME "zfcp" - -/* independent log areas */ -#define ZFCP_LOG_AREA_OTHER 0 -#define ZFCP_LOG_AREA_SCSI 1 -#define ZFCP_LOG_AREA_FSF 2 -#define ZFCP_LOG_AREA_CONFIG 3 -#define ZFCP_LOG_AREA_CIO 4 -#define ZFCP_LOG_AREA_QDIO 5 -#define ZFCP_LOG_AREA_ERP 6 -#define ZFCP_LOG_AREA_FC 7 - -/* log level values*/ -#define ZFCP_LOG_LEVEL_NORMAL 0 -#define ZFCP_LOG_LEVEL_INFO 1 -#define ZFCP_LOG_LEVEL_DEBUG 2 -#define ZFCP_LOG_LEVEL_TRACE 3 - -/* - * this allows removal of logging code by the preprocessor - * (the most detailed log level still to be compiled in is specified, - * higher log levels are removed) - */ -#define ZFCP_LOG_LEVEL_LIMIT ZFCP_LOG_LEVEL_TRACE - -/* get "loglevel" nibble assignment */ -#define ZFCP_GET_LOG_VALUE(zfcp_lognibble) \ - ((atomic_read(&zfcp_data.loglevel) >> (zfcp_lognibble<<2)) & 0xF) - -/* set "loglevel" nibble */ -#define ZFCP_SET_LOG_NIBBLE(value, zfcp_lognibble) \ - (value << (zfcp_lognibble << 2)) - -/* all log-level defaults are combined to generate initial log-level */ -#define ZFCP_LOG_LEVEL_DEFAULTS \ - (ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_OTHER) | \ - ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_SCSI) | \ - ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_FSF) | \ - ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_CONFIG) | \ - ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_CIO) | \ - ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_QDIO) | \ - ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_ERP) | \ - ZFCP_SET_LOG_NIBBLE(ZFCP_LOG_LEVEL_NORMAL, ZFCP_LOG_AREA_FC)) - -/* check whether we have the right level for logging */ -#define ZFCP_LOG_CHECK(level) \ - ((ZFCP_GET_LOG_VALUE(ZFCP_LOG_AREA)) >= level) - -/* logging routine for zfcp */ -#define _ZFCP_LOG(fmt, args...) \ - printk(KERN_ERR ZFCP_NAME": %s(%d): " fmt, __func__, \ - __LINE__ , ##args) - -#define ZFCP_LOG(level, fmt, args...) \ -do { \ - if (ZFCP_LOG_CHECK(level)) \ - _ZFCP_LOG(fmt, ##args); \ -} while (0) - -#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_NORMAL -# define ZFCP_LOG_NORMAL(fmt, args...) do { } while (0) -#else -# define ZFCP_LOG_NORMAL(fmt, args...) \ -do { \ - if (ZFCP_LOG_CHECK(ZFCP_LOG_LEVEL_NORMAL)) \ - printk(KERN_ERR ZFCP_NAME": " fmt, ##args); \ -} while (0) -#endif - -#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_INFO -# define ZFCP_LOG_INFO(fmt, args...) do { } while (0) -#else -# define ZFCP_LOG_INFO(fmt, args...) \ -do { \ - if (ZFCP_LOG_CHECK(ZFCP_LOG_LEVEL_INFO)) \ - printk(KERN_ERR ZFCP_NAME": " fmt, ##args); \ -} while (0) -#endif - -#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_DEBUG -# define ZFCP_LOG_DEBUG(fmt, args...) do { } while (0) -#else -# define ZFCP_LOG_DEBUG(fmt, args...) \ - ZFCP_LOG(ZFCP_LOG_LEVEL_DEBUG, fmt , ##args) -#endif - -#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_TRACE -# define ZFCP_LOG_TRACE(fmt, args...) do { } while (0) -#else -# define ZFCP_LOG_TRACE(fmt, args...) 
\ - ZFCP_LOG(ZFCP_LOG_LEVEL_TRACE, fmt , ##args) -#endif - /*************** ADAPTER/PORT/UNIT AND FSF_REQ STATUS FLAGS ******************/ /* @@ -441,6 +287,7 @@ do { \ #define ZFCP_STATUS_COMMON_ERP_INUSE 0x01000000 #define ZFCP_STATUS_COMMON_ACCESS_DENIED 0x00800000 #define ZFCP_STATUS_COMMON_ACCESS_BOXED 0x00400000 +#define ZFCP_STATUS_COMMON_NOESC 0x00200000 /* adapter status */ #define ZFCP_STATUS_ADAPTER_QDIOUP 0x00000002 @@ -496,77 +343,6 @@ do { \ #define ZFCP_STATUS_FSFREQ_RETRY 0x00000800 #define ZFCP_STATUS_FSFREQ_DISMISSED 0x00001000 -/*********************** ERROR RECOVERY PROCEDURE DEFINES ********************/ - -#define ZFCP_MAX_ERPS 3 - -#define ZFCP_ERP_FSFREQ_TIMEOUT (30 * HZ) -#define ZFCP_ERP_MEMWAIT_TIMEOUT HZ - -#define ZFCP_STATUS_ERP_TIMEDOUT 0x10000000 -#define ZFCP_STATUS_ERP_CLOSE_ONLY 0x01000000 -#define ZFCP_STATUS_ERP_DISMISSING 0x00100000 -#define ZFCP_STATUS_ERP_DISMISSED 0x00200000 -#define ZFCP_STATUS_ERP_LOWMEM 0x00400000 - -#define ZFCP_ERP_STEP_UNINITIALIZED 0x00000000 -#define ZFCP_ERP_STEP_FSF_XCONFIG 0x00000001 -#define ZFCP_ERP_STEP_PHYS_PORT_CLOSING 0x00000010 -#define ZFCP_ERP_STEP_PORT_CLOSING 0x00000100 -#define ZFCP_ERP_STEP_NAMESERVER_OPEN 0x00000200 -#define ZFCP_ERP_STEP_NAMESERVER_LOOKUP 0x00000400 -#define ZFCP_ERP_STEP_PORT_OPENING 0x00000800 -#define ZFCP_ERP_STEP_UNIT_CLOSING 0x00001000 -#define ZFCP_ERP_STEP_UNIT_OPENING 0x00002000 - -/* Ordered by escalation level (necessary for proper erp-code operation) */ -#define ZFCP_ERP_ACTION_REOPEN_ADAPTER 0x4 -#define ZFCP_ERP_ACTION_REOPEN_PORT_FORCED 0x3 -#define ZFCP_ERP_ACTION_REOPEN_PORT 0x2 -#define ZFCP_ERP_ACTION_REOPEN_UNIT 0x1 - -#define ZFCP_ERP_ACTION_RUNNING 0x1 -#define ZFCP_ERP_ACTION_READY 0x2 - -#define ZFCP_ERP_SUCCEEDED 0x0 -#define ZFCP_ERP_FAILED 0x1 -#define ZFCP_ERP_CONTINUES 0x2 -#define ZFCP_ERP_EXIT 0x3 -#define ZFCP_ERP_DISMISSED 0x4 -#define ZFCP_ERP_NOMEM 0x5 - - -/******************** CFDC SPECIFIC STUFF *****************************/ - -/* Firewall data channel sense data record */ -struct zfcp_cfdc_sense_data { - u32 signature; /* Request signature */ - u32 devno; /* FCP adapter device number */ - u32 command; /* Command code */ - u32 fsf_status; /* FSF request status and status qualifier */ - u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE]; - u8 payloads[256]; /* Access conflicts list */ - u8 control_file[0]; /* Access control table */ -}; - -#define ZFCP_CFDC_SIGNATURE 0xCFDCACDF - -#define ZFCP_CFDC_CMND_DOWNLOAD_NORMAL 0x00010001 -#define ZFCP_CFDC_CMND_DOWNLOAD_FORCE 0x00010101 -#define ZFCP_CFDC_CMND_FULL_ACCESS 0x00000201 -#define ZFCP_CFDC_CMND_RESTRICTED_ACCESS 0x00000401 -#define ZFCP_CFDC_CMND_UPLOAD 0x00010002 - -#define ZFCP_CFDC_DOWNLOAD 0x00000001 -#define ZFCP_CFDC_UPLOAD 0x00000002 -#define ZFCP_CFDC_WITH_CONTROL_FILE 0x00010000 - -#define ZFCP_CFDC_DEV_NAME "zfcp_cfdc" -#define ZFCP_CFDC_DEV_MAJOR MISC_MAJOR -#define ZFCP_CFDC_DEV_MINOR MISC_DYNAMIC_MINOR - -#define ZFCP_CFDC_MAX_CONTROL_FILE_SIZE 127 * 1024 - /************************* STRUCTURE DEFINITIONS *****************************/ struct zfcp_fsf_req; @@ -623,7 +399,6 @@ typedef void (*zfcp_send_ct_handler_t)(unsigned long); * @resp_count: number of elements in response scatter-gather list * @handler: handler function (called for response to the request) * @handler_data: data passed to handler function - * @pool: pointer to memory pool for ct request structure * @timeout: FSF timeout for this request * @completion: completion for synchronization purposes * @status: used to pass error 
status to calling function @@ -636,7 +411,6 @@ struct zfcp_send_ct { unsigned int resp_count; zfcp_send_ct_handler_t handler; unsigned long handler_data; - mempool_t *pool; int timeout; struct completion *completion; int status; @@ -685,13 +459,13 @@ struct zfcp_send_els { }; struct zfcp_qdio_queue { - struct qdio_buffer *buffer[QDIO_MAX_BUFFERS_PER_Q]; /* SBALs */ - u8 free_index; /* index of next free bfr + struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q]; /* SBALs */ + u8 first; /* index of next free bfr in queue (free_count>0) */ - atomic_t free_count; /* number of free buffers + atomic_t count; /* number of free buffers in queue */ - rwlock_t queue_lock; /* lock for operations on queue */ - int distance_from_int; /* SBALs used since PCI indication + spinlock_t lock; /* lock for operations on queue */ + int pci_batch; /* SBALs since PCI indication was last set */ }; @@ -708,6 +482,24 @@ struct zfcp_erp_action { struct timer_list timer; }; +struct fsf_latency_record { + u32 min; + u32 max; + u64 sum; +}; + +struct latency_cont { + struct fsf_latency_record channel; + struct fsf_latency_record fabric; + u64 counter; +}; + +struct zfcp_latencies { + struct latency_cont read; + struct latency_cont write; + struct latency_cont cmd; + spinlock_t lock; +}; struct zfcp_adapter { struct list_head list; /* list of adapters */ @@ -723,24 +515,25 @@ struct zfcp_adapter { u32 adapter_features; /* FCP channel features */ u32 connection_features; /* host connection features */ u32 hardware_version; /* of FCP channel */ + u16 timer_ticks; /* time int for a tick */ struct Scsi_Host *scsi_host; /* Pointer to mid-layer */ struct list_head port_list_head; /* remote port list */ struct list_head port_remove_lh; /* head of ports to be removed */ u32 ports; /* number of remote ports */ - atomic_t reqs_active; /* # active FSF reqs */ unsigned long req_no; /* unique FSF req number */ struct list_head *req_list; /* list of pending reqs */ spinlock_t req_list_lock; /* request list lock */ - struct zfcp_qdio_queue request_queue; /* request queue */ + struct zfcp_qdio_queue req_q; /* request queue */ u32 fsf_req_seq_no; /* FSF cmnd seq number */ wait_queue_head_t request_wq; /* can be used to wait for more avaliable SBALs */ - struct zfcp_qdio_queue response_queue; /* response queue */ + struct zfcp_qdio_queue resp_q; /* response queue */ rwlock_t abort_lock; /* Protects against SCSI stack abort/command completion races */ - u16 status_read_failed; /* # failed status reads */ + atomic_t stat_miss; /* # missing status reads*/ + struct work_struct stat_work; atomic_t status; /* status of this adapter */ struct list_head erp_ready_head; /* error recovery for this adapter/devices */ @@ -774,13 +567,9 @@ struct zfcp_adapter { struct fc_host_statistics *fc_stats; struct fsf_qtcb_bottom_port *stats_reset_data; unsigned long stats_reset; + struct work_struct scan_work; }; -/* - * the struct device sysfs_device must be at the beginning of this structure. - * pointer to struct device is used to free port structure in release function - * of the device. don't change! - */ struct zfcp_port { struct device sysfs_device; /* sysfs device */ struct fc_rport *rport; /* rport of fc transport class */ @@ -804,10 +593,6 @@ struct zfcp_port { u32 supported_classes; }; -/* the struct device sysfs_device must be at the beginning of this structure. - * pointer to struct device is used to free unit structure in release function - * of the device. don't change! 
- */ struct zfcp_unit { struct device sysfs_device; /* sysfs device */ struct list_head list; /* list of logical units */ @@ -822,6 +607,7 @@ struct zfcp_unit { struct scsi_device *device; /* scsi device struct pointer */ struct zfcp_erp_action erp_action; /* pending error recovery */ atomic_t erp_counter; + struct zfcp_latencies latencies; }; /* FSF request */ @@ -831,19 +617,19 @@ struct zfcp_fsf_req { struct zfcp_adapter *adapter; /* adapter request belongs to */ u8 sbal_number; /* nr of SBALs free for use */ u8 sbal_first; /* first SBAL for this request */ - u8 sbal_last; /* last possible SBAL for + u8 sbal_last; /* last SBAL for this request */ + u8 sbal_limit; /* last possible SBAL for this request */ - u8 sbal_curr; /* current SBAL during creation - of request */ u8 sbale_curr; /* current SBALE during creation of request */ + u8 sbal_response; /* SBAL used in interrupt */ wait_queue_head_t completion_wq; /* can be used by a routine to wait for completion */ volatile u32 status; /* status of this request */ u32 fsf_command; /* FSF Command copy */ struct fsf_qtcb *qtcb; /* address of associated QTCB */ u32 seq_no; /* Sequence number of request */ - unsigned long data; /* private data of request */ + void *data; /* private data of request */ struct timer_list timer; /* used for erp or scsi er */ struct zfcp_erp_action *erp_action; /* used if this request is issued on behalf of erp */ @@ -851,10 +637,9 @@ struct zfcp_fsf_req { from emergency pool */ unsigned long long issued; /* request sent time (STCK) */ struct zfcp_unit *unit; + void (*handler)(struct zfcp_fsf_req *); }; -typedef void zfcp_fsf_req_handler_t(struct zfcp_fsf_req*); - /* driver data */ struct zfcp_data { struct scsi_host_template scsi_host_template; @@ -873,29 +658,11 @@ struct zfcp_data { char init_busid[BUS_ID_SIZE]; wwn_t init_wwpn; fcp_lun_t init_fcp_lun; - char *driver_version; struct kmem_cache *fsf_req_qtcb_cache; struct kmem_cache *sr_buffer_cache; struct kmem_cache *gid_pn_cache; }; -/** - * struct zfcp_sg_list - struct describing a scatter-gather list - * @sg: pointer to array of (struct scatterlist) - * @count: number of elements in scatter-gather list - */ -struct zfcp_sg_list { - struct scatterlist *sg; - unsigned int count; -}; - -/* number of elements for various memory pools */ -#define ZFCP_POOL_FSF_REQ_ERP_NR 1 -#define ZFCP_POOL_FSF_REQ_SCSI_NR 1 -#define ZFCP_POOL_FSF_REQ_ABORT_NR 1 -#define ZFCP_POOL_STATUS_READ_NR ZFCP_STATUS_READS_RECOM -#define ZFCP_POOL_DATA_GID_PN_NR 1 - /* struct used by memory pools for fsf_requests */ struct zfcp_fsf_req_qtcb { struct zfcp_fsf_req fsf_req; @@ -905,7 +672,6 @@ struct zfcp_fsf_req_qtcb { /********************** ZFCP SPECIFIC DEFINES ********************************/ #define ZFCP_REQ_AUTO_CLEANUP 0x00000002 -#define ZFCP_WAIT_FOR_SBAL 0x00000004 #define ZFCP_REQ_NO_QTCB 0x00000008 #define ZFCP_SET 0x00000100 @@ -916,12 +682,6 @@ struct zfcp_fsf_req_qtcb { ((atomic_read(target) & mask) == mask) #endif -extern void _zfcp_hex_dump(char *, int); -#define ZFCP_HEX_DUMP(level, addr, count) \ - if (ZFCP_LOG_CHECK(level)) { \ - _zfcp_hex_dump(addr, count); \ - } - #define zfcp_get_busid_by_adapter(adapter) (adapter->ccw_device->dev.bus_id) #define zfcp_get_busid_by_port(port) (zfcp_get_busid_by_adapter(port->adapter)) #define zfcp_get_busid_by_unit(unit) (zfcp_get_busid_by_port(unit->port)) @@ -934,15 +694,6 @@ static inline int zfcp_reqlist_hash(unsigned long req_id) return req_id % REQUEST_LIST_SIZE; } -static inline void zfcp_reqlist_add(struct zfcp_adapter *adapter, 
- struct zfcp_fsf_req *fsf_req) -{ - unsigned int idx; - - idx = zfcp_reqlist_hash(fsf_req->req_id); - list_add_tail(&fsf_req->list, &adapter->req_list[idx]); -} - static inline void zfcp_reqlist_remove(struct zfcp_adapter *adapter, struct zfcp_fsf_req *fsf_req) { diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 805484658dd9..643ac4bba5b5 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -1,641 +1,406 @@ /* - * This file is part of the zfcp device driver for - * FCP adapters for IBM System z9 and zSeries. + * zfcp device driver * - * (C) Copyright IBM Corp. 2002, 2006 + * Error Recovery Procedures (ERP). * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * Copyright IBM Corporation 2002, 2008 */ -#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP - #include "zfcp_ext.h" -static int zfcp_erp_adisc(struct zfcp_port *); -static void zfcp_erp_adisc_handler(unsigned long); - -static int zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *, int, u8, - void *); -static int zfcp_erp_port_forced_reopen_internal(struct zfcp_port *, int, u8, - void *); -static int zfcp_erp_port_reopen_internal(struct zfcp_port *, int, u8, void *); -static int zfcp_erp_unit_reopen_internal(struct zfcp_unit *, int, u8, void *); - -static int zfcp_erp_port_reopen_all_internal(struct zfcp_adapter *, int, u8, - void *); -static int zfcp_erp_unit_reopen_all_internal(struct zfcp_port *, int, u8, - void *); - -static void zfcp_erp_adapter_block(struct zfcp_adapter *, int); -static void zfcp_erp_adapter_unblock(struct zfcp_adapter *); -static void zfcp_erp_port_block(struct zfcp_port *, int); -static void zfcp_erp_port_unblock(struct zfcp_port *); -static void zfcp_erp_unit_block(struct zfcp_unit *, int); -static void zfcp_erp_unit_unblock(struct zfcp_unit *); - -static int zfcp_erp_thread(void *); - -static int zfcp_erp_strategy(struct zfcp_erp_action *); - -static int zfcp_erp_strategy_do_action(struct zfcp_erp_action *); -static int zfcp_erp_strategy_memwait(struct zfcp_erp_action *); -static int zfcp_erp_strategy_check_target(struct zfcp_erp_action *, int); -static int zfcp_erp_strategy_check_unit(struct zfcp_unit *, int); -static int zfcp_erp_strategy_check_port(struct zfcp_port *, int); -static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *, int); -static int zfcp_erp_strategy_statechange(int, u32, struct zfcp_adapter *, - struct zfcp_port *, - struct zfcp_unit *, int); -static int zfcp_erp_strategy_statechange_detected(atomic_t *, u32); -static int zfcp_erp_strategy_followup_actions(int, struct zfcp_adapter *, - struct zfcp_port *, - struct zfcp_unit *, int); -static int zfcp_erp_strategy_check_queues(struct zfcp_adapter *); -static int zfcp_erp_strategy_check_action(struct zfcp_erp_action *, int); - -static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *); -static int zfcp_erp_adapter_strategy_generic(struct 
zfcp_erp_action *, int); -static int zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *); -static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *); -static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *); -static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *); -static int zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *); -static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *); -static int zfcp_erp_adapter_strategy_open_fsf_statusread( - struct zfcp_erp_action *); - -static int zfcp_erp_port_forced_strategy(struct zfcp_erp_action *); -static int zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *); - -static int zfcp_erp_port_strategy(struct zfcp_erp_action *); -static int zfcp_erp_port_strategy_clearstati(struct zfcp_port *); -static int zfcp_erp_port_strategy_close(struct zfcp_erp_action *); -static int zfcp_erp_port_strategy_open(struct zfcp_erp_action *); -static int zfcp_erp_port_strategy_open_nameserver(struct zfcp_erp_action *); -static int zfcp_erp_port_strategy_open_nameserver_wakeup( - struct zfcp_erp_action *); -static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *); -static int zfcp_erp_port_strategy_open_common_lookup(struct zfcp_erp_action *); -static int zfcp_erp_port_strategy_open_port(struct zfcp_erp_action *); - -static int zfcp_erp_unit_strategy(struct zfcp_erp_action *); -static int zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *); -static int zfcp_erp_unit_strategy_close(struct zfcp_erp_action *); -static int zfcp_erp_unit_strategy_open(struct zfcp_erp_action *); - -static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *); -static void zfcp_erp_action_dismiss_port(struct zfcp_port *); -static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *); -static void zfcp_erp_action_dismiss(struct zfcp_erp_action *); - -static int zfcp_erp_action_enqueue(int, struct zfcp_adapter *, - struct zfcp_port *, struct zfcp_unit *, - u8 id, void *ref); -static int zfcp_erp_action_dequeue(struct zfcp_erp_action *); -static void zfcp_erp_action_cleanup(int, struct zfcp_adapter *, - struct zfcp_port *, struct zfcp_unit *, - int); - -static void zfcp_erp_action_ready(struct zfcp_erp_action *); -static int zfcp_erp_action_exists(struct zfcp_erp_action *); - -static void zfcp_erp_action_to_ready(struct zfcp_erp_action *); -static void zfcp_erp_action_to_running(struct zfcp_erp_action *); - -static void zfcp_erp_memwait_handler(unsigned long); +#define ZFCP_MAX_ERPS 3 -/** - * zfcp_close_qdio - close qdio queues for an adapter - */ -static void zfcp_close_qdio(struct zfcp_adapter *adapter) -{ - struct zfcp_qdio_queue *req_queue; - int first, count; +enum zfcp_erp_act_flags { + ZFCP_STATUS_ERP_TIMEDOUT = 0x10000000, + ZFCP_STATUS_ERP_CLOSE_ONLY = 0x01000000, + ZFCP_STATUS_ERP_DISMISSING = 0x00100000, + ZFCP_STATUS_ERP_DISMISSED = 0x00200000, + ZFCP_STATUS_ERP_LOWMEM = 0x00400000, +}; - if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) - return; +enum zfcp_erp_steps { + ZFCP_ERP_STEP_UNINITIALIZED = 0x0000, + ZFCP_ERP_STEP_FSF_XCONFIG = 0x0001, + ZFCP_ERP_STEP_PHYS_PORT_CLOSING = 0x0010, + ZFCP_ERP_STEP_PORT_CLOSING = 0x0100, + ZFCP_ERP_STEP_NAMESERVER_OPEN = 0x0200, + ZFCP_ERP_STEP_NAMESERVER_LOOKUP = 0x0400, + ZFCP_ERP_STEP_PORT_OPENING = 0x0800, + ZFCP_ERP_STEP_UNIT_CLOSING = 0x1000, + ZFCP_ERP_STEP_UNIT_OPENING = 0x2000, +}; - /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */ - req_queue = &adapter->request_queue; 
- write_lock_irq(&req_queue->queue_lock); - atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); - write_unlock_irq(&req_queue->queue_lock); - - while (qdio_shutdown(adapter->ccw_device, - QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS) - ssleep(1); - - /* cleanup used outbound sbals */ - count = atomic_read(&req_queue->free_count); - if (count < QDIO_MAX_BUFFERS_PER_Q) { - first = (req_queue->free_index+count) % QDIO_MAX_BUFFERS_PER_Q; - count = QDIO_MAX_BUFFERS_PER_Q - count; - zfcp_qdio_zero_sbals(req_queue->buffer, first, count); - } - req_queue->free_index = 0; - atomic_set(&req_queue->free_count, 0); - req_queue->distance_from_int = 0; - adapter->response_queue.free_index = 0; - atomic_set(&adapter->response_queue.free_count, 0); +enum zfcp_erp_act_type { + ZFCP_ERP_ACTION_REOPEN_UNIT = 1, + ZFCP_ERP_ACTION_REOPEN_PORT = 2, + ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3, + ZFCP_ERP_ACTION_REOPEN_ADAPTER = 4, +}; + +enum zfcp_erp_act_state { + ZFCP_ERP_ACTION_RUNNING = 1, + ZFCP_ERP_ACTION_READY = 2, +}; + +enum zfcp_erp_act_result { + ZFCP_ERP_SUCCEEDED = 0, + ZFCP_ERP_FAILED = 1, + ZFCP_ERP_CONTINUES = 2, + ZFCP_ERP_EXIT = 3, + ZFCP_ERP_DISMISSED = 4, + ZFCP_ERP_NOMEM = 5, +}; + +static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int mask) +{ + zfcp_erp_modify_adapter_status(adapter, 15, NULL, + ZFCP_STATUS_COMMON_UNBLOCKED | mask, + ZFCP_CLEAR); } -/** - * zfcp_close_fsf - stop FSF operations for an adapter - * - * Dismiss and cleanup all pending fsf_reqs (this wakes up all initiators of - * requests waiting for completion; especially this returns SCSI commands - * with error state). - */ -static void zfcp_close_fsf(struct zfcp_adapter *adapter) +static int zfcp_erp_action_exists(struct zfcp_erp_action *act) { - /* close queues to ensure that buffers are not accessed by adapter */ - zfcp_close_qdio(adapter); - zfcp_fsf_req_dismiss_all(adapter); - /* reset FSF request sequence number */ - adapter->fsf_req_seq_no = 0; - /* all ports and units are closed */ - zfcp_erp_modify_adapter_status(adapter, 24, NULL, - ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR); + struct zfcp_erp_action *curr_act; + + list_for_each_entry(curr_act, &act->adapter->erp_running_head, list) + if (act == curr_act) + return ZFCP_ERP_ACTION_RUNNING; + return 0; } -/** - * zfcp_fsf_request_timeout_handler - called if a request timed out - * @data: pointer to adapter for handler function - * - * This function needs to be called if requests (ELS, Generic Service, - * or SCSI commands) exceed a certain time limit. The assumption is - * that after the time limit the adapter get stuck. So we trigger a reopen of - * the adapter. 
- */ -static void zfcp_fsf_request_timeout_handler(unsigned long data) +static void zfcp_erp_action_ready(struct zfcp_erp_action *act) { - struct zfcp_adapter *adapter = (struct zfcp_adapter *) data; - zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 62, - NULL); + struct zfcp_adapter *adapter = act->adapter; + + list_move(&act->list, &act->adapter->erp_ready_head); + zfcp_rec_dbf_event_action(146, act); + up(&adapter->erp_ready_sem); + zfcp_rec_dbf_event_thread(2, adapter); } -void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req, unsigned long timeout) +static void zfcp_erp_action_dismiss(struct zfcp_erp_action *act) { - fsf_req->timer.function = zfcp_fsf_request_timeout_handler; - fsf_req->timer.data = (unsigned long) fsf_req->adapter; - fsf_req->timer.expires = jiffies + timeout; - add_timer(&fsf_req->timer); + act->status |= ZFCP_STATUS_ERP_DISMISSED; + if (zfcp_erp_action_exists(act) == ZFCP_ERP_ACTION_RUNNING) + zfcp_erp_action_ready(act); } -/* - * function: - * - * purpose: called if an adapter failed, - * initiates adapter recovery which is done - * asynchronously - * - * returns: 0 - initiated action successfully - * <0 - failed to initiate action - */ -static int zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *adapter, - int clear_mask, u8 id, void *ref) +static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit) { - int retval; + if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_ERP_INUSE) + zfcp_erp_action_dismiss(&unit->erp_action); +} - ZFCP_LOG_DEBUG("reopen adapter %s\n", - zfcp_get_busid_by_adapter(adapter)); +static void zfcp_erp_action_dismiss_port(struct zfcp_port *port) +{ + struct zfcp_unit *unit; - zfcp_erp_adapter_block(adapter, clear_mask); + if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE) + zfcp_erp_action_dismiss(&port->erp_action); + else + list_for_each_entry(unit, &port->unit_list_head, list) + zfcp_erp_action_dismiss_unit(unit); +} - if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &adapter->status)) { - ZFCP_LOG_DEBUG("skipped reopen of failed adapter %s\n", - zfcp_get_busid_by_adapter(adapter)); - /* ensure propagation of failed status to new devices */ - zfcp_erp_adapter_failed(adapter, 13, NULL); - retval = -EIO; - goto out; - } - retval = zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, - adapter, NULL, NULL, id, ref); +static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) +{ + struct zfcp_port *port; - out: - return retval; + if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_INUSE) + zfcp_erp_action_dismiss(&adapter->erp_action); + else + list_for_each_entry(port, &adapter->port_list_head, list) + zfcp_erp_action_dismiss_port(port); } -/* - * function: - * - * purpose: Wrappper for zfcp_erp_adapter_reopen_internal - * used to ensure the correct locking - * - * returns: 0 - initiated action successfully - * <0 - failed to initiate action - */ -int zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear_mask, - u8 id, void *ref) +static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter, + struct zfcp_port *port, + struct zfcp_unit *unit) { - int retval; - unsigned long flags; + int need = want; + int u_status, p_status, a_status; - read_lock_irqsave(&zfcp_data.config_lock, flags); - write_lock(&adapter->erp_lock); - retval = zfcp_erp_adapter_reopen_internal(adapter, clear_mask, id, ref); - write_unlock(&adapter->erp_lock); - read_unlock_irqrestore(&zfcp_data.config_lock, flags); + switch (want) { + case ZFCP_ERP_ACTION_REOPEN_UNIT: + u_status = 
atomic_read(&unit->status); + if (u_status & ZFCP_STATUS_COMMON_ERP_INUSE) + return 0; + p_status = atomic_read(&port->status); + if (!(p_status & ZFCP_STATUS_COMMON_RUNNING) || + p_status & ZFCP_STATUS_COMMON_ERP_FAILED) + return 0; + if (!(p_status & ZFCP_STATUS_COMMON_UNBLOCKED)) + need = ZFCP_ERP_ACTION_REOPEN_PORT; + /* fall through */ + case ZFCP_ERP_ACTION_REOPEN_PORT: + case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: + p_status = atomic_read(&port->status); + if (p_status & ZFCP_STATUS_COMMON_ERP_INUSE) + return 0; + a_status = atomic_read(&adapter->status); + if (!(a_status & ZFCP_STATUS_COMMON_RUNNING) || + a_status & ZFCP_STATUS_COMMON_ERP_FAILED) + return 0; + if (!(a_status & ZFCP_STATUS_COMMON_UNBLOCKED)) + need = ZFCP_ERP_ACTION_REOPEN_ADAPTER; + /* fall through */ + case ZFCP_ERP_ACTION_REOPEN_ADAPTER: + a_status = atomic_read(&adapter->status); + if (a_status & ZFCP_STATUS_COMMON_ERP_INUSE) + return 0; + } - return retval; + return need; } -int zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear_mask, - u8 id, void *ref) +static struct zfcp_erp_action *zfcp_erp_setup_act(int need, + struct zfcp_adapter *adapter, + struct zfcp_port *port, + struct zfcp_unit *unit) { - int retval; + struct zfcp_erp_action *erp_action; + u32 status = 0; - retval = zfcp_erp_adapter_reopen(adapter, - ZFCP_STATUS_COMMON_RUNNING | - ZFCP_STATUS_COMMON_ERP_FAILED | - clear_mask, id, ref); + switch (need) { + case ZFCP_ERP_ACTION_REOPEN_UNIT: + zfcp_unit_get(unit); + atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status); + erp_action = &unit->erp_action; + if (!(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_RUNNING)) + status = ZFCP_STATUS_ERP_CLOSE_ONLY; + break; - return retval; -} + case ZFCP_ERP_ACTION_REOPEN_PORT: + case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: + zfcp_port_get(port); + zfcp_erp_action_dismiss_port(port); + atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status); + erp_action = &port->erp_action; + if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING)) + status = ZFCP_STATUS_ERP_CLOSE_ONLY; + break; -int zfcp_erp_port_shutdown(struct zfcp_port *port, int clear_mask, u8 id, - void *ref) -{ - int retval; + case ZFCP_ERP_ACTION_REOPEN_ADAPTER: + zfcp_adapter_get(adapter); + zfcp_erp_action_dismiss_adapter(adapter); + atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status); + erp_action = &adapter->erp_action; + if (!(atomic_read(&adapter->status) & + ZFCP_STATUS_COMMON_RUNNING)) + status = ZFCP_STATUS_ERP_CLOSE_ONLY; + break; - retval = zfcp_erp_port_reopen(port, - ZFCP_STATUS_COMMON_RUNNING | - ZFCP_STATUS_COMMON_ERP_FAILED | - clear_mask, id, ref); + default: + return NULL; + } - return retval; + memset(erp_action, 0, sizeof(struct zfcp_erp_action)); + erp_action->adapter = adapter; + erp_action->port = port; + erp_action->unit = unit; + erp_action->action = need; + erp_action->status = status; + + return erp_action; } -int zfcp_erp_unit_shutdown(struct zfcp_unit *unit, int clear_mask, u8 id, - void *ref) +static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter, + struct zfcp_port *port, + struct zfcp_unit *unit, u8 id, void *ref) { - int retval; + int retval = 1, need; + struct zfcp_erp_action *act = NULL; + + if (!(atomic_read(&adapter->status) & + ZFCP_STATUS_ADAPTER_ERP_THREAD_UP)) + return -EIO; - retval = zfcp_erp_unit_reopen(unit, - ZFCP_STATUS_COMMON_RUNNING | - ZFCP_STATUS_COMMON_ERP_FAILED | - clear_mask, id, ref); + need = zfcp_erp_required_act(want, adapter, port, unit); + if (!need) + goto out; + 
atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status); + act = zfcp_erp_setup_act(need, adapter, port, unit); + if (!act) + goto out; + ++adapter->erp_total_count; + list_add_tail(&act->list, &adapter->erp_ready_head); + up(&adapter->erp_ready_sem); + zfcp_rec_dbf_event_thread(1, adapter); + retval = 0; + out: + zfcp_rec_dbf_event_trigger(id, ref, want, need, act, + adapter, port, unit); return retval; } - -/** - * zfcp_erp_adisc - send ADISC ELS command - * @port: port structure - */ -static int -zfcp_erp_adisc(struct zfcp_port *port) +static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, + int clear_mask, u8 id, void *ref) { - struct zfcp_adapter *adapter = port->adapter; - struct zfcp_send_els *send_els; - struct zfcp_ls_adisc *adisc; - void *address = NULL; - int retval = 0; - - send_els = kzalloc(sizeof(struct zfcp_send_els), GFP_ATOMIC); - if (send_els == NULL) - goto nomem; - - send_els->req = kmalloc(sizeof(struct scatterlist), GFP_ATOMIC); - if (send_els->req == NULL) - goto nomem; - sg_init_table(send_els->req, 1); - - send_els->resp = kmalloc(sizeof(struct scatterlist), GFP_ATOMIC); - if (send_els->resp == NULL) - goto nomem; - sg_init_table(send_els->resp, 1); - - address = (void *) get_zeroed_page(GFP_ATOMIC); - if (address == NULL) - goto nomem; - - zfcp_address_to_sg(address, send_els->req, sizeof(struct zfcp_ls_adisc)); - address += PAGE_SIZE >> 1; - zfcp_address_to_sg(address, send_els->resp, sizeof(struct zfcp_ls_adisc_acc)); - send_els->req_count = send_els->resp_count = 1; - - send_els->adapter = adapter; - send_els->port = port; - send_els->d_id = port->d_id; - send_els->handler = zfcp_erp_adisc_handler; - send_els->handler_data = (unsigned long) send_els; - - adisc = zfcp_sg_to_address(send_els->req); - send_els->ls_code = adisc->code = ZFCP_LS_ADISC; - - /* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports - without FC-AL-2 capability, so we don't set it */ - adisc->wwpn = fc_host_port_name(adapter->scsi_host); - adisc->wwnn = fc_host_node_name(adapter->scsi_host); - adisc->nport_id = fc_host_port_id(adapter->scsi_host); - ZFCP_LOG_INFO("ADISC request from s_id 0x%06x to d_id 0x%06x " - "(wwpn=0x%016Lx, wwnn=0x%016Lx, " - "hard_nport_id=0x%06x, nport_id=0x%06x)\n", - adisc->nport_id, send_els->d_id, (wwn_t) adisc->wwpn, - (wwn_t) adisc->wwnn, adisc->hard_nport_id, - adisc->nport_id); - - retval = zfcp_fsf_send_els(send_els); - if (retval != 0) { - ZFCP_LOG_NORMAL("error: initiation of Send ELS failed for port " - "0x%06x on adapter %s\n", send_els->d_id, - zfcp_get_busid_by_adapter(adapter)); - goto freemem; - } + zfcp_erp_adapter_block(adapter, clear_mask); - goto out; - - nomem: - retval = -ENOMEM; - freemem: - if (address != NULL) - __free_pages(sg_page(send_els->req), 0); - if (send_els != NULL) { - kfree(send_els->req); - kfree(send_els->resp); - kfree(send_els); + /* ensure propagation of failed status to new devices */ + if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) { + zfcp_erp_adapter_failed(adapter, 13, NULL); + return -EIO; } - out: - return retval; + return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, + adapter, NULL, NULL, id, ref); } - /** - * zfcp_erp_adisc_handler - handler for ADISC ELS command - * @data: pointer to struct zfcp_send_els - * - * If ADISC failed (LS_RJT or timed out) forced reopen of the port is triggered. + * zfcp_erp_adapter_reopen - Reopen adapter. + * @adapter: Adapter to reopen. + * @clear: Status flags to clear. + * @id: Id for debug trace event. 
+ * @ref: Reference for debug trace event. */ -static void -zfcp_erp_adisc_handler(unsigned long data) +void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, + u8 id, void *ref) { - struct zfcp_send_els *send_els; - struct zfcp_port *port; - struct zfcp_adapter *adapter; - u32 d_id; - struct zfcp_ls_adisc_acc *adisc; - - send_els = (struct zfcp_send_els *) data; - adapter = send_els->adapter; - port = send_els->port; - d_id = send_els->d_id; - - /* request rejected or timed out */ - if (send_els->status != 0) { - ZFCP_LOG_NORMAL("ELS request rejected/timed out, " - "force physical port reopen " - "(adapter %s, port d_id=0x%06x)\n", - zfcp_get_busid_by_adapter(adapter), d_id); - if (zfcp_erp_port_forced_reopen(port, 0, 63, NULL)) - ZFCP_LOG_NORMAL("failed reopen of port " - "(adapter %s, wwpn=0x%016Lx)\n", - zfcp_get_busid_by_port(port), - port->wwpn); - goto out; - } - - adisc = zfcp_sg_to_address(send_els->resp); - - ZFCP_LOG_INFO("ADISC response from d_id 0x%06x to s_id " - "0x%06x (wwpn=0x%016Lx, wwnn=0x%016Lx, " - "hard_nport_id=0x%06x, nport_id=0x%06x)\n", - d_id, fc_host_port_id(adapter->scsi_host), - (wwn_t) adisc->wwpn, (wwn_t) adisc->wwnn, - adisc->hard_nport_id, adisc->nport_id); - - /* set wwnn for port */ - if (port->wwnn == 0) - port->wwnn = adisc->wwnn; - - if (port->wwpn != adisc->wwpn) { - ZFCP_LOG_NORMAL("d_id assignment changed, reopening " - "port (adapter %s, wwpn=0x%016Lx, " - "adisc_resp_wwpn=0x%016Lx)\n", - zfcp_get_busid_by_port(port), - port->wwpn, (wwn_t) adisc->wwpn); - if (zfcp_erp_port_reopen(port, 0, 64, NULL)) - ZFCP_LOG_NORMAL("failed reopen of port " - "(adapter %s, wwpn=0x%016Lx)\n", - zfcp_get_busid_by_port(port), - port->wwpn); - } + unsigned long flags; - out: - zfcp_port_put(port); - __free_pages(sg_page(send_els->req), 0); - kfree(send_els->req); - kfree(send_els->resp); - kfree(send_els); + read_lock_irqsave(&zfcp_data.config_lock, flags); + write_lock(&adapter->erp_lock); + _zfcp_erp_adapter_reopen(adapter, clear, id, ref); + write_unlock(&adapter->erp_lock); + read_unlock_irqrestore(&zfcp_data.config_lock, flags); } - /** - * zfcp_test_link - lightweight link test procedure - * @port: port to be tested - * - * Test status of a link to a remote port using the ELS command ADISC. + * zfcp_erp_adapter_shutdown - Shutdown adapter. + * @adapter: Adapter to shut down. + * @clear: Status flags to clear. + * @id: Id for debug trace event. + * @ref: Reference for debug trace event. 
*/ -int -zfcp_test_link(struct zfcp_port *port) +void zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear, + u8 id, void *ref) { - int retval; - - zfcp_port_get(port); - retval = zfcp_erp_adisc(port); - if (retval != 0 && retval != -EBUSY) { - zfcp_port_put(port); - ZFCP_LOG_NORMAL("reopen needed for port 0x%016Lx " - "on adapter %s\n ", port->wwpn, - zfcp_get_busid_by_port(port)); - retval = zfcp_erp_port_forced_reopen(port, 0, 65, NULL); - if (retval != 0) { - ZFCP_LOG_NORMAL("reopen of remote port 0x%016Lx " - "on adapter %s failed\n", port->wwpn, - zfcp_get_busid_by_port(port)); - retval = -EPERM; - } - } - - return retval; + int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED; + zfcp_erp_adapter_reopen(adapter, clear | flags, id, ref); } - -/* - * function: - * - * purpose: called if a port failed to be opened normally - * initiates Forced Reopen recovery which is done - * asynchronously - * - * returns: 0 - initiated action successfully - * <0 - failed to initiate action +/** + * zfcp_erp_port_shutdown - Shutdown port + * @port: Port to shut down. + * @clear: Status flags to clear. + * @id: Id for debug trace event. + * @ref: Reference for debug trace event. */ -static int zfcp_erp_port_forced_reopen_internal(struct zfcp_port *port, - int clear_mask, u8 id, - void *ref) +void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, u8 id, void *ref) { - int retval; + int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED; + zfcp_erp_port_reopen(port, clear | flags, id, ref); +} - ZFCP_LOG_DEBUG("forced reopen of port 0x%016Lx on adapter %s\n", - port->wwpn, zfcp_get_busid_by_port(port)); +/** + * zfcp_erp_unit_shutdown - Shutdown unit + * @unit: Unit to shut down. + * @clear: Status flags to clear. + * @id: Id for debug trace event. + * @ref: Reference for debug trace event. + */ +void zfcp_erp_unit_shutdown(struct zfcp_unit *unit, int clear, u8 id, void *ref) +{ + int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED; + zfcp_erp_unit_reopen(unit, clear | flags, id, ref); +} - zfcp_erp_port_block(port, clear_mask); +static void zfcp_erp_port_block(struct zfcp_port *port, int clear) +{ + zfcp_erp_modify_port_status(port, 17, NULL, + ZFCP_STATUS_COMMON_UNBLOCKED | clear, + ZFCP_CLEAR); +} - if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &port->status)) { - ZFCP_LOG_DEBUG("skipped forced reopen of failed port 0x%016Lx " - "on adapter %s\n", port->wwpn, - zfcp_get_busid_by_port(port)); - retval = -EIO; - goto out; - } +static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, + int clear, u8 id, void *ref) +{ + zfcp_erp_port_block(port, clear); - retval = zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED, - port->adapter, port, NULL, id, ref); + if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) + return; - out: - return retval; + zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED, + port->adapter, port, NULL, id, ref); } -/* - * function: - * - * purpose: Wrappper for zfcp_erp_port_forced_reopen_internal - * used to ensure the correct locking - * - * returns: 0 - initiated action successfully - * <0 - failed to initiate action +/** + * zfcp_erp_port_forced_reopen - Forced close of port and open again + * @port: Port to force close and to reopen. + * @id: Id for debug trace event. + * @ref: Reference for debug trace event. 
*/ -int zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear_mask, u8 id, - void *ref) +void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, u8 id, + void *ref) { - int retval; unsigned long flags; - struct zfcp_adapter *adapter; + struct zfcp_adapter *adapter = port->adapter; - adapter = port->adapter; read_lock_irqsave(&zfcp_data.config_lock, flags); write_lock(&adapter->erp_lock); - retval = zfcp_erp_port_forced_reopen_internal(port, clear_mask, id, - ref); + _zfcp_erp_port_forced_reopen(port, clear, id, ref); write_unlock(&adapter->erp_lock); read_unlock_irqrestore(&zfcp_data.config_lock, flags); - - return retval; } -/* - * function: - * - * purpose: called if a port is to be opened - * initiates Reopen recovery which is done - * asynchronously - * - * returns: 0 - initiated action successfully - * <0 - failed to initiate action - */ -static int zfcp_erp_port_reopen_internal(struct zfcp_port *port, int clear_mask, - u8 id, void *ref) +static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, u8 id, + void *ref) { - int retval; - - ZFCP_LOG_DEBUG("reopen of port 0x%016Lx on adapter %s\n", - port->wwpn, zfcp_get_busid_by_port(port)); + zfcp_erp_port_block(port, clear); - zfcp_erp_port_block(port, clear_mask); - - if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &port->status)) { - ZFCP_LOG_DEBUG("skipped reopen of failed port 0x%016Lx " - "on adapter %s\n", port->wwpn, - zfcp_get_busid_by_port(port)); + if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) { /* ensure propagation of failed status to new devices */ zfcp_erp_port_failed(port, 14, NULL); - retval = -EIO; - goto out; + return -EIO; } - retval = zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT, - port->adapter, port, NULL, id, ref); - - out: - return retval; + return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT, + port->adapter, port, NULL, id, ref); } /** - * zfcp_erp_port_reopen - initiate reopen of a remote port - * @port: port to be reopened - * @clear_mask: specifies flags in port status to be cleared - * Return: 0 on success, < 0 on error + * zfcp_erp_port_reopen - trigger remote port recovery + * @port: port to recover + * @clear_mask: flags in port status to be cleared * - * This is a wrappper function for zfcp_erp_port_reopen_internal. It ensures - * correct locking. An error recovery task is initiated to do the reopen. - * To wait for the completion of the reopen zfcp_erp_wait should be used. + * Returns 0 if recovery has been triggered, < 0 if not. 
*/ -int zfcp_erp_port_reopen(struct zfcp_port *port, int clear_mask, u8 id, - void *ref) +int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, u8 id, void *ref) { - int retval; unsigned long flags; + int retval; struct zfcp_adapter *adapter = port->adapter; read_lock_irqsave(&zfcp_data.config_lock, flags); write_lock(&adapter->erp_lock); - retval = zfcp_erp_port_reopen_internal(port, clear_mask, id, ref); + retval = _zfcp_erp_port_reopen(port, clear, id, ref); write_unlock(&adapter->erp_lock); read_unlock_irqrestore(&zfcp_data.config_lock, flags); return retval; } -/* - * function: - * - * purpose: called if a unit is to be opened - * initiates Reopen recovery which is done - * asynchronously - * - * returns: 0 - initiated action successfully - * <0 - failed to initiate action - */ -static int zfcp_erp_unit_reopen_internal(struct zfcp_unit *unit, int clear_mask, - u8 id, void *ref) +static void zfcp_erp_unit_block(struct zfcp_unit *unit, int clear_mask) { - int retval; - struct zfcp_adapter *adapter = unit->port->adapter; + zfcp_erp_modify_unit_status(unit, 19, NULL, + ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask, + ZFCP_CLEAR); +} - ZFCP_LOG_DEBUG("reopen of unit 0x%016Lx on port 0x%016Lx " - "on adapter %s\n", unit->fcp_lun, - unit->port->wwpn, zfcp_get_busid_by_unit(unit)); +static void _zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, u8 id, + void *ref) +{ + struct zfcp_adapter *adapter = unit->port->adapter; - zfcp_erp_unit_block(unit, clear_mask); + zfcp_erp_unit_block(unit, clear); - if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &unit->status)) { - ZFCP_LOG_DEBUG("skipped reopen of failed unit 0x%016Lx " - "on port 0x%016Lx on adapter %s\n", - unit->fcp_lun, unit->port->wwpn, - zfcp_get_busid_by_unit(unit)); - retval = -EIO; - goto out; - } + if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_ERP_FAILED) + return; - retval = zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_UNIT, - adapter, unit->port, unit, id, ref); - out: - return retval; + zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_UNIT, + adapter, unit->port, unit, id, ref); } /** @@ -643,987 +408,182 @@ static int zfcp_erp_unit_reopen_internal(struct zfcp_unit *unit, int clear_mask, * @unit: unit to be reopened * @clear_mask: specifies flags in unit status to be cleared * Return: 0 on success, < 0 on error - * - * This is a wrappper for zfcp_erp_unit_reopen_internal. It ensures correct - * locking. An error recovery task is initiated to do the reopen. - * To wait for the completion of the reopen zfcp_erp_wait should be used. 
*/ -int zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear_mask, u8 id, - void *ref) +void zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, u8 id, void *ref) { - int retval; unsigned long flags; - struct zfcp_adapter *adapter; - struct zfcp_port *port; - - port = unit->port; - adapter = port->adapter; + struct zfcp_port *port = unit->port; + struct zfcp_adapter *adapter = port->adapter; read_lock_irqsave(&zfcp_data.config_lock, flags); write_lock(&adapter->erp_lock); - retval = zfcp_erp_unit_reopen_internal(unit, clear_mask, id, ref); + _zfcp_erp_unit_reopen(unit, clear, id, ref); write_unlock(&adapter->erp_lock); read_unlock_irqrestore(&zfcp_data.config_lock, flags); - - return retval; } -/** - * zfcp_erp_adapter_block - mark adapter as blocked, block scsi requests - */ -static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask) -{ - zfcp_erp_modify_adapter_status(adapter, 15, NULL, - ZFCP_STATUS_COMMON_UNBLOCKED | - clear_mask, ZFCP_CLEAR); -} - -/* FIXME: isn't really atomic */ -/* - * returns the mask which has not been set so far, i.e. - * 0 if no bit has been changed, !0 if some bit has been changed - */ -static int atomic_test_and_set_mask(unsigned long mask, atomic_t *v) +static int status_change_set(unsigned long mask, atomic_t *status) { - int changed_bits = (atomic_read(v) /*XOR*/^ mask) & mask; - atomic_set_mask(mask, v); - return changed_bits; + return (atomic_read(status) ^ mask) & mask; } -/* FIXME: isn't really atomic */ -/* - * returns the mask which has not been cleared so far, i.e. - * 0 if no bit has been changed, !0 if some bit has been changed - */ -static int atomic_test_and_clear_mask(unsigned long mask, atomic_t *v) +static int status_change_clear(unsigned long mask, atomic_t *status) { - int changed_bits = atomic_read(v) & mask; - atomic_clear_mask(mask, v); - return changed_bits; + return atomic_read(status) & mask; } -/** - * zfcp_erp_adapter_unblock - mark adapter as unblocked, allow scsi requests - */ static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter) { - if (atomic_test_and_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, - &adapter->status)) + if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status)) zfcp_rec_dbf_event_adapter(16, NULL, adapter); + atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status); } -/* - * function: - * - * purpose: disable I/O, - * return any open requests and clean them up, - * aim: no pending and incoming I/O - * - * returns: - */ -static void -zfcp_erp_port_block(struct zfcp_port *port, int clear_mask) -{ - zfcp_erp_modify_port_status(port, 17, NULL, - ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask, - ZFCP_CLEAR); -} - -/* - * function: - * - * purpose: enable I/O - * - * returns: - */ -static void -zfcp_erp_port_unblock(struct zfcp_port *port) +static void zfcp_erp_port_unblock(struct zfcp_port *port) { - if (atomic_test_and_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, - &port->status)) + if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status)) zfcp_rec_dbf_event_port(18, NULL, port); + atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status); } -/* - * function: - * - * purpose: disable I/O, - * return any open requests and clean them up, - * aim: no pending and incoming I/O - * - * returns: - */ -static void -zfcp_erp_unit_block(struct zfcp_unit *unit, int clear_mask) -{ - zfcp_erp_modify_unit_status(unit, 19, NULL, - ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask, - ZFCP_CLEAR); -} - -/* - * function: - * - * purpose: enable I/O - * - * returns: - */ -static void 
-zfcp_erp_unit_unblock(struct zfcp_unit *unit) +static void zfcp_erp_unit_unblock(struct zfcp_unit *unit) { - if (atomic_test_and_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, - &unit->status)) + if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status)) zfcp_rec_dbf_event_unit(20, NULL, unit); + atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status); } -static void -zfcp_erp_action_ready(struct zfcp_erp_action *erp_action) +static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action) { - struct zfcp_adapter *adapter = erp_action->adapter; - - zfcp_erp_action_to_ready(erp_action); - up(&adapter->erp_ready_sem); - zfcp_rec_dbf_event_thread(2, adapter, 0); + list_move(&erp_action->list, &erp_action->adapter->erp_running_head); + zfcp_rec_dbf_event_action(145, erp_action); } -/* - * function: - * - * purpose: - * - * returns: <0 erp_action not found in any list - * ZFCP_ERP_ACTION_READY erp_action is in ready list - * ZFCP_ERP_ACTION_RUNNING erp_action is in running list - * - * locks: erp_lock must be held - */ -static int -zfcp_erp_action_exists(struct zfcp_erp_action *erp_action) +static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act) { - int retval = -EINVAL; - struct list_head *entry; - struct zfcp_erp_action *entry_erp_action; - struct zfcp_adapter *adapter = erp_action->adapter; - - /* search in running list */ - list_for_each(entry, &adapter->erp_running_head) { - entry_erp_action = - list_entry(entry, struct zfcp_erp_action, list); - if (entry_erp_action == erp_action) { - retval = ZFCP_ERP_ACTION_RUNNING; - goto out; - } - } - /* search in ready list */ - list_for_each(entry, &adapter->erp_ready_head) { - entry_erp_action = - list_entry(entry, struct zfcp_erp_action, list); - if (entry_erp_action == erp_action) { - retval = ZFCP_ERP_ACTION_READY; - goto out; - } - } + struct zfcp_adapter *adapter = act->adapter; - out: - return retval; -} - -/* - * purpose: checks current status of action (timed out, dismissed, ...) - * and does appropriate preparations (dismiss fsf request, ...) - * - * locks: called under erp_lock (disabled interrupts) - */ -static void -zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action) -{ - struct zfcp_adapter *adapter = erp_action->adapter; + if (!act->fsf_req) + return; - if (erp_action->fsf_req) { - /* take lock to ensure that request is not deleted meanwhile */ - spin_lock(&adapter->req_list_lock); - if (zfcp_reqlist_find_safe(adapter, erp_action->fsf_req) && - erp_action->fsf_req->erp_action == erp_action) { - /* fsf_req still exists */ - /* dismiss fsf_req of timed out/dismissed erp_action */ - if (erp_action->status & (ZFCP_STATUS_ERP_DISMISSED | - ZFCP_STATUS_ERP_TIMEDOUT)) { - erp_action->fsf_req->status |= - ZFCP_STATUS_FSFREQ_DISMISSED; - zfcp_rec_dbf_event_action(142, erp_action); - } - if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) { - zfcp_rec_dbf_event_action(143, erp_action); - ZFCP_LOG_NORMAL("error: erp step timed out " - "(action=%d, fsf_req=%p)\n ", - erp_action->action, - erp_action->fsf_req); - } - /* - * If fsf_req is neither dismissed nor completed - * then keep it running asynchronously and don't mess - * with the association of erp_action and fsf_req. 
- */ - if (erp_action->fsf_req->status & - (ZFCP_STATUS_FSFREQ_COMPLETED | - ZFCP_STATUS_FSFREQ_DISMISSED)) { - /* forget about association between fsf_req - and erp_action */ - erp_action->fsf_req = NULL; - } - } else { - /* - * even if this fsf_req has gone, forget about - * association between erp_action and fsf_req - */ - erp_action->fsf_req = NULL; + spin_lock(&adapter->req_list_lock); + if (zfcp_reqlist_find_safe(adapter, act->fsf_req) && + act->fsf_req->erp_action == act) { + if (act->status & (ZFCP_STATUS_ERP_DISMISSED | + ZFCP_STATUS_ERP_TIMEDOUT)) { + act->fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; + zfcp_rec_dbf_event_action(142, act); } - spin_unlock(&adapter->req_list_lock); - } + if (act->status & ZFCP_STATUS_ERP_TIMEDOUT) + zfcp_rec_dbf_event_action(143, act); + if (act->fsf_req->status & (ZFCP_STATUS_FSFREQ_COMPLETED | + ZFCP_STATUS_FSFREQ_DISMISSED)) + act->fsf_req = NULL; + } else + act->fsf_req = NULL; + spin_unlock(&adapter->req_list_lock); } /** - * zfcp_erp_async_handler_nolock - complete erp_action - * - * Used for normal completion, time-out, dismissal and failure after - * low memory condition. + * zfcp_erp_notify - Trigger ERP action. + * @erp_action: ERP action to continue. + * @set_mask: ERP action status flags to set. */ -static void zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action, - unsigned long set_mask) -{ - if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) { - erp_action->status |= set_mask; - zfcp_erp_action_ready(erp_action); - } else { - /* action is ready or gone - nothing to do */ - } -} - -/** - * zfcp_erp_async_handler - wrapper for erp_async_handler_nolock w/ locking - */ -void zfcp_erp_async_handler(struct zfcp_erp_action *erp_action, - unsigned long set_mask) +void zfcp_erp_notify(struct zfcp_erp_action *erp_action, unsigned long set_mask) { struct zfcp_adapter *adapter = erp_action->adapter; unsigned long flags; write_lock_irqsave(&adapter->erp_lock, flags); - zfcp_erp_async_handler_nolock(erp_action, set_mask); - write_unlock_irqrestore(&adapter->erp_lock, flags); -} - -/* - * purpose: is called for erp_action which was slept waiting for - * memory becoming avaliable, - * will trigger that this action will be continued - */ -static void -zfcp_erp_memwait_handler(unsigned long data) -{ - struct zfcp_erp_action *erp_action = (struct zfcp_erp_action *) data; - - zfcp_erp_async_handler(erp_action, 0); -} - -/* - * purpose: is called if an asynchronous erp step timed out, - * action gets an appropriate flag and will be processed - * accordingly - */ -static void zfcp_erp_timeout_handler(unsigned long data) -{ - struct zfcp_erp_action *erp_action = (struct zfcp_erp_action *) data; - - zfcp_erp_async_handler(erp_action, ZFCP_STATUS_ERP_TIMEDOUT); -} - -/** - * zfcp_erp_action_dismiss - dismiss an erp_action - * - * adapter->erp_lock must be held - * - * Dismissal of an erp_action is usually required if an erp_action of - * higher priority is generated. 
- */ -static void zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action) -{ - erp_action->status |= ZFCP_STATUS_ERP_DISMISSED; - if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) + if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) { + erp_action->status |= set_mask; zfcp_erp_action_ready(erp_action); -} - -int -zfcp_erp_thread_setup(struct zfcp_adapter *adapter) -{ - int retval = 0; - - atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status); - - retval = kernel_thread(zfcp_erp_thread, adapter, SIGCHLD); - if (retval < 0) { - ZFCP_LOG_NORMAL("error: creation of erp thread failed for " - "adapter %s\n", - zfcp_get_busid_by_adapter(adapter)); - } else { - wait_event(adapter->erp_thread_wqh, - atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, - &adapter->status)); } - - return (retval < 0); -} - -/* - * function: - * - * purpose: - * - * returns: - * - * context: process (i.e. proc-fs or rmmod/insmod) - * - * note: The caller of this routine ensures that the specified - * adapter has been shut down and that this operation - * has been completed. Thus, there are no pending erp_actions - * which would need to be handled here. - */ -int -zfcp_erp_thread_kill(struct zfcp_adapter *adapter) -{ - int retval = 0; - - atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, &adapter->status); - up(&adapter->erp_ready_sem); - zfcp_rec_dbf_event_thread(2, adapter, 1); - - wait_event(adapter->erp_thread_wqh, - !atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, - &adapter->status)); - - atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, - &adapter->status); - - return retval; -} - -/* - * purpose: is run as a kernel thread, - * goes through list of error recovery actions of associated adapter - * and delegates single action to execution - * - * returns: 0 - */ -static int -zfcp_erp_thread(void *data) -{ - struct zfcp_adapter *adapter = (struct zfcp_adapter *) data; - struct list_head *next; - struct zfcp_erp_action *erp_action; - unsigned long flags; - - daemonize("zfcperp%s", zfcp_get_busid_by_adapter(adapter)); - /* Block all signals */ - siginitsetinv(¤t->blocked, 0); - atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status); - wake_up(&adapter->erp_thread_wqh); - - while (!atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, - &adapter->status)) { - - write_lock_irqsave(&adapter->erp_lock, flags); - next = adapter->erp_ready_head.next; - write_unlock_irqrestore(&adapter->erp_lock, flags); - - if (next != &adapter->erp_ready_head) { - erp_action = - list_entry(next, struct zfcp_erp_action, list); - /* - * process action (incl. [re]moving it - * from 'ready' queue) - */ - zfcp_erp_strategy(erp_action); - } - - /* - * sleep as long as there is nothing to do, i.e. 
- * no action in 'ready' queue to be processed and - * thread is not to be killed - */ - zfcp_rec_dbf_event_thread(4, adapter, 1); - down_interruptible(&adapter->erp_ready_sem); - zfcp_rec_dbf_event_thread(5, adapter, 1); - } - - atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status); - wake_up(&adapter->erp_thread_wqh); - - return 0; -} - -/* - * function: - * - * purpose: drives single error recovery action and schedules higher and - * subordinate actions, if necessary - * - * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously) - * ZFCP_ERP_SUCCEEDED - action finished successfully (deqd) - * ZFCP_ERP_FAILED - action finished unsuccessfully (deqd) - * ZFCP_ERP_EXIT - action finished (dequeued), offline - * ZFCP_ERP_DISMISSED - action canceled (dequeued) - */ -static int -zfcp_erp_strategy(struct zfcp_erp_action *erp_action) -{ - int retval = 0; - struct zfcp_adapter *adapter = erp_action->adapter; - struct zfcp_port *port = erp_action->port; - struct zfcp_unit *unit = erp_action->unit; - int action = erp_action->action; - u32 status = erp_action->status; - unsigned long flags; - - /* serialise dismissing, timing out, moving, enqueueing */ - read_lock_irqsave(&zfcp_data.config_lock, flags); - write_lock(&adapter->erp_lock); - - /* dequeue dismissed action and leave, if required */ - retval = zfcp_erp_strategy_check_action(erp_action, retval); - if (retval == ZFCP_ERP_DISMISSED) { - goto unlock; - } - - /* - * move action to 'running' queue before processing it - * (to avoid a race condition regarding moving the - * action to the 'running' queue and back) - */ - zfcp_erp_action_to_running(erp_action); - - /* - * try to process action as far as possible, - * no lock to allow for blocking operations (kmalloc, qdio, ...), - * afterwards the lock is required again for the following reasons: - * - dequeueing of finished action and enqueueing of - * follow-up actions must be atomic so that any other - * reopen-routine does not believe there is nothing to do - * and that it is safe to enqueue something else, - * - we want to force any control thread which is dismissing - * actions to finish this before we decide about - * necessary steps to be taken here further - */ - write_unlock(&adapter->erp_lock); - read_unlock_irqrestore(&zfcp_data.config_lock, flags); - retval = zfcp_erp_strategy_do_action(erp_action); - read_lock_irqsave(&zfcp_data.config_lock, flags); - write_lock(&adapter->erp_lock); - - /* - * check for dismissed status again to avoid follow-up actions, - * failing of targets and so on for dismissed actions, - * we go through down() here because there has been an up() - */ - if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) - retval = ZFCP_ERP_CONTINUES; - - switch (retval) { - case ZFCP_ERP_NOMEM: - /* no memory to continue immediately, let it sleep */ - if (!(erp_action->status & ZFCP_STATUS_ERP_LOWMEM)) { - ++adapter->erp_low_mem_count; - erp_action->status |= ZFCP_STATUS_ERP_LOWMEM; - } - /* This condition is true if there is no memory available - for any erp_action on this adapter. This implies that there - are no elements in the memory pool(s) left for erp_actions. - This might happen if an erp_action that used a memory pool - element was timed out. 
- */ - if (adapter->erp_total_count == adapter->erp_low_mem_count) { - ZFCP_LOG_NORMAL("error: no mempool elements available, " - "restarting I/O on adapter %s " - "to free mempool\n", - zfcp_get_busid_by_adapter(adapter)); - zfcp_erp_adapter_reopen_internal(adapter, 0, 66, NULL); - } else { - retval = zfcp_erp_strategy_memwait(erp_action); - } - goto unlock; - case ZFCP_ERP_CONTINUES: - /* leave since this action runs asynchronously */ - if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) { - --adapter->erp_low_mem_count; - erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM; - } - goto unlock; - } - /* ok, finished action (whatever its result is) */ - - /* check for unrecoverable targets */ - retval = zfcp_erp_strategy_check_target(erp_action, retval); - - /* action must be dequeued (here to allow for further ones) */ - zfcp_erp_action_dequeue(erp_action); - - /* - * put this target through the erp mill again if someone has - * requested to change the status of a target being online - * to offline or the other way around - * (old retval is preserved if nothing has to be done here) - */ - retval = zfcp_erp_strategy_statechange(action, status, adapter, - port, unit, retval); - - /* - * leave if target is in permanent error state or if - * action is repeated in order to process state change - */ - if (retval == ZFCP_ERP_EXIT) { - goto unlock; - } - - /* trigger follow up actions */ - zfcp_erp_strategy_followup_actions(action, adapter, port, unit, retval); - - unlock: - write_unlock(&adapter->erp_lock); - read_unlock_irqrestore(&zfcp_data.config_lock, flags); - - if (retval != ZFCP_ERP_CONTINUES) - zfcp_erp_action_cleanup(action, adapter, port, unit, retval); - - /* - * a few tasks remain when the erp queues are empty - * (don't do that if the last action evaluated was dismissed - * since this clearly indicates that there is more to come) : - * - close the name server port if it is open yet - * (enqueues another [probably] final action) - * - otherwise, wake up whoever wants to be woken when we are - * done with erp - */ - if (retval != ZFCP_ERP_DISMISSED) - zfcp_erp_strategy_check_queues(adapter); - - return retval; + write_unlock_irqrestore(&adapter->erp_lock, flags); } -/* - * function: - * - * purpose: - * - * returns: ZFCP_ERP_DISMISSED - if action has been dismissed - * retval - otherwise +/** + * zfcp_erp_timeout_handler - Trigger ERP action from timed out ERP request + * @data: ERP action (from timer data) */ -static int -zfcp_erp_strategy_check_action(struct zfcp_erp_action *erp_action, int retval) +void zfcp_erp_timeout_handler(unsigned long data) { - zfcp_erp_strategy_check_fsfreq(erp_action); - - if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) { - zfcp_erp_action_dequeue(erp_action); - retval = ZFCP_ERP_DISMISSED; - } - - return retval; + struct zfcp_erp_action *act = (struct zfcp_erp_action *) data; + zfcp_erp_notify(act, ZFCP_STATUS_ERP_TIMEDOUT); } -static int -zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action) +static void zfcp_erp_memwait_handler(unsigned long data) { - int retval = ZFCP_ERP_FAILED; - - /* - * try to execute/continue action as far as possible, - * note: no lock in subsequent strategy routines - * (this allows these routine to call schedule, e.g. - * kmalloc with such flags or qdio_initialize & friends) - * Note: in case of timeout, the separate strategies will fail - * anyhow. No need for a special action. Even worse, a nameserver - * failure would not wake up waiting ports without the call. 
- */ - switch (erp_action->action) { - - case ZFCP_ERP_ACTION_REOPEN_ADAPTER: - retval = zfcp_erp_adapter_strategy(erp_action); - break; - - case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: - retval = zfcp_erp_port_forced_strategy(erp_action); - break; - - case ZFCP_ERP_ACTION_REOPEN_PORT: - retval = zfcp_erp_port_strategy(erp_action); - break; - - case ZFCP_ERP_ACTION_REOPEN_UNIT: - retval = zfcp_erp_unit_strategy(erp_action); - break; - - default: - ZFCP_LOG_NORMAL("bug: unknown erp action requested on " - "adapter %s (action=%d)\n", - zfcp_get_busid_by_adapter(erp_action->adapter), - erp_action->action); - } - - return retval; + zfcp_erp_notify((struct zfcp_erp_action *)data, 0); } -/* - * function: - * - * purpose: triggers retry of this action after a certain amount of time - * by means of timer provided by erp_action - * - * returns: ZFCP_ERP_CONTINUES - erp_action sleeps in erp running queue - */ -static int -zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action) +static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action) { - int retval = ZFCP_ERP_CONTINUES; - init_timer(&erp_action->timer); erp_action->timer.function = zfcp_erp_memwait_handler; erp_action->timer.data = (unsigned long) erp_action; - erp_action->timer.expires = jiffies + ZFCP_ERP_MEMWAIT_TIMEOUT; + erp_action->timer.expires = jiffies + HZ; add_timer(&erp_action->timer); - - return retval; } -/* - * function: zfcp_erp_adapter_failed - * - * purpose: sets the adapter and all underlying devices to ERP_FAILED - * - */ -void -zfcp_erp_adapter_failed(struct zfcp_adapter *adapter, u8 id, void *ref) -{ - zfcp_erp_modify_adapter_status(adapter, id, ref, - ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET); - ZFCP_LOG_NORMAL("adapter erp failed on adapter %s\n", - zfcp_get_busid_by_adapter(adapter)); -} - -/* - * function: zfcp_erp_port_failed - * - * purpose: sets the port and all underlying devices to ERP_FAILED - * - */ -void -zfcp_erp_port_failed(struct zfcp_port *port, u8 id, void *ref) -{ - zfcp_erp_modify_port_status(port, id, ref, - ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET); - - if (atomic_test_mask(ZFCP_STATUS_PORT_WKA, &port->status)) - ZFCP_LOG_NORMAL("port erp failed (adapter %s, " - "port d_id=0x%06x)\n", - zfcp_get_busid_by_port(port), port->d_id); - else - ZFCP_LOG_NORMAL("port erp failed (adapter %s, wwpn=0x%016Lx)\n", - zfcp_get_busid_by_port(port), port->wwpn); -} - -/* - * function: zfcp_erp_unit_failed - * - * purpose: sets the unit to ERP_FAILED - * - */ -void -zfcp_erp_unit_failed(struct zfcp_unit *unit, u8 id, void *ref) -{ - zfcp_erp_modify_unit_status(unit, id, ref, - ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET); - - ZFCP_LOG_NORMAL("unit erp failed on unit 0x%016Lx on port 0x%016Lx " - " on adapter %s\n", unit->fcp_lun, - unit->port->wwpn, zfcp_get_busid_by_unit(unit)); -} - -/* - * function: zfcp_erp_strategy_check_target - * - * purpose: increments the erp action count on the device currently in - * recovery if the action failed or resets the count in case of - * success. If a maximum count is exceeded the device is marked - * as ERP_FAILED. - * The 'blocked' state of a target which has been recovered - * successfully is reset. 
- * - * returns: ZFCP_ERP_CONTINUES - action continues (not considered) - * ZFCP_ERP_SUCCEEDED - action finished successfully - * ZFCP_ERP_EXIT - action failed and will not continue - */ -static int -zfcp_erp_strategy_check_target(struct zfcp_erp_action *erp_action, int result) -{ - struct zfcp_adapter *adapter = erp_action->adapter; - struct zfcp_port *port = erp_action->port; - struct zfcp_unit *unit = erp_action->unit; - - switch (erp_action->action) { - - case ZFCP_ERP_ACTION_REOPEN_UNIT: - result = zfcp_erp_strategy_check_unit(unit, result); - break; - - case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: - case ZFCP_ERP_ACTION_REOPEN_PORT: - result = zfcp_erp_strategy_check_port(port, result); - break; - - case ZFCP_ERP_ACTION_REOPEN_ADAPTER: - result = zfcp_erp_strategy_check_adapter(adapter, result); - break; - } - - return result; -} - -static int -zfcp_erp_strategy_statechange(int action, - u32 status, - struct zfcp_adapter *adapter, - struct zfcp_port *port, - struct zfcp_unit *unit, int retval) -{ - switch (action) { - - case ZFCP_ERP_ACTION_REOPEN_ADAPTER: - if (zfcp_erp_strategy_statechange_detected(&adapter->status, - status)) { - zfcp_erp_adapter_reopen_internal(adapter, - ZFCP_STATUS_COMMON_ERP_FAILED, - 67, NULL); - retval = ZFCP_ERP_EXIT; - } - break; - - case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: - case ZFCP_ERP_ACTION_REOPEN_PORT: - if (zfcp_erp_strategy_statechange_detected(&port->status, - status)) { - zfcp_erp_port_reopen_internal(port, - ZFCP_STATUS_COMMON_ERP_FAILED, - 68, NULL); - retval = ZFCP_ERP_EXIT; - } - break; - - case ZFCP_ERP_ACTION_REOPEN_UNIT: - if (zfcp_erp_strategy_statechange_detected(&unit->status, - status)) { - zfcp_erp_unit_reopen_internal(unit, - ZFCP_STATUS_COMMON_ERP_FAILED, - 69, NULL); - retval = ZFCP_ERP_EXIT; - } - break; - } - - return retval; -} - -static int -zfcp_erp_strategy_statechange_detected(atomic_t * target_status, u32 erp_status) +static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter, + int clear, u8 id, void *ref) { - return - /* take it online */ - (atomic_test_mask(ZFCP_STATUS_COMMON_RUNNING, target_status) && - (ZFCP_STATUS_ERP_CLOSE_ONLY & erp_status)) || - /* take it offline */ - (!atomic_test_mask(ZFCP_STATUS_COMMON_RUNNING, target_status) && - !(ZFCP_STATUS_ERP_CLOSE_ONLY & erp_status)); -} - -static int -zfcp_erp_strategy_check_unit(struct zfcp_unit *unit, int result) -{ - switch (result) { - case ZFCP_ERP_SUCCEEDED : - atomic_set(&unit->erp_counter, 0); - zfcp_erp_unit_unblock(unit); - break; - case ZFCP_ERP_FAILED : - atomic_inc(&unit->erp_counter); - if (atomic_read(&unit->erp_counter) > ZFCP_MAX_ERPS) - zfcp_erp_unit_failed(unit, 21, NULL); - break; - case ZFCP_ERP_EXIT : - /* nothing */ - break; - } - - if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &unit->status)) { - zfcp_erp_unit_block(unit, 0); /* for ZFCP_ERP_SUCCEEDED */ - result = ZFCP_ERP_EXIT; - } - - return result; -} - -static int -zfcp_erp_strategy_check_port(struct zfcp_port *port, int result) -{ - switch (result) { - case ZFCP_ERP_SUCCEEDED : - atomic_set(&port->erp_counter, 0); - zfcp_erp_port_unblock(port); - break; - case ZFCP_ERP_FAILED : - atomic_inc(&port->erp_counter); - if (atomic_read(&port->erp_counter) > ZFCP_MAX_ERPS) - zfcp_erp_port_failed(port, 22, NULL); - break; - case ZFCP_ERP_EXIT : - /* nothing */ - break; - } - - if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &port->status)) { - zfcp_erp_port_block(port, 0); /* for ZFCP_ERP_SUCCEEDED */ - result = ZFCP_ERP_EXIT; - } + struct zfcp_port *port; - return result; + 
list_for_each_entry(port, &adapter->port_list_head, list) + if (!(atomic_read(&port->status) & ZFCP_STATUS_PORT_WKA)) + _zfcp_erp_port_reopen(port, clear, id, ref); } -static int -zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter, int result) +static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear, u8 id, + void *ref) { - switch (result) { - case ZFCP_ERP_SUCCEEDED : - atomic_set(&adapter->erp_counter, 0); - zfcp_erp_adapter_unblock(adapter); - break; - case ZFCP_ERP_FAILED : - atomic_inc(&adapter->erp_counter); - if (atomic_read(&adapter->erp_counter) > ZFCP_MAX_ERPS) - zfcp_erp_adapter_failed(adapter, 23, NULL); - break; - case ZFCP_ERP_EXIT : - /* nothing */ - break; - } - - if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &adapter->status)) { - zfcp_erp_adapter_block(adapter, 0); /* for ZFCP_ERP_SUCCEEDED */ - result = ZFCP_ERP_EXIT; - } - - return result; -} - -struct zfcp_erp_add_work { - struct zfcp_unit *unit; - struct work_struct work; -}; + struct zfcp_unit *unit; -/** - * zfcp_erp_scsi_scan - * @data: pointer to a struct zfcp_erp_add_work - * - * Registers a logical unit with the SCSI stack. - */ -static void zfcp_erp_scsi_scan(struct work_struct *work) -{ - struct zfcp_erp_add_work *p = - container_of(work, struct zfcp_erp_add_work, work); - struct zfcp_unit *unit = p->unit; - struct fc_rport *rport = unit->port->rport; - scsi_scan_target(&rport->dev, 0, rport->scsi_target_id, - unit->scsi_lun, 0); - atomic_clear_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status); - zfcp_unit_put(unit); - kfree(p); + list_for_each_entry(unit, &port->unit_list_head, list) + _zfcp_erp_unit_reopen(unit, clear, id, ref); } -/** - * zfcp_erp_schedule_work - * @unit: pointer to unit which should be registered with SCSI stack - * - * Schedules work which registers a unit with the SCSI stack - */ -static void -zfcp_erp_schedule_work(struct zfcp_unit *unit) +static void zfcp_erp_strategy_followup_actions(struct zfcp_erp_action *act) { - struct zfcp_erp_add_work *p; + struct zfcp_adapter *adapter = act->adapter; + struct zfcp_port *port = act->port; + struct zfcp_unit *unit = act->unit; + u32 status = act->status; - p = kzalloc(sizeof(*p), GFP_KERNEL); - if (!p) { - ZFCP_LOG_NORMAL("error: Out of resources. 
Could not register " - "the FCP-LUN 0x%Lx connected to " - "the port with WWPN 0x%Lx connected to " - "the adapter %s with the SCSI stack.\n", - unit->fcp_lun, - unit->port->wwpn, - zfcp_get_busid_by_unit(unit)); - return; - } - - zfcp_unit_get(unit); - atomic_set_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status); - INIT_WORK(&p->work, zfcp_erp_scsi_scan); - p->unit = unit; - schedule_work(&p->work); -} - -/* - * function: - * - * purpose: remaining things in good cases, - * escalation in bad cases - * - * returns: - */ -static int -zfcp_erp_strategy_followup_actions(int action, - struct zfcp_adapter *adapter, - struct zfcp_port *port, - struct zfcp_unit *unit, int status) -{ /* initiate follow-up actions depending on success of finished action */ - switch (action) { + switch (act->action) { case ZFCP_ERP_ACTION_REOPEN_ADAPTER: if (status == ZFCP_ERP_SUCCEEDED) - zfcp_erp_port_reopen_all_internal(adapter, 0, 70, NULL); + _zfcp_erp_port_reopen_all(adapter, 0, 70, NULL); else - zfcp_erp_adapter_reopen_internal(adapter, 0, 71, NULL); + _zfcp_erp_adapter_reopen(adapter, 0, 71, NULL); break; case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: if (status == ZFCP_ERP_SUCCEEDED) - zfcp_erp_port_reopen_internal(port, 0, 72, NULL); + _zfcp_erp_port_reopen(port, 0, 72, NULL); else - zfcp_erp_adapter_reopen_internal(adapter, 0, 73, NULL); + _zfcp_erp_adapter_reopen(adapter, 0, 73, NULL); break; case ZFCP_ERP_ACTION_REOPEN_PORT: if (status == ZFCP_ERP_SUCCEEDED) - zfcp_erp_unit_reopen_all_internal(port, 0, 74, NULL); + _zfcp_erp_unit_reopen_all(port, 0, 74, NULL); else - zfcp_erp_port_forced_reopen_internal(port, 0, 75, NULL); + _zfcp_erp_port_forced_reopen(port, 0, 75, NULL); break; case ZFCP_ERP_ACTION_REOPEN_UNIT: - /* Nothing to do if status == ZFCP_ERP_SUCCEEDED */ if (status != ZFCP_ERP_SUCCEEDED) - zfcp_erp_port_reopen_internal(unit->port, 0, 76, NULL); + _zfcp_erp_port_reopen(unit->port, 0, 76, NULL); break; } - - return 0; } -static int -zfcp_erp_strategy_check_queues(struct zfcp_adapter *adapter) +static void zfcp_erp_wakeup(struct zfcp_adapter *adapter) { unsigned long flags; @@ -1637,1277 +597,622 @@ zfcp_erp_strategy_check_queues(struct zfcp_adapter *adapter) } read_unlock(&adapter->erp_lock); read_unlock_irqrestore(&zfcp_data.config_lock, flags); - - return 0; } -/** - * zfcp_erp_wait - wait for completion of error recovery on an adapter - * @adapter: adapter for which to wait for completion of its error recovery - * Return: 0 - */ -int -zfcp_erp_wait(struct zfcp_adapter *adapter) +static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *act) { - int retval = 0; - - wait_event(adapter->erp_done_wqh, - !atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, - &adapter->status)); - - return retval; + if (zfcp_qdio_open(act->adapter)) + return ZFCP_ERP_FAILED; + init_waitqueue_head(&act->adapter->request_wq); + atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &act->adapter->status); + return ZFCP_ERP_SUCCEEDED; } -void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, u8 id, - void *ref, u32 mask, int set_or_clear) +static void zfcp_erp_enqueue_ptp_port(struct zfcp_adapter *adapter) { struct zfcp_port *port; - u32 changed, common_mask = mask & ZFCP_COMMON_FLAGS; - - if (set_or_clear == ZFCP_SET) { - changed = atomic_test_and_set_mask(mask, &adapter->status); - } else { - changed = atomic_test_and_clear_mask(mask, &adapter->status); - if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) - atomic_set(&adapter->erp_counter, 0); - } - if (changed) - zfcp_rec_dbf_event_adapter(id, ref, adapter); - 
- /* Deal with all underlying devices, only pass common_mask */ - if (common_mask) - list_for_each_entry(port, &adapter->port_list_head, list) - zfcp_erp_modify_port_status(port, id, ref, common_mask, - set_or_clear); + port = zfcp_port_enqueue(adapter, adapter->peer_wwpn, 0, + adapter->peer_d_id); + if (IS_ERR(port)) /* error or port already attached */ + return; + _zfcp_erp_port_reopen(port, 0, 150, NULL); } -/* - * function: zfcp_erp_modify_port_status - * - * purpose: sets the port and all underlying devices to ERP_FAILED - * - */ -void zfcp_erp_modify_port_status(struct zfcp_port *port, u8 id, void *ref, - u32 mask, int set_or_clear) +static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action) { - struct zfcp_unit *unit; - u32 changed, common_mask = mask & ZFCP_COMMON_FLAGS; - - if (set_or_clear == ZFCP_SET) { - changed = atomic_test_and_set_mask(mask, &port->status); - } else { - changed = atomic_test_and_clear_mask(mask, &port->status); - if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) - atomic_set(&port->erp_counter, 0); - } - if (changed) - zfcp_rec_dbf_event_port(id, ref, port); - - /* Modify status of all underlying devices, only pass common mask */ - if (common_mask) - list_for_each_entry(unit, &port->unit_list_head, list) - zfcp_erp_modify_unit_status(unit, id, ref, common_mask, - set_or_clear); -} + int retries; + int sleep = 1; + struct zfcp_adapter *adapter = erp_action->adapter; -/* - * function: zfcp_erp_modify_unit_status - * - * purpose: sets the unit to ERP_FAILED - * - */ -void zfcp_erp_modify_unit_status(struct zfcp_unit *unit, u8 id, void *ref, - u32 mask, int set_or_clear) -{ - u32 changed; + atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status); - if (set_or_clear == ZFCP_SET) { - changed = atomic_test_and_set_mask(mask, &unit->status); - } else { - changed = atomic_test_and_clear_mask(mask, &unit->status); - if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) { - atomic_set(&unit->erp_counter, 0); + for (retries = 7; retries; retries--) { + atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, + &adapter->status); + write_lock_irq(&adapter->erp_lock); + zfcp_erp_action_to_running(erp_action); + write_unlock_irq(&adapter->erp_lock); + if (zfcp_fsf_exchange_config_data(erp_action)) { + atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, + &adapter->status); + return ZFCP_ERP_FAILED; } - } - if (changed) - zfcp_rec_dbf_event_unit(id, ref, unit); -} -/* - * function: - * - * purpose: Wrappper for zfcp_erp_port_reopen_all_internal - * used to ensure the correct locking - * - * returns: 0 - initiated action successfully - * <0 - failed to initiate action - */ -int zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter, int clear_mask, - u8 id, void *ref) -{ - int retval; - unsigned long flags; - - read_lock_irqsave(&zfcp_data.config_lock, flags); - write_lock(&adapter->erp_lock); - retval = zfcp_erp_port_reopen_all_internal(adapter, clear_mask, id, - ref); - write_unlock(&adapter->erp_lock); - read_unlock_irqrestore(&zfcp_data.config_lock, flags); - - return retval; -} + zfcp_rec_dbf_event_thread_lock(6, adapter); + down(&adapter->erp_ready_sem); + zfcp_rec_dbf_event_thread_lock(7, adapter); + if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) + break; -static int zfcp_erp_port_reopen_all_internal(struct zfcp_adapter *adapter, - int clear_mask, u8 id, void *ref) -{ - int retval = 0; - struct zfcp_port *port; + if (!(atomic_read(&adapter->status) & + ZFCP_STATUS_ADAPTER_HOST_CON_INIT)) + break; - list_for_each_entry(port, &adapter->port_list_head, list) 
- if (!atomic_test_mask(ZFCP_STATUS_PORT_WKA, &port->status)) - zfcp_erp_port_reopen_internal(port, clear_mask, id, - ref); + ssleep(sleep); + sleep *= 2; + } - return retval; -} + atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, + &adapter->status); -/* - * function: - * - * purpose: - * - * returns: FIXME - */ -static int zfcp_erp_unit_reopen_all_internal(struct zfcp_port *port, - int clear_mask, u8 id, void *ref) -{ - int retval = 0; - struct zfcp_unit *unit; + if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_XCONFIG_OK)) + return ZFCP_ERP_FAILED; - list_for_each_entry(unit, &port->unit_list_head, list) - zfcp_erp_unit_reopen_internal(unit, clear_mask, id, ref); + if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP) + zfcp_erp_enqueue_ptp_port(adapter); - return retval; + return ZFCP_ERP_SUCCEEDED; } -/* - * function: - * - * purpose: this routine executes the 'Reopen Adapter' action - * (the entire action is processed synchronously, since - * there are no actions which might be run concurrently - * per definition) - * - * returns: ZFCP_ERP_SUCCEEDED - action finished successfully - * ZFCP_ERP_FAILED - action finished unsuccessfully - */ -static int -zfcp_erp_adapter_strategy(struct zfcp_erp_action *erp_action) +static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *act) { - int retval; - struct zfcp_adapter *adapter = erp_action->adapter; - - retval = zfcp_erp_adapter_strategy_close(erp_action); - if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY) - retval = ZFCP_ERP_EXIT; - else - retval = zfcp_erp_adapter_strategy_open(erp_action); + int ret; + struct zfcp_adapter *adapter = act->adapter; - if (retval == ZFCP_ERP_FAILED) { - ZFCP_LOG_INFO("Waiting to allow the adapter %s " - "to recover itself\n", - zfcp_get_busid_by_adapter(adapter)); - ssleep(ZFCP_TYPE2_RECOVERY_TIME); - } + atomic_clear_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status); - return retval; -} + write_lock_irq(&adapter->erp_lock); + zfcp_erp_action_to_running(act); + write_unlock_irq(&adapter->erp_lock); -/* - * function: - * - * purpose: - * - * returns: ZFCP_ERP_SUCCEEDED - action finished successfully - * ZFCP_ERP_FAILED - action finished unsuccessfully - */ -static int -zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *erp_action) -{ - int retval; + ret = zfcp_fsf_exchange_port_data(act); + if (ret == -EOPNOTSUPP) + return ZFCP_ERP_SUCCEEDED; + if (ret) + return ZFCP_ERP_FAILED; - atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, - &erp_action->adapter->status); - retval = zfcp_erp_adapter_strategy_generic(erp_action, 1); - atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING, - &erp_action->adapter->status); + zfcp_rec_dbf_event_thread_lock(8, adapter); + down(&adapter->erp_ready_sem); + zfcp_rec_dbf_event_thread_lock(9, adapter); + if (act->status & ZFCP_STATUS_ERP_TIMEDOUT) + return ZFCP_ERP_FAILED; - return retval; + return ZFCP_ERP_SUCCEEDED; } -/* - * function: - * - * purpose: - * - * returns: ZFCP_ERP_SUCCEEDED - action finished successfully - * ZFCP_ERP_FAILED - action finished unsuccessfully - */ -static int -zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *erp_action) +static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *act) { - int retval; + if (zfcp_erp_adapter_strat_fsf_xconf(act) == ZFCP_ERP_FAILED) + return ZFCP_ERP_FAILED; - atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, - &erp_action->adapter->status); - retval = zfcp_erp_adapter_strategy_generic(erp_action, 0); - atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING, - 
&erp_action->adapter->status); + if (zfcp_erp_adapter_strategy_open_fsf_xport(act) == ZFCP_ERP_FAILED) + return ZFCP_ERP_FAILED; - return retval; + atomic_set(&act->adapter->stat_miss, 16); + if (zfcp_status_read_refill(act->adapter)) + return ZFCP_ERP_FAILED; + + return ZFCP_ERP_SUCCEEDED; } -/* - * function: zfcp_register_adapter - * - * purpose: allocate the irq associated with this devno and register - * the FSF adapter with the SCSI stack - * - * returns: - */ -static int -zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *erp_action, int close) +static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *act, + int close) { int retval = ZFCP_ERP_SUCCEEDED; + struct zfcp_adapter *adapter = act->adapter; if (close) goto close_only; - retval = zfcp_erp_adapter_strategy_open_qdio(erp_action); + retval = zfcp_erp_adapter_strategy_open_qdio(act); if (retval != ZFCP_ERP_SUCCEEDED) goto failed_qdio; - retval = zfcp_erp_adapter_strategy_open_fsf(erp_action); + retval = zfcp_erp_adapter_strategy_open_fsf(act); if (retval != ZFCP_ERP_SUCCEEDED) goto failed_openfcp; - atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &erp_action->adapter->status); - goto out; + atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &act->adapter->status); + schedule_work(&act->adapter->scan_work); + + return ZFCP_ERP_SUCCEEDED; close_only: atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, - &erp_action->adapter->status); + &act->adapter->status); failed_openfcp: - zfcp_close_fsf(erp_action->adapter); + /* close queues to ensure that buffers are not accessed by adapter */ + zfcp_qdio_close(adapter); + zfcp_fsf_req_dismiss_all(adapter); + adapter->fsf_req_seq_no = 0; + /* all ports and units are closed */ + zfcp_erp_modify_adapter_status(adapter, 24, NULL, + ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR); failed_qdio: atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK | ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | ZFCP_STATUS_ADAPTER_XPORT_OK, - &erp_action->adapter->status); - out: + &act->adapter->status); return retval; } -/* - * function: zfcp_qdio_init - * - * purpose: setup QDIO operation for specified adapter - * - * returns: 0 - successful setup - * !0 - failed setup - */ -static int -zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *erp_action) +static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *act) { int retval; - int i; - volatile struct qdio_buffer_element *sbale; - struct zfcp_adapter *adapter = erp_action->adapter; - - if (atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) { - ZFCP_LOG_NORMAL("bug: second attempt to set up QDIO on " - "adapter %s\n", - zfcp_get_busid_by_adapter(adapter)); - goto failed_sanity; - } - - if (qdio_establish(&adapter->qdio_init_data) != 0) { - ZFCP_LOG_INFO("error: establishment of QDIO queues failed " - "on adapter %s\n", - zfcp_get_busid_by_adapter(adapter)); - goto failed_qdio_establish; - } - - if (qdio_activate(adapter->ccw_device, 0) != 0) { - ZFCP_LOG_INFO("error: activation of QDIO queues failed " - "on adapter %s\n", - zfcp_get_busid_by_adapter(adapter)); - goto failed_qdio_activate; - } - - /* - * put buffers into response queue, - */ - for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) { - sbale = &(adapter->response_queue.buffer[i]->element[0]); - sbale->length = 0; - sbale->flags = SBAL_FLAGS_LAST_ENTRY; - sbale->addr = NULL; - } - - ZFCP_LOG_TRACE("calling do_QDIO on adapter %s (flags=0x%x, " - "queue_no=%i, index_in_queue=%i, count=%i)\n", - zfcp_get_busid_by_adapter(adapter), - QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q); - - retval = 
do_QDIO(adapter->ccw_device, - QDIO_FLAG_SYNC_INPUT, - 0, 0, QDIO_MAX_BUFFERS_PER_Q, NULL); - - if (retval) { - ZFCP_LOG_NORMAL("bug: setup of QDIO failed (retval=%d)\n", - retval); - goto failed_do_qdio; - } else { - adapter->response_queue.free_index = 0; - atomic_set(&adapter->response_queue.free_count, 0); - ZFCP_LOG_DEBUG("%i buffers successfully enqueued to " - "response queue\n", QDIO_MAX_BUFFERS_PER_Q); - } - /* set index of first avalable SBALS / number of available SBALS */ - adapter->request_queue.free_index = 0; - atomic_set(&adapter->request_queue.free_count, QDIO_MAX_BUFFERS_PER_Q); - adapter->request_queue.distance_from_int = 0; - - /* initialize waitqueue used to wait for free SBALs in requests queue */ - init_waitqueue_head(&adapter->request_wq); - /* ok, we did it - skip all cleanups for different failures */ - atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); - retval = ZFCP_ERP_SUCCEEDED; - goto out; + atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &act->adapter->status); + zfcp_erp_adapter_strategy_generic(act, 1); /* close */ + atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING, &act->adapter->status); + if (act->status & ZFCP_STATUS_ERP_CLOSE_ONLY) + return ZFCP_ERP_EXIT; - failed_do_qdio: - /* NOP */ + atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &act->adapter->status); + retval = zfcp_erp_adapter_strategy_generic(act, 0); /* open */ + atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING, &act->adapter->status); - failed_qdio_activate: - while (qdio_shutdown(adapter->ccw_device, - QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS) - ssleep(1); - - failed_qdio_establish: - failed_sanity: - retval = ZFCP_ERP_FAILED; + if (retval == ZFCP_ERP_FAILED) + ssleep(8); - out: return retval; } - -static int -zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *erp_action) +static int zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *act) { int retval; - retval = zfcp_erp_adapter_strategy_open_fsf_xconfig(erp_action); - if (retval == ZFCP_ERP_FAILED) + retval = zfcp_fsf_close_physical_port(act); + if (retval == -ENOMEM) + return ZFCP_ERP_NOMEM; + act->step = ZFCP_ERP_STEP_PHYS_PORT_CLOSING; + if (retval) return ZFCP_ERP_FAILED; - retval = zfcp_erp_adapter_strategy_open_fsf_xport(erp_action); - if (retval == ZFCP_ERP_FAILED) - return ZFCP_ERP_FAILED; - - return zfcp_erp_adapter_strategy_open_fsf_statusread(erp_action); + return ZFCP_ERP_CONTINUES; } -static int -zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *erp_action) +static void zfcp_erp_port_strategy_clearstati(struct zfcp_port *port) { - int retval = ZFCP_ERP_SUCCEEDED; - int retries; - int sleep = ZFCP_EXCHANGE_CONFIG_DATA_FIRST_SLEEP; - struct zfcp_adapter *adapter = erp_action->adapter; - - atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status); - - for (retries = ZFCP_EXCHANGE_CONFIG_DATA_RETRIES; retries; retries--) { - atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, - &adapter->status); - ZFCP_LOG_DEBUG("Doing exchange config data\n"); - write_lock_irq(&adapter->erp_lock); - zfcp_erp_action_to_running(erp_action); - write_unlock_irq(&adapter->erp_lock); - if (zfcp_fsf_exchange_config_data(erp_action)) { - retval = ZFCP_ERP_FAILED; - ZFCP_LOG_INFO("error: initiation of exchange of " - "configuration data failed for " - "adapter %s\n", - zfcp_get_busid_by_adapter(adapter)); - break; - } - ZFCP_LOG_DEBUG("Xchange underway\n"); - - /* - * Why this works: - * Both the normal completion handler as well as the timeout - * handler will do an 'up' when the 'exchange config data' - 
* request completes or times out. Thus, the signal to go on - * won't be lost utilizing this semaphore. - * Furthermore, this 'adapter_reopen' action is - * guaranteed to be the only action being there (highest action - * which prevents other actions from being created). - * Resulting from that, the wake signal recognized here - * _must_ be the one belonging to the 'exchange config - * data' request. - */ - zfcp_rec_dbf_event_thread(6, adapter, 1); - down(&adapter->erp_ready_sem); - zfcp_rec_dbf_event_thread(7, adapter, 1); - if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) { - ZFCP_LOG_INFO("error: exchange of configuration data " - "for adapter %s timed out\n", - zfcp_get_busid_by_adapter(adapter)); - break; - } - - if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, - &adapter->status)) - break; + atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING | + ZFCP_STATUS_COMMON_CLOSING | + ZFCP_STATUS_COMMON_ACCESS_DENIED | + ZFCP_STATUS_PORT_DID_DID | + ZFCP_STATUS_PORT_PHYS_CLOSING | + ZFCP_STATUS_PORT_INVALID_WWPN, + &port->status); +} - ZFCP_LOG_DEBUG("host connection still initialising... " - "waiting and retrying...\n"); - /* sleep a little bit before retry */ - ssleep(sleep); - sleep *= 2; - } +static int zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action) +{ + struct zfcp_port *port = erp_action->port; + int status = atomic_read(&port->status); - atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, - &adapter->status); + switch (erp_action->step) { + case ZFCP_ERP_STEP_UNINITIALIZED: + zfcp_erp_port_strategy_clearstati(port); + if ((status & ZFCP_STATUS_PORT_PHYS_OPEN) && + (status & ZFCP_STATUS_COMMON_OPEN)) + return zfcp_erp_port_forced_strategy_close(erp_action); + else + return ZFCP_ERP_FAILED; - if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, - &adapter->status)) { - ZFCP_LOG_INFO("error: exchange of configuration data for " - "adapter %s failed\n", - zfcp_get_busid_by_adapter(adapter)); - retval = ZFCP_ERP_FAILED; + case ZFCP_ERP_STEP_PHYS_PORT_CLOSING: + if (status & ZFCP_STATUS_PORT_PHYS_OPEN) + return ZFCP_ERP_SUCCEEDED; } - - return retval; + return ZFCP_ERP_FAILED; } -static int -zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action) +static int zfcp_erp_port_strategy_close(struct zfcp_erp_action *erp_action) { - int ret; - struct zfcp_adapter *adapter; - - adapter = erp_action->adapter; - atomic_clear_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status); - - write_lock_irq(&adapter->erp_lock); - zfcp_erp_action_to_running(erp_action); - write_unlock_irq(&adapter->erp_lock); + int retval; - ret = zfcp_fsf_exchange_port_data(erp_action); - if (ret == -EOPNOTSUPP) { - return ZFCP_ERP_SUCCEEDED; - } else if (ret) { + retval = zfcp_fsf_close_port(erp_action); + if (retval == -ENOMEM) + return ZFCP_ERP_NOMEM; + erp_action->step = ZFCP_ERP_STEP_PORT_CLOSING; + if (retval) return ZFCP_ERP_FAILED; - } - - ret = ZFCP_ERP_SUCCEEDED; - zfcp_rec_dbf_event_thread(8, adapter, 1); - down(&adapter->erp_ready_sem); - zfcp_rec_dbf_event_thread(9, adapter, 1); - if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) { - ZFCP_LOG_INFO("error: exchange port data timed out (adapter " - "%s)\n", zfcp_get_busid_by_adapter(adapter)); - ret = ZFCP_ERP_FAILED; - } - - /* don't treat as error for the sake of compatibility */ - if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status)) - ZFCP_LOG_INFO("warning: exchange port data failed (adapter " - "%s\n", zfcp_get_busid_by_adapter(adapter)); - - return ret; + return ZFCP_ERP_CONTINUES; } -static int 
-zfcp_erp_adapter_strategy_open_fsf_statusread(struct zfcp_erp_action - *erp_action) +static int zfcp_erp_port_strategy_open_port(struct zfcp_erp_action *erp_action) { - int retval = ZFCP_ERP_SUCCEEDED; - int temp_ret; - struct zfcp_adapter *adapter = erp_action->adapter; - int i; - - adapter->status_read_failed = 0; - for (i = 0; i < ZFCP_STATUS_READS_RECOM; i++) { - temp_ret = zfcp_fsf_status_read(adapter, ZFCP_WAIT_FOR_SBAL); - if (temp_ret < 0) { - ZFCP_LOG_INFO("error: set-up of unsolicited status " - "notification failed on adapter %s\n", - zfcp_get_busid_by_adapter(adapter)); - retval = ZFCP_ERP_FAILED; - i--; - break; - } - } + int retval; - return retval; + retval = zfcp_fsf_open_port(erp_action); + if (retval == -ENOMEM) + return ZFCP_ERP_NOMEM; + erp_action->step = ZFCP_ERP_STEP_PORT_OPENING; + if (retval) + return ZFCP_ERP_FAILED; + return ZFCP_ERP_CONTINUES; } -/* - * function: - * - * purpose: this routine executes the 'Reopen Physical Port' action - * - * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously) - * ZFCP_ERP_SUCCEEDED - action finished successfully - * ZFCP_ERP_FAILED - action finished unsuccessfully - */ -static int -zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action) +static void zfcp_erp_port_strategy_open_ns_wake(struct zfcp_erp_action *ns_act) { - int retval = ZFCP_ERP_FAILED; - struct zfcp_port *port = erp_action->port; - - switch (erp_action->step) { - - /* - * FIXME: - * the ULP spec. begs for waiting for oustanding commands - */ - case ZFCP_ERP_STEP_UNINITIALIZED: - zfcp_erp_port_strategy_clearstati(port); - /* - * it would be sufficient to test only the normal open flag - * since the phys. open flag cannot be set if the normal - * open flag is unset - however, this is for readabilty ... - */ - if (atomic_test_mask((ZFCP_STATUS_PORT_PHYS_OPEN | - ZFCP_STATUS_COMMON_OPEN), - &port->status)) { - ZFCP_LOG_DEBUG("port 0x%016Lx is open -> trying " - "close physical\n", port->wwpn); - retval = - zfcp_erp_port_forced_strategy_close(erp_action); - } else - retval = ZFCP_ERP_FAILED; - break; + unsigned long flags; + struct zfcp_adapter *adapter = ns_act->adapter; + struct zfcp_erp_action *act, *tmp; + int status; - case ZFCP_ERP_STEP_PHYS_PORT_CLOSING: - if (atomic_test_mask(ZFCP_STATUS_PORT_PHYS_OPEN, - &port->status)) { - ZFCP_LOG_DEBUG("close physical failed for port " - "0x%016Lx\n", port->wwpn); - retval = ZFCP_ERP_FAILED; - } else - retval = ZFCP_ERP_SUCCEEDED; - break; + read_lock_irqsave(&adapter->erp_lock, flags); + list_for_each_entry_safe(act, tmp, &adapter->erp_running_head, list) { + if (act->step == ZFCP_ERP_STEP_NAMESERVER_OPEN) { + status = atomic_read(&adapter->nameserver_port->status); + if (status & ZFCP_STATUS_COMMON_ERP_FAILED) + zfcp_erp_port_failed(act->port, 27, NULL); + zfcp_erp_action_ready(act); + } } - - return retval; + read_unlock_irqrestore(&adapter->erp_lock, flags); } -/* - * function: - * - * purpose: this routine executes the 'Reopen Port' action - * - * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously) - * ZFCP_ERP_SUCCEEDED - action finished successfully - * ZFCP_ERP_FAILED - action finished unsuccessfully - */ -static int -zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action) +static int zfcp_erp_port_strategy_open_nameserver(struct zfcp_erp_action *act) { - int retval = ZFCP_ERP_FAILED; - struct zfcp_port *port = erp_action->port; - - switch (erp_action->step) { + int retval; - /* - * FIXME: - * the ULP spec. 
begs for waiting for oustanding commands - */ + switch (act->step) { case ZFCP_ERP_STEP_UNINITIALIZED: - zfcp_erp_port_strategy_clearstati(port); - if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &port->status)) { - ZFCP_LOG_DEBUG("port 0x%016Lx is open -> trying " - "close\n", port->wwpn); - retval = zfcp_erp_port_strategy_close(erp_action); - goto out; - } /* else it's already closed, open it */ - break; - + case ZFCP_ERP_STEP_PHYS_PORT_CLOSING: case ZFCP_ERP_STEP_PORT_CLOSING: - if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &port->status)) { - ZFCP_LOG_DEBUG("close failed for port 0x%016Lx\n", - port->wwpn); + return zfcp_erp_port_strategy_open_port(act); + + case ZFCP_ERP_STEP_PORT_OPENING: + if (atomic_read(&act->port->status) & ZFCP_STATUS_COMMON_OPEN) + retval = ZFCP_ERP_SUCCEEDED; + else retval = ZFCP_ERP_FAILED; - goto out; - } /* else it's closed now, open it */ - break; - } - if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY) - retval = ZFCP_ERP_EXIT; - else - retval = zfcp_erp_port_strategy_open(erp_action); + /* this is needed anyway */ + zfcp_erp_port_strategy_open_ns_wake(act); + return retval; - out: - return retval; + default: + return ZFCP_ERP_FAILED; + } } -static int -zfcp_erp_port_strategy_open(struct zfcp_erp_action *erp_action) +static int zfcp_erp_port_strategy_open_lookup(struct zfcp_erp_action *act) { int retval; - if (atomic_test_mask(ZFCP_STATUS_PORT_WKA, - &erp_action->port->status)) - retval = zfcp_erp_port_strategy_open_nameserver(erp_action); - else - retval = zfcp_erp_port_strategy_open_common(erp_action); - - return retval; + retval = zfcp_fc_ns_gid_pn_request(act); + if (retval == -ENOMEM) + return ZFCP_ERP_NOMEM; + act->step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP; + if (retval) + return ZFCP_ERP_FAILED; + return ZFCP_ERP_CONTINUES; } -static int -zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *erp_action) +static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act) { - int retval = 0; - struct zfcp_adapter *adapter = erp_action->adapter; - struct zfcp_port *port = erp_action->port; + struct zfcp_adapter *adapter = act->adapter; + struct zfcp_port *port = act->port; - switch (erp_action->step) { + if (port->wwpn != adapter->peer_wwpn) { + dev_err(&adapter->ccw_device->dev, + "Failed to open port 0x%016Lx, " + "Peer WWPN 0x%016Lx does not " + "match.\n", port->wwpn, + adapter->peer_wwpn); + zfcp_erp_port_failed(port, 25, NULL); + return ZFCP_ERP_FAILED; + } + port->d_id = adapter->peer_d_id; + atomic_set_mask(ZFCP_STATUS_PORT_DID_DID, &port->status); + return zfcp_erp_port_strategy_open_port(act); +} + +static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act) +{ + struct zfcp_adapter *adapter = act->adapter; + struct zfcp_port *port = act->port; + struct zfcp_port *ns_port = adapter->nameserver_port; + int p_status = atomic_read(&port->status); + switch (act->step) { case ZFCP_ERP_STEP_UNINITIALIZED: case ZFCP_ERP_STEP_PHYS_PORT_CLOSING: case ZFCP_ERP_STEP_PORT_CLOSING: - if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP) { - if (port->wwpn != adapter->peer_wwpn) { - ZFCP_LOG_NORMAL("Failed to open port 0x%016Lx " - "on adapter %s.\nPeer WWPN " - "0x%016Lx does not match\n", - port->wwpn, - zfcp_get_busid_by_adapter(adapter), - adapter->peer_wwpn); - zfcp_erp_port_failed(port, 25, NULL); - retval = ZFCP_ERP_FAILED; - break; - } - port->d_id = adapter->peer_d_id; - atomic_set_mask(ZFCP_STATUS_PORT_DID_DID, &port->status); - retval = zfcp_erp_port_strategy_open_port(erp_action); - break; + if 
(fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP) + return zfcp_erp_open_ptp_port(act); + if (!ns_port) { + dev_err(&adapter->ccw_device->dev, + "Nameserver port unavailable.\n"); + return ZFCP_ERP_FAILED; } - if (!(adapter->nameserver_port)) { - retval = zfcp_nameserver_enqueue(adapter); - if (retval != 0) { - ZFCP_LOG_NORMAL("error: nameserver port " - "unavailable for adapter %s\n", - zfcp_get_busid_by_adapter(adapter)); - retval = ZFCP_ERP_FAILED; - break; - } - } - if (!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED, - &adapter->nameserver_port->status)) { - ZFCP_LOG_DEBUG("nameserver port is not open -> open " - "nameserver port\n"); + if (!(atomic_read(&ns_port->status) & + ZFCP_STATUS_COMMON_UNBLOCKED)) { /* nameserver port may live again */ atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, - &adapter->nameserver_port->status); - if (zfcp_erp_port_reopen(adapter->nameserver_port, 0, - 77, erp_action) >= 0) { - erp_action->step = - ZFCP_ERP_STEP_NAMESERVER_OPEN; - retval = ZFCP_ERP_CONTINUES; - } else - retval = ZFCP_ERP_FAILED; - break; + &ns_port->status); + if (zfcp_erp_port_reopen(ns_port, 0, 77, act) >= 0) { + act->step = ZFCP_ERP_STEP_NAMESERVER_OPEN; + return ZFCP_ERP_CONTINUES; + } + return ZFCP_ERP_FAILED; } /* else nameserver port is already open, fall through */ case ZFCP_ERP_STEP_NAMESERVER_OPEN: - if (!atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, - &adapter->nameserver_port->status)) { - ZFCP_LOG_DEBUG("open failed for nameserver port\n"); - retval = ZFCP_ERP_FAILED; - } else { - ZFCP_LOG_DEBUG("nameserver port is open -> " - "nameserver look-up for port 0x%016Lx\n", - port->wwpn); - retval = zfcp_erp_port_strategy_open_common_lookup - (erp_action); - } - break; + if (!(atomic_read(&ns_port->status) & ZFCP_STATUS_COMMON_OPEN)) + return ZFCP_ERP_FAILED; + return zfcp_erp_port_strategy_open_lookup(act); case ZFCP_ERP_STEP_NAMESERVER_LOOKUP: - if (!atomic_test_mask(ZFCP_STATUS_PORT_DID_DID, &port->status)) { - if (atomic_test_mask - (ZFCP_STATUS_PORT_INVALID_WWPN, &port->status)) { - ZFCP_LOG_DEBUG("nameserver look-up failed " - "for port 0x%016Lx " - "(misconfigured WWPN?)\n", - port->wwpn); + if (!(p_status & ZFCP_STATUS_PORT_DID_DID)) { + if (p_status & (ZFCP_STATUS_PORT_INVALID_WWPN)) { zfcp_erp_port_failed(port, 26, NULL); - retval = ZFCP_ERP_EXIT; - } else { - ZFCP_LOG_DEBUG("nameserver look-up failed for " - "port 0x%016Lx\n", port->wwpn); - retval = ZFCP_ERP_FAILED; + return ZFCP_ERP_EXIT; } - } else { - ZFCP_LOG_DEBUG("port 0x%016Lx has d_id=0x%06x -> " - "trying open\n", port->wwpn, port->d_id); - retval = zfcp_erp_port_strategy_open_port(erp_action); + return ZFCP_ERP_FAILED; } - break; + return zfcp_erp_port_strategy_open_port(act); case ZFCP_ERP_STEP_PORT_OPENING: /* D_ID might have changed during open */ - if (atomic_test_mask((ZFCP_STATUS_COMMON_OPEN | - ZFCP_STATUS_PORT_DID_DID), - &port->status)) { - ZFCP_LOG_DEBUG("port 0x%016Lx is open\n", port->wwpn); - retval = ZFCP_ERP_SUCCEEDED; - } else { - ZFCP_LOG_DEBUG("open failed for port 0x%016Lx\n", - port->wwpn); - retval = ZFCP_ERP_FAILED; - } - break; - - default: - ZFCP_LOG_NORMAL("bug: unknown erp step 0x%08x\n", - erp_action->step); - retval = ZFCP_ERP_FAILED; + if ((p_status & ZFCP_STATUS_COMMON_OPEN) && + (p_status & ZFCP_STATUS_PORT_DID_DID)) + return ZFCP_ERP_SUCCEEDED; + /* fall through otherwise */ } + return ZFCP_ERP_FAILED; +} - return retval; +static int zfcp_erp_port_strategy_open(struct zfcp_erp_action *act) +{ + if (atomic_read(&act->port->status) & (ZFCP_STATUS_PORT_WKA)) + return 
zfcp_erp_port_strategy_open_nameserver(act); + return zfcp_erp_port_strategy_open_common(act); } -static int -zfcp_erp_port_strategy_open_nameserver(struct zfcp_erp_action *erp_action) +static int zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action) { - int retval; struct zfcp_port *port = erp_action->port; switch (erp_action->step) { - case ZFCP_ERP_STEP_UNINITIALIZED: - case ZFCP_ERP_STEP_PHYS_PORT_CLOSING: - case ZFCP_ERP_STEP_PORT_CLOSING: - ZFCP_LOG_DEBUG("port 0x%016Lx has d_id=0x%06x -> trying open\n", - port->wwpn, port->d_id); - retval = zfcp_erp_port_strategy_open_port(erp_action); + zfcp_erp_port_strategy_clearstati(port); + if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN) + return zfcp_erp_port_strategy_close(erp_action); break; - case ZFCP_ERP_STEP_PORT_OPENING: - if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &port->status)) { - ZFCP_LOG_DEBUG("WKA port is open\n"); - retval = ZFCP_ERP_SUCCEEDED; - } else { - ZFCP_LOG_DEBUG("open failed for WKA port\n"); - retval = ZFCP_ERP_FAILED; - } - /* this is needed anyway (dont care for retval of wakeup) */ - ZFCP_LOG_DEBUG("continue other open port operations\n"); - zfcp_erp_port_strategy_open_nameserver_wakeup(erp_action); + case ZFCP_ERP_STEP_PORT_CLOSING: + if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN) + return ZFCP_ERP_FAILED; break; - - default: - ZFCP_LOG_NORMAL("bug: unknown erp step 0x%08x\n", - erp_action->step); - retval = ZFCP_ERP_FAILED; } + if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY) + return ZFCP_ERP_EXIT; + else + return zfcp_erp_port_strategy_open(erp_action); - return retval; -} - -/* - * function: - * - * purpose: makes the erp thread continue with reopen (physical) port - * actions which have been paused until the name server port - * is opened (or failed) - * - * returns: 0 (a kind of void retval, its not used) - */ -static int -zfcp_erp_port_strategy_open_nameserver_wakeup(struct zfcp_erp_action - *ns_erp_action) -{ - int retval = 0; - unsigned long flags; - struct zfcp_adapter *adapter = ns_erp_action->adapter; - struct zfcp_erp_action *erp_action, *tmp; - - read_lock_irqsave(&adapter->erp_lock, flags); - list_for_each_entry_safe(erp_action, tmp, &adapter->erp_running_head, - list) { - if (erp_action->step == ZFCP_ERP_STEP_NAMESERVER_OPEN) { - if (atomic_test_mask( - ZFCP_STATUS_COMMON_ERP_FAILED, - &adapter->nameserver_port->status)) - zfcp_erp_port_failed(erp_action->port, 27, - NULL); - zfcp_erp_action_ready(erp_action); - } - } - read_unlock_irqrestore(&adapter->erp_lock, flags); - - return retval; -} - -/* - * function: - * - * purpose: - * - * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously) - * ZFCP_ERP_FAILED - action finished unsuccessfully - */ -static int -zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *erp_action) -{ - int retval; - - retval = zfcp_fsf_close_physical_port(erp_action); - if (retval == -ENOMEM) { - retval = ZFCP_ERP_NOMEM; - goto out; - } - erp_action->step = ZFCP_ERP_STEP_PHYS_PORT_CLOSING; - if (retval != 0) { - /* could not send 'open', fail */ - retval = ZFCP_ERP_FAILED; - goto out; - } - retval = ZFCP_ERP_CONTINUES; - out: - return retval; + return ZFCP_ERP_FAILED; } -static int -zfcp_erp_port_strategy_clearstati(struct zfcp_port *port) +static void zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *unit) { - int retval = 0; - atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING | ZFCP_STATUS_COMMON_CLOSING | ZFCP_STATUS_COMMON_ACCESS_DENIED | - ZFCP_STATUS_PORT_DID_DID | - ZFCP_STATUS_PORT_PHYS_CLOSING | - 
ZFCP_STATUS_PORT_INVALID_WWPN, - &port->status); - return retval; -} - -/* - * function: - * - * purpose: - * - * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously) - * ZFCP_ERP_FAILED - action finished unsuccessfully - */ -static int -zfcp_erp_port_strategy_close(struct zfcp_erp_action *erp_action) -{ - int retval; - - retval = zfcp_fsf_close_port(erp_action); - if (retval == -ENOMEM) { - retval = ZFCP_ERP_NOMEM; - goto out; - } - erp_action->step = ZFCP_ERP_STEP_PORT_CLOSING; - if (retval != 0) { - /* could not send 'close', fail */ - retval = ZFCP_ERP_FAILED; - goto out; - } - retval = ZFCP_ERP_CONTINUES; - out: - return retval; + ZFCP_STATUS_UNIT_SHARED | + ZFCP_STATUS_UNIT_READONLY, + &unit->status); } -/* - * function: - * - * purpose: - * - * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously) - * ZFCP_ERP_FAILED - action finished unsuccessfully - */ -static int -zfcp_erp_port_strategy_open_port(struct zfcp_erp_action *erp_action) +static int zfcp_erp_unit_strategy_close(struct zfcp_erp_action *erp_action) { - int retval; - - retval = zfcp_fsf_open_port(erp_action); - if (retval == -ENOMEM) { - retval = ZFCP_ERP_NOMEM; - goto out; - } - erp_action->step = ZFCP_ERP_STEP_PORT_OPENING; - if (retval != 0) { - /* could not send 'open', fail */ - retval = ZFCP_ERP_FAILED; - goto out; - } - retval = ZFCP_ERP_CONTINUES; - out: - return retval; + int retval = zfcp_fsf_close_unit(erp_action); + if (retval == -ENOMEM) + return ZFCP_ERP_NOMEM; + erp_action->step = ZFCP_ERP_STEP_UNIT_CLOSING; + if (retval) + return ZFCP_ERP_FAILED; + return ZFCP_ERP_CONTINUES; } -/* - * function: - * - * purpose: - * - * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously) - * ZFCP_ERP_FAILED - action finished unsuccessfully - */ -static int -zfcp_erp_port_strategy_open_common_lookup(struct zfcp_erp_action *erp_action) +static int zfcp_erp_unit_strategy_open(struct zfcp_erp_action *erp_action) { - int retval; - - retval = zfcp_ns_gid_pn_request(erp_action); - if (retval == -ENOMEM) { - retval = ZFCP_ERP_NOMEM; - goto out; - } - erp_action->step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP; - if (retval != 0) { - /* could not send nameserver request, fail */ - retval = ZFCP_ERP_FAILED; - goto out; - } - retval = ZFCP_ERP_CONTINUES; - out: - return retval; + int retval = zfcp_fsf_open_unit(erp_action); + if (retval == -ENOMEM) + return ZFCP_ERP_NOMEM; + erp_action->step = ZFCP_ERP_STEP_UNIT_OPENING; + if (retval) + return ZFCP_ERP_FAILED; + return ZFCP_ERP_CONTINUES; } -/* - * function: - * - * purpose: this routine executes the 'Reopen Unit' action - * currently no retries - * - * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously) - * ZFCP_ERP_SUCCEEDED - action finished successfully - * ZFCP_ERP_FAILED - action finished unsuccessfully - */ -static int -zfcp_erp_unit_strategy(struct zfcp_erp_action *erp_action) +static int zfcp_erp_unit_strategy(struct zfcp_erp_action *erp_action) { - int retval = ZFCP_ERP_FAILED; struct zfcp_unit *unit = erp_action->unit; switch (erp_action->step) { - - /* - * FIXME: - * the ULP spec. 
begs for waiting for oustanding commands - */ case ZFCP_ERP_STEP_UNINITIALIZED: zfcp_erp_unit_strategy_clearstati(unit); - if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status)) { - ZFCP_LOG_DEBUG("unit 0x%016Lx is open -> " - "trying close\n", unit->fcp_lun); - retval = zfcp_erp_unit_strategy_close(erp_action); - break; - } - /* else it's already closed, fall through */ + if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_OPEN) + return zfcp_erp_unit_strategy_close(erp_action); + /* already closed, fall through */ case ZFCP_ERP_STEP_UNIT_CLOSING: - if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status)) { - ZFCP_LOG_DEBUG("close failed for unit 0x%016Lx\n", - unit->fcp_lun); - retval = ZFCP_ERP_FAILED; - } else { - if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY) - retval = ZFCP_ERP_EXIT; - else { - ZFCP_LOG_DEBUG("unit 0x%016Lx is not open -> " - "trying open\n", unit->fcp_lun); - retval = - zfcp_erp_unit_strategy_open(erp_action); - } - } - break; + if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_OPEN) + return ZFCP_ERP_FAILED; + if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY) + return ZFCP_ERP_EXIT; + return zfcp_erp_unit_strategy_open(erp_action); case ZFCP_ERP_STEP_UNIT_OPENING: - if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status)) { - ZFCP_LOG_DEBUG("unit 0x%016Lx is open\n", - unit->fcp_lun); - retval = ZFCP_ERP_SUCCEEDED; - } else { - ZFCP_LOG_DEBUG("open failed for unit 0x%016Lx\n", - unit->fcp_lun); - retval = ZFCP_ERP_FAILED; - } - break; + if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_OPEN) + return ZFCP_ERP_SUCCEEDED; } - - return retval; + return ZFCP_ERP_FAILED; } -static int -zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *unit) +static int zfcp_erp_strategy_check_unit(struct zfcp_unit *unit, int result) { - int retval = 0; - - atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING | - ZFCP_STATUS_COMMON_CLOSING | - ZFCP_STATUS_COMMON_ACCESS_DENIED | - ZFCP_STATUS_UNIT_SHARED | - ZFCP_STATUS_UNIT_READONLY, - &unit->status); + switch (result) { + case ZFCP_ERP_SUCCEEDED : + atomic_set(&unit->erp_counter, 0); + zfcp_erp_unit_unblock(unit); + break; + case ZFCP_ERP_FAILED : + atomic_inc(&unit->erp_counter); + if (atomic_read(&unit->erp_counter) > ZFCP_MAX_ERPS) + zfcp_erp_unit_failed(unit, 21, NULL); + break; + } - return retval; + if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_ERP_FAILED) { + zfcp_erp_unit_block(unit, 0); + result = ZFCP_ERP_EXIT; + } + return result; } -/* - * function: - * - * purpose: - * - * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously) - * ZFCP_ERP_FAILED - action finished unsuccessfully - */ -static int -zfcp_erp_unit_strategy_close(struct zfcp_erp_action *erp_action) +static int zfcp_erp_strategy_check_port(struct zfcp_port *port, int result) { - int retval; + switch (result) { + case ZFCP_ERP_SUCCEEDED : + atomic_set(&port->erp_counter, 0); + zfcp_erp_port_unblock(port); + break; - retval = zfcp_fsf_close_unit(erp_action); - if (retval == -ENOMEM) { - retval = ZFCP_ERP_NOMEM; - goto out; - } - erp_action->step = ZFCP_ERP_STEP_UNIT_CLOSING; - if (retval != 0) { - /* could not send 'close', fail */ - retval = ZFCP_ERP_FAILED; - goto out; + case ZFCP_ERP_FAILED : + if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC) { + zfcp_erp_port_block(port, 0); + result = ZFCP_ERP_EXIT; + } + atomic_inc(&port->erp_counter); + if (atomic_read(&port->erp_counter) > ZFCP_MAX_ERPS) + zfcp_erp_port_failed(port, 22, NULL); + break; } - retval = ZFCP_ERP_CONTINUES; - out: - return retval; + if 
(atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) { + zfcp_erp_port_block(port, 0); + result = ZFCP_ERP_EXIT; + } + return result; } -/* - * function: - * - * purpose: - * - * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously) - * ZFCP_ERP_FAILED - action finished unsuccessfully - */ -static int -zfcp_erp_unit_strategy_open(struct zfcp_erp_action *erp_action) +static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter, + int result) { - int retval; + switch (result) { + case ZFCP_ERP_SUCCEEDED : + atomic_set(&adapter->erp_counter, 0); + zfcp_erp_adapter_unblock(adapter); + break; - retval = zfcp_fsf_open_unit(erp_action); - if (retval == -ENOMEM) { - retval = ZFCP_ERP_NOMEM; - goto out; - } - erp_action->step = ZFCP_ERP_STEP_UNIT_OPENING; - if (retval != 0) { - /* could not send 'open', fail */ - retval = ZFCP_ERP_FAILED; - goto out; + case ZFCP_ERP_FAILED : + atomic_inc(&adapter->erp_counter); + if (atomic_read(&adapter->erp_counter) > ZFCP_MAX_ERPS) + zfcp_erp_adapter_failed(adapter, 23, NULL); + break; } - retval = ZFCP_ERP_CONTINUES; - out: - return retval; -} -void zfcp_erp_start_timer(struct zfcp_fsf_req *fsf_req) -{ - BUG_ON(!fsf_req->erp_action); - fsf_req->timer.function = zfcp_erp_timeout_handler; - fsf_req->timer.data = (unsigned long) fsf_req->erp_action; - fsf_req->timer.expires = jiffies + ZFCP_ERP_FSFREQ_TIMEOUT; - add_timer(&fsf_req->timer); + if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) { + zfcp_erp_adapter_block(adapter, 0); + result = ZFCP_ERP_EXIT; + } + return result; } -/* - * function: - * - * purpose: enqueue the specified error recovery action, if needed - * - * returns: - */ -static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter, - struct zfcp_port *port, - struct zfcp_unit *unit, u8 id, void *ref) +static int zfcp_erp_strategy_check_target(struct zfcp_erp_action *erp_action, + int result) { - int retval = 1, need = want; - struct zfcp_erp_action *erp_action = NULL; - u32 status = 0; + struct zfcp_adapter *adapter = erp_action->adapter; + struct zfcp_port *port = erp_action->port; + struct zfcp_unit *unit = erp_action->unit; - /* - * We need some rules here which check whether we really need - * this action or whether we should just drop it. - * E.g. if there is a unfinished 'Reopen Port' request then we drop a - * 'Reopen Unit' request for an associated unit since we can't - * satisfy this request now. A 'Reopen Port' action will trigger - * 'Reopen Unit' actions when it completes. - * Thus, there are only actions in the queue which can immediately be - * executed. This makes the processing of the action queue more - * efficient. - */ - - if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, - &adapter->status)) - return -EIO; + switch (erp_action->action) { - /* check whether we really need this */ - switch (want) { case ZFCP_ERP_ACTION_REOPEN_UNIT: - if (atomic_test_mask - (ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status)) { - goto out; - } - if (!atomic_test_mask - (ZFCP_STATUS_COMMON_RUNNING, &port->status) || - atomic_test_mask - (ZFCP_STATUS_COMMON_ERP_FAILED, &port->status)) { - goto out; - } - if (!atomic_test_mask - (ZFCP_STATUS_COMMON_UNBLOCKED, &port->status)) - need = ZFCP_ERP_ACTION_REOPEN_PORT; - /* fall through !!! */ - - case ZFCP_ERP_ACTION_REOPEN_PORT: - if (atomic_test_mask - (ZFCP_STATUS_COMMON_ERP_INUSE, &port->status)) { - goto out; - } - /* fall through !!! 
*/ + result = zfcp_erp_strategy_check_unit(unit, result); + break; case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: - if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, - &port->status)) { - if (port->erp_action.action != - ZFCP_ERP_ACTION_REOPEN_PORT_FORCED) { - ZFCP_LOG_INFO("dropped erp action %i (port " - "0x%016Lx, action in use: %i)\n", - want, port->wwpn, - port->erp_action.action); - } - goto out; - } - if (!atomic_test_mask - (ZFCP_STATUS_COMMON_RUNNING, &adapter->status) || - atomic_test_mask - (ZFCP_STATUS_COMMON_ERP_FAILED, &adapter->status)) { - goto out; - } - if (!atomic_test_mask - (ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status)) - need = ZFCP_ERP_ACTION_REOPEN_ADAPTER; - /* fall through !!! */ + case ZFCP_ERP_ACTION_REOPEN_PORT: + result = zfcp_erp_strategy_check_port(port, result); + break; case ZFCP_ERP_ACTION_REOPEN_ADAPTER: - if (atomic_test_mask - (ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status)) { - goto out; - } + result = zfcp_erp_strategy_check_adapter(adapter, result); break; - - default: - ZFCP_LOG_NORMAL("bug: unknown erp action requested " - "on adapter %s (action=%d)\n", - zfcp_get_busid_by_adapter(adapter), want); - goto out; } + return result; +} - /* check whether we need something stronger first */ - if (need) { - ZFCP_LOG_DEBUG("stronger erp action %d needed before " - "erp action %d on adapter %s\n", - need, want, zfcp_get_busid_by_adapter(adapter)); - } +static int zfcp_erp_strat_change_det(atomic_t *target_status, u32 erp_status) +{ + int status = atomic_read(target_status); - /* mark adapter to have some error recovery pending */ - atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status); + if ((status & ZFCP_STATUS_COMMON_RUNNING) && + (erp_status & ZFCP_STATUS_ERP_CLOSE_ONLY)) + return 1; /* take it online */ - /* setup error recovery action */ - switch (need) { + if (!(status & ZFCP_STATUS_COMMON_RUNNING) && + !(erp_status & ZFCP_STATUS_ERP_CLOSE_ONLY)) + return 1; /* take it offline */ - case ZFCP_ERP_ACTION_REOPEN_UNIT: - zfcp_unit_get(unit); - atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status); - erp_action = &unit->erp_action; - if (!atomic_test_mask - (ZFCP_STATUS_COMMON_RUNNING, &unit->status)) - status = ZFCP_STATUS_ERP_CLOSE_ONLY; + return 0; +} + +static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret) +{ + int action = act->action; + struct zfcp_adapter *adapter = act->adapter; + struct zfcp_port *port = act->port; + struct zfcp_unit *unit = act->unit; + u32 erp_status = act->status; + + switch (action) { + case ZFCP_ERP_ACTION_REOPEN_ADAPTER: + if (zfcp_erp_strat_change_det(&adapter->status, erp_status)) { + _zfcp_erp_adapter_reopen(adapter, + ZFCP_STATUS_COMMON_ERP_FAILED, + 67, NULL); + return ZFCP_ERP_EXIT; + } break; - case ZFCP_ERP_ACTION_REOPEN_PORT: case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: - zfcp_port_get(port); - zfcp_erp_action_dismiss_port(port); - atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status); - erp_action = &port->erp_action; - if (!atomic_test_mask - (ZFCP_STATUS_COMMON_RUNNING, &port->status)) - status = ZFCP_STATUS_ERP_CLOSE_ONLY; + case ZFCP_ERP_ACTION_REOPEN_PORT: + if (zfcp_erp_strat_change_det(&port->status, erp_status)) { + _zfcp_erp_port_reopen(port, + ZFCP_STATUS_COMMON_ERP_FAILED, + 68, NULL); + return ZFCP_ERP_EXIT; + } break; - case ZFCP_ERP_ACTION_REOPEN_ADAPTER: - zfcp_adapter_get(adapter); - zfcp_erp_action_dismiss_adapter(adapter); - atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status); - erp_action = &adapter->erp_action; - if (!atomic_test_mask 
- (ZFCP_STATUS_COMMON_RUNNING, &adapter->status)) - status = ZFCP_STATUS_ERP_CLOSE_ONLY; + case ZFCP_ERP_ACTION_REOPEN_UNIT: + if (zfcp_erp_strat_change_det(&unit->status, erp_status)) { + _zfcp_erp_unit_reopen(unit, + ZFCP_STATUS_COMMON_ERP_FAILED, + 69, NULL); + return ZFCP_ERP_EXIT; + } break; } - - memset(erp_action, 0, sizeof (struct zfcp_erp_action)); - erp_action->adapter = adapter; - erp_action->port = port; - erp_action->unit = unit; - erp_action->action = need; - erp_action->status = status; - - ++adapter->erp_total_count; - - /* finally put it into 'ready' queue and kick erp thread */ - list_add_tail(&erp_action->list, &adapter->erp_ready_head); - up(&adapter->erp_ready_sem); - zfcp_rec_dbf_event_thread(1, adapter, 0); - retval = 0; - out: - zfcp_rec_dbf_event_trigger(id, ref, want, need, erp_action, - adapter, port, unit); - return retval; + return ret; } -static int -zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action) +static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action) { - int retval = 0; struct zfcp_adapter *adapter = erp_action->adapter; - --adapter->erp_total_count; + adapter->erp_total_count--; if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) { - --adapter->erp_low_mem_count; + adapter->erp_low_mem_count--; erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM; } @@ -2919,141 +1224,458 @@ zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action) atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &erp_action->unit->status); break; + case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: case ZFCP_ERP_ACTION_REOPEN_PORT: atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &erp_action->port->status); break; + case ZFCP_ERP_ACTION_REOPEN_ADAPTER: atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &erp_action->adapter->status); break; - default: - /* bug */ - break; } - return retval; } -/** - * zfcp_erp_action_cleanup - * - * Register unit with scsi stack if appropriate and fix reference counts. - * Note: Temporary units are not registered with scsi stack. - */ -static void -zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter, - struct zfcp_port *port, struct zfcp_unit *unit, - int result) +struct zfcp_erp_add_work { + struct zfcp_unit *unit; + struct work_struct work; +}; + +static void zfcp_erp_scsi_scan(struct work_struct *work) { - switch (action) { + struct zfcp_erp_add_work *p = + container_of(work, struct zfcp_erp_add_work, work); + struct zfcp_unit *unit = p->unit; + struct fc_rport *rport = unit->port->rport; + scsi_scan_target(&rport->dev, 0, rport->scsi_target_id, + unit->scsi_lun, 0); + atomic_clear_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status); + zfcp_unit_put(unit); + kfree(p); +} + +static void zfcp_erp_schedule_work(struct zfcp_unit *unit) +{ + struct zfcp_erp_add_work *p; + + p = kzalloc(sizeof(*p), GFP_KERNEL); + if (!p) { + dev_err(&unit->port->adapter->ccw_device->dev, + "Out of resources. 
Could not register unit " + "0x%016Lx on port 0x%016Lx with SCSI stack.\n", + unit->fcp_lun, unit->port->wwpn); + return; + } + + zfcp_unit_get(unit); + atomic_set_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status); + INIT_WORK(&p->work, zfcp_erp_scsi_scan); + p->unit = unit; + schedule_work(&p->work); +} + +static void zfcp_erp_rport_register(struct zfcp_port *port) +{ + struct fc_rport_identifiers ids; + ids.node_name = port->wwnn; + ids.port_name = port->wwpn; + ids.port_id = port->d_id; + ids.roles = FC_RPORT_ROLE_FCP_TARGET; + port->rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids); + if (!port->rport) { + dev_err(&port->adapter->ccw_device->dev, + "Failed registration of rport " + "0x%016Lx.\n", port->wwpn); + return; + } + + scsi_target_unblock(&port->rport->dev); + port->rport->maxframe_size = port->maxframe_size; + port->rport->supported_classes = port->supported_classes; +} + +static void zfcp_erp_rports_del(struct zfcp_adapter *adapter) +{ + struct zfcp_port *port; + list_for_each_entry(port, &adapter->port_list_head, list) + if (port->rport && !(atomic_read(&port->status) & + ZFCP_STATUS_PORT_WKA)) { + fc_remote_port_delete(port->rport); + port->rport = NULL; + } +} + +static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result) +{ + struct zfcp_adapter *adapter = act->adapter; + struct zfcp_port *port = act->port; + struct zfcp_unit *unit = act->unit; + + switch (act->action) { case ZFCP_ERP_ACTION_REOPEN_UNIT: - if ((result == ZFCP_ERP_SUCCEEDED) - && (!atomic_test_mask(ZFCP_STATUS_UNIT_TEMPORARY, - &unit->status)) - && !unit->device - && port->rport) { + if ((result == ZFCP_ERP_SUCCEEDED) && + !unit->device && port->rport) { atomic_set_mask(ZFCP_STATUS_UNIT_REGISTERED, &unit->status); - if (atomic_test_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, - &unit->status) == 0) + if (!(atomic_read(&unit->status) & + ZFCP_STATUS_UNIT_SCSI_WORK_PENDING)) zfcp_erp_schedule_work(unit); } zfcp_unit_put(unit); break; + case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: case ZFCP_ERP_ACTION_REOPEN_PORT: - if (atomic_test_mask(ZFCP_STATUS_PORT_NO_WWPN, - &port->status)) { + if (atomic_read(&port->status) & ZFCP_STATUS_PORT_NO_WWPN) { zfcp_port_put(port); - break; - } - - if ((result == ZFCP_ERP_SUCCEEDED) - && !port->rport) { - struct fc_rport_identifiers ids; - ids.node_name = port->wwnn; - ids.port_name = port->wwpn; - ids.port_id = port->d_id; - ids.roles = FC_RPORT_ROLE_FCP_TARGET; - port->rport = - fc_remote_port_add(adapter->scsi_host, 0, &ids); - if (!port->rport) - ZFCP_LOG_NORMAL("failed registration of rport" - "(adapter %s, wwpn=0x%016Lx)\n", - zfcp_get_busid_by_port(port), - port->wwpn); - else { - scsi_target_unblock(&port->rport->dev); - port->rport->maxframe_size = port->maxframe_size; - port->rport->supported_classes = - port->supported_classes; - } + return; } + if ((result == ZFCP_ERP_SUCCEEDED) && !port->rport) + zfcp_erp_rport_register(port); if ((result != ZFCP_ERP_SUCCEEDED) && port->rport) { fc_remote_port_delete(port->rport); port->rport = NULL; } zfcp_port_put(port); break; + case ZFCP_ERP_ACTION_REOPEN_ADAPTER: - if (result != ZFCP_ERP_SUCCEEDED) { - list_for_each_entry(port, &adapter->port_list_head, list) - if (port->rport && - !atomic_test_mask(ZFCP_STATUS_PORT_WKA, - &port->status)) { - fc_remote_port_delete(port->rport); - port->rport = NULL; - } - } + if (result != ZFCP_ERP_SUCCEEDED) + zfcp_erp_rports_del(adapter); zfcp_adapter_put(adapter); break; - default: - break; } } +static int zfcp_erp_strategy_do_action(struct zfcp_erp_action 
*erp_action) +{ + switch (erp_action->action) { + case ZFCP_ERP_ACTION_REOPEN_ADAPTER: + return zfcp_erp_adapter_strategy(erp_action); + case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: + return zfcp_erp_port_forced_strategy(erp_action); + case ZFCP_ERP_ACTION_REOPEN_PORT: + return zfcp_erp_port_strategy(erp_action); + case ZFCP_ERP_ACTION_REOPEN_UNIT: + return zfcp_erp_unit_strategy(erp_action); + } + return ZFCP_ERP_FAILED; +} -static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) +static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action) { - struct zfcp_port *port; + int retval; + struct zfcp_adapter *adapter = erp_action->adapter; + unsigned long flags; - if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status)) - zfcp_erp_action_dismiss(&adapter->erp_action); - else - list_for_each_entry(port, &adapter->port_list_head, list) - zfcp_erp_action_dismiss_port(port); + read_lock_irqsave(&zfcp_data.config_lock, flags); + write_lock(&adapter->erp_lock); + + zfcp_erp_strategy_check_fsfreq(erp_action); + + if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) { + zfcp_erp_action_dequeue(erp_action); + retval = ZFCP_ERP_DISMISSED; + goto unlock; + } + + zfcp_erp_action_to_running(erp_action); + + /* no lock to allow for blocking operations */ + write_unlock(&adapter->erp_lock); + read_unlock_irqrestore(&zfcp_data.config_lock, flags); + retval = zfcp_erp_strategy_do_action(erp_action); + read_lock_irqsave(&zfcp_data.config_lock, flags); + write_lock(&adapter->erp_lock); + + if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) + retval = ZFCP_ERP_CONTINUES; + + switch (retval) { + case ZFCP_ERP_NOMEM: + if (!(erp_action->status & ZFCP_STATUS_ERP_LOWMEM)) { + ++adapter->erp_low_mem_count; + erp_action->status |= ZFCP_STATUS_ERP_LOWMEM; + } + if (adapter->erp_total_count == adapter->erp_low_mem_count) + _zfcp_erp_adapter_reopen(adapter, 0, 66, NULL); + else { + zfcp_erp_strategy_memwait(erp_action); + retval = ZFCP_ERP_CONTINUES; + } + goto unlock; + + case ZFCP_ERP_CONTINUES: + if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) { + --adapter->erp_low_mem_count; + erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM; + } + goto unlock; + } + + retval = zfcp_erp_strategy_check_target(erp_action, retval); + zfcp_erp_action_dequeue(erp_action); + retval = zfcp_erp_strategy_statechange(erp_action, retval); + if (retval == ZFCP_ERP_EXIT) + goto unlock; + zfcp_erp_strategy_followup_actions(erp_action); + + unlock: + write_unlock(&adapter->erp_lock); + read_unlock_irqrestore(&zfcp_data.config_lock, flags); + + if (retval != ZFCP_ERP_CONTINUES) + zfcp_erp_action_cleanup(erp_action, retval); + + return retval; } -static void zfcp_erp_action_dismiss_port(struct zfcp_port *port) +static int zfcp_erp_thread(void *data) { - struct zfcp_unit *unit; + struct zfcp_adapter *adapter = (struct zfcp_adapter *) data; + struct list_head *next; + struct zfcp_erp_action *act; + unsigned long flags; - if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status)) - zfcp_erp_action_dismiss(&port->erp_action); + daemonize("zfcperp%s", adapter->ccw_device->dev.bus_id); + /* Block all signals */ + siginitsetinv(&current->blocked, 0); + atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status); + wake_up(&adapter->erp_thread_wqh); + + while (!(atomic_read(&adapter->status) & + ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL)) { + write_lock_irqsave(&adapter->erp_lock, flags); + next = adapter->erp_ready_head.next; + write_unlock_irqrestore(&adapter->erp_lock, flags); + + if (next != &adapter->erp_ready_head) { 
act = list_entry(next, struct zfcp_erp_action, list); + + /* there is more to come after dismission, no notify */ + if (zfcp_erp_strategy(act) != ZFCP_ERP_DISMISSED) + zfcp_erp_wakeup(adapter); + } + + zfcp_rec_dbf_event_thread(4, adapter); + down_interruptible(&adapter->erp_ready_sem); + zfcp_rec_dbf_event_thread(5, adapter); + } + + atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status); + wake_up(&adapter->erp_thread_wqh); + + return 0; +} + +/** + * zfcp_erp_thread_setup - Start ERP thread for adapter + * @adapter: Adapter to start the ERP thread for + * + * Returns 0 on success or error code from kernel_thread() + */ +int zfcp_erp_thread_setup(struct zfcp_adapter *adapter) +{ + int retval; + + atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status); + retval = kernel_thread(zfcp_erp_thread, adapter, SIGCHLD); + if (retval < 0) { + dev_err(&adapter->ccw_device->dev, + "Creation of ERP thread failed.\n"); + return retval; + } + wait_event(adapter->erp_thread_wqh, + atomic_read(&adapter->status) & + ZFCP_STATUS_ADAPTER_ERP_THREAD_UP); + return 0; +} + +/** + * zfcp_erp_thread_kill - Stop ERP thread. + * @adapter: Adapter where the ERP thread should be stopped. + * + * The caller of this routine ensures that the specified adapter has + * been shut down and that this operation has been completed. Thus, + * there are no pending erp_actions which would need to be handled + * here. + */ +void zfcp_erp_thread_kill(struct zfcp_adapter *adapter) +{ + atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, &adapter->status); + up(&adapter->erp_ready_sem); + zfcp_rec_dbf_event_thread_lock(2, adapter); + + wait_event(adapter->erp_thread_wqh, + !(atomic_read(&adapter->status) & + ZFCP_STATUS_ADAPTER_ERP_THREAD_UP)); + + atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, + &adapter->status); +} + +/** + * zfcp_erp_adapter_failed - Set adapter status to failed. + * @adapter: Failed adapter. + * @id: Event id for debug trace. + * @ref: Reference for debug trace. + */ +void zfcp_erp_adapter_failed(struct zfcp_adapter *adapter, u8 id, void *ref) +{ + zfcp_erp_modify_adapter_status(adapter, id, ref, + ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET); + dev_err(&adapter->ccw_device->dev, "Adapter ERP failed.\n"); +} + +/** + * zfcp_erp_port_failed - Set port status to failed. + * @port: Failed port. + * @id: Event id for debug trace. + * @ref: Reference for debug trace. + */ +void zfcp_erp_port_failed(struct zfcp_port *port, u8 id, void *ref) +{ + zfcp_erp_modify_port_status(port, id, ref, + ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET); + + if (atomic_read(&port->status) & ZFCP_STATUS_PORT_WKA) + dev_err(&port->adapter->ccw_device->dev, + "Port ERP failed for WKA port d_id=0x%06x.\n", + port->d_id); else - list_for_each_entry(unit, &port->unit_list_head, list) - zfcp_erp_action_dismiss_unit(unit); + dev_err(&port->adapter->ccw_device->dev, + "Port ERP failed for port wwpn=0x%016Lx.\n", + port->wwpn); } -static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit) +/** + * zfcp_erp_unit_failed - Set unit status to failed. + * @unit: Failed unit. + * @id: Event id for debug trace. + * @ref: Reference for debug trace. 
+ */ +void zfcp_erp_unit_failed(struct zfcp_unit *unit, u8 id, void *ref) { - if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status)) - zfcp_erp_action_dismiss(&unit->erp_action); + zfcp_erp_modify_unit_status(unit, id, ref, + ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET); + + dev_err(&unit->port->adapter->ccw_device->dev, + "Unit ERP failed for unit 0x%016Lx on port 0x%016Lx.\n", + unit->fcp_lun, unit->port->wwpn); } -static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action) +/** + * zfcp_erp_wait - wait for completion of error recovery on an adapter + * @adapter: adapter for which to wait for completion of its error recovery + */ +void zfcp_erp_wait(struct zfcp_adapter *adapter) { - list_move(&erp_action->list, &erp_action->adapter->erp_running_head); - zfcp_rec_dbf_event_action(145, erp_action); + wait_event(adapter->erp_done_wqh, + !(atomic_read(&adapter->status) & + ZFCP_STATUS_ADAPTER_ERP_PENDING)); +} + +/** + * zfcp_erp_modify_adapter_status - change adapter status bits + * @adapter: adapter to change the status + * @id: id for the debug trace + * @ref: reference for the debug trace + * @mask: status bits to change + * @set_or_clear: ZFCP_SET or ZFCP_CLEAR + * + * Changes in common status bits are propagated to attached ports and units. + */ +void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, u8 id, + void *ref, u32 mask, int set_or_clear) +{ + struct zfcp_port *port; + u32 common_mask = mask & ZFCP_COMMON_FLAGS; + + if (set_or_clear == ZFCP_SET) { + if (status_change_set(mask, &adapter->status)) + zfcp_rec_dbf_event_adapter(id, ref, adapter); + atomic_set_mask(mask, &adapter->status); + } else { + if (status_change_clear(mask, &adapter->status)) + zfcp_rec_dbf_event_adapter(id, ref, adapter); + atomic_clear_mask(mask, &adapter->status); + if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) + atomic_set(&adapter->erp_counter, 0); + } + + if (common_mask) + list_for_each_entry(port, &adapter->port_list_head, list) + zfcp_erp_modify_port_status(port, id, ref, common_mask, + set_or_clear); } -static void zfcp_erp_action_to_ready(struct zfcp_erp_action *erp_action) +/** + * zfcp_erp_modify_port_status - change port status bits + * @port: port to change the status bits + * @id: id for the debug trace + * @ref: reference for the debug trace + * @mask: status bits to change + * @set_or_clear: ZFCP_SET or ZFCP_CLEAR + * + * Changes in common status bits are propagated to attached units. 
+ */ +void zfcp_erp_modify_port_status(struct zfcp_port *port, u8 id, void *ref, + u32 mask, int set_or_clear) { - list_move(&erp_action->list, &erp_action->adapter->erp_ready_head); - zfcp_rec_dbf_event_action(146, erp_action); + struct zfcp_unit *unit; + u32 common_mask = mask & ZFCP_COMMON_FLAGS; + + if (set_or_clear == ZFCP_SET) { + if (status_change_set(mask, &port->status)) + zfcp_rec_dbf_event_port(id, ref, port); + atomic_set_mask(mask, &port->status); + } else { + if (status_change_clear(mask, &port->status)) + zfcp_rec_dbf_event_port(id, ref, port); + atomic_clear_mask(mask, &port->status); + if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) + atomic_set(&port->erp_counter, 0); + } + + if (common_mask) + list_for_each_entry(unit, &port->unit_list_head, list) + zfcp_erp_modify_unit_status(unit, id, ref, common_mask, + set_or_clear); } +/** + * zfcp_erp_modify_unit_status - change unit status bits + * @unit: unit to change the status bits + * @id: id for the debug trace + * @ref: reference for the debug trace + * @mask: status bits to change + * @set_or_clear: ZFCP_SET or ZFCP_CLEAR + */ +void zfcp_erp_modify_unit_status(struct zfcp_unit *unit, u8 id, void *ref, + u32 mask, int set_or_clear) +{ + if (set_or_clear == ZFCP_SET) { + if (status_change_set(mask, &unit->status)) + zfcp_rec_dbf_event_unit(id, ref, unit); + atomic_set_mask(mask, &unit->status); + } else { + if (status_change_clear(mask, &unit->status)) + zfcp_rec_dbf_event_unit(id, ref, unit); + atomic_clear_mask(mask, &unit->status); + if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) { + atomic_set(&unit->erp_counter, 0); + } + } +} + +/** + * zfcp_erp_port_boxed - Mark port as "boxed" and start ERP + * @port: The "boxed" port. + * @id: The debug trace id. + * @id: Reference for the debug trace. + */ void zfcp_erp_port_boxed(struct zfcp_port *port, u8 id, void *ref) { unsigned long flags; @@ -3065,6 +1687,12 @@ void zfcp_erp_port_boxed(struct zfcp_port *port, u8 id, void *ref) zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref); } +/** + * zfcp_erp_unit_boxed - Mark unit as "boxed" and start ERP + * @port: The "boxed" unit. + * @id: The debug trace id. + * @id: Reference for the debug trace. + */ void zfcp_erp_unit_boxed(struct zfcp_unit *unit, u8 id, void *ref) { zfcp_erp_modify_unit_status(unit, id, ref, @@ -3072,6 +1700,15 @@ void zfcp_erp_unit_boxed(struct zfcp_unit *unit, u8 id, void *ref) zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref); } +/** + * zfcp_erp_port_access_denied - Adapter denied access to port. + * @port: port where access has been denied + * @id: id for debug trace + * @ref: reference for debug trace + * + * Since the adapter has denied access, stop using the port and the + * attached units. + */ void zfcp_erp_port_access_denied(struct zfcp_port *port, u8 id, void *ref) { unsigned long flags; @@ -3083,6 +1720,14 @@ void zfcp_erp_port_access_denied(struct zfcp_port *port, u8 id, void *ref) read_unlock_irqrestore(&zfcp_data.config_lock, flags); } +/** + * zfcp_erp_unit_access_denied - Adapter denied access to unit. + * @unit: unit where access has been denied + * @id: id for debug trace + * @ref: reference for debug trace + * + * Since the adapter has denied access, stop using the unit. 
+ */ void zfcp_erp_unit_access_denied(struct zfcp_unit *unit, u8 id, void *ref) { zfcp_erp_modify_unit_status(unit, id, ref, @@ -3090,67 +1735,54 @@ void zfcp_erp_unit_access_denied(struct zfcp_unit *unit, u8 id, void *ref) ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET); } -void zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter, u8 id, - void *ref) +static void zfcp_erp_unit_access_changed(struct zfcp_unit *unit, u8 id, + void *ref) { - struct zfcp_port *port; - unsigned long flags; - - if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) + int status = atomic_read(&unit->status); + if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED | + ZFCP_STATUS_COMMON_ACCESS_BOXED))) return; - read_lock_irqsave(&zfcp_data.config_lock, flags); - if (adapter->nameserver_port) - zfcp_erp_port_access_changed(adapter->nameserver_port, id, ref); - list_for_each_entry(port, &adapter->port_list_head, list) - if (port != adapter->nameserver_port) - zfcp_erp_port_access_changed(port, id, ref); - read_unlock_irqrestore(&zfcp_data.config_lock, flags); + zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref); } -void zfcp_erp_port_access_changed(struct zfcp_port *port, u8 id, void *ref) +static void zfcp_erp_port_access_changed(struct zfcp_port *port, u8 id, + void *ref) { - struct zfcp_adapter *adapter = port->adapter; struct zfcp_unit *unit; + int status = atomic_read(&port->status); - if (!atomic_test_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED, - &port->status) && - !atomic_test_mask(ZFCP_STATUS_COMMON_ACCESS_BOXED, - &port->status)) { - if (!atomic_test_mask(ZFCP_STATUS_PORT_WKA, &port->status)) + if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED | + ZFCP_STATUS_COMMON_ACCESS_BOXED))) { + if (!(status & ZFCP_STATUS_PORT_WKA)) list_for_each_entry(unit, &port->unit_list_head, list) zfcp_erp_unit_access_changed(unit, id, ref); return; } - ZFCP_LOG_NORMAL("reopen of port 0x%016Lx on adapter %s " - "(due to ACT update)\n", - port->wwpn, zfcp_get_busid_by_adapter(adapter)); - if (zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref)) - ZFCP_LOG_NORMAL("failed reopen of port" - "(adapter %s, wwpn=0x%016Lx)\n", - zfcp_get_busid_by_adapter(adapter), port->wwpn); + zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref); } -void zfcp_erp_unit_access_changed(struct zfcp_unit *unit, u8 id, void *ref) +/** + * zfcp_erp_adapter_access_changed - Process change in adapter ACT + * @adapter: Adapter where the Access Control Table (ACT) changed + * @id: Id for debug trace + * @ref: Reference for debug trace + */ +void zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter, u8 id, + void *ref) { - struct zfcp_adapter *adapter = unit->port->adapter; + struct zfcp_port *port; + unsigned long flags; - if (!atomic_test_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED, - &unit->status) && - !atomic_test_mask(ZFCP_STATUS_COMMON_ACCESS_BOXED, - &unit->status)) + if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) return; - ZFCP_LOG_NORMAL("reopen of unit 0x%016Lx on port 0x%016Lx " - " on adapter %s (due to ACT update)\n", - unit->fcp_lun, unit->port->wwpn, - zfcp_get_busid_by_adapter(adapter)); - if (zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref)) - ZFCP_LOG_NORMAL("failed reopen of unit (adapter %s, " - "wwpn=0x%016Lx, fcp_lun=0x%016Lx)\n", - zfcp_get_busid_by_adapter(adapter), - unit->port->wwpn, unit->fcp_lun); + read_lock_irqsave(&zfcp_data.config_lock, flags); + if (adapter->nameserver_port) + zfcp_erp_port_access_changed(adapter->nameserver_port, id, ref); + 
list_for_each_entry(port, &adapter->port_list_head, list) + if (port != adapter->nameserver_port) + zfcp_erp_port_access_changed(port, id, ref); + read_unlock_irqrestore(&zfcp_data.config_lock, flags); } - -#undef ZFCP_LOG_AREA diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 6abf178fda5d..edfdb21591f3 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -1,22 +1,9 @@ /* - * This file is part of the zfcp device driver for - * FCP adapters for IBM System z9 and zSeries. + * zfcp device driver * - * (C) Copyright IBM Corp. 2002, 2006 + * External function declarations. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * Copyright IBM Corporation 2002, 2008 */ #ifndef ZFCP_EXT_H @@ -24,172 +11,50 @@ #include "zfcp_def.h" -extern struct zfcp_data zfcp_data; - -/******************************** SYSFS *************************************/ -extern struct attribute_group *zfcp_driver_attr_groups[]; -extern int zfcp_sysfs_adapter_create_files(struct device *); -extern void zfcp_sysfs_adapter_remove_files(struct device *); -extern int zfcp_sysfs_port_create_files(struct device *, u32); -extern void zfcp_sysfs_port_remove_files(struct device *, u32); -extern int zfcp_sysfs_unit_create_files(struct device *); -extern void zfcp_sysfs_unit_remove_files(struct device *); -extern void zfcp_sysfs_port_release(struct device *); -extern void zfcp_sysfs_unit_release(struct device *); - -/**************************** CONFIGURATION *********************************/ -extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *, fcp_lun_t); -extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, wwn_t); -extern struct zfcp_port *zfcp_get_port_by_did(struct zfcp_adapter *, u32); -struct zfcp_adapter *zfcp_get_adapter_by_busid(char *); -extern struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *); -extern int zfcp_adapter_debug_register(struct zfcp_adapter *); -extern void zfcp_adapter_dequeue(struct zfcp_adapter *); -extern void zfcp_adapter_debug_unregister(struct zfcp_adapter *); -extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, wwn_t, - u32, u32); -extern void zfcp_port_dequeue(struct zfcp_port *); +/* zfcp_aux.c */ +extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *, + fcp_lun_t); +extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, + wwn_t); +extern int zfcp_adapter_enqueue(struct ccw_device *); +extern void zfcp_adapter_dequeue(struct zfcp_adapter *); +extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, wwn_t, u32, + u32); +extern void zfcp_port_dequeue(struct zfcp_port *); extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, fcp_lun_t); -extern void zfcp_unit_dequeue(struct zfcp_unit *); - -/******************************* S/390 IO ************************************/ -extern int zfcp_ccw_register(void); - -extern 
void zfcp_qdio_zero_sbals(struct qdio_buffer **, int, int); -extern int zfcp_qdio_allocate(struct zfcp_adapter *); -extern int zfcp_qdio_allocate_queues(struct zfcp_adapter *); -extern void zfcp_qdio_free_queues(struct zfcp_adapter *); -extern int zfcp_qdio_determine_pci(struct zfcp_qdio_queue *, - struct zfcp_fsf_req *); - -extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_req - (struct zfcp_fsf_req *, int, int); -extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_curr - (struct zfcp_fsf_req *); -extern int zfcp_qdio_sbals_from_sg - (struct zfcp_fsf_req *, unsigned long, struct scatterlist *, int, int); -extern int zfcp_qdio_sbals_from_scsicmnd - (struct zfcp_fsf_req *, unsigned long, struct scsi_cmnd *); - - -/******************************** FSF ****************************************/ -extern int zfcp_fsf_open_port(struct zfcp_erp_action *); -extern int zfcp_fsf_close_port(struct zfcp_erp_action *); -extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *); - -extern int zfcp_fsf_open_unit(struct zfcp_erp_action *); -extern int zfcp_fsf_close_unit(struct zfcp_erp_action *); - -extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *); -extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *, - struct fsf_qtcb_bottom_config *); -extern int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *); -extern int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *, - struct fsf_qtcb_bottom_port *); -extern int zfcp_fsf_control_file(struct zfcp_adapter *, struct zfcp_fsf_req **, - u32, u32, struct zfcp_sg_list *); -extern void zfcp_fsf_start_timer(struct zfcp_fsf_req *, unsigned long); -extern void zfcp_erp_start_timer(struct zfcp_fsf_req *); -extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *); -extern int zfcp_fsf_status_read(struct zfcp_adapter *, int); -extern int zfcp_fsf_req_create(struct zfcp_adapter *, u32, int, mempool_t *, - unsigned long *, struct zfcp_fsf_req **); -extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *, - struct zfcp_erp_action *); -extern int zfcp_fsf_send_els(struct zfcp_send_els *); -extern int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *, - struct zfcp_unit *, - struct scsi_cmnd *, int, int); -extern int zfcp_fsf_req_complete(struct zfcp_fsf_req *); -extern void zfcp_fsf_incoming_els(struct zfcp_fsf_req *); -extern void zfcp_fsf_req_free(struct zfcp_fsf_req *); -extern struct zfcp_fsf_req *zfcp_fsf_send_fcp_command_task_management( - struct zfcp_adapter *, struct zfcp_unit *, u8, int); -extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command( - unsigned long, struct zfcp_adapter *, struct zfcp_unit *, int); - -/******************************* FC/FCP **************************************/ -extern int zfcp_nameserver_enqueue(struct zfcp_adapter *); -extern int zfcp_ns_gid_pn_request(struct zfcp_erp_action *); -extern int zfcp_check_ct_response(struct ct_hdr *); -extern int zfcp_handle_els_rjt(u32, struct zfcp_ls_rjt_par *); -extern void zfcp_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *); - -/******************************* SCSI ****************************************/ -extern int zfcp_adapter_scsi_register(struct zfcp_adapter *); -extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *); -extern void zfcp_set_fcp_dl(struct fcp_cmnd_iu *, fcp_dl_t); -extern char *zfcp_get_fcp_rsp_info_ptr(struct fcp_rsp_iu *); -extern void set_host_byte(int *, char); -extern void set_driver_byte(int *, char); -extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *); -extern fcp_dl_t 
zfcp_get_fcp_dl(struct fcp_cmnd_iu *); - -extern int zfcp_scsi_command_async(struct zfcp_adapter *,struct zfcp_unit *, - struct scsi_cmnd *, int); -extern int zfcp_scsi_command_sync(struct zfcp_unit *, struct scsi_cmnd *, int); -extern struct fc_function_template zfcp_transport_functions; - -/******************************** ERP ****************************************/ -extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, u8, void *, - u32, int); -extern int zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, u8, void *); -extern int zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, u8, void *); -extern void zfcp_erp_adapter_failed(struct zfcp_adapter *, u8, void *); - -extern void zfcp_erp_modify_port_status(struct zfcp_port *, u8, void *, u32, - int); -extern int zfcp_erp_port_reopen(struct zfcp_port *, int, u8, void *); -extern int zfcp_erp_port_shutdown(struct zfcp_port *, int, u8, void *); -extern int zfcp_erp_port_forced_reopen(struct zfcp_port *, int, u8, void *); -extern void zfcp_erp_port_failed(struct zfcp_port *, u8, void *); -extern int zfcp_erp_port_reopen_all(struct zfcp_adapter *, int, u8, void *); - -extern void zfcp_erp_modify_unit_status(struct zfcp_unit *, u8, void *, u32, - int); -extern int zfcp_erp_unit_reopen(struct zfcp_unit *, int, u8, void *); -extern int zfcp_erp_unit_shutdown(struct zfcp_unit *, int, u8, void *); -extern void zfcp_erp_unit_failed(struct zfcp_unit *, u8, void *); - -extern int zfcp_erp_thread_setup(struct zfcp_adapter *); -extern int zfcp_erp_thread_kill(struct zfcp_adapter *); -extern int zfcp_erp_wait(struct zfcp_adapter *); -extern void zfcp_erp_async_handler(struct zfcp_erp_action *, unsigned long); - -extern int zfcp_test_link(struct zfcp_port *); - -extern void zfcp_erp_port_boxed(struct zfcp_port *, u8 id, void *ref); -extern void zfcp_erp_unit_boxed(struct zfcp_unit *, u8 id, void *ref); -extern void zfcp_erp_port_access_denied(struct zfcp_port *, u8 id, void *ref); -extern void zfcp_erp_unit_access_denied(struct zfcp_unit *, u8 id, void *ref); -extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, u8, void *); -extern void zfcp_erp_port_access_changed(struct zfcp_port *, u8, void *); -extern void zfcp_erp_unit_access_changed(struct zfcp_unit *, u8, void *); - -/******************************** AUX ****************************************/ -extern void zfcp_rec_dbf_event_thread(u8 id, struct zfcp_adapter *adapter, - int lock); -extern void zfcp_rec_dbf_event_adapter(u8 id, void *ref, struct zfcp_adapter *); -extern void zfcp_rec_dbf_event_port(u8 id, void *ref, struct zfcp_port *port); -extern void zfcp_rec_dbf_event_unit(u8 id, void *ref, struct zfcp_unit *unit); -extern void zfcp_rec_dbf_event_trigger(u8 id, void *ref, u8 want, u8 need, - void *action, struct zfcp_adapter *, +extern void zfcp_unit_dequeue(struct zfcp_unit *); +extern int zfcp_reqlist_isempty(struct zfcp_adapter *); +extern void zfcp_sg_free_table(struct scatterlist *, int); +extern int zfcp_sg_setup_table(struct scatterlist *, int); + +/* zfcp_ccw.c */ +extern int zfcp_ccw_register(void); + +/* zfcp_cfdc.c */ +extern struct miscdevice zfcp_cfdc_misc; + +/* zfcp_dbf.c */ +extern int zfcp_adapter_debug_register(struct zfcp_adapter *); +extern void zfcp_adapter_debug_unregister(struct zfcp_adapter *); +extern void zfcp_rec_dbf_event_thread(u8, struct zfcp_adapter *); +extern void zfcp_rec_dbf_event_thread_lock(u8, struct zfcp_adapter *); +extern void zfcp_rec_dbf_event_adapter(u8, void *, struct zfcp_adapter *); +extern void 
zfcp_rec_dbf_event_port(u8, void *, struct zfcp_port *); +extern void zfcp_rec_dbf_event_unit(u8, void *, struct zfcp_unit *); +extern void zfcp_rec_dbf_event_trigger(u8, void *, u8, u8, void *, + struct zfcp_adapter *, struct zfcp_port *, struct zfcp_unit *); -extern void zfcp_rec_dbf_event_action(u8 id, struct zfcp_erp_action *); - +extern void zfcp_rec_dbf_event_action(u8, struct zfcp_erp_action *); extern void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *); extern void zfcp_hba_dbf_event_fsf_unsol(const char *, struct zfcp_adapter *, struct fsf_status_read_buffer *); -extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *, - unsigned int, unsigned int, unsigned int, - int, int); - +extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *, unsigned int, int, + int); extern void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *); extern void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *); extern void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *); extern void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *); extern void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *); - extern void zfcp_scsi_dbf_event_result(const char *, int, struct zfcp_adapter *, struct scsi_cmnd *, struct zfcp_fsf_req *); @@ -198,6 +63,101 @@ extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *, unsigned long); extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *, struct scsi_cmnd *); -extern int zfcp_reqlist_isempty(struct zfcp_adapter *); + +/* zfcp_erp.c */ +extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, u8, void *, + u32, int); +extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, u8, void *); +extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, u8, void *); +extern void zfcp_erp_adapter_failed(struct zfcp_adapter *, u8, void *); +extern void zfcp_erp_modify_port_status(struct zfcp_port *, u8, void *, u32, + int); +extern int zfcp_erp_port_reopen(struct zfcp_port *, int, u8, void *); +extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, u8, void *); +extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, u8, void *); +extern void zfcp_erp_port_failed(struct zfcp_port *, u8, void *); +extern void zfcp_erp_modify_unit_status(struct zfcp_unit *, u8, void *, u32, + int); +extern void zfcp_erp_unit_reopen(struct zfcp_unit *, int, u8, void *); +extern void zfcp_erp_unit_shutdown(struct zfcp_unit *, int, u8, void *); +extern void zfcp_erp_unit_failed(struct zfcp_unit *, u8, void *); +extern int zfcp_erp_thread_setup(struct zfcp_adapter *); +extern void zfcp_erp_thread_kill(struct zfcp_adapter *); +extern void zfcp_erp_wait(struct zfcp_adapter *); +extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long); +extern void zfcp_erp_port_boxed(struct zfcp_port *, u8, void *); +extern void zfcp_erp_unit_boxed(struct zfcp_unit *, u8, void *); +extern void zfcp_erp_port_access_denied(struct zfcp_port *, u8, void *); +extern void zfcp_erp_unit_access_denied(struct zfcp_unit *, u8, void *); +extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, u8, void *); +extern void zfcp_erp_timeout_handler(unsigned long); + +/* zfcp_fc.c */ +extern int zfcp_scan_ports(struct zfcp_adapter *); +extern void _zfcp_scan_ports_later(struct work_struct *); +extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *); +extern int zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *); +extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *); +extern void 
zfcp_test_link(struct zfcp_port *); + +/* zfcp_fsf.c */ +extern int zfcp_fsf_open_port(struct zfcp_erp_action *); +extern int zfcp_fsf_close_port(struct zfcp_erp_action *); +extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *); +extern int zfcp_fsf_open_unit(struct zfcp_erp_action *); +extern int zfcp_fsf_close_unit(struct zfcp_erp_action *); +extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *); +extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *, + struct fsf_qtcb_bottom_config *); +extern int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *); +extern int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *, + struct fsf_qtcb_bottom_port *); +extern struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *, + struct zfcp_fsf_cfdc *); +extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *); +extern int zfcp_fsf_status_read(struct zfcp_adapter *); +extern int zfcp_status_read_refill(struct zfcp_adapter *adapter); +extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *, + struct zfcp_erp_action *); +extern int zfcp_fsf_send_els(struct zfcp_send_els *); +extern int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *, + struct zfcp_unit *, + struct scsi_cmnd *, int, int); +extern void zfcp_fsf_req_complete(struct zfcp_fsf_req *); +extern void zfcp_fsf_req_free(struct zfcp_fsf_req *); +extern struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_adapter *, + struct zfcp_unit *, u8, int); +extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long, + struct zfcp_adapter *, + struct zfcp_unit *, int); + +/* zfcp_qdio.c */ +extern int zfcp_qdio_allocate(struct zfcp_adapter *); +extern void zfcp_qdio_free(struct zfcp_adapter *); +extern int zfcp_qdio_send(struct zfcp_fsf_req *); +extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_req( + struct zfcp_fsf_req *); +extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_curr( + struct zfcp_fsf_req *); +extern int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *, unsigned long, + struct scatterlist *, int); +extern int zfcp_qdio_open(struct zfcp_adapter *); +extern void zfcp_qdio_close(struct zfcp_adapter *); + +/* zfcp_scsi.c */ +extern struct zfcp_data zfcp_data; +extern int zfcp_adapter_scsi_register(struct zfcp_adapter *); +extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *); +extern void zfcp_set_fcp_dl(struct fcp_cmnd_iu *, fcp_dl_t); +extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *); +extern struct fc_function_template zfcp_transport_functions; + +/* zfcp_sysfs.c */ +extern struct attribute_group zfcp_sysfs_unit_attrs; +extern struct attribute_group zfcp_sysfs_adapter_attrs; +extern struct attribute_group zfcp_sysfs_ns_port_attrs; +extern struct attribute_group zfcp_sysfs_port_attrs; +extern struct device_attribute *zfcp_sysfs_sdev_attrs[]; +extern struct device_attribute *zfcp_sysfs_shost_attrs[]; #endif /* ZFCP_EXT_H */ diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c new file mode 100644 index 000000000000..e984469bb98b --- /dev/null +++ b/drivers/s390/scsi/zfcp_fc.c @@ -0,0 +1,567 @@ +/* + * zfcp device driver + * + * Fibre Channel related functions for the zfcp device driver. 
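+ * This covers handling of incoming ELS frames (PLOGI, LOGO, RSCN), the
+ * GID_PN nameserver lookup, the ADISC based link test and GPN_FT based
+ * remote port scanning.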
+ * + * Copyright IBM Corporation 2008 + */ + +#include "zfcp_ext.h" + +struct ct_iu_gpn_ft_req { + struct ct_hdr header; + u8 flags; + u8 domain_id_scope; + u8 area_id_scope; + u8 fc4_type; +} __attribute__ ((packed)); + +struct gpn_ft_resp_acc { + u8 control; + u8 port_id[3]; + u8 reserved[4]; + u64 wwpn; +} __attribute__ ((packed)); + +#define ZFCP_GPN_FT_ENTRIES ((PAGE_SIZE - sizeof(struct ct_hdr)) \ + / sizeof(struct gpn_ft_resp_acc)) +#define ZFCP_GPN_FT_BUFFERS 4 +#define ZFCP_GPN_FT_MAX_ENTRIES ZFCP_GPN_FT_BUFFERS * (ZFCP_GPN_FT_ENTRIES + 1) + +struct ct_iu_gpn_ft_resp { + struct ct_hdr header; + struct gpn_ft_resp_acc accept[ZFCP_GPN_FT_ENTRIES]; +} __attribute__ ((packed)); + +struct zfcp_gpn_ft { + struct zfcp_send_ct ct; + struct scatterlist sg_req; + struct scatterlist sg_resp[ZFCP_GPN_FT_BUFFERS]; +}; + +static struct zfcp_port *zfcp_get_port_by_did(struct zfcp_adapter *adapter, + u32 d_id) +{ + struct zfcp_port *port; + + list_for_each_entry(port, &adapter->port_list_head, list) + if ((port->d_id == d_id) && + !atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status)) + return port; + return NULL; +} + +static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range, + struct fcp_rscn_element *elem) +{ + unsigned long flags; + struct zfcp_port *port; + + read_lock_irqsave(&zfcp_data.config_lock, flags); + list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) { + if (atomic_test_mask(ZFCP_STATUS_PORT_WKA, &port->status)) + continue; + /* FIXME: ZFCP_STATUS_PORT_DID_DID check is racy */ + if (!atomic_test_mask(ZFCP_STATUS_PORT_DID_DID, &port->status)) + /* Try to connect to unused ports anyway. */ + zfcp_erp_port_reopen(port, + ZFCP_STATUS_COMMON_ERP_FAILED, + 82, fsf_req); + else if ((port->d_id & range) == (elem->nport_did & range)) + /* Check connection status for connected ports */ + zfcp_test_link(port); + } + read_unlock_irqrestore(&zfcp_data.config_lock, flags); +} + +static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req) +{ + struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data; + struct fcp_rscn_head *fcp_rscn_head; + struct fcp_rscn_element *fcp_rscn_element; + u16 i; + u16 no_entries; + u32 range_mask; + + fcp_rscn_head = (struct fcp_rscn_head *) status_buffer->payload.data; + fcp_rscn_element = (struct fcp_rscn_element *) fcp_rscn_head; + + /* see FC-FS */ + no_entries = fcp_rscn_head->payload_len / + sizeof(struct fcp_rscn_element); + + for (i = 1; i < no_entries; i++) { + /* skip head and start with 1st element */ + fcp_rscn_element++; + switch (fcp_rscn_element->addr_format) { + case ZFCP_PORT_ADDRESS: + range_mask = ZFCP_PORTS_RANGE_PORT; + break; + case ZFCP_AREA_ADDRESS: + range_mask = ZFCP_PORTS_RANGE_AREA; + break; + case ZFCP_DOMAIN_ADDRESS: + range_mask = ZFCP_PORTS_RANGE_DOMAIN; + break; + case ZFCP_FABRIC_ADDRESS: + range_mask = ZFCP_PORTS_RANGE_FABRIC; + break; + default: + continue; + } + _zfcp_fc_incoming_rscn(fsf_req, range_mask, fcp_rscn_element); + } + schedule_work(&fsf_req->adapter->scan_work); +} + +static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, wwn_t wwpn) +{ + struct zfcp_adapter *adapter = req->adapter; + struct zfcp_port *port; + unsigned long flags; + + read_lock_irqsave(&zfcp_data.config_lock, flags); + list_for_each_entry(port, &adapter->port_list_head, list) + if (port->wwpn == wwpn) + break; + read_unlock_irqrestore(&zfcp_data.config_lock, flags); + + if (port && (port->wwpn == wwpn)) + zfcp_erp_port_forced_reopen(port, 0, 83, req); +} + +static void 
zfcp_fc_incoming_plogi(struct zfcp_fsf_req *req) +{ + struct fsf_status_read_buffer *status_buffer = + (struct fsf_status_read_buffer *)req->data; + struct fsf_plogi *els_plogi = + (struct fsf_plogi *) status_buffer->payload.data; + + zfcp_fc_incoming_wwpn(req, els_plogi->serv_param.wwpn); +} + +static void zfcp_fc_incoming_logo(struct zfcp_fsf_req *req) +{ + struct fsf_status_read_buffer *status_buffer = + (struct fsf_status_read_buffer *)req->data; + struct fcp_logo *els_logo = + (struct fcp_logo *) status_buffer->payload.data; + + zfcp_fc_incoming_wwpn(req, els_logo->nport_wwpn); +} + +/** + * zfcp_fc_incoming_els - handle incoming ELS + * @fsf_req - request which contains incoming ELS + */ +void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req) +{ + struct fsf_status_read_buffer *status_buffer = + (struct fsf_status_read_buffer *) fsf_req->data; + unsigned int els_type = status_buffer->payload.data[0]; + + zfcp_san_dbf_event_incoming_els(fsf_req); + if (els_type == LS_PLOGI) + zfcp_fc_incoming_plogi(fsf_req); + else if (els_type == LS_LOGO) + zfcp_fc_incoming_logo(fsf_req); + else if (els_type == LS_RSCN) + zfcp_fc_incoming_rscn(fsf_req); +} + +static void zfcp_ns_gid_pn_handler(unsigned long data) +{ + struct zfcp_gid_pn_data *gid_pn = (struct zfcp_gid_pn_data *) data; + struct zfcp_send_ct *ct = &gid_pn->ct; + struct ct_iu_gid_pn_req *ct_iu_req = sg_virt(ct->req); + struct ct_iu_gid_pn_resp *ct_iu_resp = sg_virt(ct->resp); + struct zfcp_port *port = gid_pn->port; + + if (ct->status) + goto out; + if (ct_iu_resp->header.cmd_rsp_code != ZFCP_CT_ACCEPT) { + atomic_set_mask(ZFCP_STATUS_PORT_INVALID_WWPN, &port->status); + goto out; + } + /* paranoia */ + if (ct_iu_req->wwpn != port->wwpn) + goto out; + /* looks like a valid d_id */ + port->d_id = ct_iu_resp->d_id & ZFCP_DID_MASK; + atomic_set_mask(ZFCP_STATUS_PORT_DID_DID, &port->status); +out: + mempool_free(gid_pn, port->adapter->pool.data_gid_pn); +} + +/** + * zfcp_fc_ns_gid_pn_request - initiate GID_PN nameserver request + * @erp_action: pointer to zfcp_erp_action where GID_PN request is needed + * return: -ENOMEM on error, 0 otherwise + */ +int zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action) +{ + int ret; + struct zfcp_gid_pn_data *gid_pn; + struct zfcp_adapter *adapter = erp_action->adapter; + + gid_pn = mempool_alloc(adapter->pool.data_gid_pn, GFP_ATOMIC); + if (!gid_pn) + return -ENOMEM; + + memset(gid_pn, 0, sizeof(*gid_pn)); + + /* setup parameters for send generic command */ + gid_pn->port = erp_action->port; + gid_pn->ct.port = adapter->nameserver_port; + gid_pn->ct.handler = zfcp_ns_gid_pn_handler; + gid_pn->ct.handler_data = (unsigned long) gid_pn; + gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT; + gid_pn->ct.req = &gid_pn->req; + gid_pn->ct.resp = &gid_pn->resp; + gid_pn->ct.req_count = 1; + gid_pn->ct.resp_count = 1; + sg_init_one(&gid_pn->req, &gid_pn->ct_iu_req, + sizeof(struct ct_iu_gid_pn_req)); + sg_init_one(&gid_pn->resp, &gid_pn->ct_iu_resp, + sizeof(struct ct_iu_gid_pn_resp)); + + /* setup nameserver request */ + gid_pn->ct_iu_req.header.revision = ZFCP_CT_REVISION; + gid_pn->ct_iu_req.header.gs_type = ZFCP_CT_DIRECTORY_SERVICE; + gid_pn->ct_iu_req.header.gs_subtype = ZFCP_CT_NAME_SERVER; + gid_pn->ct_iu_req.header.options = ZFCP_CT_SYNCHRONOUS; + gid_pn->ct_iu_req.header.cmd_rsp_code = ZFCP_CT_GID_PN; + gid_pn->ct_iu_req.header.max_res_size = ZFCP_CT_MAX_SIZE; + gid_pn->ct_iu_req.wwpn = erp_action->port->wwpn; + + ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.fsf_req_erp, + erp_action); + if 
(ret) + mempool_free(gid_pn, adapter->pool.data_gid_pn); + return ret; +} + +/** + * zfcp_fc_plogi_evaluate - evaluate PLOGI payload + * @port: zfcp_port structure + * @plogi: plogi payload + * + * Evaluate PLOGI payload and copy important fields into zfcp_port structure + */ +void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fsf_plogi *plogi) +{ + port->maxframe_size = plogi->serv_param.common_serv_param[7] | + ((plogi->serv_param.common_serv_param[6] & 0x0F) << 8); + if (plogi->serv_param.class1_serv_param[0] & 0x80) + port->supported_classes |= FC_COS_CLASS1; + if (plogi->serv_param.class2_serv_param[0] & 0x80) + port->supported_classes |= FC_COS_CLASS2; + if (plogi->serv_param.class3_serv_param[0] & 0x80) + port->supported_classes |= FC_COS_CLASS3; + if (plogi->serv_param.class4_serv_param[0] & 0x80) + port->supported_classes |= FC_COS_CLASS4; +} + +struct zfcp_els_adisc { + struct zfcp_send_els els; + struct scatterlist req; + struct scatterlist resp; + struct zfcp_ls_adisc ls_adisc; + struct zfcp_ls_adisc_acc ls_adisc_acc; +}; + +static void zfcp_fc_adisc_handler(unsigned long data) +{ + struct zfcp_els_adisc *adisc = (struct zfcp_els_adisc *) data; + struct zfcp_port *port = adisc->els.port; + struct zfcp_ls_adisc_acc *ls_adisc = &adisc->ls_adisc_acc; + + if (adisc->els.status) { + /* request rejected or timed out */ + zfcp_erp_port_forced_reopen(port, 0, 63, NULL); + goto out; + } + + if (!port->wwnn) + port->wwnn = ls_adisc->wwnn; + + if (port->wwpn != ls_adisc->wwpn) + zfcp_erp_port_reopen(port, 0, 64, NULL); + + out: + zfcp_port_put(port); + kfree(adisc); +} + +static int zfcp_fc_adisc(struct zfcp_port *port) +{ + struct zfcp_els_adisc *adisc; + struct zfcp_adapter *adapter = port->adapter; + + adisc = kzalloc(sizeof(struct zfcp_els_adisc), GFP_ATOMIC); + if (!adisc) + return -ENOMEM; + + adisc->els.req = &adisc->req; + adisc->els.resp = &adisc->resp; + sg_init_one(adisc->els.req, &adisc->ls_adisc, + sizeof(struct zfcp_ls_adisc)); + sg_init_one(adisc->els.resp, &adisc->ls_adisc_acc, + sizeof(struct zfcp_ls_adisc_acc)); + + adisc->els.req_count = 1; + adisc->els.resp_count = 1; + adisc->els.adapter = adapter; + adisc->els.port = port; + adisc->els.d_id = port->d_id; + adisc->els.handler = zfcp_fc_adisc_handler; + adisc->els.handler_data = (unsigned long) adisc; + adisc->els.ls_code = adisc->ls_adisc.code = ZFCP_LS_ADISC; + + /* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports + without FC-AL-2 capability, so we don't set it */ + adisc->ls_adisc.wwpn = fc_host_port_name(adapter->scsi_host); + adisc->ls_adisc.wwnn = fc_host_node_name(adapter->scsi_host); + adisc->ls_adisc.nport_id = fc_host_port_id(adapter->scsi_host); + + return zfcp_fsf_send_els(&adisc->els); +} + +/** + * zfcp_test_link - lightweight link test procedure + * @port: port to be tested + * + * Test status of a link to a remote port using the ELS command ADISC. + * If there is a problem with the remote port, error recovery steps + * will be triggered.
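+ * + * A minimal usage sketch (illustrative; it mirrors how the RSCN handler + * in this file invokes the test for ports whose destination ID is known): + * + *	if (atomic_test_mask(ZFCP_STATUS_PORT_DID_DID, &port->status)) + *		zfcp_test_link(port);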
+ */ +void zfcp_test_link(struct zfcp_port *port) +{ + int retval; + + zfcp_port_get(port); + retval = zfcp_fc_adisc(port); + if (retval == 0 || retval == -EBUSY) + return; + + /* send of ADISC was not possible */ + zfcp_port_put(port); + zfcp_erp_port_forced_reopen(port, 0, 65, NULL); +} + +static int zfcp_scan_get_nameserver(struct zfcp_adapter *adapter) +{ + int ret; + + if (!adapter->nameserver_port) + return -EINTR; + + if (!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED, + &adapter->nameserver_port->status)) { + ret = zfcp_erp_port_reopen(adapter->nameserver_port, 0, 148, + NULL); + if (ret) + return ret; + zfcp_erp_wait(adapter); + zfcp_port_put(adapter->nameserver_port); + } + return !atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED, + &adapter->nameserver_port->status); +} + +static void zfcp_gpn_ft_handler(unsigned long _done) +{ + complete((struct completion *)_done); +} + +static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft) +{ + struct scatterlist *sg = &gpn_ft->sg_req; + + kfree(sg_virt(sg)); /* free request buffer */ + zfcp_sg_free_table(gpn_ft->sg_resp, ZFCP_GPN_FT_BUFFERS); + + kfree(gpn_ft); +} + +static struct zfcp_gpn_ft *zfcp_alloc_sg_env(void) +{ + struct zfcp_gpn_ft *gpn_ft; + struct ct_iu_gpn_ft_req *req; + + gpn_ft = kzalloc(sizeof(*gpn_ft), GFP_KERNEL); + if (!gpn_ft) + return NULL; + + req = kzalloc(sizeof(struct ct_iu_gpn_ft_req), GFP_KERNEL); + if (!req) { + kfree(gpn_ft); + gpn_ft = NULL; + goto out; + } + sg_init_one(&gpn_ft->sg_req, req, sizeof(*req)); + + if (zfcp_sg_setup_table(gpn_ft->sg_resp, ZFCP_GPN_FT_BUFFERS)) { + zfcp_free_sg_env(gpn_ft); + gpn_ft = NULL; + } +out: + return gpn_ft; +} + + +static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft, + struct zfcp_adapter *adapter) +{ + struct zfcp_send_ct *ct = &gpn_ft->ct; + struct ct_iu_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req); + struct completion done; + int ret; + + /* prepare CT IU for GPN_FT */ + req->header.revision = ZFCP_CT_REVISION; + req->header.gs_type = ZFCP_CT_DIRECTORY_SERVICE; + req->header.gs_subtype = ZFCP_CT_NAME_SERVER; + req->header.options = ZFCP_CT_SYNCHRONOUS; + req->header.cmd_rsp_code = ZFCP_CT_GPN_FT; + req->header.max_res_size = (sizeof(struct gpn_ft_resp_acc) * + (ZFCP_GPN_FT_MAX_ENTRIES - 1)) >> 2; + req->flags = 0; + req->domain_id_scope = 0; + req->area_id_scope = 0; + req->fc4_type = ZFCP_CT_SCSI_FCP; + + /* prepare zfcp_send_ct */ + ct->port = adapter->nameserver_port; + ct->handler = zfcp_gpn_ft_handler; + ct->handler_data = (unsigned long)&done; + ct->timeout = 10; + ct->req = &gpn_ft->sg_req; + ct->resp = gpn_ft->sg_resp; + ct->req_count = 1; + ct->resp_count = ZFCP_GPN_FT_BUFFERS; + + init_completion(&done); + ret = zfcp_fsf_send_ct(ct, NULL, NULL); + if (!ret) + wait_for_completion(&done); + return ret; +} + +static void zfcp_validate_port(struct zfcp_port *port) +{ + struct zfcp_adapter *adapter = port->adapter; + + atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status); + + if (port == adapter->nameserver_port) + return; + if ((port->supported_classes != 0) || (port->units != 0)) { + zfcp_port_put(port); + return; + } + zfcp_erp_port_shutdown(port, 0, 151, NULL); + zfcp_erp_wait(adapter); + zfcp_port_put(port); + zfcp_port_dequeue(port); +} + +static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft) +{ + struct zfcp_send_ct *ct = &gpn_ft->ct; + struct scatterlist *sg = gpn_ft->sg_resp; + struct ct_hdr *hdr = sg_virt(sg); + struct gpn_ft_resp_acc *acc = sg_virt(sg); + struct zfcp_adapter *adapter = ct->port->adapter; + struct zfcp_port *port, *tmp; 
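+	/* d_id below is assembled from the 3-byte port_id of each GPN_FT
+	 * accept entry; entries are walked page by page across the response
+	 * sg list. */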
+ u32 d_id; + int ret = 0, x; + + if (ct->status) + return -EIO; + + if (hdr->cmd_rsp_code != ZFCP_CT_ACCEPT) { + if (hdr->reason_code == ZFCP_CT_UNABLE_TO_PERFORM_CMD) + return -EAGAIN; /* might be a temporary condition */ + return -EIO; + } + + if (hdr->max_res_size) + return -E2BIG; + + down(&zfcp_data.config_sema); + + /* first entry is the header */ + for (x = 1; x < ZFCP_GPN_FT_MAX_ENTRIES; x++) { + if (x % (ZFCP_GPN_FT_ENTRIES + 1)) + acc++; + else + acc = sg_virt(++sg); + + d_id = acc->port_id[0] << 16 | acc->port_id[1] << 8 | + acc->port_id[2]; + + /* skip the adapter's port and known remote ports */ + if (acc->wwpn == fc_host_port_name(adapter->scsi_host) || + zfcp_get_port_by_did(adapter, d_id)) + continue; + + port = zfcp_port_enqueue(adapter, acc->wwpn, + ZFCP_STATUS_PORT_DID_DID | + ZFCP_STATUS_COMMON_NOESC, d_id); + if (IS_ERR(port)) + ret = PTR_ERR(port); + else + zfcp_erp_port_reopen(port, 0, 149, NULL); + if (acc->control & 0x80) /* last entry */ + break; + } + + zfcp_erp_wait(adapter); + list_for_each_entry_safe(port, tmp, &adapter->port_list_head, list) + zfcp_validate_port(port); + up(&zfcp_data.config_sema); + return ret; +} + +/** + * zfcp_scan_ports - scan remote ports and attach new ports + * @adapter: pointer to struct zfcp_adapter + */ +int zfcp_scan_ports(struct zfcp_adapter *adapter) +{ + int ret, i; + struct zfcp_gpn_ft *gpn_ft; + + zfcp_erp_wait(adapter); /* wait until adapter is finished with ERP */ + if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT) + return 0; + + ret = zfcp_scan_get_nameserver(adapter); + if (ret) + return ret; + + gpn_ft = zfcp_alloc_sg_env(); + if (!gpn_ft) + return -ENOMEM; + + for (i = 0; i < 3; i++) { + ret = zfcp_scan_issue_gpn_ft(gpn_ft, adapter); + if (!ret) { + ret = zfcp_scan_eval_gpn_ft(gpn_ft); + if (ret == -EAGAIN) + ssleep(1); + else + break; + } + } + zfcp_free_sg_env(gpn_ft); + + return ret; +} + + +void _zfcp_scan_ports_later(struct work_struct *work) +{ + zfcp_scan_ports(container_of(work, struct zfcp_adapter, scan_work)); +} diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index b2ea4ea051f5..19c1ca913874 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -1,54 +1,37 @@ /* - * This file is part of the zfcp device driver for - * FCP adapters for IBM System z9 and zSeries. + * zfcp device driver * - * (C) Copyright IBM Corp. 2002, 2006 + * Implementation of FSF commands. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * Copyright IBM Corporation 2002, 2008 */ #include "zfcp_ext.h" -static int zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *); -static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *); -static int zfcp_fsf_open_port_handler(struct zfcp_fsf_req *); -static int zfcp_fsf_close_port_handler(struct zfcp_fsf_req *); -static int zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *); -static int zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *); -static int zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *); -static int zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *); -static int zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *); -static int zfcp_fsf_send_fcp_command_task_management_handler( - struct zfcp_fsf_req *); -static int zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *); -static int zfcp_fsf_status_read_handler(struct zfcp_fsf_req *); -static int zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *); -static int zfcp_fsf_send_els_handler(struct zfcp_fsf_req *); -static int zfcp_fsf_control_file_handler(struct zfcp_fsf_req *); -static inline int zfcp_fsf_req_sbal_check( - unsigned long *, struct zfcp_qdio_queue *, int); -static inline int zfcp_use_one_sbal( - struct scatterlist *, int, struct scatterlist *, int); -static struct zfcp_fsf_req *zfcp_fsf_req_alloc(mempool_t *, int); -static int zfcp_fsf_req_send(struct zfcp_fsf_req *); -static int zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *); -static int zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *); -static int zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *); -static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *, u8, - struct fsf_link_down_info *); -static int zfcp_fsf_req_dispatch(struct zfcp_fsf_req *); +static void zfcp_fsf_request_timeout_handler(unsigned long data) +{ + struct zfcp_adapter *adapter = (struct zfcp_adapter *) data; + zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 62, + NULL); +} + +static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req, + unsigned long timeout) +{ + fsf_req->timer.function = zfcp_fsf_request_timeout_handler; + fsf_req->timer.data = (unsigned long) fsf_req->adapter; + fsf_req->timer.expires = jiffies + timeout; + add_timer(&fsf_req->timer); +} + +static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req) +{ + BUG_ON(!fsf_req->erp_action); + fsf_req->timer.function = zfcp_erp_timeout_handler; + fsf_req->timer.data = (unsigned long) fsf_req->erp_action; + fsf_req->timer.expires = jiffies + 30 * HZ; + add_timer(&fsf_req->timer); +} /* association between FSF command and FSF QTCB type */ static u32 fsf_qtcb_type[] = { @@ -67,96 +50,77 @@ static u32 fsf_qtcb_type[] = { [FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND }; -static const char zfcp_act_subtable_type[5][8] = { +static const char *zfcp_act_subtable_type[] = { "unknown", "OS", "WWPN", "DID", "LUN" }; -/****************************************************************/ -/*************** FSF related Functions *************************/ -/****************************************************************/ - -#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF - -/* - * function: zfcp_fsf_req_alloc - * - * purpose: Obtains an fsf_req and potentially a qtcb (for all but - * unsolicited requests) via helper functions - * Does some initial fsf request set-up. 
- * - * returns: pointer to allocated fsf_req if successfull - * NULL otherwise - * - * locks: none - * - */ -static struct zfcp_fsf_req * -zfcp_fsf_req_alloc(mempool_t *pool, int req_flags) +static void zfcp_act_eval_err(struct zfcp_adapter *adapter, u32 table) { - size_t size; - void *ptr; - struct zfcp_fsf_req *fsf_req = NULL; + u16 subtable = table >> 16; + u16 rule = table & 0xffff; - if (req_flags & ZFCP_REQ_NO_QTCB) - size = sizeof(struct zfcp_fsf_req); - else - size = sizeof(struct zfcp_fsf_req_qtcb); - - if (likely(pool)) - ptr = mempool_alloc(pool, GFP_ATOMIC); - else { - if (req_flags & ZFCP_REQ_NO_QTCB) - ptr = kmalloc(size, GFP_ATOMIC); - else - ptr = kmem_cache_alloc(zfcp_data.fsf_req_qtcb_cache, - GFP_ATOMIC); - } - - if (unlikely(!ptr)) - goto out; - - memset(ptr, 0, size); + if (subtable && subtable < ARRAY_SIZE(zfcp_act_subtable_type)) + dev_warn(&adapter->ccw_device->dev, + "Access denied in subtable %s, rule %d.\n", + zfcp_act_subtable_type[subtable], rule); +} - if (req_flags & ZFCP_REQ_NO_QTCB) { - fsf_req = (struct zfcp_fsf_req *) ptr; - } else { - fsf_req = &((struct zfcp_fsf_req_qtcb *) ptr)->fsf_req; - fsf_req->qtcb = &((struct zfcp_fsf_req_qtcb *) ptr)->qtcb; - } +static void zfcp_fsf_access_denied_port(struct zfcp_fsf_req *req, + struct zfcp_port *port) +{ + struct fsf_qtcb_header *header = &req->qtcb->header; + dev_warn(&req->adapter->ccw_device->dev, + "Access denied, cannot send command to port 0x%016Lx.\n", + port->wwpn); + zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]); + zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]); + zfcp_erp_port_access_denied(port, 55, req); + req->status |= ZFCP_STATUS_FSFREQ_ERROR; +} - fsf_req->pool = pool; +static void zfcp_fsf_access_denied_unit(struct zfcp_fsf_req *req, + struct zfcp_unit *unit) +{ + struct fsf_qtcb_header *header = &req->qtcb->header; + dev_warn(&req->adapter->ccw_device->dev, + "Access denied for unit 0x%016Lx on port 0x%016Lx.\n", + unit->fcp_lun, unit->port->wwpn); + zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]); + zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]); + zfcp_erp_unit_access_denied(unit, 59, req); + req->status |= ZFCP_STATUS_FSFREQ_ERROR; +} - out: - return fsf_req; +static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req) +{ + dev_err(&req->adapter->ccw_device->dev, + "Required FC class not supported by adapter, " + "shutting down adapter.\n"); + zfcp_erp_adapter_shutdown(req->adapter, 0, 123, req); + req->status |= ZFCP_STATUS_FSFREQ_ERROR; } -/* - * function: zfcp_fsf_req_free - * - * purpose: Frees the memory of an fsf_req (and potentially a qtcb) or - * returns it into the pool via helper functions. - * - * returns: sod all - * - * locks: none +/** + * zfcp_fsf_req_free - free memory used by fsf request + * @fsf_req: pointer to struct zfcp_fsf_req */ -void -zfcp_fsf_req_free(struct zfcp_fsf_req *fsf_req) +void zfcp_fsf_req_free(struct zfcp_fsf_req *req) { - if (likely(fsf_req->pool)) { - mempool_free(fsf_req, fsf_req->pool); + if (likely(req->pool)) { + mempool_free(req, req->pool); return; } - if (fsf_req->qtcb) { - kmem_cache_free(zfcp_data.fsf_req_qtcb_cache, fsf_req); + if (req->qtcb) { + kmem_cache_free(zfcp_data.fsf_req_qtcb_cache, req); return; } - - kfree(fsf_req); } -/* +/** + * zfcp_fsf_req_dismiss_all - dismiss all fsf requests + * @adapter: pointer to struct zfcp_adapter + * * Never ever call this without shutting down the adapter first. 
* Otherwise the adapter would continue using and corrupting s390 storage. * Included BUG_ON() call to ensure this is done. @@ -164,2353 +128,1359 @@ zfcp_fsf_req_free(struct zfcp_fsf_req *fsf_req) */ void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter) { - struct zfcp_fsf_req *fsf_req, *tmp; + struct zfcp_fsf_req *req, *tmp; unsigned long flags; LIST_HEAD(remove_queue); unsigned int i; - BUG_ON(atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)); + BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP); spin_lock_irqsave(&adapter->req_list_lock, flags); - atomic_set(&adapter->reqs_active, 0); for (i = 0; i < REQUEST_LIST_SIZE; i++) list_splice_init(&adapter->req_list[i], &remove_queue); spin_unlock_irqrestore(&adapter->req_list_lock, flags); - list_for_each_entry_safe(fsf_req, tmp, &remove_queue, list) { - list_del(&fsf_req->list); - fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; - zfcp_fsf_req_complete(fsf_req); + list_for_each_entry_safe(req, tmp, &remove_queue, list) { + list_del(&req->list); + req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; + zfcp_fsf_req_complete(req); } } -/* - * function: zfcp_fsf_req_complete - * - * purpose: Updates active counts and timers for openfcp-reqs - * May cleanup request after req_eval returns - * - * returns: 0 - success - * !0 - failure - * - * context: - */ -int -zfcp_fsf_req_complete(struct zfcp_fsf_req *fsf_req) +static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req) { - int retval = 0; - int cleanup; - - if (unlikely(fsf_req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) { - ZFCP_LOG_DEBUG("Status read response received\n"); - /* - * Note: all cleanup handling is done in the callchain of - * the function call-chain below. - */ - zfcp_fsf_status_read_handler(fsf_req); - goto out; - } else { - del_timer(&fsf_req->timer); - zfcp_fsf_protstatus_eval(fsf_req); - } - - /* - * fsf_req may be deleted due to waking up functions, so - * cleanup is saved here and used later - */ - if (likely(fsf_req->status & ZFCP_STATUS_FSFREQ_CLEANUP)) - cleanup = 1; - else - cleanup = 0; - - fsf_req->status |= ZFCP_STATUS_FSFREQ_COMPLETED; + struct fsf_status_read_buffer *sr_buf = req->data; + struct zfcp_adapter *adapter = req->adapter; + struct zfcp_port *port; + int d_id = sr_buf->d_id & ZFCP_DID_MASK; + unsigned long flags; - /* cleanup request if requested by initiator */ - if (likely(cleanup)) { - ZFCP_LOG_TRACE("removing FSF request %p\n", fsf_req); - /* - * lock must not be held here since it will be - * grabed by the called routine, too - */ - zfcp_fsf_req_free(fsf_req); - } else { - /* notify initiator waiting for the requests completion */ - ZFCP_LOG_TRACE("waking initiator of FSF request %p\n",fsf_req); - /* - * FIXME: Race! We must not access fsf_req here as it might have been - * cleaned up already due to the set ZFCP_STATUS_FSFREQ_COMPLETED - * flag. It's an improbable case. But, we have the same paranoia for - * the cleanup flag already. - * Might better be handled using complete()? 
- * (setting the flag and doing wakeup ought to be atomic - * with regard to checking the flag as long as waitqueue is - * part of the to be released structure) - */ - wake_up(&fsf_req->completion_wq); - } + read_lock_irqsave(&zfcp_data.config_lock, flags); + list_for_each_entry(port, &adapter->port_list_head, list) + if (port->d_id == d_id) { + read_unlock_irqrestore(&zfcp_data.config_lock, flags); + switch (sr_buf->status_subtype) { + case FSF_STATUS_READ_SUB_CLOSE_PHYS_PORT: + zfcp_erp_port_reopen(port, 0, 101, req); + break; + case FSF_STATUS_READ_SUB_ERROR_PORT: + zfcp_erp_port_shutdown(port, 0, 122, req); + break; + } + return; + } + read_unlock_irqrestore(&zfcp_data.config_lock, flags); +} - out: - return retval; +static void zfcp_fsf_bit_error_threshold(struct zfcp_fsf_req *req) +{ + struct zfcp_adapter *adapter = req->adapter; + struct fsf_status_read_buffer *sr_buf = req->data; + struct fsf_bit_error_payload *err = &sr_buf->payload.bit_error; + + dev_warn(&adapter->ccw_device->dev, + "Warning: bit error threshold data " + "received for the adapter: " + "link failures = %i, loss of sync errors = %i, " + "loss of signal errors = %i, " + "primitive sequence errors = %i, " + "invalid transmission word errors = %i, " + "CRC errors = %i).\n", + err->link_failure_error_count, + err->loss_of_sync_error_count, + err->loss_of_signal_error_count, + err->primitive_sequence_error_count, + err->invalid_transmission_word_error_count, + err->crc_error_count); + dev_warn(&adapter->ccw_device->dev, + "Additional bit error threshold data of the adapter: " + "primitive sequence event time-outs = %i, " + "elastic buffer overrun errors = %i, " + "advertised receive buffer-to-buffer credit = %i, " + "current receice buffer-to-buffer credit = %i, " + "advertised transmit buffer-to-buffer credit = %i, " + "current transmit buffer-to-buffer credit = %i).\n", + err->primitive_sequence_event_timeout_count, + err->elastic_buffer_overrun_error_count, + err->advertised_receive_b2b_credit, + err->current_receive_b2b_credit, + err->advertised_transmit_b2b_credit, + err->current_transmit_b2b_credit); } -/* - * function: zfcp_fsf_protstatus_eval - * - * purpose: evaluates the QTCB of the finished FSF request - * and initiates appropriate actions - * (usually calling FSF command specific handlers) - * - * returns: - * - * context: - * - * locks: - */ -static int -zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req) +static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, u8 id, + struct fsf_link_down_info *link_down) { - int retval = 0; - struct zfcp_adapter *adapter = fsf_req->adapter; - struct fsf_qtcb *qtcb = fsf_req->qtcb; - union fsf_prot_status_qual *prot_status_qual = - &qtcb->prefix.prot_status_qual; - - zfcp_hba_dbf_event_fsf_response(fsf_req); - - if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { - ZFCP_LOG_DEBUG("fsf_req 0x%lx has been dismissed\n", - (unsigned long) fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR | - ZFCP_STATUS_FSFREQ_RETRY; /* only for SCSI cmnds. 
*/ - goto skip_protstatus; - } + struct zfcp_adapter *adapter = req->adapter; - /* evaluate FSF Protocol Status */ - switch (qtcb->prefix.prot_status) { + if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED) + return; - case FSF_PROT_GOOD: - case FSF_PROT_FSF_STATUS_PRESENTED: - break; + atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status); - case FSF_PROT_QTCB_VERSION_ERROR: - ZFCP_LOG_NORMAL("error: The adapter %s contains " - "microcode of version 0x%x, the device driver " - "only supports 0x%x. Aborting.\n", - zfcp_get_busid_by_adapter(adapter), - prot_status_qual->version_error.fsf_version, - ZFCP_QTCB_VERSION); - zfcp_erp_adapter_shutdown(adapter, 0, 117, fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - break; + if (!link_down) + goto out; - case FSF_PROT_SEQ_NUMB_ERROR: - ZFCP_LOG_NORMAL("bug: Sequence number mismatch between " - "driver (0x%x) and adapter %s (0x%x). " - "Restarting all operations on this adapter.\n", - qtcb->prefix.req_seq_no, - zfcp_get_busid_by_adapter(adapter), - prot_status_qual->sequence_error.exp_req_seq_no); - zfcp_erp_adapter_reopen(adapter, 0, 98, fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_RETRY; - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; + switch (link_down->error_code) { + case FSF_PSQ_LINK_NO_LIGHT: + dev_warn(&req->adapter->ccw_device->dev, + "The local link is down: no light detected.\n"); break; - - case FSF_PROT_UNSUPP_QTCB_TYPE: - ZFCP_LOG_NORMAL("error: Packet header type used by the " - "device driver is incompatible with " - "that used on adapter %s. " - "Stopping all operations on this adapter.\n", - zfcp_get_busid_by_adapter(adapter)); - zfcp_erp_adapter_shutdown(adapter, 0, 118, fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; + case FSF_PSQ_LINK_WRAP_PLUG: + dev_warn(&req->adapter->ccw_device->dev, + "The local link is down: wrap plug detected.\n"); break; - - case FSF_PROT_HOST_CONNECTION_INITIALIZING: - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, - &(adapter->status)); + case FSF_PSQ_LINK_NO_FCP: + dev_warn(&req->adapter->ccw_device->dev, + "The local link is down: " + "adjacent node on link does not support FCP.\n"); break; - - case FSF_PROT_DUPLICATE_REQUEST_ID: - ZFCP_LOG_NORMAL("bug: The request identifier 0x%Lx " - "to the adapter %s is ambiguous. " - "Stopping all operations on this adapter.\n", - *(unsigned long long*) - (&qtcb->bottom.support.req_handle), - zfcp_get_busid_by_adapter(adapter)); - zfcp_erp_adapter_shutdown(adapter, 0, 78, fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; + case FSF_PSQ_LINK_FIRMWARE_UPDATE: + dev_warn(&req->adapter->ccw_device->dev, + "The local link is down: " + "firmware update in progress.\n"); break; - - case FSF_PROT_LINK_DOWN: - zfcp_fsf_link_down_info_eval(fsf_req, 37, - &prot_status_qual->link_down_info); - /* FIXME: reopening adapter now? better wait for link up */ - zfcp_erp_adapter_reopen(adapter, 0, 79, fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; + case FSF_PSQ_LINK_INVALID_WWPN: + dev_warn(&req->adapter->ccw_device->dev, + "The local link is down: " + "duplicate or invalid WWPN detected.\n"); break; - - case FSF_PROT_REEST_QUEUE: - ZFCP_LOG_NORMAL("The local link to adapter with " - "%s was re-plugged. 
" - "Re-starting operations on this adapter.\n", - zfcp_get_busid_by_adapter(adapter)); - /* All ports should be marked as ready to run again */ - zfcp_erp_modify_adapter_status(adapter, 28, NULL, - ZFCP_STATUS_COMMON_RUNNING, - ZFCP_SET); - zfcp_erp_adapter_reopen(adapter, - ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED - | ZFCP_STATUS_COMMON_ERP_FAILED, - 99, fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; + case FSF_PSQ_LINK_NO_NPIV_SUPPORT: + dev_warn(&req->adapter->ccw_device->dev, + "The local link is down: " + "no support for NPIV by Fabric.\n"); break; - - case FSF_PROT_ERROR_STATE: - ZFCP_LOG_NORMAL("error: The adapter %s " - "has entered the error state. " - "Restarting all operations on this " - "adapter.\n", - zfcp_get_busid_by_adapter(adapter)); - zfcp_erp_adapter_reopen(adapter, 0, 100, fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_RETRY; - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; + case FSF_PSQ_LINK_NO_FCP_RESOURCES: + dev_warn(&req->adapter->ccw_device->dev, + "The local link is down: " + "out of resource in FCP daughtercard.\n"); + break; + case FSF_PSQ_LINK_NO_FABRIC_RESOURCES: + dev_warn(&req->adapter->ccw_device->dev, + "The local link is down: " + "out of resource in Fabric.\n"); + break; + case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE: + dev_warn(&req->adapter->ccw_device->dev, + "The local link is down: " + "unable to login to Fabric.\n"); + break; + case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED: + dev_warn(&req->adapter->ccw_device->dev, + "WWPN assignment file corrupted on adapter.\n"); + break; + case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED: + dev_warn(&req->adapter->ccw_device->dev, + "Mode table corrupted on adapter.\n"); + break; + case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT: + dev_warn(&req->adapter->ccw_device->dev, + "No WWPN for assignment table on adapter.\n"); break; - default: - ZFCP_LOG_NORMAL("bug: Transfer protocol status information " - "provided by the adapter %s " - "is not compatible with the device driver. " - "Stopping all operations on this adapter. 
" - "(debug info 0x%x).\n", - zfcp_get_busid_by_adapter(adapter), - qtcb->prefix.prot_status); - zfcp_erp_adapter_shutdown(adapter, 0, 119, fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; + dev_warn(&req->adapter->ccw_device->dev, + "The local link to adapter is down.\n"); } +out: + zfcp_erp_adapter_failed(adapter, id, req); +} - skip_protstatus: - /* - * always call specific handlers to give them a chance to do - * something meaningful even in error cases - */ - zfcp_fsf_fsfstatus_eval(fsf_req); - return retval; +static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req) +{ + struct zfcp_adapter *adapter = req->adapter; + struct fsf_status_read_buffer *sr_buf = req->data; + struct fsf_link_down_info *ldi = + (struct fsf_link_down_info *) &sr_buf->payload; + + switch (sr_buf->status_subtype) { + case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK: + dev_warn(&adapter->ccw_device->dev, + "Physical link is down.\n"); + zfcp_fsf_link_down_info_eval(req, 38, ldi); + break; + case FSF_STATUS_READ_SUB_FDISC_FAILED: + dev_warn(&adapter->ccw_device->dev, + "Local link is down " + "due to failed FDISC login.\n"); + zfcp_fsf_link_down_info_eval(req, 39, ldi); + break; + case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE: + dev_warn(&adapter->ccw_device->dev, + "Local link is down " + "due to firmware update on adapter.\n"); + zfcp_fsf_link_down_info_eval(req, 40, NULL); + }; } -/* - * function: zfcp_fsf_fsfstatus_eval - * - * purpose: evaluates FSF status of completed FSF request - * and acts accordingly - * - * returns: - */ -static int -zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *fsf_req) +static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) { - int retval = 0; + struct zfcp_adapter *adapter = req->adapter; + struct fsf_status_read_buffer *sr_buf = req->data; - if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)) { - goto skip_fsfstatus; + if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { + zfcp_hba_dbf_event_fsf_unsol("dism", adapter, sr_buf); + mempool_free(sr_buf, adapter->pool.data_status_read); + zfcp_fsf_req_free(req); + return; } - /* evaluate FSF Status */ - switch (fsf_req->qtcb->header.fsf_status) { - case FSF_UNKNOWN_COMMAND: - ZFCP_LOG_NORMAL("bug: Command issued by the device driver is " - "not known by the adapter %s " - "Stopping all operations on this adapter. 
" - "(debug info 0x%x).\n", - zfcp_get_busid_by_adapter(fsf_req->adapter), - fsf_req->qtcb->header.fsf_command); - zfcp_erp_adapter_shutdown(fsf_req->adapter, 0, 120, fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - break; + zfcp_hba_dbf_event_fsf_unsol("read", adapter, sr_buf); - case FSF_FCP_RSP_AVAILABLE: - ZFCP_LOG_DEBUG("FCP Sense data will be presented to the " - "SCSI stack.\n"); + switch (sr_buf->status_type) { + case FSF_STATUS_READ_PORT_CLOSED: + zfcp_fsf_status_read_port_closed(req); break; - - case FSF_ADAPTER_STATUS_AVAILABLE: - zfcp_fsf_fsfstatus_qual_eval(fsf_req); + case FSF_STATUS_READ_INCOMING_ELS: + zfcp_fc_incoming_els(req); + break; + case FSF_STATUS_READ_SENSE_DATA_AVAIL: + break; + case FSF_STATUS_READ_BIT_ERROR_THRESHOLD: + zfcp_fsf_bit_error_threshold(req); + break; + case FSF_STATUS_READ_LINK_DOWN: + zfcp_fsf_status_read_link_down(req); + break; + case FSF_STATUS_READ_LINK_UP: + dev_info(&adapter->ccw_device->dev, + "Local link was replugged.\n"); + /* All ports should be marked as ready to run again */ + zfcp_erp_modify_adapter_status(adapter, 30, NULL, + ZFCP_STATUS_COMMON_RUNNING, + ZFCP_SET); + zfcp_erp_adapter_reopen(adapter, + ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | + ZFCP_STATUS_COMMON_ERP_FAILED, + 102, req); + break; + case FSF_STATUS_READ_NOTIFICATION_LOST: + if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED) + zfcp_erp_adapter_access_changed(adapter, 135, req); + if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS) + schedule_work(&adapter->scan_work); + break; + case FSF_STATUS_READ_CFDC_UPDATED: + zfcp_erp_adapter_access_changed(adapter, 136, req); + break; + case FSF_STATUS_READ_FEATURE_UPDATE_ALERT: + adapter->adapter_features = sr_buf->payload.word[0]; break; } - skip_fsfstatus: - /* - * always call specific handlers to give them a chance to do - * something meaningful even in error cases - */ - zfcp_fsf_req_dispatch(fsf_req); + mempool_free(sr_buf, adapter->pool.data_status_read); + zfcp_fsf_req_free(req); - return retval; + atomic_inc(&adapter->stat_miss); + schedule_work(&adapter->stat_work); } -/* - * function: zfcp_fsf_fsfstatus_qual_eval - * - * purpose: evaluates FSF status-qualifier of completed FSF request - * and acts accordingly - * - * returns: - */ -static int -zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *fsf_req) +static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req) { - int retval = 0; - - switch (fsf_req->qtcb->header.fsf_status_qual.word[0]) { + switch (req->qtcb->header.fsf_status_qual.word[0]) { case FSF_SQ_FCP_RSP_AVAILABLE: - break; - case FSF_SQ_RETRY_IF_POSSIBLE: - /* The SCSI-stack may now issue retries or escalate */ - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - break; - case FSF_SQ_COMMAND_ABORTED: - /* Carry the aborted state on to upper layer */ - fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTED; - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - break; - case FSF_SQ_NO_RECOM: - ZFCP_LOG_NORMAL("bug: No recommendation could be given for a " - "problem on the adapter %s " - "Stopping all operations on this adapter. 
", - zfcp_get_busid_by_adapter(fsf_req->adapter)); - zfcp_erp_adapter_shutdown(fsf_req->adapter, 0, 121, fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - break; - case FSF_SQ_ULP_PROGRAMMING_ERROR: - ZFCP_LOG_NORMAL("error: not enough SBALs for data transfer " - "(adapter %s)\n", - zfcp_get_busid_by_adapter(fsf_req->adapter)); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - break; case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: case FSF_SQ_NO_RETRY_POSSIBLE: case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: - /* dealt with in the respective functions */ + return; + case FSF_SQ_COMMAND_ABORTED: + req->status |= ZFCP_STATUS_FSFREQ_ABORTED; break; - default: - ZFCP_LOG_NORMAL("bug: Additional status info could " - "not be interpreted properly.\n"); - ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL, - (char *) &fsf_req->qtcb->header.fsf_status_qual, - sizeof (union fsf_status_qual)); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; + case FSF_SQ_NO_RECOM: + dev_err(&req->adapter->ccw_device->dev, + "No recommendation could be given for a " + "problem on the adapter.\n"); + zfcp_erp_adapter_shutdown(req->adapter, 0, 121, req); break; } - - return retval; + /* all non-return stats set FSFREQ_ERROR*/ + req->status |= ZFCP_STATUS_FSFREQ_ERROR; } -/** - * zfcp_fsf_link_down_info_eval - evaluate link down information block - */ -static void -zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *fsf_req, u8 id, - struct fsf_link_down_info *link_down) +static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req) { - struct zfcp_adapter *adapter = fsf_req->adapter; - - if (atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, - &adapter->status)) + if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) return; - atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status); - - if (link_down == NULL) - goto out; - - switch (link_down->error_code) { - case FSF_PSQ_LINK_NO_LIGHT: - ZFCP_LOG_NORMAL("The local link to adapter %s is down " - "(no light detected)\n", - zfcp_get_busid_by_adapter(adapter)); - break; - case FSF_PSQ_LINK_WRAP_PLUG: - ZFCP_LOG_NORMAL("The local link to adapter %s is down " - "(wrap plug detected)\n", - zfcp_get_busid_by_adapter(adapter)); - break; - case FSF_PSQ_LINK_NO_FCP: - ZFCP_LOG_NORMAL("The local link to adapter %s is down " - "(adjacent node on link does not support FCP)\n", - zfcp_get_busid_by_adapter(adapter)); + switch (req->qtcb->header.fsf_status) { + case FSF_UNKNOWN_COMMAND: + dev_err(&req->adapter->ccw_device->dev, + "Command issued by the device driver (0x%x) is " + "not known by the adapter.\n", + req->qtcb->header.fsf_command); + zfcp_erp_adapter_shutdown(req->adapter, 0, 120, req); + req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; - case FSF_PSQ_LINK_FIRMWARE_UPDATE: - ZFCP_LOG_NORMAL("The local link to adapter %s is down " - "(firmware update in progress)\n", - zfcp_get_busid_by_adapter(adapter)); - break; - case FSF_PSQ_LINK_INVALID_WWPN: - ZFCP_LOG_NORMAL("The local link to adapter %s is down " - "(duplicate or invalid WWPN detected)\n", - zfcp_get_busid_by_adapter(adapter)); + case FSF_ADAPTER_STATUS_AVAILABLE: + zfcp_fsf_fsfstatus_qual_eval(req); break; - case FSF_PSQ_LINK_NO_NPIV_SUPPORT: - ZFCP_LOG_NORMAL("The local link to adapter %s is down " - "(no support for NPIV by Fabric)\n", - zfcp_get_busid_by_adapter(adapter)); + } +} + +static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req) +{ + struct zfcp_adapter *adapter = req->adapter; + struct fsf_qtcb *qtcb = req->qtcb; + union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual; + + 
zfcp_hba_dbf_event_fsf_response(req); + + if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { + req->status |= ZFCP_STATUS_FSFREQ_ERROR | + ZFCP_STATUS_FSFREQ_RETRY; /* only for SCSI cmnds. */ + return; + } + + switch (qtcb->prefix.prot_status) { + case FSF_PROT_GOOD: + case FSF_PROT_FSF_STATUS_PRESENTED: + return; + case FSF_PROT_QTCB_VERSION_ERROR: + dev_err(&adapter->ccw_device->dev, + "The QTCB version requested by zfcp (0x%x) is not " + "supported by the FCP adapter (lowest supported " + "0x%x, highest supported 0x%x).\n", + FSF_QTCB_CURRENT_VERSION, psq->word[0], + psq->word[1]); + zfcp_erp_adapter_shutdown(adapter, 0, 117, req); break; - case FSF_PSQ_LINK_NO_FCP_RESOURCES: - ZFCP_LOG_NORMAL("The local link to adapter %s is down " - "(out of resource in FCP daughtercard)\n", - zfcp_get_busid_by_adapter(adapter)); + case FSF_PROT_ERROR_STATE: + case FSF_PROT_SEQ_NUMB_ERROR: + zfcp_erp_adapter_reopen(adapter, 0, 98, req); + req->status |= ZFCP_STATUS_FSFREQ_RETRY; break; - case FSF_PSQ_LINK_NO_FABRIC_RESOURCES: - ZFCP_LOG_NORMAL("The local link to adapter %s is down " - "(out of resource in Fabric)\n", - zfcp_get_busid_by_adapter(adapter)); + case FSF_PROT_UNSUPP_QTCB_TYPE: + dev_err(&adapter->ccw_device->dev, + "Packet header type used by the device driver is " + "incompatible with that used on the adapter.\n"); + zfcp_erp_adapter_shutdown(adapter, 0, 118, req); break; - case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE: - ZFCP_LOG_NORMAL("The local link to adapter %s is down " - "(unable to Fabric login)\n", - zfcp_get_busid_by_adapter(adapter)); + case FSF_PROT_HOST_CONNECTION_INITIALIZING: + atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, + &adapter->status); break; - case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED: - ZFCP_LOG_NORMAL("WWPN assignment file corrupted on adapter %s\n", - zfcp_get_busid_by_adapter(adapter)); + case FSF_PROT_DUPLICATE_REQUEST_ID: + dev_err(&adapter->ccw_device->dev, + "The request identifier 0x%Lx is ambiguous.\n", + (unsigned long long)qtcb->bottom.support.req_handle); + zfcp_erp_adapter_shutdown(adapter, 0, 78, req); break; - case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED: - ZFCP_LOG_NORMAL("Mode table corrupted on adapter %s\n", - zfcp_get_busid_by_adapter(adapter)); + case FSF_PROT_LINK_DOWN: + zfcp_fsf_link_down_info_eval(req, 37, &psq->link_down_info); + /* FIXME: reopening adapter now? 
better wait for link up */ + zfcp_erp_adapter_reopen(adapter, 0, 79, req); break; - case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT: - ZFCP_LOG_NORMAL("No WWPN for assignment table on adapter %s\n", - zfcp_get_busid_by_adapter(adapter)); + case FSF_PROT_REEST_QUEUE: + /* All ports should be marked as ready to run again */ + zfcp_erp_modify_adapter_status(adapter, 28, NULL, + ZFCP_STATUS_COMMON_RUNNING, + ZFCP_SET); + zfcp_erp_adapter_reopen(adapter, + ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | + ZFCP_STATUS_COMMON_ERP_FAILED, 99, req); break; default: - ZFCP_LOG_NORMAL("The local link to adapter %s is down " - "(warning: unknown reason code %d)\n", - zfcp_get_busid_by_adapter(adapter), - link_down->error_code); - } - - if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) - ZFCP_LOG_DEBUG("Debug information to link down: " - "primary_status=0x%02x " - "ioerr_code=0x%02x " - "action_code=0x%02x " - "reason_code=0x%02x " - "explanation_code=0x%02x " - "vendor_specific_code=0x%02x\n", - link_down->primary_status, - link_down->ioerr_code, - link_down->action_code, - link_down->reason_code, - link_down->explanation_code, - link_down->vendor_specific_code); - - out: - zfcp_erp_adapter_failed(adapter, id, fsf_req); + dev_err(&adapter->ccw_device->dev, + "Transfer protocol status information" + "provided by the adapter (0x%x) " + "is not compatible with the device driver.\n", + qtcb->prefix.prot_status); + zfcp_erp_adapter_shutdown(adapter, 0, 119, req); + } + req->status |= ZFCP_STATUS_FSFREQ_ERROR; } -/* - * function: zfcp_fsf_req_dispatch - * - * purpose: calls the appropriate command specific handler +/** + * zfcp_fsf_req_complete - process completion of a FSF request + * @fsf_req: The FSF request that has been completed. * - * returns: + * When a request has been completed either from the FCP adapter, + * or it has been dismissed due to a queue shutdown, this function + * is called to process the completion status and trigger further + * events related to the FSF request. */ -static int -zfcp_fsf_req_dispatch(struct zfcp_fsf_req *fsf_req) +void zfcp_fsf_req_complete(struct zfcp_fsf_req *req) { - struct zfcp_erp_action *erp_action = fsf_req->erp_action; - struct zfcp_adapter *adapter = fsf_req->adapter; - int retval = 0; + if (unlikely(req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) { + zfcp_fsf_status_read_handler(req); + return; + } + del_timer(&req->timer); + zfcp_fsf_protstatus_eval(req); + zfcp_fsf_fsfstatus_eval(req); + req->handler(req); - switch (fsf_req->fsf_command) { + if (req->erp_action) + zfcp_erp_notify(req->erp_action, 0); + req->status |= ZFCP_STATUS_FSFREQ_COMPLETED; - case FSF_QTCB_FCP_CMND: - zfcp_fsf_send_fcp_command_handler(fsf_req); - break; + if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP)) + zfcp_fsf_req_free(req); + else + /* notify initiator waiting for the requests completion */ + /* + * FIXME: Race! We must not access fsf_req here as it might have been + * cleaned up already due to the set ZFCP_STATUS_FSFREQ_COMPLETED + * flag. It's an improbable case. But, we have the same paranoia for + * the cleanup flag already. + * Might better be handled using complete()? 
+ * (setting the flag and doing wakeup ought to be atomic + * with regard to checking the flag as long as waitqueue is + * part of the to be released structure) + */ + wake_up(&req->completion_wq); +} - case FSF_QTCB_ABORT_FCP_CMND: - zfcp_fsf_abort_fcp_command_handler(fsf_req); - break; +static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) +{ + struct fsf_qtcb_bottom_config *bottom; + struct zfcp_adapter *adapter = req->adapter; + struct Scsi_Host *shost = adapter->scsi_host; - case FSF_QTCB_SEND_GENERIC: - zfcp_fsf_send_ct_handler(fsf_req); - break; + bottom = &req->qtcb->bottom.config; - case FSF_QTCB_OPEN_PORT_WITH_DID: - zfcp_fsf_open_port_handler(fsf_req); - break; + if (req->data) + memcpy(req->data, bottom, sizeof(*bottom)); - case FSF_QTCB_OPEN_LUN: - zfcp_fsf_open_unit_handler(fsf_req); - break; + fc_host_node_name(shost) = bottom->nport_serv_param.wwnn; + fc_host_port_name(shost) = bottom->nport_serv_param.wwpn; + fc_host_port_id(shost) = bottom->s_id & ZFCP_DID_MASK; + fc_host_speed(shost) = bottom->fc_link_speed; + fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3; - case FSF_QTCB_CLOSE_LUN: - zfcp_fsf_close_unit_handler(fsf_req); - break; + adapter->hydra_version = bottom->adapter_type; + adapter->timer_ticks = bottom->timer_interval; - case FSF_QTCB_CLOSE_PORT: - zfcp_fsf_close_port_handler(fsf_req); - break; + if (fc_host_permanent_port_name(shost) == -1) + fc_host_permanent_port_name(shost) = fc_host_port_name(shost); - case FSF_QTCB_CLOSE_PHYSICAL_PORT: - zfcp_fsf_close_physical_port_handler(fsf_req); - break; + switch (bottom->fc_topology) { + case FSF_TOPO_P2P: + adapter->peer_d_id = bottom->peer_d_id & ZFCP_DID_MASK; + adapter->peer_wwpn = bottom->plogi_payload.wwpn; + adapter->peer_wwnn = bottom->plogi_payload.wwnn; + fc_host_port_type(shost) = FC_PORTTYPE_PTP; + if (req->erp_action) + dev_info(&adapter->ccw_device->dev, + "Point-to-Point fibrechannel " + "configuration detected.\n"); + break; + case FSF_TOPO_FABRIC: + fc_host_port_type(shost) = FC_PORTTYPE_NPORT; + if (req->erp_action) + dev_info(&adapter->ccw_device->dev, + "Switched fabric fibrechannel " + "network detected.\n"); + break; + case FSF_TOPO_AL: + fc_host_port_type(shost) = FC_PORTTYPE_NLPORT; + dev_err(&adapter->ccw_device->dev, + "Unsupported arbitrated loop fibrechannel " + "topology detected, shutting down " + "adapter.\n"); + zfcp_erp_adapter_shutdown(adapter, 0, 127, req); + return -EIO; + default: + fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN; + dev_err(&adapter->ccw_device->dev, + "The fibrechannel topology reported by the" + " adapter is not known by the zfcp driver," + " shutting down adapter.\n"); + zfcp_erp_adapter_shutdown(adapter, 0, 128, req); + return -EIO; + } - case FSF_QTCB_EXCHANGE_CONFIG_DATA: - zfcp_fsf_exchange_config_data_handler(fsf_req); - break; + return 0; +} - case FSF_QTCB_EXCHANGE_PORT_DATA: - zfcp_fsf_exchange_port_data_handler(fsf_req); - break; +static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req) +{ + struct zfcp_adapter *adapter = req->adapter; + struct fsf_qtcb *qtcb = req->qtcb; + struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config; + struct Scsi_Host *shost = adapter->scsi_host; - case FSF_QTCB_SEND_ELS: - zfcp_fsf_send_els_handler(fsf_req); - break; + if (req->status & ZFCP_STATUS_FSFREQ_ERROR) + return; - case FSF_QTCB_DOWNLOAD_CONTROL_FILE: - zfcp_fsf_control_file_handler(fsf_req); - break; + adapter->fsf_lic_version = bottom->lic_version; + adapter->adapter_features = bottom->adapter_features; 
+ adapter->connection_features = bottom->connection_features; + adapter->peer_wwpn = 0; + adapter->peer_wwnn = 0; + adapter->peer_d_id = 0; - case FSF_QTCB_UPLOAD_CONTROL_FILE: - zfcp_fsf_control_file_handler(fsf_req); + switch (qtcb->header.fsf_status) { + case FSF_GOOD: + if (zfcp_fsf_exchange_config_evaluate(req)) + return; + + if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) { + dev_err(&adapter->ccw_device->dev, + "Maximum QTCB size (%d bytes) allowed by " + "the adapter is lower than the minimum " + "required by the driver (%ld bytes).\n", + bottom->max_qtcb_size, + sizeof(struct fsf_qtcb)); + zfcp_erp_adapter_shutdown(adapter, 0, 129, req); + return; + } + atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, + &adapter->status); break; + case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: + fc_host_node_name(shost) = 0; + fc_host_port_name(shost) = 0; + fc_host_port_id(shost) = 0; + fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; + fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN; + adapter->hydra_version = 0; + atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, + &adapter->status); + + zfcp_fsf_link_down_info_eval(req, 42, + &qtcb->header.fsf_status_qual.link_down_info); + break; default: - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - ZFCP_LOG_NORMAL("bug: Command issued by the device driver is " - "not supported by the adapter %s\n", - zfcp_get_busid_by_adapter(adapter)); - if (fsf_req->fsf_command != fsf_req->qtcb->header.fsf_command) - ZFCP_LOG_NORMAL - ("bug: Command issued by the device driver differs " - "from the command returned by the adapter %s " - "(debug info 0x%x, 0x%x).\n", - zfcp_get_busid_by_adapter(adapter), - fsf_req->fsf_command, - fsf_req->qtcb->header.fsf_command); + zfcp_erp_adapter_shutdown(adapter, 0, 130, req); + return; } - if (!erp_action) - return retval; - - zfcp_erp_async_handler(erp_action, 0); + if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) { + adapter->hardware_version = bottom->hardware_version; + memcpy(fc_host_serial_number(shost), bottom->serial_number, + min(FC_SERIAL_NUMBER_SIZE, 17)); + EBCASC(fc_host_serial_number(shost), + min(FC_SERIAL_NUMBER_SIZE, 17)); + } - return retval; + if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) { + dev_err(&adapter->ccw_device->dev, + "The adapter only supports newer control block " + "versions, try updated device driver.\n"); + zfcp_erp_adapter_shutdown(adapter, 0, 125, req); + return; + } + if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) { + dev_err(&adapter->ccw_device->dev, + "The adapter only supports older control block " + "versions, consider a microcode upgrade.\n"); + zfcp_erp_adapter_shutdown(adapter, 0, 126, req); + } } -/* - * function: zfcp_fsf_status_read - * - * purpose: initiates a Status Read command at the specified adapter - * - * returns: - */ -int -zfcp_fsf_status_read(struct zfcp_adapter *adapter, int req_flags) +static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req) { - struct zfcp_fsf_req *fsf_req; - struct fsf_status_read_buffer *status_buffer; - unsigned long lock_flags; - volatile struct qdio_buffer_element *sbale; - int retval = 0; - - /* setup new FSF request */ - retval = zfcp_fsf_req_create(adapter, FSF_QTCB_UNSOLICITED_STATUS, - req_flags | ZFCP_REQ_NO_QTCB, - adapter->pool.fsf_req_status_read, - &lock_flags, &fsf_req); - if (retval < 0) { - ZFCP_LOG_INFO("error: Could not create unsolicited status " - "buffer for adapter %s.\n", - zfcp_get_busid_by_adapter(adapter)); - goto failed_req_create; - } - - sbale = zfcp_qdio_sbale_req(fsf_req, 
fsf_req->sbal_curr, 0); - sbale[0].flags |= SBAL_FLAGS0_TYPE_STATUS; - sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY; - fsf_req->sbale_curr = 2; + struct zfcp_adapter *adapter = req->adapter; + struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port; + struct Scsi_Host *shost = adapter->scsi_host; - status_buffer = - mempool_alloc(adapter->pool.data_status_read, GFP_ATOMIC); - if (!status_buffer) { - ZFCP_LOG_NORMAL("bug: could not get some buffer\n"); - goto failed_buf; - } - memset(status_buffer, 0, sizeof (struct fsf_status_read_buffer)); - fsf_req->data = (unsigned long) status_buffer; + if (req->data) + memcpy(req->data, bottom, sizeof(*bottom)); - /* insert pointer to respective buffer */ - sbale = zfcp_qdio_sbale_curr(fsf_req); - sbale->addr = (void *) status_buffer; - sbale->length = sizeof(struct fsf_status_read_buffer); + if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) + fc_host_permanent_port_name(shost) = bottom->wwpn; + else + fc_host_permanent_port_name(shost) = fc_host_port_name(shost); + fc_host_maxframe_size(shost) = bottom->maximum_frame_size; + fc_host_supported_speeds(shost) = bottom->supported_speed; +} - retval = zfcp_fsf_req_send(fsf_req); - if (retval) { - ZFCP_LOG_DEBUG("error: Could not set-up unsolicited status " - "environment.\n"); - goto failed_req_send; - } +static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req) +{ + struct zfcp_adapter *adapter = req->adapter; + struct fsf_qtcb *qtcb = req->qtcb; - ZFCP_LOG_TRACE("Status Read request initiated (adapter%s)\n", - zfcp_get_busid_by_adapter(adapter)); - goto out; + if (req->status & ZFCP_STATUS_FSFREQ_ERROR) + return; - failed_req_send: - mempool_free(status_buffer, adapter->pool.data_status_read); + switch (qtcb->header.fsf_status) { + case FSF_GOOD: + zfcp_fsf_exchange_port_evaluate(req); + atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status); + break; + case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: + zfcp_fsf_exchange_port_evaluate(req); + atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status); + zfcp_fsf_link_down_info_eval(req, 43, + &qtcb->header.fsf_status_qual.link_down_info); + break; + } +} - failed_buf: - zfcp_fsf_req_free(fsf_req); - failed_req_create: - zfcp_hba_dbf_event_fsf_unsol("fail", adapter, NULL); - out: - write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags); - return retval; +static int zfcp_fsf_sbal_check(struct zfcp_qdio_queue *queue) +{ + spin_lock(&queue->lock); + if (atomic_read(&queue->count)) + return 1; + spin_unlock(&queue->lock); + return 0; } -static int -zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *fsf_req) +static int zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter) { - struct fsf_status_read_buffer *status_buffer; - struct zfcp_adapter *adapter; - struct zfcp_port *port; - unsigned long flags; + long ret; + struct zfcp_qdio_queue *req_q = &adapter->req_q; - status_buffer = (struct fsf_status_read_buffer *) fsf_req->data; - adapter = fsf_req->adapter; + spin_unlock(&req_q->lock); + ret = wait_event_interruptible_timeout(adapter->request_wq, + zfcp_fsf_sbal_check(req_q), 5 * HZ); + if (ret > 0) + return 0; - read_lock_irqsave(&zfcp_data.config_lock, flags); - list_for_each_entry(port, &adapter->port_list_head, list) - if (port->d_id == (status_buffer->d_id & ZFCP_DID_MASK)) - break; - read_unlock_irqrestore(&zfcp_data.config_lock, flags); + spin_lock(&req_q->lock); + return -EIO; +} - if (!port || (port->d_id != (status_buffer->d_id & ZFCP_DID_MASK))) { - ZFCP_LOG_NORMAL("bug: Reopen port indication 
received for " - "nonexisting port with d_id 0x%06x on " - "adapter %s. Ignored.\n", - status_buffer->d_id & ZFCP_DID_MASK, - zfcp_get_busid_by_adapter(adapter)); - goto out; - } +static struct zfcp_fsf_req *zfcp_fsf_alloc_noqtcb(mempool_t *pool) +{ + struct zfcp_fsf_req *req; + req = mempool_alloc(pool, GFP_ATOMIC); + if (!req) + return NULL; + memset(req, 0, sizeof(*req)); + return req; +} - switch (status_buffer->status_subtype) { +static struct zfcp_fsf_req *zfcp_fsf_alloc_qtcb(mempool_t *pool) +{ + struct zfcp_fsf_req_qtcb *qtcb; - case FSF_STATUS_READ_SUB_CLOSE_PHYS_PORT: - zfcp_erp_port_reopen(port, 0, 101, fsf_req); - break; + if (likely(pool)) + qtcb = mempool_alloc(pool, GFP_ATOMIC); + else + qtcb = kmem_cache_alloc(zfcp_data.fsf_req_qtcb_cache, + GFP_ATOMIC); + if (unlikely(!qtcb)) + return NULL; - case FSF_STATUS_READ_SUB_ERROR_PORT: - zfcp_erp_port_shutdown(port, 0, 122, fsf_req); - break; + memset(qtcb, 0, sizeof(*qtcb)); + qtcb->fsf_req.qtcb = &qtcb->qtcb; + qtcb->fsf_req.pool = pool; - default: - ZFCP_LOG_NORMAL("bug: Undefined status subtype received " - "for a reopen indication on port with " - "d_id 0x%06x on the adapter %s. " - "Ignored. (debug info 0x%x)\n", - status_buffer->d_id, - zfcp_get_busid_by_adapter(adapter), - status_buffer->status_subtype); - } - out: - return 0; + return &qtcb->fsf_req; } -/* - * function: zfcp_fsf_status_read_handler - * - * purpose: is called for finished Open Port command - * - * returns: - */ -static int -zfcp_fsf_status_read_handler(struct zfcp_fsf_req *fsf_req) +static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter, + u32 fsf_cmd, int req_flags, + mempool_t *pool) { - int retval = 0; - struct zfcp_adapter *adapter = fsf_req->adapter; - struct fsf_status_read_buffer *status_buffer = - (struct fsf_status_read_buffer *) fsf_req->data; - struct fsf_bit_error_payload *fsf_bit_error; - - if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { - zfcp_hba_dbf_event_fsf_unsol("dism", adapter, status_buffer); - mempool_free(status_buffer, adapter->pool.data_status_read); - zfcp_fsf_req_free(fsf_req); - goto out; - } + volatile struct qdio_buffer_element *sbale; - zfcp_hba_dbf_event_fsf_unsol("read", adapter, status_buffer); + struct zfcp_fsf_req *req; + struct zfcp_qdio_queue *req_q = &adapter->req_q; - switch (status_buffer->status_type) { + if (req_flags & ZFCP_REQ_NO_QTCB) + req = zfcp_fsf_alloc_noqtcb(pool); + else + req = zfcp_fsf_alloc_qtcb(pool); - case FSF_STATUS_READ_PORT_CLOSED: - zfcp_fsf_status_read_port_closed(fsf_req); - break; + if (unlikely(!req)) + return ERR_PTR(-EIO); - case FSF_STATUS_READ_INCOMING_ELS: - zfcp_fsf_incoming_els(fsf_req); - break; + if (adapter->req_no == 0) + adapter->req_no++; - case FSF_STATUS_READ_SENSE_DATA_AVAIL: - ZFCP_LOG_INFO("unsolicited sense data received (adapter %s)\n", - zfcp_get_busid_by_adapter(adapter)); - break; + INIT_LIST_HEAD(&req->list); + init_timer(&req->timer); + init_waitqueue_head(&req->completion_wq); - case FSF_STATUS_READ_BIT_ERROR_THRESHOLD: - fsf_bit_error = (struct fsf_bit_error_payload *) - status_buffer->payload; - ZFCP_LOG_NORMAL("Warning: bit error threshold data " - "received (adapter %s, " - "link failures = %i, loss of sync errors = %i, " - "loss of signal errors = %i, " - "primitive sequence errors = %i, " - "invalid transmission word errors = %i, " - "CRC errors = %i)\n", - zfcp_get_busid_by_adapter(adapter), - fsf_bit_error->link_failure_error_count, - fsf_bit_error->loss_of_sync_error_count, - fsf_bit_error->loss_of_signal_error_count, - 
fsf_bit_error->primitive_sequence_error_count, - fsf_bit_error->invalid_transmission_word_error_count, - fsf_bit_error->crc_error_count); - ZFCP_LOG_INFO("Additional bit error threshold data " - "(adapter %s, " - "primitive sequence event time-outs = %i, " - "elastic buffer overrun errors = %i, " - "advertised receive buffer-to-buffer credit = %i, " - "current receice buffer-to-buffer credit = %i, " - "advertised transmit buffer-to-buffer credit = %i, " - "current transmit buffer-to-buffer credit = %i)\n", - zfcp_get_busid_by_adapter(adapter), - fsf_bit_error->primitive_sequence_event_timeout_count, - fsf_bit_error->elastic_buffer_overrun_error_count, - fsf_bit_error->advertised_receive_b2b_credit, - fsf_bit_error->current_receive_b2b_credit, - fsf_bit_error->advertised_transmit_b2b_credit, - fsf_bit_error->current_transmit_b2b_credit); - break; + req->adapter = adapter; + req->fsf_command = fsf_cmd; + req->req_id = adapter->req_no++; + req->sbal_number = 1; + req->sbal_first = req_q->first; + req->sbal_last = req_q->first; + req->sbale_curr = 1; - case FSF_STATUS_READ_LINK_DOWN: - switch (status_buffer->status_subtype) { - case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK: - ZFCP_LOG_INFO("Physical link to adapter %s is down\n", - zfcp_get_busid_by_adapter(adapter)); - zfcp_fsf_link_down_info_eval(fsf_req, 38, - (struct fsf_link_down_info *) - &status_buffer->payload); - break; - case FSF_STATUS_READ_SUB_FDISC_FAILED: - ZFCP_LOG_INFO("Local link to adapter %s is down " - "due to failed FDISC login\n", - zfcp_get_busid_by_adapter(adapter)); - zfcp_fsf_link_down_info_eval(fsf_req, 39, - (struct fsf_link_down_info *) - &status_buffer->payload); - break; - case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE: - ZFCP_LOG_INFO("Local link to adapter %s is down " - "due to firmware update on adapter\n", - zfcp_get_busid_by_adapter(adapter)); - zfcp_fsf_link_down_info_eval(fsf_req, 40, NULL); - break; - default: - ZFCP_LOG_INFO("Local link to adapter %s is down " - "due to unknown reason\n", - zfcp_get_busid_by_adapter(adapter)); - zfcp_fsf_link_down_info_eval(fsf_req, 41, NULL); - }; - break; + sbale = zfcp_qdio_sbale_req(req); + sbale[0].addr = (void *) req->req_id; + sbale[0].flags |= SBAL_FLAGS0_COMMAND; - case FSF_STATUS_READ_LINK_UP: - ZFCP_LOG_NORMAL("Local link to adapter %s was replugged. " - "Restarting operations on this adapter\n", - zfcp_get_busid_by_adapter(adapter)); - /* All ports should be marked as ready to run again */ - zfcp_erp_modify_adapter_status(adapter, 30, NULL, - ZFCP_STATUS_COMMON_RUNNING, - ZFCP_SET); - zfcp_erp_adapter_reopen(adapter, - ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED - | ZFCP_STATUS_COMMON_ERP_FAILED, - 102, fsf_req); - break; + if (likely(req->qtcb)) { + req->qtcb->prefix.req_seq_no = req->adapter->fsf_req_seq_no; + req->qtcb->prefix.req_id = req->req_id; + req->qtcb->prefix.ulp_info = 26; + req->qtcb->prefix.qtcb_type = fsf_qtcb_type[req->fsf_command]; + req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION; + req->qtcb->header.req_handle = req->req_id; + req->qtcb->header.fsf_command = req->fsf_command; + req->seq_no = adapter->fsf_req_seq_no; + req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no; + sbale[1].addr = (void *) req->qtcb; + sbale[1].length = sizeof(struct fsf_qtcb); + } - case FSF_STATUS_READ_NOTIFICATION_LOST: - ZFCP_LOG_NORMAL("Unsolicited status notification(s) lost: " - "adapter %s%s%s%s%s%s%s%s%s\n", - zfcp_get_busid_by_adapter(adapter), - (status_buffer->status_subtype & - FSF_STATUS_READ_SUB_INCOMING_ELS) ? 
- ", incoming ELS" : "", - (status_buffer->status_subtype & - FSF_STATUS_READ_SUB_SENSE_DATA) ? - ", sense data" : "", - (status_buffer->status_subtype & - FSF_STATUS_READ_SUB_LINK_STATUS) ? - ", link status change" : "", - (status_buffer->status_subtype & - FSF_STATUS_READ_SUB_PORT_CLOSED) ? - ", port close" : "", - (status_buffer->status_subtype & - FSF_STATUS_READ_SUB_BIT_ERROR_THRESHOLD) ? - ", bit error exception" : "", - (status_buffer->status_subtype & - FSF_STATUS_READ_SUB_ACT_UPDATED) ? - ", ACT update" : "", - (status_buffer->status_subtype & - FSF_STATUS_READ_SUB_ACT_HARDENED) ? - ", ACT hardening" : "", - (status_buffer->status_subtype & - FSF_STATUS_READ_SUB_FEATURE_UPDATE_ALERT) ? - ", adapter feature change" : ""); - - if (status_buffer->status_subtype & - FSF_STATUS_READ_SUB_ACT_UPDATED) - zfcp_erp_adapter_access_changed(adapter, 135, fsf_req); - break; + if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) { + zfcp_fsf_req_free(req); + return ERR_PTR(-EIO); + } - case FSF_STATUS_READ_CFDC_UPDATED: - ZFCP_LOG_NORMAL("CFDC has been updated on the adapter %s\n", - zfcp_get_busid_by_adapter(adapter)); - zfcp_erp_adapter_access_changed(adapter, 136, fsf_req); - break; + if (likely(req_flags & ZFCP_REQ_AUTO_CLEANUP)) + req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - case FSF_STATUS_READ_CFDC_HARDENED: - switch (status_buffer->status_subtype) { - case FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE: - ZFCP_LOG_NORMAL("CFDC of adapter %s saved on SE\n", - zfcp_get_busid_by_adapter(adapter)); - break; - case FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE2: - ZFCP_LOG_NORMAL("CFDC of adapter %s has been copied " - "to the secondary SE\n", - zfcp_get_busid_by_adapter(adapter)); - break; - default: - ZFCP_LOG_NORMAL("CFDC of adapter %s has been hardened\n", - zfcp_get_busid_by_adapter(adapter)); - } - break; + return req; +} - case FSF_STATUS_READ_FEATURE_UPDATE_ALERT: - ZFCP_LOG_INFO("List of supported features on adapter %s has " - "been changed from 0x%08X to 0x%08X\n", - zfcp_get_busid_by_adapter(adapter), - *(u32*) (status_buffer->payload + 4), - *(u32*) (status_buffer->payload)); - adapter->adapter_features = *(u32*) status_buffer->payload; - break; +static int zfcp_fsf_req_send(struct zfcp_fsf_req *req) +{ + struct zfcp_adapter *adapter = req->adapter; + struct zfcp_qdio_queue *req_q = &adapter->req_q; + int idx; - default: - ZFCP_LOG_NORMAL("warning: An unsolicited status packet of unknown " - "type was received (debug info 0x%x)\n", - status_buffer->status_type); - ZFCP_LOG_DEBUG("Dump of status_read_buffer %p:\n", - status_buffer); - ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, - (char *) status_buffer, - sizeof (struct fsf_status_read_buffer)); - break; - } - mempool_free(status_buffer, adapter->pool.data_status_read); - zfcp_fsf_req_free(fsf_req); - /* - * recycle buffer and start new request repeat until outbound - * queue is empty or adapter shutdown is requested - */ - /* - * FIXME(qdio): - * we may wait in the req_create for 5s during shutdown, so - * qdio_cleanup will have to wait at least that long before returning - * with failure to allow us a proper cleanup under all circumstances - */ - /* - * FIXME: - * allocation failure possible? (Is this code needed?) 
- */ - retval = zfcp_fsf_status_read(adapter, 0); - if (retval < 0) { - ZFCP_LOG_INFO("Failed to create unsolicited status read " - "request for the adapter %s.\n", - zfcp_get_busid_by_adapter(adapter)); - /* temporary fix to avoid status read buffer shortage */ - adapter->status_read_failed++; - if ((ZFCP_STATUS_READS_RECOM - adapter->status_read_failed) - < ZFCP_STATUS_READ_FAILED_THRESHOLD) { - ZFCP_LOG_INFO("restart adapter %s due to status read " - "buffer shortage\n", - zfcp_get_busid_by_adapter(adapter)); - zfcp_erp_adapter_reopen(adapter, 0, 103, fsf_req); - } + /* put allocated FSF request into hash table */ + spin_lock(&adapter->req_list_lock); + idx = zfcp_reqlist_hash(req->req_id); + list_add_tail(&req->list, &adapter->req_list[idx]); + spin_unlock(&adapter->req_list_lock); + + req->issued = get_clock(); + if (zfcp_qdio_send(req)) { + /* Queues are down..... */ + del_timer(&req->timer); + spin_lock(&adapter->req_list_lock); + zfcp_reqlist_remove(adapter, req); + spin_unlock(&adapter->req_list_lock); + /* undo changes in request queue made for this request */ + atomic_add(req->sbal_number, &req_q->count); + req_q->first -= req->sbal_number; + req_q->first += QDIO_MAX_BUFFERS_PER_Q; + req_q->first %= QDIO_MAX_BUFFERS_PER_Q; /* wrap */ + zfcp_erp_adapter_reopen(adapter, 0, 116, req); + return -EIO; } - out: - return retval; + + /* Don't increase for unsolicited status */ + if (req->qtcb) + adapter->fsf_req_seq_no++; + + return 0; } -/* - * function: zfcp_fsf_abort_fcp_command - * - * purpose: tells FSF to abort a running SCSI command - * - * returns: address of initiated FSF request - * NULL - request could not be initiated - * - * FIXME(design): should be watched by a timeout !!! - * FIXME(design) shouldn't this be modified to return an int - * also...don't know how though +/** + * zfcp_fsf_status_read - send status read request + * @adapter: pointer to struct zfcp_adapter + * @req_flags: request flags + * Returns: 0 on success, ERROR otherwise */ -struct zfcp_fsf_req * -zfcp_fsf_abort_fcp_command(unsigned long old_req_id, - struct zfcp_adapter *adapter, - struct zfcp_unit *unit, int req_flags) +int zfcp_fsf_status_read(struct zfcp_adapter *adapter) { + struct zfcp_fsf_req *req; + struct fsf_status_read_buffer *sr_buf; volatile struct qdio_buffer_element *sbale; - struct zfcp_fsf_req *fsf_req = NULL; - unsigned long lock_flags; - int retval = 0; - - /* setup new FSF request */ - retval = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND, - req_flags, adapter->pool.fsf_req_abort, - &lock_flags, &fsf_req); - if (retval < 0) { - ZFCP_LOG_INFO("error: Failed to create an abort command " - "request for lun 0x%016Lx on port 0x%016Lx " - "on adapter %s.\n", - unit->fcp_lun, - unit->port->wwpn, - zfcp_get_busid_by_adapter(adapter)); - goto out; - } - - if (unlikely(!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED, - &unit->status))) - goto unit_blocked; + int retval = -EIO; - sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); - sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; - sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; + spin_lock(&adapter->req_q.lock); + if (zfcp_fsf_req_sbal_get(adapter)) + goto out; - fsf_req->data = (unsigned long) unit; + req = zfcp_fsf_req_create(adapter, FSF_QTCB_UNSOLICITED_STATUS, + ZFCP_REQ_NO_QTCB, + adapter->pool.fsf_req_status_read); + if (unlikely(IS_ERR(req))) { + retval = PTR_ERR(req); + goto out; + } - /* set handles of unit and its parent port in QTCB */ - fsf_req->qtcb->header.lun_handle = unit->handle; - fsf_req->qtcb->header.port_handle = 
unit->port->handle; + sbale = zfcp_qdio_sbale_req(req); + sbale[0].flags |= SBAL_FLAGS0_TYPE_STATUS; + sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY; + req->sbale_curr = 2; - /* set handle of request which should be aborted */ - fsf_req->qtcb->bottom.support.req_handle = (u64) old_req_id; + sr_buf = mempool_alloc(adapter->pool.data_status_read, GFP_ATOMIC); + if (!sr_buf) { + retval = -ENOMEM; + goto failed_buf; + } + memset(sr_buf, 0, sizeof(*sr_buf)); + req->data = sr_buf; + sbale = zfcp_qdio_sbale_curr(req); + sbale->addr = (void *) sr_buf; + sbale->length = sizeof(*sr_buf); - zfcp_fsf_start_timer(fsf_req, ZFCP_SCSI_ER_TIMEOUT); - retval = zfcp_fsf_req_send(fsf_req); - if (!retval) - goto out; + retval = zfcp_fsf_req_send(req); + if (retval) + goto failed_req_send; - unit_blocked: - zfcp_fsf_req_free(fsf_req); - fsf_req = NULL; + goto out; - out: - write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags); - return fsf_req; +failed_req_send: + mempool_free(sr_buf, adapter->pool.data_status_read); +failed_buf: + zfcp_fsf_req_free(req); + zfcp_hba_dbf_event_fsf_unsol("fail", adapter, NULL); +out: + spin_unlock(&adapter->req_q.lock); + return retval; } -/* - * function: zfcp_fsf_abort_fcp_command_handler - * - * purpose: is called for finished Abort FCP Command request - * - * returns: - */ -static int -zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *new_fsf_req) +static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req) { - int retval = -EINVAL; - struct zfcp_unit *unit; - union fsf_status_qual *fsf_stat_qual = - &new_fsf_req->qtcb->header.fsf_status_qual; - - if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) { - /* do not set ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED */ - goto skip_fsfstatus; - } - - unit = (struct zfcp_unit *) new_fsf_req->data; + struct zfcp_unit *unit = req->data; + union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual; - /* evaluate FSF status in QTCB */ - switch (new_fsf_req->qtcb->header.fsf_status) { + if (req->status & ZFCP_STATUS_FSFREQ_ERROR) + return; + switch (req->qtcb->header.fsf_status) { case FSF_PORT_HANDLE_NOT_VALID: - if (fsf_stat_qual->word[0] != fsf_stat_qual->word[1]) { - /* - * In this case a command that was sent prior to a port - * reopen was aborted (handles are different). This is - * fine. - */ - } else { - ZFCP_LOG_INFO("Temporary port identifier 0x%x for " - "port 0x%016Lx on adapter %s invalid. " - "This may happen occasionally.\n", - unit->port->handle, - unit->port->wwpn, - zfcp_get_busid_by_unit(unit)); - ZFCP_LOG_INFO("status qualifier:\n"); - ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_INFO, - (char *) &new_fsf_req->qtcb->header. - fsf_status_qual, - sizeof (union fsf_status_qual)); - /* Let's hope this sorts out the mess */ + if (fsq->word[0] == fsq->word[1]) { zfcp_erp_adapter_reopen(unit->port->adapter, 0, 104, - new_fsf_req); - new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; + req); + req->status |= ZFCP_STATUS_FSFREQ_ERROR; } break; - case FSF_LUN_HANDLE_NOT_VALID: - if (fsf_stat_qual->word[0] != fsf_stat_qual->word[1]) { - /* - * In this case a command that was sent prior to a unit - * reopen was aborted (handles are different). - * This is fine. - */ - } else { - ZFCP_LOG_INFO - ("Warning: Temporary LUN identifier 0x%x of LUN " - "0x%016Lx on port 0x%016Lx on adapter %s is " - "invalid. This may happen in rare cases. 
" - "Trying to re-establish link.\n", - unit->handle, - unit->fcp_lun, - unit->port->wwpn, - zfcp_get_busid_by_unit(unit)); - ZFCP_LOG_DEBUG("Status qualifier data:\n"); - ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, - (char *) &new_fsf_req->qtcb->header. - fsf_status_qual, - sizeof (union fsf_status_qual)); - /* Let's hope this sorts out the mess */ - zfcp_erp_port_reopen(unit->port, 0, 105, new_fsf_req); - new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; + if (fsq->word[0] == fsq->word[1]) { + zfcp_erp_port_reopen(unit->port, 0, 105, req); + req->status |= ZFCP_STATUS_FSFREQ_ERROR; } break; - case FSF_FCP_COMMAND_DOES_NOT_EXIST: - retval = 0; - new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED; + req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED; break; - case FSF_PORT_BOXED: - ZFCP_LOG_INFO("Remote port 0x%016Lx on adapter %s needs to " - "be reopened\n", unit->port->wwpn, - zfcp_get_busid_by_unit(unit)); - zfcp_erp_port_boxed(unit->port, 47, new_fsf_req); - new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR - | ZFCP_STATUS_FSFREQ_RETRY; + zfcp_erp_port_boxed(unit->port, 47, req); + req->status |= ZFCP_STATUS_FSFREQ_ERROR | + ZFCP_STATUS_FSFREQ_RETRY; break; - case FSF_LUN_BOXED: - ZFCP_LOG_INFO( - "unit 0x%016Lx on port 0x%016Lx on adapter %s needs " - "to be reopened\n", - unit->fcp_lun, unit->port->wwpn, - zfcp_get_busid_by_unit(unit)); - zfcp_erp_unit_boxed(unit, 48, new_fsf_req); - new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR - | ZFCP_STATUS_FSFREQ_RETRY; + zfcp_erp_unit_boxed(unit, 48, req); + req->status |= ZFCP_STATUS_FSFREQ_ERROR | + ZFCP_STATUS_FSFREQ_RETRY; break; - case FSF_ADAPTER_STATUS_AVAILABLE: - switch (new_fsf_req->qtcb->header.fsf_status_qual.word[0]) { + switch (fsq->word[0]) { case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: zfcp_test_link(unit->port); - new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - break; case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: - /* SCSI stack will escalate */ - new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - break; - default: - ZFCP_LOG_NORMAL - ("bug: Wrong status qualifier 0x%x arrived.\n", - new_fsf_req->qtcb->header.fsf_status_qual.word[0]); + req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; } break; - case FSF_GOOD: - retval = 0; - new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED; - break; - - default: - ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented " - "(debug info 0x%x)\n", - new_fsf_req->qtcb->header.fsf_status); + req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED; break; } - skip_fsfstatus: - return retval; } /** - * zfcp_use_one_sbal - checks whether req buffer and resp bother each fit into - * one SBALE - * Two scatter-gather lists are passed, one for the reqeust and one for the - * response. + * zfcp_fsf_abort_fcp_command - abort running SCSI command + * @old_req_id: unsigned long + * @adapter: pointer to struct zfcp_adapter + * @unit: pointer to struct zfcp_unit + * @req_flags: integer specifying the request flags + * Returns: pointer to struct zfcp_fsf_req + * + * FIXME(design): should be watched by a timeout !!! 
*/ -static inline int -zfcp_use_one_sbal(struct scatterlist *req, int req_count, - struct scatterlist *resp, int resp_count) -{ - return ((req_count == 1) && - (resp_count == 1) && - (((unsigned long) zfcp_sg_to_address(&req[0]) & - PAGE_MASK) == - ((unsigned long) (zfcp_sg_to_address(&req[0]) + - req[0].length - 1) & PAGE_MASK)) && - (((unsigned long) zfcp_sg_to_address(&resp[0]) & - PAGE_MASK) == - ((unsigned long) (zfcp_sg_to_address(&resp[0]) + - resp[0].length - 1) & PAGE_MASK))); -} -/** - * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS) - * @ct: pointer to struct zfcp_send_ct which conatins all needed data for - * the request - * @pool: pointer to memory pool, if non-null this pool is used to allocate - * a struct zfcp_fsf_req - * @erp_action: pointer to erp_action, if non-null the Generic Service request - * is sent within error recovery - */ -int -zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool, - struct zfcp_erp_action *erp_action) +struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id, + struct zfcp_adapter *adapter, + struct zfcp_unit *unit, + int req_flags) { volatile struct qdio_buffer_element *sbale; - struct zfcp_port *port; - struct zfcp_adapter *adapter; - struct zfcp_fsf_req *fsf_req; - unsigned long lock_flags; - int bytes; - int ret = 0; - - port = ct->port; - adapter = port->adapter; - - ret = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_GENERIC, - ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP, - pool, &lock_flags, &fsf_req); - if (ret < 0) { - ZFCP_LOG_INFO("error: Could not create CT request (FC-GS) for " - "adapter: %s\n", - zfcp_get_busid_by_adapter(adapter)); - goto failed_req; - } + struct zfcp_fsf_req *req = NULL; - sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); - if (zfcp_use_one_sbal(ct->req, ct->req_count, - ct->resp, ct->resp_count)){ - /* both request buffer and response buffer - fit into one sbale each */ - sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ; - sbale[2].addr = zfcp_sg_to_address(&ct->req[0]); - sbale[2].length = ct->req[0].length; - sbale[3].addr = zfcp_sg_to_address(&ct->resp[0]); - sbale[3].length = ct->resp[0].length; - sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY; - } else if (adapter->adapter_features & - FSF_FEATURE_ELS_CT_CHAINED_SBALS) { - /* try to use chained SBALs */ - bytes = zfcp_qdio_sbals_from_sg(fsf_req, - SBAL_FLAGS0_TYPE_WRITE_READ, - ct->req, ct->req_count, - ZFCP_MAX_SBALS_PER_CT_REQ); - if (bytes <= 0) { - ZFCP_LOG_INFO("error: creation of CT request failed " - "on adapter %s\n", - zfcp_get_busid_by_adapter(adapter)); - if (bytes == 0) - ret = -ENOMEM; - else - ret = bytes; - - goto failed_send; - } - fsf_req->qtcb->bottom.support.req_buf_length = bytes; - fsf_req->sbale_curr = ZFCP_LAST_SBALE_PER_SBAL; - bytes = zfcp_qdio_sbals_from_sg(fsf_req, - SBAL_FLAGS0_TYPE_WRITE_READ, - ct->resp, ct->resp_count, - ZFCP_MAX_SBALS_PER_CT_REQ); - if (bytes <= 0) { - ZFCP_LOG_INFO("error: creation of CT request failed " - "on adapter %s\n", - zfcp_get_busid_by_adapter(adapter)); - if (bytes == 0) - ret = -ENOMEM; - else - ret = bytes; - - goto failed_send; - } - fsf_req->qtcb->bottom.support.resp_buf_length = bytes; - } else { - /* reject send generic request */ - ZFCP_LOG_INFO( - "error: microcode does not support chained SBALs," - "CT request too big (adapter %s)\n", - zfcp_get_busid_by_adapter(adapter)); - ret = -EOPNOTSUPP; - goto failed_send; - } - - /* settings in QTCB */ - fsf_req->qtcb->header.port_handle = port->handle; - fsf_req->qtcb->bottom.support.service_class = - 
ZFCP_FC_SERVICE_CLASS_DEFAULT; - fsf_req->qtcb->bottom.support.timeout = ct->timeout; - fsf_req->data = (unsigned long) ct; - - zfcp_san_dbf_event_ct_request(fsf_req); + spin_lock(&adapter->req_q.lock); + if (!atomic_read(&adapter->req_q.count)) + goto out; + req = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND, + req_flags, adapter->pool.fsf_req_abort); + if (unlikely(IS_ERR(req))) + goto out; - if (erp_action) { - erp_action->fsf_req = fsf_req; - fsf_req->erp_action = erp_action; - zfcp_erp_start_timer(fsf_req); - } else - zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT); + if (unlikely(!(atomic_read(&unit->status) & + ZFCP_STATUS_COMMON_UNBLOCKED))) + goto out_error_free; - ret = zfcp_fsf_req_send(fsf_req); - if (ret) { - ZFCP_LOG_DEBUG("error: initiation of CT request failed " - "(adapter %s, port 0x%016Lx)\n", - zfcp_get_busid_by_adapter(adapter), port->wwpn); - goto failed_send; - } + sbale = zfcp_qdio_sbale_req(req); + sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; + sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; - ZFCP_LOG_DEBUG("CT request initiated (adapter %s, port 0x%016Lx)\n", - zfcp_get_busid_by_adapter(adapter), port->wwpn); - goto out; + req->data = unit; + req->handler = zfcp_fsf_abort_fcp_command_handler; + req->qtcb->header.lun_handle = unit->handle; + req->qtcb->header.port_handle = unit->port->handle; + req->qtcb->bottom.support.req_handle = (u64) old_req_id; - failed_send: - zfcp_fsf_req_free(fsf_req); - if (erp_action != NULL) { - erp_action->fsf_req = NULL; - } - failed_req: - out: - write_unlock_irqrestore(&adapter->request_queue.queue_lock, - lock_flags); - return ret; + zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT); + if (!zfcp_fsf_req_send(req)) + goto out; + +out_error_free: + zfcp_fsf_req_free(req); + req = NULL; +out: + spin_unlock(&adapter->req_q.lock); + return req; } -/** - * zfcp_fsf_send_ct_handler - handler for Generic Service requests - * @fsf_req: pointer to struct zfcp_fsf_req - * - * Data specific for the Generic Service request is passed using - * fsf_req->data. There we find the pointer to struct zfcp_send_ct. - * Usually a specific handler for the CT request is called which is - * found in this structure. 
- */ -static int -zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *fsf_req) +static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req) { - struct zfcp_port *port; - struct zfcp_adapter *adapter; - struct zfcp_send_ct *send_ct; - struct fsf_qtcb_header *header; - struct fsf_qtcb_bottom_support *bottom; - int retval = -EINVAL; - u16 subtable, rule, counter; + struct zfcp_adapter *adapter = req->adapter; + struct zfcp_send_ct *send_ct = req->data; + struct zfcp_port *port = send_ct->port; + struct fsf_qtcb_header *header = &req->qtcb->header; - adapter = fsf_req->adapter; - send_ct = (struct zfcp_send_ct *) fsf_req->data; - port = send_ct->port; - header = &fsf_req->qtcb->header; - bottom = &fsf_req->qtcb->bottom.support; + send_ct->status = -EINVAL; - if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) + if (req->status & ZFCP_STATUS_FSFREQ_ERROR) goto skip_fsfstatus; - /* evaluate FSF status in QTCB */ switch (header->fsf_status) { - case FSF_GOOD: - zfcp_san_dbf_event_ct_response(fsf_req); - retval = 0; + zfcp_san_dbf_event_ct_response(req); + send_ct->status = 0; break; - case FSF_SERVICE_CLASS_NOT_SUPPORTED: - ZFCP_LOG_INFO("error: adapter %s does not support fc " - "class %d.\n", - zfcp_get_busid_by_port(port), - ZFCP_FC_SERVICE_CLASS_DEFAULT); - /* stop operation for this adapter */ - zfcp_erp_adapter_shutdown(adapter, 0, 123, fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; + zfcp_fsf_class_not_supp(req); break; - case FSF_ADAPTER_STATUS_AVAILABLE: switch (header->fsf_status_qual.word[0]){ case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: - /* reopening link to port */ zfcp_test_link(port); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - break; case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: - /* ERP strategy will escalate */ - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - break; - default: - ZFCP_LOG_INFO("bug: Wrong status qualifier 0x%x " - "arrived.\n", - header->fsf_status_qual.word[0]); + req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; } break; - case FSF_ACCESS_DENIED: - ZFCP_LOG_NORMAL("access denied, cannot send generic service " - "command (adapter %s, port d_id=0x%06x)\n", - zfcp_get_busid_by_port(port), port->d_id); - for (counter = 0; counter < 2; counter++) { - subtable = header->fsf_status_qual.halfword[counter * 2]; - rule = header->fsf_status_qual.halfword[counter * 2 + 1]; - switch (subtable) { - case FSF_SQ_CFDC_SUBTABLE_OS: - case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN: - case FSF_SQ_CFDC_SUBTABLE_PORT_DID: - case FSF_SQ_CFDC_SUBTABLE_LUN: - ZFCP_LOG_INFO("Access denied (%s rule %d)\n", - zfcp_act_subtable_type[subtable], rule); - break; - } - } - zfcp_erp_port_access_denied(port, 55, fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - break; - - case FSF_GENERIC_COMMAND_REJECTED: - ZFCP_LOG_INFO("generic service command rejected " - "(adapter %s, port d_id=0x%06x)\n", - zfcp_get_busid_by_port(port), port->d_id); - ZFCP_LOG_INFO("status qualifier:\n"); - ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_INFO, - (char *) &header->fsf_status_qual, - sizeof (union fsf_status_qual)); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - break; - - case FSF_PORT_HANDLE_NOT_VALID: - ZFCP_LOG_DEBUG("Temporary port identifier 0x%x for port " - "0x%016Lx on adapter %s invalid. 
This may " - "happen occasionally.\n", port->handle, - port->wwpn, zfcp_get_busid_by_port(port)); - ZFCP_LOG_INFO("status qualifier:\n"); - ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_INFO, - (char *) &header->fsf_status_qual, - sizeof (union fsf_status_qual)); - zfcp_erp_adapter_reopen(adapter, 0, 106, fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; + zfcp_fsf_access_denied_port(req, port); break; - case FSF_PORT_BOXED: - ZFCP_LOG_INFO("port needs to be reopened " - "(adapter %s, port d_id=0x%06x)\n", - zfcp_get_busid_by_port(port), port->d_id); - zfcp_erp_port_boxed(port, 49, fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR - | ZFCP_STATUS_FSFREQ_RETRY; + zfcp_erp_port_boxed(port, 49, req); + req->status |= ZFCP_STATUS_FSFREQ_ERROR | + ZFCP_STATUS_FSFREQ_RETRY; break; - - /* following states should never occure, all cases avoided - in zfcp_fsf_send_ct - but who knows ... */ + case FSF_PORT_HANDLE_NOT_VALID: + zfcp_erp_adapter_reopen(adapter, 0, 106, req); + case FSF_GENERIC_COMMAND_REJECTED: case FSF_PAYLOAD_SIZE_MISMATCH: - ZFCP_LOG_INFO("payload size mismatch (adapter: %s, " - "req_buf_length=%d, resp_buf_length=%d)\n", - zfcp_get_busid_by_adapter(adapter), - bottom->req_buf_length, bottom->resp_buf_length); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - break; case FSF_REQUEST_SIZE_TOO_LARGE: - ZFCP_LOG_INFO("request size too large (adapter: %s, " - "req_buf_length=%d)\n", - zfcp_get_busid_by_adapter(adapter), - bottom->req_buf_length); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - break; case FSF_RESPONSE_SIZE_TOO_LARGE: - ZFCP_LOG_INFO("response size too large (adapter: %s, " - "resp_buf_length=%d)\n", - zfcp_get_busid_by_adapter(adapter), - bottom->resp_buf_length); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - break; case FSF_SBAL_MISMATCH: - ZFCP_LOG_INFO("SBAL mismatch (adapter: %s, req_buf_length=%d, " - "resp_buf_length=%d)\n", - zfcp_get_busid_by_adapter(adapter), - bottom->req_buf_length, bottom->resp_buf_length); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - break; - - default: - ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented " - "(debug info 0x%x)\n", header->fsf_status); + req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; } skip_fsfstatus: - send_ct->status = retval; - - if (send_ct->handler != NULL) + if (send_ct->handler) send_ct->handler(send_ct->handler_data); +} - return retval; +static int zfcp_fsf_setup_sbals(struct zfcp_fsf_req *req, + struct scatterlist *sg_req, + struct scatterlist *sg_resp, int max_sbals) +{ + int bytes; + + bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ, + sg_req, max_sbals); + if (bytes <= 0) + return -ENOMEM; + req->qtcb->bottom.support.req_buf_length = bytes; + req->sbale_curr = ZFCP_LAST_SBALE_PER_SBAL; + + bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ, + sg_resp, max_sbals); + if (bytes <= 0) + return -ENOMEM; + req->qtcb->bottom.support.resp_buf_length = bytes; + + return 0; } /** - * zfcp_fsf_send_els - initiate an ELS command (FC-FS) - * @els: pointer to struct zfcp_send_els which contains all needed data for - * the command. 
+ * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS) + * @ct: pointer to struct zfcp_send_ct with data for request + * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req + * @erp_action: if non-null the Generic Service request sent within ERP */ -int -zfcp_fsf_send_els(struct zfcp_send_els *els) +int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool, + struct zfcp_erp_action *erp_action) { - volatile struct qdio_buffer_element *sbale; - struct zfcp_fsf_req *fsf_req; - u32 d_id; - struct zfcp_adapter *adapter; - unsigned long lock_flags; - int bytes; - int ret = 0; - - d_id = els->d_id; - adapter = els->adapter; + struct zfcp_port *port = ct->port; + struct zfcp_adapter *adapter = port->adapter; + struct zfcp_fsf_req *req; + int ret = -EIO; - ret = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_ELS, - ZFCP_REQ_AUTO_CLEANUP, - NULL, &lock_flags, &fsf_req); - if (ret < 0) { - ZFCP_LOG_INFO("error: creation of ELS request failed " - "(adapter %s, port d_id: 0x%06x)\n", - zfcp_get_busid_by_adapter(adapter), d_id); - goto failed_req; - } + spin_lock(&adapter->req_q.lock); + if (zfcp_fsf_req_sbal_get(adapter)) + goto out; - if (unlikely(!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED, - &els->port->status))) { - ret = -EBUSY; - goto port_blocked; + req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_GENERIC, + ZFCP_REQ_AUTO_CLEANUP, pool); + if (unlikely(IS_ERR(req))) { + ret = PTR_ERR(req); + goto out; } - sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); - if (zfcp_use_one_sbal(els->req, els->req_count, - els->resp, els->resp_count)){ - /* both request buffer and response buffer - fit into one sbale each */ - sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ; - sbale[2].addr = zfcp_sg_to_address(&els->req[0]); - sbale[2].length = els->req[0].length; - sbale[3].addr = zfcp_sg_to_address(&els->resp[0]); - sbale[3].length = els->resp[0].length; - sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY; - } else if (adapter->adapter_features & - FSF_FEATURE_ELS_CT_CHAINED_SBALS) { - /* try to use chained SBALs */ - bytes = zfcp_qdio_sbals_from_sg(fsf_req, - SBAL_FLAGS0_TYPE_WRITE_READ, - els->req, els->req_count, - ZFCP_MAX_SBALS_PER_ELS_REQ); - if (bytes <= 0) { - ZFCP_LOG_INFO("error: creation of ELS request failed " - "(adapter %s, port d_id: 0x%06x)\n", - zfcp_get_busid_by_adapter(adapter), d_id); - if (bytes == 0) { - ret = -ENOMEM; - } else { - ret = bytes; - } - goto failed_send; - } - fsf_req->qtcb->bottom.support.req_buf_length = bytes; - fsf_req->sbale_curr = ZFCP_LAST_SBALE_PER_SBAL; - bytes = zfcp_qdio_sbals_from_sg(fsf_req, - SBAL_FLAGS0_TYPE_WRITE_READ, - els->resp, els->resp_count, - ZFCP_MAX_SBALS_PER_ELS_REQ); - if (bytes <= 0) { - ZFCP_LOG_INFO("error: creation of ELS request failed " - "(adapter %s, port d_id: 0x%06x)\n", - zfcp_get_busid_by_adapter(adapter), d_id); - if (bytes == 0) { - ret = -ENOMEM; - } else { - ret = bytes; - } - goto failed_send; - } - fsf_req->qtcb->bottom.support.resp_buf_length = bytes; - } else { - /* reject request */ - ZFCP_LOG_INFO("error: microcode does not support chained SBALs" - ", ELS request too big (adapter %s, " - "port d_id: 0x%06x)\n", - zfcp_get_busid_by_adapter(adapter), d_id); - ret = -EOPNOTSUPP; - goto failed_send; - } - - /* settings in QTCB */ - fsf_req->qtcb->bottom.support.d_id = d_id; - fsf_req->qtcb->bottom.support.service_class = - ZFCP_FC_SERVICE_CLASS_DEFAULT; - fsf_req->qtcb->bottom.support.timeout = ZFCP_ELS_TIMEOUT; - fsf_req->data = (unsigned long) els; - - sbale = zfcp_qdio_sbale_req(fsf_req, 
fsf_req->sbal_curr, 0); - - zfcp_san_dbf_event_els_request(fsf_req); - - zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT); - ret = zfcp_fsf_req_send(fsf_req); - if (ret) { - ZFCP_LOG_DEBUG("error: initiation of ELS request failed " - "(adapter %s, port d_id: 0x%06x)\n", - zfcp_get_busid_by_adapter(adapter), d_id); + ret = zfcp_fsf_setup_sbals(req, ct->req, ct->resp, + FSF_MAX_SBALS_PER_REQ); + if (ret) goto failed_send; - } - ZFCP_LOG_DEBUG("ELS request initiated (adapter %s, port d_id: " - "0x%06x)\n", zfcp_get_busid_by_adapter(adapter), d_id); - goto out; + req->handler = zfcp_fsf_send_ct_handler; + req->qtcb->header.port_handle = port->handle; + req->qtcb->bottom.support.service_class = FSF_CLASS_3; + req->qtcb->bottom.support.timeout = ct->timeout; + req->data = ct; - port_blocked: - failed_send: - zfcp_fsf_req_free(fsf_req); + zfcp_san_dbf_event_ct_request(req); - failed_req: - out: - write_unlock_irqrestore(&adapter->request_queue.queue_lock, - lock_flags); + if (erp_action) { + erp_action->fsf_req = req; + req->erp_action = erp_action; + zfcp_fsf_start_erp_timer(req); + } else + zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); + + ret = zfcp_fsf_req_send(req); + if (ret) + goto failed_send; + + goto out; - return ret; +failed_send: + zfcp_fsf_req_free(req); + if (erp_action) + erp_action->fsf_req = NULL; +out: + spin_unlock(&adapter->req_q.lock); + return ret; } -/** - * zfcp_fsf_send_els_handler - handler for ELS commands - * @fsf_req: pointer to struct zfcp_fsf_req - * - * Data specific for the ELS command is passed using - * fsf_req->data. There we find the pointer to struct zfcp_send_els. - * Usually a specific handler for the ELS command is called which is - * found in this structure. - */ -static int zfcp_fsf_send_els_handler(struct zfcp_fsf_req *fsf_req) +static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req) { - struct zfcp_adapter *adapter; - struct zfcp_port *port; - u32 d_id; - struct fsf_qtcb_header *header; - struct fsf_qtcb_bottom_support *bottom; - struct zfcp_send_els *send_els; - int retval = -EINVAL; - u16 subtable, rule, counter; - - send_els = (struct zfcp_send_els *) fsf_req->data; - adapter = send_els->adapter; - port = send_els->port; - d_id = send_els->d_id; - header = &fsf_req->qtcb->header; - bottom = &fsf_req->qtcb->bottom.support; - - if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) + struct zfcp_send_els *send_els = req->data; + struct zfcp_port *port = send_els->port; + struct fsf_qtcb_header *header = &req->qtcb->header; + + send_els->status = -EINVAL; + + if (req->status & ZFCP_STATUS_FSFREQ_ERROR) goto skip_fsfstatus; switch (header->fsf_status) { - case FSF_GOOD: - zfcp_san_dbf_event_els_response(fsf_req); - retval = 0; + zfcp_san_dbf_event_els_response(req); + send_els->status = 0; break; - case FSF_SERVICE_CLASS_NOT_SUPPORTED: - ZFCP_LOG_INFO("error: adapter %s does not support fc " - "class %d.\n", - zfcp_get_busid_by_adapter(adapter), - ZFCP_FC_SERVICE_CLASS_DEFAULT); - /* stop operation for this adapter */ - zfcp_erp_adapter_shutdown(adapter, 0, 124, fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; + zfcp_fsf_class_not_supp(req); break; - case FSF_ADAPTER_STATUS_AVAILABLE: switch (header->fsf_status_qual.word[0]){ case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: if (port && (send_els->ls_code != ZFCP_LS_ADISC)) zfcp_test_link(port); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - break; + /*fall through */ case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - retval = - 
zfcp_handle_els_rjt(header->fsf_status_qual.word[1], - (struct zfcp_ls_rjt_par *) - &header->fsf_status_qual.word[2]); - break; case FSF_SQ_RETRY_IF_POSSIBLE: - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; + req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; - default: - ZFCP_LOG_INFO("bug: Wrong status qualifier 0x%x\n", - header->fsf_status_qual.word[0]); - ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_INFO, - (char*)header->fsf_status_qual.word, 16); } break; - case FSF_ELS_COMMAND_REJECTED: - ZFCP_LOG_INFO("ELS has been rejected because command filter " - "prohibited sending " - "(adapter: %s, port d_id: 0x%06x)\n", - zfcp_get_busid_by_adapter(adapter), d_id); - - break; - case FSF_PAYLOAD_SIZE_MISMATCH: - ZFCP_LOG_INFO( - "ELS request size and ELS response size must be either " - "both 0, or both greater than 0 " - "(adapter: %s, req_buf_length=%d resp_buf_length=%d)\n", - zfcp_get_busid_by_adapter(adapter), - bottom->req_buf_length, - bottom->resp_buf_length); - break; - case FSF_REQUEST_SIZE_TOO_LARGE: - ZFCP_LOG_INFO( - "Length of the ELS request buffer, " - "specified in QTCB bottom, " - "exceeds the size of the buffers " - "that have been allocated for ELS request data " - "(adapter: %s, req_buf_length=%d)\n", - zfcp_get_busid_by_adapter(adapter), - bottom->req_buf_length); - break; - case FSF_RESPONSE_SIZE_TOO_LARGE: - ZFCP_LOG_INFO( - "Length of the ELS response buffer, " - "specified in QTCB bottom, " - "exceeds the size of the buffers " - "that have been allocated for ELS response data " - "(adapter: %s, resp_buf_length=%d)\n", - zfcp_get_busid_by_adapter(adapter), - bottom->resp_buf_length); - break; - - case FSF_SBAL_MISMATCH: - /* should never occure, avoided in zfcp_fsf_send_els */ - ZFCP_LOG_INFO("SBAL mismatch (adapter: %s, req_buf_length=%d, " - "resp_buf_length=%d)\n", - zfcp_get_busid_by_adapter(adapter), - bottom->req_buf_length, bottom->resp_buf_length); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; - case FSF_ACCESS_DENIED: - ZFCP_LOG_NORMAL("access denied, cannot send ELS command " - "(adapter %s, port d_id=0x%06x)\n", - zfcp_get_busid_by_adapter(adapter), d_id); - for (counter = 0; counter < 2; counter++) { - subtable = header->fsf_status_qual.halfword[counter * 2]; - rule = header->fsf_status_qual.halfword[counter * 2 + 1]; - switch (subtable) { - case FSF_SQ_CFDC_SUBTABLE_OS: - case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN: - case FSF_SQ_CFDC_SUBTABLE_PORT_DID: - case FSF_SQ_CFDC_SUBTABLE_LUN: - ZFCP_LOG_INFO("Access denied (%s rule %d)\n", - zfcp_act_subtable_type[subtable], rule); - break; - } - } - if (port != NULL) - zfcp_erp_port_access_denied(port, 56, fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; + zfcp_fsf_access_denied_port(req, port); break; - + case FSF_SBAL_MISMATCH: + /* should never occure, avoided in zfcp_fsf_send_els */ + /* fall through */ default: - ZFCP_LOG_NORMAL( - "bug: An unknown FSF Status was presented " - "(adapter: %s, fsf_status=0x%08x)\n", - zfcp_get_busid_by_adapter(adapter), - header->fsf_status); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; + req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; } - skip_fsfstatus: - send_els->status = retval; - if (send_els->handler) send_els->handler(send_els->handler_data); +} - return retval; +/** + * zfcp_fsf_send_els - initiate an ELS command (FC-FS) + * @els: pointer to struct zfcp_send_els with data for the command + */ +int zfcp_fsf_send_els(struct zfcp_send_els *els) +{ + struct zfcp_fsf_req *req; + struct zfcp_adapter *adapter = els->adapter; + struct fsf_qtcb_bottom_support *bottom; + int ret = 
-EIO; + + if (unlikely(!(atomic_read(&els->port->status) & + ZFCP_STATUS_COMMON_UNBLOCKED))) + return -EBUSY; + + spin_lock(&adapter->req_q.lock); + if (!atomic_read(&adapter->req_q.count)) + goto out; + req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_ELS, + ZFCP_REQ_AUTO_CLEANUP, NULL); + if (unlikely(IS_ERR(req))) { + ret = PTR_ERR(req); + goto out; + } + + ret = zfcp_fsf_setup_sbals(req, els->req, els->resp, + FSF_MAX_SBALS_PER_ELS_REQ); + if (ret) + goto failed_send; + + bottom = &req->qtcb->bottom.support; + req->handler = zfcp_fsf_send_els_handler; + bottom->d_id = els->d_id; + bottom->service_class = FSF_CLASS_3; + bottom->timeout = 2 * R_A_TOV; + req->data = els; + + zfcp_san_dbf_event_els_request(req); + + zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); + ret = zfcp_fsf_req_send(req); + if (ret) + goto failed_send; + + goto out; + +failed_send: + zfcp_fsf_req_free(req); +out: + spin_unlock(&adapter->req_q.lock); + return ret; } -int -zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action) +int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action) { volatile struct qdio_buffer_element *sbale; - struct zfcp_fsf_req *fsf_req; + struct zfcp_fsf_req *req; struct zfcp_adapter *adapter = erp_action->adapter; - unsigned long lock_flags; - int retval; - - /* setup new FSF request */ - retval = zfcp_fsf_req_create(adapter, - FSF_QTCB_EXCHANGE_CONFIG_DATA, - ZFCP_REQ_AUTO_CLEANUP, - adapter->pool.fsf_req_erp, - &lock_flags, &fsf_req); - if (retval) { - ZFCP_LOG_INFO("error: Could not create exchange configuration " - "data request for adapter %s.\n", - zfcp_get_busid_by_adapter(adapter)); - write_unlock_irqrestore(&adapter->request_queue.queue_lock, - lock_flags); - return retval; + int retval = -EIO; + + spin_lock(&adapter->req_q.lock); + if (!atomic_read(&adapter->req_q.count)) + goto out; + req = zfcp_fsf_req_create(adapter, + FSF_QTCB_EXCHANGE_CONFIG_DATA, + ZFCP_REQ_AUTO_CLEANUP, + adapter->pool.fsf_req_erp); + if (unlikely(IS_ERR(req))) { + retval = PTR_ERR(req); + goto out; } - sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); + sbale = zfcp_qdio_sbale_req(req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; - fsf_req->qtcb->bottom.config.feature_selection = + req->qtcb->bottom.config.feature_selection = FSF_FEATURE_CFDC | FSF_FEATURE_LUN_SHARING | FSF_FEATURE_NOTIFICATION_LOST | FSF_FEATURE_UPDATE_ALERT; - fsf_req->erp_action = erp_action; - erp_action->fsf_req = fsf_req; + req->erp_action = erp_action; + req->handler = zfcp_fsf_exchange_config_data_handler; + erp_action->fsf_req = req; - zfcp_erp_start_timer(fsf_req); - retval = zfcp_fsf_req_send(fsf_req); - write_unlock_irqrestore(&adapter->request_queue.queue_lock, - lock_flags); + zfcp_fsf_start_erp_timer(req); + retval = zfcp_fsf_req_send(req); if (retval) { - ZFCP_LOG_INFO("error: Could not send exchange configuration " - "data command on the adapter %s\n", - zfcp_get_busid_by_adapter(adapter)); - zfcp_fsf_req_free(fsf_req); + zfcp_fsf_req_free(req); erp_action->fsf_req = NULL; } - else - ZFCP_LOG_DEBUG("exchange configuration data request initiated " - "(adapter %s)\n", - zfcp_get_busid_by_adapter(adapter)); - +out: + spin_unlock(&adapter->req_q.lock); return retval; } -int -zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter, - struct fsf_qtcb_bottom_config *data) +int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter, + struct fsf_qtcb_bottom_config *data) { volatile struct qdio_buffer_element *sbale; - struct zfcp_fsf_req 
*fsf_req; - unsigned long lock_flags; - int retval; - - /* setup new FSF request */ - retval = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA, - ZFCP_WAIT_FOR_SBAL, NULL, &lock_flags, - &fsf_req); - if (retval) { - ZFCP_LOG_INFO("error: Could not create exchange configuration " - "data request for adapter %s.\n", - zfcp_get_busid_by_adapter(adapter)); - write_unlock_irqrestore(&adapter->request_queue.queue_lock, - lock_flags); - return retval; + struct zfcp_fsf_req *req = NULL; + int retval = -EIO; + + spin_lock(&adapter->req_q.lock); + if (zfcp_fsf_req_sbal_get(adapter)) + goto out; + + req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA, + 0, NULL); + if (unlikely(IS_ERR(req))) { + retval = PTR_ERR(req); + goto out; } - sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); + sbale = zfcp_qdio_sbale_req(req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; + req->handler = zfcp_fsf_exchange_config_data_handler; - fsf_req->qtcb->bottom.config.feature_selection = + req->qtcb->bottom.config.feature_selection = FSF_FEATURE_CFDC | FSF_FEATURE_LUN_SHARING | FSF_FEATURE_NOTIFICATION_LOST | FSF_FEATURE_UPDATE_ALERT; if (data) - fsf_req->data = (unsigned long) data; + req->data = data; - zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT); - retval = zfcp_fsf_req_send(fsf_req); - write_unlock_irqrestore(&adapter->request_queue.queue_lock, - lock_flags); - if (retval) - ZFCP_LOG_INFO("error: Could not send exchange configuration " - "data command on the adapter %s\n", - zfcp_get_busid_by_adapter(adapter)); - else - wait_event(fsf_req->completion_wq, - fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED); + zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); + retval = zfcp_fsf_req_send(req); +out: + spin_unlock(&adapter->req_q.lock); + if (!retval) + wait_event(req->completion_wq, + req->status & ZFCP_STATUS_FSFREQ_COMPLETED); - zfcp_fsf_req_free(fsf_req); + zfcp_fsf_req_free(req); return retval; } /** - * zfcp_fsf_exchange_config_evaluate - * @fsf_req: fsf_req which belongs to xchg config data request - * @xchg_ok: specifies if xchg config data was incomplete or complete (0/1) - * - * returns: -EIO on error, 0 otherwise - */ -static int -zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok) -{ - struct fsf_qtcb_bottom_config *bottom; - struct zfcp_adapter *adapter = fsf_req->adapter; - struct Scsi_Host *shost = adapter->scsi_host; - - bottom = &fsf_req->qtcb->bottom.config; - ZFCP_LOG_DEBUG("low/high QTCB version 0x%x/0x%x of FSF\n", - bottom->low_qtcb_version, bottom->high_qtcb_version); - adapter->fsf_lic_version = bottom->lic_version; - adapter->adapter_features = bottom->adapter_features; - adapter->connection_features = bottom->connection_features; - adapter->peer_wwpn = 0; - adapter->peer_wwnn = 0; - adapter->peer_d_id = 0; - - if (xchg_ok) { - - if (fsf_req->data) - memcpy((struct fsf_qtcb_bottom_config *) fsf_req->data, - bottom, sizeof (struct fsf_qtcb_bottom_config)); - - fc_host_node_name(shost) = bottom->nport_serv_param.wwnn; - fc_host_port_name(shost) = bottom->nport_serv_param.wwpn; - fc_host_port_id(shost) = bottom->s_id & ZFCP_DID_MASK; - fc_host_speed(shost) = bottom->fc_link_speed; - fc_host_supported_classes(shost) = - FC_COS_CLASS2 | FC_COS_CLASS3; - adapter->hydra_version = bottom->adapter_type; - if (fc_host_permanent_port_name(shost) == -1) - fc_host_permanent_port_name(shost) = - fc_host_port_name(shost); - if (bottom->fc_topology == FSF_TOPO_P2P) { - adapter->peer_d_id = 
bottom->peer_d_id & ZFCP_DID_MASK; - adapter->peer_wwpn = bottom->plogi_payload.wwpn; - adapter->peer_wwnn = bottom->plogi_payload.wwnn; - fc_host_port_type(shost) = FC_PORTTYPE_PTP; - } else if (bottom->fc_topology == FSF_TOPO_FABRIC) - fc_host_port_type(shost) = FC_PORTTYPE_NPORT; - else if (bottom->fc_topology == FSF_TOPO_AL) - fc_host_port_type(shost) = FC_PORTTYPE_NLPORT; - else - fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN; - } else { - fc_host_node_name(shost) = 0; - fc_host_port_name(shost) = 0; - fc_host_port_id(shost) = 0; - fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; - fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN; - adapter->hydra_version = 0; - } - - if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) { - adapter->hardware_version = bottom->hardware_version; - memcpy(fc_host_serial_number(shost), bottom->serial_number, - min(FC_SERIAL_NUMBER_SIZE, 17)); - EBCASC(fc_host_serial_number(shost), - min(FC_SERIAL_NUMBER_SIZE, 17)); - } - - if (fsf_req->erp_action) - ZFCP_LOG_NORMAL("The adapter %s reported the following " - "characteristics:\n" - "WWNN 0x%016Lx, WWPN 0x%016Lx, " - "S_ID 0x%06x,\n" - "adapter version 0x%x, " - "LIC version 0x%x, " - "FC link speed %d Gb/s\n", - zfcp_get_busid_by_adapter(adapter), - (wwn_t) fc_host_node_name(shost), - (wwn_t) fc_host_port_name(shost), - fc_host_port_id(shost), - adapter->hydra_version, - adapter->fsf_lic_version, - fc_host_speed(shost)); - if (ZFCP_QTCB_VERSION < bottom->low_qtcb_version) { - ZFCP_LOG_NORMAL("error: the adapter %s " - "only supports newer control block " - "versions in comparison to this device " - "driver (try updated device driver)\n", - zfcp_get_busid_by_adapter(adapter)); - zfcp_erp_adapter_shutdown(adapter, 0, 125, fsf_req); - return -EIO; - } - if (ZFCP_QTCB_VERSION > bottom->high_qtcb_version) { - ZFCP_LOG_NORMAL("error: the adapter %s " - "only supports older control block " - "versions than this device driver uses" - "(consider a microcode upgrade)\n", - zfcp_get_busid_by_adapter(adapter)); - zfcp_erp_adapter_shutdown(adapter, 0, 126, fsf_req); - return -EIO; - } - return 0; -} - -/** - * function: zfcp_fsf_exchange_config_data_handler - * - * purpose: is called for finished Exchange Configuration Data command - * - * returns: - */ -static int -zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *fsf_req) -{ - struct fsf_qtcb_bottom_config *bottom; - struct zfcp_adapter *adapter = fsf_req->adapter; - struct fsf_qtcb *qtcb = fsf_req->qtcb; - - if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) - return -EIO; - - switch (qtcb->header.fsf_status) { - - case FSF_GOOD: - if (zfcp_fsf_exchange_config_evaluate(fsf_req, 1)) - return -EIO; - - switch (fc_host_port_type(adapter->scsi_host)) { - case FC_PORTTYPE_PTP: - ZFCP_LOG_NORMAL("Point-to-Point fibrechannel " - "configuration detected at adapter %s\n" - "Peer WWNN 0x%016llx, " - "peer WWPN 0x%016llx, " - "peer d_id 0x%06x\n", - zfcp_get_busid_by_adapter(adapter), - adapter->peer_wwnn, - adapter->peer_wwpn, - adapter->peer_d_id); - break; - case FC_PORTTYPE_NLPORT: - ZFCP_LOG_NORMAL("error: Arbitrated loop fibrechannel " - "topology detected at adapter %s " - "unsupported, shutting down adapter\n", - zfcp_get_busid_by_adapter(adapter)); - zfcp_erp_adapter_shutdown(adapter, 0, 127, fsf_req); - return -EIO; - case FC_PORTTYPE_NPORT: - if (fsf_req->erp_action) - ZFCP_LOG_NORMAL("Switched fabric fibrechannel " - "network detected at adapter " - "%s.\n", - zfcp_get_busid_by_adapter(adapter)); - break; - default: - ZFCP_LOG_NORMAL("bug: The 
fibrechannel topology " - "reported by the exchange " - "configuration command for " - "the adapter %s is not " - "of a type known to the zfcp " - "driver, shutting down adapter\n", - zfcp_get_busid_by_adapter(adapter)); - zfcp_erp_adapter_shutdown(adapter, 0, 128, fsf_req); - return -EIO; - } - bottom = &qtcb->bottom.config; - if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) { - ZFCP_LOG_NORMAL("bug: Maximum QTCB size (%d bytes) " - "allowed by the adapter %s " - "is lower than the minimum " - "required by the driver (%ld bytes).\n", - bottom->max_qtcb_size, - zfcp_get_busid_by_adapter(adapter), - sizeof(struct fsf_qtcb)); - zfcp_erp_adapter_shutdown(adapter, 0, 129, fsf_req); - return -EIO; - } - atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, - &adapter->status); - break; - case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: - if (zfcp_fsf_exchange_config_evaluate(fsf_req, 0)) - return -EIO; - - atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, - &adapter->status); - - zfcp_fsf_link_down_info_eval(fsf_req, 42, - &qtcb->header.fsf_status_qual.link_down_info); - break; - default: - zfcp_erp_adapter_shutdown(adapter, 0, 130, fsf_req); - return -EIO; - } - return 0; -} - -/** * zfcp_fsf_exchange_port_data - request information about local port * @erp_action: ERP action for the adapter for which port data is requested + * Returns: 0 on success, error otherwise */ -int -zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action) +int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action) { volatile struct qdio_buffer_element *sbale; - struct zfcp_fsf_req *fsf_req; + struct zfcp_fsf_req *req; struct zfcp_adapter *adapter = erp_action->adapter; - unsigned long lock_flags; - int retval; + int retval = -EIO; - if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) { - ZFCP_LOG_INFO("error: exchange port data " - "command not supported by adapter %s\n", - zfcp_get_busid_by_adapter(adapter)); + if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) return -EOPNOTSUPP; - } - /* setup new FSF request */ - retval = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, - ZFCP_REQ_AUTO_CLEANUP, - adapter->pool.fsf_req_erp, - &lock_flags, &fsf_req); - if (retval) { - ZFCP_LOG_INFO("error: Out of resources. 
Could not create an " - "exchange port data request for " - "the adapter %s.\n", - zfcp_get_busid_by_adapter(adapter)); - write_unlock_irqrestore(&adapter->request_queue.queue_lock, - lock_flags); - return retval; + spin_lock(&adapter->req_q.lock); + if (!atomic_read(&adapter->req_q.count)) + goto out; + req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, + ZFCP_REQ_AUTO_CLEANUP, + adapter->pool.fsf_req_erp); + if (unlikely(IS_ERR(req))) { + retval = PTR_ERR(req); + goto out; } - sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); + sbale = zfcp_qdio_sbale_req(req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; - erp_action->fsf_req = fsf_req; - fsf_req->erp_action = erp_action; - zfcp_erp_start_timer(fsf_req); - - retval = zfcp_fsf_req_send(fsf_req); - write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags); + req->handler = zfcp_fsf_exchange_port_data_handler; + req->erp_action = erp_action; + erp_action->fsf_req = req; + zfcp_fsf_start_erp_timer(req); + retval = zfcp_fsf_req_send(req); if (retval) { - ZFCP_LOG_INFO("error: Could not send an exchange port data " - "command on the adapter %s\n", - zfcp_get_busid_by_adapter(adapter)); - zfcp_fsf_req_free(fsf_req); + zfcp_fsf_req_free(req); erp_action->fsf_req = NULL; } - else - ZFCP_LOG_DEBUG("exchange port data request initiated " - "(adapter %s)\n", - zfcp_get_busid_by_adapter(adapter)); +out: + spin_unlock(&adapter->req_q.lock); return retval; } - /** * zfcp_fsf_exchange_port_data_sync - request information about local port - * and wait until information is ready + * @adapter: pointer to struct zfcp_adapter + * @data: pointer to struct fsf_qtcb_bottom_port + * Returns: 0 on success, error otherwise */ -int -zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter, - struct fsf_qtcb_bottom_port *data) +int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter, + struct fsf_qtcb_bottom_port *data) { volatile struct qdio_buffer_element *sbale; - struct zfcp_fsf_req *fsf_req; - unsigned long lock_flags; - int retval; - - if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) { - ZFCP_LOG_INFO("error: exchange port data " - "command not supported by adapter %s\n", - zfcp_get_busid_by_adapter(adapter)); + struct zfcp_fsf_req *req = NULL; + int retval = -EIO; + + if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) return -EOPNOTSUPP; - } - /* setup new FSF request */ - retval = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, - 0, NULL, &lock_flags, &fsf_req); - if (retval) { - ZFCP_LOG_INFO("error: Out of resources. 
Could not create an " - "exchange port data request for " - "the adapter %s.\n", - zfcp_get_busid_by_adapter(adapter)); - write_unlock_irqrestore(&adapter->request_queue.queue_lock, - lock_flags); - return retval; + spin_lock(&adapter->req_q.lock); + if (!atomic_read(&adapter->req_q.count)) + goto out; + + req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, 0, + NULL); + if (unlikely(IS_ERR(req))) { + retval = PTR_ERR(req); + goto out; } if (data) - fsf_req->data = (unsigned long) data; + req->data = data; - sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); + sbale = zfcp_qdio_sbale_req(req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; - zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT); - retval = zfcp_fsf_req_send(fsf_req); - write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags); - - if (retval) - ZFCP_LOG_INFO("error: Could not send an exchange port data " - "command on the adapter %s\n", - zfcp_get_busid_by_adapter(adapter)); - else - wait_event(fsf_req->completion_wq, - fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED); - - zfcp_fsf_req_free(fsf_req); - - return retval; -} - -/** - * zfcp_fsf_exchange_port_evaluate - * @fsf_req: fsf_req which belongs to xchg port data request - * @xchg_ok: specifies if xchg port data was incomplete or complete (0/1) - */ -static void -zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok) -{ - struct zfcp_adapter *adapter; - struct fsf_qtcb_bottom_port *bottom; - struct Scsi_Host *shost; - - adapter = fsf_req->adapter; - bottom = &fsf_req->qtcb->bottom.port; - shost = adapter->scsi_host; - - if (fsf_req->data) - memcpy((struct fsf_qtcb_bottom_port*) fsf_req->data, bottom, - sizeof(struct fsf_qtcb_bottom_port)); - - if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) - fc_host_permanent_port_name(shost) = bottom->wwpn; - else - fc_host_permanent_port_name(shost) = fc_host_port_name(shost); - fc_host_maxframe_size(shost) = bottom->maximum_frame_size; - fc_host_supported_speeds(shost) = bottom->supported_speed; -} - -/** - * zfcp_fsf_exchange_port_data_handler - handler for exchange_port_data request - * @fsf_req: pointer to struct zfcp_fsf_req - */ -static void -zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *fsf_req) -{ - struct zfcp_adapter *adapter; - struct fsf_qtcb *qtcb; - - adapter = fsf_req->adapter; - qtcb = fsf_req->qtcb; - - if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) - return; - - switch (qtcb->header.fsf_status) { - case FSF_GOOD: - zfcp_fsf_exchange_port_evaluate(fsf_req, 1); - atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status); - break; - case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: - zfcp_fsf_exchange_port_evaluate(fsf_req, 0); - atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status); - zfcp_fsf_link_down_info_eval(fsf_req, 43, - &qtcb->header.fsf_status_qual.link_down_info); - break; - } -} - - -/* - * function: zfcp_fsf_open_port - * - * purpose: - * - * returns: address of initiated FSF request - * NULL - request could not be initiated - */ -int -zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) -{ - volatile struct qdio_buffer_element *sbale; - struct zfcp_fsf_req *fsf_req; - unsigned long lock_flags; - int retval = 0; - - /* setup new FSF request */ - retval = zfcp_fsf_req_create(erp_action->adapter, - FSF_QTCB_OPEN_PORT_WITH_DID, - ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP, - erp_action->adapter->pool.fsf_req_erp, - &lock_flags, &fsf_req); - if (retval < 0) { - ZFCP_LOG_INFO("error: 
Could not create open port request " - "for port 0x%016Lx on adapter %s.\n", - erp_action->port->wwpn, - zfcp_get_busid_by_adapter(erp_action->adapter)); - goto out; - } - - sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); - sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; - sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; - - fsf_req->qtcb->bottom.support.d_id = erp_action->port->d_id; - atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->port->status); - fsf_req->data = (unsigned long) erp_action->port; - fsf_req->erp_action = erp_action; - erp_action->fsf_req = fsf_req; - - zfcp_erp_start_timer(fsf_req); - retval = zfcp_fsf_req_send(fsf_req); - if (retval) { - ZFCP_LOG_INFO("error: Could not send open port request for " - "port 0x%016Lx on adapter %s.\n", - erp_action->port->wwpn, - zfcp_get_busid_by_adapter(erp_action->adapter)); - zfcp_fsf_req_free(fsf_req); - erp_action->fsf_req = NULL; - goto out; - } + req->handler = zfcp_fsf_exchange_port_data_handler; + zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); + retval = zfcp_fsf_req_send(req); +out: + spin_unlock(&adapter->req_q.lock); + if (!retval) + wait_event(req->completion_wq, + req->status & ZFCP_STATUS_FSFREQ_COMPLETED); + zfcp_fsf_req_free(req); - ZFCP_LOG_DEBUG("open port request initiated " - "(adapter %s, port 0x%016Lx)\n", - zfcp_get_busid_by_adapter(erp_action->adapter), - erp_action->port->wwpn); - out: - write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock, - lock_flags); return retval; } -/* - * function: zfcp_fsf_open_port_handler - * - * purpose: is called for finished Open Port command - * - * returns: - */ -static int -zfcp_fsf_open_port_handler(struct zfcp_fsf_req *fsf_req) +static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req) { - int retval = -EINVAL; - struct zfcp_port *port; + struct zfcp_port *port = req->data; + struct fsf_qtcb_header *header = &req->qtcb->header; struct fsf_plogi *plogi; - struct fsf_qtcb_header *header; - u16 subtable, rule, counter; - port = (struct zfcp_port *) fsf_req->data; - header = &fsf_req->qtcb->header; - - if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) { - /* don't change port status in our bookkeeping */ + if (req->status & ZFCP_STATUS_FSFREQ_ERROR) goto skip_fsfstatus; - } - /* evaluate FSF status in QTCB */ switch (header->fsf_status) { - case FSF_PORT_ALREADY_OPEN: - ZFCP_LOG_NORMAL("bug: remote port 0x%016Lx on adapter %s " - "is already open.\n", - port->wwpn, zfcp_get_busid_by_port(port)); - /* - * This is a bug, however operation should continue normally - * if it is simply ignored - */ break; - case FSF_ACCESS_DENIED: - ZFCP_LOG_NORMAL("Access denied, cannot open port 0x%016Lx " - "on adapter %s\n", - port->wwpn, zfcp_get_busid_by_port(port)); - for (counter = 0; counter < 2; counter++) { - subtable = header->fsf_status_qual.halfword[counter * 2]; - rule = header->fsf_status_qual.halfword[counter * 2 + 1]; - switch (subtable) { - case FSF_SQ_CFDC_SUBTABLE_OS: - case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN: - case FSF_SQ_CFDC_SUBTABLE_PORT_DID: - case FSF_SQ_CFDC_SUBTABLE_LUN: - ZFCP_LOG_INFO("Access denied (%s rule %d)\n", - zfcp_act_subtable_type[subtable], rule); - break; - } - } - zfcp_erp_port_access_denied(port, 57, fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; + zfcp_fsf_access_denied_port(req, port); break; - case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED: - ZFCP_LOG_INFO("error: The FSF adapter is out of resources. " - "The remote port 0x%016Lx on adapter %s " - "could not be opened. 
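/*
 * Aside, not part of this patch: the two synchronous exchange-data helpers
 * rewritten above (zfcp_fsf_exchange_config_data_sync and
 * zfcp_fsf_exchange_port_data_sync) share one shape in the new code: issue
 * the request under the queue lock, then wait for completion outside of it.
 * The condensed sketch below only illustrates that shape; the function name
 * is made up, the NULL/err-pointer guard is a simplification, and the
 * request-specific SBALE flags and QTCB bottom setup are omitted.
 */
static int zfcp_fsf_exchange_sync_sketch(struct zfcp_adapter *adapter,
					 u32 fsf_cmd, void *data,
					 void (*handler)(struct zfcp_fsf_req *))
{
	struct zfcp_fsf_req *req = NULL;
	int retval = -EIO;

	spin_lock(&adapter->req_q.lock);
	if (zfcp_fsf_req_sbal_get(adapter))	/* no SBAL available */
		goto out;

	/* flags 0: no automatic cleanup, caller frees; no mempool */
	req = zfcp_fsf_req_create(adapter, fsf_cmd, 0, NULL);
	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		req = NULL;			/* nothing to free below */
		goto out;
	}

	req->handler = handler;
	if (data)
		req->data = data;		/* caller-provided buffer for the QTCB bottom */

	/* request-specific SBALE flags and QTCB bottom setup omitted here */

	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
	retval = zfcp_fsf_req_send(req);
out:
	spin_unlock(&adapter->req_q.lock);
	if (!retval)				/* wait for the response outside the lock */
		wait_event(req->completion_wq,
			   req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
	if (req)
		zfcp_fsf_req_free(req);
	return retval;
}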
Disabling it.\n", - port->wwpn, zfcp_get_busid_by_port(port)); - zfcp_erp_port_failed(port, 31, fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; + dev_warn(&req->adapter->ccw_device->dev, + "The adapter is out of resources. The remote port " + "0x%016Lx could not be opened, disabling it.\n", + port->wwpn); + zfcp_erp_port_failed(port, 31, req); + req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; - case FSF_ADAPTER_STATUS_AVAILABLE: switch (header->fsf_status_qual.word[0]) { case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: - /* ERP strategy will escalate */ - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - break; case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: - /* ERP strategy will escalate */ - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; + req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_SQ_NO_RETRY_POSSIBLE: - ZFCP_LOG_NORMAL("The remote port 0x%016Lx on " - "adapter %s could not be opened. " - "Disabling it.\n", - port->wwpn, - zfcp_get_busid_by_port(port)); - zfcp_erp_port_failed(port, 32, fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - break; - default: - ZFCP_LOG_NORMAL - ("bug: Wrong status qualifier 0x%x arrived.\n", - header->fsf_status_qual.word[0]); + dev_warn(&req->adapter->ccw_device->dev, + "The remote port 0x%016Lx could not be " + "opened. Disabling it.\n", port->wwpn); + zfcp_erp_port_failed(port, 32, req); + req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; } break; - case FSF_GOOD: - /* save port handle assigned by FSF */ port->handle = header->port_handle; - ZFCP_LOG_INFO("The remote port 0x%016Lx via adapter %s " - "was opened, it's port handle is 0x%x\n", - port->wwpn, zfcp_get_busid_by_port(port), - port->handle); - /* mark port as open */ atomic_set_mask(ZFCP_STATUS_COMMON_OPEN | ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED | ZFCP_STATUS_COMMON_ACCESS_BOXED, &port->status); - retval = 0; /* check whether D_ID has changed during open */ /* * FIXME: This check is not airtight, as the FCP channel does @@ -2526,320 +1496,168 @@ zfcp_fsf_open_port_handler(struct zfcp_fsf_req *fsf_req) * another GID_PN straight after a port has been opened. * Alternately, an ADISC/PDISC ELS should suffice, as well. 
*/ - plogi = (struct fsf_plogi *) fsf_req->qtcb->bottom.support.els; - if (!atomic_test_mask(ZFCP_STATUS_PORT_NO_WWPN, &port->status)) - { - if (fsf_req->qtcb->bottom.support.els1_length < - sizeof (struct fsf_plogi)) { - ZFCP_LOG_INFO( - "warning: insufficient length of " - "PLOGI payload (%i)\n", - fsf_req->qtcb->bottom.support.els1_length); - /* skip sanity check and assume wwpn is ok */ - } else { - if (plogi->serv_param.wwpn != port->wwpn) { - ZFCP_LOG_INFO("warning: d_id of port " - "0x%016Lx changed during " - "open\n", port->wwpn); - atomic_clear_mask( - ZFCP_STATUS_PORT_DID_DID, - &port->status); - } else { - port->wwnn = plogi->serv_param.wwnn; - zfcp_plogi_evaluate(port, plogi); - } + if (atomic_read(&port->status) & ZFCP_STATUS_PORT_NO_WWPN) + break; + + plogi = (struct fsf_plogi *) req->qtcb->bottom.support.els; + if (req->qtcb->bottom.support.els1_length >= sizeof(*plogi)) { + if (plogi->serv_param.wwpn != port->wwpn) + atomic_clear_mask(ZFCP_STATUS_PORT_DID_DID, + &port->status); + else { + port->wwnn = plogi->serv_param.wwnn; + zfcp_fc_plogi_evaluate(port, plogi); } } break; - case FSF_UNKNOWN_OP_SUBTYPE: - /* should never occure, subtype not set in zfcp_fsf_open_port */ - ZFCP_LOG_INFO("unknown operation subtype (adapter: %s, " - "op_subtype=0x%x)\n", - zfcp_get_busid_by_port(port), - fsf_req->qtcb->bottom.support.operation_subtype); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - break; - - default: - ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented " - "(debug info 0x%x)\n", - header->fsf_status); + req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; } - skip_fsfstatus: +skip_fsfstatus: atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING, &port->status); - return retval; } -/* - * function: zfcp_fsf_close_port - * - * purpose: submit FSF command "close port" - * - * returns: address of initiated FSF request - * NULL - request could not be initiated +/** + * zfcp_fsf_open_port - create and send open port request + * @erp_action: pointer to struct zfcp_erp_action + * Returns: 0 on success, error otherwise */ -int -zfcp_fsf_close_port(struct zfcp_erp_action *erp_action) +int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) { volatile struct qdio_buffer_element *sbale; - struct zfcp_fsf_req *fsf_req; - unsigned long lock_flags; - int retval = 0; - - /* setup new FSF request */ - retval = zfcp_fsf_req_create(erp_action->adapter, - FSF_QTCB_CLOSE_PORT, - ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP, - erp_action->adapter->pool.fsf_req_erp, - &lock_flags, &fsf_req); - if (retval < 0) { - ZFCP_LOG_INFO("error: Could not create a close port request " - "for port 0x%016Lx on adapter %s.\n", - erp_action->port->wwpn, - zfcp_get_busid_by_adapter(erp_action->adapter)); + struct zfcp_adapter *adapter = erp_action->adapter; + struct zfcp_fsf_req *req; + int retval = -EIO; + + spin_lock(&adapter->req_q.lock); + if (zfcp_fsf_req_sbal_get(adapter)) + goto out; + + req = zfcp_fsf_req_create(adapter, + FSF_QTCB_OPEN_PORT_WITH_DID, + ZFCP_REQ_AUTO_CLEANUP, + adapter->pool.fsf_req_erp); + if (unlikely(IS_ERR(req))) { + retval = PTR_ERR(req); goto out; } - sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); + sbale = zfcp_qdio_sbale_req(req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; - atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->port->status); - fsf_req->data = (unsigned long) erp_action->port; - fsf_req->erp_action = erp_action; - fsf_req->qtcb->header.port_handle = erp_action->port->handle; - fsf_req->erp_action = erp_action; - 
erp_action->fsf_req = fsf_req; - - zfcp_erp_start_timer(fsf_req); - retval = zfcp_fsf_req_send(fsf_req); + req->handler = zfcp_fsf_open_port_handler; + req->qtcb->bottom.support.d_id = erp_action->port->d_id; + req->data = erp_action->port; + req->erp_action = erp_action; + erp_action->fsf_req = req; + atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->port->status); + + zfcp_fsf_start_erp_timer(req); + retval = zfcp_fsf_req_send(req); if (retval) { - ZFCP_LOG_INFO("error: Could not send a close port request for " - "port 0x%016Lx on adapter %s.\n", - erp_action->port->wwpn, - zfcp_get_busid_by_adapter(erp_action->adapter)); - zfcp_fsf_req_free(fsf_req); + zfcp_fsf_req_free(req); erp_action->fsf_req = NULL; - goto out; } - - ZFCP_LOG_TRACE("close port request initiated " - "(adapter %s, port 0x%016Lx)\n", - zfcp_get_busid_by_adapter(erp_action->adapter), - erp_action->port->wwpn); - out: - write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock, - lock_flags); +out: + spin_unlock(&adapter->req_q.lock); return retval; } -/* - * function: zfcp_fsf_close_port_handler - * - * purpose: is called for finished Close Port FSF command - * - * returns: - */ -static int -zfcp_fsf_close_port_handler(struct zfcp_fsf_req *fsf_req) +static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req) { - int retval = -EINVAL; - struct zfcp_port *port; + struct zfcp_port *port = req->data; - port = (struct zfcp_port *) fsf_req->data; - - if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) { - /* don't change port status in our bookkeeping */ + if (req->status & ZFCP_STATUS_FSFREQ_ERROR) goto skip_fsfstatus; - } - - /* evaluate FSF status in QTCB */ - switch (fsf_req->qtcb->header.fsf_status) { + switch (req->qtcb->header.fsf_status) { case FSF_PORT_HANDLE_NOT_VALID: - ZFCP_LOG_INFO("Temporary port identifier 0x%x for port " - "0x%016Lx on adapter %s invalid. This may happen " - "occasionally.\n", port->handle, - port->wwpn, zfcp_get_busid_by_port(port)); - ZFCP_LOG_DEBUG("status qualifier:\n"); - ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, - (char *) &fsf_req->qtcb->header.fsf_status_qual, - sizeof (union fsf_status_qual)); - zfcp_erp_adapter_reopen(port->adapter, 0, 107, fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; + zfcp_erp_adapter_reopen(port->adapter, 0, 107, req); + req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; - case FSF_ADAPTER_STATUS_AVAILABLE: - /* Note: FSF has actually closed the port in this case. - * The status code is just daft. 
Fingers crossed for a change - */ - retval = 0; break; - case FSF_GOOD: - ZFCP_LOG_TRACE("remote port 0x016%Lx on adapter %s closed, " - "port handle 0x%x\n", port->wwpn, - zfcp_get_busid_by_port(port), port->handle); - zfcp_erp_modify_port_status(port, 33, fsf_req, + zfcp_erp_modify_port_status(port, 33, req, ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR); - retval = 0; - break; - - default: - ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented " - "(debug info 0x%x)\n", - fsf_req->qtcb->header.fsf_status); break; } - skip_fsfstatus: +skip_fsfstatus: atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING, &port->status); - return retval; } -/* - * function: zfcp_fsf_close_physical_port - * - * purpose: submit FSF command "close physical port" - * - * returns: address of initiated FSF request - * NULL - request could not be initiated +/** + * zfcp_fsf_close_port - create and send close port request + * @erp_action: pointer to struct zfcp_erp_action + * Returns: 0 on success, error otherwise */ -int -zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action) +int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action) { volatile struct qdio_buffer_element *sbale; - struct zfcp_fsf_req *fsf_req; - unsigned long lock_flags; - int retval = 0; - - /* setup new FSF request */ - retval = zfcp_fsf_req_create(erp_action->adapter, - FSF_QTCB_CLOSE_PHYSICAL_PORT, - ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP, - erp_action->adapter->pool.fsf_req_erp, - &lock_flags, &fsf_req); - if (retval < 0) { - ZFCP_LOG_INFO("error: Could not create close physical port " - "request (adapter %s, port 0x%016Lx)\n", - zfcp_get_busid_by_adapter(erp_action->adapter), - erp_action->port->wwpn); + struct zfcp_adapter *adapter = erp_action->adapter; + struct zfcp_fsf_req *req; + int retval = -EIO; + spin_lock(&adapter->req_q.lock); + if (zfcp_fsf_req_sbal_get(adapter)) + goto out; + + req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PORT, + ZFCP_REQ_AUTO_CLEANUP, + adapter->pool.fsf_req_erp); + if (unlikely(IS_ERR(req))) { + retval = PTR_ERR(req); goto out; } - sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); + sbale = zfcp_qdio_sbale_req(req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; - /* mark port as being closed */ - atomic_set_mask(ZFCP_STATUS_PORT_PHYS_CLOSING, - &erp_action->port->status); - /* save a pointer to this port */ - fsf_req->data = (unsigned long) erp_action->port; - fsf_req->qtcb->header.port_handle = erp_action->port->handle; - fsf_req->erp_action = erp_action; - erp_action->fsf_req = fsf_req; - - zfcp_erp_start_timer(fsf_req); - retval = zfcp_fsf_req_send(fsf_req); + req->handler = zfcp_fsf_close_port_handler; + req->data = erp_action->port; + req->erp_action = erp_action; + req->qtcb->header.port_handle = erp_action->port->handle; + erp_action->fsf_req = req; + atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->port->status); + + zfcp_fsf_start_erp_timer(req); + retval = zfcp_fsf_req_send(req); if (retval) { - ZFCP_LOG_INFO("error: Could not send close physical port " - "request (adapter %s, port 0x%016Lx)\n", - zfcp_get_busid_by_adapter(erp_action->adapter), - erp_action->port->wwpn); - zfcp_fsf_req_free(fsf_req); + zfcp_fsf_req_free(req); erp_action->fsf_req = NULL; - goto out; } - - ZFCP_LOG_TRACE("close physical port request initiated " - "(adapter %s, port 0x%016Lx)\n", - zfcp_get_busid_by_adapter(erp_action->adapter), - erp_action->port->wwpn); - out: - write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock, - lock_flags); +out: + 
spin_unlock(&adapter->req_q.lock); return retval; } -/* - * function: zfcp_fsf_close_physical_port_handler - * - * purpose: is called for finished Close Physical Port FSF command - * - * returns: - */ -static int -zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *fsf_req) +static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req) { - int retval = -EINVAL; - struct zfcp_port *port; + struct zfcp_port *port = req->data; + struct fsf_qtcb_header *header = &req->qtcb->header; struct zfcp_unit *unit; - struct fsf_qtcb_header *header; - u16 subtable, rule, counter; - port = (struct zfcp_port *) fsf_req->data; - header = &fsf_req->qtcb->header; - - if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) { - /* don't change port status in our bookkeeping */ + if (req->status & ZFCP_STATUS_FSFREQ_ERROR) goto skip_fsfstatus; - } - /* evaluate FSF status in QTCB */ switch (header->fsf_status) { - case FSF_PORT_HANDLE_NOT_VALID: - ZFCP_LOG_INFO("Temporary port identifier 0x%x invalid" - "(adapter %s, port 0x%016Lx). " - "This may happen occasionally.\n", - port->handle, - zfcp_get_busid_by_port(port), - port->wwpn); - ZFCP_LOG_DEBUG("status qualifier:\n"); - ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, - (char *) &header->fsf_status_qual, - sizeof (union fsf_status_qual)); - zfcp_erp_adapter_reopen(port->adapter, 0, 108, fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; + zfcp_erp_adapter_reopen(port->adapter, 0, 108, req); + req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; - case FSF_ACCESS_DENIED: - ZFCP_LOG_NORMAL("Access denied, cannot close " - "physical port 0x%016Lx on adapter %s\n", - port->wwpn, zfcp_get_busid_by_port(port)); - for (counter = 0; counter < 2; counter++) { - subtable = header->fsf_status_qual.halfword[counter * 2]; - rule = header->fsf_status_qual.halfword[counter * 2 + 1]; - switch (subtable) { - case FSF_SQ_CFDC_SUBTABLE_OS: - case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN: - case FSF_SQ_CFDC_SUBTABLE_PORT_DID: - case FSF_SQ_CFDC_SUBTABLE_LUN: - ZFCP_LOG_INFO("Access denied (%s rule %d)\n", - zfcp_act_subtable_type[subtable], rule); - break; - } - } - zfcp_erp_port_access_denied(port, 58, fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; + zfcp_fsf_access_denied_port(req, port); break; - case FSF_PORT_BOXED: - ZFCP_LOG_DEBUG("The remote port 0x%016Lx on adapter " - "%s needs to be reopened but it was attempted " - "to close it physically.\n", - port->wwpn, - zfcp_get_busid_by_port(port)); - zfcp_erp_port_boxed(port, 50, fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR | - ZFCP_STATUS_FSFREQ_RETRY; - + zfcp_erp_port_boxed(port, 50, req); + req->status |= ZFCP_STATUS_FSFREQ_ERROR | + ZFCP_STATUS_FSFREQ_RETRY; /* can't use generic zfcp_erp_modify_port_status because * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */ atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); @@ -2847,154 +1665,88 @@ zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *fsf_req) atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status); break; - case FSF_ADAPTER_STATUS_AVAILABLE: switch (header->fsf_status_qual.word[0]) { case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: - /* This will now be escalated by ERP */ - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - break; + /* fall through */ case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: - /* ERP strategy will escalate */ - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - break; - default: - ZFCP_LOG_NORMAL - ("bug: Wrong status qualifier 0x%x arrived.\n", - header->fsf_status_qual.word[0]); + req->status |= ZFCP_STATUS_FSFREQ_ERROR; 
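/*
 * Aside, not part of this patch: every asynchronous, ERP-driven request in
 * the rewritten code (open/close port, close physical port, open/close unit,
 * exchange config/port data) follows the same submission pattern.  The
 * sketch below condenses it; the function name and the handler parameter are
 * illustrative assumptions, while every call mirrors the new code visible in
 * this hunk.
 */
static int zfcp_fsf_submit_erp_sketch(struct zfcp_erp_action *erp_action,
				      u32 fsf_cmd,
				      void (*handler)(struct zfcp_fsf_req *))
{
	struct zfcp_adapter *adapter = erp_action->adapter;
	struct zfcp_fsf_req *req;
	int retval = -EIO;

	spin_lock(&adapter->req_q.lock);	/* serialize access to the request queue */
	if (zfcp_fsf_req_sbal_get(adapter))	/* need a free SBAL before building the request */
		goto out;

	req = zfcp_fsf_req_create(adapter, fsf_cmd, ZFCP_REQ_AUTO_CLEANUP,
				  adapter->pool.fsf_req_erp);
	if (IS_ERR(req)) {			/* creation now reports errors via ERR_PTR */
		retval = PTR_ERR(req);
		goto out;
	}

	req->handler = handler;			/* per-request completion handler */
	req->erp_action = erp_action;		/* wire up error-recovery bookkeeping */
	erp_action->fsf_req = req;

	/* request-specific QTCB fields (handles, d_id, options) omitted here */

	zfcp_fsf_start_erp_timer(req);		/* guard against a response that never arrives */
	retval = zfcp_fsf_req_send(req);
	if (retval) {				/* undo bookkeeping if the send fails */
		zfcp_fsf_req_free(req);
		erp_action->fsf_req = NULL;
	}
out:
	spin_unlock(&adapter->req_q.lock);
	return retval;
}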
break; } break; - case FSF_GOOD: - ZFCP_LOG_DEBUG("Remote port 0x%016Lx via adapter %s " - "physically closed, port handle 0x%x\n", - port->wwpn, - zfcp_get_busid_by_port(port), port->handle); /* can't use generic zfcp_erp_modify_port_status because * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */ atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); list_for_each_entry(unit, &port->unit_list_head, list) - atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status); - retval = 0; - break; - - default: - ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented " - "(debug info 0x%x)\n", - header->fsf_status); + atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, + &unit->status); break; } - - skip_fsfstatus: +skip_fsfstatus: atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_CLOSING, &port->status); - return retval; } -/* - * function: zfcp_fsf_open_unit - * - * purpose: - * - * returns: - * - * assumptions: This routine does not check whether the associated - * remote port has already been opened. This should be - * done by calling routines. Otherwise some status - * may be presented by FSF +/** + * zfcp_fsf_close_physical_port - close physical port + * @erp_action: pointer to struct zfcp_erp_action + * Returns: 0 on success */ -int -zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action) +int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action) { volatile struct qdio_buffer_element *sbale; - struct zfcp_fsf_req *fsf_req; - unsigned long lock_flags; - int retval = 0; - - /* setup new FSF request */ - retval = zfcp_fsf_req_create(erp_action->adapter, - FSF_QTCB_OPEN_LUN, - ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP, - erp_action->adapter->pool.fsf_req_erp, - &lock_flags, &fsf_req); - if (retval < 0) { - ZFCP_LOG_INFO("error: Could not create open unit request for " - "unit 0x%016Lx on port 0x%016Lx on adapter %s.\n", - erp_action->unit->fcp_lun, - erp_action->unit->port->wwpn, - zfcp_get_busid_by_adapter(erp_action->adapter)); + struct zfcp_adapter *adapter = erp_action->adapter; + struct zfcp_fsf_req *req; + int retval = -EIO; + + spin_lock(&adapter->req_q.lock); + if (zfcp_fsf_req_sbal_get(adapter)) + goto out; + + req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PHYSICAL_PORT, + ZFCP_REQ_AUTO_CLEANUP, + adapter->pool.fsf_req_erp); + if (unlikely(IS_ERR(req))) { + retval = PTR_ERR(req); goto out; } - sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); - sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; - sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; + sbale = zfcp_qdio_sbale_req(req); + sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; + sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; - fsf_req->qtcb->header.port_handle = erp_action->port->handle; - fsf_req->qtcb->bottom.support.fcp_lun = erp_action->unit->fcp_lun; - if (!(erp_action->adapter->connection_features & FSF_FEATURE_NPIV_MODE)) - fsf_req->qtcb->bottom.support.option = - FSF_OPEN_LUN_SUPPRESS_BOXING; - atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->unit->status); - fsf_req->data = (unsigned long) erp_action->unit; - fsf_req->erp_action = erp_action; - erp_action->fsf_req = fsf_req; + req->data = erp_action->port; + req->qtcb->header.port_handle = erp_action->port->handle; + req->erp_action = erp_action; + req->handler = zfcp_fsf_close_physical_port_handler; + erp_action->fsf_req = req; + atomic_set_mask(ZFCP_STATUS_PORT_PHYS_CLOSING, + &erp_action->port->status); - zfcp_erp_start_timer(fsf_req); - retval = zfcp_fsf_req_send(erp_action->fsf_req); + zfcp_fsf_start_erp_timer(req); + retval = zfcp_fsf_req_send(req); if (retval) { - 
ZFCP_LOG_INFO("error: Could not send an open unit request " - "on the adapter %s, port 0x%016Lx for " - "unit 0x%016Lx\n", - zfcp_get_busid_by_adapter(erp_action->adapter), - erp_action->port->wwpn, - erp_action->unit->fcp_lun); - zfcp_fsf_req_free(fsf_req); + zfcp_fsf_req_free(req); erp_action->fsf_req = NULL; - goto out; } - - ZFCP_LOG_TRACE("Open LUN request initiated (adapter %s, " - "port 0x%016Lx, unit 0x%016Lx)\n", - zfcp_get_busid_by_adapter(erp_action->adapter), - erp_action->port->wwpn, erp_action->unit->fcp_lun); - out: - write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock, - lock_flags); +out: + spin_unlock(&adapter->req_q.lock); return retval; } -/* - * function: zfcp_fsf_open_unit_handler - * - * purpose: is called for finished Open LUN command - * - * returns: - */ -static int -zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *fsf_req) +static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req) { - int retval = -EINVAL; - struct zfcp_adapter *adapter; - struct zfcp_unit *unit; - struct fsf_qtcb_header *header; - struct fsf_qtcb_bottom_support *bottom; - struct fsf_queue_designator *queue_designator; - u16 subtable, rule, counter; + struct zfcp_adapter *adapter = req->adapter; + struct zfcp_unit *unit = req->data; + struct fsf_qtcb_header *header = &req->qtcb->header; + struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support; + struct fsf_queue_designator *queue_designator = + &header->fsf_status_qual.fsf_queue_designator; int exclusive, readwrite; - unit = (struct zfcp_unit *) fsf_req->data; - - if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) { - /* don't change unit status in our bookkeeping */ + if (req->status & ZFCP_STATUS_FSFREQ_ERROR) goto skip_fsfstatus; - } - - adapter = fsf_req->adapter; - header = &fsf_req->qtcb->header; - bottom = &fsf_req->qtcb->bottom.support; - queue_designator = &header->fsf_status_qual.fsf_queue_designator; atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED | ZFCP_STATUS_COMMON_ACCESS_BOXED | @@ -3002,155 +1754,65 @@ zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *fsf_req) ZFCP_STATUS_UNIT_READONLY, &unit->status); - /* evaluate FSF status in QTCB */ switch (header->fsf_status) { case FSF_PORT_HANDLE_NOT_VALID: - ZFCP_LOG_INFO("Temporary port identifier 0x%x " - "for port 0x%016Lx on adapter %s invalid " - "This may happen occasionally\n", - unit->port->handle, - unit->port->wwpn, zfcp_get_busid_by_unit(unit)); - ZFCP_LOG_DEBUG("status qualifier:\n"); - ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, - (char *) &header->fsf_status_qual, - sizeof (union fsf_status_qual)); - zfcp_erp_adapter_reopen(unit->port->adapter, 0, 109, fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - break; - + zfcp_erp_adapter_reopen(unit->port->adapter, 0, 109, req); + /* fall through */ case FSF_LUN_ALREADY_OPEN: - ZFCP_LOG_NORMAL("bug: Attempted to open unit 0x%016Lx on " - "remote port 0x%016Lx on adapter %s twice.\n", - unit->fcp_lun, - unit->port->wwpn, zfcp_get_busid_by_unit(unit)); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; - case FSF_ACCESS_DENIED: - ZFCP_LOG_NORMAL("Access denied, cannot open unit 0x%016Lx on " - "remote port 0x%016Lx on adapter %s\n", - unit->fcp_lun, unit->port->wwpn, - zfcp_get_busid_by_unit(unit)); - for (counter = 0; counter < 2; counter++) { - subtable = header->fsf_status_qual.halfword[counter * 2]; - rule = header->fsf_status_qual.halfword[counter * 2 + 1]; - switch (subtable) { - case FSF_SQ_CFDC_SUBTABLE_OS: - case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN: - case FSF_SQ_CFDC_SUBTABLE_PORT_DID: - 
case FSF_SQ_CFDC_SUBTABLE_LUN: - ZFCP_LOG_INFO("Access denied (%s rule %d)\n", - zfcp_act_subtable_type[subtable], rule); - break; - } - } - zfcp_erp_unit_access_denied(unit, 59, fsf_req); + zfcp_fsf_access_denied_unit(req, unit); atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status); - atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; + atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status); break; - case FSF_PORT_BOXED: - ZFCP_LOG_DEBUG("The remote port 0x%016Lx on adapter %s " - "needs to be reopened\n", - unit->port->wwpn, zfcp_get_busid_by_unit(unit)); - zfcp_erp_port_boxed(unit->port, 51, fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR | - ZFCP_STATUS_FSFREQ_RETRY; + zfcp_erp_port_boxed(unit->port, 51, req); + req->status |= ZFCP_STATUS_FSFREQ_ERROR | + ZFCP_STATUS_FSFREQ_RETRY; break; - case FSF_LUN_SHARING_VIOLATION: - if (header->fsf_status_qual.word[0] != 0) { - ZFCP_LOG_NORMAL("FCP-LUN 0x%Lx at the remote port " - "with WWPN 0x%Lx " - "connected to the adapter %s " - "is already in use in LPAR%d, CSS%d\n", - unit->fcp_lun, - unit->port->wwpn, - zfcp_get_busid_by_unit(unit), - queue_designator->hla, - queue_designator->cssid); - } else { - subtable = header->fsf_status_qual.halfword[4]; - rule = header->fsf_status_qual.halfword[5]; - switch (subtable) { - case FSF_SQ_CFDC_SUBTABLE_OS: - case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN: - case FSF_SQ_CFDC_SUBTABLE_PORT_DID: - case FSF_SQ_CFDC_SUBTABLE_LUN: - ZFCP_LOG_NORMAL("Access to FCP-LUN 0x%Lx at the " - "remote port with WWPN 0x%Lx " - "connected to the adapter %s " - "is denied (%s rule %d)\n", - unit->fcp_lun, - unit->port->wwpn, - zfcp_get_busid_by_unit(unit), - zfcp_act_subtable_type[subtable], - rule); - break; - } - } - ZFCP_LOG_DEBUG("status qualifier:\n"); - ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, - (char *) &header->fsf_status_qual, - sizeof (union fsf_status_qual)); - zfcp_erp_unit_access_denied(unit, 60, fsf_req); + if (header->fsf_status_qual.word[0]) + dev_warn(&adapter->ccw_device->dev, + "FCP-LUN 0x%Lx at the remote port " + "with WWPN 0x%Lx " + "connected to the adapter " + "is already in use in LPAR%d, CSS%d.\n", + unit->fcp_lun, + unit->port->wwpn, + queue_designator->hla, + queue_designator->cssid); + else + zfcp_act_eval_err(adapter, + header->fsf_status_qual.word[2]); + zfcp_erp_unit_access_denied(unit, 60, req); atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status); atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; + req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; - case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED: - ZFCP_LOG_INFO("error: The adapter ran out of resources. " - "There is no handle (temporary port identifier) " - "available for unit 0x%016Lx on port 0x%016Lx " - "on adapter %s\n", - unit->fcp_lun, - unit->port->wwpn, - zfcp_get_busid_by_unit(unit)); - zfcp_erp_unit_failed(unit, 34, fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; + dev_warn(&adapter->ccw_device->dev, + "The adapter ran out of resources. 
There is no " + "handle available for unit 0x%016Lx on port 0x%016Lx.", + unit->fcp_lun, unit->port->wwpn); + zfcp_erp_unit_failed(unit, 34, req); + /* fall through */ + case FSF_INVALID_COMMAND_OPTION: + req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; - case FSF_ADAPTER_STATUS_AVAILABLE: switch (header->fsf_status_qual.word[0]) { case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: - /* Re-establish link to port */ zfcp_test_link(unit->port); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - break; + /* fall through */ case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: - /* ERP strategy will escalate */ - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; + req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; - default: - ZFCP_LOG_NORMAL - ("bug: Wrong status qualifier 0x%x arrived.\n", - header->fsf_status_qual.word[0]); } break; - case FSF_INVALID_COMMAND_OPTION: - ZFCP_LOG_NORMAL( - "Invalid option 0x%x has been specified " - "in QTCB bottom sent to the adapter %s\n", - bottom->option, - zfcp_get_busid_by_adapter(adapter)); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - retval = -EINVAL; - break; - case FSF_GOOD: - /* save LUN handle assigned by FSF */ unit->handle = header->lun_handle; - ZFCP_LOG_TRACE("unit 0x%016Lx on remote port 0x%016Lx on " - "adapter %s opened, port handle 0x%x\n", - unit->fcp_lun, - unit->port->wwpn, - zfcp_get_busid_by_unit(unit), - unit->handle); - /* mark unit as open */ atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status); if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE) && @@ -3168,1528 +1830,629 @@ zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *fsf_req) if (!readwrite) { atomic_set_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status); - ZFCP_LOG_NORMAL("read-only access for unit " - "(adapter %s, wwpn=0x%016Lx, " - "fcp_lun=0x%016Lx)\n", - zfcp_get_busid_by_unit(unit), - unit->port->wwpn, - unit->fcp_lun); + dev_info(&adapter->ccw_device->dev, + "Read-only access for unit 0x%016Lx " + "on port 0x%016Lx.\n", + unit->fcp_lun, unit->port->wwpn); } if (exclusive && !readwrite) { - ZFCP_LOG_NORMAL("exclusive access of read-only " - "unit not supported\n"); - zfcp_erp_unit_failed(unit, 35, fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - zfcp_erp_unit_shutdown(unit, 0, 80, fsf_req); + dev_err(&adapter->ccw_device->dev, + "Exclusive access of read-only unit " + "0x%016Lx on port 0x%016Lx not " + "supported, disabling unit.\n", + unit->fcp_lun, unit->port->wwpn); + zfcp_erp_unit_failed(unit, 35, req); + req->status |= ZFCP_STATUS_FSFREQ_ERROR; + zfcp_erp_unit_shutdown(unit, 0, 80, req); } else if (!exclusive && readwrite) { - ZFCP_LOG_NORMAL("shared access of read-write " - "unit not supported\n"); - zfcp_erp_unit_failed(unit, 36, fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - zfcp_erp_unit_shutdown(unit, 0, 81, fsf_req); + dev_err(&adapter->ccw_device->dev, + "Shared access of read-write unit " + "0x%016Lx on port 0x%016Lx not " + "supported, disabling unit.\n", + unit->fcp_lun, unit->port->wwpn); + zfcp_erp_unit_failed(unit, 36, req); + req->status |= ZFCP_STATUS_FSFREQ_ERROR; + zfcp_erp_unit_shutdown(unit, 0, 81, req); } } - - retval = 0; - break; - - default: - ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented " - "(debug info 0x%x)\n", - header->fsf_status); break; } - skip_fsfstatus: +skip_fsfstatus: atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING, &unit->status); - return retval; } -/* - * function: zfcp_fsf_close_unit - * - * purpose: - * - * returns: address of fsf_req - request successfully initiated - * NULL - - * - * assumptions: This routine does 
not check whether the associated - * remote port/lun has already been opened. This should be - * done by calling routines. Otherwise some status - * may be presented by FSF +/** + * zfcp_fsf_open_unit - open unit + * @erp_action: pointer to struct zfcp_erp_action + * Returns: 0 on success, error otherwise */ -int -zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) +int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action) { volatile struct qdio_buffer_element *sbale; - struct zfcp_fsf_req *fsf_req; - unsigned long lock_flags; - int retval = 0; - - /* setup new FSF request */ - retval = zfcp_fsf_req_create(erp_action->adapter, - FSF_QTCB_CLOSE_LUN, - ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP, - erp_action->adapter->pool.fsf_req_erp, - &lock_flags, &fsf_req); - if (retval < 0) { - ZFCP_LOG_INFO("error: Could not create close unit request for " - "unit 0x%016Lx on port 0x%016Lx on adapter %s.\n", - erp_action->unit->fcp_lun, - erp_action->port->wwpn, - zfcp_get_busid_by_adapter(erp_action->adapter)); + struct zfcp_adapter *adapter = erp_action->adapter; + struct zfcp_fsf_req *req; + int retval = -EIO; + + spin_lock(&adapter->req_q.lock); + if (zfcp_fsf_req_sbal_get(adapter)) + goto out; + + req = zfcp_fsf_req_create(adapter, FSF_QTCB_OPEN_LUN, + ZFCP_REQ_AUTO_CLEANUP, + adapter->pool.fsf_req_erp); + if (unlikely(IS_ERR(req))) { + retval = PTR_ERR(req); goto out; } - sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); + sbale = zfcp_qdio_sbale_req(req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; - fsf_req->qtcb->header.port_handle = erp_action->port->handle; - fsf_req->qtcb->header.lun_handle = erp_action->unit->handle; - atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->unit->status); - fsf_req->data = (unsigned long) erp_action->unit; - fsf_req->erp_action = erp_action; - erp_action->fsf_req = fsf_req; + req->qtcb->header.port_handle = erp_action->port->handle; + req->qtcb->bottom.support.fcp_lun = erp_action->unit->fcp_lun; + req->handler = zfcp_fsf_open_unit_handler; + req->data = erp_action->unit; + req->erp_action = erp_action; + erp_action->fsf_req = req; - zfcp_erp_start_timer(fsf_req); - retval = zfcp_fsf_req_send(erp_action->fsf_req); + if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) + req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING; + + atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->unit->status); + + zfcp_fsf_start_erp_timer(req); + retval = zfcp_fsf_req_send(req); if (retval) { - ZFCP_LOG_INFO("error: Could not send a close unit request for " - "unit 0x%016Lx on port 0x%016Lx onadapter %s.\n", - erp_action->unit->fcp_lun, - erp_action->port->wwpn, - zfcp_get_busid_by_adapter(erp_action->adapter)); - zfcp_fsf_req_free(fsf_req); + zfcp_fsf_req_free(req); erp_action->fsf_req = NULL; - goto out; } - - ZFCP_LOG_TRACE("Close LUN request initiated (adapter %s, " - "port 0x%016Lx, unit 0x%016Lx)\n", - zfcp_get_busid_by_adapter(erp_action->adapter), - erp_action->port->wwpn, erp_action->unit->fcp_lun); - out: - write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock, - lock_flags); +out: + spin_unlock(&adapter->req_q.lock); return retval; } -/* - * function: zfcp_fsf_close_unit_handler - * - * purpose: is called for finished Close LUN FSF command - * - * returns: - */ -static int -zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *fsf_req) +static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req) { - int retval = -EINVAL; - struct zfcp_unit *unit; - - unit = (struct 
zfcp_unit *) fsf_req->data; + struct zfcp_unit *unit = req->data; - if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) { - /* don't change unit status in our bookkeeping */ + if (req->status & ZFCP_STATUS_FSFREQ_ERROR) goto skip_fsfstatus; - } - - /* evaluate FSF status in QTCB */ - switch (fsf_req->qtcb->header.fsf_status) { + switch (req->qtcb->header.fsf_status) { case FSF_PORT_HANDLE_NOT_VALID: - ZFCP_LOG_INFO("Temporary port identifier 0x%x for port " - "0x%016Lx on adapter %s invalid. This may " - "happen in rare circumstances\n", - unit->port->handle, - unit->port->wwpn, - zfcp_get_busid_by_unit(unit)); - ZFCP_LOG_DEBUG("status qualifier:\n"); - ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, - (char *) &fsf_req->qtcb->header.fsf_status_qual, - sizeof (union fsf_status_qual)); - zfcp_erp_adapter_reopen(unit->port->adapter, 0, 110, fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; + zfcp_erp_adapter_reopen(unit->port->adapter, 0, 110, req); + req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; - case FSF_LUN_HANDLE_NOT_VALID: - ZFCP_LOG_INFO("Temporary LUN identifier 0x%x of unit " - "0x%016Lx on port 0x%016Lx on adapter %s is " - "invalid. This may happen occasionally.\n", - unit->handle, - unit->fcp_lun, - unit->port->wwpn, - zfcp_get_busid_by_unit(unit)); - ZFCP_LOG_DEBUG("Status qualifier data:\n"); - ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, - (char *) &fsf_req->qtcb->header.fsf_status_qual, - sizeof (union fsf_status_qual)); - zfcp_erp_port_reopen(unit->port, 0, 111, fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; + zfcp_erp_port_reopen(unit->port, 0, 111, req); + req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; - case FSF_PORT_BOXED: - ZFCP_LOG_DEBUG("The remote port 0x%016Lx on adapter %s " - "needs to be reopened\n", - unit->port->wwpn, - zfcp_get_busid_by_unit(unit)); - zfcp_erp_port_boxed(unit->port, 52, fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR | - ZFCP_STATUS_FSFREQ_RETRY; + zfcp_erp_port_boxed(unit->port, 52, req); + req->status |= ZFCP_STATUS_FSFREQ_ERROR | + ZFCP_STATUS_FSFREQ_RETRY; break; - case FSF_ADAPTER_STATUS_AVAILABLE: - switch (fsf_req->qtcb->header.fsf_status_qual.word[0]) { + switch (req->qtcb->header.fsf_status_qual.word[0]) { case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: - /* re-establish link to port */ zfcp_test_link(unit->port); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - break; + /* fall through */ case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: - /* ERP strategy will escalate */ - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - break; - default: - ZFCP_LOG_NORMAL - ("bug: Wrong status qualifier 0x%x arrived.\n", - fsf_req->qtcb->header.fsf_status_qual.word[0]); + req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; } break; - case FSF_GOOD: - ZFCP_LOG_TRACE("unit 0x%016Lx on port 0x%016Lx on adapter %s " - "closed, port handle 0x%x\n", - unit->fcp_lun, - unit->port->wwpn, - zfcp_get_busid_by_unit(unit), - unit->handle); - /* mark unit as closed */ atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status); - retval = 0; - break; - - default: - ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented " - "(debug info 0x%x)\n", - fsf_req->qtcb->header.fsf_status); break; } - - skip_fsfstatus: +skip_fsfstatus: atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING, &unit->status); - return retval; } /** - * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command) - * @adapter: adapter where scsi command is issued - * @unit: unit where command is sent to - * @scsi_cmnd: scsi command to be sent - * @timer: timer to be started when request is initiated - * 
@req_flags: flags for fsf_request + * zfcp_fsf_close_unit - close zfcp unit + * @erp_action: pointer to struct zfcp_unit + * Returns: 0 on success, error otherwise */ -int -zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter, - struct zfcp_unit *unit, - struct scsi_cmnd * scsi_cmnd, - int use_timer, int req_flags) +int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) { - struct zfcp_fsf_req *fsf_req = NULL; - struct fcp_cmnd_iu *fcp_cmnd_iu; - unsigned int sbtype; - unsigned long lock_flags; - int real_bytes = 0; - int retval = 0; - int mask; - - /* setup new FSF request */ - retval = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags, - adapter->pool.fsf_req_scsi, - &lock_flags, &fsf_req); - if (unlikely(retval < 0)) { - ZFCP_LOG_DEBUG("error: Could not create FCP command request " - "for unit 0x%016Lx on port 0x%016Lx on " - "adapter %s\n", - unit->fcp_lun, - unit->port->wwpn, - zfcp_get_busid_by_adapter(adapter)); - goto failed_req_create; - } - - if (unlikely(!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED, - &unit->status))) { - retval = -EBUSY; - goto unit_blocked; - } - - zfcp_unit_get(unit); - fsf_req->unit = unit; - - /* associate FSF request with SCSI request (for look up on abort) */ - scsi_cmnd->host_scribble = (unsigned char *) fsf_req->req_id; - - /* associate SCSI command with FSF request */ - fsf_req->data = (unsigned long) scsi_cmnd; - - /* set handles of unit and its parent port in QTCB */ - fsf_req->qtcb->header.lun_handle = unit->handle; - fsf_req->qtcb->header.port_handle = unit->port->handle; - - /* FSF does not define the structure of the FCP_CMND IU */ - fcp_cmnd_iu = (struct fcp_cmnd_iu *) - &(fsf_req->qtcb->bottom.io.fcp_cmnd); - - /* - * set depending on data direction: - * data direction bits in SBALE (SB Type) - * data direction bits in QTCB - * data direction bits in FCP_CMND IU - */ - switch (scsi_cmnd->sc_data_direction) { - case DMA_NONE: - fsf_req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND; - /* - * FIXME(qdio): - * what is the correct type for commands - * without 'real' data buffers? 
- */ - sbtype = SBAL_FLAGS0_TYPE_READ; - break; - case DMA_FROM_DEVICE: - fsf_req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ; - sbtype = SBAL_FLAGS0_TYPE_READ; - fcp_cmnd_iu->rddata = 1; - break; - case DMA_TO_DEVICE: - fsf_req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE; - sbtype = SBAL_FLAGS0_TYPE_WRITE; - fcp_cmnd_iu->wddata = 1; - break; - case DMA_BIDIRECTIONAL: - default: - /* - * dummy, catch this condition earlier - * in zfcp_scsi_queuecommand - */ - goto failed_scsi_cmnd; - } - - /* set FC service class in QTCB (3 per default) */ - fsf_req->qtcb->bottom.io.service_class = ZFCP_FC_SERVICE_CLASS_DEFAULT; - - /* set FCP_LUN in FCP_CMND IU in QTCB */ - fcp_cmnd_iu->fcp_lun = unit->fcp_lun; - - mask = ZFCP_STATUS_UNIT_READONLY | ZFCP_STATUS_UNIT_SHARED; - - /* set task attributes in FCP_CMND IU in QTCB */ - if (likely((scsi_cmnd->device->simple_tags) || - (atomic_test_mask(mask, &unit->status)))) - fcp_cmnd_iu->task_attribute = SIMPLE_Q; - else - fcp_cmnd_iu->task_attribute = UNTAGGED; - - /* set additional length of FCP_CDB in FCP_CMND IU in QTCB, if needed */ - if (unlikely(scsi_cmnd->cmd_len > FCP_CDB_LENGTH)) { - fcp_cmnd_iu->add_fcp_cdb_length - = (scsi_cmnd->cmd_len - FCP_CDB_LENGTH) >> 2; - ZFCP_LOG_TRACE("SCSI CDB length is 0x%x, " - "additional FCP_CDB length is 0x%x " - "(shifted right 2 bits)\n", - scsi_cmnd->cmd_len, - fcp_cmnd_iu->add_fcp_cdb_length); - } - /* - * copy SCSI CDB (including additional length, if any) to - * FCP_CDB in FCP_CMND IU in QTCB - */ - memcpy(fcp_cmnd_iu->fcp_cdb, scsi_cmnd->cmnd, scsi_cmnd->cmd_len); - - /* FCP CMND IU length in QTCB */ - fsf_req->qtcb->bottom.io.fcp_cmnd_length = - sizeof (struct fcp_cmnd_iu) + - fcp_cmnd_iu->add_fcp_cdb_length + sizeof (fcp_dl_t); + volatile struct qdio_buffer_element *sbale; + struct zfcp_adapter *adapter = erp_action->adapter; + struct zfcp_fsf_req *req; + int retval = -EIO; - /* generate SBALEs from data buffer */ - real_bytes = zfcp_qdio_sbals_from_scsicmnd(fsf_req, sbtype, scsi_cmnd); - if (unlikely(real_bytes < 0)) { - if (fsf_req->sbal_number < ZFCP_MAX_SBALS_PER_REQ) { - ZFCP_LOG_DEBUG( - "Data did not fit into available buffer(s), " - "waiting for more...\n"); - retval = -EIO; - } else { - ZFCP_LOG_NORMAL("error: No truncation implemented but " - "required. 
Shutting down unit " - "(adapter %s, port 0x%016Lx, " - "unit 0x%016Lx)\n", - zfcp_get_busid_by_unit(unit), - unit->port->wwpn, - unit->fcp_lun); - zfcp_erp_unit_shutdown(unit, 0, 131, fsf_req); - retval = -EINVAL; - } - goto no_fit; + spin_lock(&adapter->req_q.lock); + if (zfcp_fsf_req_sbal_get(adapter)) + goto out; + req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_LUN, + ZFCP_REQ_AUTO_CLEANUP, + adapter->pool.fsf_req_erp); + if (unlikely(IS_ERR(req))) { + retval = PTR_ERR(req); + goto out; } - /* set length of FCP data length in FCP_CMND IU in QTCB */ - zfcp_set_fcp_dl(fcp_cmnd_iu, real_bytes); + sbale = zfcp_qdio_sbale_req(req); + sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; + sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; - ZFCP_LOG_DEBUG("Sending SCSI command:\n"); - ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, - (char *) scsi_cmnd->cmnd, scsi_cmnd->cmd_len); + req->qtcb->header.port_handle = erp_action->port->handle; + req->qtcb->header.lun_handle = erp_action->unit->handle; + req->handler = zfcp_fsf_close_unit_handler; + req->data = erp_action->unit; + req->erp_action = erp_action; + erp_action->fsf_req = req; + atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->unit->status); - if (use_timer) - zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT); - - retval = zfcp_fsf_req_send(fsf_req); - if (unlikely(retval < 0)) { - ZFCP_LOG_INFO("error: Could not send FCP command request " - "on adapter %s, port 0x%016Lx, unit 0x%016Lx\n", - zfcp_get_busid_by_adapter(adapter), - unit->port->wwpn, - unit->fcp_lun); - goto send_failed; + zfcp_fsf_start_erp_timer(req); + retval = zfcp_fsf_req_send(req); + if (retval) { + zfcp_fsf_req_free(req); + erp_action->fsf_req = NULL; } - - ZFCP_LOG_TRACE("Send FCP Command initiated (adapter %s, " - "port 0x%016Lx, unit 0x%016Lx)\n", - zfcp_get_busid_by_adapter(adapter), - unit->port->wwpn, - unit->fcp_lun); - goto success; - - send_failed: - no_fit: - failed_scsi_cmnd: - zfcp_unit_put(unit); - unit_blocked: - zfcp_fsf_req_free(fsf_req); - fsf_req = NULL; - scsi_cmnd->host_scribble = NULL; - success: - failed_req_create: - write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags); +out: + spin_unlock(&adapter->req_q.lock); return retval; } -struct zfcp_fsf_req * -zfcp_fsf_send_fcp_command_task_management(struct zfcp_adapter *adapter, - struct zfcp_unit *unit, - u8 tm_flags, int req_flags) +static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat) { - struct zfcp_fsf_req *fsf_req = NULL; - int retval = 0; - struct fcp_cmnd_iu *fcp_cmnd_iu; - unsigned long lock_flags; - volatile struct qdio_buffer_element *sbale; - - /* setup new FSF request */ - retval = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags, - adapter->pool.fsf_req_scsi, - &lock_flags, &fsf_req); - if (retval < 0) { - ZFCP_LOG_INFO("error: Could not create FCP command (task " - "management) request for adapter %s, port " - " 0x%016Lx, unit 0x%016Lx.\n", - zfcp_get_busid_by_adapter(adapter), - unit->port->wwpn, unit->fcp_lun); - goto out; - } - - if (unlikely(!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED, - &unit->status))) - goto unit_blocked; - - /* - * Used to decide on proper handler in the return path, - * could be either zfcp_fsf_send_fcp_command_task_handler or - * zfcp_fsf_send_fcp_command_task_management_handler */ - - fsf_req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT; - - /* - * hold a pointer to the unit being target of this - * task management request - */ - fsf_req->data = (unsigned long) unit; - - /* set FSF related fields in QTCB */ - 
fsf_req->qtcb->header.lun_handle = unit->handle; - fsf_req->qtcb->header.port_handle = unit->port->handle; - fsf_req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND; - fsf_req->qtcb->bottom.io.service_class = ZFCP_FC_SERVICE_CLASS_DEFAULT; - fsf_req->qtcb->bottom.io.fcp_cmnd_length = - sizeof (struct fcp_cmnd_iu) + sizeof (fcp_dl_t); - - sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); - sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE; - sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; - - /* set FCP related fields in FCP_CMND IU in QTCB */ - fcp_cmnd_iu = (struct fcp_cmnd_iu *) - &(fsf_req->qtcb->bottom.io.fcp_cmnd); - fcp_cmnd_iu->fcp_lun = unit->fcp_lun; - fcp_cmnd_iu->task_management_flags = tm_flags; - - zfcp_fsf_start_timer(fsf_req, ZFCP_SCSI_ER_TIMEOUT); - retval = zfcp_fsf_req_send(fsf_req); - if (!retval) - goto out; - - unit_blocked: - zfcp_fsf_req_free(fsf_req); - fsf_req = NULL; - - out: - write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags); - return fsf_req; + lat_rec->sum += lat; + lat_rec->min = min(lat_rec->min, lat); + lat_rec->max = max(lat_rec->max, lat); } -/* - * function: zfcp_fsf_send_fcp_command_handler - * - * purpose: is called for finished Send FCP Command - * - * returns: - */ -static int -zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req) +static void zfcp_fsf_req_latency(struct zfcp_fsf_req *req) { - int retval = -EINVAL; - struct zfcp_unit *unit; - struct fsf_qtcb_header *header; - u16 subtable, rule, counter; - - header = &fsf_req->qtcb->header; - - if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)) - unit = (struct zfcp_unit *) fsf_req->data; - else - unit = fsf_req->unit; - - if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)) { - /* go directly to calls of special handlers */ - goto skip_fsfstatus; - } - - /* evaluate FSF status in QTCB */ - switch (header->fsf_status) { - - case FSF_PORT_HANDLE_NOT_VALID: - ZFCP_LOG_INFO("Temporary port identifier 0x%x for port " - "0x%016Lx on adapter %s invalid\n", - unit->port->handle, - unit->port->wwpn, zfcp_get_busid_by_unit(unit)); - ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, - (char *) &header->fsf_status_qual, - sizeof (union fsf_status_qual)); - zfcp_erp_adapter_reopen(unit->port->adapter, 0, 112, fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - break; - - case FSF_LUN_HANDLE_NOT_VALID: - ZFCP_LOG_INFO("Temporary LUN identifier 0x%x for unit " - "0x%016Lx on port 0x%016Lx on adapter %s is " - "invalid. This may happen occasionally.\n", - unit->handle, - unit->fcp_lun, - unit->port->wwpn, - zfcp_get_busid_by_unit(unit)); - ZFCP_LOG_NORMAL("Status qualifier data:\n"); - ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL, - (char *) &header->fsf_status_qual, - sizeof (union fsf_status_qual)); - zfcp_erp_port_reopen(unit->port, 0, 113, fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - break; - - case FSF_HANDLE_MISMATCH: - ZFCP_LOG_NORMAL("bug: The port handle 0x%x has changed " - "unexpectedly. 
(adapter %s, port 0x%016Lx, " - "unit 0x%016Lx)\n", - unit->port->handle, - zfcp_get_busid_by_unit(unit), - unit->port->wwpn, - unit->fcp_lun); - ZFCP_LOG_NORMAL("status qualifier:\n"); - ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL, - (char *) &header->fsf_status_qual, - sizeof (union fsf_status_qual)); - zfcp_erp_adapter_reopen(unit->port->adapter, 0, 114, fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - break; - - case FSF_SERVICE_CLASS_NOT_SUPPORTED: - ZFCP_LOG_INFO("error: adapter %s does not support fc " - "class %d.\n", - zfcp_get_busid_by_unit(unit), - ZFCP_FC_SERVICE_CLASS_DEFAULT); - /* stop operation for this adapter */ - zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 132, fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - break; - - case FSF_FCPLUN_NOT_VALID: - ZFCP_LOG_NORMAL("bug: unit 0x%016Lx on port 0x%016Lx on " - "adapter %s does not have correct unit " - "handle 0x%x\n", - unit->fcp_lun, - unit->port->wwpn, - zfcp_get_busid_by_unit(unit), - unit->handle); - ZFCP_LOG_DEBUG("status qualifier:\n"); - ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, - (char *) &header->fsf_status_qual, - sizeof (union fsf_status_qual)); - zfcp_erp_port_reopen(unit->port, 0, 115, fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - break; - - case FSF_ACCESS_DENIED: - ZFCP_LOG_NORMAL("Access denied, cannot send FCP command to " - "unit 0x%016Lx on port 0x%016Lx on " - "adapter %s\n", unit->fcp_lun, unit->port->wwpn, - zfcp_get_busid_by_unit(unit)); - for (counter = 0; counter < 2; counter++) { - subtable = header->fsf_status_qual.halfword[counter * 2]; - rule = header->fsf_status_qual.halfword[counter * 2 + 1]; - switch (subtable) { - case FSF_SQ_CFDC_SUBTABLE_OS: - case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN: - case FSF_SQ_CFDC_SUBTABLE_PORT_DID: - case FSF_SQ_CFDC_SUBTABLE_LUN: - ZFCP_LOG_INFO("Access denied (%s rule %d)\n", - zfcp_act_subtable_type[subtable], rule); - break; - } - } - zfcp_erp_unit_access_denied(unit, 61, fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - break; - - case FSF_DIRECTION_INDICATOR_NOT_VALID: - ZFCP_LOG_INFO("bug: Invalid data direction given for unit " - "0x%016Lx on port 0x%016Lx on adapter %s " - "(debug info %d)\n", - unit->fcp_lun, - unit->port->wwpn, - zfcp_get_busid_by_unit(unit), - fsf_req->qtcb->bottom.io.data_direction); - /* stop operation for this adapter */ - zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 133, fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - break; - - case FSF_CMND_LENGTH_NOT_VALID: - ZFCP_LOG_NORMAL - ("bug: An invalid control-data-block length field " - "was found in a command for unit 0x%016Lx on port " - "0x%016Lx on adapter %s " "(debug info %d)\n", - unit->fcp_lun, unit->port->wwpn, - zfcp_get_busid_by_unit(unit), - fsf_req->qtcb->bottom.io.fcp_cmnd_length); - /* stop operation for this adapter */ - zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 134, fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - break; + struct fsf_qual_latency_info *lat_inf; + struct latency_cont *lat; + struct zfcp_unit *unit = req->unit; + unsigned long flags; - case FSF_PORT_BOXED: - ZFCP_LOG_DEBUG("The remote port 0x%016Lx on adapter %s " - "needs to be reopened\n", - unit->port->wwpn, zfcp_get_busid_by_unit(unit)); - zfcp_erp_port_boxed(unit->port, 53, fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR | - ZFCP_STATUS_FSFREQ_RETRY; - break; + lat_inf = &req->qtcb->prefix.prot_status_qual.latency_info; - case FSF_LUN_BOXED: - ZFCP_LOG_NORMAL("unit needs to be reopened (adapter %s, " - "wwpn=0x%016Lx, 
fcp_lun=0x%016Lx)\n", - zfcp_get_busid_by_unit(unit), - unit->port->wwpn, unit->fcp_lun); - zfcp_erp_unit_boxed(unit, 54, fsf_req); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR - | ZFCP_STATUS_FSFREQ_RETRY; - break; - - case FSF_ADAPTER_STATUS_AVAILABLE: - switch (header->fsf_status_qual.word[0]) { - case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: - /* re-establish link to port */ - zfcp_test_link(unit->port); - break; - case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: - /* FIXME(hw) need proper specs for proper action */ - /* let scsi stack deal with retries and escalation */ - break; - default: - ZFCP_LOG_NORMAL - ("Unknown status qualifier 0x%x arrived.\n", - header->fsf_status_qual.word[0]); - break; - } - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; + switch (req->qtcb->bottom.io.data_direction) { + case FSF_DATADIR_READ: + lat = &unit->latencies.read; break; - - case FSF_GOOD: + case FSF_DATADIR_WRITE: + lat = &unit->latencies.write; break; - - case FSF_FCP_RSP_AVAILABLE: + case FSF_DATADIR_CMND: + lat = &unit->latencies.cmd; break; + default: + return; } - skip_fsfstatus: - if (fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT) { - retval = - zfcp_fsf_send_fcp_command_task_management_handler(fsf_req); - } else { - retval = zfcp_fsf_send_fcp_command_task_handler(fsf_req); - fsf_req->unit = NULL; - zfcp_unit_put(unit); - } - return retval; + spin_lock_irqsave(&unit->latencies.lock, flags); + zfcp_fsf_update_lat(&lat->channel, lat_inf->channel_lat); + zfcp_fsf_update_lat(&lat->fabric, lat_inf->fabric_lat); + lat->counter++; + spin_unlock_irqrestore(&unit->latencies.lock, flags); } -/* - * function: zfcp_fsf_send_fcp_command_task_handler - * - * purpose: evaluates FCP_RSP IU - * - * returns: - */ -static int -zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req) +static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req) { - int retval = 0; - struct scsi_cmnd *scpnt; + struct scsi_cmnd *scpnt = req->data; struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *) - &(fsf_req->qtcb->bottom.io.fcp_rsp); - struct fcp_cmnd_iu *fcp_cmnd_iu = (struct fcp_cmnd_iu *) - &(fsf_req->qtcb->bottom.io.fcp_cmnd); + &(req->qtcb->bottom.io.fcp_rsp); u32 sns_len; - char *fcp_rsp_info = zfcp_get_fcp_rsp_info_ptr(fcp_rsp_iu); + char *fcp_rsp_info = (unsigned char *) &fcp_rsp_iu[1]; unsigned long flags; - struct zfcp_unit *unit = fsf_req->unit; - - read_lock_irqsave(&fsf_req->adapter->abort_lock, flags); - scpnt = (struct scsi_cmnd *) fsf_req->data; - if (unlikely(!scpnt)) { - ZFCP_LOG_DEBUG - ("Command with fsf_req %p is not associated to " - "a scsi command anymore. Aborted?\n", fsf_req); - goto out; - } - if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTED)) { - /* FIXME: (design) mid-layer should handle DID_ABORT like - * DID_SOFT_ERROR by retrying the request for devices - * that allow retries. 
- */ - ZFCP_LOG_DEBUG("Setting DID_SOFT_ERROR and SUGGEST_RETRY\n"); - set_host_byte(&scpnt->result, DID_SOFT_ERROR); - set_driver_byte(&scpnt->result, SUGGEST_RETRY); + + if (unlikely(!scpnt)) + return; + + read_lock_irqsave(&req->adapter->abort_lock, flags); + + if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ABORTED)) { + set_host_byte(scpnt, DID_SOFT_ERROR); + set_driver_byte(scpnt, SUGGEST_RETRY); goto skip_fsfstatus; } - if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)) { - ZFCP_LOG_DEBUG("Setting DID_ERROR\n"); - set_host_byte(&scpnt->result, DID_ERROR); + if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) { + set_host_byte(scpnt, DID_ERROR); goto skip_fsfstatus; } - /* set message byte of result in SCSI command */ - scpnt->result |= COMMAND_COMPLETE << 8; + set_msg_byte(scpnt, COMMAND_COMPLETE); - /* - * copy SCSI status code of FCP_STATUS of FCP_RSP IU to status byte - * of result in SCSI command - */ scpnt->result |= fcp_rsp_iu->scsi_status; - if (unlikely(fcp_rsp_iu->scsi_status)) { - /* DEBUG */ - ZFCP_LOG_DEBUG("status for SCSI Command:\n"); - ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, - scpnt->cmnd, scpnt->cmd_len); - ZFCP_LOG_DEBUG("SCSI status code 0x%x\n", - fcp_rsp_iu->scsi_status); - ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, - (void *) fcp_rsp_iu, sizeof (struct fcp_rsp_iu)); - ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, - zfcp_get_fcp_sns_info_ptr(fcp_rsp_iu), - fcp_rsp_iu->fcp_sns_len); - } - /* check FCP_RSP_INFO */ + if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA) + zfcp_fsf_req_latency(req); + if (unlikely(fcp_rsp_iu->validity.bits.fcp_rsp_len_valid)) { - ZFCP_LOG_DEBUG("rsp_len is valid\n"); - switch (fcp_rsp_info[3]) { - case RSP_CODE_GOOD: - /* ok, continue */ - ZFCP_LOG_TRACE("no failure or Task Management " - "Function complete\n"); - set_host_byte(&scpnt->result, DID_OK); - break; - case RSP_CODE_LENGTH_MISMATCH: - /* hardware bug */ - ZFCP_LOG_NORMAL("bug: FCP response code indictates " - "that the fibrechannel protocol data " - "length differs from the burst length. " - "The problem occured on unit 0x%016Lx " - "on port 0x%016Lx on adapter %s", - unit->fcp_lun, - unit->port->wwpn, - zfcp_get_busid_by_unit(unit)); - /* dump SCSI CDB as prepared by zfcp */ - ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, - (char *) &fsf_req->qtcb-> - bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE); - set_host_byte(&scpnt->result, DID_ERROR); - goto skip_fsfstatus; - case RSP_CODE_FIELD_INVALID: - /* driver or hardware bug */ - ZFCP_LOG_NORMAL("bug: FCP response code indictates " - "that the fibrechannel protocol data " - "fields were incorrectly set up. " - "The problem occured on the unit " - "0x%016Lx on port 0x%016Lx on " - "adapter %s", - unit->fcp_lun, - unit->port->wwpn, - zfcp_get_busid_by_unit(unit)); - /* dump SCSI CDB as prepared by zfcp */ - ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, - (char *) &fsf_req->qtcb-> - bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE); - set_host_byte(&scpnt->result, DID_ERROR); - goto skip_fsfstatus; - case RSP_CODE_RO_MISMATCH: - /* hardware bug */ - ZFCP_LOG_NORMAL("bug: The FCP response code indicates " - "that conflicting values for the " - "fibrechannel payload offset from the " - "header were found. 
" - "The problem occured on unit 0x%016Lx " - "on port 0x%016Lx on adapter %s.\n", - unit->fcp_lun, - unit->port->wwpn, - zfcp_get_busid_by_unit(unit)); - /* dump SCSI CDB as prepared by zfcp */ - ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, - (char *) &fsf_req->qtcb-> - bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE); - set_host_byte(&scpnt->result, DID_ERROR); - goto skip_fsfstatus; - default: - ZFCP_LOG_NORMAL("bug: An invalid FCP response " - "code was detected for a command. " - "The problem occured on the unit " - "0x%016Lx on port 0x%016Lx on " - "adapter %s (debug info 0x%x)\n", - unit->fcp_lun, - unit->port->wwpn, - zfcp_get_busid_by_unit(unit), - fcp_rsp_info[3]); - /* dump SCSI CDB as prepared by zfcp */ - ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, - (char *) &fsf_req->qtcb-> - bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE); - set_host_byte(&scpnt->result, DID_ERROR); + if (fcp_rsp_info[3] == RSP_CODE_GOOD) + set_host_byte(scpnt, DID_OK); + else { + set_host_byte(scpnt, DID_ERROR); goto skip_fsfstatus; } } - /* check for sense data */ if (unlikely(fcp_rsp_iu->validity.bits.fcp_sns_len_valid)) { - sns_len = FSF_FCP_RSP_SIZE - - sizeof (struct fcp_rsp_iu) + fcp_rsp_iu->fcp_rsp_len; - ZFCP_LOG_TRACE("room for %i bytes sense data in QTCB\n", - sns_len); + sns_len = FSF_FCP_RSP_SIZE - sizeof(struct fcp_rsp_iu) + + fcp_rsp_iu->fcp_rsp_len; sns_len = min(sns_len, (u32) SCSI_SENSE_BUFFERSIZE); - ZFCP_LOG_TRACE("room for %i bytes sense data in SCSI command\n", - SCSI_SENSE_BUFFERSIZE); sns_len = min(sns_len, fcp_rsp_iu->fcp_sns_len); - ZFCP_LOG_TRACE("scpnt->result =0x%x, command was:\n", - scpnt->result); - ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, - scpnt->cmnd, scpnt->cmd_len); - ZFCP_LOG_TRACE("%i bytes sense data provided by FCP\n", - fcp_rsp_iu->fcp_sns_len); memcpy(scpnt->sense_buffer, zfcp_get_fcp_sns_info_ptr(fcp_rsp_iu), sns_len); - ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, - (void *)scpnt->sense_buffer, sns_len); - } - - /* check for overrun */ - if (unlikely(fcp_rsp_iu->validity.bits.fcp_resid_over)) { - ZFCP_LOG_INFO("A data overrun was detected for a command. " - "unit 0x%016Lx, port 0x%016Lx, adapter %s. " - "The response data length is " - "%d, the original length was %d.\n", - unit->fcp_lun, - unit->port->wwpn, - zfcp_get_busid_by_unit(unit), - fcp_rsp_iu->fcp_resid, - (int) zfcp_get_fcp_dl(fcp_cmnd_iu)); } - /* check for underrun */ if (unlikely(fcp_rsp_iu->validity.bits.fcp_resid_under)) { - ZFCP_LOG_INFO("A data underrun was detected for a command. " - "unit 0x%016Lx, port 0x%016Lx, adapter %s. 
" - "The response data length is " - "%d, the original length was %d.\n", - unit->fcp_lun, - unit->port->wwpn, - zfcp_get_busid_by_unit(unit), - fcp_rsp_iu->fcp_resid, - (int) zfcp_get_fcp_dl(fcp_cmnd_iu)); - scsi_set_resid(scpnt, fcp_rsp_iu->fcp_resid); if (scsi_bufflen(scpnt) - scsi_get_resid(scpnt) < scpnt->underflow) - set_host_byte(&scpnt->result, DID_ERROR); + set_host_byte(scpnt, DID_ERROR); } - - skip_fsfstatus: - ZFCP_LOG_DEBUG("scpnt->result =0x%x\n", scpnt->result); - +skip_fsfstatus: if (scpnt->result != 0) - zfcp_scsi_dbf_event_result("erro", 3, fsf_req->adapter, scpnt, fsf_req); + zfcp_scsi_dbf_event_result("erro", 3, req->adapter, scpnt, req); else if (scpnt->retries > 0) - zfcp_scsi_dbf_event_result("retr", 4, fsf_req->adapter, scpnt, fsf_req); + zfcp_scsi_dbf_event_result("retr", 4, req->adapter, scpnt, req); else - zfcp_scsi_dbf_event_result("norm", 6, fsf_req->adapter, scpnt, fsf_req); + zfcp_scsi_dbf_event_result("norm", 6, req->adapter, scpnt, req); - /* cleanup pointer (need this especially for abort) */ scpnt->host_scribble = NULL; - - /* always call back */ (scpnt->scsi_done) (scpnt); - /* * We must hold this lock until scsi_done has been called. * Otherwise we may call scsi_done after abort regarding this * command has completed. * Note: scsi_done must not block! */ - out: - read_unlock_irqrestore(&fsf_req->adapter->abort_lock, flags); - return retval; + read_unlock_irqrestore(&req->adapter->abort_lock, flags); } -/* - * function: zfcp_fsf_send_fcp_command_task_management_handler - * - * purpose: evaluates FCP_RSP IU - * - * returns: - */ -static int -zfcp_fsf_send_fcp_command_task_management_handler(struct zfcp_fsf_req *fsf_req) +static void zfcp_fsf_send_fcp_ctm_handler(struct zfcp_fsf_req *req) { - int retval = 0; struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *) - &(fsf_req->qtcb->bottom.io.fcp_rsp); - char *fcp_rsp_info = zfcp_get_fcp_rsp_info_ptr(fcp_rsp_iu); - struct zfcp_unit *unit = (struct zfcp_unit *) fsf_req->data; - - if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) { - fsf_req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED; - goto skip_fsfstatus; - } + &(req->qtcb->bottom.io.fcp_rsp); + char *fcp_rsp_info = (unsigned char *) &fcp_rsp_iu[1]; - /* check FCP_RSP_INFO */ - switch (fcp_rsp_info[3]) { - case RSP_CODE_GOOD: - /* ok, continue */ - ZFCP_LOG_DEBUG("no failure or Task Management " - "Function complete\n"); - break; - case RSP_CODE_TASKMAN_UNSUPP: - ZFCP_LOG_NORMAL("bug: A reuested task management function " - "is not supported on the target device " - "unit 0x%016Lx, port 0x%016Lx, adapter %s\n ", - unit->fcp_lun, - unit->port->wwpn, - zfcp_get_busid_by_unit(unit)); - fsf_req->status |= ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP; - break; - case RSP_CODE_TASKMAN_FAILED: - ZFCP_LOG_NORMAL("bug: A reuested task management function " - "failed to complete successfully. " - "unit 0x%016Lx, port 0x%016Lx, adapter %s.\n", - unit->fcp_lun, - unit->port->wwpn, - zfcp_get_busid_by_unit(unit)); - fsf_req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED; - break; - default: - ZFCP_LOG_NORMAL("bug: An invalid FCP response " - "code was detected for a command. 
" - "unit 0x%016Lx, port 0x%016Lx, adapter %s " - "(debug info 0x%x)\n", - unit->fcp_lun, - unit->port->wwpn, - zfcp_get_busid_by_unit(unit), - fcp_rsp_info[3]); - fsf_req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED; - } - - skip_fsfstatus: - return retval; + if ((fcp_rsp_info[3] != RSP_CODE_GOOD) || + (req->status & ZFCP_STATUS_FSFREQ_ERROR)) + req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED; } -/* - * function: zfcp_fsf_control_file - * - * purpose: Initiator of the control file upload/download FSF requests - * - * returns: 0 - FSF request is successfuly created and queued - * -EOPNOTSUPP - The FCP adapter does not have Control File support - * -EINVAL - Invalid direction specified - * -ENOMEM - Insufficient memory - * -EPERM - Cannot create FSF request or place it in QDIO queue - */ -int -zfcp_fsf_control_file(struct zfcp_adapter *adapter, - struct zfcp_fsf_req **fsf_req_ptr, - u32 fsf_command, - u32 option, - struct zfcp_sg_list *sg_list) +static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req) { - struct zfcp_fsf_req *fsf_req; - struct fsf_qtcb_bottom_support *bottom; - volatile struct qdio_buffer_element *sbale; - unsigned long lock_flags; - int req_flags = 0; - int direction; - int retval = 0; - - if (!(adapter->adapter_features & FSF_FEATURE_CFDC)) { - ZFCP_LOG_INFO("cfdc not supported (adapter %s)\n", - zfcp_get_busid_by_adapter(adapter)); - retval = -EOPNOTSUPP; - goto out; - } - - switch (fsf_command) { - - case FSF_QTCB_DOWNLOAD_CONTROL_FILE: - direction = SBAL_FLAGS0_TYPE_WRITE; - if ((option != FSF_CFDC_OPTION_FULL_ACCESS) && - (option != FSF_CFDC_OPTION_RESTRICTED_ACCESS)) - req_flags = ZFCP_WAIT_FOR_SBAL; - break; - - case FSF_QTCB_UPLOAD_CONTROL_FILE: - direction = SBAL_FLAGS0_TYPE_READ; - break; - - default: - ZFCP_LOG_INFO("Invalid FSF command code 0x%08x\n", fsf_command); - retval = -EINVAL; - goto out; - } - - retval = zfcp_fsf_req_create(adapter, fsf_command, req_flags, - NULL, &lock_flags, &fsf_req); - if (retval < 0) { - ZFCP_LOG_INFO("error: Could not create FSF request for the " - "adapter %s\n", - zfcp_get_busid_by_adapter(adapter)); - retval = -EPERM; - goto unlock_queue_lock; - } - - sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); - sbale[0].flags |= direction; - - bottom = &fsf_req->qtcb->bottom.support; - bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE; - bottom->option = option; - - if (sg_list->count > 0) { - int bytes; - - bytes = zfcp_qdio_sbals_from_sg(fsf_req, direction, - sg_list->sg, sg_list->count, - ZFCP_MAX_SBALS_PER_REQ); - if (bytes != ZFCP_CFDC_MAX_CONTROL_FILE_SIZE) { - ZFCP_LOG_INFO( - "error: Could not create sufficient number of " - "SBALS for an FSF request to the adapter %s\n", - zfcp_get_busid_by_adapter(adapter)); - retval = -ENOMEM; - goto free_fsf_req; - } - } else - sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; - - zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT); - retval = zfcp_fsf_req_send(fsf_req); - if (retval < 0) { - ZFCP_LOG_INFO("initiation of cfdc up/download failed" - "(adapter %s)\n", - zfcp_get_busid_by_adapter(adapter)); - retval = -EPERM; - goto free_fsf_req; - } - write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags); - - ZFCP_LOG_NORMAL("Control file %s FSF request has been sent to the " - "adapter %s\n", - fsf_command == FSF_QTCB_DOWNLOAD_CONTROL_FILE ? 
- "download" : "upload", - zfcp_get_busid_by_adapter(adapter)); - - wait_event(fsf_req->completion_wq, - fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED); - - *fsf_req_ptr = fsf_req; - goto out; - - free_fsf_req: - zfcp_fsf_req_free(fsf_req); - unlock_queue_lock: - write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags); - out: - return retval; -} - + struct zfcp_unit *unit; + struct fsf_qtcb_header *header = &req->qtcb->header; -/* - * function: zfcp_fsf_control_file_handler - * - * purpose: Handler of the control file upload/download FSF requests - * - * returns: 0 - FSF request successfuly processed - * -EAGAIN - Operation has to be repeated because of a temporary problem - * -EACCES - There is no permission to execute an operation - * -EPERM - The control file is not in a right format - * -EIO - There is a problem with the FCP adapter - * -EINVAL - Invalid operation - * -EFAULT - User space memory I/O operation fault - */ -static int -zfcp_fsf_control_file_handler(struct zfcp_fsf_req *fsf_req) -{ - struct zfcp_adapter *adapter = fsf_req->adapter; - struct fsf_qtcb_header *header = &fsf_req->qtcb->header; - struct fsf_qtcb_bottom_support *bottom = &fsf_req->qtcb->bottom.support; - int retval = 0; + if (unlikely(req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)) + unit = req->data; + else + unit = req->unit; - if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) { - retval = -EINVAL; + if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) goto skip_fsfstatus; - } switch (header->fsf_status) { - - case FSF_GOOD: - ZFCP_LOG_NORMAL( - "The FSF request has been successfully completed " - "on the adapter %s\n", - zfcp_get_busid_by_adapter(adapter)); - break; - - case FSF_OPERATION_PARTIALLY_SUCCESSFUL: - if (bottom->operation_subtype == FSF_CFDC_OPERATION_SUBTYPE) { - switch (header->fsf_status_qual.word[0]) { - - case FSF_SQ_CFDC_HARDENED_ON_SE: - ZFCP_LOG_NORMAL( - "CFDC on the adapter %s has being " - "hardened on primary and secondary SE\n", - zfcp_get_busid_by_adapter(adapter)); - break; - - case FSF_SQ_CFDC_COULD_NOT_HARDEN_ON_SE: - ZFCP_LOG_NORMAL( - "CFDC of the adapter %s could not " - "be saved on the SE\n", - zfcp_get_busid_by_adapter(adapter)); - break; - - case FSF_SQ_CFDC_COULD_NOT_HARDEN_ON_SE2: - ZFCP_LOG_NORMAL( - "CFDC of the adapter %s could not " - "be copied to the secondary SE\n", - zfcp_get_busid_by_adapter(adapter)); - break; - - default: - ZFCP_LOG_NORMAL( - "CFDC could not be hardened " - "on the adapter %s\n", - zfcp_get_busid_by_adapter(adapter)); - } - } - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - retval = -EAGAIN; - break; - - case FSF_AUTHORIZATION_FAILURE: - ZFCP_LOG_NORMAL( - "Adapter %s does not accept privileged commands\n", - zfcp_get_busid_by_adapter(adapter)); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - retval = -EACCES; + case FSF_HANDLE_MISMATCH: + case FSF_PORT_HANDLE_NOT_VALID: + zfcp_erp_adapter_reopen(unit->port->adapter, 0, 112, req); + req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; - - case FSF_CFDC_ERROR_DETECTED: - ZFCP_LOG_NORMAL( - "Error at position %d in the CFDC, " - "CFDC is discarded by the adapter %s\n", - header->fsf_status_qual.word[0], - zfcp_get_busid_by_adapter(adapter)); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - retval = -EPERM; + case FSF_FCPLUN_NOT_VALID: + case FSF_LUN_HANDLE_NOT_VALID: + zfcp_erp_port_reopen(unit->port, 0, 113, req); + req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; - - case FSF_CONTROL_FILE_UPDATE_ERROR: - ZFCP_LOG_NORMAL( - "Adapter %s cannot harden the control file, " - "file is 
discarded\n", - zfcp_get_busid_by_adapter(adapter)); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - retval = -EIO; + case FSF_SERVICE_CLASS_NOT_SUPPORTED: + zfcp_fsf_class_not_supp(req); break; - - case FSF_CONTROL_FILE_TOO_LARGE: - ZFCP_LOG_NORMAL( - "Control file is too large, file is discarded " - "by the adapter %s\n", - zfcp_get_busid_by_adapter(adapter)); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - retval = -EIO; + case FSF_ACCESS_DENIED: + zfcp_fsf_access_denied_unit(req, unit); break; - - case FSF_ACCESS_CONFLICT_DETECTED: - if (bottom->operation_subtype == FSF_CFDC_OPERATION_SUBTYPE) - ZFCP_LOG_NORMAL( - "CFDC has been discarded by the adapter %s, " - "because activation would impact " - "%d active connection(s)\n", - zfcp_get_busid_by_adapter(adapter), - header->fsf_status_qual.word[0]); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - retval = -EIO; + case FSF_DIRECTION_INDICATOR_NOT_VALID: + dev_err(&req->adapter->ccw_device->dev, + "Invalid data direction (%d) given for unit " + "0x%016Lx on port 0x%016Lx, shutting down " + "adapter.\n", + req->qtcb->bottom.io.data_direction, + unit->fcp_lun, unit->port->wwpn); + zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 133, req); + req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; - - case FSF_CONFLICTS_OVERRULED: - if (bottom->operation_subtype == FSF_CFDC_OPERATION_SUBTYPE) - ZFCP_LOG_NORMAL( - "CFDC has been activated on the adapter %s, " - "but activation has impacted " - "%d active connection(s)\n", - zfcp_get_busid_by_adapter(adapter), - header->fsf_status_qual.word[0]); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - retval = -EIO; + case FSF_CMND_LENGTH_NOT_VALID: + dev_err(&req->adapter->ccw_device->dev, + "An invalid control-data-block length field (%d) " + "was found in a command for unit 0x%016Lx on port " + "0x%016Lx. 
Shutting down adapter.\n", + req->qtcb->bottom.io.fcp_cmnd_length, + unit->fcp_lun, unit->port->wwpn); + zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 134, req); + req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; - - case FSF_UNKNOWN_OP_SUBTYPE: - ZFCP_LOG_NORMAL("unknown operation subtype (adapter: %s, " - "op_subtype=0x%x)\n", - zfcp_get_busid_by_adapter(adapter), - bottom->operation_subtype); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - retval = -EINVAL; + case FSF_PORT_BOXED: + zfcp_erp_port_boxed(unit->port, 53, req); + req->status |= ZFCP_STATUS_FSFREQ_ERROR | + ZFCP_STATUS_FSFREQ_RETRY; break; - - case FSF_INVALID_COMMAND_OPTION: - ZFCP_LOG_NORMAL( - "Invalid option 0x%x has been specified " - "in QTCB bottom sent to the adapter %s\n", - bottom->option, - zfcp_get_busid_by_adapter(adapter)); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - retval = -EINVAL; + case FSF_LUN_BOXED: + zfcp_erp_unit_boxed(unit, 54, req); + req->status |= ZFCP_STATUS_FSFREQ_ERROR | + ZFCP_STATUS_FSFREQ_RETRY; break; - - default: - ZFCP_LOG_NORMAL( - "bug: An unknown/unexpected FSF status 0x%08x " - "was presented on the adapter %s\n", - header->fsf_status, - zfcp_get_busid_by_adapter(adapter)); - fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; - retval = -EINVAL; + case FSF_ADAPTER_STATUS_AVAILABLE: + if (header->fsf_status_qual.word[0] == + FSF_SQ_INVOKE_LINK_TEST_PROCEDURE) + zfcp_test_link(unit->port); + req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; } - skip_fsfstatus: - return retval; -} - -static inline int -zfcp_fsf_req_sbal_check(unsigned long *flags, - struct zfcp_qdio_queue *queue, int needed) -{ - write_lock_irqsave(&queue->queue_lock, *flags); - if (likely(atomic_read(&queue->free_count) >= needed)) - return 1; - write_unlock_irqrestore(&queue->queue_lock, *flags); - return 0; -} - -/* - * set qtcb pointer in fsf_req and initialize QTCB - */ -static void -zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req) -{ - if (likely(fsf_req->qtcb != NULL)) { - fsf_req->qtcb->prefix.req_seq_no = - fsf_req->adapter->fsf_req_seq_no; - fsf_req->qtcb->prefix.req_id = fsf_req->req_id; - fsf_req->qtcb->prefix.ulp_info = ZFCP_ULP_INFO_VERSION; - fsf_req->qtcb->prefix.qtcb_type = - fsf_qtcb_type[fsf_req->fsf_command]; - fsf_req->qtcb->prefix.qtcb_version = ZFCP_QTCB_VERSION; - fsf_req->qtcb->header.req_handle = fsf_req->req_id; - fsf_req->qtcb->header.fsf_command = fsf_req->fsf_command; + if (req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT) + zfcp_fsf_send_fcp_ctm_handler(req); + else { + zfcp_fsf_send_fcp_command_task_handler(req); + req->unit = NULL; + zfcp_unit_put(unit); } } /** - * zfcp_fsf_req_sbal_get - try to get one SBAL in the request queue - * @adapter: adapter for which request queue is examined - * @req_flags: flags indicating whether to wait for needed SBAL or not - * @lock_flags: lock_flags if queue_lock is taken - * Return: 0 on success, otherwise -EIO, or -ERESTARTSYS - * Locks: lock adapter->request_queue->queue_lock on success - */ -static int -zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter, int req_flags, - unsigned long *lock_flags) -{ - long ret; - struct zfcp_qdio_queue *req_queue = &adapter->request_queue; - - if (unlikely(req_flags & ZFCP_WAIT_FOR_SBAL)) { - ret = wait_event_interruptible_timeout(adapter->request_wq, - zfcp_fsf_req_sbal_check(lock_flags, req_queue, 1), - ZFCP_SBAL_TIMEOUT); - if (ret < 0) - return ret; - if (!ret) - return -EIO; - } else if (!zfcp_fsf_req_sbal_check(lock_flags, req_queue, 1)) - return -EIO; - - return 0; -} - -/* - * function: 
zfcp_fsf_req_create - * - * purpose: create an FSF request at the specified adapter and - * setup common fields - * - * returns: -ENOMEM if there was insufficient memory for a request - * -EIO if no qdio buffers could be allocate to the request - * -EINVAL/-EPERM on bug conditions in req_dequeue - * 0 in success - * - * note: The created request is returned by reference. - * - * locks: lock of concerned request queue must not be held, - * but is held on completion (write, irqsave) + * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command) + * @adapter: adapter where scsi command is issued + * @unit: unit where command is sent to + * @scsi_cmnd: scsi command to be sent + * @timer: timer to be started when request is initiated + * @req_flags: flags for fsf_request */ -int -zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags, - mempool_t *pool, unsigned long *lock_flags, - struct zfcp_fsf_req **fsf_req_p) +int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter, + struct zfcp_unit *unit, + struct scsi_cmnd *scsi_cmnd, + int use_timer, int req_flags) { - volatile struct qdio_buffer_element *sbale; - struct zfcp_fsf_req *fsf_req = NULL; - int ret = 0; - struct zfcp_qdio_queue *req_queue = &adapter->request_queue; - - /* allocate new FSF request */ - fsf_req = zfcp_fsf_req_alloc(pool, req_flags); - if (unlikely(NULL == fsf_req)) { - ZFCP_LOG_DEBUG("error: Could not put an FSF request into " - "the outbound (send) queue.\n"); - ret = -ENOMEM; - goto failed_fsf_req; - } - - fsf_req->adapter = adapter; - fsf_req->fsf_command = fsf_cmd; - INIT_LIST_HEAD(&fsf_req->list); - init_timer(&fsf_req->timer); + struct zfcp_fsf_req *req; + struct fcp_cmnd_iu *fcp_cmnd_iu; + unsigned int sbtype; + int real_bytes, retval = -EIO; - /* initialize waitqueue which may be used to wait on - this request completion */ - init_waitqueue_head(&fsf_req->completion_wq); + if (unlikely(!(atomic_read(&unit->status) & + ZFCP_STATUS_COMMON_UNBLOCKED))) + return -EBUSY; - ret = zfcp_fsf_req_sbal_get(adapter, req_flags, lock_flags); - if (ret < 0) - goto failed_sbals; + spin_lock(&adapter->req_q.lock); + if (!atomic_read(&adapter->req_q.count)) + goto out; + req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags, + adapter->pool.fsf_req_scsi); + if (unlikely(IS_ERR(req))) { + retval = PTR_ERR(req); + goto out; + } - /* this is serialized (we are holding req_queue-lock of adapter) */ - if (adapter->req_no == 0) - adapter->req_no++; - fsf_req->req_id = adapter->req_no++; + zfcp_unit_get(unit); + req->unit = unit; + req->data = scsi_cmnd; + req->handler = zfcp_fsf_send_fcp_command_handler; + req->qtcb->header.lun_handle = unit->handle; + req->qtcb->header.port_handle = unit->port->handle; + req->qtcb->bottom.io.service_class = FSF_CLASS_3; - zfcp_fsf_req_qtcb_init(fsf_req); + scsi_cmnd->host_scribble = (unsigned char *) req->req_id; + fcp_cmnd_iu = (struct fcp_cmnd_iu *) &(req->qtcb->bottom.io.fcp_cmnd); + fcp_cmnd_iu->fcp_lun = unit->fcp_lun; /* - * We hold queue_lock here. Check if QDIOUP is set and let request fail - * if it is not set (see also *_open_qdio and *_close_qdio). 
+ * set depending on data direction: + * data direction bits in SBALE (SB Type) + * data direction bits in QTCB + * data direction bits in FCP_CMND IU */ - - if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) { - write_unlock_irqrestore(&req_queue->queue_lock, *lock_flags); - ret = -EIO; - goto failed_sbals; + switch (scsi_cmnd->sc_data_direction) { + case DMA_NONE: + req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND; + sbtype = SBAL_FLAGS0_TYPE_READ; + break; + case DMA_FROM_DEVICE: + req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ; + sbtype = SBAL_FLAGS0_TYPE_READ; + fcp_cmnd_iu->rddata = 1; + break; + case DMA_TO_DEVICE: + req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE; + sbtype = SBAL_FLAGS0_TYPE_WRITE; + fcp_cmnd_iu->wddata = 1; + break; + case DMA_BIDIRECTIONAL: + default: + retval = -EIO; + goto failed_scsi_cmnd; } - if (fsf_req->qtcb) { - fsf_req->seq_no = adapter->fsf_req_seq_no; - fsf_req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no; - } - fsf_req->sbal_number = 1; - fsf_req->sbal_first = req_queue->free_index; - fsf_req->sbal_curr = req_queue->free_index; - fsf_req->sbale_curr = 1; + if (likely((scsi_cmnd->device->simple_tags) || + ((atomic_read(&unit->status) & ZFCP_STATUS_UNIT_READONLY) && + (atomic_read(&unit->status) & ZFCP_STATUS_UNIT_SHARED)))) + fcp_cmnd_iu->task_attribute = SIMPLE_Q; + else + fcp_cmnd_iu->task_attribute = UNTAGGED; - if (likely(req_flags & ZFCP_REQ_AUTO_CLEANUP)) { - fsf_req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - } + if (unlikely(scsi_cmnd->cmd_len > FCP_CDB_LENGTH)) + fcp_cmnd_iu->add_fcp_cdb_length = + (scsi_cmnd->cmd_len - FCP_CDB_LENGTH) >> 2; - sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); + memcpy(fcp_cmnd_iu->fcp_cdb, scsi_cmnd->cmnd, scsi_cmnd->cmd_len); - /* setup common SBALE fields */ - sbale[0].addr = (void *) fsf_req->req_id; - sbale[0].flags |= SBAL_FLAGS0_COMMAND; - if (likely(fsf_req->qtcb != NULL)) { - sbale[1].addr = (void *) fsf_req->qtcb; - sbale[1].length = sizeof(struct fsf_qtcb); + req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) + + fcp_cmnd_iu->add_fcp_cdb_length + sizeof(fcp_dl_t); + + real_bytes = zfcp_qdio_sbals_from_sg(req, sbtype, + scsi_sglist(scsi_cmnd), + FSF_MAX_SBALS_PER_REQ); + if (unlikely(real_bytes < 0)) { + if (req->sbal_number < FSF_MAX_SBALS_PER_REQ) + retval = -EIO; + else { + dev_err(&adapter->ccw_device->dev, + "SCSI request too large. 
" + "Shutting down unit 0x%016Lx on port " + "0x%016Lx.\n", unit->fcp_lun, + unit->port->wwpn); + zfcp_erp_unit_shutdown(unit, 0, 131, req); + retval = -EINVAL; + } + goto failed_scsi_cmnd; } - ZFCP_LOG_TRACE("got %i free BUFFERs starting at index %i\n", - fsf_req->sbal_number, fsf_req->sbal_first); + zfcp_set_fcp_dl(fcp_cmnd_iu, real_bytes); - goto success; + if (use_timer) + zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); - failed_sbals: -/* dequeue new FSF request previously enqueued */ - zfcp_fsf_req_free(fsf_req); - fsf_req = NULL; + retval = zfcp_fsf_req_send(req); + if (unlikely(retval)) + goto failed_scsi_cmnd; - failed_fsf_req: - write_lock_irqsave(&req_queue->queue_lock, *lock_flags); - success: - *fsf_req_p = fsf_req; - return ret; + goto out; + +failed_scsi_cmnd: + zfcp_unit_put(unit); + zfcp_fsf_req_free(req); + scsi_cmnd->host_scribble = NULL; +out: + spin_unlock(&adapter->req_q.lock); + return retval; } -/* - * function: zfcp_fsf_req_send - * - * purpose: start transfer of FSF request via QDIO - * - * returns: 0 - request transfer succesfully started - * !0 - start of request transfer failed +/** + * zfcp_fsf_send_fcp_ctm - send SCSI task management command + * @adapter: pointer to struct zfcp-adapter + * @unit: pointer to struct zfcp_unit + * @tm_flags: unsigned byte for task management flags + * @req_flags: int request flags + * Returns: on success pointer to struct fsf_req, NULL otherwise */ -static int zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req) +struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_adapter *adapter, + struct zfcp_unit *unit, + u8 tm_flags, int req_flags) { - struct zfcp_adapter *adapter; - struct zfcp_qdio_queue *req_queue; volatile struct qdio_buffer_element *sbale; - int inc_seq_no; - int new_distance_from_int; - int retval = 0; + struct zfcp_fsf_req *req = NULL; + struct fcp_cmnd_iu *fcp_cmnd_iu; - adapter = fsf_req->adapter; - req_queue = &adapter->request_queue, + if (unlikely(!(atomic_read(&unit->status) & + ZFCP_STATUS_COMMON_UNBLOCKED))) + return NULL; + spin_lock(&adapter->req_q.lock); + if (!atomic_read(&adapter->req_q.count)) + goto out; + req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags, + adapter->pool.fsf_req_scsi); + if (unlikely(IS_ERR(req))) + goto out; - /* FIXME(debug): remove it later */ - sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_first, 0); - ZFCP_LOG_DEBUG("SBALE0 flags=0x%x\n", sbale[0].flags); - ZFCP_LOG_TRACE("HEX DUMP OF SBALE1 PAYLOAD:\n"); - ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, (char *) sbale[1].addr, - sbale[1].length); + req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT; + req->data = unit; + req->handler = zfcp_fsf_send_fcp_command_handler; + req->qtcb->header.lun_handle = unit->handle; + req->qtcb->header.port_handle = unit->port->handle; + req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND; + req->qtcb->bottom.io.service_class = FSF_CLASS_3; + req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) + + sizeof(fcp_dl_t); + + sbale = zfcp_qdio_sbale_req(req); + sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE; + sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; - /* put allocated FSF request into hash table */ - spin_lock(&adapter->req_list_lock); - zfcp_reqlist_add(adapter, fsf_req); - spin_unlock(&adapter->req_list_lock); + fcp_cmnd_iu = (struct fcp_cmnd_iu *) &req->qtcb->bottom.io.fcp_cmnd; + fcp_cmnd_iu->fcp_lun = unit->fcp_lun; + fcp_cmnd_iu->task_management_flags = tm_flags; - inc_seq_no = (fsf_req->qtcb != NULL); + zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT); + if 
(!zfcp_fsf_req_send(req)) + goto out; - ZFCP_LOG_TRACE("request queue of adapter %s: " - "next free SBAL is %i, %i free SBALs\n", - zfcp_get_busid_by_adapter(adapter), - req_queue->free_index, - atomic_read(&req_queue->free_count)); + zfcp_fsf_req_free(req); + req = NULL; +out: + spin_unlock(&adapter->req_q.lock); + return req; +} - ZFCP_LOG_DEBUG("calling do_QDIO adapter %s, flags=0x%x, queue_no=%i, " - "index_in_queue=%i, count=%i, buffers=%p\n", - zfcp_get_busid_by_adapter(adapter), - QDIO_FLAG_SYNC_OUTPUT, - 0, fsf_req->sbal_first, fsf_req->sbal_number, - &req_queue->buffer[fsf_req->sbal_first]); +static void zfcp_fsf_control_file_handler(struct zfcp_fsf_req *req) +{ + if (req->qtcb->header.fsf_status != FSF_GOOD) + req->status |= ZFCP_STATUS_FSFREQ_ERROR; +} - /* - * adjust the number of free SBALs in request queue as well as - * position of first one - */ - atomic_sub(fsf_req->sbal_number, &req_queue->free_count); - ZFCP_LOG_TRACE("free_count=%d\n", atomic_read(&req_queue->free_count)); - req_queue->free_index += fsf_req->sbal_number; /* increase */ - req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap if needed */ - new_distance_from_int = zfcp_qdio_determine_pci(req_queue, fsf_req); +/** + * zfcp_fsf_control_file - control file upload/download + * @adapter: pointer to struct zfcp_adapter + * @fsf_cfdc: pointer to struct zfcp_fsf_cfdc + * Returns: on success pointer to struct zfcp_fsf_req, NULL otherwise + */ +struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, + struct zfcp_fsf_cfdc *fsf_cfdc) +{ + volatile struct qdio_buffer_element *sbale; + struct zfcp_fsf_req *req = NULL; + struct fsf_qtcb_bottom_support *bottom; + int direction, retval = -EIO, bytes; + + if (!(adapter->adapter_features & FSF_FEATURE_CFDC)) + return ERR_PTR(-EOPNOTSUPP); + + switch (fsf_cfdc->command) { + case FSF_QTCB_DOWNLOAD_CONTROL_FILE: + direction = SBAL_FLAGS0_TYPE_WRITE; + break; + case FSF_QTCB_UPLOAD_CONTROL_FILE: + direction = SBAL_FLAGS0_TYPE_READ; + break; + default: + return ERR_PTR(-EINVAL); + } - fsf_req->issued = get_clock(); + spin_lock(&adapter->req_q.lock); + if (zfcp_fsf_req_sbal_get(adapter)) + goto out; - retval = do_QDIO(adapter->ccw_device, - QDIO_FLAG_SYNC_OUTPUT, - 0, fsf_req->sbal_first, fsf_req->sbal_number, NULL); + req = zfcp_fsf_req_create(adapter, fsf_cfdc->command, 0, NULL); + if (unlikely(IS_ERR(req))) { + retval = -EPERM; + goto out; + } - if (unlikely(retval)) { - /* Queues are down..... 
*/ - retval = -EIO; - del_timer(&fsf_req->timer); - spin_lock(&adapter->req_list_lock); - zfcp_reqlist_remove(adapter, fsf_req); - spin_unlock(&adapter->req_list_lock); - /* undo changes in request queue made for this request */ - zfcp_qdio_zero_sbals(req_queue->buffer, - fsf_req->sbal_first, fsf_req->sbal_number); - atomic_add(fsf_req->sbal_number, &req_queue->free_count); - req_queue->free_index -= fsf_req->sbal_number; - req_queue->free_index += QDIO_MAX_BUFFERS_PER_Q; - req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap */ - zfcp_erp_adapter_reopen(adapter, 0, 116, fsf_req); - } else { - req_queue->distance_from_int = new_distance_from_int; - /* - * increase FSF sequence counter - - * this must only be done for request successfully enqueued to - * QDIO this rejected requests may be cleaned up by calling - * routines resulting in missing sequence counter values - * otherwise, - */ + req->handler = zfcp_fsf_control_file_handler; + + sbale = zfcp_qdio_sbale_req(req); + sbale[0].flags |= direction; - /* Don't increase for unsolicited status */ - if (inc_seq_no) - adapter->fsf_req_seq_no++; + bottom = &req->qtcb->bottom.support; + bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE; + bottom->option = fsf_cfdc->option; - /* count FSF requests pending */ - atomic_inc(&adapter->reqs_active); + bytes = zfcp_qdio_sbals_from_sg(req, direction, fsf_cfdc->sg, + FSF_MAX_SBALS_PER_REQ); + if (bytes != ZFCP_CFDC_MAX_SIZE) { + retval = -ENOMEM; + zfcp_fsf_req_free(req); + goto out; } - return retval; -} -#undef ZFCP_LOG_AREA + zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); + retval = zfcp_fsf_req_send(req); +out: + spin_unlock(&adapter->req_q.lock); + + if (!retval) { + wait_event(req->completion_wq, + req->status & ZFCP_STATUS_FSFREQ_COMPLETED); + return req; + } + return ERR_PTR(retval); +} diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h index 099970b27001..bf94b4da0763 100644 --- a/drivers/s390/scsi/zfcp_fsf.h +++ b/drivers/s390/scsi/zfcp_fsf.h @@ -1,27 +1,16 @@ /* - * This file is part of the zfcp device driver for - * FCP adapters for IBM System z9 and zSeries. + * zfcp device driver * - * (C) Copyright IBM Corp. 2002, 2006 + * Interface to the FSF support functions. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * Copyright IBM Corporation 2002, 2008 */ #ifndef FSF_H #define FSF_H +#include <linux/pfn.h> + #define FSF_QTCB_CURRENT_VERSION 0x00000001 /* FSF commands */ @@ -258,6 +247,16 @@ #define FSF_UNIT_ACCESS_EXCLUSIVE 0x02000000 #define FSF_UNIT_ACCESS_OUTBOUND_TRANSFER 0x10000000 +/* FSF interface for CFDC */ +#define ZFCP_CFDC_MAX_SIZE 127 * 1024 +#define ZFCP_CFDC_PAGES PFN_UP(ZFCP_CFDC_MAX_SIZE) + +struct zfcp_fsf_cfdc { + struct scatterlist sg[ZFCP_CFDC_PAGES]; + u32 command; + u32 option; +}; + struct fsf_queue_designator { u8 cssid; u8 chpid; @@ -288,6 +287,18 @@ struct fsf_bit_error_payload { u32 current_transmit_b2b_credit; } __attribute__ ((packed)); +struct fsf_link_down_info { + u32 error_code; + u32 res1; + u8 res2[2]; + u8 primary_status; + u8 ioerr_code; + u8 action_code; + u8 reason_code; + u8 explanation_code; + u8 vendor_specific_code; +} __attribute__ ((packed)); + struct fsf_status_read_buffer { u32 status_type; u32 status_subtype; @@ -298,7 +309,12 @@ struct fsf_status_read_buffer { u32 class; u64 fcp_lun; u8 res3[24]; - u8 payload[FSF_STATUS_READ_PAYLOAD_SIZE]; + union { + u8 data[FSF_STATUS_READ_PAYLOAD_SIZE]; + u32 word[FSF_STATUS_READ_PAYLOAD_SIZE/sizeof(u32)]; + struct fsf_link_down_info link_down_info; + struct fsf_bit_error_payload bit_error; + } payload; } __attribute__ ((packed)); struct fsf_qual_version_error { @@ -311,23 +327,19 @@ struct fsf_qual_sequence_error { u32 res1[3]; } __attribute__ ((packed)); -struct fsf_link_down_info { - u32 error_code; - u32 res1; - u8 res2[2]; - u8 primary_status; - u8 ioerr_code; - u8 action_code; - u8 reason_code; - u8 explanation_code; - u8 vendor_specific_code; +struct fsf_qual_latency_info { + u32 channel_lat; + u32 fabric_lat; + u8 res1[8]; } __attribute__ ((packed)); union fsf_prot_status_qual { + u32 word[FSF_PROT_STATUS_QUAL_SIZE / sizeof(u32)]; u64 doubleword[FSF_PROT_STATUS_QUAL_SIZE / sizeof(u64)]; struct fsf_qual_version_error version_error; struct fsf_qual_sequence_error sequence_error; struct fsf_link_down_info link_down_info; + struct fsf_qual_latency_info latency_info; } __attribute__ ((packed)); struct fsf_qtcb_prefix { @@ -437,7 +449,9 @@ struct fsf_qtcb_bottom_config { u32 fc_link_speed; u32 adapter_type; u32 peer_d_id; - u8 res2[12]; + u8 res1[2]; + u16 timer_interval; + u8 res2[8]; u32 s_id; struct fsf_nport_serv_param nport_serv_param; u8 reserved_nport_serv_param[16]; diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index 8ca5f074c687..d6dbd653fde9 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c @@ -1,241 +1,101 @@ /* - * This file is part of the zfcp device driver for - * FCP adapters for IBM System z9 and zSeries. + * zfcp device driver * - * (C) Copyright IBM Corp. 2002, 2006 + * Setup and helper functions to access QDIO. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * Copyright IBM Corporation 2002, 2008 */ #include "zfcp_ext.h" -static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *, int); -static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_get - (struct zfcp_qdio_queue *, int, int); -static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_resp - (struct zfcp_fsf_req *, int, int); -static volatile struct qdio_buffer_element *zfcp_qdio_sbal_chain - (struct zfcp_fsf_req *, unsigned long); -static volatile struct qdio_buffer_element *zfcp_qdio_sbale_next - (struct zfcp_fsf_req *, unsigned long); -static int zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *, int, int); -static inline int zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *); -static void zfcp_qdio_sbale_fill - (struct zfcp_fsf_req *, unsigned long, void *, int); -static int zfcp_qdio_sbals_from_segment - (struct zfcp_fsf_req *, unsigned long, void *, unsigned long); - -static qdio_handler_t zfcp_qdio_request_handler; -static qdio_handler_t zfcp_qdio_response_handler; -static int zfcp_qdio_handler_error_check(struct zfcp_adapter *, - unsigned int, unsigned int, unsigned int, int, int); - -#define ZFCP_LOG_AREA ZFCP_LOG_AREA_QDIO - -/* - * Frees BUFFER memory for each of the pointers of the struct qdio_buffer array - * in the adapter struct sbuf is the pointer array. - * - * locks: must only be called with zfcp_data.config_sema taken - */ -static void -zfcp_qdio_buffers_dequeue(struct qdio_buffer **sbuf) -{ - int pos; - - for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos += QBUFF_PER_PAGE) - free_page((unsigned long) sbuf[pos]); -} +/* FIXME(tune): free space should be one max. SBAL chain plus what? */ +#define ZFCP_QDIO_PCI_INTERVAL (QDIO_MAX_BUFFERS_PER_Q \ + - (FSF_MAX_SBALS_PER_REQ + 4)) +#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer)) -/* - * Allocates BUFFER memory to each of the pointers of the qdio_buffer_t - * array in the adapter struct. 
- * Cur_buf is the pointer array - * - * returns: zero on success else -ENOMEM - * locks: must only be called with zfcp_data.config_sema taken - */ -static int -zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbuf) +static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal) { int pos; for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos += QBUFF_PER_PAGE) { - sbuf[pos] = (struct qdio_buffer *) get_zeroed_page(GFP_KERNEL); - if (!sbuf[pos]) { - zfcp_qdio_buffers_dequeue(sbuf); + sbal[pos] = (struct qdio_buffer *) get_zeroed_page(GFP_KERNEL); + if (!sbal[pos]) return -ENOMEM; - } } for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos++) if (pos % QBUFF_PER_PAGE) - sbuf[pos] = sbuf[pos - 1] + 1; + sbal[pos] = sbal[pos - 1] + 1; return 0; } -/* locks: must only be called with zfcp_data.config_sema taken */ -int -zfcp_qdio_allocate_queues(struct zfcp_adapter *adapter) +static volatile struct qdio_buffer_element * +zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx) { - int ret; - - ret = zfcp_qdio_buffers_enqueue(adapter->request_queue.buffer); - if (ret) - return ret; - return zfcp_qdio_buffers_enqueue(adapter->response_queue.buffer); + return &q->sbal[sbal_idx]->element[sbale_idx]; } -/* locks: must only be called with zfcp_data.config_sema taken */ -void -zfcp_qdio_free_queues(struct zfcp_adapter *adapter) +/** + * zfcp_qdio_free - free memory used by request- and resposne queue + * @adapter: pointer to the zfcp_adapter structure + */ +void zfcp_qdio_free(struct zfcp_adapter *adapter) { - ZFCP_LOG_TRACE("freeing request_queue buffers\n"); - zfcp_qdio_buffers_dequeue(adapter->request_queue.buffer); + struct qdio_buffer **sbal_req, **sbal_resp; + int p; - ZFCP_LOG_TRACE("freeing response_queue buffers\n"); - zfcp_qdio_buffers_dequeue(adapter->response_queue.buffer); -} + if (adapter->ccw_device) + qdio_free(adapter->ccw_device); -int -zfcp_qdio_allocate(struct zfcp_adapter *adapter) -{ - struct qdio_initialize *init_data; + sbal_req = adapter->req_q.sbal; + sbal_resp = adapter->resp_q.sbal; - init_data = &adapter->qdio_init_data; + for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) { + free_page((unsigned long) sbal_req[p]); + free_page((unsigned long) sbal_resp[p]); + } +} - init_data->cdev = adapter->ccw_device; - init_data->q_format = QDIO_SCSI_QFMT; - memcpy(init_data->adapter_name, zfcp_get_busid_by_adapter(adapter), 8); - ASCEBC(init_data->adapter_name, 8); - init_data->qib_param_field_format = 0; - init_data->qib_param_field = NULL; - init_data->input_slib_elements = NULL; - init_data->output_slib_elements = NULL; - init_data->min_input_threshold = ZFCP_MIN_INPUT_THRESHOLD; - init_data->max_input_threshold = ZFCP_MAX_INPUT_THRESHOLD; - init_data->min_output_threshold = ZFCP_MIN_OUTPUT_THRESHOLD; - init_data->max_output_threshold = ZFCP_MAX_OUTPUT_THRESHOLD; - init_data->no_input_qs = 1; - init_data->no_output_qs = 1; - init_data->input_handler = zfcp_qdio_response_handler; - init_data->output_handler = zfcp_qdio_request_handler; - init_data->int_parm = (unsigned long) adapter; - init_data->flags = QDIO_INBOUND_0COPY_SBALS | - QDIO_OUTBOUND_0COPY_SBALS | QDIO_USE_OUTBOUND_PCIS; - init_data->input_sbal_addr_array = - (void **) (adapter->response_queue.buffer); - init_data->output_sbal_addr_array = - (void **) (adapter->request_queue.buffer); +static void zfcp_qdio_handler_error(struct zfcp_adapter *adapter, u8 id) +{ + dev_warn(&adapter->ccw_device->dev, "QDIO problem occurred.\n"); - return qdio_allocate(init_data); + zfcp_erp_adapter_reopen(adapter, + 
ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | + ZFCP_STATUS_COMMON_ERP_FAILED, id, NULL); } -/* - * function: zfcp_qdio_handler_error_check - * - * purpose: called by the response handler to determine error condition - * - * returns: error flag - * - */ -static int -zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter, unsigned int status, - unsigned int qdio_error, unsigned int siga_error, - int first_element, int elements_processed) +static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt) { - int retval = 0; - - if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) { - retval = -EIO; - - ZFCP_LOG_INFO("QDIO problem occurred (status=0x%x, " - "qdio_error=0x%x, siga_error=0x%x)\n", - status, qdio_error, siga_error); - - zfcp_hba_dbf_event_qdio(adapter, status, qdio_error, siga_error, - first_element, elements_processed); - /* - * Restarting IO on the failed adapter from scratch. - * Since we have been using this adapter, it is save to assume - * that it is not failed but recoverable. The card seems to - * report link-up events by self-initiated queue shutdown. - * That is why we need to clear the link-down flag - * which is set again in case we have missed by a mile. - */ - zfcp_erp_adapter_reopen(adapter, - ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | - ZFCP_STATUS_COMMON_ERP_FAILED, 140, - NULL); + int i, sbal_idx; + + for (i = first; i < first + cnt; i++) { + sbal_idx = i % QDIO_MAX_BUFFERS_PER_Q; + memset(sbal[sbal_idx], 0, sizeof(struct qdio_buffer)); } - return retval; } -/* - * function: zfcp_qdio_request_handler - * - * purpose: is called by QDIO layer for completed SBALs in request queue - * - * returns: (void) - */ -static void -zfcp_qdio_request_handler(struct ccw_device *ccw_device, - unsigned int status, - unsigned int qdio_error, - unsigned int siga_error, - unsigned int queue_number, - int first_element, - int elements_processed, - unsigned long int_parm) +static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err, + int queue_no, int first, int count, + unsigned long parm) { - struct zfcp_adapter *adapter; - struct zfcp_qdio_queue *queue; - - adapter = (struct zfcp_adapter *) int_parm; - queue = &adapter->request_queue; + struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm; + struct zfcp_qdio_queue *queue = &adapter->req_q; - ZFCP_LOG_DEBUG("adapter %s, first=%d, elements_processed=%d\n", - zfcp_get_busid_by_adapter(adapter), - first_element, elements_processed); - - if (unlikely(zfcp_qdio_handler_error_check(adapter, status, qdio_error, - siga_error, first_element, - elements_processed))) - goto out; - /* - * we stored address of struct zfcp_adapter data structure - * associated with irq in int_parm - */ + if (unlikely(qdio_err)) { + zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count); + zfcp_qdio_handler_error(adapter, 140); + return; + } /* cleanup all SBALs being program-owned now */ - zfcp_qdio_zero_sbals(queue->buffer, first_element, elements_processed); + zfcp_qdio_zero_sbals(queue->sbal, first, count); - /* increase free space in outbound queue */ - atomic_add(elements_processed, &queue->free_count); - ZFCP_LOG_DEBUG("free_count=%d\n", atomic_read(&queue->free_count)); + atomic_add(count, &queue->count); wake_up(&adapter->request_wq); - ZFCP_LOG_DEBUG("elements_processed=%d, free count=%d\n", - elements_processed, atomic_read(&queue->free_count)); - out: - return; } -/** - * zfcp_qdio_reqid_check - checks for valid reqids. 
- */ static void zfcp_qdio_reqid_check(struct zfcp_adapter *adapter, - unsigned long req_id) + unsigned long req_id, int sbal_idx) { struct zfcp_fsf_req *fsf_req; unsigned long flags; @@ -248,203 +108,114 @@ static void zfcp_qdio_reqid_check(struct zfcp_adapter *adapter, * Unknown request means that we have potentially memory * corruption and must stop the machine immediatly. */ - panic("error: unknown request id (%ld) on adapter %s.\n", + panic("error: unknown request id (%lx) on adapter %s.\n", req_id, zfcp_get_busid_by_adapter(adapter)); zfcp_reqlist_remove(adapter, fsf_req); - atomic_dec(&adapter->reqs_active); spin_unlock_irqrestore(&adapter->req_list_lock, flags); - /* finish the FSF request */ + fsf_req->sbal_response = sbal_idx; zfcp_fsf_req_complete(fsf_req); } -/* - * function: zfcp_qdio_response_handler - * - * purpose: is called by QDIO layer for completed SBALs in response queue - * - * returns: (void) - */ -static void -zfcp_qdio_response_handler(struct ccw_device *ccw_device, - unsigned int status, - unsigned int qdio_error, - unsigned int siga_error, - unsigned int queue_number, - int first_element, - int elements_processed, - unsigned long int_parm) +static void zfcp_qdio_resp_put_back(struct zfcp_adapter *adapter, int processed) { - struct zfcp_adapter *adapter; - struct zfcp_qdio_queue *queue; - int buffer_index; - int i; - struct qdio_buffer *buffer; - int retval = 0; - u8 count; - u8 start; - volatile struct qdio_buffer_element *buffere = NULL; - int buffere_index; - - adapter = (struct zfcp_adapter *) int_parm; - queue = &adapter->response_queue; - - if (unlikely(zfcp_qdio_handler_error_check(adapter, status, qdio_error, - siga_error, first_element, - elements_processed))) - goto out; + struct zfcp_qdio_queue *queue = &adapter->resp_q; + struct ccw_device *cdev = adapter->ccw_device; + u8 count, start = queue->first; + unsigned int retval; - /* - * we stored address of struct zfcp_adapter data structure - * associated with irq in int_parm - */ + count = atomic_read(&queue->count) + processed; + + retval = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, start, count); + + if (unlikely(retval)) { + atomic_set(&queue->count, count); + /* FIXME: Recover this with an adapter reopen? 
*/ + } else { + queue->first += count; + queue->first %= QDIO_MAX_BUFFERS_PER_Q; + atomic_set(&queue->count, 0); + } +} + +static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err, + int queue_no, int first, int count, + unsigned long parm) +{ + struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm; + struct zfcp_qdio_queue *queue = &adapter->resp_q; + volatile struct qdio_buffer_element *sbale; + int sbal_idx, sbale_idx, sbal_no; + + if (unlikely(qdio_err)) { + zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count); + zfcp_qdio_handler_error(adapter, 147); + return; + } - buffere = &(queue->buffer[first_element]->element[0]); - ZFCP_LOG_DEBUG("first BUFFERE flags=0x%x\n", buffere->flags); /* * go through all SBALs from input queue currently * returned by QDIO layer */ - - for (i = 0; i < elements_processed; i++) { - - buffer_index = first_element + i; - buffer_index %= QDIO_MAX_BUFFERS_PER_Q; - buffer = queue->buffer[buffer_index]; + for (sbal_no = 0; sbal_no < count; sbal_no++) { + sbal_idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q; /* go through all SBALEs of SBAL */ - for (buffere_index = 0; - buffere_index < QDIO_MAX_ELEMENTS_PER_BUFFER; - buffere_index++) { - - /* look for QDIO request identifiers in SB */ - buffere = &buffer->element[buffere_index]; + for (sbale_idx = 0; sbale_idx < QDIO_MAX_ELEMENTS_PER_BUFFER; + sbale_idx++) { + sbale = zfcp_qdio_sbale(queue, sbal_idx, sbale_idx); zfcp_qdio_reqid_check(adapter, - (unsigned long) buffere->addr); - - /* - * A single used SBALE per inbound SBALE has been - * implemented by QDIO so far. Hope they will - * do some optimisation. Will need to change to - * unlikely() then. - */ - if (likely(buffere->flags & SBAL_FLAGS_LAST_ENTRY)) + (unsigned long) sbale->addr, + sbal_idx); + if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY)) break; }; - if (unlikely(!(buffere->flags & SBAL_FLAGS_LAST_ENTRY))) { - ZFCP_LOG_NORMAL("bug: End of inbound data " - "not marked!\n"); - } + if (unlikely(!(sbale->flags & SBAL_FLAGS_LAST_ENTRY))) + dev_warn(&adapter->ccw_device->dev, + "Protocol violation by adapter. 
" + "Continuing operations.\n"); } /* * put range of SBALs back to response queue * (including SBALs which have already been free before) */ - count = atomic_read(&queue->free_count) + elements_processed; - start = queue->free_index; - - ZFCP_LOG_TRACE("calling do_QDIO on adapter %s (flags=0x%x, " - "queue_no=%i, index_in_queue=%i, count=%i, " - "buffers=0x%lx\n", - zfcp_get_busid_by_adapter(adapter), - QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT, - 0, start, count, (unsigned long) &queue->buffer[start]); - - retval = do_QDIO(ccw_device, - QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT, - 0, start, count, NULL); - - if (unlikely(retval)) { - atomic_set(&queue->free_count, count); - ZFCP_LOG_DEBUG("clearing of inbound data regions failed, " - "queues may be down " - "(count=%d, start=%d, retval=%d)\n", - count, start, retval); - } else { - queue->free_index += count; - queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; - atomic_set(&queue->free_count, 0); - ZFCP_LOG_TRACE("%i buffers enqueued to response " - "queue at position %i\n", count, start); - } - out: - return; + zfcp_qdio_resp_put_back(adapter, count); } /** - * zfcp_qdio_sbale_get - return pointer to SBALE of qdio_queue - * @queue: queue from which SBALE should be returned - * @sbal: specifies number of SBAL in queue - * @sbale: specifes number of SBALE in SBAL - */ -static inline volatile struct qdio_buffer_element * -zfcp_qdio_sbale_get(struct zfcp_qdio_queue *queue, int sbal, int sbale) -{ - return &queue->buffer[sbal]->element[sbale]; -} - -/** - * zfcp_qdio_sbale_req - return pointer to SBALE of request_queue for - * a struct zfcp_fsf_req + * zfcp_qdio_sbale_req - return ptr to SBALE of req_q for a struct zfcp_fsf_req + * @fsf_req: pointer to struct fsf_req + * Returns: pointer to qdio_buffer_element (SBALE) structure */ volatile struct qdio_buffer_element * -zfcp_qdio_sbale_req(struct zfcp_fsf_req *fsf_req, int sbal, int sbale) +zfcp_qdio_sbale_req(struct zfcp_fsf_req *req) { - return zfcp_qdio_sbale_get(&fsf_req->adapter->request_queue, - sbal, sbale); + return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last, 0); } /** - * zfcp_qdio_sbale_resp - return pointer to SBALE of response_queue for - * a struct zfcp_fsf_req - */ -static inline volatile struct qdio_buffer_element * -zfcp_qdio_sbale_resp(struct zfcp_fsf_req *fsf_req, int sbal, int sbale) -{ - return zfcp_qdio_sbale_get(&fsf_req->adapter->response_queue, - sbal, sbale); -} - -/** - * zfcp_qdio_sbale_curr - return current SBALE on request_queue for - * a struct zfcp_fsf_req + * zfcp_qdio_sbale_curr - return curr SBALE on req_q for a struct zfcp_fsf_req + * @fsf_req: pointer to struct fsf_req + * Returns: pointer to qdio_buffer_element (SBALE) structure */ volatile struct qdio_buffer_element * -zfcp_qdio_sbale_curr(struct zfcp_fsf_req *fsf_req) +zfcp_qdio_sbale_curr(struct zfcp_fsf_req *req) { - return zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, - fsf_req->sbale_curr); + return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last, + req->sbale_curr); } -/** - * zfcp_qdio_sbal_limit - determine maximum number of SBALs that can be used - * on the request_queue for a struct zfcp_fsf_req - * @fsf_req: the number of the last SBAL that can be used is stored herein - * @max_sbals: used to pass an upper limit for the number of SBALs - * - * Note: We can assume at least one free SBAL in the request_queue when called. 
- */ -static void -zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals) +static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals) { - int count = atomic_read(&fsf_req->adapter->request_queue.free_count); + int count = atomic_read(&fsf_req->adapter->req_q.count); count = min(count, max_sbals); - fsf_req->sbal_last = fsf_req->sbal_first; - fsf_req->sbal_last += (count - 1); - fsf_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q; + fsf_req->sbal_limit = (fsf_req->sbal_first + count - 1) + % QDIO_MAX_BUFFERS_PER_Q; } -/** - * zfcp_qdio_sbal_chain - chain SBALs if more than one SBAL is needed for a - * request - * @fsf_req: zfcp_fsf_req to be processed - * @sbtype: SBAL flags which have to be set in first SBALE of new SBAL - * - * This function changes sbal_curr, sbale_curr, sbal_number of fsf_req. - */ static volatile struct qdio_buffer_element * zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) { @@ -455,16 +226,16 @@ zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) sbale->flags |= SBAL_FLAGS_LAST_ENTRY; /* don't exceed last allowed SBAL */ - if (fsf_req->sbal_curr == fsf_req->sbal_last) + if (fsf_req->sbal_last == fsf_req->sbal_limit) return NULL; /* set chaining flag in first SBALE of current SBAL */ - sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); + sbale = zfcp_qdio_sbale_req(fsf_req); sbale->flags |= SBAL_FLAGS0_MORE_SBALS; /* calculate index of next SBAL */ - fsf_req->sbal_curr++; - fsf_req->sbal_curr %= QDIO_MAX_BUFFERS_PER_Q; + fsf_req->sbal_last++; + fsf_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q; /* keep this requests number of SBALs up-to-date */ fsf_req->sbal_number++; @@ -479,214 +250,246 @@ zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) return sbale; } -/** - * zfcp_qdio_sbale_next - switch to next SBALE, chain SBALs if needed - */ static volatile struct qdio_buffer_element * zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) { if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL) return zfcp_qdio_sbal_chain(fsf_req, sbtype); - fsf_req->sbale_curr++; - return zfcp_qdio_sbale_curr(fsf_req); } -/** - * zfcp_qdio_sbals_zero - initialize SBALs between first and last in queue - * with zero from - */ -static int -zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *queue, int first, int last) -{ - struct qdio_buffer **buf = queue->buffer; - int curr = first; - int count = 0; - - for(;;) { - curr %= QDIO_MAX_BUFFERS_PER_Q; - count++; - memset(buf[curr], 0, sizeof(struct qdio_buffer)); - if (curr == last) - break; - curr++; - } - return count; -} - - -/** - * zfcp_qdio_sbals_wipe - reset all changes in SBALs for an fsf_req - */ -static inline int -zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *fsf_req) +static void zfcp_qdio_undo_sbals(struct zfcp_fsf_req *fsf_req) { - return zfcp_qdio_sbals_zero(&fsf_req->adapter->request_queue, - fsf_req->sbal_first, fsf_req->sbal_curr); + struct qdio_buffer **sbal = fsf_req->adapter->req_q.sbal; + int first = fsf_req->sbal_first; + int last = fsf_req->sbal_last; + int count = (last - first + QDIO_MAX_BUFFERS_PER_Q) % + QDIO_MAX_BUFFERS_PER_Q + 1; + zfcp_qdio_zero_sbals(sbal, first, count); } - -/** - * zfcp_qdio_sbale_fill - set address and length in current SBALE - * on request_queue - */ -static void -zfcp_qdio_sbale_fill(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, - void *addr, int length) +static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req, + unsigned int sbtype, void *start_addr, + unsigned int total_length) { 
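        /*
         * Illustrative note (sketch, assuming 4 KiB pages): each SBALE maps one
         * contiguous piece that must not cross a page boundary, so the loop
         * below clamps every piece at the next page edge:
         *
         *   length = min(remaining, PAGE_SIZE - ((unsigned long)addr & (PAGE_SIZE - 1)));
         *
         * e.g. a 6000-byte segment starting at page offset 0x300 is split into
         * 3328 + 2672 bytes and thus consumes two SBALEs.
         */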
volatile struct qdio_buffer_element *sbale; - - sbale = zfcp_qdio_sbale_curr(fsf_req); - sbale->addr = addr; - sbale->length = length; -} - -/** - * zfcp_qdio_sbals_from_segment - map memory segment to SBALE(s) - * @fsf_req: request to be processed - * @sbtype: SBALE flags - * @start_addr: address of memory segment - * @total_length: length of memory segment - * - * Alignment and length of the segment determine how many SBALEs are needed - * for the memory segment. - */ -static int -zfcp_qdio_sbals_from_segment(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, - void *start_addr, unsigned long total_length) -{ unsigned long remaining, length; void *addr; - /* split segment up heeding page boundaries */ + /* split segment up */ for (addr = start_addr, remaining = total_length; remaining > 0; addr += length, remaining -= length) { - /* get next free SBALE for new piece */ - if (NULL == zfcp_qdio_sbale_next(fsf_req, sbtype)) { - /* no SBALE left, clean up and leave */ - zfcp_qdio_sbals_wipe(fsf_req); + sbale = zfcp_qdio_sbale_next(fsf_req, sbtype); + if (!sbale) { + zfcp_qdio_undo_sbals(fsf_req); return -EINVAL; } - /* calculate length of new piece */ + + /* new piece must not exceed next page boundary */ length = min(remaining, - (PAGE_SIZE - ((unsigned long) addr & + (PAGE_SIZE - ((unsigned long)addr & (PAGE_SIZE - 1)))); - /* fill current SBALE with calculated piece */ - zfcp_qdio_sbale_fill(fsf_req, sbtype, addr, length); + sbale->addr = addr; + sbale->length = length; } - return total_length; + return 0; } - /** * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list * @fsf_req: request to be processed * @sbtype: SBALE flags * @sg: scatter-gather list - * @sg_count: number of elements in scatter-gather list * @max_sbals: upper bound for number of SBALs to be used + * Returns: number of bytes, or error (negativ) */ -int -zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, - struct scatterlist *sgl, int sg_count, int max_sbals) +int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, + struct scatterlist *sg, int max_sbals) { - int sg_index; - struct scatterlist *sg_segment; - int retval; volatile struct qdio_buffer_element *sbale; - int bytes = 0; + int retval, bytes = 0; /* figure out last allowed SBAL */ zfcp_qdio_sbal_limit(fsf_req, max_sbals); - /* set storage-block type for current SBAL */ - sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); + /* set storage-block type for this request */ + sbale = zfcp_qdio_sbale_req(fsf_req); sbale->flags |= sbtype; - /* process all segements of scatter-gather list */ - for_each_sg(sgl, sg_segment, sg_count, sg_index) { - retval = zfcp_qdio_sbals_from_segment( - fsf_req, - sbtype, - zfcp_sg_to_address(sg_segment), - sg_segment->length); - if (retval < 0) { - bytes = retval; - goto out; - } else - bytes += retval; + for (; sg; sg = sg_next(sg)) { + retval = zfcp_qdio_fill_sbals(fsf_req, sbtype, sg_virt(sg), + sg->length); + if (retval < 0) + return retval; + bytes += sg->length; } + /* assume that no other SBALEs are to follow in the same SBAL */ sbale = zfcp_qdio_sbale_curr(fsf_req); sbale->flags |= SBAL_FLAGS_LAST_ENTRY; -out: + return bytes; } - /** - * zfcp_qdio_sbals_from_scsicmnd - fill SBALs from scsi command - * @fsf_req: request to be processed - * @sbtype: SBALE flags - * @scsi_cmnd: either scatter-gather list or buffer contained herein is used - * to fill SBALs + * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO + * @fsf_req: pointer to struct 
zfcp_fsf_req + * Returns: 0 on success, error otherwise */ -int -zfcp_qdio_sbals_from_scsicmnd(struct zfcp_fsf_req *fsf_req, - unsigned long sbtype, struct scsi_cmnd *scsi_cmnd) +int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req) { - return zfcp_qdio_sbals_from_sg(fsf_req, sbtype, scsi_sglist(scsi_cmnd), - scsi_sg_count(scsi_cmnd), - ZFCP_MAX_SBALS_PER_REQ); + struct zfcp_adapter *adapter = fsf_req->adapter; + struct zfcp_qdio_queue *req_q = &adapter->req_q; + int first = fsf_req->sbal_first; + int count = fsf_req->sbal_number; + int retval, pci, pci_batch; + volatile struct qdio_buffer_element *sbale; + + /* acknowledgements for transferred buffers */ + pci_batch = req_q->pci_batch + count; + if (unlikely(pci_batch >= ZFCP_QDIO_PCI_INTERVAL)) { + pci_batch %= ZFCP_QDIO_PCI_INTERVAL; + pci = first + count - (pci_batch + 1); + pci %= QDIO_MAX_BUFFERS_PER_Q; + sbale = zfcp_qdio_sbale(req_q, pci, 0); + sbale->flags |= SBAL_FLAGS0_PCI; + } + + retval = do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0, first, + count); + if (unlikely(retval)) { + zfcp_qdio_zero_sbals(req_q->sbal, first, count); + return retval; + } + + /* account for transferred buffers */ + atomic_sub(count, &req_q->count); + req_q->first += count; + req_q->first %= QDIO_MAX_BUFFERS_PER_Q; + req_q->pci_batch = pci_batch; + return 0; } /** - * zfcp_qdio_determine_pci - set PCI flag in first SBALE on qdio queue if needed + * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data + * @adapter: pointer to struct zfcp_adapter + * Returns: -ENOMEM on memory allocation error or return value from + * qdio_allocate */ -int -zfcp_qdio_determine_pci(struct zfcp_qdio_queue *req_queue, - struct zfcp_fsf_req *fsf_req) +int zfcp_qdio_allocate(struct zfcp_adapter *adapter) { - int new_distance_from_int; - int pci_pos; - volatile struct qdio_buffer_element *sbale; + struct qdio_initialize *init_data; - new_distance_from_int = req_queue->distance_from_int + - fsf_req->sbal_number; - - if (unlikely(new_distance_from_int >= ZFCP_QDIO_PCI_INTERVAL)) { - new_distance_from_int %= ZFCP_QDIO_PCI_INTERVAL; - pci_pos = fsf_req->sbal_first; - pci_pos += fsf_req->sbal_number; - pci_pos -= new_distance_from_int; - pci_pos -= 1; - pci_pos %= QDIO_MAX_BUFFERS_PER_Q; - sbale = zfcp_qdio_sbale_req(fsf_req, pci_pos, 0); - sbale->flags |= SBAL_FLAGS0_PCI; - } - return new_distance_from_int; + if (zfcp_qdio_buffers_enqueue(adapter->req_q.sbal) || + zfcp_qdio_buffers_enqueue(adapter->resp_q.sbal)) + return -ENOMEM; + + init_data = &adapter->qdio_init_data; + + init_data->cdev = adapter->ccw_device; + init_data->q_format = QDIO_ZFCP_QFMT; + memcpy(init_data->adapter_name, zfcp_get_busid_by_adapter(adapter), 8); + ASCEBC(init_data->adapter_name, 8); + init_data->qib_param_field_format = 0; + init_data->qib_param_field = NULL; + init_data->input_slib_elements = NULL; + init_data->output_slib_elements = NULL; + init_data->no_input_qs = 1; + init_data->no_output_qs = 1; + init_data->input_handler = zfcp_qdio_int_resp; + init_data->output_handler = zfcp_qdio_int_req; + init_data->int_parm = (unsigned long) adapter; + init_data->flags = QDIO_INBOUND_0COPY_SBALS | + QDIO_OUTBOUND_0COPY_SBALS | QDIO_USE_OUTBOUND_PCIS; + init_data->input_sbal_addr_array = + (void **) (adapter->resp_q.sbal); + init_data->output_sbal_addr_array = + (void **) (adapter->req_q.sbal); + + return qdio_allocate(init_data); } -/* - * function: zfcp_zero_sbals - * - * purpose: zeros specified range of SBALs - * - * returns: +/** + * zfcp_close_qdio - close qdio queues for an adapter 
*/ -void -zfcp_qdio_zero_sbals(struct qdio_buffer *buf[], int first, int clean_count) +void zfcp_qdio_close(struct zfcp_adapter *adapter) { - int cur_pos; - int index; - - for (cur_pos = first; cur_pos < (first + clean_count); cur_pos++) { - index = cur_pos % QDIO_MAX_BUFFERS_PER_Q; - memset(buf[index], 0, sizeof (struct qdio_buffer)); - ZFCP_LOG_TRACE("zeroing BUFFER %d at address %p\n", - index, buf[index]); + struct zfcp_qdio_queue *req_q; + int first, count; + + if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) + return; + + /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */ + req_q = &adapter->req_q; + spin_lock(&req_q->lock); + atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); + spin_unlock(&req_q->lock); + + qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR); + + /* cleanup used outbound sbals */ + count = atomic_read(&req_q->count); + if (count < QDIO_MAX_BUFFERS_PER_Q) { + first = (req_q->first + count) % QDIO_MAX_BUFFERS_PER_Q; + count = QDIO_MAX_BUFFERS_PER_Q - count; + zfcp_qdio_zero_sbals(req_q->sbal, first, count); } + req_q->first = 0; + atomic_set(&req_q->count, 0); + req_q->pci_batch = 0; + adapter->resp_q.first = 0; + atomic_set(&adapter->resp_q.count, 0); } -#undef ZFCP_LOG_AREA +/** + * zfcp_qdio_open - prepare and initialize response queue + * @adapter: pointer to struct zfcp_adapter + * Returns: 0 on success, otherwise -EIO + */ +int zfcp_qdio_open(struct zfcp_adapter *adapter) +{ + volatile struct qdio_buffer_element *sbale; + int cc; + + if (atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) + return -EIO; + + if (qdio_establish(&adapter->qdio_init_data)) { + dev_err(&adapter->ccw_device->dev, + "Establish of QDIO queues failed.\n"); + return -EIO; + } + + if (qdio_activate(adapter->ccw_device)) { + dev_err(&adapter->ccw_device->dev, + "Activate of QDIO queues failed.\n"); + goto failed_qdio; + } + + for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) { + sbale = &(adapter->resp_q.sbal[cc]->element[0]); + sbale->length = 0; + sbale->flags = SBAL_FLAGS_LAST_ENTRY; + sbale->addr = NULL; + } + + if (do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_INPUT, 0, 0, + QDIO_MAX_BUFFERS_PER_Q)) { + dev_err(&adapter->ccw_device->dev, + "Init of QDIO response queue failed.\n"); + goto failed_qdio; + } + + /* set index of first avalable SBALS / number of available SBALS */ + adapter->req_q.first = 0; + atomic_set(&adapter->req_q.count, QDIO_MAX_BUFFERS_PER_Q); + adapter->req_q.pci_batch = 0; + + return 0; + +failed_qdio: + qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR); + return -EIO; +} diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 01687559dc06..aeae56b00b45 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -1,220 +1,65 @@ /* - * This file is part of the zfcp device driver for - * FCP adapters for IBM System z9 and zSeries. + * zfcp device driver * - * (C) Copyright IBM Corp. 2002, 2006 + * Interface to Linux SCSI midlayer. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * Copyright IBM Corporation 2002, 2008 */ -#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI - #include "zfcp_ext.h" #include <asm/atomic.h> -static void zfcp_scsi_slave_destroy(struct scsi_device *sdp); -static int zfcp_scsi_slave_alloc(struct scsi_device *sdp); -static int zfcp_scsi_slave_configure(struct scsi_device *sdp); -static int zfcp_scsi_queuecommand(struct scsi_cmnd *, - void (*done) (struct scsi_cmnd *)); -static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *); -static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *); -static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *); -static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *); -static int zfcp_task_management_function(struct zfcp_unit *, u8, - struct scsi_cmnd *); - -static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *, int, - unsigned int, unsigned int); - -static struct device_attribute *zfcp_sysfs_sdev_attrs[]; -static struct device_attribute *zfcp_a_stats_attrs[]; - -struct zfcp_data zfcp_data = { - .scsi_host_template = { - .name = ZFCP_NAME, - .module = THIS_MODULE, - .proc_name = "zfcp", - .slave_alloc = zfcp_scsi_slave_alloc, - .slave_configure = zfcp_scsi_slave_configure, - .slave_destroy = zfcp_scsi_slave_destroy, - .queuecommand = zfcp_scsi_queuecommand, - .eh_abort_handler = zfcp_scsi_eh_abort_handler, - .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler, - .eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler, - .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler, - .can_queue = 4096, - .this_id = -1, - .sg_tablesize = ZFCP_MAX_SBALES_PER_REQ, - .cmd_per_lun = 1, - .use_clustering = 1, - .sdev_attrs = zfcp_sysfs_sdev_attrs, - .max_sectors = ZFCP_MAX_SECTORS, - .shost_attrs = zfcp_a_stats_attrs, - }, - .driver_version = ZFCP_VERSION, -}; - -/* Find start of Response Information in FCP response unit*/ -char * -zfcp_get_fcp_rsp_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu) -{ - char *fcp_rsp_info_ptr; - - fcp_rsp_info_ptr = - (unsigned char *) fcp_rsp_iu + (sizeof (struct fcp_rsp_iu)); - - return fcp_rsp_info_ptr; -} - /* Find start of Sense Information in FCP response unit*/ -char * -zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu) +char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu) { char *fcp_sns_info_ptr; - fcp_sns_info_ptr = - (unsigned char *) fcp_rsp_iu + (sizeof (struct fcp_rsp_iu)); + fcp_sns_info_ptr = (unsigned char *) &fcp_rsp_iu[1]; if (fcp_rsp_iu->validity.bits.fcp_rsp_len_valid) - fcp_sns_info_ptr = (char *) fcp_sns_info_ptr + - fcp_rsp_iu->fcp_rsp_len; + fcp_sns_info_ptr += fcp_rsp_iu->fcp_rsp_len; return fcp_sns_info_ptr; } -static fcp_dl_t * -zfcp_get_fcp_dl_ptr(struct fcp_cmnd_iu * fcp_cmd) +void zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, fcp_dl_t fcp_dl) { - int additional_length = fcp_cmd->add_fcp_cdb_length << 2; - fcp_dl_t *fcp_dl_addr; + fcp_dl_t *fcp_dl_ptr; - fcp_dl_addr = (fcp_dl_t *) - ((unsigned char *) fcp_cmd + - sizeof (struct fcp_cmnd_iu) + additional_length); /* * fcp_dl_addr = start address of fcp_cmnd structure + * size of fixed part + size of dynamically sized add_dcp_cdb field * SEE FCP-2 documentation */ - return fcp_dl_addr; + fcp_dl_ptr = (fcp_dl_t *) ((unsigned char *) &fcp_cmd[1] + + (fcp_cmd->add_fcp_cdb_length << 2)); + *fcp_dl_ptr = fcp_dl; } -fcp_dl_t -zfcp_get_fcp_dl(struct fcp_cmnd_iu * fcp_cmd) -{ - 
return *zfcp_get_fcp_dl_ptr(fcp_cmd); -} - -void -zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, fcp_dl_t fcp_dl) -{ - *zfcp_get_fcp_dl_ptr(fcp_cmd) = fcp_dl; -} - -/* - * note: it's a bit-or operation not an assignment - * regarding the specified byte - */ -static inline void -set_byte(int *result, char status, char pos) -{ - *result |= status << (pos * 8); -} - -void -set_host_byte(int *result, char status) -{ - set_byte(result, status, 2); -} - -void -set_driver_byte(int *result, char status) -{ - set_byte(result, status, 3); -} - -static int -zfcp_scsi_slave_alloc(struct scsi_device *sdp) -{ - struct zfcp_adapter *adapter; - struct zfcp_unit *unit; - unsigned long flags; - int retval = -ENXIO; - - adapter = (struct zfcp_adapter *) sdp->host->hostdata[0]; - if (!adapter) - goto out; - - read_lock_irqsave(&zfcp_data.config_lock, flags); - unit = zfcp_unit_lookup(adapter, sdp->channel, sdp->id, sdp->lun); - if (unit && atomic_test_mask(ZFCP_STATUS_UNIT_REGISTERED, - &unit->status)) { - sdp->hostdata = unit; - unit->device = sdp; - zfcp_unit_get(unit); - retval = 0; - } - read_unlock_irqrestore(&zfcp_data.config_lock, flags); - out: - return retval; -} - -/** - * zfcp_scsi_slave_destroy - called when scsi device is removed - * - * Remove reference to associated scsi device for an zfcp_unit. - * Mark zfcp_unit as failed. The scsi device might be deleted via sysfs - * or a scan for this device might have failed. - */ static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) { struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; - + WARN_ON(!unit); if (unit) { atomic_clear_mask(ZFCP_STATUS_UNIT_REGISTERED, &unit->status); sdpnt->hostdata = NULL; unit->device = NULL; zfcp_erp_unit_failed(unit, 12, NULL); zfcp_unit_put(unit); - } else - ZFCP_LOG_NORMAL("bug: no unit associated with SCSI device at " - "address %p\n", sdpnt); + } } -/* - * called from scsi midlayer to allow finetuning of a device. - */ -static int -zfcp_scsi_slave_configure(struct scsi_device *sdp) +static int zfcp_scsi_slave_configure(struct scsi_device *sdp) { if (sdp->tagged_supported) - scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, ZFCP_CMND_PER_LUN); + scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, 32); else scsi_adjust_queue_depth(sdp, 0, 1); return 0; } -/** - * zfcp_scsi_command_fail - set result in scsi_cmnd and call scsi_done function - * @scpnt: pointer to struct scsi_cmnd where result is set - * @result: result to be set in scpnt (e.g. DID_ERROR) - */ -static void -zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result) +static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result) { - set_host_byte(&scpnt->result, result); + set_host_byte(scpnt, result); if ((scpnt->device != NULL) && (scpnt->device->host != NULL)) zfcp_scsi_dbf_event_result("fail", 4, (struct zfcp_adapter*) scpnt->device->host->hostdata[0], @@ -223,114 +68,13 @@ zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result) scpnt->scsi_done(scpnt); } -/** - * zfcp_scsi_command_async - worker for zfcp_scsi_queuecommand and - * zfcp_scsi_command_sync - * @adapter: adapter where scsi command is issued - * @unit: unit to which scsi command is sent - * @scpnt: scsi command to be sent - * @timer: timer to be started if request is successfully initiated - * - * Note: In scsi_done function must be set in scpnt. 
- */ -int -zfcp_scsi_command_async(struct zfcp_adapter *adapter, struct zfcp_unit *unit, - struct scsi_cmnd *scpnt, int use_timer) -{ - int tmp; - int retval; - - retval = 0; - - BUG_ON((adapter == NULL) || (adapter != unit->port->adapter)); - BUG_ON(scpnt->scsi_done == NULL); - - if (unlikely(NULL == unit)) { - zfcp_scsi_command_fail(scpnt, DID_NO_CONNECT); - goto out; - } - - if (unlikely( - atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &unit->status) || - !atomic_test_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status))) { - ZFCP_LOG_DEBUG("stopping SCSI I/O on unit 0x%016Lx on port " - "0x%016Lx on adapter %s\n", - unit->fcp_lun, unit->port->wwpn, - zfcp_get_busid_by_adapter(adapter)); - zfcp_scsi_command_fail(scpnt, DID_ERROR); - goto out; - } - - tmp = zfcp_fsf_send_fcp_command_task(adapter, unit, scpnt, use_timer, - ZFCP_REQ_AUTO_CLEANUP); - if (unlikely(tmp == -EBUSY)) { - ZFCP_LOG_DEBUG("adapter %s not ready or unit 0x%016Lx " - "on port 0x%016Lx in recovery\n", - zfcp_get_busid_by_unit(unit), - unit->fcp_lun, unit->port->wwpn); - zfcp_scsi_command_fail(scpnt, DID_NO_CONNECT); - goto out; - } - - if (unlikely(tmp < 0)) { - ZFCP_LOG_DEBUG("error: initiation of Send FCP Cmnd failed\n"); - retval = SCSI_MLQUEUE_HOST_BUSY; - } - -out: - return retval; -} - -static void -zfcp_scsi_command_sync_handler(struct scsi_cmnd *scpnt) -{ - struct completion *wait = (struct completion *) scpnt->SCp.ptr; - complete(wait); -} - - -/** - * zfcp_scsi_command_sync - send a SCSI command and wait for completion - * @unit: unit where command is sent to - * @scpnt: scsi command to be sent - * @use_timer: indicates whether timer should be setup or not - * Return: 0 - * - * Errors are indicated in scpnt->result - */ -int -zfcp_scsi_command_sync(struct zfcp_unit *unit, struct scsi_cmnd *scpnt, - int use_timer) -{ - int ret; - DECLARE_COMPLETION_ONSTACK(wait); - - scpnt->SCp.ptr = (void *) &wait; /* silent re-use */ - scpnt->scsi_done = zfcp_scsi_command_sync_handler; - ret = zfcp_scsi_command_async(unit->port->adapter, unit, scpnt, - use_timer); - if (ret == 0) - wait_for_completion(&wait); - - scpnt->SCp.ptr = NULL; - - return 0; -} - -/* - * function: zfcp_scsi_queuecommand - * - * purpose: enqueues a SCSI command to the specified target device - * - * returns: 0 - success, SCSI command enqueued - * !0 - failure - */ -static int -zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, - void (*done) (struct scsi_cmnd *)) +static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, + void (*done) (struct scsi_cmnd *)) { struct zfcp_unit *unit; struct zfcp_adapter *adapter; + int status; + int ret; /* reset the status for this request */ scpnt->result = 0; @@ -342,44 +86,76 @@ zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, * (stored there by zfcp_scsi_slave_alloc) */ adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0]; - unit = (struct zfcp_unit *) scpnt->device->hostdata; + unit = scpnt->device->hostdata; + + BUG_ON(!adapter || (adapter != unit->port->adapter)); + BUG_ON(!scpnt->scsi_done); - return zfcp_scsi_command_async(adapter, unit, scpnt, 0); + if (unlikely(!unit)) { + zfcp_scsi_command_fail(scpnt, DID_NO_CONNECT); + return 0; + } + + status = atomic_read(&unit->status); + if (unlikely((status & ZFCP_STATUS_COMMON_ERP_FAILED) || + !(status & ZFCP_STATUS_COMMON_RUNNING))) { + zfcp_scsi_command_fail(scpnt, DID_ERROR); + return 0;; + } + + ret = zfcp_fsf_send_fcp_command_task(adapter, unit, scpnt, 0, + ZFCP_REQ_AUTO_CLEANUP); + if (unlikely(ret == -EBUSY)) + zfcp_scsi_command_fail(scpnt, 
DID_NO_CONNECT); + else if (unlikely(ret < 0)) + return SCSI_MLQUEUE_HOST_BUSY; + + return ret; } -static struct zfcp_unit * -zfcp_unit_lookup(struct zfcp_adapter *adapter, int channel, unsigned int id, - unsigned int lun) +static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *adapter, + int channel, unsigned int id, + unsigned int lun) { struct zfcp_port *port; - struct zfcp_unit *unit, *retval = NULL; + struct zfcp_unit *unit; list_for_each_entry(port, &adapter->port_list_head, list) { if (!port->rport || (id != port->rport->scsi_target_id)) continue; list_for_each_entry(unit, &port->unit_list_head, list) - if (lun == unit->scsi_lun) { - retval = unit; - goto out; - } + if (lun == unit->scsi_lun) + return unit; } - out: + + return NULL; +} + +static int zfcp_scsi_slave_alloc(struct scsi_device *sdp) +{ + struct zfcp_adapter *adapter; + struct zfcp_unit *unit; + unsigned long flags; + int retval = -ENXIO; + + adapter = (struct zfcp_adapter *) sdp->host->hostdata[0]; + if (!adapter) + goto out; + + read_lock_irqsave(&zfcp_data.config_lock, flags); + unit = zfcp_unit_lookup(adapter, sdp->channel, sdp->id, sdp->lun); + if (unit && + (atomic_read(&unit->status) & ZFCP_STATUS_UNIT_REGISTERED)) { + sdp->hostdata = unit; + unit->device = sdp; + zfcp_unit_get(unit); + retval = 0; + } + read_unlock_irqrestore(&zfcp_data.config_lock, flags); +out: return retval; } -/** - * zfcp_scsi_eh_abort_handler - abort the specified SCSI command - * @scpnt: pointer to scsi_cmnd to be aborted - * Return: SUCCESS - command has been aborted and cleaned up in internal - * bookkeeping, SCSI stack won't be called for aborted command - * FAILED - otherwise - * - * We do not need to care for a SCSI command which completes normally - * but late during this abort routine runs. We are allowed to return - * late commands to the SCSI stack. It tracks the state of commands and - * will handle late commands. (Usually, the normal completion of late - * commands is ignored with respect to the running abort operation.) 
- */ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) { struct Scsi_Host *scsi_host; @@ -387,44 +163,37 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) struct zfcp_unit *unit; struct zfcp_fsf_req *fsf_req; unsigned long flags; - unsigned long old_req_id; + unsigned long old_req_id = (unsigned long) scpnt->host_scribble; int retval = SUCCESS; scsi_host = scpnt->device->host; adapter = (struct zfcp_adapter *) scsi_host->hostdata[0]; - unit = (struct zfcp_unit *) scpnt->device->hostdata; - - ZFCP_LOG_INFO("aborting scsi_cmnd=%p on adapter %s\n", - scpnt, zfcp_get_busid_by_adapter(adapter)); + unit = scpnt->device->hostdata; /* avoid race condition between late normal completion and abort */ write_lock_irqsave(&adapter->abort_lock, flags); /* Check whether corresponding fsf_req is still pending */ spin_lock(&adapter->req_list_lock); - fsf_req = zfcp_reqlist_find(adapter, - (unsigned long) scpnt->host_scribble); + fsf_req = zfcp_reqlist_find(adapter, old_req_id); spin_unlock(&adapter->req_list_lock); if (!fsf_req) { write_unlock_irqrestore(&adapter->abort_lock, flags); zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, NULL, 0); - retval = SUCCESS; - goto out; + return retval; } - fsf_req->data = 0; + fsf_req->data = NULL; fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTING; - old_req_id = fsf_req->req_id; /* don't access old fsf_req after releasing the abort_lock */ write_unlock_irqrestore(&adapter->abort_lock, flags); fsf_req = zfcp_fsf_abort_fcp_command(old_req_id, adapter, unit, 0); if (!fsf_req) { - ZFCP_LOG_INFO("error: initiation of Abort FCP Cmnd failed\n"); zfcp_scsi_dbf_event_abort("nres", adapter, scpnt, NULL, old_req_id); retval = FAILED; - goto out; + return retval; } __wait_event(fsf_req->completion_wq, @@ -432,66 +201,29 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) if (fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) { zfcp_scsi_dbf_event_abort("okay", adapter, scpnt, fsf_req, 0); - retval = SUCCESS; } else if (fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) { zfcp_scsi_dbf_event_abort("lte2", adapter, scpnt, fsf_req, 0); - retval = SUCCESS; } else { zfcp_scsi_dbf_event_abort("fail", adapter, scpnt, fsf_req, 0); retval = FAILED; } zfcp_fsf_req_free(fsf_req); - out: - return retval; -} - -static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt) -{ - int retval; - struct zfcp_unit *unit = scpnt->device->hostdata; - if (!unit) { - WARN_ON(1); - return SUCCESS; - } - retval = zfcp_task_management_function(unit, - FCP_LOGICAL_UNIT_RESET, - scpnt); - return retval ? FAILED : SUCCESS; -} - -static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt) -{ - int retval; - struct zfcp_unit *unit = scpnt->device->hostdata; - - if (!unit) { - WARN_ON(1); - return SUCCESS; - } - retval = zfcp_task_management_function(unit, FCP_TARGET_RESET, scpnt); - return retval ? 
FAILED : SUCCESS; + return retval; } -static int -zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags, - struct scsi_cmnd *scpnt) +static int zfcp_task_mgmt_function(struct zfcp_unit *unit, u8 tm_flags, + struct scsi_cmnd *scpnt) { struct zfcp_adapter *adapter = unit->port->adapter; struct zfcp_fsf_req *fsf_req; - int retval = 0; + int retval = SUCCESS; /* issue task management function */ - fsf_req = zfcp_fsf_send_fcp_command_task_management - (adapter, unit, tm_flags, 0); + fsf_req = zfcp_fsf_send_fcp_ctm(adapter, unit, tm_flags, 0); if (!fsf_req) { - ZFCP_LOG_INFO("error: creation of task management request " - "failed for unit 0x%016Lx on port 0x%016Lx on " - "adapter %s\n", unit->fcp_lun, unit->port->wwpn, - zfcp_get_busid_by_adapter(adapter)); zfcp_scsi_dbf_event_devreset("nres", tm_flags, unit, scpnt); - retval = -ENOMEM; - goto out; + return FAILED; } __wait_event(fsf_req->completion_wq, @@ -502,87 +234,90 @@ zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags, */ if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) { zfcp_scsi_dbf_event_devreset("fail", tm_flags, unit, scpnt); - retval = -EIO; + retval = FAILED; } else if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP) { zfcp_scsi_dbf_event_devreset("nsup", tm_flags, unit, scpnt); - retval = -ENOTSUPP; + retval = FAILED; } else zfcp_scsi_dbf_event_devreset("okay", tm_flags, unit, scpnt); zfcp_fsf_req_free(fsf_req); - out: + return retval; } -/** - * zfcp_scsi_eh_host_reset_handler - handler for host reset - */ +static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt) +{ + struct zfcp_unit *unit = scpnt->device->hostdata; + + if (!unit) { + WARN_ON(1); + return SUCCESS; + } + return zfcp_task_mgmt_function(unit, FCP_LOGICAL_UNIT_RESET, scpnt); +} + +static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt) +{ + struct zfcp_unit *unit = scpnt->device->hostdata; + + if (!unit) { + WARN_ON(1); + return SUCCESS; + } + return zfcp_task_mgmt_function(unit, FCP_TARGET_RESET, scpnt); +} + static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) { struct zfcp_unit *unit; struct zfcp_adapter *adapter; - unit = (struct zfcp_unit*) scpnt->device->hostdata; + unit = scpnt->device->hostdata; adapter = unit->port->adapter; - - ZFCP_LOG_NORMAL("host reset because of problems with " - "unit 0x%016Lx on port 0x%016Lx, adapter %s\n", - unit->fcp_lun, unit->port->wwpn, - zfcp_get_busid_by_adapter(unit->port->adapter)); - zfcp_erp_adapter_reopen(adapter, 0, 141, scpnt); zfcp_erp_wait(adapter); return SUCCESS; } -int -zfcp_adapter_scsi_register(struct zfcp_adapter *adapter) +int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter) { - int retval = 0; - static unsigned int unique_id = 0; + struct ccw_dev_id dev_id; if (adapter->scsi_host) - goto out; + return 0; + ccw_device_get_id(adapter->ccw_device, &dev_id); /* register adapter as SCSI host with mid layer of SCSI stack */ adapter->scsi_host = scsi_host_alloc(&zfcp_data.scsi_host_template, sizeof (struct zfcp_adapter *)); if (!adapter->scsi_host) { - ZFCP_LOG_NORMAL("error: registration with SCSI stack failed " - "for adapter %s ", - zfcp_get_busid_by_adapter(adapter)); - retval = -EIO; - goto out; + dev_err(&adapter->ccw_device->dev, + "registration with SCSI stack failed."); + return -EIO; } - ZFCP_LOG_DEBUG("host registered, scsi_host=%p\n", adapter->scsi_host); /* tell the SCSI stack some characteristics of this adapter */ adapter->scsi_host->max_id = 1; adapter->scsi_host->max_lun = 1; adapter->scsi_host->max_channel = 0; 
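The removed and added lines that follow swap the FIXME'd static unique_id counter for the CCW device number, so each adapter gets a SCSI host id that is stable across driver reloads. A minimal sketch of that pattern; the helper name and include choices are assumptions for illustration, only ccw_device_get_id() and struct ccw_dev_id are taken from the kernel interface used here:

        #include <asm/ccwdev.h>        /* struct ccw_dev_id, ccw_device_get_id() */
        #include <scsi/scsi_host.h>    /* struct Scsi_Host */

        /* Illustrative helper: derive a stable host id from the CCW device number. */
        static void sketch_set_unique_id(struct Scsi_Host *shost, struct ccw_device *cdev)
        {
                struct ccw_dev_id dev_id;

                ccw_device_get_id(cdev, &dev_id);  /* fills ssid/devno for this device */
                shost->unique_id = dev_id.devno;   /* device number of the adapter */
        }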
- adapter->scsi_host->unique_id = unique_id++; /* FIXME */ - adapter->scsi_host->max_cmd_len = ZFCP_MAX_SCSI_CMND_LENGTH; + adapter->scsi_host->unique_id = dev_id.devno; + adapter->scsi_host->max_cmd_len = 255; adapter->scsi_host->transportt = zfcp_data.scsi_transport_template; - /* - * save a pointer to our own adapter data structure within - * hostdata field of SCSI host data structure - */ adapter->scsi_host->hostdata[0] = (unsigned long) adapter; if (scsi_add_host(adapter->scsi_host, &adapter->ccw_device->dev)) { scsi_host_put(adapter->scsi_host); - retval = -EIO; - goto out; + return -EIO; } atomic_set_mask(ZFCP_STATUS_ADAPTER_REGISTERED, &adapter->status); - out: - return retval; + + return 0; } -void -zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter) +void zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter) { struct Scsi_Host *shost; struct zfcp_port *port; @@ -590,10 +325,12 @@ zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter) shost = adapter->scsi_host; if (!shost) return; + read_lock_irq(&zfcp_data.config_lock); list_for_each_entry(port, &adapter->port_list_head, list) if (port->rport) port->rport = NULL; + read_unlock_irq(&zfcp_data.config_lock); fc_remove_host(shost); scsi_remove_host(shost); @@ -604,9 +341,6 @@ zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter) return; } -/* - * Support functions for FC transport class - */ static struct fc_host_statistics* zfcp_init_fc_host_stats(struct zfcp_adapter *adapter) { @@ -622,13 +356,12 @@ zfcp_init_fc_host_stats(struct zfcp_adapter *adapter) return adapter->fc_stats; } -static void -zfcp_adjust_fc_host_stats(struct fc_host_statistics *fc_stats, - struct fsf_qtcb_bottom_port *data, - struct fsf_qtcb_bottom_port *old) +static void zfcp_adjust_fc_host_stats(struct fc_host_statistics *fc_stats, + struct fsf_qtcb_bottom_port *data, + struct fsf_qtcb_bottom_port *old) { - fc_stats->seconds_since_last_reset = data->seconds_since_last_reset - - old->seconds_since_last_reset; + fc_stats->seconds_since_last_reset = + data->seconds_since_last_reset - old->seconds_since_last_reset; fc_stats->tx_frames = data->tx_frames - old->tx_frames; fc_stats->tx_words = data->tx_words - old->tx_words; fc_stats->rx_frames = data->rx_frames - old->rx_frames; @@ -639,26 +372,25 @@ zfcp_adjust_fc_host_stats(struct fc_host_statistics *fc_stats, fc_stats->dumped_frames = data->dumped_frames - old->dumped_frames; fc_stats->link_failure_count = data->link_failure - old->link_failure; fc_stats->loss_of_sync_count = data->loss_of_sync - old->loss_of_sync; - fc_stats->loss_of_signal_count = data->loss_of_signal - - old->loss_of_signal; - fc_stats->prim_seq_protocol_err_count = data->psp_error_counts - - old->psp_error_counts; - fc_stats->invalid_tx_word_count = data->invalid_tx_words - - old->invalid_tx_words; + fc_stats->loss_of_signal_count = + data->loss_of_signal - old->loss_of_signal; + fc_stats->prim_seq_protocol_err_count = + data->psp_error_counts - old->psp_error_counts; + fc_stats->invalid_tx_word_count = + data->invalid_tx_words - old->invalid_tx_words; fc_stats->invalid_crc_count = data->invalid_crcs - old->invalid_crcs; - fc_stats->fcp_input_requests = data->input_requests - - old->input_requests; - fc_stats->fcp_output_requests = data->output_requests - - old->output_requests; - fc_stats->fcp_control_requests = data->control_requests - - old->control_requests; + fc_stats->fcp_input_requests = + data->input_requests - old->input_requests; + fc_stats->fcp_output_requests = + data->output_requests - old->output_requests; + 
fc_stats->fcp_control_requests = + data->control_requests - old->control_requests; fc_stats->fcp_input_megabytes = data->input_mb - old->input_mb; fc_stats->fcp_output_megabytes = data->output_mb - old->output_mb; } -static void -zfcp_set_fc_host_stats(struct fc_host_statistics *fc_stats, - struct fsf_qtcb_bottom_port *data) +static void zfcp_set_fc_host_stats(struct fc_host_statistics *fc_stats, + struct fsf_qtcb_bottom_port *data) { fc_stats->seconds_since_last_reset = data->seconds_since_last_reset; fc_stats->tx_frames = data->tx_frames; @@ -682,22 +414,14 @@ zfcp_set_fc_host_stats(struct fc_host_statistics *fc_stats, fc_stats->fcp_output_megabytes = data->output_mb; } -/** - * zfcp_get_fc_host_stats - provide fc_host_statistics for scsi_transport_fc - * - * assumption: scsi_transport_fc synchronizes calls of - * get_fc_host_stats and reset_fc_host_stats - * (XXX to be checked otherwise introduce locking) - */ -static struct fc_host_statistics * -zfcp_get_fc_host_stats(struct Scsi_Host *shost) +static struct fc_host_statistics *zfcp_get_fc_host_stats(struct Scsi_Host *host) { struct zfcp_adapter *adapter; struct fc_host_statistics *fc_stats; struct fsf_qtcb_bottom_port *data; int ret; - adapter = (struct zfcp_adapter *)shost->hostdata[0]; + adapter = (struct zfcp_adapter *)host->hostdata[0]; fc_stats = zfcp_init_fc_host_stats(adapter); if (!fc_stats) return NULL; @@ -709,26 +433,25 @@ zfcp_get_fc_host_stats(struct Scsi_Host *shost) ret = zfcp_fsf_exchange_port_data_sync(adapter, data); if (ret) { kfree(data); - return NULL; /* XXX return zeroed fc_stats? */ + return NULL; } if (adapter->stats_reset && ((jiffies/HZ - adapter->stats_reset) < - data->seconds_since_last_reset)) { + data->seconds_since_last_reset)) zfcp_adjust_fc_host_stats(fc_stats, data, adapter->stats_reset_data); - } else + else zfcp_set_fc_host_stats(fc_stats, data); kfree(data); return fc_stats; } -static void -zfcp_reset_fc_host_stats(struct Scsi_Host *shost) +static void zfcp_reset_fc_host_stats(struct Scsi_Host *shost) { struct zfcp_adapter *adapter; - struct fsf_qtcb_bottom_port *data, *old_data; + struct fsf_qtcb_bottom_port *data; int ret; adapter = (struct zfcp_adapter *)shost->hostdata[0]; @@ -737,17 +460,33 @@ zfcp_reset_fc_host_stats(struct Scsi_Host *shost) return; ret = zfcp_fsf_exchange_port_data_sync(adapter, data); - if (ret) { + if (ret) kfree(data); - } else { + else { adapter->stats_reset = jiffies/HZ; - old_data = adapter->stats_reset_data; + kfree(adapter->stats_reset_data); adapter->stats_reset_data = data; /* finally freed in - adater_dequeue */ - kfree(old_data); + adapter_dequeue */ } } +static void zfcp_get_host_port_state(struct Scsi_Host *shost) +{ + struct zfcp_adapter *adapter = + (struct zfcp_adapter *)shost->hostdata[0]; + int status = atomic_read(&adapter->status); + + if ((status & ZFCP_STATUS_COMMON_RUNNING) && + !(status & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)) + fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; + else if (status & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED) + fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; + else if (status & ZFCP_STATUS_COMMON_ERP_FAILED) + fc_host_port_state(shost) = FC_PORTSTATE_ERROR; + else + fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN; +} + static void zfcp_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout) { rport->dev_loss_tmo = timeout; @@ -770,6 +509,8 @@ struct fc_function_template zfcp_transport_functions = { .get_fc_host_stats = zfcp_get_fc_host_stats, .reset_fc_host_stats = zfcp_reset_fc_host_stats, .set_rport_dev_loss_tmo = 
zfcp_set_rport_dev_loss_tmo, + .get_host_port_state = zfcp_get_host_port_state, + .show_host_port_state = 1, /* no functions registered for following dynamic attributes but directly set by LLDD */ .show_host_port_type = 1, @@ -778,149 +519,26 @@ struct fc_function_template zfcp_transport_functions = { .disable_target_scan = 1, }; -/** - * ZFCP_DEFINE_SCSI_ATTR - * @_name: name of show attribute - * @_format: format string - * @_value: value to print - * - * Generates attribute for a unit. - */ -#define ZFCP_DEFINE_SCSI_ATTR(_name, _format, _value) \ -static ssize_t zfcp_sysfs_scsi_##_name##_show(struct device *dev, struct device_attribute *attr, \ - char *buf) \ -{ \ - struct scsi_device *sdev; \ - struct zfcp_unit *unit; \ - \ - sdev = to_scsi_device(dev); \ - unit = sdev->hostdata; \ - return sprintf(buf, _format, _value); \ -} \ - \ -static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL); - -ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n", zfcp_get_busid_by_unit(unit)); -ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n", unit->port->wwpn); -ZFCP_DEFINE_SCSI_ATTR(fcp_lun, "0x%016llx\n", unit->fcp_lun); - -static struct device_attribute *zfcp_sysfs_sdev_attrs[] = { - &dev_attr_fcp_lun, - &dev_attr_wwpn, - &dev_attr_hba_id, - NULL -}; - -static ssize_t zfcp_sysfs_adapter_util_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct Scsi_Host *scsi_host = dev_to_shost(dev); - struct fsf_qtcb_bottom_port *qtcb_port; - int retval; - struct zfcp_adapter *adapter; - - adapter = (struct zfcp_adapter *) scsi_host->hostdata[0]; - if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA)) - return -EOPNOTSUPP; - - qtcb_port = kzalloc(sizeof(struct fsf_qtcb_bottom_port), GFP_KERNEL); - if (!qtcb_port) - return -ENOMEM; - - retval = zfcp_fsf_exchange_port_data_sync(adapter, qtcb_port); - if (!retval) - retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util, - qtcb_port->cb_util, qtcb_port->a_util); - kfree(qtcb_port); - return retval; -} - -static int zfcp_sysfs_adapter_ex_config(struct device *dev, - struct fsf_statistics_info *stat_inf) -{ - int retval; - struct fsf_qtcb_bottom_config *qtcb_config; - struct Scsi_Host *scsi_host = dev_to_shost(dev); - struct zfcp_adapter *adapter; - - adapter = (struct zfcp_adapter *) scsi_host->hostdata[0]; - if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA)) - return -EOPNOTSUPP; - - qtcb_config = kzalloc(sizeof(struct fsf_qtcb_bottom_config), - GFP_KERNEL); - if (!qtcb_config) - return -ENOMEM; - - retval = zfcp_fsf_exchange_config_data_sync(adapter, qtcb_config); - if (!retval) - *stat_inf = qtcb_config->stat_info; - - kfree(qtcb_config); - return retval; -} - -static ssize_t zfcp_sysfs_adapter_request_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct fsf_statistics_info stat_info; - int retval; - - retval = zfcp_sysfs_adapter_ex_config(dev, &stat_info); - if (retval) - return retval; - - return sprintf(buf, "%llu %llu %llu\n", - (unsigned long long) stat_info.input_req, - (unsigned long long) stat_info.output_req, - (unsigned long long) stat_info.control_req); -} - -static ssize_t zfcp_sysfs_adapter_mb_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct fsf_statistics_info stat_info; - int retval; - - retval = zfcp_sysfs_adapter_ex_config(dev, &stat_info); - if (retval) - return retval; - - return sprintf(buf, "%llu %llu\n", - (unsigned long long) stat_info.input_mb, - (unsigned long long) stat_info.output_mb); -} - -static ssize_t 
zfcp_sysfs_adapter_sec_active_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct fsf_statistics_info stat_info; - int retval; - - retval = zfcp_sysfs_adapter_ex_config(dev, &stat_info); - if (retval) - return retval; - - return sprintf(buf, "%llu\n", - (unsigned long long) stat_info.seconds_act); -} - -static DEVICE_ATTR(utilization, S_IRUGO, zfcp_sysfs_adapter_util_show, NULL); -static DEVICE_ATTR(requests, S_IRUGO, zfcp_sysfs_adapter_request_show, NULL); -static DEVICE_ATTR(megabytes, S_IRUGO, zfcp_sysfs_adapter_mb_show, NULL); -static DEVICE_ATTR(seconds_active, S_IRUGO, - zfcp_sysfs_adapter_sec_active_show, NULL); - -static struct device_attribute *zfcp_a_stats_attrs[] = { - &dev_attr_utilization, - &dev_attr_requests, - &dev_attr_megabytes, - &dev_attr_seconds_active, - NULL +struct zfcp_data zfcp_data = { + .scsi_host_template = { + .name = "zfcp", + .module = THIS_MODULE, + .proc_name = "zfcp", + .slave_alloc = zfcp_scsi_slave_alloc, + .slave_configure = zfcp_scsi_slave_configure, + .slave_destroy = zfcp_scsi_slave_destroy, + .queuecommand = zfcp_scsi_queuecommand, + .eh_abort_handler = zfcp_scsi_eh_abort_handler, + .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler, + .eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler, + .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler, + .can_queue = 4096, + .this_id = -1, + .sg_tablesize = ZFCP_MAX_SBALES_PER_REQ, + .cmd_per_lun = 1, + .use_clustering = 1, + .sdev_attrs = zfcp_sysfs_sdev_attrs, + .max_sectors = (ZFCP_MAX_SBALES_PER_REQ * 8), + .shost_attrs = zfcp_sysfs_shost_attrs, + }, }; - -#undef ZFCP_LOG_AREA diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c new file mode 100644 index 000000000000..2e85c6c49e7d --- /dev/null +++ b/drivers/s390/scsi/zfcp_sysfs.c @@ -0,0 +1,496 @@ +/* + * zfcp device driver + * + * sysfs attributes. 
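
Aside: the zfcp_get_fc_host_stats()/zfcp_reset_fc_host_stats() pair earlier in this hunk never clears the adapter counters; "reset" just snapshots them (adapter->stats_reset_data, adapter->stats_reset) and later reads subtract the snapshot, falling back to raw values if the adapter itself was reset in the meantime. A minimal sketch of that baseline-subtraction pattern, with invented names and plain C types, assuming nothing beyond what the hunk above shows:

	#include <stdint.h>
	#include <string.h>

	struct hw_counters {			/* what the adapter reports */
		uint64_t tx_frames;
		uint64_t rx_frames;
		uint64_t seconds_since_last_reset;
	};

	struct stats_state {
		struct hw_counters baseline;	/* snapshot taken at "reset" */
		uint64_t reset_time;		/* seconds, like jiffies/HZ   */
	};

	static void stats_reset(struct stats_state *s,
				const struct hw_counters *now, uint64_t now_sec)
	{
		s->baseline = *now;		/* remember current counters  */
		s->reset_time = now_sec;
	}

	static void stats_get(const struct stats_state *s,
			      const struct hw_counters *now, uint64_t now_sec,
			      struct hw_counters *out)
	{
		/* Subtract only if the adapter was not reset behind our back. */
		if (s->reset_time &&
		    (now_sec - s->reset_time) < now->seconds_since_last_reset) {
			out->tx_frames = now->tx_frames - s->baseline.tx_frames;
			out->rx_frames = now->rx_frames - s->baseline.rx_frames;
			out->seconds_since_last_reset = now_sec - s->reset_time;
		} else {
			*out = *now;		/* no valid baseline: raw values */
		}
	}
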
+ * + * Copyright IBM Corporation 2008 + */ + +#include "zfcp_ext.h" + +#define ZFCP_DEV_ATTR(_feat, _name, _mode, _show, _store) \ +struct device_attribute dev_attr_##_feat##_##_name = __ATTR(_name, _mode,\ + _show, _store) +#define ZFCP_DEFINE_ATTR(_feat_def, _feat, _name, _format, _value) \ +static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \ + struct device_attribute *at,\ + char *buf) \ +{ \ + struct _feat_def *_feat = dev_get_drvdata(dev); \ + \ + return sprintf(buf, _format, _value); \ +} \ +static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \ + zfcp_sysfs_##_feat##_##_name##_show, NULL); + +ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, status, "0x%08x\n", + atomic_read(&adapter->status)); +ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_wwnn, "0x%016llx\n", + adapter->peer_wwnn); +ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_wwpn, "0x%016llx\n", + adapter->peer_wwpn); +ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_d_id, "0x%06x\n", + adapter->peer_d_id); +ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, card_version, "0x%04x\n", + adapter->hydra_version); +ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, lic_version, "0x%08x\n", + adapter->fsf_lic_version); +ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, hardware_version, "0x%08x\n", + adapter->hardware_version); +ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, in_recovery, "%d\n", + (atomic_read(&adapter->status) & + ZFCP_STATUS_COMMON_ERP_INUSE) != 0); + +ZFCP_DEFINE_ATTR(zfcp_port, port, status, "0x%08x\n", + atomic_read(&port->status)); +ZFCP_DEFINE_ATTR(zfcp_port, port, in_recovery, "%d\n", + (atomic_read(&port->status) & + ZFCP_STATUS_COMMON_ERP_INUSE) != 0); +ZFCP_DEFINE_ATTR(zfcp_port, port, access_denied, "%d\n", + (atomic_read(&port->status) & + ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0); + +ZFCP_DEFINE_ATTR(zfcp_unit, unit, status, "0x%08x\n", + atomic_read(&unit->status)); +ZFCP_DEFINE_ATTR(zfcp_unit, unit, in_recovery, "%d\n", + (atomic_read(&unit->status) & + ZFCP_STATUS_COMMON_ERP_INUSE) != 0); +ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n", + (atomic_read(&unit->status) & + ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0); +ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_shared, "%d\n", + (atomic_read(&unit->status) & + ZFCP_STATUS_UNIT_SHARED) != 0); +ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_readonly, "%d\n", + (atomic_read(&unit->status) & + ZFCP_STATUS_UNIT_READONLY) != 0); + +#define ZFCP_SYSFS_FAILED(_feat_def, _feat, _adapter, _mod_id, _reopen_id) \ +static ssize_t zfcp_sysfs_##_feat##_failed_show(struct device *dev, \ + struct device_attribute *attr, \ + char *buf) \ +{ \ + struct _feat_def *_feat = dev_get_drvdata(dev); \ + \ + if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_ERP_FAILED) \ + return sprintf(buf, "1\n"); \ + else \ + return sprintf(buf, "0\n"); \ +} \ +static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \ + struct device_attribute *attr,\ + const char *buf, size_t count)\ +{ \ + struct _feat_def *_feat = dev_get_drvdata(dev); \ + unsigned long val; \ + int retval = 0; \ + \ + down(&zfcp_data.config_sema); \ + if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_REMOVE) { \ + retval = -EBUSY; \ + goto out; \ + } \ + \ + if (strict_strtoul(buf, 0, &val) || val != 0) { \ + retval = -EINVAL; \ + goto out; \ + } \ + \ + zfcp_erp_modify_##_feat##_status(_feat, _mod_id, NULL, \ + ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);\ + zfcp_erp_##_feat##_reopen(_feat, ZFCP_STATUS_COMMON_ERP_FAILED, \ + _reopen_id, NULL); \ + zfcp_erp_wait(_adapter); \ +out: \ + up(&zfcp_data.config_sema); \ + return retval ? 
retval : (ssize_t) count; \ +} \ +static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO, \ + zfcp_sysfs_##_feat##_failed_show, \ + zfcp_sysfs_##_feat##_failed_store); + +ZFCP_SYSFS_FAILED(zfcp_adapter, adapter, adapter, 44, 93); +ZFCP_SYSFS_FAILED(zfcp_port, port, port->adapter, 45, 96); +ZFCP_SYSFS_FAILED(zfcp_unit, unit, unit->port->adapter, 46, 97); + +static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct zfcp_adapter *adapter = dev_get_drvdata(dev); + int ret; + + if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) + return -EBUSY; + + ret = zfcp_scan_ports(adapter); + return ret ? ret : (ssize_t) count; +} +static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL, + zfcp_sysfs_port_rescan_store); + +static ssize_t zfcp_sysfs_port_remove_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct zfcp_adapter *adapter = dev_get_drvdata(dev); + struct zfcp_port *port; + wwn_t wwpn; + int retval = 0; + + down(&zfcp_data.config_sema); + if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) { + retval = -EBUSY; + goto out; + } + + if (strict_strtoull(buf, 0, &wwpn)) { + retval = -EINVAL; + goto out; + } + + write_lock_irq(&zfcp_data.config_lock); + port = zfcp_get_port_by_wwpn(adapter, wwpn); + if (port && (atomic_read(&port->refcount) == 0)) { + zfcp_port_get(port); + atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status); + list_move(&port->list, &adapter->port_remove_lh); + } else + port = NULL; + write_unlock_irq(&zfcp_data.config_lock); + + if (!port) { + retval = -ENXIO; + goto out; + } + + zfcp_erp_port_shutdown(port, 0, 92, NULL); + zfcp_erp_wait(adapter); + zfcp_port_put(port); + zfcp_port_dequeue(port); + out: + up(&zfcp_data.config_sema); + return retval ? retval : (ssize_t) count; +} +static ZFCP_DEV_ATTR(adapter, port_remove, S_IWUSR, NULL, + zfcp_sysfs_port_remove_store); + +static struct attribute *zfcp_adapter_attrs[] = { + &dev_attr_adapter_failed.attr, + &dev_attr_adapter_in_recovery.attr, + &dev_attr_adapter_port_remove.attr, + &dev_attr_adapter_port_rescan.attr, + &dev_attr_adapter_peer_wwnn.attr, + &dev_attr_adapter_peer_wwpn.attr, + &dev_attr_adapter_peer_d_id.attr, + &dev_attr_adapter_card_version.attr, + &dev_attr_adapter_lic_version.attr, + &dev_attr_adapter_status.attr, + &dev_attr_adapter_hardware_version.attr, + NULL +}; + +struct attribute_group zfcp_sysfs_adapter_attrs = { + .attrs = zfcp_adapter_attrs, +}; + +static ssize_t zfcp_sysfs_unit_add_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct zfcp_port *port = dev_get_drvdata(dev); + struct zfcp_unit *unit; + fcp_lun_t fcp_lun; + int retval = -EINVAL; + + down(&zfcp_data.config_sema); + if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) { + retval = -EBUSY; + goto out; + } + + if (strict_strtoull(buf, 0, &fcp_lun)) + goto out; + + unit = zfcp_unit_enqueue(port, fcp_lun); + if (IS_ERR(unit)) + goto out; + + retval = 0; + + zfcp_erp_unit_reopen(unit, 0, 94, NULL); + zfcp_erp_wait(unit->port->adapter); + zfcp_unit_put(unit); +out: + up(&zfcp_data.config_sema); + return retval ? 
retval : (ssize_t) count; +} +static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store); + +static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct zfcp_port *port = dev_get_drvdata(dev); + struct zfcp_unit *unit; + fcp_lun_t fcp_lun; + int retval = 0; + + down(&zfcp_data.config_sema); + if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) { + retval = -EBUSY; + goto out; + } + + if (strict_strtoull(buf, 0, &fcp_lun)) { + retval = -EINVAL; + goto out; + } + + write_lock_irq(&zfcp_data.config_lock); + unit = zfcp_get_unit_by_lun(port, fcp_lun); + if (unit && (atomic_read(&unit->refcount) == 0)) { + zfcp_unit_get(unit); + atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status); + list_move(&unit->list, &port->unit_remove_lh); + } else + unit = NULL; + + write_unlock_irq(&zfcp_data.config_lock); + + if (!unit) { + retval = -ENXIO; + goto out; + } + + zfcp_erp_unit_shutdown(unit, 0, 95, NULL); + zfcp_erp_wait(unit->port->adapter); + zfcp_unit_put(unit); + zfcp_unit_dequeue(unit); +out: + up(&zfcp_data.config_sema); + return retval ? retval : (ssize_t) count; +} +static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store); + +static struct attribute *zfcp_port_ns_attrs[] = { + &dev_attr_port_failed.attr, + &dev_attr_port_in_recovery.attr, + &dev_attr_port_status.attr, + &dev_attr_port_access_denied.attr, + NULL +}; + +/** + * zfcp_sysfs_ns_port_attrs - sysfs attributes for nameserver + */ +struct attribute_group zfcp_sysfs_ns_port_attrs = { + .attrs = zfcp_port_ns_attrs, +}; + +static struct attribute *zfcp_port_no_ns_attrs[] = { + &dev_attr_unit_add.attr, + &dev_attr_unit_remove.attr, + &dev_attr_port_failed.attr, + &dev_attr_port_in_recovery.attr, + &dev_attr_port_status.attr, + &dev_attr_port_access_denied.attr, + NULL +}; + +/** + * zfcp_sysfs_port_attrs - sysfs attributes for all other ports + */ +struct attribute_group zfcp_sysfs_port_attrs = { + .attrs = zfcp_port_no_ns_attrs, +}; + +static struct attribute *zfcp_unit_attrs[] = { + &dev_attr_unit_failed.attr, + &dev_attr_unit_in_recovery.attr, + &dev_attr_unit_status.attr, + &dev_attr_unit_access_denied.attr, + &dev_attr_unit_access_shared.attr, + &dev_attr_unit_access_readonly.attr, + NULL +}; + +struct attribute_group zfcp_sysfs_unit_attrs = { + .attrs = zfcp_unit_attrs, +}; + +#define ZFCP_DEFINE_LATENCY_ATTR(_name) \ +static ssize_t \ +zfcp_sysfs_unit_##_name##_latency_show(struct device *dev, \ + struct device_attribute *attr, \ + char *buf) { \ + struct scsi_device *sdev = to_scsi_device(dev); \ + struct zfcp_unit *unit = sdev->hostdata; \ + struct zfcp_latencies *lat = &unit->latencies; \ + struct zfcp_adapter *adapter = unit->port->adapter; \ + unsigned long flags; \ + unsigned long long fsum, fmin, fmax, csum, cmin, cmax, cc; \ + \ + spin_lock_irqsave(&lat->lock, flags); \ + fsum = lat->_name.fabric.sum * adapter->timer_ticks; \ + fmin = lat->_name.fabric.min * adapter->timer_ticks; \ + fmax = lat->_name.fabric.max * adapter->timer_ticks; \ + csum = lat->_name.channel.sum * adapter->timer_ticks; \ + cmin = lat->_name.channel.min * adapter->timer_ticks; \ + cmax = lat->_name.channel.max * adapter->timer_ticks; \ + cc = lat->_name.counter; \ + spin_unlock_irqrestore(&lat->lock, flags); \ + \ + do_div(fsum, 1000); \ + do_div(fmin, 1000); \ + do_div(fmax, 1000); \ + do_div(csum, 1000); \ + do_div(cmin, 1000); \ + do_div(cmax, 1000); \ + \ + return sprintf(buf, "%llu %llu %llu %llu %llu %llu %llu\n", \ + fmin, 
fmax, fsum, cmin, cmax, csum, cc); \ +} \ +static ssize_t \ +zfcp_sysfs_unit_##_name##_latency_store(struct device *dev, \ + struct device_attribute *attr, \ + const char *buf, size_t count) \ +{ \ + struct scsi_device *sdev = to_scsi_device(dev); \ + struct zfcp_unit *unit = sdev->hostdata; \ + struct zfcp_latencies *lat = &unit->latencies; \ + unsigned long flags; \ + \ + spin_lock_irqsave(&lat->lock, flags); \ + lat->_name.fabric.sum = 0; \ + lat->_name.fabric.min = 0xFFFFFFFF; \ + lat->_name.fabric.max = 0; \ + lat->_name.channel.sum = 0; \ + lat->_name.channel.min = 0xFFFFFFFF; \ + lat->_name.channel.max = 0; \ + lat->_name.counter = 0; \ + spin_unlock_irqrestore(&lat->lock, flags); \ + \ + return (ssize_t) count; \ +} \ +static DEVICE_ATTR(_name##_latency, S_IWUSR | S_IRUGO, \ + zfcp_sysfs_unit_##_name##_latency_show, \ + zfcp_sysfs_unit_##_name##_latency_store); + +ZFCP_DEFINE_LATENCY_ATTR(read); +ZFCP_DEFINE_LATENCY_ATTR(write); +ZFCP_DEFINE_LATENCY_ATTR(cmd); + +#define ZFCP_DEFINE_SCSI_ATTR(_name, _format, _value) \ +static ssize_t zfcp_sysfs_scsi_##_name##_show(struct device *dev, \ + struct device_attribute *attr,\ + char *buf) \ +{ \ + struct scsi_device *sdev = to_scsi_device(dev); \ + struct zfcp_unit *unit = sdev->hostdata; \ + \ + return sprintf(buf, _format, _value); \ +} \ +static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL); + +ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n", + unit->port->adapter->ccw_device->dev.bus_id); +ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n", unit->port->wwpn); +ZFCP_DEFINE_SCSI_ATTR(fcp_lun, "0x%016llx\n", unit->fcp_lun); + +struct device_attribute *zfcp_sysfs_sdev_attrs[] = { + &dev_attr_fcp_lun, + &dev_attr_wwpn, + &dev_attr_hba_id, + &dev_attr_read_latency, + &dev_attr_write_latency, + &dev_attr_cmd_latency, + NULL +}; + +static ssize_t zfcp_sysfs_adapter_util_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *scsi_host = dev_to_shost(dev); + struct fsf_qtcb_bottom_port *qtcb_port; + struct zfcp_adapter *adapter; + int retval; + + adapter = (struct zfcp_adapter *) scsi_host->hostdata[0]; + if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA)) + return -EOPNOTSUPP; + + qtcb_port = kzalloc(sizeof(struct fsf_qtcb_bottom_port), GFP_KERNEL); + if (!qtcb_port) + return -ENOMEM; + + retval = zfcp_fsf_exchange_port_data_sync(adapter, qtcb_port); + if (!retval) + retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util, + qtcb_port->cb_util, qtcb_port->a_util); + kfree(qtcb_port); + return retval; +} +static DEVICE_ATTR(utilization, S_IRUGO, zfcp_sysfs_adapter_util_show, NULL); + +static int zfcp_sysfs_adapter_ex_config(struct device *dev, + struct fsf_statistics_info *stat_inf) +{ + struct Scsi_Host *scsi_host = dev_to_shost(dev); + struct fsf_qtcb_bottom_config *qtcb_config; + struct zfcp_adapter *adapter; + int retval; + + adapter = (struct zfcp_adapter *) scsi_host->hostdata[0]; + if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA)) + return -EOPNOTSUPP; + + qtcb_config = kzalloc(sizeof(struct fsf_qtcb_bottom_config), + GFP_KERNEL); + if (!qtcb_config) + return -ENOMEM; + + retval = zfcp_fsf_exchange_config_data_sync(adapter, qtcb_config); + if (!retval) + *stat_inf = qtcb_config->stat_info; + + kfree(qtcb_config); + return retval; +} + +#define ZFCP_SHOST_ATTR(_name, _format, _arg...) 
\ +static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \ + struct device_attribute *attr,\ + char *buf) \ +{ \ + struct fsf_statistics_info stat_info; \ + int retval; \ + \ + retval = zfcp_sysfs_adapter_ex_config(dev, &stat_info); \ + if (retval) \ + return retval; \ + \ + return sprintf(buf, _format, ## _arg); \ +} \ +static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_adapter_##_name##_show, NULL); + +ZFCP_SHOST_ATTR(requests, "%llu %llu %llu\n", + (unsigned long long) stat_info.input_req, + (unsigned long long) stat_info.output_req, + (unsigned long long) stat_info.control_req); + +ZFCP_SHOST_ATTR(megabytes, "%llu %llu\n", + (unsigned long long) stat_info.input_mb, + (unsigned long long) stat_info.output_mb); + +ZFCP_SHOST_ATTR(seconds_active, "%llu\n", + (unsigned long long) stat_info.seconds_act); + +struct device_attribute *zfcp_sysfs_shost_attrs[] = { + &dev_attr_utilization, + &dev_attr_requests, + &dev_attr_megabytes, + &dev_attr_seconds_active, + NULL +}; diff --git a/drivers/s390/scsi/zfcp_sysfs_adapter.c b/drivers/s390/scsi/zfcp_sysfs_adapter.c deleted file mode 100644 index ccbba4dd3a77..000000000000 --- a/drivers/s390/scsi/zfcp_sysfs_adapter.c +++ /dev/null @@ -1,270 +0,0 @@ -/* - * This file is part of the zfcp device driver for - * FCP adapters for IBM System z9 and zSeries. - * - * (C) Copyright IBM Corp. 2002, 2006 - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ - -#include "zfcp_ext.h" - -#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG - -/** - * ZFCP_DEFINE_ADAPTER_ATTR - * @_name: name of show attribute - * @_format: format string - * @_value: value to print - * - * Generates attributes for an adapter. 
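
Aside: the ZFCP_DEFINE_ATTR/ZFCP_DEV_ATTR macros added in zfcp_sysfs.c above generate one read-only show routine plus a device_attribute per invocation. Hand-expanded (so formatting is approximate), the first invocation, ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, status, "0x%08x\n", atomic_read(&adapter->status)), comes out roughly as:

	static ssize_t zfcp_sysfs_adapter_status_show(struct device *dev,
						      struct device_attribute *at,
						      char *buf)
	{
		/* dev_get_drvdata() returns the zfcp_adapter bound to this device */
		struct zfcp_adapter *adapter = dev_get_drvdata(dev);

		return sprintf(buf, "0x%08x\n", atomic_read(&adapter->status));
	}
	static struct device_attribute dev_attr_adapter_status =
		__ATTR(status, S_IRUGO, zfcp_sysfs_adapter_status_show, NULL);

The sysfs file is named "status" (the first __ATTR argument), while the C symbol carries the feature prefix, which is why the adapter group above lists it as &dev_attr_adapter_status.attr.
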
- */ -#define ZFCP_DEFINE_ADAPTER_ATTR(_name, _format, _value) \ -static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, struct device_attribute *attr, \ - char *buf) \ -{ \ - struct zfcp_adapter *adapter; \ - \ - adapter = dev_get_drvdata(dev); \ - return sprintf(buf, _format, _value); \ -} \ - \ -static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_adapter_##_name##_show, NULL); - -ZFCP_DEFINE_ADAPTER_ATTR(status, "0x%08x\n", atomic_read(&adapter->status)); -ZFCP_DEFINE_ADAPTER_ATTR(peer_wwnn, "0x%016llx\n", adapter->peer_wwnn); -ZFCP_DEFINE_ADAPTER_ATTR(peer_wwpn, "0x%016llx\n", adapter->peer_wwpn); -ZFCP_DEFINE_ADAPTER_ATTR(peer_d_id, "0x%06x\n", adapter->peer_d_id); -ZFCP_DEFINE_ADAPTER_ATTR(card_version, "0x%04x\n", adapter->hydra_version); -ZFCP_DEFINE_ADAPTER_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version); -ZFCP_DEFINE_ADAPTER_ATTR(hardware_version, "0x%08x\n", - adapter->hardware_version); -ZFCP_DEFINE_ADAPTER_ATTR(in_recovery, "%d\n", atomic_test_mask - (ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status)); - -/** - * zfcp_sysfs_port_add_store - add a port to sysfs tree - * @dev: pointer to belonging device - * @buf: pointer to input buffer - * @count: number of bytes in buffer - * - * Store function of the "port_add" attribute of an adapter. - */ -static ssize_t -zfcp_sysfs_port_add_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) -{ - wwn_t wwpn; - char *endp; - struct zfcp_adapter *adapter; - struct zfcp_port *port; - int retval = -EINVAL; - - down(&zfcp_data.config_sema); - - adapter = dev_get_drvdata(dev); - if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status)) { - retval = -EBUSY; - goto out; - } - - wwpn = simple_strtoull(buf, &endp, 0); - if ((endp + 1) < (buf + count)) - goto out; - - port = zfcp_port_enqueue(adapter, wwpn, 0, 0); - if (!port) - goto out; - - retval = 0; - - zfcp_erp_port_reopen(port, 0, 91, NULL); - zfcp_erp_wait(port->adapter); - zfcp_port_put(port); - out: - up(&zfcp_data.config_sema); - return retval ? retval : (ssize_t) count; -} - -static DEVICE_ATTR(port_add, S_IWUSR, NULL, zfcp_sysfs_port_add_store); - -/** - * zfcp_sysfs_port_remove_store - remove a port from sysfs tree - * @dev: pointer to belonging device - * @buf: pointer to input buffer - * @count: number of bytes in buffer - * - * Store function of the "port_remove" attribute of an adapter. - */ -static ssize_t -zfcp_sysfs_port_remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) -{ - struct zfcp_adapter *adapter; - struct zfcp_port *port; - wwn_t wwpn; - char *endp; - int retval = 0; - - down(&zfcp_data.config_sema); - - adapter = dev_get_drvdata(dev); - if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status)) { - retval = -EBUSY; - goto out; - } - - wwpn = simple_strtoull(buf, &endp, 0); - if ((endp + 1) < (buf + count)) { - retval = -EINVAL; - goto out; - } - - write_lock_irq(&zfcp_data.config_lock); - port = zfcp_get_port_by_wwpn(adapter, wwpn); - if (port && (atomic_read(&port->refcount) == 0)) { - zfcp_port_get(port); - atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status); - list_move(&port->list, &adapter->port_remove_lh); - } - else { - port = NULL; - } - write_unlock_irq(&zfcp_data.config_lock); - - if (!port) { - retval = -ENXIO; - goto out; - } - - zfcp_erp_port_shutdown(port, 0, 92, NULL); - zfcp_erp_wait(adapter); - zfcp_port_put(port); - zfcp_port_dequeue(port); - out: - up(&zfcp_data.config_sema); - return retval ? 
retval : (ssize_t) count; -} - -static DEVICE_ATTR(port_remove, S_IWUSR, NULL, zfcp_sysfs_port_remove_store); - -/** - * zfcp_sysfs_adapter_failed_store - failed state of adapter - * @dev: pointer to belonging device - * @buf: pointer to input buffer - * @count: number of bytes in buffer - * - * Store function of the "failed" attribute of an adapter. - * If a "0" gets written to "failed", error recovery will be - * started for the belonging adapter. - */ -static ssize_t -zfcp_sysfs_adapter_failed_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct zfcp_adapter *adapter; - unsigned int val; - char *endp; - int retval = 0; - - down(&zfcp_data.config_sema); - - adapter = dev_get_drvdata(dev); - if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status)) { - retval = -EBUSY; - goto out; - } - - val = simple_strtoul(buf, &endp, 0); - if (((endp + 1) < (buf + count)) || (val != 0)) { - retval = -EINVAL; - goto out; - } - - zfcp_erp_modify_adapter_status(adapter, 44, NULL, - ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET); - zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 93, - NULL); - zfcp_erp_wait(adapter); - out: - up(&zfcp_data.config_sema); - return retval ? retval : (ssize_t) count; -} - -/** - * zfcp_sysfs_adapter_failed_show - failed state of adapter - * @dev: pointer to belonging device - * @buf: pointer to input buffer - * - * Show function of "failed" attribute of adapter. Will be - * "0" if adapter is working, otherwise "1". - */ -static ssize_t -zfcp_sysfs_adapter_failed_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct zfcp_adapter *adapter; - - adapter = dev_get_drvdata(dev); - if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &adapter->status)) - return sprintf(buf, "1\n"); - else - return sprintf(buf, "0\n"); -} - -static DEVICE_ATTR(failed, S_IWUSR | S_IRUGO, zfcp_sysfs_adapter_failed_show, - zfcp_sysfs_adapter_failed_store); - -static struct attribute *zfcp_adapter_attrs[] = { - &dev_attr_failed.attr, - &dev_attr_in_recovery.attr, - &dev_attr_port_remove.attr, - &dev_attr_port_add.attr, - &dev_attr_peer_wwnn.attr, - &dev_attr_peer_wwpn.attr, - &dev_attr_peer_d_id.attr, - &dev_attr_card_version.attr, - &dev_attr_lic_version.attr, - &dev_attr_status.attr, - &dev_attr_hardware_version.attr, - NULL -}; - -static struct attribute_group zfcp_adapter_attr_group = { - .attrs = zfcp_adapter_attrs, -}; - -/** - * zfcp_sysfs_create_adapter_files - create sysfs adapter files - * @dev: pointer to belonging device - * - * Create all attributes of the sysfs representation of an adapter. - */ -int -zfcp_sysfs_adapter_create_files(struct device *dev) -{ - return sysfs_create_group(&dev->kobj, &zfcp_adapter_attr_group); -} - -/** - * zfcp_sysfs_remove_adapter_files - remove sysfs adapter files - * @dev: pointer to belonging device - * - * Remove all attributes of the sysfs representation of an adapter. - */ -void -zfcp_sysfs_adapter_remove_files(struct device *dev) -{ - sysfs_remove_group(&dev->kobj, &zfcp_adapter_attr_group); -} - -#undef ZFCP_LOG_AREA diff --git a/drivers/s390/scsi/zfcp_sysfs_driver.c b/drivers/s390/scsi/zfcp_sysfs_driver.c deleted file mode 100644 index 651edd58906a..000000000000 --- a/drivers/s390/scsi/zfcp_sysfs_driver.c +++ /dev/null @@ -1,106 +0,0 @@ -/* - * This file is part of the zfcp device driver for - * FCP adapters for IBM System z9 and zSeries. - * - * (C) Copyright IBM Corp. 
2002, 2006 - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ - -#include "zfcp_ext.h" - -#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG - -/** - * ZFCP_DEFINE_DRIVER_ATTR - define for all loglevels sysfs attributes - * @_name: name of attribute - * @_define: name of ZFCP loglevel define - * - * Generates store function for a sysfs loglevel attribute of zfcp driver. - */ -#define ZFCP_DEFINE_DRIVER_ATTR(_name, _define) \ -static ssize_t zfcp_sysfs_loglevel_##_name##_store(struct device_driver *drv, \ - const char *buf, \ - size_t count) \ -{ \ - unsigned int loglevel; \ - unsigned int new_loglevel; \ - char *endp; \ - \ - new_loglevel = simple_strtoul(buf, &endp, 0); \ - if ((endp + 1) < (buf + count)) \ - return -EINVAL; \ - if (new_loglevel > 3) \ - return -EINVAL; \ - down(&zfcp_data.config_sema); \ - loglevel = atomic_read(&zfcp_data.loglevel); \ - loglevel &= ~((unsigned int) 0xf << (ZFCP_LOG_AREA_##_define << 2)); \ - loglevel |= new_loglevel << (ZFCP_LOG_AREA_##_define << 2); \ - atomic_set(&zfcp_data.loglevel, loglevel); \ - up(&zfcp_data.config_sema); \ - return count; \ -} \ - \ -static ssize_t zfcp_sysfs_loglevel_##_name##_show(struct device_driver *dev, \ - char *buf) \ -{ \ - return sprintf(buf,"%d\n", (unsigned int) \ - ZFCP_GET_LOG_VALUE(ZFCP_LOG_AREA_##_define)); \ -} \ - \ -static DRIVER_ATTR(loglevel_##_name, S_IWUSR | S_IRUGO, \ - zfcp_sysfs_loglevel_##_name##_show, \ - zfcp_sysfs_loglevel_##_name##_store); - -ZFCP_DEFINE_DRIVER_ATTR(other, OTHER); -ZFCP_DEFINE_DRIVER_ATTR(scsi, SCSI); -ZFCP_DEFINE_DRIVER_ATTR(fsf, FSF); -ZFCP_DEFINE_DRIVER_ATTR(config, CONFIG); -ZFCP_DEFINE_DRIVER_ATTR(cio, CIO); -ZFCP_DEFINE_DRIVER_ATTR(qdio, QDIO); -ZFCP_DEFINE_DRIVER_ATTR(erp, ERP); -ZFCP_DEFINE_DRIVER_ATTR(fc, FC); - -static ssize_t zfcp_sysfs_version_show(struct device_driver *dev, - char *buf) -{ - return sprintf(buf, "%s\n", zfcp_data.driver_version); -} - -static DRIVER_ATTR(version, S_IRUGO, zfcp_sysfs_version_show, NULL); - -static struct attribute *zfcp_driver_attrs[] = { - &driver_attr_loglevel_other.attr, - &driver_attr_loglevel_scsi.attr, - &driver_attr_loglevel_fsf.attr, - &driver_attr_loglevel_config.attr, - &driver_attr_loglevel_cio.attr, - &driver_attr_loglevel_qdio.attr, - &driver_attr_loglevel_erp.attr, - &driver_attr_loglevel_fc.attr, - &driver_attr_version.attr, - NULL -}; - -static struct attribute_group zfcp_driver_attr_group = { - .attrs = zfcp_driver_attrs, -}; - -struct attribute_group *zfcp_driver_attr_groups[] = { - &zfcp_driver_attr_group, - NULL, -}; - -#undef ZFCP_LOG_AREA diff --git a/drivers/s390/scsi/zfcp_sysfs_port.c b/drivers/s390/scsi/zfcp_sysfs_port.c deleted file mode 100644 index 703c1b5cb602..000000000000 --- a/drivers/s390/scsi/zfcp_sysfs_port.c +++ /dev/null @@ -1,295 +0,0 @@ -/* - * This file is part of the zfcp device driver for - * FCP adapters for IBM System z9 and zSeries. 
- * - * (C) Copyright IBM Corp. 2002, 2006 - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ - -#include "zfcp_ext.h" - -#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG - -/** - * zfcp_sysfs_port_release - gets called when a struct device port is released - * @dev: pointer to belonging device - */ -void -zfcp_sysfs_port_release(struct device *dev) -{ - kfree(dev); -} - -/** - * ZFCP_DEFINE_PORT_ATTR - * @_name: name of show attribute - * @_format: format string - * @_value: value to print - * - * Generates attributes for a port. - */ -#define ZFCP_DEFINE_PORT_ATTR(_name, _format, _value) \ -static ssize_t zfcp_sysfs_port_##_name##_show(struct device *dev, struct device_attribute *attr, \ - char *buf) \ -{ \ - struct zfcp_port *port; \ - \ - port = dev_get_drvdata(dev); \ - return sprintf(buf, _format, _value); \ -} \ - \ -static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_port_##_name##_show, NULL); - -ZFCP_DEFINE_PORT_ATTR(status, "0x%08x\n", atomic_read(&port->status)); -ZFCP_DEFINE_PORT_ATTR(in_recovery, "%d\n", atomic_test_mask - (ZFCP_STATUS_COMMON_ERP_INUSE, &port->status)); -ZFCP_DEFINE_PORT_ATTR(access_denied, "%d\n", atomic_test_mask - (ZFCP_STATUS_COMMON_ACCESS_DENIED, &port->status)); - -/** - * zfcp_sysfs_unit_add_store - add a unit to sysfs tree - * @dev: pointer to belonging device - * @buf: pointer to input buffer - * @count: number of bytes in buffer - * - * Store function of the "unit_add" attribute of a port. - */ -static ssize_t -zfcp_sysfs_unit_add_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) -{ - fcp_lun_t fcp_lun; - char *endp; - struct zfcp_port *port; - struct zfcp_unit *unit; - int retval = -EINVAL; - - down(&zfcp_data.config_sema); - - port = dev_get_drvdata(dev); - if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status)) { - retval = -EBUSY; - goto out; - } - - fcp_lun = simple_strtoull(buf, &endp, 0); - if ((endp + 1) < (buf + count)) - goto out; - - unit = zfcp_unit_enqueue(port, fcp_lun); - if (!unit) - goto out; - - retval = 0; - - zfcp_erp_unit_reopen(unit, 0, 94, NULL); - zfcp_erp_wait(unit->port->adapter); - zfcp_unit_put(unit); - out: - up(&zfcp_data.config_sema); - return retval ? 
retval : (ssize_t) count; -} - -static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store); - -/** - * zfcp_sysfs_unit_remove_store - remove a unit from sysfs tree - * @dev: pointer to belonging device - * @buf: pointer to input buffer - * @count: number of bytes in buffer - */ -static ssize_t -zfcp_sysfs_unit_remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) -{ - struct zfcp_port *port; - struct zfcp_unit *unit; - fcp_lun_t fcp_lun; - char *endp; - int retval = 0; - - down(&zfcp_data.config_sema); - - port = dev_get_drvdata(dev); - if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status)) { - retval = -EBUSY; - goto out; - } - - fcp_lun = simple_strtoull(buf, &endp, 0); - if ((endp + 1) < (buf + count)) { - retval = -EINVAL; - goto out; - } - - write_lock_irq(&zfcp_data.config_lock); - unit = zfcp_get_unit_by_lun(port, fcp_lun); - if (unit && (atomic_read(&unit->refcount) == 0)) { - zfcp_unit_get(unit); - atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status); - list_move(&unit->list, &port->unit_remove_lh); - } - else { - unit = NULL; - } - write_unlock_irq(&zfcp_data.config_lock); - - if (!unit) { - retval = -ENXIO; - goto out; - } - - zfcp_erp_unit_shutdown(unit, 0, 95, NULL); - zfcp_erp_wait(unit->port->adapter); - zfcp_unit_put(unit); - zfcp_unit_dequeue(unit); - out: - up(&zfcp_data.config_sema); - return retval ? retval : (ssize_t) count; -} - -static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store); - -/** - * zfcp_sysfs_port_failed_store - failed state of port - * @dev: pointer to belonging device - * @buf: pointer to input buffer - * @count: number of bytes in buffer - * - * Store function of the "failed" attribute of a port. - * If a "0" gets written to "failed", error recovery will be - * started for the belonging port. - */ -static ssize_t -zfcp_sysfs_port_failed_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) -{ - struct zfcp_port *port; - unsigned int val; - char *endp; - int retval = 0; - - down(&zfcp_data.config_sema); - - port = dev_get_drvdata(dev); - if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status)) { - retval = -EBUSY; - goto out; - } - - val = simple_strtoul(buf, &endp, 0); - if (((endp + 1) < (buf + count)) || (val != 0)) { - retval = -EINVAL; - goto out; - } - - zfcp_erp_modify_port_status(port, 45, NULL, - ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET); - zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, 96, NULL); - zfcp_erp_wait(port->adapter); - out: - up(&zfcp_data.config_sema); - return retval ? retval : (ssize_t) count; -} - -/** - * zfcp_sysfs_port_failed_show - failed state of port - * @dev: pointer to belonging device - * @buf: pointer to input buffer - * - * Show function of "failed" attribute of port. Will be - * "0" if port is working, otherwise "1". - */ -static ssize_t -zfcp_sysfs_port_failed_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct zfcp_port *port; - - port = dev_get_drvdata(dev); - if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &port->status)) - return sprintf(buf, "1\n"); - else - return sprintf(buf, "0\n"); -} - -static DEVICE_ATTR(failed, S_IWUSR | S_IRUGO, zfcp_sysfs_port_failed_show, - zfcp_sysfs_port_failed_store); - -/** - * zfcp_port_common_attrs - * sysfs attributes that are common for all kind of fc ports. 
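
Aside: a recurring change in the rewritten store routines is dropping the simple_strtoul()/simple_strtoull() call plus manual end-pointer check in favour of strict_strtoul()/strict_strtoull(), which fail on trailing garbage by themselves (a single trailing newline is still accepted). A condensed sketch of the two styles for a hypothetical "write 0 to clear the failed state" attribute, error codes as in the driver:

	#include <linux/device.h>
	#include <linux/kernel.h>

	/* Old style: parse, then verify the whole string was consumed. */
	static ssize_t old_style_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
	{
		char *endp;
		unsigned long val = simple_strtoul(buf, &endp, 0);

		if (((endp + 1) < (buf + count)) || (val != 0))
			return -EINVAL;
		/* ... trigger recovery ... */
		return count;
	}

	/* New style: strict_strtoul() rejects trailing non-numeric input. */
	static ssize_t new_style_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
	{
		unsigned long val;

		if (strict_strtoul(buf, 0, &val) || val != 0)
			return -EINVAL;
		/* ... trigger recovery ... */
		return count;
	}
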
- */ -static struct attribute *zfcp_port_common_attrs[] = { - &dev_attr_failed.attr, - &dev_attr_in_recovery.attr, - &dev_attr_status.attr, - &dev_attr_access_denied.attr, - NULL -}; - -static struct attribute_group zfcp_port_common_attr_group = { - .attrs = zfcp_port_common_attrs, -}; - -/** - * zfcp_port_no_ns_attrs - * sysfs attributes not to be used for nameserver ports. - */ -static struct attribute *zfcp_port_no_ns_attrs[] = { - &dev_attr_unit_add.attr, - &dev_attr_unit_remove.attr, - NULL -}; - -static struct attribute_group zfcp_port_no_ns_attr_group = { - .attrs = zfcp_port_no_ns_attrs, -}; - -/** - * zfcp_sysfs_port_create_files - create sysfs port files - * @dev: pointer to belonging device - * - * Create all attributes of the sysfs representation of a port. - */ -int -zfcp_sysfs_port_create_files(struct device *dev, u32 flags) -{ - int retval; - - retval = sysfs_create_group(&dev->kobj, &zfcp_port_common_attr_group); - - if ((flags & ZFCP_STATUS_PORT_WKA) || retval) - return retval; - - retval = sysfs_create_group(&dev->kobj, &zfcp_port_no_ns_attr_group); - if (retval) - sysfs_remove_group(&dev->kobj, &zfcp_port_common_attr_group); - - return retval; -} - -/** - * zfcp_sysfs_port_remove_files - remove sysfs port files - * @dev: pointer to belonging device - * - * Remove all attributes of the sysfs representation of a port. - */ -void -zfcp_sysfs_port_remove_files(struct device *dev, u32 flags) -{ - sysfs_remove_group(&dev->kobj, &zfcp_port_common_attr_group); - if (!(flags & ZFCP_STATUS_PORT_WKA)) - sysfs_remove_group(&dev->kobj, &zfcp_port_no_ns_attr_group); -} - -#undef ZFCP_LOG_AREA diff --git a/drivers/s390/scsi/zfcp_sysfs_unit.c b/drivers/s390/scsi/zfcp_sysfs_unit.c deleted file mode 100644 index 80fb2c2cf48a..000000000000 --- a/drivers/s390/scsi/zfcp_sysfs_unit.c +++ /dev/null @@ -1,167 +0,0 @@ -/* - * This file is part of the zfcp device driver for - * FCP adapters for IBM System z9 and zSeries. - * - * (C) Copyright IBM Corp. 2002, 2006 - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ - -#include "zfcp_ext.h" - -#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG - -/** - * zfcp_sysfs_unit_release - gets called when a struct device unit is released - * @dev: pointer to belonging device - */ -void -zfcp_sysfs_unit_release(struct device *dev) -{ - kfree(dev); -} - -/** - * ZFCP_DEFINE_UNIT_ATTR - * @_name: name of show attribute - * @_format: format string - * @_value: value to print - * - * Generates attribute for a unit. 
- */ -#define ZFCP_DEFINE_UNIT_ATTR(_name, _format, _value) \ -static ssize_t zfcp_sysfs_unit_##_name##_show(struct device *dev, struct device_attribute *attr, \ - char *buf) \ -{ \ - struct zfcp_unit *unit; \ - \ - unit = dev_get_drvdata(dev); \ - return sprintf(buf, _format, _value); \ -} \ - \ -static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_unit_##_name##_show, NULL); - -ZFCP_DEFINE_UNIT_ATTR(status, "0x%08x\n", atomic_read(&unit->status)); -ZFCP_DEFINE_UNIT_ATTR(in_recovery, "%d\n", atomic_test_mask - (ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status)); -ZFCP_DEFINE_UNIT_ATTR(access_denied, "%d\n", atomic_test_mask - (ZFCP_STATUS_COMMON_ACCESS_DENIED, &unit->status)); -ZFCP_DEFINE_UNIT_ATTR(access_shared, "%d\n", atomic_test_mask - (ZFCP_STATUS_UNIT_SHARED, &unit->status)); -ZFCP_DEFINE_UNIT_ATTR(access_readonly, "%d\n", atomic_test_mask - (ZFCP_STATUS_UNIT_READONLY, &unit->status)); - -/** - * zfcp_sysfs_unit_failed_store - failed state of unit - * @dev: pointer to belonging device - * @buf: pointer to input buffer - * @count: number of bytes in buffer - * - * Store function of the "failed" attribute of a unit. - * If a "0" gets written to "failed", error recovery will be - * started for the belonging unit. - */ -static ssize_t -zfcp_sysfs_unit_failed_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) -{ - struct zfcp_unit *unit; - unsigned int val; - char *endp; - int retval = 0; - - down(&zfcp_data.config_sema); - unit = dev_get_drvdata(dev); - if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status)) { - retval = -EBUSY; - goto out; - } - - val = simple_strtoul(buf, &endp, 0); - if (((endp + 1) < (buf + count)) || (val != 0)) { - retval = -EINVAL; - goto out; - } - - zfcp_erp_modify_unit_status(unit, 46, NULL, - ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET); - zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, 97, NULL); - zfcp_erp_wait(unit->port->adapter); - out: - up(&zfcp_data.config_sema); - return retval ? retval : (ssize_t) count; -} - -/** - * zfcp_sysfs_unit_failed_show - failed state of unit - * @dev: pointer to belonging device - * @buf: pointer to input buffer - * - * Show function of "failed" attribute of unit. Will be - * "0" if unit is working, otherwise "1". - */ -static ssize_t -zfcp_sysfs_unit_failed_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct zfcp_unit *unit; - - unit = dev_get_drvdata(dev); - if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &unit->status)) - return sprintf(buf, "1\n"); - else - return sprintf(buf, "0\n"); -} - -static DEVICE_ATTR(failed, S_IWUSR | S_IRUGO, zfcp_sysfs_unit_failed_show, - zfcp_sysfs_unit_failed_store); - -static struct attribute *zfcp_unit_attrs[] = { - &dev_attr_failed.attr, - &dev_attr_in_recovery.attr, - &dev_attr_status.attr, - &dev_attr_access_denied.attr, - &dev_attr_access_shared.attr, - &dev_attr_access_readonly.attr, - NULL -}; - -static struct attribute_group zfcp_unit_attr_group = { - .attrs = zfcp_unit_attrs, -}; - -/** - * zfcp_sysfs_create_unit_files - create sysfs unit files - * @dev: pointer to belonging device - * - * Create all attributes of the sysfs representation of a unit. - */ -int -zfcp_sysfs_unit_create_files(struct device *dev) -{ - return sysfs_create_group(&dev->kobj, &zfcp_unit_attr_group); -} - -/** - * zfcp_sysfs_remove_unit_files - remove sysfs unit files - * @dev: pointer to belonging device - * - * Remove all attributes of the sysfs representation of a unit. 
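
Aside: the deleted zfcp_sysfs_*_create_files()/..._remove_files() helpers are thin wrappers around the generic attribute-group API; the new zfcp_sysfs.c keeps only the attribute_group definitions and leaves registration to other parts of the driver. The underlying pattern, shown here for a hypothetical device with one attribute (names invented), is:

	#include <linux/device.h>
	#include <linux/sysfs.h>

	static ssize_t example_status_show(struct device *dev,
					   struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "ok\n");	/* placeholder value */
	}
	static DEVICE_ATTR(status, S_IRUGO, example_status_show, NULL);

	static struct attribute *example_attrs[] = {
		&dev_attr_status.attr,
		NULL				/* array must be NULL-terminated */
	};

	static struct attribute_group example_attr_group = {
		.attrs = example_attrs,
	};

	/* One call creates a sysfs file per attribute under the device's kobject. */
	static int example_create_files(struct device *dev)
	{
		return sysfs_create_group(&dev->kobj, &example_attr_group);
	}

	static void example_remove_files(struct device *dev)
	{
		sysfs_remove_group(&dev->kobj, &example_attr_group);
	}
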
- */ -void -zfcp_sysfs_unit_remove_files(struct device *dev) -{ - sysfs_remove_group(&dev->kobj, &zfcp_unit_attr_group); -} - -#undef ZFCP_LOG_AREA diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 81ccbd7f9e34..26be540d1dd3 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -888,6 +888,25 @@ config SCSI_IBMVSCSIS To compile this driver as a module, choose M here: the module will be called ibmvstgt. +config SCSI_IBMVFC + tristate "IBM Virtual FC support" + depends on PPC_PSERIES && SCSI + select SCSI_FC_ATTRS + help + This is the IBM POWER Virtual FC Client + + To compile this driver as a module, choose M here: the + module will be called ibmvfc. + +config SCSI_IBMVFC_TRACE + bool "enable driver internal trace" + depends on SCSI_IBMVFC + default y + help + If you say Y here, the driver will trace all commands issued + to the adapter. Performance impact is minimal. Trace can be + dumped using /sys/class/scsi_host/hostXX/trace. + config SCSI_INITIO tristate "Initio 9100U(W) support" depends on PCI && SCSI @@ -1738,10 +1757,12 @@ config SCSI_SUNESP select SCSI_SPI_ATTRS help This is the driver for the Sun ESP SCSI host adapter. The ESP - chipset is present in most SPARC SBUS-based computers. + chipset is present in most SPARC SBUS-based computers and + supports the Emulex family of ESP SCSI chips (esp100, esp100A, + esp236, fas101, fas236) as well as the Qlogic fas366 SCSI chip. To compile this driver as a module, choose M here: the - module will be called esp. + module will be called sun_esp. config ZFCP tristate "FCP host bus adapter driver for IBM eServer zSeries" @@ -1771,4 +1792,6 @@ endif # SCSI_LOWLEVEL source "drivers/scsi/pcmcia/Kconfig" +source "drivers/scsi/device_handler/Kconfig" + endmenu diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 6c775e350c98..a8149677de23 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -34,6 +34,7 @@ obj-$(CONFIG_SCSI_ISCSI_ATTRS) += scsi_transport_iscsi.o obj-$(CONFIG_SCSI_SAS_ATTRS) += scsi_transport_sas.o obj-$(CONFIG_SCSI_SAS_LIBSAS) += libsas/ obj-$(CONFIG_SCSI_SRP_ATTRS) += scsi_transport_srp.o +obj-$(CONFIG_SCSI_DH) += device_handler/ obj-$(CONFIG_ISCSI_TCP) += libiscsi.o iscsi_tcp.o obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o @@ -118,6 +119,7 @@ obj-$(CONFIG_SCSI_IPR) += ipr.o obj-$(CONFIG_SCSI_SRP) += libsrp.o obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsi/ obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsi/ +obj-$(CONFIG_SCSI_IBMVFC) += ibmvscsi/ obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o obj-$(CONFIG_SCSI_STEX) += stex.o obj-$(CONFIG_SCSI_MVSAS) += mvsas.o diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c index ced3eebe252c..84bb61628372 100644 --- a/drivers/scsi/a100u2w.c +++ b/drivers/scsi/a100u2w.c @@ -389,7 +389,7 @@ static u8 orc_load_firmware(struct orc_host * host) outb(PRGMRST | DOWNLOAD, host->base + ORC_RISCCTL); /* Enable SRAM programming */ data32_ptr = (u8 *) & data32; - data32 = 0; /* Initial FW address to 0 */ + data32 = cpu_to_le32(0); /* Initial FW address to 0 */ outw(0x0010, host->base + ORC_EBIOSADR0); *data32_ptr = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */ outw(0x0011, host->base + ORC_EBIOSADR0); @@ -397,18 +397,19 @@ static u8 orc_load_firmware(struct orc_host * host) outw(0x0012, host->base + ORC_EBIOSADR0); *(data32_ptr + 2) = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */ outw(*(data32_ptr + 2), host->base + ORC_EBIOSADR2); - outl(data32, host->base + ORC_FWBASEADR); /* Write FW address */ + outl(le32_to_cpu(data32), host->base + ORC_FWBASEADR); /* 
Write FW address */ /* Copy the code from the BIOS to the SRAM */ - bios_addr = (u16) data32; /* FW code locate at BIOS address + ? */ + udelay(500); /* Required on Sun Ultra 5 ... 350 -> failures */ + bios_addr = (u16) le32_to_cpu(data32); /* FW code locate at BIOS address + ? */ for (i = 0, data32_ptr = (u8 *) & data32; /* Download the code */ i < 0x1000; /* Firmware code size = 4K */ i++, bios_addr++) { outw(bios_addr, host->base + ORC_EBIOSADR0); *data32_ptr++ = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */ if ((i % 4) == 3) { - outl(data32, host->base + ORC_RISCRAM); /* Write every 4 bytes */ + outl(le32_to_cpu(data32), host->base + ORC_RISCRAM); /* Write every 4 bytes */ data32_ptr = (u8 *) & data32; } } @@ -423,7 +424,7 @@ static u8 orc_load_firmware(struct orc_host * host) outw(bios_addr, host->base + ORC_EBIOSADR0); *data32_ptr++ = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */ if ((i % 4) == 3) { - if (inl(host->base + ORC_RISCRAM) != data32) { + if (inl(host->base + ORC_RISCRAM) != le32_to_cpu(data32)) { outb(PRGMRST, host->base + ORC_RISCCTL); /* Reset program to 0 */ outb(data, host->base + ORC_GCFG); /*Disable EEPROM programming */ return 0; @@ -459,8 +460,8 @@ static void setup_SCBs(struct orc_host * host) for (i = 0; i < ORC_MAXQUEUE; i++) { escb_phys = (host->escb_phys + (sizeof(struct orc_extended_scb) * i)); - scb->sg_addr = (u32) escb_phys; - scb->sense_addr = (u32) escb_phys; + scb->sg_addr = cpu_to_le32((u32) escb_phys); + scb->sense_addr = cpu_to_le32((u32) escb_phys); scb->escb = escb; scb->scbidx = i; scb++; @@ -642,8 +643,8 @@ static int orc_device_reset(struct orc_host * host, struct scsi_cmnd *cmd, unsig scb->link = 0xFF; scb->reserved0 = 0; scb->reserved1 = 0; - scb->xferlen = 0; - scb->sg_len = 0; + scb->xferlen = cpu_to_le32(0); + scb->sg_len = cpu_to_le32(0); escb->srb = NULL; escb->srb = cmd; @@ -839,7 +840,7 @@ static irqreturn_t orc_interrupt(struct orc_host * host) * Build a host adapter control block from the SCSI mid layer command */ -static void inia100_build_scb(struct orc_host * host, struct orc_scb * scb, struct scsi_cmnd * cmd) +static int inia100_build_scb(struct orc_host * host, struct orc_scb * scb, struct scsi_cmnd * cmd) { /* Create corresponding SCB */ struct scatterlist *sg; struct orc_sgent *sgent; /* Pointer to SG list */ @@ -858,28 +859,30 @@ static void inia100_build_scb(struct orc_host * host, struct orc_scb * scb, stru scb->lun = cmd->device->lun; scb->reserved0 = 0; scb->reserved1 = 0; - scb->sg_len = 0; + scb->sg_len = cpu_to_le32(0); - scb->xferlen = (u32) scsi_bufflen(cmd); + scb->xferlen = cpu_to_le32((u32) scsi_bufflen(cmd)); sgent = (struct orc_sgent *) & escb->sglist[0]; count_sg = scsi_dma_map(cmd); - BUG_ON(count_sg < 0); + if (count_sg < 0) + return count_sg; + BUG_ON(count_sg > TOTAL_SG_ENTRY); /* Build the scatter gather lists */ if (count_sg) { - scb->sg_len = (u32) (count_sg * 8); + scb->sg_len = cpu_to_le32((u32) (count_sg * 8)); scsi_for_each_sg(cmd, sg, count_sg, i) { - sgent->base = (u32) sg_dma_address(sg); - sgent->length = (u32) sg_dma_len(sg); + sgent->base = cpu_to_le32((u32) sg_dma_address(sg)); + sgent->length = cpu_to_le32((u32) sg_dma_len(sg)); sgent++; } } else { - scb->sg_len = 0; - sgent->base = 0; - sgent->length = 0; + scb->sg_len = cpu_to_le32(0); + sgent->base = cpu_to_le32(0); + sgent->length = cpu_to_le32(0); } - scb->sg_addr = (u32) scb->sense_addr; + scb->sg_addr = (u32) scb->sense_addr; /* sense_addr is already little endian */ scb->hastat = 0; scb->tastat = 0; scb->link = 0xFF; 
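
Aside: the a100u2w changes above wrap every value that lands in a controller-visible structure in cpu_to_le32()/le32_to_cpu(), so the descriptors stay little endian regardless of host byte order. A reduced sketch of the convention, with an invented descriptor layout standing in for orc_scb:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	struct example_scb {
		__le32 xferlen;		/* descriptor fields are little endian */
		__le32 sg_addr;
	};

	static void example_fill_scb(struct example_scb *scb, u32 len, u32 addr)
	{
		/* Convert CPU-order values at the point they become visible to
		 * the adapter; on little-endian hosts this compiles to a no-op. */
		scb->xferlen = cpu_to_le32(len);
		scb->sg_addr = cpu_to_le32(addr);
	}

	static bool example_verify(const struct example_scb *scb, u32 expected_len)
	{
		/* Convert back before comparing in CPU order. */
		return le32_to_cpu(scb->xferlen) == expected_len;
	}
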
@@ -896,6 +899,7 @@ static void inia100_build_scb(struct orc_host * host, struct orc_scb * scb, stru scb->tag_msg = 0; /* No tag support */ } memcpy(scb->cdb, cmd->cmnd, scb->cdb_len); + return 0; } /** @@ -919,7 +923,10 @@ static int inia100_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd if ((scb = orc_alloc_scb(host)) == NULL) return SCSI_MLQUEUE_HOST_BUSY; - inia100_build_scb(host, scb, cmd); + if (inia100_build_scb(host, scb, cmd)) { + orc_release_scb(host, scb); + return SCSI_MLQUEUE_HOST_BUSY; + } orc_exec_scb(host, scb); /* Start execute SCB */ return 0; } diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index 5fd83deab36c..a7355260cfcf 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c @@ -41,6 +41,7 @@ #include <linux/kthread.h> #include <linux/semaphore.h> #include <asm/uaccess.h> +#include <scsi/scsi_host.h> #include "aacraid.h" @@ -581,6 +582,14 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) for (i = 0; i < upsg->count; i++) { u64 addr; void* p; + if (upsg->sg[i].count > + (dev->adapter_info.options & + AAC_OPT_NEW_COMM) ? + (dev->scsi_host_ptr->max_sectors << 9) : + 65536) { + rcode = -EINVAL; + goto cleanup; + } /* Does this really need to be GFP_DMA? */ p = kmalloc(upsg->sg[i].count,GFP_KERNEL|__GFP_DMA); if(!p) { @@ -625,6 +634,14 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) for (i = 0; i < usg->count; i++) { u64 addr; void* p; + if (usg->sg[i].count > + (dev->adapter_info.options & + AAC_OPT_NEW_COMM) ? + (dev->scsi_host_ptr->max_sectors << 9) : + 65536) { + rcode = -EINVAL; + goto cleanup; + } /* Does this really need to be GFP_DMA? */ p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA); if(!p) { @@ -667,6 +684,14 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) for (i = 0; i < upsg->count; i++) { uintptr_t addr; void* p; + if (usg->sg[i].count > + (dev->adapter_info.options & + AAC_OPT_NEW_COMM) ? + (dev->scsi_host_ptr->max_sectors << 9) : + 65536) { + rcode = -EINVAL; + goto cleanup; + } /* Does this really need to be GFP_DMA? */ p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA); if(!p) { @@ -698,6 +723,14 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) for (i = 0; i < upsg->count; i++) { dma_addr_t addr; void* p; + if (upsg->sg[i].count > + (dev->adapter_info.options & + AAC_OPT_NEW_COMM) ? 
+ (dev->scsi_host_ptr->max_sectors << 9) : + 65536) { + rcode = -EINVAL; + goto cleanup; + } p = kmalloc(upsg->sg[i].count, GFP_KERNEL); if (!p) { dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 68c140e82673..9aa301c1ed07 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -865,7 +865,7 @@ static ssize_t aac_show_bios_version(struct device *device, return len; } -ssize_t aac_show_serial_number(struct device *device, +static ssize_t aac_show_serial_number(struct device *device, struct device_attribute *attr, char *buf) { struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata; diff --git a/drivers/scsi/device_handler/Kconfig b/drivers/scsi/device_handler/Kconfig new file mode 100644 index 000000000000..2adc0f666b68 --- /dev/null +++ b/drivers/scsi/device_handler/Kconfig @@ -0,0 +1,32 @@ +# +# SCSI Device Handler configuration +# + +menuconfig SCSI_DH + tristate "SCSI Device Handlers" + depends on SCSI + default n + help + SCSI Device Handlers provide device specific support for + devices utilized in multipath configurations. Say Y here to + select support for specific hardware. + +config SCSI_DH_RDAC + tristate "LSI RDAC Device Handler" + depends on SCSI_DH + help + If you have a LSI RDAC select y. Otherwise, say N. + +config SCSI_DH_HP_SW + tristate "HP/COMPAQ MSA Device Handler" + depends on SCSI_DH + help + If you have a HP/COMPAQ MSA device that requires START_STOP to + be sent to start it and cannot upgrade the firmware then select y. + Otherwise, say N. + +config SCSI_DH_EMC + tristate "EMC CLARiiON Device Handler" + depends on SCSI_DH + help + If you have a EMC CLARiiON select y. Otherwise, say N. diff --git a/drivers/scsi/device_handler/Makefile b/drivers/scsi/device_handler/Makefile new file mode 100644 index 000000000000..35272e93b1c8 --- /dev/null +++ b/drivers/scsi/device_handler/Makefile @@ -0,0 +1,7 @@ +# +# SCSI Device Handler +# +obj-$(CONFIG_SCSI_DH) += scsi_dh.o +obj-$(CONFIG_SCSI_DH_RDAC) += scsi_dh_rdac.o +obj-$(CONFIG_SCSI_DH_HP_SW) += scsi_dh_hp_sw.o +obj-$(CONFIG_SCSI_DH_EMC) += scsi_dh_emc.o diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c new file mode 100644 index 000000000000..ab6c21cd9689 --- /dev/null +++ b/drivers/scsi/device_handler/scsi_dh.c @@ -0,0 +1,162 @@ +/* + * SCSI device handler infrastruture. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + * Copyright IBM Corporation, 2007 + * Authors: + * Chandra Seetharaman <sekharan@us.ibm.com> + * Mike Anderson <andmike@linux.vnet.ibm.com> + */ + +#include <scsi/scsi_dh.h> +#include "../scsi_priv.h" + +static DEFINE_SPINLOCK(list_lock); +static LIST_HEAD(scsi_dh_list); + +static struct scsi_device_handler *get_device_handler(const char *name) +{ + struct scsi_device_handler *tmp, *found = NULL; + + spin_lock(&list_lock); + list_for_each_entry(tmp, &scsi_dh_list, list) { + if (!strcmp(tmp->name, name)) { + found = tmp; + break; + } + } + spin_unlock(&list_lock); + return found; +} + +static int scsi_dh_notifier_add(struct device *dev, void *data) +{ + struct scsi_device_handler *scsi_dh = data; + + scsi_dh->nb.notifier_call(&scsi_dh->nb, BUS_NOTIFY_ADD_DEVICE, dev); + return 0; +} + +/* + * scsi_register_device_handler - register a device handler personality + * module. + * @scsi_dh - device handler to be registered. + * + * Returns 0 on success, -EBUSY if handler already registered. + */ +int scsi_register_device_handler(struct scsi_device_handler *scsi_dh) +{ + int ret = -EBUSY; + struct scsi_device_handler *tmp; + + tmp = get_device_handler(scsi_dh->name); + if (tmp) + goto done; + + ret = bus_register_notifier(&scsi_bus_type, &scsi_dh->nb); + + bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, scsi_dh_notifier_add); + spin_lock(&list_lock); + list_add(&scsi_dh->list, &scsi_dh_list); + spin_unlock(&list_lock); + +done: + return ret; +} +EXPORT_SYMBOL_GPL(scsi_register_device_handler); + +static int scsi_dh_notifier_remove(struct device *dev, void *data) +{ + struct scsi_device_handler *scsi_dh = data; + + scsi_dh->nb.notifier_call(&scsi_dh->nb, BUS_NOTIFY_DEL_DEVICE, dev); + return 0; +} + +/* + * scsi_unregister_device_handler - register a device handler personality + * module. + * @scsi_dh - device handler to be unregistered. + * + * Returns 0 on success, -ENODEV if handler not registered. + */ +int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh) +{ + int ret = -ENODEV; + struct scsi_device_handler *tmp; + + tmp = get_device_handler(scsi_dh->name); + if (!tmp) + goto done; + + ret = bus_unregister_notifier(&scsi_bus_type, &scsi_dh->nb); + + bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, + scsi_dh_notifier_remove); + spin_lock(&list_lock); + list_del(&scsi_dh->list); + spin_unlock(&list_lock); + +done: + return ret; +} +EXPORT_SYMBOL_GPL(scsi_unregister_device_handler); + +/* + * scsi_dh_activate - activate the path associated with the scsi_device + * corresponding to the given request queue. + * @q - Request queue that is associated with the scsi_device to be + * activated. + */ +int scsi_dh_activate(struct request_queue *q) +{ + int err = 0; + unsigned long flags; + struct scsi_device *sdev; + struct scsi_device_handler *scsi_dh = NULL; + + spin_lock_irqsave(q->queue_lock, flags); + sdev = q->queuedata; + if (sdev && sdev->scsi_dh_data) + scsi_dh = sdev->scsi_dh_data->scsi_dh; + if (!scsi_dh || !get_device(&sdev->sdev_gendev)) + err = SCSI_DH_NOSYS; + spin_unlock_irqrestore(q->queue_lock, flags); + + if (err) + return err; + + if (scsi_dh->activate) + err = scsi_dh->activate(sdev); + put_device(&sdev->sdev_gendev); + return err; +} +EXPORT_SYMBOL_GPL(scsi_dh_activate); + +/* + * scsi_dh_handler_exist - Return TRUE(1) if a device handler exists for + * the given name. FALSE(0) otherwise. + * @name - name of the device handler. 
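
Aside: scsi_register_device_handler()/scsi_unregister_device_handler() above give device-handler modules a single entry point; judging from this file, a handler supplies at least a name, a bus notifier and optional callbacks such as ->activate. A stripped-down, hypothetical handler module might look like the sketch below (the real handlers in this series fill in considerably more, and additional mandatory fields may exist):

	#include <linux/module.h>
	#include <scsi/scsi_dh.h>

	static int example_dh_activate(struct scsi_device *sdev)
	{
		/* Issue whatever command makes this device's path usable. */
		return SCSI_DH_OK;
	}

	static int example_dh_notifier(struct notifier_block *nb,
				       unsigned long action, void *data)
	{
		/* Attach/detach per-device handler data on BUS_NOTIFY_ADD_DEVICE
		 * and BUS_NOTIFY_DEL_DEVICE; omitted in this sketch. */
		return 0;
	}

	static struct scsi_device_handler example_dh = {
		.name			= "example_dh",
		.nb.notifier_call	= example_dh_notifier,
		.activate		= example_dh_activate,
	};

	static int __init example_dh_init(void)
	{
		return scsi_register_device_handler(&example_dh);
	}

	static void __exit example_dh_exit(void)
	{
		scsi_unregister_device_handler(&example_dh);
	}

	module_init(example_dh_init);
	module_exit(example_dh_exit);
	MODULE_LICENSE("GPL");
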
+ */ +int scsi_dh_handler_exist(const char *name) +{ + return (get_device_handler(name) != NULL); +} +EXPORT_SYMBOL_GPL(scsi_dh_handler_exist); + +MODULE_DESCRIPTION("SCSI device handler"); +MODULE_AUTHOR("Chandra Seetharaman <sekharan@us.ibm.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c new file mode 100644 index 000000000000..f2467e936e55 --- /dev/null +++ b/drivers/scsi/device_handler/scsi_dh_emc.c @@ -0,0 +1,504 @@ +/* + * Target driver for EMC CLARiiON AX/CX-series hardware. + * Based on code from Lars Marowsky-Bree <lmb@suse.de> + * and Ed Goggin <egoggin@emc.com>. + * + * Copyright (C) 2006 Red Hat, Inc. All rights reserved. + * Copyright (C) 2006 Mike Christie + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. + */ +#include <scsi/scsi.h> +#include <scsi/scsi_eh.h> +#include <scsi/scsi_dh.h> +#include <scsi/scsi_device.h> + +#define CLARIION_NAME "emc_clariion" + +#define CLARIION_TRESPASS_PAGE 0x22 +#define CLARIION_BUFFER_SIZE 0x80 +#define CLARIION_TIMEOUT (60 * HZ) +#define CLARIION_RETRIES 3 +#define CLARIION_UNBOUND_LU -1 + +static unsigned char long_trespass[] = { + 0, 0, 0, 0, + CLARIION_TRESPASS_PAGE, /* Page code */ + 0x09, /* Page length - 2 */ + 0x81, /* Trespass code + Honor reservation bit */ + 0xff, 0xff, /* Trespass target */ + 0, 0, 0, 0, 0, 0 /* Reserved bytes / unknown */ +}; + +static unsigned char long_trespass_hr[] = { + 0, 0, 0, 0, + CLARIION_TRESPASS_PAGE, /* Page code */ + 0x09, /* Page length - 2 */ + 0x01, /* Trespass code + Honor reservation bit */ + 0xff, 0xff, /* Trespass target */ + 0, 0, 0, 0, 0, 0 /* Reserved bytes / unknown */ +}; + +static unsigned char short_trespass[] = { + 0, 0, 0, 0, + CLARIION_TRESPASS_PAGE, /* Page code */ + 0x02, /* Page length - 2 */ + 0x81, /* Trespass code + Honor reservation bit */ + 0xff, /* Trespass target */ +}; + +static unsigned char short_trespass_hr[] = { + 0, 0, 0, 0, + CLARIION_TRESPASS_PAGE, /* Page code */ + 0x02, /* Page length - 2 */ + 0x01, /* Trespass code + Honor reservation bit */ + 0xff, /* Trespass target */ +}; + +struct clariion_dh_data { + /* + * Use short trespass command (FC-series) or the long version + * (default for AX/CX CLARiiON arrays). + */ + unsigned short_trespass; + /* + * Whether or not (default) to honor SCSI reservations when + * initiating a switch-over. + */ + unsigned hr; + /* I/O buffer for both MODE_SELECT and INQUIRY commands. */ + char buffer[CLARIION_BUFFER_SIZE]; + /* + * SCSI sense buffer for commands -- assumes serial issuance + * and completion sequence of all commands for same multipath. 
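+ * Only one handler-issued command is outstanding per device at a + * time, so a single sense buffer and data buffer are sufficient.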
+ */ + unsigned char sense[SCSI_SENSE_BUFFERSIZE]; + /* which SP (A=0,B=1,UNBOUND=-1) is dflt SP for path's mapped dev */ + int default_sp; + /* which SP (A=0,B=1,UNBOUND=-1) is active for path's mapped dev */ + int current_sp; +}; + +static inline struct clariion_dh_data + *get_clariion_data(struct scsi_device *sdev) +{ + struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data; + BUG_ON(scsi_dh_data == NULL); + return ((struct clariion_dh_data *) scsi_dh_data->buf); +} + +/* + * Parse MODE_SELECT cmd reply. + */ +static int trespass_endio(struct scsi_device *sdev, int result) +{ + int err = SCSI_DH_OK; + struct scsi_sense_hdr sshdr; + struct clariion_dh_data *csdev = get_clariion_data(sdev); + char *sense = csdev->sense; + + if (status_byte(result) == CHECK_CONDITION && + scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)) { + sdev_printk(KERN_ERR, sdev, "Found valid sense data 0x%2x, " + "0x%2x, 0x%2x while sending CLARiiON trespass " + "command.\n", sshdr.sense_key, sshdr.asc, + sshdr.ascq); + + if ((sshdr.sense_key == 0x05) && (sshdr.asc == 0x04) && + (sshdr.ascq == 0x00)) { + /* + * Array based copy in progress -- do not send + * mode_select or copy will be aborted mid-stream. + */ + sdev_printk(KERN_INFO, sdev, "Array Based Copy in " + "progress while sending CLARiiON trespass " + "command.\n"); + err = SCSI_DH_DEV_TEMP_BUSY; + } else if ((sshdr.sense_key == 0x02) && (sshdr.asc == 0x04) && + (sshdr.ascq == 0x03)) { + /* + * LUN Not Ready - Manual Intervention Required + * indicates in-progress ucode upgrade (NDU). + */ + sdev_printk(KERN_INFO, sdev, "Detected in-progress " + "ucode upgrade NDU operation while sending " + "CLARiiON trespass command.\n"); + err = SCSI_DH_DEV_TEMP_BUSY; + } else + err = SCSI_DH_DEV_FAILED; + } else if (result) { + sdev_printk(KERN_ERR, sdev, "Error 0x%x while sending " + "CLARiiON trespass command.\n", result); + err = SCSI_DH_IO; + } + + return err; +} + +static int parse_sp_info_reply(struct scsi_device *sdev, int result, + int *default_sp, int *current_sp, int *new_current_sp) +{ + int err = SCSI_DH_OK; + struct clariion_dh_data *csdev = get_clariion_data(sdev); + + if (result == 0) { + /* check for in-progress ucode upgrade (NDU) */ + if (csdev->buffer[48] != 0) { + sdev_printk(KERN_NOTICE, sdev, "Detected in-progress " + "ucode upgrade NDU operation while finding " + "current active SP."); + err = SCSI_DH_DEV_TEMP_BUSY; + } else { + *default_sp = csdev->buffer[5]; + + if (csdev->buffer[4] == 2) + /* SP for path is current */ + *current_sp = csdev->buffer[8]; + else { + if (csdev->buffer[4] == 1) + /* SP for this path is NOT current */ + if (csdev->buffer[8] == 0) + *current_sp = 1; + else + *current_sp = 0; + else + /* unbound LU or LUNZ */ + *current_sp = CLARIION_UNBOUND_LU; + } + *new_current_sp = csdev->buffer[8]; + } + } else { + struct scsi_sense_hdr sshdr; + + err = SCSI_DH_IO; + + if (scsi_normalize_sense(csdev->sense, SCSI_SENSE_BUFFERSIZE, + &sshdr)) + sdev_printk(KERN_ERR, sdev, "Found valid sense data " + "0x%2x, 0x%2x, 0x%2x while finding current " + "active SP.", sshdr.sense_key, sshdr.asc, + sshdr.ascq); + else + sdev_printk(KERN_ERR, sdev, "Error 0x%x finding " + "current active SP.", result); + } + + return err; +} + +static int sp_info_endio(struct scsi_device *sdev, int result, + int mode_select_sent, int *done) +{ + struct clariion_dh_data *csdev = get_clariion_data(sdev); + int err_flags, default_sp, current_sp, new_current_sp; + + err_flags = parse_sp_info_reply(sdev, result, &default_sp, + ¤t_sp, &new_current_sp); + + 
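+ /* + * On failure bail out below; otherwise cache the SP state parsed + * from the 0xC0 inquiry and, on the initial pass, decide whether + * the trespass (MODE_SELECT) can be skipped. + */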
if (err_flags != SCSI_DH_OK) + goto done; + + if (mode_select_sent) { + csdev->default_sp = default_sp; + csdev->current_sp = current_sp; + } else { + /* + * Issue the actual mode_select request IFF either + * (1) we do not know the identity of the current SP OR + * (2) what we think we know is actually correct. + */ + if ((current_sp != CLARIION_UNBOUND_LU) && + (new_current_sp != current_sp)) { + + csdev->default_sp = default_sp; + csdev->current_sp = current_sp; + + sdev_printk(KERN_INFO, sdev, "Ignoring path group " + "switch-over command for CLARiiON SP%s since " + "mapped device is already initialized.", + current_sp ? "B" : "A"); + if (done) + *done = 1; /* as good as doing it */ + } + } +done: + return err_flags; +} + +/* +* Get block request for REQ_BLOCK_PC command issued to path. Currently +* limited to MODE_SELECT (trespass) and INQUIRY (VPD page 0xC0) commands. +* +* Uses data and sense buffers in hardware handler context structure and +* assumes serial servicing of commands, both issuance and completion. +*/ +static struct request *get_req(struct scsi_device *sdev, int cmd) +{ + struct clariion_dh_data *csdev = get_clariion_data(sdev); + struct request *rq; + unsigned char *page22; + int len = 0; + + rq = blk_get_request(sdev->request_queue, + (cmd == MODE_SELECT) ? WRITE : READ, GFP_ATOMIC); + if (!rq) { + sdev_printk(KERN_INFO, sdev, "get_req: blk_get_request failed"); + return NULL; + } + + memset(&rq->cmd, 0, BLK_MAX_CDB); + rq->cmd[0] = cmd; + rq->cmd_len = COMMAND_SIZE(rq->cmd[0]); + + switch (cmd) { + case MODE_SELECT: + if (csdev->short_trespass) { + page22 = csdev->hr ? short_trespass_hr : short_trespass; + len = sizeof(short_trespass); + } else { + page22 = csdev->hr ? long_trespass_hr : long_trespass; + len = sizeof(long_trespass); + } + /* + * Can't DMA from kernel BSS -- must copy selected trespass + * command mode page contents to context buffer which is + * allocated by kmalloc.
+ */ + BUG_ON((len > CLARIION_BUFFER_SIZE)); + memcpy(csdev->buffer, page22, len); + rq->cmd_flags |= REQ_RW; + rq->cmd[1] = 0x10; + break; + case INQUIRY: + rq->cmd[1] = 0x1; + rq->cmd[2] = 0xC0; + len = CLARIION_BUFFER_SIZE; + memset(csdev->buffer, 0, CLARIION_BUFFER_SIZE); + break; + default: + BUG_ON(1); + break; + } + + rq->cmd[4] = len; + rq->cmd_type = REQ_TYPE_BLOCK_PC; + rq->cmd_flags |= REQ_FAILFAST; + rq->timeout = CLARIION_TIMEOUT; + rq->retries = CLARIION_RETRIES; + + rq->sense = csdev->sense; + memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE); + rq->sense_len = 0; + + if (blk_rq_map_kern(sdev->request_queue, rq, csdev->buffer, + len, GFP_ATOMIC)) { + __blk_put_request(rq->q, rq); + return NULL; + } + + return rq; +} + +static int send_cmd(struct scsi_device *sdev, int cmd) +{ + struct request *rq = get_req(sdev, cmd); + + if (!rq) + return SCSI_DH_RES_TEMP_UNAVAIL; + + return blk_execute_rq(sdev->request_queue, NULL, rq, 1); +} + +static int clariion_activate(struct scsi_device *sdev) +{ + int result, done = 0; + + result = send_cmd(sdev, INQUIRY); + result = sp_info_endio(sdev, result, 0, &done); + if (result || done) + goto done; + + result = send_cmd(sdev, MODE_SELECT); + result = trespass_endio(sdev, result); + if (result) + goto done; + + result = send_cmd(sdev, INQUIRY); + result = sp_info_endio(sdev, result, 1, NULL); +done: + return result; +} + +static int clariion_check_sense(struct scsi_device *sdev, + struct scsi_sense_hdr *sense_hdr) +{ + switch (sense_hdr->sense_key) { + case NOT_READY: + if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x03) + /* + * LUN Not Ready - Manual Intervention Required + * indicates this is a passive path. + * + * FIXME: However, if this is seen and EVPD C0 + * indicates that this is due to a NDU in + * progress, we should set FAIL_PATH too. + * This indicates we might have to do a SCSI + * inquiry in the end_io path. Ugh. + * + * Can return FAILED only when we want the error + * recovery process to kick in. + */ + return SUCCESS; + break; + case ILLEGAL_REQUEST: + if (sense_hdr->asc == 0x25 && sense_hdr->ascq == 0x01) + /* + * An array based copy is in progress. Do not + * fail the path, do not bypass to another PG, + * do not retry. Fail the IO immediately. + * (Actually this is the same conclusion as in + * the default handler, but lets make sure.) + * + * Can return FAILED only when we want the error + * recovery process to kick in. + */ + return SUCCESS; + break; + case UNIT_ATTENTION: + if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) + /* + * Unit Attention Code. This is the first IO + * to the new path, so just retry. 
+ */ + return NEEDS_RETRY; + break; + } + + /* success just means we do not care what scsi-ml does */ + return SUCCESS; +} + +static const struct { + char *vendor; + char *model; +} clariion_dev_list[] = { + {"DGC", "RAID"}, + {"DGC", "DISK"}, + {NULL, NULL}, +}; + +static int clariion_bus_notify(struct notifier_block *, unsigned long, void *); + +static struct scsi_device_handler clariion_dh = { + .name = CLARIION_NAME, + .module = THIS_MODULE, + .nb.notifier_call = clariion_bus_notify, + .check_sense = clariion_check_sense, + .activate = clariion_activate, +}; + +/* + * TODO: need some interface so we can set trespass values + */ +static int clariion_bus_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct device *dev = data; + struct scsi_device *sdev; + struct scsi_dh_data *scsi_dh_data; + struct clariion_dh_data *h; + int i, found = 0; + unsigned long flags; + + if (!scsi_is_sdev_device(dev)) + return 0; + + sdev = to_scsi_device(dev); + + if (action == BUS_NOTIFY_ADD_DEVICE) { + for (i = 0; clariion_dev_list[i].vendor; i++) { + if (!strncmp(sdev->vendor, clariion_dev_list[i].vendor, + strlen(clariion_dev_list[i].vendor)) && + !strncmp(sdev->model, clariion_dev_list[i].model, + strlen(clariion_dev_list[i].model))) { + found = 1; + break; + } + } + if (!found) + goto out; + + scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *) + + sizeof(*h) , GFP_KERNEL); + if (!scsi_dh_data) { + sdev_printk(KERN_ERR, sdev, "Attach failed %s.\n", + CLARIION_NAME); + goto out; + } + + scsi_dh_data->scsi_dh = &clariion_dh; + h = (struct clariion_dh_data *) scsi_dh_data->buf; + h->default_sp = CLARIION_UNBOUND_LU; + h->current_sp = CLARIION_UNBOUND_LU; + + spin_lock_irqsave(sdev->request_queue->queue_lock, flags); + sdev->scsi_dh_data = scsi_dh_data; + spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); + + sdev_printk(KERN_NOTICE, sdev, "Attached %s.\n", CLARIION_NAME); + try_module_get(THIS_MODULE); + + } else if (action == BUS_NOTIFY_DEL_DEVICE) { + if (sdev->scsi_dh_data == NULL || + sdev->scsi_dh_data->scsi_dh != &clariion_dh) + goto out; + + spin_lock_irqsave(sdev->request_queue->queue_lock, flags); + scsi_dh_data = sdev->scsi_dh_data; + sdev->scsi_dh_data = NULL; + spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); + + sdev_printk(KERN_NOTICE, sdev, "Dettached %s.\n", + CLARIION_NAME); + + kfree(scsi_dh_data); + module_put(THIS_MODULE); + } + +out: + return 0; +} + +static int __init clariion_init(void) +{ + int r; + + r = scsi_register_device_handler(&clariion_dh); + if (r != 0) + printk(KERN_ERR "Failed to register scsi device handler."); + return r; +} + +static void __exit clariion_exit(void) +{ + scsi_unregister_device_handler(&clariion_dh); +} + +module_init(clariion_init); +module_exit(clariion_exit); + +MODULE_DESCRIPTION("EMC CX/AX/FC-family driver"); +MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, Chandra Seetharaman <sekharan@us.ibm.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c new file mode 100644 index 000000000000..ae6be87d6a83 --- /dev/null +++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c @@ -0,0 +1,207 @@ +/* + * Basic HP/COMPAQ MSA 1000 support. This is only needed if your HW cannot be + * upgraded. + * + * Copyright (C) 2006 Red Hat, Inc. All rights reserved. 
+ * Copyright (C) 2006 Mike Christie + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <scsi/scsi.h> +#include <scsi/scsi_dbg.h> +#include <scsi/scsi_eh.h> +#include <scsi/scsi_dh.h> + +#define HP_SW_NAME "hp_sw" + +#define HP_SW_TIMEOUT (60 * HZ) +#define HP_SW_RETRIES 3 + +struct hp_sw_dh_data { + unsigned char sense[SCSI_SENSE_BUFFERSIZE]; + int retries; +}; + +static inline struct hp_sw_dh_data *get_hp_sw_data(struct scsi_device *sdev) +{ + struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data; + BUG_ON(scsi_dh_data == NULL); + return ((struct hp_sw_dh_data *) scsi_dh_data->buf); +} + +static int hp_sw_done(struct scsi_device *sdev) +{ + struct hp_sw_dh_data *h = get_hp_sw_data(sdev); + struct scsi_sense_hdr sshdr; + int rc; + + sdev_printk(KERN_INFO, sdev, "hp_sw_done\n"); + + rc = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, &sshdr); + if (!rc) + goto done; + switch (sshdr.sense_key) { + case NOT_READY: + if ((sshdr.asc == 0x04) && (sshdr.ascq == 3)) { + rc = SCSI_DH_RETRY; + h->retries++; + break; + } + /* fall through */ + default: + h->retries++; + rc = SCSI_DH_IMM_RETRY; + } + +done: + if (rc == SCSI_DH_OK || rc == SCSI_DH_IO) + h->retries = 0; + else if (h->retries > HP_SW_RETRIES) { + h->retries = 0; + rc = SCSI_DH_IO; + } + return rc; +} + +static int hp_sw_activate(struct scsi_device *sdev) +{ + struct hp_sw_dh_data *h = get_hp_sw_data(sdev); + struct request *req; + int ret = SCSI_DH_RES_TEMP_UNAVAIL; + + req = blk_get_request(sdev->request_queue, WRITE, GFP_ATOMIC); + if (!req) + goto done; + + sdev_printk(KERN_INFO, sdev, "sending START_STOP."); + + req->cmd_type = REQ_TYPE_BLOCK_PC; + req->cmd_flags |= REQ_FAILFAST; + req->cmd_len = COMMAND_SIZE(START_STOP); + memset(req->cmd, 0, MAX_COMMAND_SIZE); + req->cmd[0] = START_STOP; + req->cmd[4] = 1; /* Start spin cycle */ + req->timeout = HP_SW_TIMEOUT; + req->sense = h->sense; + memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE); + req->sense_len = 0; + + ret = blk_execute_rq(req->q, NULL, req, 1); + if (!ret) /* SUCCESS */ + ret = hp_sw_done(sdev); + else + ret = SCSI_DH_IO; +done: + return ret; +} + +static const struct { + char *vendor; + char *model; +} hp_sw_dh_data_list[] = { + {"COMPAQ", "MSA"}, + {"HP", "HSV"}, + {"DEC", "HSG80"}, + {NULL, NULL}, +}; + +static int hp_sw_bus_notify(struct notifier_block *, unsigned long, void *); + +static struct scsi_device_handler hp_sw_dh = { + .name = HP_SW_NAME, + .module = THIS_MODULE, + .nb.notifier_call = hp_sw_bus_notify, + .activate = hp_sw_activate, +}; + +static int hp_sw_bus_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct device *dev = data; + struct scsi_device *sdev; + struct scsi_dh_data *scsi_dh_data; + int i, found = 0; + unsigned long flags; + + if (!scsi_is_sdev_device(dev)) + return 0; + + sdev = to_scsi_device(dev); + + if (action == BUS_NOTIFY_ADD_DEVICE) { 
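+ /* Attach only to devices whose vendor/model match hp_sw_dh_data_list. */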
+ for (i = 0; hp_sw_dh_data_list[i].vendor; i++) { + if (!strncmp(sdev->vendor, hp_sw_dh_data_list[i].vendor, + strlen(hp_sw_dh_data_list[i].vendor)) && + !strncmp(sdev->model, hp_sw_dh_data_list[i].model, + strlen(hp_sw_dh_data_list[i].model))) { + found = 1; + break; + } + } + if (!found) + goto out; + + scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *) + + sizeof(struct hp_sw_dh_data) , GFP_KERNEL); + if (!scsi_dh_data) { + sdev_printk(KERN_ERR, sdev, "Attach Failed %s.\n", + HP_SW_NAME); + goto out; + } + + scsi_dh_data->scsi_dh = &hp_sw_dh; + spin_lock_irqsave(sdev->request_queue->queue_lock, flags); + sdev->scsi_dh_data = scsi_dh_data; + spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); + try_module_get(THIS_MODULE); + + sdev_printk(KERN_NOTICE, sdev, "Attached %s.\n", HP_SW_NAME); + } else if (action == BUS_NOTIFY_DEL_DEVICE) { + if (sdev->scsi_dh_data == NULL || + sdev->scsi_dh_data->scsi_dh != &hp_sw_dh) + goto out; + + spin_lock_irqsave(sdev->request_queue->queue_lock, flags); + scsi_dh_data = sdev->scsi_dh_data; + sdev->scsi_dh_data = NULL; + spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); + module_put(THIS_MODULE); + + sdev_printk(KERN_NOTICE, sdev, "Dettached %s.\n", HP_SW_NAME); + + kfree(scsi_dh_data); + } + +out: + return 0; +} + +static int __init hp_sw_init(void) +{ + return scsi_register_device_handler(&hp_sw_dh); +} + +static void __exit hp_sw_exit(void) +{ + scsi_unregister_device_handler(&hp_sw_dh); +} + +module_init(hp_sw_init); +module_exit(hp_sw_exit); + +MODULE_DESCRIPTION("HP MSA 1000"); +MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu"); +MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c new file mode 100644 index 000000000000..fdf34b0ec6e1 --- /dev/null +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -0,0 +1,696 @@ +/* + * Engenio/LSI RDAC SCSI Device Handler + * + * Copyright (C) 2005 Mike Christie. All rights reserved. + * Copyright (C) Chandra Seetharaman, IBM Corp. 2007 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ +#include <scsi/scsi.h> +#include <scsi/scsi_eh.h> +#include <scsi/scsi_dh.h> + +#define RDAC_NAME "rdac" + +/* + * LSI mode page stuff + * + * These struct definitions and the forming of the + * mode page were taken from the LSI RDAC 2.4 GPL'd + * driver, and then converted to Linux conventions. 
+ */ +#define RDAC_QUIESCENCE_TIME 20; +/* + * Page Codes + */ +#define RDAC_PAGE_CODE_REDUNDANT_CONTROLLER 0x2c + +/* + * Controller modes definitions + */ +#define RDAC_MODE_TRANSFER_SPECIFIED_LUNS 0x02 + +/* + * RDAC Options field + */ +#define RDAC_FORCED_QUIESENCE 0x02 + +#define RDAC_TIMEOUT (60 * HZ) +#define RDAC_RETRIES 3 + +struct rdac_mode_6_hdr { + u8 data_len; + u8 medium_type; + u8 device_params; + u8 block_desc_len; +}; + +struct rdac_mode_10_hdr { + u16 data_len; + u8 medium_type; + u8 device_params; + u16 reserved; + u16 block_desc_len; +}; + +struct rdac_mode_common { + u8 controller_serial[16]; + u8 alt_controller_serial[16]; + u8 rdac_mode[2]; + u8 alt_rdac_mode[2]; + u8 quiescence_timeout; + u8 rdac_options; +}; + +struct rdac_pg_legacy { + struct rdac_mode_6_hdr hdr; + u8 page_code; + u8 page_len; + struct rdac_mode_common common; +#define MODE6_MAX_LUN 32 + u8 lun_table[MODE6_MAX_LUN]; + u8 reserved2[32]; + u8 reserved3; + u8 reserved4; +}; + +struct rdac_pg_expanded { + struct rdac_mode_10_hdr hdr; + u8 page_code; + u8 subpage_code; + u8 page_len[2]; + struct rdac_mode_common common; + u8 lun_table[256]; + u8 reserved3; + u8 reserved4; +}; + +struct c9_inquiry { + u8 peripheral_info; + u8 page_code; /* 0xC9 */ + u8 reserved1; + u8 page_len; + u8 page_id[4]; /* "vace" */ + u8 avte_cvp; + u8 path_prio; + u8 reserved2[38]; +}; + +#define SUBSYS_ID_LEN 16 +#define SLOT_ID_LEN 2 + +struct c4_inquiry { + u8 peripheral_info; + u8 page_code; /* 0xC4 */ + u8 reserved1; + u8 page_len; + u8 page_id[4]; /* "subs" */ + u8 subsys_id[SUBSYS_ID_LEN]; + u8 revision[4]; + u8 slot_id[SLOT_ID_LEN]; + u8 reserved[2]; +}; + +struct rdac_controller { + u8 subsys_id[SUBSYS_ID_LEN]; + u8 slot_id[SLOT_ID_LEN]; + int use_ms10; + struct kref kref; + struct list_head node; /* list of all controllers */ + union { + struct rdac_pg_legacy legacy; + struct rdac_pg_expanded expanded; + } mode_select; +}; +struct c8_inquiry { + u8 peripheral_info; + u8 page_code; /* 0xC8 */ + u8 reserved1; + u8 page_len; + u8 page_id[4]; /* "edid" */ + u8 reserved2[3]; + u8 vol_uniq_id_len; + u8 vol_uniq_id[16]; + u8 vol_user_label_len; + u8 vol_user_label[60]; + u8 array_uniq_id_len; + u8 array_unique_id[16]; + u8 array_user_label_len; + u8 array_user_label[60]; + u8 lun[8]; +}; + +struct c2_inquiry { + u8 peripheral_info; + u8 page_code; /* 0xC2 */ + u8 reserved1; + u8 page_len; + u8 page_id[4]; /* "swr4" */ + u8 sw_version[3]; + u8 sw_date[3]; + u8 features_enabled; + u8 max_lun_supported; + u8 partitions[239]; /* Total allocation length should be 0xFF */ +}; + +struct rdac_dh_data { + struct rdac_controller *ctlr; +#define UNINITIALIZED_LUN (1 << 8) + unsigned lun; +#define RDAC_STATE_ACTIVE 0 +#define RDAC_STATE_PASSIVE 1 + unsigned char state; + unsigned char sense[SCSI_SENSE_BUFFERSIZE]; + union { + struct c2_inquiry c2; + struct c4_inquiry c4; + struct c8_inquiry c8; + struct c9_inquiry c9; + } inq; +}; + +static LIST_HEAD(ctlr_list); +static DEFINE_SPINLOCK(list_lock); + +static inline struct rdac_dh_data *get_rdac_data(struct scsi_device *sdev) +{ + struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data; + BUG_ON(scsi_dh_data == NULL); + return ((struct rdac_dh_data *) scsi_dh_data->buf); +} + +static struct request *get_rdac_req(struct scsi_device *sdev, + void *buffer, unsigned buflen, int rw) +{ + struct request *rq; + struct request_queue *q = sdev->request_queue; + struct rdac_dh_data *h = get_rdac_data(sdev); + + rq = blk_get_request(q, rw, GFP_KERNEL); + + if (!rq) { + sdev_printk(KERN_INFO, sdev, + 
"get_rdac_req: blk_get_request failed.\n"); + return NULL; + } + + if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_KERNEL)) { + blk_put_request(rq); + sdev_printk(KERN_INFO, sdev, + "get_rdac_req: blk_rq_map_kern failed.\n"); + return NULL; + } + + memset(&rq->cmd, 0, BLK_MAX_CDB); + rq->sense = h->sense; + memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE); + rq->sense_len = 0; + + rq->cmd_type = REQ_TYPE_BLOCK_PC; + rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE; + rq->retries = RDAC_RETRIES; + rq->timeout = RDAC_TIMEOUT; + + return rq; +} + +static struct request *rdac_failover_get(struct scsi_device *sdev) +{ + struct request *rq; + struct rdac_mode_common *common; + unsigned data_size; + struct rdac_dh_data *h = get_rdac_data(sdev); + + if (h->ctlr->use_ms10) { + struct rdac_pg_expanded *rdac_pg; + + data_size = sizeof(struct rdac_pg_expanded); + rdac_pg = &h->ctlr->mode_select.expanded; + memset(rdac_pg, 0, data_size); + common = &rdac_pg->common; + rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40; + rdac_pg->subpage_code = 0x1; + rdac_pg->page_len[0] = 0x01; + rdac_pg->page_len[1] = 0x28; + rdac_pg->lun_table[h->lun] = 0x81; + } else { + struct rdac_pg_legacy *rdac_pg; + + data_size = sizeof(struct rdac_pg_legacy); + rdac_pg = &h->ctlr->mode_select.legacy; + memset(rdac_pg, 0, data_size); + common = &rdac_pg->common; + rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER; + rdac_pg->page_len = 0x68; + rdac_pg->lun_table[h->lun] = 0x81; + } + common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS; + common->quiescence_timeout = RDAC_QUIESCENCE_TIME; + common->rdac_options = RDAC_FORCED_QUIESENCE; + + /* get request for block layer packet command */ + rq = get_rdac_req(sdev, &h->ctlr->mode_select, data_size, WRITE); + if (!rq) + return NULL; + + /* Prepare the command. */ + if (h->ctlr->use_ms10) { + rq->cmd[0] = MODE_SELECT_10; + rq->cmd[7] = data_size >> 8; + rq->cmd[8] = data_size & 0xff; + } else { + rq->cmd[0] = MODE_SELECT; + rq->cmd[4] = data_size; + } + rq->cmd_len = COMMAND_SIZE(rq->cmd[0]); + + return rq; +} + +static void release_controller(struct kref *kref) +{ + struct rdac_controller *ctlr; + ctlr = container_of(kref, struct rdac_controller, kref); + + spin_lock(&list_lock); + list_del(&ctlr->node); + spin_unlock(&list_lock); + kfree(ctlr); +} + +static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id) +{ + struct rdac_controller *ctlr, *tmp; + + spin_lock(&list_lock); + + list_for_each_entry(tmp, &ctlr_list, node) { + if ((memcmp(tmp->subsys_id, subsys_id, SUBSYS_ID_LEN) == 0) && + (memcmp(tmp->slot_id, slot_id, SLOT_ID_LEN) == 0)) { + kref_get(&tmp->kref); + spin_unlock(&list_lock); + return tmp; + } + } + ctlr = kmalloc(sizeof(*ctlr), GFP_ATOMIC); + if (!ctlr) + goto done; + + /* initialize fields of controller */ + memcpy(ctlr->subsys_id, subsys_id, SUBSYS_ID_LEN); + memcpy(ctlr->slot_id, slot_id, SLOT_ID_LEN); + kref_init(&ctlr->kref); + ctlr->use_ms10 = -1; + list_add(&ctlr->node, &ctlr_list); +done: + spin_unlock(&list_lock); + return ctlr; +} + +static int submit_inquiry(struct scsi_device *sdev, int page_code, + unsigned int len) +{ + struct request *rq; + struct request_queue *q = sdev->request_queue; + struct rdac_dh_data *h = get_rdac_data(sdev); + int err = SCSI_DH_RES_TEMP_UNAVAIL; + + rq = get_rdac_req(sdev, &h->inq, len, READ); + if (!rq) + goto done; + + /* Prepare the command. 
*/ + rq->cmd[0] = INQUIRY; + rq->cmd[1] = 1; + rq->cmd[2] = page_code; + rq->cmd[4] = len; + rq->cmd_len = COMMAND_SIZE(INQUIRY); + err = blk_execute_rq(q, NULL, rq, 1); + if (err == -EIO) + err = SCSI_DH_IO; +done: + return err; +} + +static int get_lun(struct scsi_device *sdev) +{ + int err; + struct c8_inquiry *inqp; + struct rdac_dh_data *h = get_rdac_data(sdev); + + err = submit_inquiry(sdev, 0xC8, sizeof(struct c8_inquiry)); + if (err == SCSI_DH_OK) { + inqp = &h->inq.c8; + h->lun = inqp->lun[7]; /* currently it uses only one byte */ + } + return err; +} + +#define RDAC_OWNED 0 +#define RDAC_UNOWNED 1 +#define RDAC_FAILED 2 +static int check_ownership(struct scsi_device *sdev) +{ + int err; + struct c9_inquiry *inqp; + struct rdac_dh_data *h = get_rdac_data(sdev); + + err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry)); + if (err == SCSI_DH_OK) { + err = RDAC_UNOWNED; + inqp = &h->inq.c9; + /* + * If in AVT mode or if the path already owns the LUN, + * return RDAC_OWNED; + */ + if (((inqp->avte_cvp >> 7) == 0x1) || + ((inqp->avte_cvp & 0x1) != 0)) + err = RDAC_OWNED; + } else + err = RDAC_FAILED; + return err; +} + +static int initialize_controller(struct scsi_device *sdev) +{ + int err; + struct c4_inquiry *inqp; + struct rdac_dh_data *h = get_rdac_data(sdev); + + err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry)); + if (err == SCSI_DH_OK) { + inqp = &h->inq.c4; + h->ctlr = get_controller(inqp->subsys_id, inqp->slot_id); + if (!h->ctlr) + err = SCSI_DH_RES_TEMP_UNAVAIL; + } + return err; +} + +static int set_mode_select(struct scsi_device *sdev) +{ + int err; + struct c2_inquiry *inqp; + struct rdac_dh_data *h = get_rdac_data(sdev); + + err = submit_inquiry(sdev, 0xC2, sizeof(struct c2_inquiry)); + if (err == SCSI_DH_OK) { + inqp = &h->inq.c2; + /* + * If more than MODE6_MAX_LUN luns are supported, use + * mode select 10 + */ + if (inqp->max_lun_supported >= MODE6_MAX_LUN) + h->ctlr->use_ms10 = 1; + else + h->ctlr->use_ms10 = 0; + } + return err; +} + +static int mode_select_handle_sense(struct scsi_device *sdev) +{ + struct scsi_sense_hdr sense_hdr; + struct rdac_dh_data *h = get_rdac_data(sdev); + int sense, err = SCSI_DH_IO, ret; + + ret = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, &sense_hdr); + if (!ret) + goto done; + + err = SCSI_DH_OK; + sense = (sense_hdr.sense_key << 16) | (sense_hdr.asc << 8) | + sense_hdr.ascq; + /* If it is retryable failure, submit the c9 inquiry again */ + if (sense == 0x59136 || sense == 0x68b02 || sense == 0xb8b02 || + sense == 0x62900) { + /* 0x59136 - Command lock contention + * 0x[6b]8b02 - Quiesense in progress or achieved + * 0x62900 - Power On, Reset, or Bus Device Reset + */ + err = SCSI_DH_RETRY; + } + + if (sense) + sdev_printk(KERN_INFO, sdev, + "MODE_SELECT failed with sense 0x%x.\n", sense); +done: + return err; +} + +static int send_mode_select(struct scsi_device *sdev) +{ + struct request *rq; + struct request_queue *q = sdev->request_queue; + struct rdac_dh_data *h = get_rdac_data(sdev); + int err = SCSI_DH_RES_TEMP_UNAVAIL; + + rq = rdac_failover_get(sdev); + if (!rq) + goto done; + + sdev_printk(KERN_INFO, sdev, "queueing MODE_SELECT command.\n"); + + err = blk_execute_rq(q, NULL, rq, 1); + if (err != SCSI_DH_OK) + err = mode_select_handle_sense(sdev); + if (err == SCSI_DH_OK) + h->state = RDAC_STATE_ACTIVE; +done: + return err; +} + +static int rdac_activate(struct scsi_device *sdev) +{ + struct rdac_dh_data *h = get_rdac_data(sdev); + int err = SCSI_DH_OK; + + if (h->lun == UNINITIALIZED_LUN) { + err = 
get_lun(sdev); + if (err != SCSI_DH_OK) + goto done; + } + + err = check_ownership(sdev); + switch (err) { + case RDAC_UNOWNED: + break; + case RDAC_OWNED: + err = SCSI_DH_OK; + goto done; + case RDAC_FAILED: + default: + err = SCSI_DH_IO; + goto done; + } + + if (!h->ctlr) { + err = initialize_controller(sdev); + if (err != SCSI_DH_OK) + goto done; + } + + if (h->ctlr->use_ms10 == -1) { + err = set_mode_select(sdev); + if (err != SCSI_DH_OK) + goto done; + } + + err = send_mode_select(sdev); +done: + return err; +} + +static int rdac_prep_fn(struct scsi_device *sdev, struct request *req) +{ + struct rdac_dh_data *h = get_rdac_data(sdev); + int ret = BLKPREP_OK; + + if (h->state != RDAC_STATE_ACTIVE) { + ret = BLKPREP_KILL; + req->cmd_flags |= REQ_QUIET; + } + return ret; + +} + +static int rdac_check_sense(struct scsi_device *sdev, + struct scsi_sense_hdr *sense_hdr) +{ + struct rdac_dh_data *h = get_rdac_data(sdev); + switch (sense_hdr->sense_key) { + case NOT_READY: + if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x81) + /* LUN Not Ready - Storage firmware incompatible + * Manual code synchronisation required. + * + * Nothing we can do here. Try to bypass the path. + */ + return SUCCESS; + if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0xA1) + /* LUN Not Ready - Quiescence in progress + * + * Just retry and wait. + */ + return NEEDS_RETRY; + break; + case ILLEGAL_REQUEST: + if (sense_hdr->asc == 0x94 && sense_hdr->ascq == 0x01) { + /* Invalid Request - Current Logical Unit Ownership. + * Controller is not the current owner of the LUN. + * Fail the path, so that the other path can be used. + */ + h->state = RDAC_STATE_PASSIVE; + return SUCCESS; + } + break; + case UNIT_ATTENTION: + if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) + /* + * Power On, Reset, or Bus Device Reset, just retry.
+ */ + return NEEDS_RETRY; + break; + } + /* success just means we do not care what scsi-ml does */ + return SCSI_RETURN_NOT_HANDLED; +} + +static const struct { + char *vendor; + char *model; +} rdac_dev_list[] = { + {"IBM", "1722"}, + {"IBM", "1724"}, + {"IBM", "1726"}, + {"IBM", "1742"}, + {"IBM", "1814"}, + {"IBM", "1815"}, + {"IBM", "1818"}, + {"IBM", "3526"}, + {"SGI", "TP9400"}, + {"SGI", "TP9500"}, + {"SGI", "IS"}, + {"STK", "OPENstorage D280"}, + {"SUN", "CSM200_R"}, + {"SUN", "LCSM100_F"}, + {NULL, NULL}, +}; + +static int rdac_bus_notify(struct notifier_block *, unsigned long, void *); + +static struct scsi_device_handler rdac_dh = { + .name = RDAC_NAME, + .module = THIS_MODULE, + .nb.notifier_call = rdac_bus_notify, + .prep_fn = rdac_prep_fn, + .check_sense = rdac_check_sense, + .activate = rdac_activate, +}; + +/* + * TODO: need some interface so we can set trespass values + */ +static int rdac_bus_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct device *dev = data; + struct scsi_device *sdev; + struct scsi_dh_data *scsi_dh_data; + struct rdac_dh_data *h; + int i, found = 0; + unsigned long flags; + + if (!scsi_is_sdev_device(dev)) + return 0; + + sdev = to_scsi_device(dev); + + if (action == BUS_NOTIFY_ADD_DEVICE) { + for (i = 0; rdac_dev_list[i].vendor; i++) { + if (!strncmp(sdev->vendor, rdac_dev_list[i].vendor, + strlen(rdac_dev_list[i].vendor)) && + !strncmp(sdev->model, rdac_dev_list[i].model, + strlen(rdac_dev_list[i].model))) { + found = 1; + break; + } + } + if (!found) + goto out; + + scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *) + + sizeof(*h) , GFP_KERNEL); + if (!scsi_dh_data) { + sdev_printk(KERN_ERR, sdev, "Attach failed %s.\n", + RDAC_NAME); + goto out; + } + + scsi_dh_data->scsi_dh = &rdac_dh; + h = (struct rdac_dh_data *) scsi_dh_data->buf; + h->lun = UNINITIALIZED_LUN; + h->state = RDAC_STATE_ACTIVE; + spin_lock_irqsave(sdev->request_queue->queue_lock, flags); + sdev->scsi_dh_data = scsi_dh_data; + spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); + try_module_get(THIS_MODULE); + + sdev_printk(KERN_NOTICE, sdev, "Attached %s.\n", RDAC_NAME); + + } else if (action == BUS_NOTIFY_DEL_DEVICE) { + if (sdev->scsi_dh_data == NULL || + sdev->scsi_dh_data->scsi_dh != &rdac_dh) + goto out; + + spin_lock_irqsave(sdev->request_queue->queue_lock, flags); + scsi_dh_data = sdev->scsi_dh_data; + sdev->scsi_dh_data = NULL; + spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); + + h = (struct rdac_dh_data *) scsi_dh_data->buf; + if (h->ctlr) + kref_put(&h->ctlr->kref, release_controller); + kfree(scsi_dh_data); + module_put(THIS_MODULE); + sdev_printk(KERN_NOTICE, sdev, "Dettached %s.\n", RDAC_NAME); + } + +out: + return 0; +} + +static int __init rdac_init(void) +{ + int r; + + r = scsi_register_device_handler(&rdac_dh); + if (r != 0) + printk(KERN_ERR "Failed to register scsi device handler."); + return r; +} + +static void __exit rdac_exit(void) +{ + scsi_unregister_device_handler(&rdac_dh); +} + +module_init(rdac_init); +module_exit(rdac_exit); + +MODULE_DESCRIPTION("Multipath LSI/Engenio RDAC driver"); +MODULE_AUTHOR("Mike Christie, Chandra Seetharaman"); +MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c index 59fbef08d690..62a4618530d0 100644 --- a/drivers/scsi/esp_scsi.c +++ b/drivers/scsi/esp_scsi.c @@ -219,19 +219,10 @@ static void esp_reset_esp(struct esp *esp) /* Now reset the ESP chip */ scsi_esp_cmd(esp, ESP_CMD_RC); scsi_esp_cmd(esp, ESP_CMD_NULL | 
ESP_CMD_DMA); + if (esp->rev == FAST) + esp_write8(ESP_CONFIG2_FENAB, ESP_CFG2); scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA); - /* Reload the configuration registers */ - esp_write8(esp->cfact, ESP_CFACT); - - esp->prev_stp = 0; - esp_write8(esp->prev_stp, ESP_STP); - - esp->prev_soff = 0; - esp_write8(esp->prev_soff, ESP_SOFF); - - esp_write8(esp->neg_defp, ESP_TIMEO); - /* This is the only point at which it is reliable to read * the ID-code for a fast ESP chip variants. */ @@ -316,6 +307,17 @@ static void esp_reset_esp(struct esp *esp) break; } + /* Reload the configuration registers */ + esp_write8(esp->cfact, ESP_CFACT); + + esp->prev_stp = 0; + esp_write8(esp->prev_stp, ESP_STP); + + esp->prev_soff = 0; + esp_write8(esp->prev_soff, ESP_SOFF); + + esp_write8(esp->neg_defp, ESP_TIMEO); + /* Eat any bitrot in the chip */ esp_read8(ESP_INTRPT); udelay(100); diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index c6457bfc8a49..35cd892dce04 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -290,7 +290,7 @@ static void scsi_host_dev_release(struct device *dev) kfree(shost); } -struct device_type scsi_host_type = { +static struct device_type scsi_host_type = { .name = "scsi_host", .release = scsi_host_dev_release, }; diff --git a/drivers/scsi/ibmvscsi/Makefile b/drivers/scsi/ibmvscsi/Makefile index 6ac0633d5452..a423d9633625 100644 --- a/drivers/scsi/ibmvscsi/Makefile +++ b/drivers/scsi/ibmvscsi/Makefile @@ -5,3 +5,4 @@ ibmvscsic-$(CONFIG_PPC_ISERIES) += iseries_vscsi.o ibmvscsic-$(CONFIG_PPC_PSERIES) += rpa_vscsi.o obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvstgt.o +obj-$(CONFIG_SCSI_IBMVFC) += ibmvfc.o diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c new file mode 100644 index 000000000000..eb702b96d57c --- /dev/null +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -0,0 +1,3910 @@ +/* + * ibmvfc.c -- driver for IBM Power Virtual Fibre Channel Adapter + * + * Written By: Brian King <brking@linux.vnet.ibm.com>, IBM Corporation + * + * Copyright (C) IBM Corporation, 2008 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/dma-mapping.h> +#include <linux/dmapool.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/kthread.h> +#include <linux/of.h> +#include <linux/stringify.h> +#include <asm/firmware.h> +#include <asm/irq.h> +#include <asm/vio.h> +#include <scsi/scsi.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_host.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_tcq.h> +#include <scsi/scsi_transport_fc.h> +#include "ibmvfc.h" + +static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT; +static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT; +static unsigned int max_lun = IBMVFC_MAX_LUN; +static unsigned int max_targets = IBMVFC_MAX_TARGETS; +static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT; +static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS; +static unsigned int dev_loss_tmo = IBMVFC_DEV_LOSS_TMO; +static unsigned int ibmvfc_debug = IBMVFC_DEBUG; +static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL; +static LIST_HEAD(ibmvfc_head); +static DEFINE_SPINLOCK(ibmvfc_driver_lock); +static struct scsi_transport_template *ibmvfc_transport_template; + +MODULE_DESCRIPTION("IBM Virtual Fibre Channel Driver"); +MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(IBMVFC_DRIVER_VERSION); + +module_param_named(init_timeout, init_timeout, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds. " + "[Default=" __stringify(IBMVFC_INIT_TIMEOUT) "]"); +module_param_named(default_timeout, default_timeout, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(default_timeout, + "Default timeout in seconds for initialization and EH commands. " + "[Default=" __stringify(IBMVFC_DEFAULT_TIMEOUT) "]"); +module_param_named(max_requests, max_requests, uint, S_IRUGO); +MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. " + "[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]"); +module_param_named(max_lun, max_lun, uint, S_IRUGO); +MODULE_PARM_DESC(max_lun, "Maximum allowed LUN. " + "[Default=" __stringify(IBMVFC_MAX_LUN) "]"); +module_param_named(max_targets, max_targets, uint, S_IRUGO); +MODULE_PARM_DESC(max_targets, "Maximum allowed targets. " + "[Default=" __stringify(IBMVFC_MAX_TARGETS) "]"); +module_param_named(disc_threads, disc_threads, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(disc_threads, "Number of device discovery threads to use. " + "[Default=" __stringify(IBMVFC_MAX_DISC_THREADS) "]"); +module_param_named(debug, ibmvfc_debug, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(debug, "Enable driver debug information. " + "[Default=" __stringify(IBMVFC_DEBUG) "]"); +module_param_named(dev_loss_tmo, dev_loss_tmo, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(dev_loss_tmo, "Maximum number of seconds that the FC " + "transport should insulate the loss of a remote port. Once this " + "value is exceeded, the scsi target is removed. " + "[Default=" __stringify(IBMVFC_DEV_LOSS_TMO) "]"); +module_param_named(log_level, log_level, uint, 0); +MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver. 
" + "[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]"); + +static const struct { + u16 status; + u16 error; + u8 result; + u8 retry; + int log; + char *name; +} cmd_status [] = { + { IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_ESTABLISH, DID_ERROR, 1, 1, "unable to establish" }, + { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_FAULT, DID_OK, 1, 0, "transport fault" }, + { IBMVFC_FABRIC_MAPPED, IBMVFC_CMD_TIMEOUT, DID_TIME_OUT, 1, 1, "command timeout" }, + { IBMVFC_FABRIC_MAPPED, IBMVFC_ENETDOWN, DID_NO_CONNECT, 1, 1, "network down" }, + { IBMVFC_FABRIC_MAPPED, IBMVFC_HW_FAILURE, DID_ERROR, 1, 1, "hardware failure" }, + { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DOWN_ERR, DID_REQUEUE, 0, 0, "link down" }, + { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DEAD_ERR, DID_ERROR, 0, 0, "link dead" }, + { IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_REGISTER, DID_ERROR, 1, 1, "unable to register" }, + { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_BUSY, DID_BUS_BUSY, 1, 0, "transport busy" }, + { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" }, + { IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" }, + { IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" }, + { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 0, 0, "link halted" }, + { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" }, + + { IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" }, + { IBMVFC_VIOS_FAILURE, IBMVFC_SW_FAILURE, DID_ERROR, 0, 1, "software failure" }, + { IBMVFC_VIOS_FAILURE, IBMVFC_INVALID_PARAMETER, DID_ABORT, 0, 1, "invalid parameter" }, + { IBMVFC_VIOS_FAILURE, IBMVFC_MISSING_PARAMETER, DID_ABORT, 0, 1, "missing parameter" }, + { IBMVFC_VIOS_FAILURE, IBMVFC_HOST_IO_BUS, DID_ERROR, 1, 1, "host I/O bus failure" }, + { IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ABORT, 0, 1, "transaction cancelled" }, + { IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ABORT, 0, 1, "transaction cancelled implicit" }, + { IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" }, + { IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" }, + + { IBMVFC_FC_FAILURE, IBMVFC_INVALID_ELS_CMD_CODE, DID_ERROR, 0, 1, "invalid ELS command code" }, + { IBMVFC_FC_FAILURE, IBMVFC_INVALID_VERSION, DID_ERROR, 0, 1, "invalid version level" }, + { IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_ERROR, DID_ERROR, 1, 1, "logical error" }, + { IBMVFC_FC_FAILURE, IBMVFC_INVALID_CT_IU_SIZE, DID_ERROR, 0, 1, "invalid CT_IU size" }, + { IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_BUSY, DID_REQUEUE, 1, 0, "logical busy" }, + { IBMVFC_FC_FAILURE, IBMVFC_PROTOCOL_ERROR, DID_ERROR, 1, 1, "protocol error" }, + { IBMVFC_FC_FAILURE, IBMVFC_UNABLE_TO_PERFORM_REQ, DID_ERROR, 1, 1, "unable to perform request" }, + { IBMVFC_FC_FAILURE, IBMVFC_CMD_NOT_SUPPORTED, DID_ERROR, 0, 0, "command not supported" }, + { IBMVFC_FC_FAILURE, IBMVFC_SERVER_NOT_AVAIL, DID_ERROR, 0, 1, "server not available" }, + { IBMVFC_FC_FAILURE, IBMVFC_CMD_IN_PROGRESS, DID_ERROR, 0, 1, "command already in progress" }, + { IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" }, + + { IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" }, +}; + +static void ibmvfc_npiv_login(struct ibmvfc_host *); +static void ibmvfc_tgt_send_prli(struct ibmvfc_target *); +static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *); +static void ibmvfc_tgt_query_target(struct ibmvfc_target *); + +static const char *unknown_error = 
"unknown error"; + +#ifdef CONFIG_SCSI_IBMVFC_TRACE +/** + * ibmvfc_trc_start - Log a start trace entry + * @evt: ibmvfc event struct + * + **/ +static void ibmvfc_trc_start(struct ibmvfc_event *evt) +{ + struct ibmvfc_host *vhost = evt->vhost; + struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd; + struct ibmvfc_mad_common *mad = &evt->iu.mad_common; + struct ibmvfc_trace_entry *entry; + + entry = &vhost->trace[vhost->trace_index++]; + entry->evt = evt; + entry->time = jiffies; + entry->fmt = evt->crq.format; + entry->type = IBMVFC_TRC_START; + + switch (entry->fmt) { + case IBMVFC_CMD_FORMAT: + entry->op_code = vfc_cmd->iu.cdb[0]; + entry->scsi_id = vfc_cmd->tgt_scsi_id; + entry->lun = scsilun_to_int(&vfc_cmd->iu.lun); + entry->tmf_flags = vfc_cmd->iu.tmf_flags; + entry->u.start.xfer_len = vfc_cmd->iu.xfer_len; + break; + case IBMVFC_MAD_FORMAT: + entry->op_code = mad->opcode; + break; + default: + break; + }; +} + +/** + * ibmvfc_trc_end - Log an end trace entry + * @evt: ibmvfc event struct + * + **/ +static void ibmvfc_trc_end(struct ibmvfc_event *evt) +{ + struct ibmvfc_host *vhost = evt->vhost; + struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd; + struct ibmvfc_mad_common *mad = &evt->xfer_iu->mad_common; + struct ibmvfc_trace_entry *entry = &vhost->trace[vhost->trace_index++]; + + entry->evt = evt; + entry->time = jiffies; + entry->fmt = evt->crq.format; + entry->type = IBMVFC_TRC_END; + + switch (entry->fmt) { + case IBMVFC_CMD_FORMAT: + entry->op_code = vfc_cmd->iu.cdb[0]; + entry->scsi_id = vfc_cmd->tgt_scsi_id; + entry->lun = scsilun_to_int(&vfc_cmd->iu.lun); + entry->tmf_flags = vfc_cmd->iu.tmf_flags; + entry->u.end.status = vfc_cmd->status; + entry->u.end.error = vfc_cmd->error; + entry->u.end.fcp_rsp_flags = vfc_cmd->rsp.flags; + entry->u.end.rsp_code = vfc_cmd->rsp.data.info.rsp_code; + entry->u.end.scsi_status = vfc_cmd->rsp.scsi_status; + break; + case IBMVFC_MAD_FORMAT: + entry->op_code = mad->opcode; + entry->u.end.status = mad->status; + break; + default: + break; + + }; +} + +#else +#define ibmvfc_trc_start(evt) do { } while (0) +#define ibmvfc_trc_end(evt) do { } while (0) +#endif + +/** + * ibmvfc_get_err_index - Find the index into cmd_status for the fcp response + * @status: status / error class + * @error: error + * + * Return value: + * index into cmd_status / -EINVAL on failure + **/ +static int ibmvfc_get_err_index(u16 status, u16 error) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(cmd_status); i++) + if ((cmd_status[i].status & status) == cmd_status[i].status && + cmd_status[i].error == error) + return i; + + return -EINVAL; +} + +/** + * ibmvfc_get_cmd_error - Find the error description for the fcp response + * @status: status / error class + * @error: error + * + * Return value: + * error description string + **/ +static const char *ibmvfc_get_cmd_error(u16 status, u16 error) +{ + int rc = ibmvfc_get_err_index(status, error); + if (rc >= 0) + return cmd_status[rc].name; + return unknown_error; +} + +/** + * ibmvfc_get_err_result - Find the scsi status to return for the fcp response + * @vfc_cmd: ibmvfc command struct + * + * Return value: + * SCSI result value to return for completed command + **/ +static int ibmvfc_get_err_result(struct ibmvfc_cmd *vfc_cmd) +{ + int err; + struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp; + int fc_rsp_len = rsp->fcp_rsp_len; + + if ((rsp->flags & FCP_RSP_LEN_VALID) && + ((!fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) || + rsp->data.info.rsp_code)) + return DID_ERROR << 16; + + if (!vfc_cmd->status) { + if (rsp->flags & FCP_RESID_OVER) 
+ return rsp->scsi_status | (DID_ERROR << 16); + else + return rsp->scsi_status | (DID_OK << 16); + } + + err = ibmvfc_get_err_index(vfc_cmd->status, vfc_cmd->error); + if (err >= 0) + return rsp->scsi_status | (cmd_status[err].result << 16); + return rsp->scsi_status | (DID_ERROR << 16); +} + +/** + * ibmvfc_retry_cmd - Determine if error status is retryable + * @status: status / error class + * @error: error + * + * Return value: + * 1 if error should be retried / 0 if it should not + **/ +static int ibmvfc_retry_cmd(u16 status, u16 error) +{ + int rc = ibmvfc_get_err_index(status, error); + + if (rc >= 0) + return cmd_status[rc].retry; + return 1; +} + +static const char *unknown_fc_explain = "unknown fc explain"; + +static const struct { + u16 fc_explain; + char *name; +} ls_explain [] = { + { 0x00, "no additional explanation" }, + { 0x01, "service parameter error - options" }, + { 0x03, "service parameter error - initiator control" }, + { 0x05, "service parameter error - recipient control" }, + { 0x07, "service parameter error - received data field size" }, + { 0x09, "service parameter error - concurrent seq" }, + { 0x0B, "service parameter error - credit" }, + { 0x0D, "invalid N_Port/F_Port_Name" }, + { 0x0E, "invalid node/Fabric Name" }, + { 0x0F, "invalid common service parameters" }, + { 0x11, "invalid association header" }, + { 0x13, "association header required" }, + { 0x15, "invalid originator S_ID" }, + { 0x17, "invalid OX_ID-RX-ID combination" }, + { 0x19, "command (request) already in progress" }, + { 0x1E, "N_Port Login requested" }, + { 0x1F, "Invalid N_Port_ID" }, +}; + +static const struct { + u16 fc_explain; + char *name; +} gs_explain [] = { + { 0x00, "no additional explanation" }, + { 0x01, "port identifier not registered" }, + { 0x02, "port name not registered" }, + { 0x03, "node name not registered" }, + { 0x04, "class of service not registered" }, + { 0x06, "initial process associator not registered" }, + { 0x07, "FC-4 TYPEs not registered" }, + { 0x08, "symbolic port name not registered" }, + { 0x09, "symbolic node name not registered" }, + { 0x0A, "port type not registered" }, + { 0xF0, "authorization exception" }, + { 0xF1, "authentication exception" }, + { 0xF2, "data base full" }, + { 0xF3, "data base empty" }, + { 0xF4, "processing request" }, + { 0xF5, "unable to verify connection" }, + { 0xF6, "devices not in a common zone" }, +}; + +/** + * ibmvfc_get_ls_explain - Return the FC Explain description text + * @status: FC Explain status + * + * Returns: + * error string + **/ +static const char *ibmvfc_get_ls_explain(u16 status) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(ls_explain); i++) + if (ls_explain[i].fc_explain == status) + return ls_explain[i].name; + + return unknown_fc_explain; +} + +/** + * ibmvfc_get_gs_explain - Return the FC Explain description text + * @status: FC Explain status + * + * Returns: + * error string + **/ +static const char *ibmvfc_get_gs_explain(u16 status) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(gs_explain); i++) + if (gs_explain[i].fc_explain == status) + return gs_explain[i].name; + + return unknown_fc_explain; +} + +static const struct { + enum ibmvfc_fc_type fc_type; + char *name; +} fc_type [] = { + { IBMVFC_FABRIC_REJECT, "fabric reject" }, + { IBMVFC_PORT_REJECT, "port reject" }, + { IBMVFC_LS_REJECT, "ELS reject" }, + { IBMVFC_FABRIC_BUSY, "fabric busy" }, + { IBMVFC_PORT_BUSY, "port busy" }, + { IBMVFC_BASIC_REJECT, "basic reject" }, +}; + +static const char *unknown_fc_type = "unknown fc type"; + +/** + * 
ibmvfc_get_fc_type - Return the FC Type description text + * @status: FC Type error status + * + * Returns: + * error string + **/ +static const char *ibmvfc_get_fc_type(u16 status) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(fc_type); i++) + if (fc_type[i].fc_type == status) + return fc_type[i].name; + + return unknown_fc_type; +} + +/** + * ibmvfc_set_tgt_action - Set the next init action for the target + * @tgt: ibmvfc target struct + * @action: action to perform + * + **/ +static void ibmvfc_set_tgt_action(struct ibmvfc_target *tgt, + enum ibmvfc_target_action action) +{ + switch (tgt->action) { + case IBMVFC_TGT_ACTION_DEL_RPORT: + break; + default: + tgt->action = action; + break; + } +} + +/** + * ibmvfc_set_host_state - Set the state for the host + * @vhost: ibmvfc host struct + * @state: state to set host to + * + * Returns: + * 0 if state changed / non-zero if not changed + **/ +static int ibmvfc_set_host_state(struct ibmvfc_host *vhost, + enum ibmvfc_host_state state) +{ + int rc = 0; + + switch (vhost->state) { + case IBMVFC_HOST_OFFLINE: + rc = -EINVAL; + break; + default: + vhost->state = state; + break; + }; + + return rc; +} + +/** + * ibmvfc_set_host_action - Set the next init action for the host + * @vhost: ibmvfc host struct + * @action: action to perform + * + **/ +static void ibmvfc_set_host_action(struct ibmvfc_host *vhost, + enum ibmvfc_host_action action) +{ + switch (action) { + case IBMVFC_HOST_ACTION_ALLOC_TGTS: + if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) + vhost->action = action; + break; + case IBMVFC_HOST_ACTION_INIT_WAIT: + if (vhost->action == IBMVFC_HOST_ACTION_INIT) + vhost->action = action; + break; + case IBMVFC_HOST_ACTION_QUERY: + switch (vhost->action) { + case IBMVFC_HOST_ACTION_INIT_WAIT: + case IBMVFC_HOST_ACTION_NONE: + case IBMVFC_HOST_ACTION_TGT_ADD: + vhost->action = action; + break; + default: + break; + }; + break; + case IBMVFC_HOST_ACTION_TGT_INIT: + if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS) + vhost->action = action; + break; + case IBMVFC_HOST_ACTION_INIT: + case IBMVFC_HOST_ACTION_TGT_DEL: + case IBMVFC_HOST_ACTION_QUERY_TGTS: + case IBMVFC_HOST_ACTION_TGT_ADD: + case IBMVFC_HOST_ACTION_NONE: + default: + vhost->action = action; + break; + }; +} + +/** + * ibmvfc_reinit_host - Re-start host initialization (no NPIV Login) + * @vhost: ibmvfc host struct + * + * Return value: + * nothing + **/ +static void ibmvfc_reinit_host(struct ibmvfc_host *vhost) +{ + if (vhost->action == IBMVFC_HOST_ACTION_NONE) { + scsi_block_requests(vhost->host); + ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING); + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY); + } else + vhost->reinit = 1; + + wake_up(&vhost->work_wait_q); +} + +/** + * ibmvfc_link_down - Handle a link down event from the adapter + * @vhost: ibmvfc host struct + * @state: ibmvfc host state to enter + * + **/ +static void ibmvfc_link_down(struct ibmvfc_host *vhost, + enum ibmvfc_host_state state) +{ + struct ibmvfc_target *tgt; + + ENTER; + scsi_block_requests(vhost->host); + list_for_each_entry(tgt, &vhost->targets, queue) + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + ibmvfc_set_host_state(vhost, state); + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL); + vhost->events_to_log |= IBMVFC_AE_LINKDOWN; + wake_up(&vhost->work_wait_q); + LEAVE; +} + +/** + * ibmvfc_init_host - Start host initialization + * @vhost: ibmvfc host struct + * + * Return value: + * nothing + **/ +static void ibmvfc_init_host(struct ibmvfc_host *vhost) +{ + struct 
ibmvfc_target *tgt; + + if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) { + if (++vhost->init_retries > IBMVFC_MAX_INIT_RETRIES) { + dev_err(vhost->dev, + "Host initialization retries exceeded. Taking adapter offline\n"); + ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE); + return; + } + } + + if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) { + list_for_each_entry(tgt, &vhost->targets, queue) + tgt->need_login = 1; + scsi_block_requests(vhost->host); + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); + vhost->job_step = ibmvfc_npiv_login; + wake_up(&vhost->work_wait_q); + } +} + +/** + * ibmvfc_send_crq - Send a CRQ + * @vhost: ibmvfc host struct + * @word1: the first 64 bits of the data + * @word2: the second 64 bits of the data + * + * Return value: + * 0 on success / other on failure + **/ +static int ibmvfc_send_crq(struct ibmvfc_host *vhost, u64 word1, u64 word2) +{ + struct vio_dev *vdev = to_vio_dev(vhost->dev); + return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2); +} + +/** + * ibmvfc_send_crq_init - Send a CRQ init message + * @vhost: ibmvfc host struct + * + * Return value: + * 0 on success / other on failure + **/ +static int ibmvfc_send_crq_init(struct ibmvfc_host *vhost) +{ + ibmvfc_dbg(vhost, "Sending CRQ init\n"); + return ibmvfc_send_crq(vhost, 0xC001000000000000LL, 0); +} + +/** + * ibmvfc_send_crq_init_complete - Send a CRQ init complete message + * @vhost: ibmvfc host struct + * + * Return value: + * 0 on success / other on failure + **/ +static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost) +{ + ibmvfc_dbg(vhost, "Sending CRQ init complete\n"); + return ibmvfc_send_crq(vhost, 0xC002000000000000LL, 0); +} + +/** + * ibmvfc_release_crq_queue - Deallocates data and unregisters CRQ + * @vhost: ibmvfc host struct + * + * Frees irq, deallocates a page for messages, unmaps dma, and unregisters + * the crq with the hypervisor. 
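+ * The H_FREE_CRQ hcall is retried for as long as the hypervisor returns H_BUSY or a long-busy return code.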
+ **/ +static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost) +{ + long rc; + struct vio_dev *vdev = to_vio_dev(vhost->dev); + struct ibmvfc_crq_queue *crq = &vhost->crq; + + ibmvfc_dbg(vhost, "Releasing CRQ\n"); + free_irq(vdev->irq, vhost); + do { + rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); + } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); + + vhost->state = IBMVFC_NO_CRQ; + dma_unmap_single(vhost->dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL); + free_page((unsigned long)crq->msgs); +} + +/** + * ibmvfc_reenable_crq_queue - reenables the CRQ + * @vhost: ibmvfc host struct + * + * Return value: + * 0 on success / other on failure + **/ +static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost) +{ + int rc; + struct vio_dev *vdev = to_vio_dev(vhost->dev); + + /* Re-enable the CRQ */ + do { + rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address); + } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc)); + + if (rc) + dev_err(vhost->dev, "Error enabling adapter (rc=%d)\n", rc); + + return rc; +} + +/** + * ibmvfc_reset_crq - resets a crq after a failure + * @vhost: ibmvfc host struct + * + * Return value: + * 0 on success / other on failure + **/ +static int ibmvfc_reset_crq(struct ibmvfc_host *vhost) +{ + int rc; + struct vio_dev *vdev = to_vio_dev(vhost->dev); + struct ibmvfc_crq_queue *crq = &vhost->crq; + + /* Close the CRQ */ + do { + rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); + } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); + + vhost->state = IBMVFC_NO_CRQ; + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); + + /* Clean out the queue */ + memset(crq->msgs, 0, PAGE_SIZE); + crq->cur = 0; + + /* And re-open it again */ + rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, + crq->msg_token, PAGE_SIZE); + + if (rc == H_CLOSED) + /* Adapter is good, but other end is not ready */ + dev_warn(vhost->dev, "Partner adapter not ready\n"); + else if (rc != 0) + dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc); + + return rc; +} + +/** + * ibmvfc_valid_event - Determines if event is valid. + * @pool: event_pool that contains the event + * @evt: ibmvfc event to be checked for validity + * + * Return value: + * 1 if event is valid / 0 if event is not valid + **/ +static int ibmvfc_valid_event(struct ibmvfc_event_pool *pool, + struct ibmvfc_event *evt) +{ + int index = evt - pool->events; + if (index < 0 || index >= pool->size) /* outside of bounds */ + return 0; + if (evt != pool->events + index) /* unaligned */ + return 0; + return 1; +} + +/** + * ibmvfc_free_event - Free the specified event + * @evt: ibmvfc_event to be freed + * + **/ +static void ibmvfc_free_event(struct ibmvfc_event *evt) +{ + struct ibmvfc_host *vhost = evt->vhost; + struct ibmvfc_event_pool *pool = &vhost->pool; + + BUG_ON(!ibmvfc_valid_event(pool, evt)); + BUG_ON(atomic_inc_return(&evt->free) != 1); + list_add_tail(&evt->queue, &vhost->free); +} + +/** + * ibmvfc_scsi_eh_done - EH done function for queuecommand commands + * @evt: ibmvfc event struct + * + * This function does not setup any error status, that must be done + * before this function gets called. 
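+ * In this driver, ibmvfc_fail_request() fills in cmnd->result and points evt->done at this routine before invoking it.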
+ **/ +static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt) +{ + struct scsi_cmnd *cmnd = evt->cmnd; + + if (cmnd) { + scsi_dma_unmap(cmnd); + cmnd->scsi_done(cmnd); + } + + ibmvfc_free_event(evt); +} + +/** + * ibmvfc_fail_request - Fail request with specified error code + * @evt: ibmvfc event struct + * @error_code: error code to fail request with + * + * Return value: + * none + **/ +static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code) +{ + if (evt->cmnd) { + evt->cmnd->result = (error_code << 16); + evt->done = ibmvfc_scsi_eh_done; + } else + evt->xfer_iu->mad_common.status = IBMVFC_MAD_DRIVER_FAILED; + + list_del(&evt->queue); + del_timer(&evt->timer); + ibmvfc_trc_end(evt); + evt->done(evt); +} + +/** + * ibmvfc_purge_requests - Our virtual adapter just shut down. Purge any sent requests + * @vhost: ibmvfc host struct + * @error_code: error code to fail requests with + * + * Return value: + * none + **/ +static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code) +{ + struct ibmvfc_event *evt, *pos; + + ibmvfc_dbg(vhost, "Purging all requests\n"); + list_for_each_entry_safe(evt, pos, &vhost->sent, queue) + ibmvfc_fail_request(evt, error_code); +} + +/** + * __ibmvfc_reset_host - Reset the connection to the server (no locking) + * @vhost: struct ibmvfc host to reset + **/ +static void __ibmvfc_reset_host(struct ibmvfc_host *vhost) +{ + int rc; + + scsi_block_requests(vhost->host); + ibmvfc_purge_requests(vhost, DID_ERROR); + if ((rc = ibmvfc_reset_crq(vhost)) || + (rc = ibmvfc_send_crq_init(vhost)) || + (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) { + dev_err(vhost->dev, "Error after reset rc=%d\n", rc); + ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); + } else + ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN); +} + +/** + * ibmvfc_reset_host - Reset the connection to the server + * @vhost: struct ibmvfc host to reset + **/ +static void ibmvfc_reset_host(struct ibmvfc_host *vhost) +{ + unsigned long flags; + + spin_lock_irqsave(vhost->host->host_lock, flags); + __ibmvfc_reset_host(vhost); + spin_unlock_irqrestore(vhost->host->host_lock, flags); +} + +/** + * ibmvfc_retry_host_init - Retry host initialization if allowed + * @vhost: ibmvfc host struct + * + **/ +static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost) +{ + if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) { + if (++vhost->init_retries > IBMVFC_MAX_INIT_RETRIES) { + dev_err(vhost->dev, + "Host initialization retries exceeded. 
Taking adapter offline\n"); + ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE); + } else if (vhost->init_retries == IBMVFC_MAX_INIT_RETRIES) + __ibmvfc_reset_host(vhost); + else + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); + } + + wake_up(&vhost->work_wait_q); +} + +/** + * __ibmvfc_find_target - Find the specified scsi_target (no locking) + * @starget: scsi target struct + * + * Return value: + * ibmvfc_target struct / NULL if not found + **/ +static struct ibmvfc_target *__ibmvfc_find_target(struct scsi_target *starget) +{ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct ibmvfc_host *vhost = shost_priv(shost); + struct ibmvfc_target *tgt; + + list_for_each_entry(tgt, &vhost->targets, queue) + if (tgt->target_id == starget->id) + return tgt; + return NULL; +} + +/** + * ibmvfc_find_target - Find the specified scsi_target + * @starget: scsi target struct + * + * Return value: + * ibmvfc_target struct / NULL if not found + **/ +static struct ibmvfc_target *ibmvfc_find_target(struct scsi_target *starget) +{ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct ibmvfc_target *tgt; + unsigned long flags; + + spin_lock_irqsave(shost->host_lock, flags); + tgt = __ibmvfc_find_target(starget); + spin_unlock_irqrestore(shost->host_lock, flags); + return tgt; +} + +/** + * ibmvfc_get_host_speed - Get host port speed + * @shost: scsi host struct + * + * Return value: + * none + **/ +static void ibmvfc_get_host_speed(struct Scsi_Host *shost) +{ + struct ibmvfc_host *vhost = shost_priv(shost); + unsigned long flags; + + spin_lock_irqsave(shost->host_lock, flags); + if (vhost->state == IBMVFC_ACTIVE) { + switch (vhost->login_buf->resp.link_speed / 100) { + case 1: + fc_host_speed(shost) = FC_PORTSPEED_1GBIT; + break; + case 2: + fc_host_speed(shost) = FC_PORTSPEED_2GBIT; + break; + case 4: + fc_host_speed(shost) = FC_PORTSPEED_4GBIT; + break; + case 8: + fc_host_speed(shost) = FC_PORTSPEED_8GBIT; + break; + case 10: + fc_host_speed(shost) = FC_PORTSPEED_10GBIT; + break; + case 16: + fc_host_speed(shost) = FC_PORTSPEED_16GBIT; + break; + default: + ibmvfc_log(vhost, 3, "Unknown port speed: %ld Gbit\n", + vhost->login_buf->resp.link_speed / 100); + fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; + break; + } + } else + fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; + spin_unlock_irqrestore(shost->host_lock, flags); +} + +/** + * ibmvfc_get_host_port_state - Get host port state + * @shost: scsi host struct + * + * Return value: + * none + **/ +static void ibmvfc_get_host_port_state(struct Scsi_Host *shost) +{ + struct ibmvfc_host *vhost = shost_priv(shost); + unsigned long flags; + + spin_lock_irqsave(shost->host_lock, flags); + switch (vhost->state) { + case IBMVFC_INITIALIZING: + case IBMVFC_ACTIVE: + fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; + break; + case IBMVFC_LINK_DOWN: + fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; + break; + case IBMVFC_LINK_DEAD: + case IBMVFC_HOST_OFFLINE: + fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; + break; + case IBMVFC_HALTED: + fc_host_port_state(shost) = FC_PORTSTATE_BLOCKED; + break; + default: + ibmvfc_log(vhost, 3, "Unknown port state: %d\n", vhost->state); + fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN; + break; + } + spin_unlock_irqrestore(shost->host_lock, flags); +} + +/** + * ibmvfc_set_rport_dev_loss_tmo - Set rport's device loss timeout + * @rport: rport struct + * @timeout: timeout value + * + * Return value: + * none + **/ +static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 
timeout) +{ + if (timeout) + rport->dev_loss_tmo = timeout; + else + rport->dev_loss_tmo = 1; +} + +/** + * ibmvfc_get_starget_node_name - Get SCSI target's node name + * @starget: scsi target struct + * + * Return value: + * none + **/ +static void ibmvfc_get_starget_node_name(struct scsi_target *starget) +{ + struct ibmvfc_target *tgt = ibmvfc_find_target(starget); + fc_starget_node_name(starget) = tgt ? tgt->ids.node_name : 0; +} + +/** + * ibmvfc_get_starget_port_name - Get SCSI target's port name + * @starget: scsi target struct + * + * Return value: + * none + **/ +static void ibmvfc_get_starget_port_name(struct scsi_target *starget) +{ + struct ibmvfc_target *tgt = ibmvfc_find_target(starget); + fc_starget_port_name(starget) = tgt ? tgt->ids.port_name : 0; +} + +/** + * ibmvfc_get_starget_port_id - Get SCSI target's port ID + * @starget: scsi target struct + * + * Return value: + * none + **/ +static void ibmvfc_get_starget_port_id(struct scsi_target *starget) +{ + struct ibmvfc_target *tgt = ibmvfc_find_target(starget); + fc_starget_port_id(starget) = tgt ? tgt->scsi_id : -1; +} + +/** + * ibmvfc_wait_while_resetting - Wait while the host resets + * @vhost: ibmvfc host struct + * + * Return value: + * 0 on success / other on failure + **/ +static int ibmvfc_wait_while_resetting(struct ibmvfc_host *vhost) +{ + long timeout = wait_event_timeout(vhost->init_wait_q, + (vhost->state == IBMVFC_ACTIVE || + vhost->state == IBMVFC_HOST_OFFLINE || + vhost->state == IBMVFC_LINK_DEAD), + (init_timeout * HZ)); + + return timeout ? 0 : -EIO; +} + +/** + * ibmvfc_issue_fc_host_lip - Re-initiate link initialization + * @shost: scsi host struct + * + * Return value: + * 0 on success / other on failure + **/ +static int ibmvfc_issue_fc_host_lip(struct Scsi_Host *shost) +{ + struct ibmvfc_host *vhost = shost_priv(shost); + + dev_err(vhost->dev, "Initiating host LIP. 
Resetting connection\n"); + ibmvfc_reset_host(vhost); + return ibmvfc_wait_while_resetting(vhost); +} + +/** + * ibmvfc_gather_partition_info - Gather info about the LPAR + * + * Return value: + * none + **/ +static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost) +{ + struct device_node *rootdn; + const char *name; + const unsigned int *num; + + rootdn = of_find_node_by_path("/"); + if (!rootdn) + return; + + name = of_get_property(rootdn, "ibm,partition-name", NULL); + if (name) + strncpy(vhost->partition_name, name, sizeof(vhost->partition_name)); + num = of_get_property(rootdn, "ibm,partition-no", NULL); + if (num) + vhost->partition_number = *num; + of_node_put(rootdn); +} + +/** + * ibmvfc_set_login_info - Setup info for NPIV login + * @vhost: ibmvfc host struct + * + * Return value: + * none + **/ +static void ibmvfc_set_login_info(struct ibmvfc_host *vhost) +{ + struct ibmvfc_npiv_login *login_info = &vhost->login_info; + struct device_node *of_node = vhost->dev->archdata.of_node; + const char *location; + + memset(login_info, 0, sizeof(*login_info)); + + login_info->ostype = IBMVFC_OS_LINUX; + login_info->max_dma_len = IBMVFC_MAX_SECTORS << 9; + login_info->max_payload = sizeof(struct ibmvfc_fcp_cmd_iu); + login_info->max_response = sizeof(struct ibmvfc_fcp_rsp); + login_info->partition_num = vhost->partition_number; + login_info->vfc_frame_version = 1; + login_info->fcp_version = 3; + if (vhost->client_migrated) + login_info->flags = IBMVFC_CLIENT_MIGRATED; + + login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ; + login_info->capabilities = IBMVFC_CAN_MIGRATE; + login_info->async.va = vhost->async_crq.msg_token; + login_info->async.len = vhost->async_crq.size; + strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME); + strncpy(login_info->device_name, + vhost->host->shost_gendev.bus_id, IBMVFC_MAX_NAME); + + location = of_get_property(of_node, "ibm,loc-code", NULL); + location = location ? location : vhost->dev->bus_id; + strncpy(login_info->drc_name, location, IBMVFC_MAX_NAME); +} + +/** + * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host + * @vhost: ibmvfc host who owns the event pool + * + * Returns zero on success. 
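+ * The pool holds max_requests + IBMVFC_NUM_INTERNAL_REQ events, each paired with a transfer IU in a single DMA-coherent buffer addressed by the event's CRQ ioba.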
+ **/ +static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost) +{ + int i; + struct ibmvfc_event_pool *pool = &vhost->pool; + + ENTER; + pool->size = max_requests + IBMVFC_NUM_INTERNAL_REQ; + pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL); + if (!pool->events) + return -ENOMEM; + + pool->iu_storage = dma_alloc_coherent(vhost->dev, + pool->size * sizeof(*pool->iu_storage), + &pool->iu_token, 0); + + if (!pool->iu_storage) { + kfree(pool->events); + return -ENOMEM; + } + + for (i = 0; i < pool->size; ++i) { + struct ibmvfc_event *evt = &pool->events[i]; + atomic_set(&evt->free, 1); + evt->crq.valid = 0x80; + evt->crq.ioba = pool->iu_token + (sizeof(*evt->xfer_iu) * i); + evt->xfer_iu = pool->iu_storage + i; + evt->vhost = vhost; + evt->ext_list = NULL; + list_add_tail(&evt->queue, &vhost->free); + } + + LEAVE; + return 0; +} + +/** + * ibmvfc_free_event_pool - Frees memory of the event pool of a host + * @vhost: ibmvfc host who owns the event pool + * + **/ +static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost) +{ + int i; + struct ibmvfc_event_pool *pool = &vhost->pool; + + ENTER; + for (i = 0; i < pool->size; ++i) { + list_del(&pool->events[i].queue); + BUG_ON(atomic_read(&pool->events[i].free) != 1); + if (pool->events[i].ext_list) + dma_pool_free(vhost->sg_pool, + pool->events[i].ext_list, + pool->events[i].ext_list_token); + } + + kfree(pool->events); + dma_free_coherent(vhost->dev, + pool->size * sizeof(*pool->iu_storage), + pool->iu_storage, pool->iu_token); + LEAVE; +} + +/** + * ibmvfc_get_event - Gets the next free event in pool + * @vhost: ibmvfc host struct + * + * Returns a free event from the pool. + **/ +static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_host *vhost) +{ + struct ibmvfc_event *evt; + + BUG_ON(list_empty(&vhost->free)); + evt = list_entry(vhost->free.next, struct ibmvfc_event, queue); + atomic_set(&evt->free, 0); + list_del(&evt->queue); + return evt; +} + +/** + * ibmvfc_init_event - Initialize fields in an event struct that are always + * required. 
+ * @evt: The event + * @done: Routine to call when the event is responded to + * @format: SRP or MAD format + **/ +static void ibmvfc_init_event(struct ibmvfc_event *evt, + void (*done) (struct ibmvfc_event *), u8 format) +{ + evt->cmnd = NULL; + evt->sync_iu = NULL; + evt->crq.format = format; + evt->done = done; +} + +/** + * ibmvfc_map_sg_list - Initialize scatterlist + * @scmd: scsi command struct + * @nseg: number of scatterlist segments + * @md: memory descriptor list to initialize + **/ +static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg, + struct srp_direct_buf *md) +{ + int i; + struct scatterlist *sg; + + scsi_for_each_sg(scmd, sg, nseg, i) { + md[i].va = sg_dma_address(sg); + md[i].len = sg_dma_len(sg); + md[i].key = 0; + } +} + +/** + * ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes decriptor fields + * @scmd: Scsi_Cmnd with the scatterlist + * @evt: ibmvfc event struct + * @vfc_cmd: vfc_cmd that contains the memory descriptor + * @dev: device for which to map dma memory + * + * Returns: + * 0 on success / non-zero on failure + **/ +static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd, + struct ibmvfc_event *evt, + struct ibmvfc_cmd *vfc_cmd, struct device *dev) +{ + + int sg_mapped; + struct srp_direct_buf *data = &vfc_cmd->ioba; + struct ibmvfc_host *vhost = dev_get_drvdata(dev); + + sg_mapped = scsi_dma_map(scmd); + if (!sg_mapped) { + vfc_cmd->flags |= IBMVFC_NO_MEM_DESC; + return 0; + } else if (unlikely(sg_mapped < 0)) { + if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL) + scmd_printk(KERN_ERR, scmd, "Failed to map DMA buffer for command\n"); + return sg_mapped; + } + + if (scmd->sc_data_direction == DMA_TO_DEVICE) { + vfc_cmd->flags |= IBMVFC_WRITE; + vfc_cmd->iu.add_cdb_len |= IBMVFC_WRDATA; + } else { + vfc_cmd->flags |= IBMVFC_READ; + vfc_cmd->iu.add_cdb_len |= IBMVFC_RDDATA; + } + + if (sg_mapped == 1) { + ibmvfc_map_sg_list(scmd, sg_mapped, data); + return 0; + } + + vfc_cmd->flags |= IBMVFC_SCATTERLIST; + + if (!evt->ext_list) { + evt->ext_list = dma_pool_alloc(vhost->sg_pool, GFP_ATOMIC, + &evt->ext_list_token); + + if (!evt->ext_list) { + scmd_printk(KERN_ERR, scmd, "Can't allocate memory for scatterlist\n"); + return -ENOMEM; + } + } + + ibmvfc_map_sg_list(scmd, sg_mapped, evt->ext_list); + + data->va = evt->ext_list_token; + data->len = sg_mapped * sizeof(struct srp_direct_buf); + data->key = 0; + return 0; +} + +/** + * ibmvfc_timeout - Internal command timeout handler + * @evt: struct ibmvfc_event that timed out + * + * Called when an internally generated command times out + **/ +static void ibmvfc_timeout(struct ibmvfc_event *evt) +{ + struct ibmvfc_host *vhost = evt->vhost; + dev_err(vhost->dev, "Command timed out (%p). Resetting connection\n", evt); + ibmvfc_reset_host(vhost); +} + +/** + * ibmvfc_send_event - Transforms event to u64 array and calls send_crq() + * @evt: event to be sent + * @vhost: ibmvfc host struct + * @timeout: timeout in seconds - 0 means do not time command + * + * Returns the value returned from ibmvfc_send_crq(). 
(Zero for success) + **/ +static int ibmvfc_send_event(struct ibmvfc_event *evt, + struct ibmvfc_host *vhost, unsigned long timeout) +{ + u64 *crq_as_u64 = (u64 *) &evt->crq; + int rc; + + /* Copy the IU into the transfer area */ + *evt->xfer_iu = evt->iu; + if (evt->crq.format == IBMVFC_CMD_FORMAT) + evt->xfer_iu->cmd.tag = (u64)evt; + else if (evt->crq.format == IBMVFC_MAD_FORMAT) + evt->xfer_iu->mad_common.tag = (u64)evt; + else + BUG(); + + list_add_tail(&evt->queue, &vhost->sent); + init_timer(&evt->timer); + + if (timeout) { + evt->timer.data = (unsigned long) evt; + evt->timer.expires = jiffies + (timeout * HZ); + evt->timer.function = (void (*)(unsigned long))ibmvfc_timeout; + add_timer(&evt->timer); + } + + if ((rc = ibmvfc_send_crq(vhost, crq_as_u64[0], crq_as_u64[1]))) { + list_del(&evt->queue); + del_timer(&evt->timer); + + /* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY. + * Firmware will send a CRQ with a transport event (0xFF) to + * tell this client what has happened to the transport. This + * will be handled in ibmvfc_handle_crq() + */ + if (rc == H_CLOSED) { + if (printk_ratelimit()) + dev_warn(vhost->dev, "Send warning. Receive queue closed, will retry.\n"); + if (evt->cmnd) + scsi_dma_unmap(evt->cmnd); + ibmvfc_free_event(evt); + return SCSI_MLQUEUE_HOST_BUSY; + } + + dev_err(vhost->dev, "Send error (rc=%d)\n", rc); + if (evt->cmnd) { + evt->cmnd->result = DID_ERROR << 16; + evt->done = ibmvfc_scsi_eh_done; + } else + evt->xfer_iu->mad_common.status = IBMVFC_MAD_CRQ_ERROR; + + evt->done(evt); + } else + ibmvfc_trc_start(evt); + + return 0; +} + +/** + * ibmvfc_log_error - Log an error for the failed command if appropriate + * @evt: ibmvfc event to log + * + **/ +static void ibmvfc_log_error(struct ibmvfc_event *evt) +{ + struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd; + struct ibmvfc_host *vhost = evt->vhost; + struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp; + struct scsi_cmnd *cmnd = evt->cmnd; + const char *err = unknown_error; + int index = ibmvfc_get_err_index(vfc_cmd->status, vfc_cmd->error); + int logerr = 0; + int rsp_code = 0; + + if (index >= 0) { + logerr = cmd_status[index].log; + err = cmd_status[index].name; + } + + if (!logerr && (vhost->log_level <= IBMVFC_DEFAULT_LOG_LEVEL)) + return; + + if (rsp->flags & FCP_RSP_LEN_VALID) + rsp_code = rsp->data.info.rsp_code; + + scmd_printk(KERN_ERR, cmnd, "Command (%02X) failed: %s (%x:%x) " + "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n", + cmnd->cmnd[0], err, vfc_cmd->status, vfc_cmd->error, + rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status); +} + +/** + * ibmvfc_scsi_done - Handle responses from commands + * @evt: ibmvfc event to be handled + * + * Used as a callback when sending scsi cmds. 
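+ * Copies any residual count and sense data from the FCP response into the scsi_cmnd before completing it.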
+ **/ +static void ibmvfc_scsi_done(struct ibmvfc_event *evt) +{ + struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd; + struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp; + struct scsi_cmnd *cmnd = evt->cmnd; + int rsp_len = 0; + int sense_len = rsp->fcp_sense_len; + + if (cmnd) { + if (vfc_cmd->response_flags & IBMVFC_ADAPTER_RESID_VALID) + scsi_set_resid(cmnd, vfc_cmd->adapter_resid); + else if (rsp->flags & FCP_RESID_UNDER) + scsi_set_resid(cmnd, rsp->fcp_resid); + else + scsi_set_resid(cmnd, 0); + + if (vfc_cmd->status) { + cmnd->result = ibmvfc_get_err_result(vfc_cmd); + + if (rsp->flags & FCP_RSP_LEN_VALID) + rsp_len = rsp->fcp_rsp_len; + if ((sense_len + rsp_len) > SCSI_SENSE_BUFFERSIZE) + sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len; + if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len) + memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len); + + ibmvfc_log_error(evt); + } + + if (!cmnd->result && + (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) < cmnd->underflow)) + cmnd->result = (DID_ERROR << 16); + + scsi_dma_unmap(cmnd); + cmnd->scsi_done(cmnd); + } + + ibmvfc_free_event(evt); +} + +/** + * ibmvfc_host_chkready - Check if the host can accept commands + * @vhost: struct ibmvfc host + * + * Returns: + * 1 if host can accept command / 0 if not + **/ +static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost) +{ + int result = 0; + + switch (vhost->state) { + case IBMVFC_LINK_DEAD: + case IBMVFC_HOST_OFFLINE: + result = DID_NO_CONNECT << 16; + break; + case IBMVFC_NO_CRQ: + case IBMVFC_INITIALIZING: + case IBMVFC_HALTED: + case IBMVFC_LINK_DOWN: + result = DID_REQUEUE << 16; + break; + case IBMVFC_ACTIVE: + result = 0; + break; + }; + + return result; +} + +/** + * ibmvfc_queuecommand - The queuecommand function of the scsi template + * @cmnd: struct scsi_cmnd to be executed + * @done: Callback function to be called when cmnd is completed + * + * Returns: + * 0 on success / other on failure + **/ +static int ibmvfc_queuecommand(struct scsi_cmnd *cmnd, + void (*done) (struct scsi_cmnd *)) +{ + struct ibmvfc_host *vhost = shost_priv(cmnd->device->host); + struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); + struct ibmvfc_cmd *vfc_cmd; + struct ibmvfc_event *evt; + u8 tag[2]; + int rc; + + if (unlikely((rc = fc_remote_port_chkready(rport))) || + unlikely((rc = ibmvfc_host_chkready(vhost)))) { + cmnd->result = rc; + done(cmnd); + return 0; + } + + cmnd->result = (DID_OK << 16); + evt = ibmvfc_get_event(vhost); + ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT); + evt->cmnd = cmnd; + cmnd->scsi_done = done; + vfc_cmd = &evt->iu.cmd; + memset(vfc_cmd, 0, sizeof(*vfc_cmd)); + vfc_cmd->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp); + vfc_cmd->resp.len = sizeof(vfc_cmd->rsp); + vfc_cmd->frame_type = IBMVFC_SCSI_FCP_TYPE; + vfc_cmd->payload_len = sizeof(vfc_cmd->iu); + vfc_cmd->resp_len = sizeof(vfc_cmd->rsp); + vfc_cmd->cancel_key = (unsigned long)cmnd->device->hostdata; + vfc_cmd->tgt_scsi_id = rport->port_id; + if ((rport->supported_classes & FC_COS_CLASS3) && + (fc_host_supported_classes(vhost->host) & FC_COS_CLASS3)) + vfc_cmd->flags = IBMVFC_CLASS_3_ERR; + vfc_cmd->iu.xfer_len = scsi_bufflen(cmnd); + int_to_scsilun(cmnd->device->lun, &vfc_cmd->iu.lun); + memcpy(vfc_cmd->iu.cdb, cmnd->cmnd, cmnd->cmd_len); + + if (scsi_populate_tag_msg(cmnd, tag)) { + vfc_cmd->task_tag = tag[1]; + switch (tag[0]) { + case MSG_SIMPLE_TAG: + vfc_cmd->iu.pri_task_attr = IBMVFC_SIMPLE_TASK; + break; + case MSG_HEAD_TAG: + vfc_cmd->iu.pri_task_attr 
= IBMVFC_HEAD_OF_QUEUE; + break; + case MSG_ORDERED_TAG: + vfc_cmd->iu.pri_task_attr = IBMVFC_ORDERED_TASK; + break; + }; + } + + if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev)))) + return ibmvfc_send_event(evt, vhost, 0); + + ibmvfc_free_event(evt); + if (rc == -ENOMEM) + return SCSI_MLQUEUE_HOST_BUSY; + + if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL) + scmd_printk(KERN_ERR, cmnd, + "Failed to map DMA buffer for command. rc=%d\n", rc); + + cmnd->result = DID_ERROR << 16; + done(cmnd); + return 0; +} + +/** + * ibmvfc_sync_completion - Signal that a synchronous command has completed + * @evt: ibmvfc event struct + * + **/ +static void ibmvfc_sync_completion(struct ibmvfc_event *evt) +{ + /* copy the response back */ + if (evt->sync_iu) + *evt->sync_iu = *evt->xfer_iu; + + complete(&evt->comp); +} + +/** + * ibmvfc_reset_device - Reset the device with the specified reset type + * @sdev: scsi device to reset + * @type: reset type + * @desc: reset type description for log messages + * + * Returns: + * 0 on success / other on failure + **/ +static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc) +{ + struct ibmvfc_host *vhost = shost_priv(sdev->host); + struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); + struct ibmvfc_cmd *tmf; + struct ibmvfc_event *evt; + union ibmvfc_iu rsp_iu; + struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp; + int rsp_rc = -EBUSY; + unsigned long flags; + int rsp_code = 0; + + spin_lock_irqsave(vhost->host->host_lock, flags); + if (vhost->state == IBMVFC_ACTIVE) { + evt = ibmvfc_get_event(vhost); + ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT); + + tmf = &evt->iu.cmd; + memset(tmf, 0, sizeof(*tmf)); + tmf->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp); + tmf->resp.len = sizeof(tmf->rsp); + tmf->frame_type = IBMVFC_SCSI_FCP_TYPE; + tmf->payload_len = sizeof(tmf->iu); + tmf->resp_len = sizeof(tmf->rsp); + tmf->cancel_key = (unsigned long)sdev->hostdata; + tmf->tgt_scsi_id = rport->port_id; + int_to_scsilun(sdev->lun, &tmf->iu.lun); + tmf->flags = (IBMVFC_NO_MEM_DESC | IBMVFC_TMF); + tmf->iu.tmf_flags = type; + evt->sync_iu = &rsp_iu; + + init_completion(&evt->comp); + rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout); + } + spin_unlock_irqrestore(vhost->host->host_lock, flags); + + if (rsp_rc != 0) { + sdev_printk(KERN_ERR, sdev, "Failed to send %s reset event. rc=%d\n", + desc, rsp_rc); + return -EIO; + } + + sdev_printk(KERN_INFO, sdev, "Resetting %s\n", desc); + wait_for_completion(&evt->comp); + + if (rsp_iu.cmd.status) { + if (fc_rsp->flags & FCP_RSP_LEN_VALID) + rsp_code = fc_rsp->data.info.rsp_code; + + sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) " + "flags: %x fcp_rsp: %x, scsi_status: %x\n", + desc, ibmvfc_get_cmd_error(rsp_iu.cmd.status, rsp_iu.cmd.error), + rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code, + fc_rsp->scsi_status); + rsp_rc = -EIO; + } else + sdev_printk(KERN_INFO, sdev, "%s reset successful\n", desc); + + spin_lock_irqsave(vhost->host->host_lock, flags); + ibmvfc_free_event(evt); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + return rsp_rc; +} + +/** + * ibmvfc_abort_task_set - Abort outstanding commands to the device + * @sdev: scsi device to abort commands + * + * This sends an Abort Task Set to the VIOS for the specified device. This does + * NOT send any cancel to the VIOS. That must be done separately. 
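+ * The error handlers pair this with ibmvfc_cancel_all(), which issues the corresponding cancel MAD.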
+ * + * Returns: + * 0 on success / other on failure + **/ +static int ibmvfc_abort_task_set(struct scsi_device *sdev) +{ + struct ibmvfc_host *vhost = shost_priv(sdev->host); + struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); + struct ibmvfc_cmd *tmf; + struct ibmvfc_event *evt, *found_evt; + union ibmvfc_iu rsp_iu; + struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp; + int rsp_rc = -EBUSY; + unsigned long flags; + int rsp_code = 0; + + spin_lock_irqsave(vhost->host->host_lock, flags); + found_evt = NULL; + list_for_each_entry(evt, &vhost->sent, queue) { + if (evt->cmnd && evt->cmnd->device == sdev) { + found_evt = evt; + break; + } + } + + if (!found_evt) { + if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL) + sdev_printk(KERN_INFO, sdev, "No events found to abort\n"); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + return 0; + } + + if (vhost->state == IBMVFC_ACTIVE) { + evt = ibmvfc_get_event(vhost); + ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT); + + tmf = &evt->iu.cmd; + memset(tmf, 0, sizeof(*tmf)); + tmf->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp); + tmf->resp.len = sizeof(tmf->rsp); + tmf->frame_type = IBMVFC_SCSI_FCP_TYPE; + tmf->payload_len = sizeof(tmf->iu); + tmf->resp_len = sizeof(tmf->rsp); + tmf->cancel_key = (unsigned long)sdev->hostdata; + tmf->tgt_scsi_id = rport->port_id; + int_to_scsilun(sdev->lun, &tmf->iu.lun); + tmf->flags = (IBMVFC_NO_MEM_DESC | IBMVFC_TMF); + tmf->iu.tmf_flags = IBMVFC_ABORT_TASK_SET; + evt->sync_iu = &rsp_iu; + + init_completion(&evt->comp); + rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout); + } + + spin_unlock_irqrestore(vhost->host->host_lock, flags); + + if (rsp_rc != 0) { + sdev_printk(KERN_ERR, sdev, "Failed to send abort. rc=%d\n", rsp_rc); + return -EIO; + } + + sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n"); + wait_for_completion(&evt->comp); + + if (rsp_iu.cmd.status) { + if (fc_rsp->flags & FCP_RSP_LEN_VALID) + rsp_code = fc_rsp->data.info.rsp_code; + + sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) " + "flags: %x fcp_rsp: %x, scsi_status: %x\n", + ibmvfc_get_cmd_error(rsp_iu.cmd.status, rsp_iu.cmd.error), + rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code, + fc_rsp->scsi_status); + rsp_rc = -EIO; + } else + sdev_printk(KERN_INFO, sdev, "Abort successful\n"); + + spin_lock_irqsave(vhost->host->host_lock, flags); + ibmvfc_free_event(evt); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + return rsp_rc; +} + +/** + * ibmvfc_cancel_all - Cancel all outstanding commands to the device + * @sdev: scsi device to cancel commands + * @type: type of error recovery being performed + * + * This sends a cancel to the VIOS for the specified device. This does + * NOT send any abort to the actual device. That must be done separately. 
+ * + * Returns: + * 0 on success / other on failure + **/ +static int ibmvfc_cancel_all(struct scsi_device *sdev, int type) +{ + struct ibmvfc_host *vhost = shost_priv(sdev->host); + struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); + struct ibmvfc_tmf *tmf; + struct ibmvfc_event *evt, *found_evt; + union ibmvfc_iu rsp; + int rsp_rc = -EBUSY; + unsigned long flags; + u16 status; + + ENTER; + spin_lock_irqsave(vhost->host->host_lock, flags); + found_evt = NULL; + list_for_each_entry(evt, &vhost->sent, queue) { + if (evt->cmnd && evt->cmnd->device == sdev) { + found_evt = evt; + break; + } + } + + if (!found_evt) { + if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL) + sdev_printk(KERN_INFO, sdev, "No events found to cancel\n"); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + return 0; + } + + if (vhost->state == IBMVFC_ACTIVE) { + evt = ibmvfc_get_event(vhost); + ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT); + + tmf = &evt->iu.tmf; + memset(tmf, 0, sizeof(*tmf)); + tmf->common.version = 1; + tmf->common.opcode = IBMVFC_TMF_MAD; + tmf->common.length = sizeof(*tmf); + tmf->scsi_id = rport->port_id; + int_to_scsilun(sdev->lun, &tmf->lun); + tmf->flags = (type | IBMVFC_TMF_LUA_VALID); + tmf->cancel_key = (unsigned long)sdev->hostdata; + tmf->my_cancel_key = (IBMVFC_TMF_CANCEL_KEY | (unsigned long)sdev->hostdata); + + evt->sync_iu = &rsp; + init_completion(&evt->comp); + rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout); + } + + spin_unlock_irqrestore(vhost->host->host_lock, flags); + + if (rsp_rc != 0) { + sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc); + return -EIO; + } + + sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n"); + + wait_for_completion(&evt->comp); + status = rsp.mad_common.status; + spin_lock_irqsave(vhost->host->host_lock, flags); + ibmvfc_free_event(evt); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + + if (status != IBMVFC_MAD_SUCCESS) { + sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status); + return -EIO; + } + + sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n"); + return 0; +} + +/** + * ibmvfc_eh_abort_handler - Abort a command + * @cmd: scsi command to abort + * + * Returns: + * SUCCESS / FAILED + **/ +static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd) +{ + struct ibmvfc_host *vhost = shost_priv(cmd->device->host); + struct ibmvfc_event *evt, *pos; + int cancel_rc, abort_rc; + unsigned long flags; + + ENTER; + ibmvfc_wait_while_resetting(vhost); + cancel_rc = ibmvfc_cancel_all(cmd->device, IBMVFC_TMF_ABORT_TASK_SET); + abort_rc = ibmvfc_abort_task_set(cmd->device); + + if (!cancel_rc && !abort_rc) { + spin_lock_irqsave(vhost->host->host_lock, flags); + list_for_each_entry_safe(evt, pos, &vhost->sent, queue) { + if (evt->cmnd && evt->cmnd->device == cmd->device) + ibmvfc_fail_request(evt, DID_ABORT); + } + spin_unlock_irqrestore(vhost->host->host_lock, flags); + LEAVE; + return SUCCESS; + } + + LEAVE; + return FAILED; +} + +/** + * ibmvfc_eh_device_reset_handler - Reset a single LUN + * @cmd: scsi command struct + * + * Returns: + * SUCCESS / FAILED + **/ +static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd) +{ + struct ibmvfc_host *vhost = shost_priv(cmd->device->host); + struct ibmvfc_event *evt, *pos; + int cancel_rc, reset_rc; + unsigned long flags; + + ENTER; + ibmvfc_wait_while_resetting(vhost); + cancel_rc = ibmvfc_cancel_all(cmd->device, IBMVFC_TMF_LUN_RESET); + reset_rc = 
ibmvfc_reset_device(cmd->device, IBMVFC_LUN_RESET, "LUN"); + + if (!cancel_rc && !reset_rc) { + spin_lock_irqsave(vhost->host->host_lock, flags); + list_for_each_entry_safe(evt, pos, &vhost->sent, queue) { + if (evt->cmnd && evt->cmnd->device == cmd->device) + ibmvfc_fail_request(evt, DID_ABORT); + } + spin_unlock_irqrestore(vhost->host->host_lock, flags); + LEAVE; + return SUCCESS; + } + + LEAVE; + return FAILED; +} + +/** + * ibmvfc_dev_cancel_all - Device iterated cancel all function + * @sdev: scsi device struct + * @data: return code + * + **/ +static void ibmvfc_dev_cancel_all(struct scsi_device *sdev, void *data) +{ + unsigned long *rc = data; + *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_TGT_RESET); +} + +/** + * ibmvfc_dev_abort_all - Device iterated abort task set function + * @sdev: scsi device struct + * @data: return code + * + **/ +static void ibmvfc_dev_abort_all(struct scsi_device *sdev, void *data) +{ + unsigned long *rc = data; + *rc |= ibmvfc_abort_task_set(sdev); +} + +/** + * ibmvfc_eh_target_reset_handler - Reset the target + * @cmd: scsi command struct + * + * Returns: + * SUCCESS / FAILED + **/ +static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd) +{ + struct ibmvfc_host *vhost = shost_priv(cmd->device->host); + struct scsi_target *starget = scsi_target(cmd->device); + struct ibmvfc_event *evt, *pos; + int reset_rc; + unsigned long cancel_rc = 0; + unsigned long flags; + + ENTER; + ibmvfc_wait_while_resetting(vhost); + starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all); + reset_rc = ibmvfc_reset_device(cmd->device, IBMVFC_TARGET_RESET, "target"); + + if (!cancel_rc && !reset_rc) { + spin_lock_irqsave(vhost->host->host_lock, flags); + list_for_each_entry_safe(evt, pos, &vhost->sent, queue) { + if (evt->cmnd && scsi_target(evt->cmnd->device) == starget) + ibmvfc_fail_request(evt, DID_ABORT); + } + spin_unlock_irqrestore(vhost->host->host_lock, flags); + LEAVE; + return SUCCESS; + } + + LEAVE; + return FAILED; +} + +/** + * ibmvfc_eh_host_reset_handler - Reset the connection to the server + * @cmd: struct scsi_cmnd having problems + * + **/ +static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd) +{ + int rc; + struct ibmvfc_host *vhost = shost_priv(cmd->device->host); + + dev_err(vhost->dev, "Resetting connection due to error recovery\n"); + rc = ibmvfc_issue_fc_host_lip(vhost->host); + return rc ? FAILED : SUCCESS; +} + +/** + * ibmvfc_terminate_rport_io - Terminate all pending I/O to the rport. 
+ * @rport: rport struct + * + * Return value: + * none + **/ +static void ibmvfc_terminate_rport_io(struct fc_rport *rport) +{ + struct scsi_target *starget = to_scsi_target(&rport->dev); + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct ibmvfc_host *vhost = shost_priv(shost); + struct ibmvfc_event *evt, *pos; + unsigned long cancel_rc = 0; + unsigned long abort_rc = 0; + unsigned long flags; + + ENTER; + starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all); + starget_for_each_device(starget, &abort_rc, ibmvfc_dev_abort_all); + + if (!cancel_rc && !abort_rc) { + spin_lock_irqsave(shost->host_lock, flags); + list_for_each_entry_safe(evt, pos, &vhost->sent, queue) { + if (evt->cmnd && scsi_target(evt->cmnd->device) == starget) + ibmvfc_fail_request(evt, DID_ABORT); + } + spin_unlock_irqrestore(shost->host_lock, flags); + } else + ibmvfc_issue_fc_host_lip(shost); + + scsi_target_unblock(&rport->dev); + LEAVE; +} + +static const struct { + enum ibmvfc_async_event ae; + const char *desc; +} ae_desc [] = { + { IBMVFC_AE_ELS_PLOGI, "PLOGI" }, + { IBMVFC_AE_ELS_LOGO, "LOGO" }, + { IBMVFC_AE_ELS_PRLO, "PRLO" }, + { IBMVFC_AE_SCN_NPORT, "N-Port SCN" }, + { IBMVFC_AE_SCN_GROUP, "Group SCN" }, + { IBMVFC_AE_SCN_DOMAIN, "Domain SCN" }, + { IBMVFC_AE_SCN_FABRIC, "Fabric SCN" }, + { IBMVFC_AE_LINK_UP, "Link Up" }, + { IBMVFC_AE_LINK_DOWN, "Link Down" }, + { IBMVFC_AE_LINK_DEAD, "Link Dead" }, + { IBMVFC_AE_HALT, "Halt" }, + { IBMVFC_AE_RESUME, "Resume" }, + { IBMVFC_AE_ADAPTER_FAILED, "Adapter Failed" }, +}; + +static const char *unknown_ae = "Unknown async"; + +/** + * ibmvfc_get_ae_desc - Get text description for async event + * @ae: async event + * + **/ +static const char *ibmvfc_get_ae_desc(u64 ae) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(ae_desc); i++) + if (ae_desc[i].ae == ae) + return ae_desc[i].desc; + + return unknown_ae; +} + +/** + * ibmvfc_handle_async - Handle an async event from the adapter + * @crq: crq to process + * @vhost: ibmvfc host struct + * + **/ +static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq, + struct ibmvfc_host *vhost) +{ + const char *desc = ibmvfc_get_ae_desc(crq->event); + + ibmvfc_log(vhost, 2, "%s event received\n", desc); + + switch (crq->event) { + case IBMVFC_AE_LINK_UP: + case IBMVFC_AE_RESUME: + vhost->events_to_log |= IBMVFC_AE_LINKUP; + ibmvfc_init_host(vhost); + break; + case IBMVFC_AE_SCN_FABRIC: + vhost->events_to_log |= IBMVFC_AE_RSCN; + ibmvfc_init_host(vhost); + break; + case IBMVFC_AE_SCN_NPORT: + case IBMVFC_AE_SCN_GROUP: + case IBMVFC_AE_SCN_DOMAIN: + vhost->events_to_log |= IBMVFC_AE_RSCN; + case IBMVFC_AE_ELS_LOGO: + case IBMVFC_AE_ELS_PRLO: + case IBMVFC_AE_ELS_PLOGI: + ibmvfc_reinit_host(vhost); + break; + case IBMVFC_AE_LINK_DOWN: + case IBMVFC_AE_ADAPTER_FAILED: + ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN); + break; + case IBMVFC_AE_LINK_DEAD: + ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); + break; + case IBMVFC_AE_HALT: + ibmvfc_link_down(vhost, IBMVFC_HALTED); + break; + default: + dev_err(vhost->dev, "Unknown async event received: %ld\n", crq->event); + break; + }; +} + +/** + * ibmvfc_handle_crq - Handles and frees received events in the CRQ + * @crq: Command/Response queue + * @vhost: ibmvfc host struct + * + **/ +static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost) +{ + long rc; + struct ibmvfc_event *evt = (struct ibmvfc_event *)crq->ioba; + + switch (crq->valid) { + case IBMVFC_CRQ_INIT_RSP: + switch (crq->format) { + case IBMVFC_CRQ_INIT: + dev_info(vhost->dev, 
"Partner initialized\n"); + /* Send back a response */ + rc = ibmvfc_send_crq_init_complete(vhost); + if (rc == 0) + ibmvfc_init_host(vhost); + else + dev_err(vhost->dev, "Unable to send init rsp. rc=%ld\n", rc); + break; + case IBMVFC_CRQ_INIT_COMPLETE: + dev_info(vhost->dev, "Partner initialization complete\n"); + ibmvfc_init_host(vhost); + break; + default: + dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format); + } + return; + case IBMVFC_CRQ_XPORT_EVENT: + vhost->state = IBMVFC_NO_CRQ; + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); + if (crq->format == IBMVFC_PARTITION_MIGRATED) { + /* We need to re-setup the interpartition connection */ + dev_info(vhost->dev, "Re-enabling adapter\n"); + vhost->client_migrated = 1; + ibmvfc_purge_requests(vhost, DID_REQUEUE); + if ((rc = ibmvfc_reenable_crq_queue(vhost)) || + (rc = ibmvfc_send_crq_init(vhost))) { + ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); + dev_err(vhost->dev, "Error after enable (rc=%ld)\n", rc); + } else + ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN); + } else { + dev_err(vhost->dev, "Virtual adapter failed (rc=%d)\n", crq->format); + + ibmvfc_purge_requests(vhost, DID_ERROR); + if ((rc = ibmvfc_reset_crq(vhost)) || + (rc = ibmvfc_send_crq_init(vhost))) { + ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); + dev_err(vhost->dev, "Error after reset (rc=%ld)\n", rc); + } else + ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN); + } + return; + case IBMVFC_CRQ_CMD_RSP: + break; + default: + dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid); + return; + } + + if (crq->format == IBMVFC_ASYNC_EVENT) + return; + + /* The only kind of payload CRQs we should get are responses to + * things we send. Make sure this response is to something we + * actually sent + */ + if (unlikely(!ibmvfc_valid_event(&vhost->pool, evt))) { + dev_err(vhost->dev, "Returned correlation_token 0x%08lx is invalid!\n", + crq->ioba); + return; + } + + if (unlikely(atomic_read(&evt->free))) { + dev_err(vhost->dev, "Received duplicate correlation_token 0x%08lx!\n", + crq->ioba); + return; + } + + del_timer(&evt->timer); + list_del(&evt->queue); + ibmvfc_trc_end(evt); + evt->done(evt); +} + +/** + * ibmvfc_scan_finished - Check if the device scan is done. + * @shost: scsi host struct + * @time: current elapsed time + * + * Returns: + * 0 if scan is not done / 1 if scan is done + **/ +static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time) +{ + unsigned long flags; + struct ibmvfc_host *vhost = shost_priv(shost); + int done = 0; + + spin_lock_irqsave(shost->host_lock, flags); + if (time >= (init_timeout * HZ)) { + dev_info(vhost->dev, "Scan taking longer than %d seconds, " + "continuing initialization\n", init_timeout); + done = 1; + } + + if (vhost->state != IBMVFC_NO_CRQ && vhost->action == IBMVFC_HOST_ACTION_NONE) + done = 1; + spin_unlock_irqrestore(shost->host_lock, flags); + return done; +} + +/** + * ibmvfc_slave_alloc - Setup the device's task set value + * @sdev: struct scsi_device device to configure + * + * Set the device's task set value so that error handling works as + * expected. 
+ * + * Returns: + * 0 on success / -ENXIO if device does not exist + **/ +static int ibmvfc_slave_alloc(struct scsi_device *sdev) +{ + struct Scsi_Host *shost = sdev->host; + struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); + struct ibmvfc_host *vhost = shost_priv(shost); + unsigned long flags = 0; + + if (!rport || fc_remote_port_chkready(rport)) + return -ENXIO; + + spin_lock_irqsave(shost->host_lock, flags); + sdev->hostdata = (void *)(unsigned long)vhost->task_set++; + spin_unlock_irqrestore(shost->host_lock, flags); + return 0; +} + +/** + * ibmvfc_slave_configure - Configure the device + * @sdev: struct scsi_device device to configure + * + * Enable allow_restart for a device if it is a disk. Adjust the + * queue_depth here also. + * + * Returns: + * 0 + **/ +static int ibmvfc_slave_configure(struct scsi_device *sdev) +{ + struct Scsi_Host *shost = sdev->host; + struct fc_rport *rport = starget_to_rport(sdev->sdev_target); + unsigned long flags = 0; + + spin_lock_irqsave(shost->host_lock, flags); + if (sdev->type == TYPE_DISK) + sdev->allow_restart = 1; + + if (sdev->tagged_supported) { + scsi_set_tag_type(sdev, MSG_SIMPLE_TAG); + scsi_activate_tcq(sdev, sdev->queue_depth); + } else + scsi_deactivate_tcq(sdev, sdev->queue_depth); + + rport->dev_loss_tmo = dev_loss_tmo; + spin_unlock_irqrestore(shost->host_lock, flags); + return 0; +} + +/** + * ibmvfc_change_queue_depth - Change the device's queue depth + * @sdev: scsi device struct + * @qdepth: depth to set + * + * Return value: + * actual depth set + **/ +static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth) +{ + if (qdepth > IBMVFC_MAX_CMDS_PER_LUN) + qdepth = IBMVFC_MAX_CMDS_PER_LUN; + + scsi_adjust_queue_depth(sdev, 0, qdepth); + return sdev->queue_depth; +} + +/** + * ibmvfc_change_queue_type - Change the device's queue type + * @sdev: scsi device struct + * @tag_type: type of tags to use + * + * Return value: + * actual queue type set + **/ +static int ibmvfc_change_queue_type(struct scsi_device *sdev, int tag_type) +{ + if (sdev->tagged_supported) { + scsi_set_tag_type(sdev, tag_type); + + if (tag_type) + scsi_activate_tcq(sdev, sdev->queue_depth); + else + scsi_deactivate_tcq(sdev, sdev->queue_depth); + } else + tag_type = 0; + + return tag_type; +} + +static ssize_t ibmvfc_show_host_partition_name(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ibmvfc_host *vhost = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%s\n", + vhost->login_buf->resp.partition_name); +} + +static struct device_attribute ibmvfc_host_partition_name = { + .attr = { + .name = "partition_name", + .mode = S_IRUGO, + }, + .show = ibmvfc_show_host_partition_name, +}; + +static ssize_t ibmvfc_show_host_device_name(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ibmvfc_host *vhost = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%s\n", + vhost->login_buf->resp.device_name); +} + +static struct device_attribute ibmvfc_host_device_name = { + .attr = { + .name = "device_name", + .mode = S_IRUGO, + }, + .show = ibmvfc_show_host_device_name, +}; + +static ssize_t ibmvfc_show_host_loc_code(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ibmvfc_host *vhost = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%s\n", + vhost->login_buf->resp.port_loc_code); +} + +static struct 
device_attribute ibmvfc_host_loc_code = { + .attr = { + .name = "port_loc_code", + .mode = S_IRUGO, + }, + .show = ibmvfc_show_host_loc_code, +}; + +static ssize_t ibmvfc_show_host_drc_name(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ibmvfc_host *vhost = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%s\n", + vhost->login_buf->resp.drc_name); +} + +static struct device_attribute ibmvfc_host_drc_name = { + .attr = { + .name = "drc_name", + .mode = S_IRUGO, + }, + .show = ibmvfc_show_host_drc_name, +}; + +static ssize_t ibmvfc_show_host_npiv_version(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ibmvfc_host *vhost = shost_priv(shost); + return snprintf(buf, PAGE_SIZE, "%d\n", vhost->login_buf->resp.version); +} + +static struct device_attribute ibmvfc_host_npiv_version = { + .attr = { + .name = "npiv_version", + .mode = S_IRUGO, + }, + .show = ibmvfc_show_host_npiv_version, +}; + +/** + * ibmvfc_show_log_level - Show the adapter's error logging level + * @dev: class device struct + * @buf: buffer + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ibmvfc_show_log_level(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ibmvfc_host *vhost = shost_priv(shost); + unsigned long flags = 0; + int len; + + spin_lock_irqsave(shost->host_lock, flags); + len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->log_level); + spin_unlock_irqrestore(shost->host_lock, flags); + return len; +} + +/** + * ibmvfc_store_log_level - Change the adapter's error logging level + * @dev: class device struct + * @buf: buffer + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ibmvfc_store_log_level(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ibmvfc_host *vhost = shost_priv(shost); + unsigned long flags = 0; + + spin_lock_irqsave(shost->host_lock, flags); + vhost->log_level = simple_strtoul(buf, NULL, 10); + spin_unlock_irqrestore(shost->host_lock, flags); + return strlen(buf); +} + +static struct device_attribute ibmvfc_log_level_attr = { + .attr = { + .name = "log_level", + .mode = S_IRUGO | S_IWUSR, + }, + .show = ibmvfc_show_log_level, + .store = ibmvfc_store_log_level +}; + +#ifdef CONFIG_SCSI_IBMVFC_TRACE +/** + * ibmvfc_read_trace - Dump the adapter trace + * @kobj: kobject struct + * @bin_attr: bin_attribute struct + * @buf: buffer + * @off: offset + * @count: buffer size + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ibmvfc_read_trace(struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct Scsi_Host *shost = class_to_shost(dev); + struct ibmvfc_host *vhost = shost_priv(shost); + unsigned long flags = 0; + int size = IBMVFC_TRACE_SIZE; + char *src = (char *)vhost->trace; + + if (off > size) + return 0; + if (off + count > size) { + size -= off; + count = size; + } + + spin_lock_irqsave(shost->host_lock, flags); + memcpy(buf, &src[off], count); + spin_unlock_irqrestore(shost->host_lock, flags); + return count; +} + +static struct bin_attribute ibmvfc_trace_attr = { + .attr = { + .name = "trace", + .mode = S_IRUGO, + }, + .size = 0, + .read = ibmvfc_read_trace, +}; +#endif + 
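+/* The attributes below are exported through the template's shost_attrs hook and should appear per adapter under the scsi_host class in sysfs; log_level is the only writable one, and the trace file above is built only when CONFIG_SCSI_IBMVFC_TRACE is enabled. */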
+static struct device_attribute *ibmvfc_attrs[] = { + &ibmvfc_host_partition_name, + &ibmvfc_host_device_name, + &ibmvfc_host_loc_code, + &ibmvfc_host_drc_name, + &ibmvfc_host_npiv_version, + &ibmvfc_log_level_attr, + NULL +}; + +static struct scsi_host_template driver_template = { + .module = THIS_MODULE, + .name = "IBM POWER Virtual FC Adapter", + .proc_name = IBMVFC_NAME, + .queuecommand = ibmvfc_queuecommand, + .eh_abort_handler = ibmvfc_eh_abort_handler, + .eh_device_reset_handler = ibmvfc_eh_device_reset_handler, + .eh_target_reset_handler = ibmvfc_eh_target_reset_handler, + .eh_host_reset_handler = ibmvfc_eh_host_reset_handler, + .slave_alloc = ibmvfc_slave_alloc, + .slave_configure = ibmvfc_slave_configure, + .scan_finished = ibmvfc_scan_finished, + .change_queue_depth = ibmvfc_change_queue_depth, + .change_queue_type = ibmvfc_change_queue_type, + .cmd_per_lun = 16, + .can_queue = IBMVFC_MAX_REQUESTS_DEFAULT, + .this_id = -1, + .sg_tablesize = SG_ALL, + .max_sectors = IBMVFC_MAX_SECTORS, + .use_clustering = ENABLE_CLUSTERING, + .shost_attrs = ibmvfc_attrs, +}; + +/** + * ibmvfc_next_async_crq - Returns the next entry in async queue + * @vhost: ibmvfc host struct + * + * Returns: + * Pointer to next entry in queue / NULL if empty + **/ +static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost) +{ + struct ibmvfc_async_crq_queue *async_crq = &vhost->async_crq; + struct ibmvfc_async_crq *crq; + + crq = &async_crq->msgs[async_crq->cur]; + if (crq->valid & 0x80) { + if (++async_crq->cur == async_crq->size) + async_crq->cur = 0; + } else + crq = NULL; + + return crq; +} + +/** + * ibmvfc_next_crq - Returns the next entry in message queue + * @vhost: ibmvfc host struct + * + * Returns: + * Pointer to next entry in queue / NULL if empty + **/ +static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost) +{ + struct ibmvfc_crq_queue *queue = &vhost->crq; + struct ibmvfc_crq *crq; + + crq = &queue->msgs[queue->cur]; + if (crq->valid & 0x80) { + if (++queue->cur == queue->size) + queue->cur = 0; + } else + crq = NULL; + + return crq; +} + +/** + * ibmvfc_interrupt - Interrupt handler + * @irq: number of irq to handle, not used + * @dev_instance: ibmvfc_host that received interrupt + * + * Returns: + * IRQ_HANDLED + **/ +static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance) +{ + struct ibmvfc_host *vhost = (struct ibmvfc_host *)dev_instance; + struct vio_dev *vdev = to_vio_dev(vhost->dev); + struct ibmvfc_crq *crq; + struct ibmvfc_async_crq *async; + unsigned long flags; + int done = 0; + + spin_lock_irqsave(vhost->host->host_lock, flags); + vio_disable_interrupts(to_vio_dev(vhost->dev)); + while (!done) { + /* Pull all the valid messages off the CRQ */ + while ((crq = ibmvfc_next_crq(vhost)) != NULL) { + ibmvfc_handle_crq(crq, vhost); + crq->valid = 0; + } + + /* Pull all the valid messages off the async CRQ */ + while ((async = ibmvfc_next_async_crq(vhost)) != NULL) { + ibmvfc_handle_async(async, vhost); + async->valid = 0; + } + + vio_enable_interrupts(vdev); + if ((crq = ibmvfc_next_crq(vhost)) != NULL) { + vio_disable_interrupts(vdev); + ibmvfc_handle_crq(crq, vhost); + crq->valid = 0; + } else if ((async = ibmvfc_next_async_crq(vhost)) != NULL) { + vio_disable_interrupts(vdev); + ibmvfc_handle_async(async, vhost); + async->valid = 0; + } else + done = 1; + } + + spin_unlock_irqrestore(vhost->host->host_lock, flags); + return IRQ_HANDLED; +} + +/** + * ibmvfc_init_tgt - Set the next init job step for the target + * @tgt: ibmvfc target struct + * 
@job_step: job step to perform + * + **/ +static void ibmvfc_init_tgt(struct ibmvfc_target *tgt, + void (*job_step) (struct ibmvfc_target *)) +{ + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT); + tgt->job_step = job_step; + wake_up(&tgt->vhost->work_wait_q); +} + +/** + * ibmvfc_retry_tgt_init - Attempt to retry a step in target initialization + * @tgt: ibmvfc target struct + * @job_step: initialization job step + * + **/ +static void ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt, + void (*job_step) (struct ibmvfc_target *)) +{ + if (++tgt->init_retries > IBMVFC_MAX_INIT_RETRIES) { + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + wake_up(&tgt->vhost->work_wait_q); + } else + ibmvfc_init_tgt(tgt, job_step); +} + +/** + * ibmvfc_release_tgt - Free memory allocated for a target + * @kref: kref struct + * + **/ +static void ibmvfc_release_tgt(struct kref *kref) +{ + struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref); + kfree(tgt); +} + +/** + * ibmvfc_tgt_prli_done - Completion handler for Process Login + * @evt: ibmvfc event struct + * + **/ +static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt) +{ + struct ibmvfc_target *tgt = evt->tgt; + struct ibmvfc_host *vhost = evt->vhost; + struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli; + u32 status = rsp->common.status; + + vhost->discovery_threads--; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); + switch (status) { + case IBMVFC_MAD_SUCCESS: + tgt_dbg(tgt, "Process Login succeeded\n"); + tgt->need_login = 0; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_ADD_RPORT); + break; + case IBMVFC_MAD_DRIVER_FAILED: + break; + case IBMVFC_MAD_CRQ_ERROR: + ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli); + break; + case IBMVFC_MAD_FAILED: + default: + tgt_err(tgt, "Process Login failed: %s (%x:%x) rc=0x%02X\n", + ibmvfc_get_cmd_error(rsp->status, rsp->error), + rsp->status, rsp->error, status); + if (ibmvfc_retry_cmd(rsp->status, rsp->error)) + ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli); + break; + }; + + kref_put(&tgt->kref, ibmvfc_release_tgt); + ibmvfc_free_event(evt); + wake_up(&vhost->work_wait_q); +} + +/** + * ibmvfc_tgt_send_prli - Send a process login + * @tgt: ibmvfc target struct + * + **/ +static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt) +{ + struct ibmvfc_process_login *prli; + struct ibmvfc_host *vhost = tgt->vhost; + struct ibmvfc_event *evt; + + if (vhost->discovery_threads >= disc_threads) + return; + + kref_get(&tgt->kref); + evt = ibmvfc_get_event(vhost); + vhost->discovery_threads++; + ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT); + evt->tgt = tgt; + prli = &evt->iu.prli; + memset(prli, 0, sizeof(*prli)); + prli->common.version = 1; + prli->common.opcode = IBMVFC_PROCESS_LOGIN; + prli->common.length = sizeof(*prli); + prli->scsi_id = tgt->scsi_id; + + prli->parms.type = IBMVFC_SCSI_FCP_TYPE; + prli->parms.flags = IBMVFC_PRLI_EST_IMG_PAIR; + prli->parms.service_parms = IBMVFC_PRLI_INITIATOR_FUNC; + + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT); + if (ibmvfc_send_event(evt, vhost, default_timeout)) { + vhost->discovery_threads--; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); + kref_put(&tgt->kref, ibmvfc_release_tgt); + } else + tgt_dbg(tgt, "Sent process login\n"); +} + +/** + * ibmvfc_tgt_plogi_done - Completion handler for Port Login + * @evt: ibmvfc event struct + * + **/ +static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt) +{ + struct ibmvfc_target *tgt = evt->tgt; + struct ibmvfc_host *vhost = evt->vhost; + 
struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi; + u32 status = rsp->common.status; + + vhost->discovery_threads--; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); + switch (status) { + case IBMVFC_MAD_SUCCESS: + tgt_dbg(tgt, "Port Login succeeded\n"); + if (tgt->ids.port_name && + tgt->ids.port_name != wwn_to_u64(rsp->service_parms.port_name)) { + vhost->reinit = 1; + tgt_dbg(tgt, "Port re-init required\n"); + break; + } + tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name); + tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name); + tgt->ids.port_id = tgt->scsi_id; + tgt->ids.roles = FC_PORT_ROLE_FCP_TARGET; + memcpy(&tgt->service_parms, &rsp->service_parms, + sizeof(tgt->service_parms)); + memcpy(&tgt->service_parms_change, &rsp->service_parms_change, + sizeof(tgt->service_parms_change)); + ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli); + break; + case IBMVFC_MAD_DRIVER_FAILED: + break; + case IBMVFC_MAD_CRQ_ERROR: + ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); + break; + case IBMVFC_MAD_FAILED: + default: + tgt_err(tgt, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", + ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error, + ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type, + ibmvfc_get_ls_explain(rsp->fc_explain), rsp->fc_explain, status); + + if (ibmvfc_retry_cmd(rsp->status, rsp->error)) + ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); + break; + }; + + kref_put(&tgt->kref, ibmvfc_release_tgt); + ibmvfc_free_event(evt); + wake_up(&vhost->work_wait_q); +} + +/** + * ibmvfc_tgt_send_plogi - Send PLOGI to the specified target + * @tgt: ibmvfc target struct + * + **/ +static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt) +{ + struct ibmvfc_port_login *plogi; + struct ibmvfc_host *vhost = tgt->vhost; + struct ibmvfc_event *evt; + + if (vhost->discovery_threads >= disc_threads) + return; + + kref_get(&tgt->kref); + evt = ibmvfc_get_event(vhost); + vhost->discovery_threads++; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT); + ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT); + evt->tgt = tgt; + plogi = &evt->iu.plogi; + memset(plogi, 0, sizeof(*plogi)); + plogi->common.version = 1; + plogi->common.opcode = IBMVFC_PORT_LOGIN; + plogi->common.length = sizeof(*plogi); + plogi->scsi_id = tgt->scsi_id; + + if (ibmvfc_send_event(evt, vhost, default_timeout)) { + vhost->discovery_threads--; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); + kref_put(&tgt->kref, ibmvfc_release_tgt); + } else + tgt_dbg(tgt, "Sent port login\n"); +} + +/** + * ibmvfc_tgt_implicit_logout_done - Completion handler for Implicit Logout MAD + * @evt: ibmvfc event struct + * + **/ +static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt) +{ + struct ibmvfc_target *tgt = evt->tgt; + struct ibmvfc_host *vhost = evt->vhost; + struct ibmvfc_implicit_logout *rsp = &evt->xfer_iu->implicit_logout; + u32 status = rsp->common.status; + + vhost->discovery_threads--; + ibmvfc_free_event(evt); + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); + + switch (status) { + case IBMVFC_MAD_SUCCESS: + tgt_dbg(tgt, "Implicit Logout succeeded\n"); + break; + case IBMVFC_MAD_DRIVER_FAILED: + kref_put(&tgt->kref, ibmvfc_release_tgt); + wake_up(&vhost->work_wait_q); + return; + case IBMVFC_MAD_FAILED: + default: + tgt_err(tgt, "Implicit Logout failed: rc=0x%02X\n", status); + break; + }; + + if (vhost->action == IBMVFC_HOST_ACTION_TGT_INIT) + ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi); + else if (vhost->action == 
IBMVFC_HOST_ACTION_QUERY_TGTS && + tgt->scsi_id != tgt->new_scsi_id) + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + kref_put(&tgt->kref, ibmvfc_release_tgt); + wake_up(&vhost->work_wait_q); +} + +/** + * ibmvfc_tgt_implicit_logout - Initiate an Implicit Logout for specified target + * @tgt: ibmvfc target struct + * + **/ +static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt) +{ + struct ibmvfc_implicit_logout *mad; + struct ibmvfc_host *vhost = tgt->vhost; + struct ibmvfc_event *evt; + + if (vhost->discovery_threads >= disc_threads) + return; + + kref_get(&tgt->kref); + evt = ibmvfc_get_event(vhost); + vhost->discovery_threads++; + ibmvfc_init_event(evt, ibmvfc_tgt_implicit_logout_done, IBMVFC_MAD_FORMAT); + evt->tgt = tgt; + mad = &evt->iu.implicit_logout; + memset(mad, 0, sizeof(*mad)); + mad->common.version = 1; + mad->common.opcode = IBMVFC_IMPLICIT_LOGOUT; + mad->common.length = sizeof(*mad); + mad->old_scsi_id = tgt->scsi_id; + + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT); + if (ibmvfc_send_event(evt, vhost, default_timeout)) { + vhost->discovery_threads--; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); + kref_put(&tgt->kref, ibmvfc_release_tgt); + } else + tgt_dbg(tgt, "Sent Implicit Logout\n"); +} + +/** + * ibmvfc_tgt_query_target_done - Completion handler for Query Target MAD + * @evt: ibmvfc event struct + * + **/ +static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt) +{ + struct ibmvfc_target *tgt = evt->tgt; + struct ibmvfc_host *vhost = evt->vhost; + struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt; + u32 status = rsp->common.status; + + vhost->discovery_threads--; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); + switch (status) { + case IBMVFC_MAD_SUCCESS: + tgt_dbg(tgt, "Query Target succeeded\n"); + tgt->new_scsi_id = rsp->scsi_id; + if (rsp->scsi_id != tgt->scsi_id) + ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout); + break; + case IBMVFC_MAD_DRIVER_FAILED: + break; + case IBMVFC_MAD_CRQ_ERROR: + ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target); + break; + case IBMVFC_MAD_FAILED: + default: + tgt_err(tgt, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", + ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error, + ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type, + ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status); + + if ((rsp->status & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED && + rsp->error == IBMVFC_UNABLE_TO_PERFORM_REQ && + rsp->fc_explain == IBMVFC_PORT_NAME_NOT_REG) + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + else if (ibmvfc_retry_cmd(rsp->status, rsp->error)) + ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target); + break; + }; + + kref_put(&tgt->kref, ibmvfc_release_tgt); + ibmvfc_free_event(evt); + wake_up(&vhost->work_wait_q); +} + +/** + * ibmvfc_tgt_query_target - Initiate a Query Target for specified target + * @tgt: ibmvfc target struct + * + **/ +static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt) +{ + struct ibmvfc_query_tgt *query_tgt; + struct ibmvfc_host *vhost = tgt->vhost; + struct ibmvfc_event *evt; + + if (vhost->discovery_threads >= disc_threads) + return; + + kref_get(&tgt->kref); + evt = ibmvfc_get_event(vhost); + vhost->discovery_threads++; + evt->tgt = tgt; + ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT); + query_tgt = &evt->iu.query_tgt; + memset(query_tgt, 0, sizeof(*query_tgt)); + query_tgt->common.version = 1; + query_tgt->common.opcode = IBMVFC_QUERY_TARGET; + 
query_tgt->common.length = sizeof(*query_tgt); + query_tgt->wwpn = tgt->ids.port_name; + + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT); + if (ibmvfc_send_event(evt, vhost, default_timeout)) { + vhost->discovery_threads--; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); + kref_put(&tgt->kref, ibmvfc_release_tgt); + } else + tgt_dbg(tgt, "Sent Query Target\n"); +} + +/** + * ibmvfc_alloc_target - Allocate and initialize an ibmvfc target + * @vhost: ibmvfc host struct + * @scsi_id: SCSI ID to allocate target for + * + * Returns: + * 0 on success / other on failure + **/ +static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id) +{ + struct ibmvfc_target *tgt; + unsigned long flags; + + spin_lock_irqsave(vhost->host->host_lock, flags); + list_for_each_entry(tgt, &vhost->targets, queue) { + if (tgt->scsi_id == scsi_id) { + if (tgt->need_login) + ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout); + goto unlock_out; + } + } + spin_unlock_irqrestore(vhost->host->host_lock, flags); + + tgt = mempool_alloc(vhost->tgt_pool, GFP_KERNEL); + if (!tgt) { + dev_err(vhost->dev, "Target allocation failure for scsi id %08lx\n", + scsi_id); + return -ENOMEM; + } + + tgt->scsi_id = scsi_id; + tgt->new_scsi_id = scsi_id; + tgt->vhost = vhost; + tgt->need_login = 1; + kref_init(&tgt->kref); + ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout); + spin_lock_irqsave(vhost->host->host_lock, flags); + list_add_tail(&tgt->queue, &vhost->targets); + +unlock_out: + spin_unlock_irqrestore(vhost->host->host_lock, flags); + return 0; +} + +/** + * ibmvfc_alloc_targets - Allocate and initialize ibmvfc targets + * @vhost: ibmvfc host struct + * + * Returns: + * 0 on success / other on failure + **/ +static int ibmvfc_alloc_targets(struct ibmvfc_host *vhost) +{ + int i, rc; + + for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++) + rc = ibmvfc_alloc_target(vhost, + vhost->disc_buf->scsi_id[i] & IBMVFC_DISC_TGT_SCSI_ID_MASK); + + return rc; +} + +/** + * ibmvfc_discover_targets_done - Completion handler for discover targets MAD + * @evt: ibmvfc event struct + * + **/ +static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt) +{ + struct ibmvfc_host *vhost = evt->vhost; + struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets; + u32 mad_status = rsp->common.status; + + switch (mad_status) { + case IBMVFC_MAD_SUCCESS: + ibmvfc_dbg(vhost, "Discover Targets succeeded\n"); + vhost->num_targets = rsp->num_written; + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS); + break; + case IBMVFC_MAD_FAILED: + dev_err(vhost->dev, "Discover Targets failed: %s (%x:%x)\n", + ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error); + ibmvfc_retry_host_init(vhost); + break; + case IBMVFC_MAD_DRIVER_FAILED: + break; + default: + dev_err(vhost->dev, "Invalid Discover Targets response: 0x%x\n", mad_status); + ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); + break; + } + + ibmvfc_free_event(evt); + wake_up(&vhost->work_wait_q); +} + +/** + * ibmvfc_discover_targets - Send Discover Targets MAD + * @vhost: ibmvfc host struct + * + **/ +static void ibmvfc_discover_targets(struct ibmvfc_host *vhost) +{ + struct ibmvfc_discover_targets *mad; + struct ibmvfc_event *evt = ibmvfc_get_event(vhost); + + ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT); + mad = &evt->iu.discover_targets; + memset(mad, 0, sizeof(*mad)); + mad->common.version = 1; + mad->common.opcode = IBMVFC_DISC_TARGETS; + mad->common.length = sizeof(*mad); + mad->bufflen = vhost->disc_buf_sz; 
+ mad->buffer.va = vhost->disc_buf_dma; + mad->buffer.len = vhost->disc_buf_sz; + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT); + + if (!ibmvfc_send_event(evt, vhost, default_timeout)) + ibmvfc_dbg(vhost, "Sent discover targets\n"); + else + ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); +} + +/** + * ibmvfc_npiv_login_done - Completion handler for NPIV Login + * @evt: ibmvfc event struct + * + **/ +static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt) +{ + struct ibmvfc_host *vhost = evt->vhost; + u32 mad_status = evt->xfer_iu->npiv_login.common.status; + struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp; + unsigned int npiv_max_sectors; + + switch (mad_status) { + case IBMVFC_MAD_SUCCESS: + ibmvfc_free_event(evt); + break; + case IBMVFC_MAD_FAILED: + dev_err(vhost->dev, "NPIV Login failed: %s (%x:%x)\n", + ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error); + if (ibmvfc_retry_cmd(rsp->status, rsp->error)) + ibmvfc_retry_host_init(vhost); + else + ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); + ibmvfc_free_event(evt); + return; + case IBMVFC_MAD_CRQ_ERROR: + ibmvfc_retry_host_init(vhost); + case IBMVFC_MAD_DRIVER_FAILED: + ibmvfc_free_event(evt); + return; + default: + dev_err(vhost->dev, "Invalid NPIV Login response: 0x%x\n", mad_status); + ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); + ibmvfc_free_event(evt); + return; + } + + vhost->client_migrated = 0; + + if (!(rsp->flags & IBMVFC_NATIVE_FC)) { + dev_err(vhost->dev, "Virtual adapter does not support FC. %x\n", + rsp->flags); + ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); + wake_up(&vhost->work_wait_q); + return; + } + + if (rsp->max_cmds <= IBMVFC_NUM_INTERNAL_REQ) { + dev_err(vhost->dev, "Virtual adapter supported queue depth too small: %d\n", + rsp->max_cmds); + ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); + wake_up(&vhost->work_wait_q); + return; + } + + npiv_max_sectors = min((uint)(rsp->max_dma_len >> 9), IBMVFC_MAX_SECTORS); + dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n", + rsp->partition_name, rsp->device_name, rsp->port_loc_code, + rsp->drc_name, npiv_max_sectors); + + fc_host_fabric_name(vhost->host) = rsp->node_name; + fc_host_node_name(vhost->host) = rsp->node_name; + fc_host_port_name(vhost->host) = rsp->port_name; + fc_host_port_id(vhost->host) = rsp->scsi_id; + fc_host_port_type(vhost->host) = FC_PORTTYPE_NPIV; + fc_host_supported_classes(vhost->host) = 0; + if (rsp->service_parms.class1_parms[0] & 0x80000000) + fc_host_supported_classes(vhost->host) |= FC_COS_CLASS1; + if (rsp->service_parms.class2_parms[0] & 0x80000000) + fc_host_supported_classes(vhost->host) |= FC_COS_CLASS2; + if (rsp->service_parms.class3_parms[0] & 0x80000000) + fc_host_supported_classes(vhost->host) |= FC_COS_CLASS3; + fc_host_maxframe_size(vhost->host) = + rsp->service_parms.common.bb_rcv_sz & 0x0fff; + + vhost->host->can_queue = rsp->max_cmds - IBMVFC_NUM_INTERNAL_REQ; + vhost->host->max_sectors = npiv_max_sectors; + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY); + wake_up(&vhost->work_wait_q); +} + +/** + * ibmvfc_npiv_login - Sends NPIV login + * @vhost: ibmvfc host struct + * + **/ +static void ibmvfc_npiv_login(struct ibmvfc_host *vhost) +{ + struct ibmvfc_npiv_login_mad *mad; + struct ibmvfc_event *evt = ibmvfc_get_event(vhost); + + ibmvfc_gather_partition_info(vhost); + ibmvfc_set_login_info(vhost); + ibmvfc_init_event(evt, ibmvfc_npiv_login_done, IBMVFC_MAD_FORMAT); + + memcpy(vhost->login_buf, &vhost->login_info, sizeof(vhost->login_info)); + mad = 
&evt->iu.npiv_login; + memset(mad, 0, sizeof(struct ibmvfc_npiv_login_mad)); + mad->common.version = 1; + mad->common.opcode = IBMVFC_NPIV_LOGIN; + mad->common.length = sizeof(struct ibmvfc_npiv_login_mad); + mad->buffer.va = vhost->login_buf_dma; + mad->buffer.len = sizeof(*vhost->login_buf); + + memset(vhost->async_crq.msgs, 0, PAGE_SIZE); + vhost->async_crq.cur = 0; + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT); + + if (!ibmvfc_send_event(evt, vhost, default_timeout)) + ibmvfc_dbg(vhost, "Sent NPIV login\n"); + else + ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); +}; + +/** + * ibmvfc_dev_init_to_do - Is there target initialization work to do? + * @vhost: ibmvfc host struct + * + * Returns: + * 1 if work to do / 0 if not + **/ +static int ibmvfc_dev_init_to_do(struct ibmvfc_host *vhost) +{ + struct ibmvfc_target *tgt; + + list_for_each_entry(tgt, &vhost->targets, queue) { + if (tgt->action == IBMVFC_TGT_ACTION_INIT || + tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT) + return 1; + } + + return 0; +} + +/** + * __ibmvfc_work_to_do - Is there task level work to do? (no locking) + * @vhost: ibmvfc host struct + * + * Returns: + * 1 if work to do / 0 if not + **/ +static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost) +{ + struct ibmvfc_target *tgt; + + if (kthread_should_stop()) + return 1; + switch (vhost->action) { + case IBMVFC_HOST_ACTION_NONE: + case IBMVFC_HOST_ACTION_INIT_WAIT: + return 0; + case IBMVFC_HOST_ACTION_TGT_INIT: + case IBMVFC_HOST_ACTION_QUERY_TGTS: + if (vhost->discovery_threads == disc_threads) + return 0; + list_for_each_entry(tgt, &vhost->targets, queue) + if (tgt->action == IBMVFC_TGT_ACTION_INIT) + return 1; + list_for_each_entry(tgt, &vhost->targets, queue) + if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT) + return 0; + return 1; + case IBMVFC_HOST_ACTION_INIT: + case IBMVFC_HOST_ACTION_ALLOC_TGTS: + case IBMVFC_HOST_ACTION_TGT_ADD: + case IBMVFC_HOST_ACTION_TGT_DEL: + case IBMVFC_HOST_ACTION_QUERY: + default: + break; + }; + + return 1; +} + +/** + * ibmvfc_work_to_do - Is there task level work to do? 
+ * @vhost: ibmvfc host struct + * + * Returns: + * 1 if work to do / 0 if not + **/ +static int ibmvfc_work_to_do(struct ibmvfc_host *vhost) +{ + unsigned long flags; + int rc; + + spin_lock_irqsave(vhost->host->host_lock, flags); + rc = __ibmvfc_work_to_do(vhost); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + return rc; +} + +/** + * ibmvfc_log_ae - Log async events if necessary + * @vhost: ibmvfc host struct + * @events: events to log + * + **/ +static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events) +{ + if (events & IBMVFC_AE_RSCN) + fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_RSCN, 0); + if ((events & IBMVFC_AE_LINKDOWN) && + vhost->state >= IBMVFC_HALTED) + fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKDOWN, 0); + if ((events & IBMVFC_AE_LINKUP) && + vhost->state == IBMVFC_INITIALIZING) + fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKUP, 0); +} + +/** + * ibmvfc_tgt_add_rport - Tell the FC transport about a new remote port + * @tgt: ibmvfc target struct + * + **/ +static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt) +{ + struct ibmvfc_host *vhost = tgt->vhost; + struct fc_rport *rport; + unsigned long flags; + + tgt_dbg(tgt, "Adding rport\n"); + rport = fc_remote_port_add(vhost->host, 0, &tgt->ids); + spin_lock_irqsave(vhost->host->host_lock, flags); + tgt->rport = rport; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); + if (rport) { + tgt_dbg(tgt, "rport add succeeded\n"); + rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff; + rport->supported_classes = 0; + if (tgt->service_parms.class1_parms[0] & 0x80000000) + rport->supported_classes |= FC_COS_CLASS1; + if (tgt->service_parms.class2_parms[0] & 0x80000000) + rport->supported_classes |= FC_COS_CLASS2; + if (tgt->service_parms.class3_parms[0] & 0x80000000) + rport->supported_classes |= FC_COS_CLASS3; + } else + tgt_dbg(tgt, "rport add failed\n"); + spin_unlock_irqrestore(vhost->host->host_lock, flags); +} + +/** + * ibmvfc_do_work - Do task level work + * @vhost: ibmvfc host struct + * + **/ +static void ibmvfc_do_work(struct ibmvfc_host *vhost) +{ + struct ibmvfc_target *tgt; + unsigned long flags; + struct fc_rport *rport; + + ibmvfc_log_ae(vhost, vhost->events_to_log); + spin_lock_irqsave(vhost->host->host_lock, flags); + vhost->events_to_log = 0; + switch (vhost->action) { + case IBMVFC_HOST_ACTION_NONE: + case IBMVFC_HOST_ACTION_INIT_WAIT: + break; + case IBMVFC_HOST_ACTION_INIT: + BUG_ON(vhost->state != IBMVFC_INITIALIZING); + vhost->job_step(vhost); + break; + case IBMVFC_HOST_ACTION_QUERY: + list_for_each_entry(tgt, &vhost->targets, queue) + ibmvfc_init_tgt(tgt, ibmvfc_tgt_query_target); + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY_TGTS); + break; + case IBMVFC_HOST_ACTION_QUERY_TGTS: + list_for_each_entry(tgt, &vhost->targets, queue) { + if (tgt->action == IBMVFC_TGT_ACTION_INIT) { + tgt->job_step(tgt); + break; + } + } + + if (!ibmvfc_dev_init_to_do(vhost)) + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL); + break; + case IBMVFC_HOST_ACTION_TGT_DEL: + list_for_each_entry(tgt, &vhost->targets, queue) { + if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) { + tgt_dbg(tgt, "Deleting rport\n"); + rport = tgt->rport; + tgt->rport = NULL; + list_del(&tgt->queue); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + if (rport) + fc_remote_port_delete(rport); + kref_put(&tgt->kref, ibmvfc_release_tgt); + return; + } + } + + if (vhost->state == IBMVFC_INITIALIZING) { + 
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); + vhost->job_step = ibmvfc_discover_targets; + } else { + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + scsi_unblock_requests(vhost->host); + wake_up(&vhost->init_wait_q); + return; + } + break; + case IBMVFC_HOST_ACTION_ALLOC_TGTS: + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_INIT); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + ibmvfc_alloc_targets(vhost); + spin_lock_irqsave(vhost->host->host_lock, flags); + break; + case IBMVFC_HOST_ACTION_TGT_INIT: + list_for_each_entry(tgt, &vhost->targets, queue) { + if (tgt->action == IBMVFC_TGT_ACTION_INIT) { + tgt->job_step(tgt); + break; + } + } + + if (!ibmvfc_dev_init_to_do(vhost)) { + ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE); + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_ADD); + vhost->init_retries = 0; + spin_unlock_irqrestore(vhost->host->host_lock, flags); + scsi_unblock_requests(vhost->host); + return; + } + break; + case IBMVFC_HOST_ACTION_TGT_ADD: + list_for_each_entry(tgt, &vhost->targets, queue) { + if (tgt->action == IBMVFC_TGT_ACTION_ADD_RPORT) { + spin_unlock_irqrestore(vhost->host->host_lock, flags); + ibmvfc_tgt_add_rport(tgt); + return; + } else if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) { + tgt_dbg(tgt, "Deleting rport\n"); + rport = tgt->rport; + tgt->rport = NULL; + list_del(&tgt->queue); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + if (rport) + fc_remote_port_delete(rport); + kref_put(&tgt->kref, ibmvfc_release_tgt); + return; + } + } + + if (vhost->reinit) { + vhost->reinit = 0; + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY); + } else { + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); + wake_up(&vhost->init_wait_q); + } + break; + default: + break; + }; + + spin_unlock_irqrestore(vhost->host->host_lock, flags); +} + +/** + * ibmvfc_work - Do task level work + * @data: ibmvfc host struct + * + * Returns: + * zero + **/ +static int ibmvfc_work(void *data) +{ + struct ibmvfc_host *vhost = data; + int rc; + + set_user_nice(current, -20); + + while (1) { + rc = wait_event_interruptible(vhost->work_wait_q, + ibmvfc_work_to_do(vhost)); + + BUG_ON(rc); + + if (kthread_should_stop()) + break; + + ibmvfc_do_work(vhost); + } + + ibmvfc_dbg(vhost, "ibmvfc kthread exiting...\n"); + return 0; +} + +/** + * ibmvfc_init_crq - Initializes and registers CRQ with hypervisor + * @vhost: ibmvfc host struct + * + * Allocates a page for messages, maps it for dma, and registers + * the crq with the hypervisor. + * + * Return value: + * zero on success / other on failure + **/ +static int ibmvfc_init_crq(struct ibmvfc_host *vhost) +{ + int rc, retrc = -ENOMEM; + struct device *dev = vhost->dev; + struct vio_dev *vdev = to_vio_dev(dev); + struct ibmvfc_crq_queue *crq = &vhost->crq; + + ENTER; + crq->msgs = (struct ibmvfc_crq *)get_zeroed_page(GFP_KERNEL); + + if (!crq->msgs) + return -ENOMEM; + + crq->size = PAGE_SIZE / sizeof(*crq->msgs); + crq->msg_token = dma_map_single(dev, crq->msgs, + PAGE_SIZE, DMA_BIDIRECTIONAL); + + if (dma_mapping_error(crq->msg_token)) + goto map_failed; + + retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, + crq->msg_token, PAGE_SIZE); + + if (rc == H_RESOURCE) + /* maybe kexecing and resource is busy. 
try a reset */ + retrc = rc = ibmvfc_reset_crq(vhost); + + if (rc == H_CLOSED) + dev_warn(dev, "Partner adapter not ready\n"); + else if (rc) { + dev_warn(dev, "Error %d opening adapter\n", rc); + goto reg_crq_failed; + } + + retrc = 0; + + if ((rc = request_irq(vdev->irq, ibmvfc_interrupt, 0, IBMVFC_NAME, vhost))) { + dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", vdev->irq, rc); + goto req_irq_failed; + } + + if ((rc = vio_enable_interrupts(vdev))) { + dev_err(dev, "Error %d enabling interrupts\n", rc); + goto req_irq_failed; + } + + crq->cur = 0; + LEAVE; + return retrc; + +req_irq_failed: + do { + rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); + } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); +reg_crq_failed: + dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL); +map_failed: + free_page((unsigned long)crq->msgs); + return retrc; +} + +/** + * ibmvfc_free_mem - Free memory for vhost + * @vhost: ibmvfc host struct + * + * Return value: + * none + **/ +static void ibmvfc_free_mem(struct ibmvfc_host *vhost) +{ + struct ibmvfc_async_crq_queue *async_q = &vhost->async_crq; + + ENTER; + mempool_destroy(vhost->tgt_pool); + kfree(vhost->trace); + dma_free_coherent(vhost->dev, vhost->disc_buf_sz, vhost->disc_buf, + vhost->disc_buf_dma); + dma_free_coherent(vhost->dev, sizeof(*vhost->login_buf), + vhost->login_buf, vhost->login_buf_dma); + dma_pool_destroy(vhost->sg_pool); + dma_unmap_single(vhost->dev, async_q->msg_token, + async_q->size * sizeof(*async_q->msgs), DMA_BIDIRECTIONAL); + free_page((unsigned long)async_q->msgs); + LEAVE; +} + +/** + * ibmvfc_alloc_mem - Allocate memory for vhost + * @vhost: ibmvfc host struct + * + * Return value: + * 0 on success / non-zero on failure + **/ +static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost) +{ + struct ibmvfc_async_crq_queue *async_q = &vhost->async_crq; + struct device *dev = vhost->dev; + + ENTER; + async_q->msgs = (struct ibmvfc_async_crq *)get_zeroed_page(GFP_KERNEL); + if (!async_q->msgs) { + dev_err(dev, "Couldn't allocate async queue.\n"); + goto nomem; + } + + async_q->size = PAGE_SIZE / sizeof(struct ibmvfc_async_crq); + async_q->msg_token = dma_map_single(dev, async_q->msgs, + async_q->size * sizeof(*async_q->msgs), + DMA_BIDIRECTIONAL); + + if (dma_mapping_error(async_q->msg_token)) { + dev_err(dev, "Failed to map async queue\n"); + goto free_async_crq; + } + + vhost->sg_pool = dma_pool_create(IBMVFC_NAME, dev, + SG_ALL * sizeof(struct srp_direct_buf), + sizeof(struct srp_direct_buf), 0); + + if (!vhost->sg_pool) { + dev_err(dev, "Failed to allocate sg pool\n"); + goto unmap_async_crq; + } + + vhost->login_buf = dma_alloc_coherent(dev, sizeof(*vhost->login_buf), + &vhost->login_buf_dma, GFP_KERNEL); + + if (!vhost->login_buf) { + dev_err(dev, "Couldn't allocate NPIV login buffer\n"); + goto free_sg_pool; + } + + vhost->disc_buf_sz = sizeof(vhost->disc_buf->scsi_id[0]) * max_targets; + vhost->disc_buf = dma_alloc_coherent(dev, vhost->disc_buf_sz, + &vhost->disc_buf_dma, GFP_KERNEL); + + if (!vhost->disc_buf) { + dev_err(dev, "Couldn't allocate Discover Targets buffer\n"); + goto free_login_buffer; + } + + vhost->trace = kcalloc(IBMVFC_NUM_TRACE_ENTRIES, + sizeof(struct ibmvfc_trace_entry), GFP_KERNEL); + + if (!vhost->trace) + goto free_disc_buffer; + + vhost->tgt_pool = mempool_create_kzalloc_pool(IBMVFC_TGT_MEMPOOL_SZ, + sizeof(struct ibmvfc_target)); + + if (!vhost->tgt_pool) { + dev_err(dev, "Couldn't allocate target memory pool\n"); + goto free_trace; + } + + LEAVE; + return 0; + +free_trace: + 
kfree(vhost->trace); +free_disc_buffer: + dma_free_coherent(dev, vhost->disc_buf_sz, vhost->disc_buf, + vhost->disc_buf_dma); +free_login_buffer: + dma_free_coherent(dev, sizeof(*vhost->login_buf), + vhost->login_buf, vhost->login_buf_dma); +free_sg_pool: + dma_pool_destroy(vhost->sg_pool); +unmap_async_crq: + dma_unmap_single(dev, async_q->msg_token, + async_q->size * sizeof(*async_q->msgs), DMA_BIDIRECTIONAL); +free_async_crq: + free_page((unsigned long)async_q->msgs); +nomem: + LEAVE; + return -ENOMEM; +} + +/** + * ibmvfc_probe - Adapter hot plug add entry point + * @vdev: vio device struct + * @id: vio device id struct + * + * Return value: + * 0 on success / non-zero on failure + **/ +static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id) +{ + struct ibmvfc_host *vhost; + struct Scsi_Host *shost; + struct device *dev = &vdev->dev; + int rc = -ENOMEM; + + ENTER; + shost = scsi_host_alloc(&driver_template, sizeof(*vhost)); + if (!shost) { + dev_err(dev, "Couldn't allocate host data\n"); + goto out; + } + + shost->transportt = ibmvfc_transport_template; + shost->can_queue = max_requests; + shost->max_lun = max_lun; + shost->max_id = max_targets; + shost->max_sectors = IBMVFC_MAX_SECTORS; + shost->max_cmd_len = IBMVFC_MAX_CDB_LEN; + shost->unique_id = shost->host_no; + + vhost = shost_priv(shost); + INIT_LIST_HEAD(&vhost->sent); + INIT_LIST_HEAD(&vhost->free); + INIT_LIST_HEAD(&vhost->targets); + sprintf(vhost->name, IBMVFC_NAME); + vhost->host = shost; + vhost->dev = dev; + vhost->partition_number = -1; + vhost->log_level = log_level; + strcpy(vhost->partition_name, "UNKNOWN"); + init_waitqueue_head(&vhost->work_wait_q); + init_waitqueue_head(&vhost->init_wait_q); + + if ((rc = ibmvfc_alloc_mem(vhost))) + goto free_scsi_host; + + vhost->work_thread = kthread_run(ibmvfc_work, vhost, "%s_%d", IBMVFC_NAME, + shost->host_no); + + if (IS_ERR(vhost->work_thread)) { + dev_err(dev, "Couldn't create kernel thread: %ld\n", + PTR_ERR(vhost->work_thread)); + goto free_host_mem; + } + + if ((rc = ibmvfc_init_crq(vhost))) { + dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc); + goto kill_kthread; + } + + if ((rc = ibmvfc_init_event_pool(vhost))) { + dev_err(dev, "Couldn't initialize event pool. rc=%d\n", rc); + goto release_crq; + } + + if ((rc = scsi_add_host(shost, dev))) + goto release_event_pool; + + if ((rc = ibmvfc_create_trace_file(&shost->shost_dev.kobj, + &ibmvfc_trace_attr))) { + dev_err(dev, "Failed to create trace file. 
rc=%d\n", rc); + goto remove_shost; + } + + dev_set_drvdata(dev, vhost); + spin_lock(&ibmvfc_driver_lock); + list_add_tail(&vhost->queue, &ibmvfc_head); + spin_unlock(&ibmvfc_driver_lock); + + ibmvfc_send_crq_init(vhost); + scsi_scan_host(shost); + return 0; + +remove_shost: + scsi_remove_host(shost); +release_event_pool: + ibmvfc_free_event_pool(vhost); +release_crq: + ibmvfc_release_crq_queue(vhost); +kill_kthread: + kthread_stop(vhost->work_thread); +free_host_mem: + ibmvfc_free_mem(vhost); +free_scsi_host: + scsi_host_put(shost); +out: + LEAVE; + return rc; +} + +/** + * ibmvfc_remove - Adapter hot plug remove entry point + * @vdev: vio device struct + * + * Return value: + * 0 + **/ +static int ibmvfc_remove(struct vio_dev *vdev) +{ + struct ibmvfc_host *vhost = dev_get_drvdata(&vdev->dev); + unsigned long flags; + + ENTER; + ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr); + kthread_stop(vhost->work_thread); + fc_remove_host(vhost->host); + scsi_remove_host(vhost->host); + ibmvfc_release_crq_queue(vhost); + + spin_lock_irqsave(vhost->host->host_lock, flags); + ibmvfc_purge_requests(vhost, DID_ERROR); + ibmvfc_free_event_pool(vhost); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + + ibmvfc_free_mem(vhost); + spin_lock(&ibmvfc_driver_lock); + list_del(&vhost->queue); + spin_unlock(&ibmvfc_driver_lock); + scsi_host_put(vhost->host); + LEAVE; + return 0; +} + +static struct vio_device_id ibmvfc_device_table[] __devinitdata = { + {"fcp", "IBM,vfc-client"}, + { "", "" } +}; +MODULE_DEVICE_TABLE(vio, ibmvfc_device_table); + +static struct vio_driver ibmvfc_driver = { + .id_table = ibmvfc_device_table, + .probe = ibmvfc_probe, + .remove = ibmvfc_remove, + .driver = { + .name = IBMVFC_NAME, + .owner = THIS_MODULE, + } +}; + +static struct fc_function_template ibmvfc_transport_functions = { + .show_host_fabric_name = 1, + .show_host_node_name = 1, + .show_host_port_name = 1, + .show_host_supported_classes = 1, + .show_host_port_type = 1, + .show_host_port_id = 1, + + .get_host_port_state = ibmvfc_get_host_port_state, + .show_host_port_state = 1, + + .get_host_speed = ibmvfc_get_host_speed, + .show_host_speed = 1, + + .issue_fc_host_lip = ibmvfc_issue_fc_host_lip, + .terminate_rport_io = ibmvfc_terminate_rport_io, + + .show_rport_maxframe_size = 1, + .show_rport_supported_classes = 1, + + .set_rport_dev_loss_tmo = ibmvfc_set_rport_dev_loss_tmo, + .show_rport_dev_loss_tmo = 1, + + .get_starget_node_name = ibmvfc_get_starget_node_name, + .show_starget_node_name = 1, + + .get_starget_port_name = ibmvfc_get_starget_port_name, + .show_starget_port_name = 1, + + .get_starget_port_id = ibmvfc_get_starget_port_id, + .show_starget_port_id = 1, +}; + +/** + * ibmvfc_module_init - Initialize the ibmvfc module + * + * Return value: + * 0 on success / other on failure + **/ +static int __init ibmvfc_module_init(void) +{ + int rc; + + if (!firmware_has_feature(FW_FEATURE_VIO)) + return -ENODEV; + + printk(KERN_INFO IBMVFC_NAME": IBM Virtual Fibre Channel Driver version: %s %s\n", + IBMVFC_DRIVER_VERSION, IBMVFC_DRIVER_DATE); + + ibmvfc_transport_template = fc_attach_transport(&ibmvfc_transport_functions); + if (!ibmvfc_transport_template) + return -ENOMEM; + + rc = vio_register_driver(&ibmvfc_driver); + if (rc) + fc_release_transport(ibmvfc_transport_template); + return rc; +} + +/** + * ibmvfc_module_exit - Teardown the ibmvfc module + * + * Return value: + * nothing + **/ +static void __exit ibmvfc_module_exit(void) +{ + vio_unregister_driver(&ibmvfc_driver); + 
fc_release_transport(ibmvfc_transport_template); +} + +module_init(ibmvfc_module_init); +module_exit(ibmvfc_module_exit); diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h new file mode 100644 index 000000000000..057f3c01ed61 --- /dev/null +++ b/drivers/scsi/ibmvscsi/ibmvfc.h @@ -0,0 +1,682 @@ +/* + * ibmvfc.h -- driver for IBM Power Virtual Fibre Channel Adapter + * + * Written By: Brian King <brking@linux.vnet.ibm.com>, IBM Corporation + * + * Copyright (C) IBM Corporation, 2008 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#ifndef _IBMVFC_H +#define _IBMVFC_H + +#include <linux/list.h> +#include <linux/types.h> +#include "viosrp.h" + +#define IBMVFC_NAME "ibmvfc" +#define IBMVFC_DRIVER_VERSION "1.0.0" +#define IBMVFC_DRIVER_DATE "(July 1, 2008)" + +#define IBMVFC_DEFAULT_TIMEOUT 15 +#define IBMVFC_INIT_TIMEOUT 30 +#define IBMVFC_MAX_REQUESTS_DEFAULT 100 + +#define IBMVFC_DEBUG 0 +#define IBMVFC_MAX_TARGETS 1024 +#define IBMVFC_MAX_LUN 0xffffffff +#define IBMVFC_MAX_SECTORS 0xffffu +#define IBMVFC_MAX_DISC_THREADS 4 +#define IBMVFC_TGT_MEMPOOL_SZ 64 +#define IBMVFC_MAX_CMDS_PER_LUN 64 +#define IBMVFC_MAX_INIT_RETRIES 3 +#define IBMVFC_DEV_LOSS_TMO (5 * 60) +#define IBMVFC_DEFAULT_LOG_LEVEL 2 +#define IBMVFC_MAX_CDB_LEN 16 + +/* + * Ensure we have resources for ERP and initialization: + * 1 for ERP + * 1 for initialization + * 1 for each discovery thread + */ +#define IBMVFC_NUM_INTERNAL_REQ (1 + 1 + disc_threads) + +#define IBMVFC_MAD_SUCCESS 0x00 +#define IBMVFC_MAD_NOT_SUPPORTED 0xF1 +#define IBMVFC_MAD_FAILED 0xF7 +#define IBMVFC_MAD_DRIVER_FAILED 0xEE +#define IBMVFC_MAD_CRQ_ERROR 0xEF + +enum ibmvfc_crq_valid { + IBMVFC_CRQ_CMD_RSP = 0x80, + IBMVFC_CRQ_INIT_RSP = 0xC0, + IBMVFC_CRQ_XPORT_EVENT = 0xFF, +}; + +enum ibmvfc_crq_format { + IBMVFC_CRQ_INIT = 0x01, + IBMVFC_CRQ_INIT_COMPLETE = 0x02, + IBMVFC_PARTITION_MIGRATED = 0x06, +}; + +enum ibmvfc_cmd_status_flags { + IBMVFC_FABRIC_MAPPED = 0x0001, + IBMVFC_VIOS_FAILURE = 0x0002, + IBMVFC_FC_FAILURE = 0x0004, + IBMVFC_FC_SCSI_ERROR = 0x0008, + IBMVFC_HW_EVENT_LOGGED = 0x0010, + IBMVFC_VIOS_LOGGED = 0x0020, +}; + +enum ibmvfc_fabric_mapped_errors { + IBMVFC_UNABLE_TO_ESTABLISH = 0x0001, + IBMVFC_XPORT_FAULT = 0x0002, + IBMVFC_CMD_TIMEOUT = 0x0003, + IBMVFC_ENETDOWN = 0x0004, + IBMVFC_HW_FAILURE = 0x0005, + IBMVFC_LINK_DOWN_ERR = 0x0006, + IBMVFC_LINK_DEAD_ERR = 0x0007, + IBMVFC_UNABLE_TO_REGISTER = 0x0008, + IBMVFC_XPORT_BUSY = 0x000A, + IBMVFC_XPORT_DEAD = 0x000B, + IBMVFC_CONFIG_ERROR = 0x000C, + IBMVFC_NAME_SERVER_FAIL = 0x000D, + IBMVFC_LINK_HALTED = 0x000E, + IBMVFC_XPORT_GENERAL = 0x8000, +}; + +enum ibmvfc_vios_errors { + IBMVFC_CRQ_FAILURE = 0x0001, + IBMVFC_SW_FAILURE = 0x0002, + IBMVFC_INVALID_PARAMETER = 0x0003, + IBMVFC_MISSING_PARAMETER = 0x0004, + IBMVFC_HOST_IO_BUS = 0x0005, + IBMVFC_TRANS_CANCELLED = 0x0006, + 
IBMVFC_TRANS_CANCELLED_IMPLICIT = 0x0007, + IBMVFC_INSUFFICIENT_RESOURCE = 0x0008, + IBMVFC_COMMAND_FAILED = 0x8000, +}; + +enum ibmvfc_mad_types { + IBMVFC_NPIV_LOGIN = 0x0001, + IBMVFC_DISC_TARGETS = 0x0002, + IBMVFC_PORT_LOGIN = 0x0004, + IBMVFC_PROCESS_LOGIN = 0x0008, + IBMVFC_QUERY_TARGET = 0x0010, + IBMVFC_IMPLICIT_LOGOUT = 0x0040, + IBMVFC_TMF_MAD = 0x0100, +}; + +struct ibmvfc_mad_common { + u32 version; + u32 reserved; + u32 opcode; + u16 status; + u16 length; + u64 tag; +}__attribute__((packed, aligned (8))); + +struct ibmvfc_npiv_login_mad { + struct ibmvfc_mad_common common; + struct srp_direct_buf buffer; +}__attribute__((packed, aligned (8))); + +#define IBMVFC_MAX_NAME 256 + +struct ibmvfc_npiv_login { + u32 ostype; +#define IBMVFC_OS_LINUX 0x02 + u32 pad; + u64 max_dma_len; + u32 max_payload; + u32 max_response; + u32 partition_num; + u32 vfc_frame_version; + u16 fcp_version; + u16 flags; +#define IBMVFC_CLIENT_MIGRATED 0x01 +#define IBMVFC_FLUSH_ON_HALT 0x02 + u32 max_cmds; + u64 capabilities; +#define IBMVFC_CAN_MIGRATE 0x01 + u64 node_name; + struct srp_direct_buf async; + u8 partition_name[IBMVFC_MAX_NAME]; + u8 device_name[IBMVFC_MAX_NAME]; + u8 drc_name[IBMVFC_MAX_NAME]; + u64 reserved2[2]; +}__attribute__((packed, aligned (8))); + +struct ibmvfc_common_svc_parms { + u16 fcph_version; + u16 b2b_credit; + u16 features; + u16 bb_rcv_sz; /* upper nibble is BB_SC_N */ + u32 ratov; + u32 edtov; +}__attribute__((packed, aligned (4))); + +struct ibmvfc_service_parms { + struct ibmvfc_common_svc_parms common; + u8 port_name[8]; + u8 node_name[8]; + u32 class1_parms[4]; + u32 class2_parms[4]; + u32 class3_parms[4]; + u32 obsolete[4]; + u32 vendor_version[4]; + u32 services_avail[2]; + u32 ext_len; + u32 reserved[30]; + u32 clk_sync_qos[2]; +}__attribute__((packed, aligned (4))); + +struct ibmvfc_npiv_login_resp { + u32 version; + u16 status; + u16 error; + u32 flags; +#define IBMVFC_NATIVE_FC 0x01 +#define IBMVFC_CAN_FLUSH_ON_HALT 0x08 + u32 reserved; + u64 capabilites; + u32 max_cmds; + u32 scsi_id_sz; + u64 max_dma_len; + u64 scsi_id; + u64 port_name; + u64 node_name; + u64 link_speed; + u8 partition_name[IBMVFC_MAX_NAME]; + u8 device_name[IBMVFC_MAX_NAME]; + u8 port_loc_code[IBMVFC_MAX_NAME]; + u8 drc_name[IBMVFC_MAX_NAME]; + struct ibmvfc_service_parms service_parms; + u64 reserved2; +}__attribute__((packed, aligned (8))); + +union ibmvfc_npiv_login_data { + struct ibmvfc_npiv_login login; + struct ibmvfc_npiv_login_resp resp; +}__attribute__((packed, aligned (8))); + +struct ibmvfc_discover_targets_buf { + u32 scsi_id[1]; +#define IBMVFC_DISC_TGT_SCSI_ID_MASK 0x00ffffff +}; + +struct ibmvfc_discover_targets { + struct ibmvfc_mad_common common; + struct srp_direct_buf buffer; + u32 flags; + u16 status; + u16 error; + u32 bufflen; + u32 num_avail; + u32 num_written; + u64 reserved[2]; +}__attribute__((packed, aligned (8))); + +enum ibmvfc_fc_reason { + IBMVFC_INVALID_ELS_CMD_CODE = 0x01, + IBMVFC_INVALID_VERSION = 0x02, + IBMVFC_LOGICAL_ERROR = 0x03, + IBMVFC_INVALID_CT_IU_SIZE = 0x04, + IBMVFC_LOGICAL_BUSY = 0x05, + IBMVFC_PROTOCOL_ERROR = 0x07, + IBMVFC_UNABLE_TO_PERFORM_REQ = 0x09, + IBMVFC_CMD_NOT_SUPPORTED = 0x0B, + IBMVFC_SERVER_NOT_AVAIL = 0x0D, + IBMVFC_CMD_IN_PROGRESS = 0x0E, + IBMVFC_VENDOR_SPECIFIC = 0xFF, +}; + +enum ibmvfc_fc_type { + IBMVFC_FABRIC_REJECT = 0x01, + IBMVFC_PORT_REJECT = 0x02, + IBMVFC_LS_REJECT = 0x03, + IBMVFC_FABRIC_BUSY = 0x04, + IBMVFC_PORT_BUSY = 0x05, + IBMVFC_BASIC_REJECT = 0x06, +}; + +enum ibmvfc_gs_explain { + IBMVFC_PORT_NAME_NOT_REG 
= 0x02, +}; + +struct ibmvfc_port_login { + struct ibmvfc_mad_common common; + u64 scsi_id; + u16 reserved; + u16 fc_service_class; + u32 blksz; + u32 hdr_per_blk; + u16 status; + u16 error; /* also fc_reason */ + u16 fc_explain; + u16 fc_type; + u32 reserved2; + struct ibmvfc_service_parms service_parms; + struct ibmvfc_service_parms service_parms_change; + u64 reserved3[2]; +}__attribute__((packed, aligned (8))); + +struct ibmvfc_prli_svc_parms { + u8 type; +#define IBMVFC_SCSI_FCP_TYPE 0x08 + u8 type_ext; + u16 flags; +#define IBMVFC_PRLI_ORIG_PA_VALID 0x8000 +#define IBMVFC_PRLI_RESP_PA_VALID 0x4000 +#define IBMVFC_PRLI_EST_IMG_PAIR 0x2000 + u32 orig_pa; + u32 resp_pa; + u32 service_parms; +#define IBMVFC_PRLI_TASK_RETRY 0x00000200 +#define IBMVFC_PRLI_RETRY 0x00000100 +#define IBMVFC_PRLI_DATA_OVERLAY 0x00000040 +#define IBMVFC_PRLI_INITIATOR_FUNC 0x00000020 +#define IBMVFC_PRLI_TARGET_FUNC 0x00000010 +#define IBMVFC_PRLI_READ_FCP_XFER_RDY_DISABLED 0x00000002 +#define IBMVFC_PRLI_WR_FCP_XFER_RDY_DISABLED 0x00000001 +}__attribute__((packed, aligned (4))); + +struct ibmvfc_process_login { + struct ibmvfc_mad_common common; + u64 scsi_id; + struct ibmvfc_prli_svc_parms parms; + u8 reserved[48]; + u16 status; + u16 error; /* also fc_reason */ + u32 reserved2; + u64 reserved3[2]; +}__attribute__((packed, aligned (8))); + +struct ibmvfc_query_tgt { + struct ibmvfc_mad_common common; + u64 wwpn; + u64 scsi_id; + u16 status; + u16 error; + u16 fc_explain; + u16 fc_type; + u64 reserved[2]; +}__attribute__((packed, aligned (8))); + +struct ibmvfc_implicit_logout { + struct ibmvfc_mad_common common; + u64 old_scsi_id; + u64 reserved[2]; +}__attribute__((packed, aligned (8))); + +struct ibmvfc_tmf { + struct ibmvfc_mad_common common; + u64 scsi_id; + struct scsi_lun lun; + u32 flags; +#define IBMVFC_TMF_ABORT_TASK 0x02 +#define IBMVFC_TMF_ABORT_TASK_SET 0x04 +#define IBMVFC_TMF_LUN_RESET 0x10 +#define IBMVFC_TMF_TGT_RESET 0x20 +#define IBMVFC_TMF_LUA_VALID 0x40 + u32 cancel_key; + u32 my_cancel_key; +#define IBMVFC_TMF_CANCEL_KEY 0x80000000 + u32 pad; + u64 reserved[2]; +}__attribute__((packed, aligned (8))); + +enum ibmvfc_fcp_rsp_info_codes { + RSP_NO_FAILURE = 0x00, + RSP_TMF_REJECTED = 0x04, + RSP_TMF_FAILED = 0x05, + RSP_TMF_INVALID_LUN = 0x09, +}; + +struct ibmvfc_fcp_rsp_info { + u16 reserved; + u8 rsp_code; + u8 reserved2[4]; +}__attribute__((packed, aligned (2))); + +enum ibmvfc_fcp_rsp_flags { + FCP_BIDI_RSP = 0x80, + FCP_BIDI_READ_RESID_UNDER = 0x40, + FCP_BIDI_READ_RESID_OVER = 0x20, + FCP_CONF_REQ = 0x10, + FCP_RESID_UNDER = 0x08, + FCP_RESID_OVER = 0x04, + FCP_SNS_LEN_VALID = 0x02, + FCP_RSP_LEN_VALID = 0x01, +}; + +union ibmvfc_fcp_rsp_data { + struct ibmvfc_fcp_rsp_info info; + u8 sense[SCSI_SENSE_BUFFERSIZE + sizeof(struct ibmvfc_fcp_rsp_info)]; +}__attribute__((packed, aligned (8))); + +struct ibmvfc_fcp_rsp { + u64 reserved; + u16 retry_delay_timer; + u8 flags; + u8 scsi_status; + u32 fcp_resid; + u32 fcp_sense_len; + u32 fcp_rsp_len; + union ibmvfc_fcp_rsp_data data; +}__attribute__((packed, aligned (8))); + +enum ibmvfc_cmd_flags { + IBMVFC_SCATTERLIST = 0x0001, + IBMVFC_NO_MEM_DESC = 0x0002, + IBMVFC_READ = 0x0004, + IBMVFC_WRITE = 0x0008, + IBMVFC_TMF = 0x0080, + IBMVFC_CLASS_3_ERR = 0x0100, +}; + +enum ibmvfc_fc_task_attr { + IBMVFC_SIMPLE_TASK = 0x00, + IBMVFC_HEAD_OF_QUEUE = 0x01, + IBMVFC_ORDERED_TASK = 0x02, + IBMVFC_ACA_TASK = 0x04, +}; + +enum ibmvfc_fc_tmf_flags { + IBMVFC_ABORT_TASK_SET = 0x02, + IBMVFC_LUN_RESET = 0x10, + IBMVFC_TARGET_RESET = 0x20, +}; + +struct 
ibmvfc_fcp_cmd_iu { + struct scsi_lun lun; + u8 crn; + u8 pri_task_attr; + u8 tmf_flags; + u8 add_cdb_len; +#define IBMVFC_RDDATA 0x02 +#define IBMVFC_WRDATA 0x01 + u8 cdb[IBMVFC_MAX_CDB_LEN]; + u32 xfer_len; +}__attribute__((packed, aligned (4))); + +struct ibmvfc_cmd { + u64 task_tag; + u32 frame_type; + u32 payload_len; + u32 resp_len; + u32 adapter_resid; + u16 status; + u16 error; + u16 flags; + u16 response_flags; +#define IBMVFC_ADAPTER_RESID_VALID 0x01 + u32 cancel_key; + u32 exchange_id; + struct srp_direct_buf ext_func; + struct srp_direct_buf ioba; + struct srp_direct_buf resp; + u64 correlation; + u64 tgt_scsi_id; + u64 tag; + u64 reserved3[2]; + struct ibmvfc_fcp_cmd_iu iu; + struct ibmvfc_fcp_rsp rsp; +}__attribute__((packed, aligned (8))); + +struct ibmvfc_trace_start_entry { + u32 xfer_len; +}__attribute__((packed)); + +struct ibmvfc_trace_end_entry { + u16 status; + u16 error; + u8 fcp_rsp_flags; + u8 rsp_code; + u8 scsi_status; + u8 reserved; +}__attribute__((packed)); + +struct ibmvfc_trace_entry { + struct ibmvfc_event *evt; + u32 time; + u32 scsi_id; + u32 lun; + u8 fmt; + u8 op_code; + u8 tmf_flags; + u8 type; +#define IBMVFC_TRC_START 0x00 +#define IBMVFC_TRC_END 0xff + union { + struct ibmvfc_trace_start_entry start; + struct ibmvfc_trace_end_entry end; + } u; +}__attribute__((packed, aligned (8))); + +enum ibmvfc_crq_formats { + IBMVFC_CMD_FORMAT = 0x01, + IBMVFC_ASYNC_EVENT = 0x02, + IBMVFC_MAD_FORMAT = 0x04, +}; + +enum ibmvfc_async_event { + IBMVFC_AE_ELS_PLOGI = 0x0001, + IBMVFC_AE_ELS_LOGO = 0x0002, + IBMVFC_AE_ELS_PRLO = 0x0004, + IBMVFC_AE_SCN_NPORT = 0x0008, + IBMVFC_AE_SCN_GROUP = 0x0010, + IBMVFC_AE_SCN_DOMAIN = 0x0020, + IBMVFC_AE_SCN_FABRIC = 0x0040, + IBMVFC_AE_LINK_UP = 0x0080, + IBMVFC_AE_LINK_DOWN = 0x0100, + IBMVFC_AE_LINK_DEAD = 0x0200, + IBMVFC_AE_HALT = 0x0400, + IBMVFC_AE_RESUME = 0x0800, + IBMVFC_AE_ADAPTER_FAILED = 0x1000, +}; + +struct ibmvfc_crq { + u8 valid; + u8 format; + u8 reserved[6]; + u64 ioba; +}__attribute__((packed, aligned (8))); + +struct ibmvfc_crq_queue { + struct ibmvfc_crq *msgs; + int size, cur; + dma_addr_t msg_token; +}; + +struct ibmvfc_async_crq { + u8 valid; + u8 pad[3]; + u32 pad2; + u64 event; + u64 scsi_id; + u64 wwpn; + u64 node_name; + u64 reserved; +}__attribute__((packed, aligned (8))); + +struct ibmvfc_async_crq_queue { + struct ibmvfc_async_crq *msgs; + int size, cur; + dma_addr_t msg_token; +}; + +union ibmvfc_iu { + struct ibmvfc_mad_common mad_common; + struct ibmvfc_npiv_login_mad npiv_login; + struct ibmvfc_discover_targets discover_targets; + struct ibmvfc_port_login plogi; + struct ibmvfc_process_login prli; + struct ibmvfc_query_tgt query_tgt; + struct ibmvfc_implicit_logout implicit_logout; + struct ibmvfc_tmf tmf; + struct ibmvfc_cmd cmd; +}__attribute__((packed, aligned (8))); + +enum ibmvfc_target_action { + IBMVFC_TGT_ACTION_NONE = 0, + IBMVFC_TGT_ACTION_INIT, + IBMVFC_TGT_ACTION_INIT_WAIT, + IBMVFC_TGT_ACTION_ADD_RPORT, + IBMVFC_TGT_ACTION_DEL_RPORT, +}; + +struct ibmvfc_target { + struct list_head queue; + struct ibmvfc_host *vhost; + u64 scsi_id; + u64 new_scsi_id; + struct fc_rport *rport; + int target_id; + enum ibmvfc_target_action action; + int need_login; + int init_retries; + struct ibmvfc_service_parms service_parms; + struct ibmvfc_service_parms service_parms_change; + struct fc_rport_identifiers ids; + void (*job_step) (struct ibmvfc_target *); + struct kref kref; +}; + +/* a unit of work for the hosting partition */ +struct ibmvfc_event { + struct list_head queue; + struct ibmvfc_host 
*vhost; + struct ibmvfc_target *tgt; + struct scsi_cmnd *cmnd; + atomic_t free; + union ibmvfc_iu *xfer_iu; + void (*done) (struct ibmvfc_event *); + struct ibmvfc_crq crq; + union ibmvfc_iu iu; + union ibmvfc_iu *sync_iu; + struct srp_direct_buf *ext_list; + dma_addr_t ext_list_token; + struct completion comp; + struct timer_list timer; +}; + +/* a pool of event structs for use */ +struct ibmvfc_event_pool { + struct ibmvfc_event *events; + u32 size; + union ibmvfc_iu *iu_storage; + dma_addr_t iu_token; +}; + +enum ibmvfc_host_action { + IBMVFC_HOST_ACTION_NONE = 0, + IBMVFC_HOST_ACTION_INIT, + IBMVFC_HOST_ACTION_INIT_WAIT, + IBMVFC_HOST_ACTION_QUERY, + IBMVFC_HOST_ACTION_QUERY_TGTS, + IBMVFC_HOST_ACTION_TGT_DEL, + IBMVFC_HOST_ACTION_ALLOC_TGTS, + IBMVFC_HOST_ACTION_TGT_INIT, + IBMVFC_HOST_ACTION_TGT_ADD, +}; + +enum ibmvfc_host_state { + IBMVFC_NO_CRQ = 0, + IBMVFC_INITIALIZING, + IBMVFC_ACTIVE, + IBMVFC_HALTED, + IBMVFC_LINK_DOWN, + IBMVFC_LINK_DEAD, + IBMVFC_HOST_OFFLINE, +}; + +struct ibmvfc_host { + char name[8]; + struct list_head queue; + struct Scsi_Host *host; + enum ibmvfc_host_state state; + enum ibmvfc_host_action action; +#define IBMVFC_NUM_TRACE_INDEX_BITS 8 +#define IBMVFC_NUM_TRACE_ENTRIES (1 << IBMVFC_NUM_TRACE_INDEX_BITS) +#define IBMVFC_TRACE_SIZE (sizeof(struct ibmvfc_trace_entry) * IBMVFC_NUM_TRACE_ENTRIES) + struct ibmvfc_trace_entry *trace; + u32 trace_index:IBMVFC_NUM_TRACE_INDEX_BITS; + int num_targets; + struct list_head targets; + struct list_head sent; + struct list_head free; + struct device *dev; + struct ibmvfc_event_pool pool; + struct dma_pool *sg_pool; + mempool_t *tgt_pool; + struct ibmvfc_crq_queue crq; + struct ibmvfc_async_crq_queue async_crq; + struct ibmvfc_npiv_login login_info; + union ibmvfc_npiv_login_data *login_buf; + dma_addr_t login_buf_dma; + int disc_buf_sz; + int log_level; + struct ibmvfc_discover_targets_buf *disc_buf; + int task_set; + int init_retries; + int discovery_threads; + int client_migrated; + int reinit; + int events_to_log; +#define IBMVFC_AE_LINKUP 0x0001 +#define IBMVFC_AE_LINKDOWN 0x0002 +#define IBMVFC_AE_RSCN 0x0004 + dma_addr_t disc_buf_dma; + unsigned int partition_number; + char partition_name[97]; + void (*job_step) (struct ibmvfc_host *); + struct task_struct *work_thread; + wait_queue_head_t init_wait_q; + wait_queue_head_t work_wait_q; +}; + +#define DBG_CMD(CMD) do { if (ibmvfc_debug) CMD; } while (0) + +#define tgt_dbg(t, fmt, ...) \ + DBG_CMD(dev_info((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__)) + +#define tgt_err(t, fmt, ...) \ + dev_err((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__) + +#define ibmvfc_dbg(vhost, ...) \ + DBG_CMD(dev_info((vhost)->dev, ##__VA_ARGS__)) + +#define ibmvfc_log(vhost, level, ...) 
\ + do { \ + if (level >= (vhost)->log_level) \ + dev_err((vhost)->dev, ##__VA_ARGS__); \ + } while (0) + +#define ENTER DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Entering %s\n", __FUNCTION__)) +#define LEAVE DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Leaving %s\n", __FUNCTION__)) + +#ifdef CONFIG_SCSI_IBMVFC_TRACE +#define ibmvfc_create_trace_file(kobj, attr) sysfs_create_bin_file(kobj, attr) +#define ibmvfc_remove_trace_file(kobj, attr) sysfs_remove_bin_file(kobj, attr) +#else +#define ibmvfc_create_trace_file(kobj, attr) 0 +#define ibmvfc_remove_trace_file(kobj, attr) do { } while (0) +#endif + +#endif diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c index 683bce375c74..f843c1383a4b 100644 --- a/drivers/scsi/ide-scsi.c +++ b/drivers/scsi/ide-scsi.c @@ -258,19 +258,6 @@ idescsi_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err) return ide_stopped; } -static ide_startstop_t -idescsi_atapi_abort(ide_drive_t *drive, struct request *rq) -{ - debug_log("%s called for %lu\n", __func__, - ((struct ide_atapi_pc *) rq->special)->scsi_cmd->serial_number); - - rq->errors |= ERROR_MAX; - - idescsi_end_request(drive, 0, 0); - - return ide_stopped; -} - static int idescsi_end_request (ide_drive_t *drive, int uptodate, int nrsecs) { idescsi_scsi_t *scsi = drive_to_idescsi(drive); @@ -524,7 +511,6 @@ static ide_driver_t idescsi_driver = { .do_request = idescsi_do_request, .end_request = idescsi_end_request, .error = idescsi_atapi_error, - .abort = idescsi_atapi_abort, #ifdef CONFIG_IDE_PROC_FS .proc = idescsi_proc, #endif diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 72b9b2a0eba3..2a2f0094570f 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -64,6 +64,10 @@ MODULE_LICENSE("GPL"); #define BUG_ON(expr) #endif +static struct scsi_transport_template *iscsi_tcp_scsi_transport; +static struct scsi_host_template iscsi_sht; +static struct iscsi_transport iscsi_tcp_transport; + static unsigned int iscsi_max_lun = 512; module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO); @@ -494,39 +498,43 @@ iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn) * must be called with session lock */ static void -iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) +iscsi_tcp_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task) { - struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; + struct iscsi_tcp_task *tcp_task = task->dd_data; struct iscsi_r2t_info *r2t; - /* flush ctask's r2t queues */ - while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) { - __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, + /* nothing to do for mgmt tasks */ + if (!task->sc) + return; + + /* flush task's r2t queues */ + while (__kfifo_get(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) { + __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*)); - debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n"); + debug_scsi("iscsi_tcp_cleanup_task pending r2t dropped\n"); } - r2t = tcp_ctask->r2t; + r2t = tcp_task->r2t; if (r2t != NULL) { - __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, + __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*)); - tcp_ctask->r2t = NULL; + tcp_task->r2t = NULL; } } /** * iscsi_data_rsp - SCSI Data-In Response processing * @conn: iscsi connection - * @ctask: scsi command task + * @task: scsi command task **/ static int -iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) +iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task) { struct 
iscsi_tcp_conn *tcp_conn = conn->dd_data; - struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; + struct iscsi_tcp_task *tcp_task = task->dd_data; struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr; struct iscsi_session *session = conn->session; - struct scsi_cmnd *sc = ctask->sc; + struct scsi_cmnd *sc = task->sc; int datasn = be32_to_cpu(rhdr->datasn); unsigned total_in_length = scsi_in(sc)->length; @@ -534,18 +542,18 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) if (tcp_conn->in.datalen == 0) return 0; - if (tcp_ctask->exp_datasn != datasn) { - debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->datasn(%d)\n", - __FUNCTION__, tcp_ctask->exp_datasn, datasn); + if (tcp_task->exp_datasn != datasn) { + debug_tcp("%s: task->exp_datasn(%d) != rhdr->datasn(%d)\n", + __func__, tcp_task->exp_datasn, datasn); return ISCSI_ERR_DATASN; } - tcp_ctask->exp_datasn++; + tcp_task->exp_datasn++; - tcp_ctask->data_offset = be32_to_cpu(rhdr->offset); - if (tcp_ctask->data_offset + tcp_conn->in.datalen > total_in_length) { + tcp_task->data_offset = be32_to_cpu(rhdr->offset); + if (tcp_task->data_offset + tcp_conn->in.datalen > total_in_length) { debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n", - __FUNCTION__, tcp_ctask->data_offset, + __func__, tcp_task->data_offset, tcp_conn->in.datalen, total_in_length); return ISCSI_ERR_DATA_OFFSET; } @@ -574,7 +582,7 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) /** * iscsi_solicit_data_init - initialize first Data-Out * @conn: iscsi connection - * @ctask: scsi command task + * @task: scsi command task * @r2t: R2T info * * Notes: @@ -584,7 +592,7 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) * This function is called with connection lock taken. 
**/ static void -iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, +iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task, struct iscsi_r2t_info *r2t) { struct iscsi_data *hdr; @@ -595,8 +603,8 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, hdr->datasn = cpu_to_be32(r2t->solicit_datasn); r2t->solicit_datasn++; hdr->opcode = ISCSI_OP_SCSI_DATA_OUT; - memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun)); - hdr->itt = ctask->hdr->itt; + memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun)); + hdr->itt = task->hdr->itt; hdr->exp_statsn = r2t->exp_statsn; hdr->offset = cpu_to_be32(r2t->data_offset); if (r2t->data_length > conn->max_xmit_dlength) { @@ -616,14 +624,14 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, /** * iscsi_r2t_rsp - iSCSI R2T Response processing * @conn: iscsi connection - * @ctask: scsi command task + * @task: scsi command task **/ static int -iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) +iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task) { struct iscsi_r2t_info *r2t; struct iscsi_session *session = conn->session; - struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; + struct iscsi_tcp_task *tcp_task = task->dd_data; struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr; int r2tsn = be32_to_cpu(rhdr->r2tsn); @@ -636,23 +644,23 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) return ISCSI_ERR_DATALEN; } - if (tcp_ctask->exp_datasn != r2tsn){ - debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->r2tsn(%d)\n", - __FUNCTION__, tcp_ctask->exp_datasn, r2tsn); + if (tcp_task->exp_datasn != r2tsn){ + debug_tcp("%s: task->exp_datasn(%d) != rhdr->r2tsn(%d)\n", + __func__, tcp_task->exp_datasn, r2tsn); return ISCSI_ERR_R2TSN; } /* fill-in new R2T associated with the task */ iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr); - if (!ctask->sc || session->state != ISCSI_STATE_LOGGED_IN) { + if (!task->sc || session->state != ISCSI_STATE_LOGGED_IN) { iscsi_conn_printk(KERN_INFO, conn, "dropping R2T itt %d in recovery.\n", - ctask->itt); + task->itt); return 0; } - rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*)); + rc = __kfifo_get(tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*)); BUG_ON(!rc); r2t->exp_statsn = rhdr->statsn; @@ -660,7 +668,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) if (r2t->data_length == 0) { iscsi_conn_printk(KERN_ERR, conn, "invalid R2T with zero data len\n"); - __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, + __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*)); return ISCSI_ERR_DATALEN; } @@ -671,12 +679,12 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) r2t->data_length, session->max_burst); r2t->data_offset = be32_to_cpu(rhdr->data_offset); - if (r2t->data_offset + r2t->data_length > scsi_out(ctask->sc)->length) { + if (r2t->data_offset + r2t->data_length > scsi_out(task->sc)->length) { iscsi_conn_printk(KERN_ERR, conn, "invalid R2T with data len %u at offset %u " "and total length %d\n", r2t->data_length, - r2t->data_offset, scsi_out(ctask->sc)->length); - __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, + r2t->data_offset, scsi_out(task->sc)->length); + __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*)); return ISCSI_ERR_DATALEN; } @@ -684,13 +692,13 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task 
*ctask) r2t->ttt = rhdr->ttt; /* no flip */ r2t->solicit_datasn = 0; - iscsi_solicit_data_init(conn, ctask, r2t); + iscsi_solicit_data_init(conn, task, r2t); - tcp_ctask->exp_datasn = r2tsn + 1; - __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*)); + tcp_task->exp_datasn = r2tsn + 1; + __kfifo_put(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*)); conn->r2t_pdus_cnt++; - iscsi_requeue_ctask(ctask); + iscsi_requeue_task(task); return 0; } @@ -733,10 +741,8 @@ static int iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr) { int rc = 0, opcode, ahslen; - struct iscsi_session *session = conn->session; struct iscsi_tcp_conn *tcp_conn = conn->dd_data; - struct iscsi_cmd_task *ctask; - uint32_t itt; + struct iscsi_task *task; /* verify PDU length */ tcp_conn->in.datalen = ntoh24(hdr->dlength); @@ -754,7 +760,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr) opcode = hdr->opcode & ISCSI_OPCODE_MASK; /* verify itt (itt encoding: age+cid+itt) */ - rc = iscsi_verify_itt(conn, hdr, &itt); + rc = iscsi_verify_itt(conn, hdr->itt); if (rc) return rc; @@ -763,16 +769,21 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr) switch(opcode) { case ISCSI_OP_SCSI_DATA_IN: - ctask = session->cmds[itt]; spin_lock(&conn->session->lock); - rc = iscsi_data_rsp(conn, ctask); - spin_unlock(&conn->session->lock); - if (rc) - return rc; + task = iscsi_itt_to_ctask(conn, hdr->itt); + if (!task) + rc = ISCSI_ERR_BAD_ITT; + else + rc = iscsi_data_rsp(conn, task); + if (rc) { + spin_unlock(&conn->session->lock); + break; + } + if (tcp_conn->in.datalen) { - struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; + struct iscsi_tcp_task *tcp_task = task->dd_data; struct hash_desc *rx_hash = NULL; - struct scsi_data_buffer *sdb = scsi_in(ctask->sc); + struct scsi_data_buffer *sdb = scsi_in(task->sc); /* * Setup copy of Data-In into the Scsi_Cmnd @@ -787,17 +798,21 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr) debug_tcp("iscsi_tcp_begin_data_in(%p, offset=%d, " "datalen=%d)\n", tcp_conn, - tcp_ctask->data_offset, + tcp_task->data_offset, tcp_conn->in.datalen); - return iscsi_segment_seek_sg(&tcp_conn->in.segment, - sdb->table.sgl, - sdb->table.nents, - tcp_ctask->data_offset, - tcp_conn->in.datalen, - iscsi_tcp_process_data_in, - rx_hash); + rc = iscsi_segment_seek_sg(&tcp_conn->in.segment, + sdb->table.sgl, + sdb->table.nents, + tcp_task->data_offset, + tcp_conn->in.datalen, + iscsi_tcp_process_data_in, + rx_hash); + spin_unlock(&conn->session->lock); + return rc; } - /* fall through */ + rc = __iscsi_complete_pdu(conn, hdr, NULL, 0); + spin_unlock(&conn->session->lock); + break; case ISCSI_OP_SCSI_CMD_RSP: if (tcp_conn->in.datalen) { iscsi_tcp_data_recv_prep(tcp_conn); @@ -806,15 +821,17 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr) rc = iscsi_complete_pdu(conn, hdr, NULL, 0); break; case ISCSI_OP_R2T: - ctask = session->cmds[itt]; - if (ahslen) + spin_lock(&conn->session->lock); + task = iscsi_itt_to_ctask(conn, hdr->itt); + if (!task) + rc = ISCSI_ERR_BAD_ITT; + else if (ahslen) rc = ISCSI_ERR_AHSLEN; - else if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) { - spin_lock(&session->lock); - rc = iscsi_r2t_rsp(conn, ctask); - spin_unlock(&session->lock); - } else + else if (task->sc->sc_data_direction == DMA_TO_DEVICE) + rc = iscsi_r2t_rsp(conn, task); + else rc = ISCSI_ERR_PROTO; + spin_unlock(&conn->session->lock); break; case ISCSI_OP_LOGIN_RSP: case ISCSI_OP_TEXT_RSP: @@ -1176,7 +1193,7 @@ 
iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen) { struct iscsi_tcp_conn *tcp_conn = conn->dd_data; - debug_tcp("%s(%p%s)\n", __FUNCTION__, tcp_conn, + debug_tcp("%s(%p%s)\n", __func__, tcp_conn, conn->hdrdgst_en? ", digest enabled" : ""); /* Clear the data segment - needs to be filled in by the @@ -1185,7 +1202,7 @@ iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen) /* If header digest is enabled, compute the CRC and * place the digest into the same buffer. We make - * sure that both iscsi_tcp_ctask and mtask have + * sure that both iscsi_tcp_task and mtask have * sufficient room. */ if (conn->hdrdgst_en) { @@ -1217,7 +1234,7 @@ iscsi_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg, struct hash_desc *tx_hash = NULL; unsigned int hdr_spec_len; - debug_tcp("%s(%p, offset=%d, datalen=%d%s)\n", __FUNCTION__, + debug_tcp("%s(%p, offset=%d, datalen=%d%s)\n", __func__, tcp_conn, offset, len, conn->datadgst_en? ", digest enabled" : ""); @@ -1242,7 +1259,7 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data, struct hash_desc *tx_hash = NULL; unsigned int hdr_spec_len; - debug_tcp("%s(%p, datalen=%d%s)\n", __FUNCTION__, tcp_conn, len, + debug_tcp("%s(%p, datalen=%d%s)\n", __func__, tcp_conn, len, conn->datadgst_en? ", digest enabled" : ""); /* Make sure the datalen matches what the caller @@ -1260,7 +1277,7 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data, /** * iscsi_solicit_data_cont - initialize next Data-Out * @conn: iscsi connection - * @ctask: scsi command task + * @task: scsi command task * @r2t: R2T info * @left: bytes left to transfer * @@ -1271,7 +1288,7 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data, * Called under connection lock. **/ static int -iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, +iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task, struct iscsi_r2t_info *r2t) { struct iscsi_data *hdr; @@ -1288,8 +1305,8 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, hdr->datasn = cpu_to_be32(r2t->solicit_datasn); r2t->solicit_datasn++; hdr->opcode = ISCSI_OP_SCSI_DATA_OUT; - memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun)); - hdr->itt = ctask->hdr->itt; + memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun)); + hdr->itt = task->hdr->itt; hdr->exp_statsn = r2t->exp_statsn; new_offset = r2t->data_offset + r2t->sent; hdr->offset = cpu_to_be32(new_offset); @@ -1307,89 +1324,76 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, } /** - * iscsi_tcp_ctask - Initialize iSCSI SCSI_READ or SCSI_WRITE commands + * iscsi_tcp_task - Initialize iSCSI SCSI_READ or SCSI_WRITE commands * @conn: iscsi connection - * @ctask: scsi command task + * @task: scsi command task * @sc: scsi command **/ static int -iscsi_tcp_ctask_init(struct iscsi_cmd_task *ctask) +iscsi_tcp_task_init(struct iscsi_task *task) { - struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; - struct iscsi_conn *conn = ctask->conn; - struct scsi_cmnd *sc = ctask->sc; + struct iscsi_tcp_task *tcp_task = task->dd_data; + struct iscsi_conn *conn = task->conn; + struct scsi_cmnd *sc = task->sc; int err; - BUG_ON(__kfifo_len(tcp_ctask->r2tqueue)); - tcp_ctask->sent = 0; - tcp_ctask->exp_datasn = 0; + if (!sc) { + /* + * mgmt tasks do not have a scatterlist since they come + * in from the iscsi interface. 
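iscsi_tcp_send_hdr_prep above reserves room after the basic header segment and, when hdrdgst_en is set, drops a CRC32C of the header into that slot. A rough standalone sketch of the digest computation (bitwise CRC32C; it ignores AHS and byte-order details, so treat the layout as illustrative only):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bitwise CRC32C (Castagnoli), reflected polynomial 0x82F63B78. */
static uint32_t crc32c(const void *buf, size_t len)
{
	const uint8_t *p = buf;
	uint32_t crc = 0xffffffffu;

	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0x82f63b78u & -(crc & 1));
	}
	return ~crc;
}

int main(void)
{
	unsigned char pdu[48 + 4];	/* BHS plus room for the header digest */

	memset(pdu, 0, sizeof(pdu));
	pdu[0] = 0x01;			/* opcode byte, arbitrary for the demo */

	uint32_t digest = crc32c(pdu, 48);
	memcpy(pdu + 48, &digest, sizeof(digest));
	printf("header digest %08x\n", (unsigned)digest);
	return 0;
}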
+ */ + debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id, + task->itt); + + /* Prepare PDU, optionally w/ immediate data */ + iscsi_tcp_send_hdr_prep(conn, task->hdr, sizeof(*task->hdr)); + + /* If we have immediate data, attach a payload */ + if (task->data_count) + iscsi_tcp_send_linear_data_prepare(conn, task->data, + task->data_count); + return 0; + } + + BUG_ON(__kfifo_len(tcp_task->r2tqueue)); + tcp_task->sent = 0; + tcp_task->exp_datasn = 0; /* Prepare PDU, optionally w/ immediate data */ - debug_scsi("ctask deq [cid %d itt 0x%x imm %d unsol %d]\n", - conn->id, ctask->itt, ctask->imm_count, - ctask->unsol_count); - iscsi_tcp_send_hdr_prep(conn, ctask->hdr, ctask->hdr_len); + debug_scsi("task deq [cid %d itt 0x%x imm %d unsol %d]\n", + conn->id, task->itt, task->imm_count, + task->unsol_count); + iscsi_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len); - if (!ctask->imm_count) + if (!task->imm_count) return 0; /* If we have immediate data, attach a payload */ err = iscsi_tcp_send_data_prep(conn, scsi_out(sc)->table.sgl, scsi_out(sc)->table.nents, - 0, ctask->imm_count); + 0, task->imm_count); if (err) return err; - tcp_ctask->sent += ctask->imm_count; - ctask->imm_count = 0; - return 0; -} - -/** - * iscsi_tcp_mtask_xmit - xmit management(immediate) task - * @conn: iscsi connection - * @mtask: task management task - * - * Notes: - * The function can return -EAGAIN in which case caller must - * call it again later, or recover. '0' return code means successful - * xmit. - **/ -static int -iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask) -{ - int rc; - - /* Flush any pending data first. */ - rc = iscsi_tcp_flush(conn); - if (rc < 0) - return rc; - - if (mtask->hdr->itt == RESERVED_ITT) { - struct iscsi_session *session = conn->session; - - spin_lock_bh(&session->lock); - iscsi_free_mgmt_task(conn, mtask); - spin_unlock_bh(&session->lock); - } - + tcp_task->sent += task->imm_count; + task->imm_count = 0; return 0; } /* - * iscsi_tcp_ctask_xmit - xmit normal PDU task - * @conn: iscsi connection - * @ctask: iscsi command task + * iscsi_tcp_task_xmit - xmit normal PDU task + * @task: iscsi command task * * We're expected to return 0 when everything was transmitted succesfully, * -EAGAIN if there's still data in the queue, or != 0 for any other kind * of error. */ static int -iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) +iscsi_tcp_task_xmit(struct iscsi_task *task) { - struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; - struct scsi_cmnd *sc = ctask->sc; - struct scsi_data_buffer *sdb = scsi_out(sc); + struct iscsi_conn *conn = task->conn; + struct iscsi_tcp_task *tcp_task = task->dd_data; + struct scsi_cmnd *sc = task->sc; + struct scsi_data_buffer *sdb; int rc = 0; flush: @@ -1398,31 +1402,39 @@ flush: if (rc < 0) return rc; + /* mgmt command */ + if (!sc) { + if (task->hdr->itt == RESERVED_ITT) + iscsi_put_task(task); + return 0; + } + /* Are we done already? */ if (sc->sc_data_direction != DMA_TO_DEVICE) return 0; - if (ctask->unsol_count != 0) { - struct iscsi_data *hdr = &tcp_ctask->unsol_dtask.hdr; + sdb = scsi_out(sc); + if (task->unsol_count != 0) { + struct iscsi_data *hdr = &tcp_task->unsol_dtask.hdr; /* Prepare a header for the unsolicited PDU. * The amount of data we want to send will be - * in ctask->data_count. + * in task->data_count. * FIXME: return the data count instead. 
*/ - iscsi_prep_unsolicit_data_pdu(ctask, hdr); + iscsi_prep_unsolicit_data_pdu(task, hdr); debug_tcp("unsol dout [itt 0x%x doff %d dlen %d]\n", - ctask->itt, tcp_ctask->sent, ctask->data_count); + task->itt, tcp_task->sent, task->data_count); iscsi_tcp_send_hdr_prep(conn, hdr, sizeof(*hdr)); rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl, - sdb->table.nents, tcp_ctask->sent, - ctask->data_count); + sdb->table.nents, tcp_task->sent, + task->data_count); if (rc) goto fail; - tcp_ctask->sent += ctask->data_count; - ctask->unsol_count -= ctask->data_count; + tcp_task->sent += task->data_count; + task->unsol_count -= task->data_count; goto flush; } else { struct iscsi_session *session = conn->session; @@ -1431,22 +1443,22 @@ flush: /* All unsolicited PDUs sent. Check for solicited PDUs. */ spin_lock_bh(&session->lock); - r2t = tcp_ctask->r2t; + r2t = tcp_task->r2t; if (r2t != NULL) { /* Continue with this R2T? */ - if (!iscsi_solicit_data_cont(conn, ctask, r2t)) { + if (!iscsi_solicit_data_cont(conn, task, r2t)) { debug_scsi(" done with r2t %p\n", r2t); - __kfifo_put(tcp_ctask->r2tpool.queue, + __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*)); - tcp_ctask->r2t = r2t = NULL; + tcp_task->r2t = r2t = NULL; } } if (r2t == NULL) { - __kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t, + __kfifo_get(tcp_task->r2tqueue, (void*)&tcp_task->r2t, sizeof(void*)); - r2t = tcp_ctask->r2t; + r2t = tcp_task->r2t; } spin_unlock_bh(&session->lock); @@ -1457,7 +1469,7 @@ flush: } debug_scsi("sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n", - r2t, r2t->solicit_datasn - 1, ctask->itt, + r2t, r2t->solicit_datasn - 1, task->itt, r2t->data_offset + r2t->sent, r2t->data_count); iscsi_tcp_send_hdr_prep(conn, &r2t->dtask.hdr, @@ -1469,7 +1481,7 @@ flush: r2t->data_count); if (rc) goto fail; - tcp_ctask->sent += r2t->data_count; + tcp_task->sent += r2t->data_count; r2t->sent += r2t->data_count; goto flush; } @@ -1486,7 +1498,7 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx) struct iscsi_cls_conn *cls_conn; struct iscsi_tcp_conn *tcp_conn; - cls_conn = iscsi_conn_setup(cls_session, conn_idx); + cls_conn = iscsi_conn_setup(cls_session, sizeof(*tcp_conn), conn_idx); if (!cls_conn) return NULL; conn = cls_conn->dd_data; @@ -1496,18 +1508,14 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx) */ conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN; - tcp_conn = kzalloc(sizeof(*tcp_conn), GFP_KERNEL); - if (!tcp_conn) - goto tcp_conn_alloc_fail; - - conn->dd_data = tcp_conn; + tcp_conn = conn->dd_data; tcp_conn->iscsi_conn = conn; tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0, CRYPTO_ALG_ASYNC); tcp_conn->tx_hash.flags = 0; if (IS_ERR(tcp_conn->tx_hash.tfm)) - goto free_tcp_conn; + goto free_conn; tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0, CRYPTO_ALG_ASYNC); @@ -1519,14 +1527,12 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx) free_tx_tfm: crypto_free_hash(tcp_conn->tx_hash.tfm); -free_tcp_conn: +free_conn: iscsi_conn_printk(KERN_ERR, conn, "Could not create connection due to crc32c " "loading error. 
Make sure the crc32c " "module is built as a module or into the " "kernel\n"); - kfree(tcp_conn); -tcp_conn_alloc_fail: iscsi_conn_teardown(cls_conn); return NULL; } @@ -1547,7 +1553,6 @@ iscsi_tcp_release_conn(struct iscsi_conn *conn) spin_lock_bh(&session->lock); tcp_conn->sock = NULL; - conn->recv_lock = NULL; spin_unlock_bh(&session->lock); sockfd_put(sock); } @@ -1559,20 +1564,32 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn) struct iscsi_tcp_conn *tcp_conn = conn->dd_data; iscsi_tcp_release_conn(conn); - iscsi_conn_teardown(cls_conn); if (tcp_conn->tx_hash.tfm) crypto_free_hash(tcp_conn->tx_hash.tfm); if (tcp_conn->rx_hash.tfm) crypto_free_hash(tcp_conn->rx_hash.tfm); - kfree(tcp_conn); + iscsi_conn_teardown(cls_conn); } static void iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) { struct iscsi_conn *conn = cls_conn->dd_data; + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + + /* userspace may have goofed up and not bound us */ + if (!tcp_conn->sock) + return; + /* + * Make sure our recv side is stopped. + * Older tools called conn stop before ep_disconnect + * so IO could still be coming in. + */ + write_lock_bh(&tcp_conn->sock->sk->sk_callback_lock); + set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); + write_unlock_bh(&tcp_conn->sock->sk->sk_callback_lock); iscsi_conn_stop(cls_conn, flag); iscsi_tcp_release_conn(conn); @@ -1623,6 +1640,8 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session, struct iscsi_cls_conn *cls_conn, uint64_t transport_eph, int is_leading) { + struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); + struct iscsi_host *ihost = shost_priv(shost); struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct sock *sk; @@ -1646,8 +1665,8 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session, if (err) goto free_socket; - err = iscsi_tcp_get_addr(conn, sock, conn->local_address, - &conn->local_port, kernel_getsockname); + err = iscsi_tcp_get_addr(conn, sock, ihost->local_address, + &ihost->local_port, kernel_getsockname); if (err) goto free_socket; @@ -1664,13 +1683,6 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session, sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */ sk->sk_allocation = GFP_ATOMIC; - /* FIXME: disable Nagle's algorithm */ - - /* - * Intercept TCP callbacks for sendfile like receive - * processing. 
- */ - conn->recv_lock = &sk->sk_callback_lock; iscsi_conn_set_callbacks(conn); tcp_conn->sendpage = tcp_conn->sock->ops->sendpage; /* @@ -1684,21 +1696,6 @@ free_socket: return err; } -/* called with host lock */ -static void -iscsi_tcp_mtask_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask) -{ - debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id, mtask->itt); - - /* Prepare PDU, optionally w/ immediate data */ - iscsi_tcp_send_hdr_prep(conn, mtask->hdr, sizeof(*mtask->hdr)); - - /* If we have immediate data, attach a payload */ - if (mtask->data_count) - iscsi_tcp_send_linear_data_prepare(conn, mtask->data, - mtask->data_count); -} - static int iscsi_r2tpool_alloc(struct iscsi_session *session) { @@ -1709,8 +1706,8 @@ iscsi_r2tpool_alloc(struct iscsi_session *session) * initialize per-task: R2T pool and xmit queue */ for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) { - struct iscsi_cmd_task *ctask = session->cmds[cmd_i]; - struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; + struct iscsi_task *task = session->cmds[cmd_i]; + struct iscsi_tcp_task *tcp_task = task->dd_data; /* * pre-allocated x4 as much r2ts to handle race when @@ -1719,16 +1716,16 @@ iscsi_r2tpool_alloc(struct iscsi_session *session) */ /* R2T pool */ - if (iscsi_pool_init(&tcp_ctask->r2tpool, session->max_r2t * 4, NULL, + if (iscsi_pool_init(&tcp_task->r2tpool, session->max_r2t * 4, NULL, sizeof(struct iscsi_r2t_info))) { goto r2t_alloc_fail; } /* R2T xmit queue */ - tcp_ctask->r2tqueue = kfifo_alloc( + tcp_task->r2tqueue = kfifo_alloc( session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL); - if (tcp_ctask->r2tqueue == ERR_PTR(-ENOMEM)) { - iscsi_pool_free(&tcp_ctask->r2tpool); + if (tcp_task->r2tqueue == ERR_PTR(-ENOMEM)) { + iscsi_pool_free(&tcp_task->r2tpool); goto r2t_alloc_fail; } } @@ -1737,11 +1734,11 @@ iscsi_r2tpool_alloc(struct iscsi_session *session) r2t_alloc_fail: for (i = 0; i < cmd_i; i++) { - struct iscsi_cmd_task *ctask = session->cmds[i]; - struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; + struct iscsi_task *task = session->cmds[i]; + struct iscsi_tcp_task *tcp_task = task->dd_data; - kfifo_free(tcp_ctask->r2tqueue); - iscsi_pool_free(&tcp_ctask->r2tpool); + kfifo_free(tcp_task->r2tqueue); + iscsi_pool_free(&tcp_task->r2tpool); } return -ENOMEM; } @@ -1752,11 +1749,11 @@ iscsi_r2tpool_free(struct iscsi_session *session) int i; for (i = 0; i < session->cmds_max; i++) { - struct iscsi_cmd_task *ctask = session->cmds[i]; - struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; + struct iscsi_task *task = session->cmds[i]; + struct iscsi_tcp_task *tcp_task = task->dd_data; - kfifo_free(tcp_ctask->r2tqueue); - iscsi_pool_free(&tcp_ctask->r2tpool); + kfifo_free(tcp_task->r2tqueue); + iscsi_pool_free(&tcp_task->r2tpool); } } @@ -1821,29 +1818,6 @@ iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn, return len; } -static int -iscsi_tcp_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param, - char *buf) -{ - struct iscsi_session *session = iscsi_hostdata(shost->hostdata); - int len; - - switch (param) { - case ISCSI_HOST_PARAM_IPADDRESS: - spin_lock_bh(&session->lock); - if (!session->leadconn) - len = -ENODEV; - else - len = sprintf(buf, "%s\n", - session->leadconn->local_address); - spin_unlock_bh(&session->lock); - break; - default: - return iscsi_host_get_param(shost, param, buf); - } - return len; -} - static void iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats) { @@ -1869,54 +1843,70 @@ iscsi_conn_get_stats(struct 
iscsi_cls_conn *cls_conn, struct iscsi_stats *stats) } static struct iscsi_cls_session * -iscsi_tcp_session_create(struct iscsi_transport *iscsit, - struct scsi_transport_template *scsit, - uint16_t cmds_max, uint16_t qdepth, - uint32_t initial_cmdsn, uint32_t *hostno) +iscsi_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max, + uint16_t qdepth, uint32_t initial_cmdsn, + uint32_t *hostno) { struct iscsi_cls_session *cls_session; struct iscsi_session *session; - uint32_t hn; + struct Scsi_Host *shost; int cmd_i; - cls_session = iscsi_session_setup(iscsit, scsit, cmds_max, qdepth, - sizeof(struct iscsi_tcp_cmd_task), - sizeof(struct iscsi_tcp_mgmt_task), - initial_cmdsn, &hn); - if (!cls_session) + if (ep) { + printk(KERN_ERR "iscsi_tcp: invalid ep %p.\n", ep); return NULL; - *hostno = hn; - - session = class_to_transport_session(cls_session); - for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) { - struct iscsi_cmd_task *ctask = session->cmds[cmd_i]; - struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; - - ctask->hdr = &tcp_ctask->hdr.cmd_hdr; - ctask->hdr_max = sizeof(tcp_ctask->hdr) - ISCSI_DIGEST_SIZE; } - for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) { - struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i]; - struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data; + shost = iscsi_host_alloc(&iscsi_sht, 0, qdepth); + if (!shost) + return NULL; + shost->transportt = iscsi_tcp_scsi_transport; + shost->max_lun = iscsi_max_lun; + shost->max_id = 0; + shost->max_channel = 0; + shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE; + + if (iscsi_host_add(shost, NULL)) + goto free_host; + *hostno = shost->host_no; + + cls_session = iscsi_session_setup(&iscsi_tcp_transport, shost, cmds_max, + sizeof(struct iscsi_tcp_task), + initial_cmdsn, 0); + if (!cls_session) + goto remove_host; + session = cls_session->dd_data; + + shost->can_queue = session->scsi_cmds_max; + for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) { + struct iscsi_task *task = session->cmds[cmd_i]; + struct iscsi_tcp_task *tcp_task = task->dd_data; - mtask->hdr = (struct iscsi_hdr *) &tcp_mtask->hdr; + task->hdr = &tcp_task->hdr.cmd_hdr; + task->hdr_max = sizeof(tcp_task->hdr) - ISCSI_DIGEST_SIZE; } - if (iscsi_r2tpool_alloc(class_to_transport_session(cls_session))) - goto r2tpool_alloc_fail; - + if (iscsi_r2tpool_alloc(session)) + goto remove_session; return cls_session; -r2tpool_alloc_fail: +remove_session: iscsi_session_teardown(cls_session); +remove_host: + iscsi_host_remove(shost); +free_host: + iscsi_host_free(shost); return NULL; } static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session) { - iscsi_r2tpool_free(class_to_transport_session(cls_session)); - iscsi_session_teardown(cls_session); + struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); + + iscsi_r2tpool_free(cls_session->dd_data); + + iscsi_host_remove(shost); + iscsi_host_free(shost); } static int iscsi_tcp_slave_configure(struct scsi_device *sdev) @@ -1971,14 +1961,11 @@ static struct iscsi_transport iscsi_tcp_transport = { ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN | ISCSI_FAST_ABORT | ISCSI_ABORT_TMO | ISCSI_LU_RESET_TMO | - ISCSI_PING_TMO | ISCSI_RECV_TMO, + ISCSI_PING_TMO | ISCSI_RECV_TMO | + ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME, .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS | ISCSI_HOST_INITIATOR_NAME | ISCSI_HOST_NETDEV_NAME, - .host_template = &iscsi_sht, - .conndata_size = sizeof(struct iscsi_conn), - .max_conn = 1, - .max_cmd_len = 16, /* session management */ .create_session = 
iscsi_tcp_session_create, .destroy_session = iscsi_tcp_session_destroy, @@ -1992,16 +1979,14 @@ static struct iscsi_transport iscsi_tcp_transport = { .start_conn = iscsi_conn_start, .stop_conn = iscsi_tcp_conn_stop, /* iscsi host params */ - .get_host_param = iscsi_tcp_host_get_param, + .get_host_param = iscsi_host_get_param, .set_host_param = iscsi_host_set_param, /* IO */ .send_pdu = iscsi_conn_send_pdu, .get_stats = iscsi_conn_get_stats, - .init_cmd_task = iscsi_tcp_ctask_init, - .init_mgmt_task = iscsi_tcp_mtask_init, - .xmit_cmd_task = iscsi_tcp_ctask_xmit, - .xmit_mgmt_task = iscsi_tcp_mtask_xmit, - .cleanup_cmd_task = iscsi_tcp_cleanup_ctask, + .init_task = iscsi_tcp_task_init, + .xmit_task = iscsi_tcp_task_xmit, + .cleanup_task = iscsi_tcp_cleanup_task, /* recovery */ .session_recovery_timedout = iscsi_session_recovery_timedout, }; @@ -2014,9 +1999,10 @@ iscsi_tcp_init(void) iscsi_max_lun); return -EINVAL; } - iscsi_tcp_transport.max_lun = iscsi_max_lun; - if (!iscsi_register_transport(&iscsi_tcp_transport)) + iscsi_tcp_scsi_transport = iscsi_register_transport( + &iscsi_tcp_transport); + if (!iscsi_tcp_scsi_transport) return -ENODEV; return 0; diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h index ed0b991d1e72..498d8ca39848 100644 --- a/drivers/scsi/iscsi_tcp.h +++ b/drivers/scsi/iscsi_tcp.h @@ -103,11 +103,6 @@ struct iscsi_data_task { char hdrext[ISCSI_DIGEST_SIZE];/* Header-Digest */ }; -struct iscsi_tcp_mgmt_task { - struct iscsi_hdr hdr; - char hdrext[ISCSI_DIGEST_SIZE]; /* Header-Digest */ -}; - struct iscsi_r2t_info { __be32 ttt; /* copied from R2T */ __be32 exp_statsn; /* copied from R2T */ @@ -119,7 +114,7 @@ struct iscsi_r2t_info { struct iscsi_data_task dtask; /* Data-Out header buf */ }; -struct iscsi_tcp_cmd_task { +struct iscsi_tcp_task { struct iscsi_hdr_buff { struct iscsi_cmd cmd_hdr; char hdrextbuf[ISCSI_MAX_AHS_SIZE + diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index b43bf1d60dac..299e075a7b34 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -38,14 +38,6 @@ #include <scsi/scsi_transport_iscsi.h> #include <scsi/libiscsi.h> -struct iscsi_session * -class_to_transport_session(struct iscsi_cls_session *cls_session) -{ - struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); - return iscsi_hostdata(shost->hostdata); -} -EXPORT_SYMBOL_GPL(class_to_transport_session); - /* Serial Number Arithmetic, 32 bits, less than, RFC1982 */ #define SNA32_CHECK 2147483648UL @@ -87,68 +79,70 @@ iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr) * xmit thread */ if (!list_empty(&session->leadconn->xmitqueue) || - !list_empty(&session->leadconn->mgmtqueue)) - scsi_queue_work(session->host, - &session->leadconn->xmitwork); + !list_empty(&session->leadconn->mgmtqueue)) { + if (!(session->tt->caps & CAP_DATA_PATH_OFFLOAD)) + scsi_queue_work(session->host, + &session->leadconn->xmitwork); + } } } EXPORT_SYMBOL_GPL(iscsi_update_cmdsn); -void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *ctask, +void iscsi_prep_unsolicit_data_pdu(struct iscsi_task *task, struct iscsi_data *hdr) { - struct iscsi_conn *conn = ctask->conn; + struct iscsi_conn *conn = task->conn; memset(hdr, 0, sizeof(struct iscsi_data)); hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG); - hdr->datasn = cpu_to_be32(ctask->unsol_datasn); - ctask->unsol_datasn++; + hdr->datasn = cpu_to_be32(task->unsol_datasn); + task->unsol_datasn++; hdr->opcode = ISCSI_OP_SCSI_DATA_OUT; - memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun)); + 
memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun)); - hdr->itt = ctask->hdr->itt; + hdr->itt = task->hdr->itt; hdr->exp_statsn = cpu_to_be32(conn->exp_statsn); - hdr->offset = cpu_to_be32(ctask->unsol_offset); + hdr->offset = cpu_to_be32(task->unsol_offset); - if (ctask->unsol_count > conn->max_xmit_dlength) { + if (task->unsol_count > conn->max_xmit_dlength) { hton24(hdr->dlength, conn->max_xmit_dlength); - ctask->data_count = conn->max_xmit_dlength; - ctask->unsol_offset += ctask->data_count; + task->data_count = conn->max_xmit_dlength; + task->unsol_offset += task->data_count; hdr->flags = 0; } else { - hton24(hdr->dlength, ctask->unsol_count); - ctask->data_count = ctask->unsol_count; + hton24(hdr->dlength, task->unsol_count); + task->data_count = task->unsol_count; hdr->flags = ISCSI_FLAG_CMD_FINAL; } } EXPORT_SYMBOL_GPL(iscsi_prep_unsolicit_data_pdu); -static int iscsi_add_hdr(struct iscsi_cmd_task *ctask, unsigned len) +static int iscsi_add_hdr(struct iscsi_task *task, unsigned len) { - unsigned exp_len = ctask->hdr_len + len; + unsigned exp_len = task->hdr_len + len; - if (exp_len > ctask->hdr_max) { + if (exp_len > task->hdr_max) { WARN_ON(1); return -EINVAL; } WARN_ON(len & (ISCSI_PAD_LEN - 1)); /* caller must pad the AHS */ - ctask->hdr_len = exp_len; + task->hdr_len = exp_len; return 0; } /* * make an extended cdb AHS */ -static int iscsi_prep_ecdb_ahs(struct iscsi_cmd_task *ctask) +static int iscsi_prep_ecdb_ahs(struct iscsi_task *task) { - struct scsi_cmnd *cmd = ctask->sc; + struct scsi_cmnd *cmd = task->sc; unsigned rlen, pad_len; unsigned short ahslength; struct iscsi_ecdb_ahdr *ecdb_ahdr; int rc; - ecdb_ahdr = iscsi_next_hdr(ctask); + ecdb_ahdr = iscsi_next_hdr(task); rlen = cmd->cmd_len - ISCSI_CDB_SIZE; BUG_ON(rlen > sizeof(ecdb_ahdr->ecdb)); @@ -156,7 +150,7 @@ static int iscsi_prep_ecdb_ahs(struct iscsi_cmd_task *ctask) pad_len = iscsi_padding(rlen); - rc = iscsi_add_hdr(ctask, sizeof(ecdb_ahdr->ahslength) + + rc = iscsi_add_hdr(task, sizeof(ecdb_ahdr->ahslength) + sizeof(ecdb_ahdr->ahstype) + ahslength + pad_len); if (rc) return rc; @@ -171,19 +165,19 @@ static int iscsi_prep_ecdb_ahs(struct iscsi_cmd_task *ctask) debug_scsi("iscsi_prep_ecdb_ahs: varlen_cdb_len %d " "rlen %d pad_len %d ahs_length %d iscsi_headers_size %u\n", - cmd->cmd_len, rlen, pad_len, ahslength, ctask->hdr_len); + cmd->cmd_len, rlen, pad_len, ahslength, task->hdr_len); return 0; } -static int iscsi_prep_bidi_ahs(struct iscsi_cmd_task *ctask) +static int iscsi_prep_bidi_ahs(struct iscsi_task *task) { - struct scsi_cmnd *sc = ctask->sc; + struct scsi_cmnd *sc = task->sc; struct iscsi_rlength_ahdr *rlen_ahdr; int rc; - rlen_ahdr = iscsi_next_hdr(ctask); - rc = iscsi_add_hdr(ctask, sizeof(*rlen_ahdr)); + rlen_ahdr = iscsi_next_hdr(task); + rc = iscsi_add_hdr(task, sizeof(*rlen_ahdr)); if (rc) return rc; @@ -203,28 +197,28 @@ static int iscsi_prep_bidi_ahs(struct iscsi_cmd_task *ctask) /** * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu - * @ctask: iscsi cmd task + * @task: iscsi task * * Prep basic iSCSI PDU fields for a scsi cmd pdu. 
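iscsi_prep_unsolicit_data_pdu above carves the outstanding unsolicited data into Data-Out PDUs no larger than max_xmit_dlength and flags the last one final. A condensed sketch of that chunking, folding the offset/count bookkeeping that the driver splits between prep and xmit into one helper (illustrative only):

#include <stdio.h>

struct pdu { unsigned offset, dlength; int final; };

/* Carve the next unsolicited Data-Out PDU out of what is still owed. */
static struct pdu next_unsol_pdu(unsigned *unsol_offset, unsigned *unsol_count,
				 unsigned max_xmit_dlength)
{
	struct pdu p = { .offset = *unsol_offset };

	if (*unsol_count > max_xmit_dlength) {
		p.dlength = max_xmit_dlength;
		p.final = 0;
	} else {
		p.dlength = *unsol_count;
		p.final = 1;	/* last chunk: would set ISCSI_FLAG_CMD_FINAL */
	}
	*unsol_offset += p.dlength;
	*unsol_count -= p.dlength;
	return p;
}

int main(void)
{
	unsigned off = 0, count = 24 * 1024;

	while (count) {
		struct pdu p = next_unsol_pdu(&off, &count, 8192);
		printf("off %u len %u final %d\n", p.offset, p.dlength, p.final);
	}
	return 0;
}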
The LLD should set * fields like dlength or final based on how much data it sends */ -static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask) +static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) { - struct iscsi_conn *conn = ctask->conn; + struct iscsi_conn *conn = task->conn; struct iscsi_session *session = conn->session; - struct iscsi_cmd *hdr = ctask->hdr; - struct scsi_cmnd *sc = ctask->sc; + struct iscsi_cmd *hdr = task->hdr; + struct scsi_cmnd *sc = task->sc; unsigned hdrlength, cmd_len; int rc; - ctask->hdr_len = 0; - rc = iscsi_add_hdr(ctask, sizeof(*hdr)); + task->hdr_len = 0; + rc = iscsi_add_hdr(task, sizeof(*hdr)); if (rc) return rc; hdr->opcode = ISCSI_OP_SCSI_CMD; hdr->flags = ISCSI_ATTR_SIMPLE; int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun); - hdr->itt = build_itt(ctask->itt, session->age); + hdr->itt = build_itt(task->itt, session->age); hdr->cmdsn = cpu_to_be32(session->cmdsn); session->cmdsn++; hdr->exp_statsn = cpu_to_be32(conn->exp_statsn); @@ -232,17 +226,17 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask) if (cmd_len < ISCSI_CDB_SIZE) memset(&hdr->cdb[cmd_len], 0, ISCSI_CDB_SIZE - cmd_len); else if (cmd_len > ISCSI_CDB_SIZE) { - rc = iscsi_prep_ecdb_ahs(ctask); + rc = iscsi_prep_ecdb_ahs(task); if (rc) return rc; cmd_len = ISCSI_CDB_SIZE; } memcpy(hdr->cdb, sc->cmnd, cmd_len); - ctask->imm_count = 0; + task->imm_count = 0; if (scsi_bidi_cmnd(sc)) { hdr->flags |= ISCSI_FLAG_CMD_READ; - rc = iscsi_prep_bidi_ahs(ctask); + rc = iscsi_prep_bidi_ahs(task); if (rc) return rc; } @@ -264,28 +258,28 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask) * * pad_count bytes to be sent as zero-padding */ - ctask->unsol_count = 0; - ctask->unsol_offset = 0; - ctask->unsol_datasn = 0; + task->unsol_count = 0; + task->unsol_offset = 0; + task->unsol_datasn = 0; if (session->imm_data_en) { if (out_len >= session->first_burst) - ctask->imm_count = min(session->first_burst, + task->imm_count = min(session->first_burst, conn->max_xmit_dlength); else - ctask->imm_count = min(out_len, + task->imm_count = min(out_len, conn->max_xmit_dlength); - hton24(hdr->dlength, ctask->imm_count); + hton24(hdr->dlength, task->imm_count); } else zero_data(hdr->dlength); if (!session->initial_r2t_en) { - ctask->unsol_count = min(session->first_burst, out_len) - - ctask->imm_count; - ctask->unsol_offset = ctask->imm_count; + task->unsol_count = min(session->first_burst, out_len) + - task->imm_count; + task->unsol_offset = task->imm_count; } - if (!ctask->unsol_count) + if (!task->unsol_count) /* No unsolicit Data-Out's */ hdr->flags |= ISCSI_FLAG_CMD_FINAL; } else { @@ -298,7 +292,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask) } /* calculate size of additional header segments (AHSs) */ - hdrlength = ctask->hdr_len - sizeof(*hdr); + hdrlength = task->hdr_len - sizeof(*hdr); WARN_ON(hdrlength & (ISCSI_PAD_LEN-1)); hdrlength /= ISCSI_PAD_LEN; @@ -306,76 +300,115 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask) WARN_ON(hdrlength >= 256); hdr->hlength = hdrlength & 0xFF; - if (conn->session->tt->init_cmd_task(conn->ctask)) - return EIO; + if (conn->session->tt->init_task && + conn->session->tt->init_task(task)) + return -EIO; + + task->state = ISCSI_TASK_RUNNING; + list_move_tail(&task->running, &conn->run_list); conn->scsicmd_pdus_cnt++; - debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x " - "len %d bidi_len %d cmdsn %d win %d]\n", - scsi_bidi_cmnd(sc) ? 
"bidirectional" : - sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read", - conn->id, sc, sc->cmnd[0], ctask->itt, - scsi_bufflen(sc), scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0, - session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1); + debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x len %d " + "bidi_len %d cmdsn %d win %d]\n", scsi_bidi_cmnd(sc) ? + "bidirectional" : sc->sc_data_direction == DMA_TO_DEVICE ? + "write" : "read", conn->id, sc, sc->cmnd[0], task->itt, + scsi_bufflen(sc), + scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0, + session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1); return 0; } /** - * iscsi_complete_command - return command back to scsi-ml - * @ctask: iscsi cmd task + * iscsi_complete_command - finish a task + * @task: iscsi cmd task * * Must be called with session lock. - * This function returns the scsi command to scsi-ml and returns - * the cmd task to the pool of available cmd tasks. + * This function returns the scsi command to scsi-ml or cleans + * up mgmt tasks then returns the task to the pool. */ -static void iscsi_complete_command(struct iscsi_cmd_task *ctask) +static void iscsi_complete_command(struct iscsi_task *task) { - struct iscsi_conn *conn = ctask->conn; + struct iscsi_conn *conn = task->conn; struct iscsi_session *session = conn->session; - struct scsi_cmnd *sc = ctask->sc; + struct scsi_cmnd *sc = task->sc; - ctask->state = ISCSI_TASK_COMPLETED; - ctask->sc = NULL; - /* SCSI eh reuses commands to verify us */ - sc->SCp.ptr = NULL; - if (conn->ctask == ctask) - conn->ctask = NULL; - list_del_init(&ctask->running); - __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*)); - sc->scsi_done(sc); + list_del_init(&task->running); + task->state = ISCSI_TASK_COMPLETED; + task->sc = NULL; + + if (conn->task == task) + conn->task = NULL; + /* + * login task is preallocated so do not free + */ + if (conn->login_task == task) + return; + + __kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*)); + + if (conn->ping_task == task) + conn->ping_task = NULL; + + if (sc) { + task->sc = NULL; + /* SCSI eh reuses commands to verify us */ + sc->SCp.ptr = NULL; + /* + * queue command may call this to free the task, but + * not have setup the sc callback + */ + if (sc->scsi_done) + sc->scsi_done(sc); + } +} + +void __iscsi_get_task(struct iscsi_task *task) +{ + atomic_inc(&task->refcount); } +EXPORT_SYMBOL_GPL(__iscsi_get_task); -static void __iscsi_get_ctask(struct iscsi_cmd_task *ctask) +static void __iscsi_put_task(struct iscsi_task *task) { - atomic_inc(&ctask->refcount); + if (atomic_dec_and_test(&task->refcount)) + iscsi_complete_command(task); } -static void __iscsi_put_ctask(struct iscsi_cmd_task *ctask) +void iscsi_put_task(struct iscsi_task *task) { - if (atomic_dec_and_test(&ctask->refcount)) - iscsi_complete_command(ctask); + struct iscsi_session *session = task->conn->session; + + spin_lock_bh(&session->lock); + __iscsi_put_task(task); + spin_unlock_bh(&session->lock); } +EXPORT_SYMBOL_GPL(iscsi_put_task); /* * session lock must be held */ -static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, +static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task, int err) { struct scsi_cmnd *sc; - sc = ctask->sc; + sc = task->sc; if (!sc) return; - if (ctask->state == ISCSI_TASK_PENDING) + if (task->state == ISCSI_TASK_PENDING) /* * cmd never made it to the xmit thread, so we should not count * the cmd in the sequencing */ conn->session->queued_cmdsn--; else - 
conn->session->tt->cleanup_cmd_task(conn, ctask); + conn->session->tt->cleanup_task(conn, task); + /* + * Check if cleanup_task dropped the lock and the command completed, + */ + if (!task->sc) + return; sc->result = err; if (!scsi_bidi_cmnd(sc)) @@ -384,39 +417,63 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, scsi_out(sc)->resid = scsi_out(sc)->length; scsi_in(sc)->resid = scsi_in(sc)->length; } - if (conn->ctask == ctask) - conn->ctask = NULL; + + if (conn->task == task) + conn->task = NULL; /* release ref from queuecommand */ - __iscsi_put_ctask(ctask); + __iscsi_put_task(task); } -/** - * iscsi_free_mgmt_task - return mgmt task back to pool - * @conn: iscsi connection - * @mtask: mtask - * - * Must be called with session lock. - */ -void iscsi_free_mgmt_task(struct iscsi_conn *conn, - struct iscsi_mgmt_task *mtask) +static int iscsi_prep_mgmt_task(struct iscsi_conn *conn, + struct iscsi_task *task) { - list_del_init(&mtask->running); - if (conn->login_mtask == mtask) - return; + struct iscsi_session *session = conn->session; + struct iscsi_hdr *hdr = (struct iscsi_hdr *)task->hdr; + struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr; + + if (conn->session->state == ISCSI_STATE_LOGGING_OUT) + return -ENOTCONN; + + if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) && + hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE)) + nop->exp_statsn = cpu_to_be32(conn->exp_statsn); + /* + * pre-format CmdSN for outgoing PDU. + */ + nop->cmdsn = cpu_to_be32(session->cmdsn); + if (hdr->itt != RESERVED_ITT) { + hdr->itt = build_itt(task->itt, session->age); + /* + * TODO: We always use immediate, so we never hit this. + * If we start to send tmfs or nops as non-immediate then + * we should start checking the cmdsn numbers for mgmt tasks. + */ + if (conn->c_stage == ISCSI_CONN_STARTED && + !(hdr->opcode & ISCSI_OP_IMMEDIATE)) { + session->queued_cmdsn++; + session->cmdsn++; + } + } - if (conn->ping_mtask == mtask) - conn->ping_mtask = NULL; - __kfifo_put(conn->session->mgmtpool.queue, - (void*)&mtask, sizeof(void*)); + if (session->tt->init_task) + session->tt->init_task(task); + + if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT) + session->state = ISCSI_STATE_LOGGING_OUT; + + list_move_tail(&task->running, &conn->mgmt_run_list); + debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n", + hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt, + task->data_count); + return 0; } -EXPORT_SYMBOL_GPL(iscsi_free_mgmt_task); -static struct iscsi_mgmt_task * +static struct iscsi_task * __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, char *data, uint32_t data_size) { struct iscsi_session *session = conn->session; - struct iscsi_mgmt_task *mtask; + struct iscsi_task *task; if (session->state == ISCSI_STATE_TERMINATE) return NULL; @@ -426,29 +483,56 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, /* * Login and Text are sent serially, in * request-followed-by-response sequence. - * Same mtask can be used. Same ITT must be used. - * Note that login_mtask is preallocated at conn_create(). + * Same task can be used. Same ITT must be used. + * Note that login_task is preallocated at conn_create(). 
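Task lifetime here is reference counted: __iscsi_get_task/__iscsi_put_task above do the counting, the initial reference is set where the task is allocated, the xmit path pins the task while the session lock is dropped, and whoever drops the last reference completes it. A userspace sketch of the same get/put pattern (C11 atomics standing in for the kernel's atomic_t):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct task {
	atomic_int refcount;
	/* ... request state ... */
};

static void task_complete(struct task *t)
{
	printf("task %p completed and returned to pool\n", (void *)t);
	free(t);
}

static void task_get(struct task *t)
{
	atomic_fetch_add(&t->refcount, 1);
}

static void task_put(struct task *t)
{
	/* last reference dropped -> complete the command */
	if (atomic_fetch_sub(&t->refcount, 1) == 1)
		task_complete(t);
}

int main(void)
{
	struct task *t = calloc(1, sizeof(*t));

	atomic_init(&t->refcount, 1);	/* queuecommand's reference */
	task_get(t);			/* xmit path pins the task */
	task_put(t);			/* xmit done */
	task_put(t);			/* response received -> completes */
	return 0;
}

The extra reference around the unlocked xmit call is what lets the response path finish the task while the transmit thread still holds a pointer to it.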
*/ - mtask = conn->login_mtask; + task = conn->login_task; else { BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE); BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED); - if (!__kfifo_get(session->mgmtpool.queue, - (void*)&mtask, sizeof(void*))) + if (!__kfifo_get(session->cmdpool.queue, + (void*)&task, sizeof(void*))) return NULL; + + if ((hdr->opcode == (ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE)) && + hdr->ttt == RESERVED_ITT) { + conn->ping_task = task; + conn->last_ping = jiffies; + } } + /* + * released in complete pdu for task we expect a response for, and + * released by the lld when it has transmitted the task for + * pdus we do not expect a response for. + */ + atomic_set(&task->refcount, 1); + task->conn = conn; + task->sc = NULL; if (data_size) { - memcpy(mtask->data, data, data_size); - mtask->data_count = data_size; + memcpy(task->data, data, data_size); + task->data_count = data_size; + } else + task->data_count = 0; + + memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr)); + INIT_LIST_HEAD(&task->running); + list_add_tail(&task->running, &conn->mgmtqueue); + + if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) { + if (iscsi_prep_mgmt_task(conn, task)) { + __iscsi_put_task(task); + return NULL; + } + + if (session->tt->xmit_task(task)) + task = NULL; + } else - mtask->data_count = 0; + scsi_queue_work(conn->session->host, &conn->xmitwork); - memcpy(mtask->hdr, hdr, sizeof(struct iscsi_hdr)); - INIT_LIST_HEAD(&mtask->running); - list_add_tail(&mtask->running, &conn->mgmtqueue); - return mtask; + return task; } int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr, @@ -462,7 +546,6 @@ int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr, if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size)) err = -EPERM; spin_unlock_bh(&session->lock); - scsi_queue_work(session->host, &conn->xmitwork); return err; } EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu); @@ -471,7 +554,7 @@ EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu); * iscsi_cmd_rsp - SCSI Command Response processing * @conn: iscsi connection * @hdr: iscsi header - * @ctask: scsi command task + * @task: scsi command task * @data: cmd data buffer * @datalen: len of buffer * @@ -479,12 +562,12 @@ EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu); * then completes the command and task. 
**/ static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr, - struct iscsi_cmd_task *ctask, char *data, + struct iscsi_task *task, char *data, int datalen) { struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)hdr; struct iscsi_session *session = conn->session; - struct scsi_cmnd *sc = ctask->sc; + struct scsi_cmnd *sc = task->sc; iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr); conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1; @@ -508,7 +591,7 @@ invalid_datalen: goto out; } - senselen = be16_to_cpu(get_unaligned((__be16 *) data)); + senselen = get_unaligned_be16(data); if (datalen < senselen) goto invalid_datalen; @@ -544,10 +627,10 @@ invalid_datalen: } out: debug_scsi("done [sc %lx res %d itt 0x%x]\n", - (long)sc, sc->result, ctask->itt); + (long)sc, sc->result, task->itt); conn->scsirsp_pdus_cnt++; - __iscsi_put_ctask(ctask); + __iscsi_put_task(task); } static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr) @@ -572,9 +655,9 @@ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr) static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr) { struct iscsi_nopout hdr; - struct iscsi_mgmt_task *mtask; + struct iscsi_task *task; - if (!rhdr && conn->ping_mtask) + if (!rhdr && conn->ping_task) return; memset(&hdr, 0, sizeof(struct iscsi_nopout)); @@ -588,18 +671,9 @@ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr) } else hdr.ttt = RESERVED_ITT; - mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0); - if (!mtask) { + task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0); + if (!task) iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n"); - return; - } - - /* only track our nops */ - if (!rhdr) { - conn->ping_mtask = mtask; - conn->last_ping = jiffies; - } - scsi_queue_work(conn->session->host, &conn->xmitwork); } static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr, @@ -628,6 +702,31 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr, } /** + * iscsi_itt_to_task - look up task by itt + * @conn: iscsi connection + * @itt: itt + * + * This should be used for mgmt tasks like login and nops, or if + * the LDD's itt space does not include the session age. + * + * The session lock must be held. + */ +static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt) +{ + struct iscsi_session *session = conn->session; + uint32_t i; + + if (itt == RESERVED_ITT) + return NULL; + + i = get_itt(itt); + if (i >= session->cmds_max) + return NULL; + + return session->cmds[i]; +} + +/** * __iscsi_complete_pdu - complete pdu * @conn: iscsi conn * @hdr: iscsi header @@ -638,108 +737,28 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr, * queuecommand or send generic. session lock must be held and verify * itt must have been called. 
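iscsi_itt_to_task above, and iscsi_verify_itt further down, both rely on the itt carrying a session age in its high bits and an index into the preallocated command array in its low bits. A standalone sketch of that encoding and the two checks (the mask and shift values here are stand-ins, not the real ISCSI_AGE_MASK/ISCSI_AGE_SHIFT):

#include <stdint.h>
#include <stdio.h>

#define AGE_SHIFT	28
#define AGE_MASK	(0xfu << AGE_SHIFT)
#define INDEX_MASK	0x0fffffffu

static uint32_t build_itt(uint32_t index, uint32_t age)
{
	return (age << AGE_SHIFT) | (index & INDEX_MASK);
}

/* Return the array index, or -1 if the itt is stale or out of range. */
static int verify_itt(uint32_t itt, uint32_t session_age, uint32_t cmds_max)
{
	if (((itt & AGE_MASK) >> AGE_SHIFT) != session_age)
		return -1;	/* from a previous session age */
	if ((itt & INDEX_MASK) >= cmds_max)
		return -1;	/* index outside the command array */
	return itt & INDEX_MASK;
}

int main(void)
{
	uint32_t itt = build_itt(5, 2);

	printf("index %d\n", verify_itt(itt, 2, 128));	/* 5  */
	printf("index %d\n", verify_itt(itt, 3, 128));	/* -1 */
	return 0;
}

The age comparison is what catches responses that arrive after a session has been torn down and rebuilt with the same command indices.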
*/ -static int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, - char *data, int datalen) +int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, + char *data, int datalen) { struct iscsi_session *session = conn->session; int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0; - struct iscsi_cmd_task *ctask; - struct iscsi_mgmt_task *mtask; + struct iscsi_task *task; uint32_t itt; conn->last_recv = jiffies; + rc = iscsi_verify_itt(conn, hdr->itt); + if (rc) + return rc; + if (hdr->itt != RESERVED_ITT) itt = get_itt(hdr->itt); else itt = ~0U; - if (itt < session->cmds_max) { - ctask = session->cmds[itt]; - - debug_scsi("cmdrsp [op 0x%x cid %d itt 0x%x len %d]\n", - opcode, conn->id, ctask->itt, datalen); - - switch(opcode) { - case ISCSI_OP_SCSI_CMD_RSP: - BUG_ON((void*)ctask != ctask->sc->SCp.ptr); - iscsi_scsi_cmd_rsp(conn, hdr, ctask, data, - datalen); - break; - case ISCSI_OP_SCSI_DATA_IN: - BUG_ON((void*)ctask != ctask->sc->SCp.ptr); - if (hdr->flags & ISCSI_FLAG_DATA_STATUS) { - conn->scsirsp_pdus_cnt++; - __iscsi_put_ctask(ctask); - } - break; - case ISCSI_OP_R2T: - /* LLD handles this for now */ - break; - default: - rc = ISCSI_ERR_BAD_OPCODE; - break; - } - } else if (itt >= ISCSI_MGMT_ITT_OFFSET && - itt < ISCSI_MGMT_ITT_OFFSET + session->mgmtpool_max) { - mtask = session->mgmt_cmds[itt - ISCSI_MGMT_ITT_OFFSET]; - - debug_scsi("immrsp [op 0x%x cid %d itt 0x%x len %d]\n", - opcode, conn->id, mtask->itt, datalen); + debug_scsi("[op 0x%x cid %d itt 0x%x len %d]\n", + opcode, conn->id, itt, datalen); - iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr); - switch(opcode) { - case ISCSI_OP_LOGOUT_RSP: - if (datalen) { - rc = ISCSI_ERR_PROTO; - break; - } - conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; - /* fall through */ - case ISCSI_OP_LOGIN_RSP: - case ISCSI_OP_TEXT_RSP: - /* - * login related PDU's exp_statsn is handled in - * userspace - */ - if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen)) - rc = ISCSI_ERR_CONN_FAILED; - iscsi_free_mgmt_task(conn, mtask); - break; - case ISCSI_OP_SCSI_TMFUNC_RSP: - if (datalen) { - rc = ISCSI_ERR_PROTO; - break; - } - - iscsi_tmf_rsp(conn, hdr); - iscsi_free_mgmt_task(conn, mtask); - break; - case ISCSI_OP_NOOP_IN: - if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) || - datalen) { - rc = ISCSI_ERR_PROTO; - break; - } - conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; - - if (conn->ping_mtask != mtask) { - /* - * If this is not in response to one of our - * nops then it must be from userspace. - */ - if (iscsi_recv_pdu(conn->cls_conn, hdr, data, - datalen)) - rc = ISCSI_ERR_CONN_FAILED; - } else - mod_timer(&conn->transport_timer, - jiffies + conn->recv_timeout); - iscsi_free_mgmt_task(conn, mtask); - break; - default: - rc = ISCSI_ERR_BAD_OPCODE; - break; - } - } else if (itt == ~0U) { + if (itt == ~0U) { iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr); switch(opcode) { @@ -766,11 +785,104 @@ static int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, rc = ISCSI_ERR_BAD_OPCODE; break; } - } else - rc = ISCSI_ERR_BAD_ITT; + goto out; + } + switch(opcode) { + case ISCSI_OP_SCSI_CMD_RSP: + case ISCSI_OP_SCSI_DATA_IN: + task = iscsi_itt_to_ctask(conn, hdr->itt); + if (!task) + return ISCSI_ERR_BAD_ITT; + break; + case ISCSI_OP_R2T: + /* + * LLD handles R2Ts if they need to. 
+ */ + return 0; + case ISCSI_OP_LOGOUT_RSP: + case ISCSI_OP_LOGIN_RSP: + case ISCSI_OP_TEXT_RSP: + case ISCSI_OP_SCSI_TMFUNC_RSP: + case ISCSI_OP_NOOP_IN: + task = iscsi_itt_to_task(conn, hdr->itt); + if (!task) + return ISCSI_ERR_BAD_ITT; + break; + default: + return ISCSI_ERR_BAD_OPCODE; + } + + switch(opcode) { + case ISCSI_OP_SCSI_CMD_RSP: + iscsi_scsi_cmd_rsp(conn, hdr, task, data, datalen); + break; + case ISCSI_OP_SCSI_DATA_IN: + if (hdr->flags & ISCSI_FLAG_DATA_STATUS) { + conn->scsirsp_pdus_cnt++; + iscsi_update_cmdsn(session, + (struct iscsi_nopin*) hdr); + __iscsi_put_task(task); + } + break; + case ISCSI_OP_LOGOUT_RSP: + iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr); + if (datalen) { + rc = ISCSI_ERR_PROTO; + break; + } + conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; + goto recv_pdu; + case ISCSI_OP_LOGIN_RSP: + case ISCSI_OP_TEXT_RSP: + iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr); + /* + * login related PDU's exp_statsn is handled in + * userspace + */ + goto recv_pdu; + case ISCSI_OP_SCSI_TMFUNC_RSP: + iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr); + if (datalen) { + rc = ISCSI_ERR_PROTO; + break; + } + + iscsi_tmf_rsp(conn, hdr); + __iscsi_put_task(task); + break; + case ISCSI_OP_NOOP_IN: + iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr); + if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) || datalen) { + rc = ISCSI_ERR_PROTO; + break; + } + conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; + + if (conn->ping_task != task) + /* + * If this is not in response to one of our + * nops then it must be from userspace. + */ + goto recv_pdu; + + mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout); + __iscsi_put_task(task); + break; + default: + rc = ISCSI_ERR_BAD_OPCODE; + break; + } + +out: + return rc; +recv_pdu: + if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen)) + rc = ISCSI_ERR_CONN_FAILED; + __iscsi_put_task(task); return rc; } +EXPORT_SYMBOL_GPL(__iscsi_complete_pdu); int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, char *data, int datalen) @@ -784,51 +896,63 @@ int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, } EXPORT_SYMBOL_GPL(iscsi_complete_pdu); -/* verify itt (itt encoding: age+cid+itt) */ -int iscsi_verify_itt(struct iscsi_conn *conn, struct iscsi_hdr *hdr, - uint32_t *ret_itt) +int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt) { struct iscsi_session *session = conn->session; - struct iscsi_cmd_task *ctask; - uint32_t itt; + uint32_t i; - if (hdr->itt != RESERVED_ITT) { - if (((__force u32)hdr->itt & ISCSI_AGE_MASK) != - (session->age << ISCSI_AGE_SHIFT)) { - iscsi_conn_printk(KERN_ERR, conn, - "received itt %x expected session " - "age (%x)\n", (__force u32)hdr->itt, - session->age & ISCSI_AGE_MASK); - return ISCSI_ERR_BAD_ITT; - } + if (itt == RESERVED_ITT) + return 0; - itt = get_itt(hdr->itt); - } else - itt = ~0U; + if (((__force u32)itt & ISCSI_AGE_MASK) != + (session->age << ISCSI_AGE_SHIFT)) { + iscsi_conn_printk(KERN_ERR, conn, + "received itt %x expected session age (%x)\n", + (__force u32)itt, session->age); + return ISCSI_ERR_BAD_ITT; + } - if (itt < session->cmds_max) { - ctask = session->cmds[itt]; + i = get_itt(itt); + if (i >= session->cmds_max) { + iscsi_conn_printk(KERN_ERR, conn, + "received invalid itt index %u (max cmds " + "%u.\n", i, session->cmds_max); + return ISCSI_ERR_BAD_ITT; + } + return 0; +} +EXPORT_SYMBOL_GPL(iscsi_verify_itt); - if (!ctask->sc) { - iscsi_conn_printk(KERN_INFO, conn, "dropping ctask " - "with itt 0x%x\n", ctask->itt); - 
/* force drop */ - return ISCSI_ERR_NO_SCSI_CMD; - } +/** + * iscsi_itt_to_ctask - look up ctask by itt + * @conn: iscsi connection + * @itt: itt + * + * This should be used for cmd tasks. + * + * The session lock must be held. + */ +struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt) +{ + struct iscsi_task *task; - if (ctask->sc->SCp.phase != session->age) { - iscsi_conn_printk(KERN_ERR, conn, - "iscsi: ctask's session age %d, " - "expected %d\n", ctask->sc->SCp.phase, - session->age); - return ISCSI_ERR_SESSION_FAILED; - } + if (iscsi_verify_itt(conn, itt)) + return NULL; + + task = iscsi_itt_to_task(conn, itt); + if (!task || !task->sc) + return NULL; + + if (task->sc->SCp.phase != conn->session->age) { + iscsi_session_printk(KERN_ERR, conn->session, + "task's session age %d, expected %d\n", + task->sc->SCp.phase, conn->session->age); + return NULL; } - *ret_itt = itt; - return 0; + return task; } -EXPORT_SYMBOL_GPL(iscsi_verify_itt); +EXPORT_SYMBOL_GPL(iscsi_itt_to_ctask); void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err) { @@ -850,61 +974,6 @@ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err) } EXPORT_SYMBOL_GPL(iscsi_conn_failure); -static void iscsi_prep_mtask(struct iscsi_conn *conn, - struct iscsi_mgmt_task *mtask) -{ - struct iscsi_session *session = conn->session; - struct iscsi_hdr *hdr = mtask->hdr; - struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr; - - if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) && - hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE)) - nop->exp_statsn = cpu_to_be32(conn->exp_statsn); - /* - * pre-format CmdSN for outgoing PDU. - */ - nop->cmdsn = cpu_to_be32(session->cmdsn); - if (hdr->itt != RESERVED_ITT) { - hdr->itt = build_itt(mtask->itt, session->age); - /* - * TODO: We always use immediate, so we never hit this. - * If we start to send tmfs or nops as non-immediate then - * we should start checking the cmdsn numbers for mgmt tasks. 
- */ - if (conn->c_stage == ISCSI_CONN_STARTED && - !(hdr->opcode & ISCSI_OP_IMMEDIATE)) { - session->queued_cmdsn++; - session->cmdsn++; - } - } - - if (session->tt->init_mgmt_task) - session->tt->init_mgmt_task(conn, mtask); - - debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n", - hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt, - mtask->data_count); -} - -static int iscsi_xmit_mtask(struct iscsi_conn *conn) -{ - struct iscsi_hdr *hdr = conn->mtask->hdr; - int rc; - - if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT) - conn->session->state = ISCSI_STATE_LOGGING_OUT; - spin_unlock_bh(&conn->session->lock); - - rc = conn->session->tt->xmit_mgmt_task(conn, conn->mtask); - spin_lock_bh(&conn->session->lock); - if (rc) - return rc; - - /* done with this in-progress mtask */ - conn->mtask = NULL; - return 0; -} - static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn) { struct iscsi_session *session = conn->session; @@ -922,37 +991,38 @@ static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn) return 0; } -static int iscsi_xmit_ctask(struct iscsi_conn *conn) +static int iscsi_xmit_task(struct iscsi_conn *conn) { - struct iscsi_cmd_task *ctask = conn->ctask; + struct iscsi_task *task = conn->task; int rc; - __iscsi_get_ctask(ctask); + __iscsi_get_task(task); spin_unlock_bh(&conn->session->lock); - rc = conn->session->tt->xmit_cmd_task(conn, ctask); + rc = conn->session->tt->xmit_task(task); spin_lock_bh(&conn->session->lock); - __iscsi_put_ctask(ctask); + __iscsi_put_task(task); if (!rc) - /* done with this ctask */ - conn->ctask = NULL; + /* done with this task */ + conn->task = NULL; return rc; } /** - * iscsi_requeue_ctask - requeue ctask to run from session workqueue - * @ctask: ctask to requeue + * iscsi_requeue_task - requeue task to run from session workqueue + * @task: task to requeue * - * LLDs that need to run a ctask from the session workqueue should call - * this. The session lock must be held. + * LLDs that need to run a task from the session workqueue should call + * this. The session lock must be held. This should only be called + * by software drivers. 
*/ -void iscsi_requeue_ctask(struct iscsi_cmd_task *ctask) +void iscsi_requeue_task(struct iscsi_task *task) { - struct iscsi_conn *conn = ctask->conn; + struct iscsi_conn *conn = task->conn; - list_move_tail(&ctask->running, &conn->requeue); + list_move_tail(&task->running, &conn->requeue); scsi_queue_work(conn->session->host, &conn->xmitwork); } -EXPORT_SYMBOL_GPL(iscsi_requeue_ctask); +EXPORT_SYMBOL_GPL(iscsi_requeue_task); /** * iscsi_data_xmit - xmit any command into the scheduled connection @@ -974,14 +1044,8 @@ static int iscsi_data_xmit(struct iscsi_conn *conn) return -ENODATA; } - if (conn->ctask) { - rc = iscsi_xmit_ctask(conn); - if (rc) - goto again; - } - - if (conn->mtask) { - rc = iscsi_xmit_mtask(conn); + if (conn->task) { + rc = iscsi_xmit_task(conn); if (rc) goto again; } @@ -993,17 +1057,14 @@ static int iscsi_data_xmit(struct iscsi_conn *conn) */ check_mgmt: while (!list_empty(&conn->mgmtqueue)) { - conn->mtask = list_entry(conn->mgmtqueue.next, - struct iscsi_mgmt_task, running); - if (conn->session->state == ISCSI_STATE_LOGGING_OUT) { - iscsi_free_mgmt_task(conn, conn->mtask); - conn->mtask = NULL; + conn->task = list_entry(conn->mgmtqueue.next, + struct iscsi_task, running); + if (iscsi_prep_mgmt_task(conn, conn->task)) { + __iscsi_put_task(conn->task); + conn->task = NULL; continue; } - - iscsi_prep_mtask(conn, conn->mtask); - list_move_tail(conn->mgmtqueue.next, &conn->mgmt_run_list); - rc = iscsi_xmit_mtask(conn); + rc = iscsi_xmit_task(conn); if (rc) goto again; } @@ -1013,24 +1074,21 @@ check_mgmt: if (conn->tmf_state == TMF_QUEUED) break; - conn->ctask = list_entry(conn->xmitqueue.next, - struct iscsi_cmd_task, running); + conn->task = list_entry(conn->xmitqueue.next, + struct iscsi_task, running); if (conn->session->state == ISCSI_STATE_LOGGING_OUT) { - fail_command(conn, conn->ctask, DID_IMM_RETRY << 16); + fail_command(conn, conn->task, DID_IMM_RETRY << 16); continue; } - if (iscsi_prep_scsi_cmd_pdu(conn->ctask)) { - fail_command(conn, conn->ctask, DID_ABORT << 16); + if (iscsi_prep_scsi_cmd_pdu(conn->task)) { + fail_command(conn, conn->task, DID_ABORT << 16); continue; } - - conn->ctask->state = ISCSI_TASK_RUNNING; - list_move_tail(conn->xmitqueue.next, &conn->run_list); - rc = iscsi_xmit_ctask(conn); + rc = iscsi_xmit_task(conn); if (rc) goto again; /* - * we could continuously get new ctask requests so + * we could continuously get new task requests so * we need to check the mgmt queue for nops that need to * be sent to aviod starvation */ @@ -1048,11 +1106,11 @@ check_mgmt: if (conn->session->state == ISCSI_STATE_LOGGING_OUT) break; - conn->ctask = list_entry(conn->requeue.next, - struct iscsi_cmd_task, running); - conn->ctask->state = ISCSI_TASK_RUNNING; + conn->task = list_entry(conn->requeue.next, + struct iscsi_task, running); + conn->task->state = ISCSI_TASK_RUNNING; list_move_tail(conn->requeue.next, &conn->run_list); - rc = iscsi_xmit_ctask(conn); + rc = iscsi_xmit_task(conn); if (rc) goto again; if (!list_empty(&conn->mgmtqueue)) @@ -1096,11 +1154,12 @@ enum { int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) { + struct iscsi_cls_session *cls_session; struct Scsi_Host *host; int reason = 0; struct iscsi_session *session; struct iscsi_conn *conn; - struct iscsi_cmd_task *ctask = NULL; + struct iscsi_task *task = NULL; sc->scsi_done = done; sc->result = 0; @@ -1109,10 +1168,11 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) host = sc->device->host; spin_unlock(host->host_lock); - 
session = iscsi_hostdata(host->hostdata); + cls_session = starget_to_session(scsi_target(sc->device)); + session = cls_session->dd_data; spin_lock(&session->lock); - reason = iscsi_session_chkready(session_to_cls(session)); + reason = iscsi_session_chkready(cls_session); if (reason) { sc->result = reason; goto fault; @@ -1167,26 +1227,39 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) goto reject; } - if (!__kfifo_get(session->cmdpool.queue, (void*)&ctask, + if (!__kfifo_get(session->cmdpool.queue, (void*)&task, sizeof(void*))) { reason = FAILURE_OOM; goto reject; } - session->queued_cmdsn++; - sc->SCp.phase = session->age; - sc->SCp.ptr = (char *)ctask; - - atomic_set(&ctask->refcount, 1); - ctask->state = ISCSI_TASK_PENDING; - ctask->conn = conn; - ctask->sc = sc; - INIT_LIST_HEAD(&ctask->running); + sc->SCp.ptr = (char *)task; + + atomic_set(&task->refcount, 1); + task->state = ISCSI_TASK_PENDING; + task->conn = conn; + task->sc = sc; + INIT_LIST_HEAD(&task->running); + list_add_tail(&task->running, &conn->xmitqueue); + + if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) { + if (iscsi_prep_scsi_cmd_pdu(task)) { + sc->result = DID_ABORT << 16; + sc->scsi_done = NULL; + iscsi_complete_command(task); + goto fault; + } + if (session->tt->xmit_task(task)) { + sc->scsi_done = NULL; + iscsi_complete_command(task); + reason = FAILURE_SESSION_NOT_READY; + goto reject; + } + } else + scsi_queue_work(session->host, &conn->xmitwork); - list_add_tail(&ctask->running, &conn->xmitqueue); + session->queued_cmdsn++; spin_unlock(&session->lock); - - scsi_queue_work(host, &conn->xmitwork); spin_lock(host->host_lock); return 0; @@ -1205,7 +1278,7 @@ fault: scsi_out(sc)->resid = scsi_out(sc)->length; scsi_in(sc)->resid = scsi_in(sc)->length; } - sc->scsi_done(sc); + done(sc); spin_lock(host->host_lock); return 0; } @@ -1222,7 +1295,7 @@ EXPORT_SYMBOL_GPL(iscsi_change_queue_depth); void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session) { - struct iscsi_session *session = class_to_transport_session(cls_session); + struct iscsi_session *session = cls_session->dd_data; spin_lock_bh(&session->lock); if (session->state != ISCSI_STATE_LOGGED_IN) { @@ -1236,9 +1309,13 @@ EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout); int iscsi_eh_host_reset(struct scsi_cmnd *sc) { - struct Scsi_Host *host = sc->device->host; - struct iscsi_session *session = iscsi_hostdata(host->hostdata); - struct iscsi_conn *conn = session->leadconn; + struct iscsi_cls_session *cls_session; + struct iscsi_session *session; + struct iscsi_conn *conn; + + cls_session = starget_to_session(scsi_target(sc->device)); + session = cls_session->dd_data; + conn = session->leadconn; mutex_lock(&session->eh_mutex); spin_lock_bh(&session->lock); @@ -1300,11 +1377,11 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn, int timeout) { struct iscsi_session *session = conn->session; - struct iscsi_mgmt_task *mtask; + struct iscsi_task *task; - mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr, + task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); - if (!mtask) { + if (!task) { spin_unlock_bh(&session->lock); iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); spin_lock_bh(&session->lock); @@ -1320,7 +1397,6 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn, spin_unlock_bh(&session->lock); mutex_unlock(&session->eh_mutex); - scsi_queue_work(session->host, &conn->xmitwork); /* * block eh thread until: @@ -1339,7 +1415,7 @@ static int 
iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn, mutex_lock(&session->eh_mutex); spin_lock_bh(&session->lock); - /* if the session drops it will clean up the mtask */ + /* if the session drops it will clean up the task */ if (age != session->age || session->state != ISCSI_STATE_LOGGED_IN) return -ENOTCONN; @@ -1353,48 +1429,51 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn, static void fail_all_commands(struct iscsi_conn *conn, unsigned lun, int error) { - struct iscsi_cmd_task *ctask, *tmp; + struct iscsi_task *task, *tmp; - if (conn->ctask && (conn->ctask->sc->device->lun == lun || lun == -1)) - conn->ctask = NULL; + if (conn->task && (conn->task->sc->device->lun == lun || lun == -1)) + conn->task = NULL; /* flush pending */ - list_for_each_entry_safe(ctask, tmp, &conn->xmitqueue, running) { - if (lun == ctask->sc->device->lun || lun == -1) { + list_for_each_entry_safe(task, tmp, &conn->xmitqueue, running) { + if (lun == task->sc->device->lun || lun == -1) { debug_scsi("failing pending sc %p itt 0x%x\n", - ctask->sc, ctask->itt); - fail_command(conn, ctask, error << 16); + task->sc, task->itt); + fail_command(conn, task, error << 16); } } - list_for_each_entry_safe(ctask, tmp, &conn->requeue, running) { - if (lun == ctask->sc->device->lun || lun == -1) { + list_for_each_entry_safe(task, tmp, &conn->requeue, running) { + if (lun == task->sc->device->lun || lun == -1) { debug_scsi("failing requeued sc %p itt 0x%x\n", - ctask->sc, ctask->itt); - fail_command(conn, ctask, error << 16); + task->sc, task->itt); + fail_command(conn, task, error << 16); } } /* fail all other running */ - list_for_each_entry_safe(ctask, tmp, &conn->run_list, running) { - if (lun == ctask->sc->device->lun || lun == -1) { + list_for_each_entry_safe(task, tmp, &conn->run_list, running) { + if (lun == task->sc->device->lun || lun == -1) { debug_scsi("failing in progress sc %p itt 0x%x\n", - ctask->sc, ctask->itt); - fail_command(conn, ctask, DID_BUS_BUSY << 16); + task->sc, task->itt); + fail_command(conn, task, DID_BUS_BUSY << 16); } } } -static void iscsi_suspend_tx(struct iscsi_conn *conn) +void iscsi_suspend_tx(struct iscsi_conn *conn) { set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); - scsi_flush_work(conn->session->host); + if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD)) + scsi_flush_work(conn->session->host); } +EXPORT_SYMBOL_GPL(iscsi_suspend_tx); static void iscsi_start_tx(struct iscsi_conn *conn) { clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); - scsi_queue_work(conn->session->host, &conn->xmitwork); + if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD)) + scsi_queue_work(conn->session->host, &conn->xmitwork); } static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd) @@ -1405,7 +1484,7 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd) enum scsi_eh_timer_return rc = EH_NOT_HANDLED; cls_session = starget_to_session(scsi_target(scmd->device)); - session = class_to_transport_session(cls_session); + session = cls_session->dd_data; debug_scsi("scsi cmd %p timedout\n", scmd); @@ -1443,7 +1522,7 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd) jiffies)) rc = EH_RESET_TIMER; /* if in the middle of checking the transport then give us more time */ - if (conn->ping_mtask) + if (conn->ping_task) rc = EH_RESET_TIMER; done: spin_unlock(&session->lock); @@ -1467,7 +1546,7 @@ static void iscsi_check_transport_timeouts(unsigned long data) recv_timeout *= HZ; last_recv = conn->last_recv; - if (conn->ping_mtask 
&& + if (conn->ping_task && time_before_eq(conn->last_ping + (conn->ping_timeout * HZ), jiffies)) { iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs " @@ -1493,27 +1572,30 @@ done: spin_unlock(&session->lock); } -static void iscsi_prep_abort_task_pdu(struct iscsi_cmd_task *ctask, +static void iscsi_prep_abort_task_pdu(struct iscsi_task *task, struct iscsi_tm *hdr) { memset(hdr, 0, sizeof(*hdr)); hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE; hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK; hdr->flags |= ISCSI_FLAG_CMD_FINAL; - memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun)); - hdr->rtt = ctask->hdr->itt; - hdr->refcmdsn = ctask->hdr->cmdsn; + memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun)); + hdr->rtt = task->hdr->itt; + hdr->refcmdsn = task->hdr->cmdsn; } int iscsi_eh_abort(struct scsi_cmnd *sc) { - struct Scsi_Host *host = sc->device->host; - struct iscsi_session *session = iscsi_hostdata(host->hostdata); + struct iscsi_cls_session *cls_session; + struct iscsi_session *session; struct iscsi_conn *conn; - struct iscsi_cmd_task *ctask; + struct iscsi_task *task; struct iscsi_tm *hdr; int rc, age; + cls_session = starget_to_session(scsi_target(sc->device)); + session = cls_session->dd_data; + mutex_lock(&session->eh_mutex); spin_lock_bh(&session->lock); /* @@ -1542,17 +1624,17 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) conn->eh_abort_cnt++; age = session->age; - ctask = (struct iscsi_cmd_task *)sc->SCp.ptr; - debug_scsi("aborting [sc %p itt 0x%x]\n", sc, ctask->itt); + task = (struct iscsi_task *)sc->SCp.ptr; + debug_scsi("aborting [sc %p itt 0x%x]\n", sc, task->itt); - /* ctask completed before time out */ - if (!ctask->sc) { + /* task completed before time out */ + if (!task->sc) { debug_scsi("sc completed while abort in progress\n"); goto success; } - if (ctask->state == ISCSI_TASK_PENDING) { - fail_command(conn, ctask, DID_ABORT << 16); + if (task->state == ISCSI_TASK_PENDING) { + fail_command(conn, task, DID_ABORT << 16); goto success; } @@ -1562,7 +1644,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) conn->tmf_state = TMF_QUEUED; hdr = &conn->tmhdr; - iscsi_prep_abort_task_pdu(ctask, hdr); + iscsi_prep_abort_task_pdu(task, hdr); if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout)) { rc = FAILED; @@ -1572,16 +1654,20 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) switch (conn->tmf_state) { case TMF_SUCCESS: spin_unlock_bh(&session->lock); + /* + * stop tx side incase the target had sent a abort rsp but + * the initiator was still writing out data. + */ iscsi_suspend_tx(conn); /* - * clean up task if aborted. grab the recv lock as a writer + * we do not stop the recv side because targets have been + * good and have never sent us a successful tmf response + * then sent more data for the cmd. 
*/ - write_lock_bh(conn->recv_lock); spin_lock(&session->lock); - fail_command(conn, ctask, DID_ABORT << 16); + fail_command(conn, task, DID_ABORT << 16); conn->tmf_state = TMF_INITIAL; spin_unlock(&session->lock); - write_unlock_bh(conn->recv_lock); iscsi_start_tx(conn); goto success_unlocked; case TMF_TIMEDOUT: @@ -1591,7 +1677,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) case TMF_NOT_FOUND: if (!sc->SCp.ptr) { conn->tmf_state = TMF_INITIAL; - /* ctask completed before tmf abort response */ + /* task completed before tmf abort response */ debug_scsi("sc completed while abort in progress\n"); goto success; } @@ -1604,7 +1690,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) success: spin_unlock_bh(&session->lock); success_unlocked: - debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt); + debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, task->itt); mutex_unlock(&session->eh_mutex); return SUCCESS; @@ -1612,7 +1698,7 @@ failed: spin_unlock_bh(&session->lock); failed_unlocked: debug_scsi("abort failed [sc %p itt 0x%x]\n", sc, - ctask ? ctask->itt : 0); + task ? task->itt : 0); mutex_unlock(&session->eh_mutex); return FAILED; } @@ -1630,12 +1716,15 @@ static void iscsi_prep_lun_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr) int iscsi_eh_device_reset(struct scsi_cmnd *sc) { - struct Scsi_Host *host = sc->device->host; - struct iscsi_session *session = iscsi_hostdata(host->hostdata); + struct iscsi_cls_session *cls_session; + struct iscsi_session *session; struct iscsi_conn *conn; struct iscsi_tm *hdr; int rc = FAILED; + cls_session = starget_to_session(scsi_target(sc->device)); + session = cls_session->dd_data; + debug_scsi("LU Reset [sc %p lun %u]\n", sc, sc->device->lun); mutex_lock(&session->eh_mutex); @@ -1678,13 +1767,11 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc) spin_unlock_bh(&session->lock); iscsi_suspend_tx(conn); - /* need to grab the recv lock then session lock */ - write_lock_bh(conn->recv_lock); + spin_lock(&session->lock); fail_all_commands(conn, sc->device->lun, DID_ERROR); conn->tmf_state = TMF_INITIAL; spin_unlock(&session->lock); - write_unlock_bh(conn->recv_lock); iscsi_start_tx(conn); goto done; @@ -1760,177 +1847,203 @@ void iscsi_pool_free(struct iscsi_pool *q) } EXPORT_SYMBOL_GPL(iscsi_pool_free); -/* - * iSCSI Session's hostdata organization: +/** + * iscsi_host_add - add host to system + * @shost: scsi host + * @pdev: parent device + * + * This should be called by partial offload and software iscsi drivers + * to add a host to the system. + */ +int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev) +{ + if (!shost->can_queue) + shost->can_queue = ISCSI_DEF_XMIT_CMDS_MAX; + + return scsi_add_host(shost, pdev); +} +EXPORT_SYMBOL_GPL(iscsi_host_add); + +/** + * iscsi_host_alloc - allocate a host and driver data + * @sht: scsi host template + * @dd_data_size: driver host data size + * @qdepth: default device queue depth + * + * This should be called by partial offload and software iscsi drivers. + * To access the driver specific memory use the iscsi_host_priv() macro. + */ +struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht, + int dd_data_size, uint16_t qdepth) +{ + struct Scsi_Host *shost; + + shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size); + if (!shost) + return NULL; + shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out; + + if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) { + if (qdepth != 0) + printk(KERN_ERR "iscsi: invalid queue depth of %d. 
" + "Queue depth must be between 1 and %d.\n", + qdepth, ISCSI_MAX_CMD_PER_LUN); + qdepth = ISCSI_DEF_CMD_PER_LUN; + } + shost->cmd_per_lun = qdepth; + return shost; +} +EXPORT_SYMBOL_GPL(iscsi_host_alloc); + +/** + * iscsi_host_remove - remove host and sessions + * @shost: scsi host * - * *------------------* <== hostdata_session(host->hostdata) - * | ptr to class sess| - * |------------------| <== iscsi_hostdata(host->hostdata) - * | iscsi_session | - * *------------------* + * This will also remove any sessions attached to the host, but if userspace + * is managing the session at the same time this will break. TODO: add + * refcounting to the netlink iscsi interface so a rmmod or host hot unplug + * does not remove the memory from under us. */ +void iscsi_host_remove(struct Scsi_Host *shost) +{ + iscsi_host_for_each_session(shost, iscsi_session_teardown); + scsi_remove_host(shost); +} +EXPORT_SYMBOL_GPL(iscsi_host_remove); -#define hostdata_privsize(_sz) (sizeof(unsigned long) + _sz + \ - _sz % sizeof(unsigned long)) +void iscsi_host_free(struct Scsi_Host *shost) +{ + struct iscsi_host *ihost = shost_priv(shost); -#define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata)) + kfree(ihost->netdev); + kfree(ihost->hwaddress); + kfree(ihost->initiatorname); + scsi_host_put(shost); +} +EXPORT_SYMBOL_GPL(iscsi_host_free); /** * iscsi_session_setup - create iscsi cls session and host and session - * @scsit: scsi transport template * @iscsit: iscsi transport template - * @cmds_max: scsi host can queue - * @qdepth: scsi host cmds per lun - * @cmd_task_size: LLD ctask private data size - * @mgmt_task_size: LLD mtask private data size + * @shost: scsi host + * @cmds_max: session can queue + * @cmd_task_size: LLD task private data size * @initial_cmdsn: initial CmdSN - * @hostno: host no allocated * * This can be used by software iscsi_transports that allocate * a session per scsi host. - **/ + * + * Callers should set cmds_max to the largest total numer (mgmt + scsi) of + * tasks they support. The iscsi layer reserves ISCSI_MGMT_CMDS_MAX tasks + * for nop handling and login/logout requests. + */ struct iscsi_cls_session * -iscsi_session_setup(struct iscsi_transport *iscsit, - struct scsi_transport_template *scsit, - uint16_t cmds_max, uint16_t qdepth, - int cmd_task_size, int mgmt_task_size, - uint32_t initial_cmdsn, uint32_t *hostno) +iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost, + uint16_t cmds_max, int cmd_task_size, + uint32_t initial_cmdsn, unsigned int id) { - struct Scsi_Host *shost; struct iscsi_session *session; struct iscsi_cls_session *cls_session; - int cmd_i; + int cmd_i, scsi_cmds, total_cmds = cmds_max; - if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) { - if (qdepth != 0) - printk(KERN_ERR "iscsi: invalid queue depth of %d. " - "Queue depth must be between 1 and %d.\n", - qdepth, ISCSI_MAX_CMD_PER_LUN); - qdepth = ISCSI_DEF_CMD_PER_LUN; + if (!total_cmds) + total_cmds = ISCSI_DEF_XMIT_CMDS_MAX; + /* + * The iscsi layer needs some tasks for nop handling and tmfs, + * so the cmds_max must at least be greater than ISCSI_MGMT_CMDS_MAX + * + 1 command for scsi IO. + */ + if (total_cmds < ISCSI_TOTAL_CMDS_MIN) { + printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue " + "must be a power of two that is at least %d.\n", + total_cmds, ISCSI_TOTAL_CMDS_MIN); + return NULL; } - if (!is_power_of_2(cmds_max) || cmds_max >= ISCSI_MGMT_ITT_OFFSET || - cmds_max < 2) { - if (cmds_max != 0) - printk(KERN_ERR "iscsi: invalid can_queue of %d. 
" - "can_queue must be a power of 2 and between " - "2 and %d - setting to %d.\n", cmds_max, - ISCSI_MGMT_ITT_OFFSET, ISCSI_DEF_XMIT_CMDS_MAX); - cmds_max = ISCSI_DEF_XMIT_CMDS_MAX; + if (total_cmds > ISCSI_TOTAL_CMDS_MAX) { + printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue " + "must be a power of 2 less than or equal to %d.\n", + cmds_max, ISCSI_TOTAL_CMDS_MAX); + total_cmds = ISCSI_TOTAL_CMDS_MAX; } - shost = scsi_host_alloc(iscsit->host_template, - hostdata_privsize(sizeof(*session))); - if (!shost) - return NULL; - - /* the iscsi layer takes one task for reserve */ - shost->can_queue = cmds_max - 1; - shost->cmd_per_lun = qdepth; - shost->max_id = 1; - shost->max_channel = 0; - shost->max_lun = iscsit->max_lun; - shost->max_cmd_len = iscsit->max_cmd_len; - shost->transportt = scsit; - shost->transportt->create_work_queue = 1; - shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out; - *hostno = shost->host_no; + if (!is_power_of_2(total_cmds)) { + printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue " + "must be a power of 2.\n", total_cmds); + total_cmds = rounddown_pow_of_two(total_cmds); + if (total_cmds < ISCSI_TOTAL_CMDS_MIN) + return NULL; + printk(KERN_INFO "iscsi: Rounding can_queue to %d.\n", + total_cmds); + } + scsi_cmds = total_cmds - ISCSI_MGMT_CMDS_MAX; - session = iscsi_hostdata(shost->hostdata); - memset(session, 0, sizeof(struct iscsi_session)); + cls_session = iscsi_alloc_session(shost, iscsit, + sizeof(struct iscsi_session)); + if (!cls_session) + return NULL; + session = cls_session->dd_data; + session->cls_session = cls_session; session->host = shost; session->state = ISCSI_STATE_FREE; session->fast_abort = 1; session->lu_reset_timeout = 15; session->abort_timeout = 10; - session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX; - session->cmds_max = cmds_max; + session->scsi_cmds_max = scsi_cmds; + session->cmds_max = total_cmds; session->queued_cmdsn = session->cmdsn = initial_cmdsn; session->exp_cmdsn = initial_cmdsn + 1; session->max_cmdsn = initial_cmdsn + 1; session->max_r2t = 1; session->tt = iscsit; mutex_init(&session->eh_mutex); + spin_lock_init(&session->lock); /* initialize SCSI PDU commands pool */ if (iscsi_pool_init(&session->cmdpool, session->cmds_max, (void***)&session->cmds, - cmd_task_size + sizeof(struct iscsi_cmd_task))) + cmd_task_size + sizeof(struct iscsi_task))) goto cmdpool_alloc_fail; /* pre-format cmds pool with ITT */ for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) { - struct iscsi_cmd_task *ctask = session->cmds[cmd_i]; + struct iscsi_task *task = session->cmds[cmd_i]; if (cmd_task_size) - ctask->dd_data = &ctask[1]; - ctask->itt = cmd_i; - INIT_LIST_HEAD(&ctask->running); - } - - spin_lock_init(&session->lock); - - /* initialize immediate command pool */ - if (iscsi_pool_init(&session->mgmtpool, session->mgmtpool_max, - (void***)&session->mgmt_cmds, - mgmt_task_size + sizeof(struct iscsi_mgmt_task))) - goto mgmtpool_alloc_fail; - - - /* pre-format immediate cmds pool with ITT */ - for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) { - struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i]; - - if (mgmt_task_size) - mtask->dd_data = &mtask[1]; - mtask->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i; - INIT_LIST_HEAD(&mtask->running); + task->dd_data = &task[1]; + task->itt = cmd_i; + INIT_LIST_HEAD(&task->running); } - if (scsi_add_host(shost, NULL)) - goto add_host_fail; - if (!try_module_get(iscsit->owner)) - goto cls_session_fail; - - cls_session = iscsi_create_session(shost, iscsit, 0); - if (!cls_session) - goto module_put; 
- *(unsigned long*)shost->hostdata = (unsigned long)cls_session; + goto module_get_fail; + if (iscsi_add_session(cls_session, id)) + goto cls_session_fail; return cls_session; -module_put: - module_put(iscsit->owner); cls_session_fail: - scsi_remove_host(shost); -add_host_fail: - iscsi_pool_free(&session->mgmtpool); -mgmtpool_alloc_fail: + module_put(iscsit->owner); +module_get_fail: iscsi_pool_free(&session->cmdpool); cmdpool_alloc_fail: - scsi_host_put(shost); + iscsi_free_session(cls_session); return NULL; } EXPORT_SYMBOL_GPL(iscsi_session_setup); /** * iscsi_session_teardown - destroy session, host, and cls_session - * shost: scsi host + * @cls_session: iscsi session * - * This can be used by software iscsi_transports that allocate - * a session per scsi host. - **/ + * The driver must have called iscsi_remove_session before + * calling this. + */ void iscsi_session_teardown(struct iscsi_cls_session *cls_session) { - struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); - struct iscsi_session *session = iscsi_hostdata(shost->hostdata); + struct iscsi_session *session = cls_session->dd_data; struct module *owner = cls_session->transport->owner; - iscsi_remove_session(cls_session); - scsi_remove_host(shost); - - iscsi_pool_free(&session->mgmtpool); iscsi_pool_free(&session->cmdpool); kfree(session->password); @@ -1938,12 +2051,10 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session) kfree(session->username); kfree(session->username_in); kfree(session->targetname); - kfree(session->netdev); - kfree(session->hwaddress); kfree(session->initiatorname); + kfree(session->ifacename); - iscsi_free_session(cls_session); - scsi_host_put(shost); + iscsi_destroy_session(cls_session); module_put(owner); } EXPORT_SYMBOL_GPL(iscsi_session_teardown); @@ -1951,22 +2062,26 @@ EXPORT_SYMBOL_GPL(iscsi_session_teardown); /** * iscsi_conn_setup - create iscsi_cls_conn and iscsi_conn * @cls_session: iscsi_cls_session + * @dd_size: private driver data size * @conn_idx: cid - **/ + */ struct iscsi_cls_conn * -iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx) +iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size, + uint32_t conn_idx) { - struct iscsi_session *session = class_to_transport_session(cls_session); + struct iscsi_session *session = cls_session->dd_data; struct iscsi_conn *conn; struct iscsi_cls_conn *cls_conn; char *data; - cls_conn = iscsi_create_conn(cls_session, conn_idx); + cls_conn = iscsi_create_conn(cls_session, sizeof(*conn) + dd_size, + conn_idx); if (!cls_conn) return NULL; conn = cls_conn->dd_data; - memset(conn, 0, sizeof(*conn)); + memset(conn, 0, sizeof(*conn) + dd_size); + conn->dd_data = cls_conn->dd_data + sizeof(*conn); conn->session = session; conn->cls_conn = cls_conn; conn->c_stage = ISCSI_CONN_INITIAL_STAGE; @@ -1985,30 +2100,30 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx) INIT_LIST_HEAD(&conn->requeue); INIT_WORK(&conn->xmitwork, iscsi_xmitworker); - /* allocate login_mtask used for the login/text sequences */ + /* allocate login_task used for the login/text sequences */ spin_lock_bh(&session->lock); - if (!__kfifo_get(session->mgmtpool.queue, - (void*)&conn->login_mtask, + if (!__kfifo_get(session->cmdpool.queue, + (void*)&conn->login_task, sizeof(void*))) { spin_unlock_bh(&session->lock); - goto login_mtask_alloc_fail; + goto login_task_alloc_fail; } spin_unlock_bh(&session->lock); data = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL); if (!data) - goto 
login_mtask_data_alloc_fail; - conn->login_mtask->data = conn->data = data; + goto login_task_data_alloc_fail; + conn->login_task->data = conn->data = data; init_timer(&conn->tmf_timer); init_waitqueue_head(&conn->ehwait); return cls_conn; -login_mtask_data_alloc_fail: - __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask, +login_task_data_alloc_fail: + __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task, sizeof(void*)); -login_mtask_alloc_fail: +login_task_alloc_fail: iscsi_destroy_conn(cls_conn); return NULL; } @@ -2068,7 +2183,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) spin_lock_bh(&session->lock); kfree(conn->data); kfree(conn->persistent_address); - __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask, + __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task, sizeof(void*)); if (session->leadconn == conn) session->leadconn = NULL; @@ -2140,7 +2255,7 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn) } spin_unlock_bh(&session->lock); - iscsi_unblock_session(session_to_cls(session)); + iscsi_unblock_session(session->cls_session); wake_up(&conn->ehwait); return 0; } @@ -2149,21 +2264,23 @@ EXPORT_SYMBOL_GPL(iscsi_conn_start); static void flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn) { - struct iscsi_mgmt_task *mtask, *tmp; + struct iscsi_task *task, *tmp; /* handle pending */ - list_for_each_entry_safe(mtask, tmp, &conn->mgmtqueue, running) { - debug_scsi("flushing pending mgmt task itt 0x%x\n", mtask->itt); - iscsi_free_mgmt_task(conn, mtask); + list_for_each_entry_safe(task, tmp, &conn->mgmtqueue, running) { + debug_scsi("flushing pending mgmt task itt 0x%x\n", task->itt); + /* release ref from prep task */ + __iscsi_put_task(task); } /* handle running */ - list_for_each_entry_safe(mtask, tmp, &conn->mgmt_run_list, running) { - debug_scsi("flushing running mgmt task itt 0x%x\n", mtask->itt); - iscsi_free_mgmt_task(conn, mtask); + list_for_each_entry_safe(task, tmp, &conn->mgmt_run_list, running) { + debug_scsi("flushing running mgmt task itt 0x%x\n", task->itt); + /* release ref from prep task */ + __iscsi_put_task(task); } - conn->mtask = NULL; + conn->task = NULL; } static void iscsi_start_session_recovery(struct iscsi_session *session, @@ -2182,17 +2299,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session, } /* - * The LLD either freed/unset the lock on us, or userspace called - * stop but did not create a proper connection (connection was never - * bound or it was unbound then stop was called). - */ - if (!conn->recv_lock) { - spin_unlock_bh(&session->lock); - mutex_unlock(&session->eh_mutex); - return; - } - - /* * When this is called for the in_login state, we only want to clean * up the login task and connection. We do not need to block and set * the recovery state again @@ -2208,11 +2314,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session, spin_unlock_bh(&session->lock); iscsi_suspend_tx(conn); - - write_lock_bh(conn->recv_lock); - set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); - write_unlock_bh(conn->recv_lock); - /* * for connection level recovery we should not calculate * header digest. 
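The libiscsi hunks above replace the old per-host hostdata layout with explicit entry points: iscsi_host_alloc()/iscsi_host_add() on the way up, iscsi_host_remove()/iscsi_host_free() on the way down, and an iscsi_session_setup() that now takes the Scsi_Host plus a single total (mgmt + SCSI) task count. A minimal sketch of how a software LLD might string these together is given below; it is not from the patch, and every name prefixed my_/MY_ plus target_id is a hypothetical placeholder.

/*
 * Sketch only: a hypothetical software-iSCSI LLD adopting the reworked
 * libiscsi setup path shown in the hunks above.  my_sht, my_transport,
 * struct my_host, struct my_task, MY_CMDS_MAX and MY_CMD_PER_LUN are
 * placeholders, not names from this patch.
 */
#include <scsi/scsi_host.h>
#include <scsi/libiscsi.h>

struct my_host { int placeholder; };	/* per-host data, via iscsi_host_priv() */
struct my_task { int placeholder; };	/* per-task LLD data */

#define MY_CMDS_MAX	128		/* total mgmt + SCSI tasks, power of two */
#define MY_CMD_PER_LUN	32		/* default device queue depth */

static struct scsi_host_template my_sht;	/* placeholder template */
static struct iscsi_transport my_transport;	/* placeholder transport */

static struct iscsi_cls_session *
my_create_session(struct device *pdev, uint32_t initial_cmdsn,
		  unsigned int target_id)
{
	struct Scsi_Host *shost;
	struct iscsi_cls_session *cls_session;

	/* host private data and default queue depth now come from libiscsi */
	shost = iscsi_host_alloc(&my_sht, sizeof(struct my_host),
				 MY_CMD_PER_LUN);
	if (!shost)
		return NULL;

	/* transport-class wiring omitted; see the in-tree drivers for that */
	if (iscsi_host_add(shost, pdev))
		goto free_host;

	/*
	 * cmds_max is the total task count; libiscsi reserves
	 * ISCSI_MGMT_CMDS_MAX of them for nops and login/logout.
	 */
	cls_session = iscsi_session_setup(&my_transport, shost, MY_CMDS_MAX,
					  sizeof(struct my_task),
					  initial_cmdsn, target_id);
	if (!cls_session)
		goto remove_host;
	return cls_session;

remove_host:
	iscsi_host_remove(shost);
free_host:
	iscsi_host_free(shost);
	return NULL;
}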
conn->hdr_size used for optimization @@ -2225,7 +2326,7 @@ static void iscsi_start_session_recovery(struct iscsi_session *session, if (session->state == ISCSI_STATE_IN_RECOVERY && old_stop_stage != STOP_CONN_RECOVER) { debug_scsi("blocking session\n"); - iscsi_block_session(session_to_cls(session)); + iscsi_block_session(session->cls_session); } } @@ -2260,7 +2361,7 @@ EXPORT_SYMBOL_GPL(iscsi_conn_stop); int iscsi_conn_bind(struct iscsi_cls_session *cls_session, struct iscsi_cls_conn *cls_conn, int is_leading) { - struct iscsi_session *session = class_to_transport_session(cls_session); + struct iscsi_session *session = cls_session->dd_data; struct iscsi_conn *conn = cls_conn->dd_data; spin_lock_bh(&session->lock); @@ -2399,6 +2500,14 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn, if (!conn->persistent_address) return -ENOMEM; break; + case ISCSI_PARAM_IFACE_NAME: + if (!session->ifacename) + session->ifacename = kstrdup(buf, GFP_KERNEL); + break; + case ISCSI_PARAM_INITIATOR_NAME: + if (!session->initiatorname) + session->initiatorname = kstrdup(buf, GFP_KERNEL); + break; default: return -ENOSYS; } @@ -2410,8 +2519,7 @@ EXPORT_SYMBOL_GPL(iscsi_set_param); int iscsi_session_get_param(struct iscsi_cls_session *cls_session, enum iscsi_param param, char *buf) { - struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); - struct iscsi_session *session = iscsi_hostdata(shost->hostdata); + struct iscsi_session *session = cls_session->dd_data; int len; switch(param) { @@ -2466,6 +2574,15 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session, case ISCSI_PARAM_PASSWORD_IN: len = sprintf(buf, "%s\n", session->password_in); break; + case ISCSI_PARAM_IFACE_NAME: + len = sprintf(buf, "%s\n", session->ifacename); + break; + case ISCSI_PARAM_INITIATOR_NAME: + if (!session->initiatorname) + len = sprintf(buf, "%s\n", "unknown"); + else + len = sprintf(buf, "%s\n", session->initiatorname); + break; default: return -ENOSYS; } @@ -2525,29 +2642,35 @@ EXPORT_SYMBOL_GPL(iscsi_conn_get_param); int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param, char *buf) { - struct iscsi_session *session = iscsi_hostdata(shost->hostdata); + struct iscsi_host *ihost = shost_priv(shost); int len; switch (param) { case ISCSI_HOST_PARAM_NETDEV_NAME: - if (!session->netdev) + if (!ihost->netdev) len = sprintf(buf, "%s\n", "default"); else - len = sprintf(buf, "%s\n", session->netdev); + len = sprintf(buf, "%s\n", ihost->netdev); break; case ISCSI_HOST_PARAM_HWADDRESS: - if (!session->hwaddress) + if (!ihost->hwaddress) len = sprintf(buf, "%s\n", "default"); else - len = sprintf(buf, "%s\n", session->hwaddress); + len = sprintf(buf, "%s\n", ihost->hwaddress); break; case ISCSI_HOST_PARAM_INITIATOR_NAME: - if (!session->initiatorname) + if (!ihost->initiatorname) len = sprintf(buf, "%s\n", "unknown"); else - len = sprintf(buf, "%s\n", session->initiatorname); + len = sprintf(buf, "%s\n", ihost->initiatorname); + break; + case ISCSI_HOST_PARAM_IPADDRESS: + if (!strlen(ihost->local_address)) + len = sprintf(buf, "%s\n", "unknown"); + else + len = sprintf(buf, "%s\n", + ihost->local_address); break; - default: return -ENOSYS; } @@ -2559,20 +2682,20 @@ EXPORT_SYMBOL_GPL(iscsi_host_get_param); int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param, char *buf, int buflen) { - struct iscsi_session *session = iscsi_hostdata(shost->hostdata); + struct iscsi_host *ihost = shost_priv(shost); switch (param) { case ISCSI_HOST_PARAM_NETDEV_NAME: - if (!session->netdev) 
- session->netdev = kstrdup(buf, GFP_KERNEL); + if (!ihost->netdev) + ihost->netdev = kstrdup(buf, GFP_KERNEL); break; case ISCSI_HOST_PARAM_HWADDRESS: - if (!session->hwaddress) - session->hwaddress = kstrdup(buf, GFP_KERNEL); + if (!ihost->hwaddress) + ihost->hwaddress = kstrdup(buf, GFP_KERNEL); break; case ISCSI_HOST_PARAM_INITIATOR_NAME: - if (!session->initiatorname) - session->initiatorname = kstrdup(buf, GFP_KERNEL); + if (!ihost->initiatorname) + ihost->initiatorname = kstrdup(buf, GFP_KERNEL); break; default: return -ENOSYS; diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index ec0b0f6e5e1a..e0e018d12653 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -33,6 +33,7 @@ struct lpfc_sli2_slim; #define LPFC_MAX_SG_SEG_CNT 256 /* sg element count per scsi cmnd */ #define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */ #define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */ +#define LPFC_VNAME_LEN 100 /* vport symbolic name length */ /* * Following time intervals are used of adjusting SCSI device @@ -59,6 +60,9 @@ struct lpfc_sli2_slim; #define MAX_HBAEVT 32 +/* lpfc wait event data ready flag */ +#define LPFC_DATA_READY (1<<0) + enum lpfc_polling_flags { ENABLE_FCP_RING_POLLING = 0x1, DISABLE_FCP_RING_INT = 0x2 @@ -425,9 +429,6 @@ struct lpfc_hba { uint16_t pci_cfg_value; - uint8_t work_found; -#define LPFC_MAX_WORKER_ITERATION 4 - uint8_t fc_linkspeed; /* Link speed after last READ_LA */ uint32_t fc_eventTag; /* event tag for link attention */ @@ -489,8 +490,9 @@ struct lpfc_hba { uint32_t work_hs; /* HS stored in case of ERRAT */ uint32_t work_status[2]; /* Extra status from SLIM */ - wait_queue_head_t *work_wait; + wait_queue_head_t work_waitq; struct task_struct *worker_thread; + long data_flags; uint32_t hbq_in_use; /* HBQs in use flag */ struct list_head hbqbuf_in_list; /* in-fly hbq buffer list */ @@ -637,6 +639,17 @@ lpfc_is_link_up(struct lpfc_hba *phba) phba->link_state == LPFC_HBA_READY; } +static inline void +lpfc_worker_wake_up(struct lpfc_hba *phba) +{ + /* Set the lpfc data pending flag */ + set_bit(LPFC_DATA_READY, &phba->data_flags); + + /* Wake up worker thread */ + wake_up(&phba->work_waitq); + return; +} + #define FC_REG_DUMP_EVENT 0x10 /* Register for Dump events */ #define FC_REG_TEMPERATURE_EVENT 0x20 /* Register for temperature event */ diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 960baaf11fb1..37bfa0bd1dae 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -1995,8 +1995,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, /* Don't allow mailbox commands to be sent when blocked * or when in the middle of discovery */ - if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO || - vport->fc_flag & FC_NDISC_ACTIVE) { + if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) { sysfs_mbox_idle(phba); spin_unlock_irq(&phba->hbalock); return -EAGAIN; diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 7c9f8317d972..1b8245213b83 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -142,7 +142,7 @@ int lpfc_config_port_post(struct lpfc_hba *); int lpfc_hba_down_prep(struct lpfc_hba *); int lpfc_hba_down_post(struct lpfc_hba *); void lpfc_hba_init(struct lpfc_hba *, uint32_t *); -int lpfc_post_buffer(struct lpfc_hba *, struct lpfc_sli_ring *, int, int); +int lpfc_post_buffer(struct lpfc_hba *, struct lpfc_sli_ring *, int); void lpfc_decode_firmware_rev(struct lpfc_hba *, char *, 
int); int lpfc_online(struct lpfc_hba *); void lpfc_unblock_mgmt_io(struct lpfc_hba *); @@ -263,6 +263,7 @@ extern int lpfc_sli_mode; extern int lpfc_enable_npiv; int lpfc_vport_symbolic_node_name(struct lpfc_vport *, char *, size_t); +int lpfc_vport_symbolic_port_name(struct lpfc_vport *, char *, size_t); void lpfc_terminate_rport_io(struct fc_rport *); void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport); diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 153afae567b5..7fc74cf5823b 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -101,7 +101,7 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, /* Not enough posted buffers; Try posting more buffers */ phba->fc_stat.NoRcvBuf++; if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) - lpfc_post_buffer(phba, pring, 2, 1); + lpfc_post_buffer(phba, pring, 2); return; } @@ -151,7 +151,7 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, } list_del(&iocbq->list); lpfc_sli_release_iocbq(phba, iocbq); - lpfc_post_buffer(phba, pring, i, 1); + lpfc_post_buffer(phba, pring, i); } } } @@ -990,7 +990,7 @@ lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, return; } -static int +int lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol, size_t size) { @@ -1679,20 +1679,18 @@ lpfc_fdmi_tmo(unsigned long ptr) { struct lpfc_vport *vport = (struct lpfc_vport *)ptr; struct lpfc_hba *phba = vport->phba; + uint32_t tmo_posted; unsigned long iflag; spin_lock_irqsave(&vport->work_port_lock, iflag); - if (!(vport->work_port_events & WORKER_FDMI_TMO)) { + tmo_posted = vport->work_port_events & WORKER_FDMI_TMO; + if (!tmo_posted) vport->work_port_events |= WORKER_FDMI_TMO; - spin_unlock_irqrestore(&vport->work_port_lock, iflag); + spin_unlock_irqrestore(&vport->work_port_lock, iflag); - spin_lock_irqsave(&phba->hbalock, iflag); - if (phba->work_wait) - lpfc_worker_wake_up(phba); - spin_unlock_irqrestore(&phba->hbalock, iflag); - } - else - spin_unlock_irqrestore(&vport->work_port_lock, iflag); + if (!tmo_posted) + lpfc_worker_wake_up(phba); + return; } void diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 886c5f1b11d2..f54e0f7eaee3 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -1754,29 +1754,34 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_work_evt *evtp; + if (!(nlp->nlp_flag & NLP_DELAY_TMO)) + return; spin_lock_irq(shost->host_lock); nlp->nlp_flag &= ~NLP_DELAY_TMO; spin_unlock_irq(shost->host_lock); del_timer_sync(&nlp->nlp_delayfunc); nlp->nlp_last_elscmd = 0; - if (!list_empty(&nlp->els_retry_evt.evt_listp)) { list_del_init(&nlp->els_retry_evt.evt_listp); /* Decrement nlp reference count held for the delayed retry */ evtp = &nlp->els_retry_evt; lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); } - if (nlp->nlp_flag & NLP_NPR_2B_DISC) { spin_lock_irq(shost->host_lock); nlp->nlp_flag &= ~NLP_NPR_2B_DISC; spin_unlock_irq(shost->host_lock); if (vport->num_disc_nodes) { - /* Check to see if there are more - * PLOGIs to be sent - */ - lpfc_more_plogi(vport); - + if (vport->port_state < LPFC_VPORT_READY) { + /* Check if there are more ADISCs to be sent */ + lpfc_more_adisc(vport); + if ((vport->num_disc_nodes == 0) && + (vport->fc_npr_cnt)) + lpfc_els_disc_plogi(vport); + } else { + /* Check if there are more PLOGIs to be sent */ + lpfc_more_plogi(vport); + } 
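The lpfc side of this series replaces the phba->work_wait pointer with a persistent wait queue (work_waitq) and an LPFC_DATA_READY bit in phba->data_flags; the new lpfc_worker_wake_up() inline sets the bit and then wakes the worker. The timeout handlers touched here (lpfc_fdmi_tmo() above, and the ELS, discovery and heartbeat timers further down) all converge on the same shape, sketched below under the assumption of a hypothetical WORKER_FOO_TMO event and foo_tmo() handler; lpfc's private headers are assumed for the struct definitions.

#include <linux/spinlock.h>
/* lpfc private headers (lpfc_hw.h, lpfc_sli.h, lpfc.h, ...) assumed */

static void foo_tmo(unsigned long ptr)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)ptr;
	struct lpfc_hba *phba = vport->phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	/* record the event exactly once, under the vport work lock ... */
	spin_lock_irqsave(&vport->work_port_lock, iflag);
	tmo_posted = vport->work_port_events & WORKER_FOO_TMO;
	if (!tmo_posted)
		vport->work_port_events |= WORKER_FOO_TMO;
	spin_unlock_irqrestore(&vport->work_port_lock, iflag);

	/*
	 * ... then wake the worker outside the lock.  lpfc_worker_wake_up()
	 * sets LPFC_DATA_READY in phba->data_flags and wakes work_waitq, so
	 * no hbalock or phba->work_wait pointer check is needed any more.
	 */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
}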
if (vport->num_disc_nodes == 0) { spin_lock_irq(shost->host_lock); vport->fc_flag &= ~FC_NDISC_ACTIVE; @@ -1798,10 +1803,6 @@ lpfc_els_retry_delay(unsigned long ptr) unsigned long flags; struct lpfc_work_evt *evtp = &ndlp->els_retry_evt; - ndlp = (struct lpfc_nodelist *) ptr; - phba = ndlp->vport->phba; - evtp = &ndlp->els_retry_evt; - spin_lock_irqsave(&phba->hbalock, flags); if (!list_empty(&evtp->evt_listp)) { spin_unlock_irqrestore(&phba->hbalock, flags); @@ -1812,11 +1813,11 @@ lpfc_els_retry_delay(unsigned long ptr) * count until the queued work is done */ evtp->evt_arg1 = lpfc_nlp_get(ndlp); - evtp->evt = LPFC_EVT_ELS_RETRY; - list_add_tail(&evtp->evt_listp, &phba->work_list); - if (phba->work_wait) + if (evtp->evt_arg1) { + evtp->evt = LPFC_EVT_ELS_RETRY; + list_add_tail(&evtp->evt_listp, &phba->work_list); lpfc_worker_wake_up(phba); - + } spin_unlock_irqrestore(&phba->hbalock, flags); return; } @@ -2761,10 +2762,11 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, npr = (PRLI *) pcmd; vpd = &phba->vpd; /* - * If our firmware version is 3.20 or later, - * set the following bits for FC-TAPE support. + * If the remote port is a target and our firmware version is 3.20 or + * later, set the following bits for FC-TAPE support. */ - if (vpd->rev.feaLevelHigh >= 0x02) { + if ((ndlp->nlp_type & NLP_FCP_TARGET) && + (vpd->rev.feaLevelHigh >= 0x02)) { npr->ConfmComplAllowed = 1; npr->Retry = 1; npr->TaskRetryIdReq = 1; @@ -3056,27 +3058,16 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport) { struct lpfc_nodelist *ndlp = NULL; - /* Look at all nodes effected by pending RSCNs and move - * them to NPR state. - */ - + /* Move all affected nodes by pending RSCNs to NPR state. */ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { if (!NLP_CHK_NODE_ACT(ndlp) || - ndlp->nlp_state == NLP_STE_UNUSED_NODE || - lpfc_rscn_payload_check(vport, ndlp->nlp_DID) == 0) + (ndlp->nlp_state == NLP_STE_UNUSED_NODE) || + !lpfc_rscn_payload_check(vport, ndlp->nlp_DID)) continue; - lpfc_disc_state_machine(vport, ndlp, NULL, - NLP_EVT_DEVICE_RECOVERY); - - /* - * Make sure NLP_DELAY_TMO is NOT running after a device - * recovery event. 
- */ - if (ndlp->nlp_flag & NLP_DELAY_TMO) - lpfc_cancel_retry_delay_tmo(vport, ndlp); + NLP_EVT_DEVICE_RECOVERY); + lpfc_cancel_retry_delay_tmo(vport, ndlp); } - return 0; } @@ -3781,91 +3772,27 @@ static int lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, struct lpfc_nodelist *fan_ndlp) { - struct lpfc_dmabuf *pcmd; + struct lpfc_hba *phba = vport->phba; uint32_t *lp; - IOCB_t *icmd; - uint32_t cmd, did; FAN *fp; - struct lpfc_nodelist *ndlp, *next_ndlp; - struct lpfc_hba *phba = vport->phba; - - /* FAN received */ - lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "0265 FAN received\n"); - icmd = &cmdiocb->iocb; - did = icmd->un.elsreq64.remoteID; - pcmd = (struct lpfc_dmabuf *)cmdiocb->context2; - lp = (uint32_t *)pcmd->virt; - - cmd = *lp++; - fp = (FAN *) lp; + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n"); + lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt; + fp = (FAN *) ++lp; /* FAN received; Fan does not have a reply sequence */ - - if (phba->pport->port_state == LPFC_LOCAL_CFG_LINK) { + if ((vport == phba->pport) && + (vport->port_state == LPFC_LOCAL_CFG_LINK)) { if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName, - sizeof(struct lpfc_name)) != 0) || + sizeof(struct lpfc_name))) || (memcmp(&phba->fc_fabparam.portName, &fp->FportName, - sizeof(struct lpfc_name)) != 0)) { - /* - * This node has switched fabrics. FLOGI is required - * Clean up the old rpi's - */ - - list_for_each_entry_safe(ndlp, next_ndlp, - &vport->fc_nodes, nlp_listp) { - if (!NLP_CHK_NODE_ACT(ndlp)) - continue; - if (ndlp->nlp_state != NLP_STE_NPR_NODE) - continue; - if (ndlp->nlp_type & NLP_FABRIC) { - /* - * Clean up old Fabric, Nameserver and - * other NLP_FABRIC logins - */ - lpfc_drop_node(vport, ndlp); - - } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { - /* Fail outstanding I/O now since this - * device is marked for PLOGI - */ - lpfc_unreg_rpi(vport, ndlp); - } - } - + sizeof(struct lpfc_name)))) { + /* This port has switched fabrics. FLOGI is required */ lpfc_initial_flogi(vport); - return 0; - } - /* Discovery not needed, - * move the nodes to their original state. 
- */ - list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, - nlp_listp) { - if (!NLP_CHK_NODE_ACT(ndlp)) - continue; - if (ndlp->nlp_state != NLP_STE_NPR_NODE) - continue; - - switch (ndlp->nlp_prev_state) { - case NLP_STE_UNMAPPED_NODE: - ndlp->nlp_prev_state = NLP_STE_NPR_NODE; - lpfc_nlp_set_state(vport, ndlp, - NLP_STE_UNMAPPED_NODE); - break; - - case NLP_STE_MAPPED_NODE: - ndlp->nlp_prev_state = NLP_STE_NPR_NODE; - lpfc_nlp_set_state(vport, ndlp, - NLP_STE_MAPPED_NODE); - break; - - default: - break; - } + } else { + /* FAN verified - skip FLOGI */ + vport->fc_myDID = vport->fc_prevDID; + lpfc_issue_fabric_reglogin(vport); } - - /* Start discovery - this should just do CLEAR_LA */ - lpfc_disc_start(vport); } return 0; } @@ -3875,20 +3802,17 @@ lpfc_els_timeout(unsigned long ptr) { struct lpfc_vport *vport = (struct lpfc_vport *) ptr; struct lpfc_hba *phba = vport->phba; + uint32_t tmo_posted; unsigned long iflag; spin_lock_irqsave(&vport->work_port_lock, iflag); - if ((vport->work_port_events & WORKER_ELS_TMO) == 0) { + tmo_posted = vport->work_port_events & WORKER_ELS_TMO; + if (!tmo_posted) vport->work_port_events |= WORKER_ELS_TMO; - spin_unlock_irqrestore(&vport->work_port_lock, iflag); + spin_unlock_irqrestore(&vport->work_port_lock, iflag); - spin_lock_irqsave(&phba->hbalock, iflag); - if (phba->work_wait) - lpfc_worker_wake_up(phba); - spin_unlock_irqrestore(&phba->hbalock, iflag); - } - else - spin_unlock_irqrestore(&vport->work_port_lock, iflag); + if (!tmo_posted) + lpfc_worker_wake_up(phba); return; } @@ -3933,9 +3857,6 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport) els_command == ELS_CMD_FDISC) continue; - if (vport != piocb->vport) - continue; - if (piocb->drvrTimeout > 0) { if (piocb->drvrTimeout >= timeout) piocb->drvrTimeout -= timeout; @@ -4089,7 +4010,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt; cmd = *payload; if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0) - lpfc_post_buffer(phba, pring, 1, 1); + lpfc_post_buffer(phba, pring, 1); did = icmd->un.rcvels.remoteID; if (icmd->ulpStatus) { @@ -4398,7 +4319,7 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, phba->fc_stat.NoRcvBuf++; /* Not enough posted buffers; Try posting more buffers */ if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) - lpfc_post_buffer(phba, pring, 0, 1); + lpfc_post_buffer(phba, pring, 0); return; } @@ -4842,18 +4763,16 @@ lpfc_fabric_block_timeout(unsigned long ptr) struct lpfc_hba *phba = (struct lpfc_hba *) ptr; unsigned long iflags; uint32_t tmo_posted; + spin_lock_irqsave(&phba->pport->work_port_lock, iflags); tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO; if (!tmo_posted) phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO; spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); - if (!tmo_posted) { - spin_lock_irqsave(&phba->hbalock, iflags); - if (phba->work_wait) - lpfc_worker_wake_up(phba); - spin_unlock_irqrestore(&phba->hbalock, iflags); - } + if (!tmo_posted) + lpfc_worker_wake_up(phba); + return; } static void diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 7cb68feb04fd..a98d11bf3576 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -153,11 +153,11 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) * count until this queued work is done */ evtp->evt_arg1 = lpfc_nlp_get(ndlp); - evtp->evt = LPFC_EVT_DEV_LOSS; - 
list_add_tail(&evtp->evt_listp, &phba->work_list); - if (phba->work_wait) - wake_up(phba->work_wait); - + if (evtp->evt_arg1) { + evtp->evt = LPFC_EVT_DEV_LOSS; + list_add_tail(&evtp->evt_listp, &phba->work_list); + lpfc_worker_wake_up(phba); + } spin_unlock_irq(&phba->hbalock); return; @@ -276,14 +276,6 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); } - -void -lpfc_worker_wake_up(struct lpfc_hba *phba) -{ - wake_up(phba->work_wait); - return; -} - static void lpfc_work_list_done(struct lpfc_hba *phba) { @@ -429,6 +421,8 @@ lpfc_work_done(struct lpfc_hba *phba) || (pring->flag & LPFC_DEFERRED_RING_EVENT)) { if (pring->flag & LPFC_STOP_IOCB_EVENT) { pring->flag |= LPFC_DEFERRED_RING_EVENT; + /* Set the lpfc data pending flag */ + set_bit(LPFC_DATA_READY, &phba->data_flags); } else { pring->flag &= ~LPFC_DEFERRED_RING_EVENT; lpfc_sli_handle_slow_ring_event(phba, pring, @@ -459,69 +453,29 @@ lpfc_work_done(struct lpfc_hba *phba) lpfc_work_list_done(phba); } -static int -check_work_wait_done(struct lpfc_hba *phba) -{ - struct lpfc_vport *vport; - struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; - int rc = 0; - - spin_lock_irq(&phba->hbalock); - list_for_each_entry(vport, &phba->port_list, listentry) { - if (vport->work_port_events) { - rc = 1; - break; - } - } - if (rc || phba->work_ha || (!list_empty(&phba->work_list)) || - kthread_should_stop() || pring->flag & LPFC_DEFERRED_RING_EVENT) { - rc = 1; - phba->work_found++; - } else - phba->work_found = 0; - spin_unlock_irq(&phba->hbalock); - return rc; -} - - int lpfc_do_work(void *p) { struct lpfc_hba *phba = p; int rc; - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(work_waitq); set_user_nice(current, -20); - phba->work_wait = &work_waitq; - phba->work_found = 0; + phba->data_flags = 0; while (1) { - - rc = wait_event_interruptible(work_waitq, - check_work_wait_done(phba)); - + /* wait and check worker queue activities */ + rc = wait_event_interruptible(phba->work_waitq, + (test_and_clear_bit(LPFC_DATA_READY, + &phba->data_flags) + || kthread_should_stop())); BUG_ON(rc); if (kthread_should_stop()) break; + /* Attend pending lpfc data processing */ lpfc_work_done(phba); - - /* If there is alot of slow ring work, like during link up - * check_work_wait_done() may cause this thread to not give - * up the CPU for very long periods of time. This may cause - * soft lockups or other problems. To avoid these situations - * give up the CPU here after LPFC_MAX_WORKER_ITERATION - * consecutive iterations. 
- */ - if (phba->work_found >= LPFC_MAX_WORKER_ITERATION) { - phba->work_found = 0; - schedule(); - } } - spin_lock_irq(&phba->hbalock); - phba->work_wait = NULL; - spin_unlock_irq(&phba->hbalock); return 0; } @@ -551,10 +505,10 @@ lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2, spin_lock_irqsave(&phba->hbalock, flags); list_add_tail(&evtp->evt_listp, &phba->work_list); - if (phba->work_wait) - lpfc_worker_wake_up(phba); spin_unlock_irqrestore(&phba->hbalock, flags); + lpfc_worker_wake_up(phba); + return 1; } @@ -963,6 +917,10 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) if (phba->fc_topology == TOPOLOGY_LOOP) { phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; + if (phba->cfg_enable_npiv) + lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, + "1309 Link Up Event npiv not supported in loop " + "topology\n"); /* Get Loop Map information */ if (la->il) vport->fc_flag |= FC_LBIT; @@ -1087,6 +1045,8 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) MAILBOX_t *mb = &pmb->mb; struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); + /* Unblock ELS traffic */ + phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; /* Check for error */ if (mb->mbxStatus) { lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, @@ -1650,7 +1610,6 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_DID, old_state, state); if (old_state == NLP_STE_NPR_NODE && - (ndlp->nlp_flag & NLP_DELAY_TMO) != 0 && state != NLP_STE_NPR_NODE) lpfc_cancel_retry_delay_tmo(vport, ndlp); if (old_state == NLP_STE_UNMAPPED_NODE) { @@ -1687,8 +1646,7 @@ lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); - if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0) - lpfc_cancel_retry_delay_tmo(vport, ndlp); + lpfc_cancel_retry_delay_tmo(vport, ndlp); if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp)) lpfc_nlp_counters(vport, ndlp->nlp_state, -1); spin_lock_irq(shost->host_lock); @@ -1701,8 +1659,7 @@ lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) static void lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { - if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0) - lpfc_cancel_retry_delay_tmo(vport, ndlp); + lpfc_cancel_retry_delay_tmo(vport, ndlp); if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp)) lpfc_nlp_counters(vport, ndlp->nlp_state, -1); lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state, @@ -2121,10 +2078,8 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) ndlp->nlp_last_elscmd = 0; del_timer_sync(&ndlp->nlp_delayfunc); - if (!list_empty(&ndlp->els_retry_evt.evt_listp)) - list_del_init(&ndlp->els_retry_evt.evt_listp); - if (!list_empty(&ndlp->dev_loss_evt.evt_listp)) - list_del_init(&ndlp->dev_loss_evt.evt_listp); + list_del_init(&ndlp->els_retry_evt.evt_listp); + list_del_init(&ndlp->dev_loss_evt.evt_listp); lpfc_unreg_rpi(vport, ndlp); @@ -2144,10 +2099,7 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) LPFC_MBOXQ_t *mbox; int rc; - if (ndlp->nlp_flag & NLP_DELAY_TMO) { - lpfc_cancel_retry_delay_tmo(vport, ndlp); - } - + lpfc_cancel_retry_delay_tmo(vport, ndlp); if (ndlp->nlp_flag & NLP_DEFER_RM && !ndlp->nlp_rpi) { /* For this case we need to cleanup the default rpi * allocated by the firmware. @@ -2317,8 +2269,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) /* Since this node is marked for discovery, * delay timeout is not needed. 
*/ - if (ndlp->nlp_flag & NLP_DELAY_TMO) - lpfc_cancel_retry_delay_tmo(vport, ndlp); + lpfc_cancel_retry_delay_tmo(vport, ndlp); } else ndlp = NULL; } else { @@ -2643,21 +2594,20 @@ lpfc_disc_timeout(unsigned long ptr) { struct lpfc_vport *vport = (struct lpfc_vport *) ptr; struct lpfc_hba *phba = vport->phba; + uint32_t tmo_posted; unsigned long flags = 0; if (unlikely(!phba)) return; - if ((vport->work_port_events & WORKER_DISC_TMO) == 0) { - spin_lock_irqsave(&vport->work_port_lock, flags); + spin_lock_irqsave(&vport->work_port_lock, flags); + tmo_posted = vport->work_port_events & WORKER_DISC_TMO; + if (!tmo_posted) vport->work_port_events |= WORKER_DISC_TMO; - spin_unlock_irqrestore(&vport->work_port_lock, flags); + spin_unlock_irqrestore(&vport->work_port_lock, flags); - spin_lock_irqsave(&phba->hbalock, flags); - if (phba->work_wait) - lpfc_worker_wake_up(phba); - spin_unlock_irqrestore(&phba->hbalock, flags); - } + if (!tmo_posted) + lpfc_worker_wake_up(phba); return; } diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index fa757b251f82..5b6e5395c8eb 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -145,8 +145,10 @@ lpfc_config_port_prep(struct lpfc_hba *phba) return -ERESTART; } - if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) + if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) { + mempool_free(pmb, phba->mbox_mem_pool); return -EINVAL; + } /* Save information as VPD data */ vp->rev.rBit = 1; @@ -551,18 +553,18 @@ static void lpfc_hb_timeout(unsigned long ptr) { struct lpfc_hba *phba; + uint32_t tmo_posted; unsigned long iflag; phba = (struct lpfc_hba *)ptr; spin_lock_irqsave(&phba->pport->work_port_lock, iflag); - if (!(phba->pport->work_port_events & WORKER_HB_TMO)) + tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO; + if (!tmo_posted) phba->pport->work_port_events |= WORKER_HB_TMO; spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); - spin_lock_irqsave(&phba->hbalock, iflag); - if (phba->work_wait) - wake_up(phba->work_wait); - spin_unlock_irqrestore(&phba->hbalock, iflag); + if (!tmo_posted) + lpfc_worker_wake_up(phba); return; } @@ -851,6 +853,8 @@ lpfc_handle_latt(struct lpfc_hba *phba) lpfc_read_la(phba, pmb, mp); pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la; pmb->vport = vport; + /* Block ELS IOCBs until we have processed this mbox command */ + phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { rc = 4; @@ -866,6 +870,7 @@ lpfc_handle_latt(struct lpfc_hba *phba) return; lpfc_handle_latt_free_mbuf: + phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; lpfc_mbuf_free(phba, mp->virt, mp->phys); lpfc_handle_latt_free_mp: kfree(mp); @@ -1194,8 +1199,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) /* Returns the number of buffers NOT posted. 
*/ /**************************************************/ int -lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt, - int type) +lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) { IOCB_t *icmd; struct lpfc_iocbq *iocb; @@ -1295,7 +1299,7 @@ lpfc_post_rcv_buf(struct lpfc_hba *phba) struct lpfc_sli *psli = &phba->sli; /* Ring 0, ELS / CT buffers */ - lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0, 1); + lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0); /* Ring 2 - FCP no buffers needed */ return 0; @@ -1454,6 +1458,15 @@ lpfc_cleanup(struct lpfc_vport *vport) lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); + + /* nlp_type zero is not defined, nlp_flag zero also not defined, + * nlp_state is unused, this happens when + * an initiator has logged + * into us so cleanup this ndlp. + */ + if ((ndlp->nlp_type == 0) && (ndlp->nlp_flag == 0) && + (ndlp->nlp_state == 0)) + lpfc_nlp_put(ndlp); } /* At this point, ALL ndlp's should be gone @@ -2101,6 +2114,9 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) phba->work_ha_mask = (HA_ERATT|HA_MBATT|HA_LATT); phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); + /* Initialize the wait queue head for the kernel thread */ + init_waitqueue_head(&phba->work_waitq); + /* Startup the kernel thread for this host adapter. */ phba->worker_thread = kthread_run(lpfc_do_work, phba, "lpfc_worker_%d", phba->brd_no); diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index d08c4c890744..6688a8689b56 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -235,10 +235,7 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) (iocb->iocb_cmpl) (phba, iocb, iocb); } } - - /* If we are delaying issuing an ELS command, cancel it */ - if (ndlp->nlp_flag & NLP_DELAY_TMO) - lpfc_cancel_retry_delay_tmo(phba->pport, ndlp); + lpfc_cancel_retry_delay_tmo(phba->pport, ndlp); return 0; } @@ -249,7 +246,6 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_hba *phba = vport->phba; struct lpfc_dmabuf *pcmd; - struct lpfc_work_evt *evtp; uint32_t *lp; IOCB_t *icmd; struct serv_parm *sp; @@ -425,73 +421,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp, mbox); return 1; } - - /* If the remote NPort logs into us, before we can initiate - * discovery to them, cleanup the NPort from discovery accordingly. 
- */ - if (ndlp->nlp_state == NLP_STE_NPR_NODE) { - spin_lock_irq(shost->host_lock); - ndlp->nlp_flag &= ~NLP_DELAY_TMO; - spin_unlock_irq(shost->host_lock); - del_timer_sync(&ndlp->nlp_delayfunc); - ndlp->nlp_last_elscmd = 0; - - if (!list_empty(&ndlp->els_retry_evt.evt_listp)) { - list_del_init(&ndlp->els_retry_evt.evt_listp); - /* Decrement ndlp reference count held for the - * delayed retry - */ - evtp = &ndlp->els_retry_evt; - lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); - } - - if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { - spin_lock_irq(shost->host_lock); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - spin_unlock_irq(shost->host_lock); - - if ((ndlp->nlp_flag & NLP_ADISC_SND) && - (vport->num_disc_nodes)) { - /* Check to see if there are more - * ADISCs to be sent - */ - lpfc_more_adisc(vport); - - if ((vport->num_disc_nodes == 0) && - (vport->fc_npr_cnt)) - lpfc_els_disc_plogi(vport); - - if (vport->num_disc_nodes == 0) { - spin_lock_irq(shost->host_lock); - vport->fc_flag &= ~FC_NDISC_ACTIVE; - spin_unlock_irq(shost->host_lock); - lpfc_can_disctmo(vport); - lpfc_end_rscn(vport); - } - } - } - } else if ((ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) && - (ndlp->nlp_flag & NLP_NPR_2B_DISC) && - (vport->num_disc_nodes)) { - spin_lock_irq(shost->host_lock); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - spin_unlock_irq(shost->host_lock); - /* Check to see if there are more - * PLOGIs to be sent - */ - lpfc_more_plogi(vport); - if (vport->num_disc_nodes == 0) { - spin_lock_irq(shost->host_lock); - vport->fc_flag &= ~FC_NDISC_ACTIVE; - spin_unlock_irq(shost->host_lock); - lpfc_can_disctmo(vport); - lpfc_end_rscn(vport); - } - } - lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox); return 1; - out: stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE; @@ -574,7 +505,9 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, else lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); - if (!(ndlp->nlp_type & NLP_FABRIC) || + if ((!(ndlp->nlp_type & NLP_FABRIC) && + ((ndlp->nlp_type & NLP_FCP_TARGET) || + !(ndlp->nlp_type & NLP_FCP_INITIATOR))) || (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) { /* Only try to re-login if this is NOT a Fabric Node */ mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); @@ -751,6 +684,7 @@ static uint32_t lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *cmdiocb = arg; struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; @@ -776,7 +710,22 @@ lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); } else { - lpfc_rcv_plogi(vport, ndlp, cmdiocb); + if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) && + (ndlp->nlp_flag & NLP_NPR_2B_DISC) && + (vport->num_disc_nodes)) { + spin_lock_irq(shost->host_lock); + ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; + spin_unlock_irq(shost->host_lock); + /* Check if there are more PLOGIs to be sent */ + lpfc_more_plogi(vport); + if (vport->num_disc_nodes == 0) { + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_NDISC_ACTIVE; + spin_unlock_irq(shost->host_lock); + lpfc_can_disctmo(vport); + lpfc_end_rscn(vport); + } + } } /* If our portname was less */ return ndlp->nlp_state; @@ -1040,6 +989,7 @@ static uint32_t lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) 
{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *cmdiocb; @@ -1048,9 +998,28 @@ lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, cmdiocb = (struct lpfc_iocbq *) arg; - if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) - return ndlp->nlp_state; + if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) { + if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { + spin_lock_irq(shost->host_lock); + ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; + spin_unlock_irq(shost->host_lock); + if (vport->num_disc_nodes) { + lpfc_more_adisc(vport); + if ((vport->num_disc_nodes == 0) && + (vport->fc_npr_cnt)) + lpfc_els_disc_plogi(vport); + if (vport->num_disc_nodes == 0) { + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_NDISC_ACTIVE; + spin_unlock_irq(shost->host_lock); + lpfc_can_disctmo(vport); + lpfc_end_rscn(vport); + } + } + } + return ndlp->nlp_state; + } ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); @@ -1742,24 +1711,21 @@ lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; /* Ignore PLOGI if we have an outstanding LOGO */ - if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC)) { + if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC)) return ndlp->nlp_state; - } - if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) { + lpfc_cancel_retry_delay_tmo(vport, ndlp); spin_lock_irq(shost->host_lock); - ndlp->nlp_flag &= ~NLP_NPR_ADISC; + ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC); spin_unlock_irq(shost->host_lock); - return ndlp->nlp_state; - } - - /* send PLOGI immediately, move to PLOGI issue state */ - if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { - ndlp->nlp_prev_state = NLP_STE_NPR_NODE; - lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); - lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); + } else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { + /* send PLOGI immediately, move to PLOGI issue state */ + if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { + ndlp->nlp_prev_state = NLP_STE_NPR_NODE; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); + lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); + } } - return ndlp->nlp_state; } @@ -1810,7 +1776,6 @@ lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; lpfc_rcv_padisc(vport, ndlp, cmdiocb); - /* * Do not start discovery if discovery is about to start * or discovery in progress for this node. 
Starting discovery @@ -1973,9 +1938,7 @@ lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); spin_unlock_irq(shost->host_lock); - if (ndlp->nlp_flag & NLP_DELAY_TMO) { - lpfc_cancel_retry_delay_tmo(vport, ndlp); - } + lpfc_cancel_retry_delay_tmo(vport, ndlp); return ndlp->nlp_state; } diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 0910a9ab76a5..c94da4f2b8a6 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -50,6 +50,7 @@ void lpfc_adjust_queue_depth(struct lpfc_hba *phba) { unsigned long flags; + uint32_t evt_posted; spin_lock_irqsave(&phba->hbalock, flags); atomic_inc(&phba->num_rsrc_err); @@ -65,17 +66,13 @@ lpfc_adjust_queue_depth(struct lpfc_hba *phba) spin_unlock_irqrestore(&phba->hbalock, flags); spin_lock_irqsave(&phba->pport->work_port_lock, flags); - if ((phba->pport->work_port_events & - WORKER_RAMP_DOWN_QUEUE) == 0) { + evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE; + if (!evt_posted) phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE; - } spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); - spin_lock_irqsave(&phba->hbalock, flags); - if (phba->work_wait) - wake_up(phba->work_wait); - spin_unlock_irqrestore(&phba->hbalock, flags); - + if (!evt_posted) + lpfc_worker_wake_up(phba); return; } @@ -89,6 +86,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport, { unsigned long flags; struct lpfc_hba *phba = vport->phba; + uint32_t evt_posted; atomic_inc(&phba->num_cmd_success); if (vport->cfg_lun_queue_depth <= sdev->queue_depth) @@ -103,16 +101,14 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport, spin_unlock_irqrestore(&phba->hbalock, flags); spin_lock_irqsave(&phba->pport->work_port_lock, flags); - if ((phba->pport->work_port_events & - WORKER_RAMP_UP_QUEUE) == 0) { + evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE; + if (!evt_posted) phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE; - } spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); - spin_lock_irqsave(&phba->hbalock, flags); - if (phba->work_wait) - wake_up(phba->work_wait); - spin_unlock_irqrestore(&phba->hbalock, flags); + if (!evt_posted) + lpfc_worker_wake_up(phba); + return; } void @@ -609,9 +605,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, result = cmd->result; sdev = cmd->device; lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); - spin_lock_irqsave(sdev->host->host_lock, flags); - lpfc_cmd->pCmd = NULL; /* This must be done before scsi_done */ - spin_unlock_irqrestore(sdev->host->host_lock, flags); cmd->scsi_done(cmd); if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { @@ -620,6 +613,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, * wake up the thread. */ spin_lock_irqsave(sdev->host->host_lock, flags); + lpfc_cmd->pCmd = NULL; if (lpfc_cmd->waitq) wake_up(lpfc_cmd->waitq); spin_unlock_irqrestore(sdev->host->host_lock, flags); @@ -690,6 +684,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, * wake up the thread. 
*/ spin_lock_irqsave(sdev->host->host_lock, flags); + lpfc_cmd->pCmd = NULL; if (lpfc_cmd->waitq) wake_up(lpfc_cmd->waitq); spin_unlock_irqrestore(sdev->host->host_lock, flags); @@ -849,14 +844,15 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport, struct lpfc_iocbq *iocbq; struct lpfc_iocbq *iocbqrsp; int ret; + int status; if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode)) return FAILED; lpfc_cmd->rdata = rdata; - ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun, + status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun, FCP_TARGET_RESET); - if (!ret) + if (!status) return FAILED; iocbq = &lpfc_cmd->cur_iocbq; @@ -869,12 +865,15 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport, lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, "0702 Issue Target Reset to TGT %d Data: x%x x%x\n", tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag); - ret = lpfc_sli_issue_iocb_wait(phba, + status = lpfc_sli_issue_iocb_wait(phba, &phba->sli.ring[phba->sli.fcp_ring], iocbq, iocbqrsp, lpfc_cmd->timeout); - if (ret != IOCB_SUCCESS) { - if (ret == IOCB_TIMEDOUT) + if (status != IOCB_SUCCESS) { + if (status == IOCB_TIMEDOUT) { iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; + ret = TIMEOUT_ERROR; + } else + ret = FAILED; lpfc_cmd->status = IOSTAT_DRIVER_REJECT; } else { ret = SUCCESS; @@ -1142,121 +1141,96 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) struct lpfc_iocbq *iocbq, *iocbqrsp; struct lpfc_rport_data *rdata = cmnd->device->hostdata; struct lpfc_nodelist *pnode = rdata->pnode; - uint32_t cmd_result = 0, cmd_status = 0; - int ret = FAILED; - int iocb_status = IOCB_SUCCESS; - int cnt, loopcnt; + unsigned long later; + int ret = SUCCESS; + int status; + int cnt; lpfc_block_error_handler(cmnd); - loopcnt = 0; /* * If target is not in a MAPPED state, delay the reset until * target is rediscovered or devloss timeout expires. 
*/ - while (1) { + later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; + while (time_after(later, jiffies)) { if (!pnode || !NLP_CHK_NODE_ACT(pnode)) - goto out; - - if (pnode->nlp_state != NLP_STE_MAPPED_NODE) { - schedule_timeout_uninterruptible(msecs_to_jiffies(500)); - loopcnt++; - rdata = cmnd->device->hostdata; - if (!rdata || - (loopcnt > ((vport->cfg_devloss_tmo * 2) + 1))){ - lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, - "0721 LUN Reset rport " - "failure: cnt x%x rdata x%p\n", - loopcnt, rdata); - goto out; - } - pnode = rdata->pnode; - if (!pnode || !NLP_CHK_NODE_ACT(pnode)) - goto out; - } + return FAILED; if (pnode->nlp_state == NLP_STE_MAPPED_NODE) break; + schedule_timeout_uninterruptible(msecs_to_jiffies(500)); + rdata = cmnd->device->hostdata; + if (!rdata) + break; + pnode = rdata->pnode; + } + if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + "0721 LUN Reset rport " + "failure: msec x%x rdata x%p\n", + jiffies_to_msecs(jiffies - later), rdata); + return FAILED; } - lpfc_cmd = lpfc_get_scsi_buf(phba); if (lpfc_cmd == NULL) - goto out; - + return FAILED; lpfc_cmd->timeout = 60; lpfc_cmd->rdata = rdata; - ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, cmnd->device->lun, - FCP_TARGET_RESET); - if (!ret) - goto out_free_scsi_buf; - + status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, + cmnd->device->lun, + FCP_TARGET_RESET); + if (!status) { + lpfc_release_scsi_buf(phba, lpfc_cmd); + return FAILED; + } iocbq = &lpfc_cmd->cur_iocbq; /* get a buffer for this IOCB command response */ iocbqrsp = lpfc_sli_get_iocbq(phba); - if (iocbqrsp == NULL) - goto out_free_scsi_buf; - + if (iocbqrsp == NULL) { + lpfc_release_scsi_buf(phba, lpfc_cmd); + return FAILED; + } lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, "0703 Issue target reset to TGT %d LUN %d " "rpi x%x nlp_flag x%x\n", cmnd->device->id, cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag); - iocb_status = lpfc_sli_issue_iocb_wait(phba, - &phba->sli.ring[phba->sli.fcp_ring], - iocbq, iocbqrsp, lpfc_cmd->timeout); - - if (iocb_status == IOCB_TIMEDOUT) + status = lpfc_sli_issue_iocb_wait(phba, + &phba->sli.ring[phba->sli.fcp_ring], + iocbq, iocbqrsp, lpfc_cmd->timeout); + if (status == IOCB_TIMEDOUT) { iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; - - if (iocb_status == IOCB_SUCCESS) - ret = SUCCESS; - else - ret = iocb_status; - - cmd_result = iocbqrsp->iocb.un.ulpWord[4]; - cmd_status = iocbqrsp->iocb.ulpStatus; - + ret = TIMEOUT_ERROR; + } else { + if (status != IOCB_SUCCESS) + ret = FAILED; + lpfc_release_scsi_buf(phba, lpfc_cmd); + } + lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + "0713 SCSI layer issued device reset (%d, %d) " + "return x%x status x%x result x%x\n", + cmnd->device->id, cmnd->device->lun, ret, + iocbqrsp->iocb.ulpStatus, + iocbqrsp->iocb.un.ulpWord[4]); lpfc_sli_release_iocbq(phba, iocbqrsp); - - /* - * All outstanding txcmplq I/Os should have been aborted by the device. - * Unfortunately, some targets do not abide by this forcing the driver - * to double check. 
- */ cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun, - LPFC_CTX_LUN); + LPFC_CTX_TGT); if (cnt) lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], cmnd->device->id, cmnd->device->lun, - LPFC_CTX_LUN); - loopcnt = 0; - while(cnt) { - schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ); - - if (++loopcnt - > (2 * vport->cfg_devloss_tmo)/LPFC_RESET_WAIT) - break; - + LPFC_CTX_TGT); + later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; + while (time_after(later, jiffies) && cnt) { + schedule_timeout_uninterruptible(msecs_to_jiffies(20)); cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, - cmnd->device->lun, LPFC_CTX_LUN); + cmnd->device->lun, LPFC_CTX_TGT); } - if (cnt) { lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, "0719 device reset I/O flush failure: " "cnt x%x\n", cnt); ret = FAILED; } - -out_free_scsi_buf: - if (iocb_status != IOCB_TIMEDOUT) { - lpfc_release_scsi_buf(phba, lpfc_cmd); - } - lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, - "0713 SCSI layer issued device reset (%d, %d) " - "return x%x status x%x result x%x\n", - cmnd->device->id, cmnd->device->lun, ret, - cmd_status, cmd_result); -out: return ret; } @@ -1268,19 +1242,12 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *ndlp = NULL; int match; - int ret = FAILED, i, err_count = 0; - int cnt, loopcnt; + int ret = SUCCESS, status, i; + int cnt; struct lpfc_scsi_buf * lpfc_cmd; + unsigned long later; lpfc_block_error_handler(cmnd); - - lpfc_cmd = lpfc_get_scsi_buf(phba); - if (lpfc_cmd == NULL) - goto out; - - /* The lpfc_cmd storage is reused. Set all loop invariants. */ - lpfc_cmd->timeout = 60; - /* * Since the driver manages a single bus device, reset all * targets known to the driver. Should any target reset @@ -1294,7 +1261,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) if (!NLP_CHK_NODE_ACT(ndlp)) continue; if (ndlp->nlp_state == NLP_STE_MAPPED_NODE && - i == ndlp->nlp_sid && + ndlp->nlp_sid == i && ndlp->rport) { match = 1; break; @@ -1303,27 +1270,22 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) spin_unlock_irq(shost->host_lock); if (!match) continue; - - ret = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i, - cmnd->device->lun, - ndlp->rport->dd_data); - if (ret != SUCCESS) { + lpfc_cmd = lpfc_get_scsi_buf(phba); + if (lpfc_cmd) { + lpfc_cmd->timeout = 60; + status = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i, + cmnd->device->lun, + ndlp->rport->dd_data); + if (status != TIMEOUT_ERROR) + lpfc_release_scsi_buf(phba, lpfc_cmd); + } + if (!lpfc_cmd || status != SUCCESS) { lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, "0700 Bus Reset on target %d failed\n", i); - err_count++; - break; + ret = FAILED; } } - - if (ret != IOCB_TIMEDOUT) - lpfc_release_scsi_buf(phba, lpfc_cmd); - - if (err_count == 0) - ret = SUCCESS; - else - ret = FAILED; - /* * All outstanding txcmplq I/Os should have been aborted by * the targets. 
Unfortunately, some targets do not abide by @@ -1333,27 +1295,19 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) if (cnt) lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], 0, 0, LPFC_CTX_HOST); - loopcnt = 0; - while(cnt) { - schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ); - - if (++loopcnt - > (2 * vport->cfg_devloss_tmo)/LPFC_RESET_WAIT) - break; - + later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; + while (time_after(later, jiffies) && cnt) { + schedule_timeout_uninterruptible(msecs_to_jiffies(20)); cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST); } - if (cnt) { lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, "0715 Bus Reset I/O flush failure: " "cnt x%x left x%x\n", cnt, i); ret = FAILED; } - lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, "0714 SCSI layer issued Bus Reset Data: x%x\n", ret); -out: return ret; } diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 70a0a9eab211..f40aa7b905f7 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -324,9 +324,7 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring) phba->work_ha |= HA_ERATT; phba->work_hs = HS_FFER3; - /* hbalock should already be held */ - if (phba->work_wait) - lpfc_worker_wake_up(phba); + lpfc_worker_wake_up(phba); return NULL; } @@ -1309,9 +1307,7 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) phba->work_ha |= HA_ERATT; phba->work_hs = HS_FFER3; - /* hbalock should already be held */ - if (phba->work_wait) - lpfc_worker_wake_up(phba); + lpfc_worker_wake_up(phba); return; } @@ -2611,12 +2607,9 @@ lpfc_mbox_timeout(unsigned long ptr) phba->pport->work_port_events |= WORKER_MBOX_TMO; spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); - if (!tmo_posted) { - spin_lock_irqsave(&phba->hbalock, iflag); - if (phba->work_wait) - lpfc_worker_wake_up(phba); - spin_unlock_irqrestore(&phba->hbalock, iflag); - } + if (!tmo_posted) + lpfc_worker_wake_up(phba); + return; } void @@ -3374,8 +3367,12 @@ lpfc_sli_host_down(struct lpfc_vport *vport) for (i = 0; i < psli->num_rings; i++) { pring = &psli->ring[i]; prev_pring_flag = pring->flag; - if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */ + /* Only slow rings */ + if (pring->ringno == LPFC_ELS_RING) { pring->flag |= LPFC_DEFERRED_RING_EVENT; + /* Set the lpfc data pending flag */ + set_bit(LPFC_DATA_READY, &phba->data_flags); + } /* * Error everything on the txq since these iocbs have not been * given to the FW yet. 
@@ -3434,8 +3431,12 @@ lpfc_sli_hba_down(struct lpfc_hba *phba) spin_lock_irqsave(&phba->hbalock, flags); for (i = 0; i < psli->num_rings; i++) { pring = &psli->ring[i]; - if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */ + /* Only slow rings */ + if (pring->ringno == LPFC_ELS_RING) { pring->flag |= LPFC_DEFERRED_RING_EVENT; + /* Set the lpfc data pending flag */ + set_bit(LPFC_DATA_READY, &phba->data_flags); + } /* * Error everything on the txq since these iocbs have not been @@ -3762,7 +3763,6 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport, lpfc_ctx_cmd ctx_cmd) { struct lpfc_scsi_buf *lpfc_cmd; - struct scsi_cmnd *cmnd; int rc = 1; if (!(iocbq->iocb_flag & LPFC_IO_FCP)) @@ -3772,19 +3772,20 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport, return rc; lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); - cmnd = lpfc_cmd->pCmd; - if (cmnd == NULL) + if (lpfc_cmd->pCmd == NULL) return rc; switch (ctx_cmd) { case LPFC_CTX_LUN: - if ((cmnd->device->id == tgt_id) && - (cmnd->device->lun == lun_id)) + if ((lpfc_cmd->rdata->pnode) && + (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) && + (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id)) rc = 0; break; case LPFC_CTX_TGT: - if (cmnd->device->id == tgt_id) + if ((lpfc_cmd->rdata->pnode) && + (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id)) rc = 0; break; case LPFC_CTX_HOST: @@ -3994,6 +3995,7 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, if (pmboxq->context1) return MBX_NOT_FINISHED; + pmboxq->mbox_flag &= ~LPFC_MBX_WAKE; /* setup wake call as IOCB callback */ pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait; /* setup context field to pass wait_queue pointer to wake function */ @@ -4159,7 +4161,7 @@ lpfc_intr_handler(int irq, void *dev_id) "pwork:x%x hawork:x%x wait:x%x", phba->work_ha, work_ha_copy, (uint32_t)((unsigned long) - phba->work_wait)); + &phba->work_waitq)); control &= ~(HC_R0INT_ENA << LPFC_ELS_RING); @@ -4172,7 +4174,7 @@ lpfc_intr_handler(int irq, void *dev_id) "x%x hawork:x%x wait:x%x", phba->work_ha, work_ha_copy, (uint32_t)((unsigned long) - phba->work_wait)); + &phba->work_waitq)); } spin_unlock(&phba->hbalock); } @@ -4297,9 +4299,8 @@ send_current_mbox: spin_lock(&phba->hbalock); phba->work_ha |= work_ha_copy; - if (phba->work_wait) - lpfc_worker_wake_up(phba); spin_unlock(&phba->hbalock); + lpfc_worker_wake_up(phba); } ha_copy &= ~(phba->work_ha_mask); diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index b22b893019f4..ad24cacfbe10 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. 
* *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.2.6" +#define LPFC_DRIVER_VERSION "8.2.7" #define LPFC_DRIVER_NAME "lpfc" diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 6feaf59b0b1b..109f89d98830 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -216,6 +216,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) int vpi; int rc = VPORT_ERROR; int status; + int size; if ((phba->sli_rev < 3) || !(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) { @@ -278,7 +279,20 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) memcpy(vport->fc_portname.u.wwn, vport->fc_sparam.portName.u.wwn, 8); memcpy(vport->fc_nodename.u.wwn, vport->fc_sparam.nodeName.u.wwn, 8); - + size = strnlen(fc_vport->symbolic_name, LPFC_VNAME_LEN); + if (size) { + vport->vname = kzalloc(size+1, GFP_KERNEL); + if (!vport->vname) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, + "1814 Create VPORT failed. " + "vname allocation failed.\n"); + rc = VPORT_ERROR; + lpfc_free_vpi(phba, vpi); + destroy_port(vport); + goto error_out; + } + memcpy(vport->vname, fc_vport->symbolic_name, size+1); + } if (fc_vport->node_name != 0) u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn); if (fc_vport->port_name != 0) diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c index fd63b06d9ef1..11aa917629ac 100644 --- a/drivers/scsi/mesh.c +++ b/drivers/scsi/mesh.c @@ -1765,7 +1765,7 @@ static int mesh_suspend(struct macio_dev *mdev, pm_message_t mesg) default: return 0; } - if (mesg.event == mdev->ofdev.dev.power.power_state.event) + if (ms->phase == sleeping) return 0; scsi_block_requests(ms->host); @@ -1780,8 +1780,6 @@ static int mesh_suspend(struct macio_dev *mdev, pm_message_t mesg) disable_irq(ms->meshintr); set_mesh_power(ms, 0); - mdev->ofdev.dev.power.power_state = mesg; - return 0; } @@ -1790,7 +1788,7 @@ static int mesh_resume(struct macio_dev *mdev) struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev); unsigned long flags; - if (mdev->ofdev.dev.power.power_state.event == PM_EVENT_ON) + if (ms->phase != sleeping) return 0; set_mesh_power(ms, 1); @@ -1801,8 +1799,6 @@ static int mesh_resume(struct macio_dev *mdev) enable_irq(ms->meshintr); scsi_unblock_requests(ms->host); - mdev->ofdev.dev.power.power_state.event = PM_EVENT_ON; - return 0; } diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 0c786944d2c2..5822dd595826 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -113,9 +113,6 @@ static struct iscsi_transport qla4xxx_iscsi_transport = { .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS | ISCSI_HOST_INITIATOR_NAME, - .sessiondata_size = sizeof(struct ddb_entry), - .host_template = &qla4xxx_driver_template, - .tgt_dscvr = qla4xxx_tgt_dscvr, .get_conn_param = qla4xxx_conn_get_param, .get_session_param = qla4xxx_sess_get_param, @@ -275,7 +272,7 @@ int qla4xxx_add_sess(struct ddb_entry *ddb_entry) return err; } - ddb_entry->conn = iscsi_create_conn(ddb_entry->sess, 0); + ddb_entry->conn = iscsi_create_conn(ddb_entry->sess, 0, 0); if (!ddb_entry->conn) { iscsi_remove_session(ddb_entry->sess); DEBUG2(printk(KERN_ERR "Could not add connection.\n")); @@ -292,7 +289,8 @@ struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha) struct ddb_entry *ddb_entry; struct iscsi_cls_session *sess; - sess = iscsi_alloc_session(ha->host, &qla4xxx_iscsi_transport); + sess = iscsi_alloc_session(ha->host, 
&qla4xxx_iscsi_transport, + sizeof(struct ddb_entry)); if (!sess) return NULL; diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 110e776d1a07..36c92f961e15 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -855,9 +855,18 @@ void scsi_finish_command(struct scsi_cmnd *cmd) good_bytes = scsi_bufflen(cmd); if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) { + int old_good_bytes = good_bytes; drv = scsi_cmd_to_driver(cmd); if (drv->done) good_bytes = drv->done(cmd); + /* + * USB may not give sense identifying bad sector and + * simply return a residue instead, so subtract off the + * residue if drv->done() error processing indicates no + * change to the completion length. + */ + if (good_bytes == old_good_bytes) + good_bytes -= scsi_get_resid(cmd); } scsi_io_completion(cmd, good_bytes); } diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index f6600bfb5bde..01d11a01ffbf 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -94,6 +94,7 @@ static const char * scsi_debug_version_date = "20070104"; #define DEF_VIRTUAL_GB 0 #define DEF_FAKE_RW 0 #define DEF_VPD_USE_HOSTNO 1 +#define DEF_SECTOR_SIZE 512 /* bit mask values for scsi_debug_opts */ #define SCSI_DEBUG_OPT_NOISE 1 @@ -142,6 +143,7 @@ static int scsi_debug_no_lun_0 = DEF_NO_LUN_0; static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB; static int scsi_debug_fake_rw = DEF_FAKE_RW; static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO; +static int scsi_debug_sector_size = DEF_SECTOR_SIZE; static int scsi_debug_cmnd_count = 0; @@ -157,11 +159,6 @@ static int sdebug_heads; /* heads per disk */ static int sdebug_cylinders_per; /* cylinders per surface */ static int sdebug_sectors_per; /* sectors per cylinder */ -/* default sector size is 512 bytes, 2**9 bytes */ -#define POW2_SECT_SIZE 9 -#define SECT_SIZE (1 << POW2_SECT_SIZE) -#define SECT_SIZE_PER(TGT) SECT_SIZE - #define SDEBUG_MAX_PARTS 4 #define SDEBUG_SENSE_LEN 32 @@ -646,6 +643,14 @@ static int inquiry_evpd_b0(unsigned char * arr) return sizeof(vpdb0_data); } +static int inquiry_evpd_b1(unsigned char *arr) +{ + memset(arr, 0, 0x3c); + arr[0] = 0; + arr[1] = 1; + + return 0x3c; +} #define SDEBUG_LONG_INQ_SZ 96 #define SDEBUG_MAX_INQ_ARR_SZ 584 @@ -701,6 +706,7 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target, arr[n++] = 0x88; /* SCSI ports */ arr[n++] = 0x89; /* ATA information */ arr[n++] = 0xb0; /* Block limits (SBC) */ + arr[n++] = 0xb1; /* Block characteristics (SBC) */ arr[3] = n - 4; /* number of supported VPD pages */ } else if (0x80 == cmd[2]) { /* unit serial number */ arr[1] = cmd[2]; /*sanity */ @@ -740,6 +746,9 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target, } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */ arr[1] = cmd[2]; /*sanity */ arr[3] = inquiry_evpd_b0(&arr[4]); + } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */ + arr[1] = cmd[2]; /*sanity */ + arr[3] = inquiry_evpd_b1(&arr[4]); } else { /* Illegal request, invalid field in cdb */ mk_sense_buffer(devip, ILLEGAL_REQUEST, @@ -878,8 +887,8 @@ static int resp_readcap(struct scsi_cmnd * scp, arr[2] = 0xff; arr[3] = 0xff; } - arr[6] = (SECT_SIZE_PER(target) >> 8) & 0xff; - arr[7] = SECT_SIZE_PER(target) & 0xff; + arr[6] = (scsi_debug_sector_size >> 8) & 0xff; + arr[7] = scsi_debug_sector_size & 0xff; return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ); } @@ -902,10 +911,10 @@ static int resp_readcap16(struct scsi_cmnd * scp, capac = sdebug_capacity - 1; for (k = 0; k < 8; ++k, capac >>= 8) arr[7 - k] = capac & 
0xff; - arr[8] = (SECT_SIZE_PER(target) >> 24) & 0xff; - arr[9] = (SECT_SIZE_PER(target) >> 16) & 0xff; - arr[10] = (SECT_SIZE_PER(target) >> 8) & 0xff; - arr[11] = SECT_SIZE_PER(target) & 0xff; + arr[8] = (scsi_debug_sector_size >> 24) & 0xff; + arr[9] = (scsi_debug_sector_size >> 16) & 0xff; + arr[10] = (scsi_debug_sector_size >> 8) & 0xff; + arr[11] = scsi_debug_sector_size & 0xff; return fill_from_dev_buffer(scp, arr, min(alloc_len, SDEBUG_READCAP16_ARR_SZ)); } @@ -1019,20 +1028,20 @@ static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target) static int resp_format_pg(unsigned char * p, int pcontrol, int target) { /* Format device page for mode_sense */ - unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0x40, 0, 0, 0}; - - memcpy(p, format_pg, sizeof(format_pg)); - p[10] = (sdebug_sectors_per >> 8) & 0xff; - p[11] = sdebug_sectors_per & 0xff; - p[12] = (SECT_SIZE >> 8) & 0xff; - p[13] = SECT_SIZE & 0xff; - if (DEV_REMOVEABLE(target)) - p[20] |= 0x20; /* should agree with INQUIRY */ - if (1 == pcontrol) - memset(p + 2, 0, sizeof(format_pg) - 2); - return sizeof(format_pg); + unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0x40, 0, 0, 0}; + + memcpy(p, format_pg, sizeof(format_pg)); + p[10] = (sdebug_sectors_per >> 8) & 0xff; + p[11] = sdebug_sectors_per & 0xff; + p[12] = (scsi_debug_sector_size >> 8) & 0xff; + p[13] = scsi_debug_sector_size & 0xff; + if (DEV_REMOVEABLE(target)) + p[20] |= 0x20; /* should agree with INQUIRY */ + if (1 == pcontrol) + memset(p + 2, 0, sizeof(format_pg) - 2); + return sizeof(format_pg); } static int resp_caching_pg(unsigned char * p, int pcontrol, int target) @@ -1206,8 +1215,8 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target, ap[2] = (sdebug_capacity >> 8) & 0xff; ap[3] = sdebug_capacity & 0xff; } - ap[6] = (SECT_SIZE_PER(target) >> 8) & 0xff; - ap[7] = SECT_SIZE_PER(target) & 0xff; + ap[6] = (scsi_debug_sector_size >> 8) & 0xff; + ap[7] = scsi_debug_sector_size & 0xff; offset += bd_len; ap = arr + offset; } else if (16 == bd_len) { @@ -1215,10 +1224,10 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target, for (k = 0; k < 8; ++k, capac >>= 8) ap[7 - k] = capac & 0xff; - ap[12] = (SECT_SIZE_PER(target) >> 24) & 0xff; - ap[13] = (SECT_SIZE_PER(target) >> 16) & 0xff; - ap[14] = (SECT_SIZE_PER(target) >> 8) & 0xff; - ap[15] = SECT_SIZE_PER(target) & 0xff; + ap[12] = (scsi_debug_sector_size >> 24) & 0xff; + ap[13] = (scsi_debug_sector_size >> 16) & 0xff; + ap[14] = (scsi_debug_sector_size >> 8) & 0xff; + ap[15] = scsi_debug_sector_size & 0xff; offset += bd_len; ap = arr + offset; } @@ -1519,10 +1528,10 @@ static int do_device_access(struct scsi_cmnd *scmd, if (block + num > sdebug_store_sectors) rest = block + num - sdebug_store_sectors; - ret = func(scmd, fake_storep + (block * SECT_SIZE), - (num - rest) * SECT_SIZE); + ret = func(scmd, fake_storep + (block * scsi_debug_sector_size), + (num - rest) * scsi_debug_sector_size); if (!ret && rest) - ret = func(scmd, fake_storep, rest * SECT_SIZE); + ret = func(scmd, fake_storep, rest * scsi_debug_sector_size); return ret; } @@ -1575,10 +1584,10 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba, write_unlock_irqrestore(&atomic_rw, iflags); if (-1 == ret) return (DID_ERROR << 16); - else if ((ret < (num * SECT_SIZE)) && + else if ((ret < (num * scsi_debug_sector_size)) && (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) printk(KERN_INFO "scsi_debug: write: cdb 
indicated=%u, " - " IO sent=%d bytes\n", num * SECT_SIZE, ret); + " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret); return 0; } @@ -2085,6 +2094,7 @@ module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO); module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR); module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int, S_IRUGO | S_IWUSR); +module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO); MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert"); MODULE_DESCRIPTION("SCSI debug adapter driver"); @@ -2106,6 +2116,7 @@ MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])"); MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])"); MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)"); MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)"); +MODULE_PARM_DESC(sector_size, "hardware sector size in bytes (def=512)"); static char sdebug_info[256]; @@ -2158,8 +2169,9 @@ static int scsi_debug_proc_info(struct Scsi_Host *host, char *buffer, char **sta scsi_debug_dev_size_mb, scsi_debug_opts, scsi_debug_every_nth, scsi_debug_cmnd_count, scsi_debug_delay, scsi_debug_max_luns, scsi_debug_scsi_level, - SECT_SIZE, sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per, - num_aborts, num_dev_resets, num_bus_resets, num_host_resets); + scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads, + sdebug_sectors_per, num_aborts, num_dev_resets, num_bus_resets, + num_host_resets); if (pos < offset) { len = 0; begin = pos; @@ -2434,6 +2446,12 @@ static ssize_t sdebug_vpd_use_hostno_store(struct device_driver * ddp, DRIVER_ATTR(vpd_use_hostno, S_IRUGO | S_IWUSR, sdebug_vpd_use_hostno_show, sdebug_vpd_use_hostno_store); +static ssize_t sdebug_sector_size_show(struct device_driver * ddp, char * buf) +{ + return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size); +} +DRIVER_ATTR(sector_size, S_IRUGO, sdebug_sector_size_show, NULL); + /* Note: The following function creates attribute files in the /sys/bus/pseudo/drivers/scsi_debug directory. 
The advantage of these files (over those found in the /sys/module/scsi_debug/parameters @@ -2459,11 +2477,13 @@ static int do_create_driverfs_files(void) ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level); ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb); ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno); + ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_sector_size); return ret; } static void do_remove_driverfs_files(void) { + driver_remove_file(&sdebug_driverfs_driver, &driver_attr_sector_size); driver_remove_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno); driver_remove_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb); driver_remove_file(&sdebug_driverfs_driver, &driver_attr_scsi_level); @@ -2499,10 +2519,22 @@ static int __init scsi_debug_init(void) int k; int ret; + switch (scsi_debug_sector_size) { + case 512: + case 1024: + case 2048: + case 4096: + break; + default: + printk(KERN_ERR "scsi_debug_init: invalid sector_size %u\n", + scsi_debug_sector_size); + return -EINVAL; + } + if (scsi_debug_dev_size_mb < 1) scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */ sz = (unsigned long)scsi_debug_dev_size_mb * 1048576; - sdebug_store_sectors = sz / SECT_SIZE; + sdebug_store_sectors = sz / scsi_debug_sector_size; sdebug_capacity = get_sdebug_capacity(); /* play around with geometry, don't waste too much on track 0 */ diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index eaf5a8add1ba..006a95916f72 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -298,6 +298,7 @@ static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost, */ static int scsi_check_sense(struct scsi_cmnd *scmd) { + struct scsi_device *sdev = scmd->device; struct scsi_sense_hdr sshdr; if (! scsi_command_normalize_sense(scmd, &sshdr)) @@ -306,6 +307,16 @@ static int scsi_check_sense(struct scsi_cmnd *scmd) if (scsi_sense_is_deferred(&sshdr)) return NEEDS_RETRY; + if (sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh && + sdev->scsi_dh_data->scsi_dh->check_sense) { + int rc; + + rc = sdev->scsi_dh_data->scsi_dh->check_sense(sdev, &sshdr); + if (rc != SCSI_RETURN_NOT_HANDLED) + return rc; + /* handler does not care. Drop down to default handling */ + } + /* * Previous logic looked for FILEMARK, EOM or ILI which are * mainly associated with tapes and returned SUCCESS. 
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index cbf55d59a54c..88d1b5f44e59 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -65,7 +65,7 @@ static struct scsi_host_sg_pool scsi_sg_pools[] = { }; #undef SP -static struct kmem_cache *scsi_bidi_sdb_cache; +static struct kmem_cache *scsi_sdb_cache; static void scsi_run_queue(struct request_queue *q); @@ -784,7 +784,7 @@ void scsi_release_buffers(struct scsi_cmnd *cmd) struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special; scsi_free_sgtable(bidi_sdb); - kmem_cache_free(scsi_bidi_sdb_cache, bidi_sdb); + kmem_cache_free(scsi_sdb_cache, bidi_sdb); cmd->request->next_rq->special = NULL; } } @@ -1059,7 +1059,7 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask) if (blk_bidi_rq(cmd->request)) { struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc( - scsi_bidi_sdb_cache, GFP_ATOMIC); + scsi_sdb_cache, GFP_ATOMIC); if (!bidi_sdb) { error = BLKPREP_DEFER; goto err_exit; @@ -1169,6 +1169,14 @@ int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req) if (ret != BLKPREP_OK) return ret; + + if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh + && sdev->scsi_dh_data->scsi_dh->prep_fn)) { + ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req); + if (ret != BLKPREP_OK) + return ret; + } + /* * Filesystem requests must transfer data. */ @@ -1329,7 +1337,6 @@ static inline int scsi_host_queue_ready(struct request_queue *q, printk("scsi%d unblocking host at zero depth\n", shost->host_no)); } else { - blk_plug_device(q); return 0; } } @@ -1693,11 +1700,11 @@ int __init scsi_init_queue(void) return -ENOMEM; } - scsi_bidi_sdb_cache = kmem_cache_create("scsi_bidi_sdb", - sizeof(struct scsi_data_buffer), - 0, 0, NULL); - if (!scsi_bidi_sdb_cache) { - printk(KERN_ERR "SCSI: can't init scsi bidi sdb cache\n"); + scsi_sdb_cache = kmem_cache_create("scsi_data_buffer", + sizeof(struct scsi_data_buffer), + 0, 0, NULL); + if (!scsi_sdb_cache) { + printk(KERN_ERR "SCSI: can't init scsi sdb cache\n"); goto cleanup_io_context; } @@ -1710,7 +1717,7 @@ int __init scsi_init_queue(void) if (!sgp->slab) { printk(KERN_ERR "SCSI: can't init sg slab %s\n", sgp->name); - goto cleanup_bidi_sdb; + goto cleanup_sdb; } sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE, @@ -1718,13 +1725,13 @@ int __init scsi_init_queue(void) if (!sgp->pool) { printk(KERN_ERR "SCSI: can't init sg mempool %s\n", sgp->name); - goto cleanup_bidi_sdb; + goto cleanup_sdb; } } return 0; -cleanup_bidi_sdb: +cleanup_sdb: for (i = 0; i < SG_MEMPOOL_NR; i++) { struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; if (sgp->pool) @@ -1732,7 +1739,7 @@ cleanup_bidi_sdb: if (sgp->slab) kmem_cache_destroy(sgp->slab); } - kmem_cache_destroy(scsi_bidi_sdb_cache); + kmem_cache_destroy(scsi_sdb_cache); cleanup_io_context: kmem_cache_destroy(scsi_io_context_cache); @@ -1744,7 +1751,7 @@ void scsi_exit_queue(void) int i; kmem_cache_destroy(scsi_io_context_cache); - kmem_cache_destroy(scsi_bidi_sdb_cache); + kmem_cache_destroy(scsi_sdb_cache); for (i = 0; i < SG_MEMPOOL_NR; i++) { struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index a00eee6f7be9..196fe3af0d5e 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -346,7 +346,7 @@ static void scsi_target_dev_release(struct device *dev) put_device(parent); } -struct device_type scsi_target_type = { +static struct device_type scsi_target_type = { .name = "scsi_target", .release = scsi_target_dev_release, }; diff 
--git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 93d2b6714453..b6e561059779 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -439,6 +439,7 @@ struct bus_type scsi_bus_type = { .resume = scsi_bus_resume, .remove = scsi_bus_remove, }; +EXPORT_SYMBOL_GPL(scsi_bus_type); int scsi_sysfs_register(void) { diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 65d1737eb664..3af7cbcc5c5d 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -30,10 +30,11 @@ #include <scsi/scsi_transport_iscsi.h> #include <scsi/iscsi_if.h> -#define ISCSI_SESSION_ATTRS 19 +#define ISCSI_SESSION_ATTRS 21 #define ISCSI_CONN_ATTRS 13 #define ISCSI_HOST_ATTRS 4 -#define ISCSI_TRANSPORT_VERSION "2.0-869" + +#define ISCSI_TRANSPORT_VERSION "2.0-870" struct iscsi_internal { int daemon_pid; @@ -101,16 +102,10 @@ show_transport_##name(struct device *dev, \ static DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL); show_transport_attr(caps, "0x%x"); -show_transport_attr(max_lun, "%d"); -show_transport_attr(max_conn, "%d"); -show_transport_attr(max_cmd_len, "%d"); static struct attribute *iscsi_transport_attrs[] = { &dev_attr_handle.attr, &dev_attr_caps.attr, - &dev_attr_max_lun.attr, - &dev_attr_max_conn.attr, - &dev_attr_max_cmd_len.attr, NULL, }; @@ -118,18 +113,139 @@ static struct attribute_group iscsi_transport_group = { .attrs = iscsi_transport_attrs, }; +/* + * iSCSI endpoint attrs + */ +#define iscsi_dev_to_endpoint(_dev) \ + container_of(_dev, struct iscsi_endpoint, dev) + +#define ISCSI_ATTR(_prefix,_name,_mode,_show,_store) \ +struct device_attribute dev_attr_##_prefix##_##_name = \ + __ATTR(_name,_mode,_show,_store) + +static void iscsi_endpoint_release(struct device *dev) +{ + struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev); + kfree(ep); +} + +static struct class iscsi_endpoint_class = { + .name = "iscsi_endpoint", + .dev_release = iscsi_endpoint_release, +}; + +static ssize_t +show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev); + return sprintf(buf, "%u\n", ep->id); +} +static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL); + +static struct attribute *iscsi_endpoint_attrs[] = { + &dev_attr_ep_handle.attr, + NULL, +}; + +static struct attribute_group iscsi_endpoint_group = { + .attrs = iscsi_endpoint_attrs, +}; +#define ISCSI_MAX_EPID -1 + +static int iscsi_match_epid(struct device *dev, void *data) +{ + struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev); + unsigned int *epid = (unsigned int *) data; + + return *epid == ep->id; +} + +struct iscsi_endpoint * +iscsi_create_endpoint(int dd_size) +{ + struct device *dev; + struct iscsi_endpoint *ep; + unsigned int id; + int err; + + for (id = 1; id < ISCSI_MAX_EPID; id++) { + dev = class_find_device(&iscsi_endpoint_class, &id, + iscsi_match_epid); + if (!dev) + break; + } + if (id == ISCSI_MAX_EPID) { + printk(KERN_ERR "Too many connections. 
Max supported %u\n", + ISCSI_MAX_EPID - 1); + return NULL; + } + + ep = kzalloc(sizeof(*ep) + dd_size, GFP_KERNEL); + if (!ep) + return NULL; + + ep->id = id; + ep->dev.class = &iscsi_endpoint_class; + snprintf(ep->dev.bus_id, BUS_ID_SIZE, "ep-%u", id); + err = device_register(&ep->dev); + if (err) + goto free_ep; + + err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group); + if (err) + goto unregister_dev; + + if (dd_size) + ep->dd_data = &ep[1]; + return ep; + +unregister_dev: + device_unregister(&ep->dev); + return NULL; + +free_ep: + kfree(ep); + return NULL; +} +EXPORT_SYMBOL_GPL(iscsi_create_endpoint); + +void iscsi_destroy_endpoint(struct iscsi_endpoint *ep) +{ + sysfs_remove_group(&ep->dev.kobj, &iscsi_endpoint_group); + device_unregister(&ep->dev); +} +EXPORT_SYMBOL_GPL(iscsi_destroy_endpoint); + +struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle) +{ + struct iscsi_endpoint *ep; + struct device *dev; + + dev = class_find_device(&iscsi_endpoint_class, &handle, + iscsi_match_epid); + if (!dev) + return NULL; + + ep = iscsi_dev_to_endpoint(dev); + /* + * we can drop this now because the interface will prevent + * removals and lookups from racing. + */ + put_device(dev); + return ep; +} +EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint); static int iscsi_setup_host(struct transport_container *tc, struct device *dev, struct device *cdev) { struct Scsi_Host *shost = dev_to_shost(dev); - struct iscsi_host *ihost = shost->shost_data; + struct iscsi_cls_host *ihost = shost->shost_data; memset(ihost, 0, sizeof(*ihost)); - INIT_LIST_HEAD(&ihost->sessions); - mutex_init(&ihost->mutex); atomic_set(&ihost->nr_scans, 0); + mutex_init(&ihost->mutex); snprintf(ihost->scan_workq_name, KOBJ_NAME_LEN, "iscsi_scan_%d", shost->host_no); @@ -144,7 +260,7 @@ static int iscsi_remove_host(struct transport_container *tc, struct device *dev, struct device *cdev) { struct Scsi_Host *shost = dev_to_shost(dev); - struct iscsi_host *ihost = shost->shost_data; + struct iscsi_cls_host *ihost = shost->shost_data; destroy_workqueue(ihost->scan_workq); return 0; @@ -287,6 +403,24 @@ static int iscsi_is_session_dev(const struct device *dev) return dev->release == iscsi_session_release; } +static int iscsi_iter_session_fn(struct device *dev, void *data) +{ + void (* fn) (struct iscsi_cls_session *) = data; + + if (!iscsi_is_session_dev(dev)) + return 0; + fn(iscsi_dev_to_session(dev)); + return 0; +} + +void iscsi_host_for_each_session(struct Scsi_Host *shost, + void (*fn)(struct iscsi_cls_session *)) +{ + device_for_each_child(&shost->shost_gendev, fn, + iscsi_iter_session_fn); +} +EXPORT_SYMBOL_GPL(iscsi_host_for_each_session); + /** * iscsi_scan_finished - helper to report when running scans are done * @shost: scsi host @@ -297,7 +431,7 @@ static int iscsi_is_session_dev(const struct device *dev) */ int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time) { - struct iscsi_host *ihost = shost->shost_data; + struct iscsi_cls_host *ihost = shost->shost_data; /* * qla4xxx will have kicked off some session unblocks before calling * scsi_scan_host, so just wait for them to complete. 
@@ -306,42 +440,76 @@ int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time) } EXPORT_SYMBOL_GPL(iscsi_scan_finished); -static int iscsi_user_scan(struct Scsi_Host *shost, uint channel, - uint id, uint lun) +struct iscsi_scan_data { + unsigned int channel; + unsigned int id; + unsigned int lun; +}; + +static int iscsi_user_scan_session(struct device *dev, void *data) { - struct iscsi_host *ihost = shost->shost_data; + struct iscsi_scan_data *scan_data = data; struct iscsi_cls_session *session; + struct Scsi_Host *shost; + struct iscsi_cls_host *ihost; + unsigned long flags; + unsigned int id; + + if (!iscsi_is_session_dev(dev)) + return 0; + + session = iscsi_dev_to_session(dev); + shost = iscsi_session_to_shost(session); + ihost = shost->shost_data; mutex_lock(&ihost->mutex); - list_for_each_entry(session, &ihost->sessions, host_list) { - if ((channel == SCAN_WILD_CARD || channel == 0) && - (id == SCAN_WILD_CARD || id == session->target_id)) - scsi_scan_target(&session->dev, 0, - session->target_id, lun, 1); + spin_lock_irqsave(&session->lock, flags); + if (session->state != ISCSI_SESSION_LOGGED_IN) { + spin_unlock_irqrestore(&session->lock, flags); + mutex_unlock(&ihost->mutex); + return 0; } - mutex_unlock(&ihost->mutex); + id = session->target_id; + spin_unlock_irqrestore(&session->lock, flags); + if (id != ISCSI_MAX_TARGET) { + if ((scan_data->channel == SCAN_WILD_CARD || + scan_data->channel == 0) && + (scan_data->id == SCAN_WILD_CARD || + scan_data->id == id)) + scsi_scan_target(&session->dev, 0, id, + scan_data->lun, 1); + } + mutex_unlock(&ihost->mutex); return 0; } +static int iscsi_user_scan(struct Scsi_Host *shost, uint channel, + uint id, uint lun) +{ + struct iscsi_scan_data scan_data; + + scan_data.channel = channel; + scan_data.id = id; + scan_data.lun = lun; + + return device_for_each_child(&shost->shost_gendev, &scan_data, + iscsi_user_scan_session); +} + static void iscsi_scan_session(struct work_struct *work) { struct iscsi_cls_session *session = container_of(work, struct iscsi_cls_session, scan_work); struct Scsi_Host *shost = iscsi_session_to_shost(session); - struct iscsi_host *ihost = shost->shost_data; - unsigned long flags; + struct iscsi_cls_host *ihost = shost->shost_data; + struct iscsi_scan_data scan_data; - spin_lock_irqsave(&session->lock, flags); - if (session->state != ISCSI_SESSION_LOGGED_IN) { - spin_unlock_irqrestore(&session->lock, flags); - goto done; - } - spin_unlock_irqrestore(&session->lock, flags); + scan_data.channel = 0; + scan_data.id = SCAN_WILD_CARD; + scan_data.lun = SCAN_WILD_CARD; - scsi_scan_target(&session->dev, 0, session->target_id, - SCAN_WILD_CARD, 1); -done: + iscsi_user_scan_session(&session->dev, &scan_data); atomic_dec(&ihost->nr_scans); } @@ -381,7 +549,7 @@ static void __iscsi_unblock_session(struct work_struct *work) container_of(work, struct iscsi_cls_session, unblock_work); struct Scsi_Host *shost = iscsi_session_to_shost(session); - struct iscsi_host *ihost = shost->shost_data; + struct iscsi_cls_host *ihost = shost->shost_data; unsigned long flags; /* @@ -449,15 +617,19 @@ static void __iscsi_unbind_session(struct work_struct *work) container_of(work, struct iscsi_cls_session, unbind_work); struct Scsi_Host *shost = iscsi_session_to_shost(session); - struct iscsi_host *ihost = shost->shost_data; + struct iscsi_cls_host *ihost = shost->shost_data; + unsigned long flags; /* Prevent new scans and make sure scanning is not in progress */ mutex_lock(&ihost->mutex); - if (list_empty(&session->host_list)) { + 
spin_lock_irqsave(&session->lock, flags); + if (session->target_id == ISCSI_MAX_TARGET) { + spin_unlock_irqrestore(&session->lock, flags); mutex_unlock(&ihost->mutex); return; } - list_del_init(&session->host_list); + session->target_id = ISCSI_MAX_TARGET; + spin_unlock_irqrestore(&session->lock, flags); mutex_unlock(&ihost->mutex); scsi_remove_target(&session->dev); @@ -467,18 +639,18 @@ static void __iscsi_unbind_session(struct work_struct *work) static int iscsi_unbind_session(struct iscsi_cls_session *session) { struct Scsi_Host *shost = iscsi_session_to_shost(session); - struct iscsi_host *ihost = shost->shost_data; + struct iscsi_cls_host *ihost = shost->shost_data; return queue_work(ihost->scan_workq, &session->unbind_work); } struct iscsi_cls_session * -iscsi_alloc_session(struct Scsi_Host *shost, - struct iscsi_transport *transport) +iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport, + int dd_size) { struct iscsi_cls_session *session; - session = kzalloc(sizeof(*session) + transport->sessiondata_size, + session = kzalloc(sizeof(*session) + dd_size, GFP_KERNEL); if (!session) return NULL; @@ -487,7 +659,6 @@ iscsi_alloc_session(struct Scsi_Host *shost, session->recovery_tmo = 120; session->state = ISCSI_SESSION_FREE; INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout); - INIT_LIST_HEAD(&session->host_list); INIT_LIST_HEAD(&session->sess_list); INIT_WORK(&session->unblock_work, __iscsi_unblock_session); INIT_WORK(&session->block_work, __iscsi_block_session); @@ -500,22 +671,57 @@ iscsi_alloc_session(struct Scsi_Host *shost, session->dev.parent = &shost->shost_gendev; session->dev.release = iscsi_session_release; device_initialize(&session->dev); - if (transport->sessiondata_size) + if (dd_size) session->dd_data = &session[1]; return session; } EXPORT_SYMBOL_GPL(iscsi_alloc_session); +static int iscsi_get_next_target_id(struct device *dev, void *data) +{ + struct iscsi_cls_session *session; + unsigned long flags; + int err = 0; + + if (!iscsi_is_session_dev(dev)) + return 0; + + session = iscsi_dev_to_session(dev); + spin_lock_irqsave(&session->lock, flags); + if (*((unsigned int *) data) == session->target_id) + err = -EEXIST; + spin_unlock_irqrestore(&session->lock, flags); + return err; +} + int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id) { struct Scsi_Host *shost = iscsi_session_to_shost(session); - struct iscsi_host *ihost; + struct iscsi_cls_host *ihost; unsigned long flags; + unsigned int id = target_id; int err; ihost = shost->shost_data; session->sid = atomic_add_return(1, &iscsi_session_nr); - session->target_id = target_id; + + if (id == ISCSI_MAX_TARGET) { + for (id = 0; id < ISCSI_MAX_TARGET; id++) { + err = device_for_each_child(&shost->shost_gendev, &id, + iscsi_get_next_target_id); + if (!err) + break; + } + + if (id == ISCSI_MAX_TARGET) { + iscsi_cls_session_printk(KERN_ERR, session, + "Too many iscsi targets. 
Max " + "number of targets is %d.\n", + ISCSI_MAX_TARGET - 1); + goto release_host; + } + } + session->target_id = id; snprintf(session->dev.bus_id, BUS_ID_SIZE, "session%u", session->sid); @@ -531,10 +737,6 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id) list_add(&session->sess_list, &sesslist); spin_unlock_irqrestore(&sesslock, flags); - mutex_lock(&ihost->mutex); - list_add(&session->host_list, &ihost->sessions); - mutex_unlock(&ihost->mutex); - iscsi_session_event(session, ISCSI_KEVENT_CREATE_SESSION); return 0; @@ -548,18 +750,18 @@ EXPORT_SYMBOL_GPL(iscsi_add_session); * iscsi_create_session - create iscsi class session * @shost: scsi host * @transport: iscsi transport + * @dd_size: private driver data size * @target_id: which target * * This can be called from a LLD or iscsi_transport. */ struct iscsi_cls_session * -iscsi_create_session(struct Scsi_Host *shost, - struct iscsi_transport *transport, - unsigned int target_id) +iscsi_create_session(struct Scsi_Host *shost, struct iscsi_transport *transport, + int dd_size, unsigned int target_id) { struct iscsi_cls_session *session; - session = iscsi_alloc_session(shost, transport); + session = iscsi_alloc_session(shost, transport, dd_size); if (!session) return NULL; @@ -595,7 +797,7 @@ static int iscsi_iter_destroy_conn_fn(struct device *dev, void *data) void iscsi_remove_session(struct iscsi_cls_session *session) { struct Scsi_Host *shost = iscsi_session_to_shost(session); - struct iscsi_host *ihost = shost->shost_data; + struct iscsi_cls_host *ihost = shost->shost_data; unsigned long flags; int err; @@ -661,6 +863,7 @@ EXPORT_SYMBOL_GPL(iscsi_destroy_session); /** * iscsi_create_conn - create iscsi class connection * @session: iscsi cls session + * @dd_size: private driver data size * @cid: connection id * * This can be called from a LLD or iscsi_transport. The connection @@ -673,18 +876,17 @@ EXPORT_SYMBOL_GPL(iscsi_destroy_session); * non-zero. 
*/ struct iscsi_cls_conn * -iscsi_create_conn(struct iscsi_cls_session *session, uint32_t cid) +iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid) { struct iscsi_transport *transport = session->transport; struct iscsi_cls_conn *conn; unsigned long flags; int err; - conn = kzalloc(sizeof(*conn) + transport->conndata_size, GFP_KERNEL); + conn = kzalloc(sizeof(*conn) + dd_size, GFP_KERNEL); if (!conn) return NULL; - - if (transport->conndata_size) + if (dd_size) conn->dd_data = &conn[1]; INIT_LIST_HEAD(&conn->conn_list); @@ -1017,21 +1219,20 @@ int iscsi_session_event(struct iscsi_cls_session *session, EXPORT_SYMBOL_GPL(iscsi_session_event); static int -iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev) +iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_endpoint *ep, + struct iscsi_uevent *ev, uint32_t initial_cmdsn, + uint16_t cmds_max, uint16_t queue_depth) { struct iscsi_transport *transport = priv->iscsi_transport; struct iscsi_cls_session *session; - uint32_t hostno; + uint32_t host_no; - session = transport->create_session(transport, &priv->t, - ev->u.c_session.cmds_max, - ev->u.c_session.queue_depth, - ev->u.c_session.initial_cmdsn, - &hostno); + session = transport->create_session(ep, cmds_max, queue_depth, + initial_cmdsn, &host_no); if (!session) return -ENOMEM; - ev->r.c_session_ret.host_no = hostno; + ev->r.c_session_ret.host_no = host_no; ev->r.c_session_ret.sid = session->sid; return 0; } @@ -1106,6 +1307,7 @@ static int iscsi_if_transport_ep(struct iscsi_transport *transport, struct iscsi_uevent *ev, int msg_type) { + struct iscsi_endpoint *ep; struct sockaddr *dst_addr; int rc = 0; @@ -1115,22 +1317,33 @@ iscsi_if_transport_ep(struct iscsi_transport *transport, return -EINVAL; dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev)); - rc = transport->ep_connect(dst_addr, - ev->u.ep_connect.non_blocking, - &ev->r.ep_connect_ret.handle); + ep = transport->ep_connect(dst_addr, + ev->u.ep_connect.non_blocking); + if (IS_ERR(ep)) + return PTR_ERR(ep); + + ev->r.ep_connect_ret.handle = ep->id; break; case ISCSI_UEVENT_TRANSPORT_EP_POLL: if (!transport->ep_poll) return -EINVAL; - ev->r.retcode = transport->ep_poll(ev->u.ep_poll.ep_handle, + ep = iscsi_lookup_endpoint(ev->u.ep_poll.ep_handle); + if (!ep) + return -EINVAL; + + ev->r.retcode = transport->ep_poll(ep, ev->u.ep_poll.timeout_ms); break; case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT: if (!transport->ep_disconnect) return -EINVAL; - transport->ep_disconnect(ev->u.ep_disconnect.ep_handle); + ep = iscsi_lookup_endpoint(ev->u.ep_disconnect.ep_handle); + if (!ep) + return -EINVAL; + + transport->ep_disconnect(ep); break; } return rc; @@ -1195,6 +1408,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) struct iscsi_internal *priv; struct iscsi_cls_session *session; struct iscsi_cls_conn *conn; + struct iscsi_endpoint *ep = NULL; priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle)); if (!priv) @@ -1208,7 +1422,22 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) switch (nlh->nlmsg_type) { case ISCSI_UEVENT_CREATE_SESSION: - err = iscsi_if_create_session(priv, ev); + err = iscsi_if_create_session(priv, ep, ev, + ev->u.c_session.initial_cmdsn, + ev->u.c_session.cmds_max, + ev->u.c_session.queue_depth); + break; + case ISCSI_UEVENT_CREATE_BOUND_SESSION: + ep = iscsi_lookup_endpoint(ev->u.c_bound_session.ep_handle); + if (!ep) { + err = -EINVAL; + break; + } + + err = iscsi_if_create_session(priv, ep, ev, + 
ev->u.c_bound_session.initial_cmdsn, + ev->u.c_bound_session.cmds_max, + ev->u.c_bound_session.queue_depth); break; case ISCSI_UEVENT_DESTROY_SESSION: session = iscsi_session_lookup(ev->u.d_session.sid); @@ -1414,6 +1643,8 @@ iscsi_session_attr(password_in, ISCSI_PARAM_PASSWORD_IN, 1); iscsi_session_attr(fast_abort, ISCSI_PARAM_FAST_ABORT, 0); iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0); iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0); +iscsi_session_attr(ifacename, ISCSI_PARAM_IFACE_NAME, 0); +iscsi_session_attr(initiatorname, ISCSI_PARAM_INITIATOR_NAME, 0) static ssize_t show_priv_session_state(struct device *dev, struct device_attribute *attr, @@ -1580,6 +1811,8 @@ iscsi_register_transport(struct iscsi_transport *tt) priv->daemon_pid = -1; priv->iscsi_transport = tt; priv->t.user_scan = iscsi_user_scan; + if (!(tt->caps & CAP_DATA_PATH_OFFLOAD)) + priv->t.create_work_queue = 1; priv->dev.class = &iscsi_transport_class; snprintf(priv->dev.bus_id, BUS_ID_SIZE, "%s", tt->name); @@ -1595,7 +1828,7 @@ iscsi_register_transport(struct iscsi_transport *tt) priv->t.host_attrs.ac.attrs = &priv->host_attrs[0]; priv->t.host_attrs.ac.class = &iscsi_host_class.class; priv->t.host_attrs.ac.match = iscsi_host_match; - priv->t.host_size = sizeof(struct iscsi_host); + priv->t.host_size = sizeof(struct iscsi_cls_host); transport_container_register(&priv->t.host_attrs); SETUP_HOST_RD_ATTR(netdev, ISCSI_HOST_NETDEV_NAME); @@ -1653,6 +1886,8 @@ iscsi_register_transport(struct iscsi_transport *tt) SETUP_SESSION_RD_ATTR(fast_abort, ISCSI_FAST_ABORT); SETUP_SESSION_RD_ATTR(abort_tmo, ISCSI_ABORT_TMO); SETUP_SESSION_RD_ATTR(lu_reset_tmo,ISCSI_LU_RESET_TMO); + SETUP_SESSION_RD_ATTR(ifacename, ISCSI_IFACE_NAME); + SETUP_SESSION_RD_ATTR(initiatorname, ISCSI_INITIATOR_NAME); SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo); SETUP_PRIV_SESSION_RD_ATTR(state); @@ -1668,6 +1903,7 @@ iscsi_register_transport(struct iscsi_transport *tt) unregister_dev: device_unregister(&priv->dev); + return NULL; free_priv: kfree(priv); return NULL; @@ -1715,10 +1951,14 @@ static __init int iscsi_transport_init(void) if (err) return err; - err = transport_class_register(&iscsi_host_class); + err = class_register(&iscsi_endpoint_class); if (err) goto unregister_transport_class; + err = transport_class_register(&iscsi_host_class); + if (err) + goto unregister_endpoint_class; + err = transport_class_register(&iscsi_connection_class); if (err) goto unregister_host_class; @@ -1727,8 +1967,8 @@ static __init int iscsi_transport_init(void) if (err) goto unregister_conn_class; - nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 1, iscsi_if_rx, NULL, - THIS_MODULE); + nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 1, iscsi_if_rx, + NULL, THIS_MODULE); if (!nls) { err = -ENOBUFS; goto unregister_session_class; @@ -1748,6 +1988,8 @@ unregister_conn_class: transport_class_unregister(&iscsi_connection_class); unregister_host_class: transport_class_unregister(&iscsi_host_class); +unregister_endpoint_class: + class_unregister(&iscsi_endpoint_class); unregister_transport_class: class_unregister(&iscsi_transport_class); return err; @@ -1760,6 +2002,7 @@ static void __exit iscsi_transport_exit(void) transport_class_unregister(&iscsi_connection_class); transport_class_unregister(&iscsi_session_class); transport_class_unregister(&iscsi_host_class); + class_unregister(&iscsi_endpoint_class); class_unregister(&iscsi_transport_class); } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index d53312c42547..0c63947d8a9d 100644 --- 
a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -58,8 +58,8 @@ #include <scsi/scsi_host.h> #include <scsi/scsi_ioctl.h> #include <scsi/scsicam.h> -#include <scsi/sd.h> +#include "sd.h" #include "scsi_logging.h" MODULE_AUTHOR("Eric Youngdale"); @@ -295,11 +295,6 @@ static int sd_major(int major_idx) } } -static inline struct scsi_disk *scsi_disk(struct gendisk *disk) -{ - return container_of(disk->private_data, struct scsi_disk, driver); -} - static struct scsi_disk *__scsi_disk_get(struct gendisk *disk) { struct scsi_disk *sdkp = NULL; diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h new file mode 100644 index 000000000000..03a3d45cfa42 --- /dev/null +++ b/drivers/scsi/sd.h @@ -0,0 +1,62 @@ +#ifndef _SCSI_DISK_H +#define _SCSI_DISK_H + +/* + * More than enough for everybody ;) The huge number of majors + * is a leftover from 16bit dev_t days, we don't really need that + * much numberspace. + */ +#define SD_MAJORS 16 + +/* + * This is limited by the naming scheme enforced in sd_probe, + * add another character to it if you really need more disks. + */ +#define SD_MAX_DISKS (((26 * 26) + 26 + 1) * 26) + +/* + * Time out in seconds for disks and Magneto-opticals (which are slower). + */ +#define SD_TIMEOUT (30 * HZ) +#define SD_MOD_TIMEOUT (75 * HZ) + +/* + * Number of allowed retries + */ +#define SD_MAX_RETRIES 5 +#define SD_PASSTHROUGH_RETRIES 1 + +/* + * Size of the initial data buffer for mode and read capacity data + */ +#define SD_BUF_SIZE 512 + +struct scsi_disk { + struct scsi_driver *driver; /* always &sd_template */ + struct scsi_device *device; + struct device dev; + struct gendisk *disk; + unsigned int openers; /* protected by BKL for now, yuck */ + sector_t capacity; /* size in 512-byte sectors */ + u32 index; + u8 media_present; + u8 write_prot; + unsigned previous_state : 1; + unsigned WCE : 1; /* state of disk WCE bit */ + unsigned RCD : 1; /* state of disk RCD bit, unused */ + unsigned DPOFUA : 1; /* state of disk DPOFUA bit */ +}; +#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev) + +static inline struct scsi_disk *scsi_disk(struct gendisk *disk) +{ + return container_of(disk->private_data, struct scsi_disk, driver); +} + +#define sd_printk(prefix, sdsk, fmt, a...) \ + (sdsk)->disk ? 
\ + sdev_printk(prefix, (sdsk)->device, "[%s] " fmt, \ + (sdsk)->disk->disk_name, ##a) : \ + sdev_printk(prefix, (sdsk)->device, fmt, ##a) + +#endif /* _SCSI_DISK_H */ diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index fccd2e88d600..d3b8ebb83776 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -1036,6 +1036,9 @@ sg_ioctl(struct inode *inode, struct file *filp, case SG_SCSI_RESET_DEVICE: val = SCSI_TRY_RESET_DEVICE; break; + case SG_SCSI_RESET_TARGET: + val = SCSI_TRY_RESET_TARGET; + break; case SG_SCSI_RESET_BUS: val = SCSI_TRY_RESET_BUS; break; diff --git a/drivers/scsi/sym53c8xx_2/sym_misc.h b/drivers/scsi/sym53c8xx_2/sym_misc.h index 0433d5d0caf3..430537183c18 100644 --- a/drivers/scsi/sym53c8xx_2/sym_misc.h +++ b/drivers/scsi/sym53c8xx_2/sym_misc.h @@ -121,9 +121,7 @@ static __inline void sym_que_move(struct sym_quehead *orig, } } -#define sym_que_entry(ptr, type, member) \ - ((type *)((char *)(ptr)-(unsigned int)(&((type *)0)->member))) - +#define sym_que_entry(ptr, type, member) container_of(ptr, type, member) #define sym_insque(new, pos) __sym_que_add(new, pos, (pos)->flink) diff --git a/drivers/serial/8250_pnp.c b/drivers/serial/8250_pnp.c index 97c68d021d28..638b68649e79 100644 --- a/drivers/serial/8250_pnp.c +++ b/drivers/serial/8250_pnp.c @@ -383,21 +383,14 @@ static int __devinit check_name(char *name) return 0; } -static int __devinit check_resources(struct pnp_option *option) +static int __devinit check_resources(struct pnp_dev *dev) { - struct pnp_option *tmp; - if (!option) - return 0; + resource_size_t base[] = {0x2f8, 0x3f8, 0x2e8, 0x3e8}; + int i; - for (tmp = option; tmp; tmp = tmp->next) { - struct pnp_port *port; - for (port = tmp->port; port; port = port->next) - if ((port->size == 8) && - ((port->min == 0x2f8) || - (port->min == 0x3f8) || - (port->min == 0x2e8) || - (port->min == 0x3e8))) - return 1; + for (i = 0; i < ARRAY_SIZE(base); i++) { + if (pnp_possible_config(dev, IORESOURCE_IO, base[i], 8)) + return 1; } return 0; @@ -420,10 +413,7 @@ static int __devinit serial_pnp_guess_board(struct pnp_dev *dev, int *flags) (dev->card && check_name(dev->card->name)))) return -ENODEV; - if (check_resources(dev->independent)) - return 0; - - if (check_resources(dev->dependent)) + if (check_resources(dev)) return 0; return -ENODEV; diff --git a/drivers/serial/cpm_uart/cpm_uart.h b/drivers/serial/cpm_uart/cpm_uart.h index 0cc39f82d7c5..5c76e0ae0582 100644 --- a/drivers/serial/cpm_uart/cpm_uart.h +++ b/drivers/serial/cpm_uart/cpm_uart.h @@ -6,7 +6,7 @@ * Copyright (C) 2004 Freescale Semiconductor, Inc. * * 2006 (c) MontaVista Software, Inc. - * Vitaly Bordug <vbordug@ru.mvista.com> + * Vitaly Bordug <vbordug@ru.mvista.com> * * This file is licensed under the terms of the GNU General Public License * version 2. 
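/*
 * sym_que_entry() above now expands to container_of().  A self-contained
 * illustration of the idiom with an example structure (not taken from the
 * driver): subtracting the member's offset from a member pointer recovers
 * the enclosing object, which is exactly what the old
 * (type *)((char *)(ptr) - ...) cast open-coded.
 */
#include <linux/kernel.h>
#include <linux/list.h>

struct item {
        int value;
        struct list_head node;
};

static struct item *item_from_node(struct list_head *node)
{
        return container_of(node, struct item, node);
}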
This program is licensed "as is" without any warranty of any @@ -28,7 +28,7 @@ #define SERIAL_CPM_MAJOR 204 #define SERIAL_CPM_MINOR 46 -#define IS_SMC(pinfo) (pinfo->flags & FLAG_SMC) +#define IS_SMC(pinfo) (pinfo->flags & FLAG_SMC) #define IS_DISCARDING(pinfo) (pinfo->flags & FLAG_DISCARDING) #define FLAG_DISCARDING 0x00000004 /* when set, don't discard */ #define FLAG_SMC 0x00000002 @@ -70,7 +70,7 @@ struct uart_cpm_port { void (*set_lineif)(struct uart_cpm_port *); u8 brg; uint dp_addr; - void *mem_addr; + void *mem_addr; dma_addr_t dma_addr; u32 mem_size; /* helpers */ @@ -79,14 +79,11 @@ struct uart_cpm_port { /* Keep track of 'odd' SMC2 wirings */ int is_portb; /* wait on close if needed */ - int wait_closing; + int wait_closing; /* value to combine with opcode to form cpm command */ u32 command; }; -#ifndef CONFIG_PPC_CPM_NEW_BINDING -extern int cpm_uart_port_map[UART_NR]; -#endif extern int cpm_uart_nr; extern struct uart_cpm_port cpm_uart_ports[UART_NR]; diff --git a/drivers/serial/cpm_uart/cpm_uart_core.c b/drivers/serial/cpm_uart/cpm_uart_core.c index a19dc7ef8861..abe129cc927a 100644 --- a/drivers/serial/cpm_uart/cpm_uart_core.c +++ b/drivers/serial/cpm_uart/cpm_uart_core.c @@ -13,7 +13,7 @@ * Copyright (C) 2004, 2007 Freescale Semiconductor, Inc. * (C) 2004 Intracom, S.A. * (C) 2005-2006 MontaVista Software, Inc. - * Vitaly Bordug <vbordug@ru.mvista.com> + * Vitaly Bordug <vbordug@ru.mvista.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -42,6 +42,7 @@ #include <linux/bootmem.h> #include <linux/dma-mapping.h> #include <linux/fs_uart_pd.h> +#include <linux/of_platform.h> #include <asm/io.h> #include <asm/irq.h> @@ -49,10 +50,6 @@ #include <asm/fs_pd.h> #include <asm/udbg.h> -#ifdef CONFIG_PPC_CPM_NEW_BINDING -#include <linux/of_platform.h> -#endif - #if defined(CONFIG_SERIAL_CPM_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) #define SUPPORT_SYSRQ #endif @@ -72,59 +69,6 @@ static void cpm_uart_initbd(struct uart_cpm_port *pinfo); /**************************************************************/ -#ifndef CONFIG_PPC_CPM_NEW_BINDING -/* Track which ports are configured as uarts */ -int cpm_uart_port_map[UART_NR]; -/* How many ports did we config as uarts */ -int cpm_uart_nr; - -/* Place-holder for board-specific stuff */ -struct platform_device* __attribute__ ((weak)) __init -early_uart_get_pdev(int index) -{ - return NULL; -} - - -static void cpm_uart_count(void) -{ - cpm_uart_nr = 0; -#ifdef CONFIG_SERIAL_CPM_SMC1 - cpm_uart_port_map[cpm_uart_nr++] = UART_SMC1; -#endif -#ifdef CONFIG_SERIAL_CPM_SMC2 - cpm_uart_port_map[cpm_uart_nr++] = UART_SMC2; -#endif -#ifdef CONFIG_SERIAL_CPM_SCC1 - cpm_uart_port_map[cpm_uart_nr++] = UART_SCC1; -#endif -#ifdef CONFIG_SERIAL_CPM_SCC2 - cpm_uart_port_map[cpm_uart_nr++] = UART_SCC2; -#endif -#ifdef CONFIG_SERIAL_CPM_SCC3 - cpm_uart_port_map[cpm_uart_nr++] = UART_SCC3; -#endif -#ifdef CONFIG_SERIAL_CPM_SCC4 - cpm_uart_port_map[cpm_uart_nr++] = UART_SCC4; -#endif -} - -/* Get UART number by its id */ -static int cpm_uart_id2nr(int id) -{ - int i; - if (id < UART_NR) { - for (i=0; i<UART_NR; i++) { - if (cpm_uart_port_map[i] == id) - return i; - } - } - - /* not found or invalid argument */ - return -1; -} -#endif - /* * Check, if transmit buffers are processed */ @@ -547,6 +491,11 @@ static void cpm_uart_set_termios(struct uart_port *port, } /* + * Update the timeout + */ + uart_update_timeout(port, termios->c_cflag, baud); + + /* * Set up parity check 
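/*
 * The uart_update_timeout() call added to cpm_uart_set_termios() above keeps
 * port->timeout in step with the newly programmed frame format and baud rate,
 * so the serial core waits long enough when draining the transmitter.  A
 * sketch of the usual ordering in a set_termios() method; MY_MIN_BAUD and
 * MY_MAX_BAUD are placeholders, not values from this driver.
 */
#include <linux/serial_core.h>

static void example_set_termios(struct uart_port *port, struct ktermios *termios,
                                struct ktermios *old)
{
        unsigned int baud;

        baud = uart_get_baud_rate(port, termios, old, MY_MIN_BAUD, MY_MAX_BAUD);

        /* recompute port->timeout for the new character time */
        uart_update_timeout(port, termios->c_cflag, baud);

        /* ... program baud rate, parity and framing into the hardware ... */
}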
flag */ #define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK)) @@ -935,7 +884,6 @@ static struct uart_ops cpm_uart_pops = { .verify_port = cpm_uart_verify_port, }; -#ifdef CONFIG_PPC_CPM_NEW_BINDING struct uart_cpm_port cpm_uart_ports[UART_NR]; static int cpm_uart_init_port(struct device_node *np, @@ -995,6 +943,7 @@ static int cpm_uart_init_port(struct device_node *np, pinfo->port.type = PORT_CPM; pinfo->port.ops = &cpm_uart_pops, pinfo->port.iotype = UPIO_MEM; + pinfo->port.fifosize = pinfo->tx_nrfifos * pinfo->tx_fifosize; spin_lock_init(&pinfo->port.lock); pinfo->port.irq = of_irq_to_resource(np, 0, NULL); @@ -1012,153 +961,6 @@ out_mem: return ret; } -#else - -struct uart_cpm_port cpm_uart_ports[UART_NR] = { - [UART_SMC1] = { - .port = { - .irq = SMC1_IRQ, - .ops = &cpm_uart_pops, - .iotype = UPIO_MEM, - .lock = __SPIN_LOCK_UNLOCKED(cpm_uart_ports[UART_SMC1].port.lock), - }, - .flags = FLAG_SMC, - .tx_nrfifos = TX_NUM_FIFO, - .tx_fifosize = TX_BUF_SIZE, - .rx_nrfifos = RX_NUM_FIFO, - .rx_fifosize = RX_BUF_SIZE, - .set_lineif = smc1_lineif, - }, - [UART_SMC2] = { - .port = { - .irq = SMC2_IRQ, - .ops = &cpm_uart_pops, - .iotype = UPIO_MEM, - .lock = __SPIN_LOCK_UNLOCKED(cpm_uart_ports[UART_SMC2].port.lock), - }, - .flags = FLAG_SMC, - .tx_nrfifos = TX_NUM_FIFO, - .tx_fifosize = TX_BUF_SIZE, - .rx_nrfifos = RX_NUM_FIFO, - .rx_fifosize = RX_BUF_SIZE, - .set_lineif = smc2_lineif, -#ifdef CONFIG_SERIAL_CPM_ALT_SMC2 - .is_portb = 1, -#endif - }, - [UART_SCC1] = { - .port = { - .irq = SCC1_IRQ, - .ops = &cpm_uart_pops, - .iotype = UPIO_MEM, - .lock = __SPIN_LOCK_UNLOCKED(cpm_uart_ports[UART_SCC1].port.lock), - }, - .tx_nrfifos = TX_NUM_FIFO, - .tx_fifosize = TX_BUF_SIZE, - .rx_nrfifos = RX_NUM_FIFO, - .rx_fifosize = RX_BUF_SIZE, - .set_lineif = scc1_lineif, - .wait_closing = SCC_WAIT_CLOSING, - }, - [UART_SCC2] = { - .port = { - .irq = SCC2_IRQ, - .ops = &cpm_uart_pops, - .iotype = UPIO_MEM, - .lock = __SPIN_LOCK_UNLOCKED(cpm_uart_ports[UART_SCC2].port.lock), - }, - .tx_nrfifos = TX_NUM_FIFO, - .tx_fifosize = TX_BUF_SIZE, - .rx_nrfifos = RX_NUM_FIFO, - .rx_fifosize = RX_BUF_SIZE, - .set_lineif = scc2_lineif, - .wait_closing = SCC_WAIT_CLOSING, - }, - [UART_SCC3] = { - .port = { - .irq = SCC3_IRQ, - .ops = &cpm_uart_pops, - .iotype = UPIO_MEM, - .lock = __SPIN_LOCK_UNLOCKED(cpm_uart_ports[UART_SCC3].port.lock), - }, - .tx_nrfifos = TX_NUM_FIFO, - .tx_fifosize = TX_BUF_SIZE, - .rx_nrfifos = RX_NUM_FIFO, - .rx_fifosize = RX_BUF_SIZE, - .set_lineif = scc3_lineif, - .wait_closing = SCC_WAIT_CLOSING, - }, - [UART_SCC4] = { - .port = { - .irq = SCC4_IRQ, - .ops = &cpm_uart_pops, - .iotype = UPIO_MEM, - .lock = __SPIN_LOCK_UNLOCKED(cpm_uart_ports[UART_SCC4].port.lock), - }, - .tx_nrfifos = TX_NUM_FIFO, - .tx_fifosize = TX_BUF_SIZE, - .rx_nrfifos = RX_NUM_FIFO, - .rx_fifosize = RX_BUF_SIZE, - .set_lineif = scc4_lineif, - .wait_closing = SCC_WAIT_CLOSING, - }, -}; - -int cpm_uart_drv_get_platform_data(struct platform_device *pdev, int is_con) -{ - struct resource *r; - struct fs_uart_platform_info *pdata = pdev->dev.platform_data; - int idx; /* It is UART_SMCx or UART_SCCx index */ - struct uart_cpm_port *pinfo; - int line; - u32 mem, pram; - - idx = pdata->fs_no = fs_uart_get_id(pdata); - - line = cpm_uart_id2nr(idx); - if(line < 0) { - printk(KERN_ERR"%s(): port %d is not registered", __func__, idx); - return -EINVAL; - } - - pinfo = (struct uart_cpm_port *) &cpm_uart_ports[idx]; - - pinfo->brg = pdata->brg; - - if (!is_con) { - pinfo->port.line = line; - pinfo->port.flags = 
UPF_BOOT_AUTOCONF; - } - - if (!(r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"))) - return -EINVAL; - mem = (u32)ioremap(r->start, r->end - r->start + 1); - - if (!(r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pram"))) - return -EINVAL; - pram = (u32)ioremap(r->start, r->end - r->start + 1); - - if(idx > fsid_smc2_uart) { - pinfo->sccp = (scc_t *)mem; - pinfo->sccup = (scc_uart_t *)pram; - } else { - pinfo->smcp = (smc_t *)mem; - pinfo->smcup = (smc_uart_t *)pram; - } - pinfo->tx_nrfifos = pdata->tx_num_fifo; - pinfo->tx_fifosize = pdata->tx_buf_size; - - pinfo->rx_nrfifos = pdata->rx_num_fifo; - pinfo->rx_fifosize = pdata->rx_buf_size; - - pinfo->port.uartclk = pdata->uart_clk; - pinfo->port.mapbase = (unsigned long)mem; - pinfo->port.irq = platform_get_irq(pdev, 0); - - return 0; -} -#endif - #ifdef CONFIG_SERIAL_CPM_CONSOLE /* * Print a string to the serial port trying not to disturb @@ -1169,15 +971,18 @@ int cpm_uart_drv_get_platform_data(struct platform_device *pdev, int is_con) static void cpm_uart_console_write(struct console *co, const char *s, u_int count) { -#ifdef CONFIG_PPC_CPM_NEW_BINDING struct uart_cpm_port *pinfo = &cpm_uart_ports[co->index]; -#else - struct uart_cpm_port *pinfo = - &cpm_uart_ports[cpm_uart_port_map[co->index]]; -#endif unsigned int i; cbd_t __iomem *bdp, *bdbase; unsigned char *cp; + unsigned long flags; + int nolock = oops_in_progress; + + if (unlikely(nolock)) { + local_irq_save(flags); + } else { + spin_lock_irqsave(&pinfo->port.lock, flags); + } /* Get the address of the host memory buffer. */ @@ -1239,6 +1044,12 @@ static void cpm_uart_console_write(struct console *co, const char *s, ; pinfo->tx_cur = bdp; + + if (unlikely(nolock)) { + local_irq_restore(flags); + } else { + spin_unlock_irqrestore(&pinfo->port.lock, flags); + } } @@ -1252,7 +1063,6 @@ static int __init cpm_uart_console_setup(struct console *co, char *options) struct uart_cpm_port *pinfo; struct uart_port *port; -#ifdef CONFIG_PPC_CPM_NEW_BINDING struct device_node *np = NULL; int i = 0; @@ -1284,35 +1094,6 @@ static int __init cpm_uart_console_setup(struct console *co, char *options) if (ret) return ret; -#else - - struct fs_uart_platform_info *pdata; - struct platform_device* pdev = early_uart_get_pdev(co->index); - - if (!pdev) { - pr_info("cpm_uart: console: compat mode\n"); - /* compatibility - will be cleaned up */ - cpm_uart_init_portdesc(); - } - - port = - (struct uart_port *)&cpm_uart_ports[cpm_uart_port_map[co->index]]; - pinfo = (struct uart_cpm_port *)port; - if (!pdev) { - if (pinfo->set_lineif) - pinfo->set_lineif(pinfo); - } else { - pdata = pdev->dev.platform_data; - if (pdata) - if (pdata->init_ioports) - pdata->init_ioports(pdata); - - cpm_uart_drv_get_platform_data(pdev, 1); - } - - pinfo->flags |= FLAG_CONSOLE; -#endif - if (options) { uart_parse_options(options, &baud, &parity, &bits, &flow); } else { @@ -1386,7 +1167,6 @@ static struct uart_driver cpm_reg = { .nr = UART_NR, }; -#ifdef CONFIG_PPC_CPM_NEW_BINDING static int probe_index; static int __devinit cpm_uart_probe(struct of_device *ofdev, @@ -1457,135 +1237,6 @@ static void __exit cpm_uart_exit(void) of_unregister_platform_driver(&cpm_uart_driver); uart_unregister_driver(&cpm_reg); } -#else -static int cpm_uart_drv_probe(struct device *dev) -{ - struct platform_device *pdev = to_platform_device(dev); - struct fs_uart_platform_info *pdata; - int ret = -ENODEV; - - if(!pdev) { - printk(KERN_ERR"CPM UART: platform data missing!\n"); - return ret; - } - - pdata = 
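/*
 * The locking added to cpm_uart_console_write() above follows the common
 * console pattern, shown here in isolation with the body elided: in normal
 * operation the port lock is taken, but during an oops the lock may already
 * be held by the crashing CPU, so only local interrupts are disabled to
 * avoid deadlocking while the oops text is printed.
 */
#include <linux/kernel.h>
#include <linux/serial_core.h>

static void example_console_write(struct uart_port *port, const char *s,
                                  unsigned int count)
{
        unsigned long flags;
        int nolock = oops_in_progress;

        if (unlikely(nolock))
                local_irq_save(flags);
        else
                spin_lock_irqsave(&port->lock, flags);

        /* ... push 'count' characters from 's' to the transmitter ... */

        if (unlikely(nolock))
                local_irq_restore(flags);
        else
                spin_unlock_irqrestore(&port->lock, flags);
}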
pdev->dev.platform_data; - - if ((ret = cpm_uart_drv_get_platform_data(pdev, 0))) - return ret; - - pr_debug("cpm_uart_drv_probe: Adding CPM UART %d\n", cpm_uart_id2nr(pdata->fs_no)); - - if (pdata->init_ioports) - pdata->init_ioports(pdata); - - ret = uart_add_one_port(&cpm_reg, &cpm_uart_ports[pdata->fs_no].port); - - return ret; -} - -static int cpm_uart_drv_remove(struct device *dev) -{ - struct platform_device *pdev = to_platform_device(dev); - struct fs_uart_platform_info *pdata = pdev->dev.platform_data; - - pr_debug("cpm_uart_drv_remove: Removing CPM UART %d\n", - cpm_uart_id2nr(pdata->fs_no)); - - uart_remove_one_port(&cpm_reg, &cpm_uart_ports[pdata->fs_no].port); - return 0; -} - -static struct device_driver cpm_smc_uart_driver = { - .name = "fsl-cpm-smc:uart", - .bus = &platform_bus_type, - .probe = cpm_uart_drv_probe, - .remove = cpm_uart_drv_remove, -}; - -static struct device_driver cpm_scc_uart_driver = { - .name = "fsl-cpm-scc:uart", - .bus = &platform_bus_type, - .probe = cpm_uart_drv_probe, - .remove = cpm_uart_drv_remove, -}; - -/* - This is supposed to match uart devices on platform bus, - */ -static int match_is_uart (struct device* dev, void* data) -{ - struct platform_device* pdev = container_of(dev, struct platform_device, dev); - int ret = 0; - /* this was setfunc as uart */ - if(strstr(pdev->name,":uart")) { - ret = 1; - } - return ret; -} - - -static int cpm_uart_init(void) { - - int ret; - int i; - struct device *dev; - printk(KERN_INFO "Serial: CPM driver $Revision: 0.02 $\n"); - - /* lookup the bus for uart devices */ - dev = bus_find_device(&platform_bus_type, NULL, 0, match_is_uart); - - /* There are devices on the bus - all should be OK */ - if (dev) { - cpm_uart_count(); - cpm_reg.nr = cpm_uart_nr; - - if (!(ret = uart_register_driver(&cpm_reg))) { - if ((ret = driver_register(&cpm_smc_uart_driver))) { - uart_unregister_driver(&cpm_reg); - return ret; - } - if ((ret = driver_register(&cpm_scc_uart_driver))) { - driver_unregister(&cpm_scc_uart_driver); - uart_unregister_driver(&cpm_reg); - } - } - } else { - /* No capable platform devices found - falling back to legacy mode */ - pr_info("cpm_uart: WARNING: no UART devices found on platform bus!\n"); - pr_info( - "cpm_uart: the driver will guess configuration, but this mode is no longer supported.\n"); - - /* Don't run this again, if the console driver did it already */ - if (cpm_uart_nr == 0) - cpm_uart_init_portdesc(); - - cpm_reg.nr = cpm_uart_nr; - ret = uart_register_driver(&cpm_reg); - - if (ret) - return ret; - - for (i = 0; i < cpm_uart_nr; i++) { - int con = cpm_uart_port_map[i]; - cpm_uart_ports[con].port.line = i; - cpm_uart_ports[con].port.flags = UPF_BOOT_AUTOCONF; - if (cpm_uart_ports[con].set_lineif) - cpm_uart_ports[con].set_lineif(&cpm_uart_ports[con]); - uart_add_one_port(&cpm_reg, &cpm_uart_ports[con].port); - } - - } - return ret; -} - -static void __exit cpm_uart_exit(void) -{ - driver_unregister(&cpm_scc_uart_driver); - driver_unregister(&cpm_smc_uart_driver); - uart_unregister_driver(&cpm_reg); -} -#endif module_init(cpm_uart_init); module_exit(cpm_uart_exit); diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm1.c b/drivers/serial/cpm_uart/cpm_uart_cpm1.c index 74f1432bb248..0f0aff06c596 100644 --- a/drivers/serial/cpm_uart/cpm_uart_cpm1.c +++ b/drivers/serial/cpm_uart/cpm_uart_cpm1.c @@ -9,7 +9,7 @@ * Copyright (C) 2004 Freescale Semiconductor, Inc. * (C) 2004 Intracom, S.A. * (C) 2006 MontaVista Software, Inc. 
- * Vitaly Bordug <vbordug@ru.mvista.com> + * Vitaly Bordug <vbordug@ru.mvista.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -51,7 +51,6 @@ /**************************************************************/ -#ifdef CONFIG_PPC_CPM_NEW_BINDING void cpm_line_cr_cmd(struct uart_cpm_port *port, int cmd) { cpm_command(port->command, cmd); @@ -68,75 +67,6 @@ void cpm_uart_unmap_pram(struct uart_cpm_port *port, void __iomem *pram) iounmap(pram); } -#else -void cpm_line_cr_cmd(struct uart_cpm_port *port, int cmd) -{ - ushort val; - int line = port - cpm_uart_ports; - volatile cpm8xx_t *cp = cpmp; - - switch (line) { - case UART_SMC1: - val = mk_cr_cmd(CPM_CR_CH_SMC1, cmd) | CPM_CR_FLG; - break; - case UART_SMC2: - val = mk_cr_cmd(CPM_CR_CH_SMC2, cmd) | CPM_CR_FLG; - break; - case UART_SCC1: - val = mk_cr_cmd(CPM_CR_CH_SCC1, cmd) | CPM_CR_FLG; - break; - case UART_SCC2: - val = mk_cr_cmd(CPM_CR_CH_SCC2, cmd) | CPM_CR_FLG; - break; - case UART_SCC3: - val = mk_cr_cmd(CPM_CR_CH_SCC3, cmd) | CPM_CR_FLG; - break; - case UART_SCC4: - val = mk_cr_cmd(CPM_CR_CH_SCC4, cmd) | CPM_CR_FLG; - break; - default: - return; - - } - cp->cp_cpcr = val; - while (cp->cp_cpcr & CPM_CR_FLG) ; -} - -void smc1_lineif(struct uart_cpm_port *pinfo) -{ - pinfo->brg = 1; -} - -void smc2_lineif(struct uart_cpm_port *pinfo) -{ - pinfo->brg = 2; -} - -void scc1_lineif(struct uart_cpm_port *pinfo) -{ - /* XXX SCC1: insert port configuration here */ - pinfo->brg = 1; -} - -void scc2_lineif(struct uart_cpm_port *pinfo) -{ - /* XXX SCC2: insert port configuration here */ - pinfo->brg = 2; -} - -void scc3_lineif(struct uart_cpm_port *pinfo) -{ - /* XXX SCC3: insert port configuration here */ - pinfo->brg = 3; -} - -void scc4_lineif(struct uart_cpm_port *pinfo) -{ - /* XXX SCC4: insert port configuration here */ - pinfo->brg = 4; -} -#endif - /* * Allocate DP-Ram and memory buffers. We need to allocate a transmit and * receive buffer descriptors from dual port ram, and a character @@ -205,101 +135,3 @@ void cpm_uart_freebuf(struct uart_cpm_port *pinfo) cpm_dpfree(pinfo->dp_addr); } - -#ifndef CONFIG_PPC_CPM_NEW_BINDING -/* Setup any dynamic params in the uart desc */ -int cpm_uart_init_portdesc(void) -{ - pr_debug("CPM uart[-]:init portdesc\n"); - - cpm_uart_nr = 0; -#ifdef CONFIG_SERIAL_CPM_SMC1 - cpm_uart_ports[UART_SMC1].smcp = &cpmp->cp_smc[0]; -/* - * Is SMC1 being relocated? 
- */ -# ifdef CONFIG_I2C_SPI_SMC1_UCODE_PATCH - cpm_uart_ports[UART_SMC1].smcup = - (smc_uart_t *) & cpmp->cp_dparam[0x3C0]; -# else - cpm_uart_ports[UART_SMC1].smcup = - (smc_uart_t *) & cpmp->cp_dparam[PROFF_SMC1]; -# endif - cpm_uart_ports[UART_SMC1].port.mapbase = - (unsigned long)&cpmp->cp_smc[0]; - cpm_uart_ports[UART_SMC1].smcp->smc_smcm |= (SMCM_RX | SMCM_TX); - cpm_uart_ports[UART_SMC1].smcp->smc_smcmr &= ~(SMCMR_REN | SMCMR_TEN); - cpm_uart_ports[UART_SMC1].port.uartclk = uart_clock(); - cpm_uart_port_map[cpm_uart_nr++] = UART_SMC1; -#endif - -#ifdef CONFIG_SERIAL_CPM_SMC2 - cpm_uart_ports[UART_SMC2].smcp = &cpmp->cp_smc[1]; - cpm_uart_ports[UART_SMC2].smcup = - (smc_uart_t *) & cpmp->cp_dparam[PROFF_SMC2]; - cpm_uart_ports[UART_SMC2].port.mapbase = - (unsigned long)&cpmp->cp_smc[1]; - cpm_uart_ports[UART_SMC2].smcp->smc_smcm |= (SMCM_RX | SMCM_TX); - cpm_uart_ports[UART_SMC2].smcp->smc_smcmr &= ~(SMCMR_REN | SMCMR_TEN); - cpm_uart_ports[UART_SMC2].port.uartclk = uart_clock(); - cpm_uart_port_map[cpm_uart_nr++] = UART_SMC2; -#endif - -#ifdef CONFIG_SERIAL_CPM_SCC1 - cpm_uart_ports[UART_SCC1].sccp = &cpmp->cp_scc[0]; - cpm_uart_ports[UART_SCC1].sccup = - (scc_uart_t *) & cpmp->cp_dparam[PROFF_SCC1]; - cpm_uart_ports[UART_SCC1].port.mapbase = - (unsigned long)&cpmp->cp_scc[0]; - cpm_uart_ports[UART_SCC1].sccp->scc_sccm &= - ~(UART_SCCM_TX | UART_SCCM_RX); - cpm_uart_ports[UART_SCC1].sccp->scc_gsmrl &= - ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT); - cpm_uart_ports[UART_SCC1].port.uartclk = uart_clock(); - cpm_uart_port_map[cpm_uart_nr++] = UART_SCC1; -#endif - -#ifdef CONFIG_SERIAL_CPM_SCC2 - cpm_uart_ports[UART_SCC2].sccp = &cpmp->cp_scc[1]; - cpm_uart_ports[UART_SCC2].sccup = - (scc_uart_t *) & cpmp->cp_dparam[PROFF_SCC2]; - cpm_uart_ports[UART_SCC2].port.mapbase = - (unsigned long)&cpmp->cp_scc[1]; - cpm_uart_ports[UART_SCC2].sccp->scc_sccm &= - ~(UART_SCCM_TX | UART_SCCM_RX); - cpm_uart_ports[UART_SCC2].sccp->scc_gsmrl &= - ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT); - cpm_uart_ports[UART_SCC2].port.uartclk = uart_clock(); - cpm_uart_port_map[cpm_uart_nr++] = UART_SCC2; -#endif - -#ifdef CONFIG_SERIAL_CPM_SCC3 - cpm_uart_ports[UART_SCC3].sccp = &cpmp->cp_scc[2]; - cpm_uart_ports[UART_SCC3].sccup = - (scc_uart_t *) & cpmp->cp_dparam[PROFF_SCC3]; - cpm_uart_ports[UART_SCC3].port.mapbase = - (unsigned long)&cpmp->cp_scc[2]; - cpm_uart_ports[UART_SCC3].sccp->scc_sccm &= - ~(UART_SCCM_TX | UART_SCCM_RX); - cpm_uart_ports[UART_SCC3].sccp->scc_gsmrl &= - ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT); - cpm_uart_ports[UART_SCC3].port.uartclk = uart_clock(); - cpm_uart_port_map[cpm_uart_nr++] = UART_SCC3; -#endif - -#ifdef CONFIG_SERIAL_CPM_SCC4 - cpm_uart_ports[UART_SCC4].sccp = &cpmp->cp_scc[3]; - cpm_uart_ports[UART_SCC4].sccup = - (scc_uart_t *) & cpmp->cp_dparam[PROFF_SCC4]; - cpm_uart_ports[UART_SCC4].port.mapbase = - (unsigned long)&cpmp->cp_scc[3]; - cpm_uart_ports[UART_SCC4].sccp->scc_sccm &= - ~(UART_SCCM_TX | UART_SCCM_RX); - cpm_uart_ports[UART_SCC4].sccp->scc_gsmrl &= - ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT); - cpm_uart_ports[UART_SCC4].port.uartclk = uart_clock(); - cpm_uart_port_map[cpm_uart_nr++] = UART_SCC4; -#endif - return 0; -} -#endif diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm1.h b/drivers/serial/cpm_uart/cpm_uart_cpm1.h index ddf46d3c964b..10eecd6af6d4 100644 --- a/drivers/serial/cpm_uart/cpm_uart_cpm1.h +++ b/drivers/serial/cpm_uart/cpm_uart_cpm1.h @@ -2,7 +2,7 @@ * linux/drivers/serial/cpm_uart/cpm_uart_cpm1.h * * Driver for CPM (SCC/SMC) serial ports - * + * * definitions for cpm1 * */ @@ 
-12,16 +12,6 @@ #include <asm/cpm1.h> -/* defines for IRQs */ -#ifndef CONFIG_PPC_CPM_NEW_BINDING -#define SMC1_IRQ (CPM_IRQ_OFFSET + CPMVEC_SMC1) -#define SMC2_IRQ (CPM_IRQ_OFFSET + CPMVEC_SMC2) -#define SCC1_IRQ (CPM_IRQ_OFFSET + CPMVEC_SCC1) -#define SCC2_IRQ (CPM_IRQ_OFFSET + CPMVEC_SCC2) -#define SCC3_IRQ (CPM_IRQ_OFFSET + CPMVEC_SCC3) -#define SCC4_IRQ (CPM_IRQ_OFFSET + CPMVEC_SCC4) -#endif - static inline void cpm_set_brg(int brg, int baud) { cpm_setbrg(brg, baud); diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm2.c b/drivers/serial/cpm_uart/cpm_uart_cpm2.c index bb862e2f54cf..b8db4d3eed36 100644 --- a/drivers/serial/cpm_uart/cpm_uart_cpm2.c +++ b/drivers/serial/cpm_uart/cpm_uart_cpm2.c @@ -5,11 +5,11 @@ * * Maintainer: Kumar Gala (galak@kernel.crashing.org) (CPM2) * Pantelis Antoniou (panto@intracom.gr) (CPM1) - * + * * Copyright (C) 2004 Freescale Semiconductor, Inc. * (C) 2004 Intracom, S.A. * (C) 2006 MontaVista Software, Inc. - * Vitaly Bordug <vbordug@ru.mvista.com> + * Vitaly Bordug <vbordug@ru.mvista.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -41,9 +41,7 @@ #include <asm/io.h> #include <asm/irq.h> #include <asm/fs_pd.h> -#ifdef CONFIG_PPC_CPM_NEW_BINDING #include <asm/prom.h> -#endif #include <linux/serial_core.h> #include <linux/kernel.h> @@ -52,7 +50,6 @@ /**************************************************************/ -#ifdef CONFIG_PPC_CPM_NEW_BINDING void cpm_line_cr_cmd(struct uart_cpm_port *port, int cmd) { cpm_command(port->command, cmd); @@ -106,174 +103,8 @@ void cpm_uart_unmap_pram(struct uart_cpm_port *port, void __iomem *pram) iounmap(pram); } -#else -void cpm_line_cr_cmd(struct uart_cpm_port *port, int cmd) -{ - ulong val; - int line = port - cpm_uart_ports; - volatile cpm_cpm2_t *cp = cpm2_map(im_cpm); - - - switch (line) { - case UART_SMC1: - val = mk_cr_cmd(CPM_CR_SMC1_PAGE, CPM_CR_SMC1_SBLOCK, 0, - cmd) | CPM_CR_FLG; - break; - case UART_SMC2: - val = mk_cr_cmd(CPM_CR_SMC2_PAGE, CPM_CR_SMC2_SBLOCK, 0, - cmd) | CPM_CR_FLG; - break; - case UART_SCC1: - val = mk_cr_cmd(CPM_CR_SCC1_PAGE, CPM_CR_SCC1_SBLOCK, 0, - cmd) | CPM_CR_FLG; - break; - case UART_SCC2: - val = mk_cr_cmd(CPM_CR_SCC2_PAGE, CPM_CR_SCC2_SBLOCK, 0, - cmd) | CPM_CR_FLG; - break; - case UART_SCC3: - val = mk_cr_cmd(CPM_CR_SCC3_PAGE, CPM_CR_SCC3_SBLOCK, 0, - cmd) | CPM_CR_FLG; - break; - case UART_SCC4: - val = mk_cr_cmd(CPM_CR_SCC4_PAGE, CPM_CR_SCC4_SBLOCK, 0, - cmd) | CPM_CR_FLG; - break; - default: - return; - - } - cp->cp_cpcr = val; - while (cp->cp_cpcr & CPM_CR_FLG) ; - - cpm2_unmap(cp); -} - -void smc1_lineif(struct uart_cpm_port *pinfo) -{ - volatile iop_cpm2_t *io = cpm2_map(im_ioport); - volatile cpmux_t *cpmux = cpm2_map(im_cpmux); - - /* SMC1 is only on port D */ - io->iop_ppard |= 0x00c00000; - io->iop_pdird |= 0x00400000; - io->iop_pdird &= ~0x00800000; - io->iop_psord &= ~0x00c00000; - - /* Wire BRG1 to SMC1 */ - cpmux->cmx_smr &= 0x0f; - pinfo->brg = 1; - - cpm2_unmap(cpmux); - cpm2_unmap(io); -} - -void smc2_lineif(struct uart_cpm_port *pinfo) -{ - volatile iop_cpm2_t *io = cpm2_map(im_ioport); - volatile cpmux_t *cpmux = cpm2_map(im_cpmux); - - /* SMC2 is only on port A */ - io->iop_ppara |= 0x00c00000; - io->iop_pdira |= 0x00400000; - io->iop_pdira &= ~0x00800000; - io->iop_psora &= ~0x00c00000; - - /* Wire BRG2 to SMC2 */ - cpmux->cmx_smr &= 0xf0; - pinfo->brg = 2; - - cpm2_unmap(cpmux); - cpm2_unmap(io); -} - -void scc1_lineif(struct uart_cpm_port *pinfo) -{ - volatile 
iop_cpm2_t *io = cpm2_map(im_ioport); - volatile cpmux_t *cpmux = cpm2_map(im_cpmux); - - /* Use Port D for SCC1 instead of other functions. */ - io->iop_ppard |= 0x00000003; - io->iop_psord &= ~0x00000001; /* Rx */ - io->iop_psord |= 0x00000002; /* Tx */ - io->iop_pdird &= ~0x00000001; /* Rx */ - io->iop_pdird |= 0x00000002; /* Tx */ - - /* Wire BRG1 to SCC1 */ - cpmux->cmx_scr &= 0x00ffffff; - cpmux->cmx_scr |= 0x00000000; - pinfo->brg = 1; - - cpm2_unmap(cpmux); - cpm2_unmap(io); -} - -void scc2_lineif(struct uart_cpm_port *pinfo) -{ - /* - * STx GP3 uses the SCC2 secondary option pin assignment - * which this driver doesn't account for in the static - * pin assignments. This kind of board specific info - * really has to get out of the driver so boards can - * be supported in a sane fashion. - */ - volatile cpmux_t *cpmux = cpm2_map(im_cpmux); -#ifndef CONFIG_STX_GP3 - volatile iop_cpm2_t *io = cpm2_map(im_ioport); - - io->iop_pparb |= 0x008b0000; - io->iop_pdirb |= 0x00880000; - io->iop_psorb |= 0x00880000; - io->iop_pdirb &= ~0x00030000; - io->iop_psorb &= ~0x00030000; -#endif - cpmux->cmx_scr &= 0xff00ffff; - cpmux->cmx_scr |= 0x00090000; - pinfo->brg = 2; - - cpm2_unmap(cpmux); - cpm2_unmap(io); -} - -void scc3_lineif(struct uart_cpm_port *pinfo) -{ - volatile iop_cpm2_t *io = cpm2_map(im_ioport); - volatile cpmux_t *cpmux = cpm2_map(im_cpmux); - - io->iop_pparb |= 0x008b0000; - io->iop_pdirb |= 0x00880000; - io->iop_psorb |= 0x00880000; - io->iop_pdirb &= ~0x00030000; - io->iop_psorb &= ~0x00030000; - cpmux->cmx_scr &= 0xffff00ff; - cpmux->cmx_scr |= 0x00001200; - pinfo->brg = 3; - - cpm2_unmap(cpmux); - cpm2_unmap(io); -} - -void scc4_lineif(struct uart_cpm_port *pinfo) -{ - volatile iop_cpm2_t *io = cpm2_map(im_ioport); - volatile cpmux_t *cpmux = cpm2_map(im_cpmux); - - io->iop_ppard |= 0x00000600; - io->iop_psord &= ~0x00000600; /* Tx/Rx */ - io->iop_pdird &= ~0x00000200; /* Rx */ - io->iop_pdird |= 0x00000400; /* Tx */ - - cpmux->cmx_scr &= 0xffffff00; - cpmux->cmx_scr |= 0x0000001b; - pinfo->brg = 4; - - cpm2_unmap(cpmux); - cpm2_unmap(io); -} -#endif - /* - * Allocate DP-Ram and memory buffers. We need to allocate a transmit and + * Allocate DP-Ram and memory buffers. We need to allocate a transmit and * receive buffer descriptors from dual port ram, and a character * buffer area from host mem. 
If we are allocating for the console we need * to do it from bootmem @@ -340,111 +171,3 @@ void cpm_uart_freebuf(struct uart_cpm_port *pinfo) cpm_dpfree(pinfo->dp_addr); } - -#ifndef CONFIG_PPC_CPM_NEW_BINDING -/* Setup any dynamic params in the uart desc */ -int cpm_uart_init_portdesc(void) -{ -#if defined(CONFIG_SERIAL_CPM_SMC1) || defined(CONFIG_SERIAL_CPM_SMC2) - u16 *addr; -#endif - pr_debug("CPM uart[-]:init portdesc\n"); - - cpm_uart_nr = 0; -#ifdef CONFIG_SERIAL_CPM_SMC1 - cpm_uart_ports[UART_SMC1].smcp = (smc_t *) cpm2_map(im_smc[0]); - cpm_uart_ports[UART_SMC1].port.mapbase = - (unsigned long)cpm_uart_ports[UART_SMC1].smcp; - - cpm_uart_ports[UART_SMC1].smcup = - (smc_uart_t *) cpm2_map_size(im_dprambase[PROFF_SMC1], PROFF_SMC_SIZE); - addr = (u16 *)cpm2_map_size(im_dprambase[PROFF_SMC1_BASE], 2); - *addr = PROFF_SMC1; - cpm2_unmap(addr); - - cpm_uart_ports[UART_SMC1].smcp->smc_smcm |= (SMCM_RX | SMCM_TX); - cpm_uart_ports[UART_SMC1].smcp->smc_smcmr &= ~(SMCMR_REN | SMCMR_TEN); - cpm_uart_ports[UART_SMC1].port.uartclk = uart_clock(); - cpm_uart_port_map[cpm_uart_nr++] = UART_SMC1; -#endif - -#ifdef CONFIG_SERIAL_CPM_SMC2 - cpm_uart_ports[UART_SMC2].smcp = (smc_t *) cpm2_map(im_smc[1]); - cpm_uart_ports[UART_SMC2].port.mapbase = - (unsigned long)cpm_uart_ports[UART_SMC2].smcp; - - cpm_uart_ports[UART_SMC2].smcup = - (smc_uart_t *) cpm2_map_size(im_dprambase[PROFF_SMC2], PROFF_SMC_SIZE); - addr = (u16 *)cpm2_map_size(im_dprambase[PROFF_SMC2_BASE], 2); - *addr = PROFF_SMC2; - cpm2_unmap(addr); - - cpm_uart_ports[UART_SMC2].smcp->smc_smcm |= (SMCM_RX | SMCM_TX); - cpm_uart_ports[UART_SMC2].smcp->smc_smcmr &= ~(SMCMR_REN | SMCMR_TEN); - cpm_uart_ports[UART_SMC2].port.uartclk = uart_clock(); - cpm_uart_port_map[cpm_uart_nr++] = UART_SMC2; -#endif - -#ifdef CONFIG_SERIAL_CPM_SCC1 - cpm_uart_ports[UART_SCC1].sccp = (scc_t *) cpm2_map(im_scc[0]); - cpm_uart_ports[UART_SCC1].port.mapbase = - (unsigned long)cpm_uart_ports[UART_SCC1].sccp; - cpm_uart_ports[UART_SCC1].sccup = - (scc_uart_t *) cpm2_map_size(im_dprambase[PROFF_SCC1], PROFF_SCC_SIZE); - - cpm_uart_ports[UART_SCC1].sccp->scc_sccm &= - ~(UART_SCCM_TX | UART_SCCM_RX); - cpm_uart_ports[UART_SCC1].sccp->scc_gsmrl &= - ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT); - cpm_uart_ports[UART_SCC1].port.uartclk = uart_clock(); - cpm_uart_port_map[cpm_uart_nr++] = UART_SCC1; -#endif - -#ifdef CONFIG_SERIAL_CPM_SCC2 - cpm_uart_ports[UART_SCC2].sccp = (scc_t *) cpm2_map(im_scc[1]); - cpm_uart_ports[UART_SCC2].port.mapbase = - (unsigned long)cpm_uart_ports[UART_SCC2].sccp; - cpm_uart_ports[UART_SCC2].sccup = - (scc_uart_t *) cpm2_map_size(im_dprambase[PROFF_SCC2], PROFF_SCC_SIZE); - - cpm_uart_ports[UART_SCC2].sccp->scc_sccm &= - ~(UART_SCCM_TX | UART_SCCM_RX); - cpm_uart_ports[UART_SCC2].sccp->scc_gsmrl &= - ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT); - cpm_uart_ports[UART_SCC2].port.uartclk = uart_clock(); - cpm_uart_port_map[cpm_uart_nr++] = UART_SCC2; -#endif - -#ifdef CONFIG_SERIAL_CPM_SCC3 - cpm_uart_ports[UART_SCC3].sccp = (scc_t *) cpm2_map(im_scc[2]); - cpm_uart_ports[UART_SCC3].port.mapbase = - (unsigned long)cpm_uart_ports[UART_SCC3].sccp; - cpm_uart_ports[UART_SCC3].sccup = - (scc_uart_t *) cpm2_map_size(im_dprambase[PROFF_SCC3], PROFF_SCC_SIZE); - - cpm_uart_ports[UART_SCC3].sccp->scc_sccm &= - ~(UART_SCCM_TX | UART_SCCM_RX); - cpm_uart_ports[UART_SCC3].sccp->scc_gsmrl &= - ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT); - cpm_uart_ports[UART_SCC3].port.uartclk = uart_clock(); - cpm_uart_port_map[cpm_uart_nr++] = UART_SCC3; -#endif - -#ifdef CONFIG_SERIAL_CPM_SCC4 - 
cpm_uart_ports[UART_SCC4].sccp = (scc_t *) cpm2_map(im_scc[3]); - cpm_uart_ports[UART_SCC4].port.mapbase = - (unsigned long)cpm_uart_ports[UART_SCC4].sccp; - cpm_uart_ports[UART_SCC4].sccup = - (scc_uart_t *) cpm2_map_size(im_dprambase[PROFF_SCC4], PROFF_SCC_SIZE); - - cpm_uart_ports[UART_SCC4].sccp->scc_sccm &= - ~(UART_SCCM_TX | UART_SCCM_RX); - cpm_uart_ports[UART_SCC4].sccp->scc_gsmrl &= - ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT); - cpm_uart_ports[UART_SCC4].port.uartclk = uart_clock(); - cpm_uart_port_map[cpm_uart_nr++] = UART_SCC4; -#endif - - return 0; -} -#endif diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm2.h b/drivers/serial/cpm_uart/cpm_uart_cpm2.h index 40006a7dce46..7194c63dcf5f 100644 --- a/drivers/serial/cpm_uart/cpm_uart_cpm2.h +++ b/drivers/serial/cpm_uart/cpm_uart_cpm2.h @@ -2,7 +2,7 @@ * linux/drivers/serial/cpm_uart/cpm_uart_cpm2.h * * Driver for CPM (SCC/SMC) serial ports - * + * * definitions for cpm2 * */ @@ -12,16 +12,6 @@ #include <asm/cpm2.h> -/* defines for IRQs */ -#ifndef CONFIG_PPC_CPM_NEW_BINDING -#define SMC1_IRQ SIU_INT_SMC1 -#define SMC2_IRQ SIU_INT_SMC2 -#define SCC1_IRQ SIU_INT_SCC1 -#define SCC2_IRQ SIU_INT_SCC2 -#define SCC3_IRQ SIU_INT_SCC3 -#define SCC4_IRQ SIU_INT_SCC4 -#endif - static inline void cpm_set_brg(int brg, int baud) { cpm_setbrg(brg, baud); diff --git a/drivers/serial/of_serial.c b/drivers/serial/of_serial.c index 25029c7570b6..8fa0ff561e9f 100644 --- a/drivers/serial/of_serial.c +++ b/drivers/serial/of_serial.c @@ -13,8 +13,8 @@ #include <linux/module.h> #include <linux/serial_core.h> #include <linux/serial_8250.h> +#include <linux/of_platform.h> -#include <asm/of_platform.h> #include <asm/prom.h> struct of_serial_info { diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c index 681d62325d3d..604e5f0a2d95 100644 --- a/drivers/spi/mpc52xx_psc_spi.c +++ b/drivers/spi/mpc52xx_psc_spi.c @@ -17,7 +17,7 @@ #include <linux/interrupt.h> #if defined(CONFIG_PPC_MERGE) -#include <asm/of_platform.h> +#include <linux/of_platform.h> #else #include <linux/platform_device.h> #endif diff --git a/drivers/video/platinumfb.c b/drivers/video/platinumfb.c index cbe71a5338d0..03b3670130a0 100644 --- a/drivers/video/platinumfb.c +++ b/drivers/video/platinumfb.c @@ -31,11 +31,11 @@ #include <linux/fb.h> #include <linux/init.h> #include <linux/nvram.h> +#include <linux/of_device.h> +#include <linux/of_platform.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/pgtable.h> -#include <asm/of_device.h> -#include <asm/of_platform.h> #include "macmodes.h" #include "platinumfb.h" diff --git a/drivers/w1/masters/ds2482.c b/drivers/w1/masters/ds2482.c index 0fd5820d5c61..df52cb355f7d 100644 --- a/drivers/w1/masters/ds2482.c +++ b/drivers/w1/masters/ds2482.c @@ -94,21 +94,31 @@ static const u8 ds2482_chan_rd[8] = #define DS2482_REG_STS_1WB 0x01 -static int ds2482_attach_adapter(struct i2c_adapter *adapter); -static int ds2482_detect(struct i2c_adapter *adapter, int address, int kind); -static int ds2482_detach_client(struct i2c_client *client); +static int ds2482_probe(struct i2c_client *client, + const struct i2c_device_id *id); +static int ds2482_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info); +static int ds2482_remove(struct i2c_client *client); /** * Driver data (common to all clients) */ +static const struct i2c_device_id ds2482_id[] = { + { "ds2482", 0 }, + { } +}; + static struct i2c_driver ds2482_driver = { .driver = { .owner = THIS_MODULE, .name = "ds2482", }, - .attach_adapter = ds2482_attach_adapter, - 
.detach_client = ds2482_detach_client, + .probe = ds2482_probe, + .remove = ds2482_remove, + .id_table = ds2482_id, + .detect = ds2482_detect, + .address_data = &addr_data, }; /* @@ -124,7 +134,7 @@ struct ds2482_w1_chan { }; struct ds2482_data { - struct i2c_client client; + struct i2c_client *client; struct mutex access_lock; /* 1-wire interface(s) */ @@ -147,7 +157,7 @@ struct ds2482_data { static inline int ds2482_select_register(struct ds2482_data *pdev, u8 read_ptr) { if (pdev->read_prt != read_ptr) { - if (i2c_smbus_write_byte_data(&pdev->client, + if (i2c_smbus_write_byte_data(pdev->client, DS2482_CMD_SET_READ_PTR, read_ptr) < 0) return -1; @@ -167,7 +177,7 @@ static inline int ds2482_select_register(struct ds2482_data *pdev, u8 read_ptr) */ static inline int ds2482_send_cmd(struct ds2482_data *pdev, u8 cmd) { - if (i2c_smbus_write_byte(&pdev->client, cmd) < 0) + if (i2c_smbus_write_byte(pdev->client, cmd) < 0) return -1; pdev->read_prt = DS2482_PTR_CODE_STATUS; @@ -187,7 +197,7 @@ static inline int ds2482_send_cmd(struct ds2482_data *pdev, u8 cmd) static inline int ds2482_send_cmd_data(struct ds2482_data *pdev, u8 cmd, u8 byte) { - if (i2c_smbus_write_byte_data(&pdev->client, cmd, byte) < 0) + if (i2c_smbus_write_byte_data(pdev->client, cmd, byte) < 0) return -1; /* all cmds leave in STATUS, except CONFIG */ @@ -216,7 +226,7 @@ static int ds2482_wait_1wire_idle(struct ds2482_data *pdev) if (!ds2482_select_register(pdev, DS2482_PTR_CODE_STATUS)) { do { - temp = i2c_smbus_read_byte(&pdev->client); + temp = i2c_smbus_read_byte(pdev->client); } while ((temp >= 0) && (temp & DS2482_REG_STS_1WB) && (++retries < DS2482_WAIT_IDLE_TIMEOUT)); } @@ -238,13 +248,13 @@ static int ds2482_wait_1wire_idle(struct ds2482_data *pdev) */ static int ds2482_set_channel(struct ds2482_data *pdev, u8 channel) { - if (i2c_smbus_write_byte_data(&pdev->client, DS2482_CMD_CHANNEL_SELECT, + if (i2c_smbus_write_byte_data(pdev->client, DS2482_CMD_CHANNEL_SELECT, ds2482_chan_wr[channel]) < 0) return -1; pdev->read_prt = DS2482_PTR_CODE_CHANNEL; pdev->channel = -1; - if (i2c_smbus_read_byte(&pdev->client) == ds2482_chan_rd[channel]) { + if (i2c_smbus_read_byte(pdev->client) == ds2482_chan_rd[channel]) { pdev->channel = channel; return 0; } @@ -368,7 +378,7 @@ static u8 ds2482_w1_read_byte(void *data) ds2482_select_register(pdev, DS2482_PTR_CODE_DATA); /* Read the data byte */ - result = i2c_smbus_read_byte(&pdev->client); + result = i2c_smbus_read_byte(pdev->client); mutex_unlock(&pdev->access_lock); @@ -415,47 +425,38 @@ static u8 ds2482_w1_reset_bus(void *data) } -/** - * Called to see if the device exists on an i2c bus. - */ -static int ds2482_attach_adapter(struct i2c_adapter *adapter) +static int ds2482_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info) { - return i2c_probe(adapter, &addr_data, ds2482_detect); -} + if (!i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_WRITE_BYTE_DATA | + I2C_FUNC_SMBUS_BYTE)) + return -ENODEV; + strlcpy(info->type, "ds2482", I2C_NAME_SIZE); -/* - * The following function does more than just detection. If detection - * succeeds, it also registers the new chip. 
- */ -static int ds2482_detect(struct i2c_adapter *adapter, int address, int kind) + return 0; +} + +static int ds2482_probe(struct i2c_client *client, + const struct i2c_device_id *id) { struct ds2482_data *data; - struct i2c_client *new_client; - int err = 0; + int err = -ENODEV; int temp1; int idx; - if (!i2c_check_functionality(adapter, - I2C_FUNC_SMBUS_WRITE_BYTE_DATA | - I2C_FUNC_SMBUS_BYTE)) - goto exit; - if (!(data = kzalloc(sizeof(struct ds2482_data), GFP_KERNEL))) { err = -ENOMEM; goto exit; } - new_client = &data->client; - i2c_set_clientdata(new_client, data); - new_client->addr = address; - new_client->driver = &ds2482_driver; - new_client->adapter = adapter; + data->client = client; + i2c_set_clientdata(client, data); /* Reset the device (sets the read_ptr to status) */ if (ds2482_send_cmd(data, DS2482_CMD_RESET) < 0) { - dev_dbg(&adapter->dev, "DS2482 reset failed at 0x%02x.\n", - address); + dev_warn(&client->dev, "DS2482 reset failed.\n"); goto exit_free; } @@ -463,10 +464,10 @@ static int ds2482_detect(struct i2c_adapter *adapter, int address, int kind) ndelay(525); /* Read the status byte - only reset bit and line should be set */ - temp1 = i2c_smbus_read_byte(new_client); + temp1 = i2c_smbus_read_byte(client); if (temp1 != (DS2482_REG_STS_LL | DS2482_REG_STS_RST)) { - dev_dbg(&adapter->dev, "DS2482 (0x%02x) reset status " - "0x%02X - not a DS2482\n", address, temp1); + dev_warn(&client->dev, "DS2482 reset status " + "0x%02X - not a DS2482\n", temp1); goto exit_free; } @@ -478,16 +479,8 @@ static int ds2482_detect(struct i2c_adapter *adapter, int address, int kind) /* Set all config items to 0 (off) */ ds2482_send_cmd_data(data, DS2482_CMD_WRITE_CONFIG, 0xF0); - /* We can fill in the remaining client fields */ - snprintf(new_client->name, sizeof(new_client->name), "ds2482-%d00", - data->w1_count); - mutex_init(&data->access_lock); - /* Tell the I2C layer a new client has arrived */ - if ((err = i2c_attach_client(new_client))) - goto exit_free; - /* Register 1-wire interface(s) */ for (idx = 0; idx < data->w1_count; idx++) { data->w1_ch[idx].pdev = data; @@ -511,8 +504,6 @@ static int ds2482_detect(struct i2c_adapter *adapter, int address, int kind) return 0; exit_w1_remove: - i2c_detach_client(new_client); - for (idx = 0; idx < data->w1_count; idx++) { if (data->w1_ch[idx].pdev != NULL) w1_remove_master_device(&data->w1_ch[idx].w1_bm); @@ -523,10 +514,10 @@ exit: return err; } -static int ds2482_detach_client(struct i2c_client *client) +static int ds2482_remove(struct i2c_client *client) { struct ds2482_data *data = i2c_get_clientdata(client); - int err, idx; + int idx; /* Unregister the 1-wire bridge(s) */ for (idx = 0; idx < data->w1_count; idx++) { @@ -534,13 +525,6 @@ static int ds2482_detach_client(struct i2c_client *client) w1_remove_master_device(&data->w1_ch[idx].w1_bm); } - /* Detach the i2c device */ - if ((err = i2c_detach_client(client))) { - dev_err(&client->dev, - "Deregistration failed, client not detached.\n"); - return err; - } - /* Free the memory */ kfree(data); return 0; diff --git a/drivers/watchdog/mpc5200_wdt.c b/drivers/watchdog/mpc5200_wdt.c index 80a91d4cea11..77c1c2ae2cc2 100644 --- a/drivers/watchdog/mpc5200_wdt.c +++ b/drivers/watchdog/mpc5200_wdt.c @@ -4,7 +4,7 @@ #include <linux/watchdog.h> #include <linux/io.h> #include <linux/spinlock.h> -#include <asm/of_platform.h> +#include <linux/of_platform.h> #include <asm/uaccess.h> #include <asm/mpc52xx.h> |
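/*
 * Minimal skeleton of the new-style i2c binding model that the ds2482
 * driver is converted to above.  "foo" is a placeholder device name, not a
 * real chip: the i2c core matches the id table and hands the driver a fully
 * formed i2c_client, so probe()/remove() replace the legacy
 * attach_adapter()/detach_client() pair and the driver no longer builds
 * clients itself.
 */
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/slab.h>

struct foo_data {
        struct i2c_client *client;
};

static int foo_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
        struct foo_data *data;

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
        data->client = client;
        i2c_set_clientdata(client, data);
        return 0;
}

static int foo_remove(struct i2c_client *client)
{
        kfree(i2c_get_clientdata(client));
        return 0;
}

static const struct i2c_device_id foo_id[] = {
        { "foo", 0 },
        { }
};
MODULE_DEVICE_TABLE(i2c, foo_id);

static struct i2c_driver foo_driver = {
        .driver = {
                .owner  = THIS_MODULE,
                .name   = "foo",
        },
        .probe          = foo_probe,
        .remove         = foo_remove,
        .id_table       = foo_id,
};

static int __init foo_init(void)
{
        return i2c_add_driver(&foo_driver);
}
module_init(foo_init);

static void __exit foo_exit(void)
{
        i2c_del_driver(&foo_driver);
}
module_exit(foo_exit);

MODULE_LICENSE("GPL");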