Diffstat (limited to 'drivers')
884 files changed, 36124 insertions, 10032 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 002838d23b86..cc57bab146b5 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -241,6 +241,7 @@ config ACPI_CPU_FREQ_PSS config ACPI_PROCESSOR_CSTATE def_bool y + depends on ACPI_PROCESSOR depends on IA64 || X86 config ACPI_PROCESSOR_IDLE diff --git a/drivers/acpi/acpi_lpit.c b/drivers/acpi/acpi_lpit.c index 433376e819bb..953437a216f6 100644 --- a/drivers/acpi/acpi_lpit.c +++ b/drivers/acpi/acpi_lpit.c @@ -104,7 +104,7 @@ static void lpit_update_residency(struct lpit_residency_info *info, info->gaddr = lpit_native->residency_counter; if (info->gaddr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { - info->iomem_addr = ioremap_nocache(info->gaddr.address, + info->iomem_addr = ioremap(info->gaddr.address, info->gaddr.bit_width / 8); if (!info->iomem_addr) return; diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c index 2c4dda0787e8..5379bc3f275d 100644 --- a/drivers/acpi/acpi_processor.c +++ b/drivers/acpi/acpi_processor.c @@ -705,3 +705,185 @@ void __init acpi_processor_init(void) acpi_scan_add_handler_with_hotplug(&processor_handler, "processor"); acpi_scan_add_handler(&processor_container_handler); } + +#ifdef CONFIG_ACPI_PROCESSOR_CSTATE +/** + * acpi_processor_claim_cst_control - Request _CST control from the platform. + */ +bool acpi_processor_claim_cst_control(void) +{ + static bool cst_control_claimed; + acpi_status status; + + if (!acpi_gbl_FADT.cst_control || cst_control_claimed) + return true; + + status = acpi_os_write_port(acpi_gbl_FADT.smi_command, + acpi_gbl_FADT.cst_control, 8); + if (ACPI_FAILURE(status)) { + pr_warn("ACPI: Failed to claim processor _CST control\n"); + return false; + } + + cst_control_claimed = true; + return true; +} +EXPORT_SYMBOL_GPL(acpi_processor_claim_cst_control); + +/** + * acpi_processor_evaluate_cst - Evaluate the processor _CST control method. + * @handle: ACPI handle of the processor object containing the _CST. + * @cpu: The numeric ID of the target CPU. + * @info: Object to write the C-states information into. + * + * Extract the C-state information for the given CPU from the output of the _CST + * control method under the corresponding ACPI processor object (or processor + * device object) and populate @info with it. + * + * If any ACPI_ADR_SPACE_FIXED_HARDWARE C-states are found, invoke + * acpi_processor_ffh_cstate_probe() to verify them and update the + * cpu_cstate_entry data for @cpu. + */ +int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu, + struct acpi_processor_power *info) +{ + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *cst; + acpi_status status; + u64 count; + int last_index = 0; + int i, ret = 0; + + status = acpi_evaluate_object(handle, "_CST", NULL, &buffer); + if (ACPI_FAILURE(status)) { + acpi_handle_debug(handle, "No _CST\n"); + return -ENODEV; + } + + cst = buffer.pointer; + + /* There must be at least 2 elements. */ + if (!cst || cst->type != ACPI_TYPE_PACKAGE || cst->package.count < 2) { + acpi_handle_warn(handle, "Invalid _CST output\n"); + ret = -EFAULT; + goto end; + } + + count = cst->package.elements[0].integer.value; + + /* Validate the number of C-states. 
*/ + if (count < 1 || count != cst->package.count - 1) { + acpi_handle_warn(handle, "Inconsistent _CST data\n"); + ret = -EFAULT; + goto end; + } + + for (i = 1; i <= count; i++) { + union acpi_object *element; + union acpi_object *obj; + struct acpi_power_register *reg; + struct acpi_processor_cx cx; + + /* + * If there is not enough space for all C-states, skip the + * excess ones and log a warning. + */ + if (last_index >= ACPI_PROCESSOR_MAX_POWER - 1) { + acpi_handle_warn(handle, + "No room for more idle states (limit: %d)\n", + ACPI_PROCESSOR_MAX_POWER - 1); + break; + } + + memset(&cx, 0, sizeof(cx)); + + element = &cst->package.elements[i]; + if (element->type != ACPI_TYPE_PACKAGE) + continue; + + if (element->package.count != 4) + continue; + + obj = &element->package.elements[0]; + + if (obj->type != ACPI_TYPE_BUFFER) + continue; + + reg = (struct acpi_power_register *)obj->buffer.pointer; + + obj = &element->package.elements[1]; + if (obj->type != ACPI_TYPE_INTEGER) + continue; + + cx.type = obj->integer.value; + /* + * There are known cases in which the _CST output does not + * contain C1, so if the type of the first state found is not + * C1, leave an empty slot for C1 to be filled in later. + */ + if (i == 1 && cx.type != ACPI_STATE_C1) + last_index = 1; + + cx.address = reg->address; + cx.index = last_index + 1; + + if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) { + if (!acpi_processor_ffh_cstate_probe(cpu, &cx, reg)) { + /* + * In the majority of cases _CST describes C1 as + * a FIXED_HARDWARE C-state, but if the command + * line forbids using MWAIT, use CSTATE_HALT for + * C1 regardless. + */ + if (cx.type == ACPI_STATE_C1 && + boot_option_idle_override == IDLE_NOMWAIT) { + cx.entry_method = ACPI_CSTATE_HALT; + snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT"); + } else { + cx.entry_method = ACPI_CSTATE_FFH; + } + } else if (cx.type == ACPI_STATE_C1) { + /* + * In the special case of C1, FIXED_HARDWARE can + * be handled by executing the HLT instruction. + */ + cx.entry_method = ACPI_CSTATE_HALT; + snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT"); + } else { + continue; + } + } else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { + cx.entry_method = ACPI_CSTATE_SYSTEMIO; + snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x", + cx.address); + } else { + continue; + } + + if (cx.type == ACPI_STATE_C1) + cx.valid = 1; + + obj = &element->package.elements[2]; + if (obj->type != ACPI_TYPE_INTEGER) + continue; + + cx.latency = obj->integer.value; + + obj = &element->package.elements[3]; + if (obj->type != ACPI_TYPE_INTEGER) + continue; + + memcpy(&info->states[++last_index], &cx, sizeof(cx)); + } + + acpi_handle_info(handle, "Found %d idle states\n", last_index); + + info->count = last_index; + + end: + kfree(buffer.pointer); + + return ret; +} +EXPORT_SYMBOL_GPL(acpi_processor_evaluate_cst); +#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */ diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c index 2f380e7381d6..15c5b272e698 100644 --- a/drivers/acpi/acpi_video.c +++ b/drivers/acpi/acpi_video.c @@ -2187,7 +2187,7 @@ int acpi_video_register(void) if (register_count) { /* * if the function of acpi_video_register is already called, - * don't register the acpi_vide_bus again and return no error. + * don't register the acpi_video_bus again and return no error. 
*/ goto leave; } diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h index 863ade9add6d..173447d50acf 100644 --- a/drivers/acpi/acpica/acapps.h +++ b/drivers/acpi/acpica/acapps.h @@ -3,7 +3,7 @@ * * Module Name: acapps - common include for ACPI applications/tools * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ @@ -17,7 +17,7 @@ /* Common info for tool signons */ #define ACPICA_NAME "Intel ACPI Component Architecture" -#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2019 Intel Corporation" +#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2020 Intel Corporation" #if ACPI_MACHINE_WIDTH == 64 #define ACPI_WIDTH " (64-bit version)" diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h index 54f81eac7ec9..89101e53324b 100644 --- a/drivers/acpi/acpica/accommon.h +++ b/drivers/acpi/acpica/accommon.h @@ -3,7 +3,7 @@ * * Name: accommon.h - Common include files for generation of ACPICA source * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acconvert.h b/drivers/acpi/acpica/acconvert.h index d5478cd4a857..ede4b9cc9e85 100644 --- a/drivers/acpi/acpica/acconvert.h +++ b/drivers/acpi/acpica/acconvert.h @@ -3,7 +3,7 @@ * * Module Name: acapps - common include for ACPI applications/tools * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h index 694cf206fa9a..a676daaa2da5 100644 --- a/drivers/acpi/acpica/acdebug.h +++ b/drivers/acpi/acpica/acdebug.h @@ -3,7 +3,7 @@ * * Name: acdebug.h - ACPI/AML debugger * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h index 82f81501566b..7ba6e308f146 100644 --- a/drivers/acpi/acpica/acdispat.h +++ b/drivers/acpi/acpica/acdispat.h @@ -3,7 +3,7 @@ * * Name: acdispat.h - dispatcher (parser to interpreter interface) * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h index c8652f91054e..79f292687bd6 100644 --- a/drivers/acpi/acpica/acevents.h +++ b/drivers/acpi/acpica/acevents.h @@ -3,7 +3,7 @@ * * Name: acevents.h - Event subcomponent prototypes and defines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h index fd3beea93421..38ffa2c0a496 100644 --- a/drivers/acpi/acpica/acglobal.h +++ b/drivers/acpi/acpica/acglobal.h @@ -3,7 +3,7 @@ * * Name: acglobal.h - Declarations for global variables * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h index bcf8f7501db7..67f282e9e0af 100644 --- a/drivers/acpi/acpica/achware.h +++ b/drivers/acpi/acpica/achware.h @@ -3,7 +3,7 @@ * * Name: achware.h -- hardware specific interfaces * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h index 20706adbc148..a6d896cda2a5 100644 --- a/drivers/acpi/acpica/acinterp.h +++ b/drivers/acpi/acpica/acinterp.h @@ -3,7 +3,7 @@ * * Name: acinterp.h - Interpreter subcomponent prototypes and defines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index 1ea52576f0a2..af58cd2dc9d3 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h @@ -3,7 +3,7 @@ * * Name: aclocal.h - Internal data types used across the ACPI subsystem * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h index 283614e82a20..2269e10bc21b 100644 --- a/drivers/acpi/acpica/acmacros.h +++ b/drivers/acpi/acpica/acmacros.h @@ -3,7 +3,7 @@ * * Name: acmacros.h - C macros for the entire subsystem. * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h index 7da1864798a0..e618ddfab2fd 100644 --- a/drivers/acpi/acpica/acnamesp.h +++ b/drivers/acpi/acpica/acnamesp.h @@ -3,7 +3,7 @@ * * Name: acnamesp.h - Namespace subcomponent prototypes and defines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h index 8def0e3d690f..9f0219a8cb98 100644 --- a/drivers/acpi/acpica/acobject.h +++ b/drivers/acpi/acpica/acobject.h @@ -3,7 +3,7 @@ * * Name: acobject.h - Definition of union acpi_operand_object (Internal object only) * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
* *****************************************************************************/ @@ -260,7 +260,8 @@ struct acpi_object_index_field { /* The buffer_field is different in that it is part of a Buffer, not an op_region */ struct acpi_object_buffer_field { - ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *buffer_obj; /* Containing Buffer object */ + ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO u8 is_create_field; /* Special case for objects created by create_field() */ + union acpi_operand_object *buffer_obj; /* Containing Buffer object */ }; /****************************************************************************** diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h index 9d78134428e3..8825394be9ab 100644 --- a/drivers/acpi/acpica/acopcode.h +++ b/drivers/acpi/acpica/acopcode.h @@ -3,7 +3,7 @@ * * Name: acopcode.h - AML opcode information for the AML parser and interpreter * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h index 6e32c97cba6c..bc00b85c0a8f 100644 --- a/drivers/acpi/acpica/acparser.h +++ b/drivers/acpi/acpica/acparser.h @@ -3,7 +3,7 @@ * * Module Name: acparser.h - AML Parser subcomponent prototypes and defines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h index 387163b962a7..cd0f5df0ea23 100644 --- a/drivers/acpi/acpica/acpredef.h +++ b/drivers/acpi/acpica/acpredef.h @@ -3,7 +3,7 @@ * * Name: acpredef - Information table for ACPI predefined methods and objects * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h index 422cd8f2b92e..6de8a1650d3d 100644 --- a/drivers/acpi/acpica/acresrc.h +++ b/drivers/acpi/acpica/acresrc.h @@ -3,7 +3,7 @@ * * Name: acresrc.h - Resource Manager function prototypes * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h index 2043dff370b1..4c900c108f3f 100644 --- a/drivers/acpi/acpica/acstruct.h +++ b/drivers/acpi/acpica/acstruct.h @@ -3,7 +3,7 @@ * * Name: acstruct.h - Internal structs * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h index dfbf1dbd4033..734624facda3 100644 --- a/drivers/acpi/acpica/actables.h +++ b/drivers/acpi/acpica/actables.h @@ -3,7 +3,7 @@ * * Name: actables.h - ACPI table management * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h index 5fb50634e08e..7c89b470ec81 100644 --- a/drivers/acpi/acpica/acutils.h +++ b/drivers/acpi/acpica/acutils.h @@ -3,7 +3,7 @@ * * Name: acutils.h -- prototypes for the common (subsystem-wide) procedures * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h index 49e412edd7c6..1d541bbac4a3 100644 --- a/drivers/acpi/acpica/amlcode.h +++ b/drivers/acpi/acpica/amlcode.h @@ -5,7 +5,7 @@ * Declarations and definitions contained herein are derived * directly from the ACPI specification. * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h index 7c3bd4ab60fc..e5234e001acf 100644 --- a/drivers/acpi/acpica/amlresrc.h +++ b/drivers/acpi/acpica/amlresrc.h @@ -3,7 +3,7 @@ * * Module Name: amlresrc.h - AML resource descriptors * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/dbhistry.c b/drivers/acpi/acpica/dbhistry.c index 47d2e5059849..bb9600b867ee 100644 --- a/drivers/acpi/acpica/dbhistry.c +++ b/drivers/acpi/acpica/dbhistry.c @@ -3,7 +3,7 @@ * * Module Name: dbhistry - debugger HISTORY command * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/dbinput.c b/drivers/acpi/acpica/dbinput.c index e1632b340182..aa71f65395d2 100644 --- a/drivers/acpi/acpica/dbinput.c +++ b/drivers/acpi/acpica/dbinput.c @@ -816,7 +816,7 @@ acpi_db_command_dispatch(char *input_buffer, if (ACPI_FAILURE(status) || temp64 >= ACPI_NUM_PREDEFINED_REGIONS) { acpi_os_printf - ("Invalid adress space ID: must be between 0 and %u inclusive\n", + ("Invalid address space ID: must be between 0 and %u inclusive\n", ACPI_NUM_PREDEFINED_REGIONS - 1); return (AE_OK); } diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c index 85b34d02233e..ad17f62e51d9 100644 --- a/drivers/acpi/acpica/dsargs.c +++ b/drivers/acpi/acpica/dsargs.c @@ -4,7 +4,7 @@ * Module Name: dsargs - Support for execution of dynamic arguments for static * objects (regions, fields, buffer fields, etc.) * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c index 5034fab9cf69..4b5b6e859f62 100644 --- a/drivers/acpi/acpica/dscontrol.c +++ b/drivers/acpi/acpica/dscontrol.c @@ -4,7 +4,7 @@ * Module Name: dscontrol - Support for execution control opcodes - * if/else/while/return * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/dsdebug.c b/drivers/acpi/acpica/dsdebug.c index 0d3e1ced1f57..63bc5f19fb82 100644 --- a/drivers/acpi/acpica/dsdebug.c +++ b/drivers/acpi/acpica/dsdebug.c @@ -3,7 +3,7 @@ * * Module Name: dsdebug - Parser/Interpreter interface - debugging * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c index faa38a22263a..c901f5aec739 100644 --- a/drivers/acpi/acpica/dsfield.c +++ b/drivers/acpi/acpica/dsfield.c @@ -3,7 +3,7 @@ * * Module Name: dsfield - Dispatcher field routines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ @@ -243,7 +243,7 @@ cleanup: * FUNCTION: acpi_ds_get_field_names * * PARAMETERS: info - create_field info structure - * ` walk_state - Current method state + * walk_state - Current method state * arg - First parser arg for the field name list * * RETURN: Status diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c index a1ffed29903b..9be2a309424c 100644 --- a/drivers/acpi/acpica/dsinit.c +++ b/drivers/acpi/acpica/dsinit.c @@ -3,7 +3,7 @@ * * Module Name: dsinit - Object initialization namespace walk * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c index f59b4d944f7f..cf67caff878a 100644 --- a/drivers/acpi/acpica/dsmethod.c +++ b/drivers/acpi/acpica/dsmethod.c @@ -3,7 +3,7 @@ * * Module Name: dsmethod - Parser/Interpreter interface - control method parsing * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c index 179129a2deb1..c0a14a6a2c20 100644 --- a/drivers/acpi/acpica/dsobject.c +++ b/drivers/acpi/acpica/dsobject.c @@ -3,7 +3,7 @@ * * Module Name: dsobject - Dispatcher object management routines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c index 10f32b62608e..d9c26e720cb7 100644 --- a/drivers/acpi/acpica/dsopcode.c +++ b/drivers/acpi/acpica/dsopcode.c @@ -3,7 +3,7 @@ * * Module Name: dsopcode - Dispatcher support for regions and fields * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
* *****************************************************************************/ @@ -217,6 +217,8 @@ acpi_ds_init_buffer_field(u16 aml_opcode, } obj_desc->buffer_field.buffer_obj = buffer_desc; + obj_desc->buffer_field.is_create_field = + aml_opcode == AML_CREATE_FIELD_OP; /* Reference count for buffer_desc inherits obj_desc count */ diff --git a/drivers/acpi/acpica/dspkginit.c b/drivers/acpi/acpica/dspkginit.c index 997faa10f615..d869568d55c2 100644 --- a/drivers/acpi/acpica/dspkginit.c +++ b/drivers/acpi/acpica/dspkginit.c @@ -3,7 +3,7 @@ * * Module Name: dspkginit - Completion of deferred package initialization * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c index d75aae304595..5e81a1ae44cf 100644 --- a/drivers/acpi/acpica/dswexec.c +++ b/drivers/acpi/acpica/dswexec.c @@ -4,7 +4,7 @@ * Module Name: dswexec - Dispatcher method execution callbacks; * dispatch to interpreter. * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c index c88fd31208a5..697974e37edf 100644 --- a/drivers/acpi/acpica/dswload.c +++ b/drivers/acpi/acpica/dswload.c @@ -3,7 +3,7 @@ * * Module Name: dswload - Dispatcher first pass namespace load callbacks * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ @@ -410,6 +410,27 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state) ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op, walk_state)); + /* + * Disassembler: handle create field operators here. + * + * create_buffer_field is a deferred op that is typically processed in load + * pass 2. However, disassembly of control method contents walks the parse + * tree with ACPI_PARSE_LOAD_PASS1, and AML_CREATE operators are processed + * in a later walk. This is a problem when there is a control method that + * has the same name as the AML_CREATE object. In this case, any use of the + * name segment will be detected as a method call rather than a reference + * to a buffer field. + * + * This earlier creation during disassembly solves this issue by inserting + * the named object in the ACPI namespace so that references to this name + * would be a name string rather than a method call. + */ + if ((walk_state->parse_flags & ACPI_PARSE_DISASSEMBLE) && + (walk_state->op_info->flags & AML_CREATE)) { + status = acpi_ds_create_buffer_field(op, walk_state); + return_ACPI_STATUS(status); + } + /* We are only interested in opcodes that have an associated name */ if (!(walk_state->op_info->flags & (AML_NAMED | AML_FIELD))) { diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c index 935a8e2623e4..b31457ca926c 100644 --- a/drivers/acpi/acpica/dswload2.c +++ b/drivers/acpi/acpica/dswload2.c @@ -3,7 +3,7 @@ * * Module Name: dswload2 - Dispatcher second pass namespace load callbacks * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c index 39acf7b286da..9c397642fed7 100644 --- a/drivers/acpi/acpica/dswscope.c +++ b/drivers/acpi/acpica/dswscope.c @@ -3,7 +3,7 @@ * * Module Name: dswscope - Scope stack manipulation * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c index de79f835a373..809a0c0536b5 100644 --- a/drivers/acpi/acpica/dswstate.c +++ b/drivers/acpi/acpica/dswstate.c @@ -3,7 +3,7 @@ * * Module Name: dswstate - Dispatcher parse tree walk management routines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c index 9e2f5a05c066..8c83d8c620dc 100644 --- a/drivers/acpi/acpica/evevent.c +++ b/drivers/acpi/acpica/evevent.c @@ -3,7 +3,7 @@ * * Module Name: evevent - Fixed Event handling and dispatch * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c index 5c77bee5d31f..0ced84ae13e4 100644 --- a/drivers/acpi/acpica/evglock.c +++ b/drivers/acpi/acpica/evglock.c @@ -3,7 +3,7 @@ * * Module Name: evglock - Global Lock support * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c index 344feba29063..3e39907fedd9 100644 --- a/drivers/acpi/acpica/evgpe.c +++ b/drivers/acpi/acpica/evgpe.c @@ -3,7 +3,7 @@ * * Module Name: evgpe - General Purpose Event handling and dispatch * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c index 9c7adaa7b582..132adff1e131 100644 --- a/drivers/acpi/acpica/evgpeblk.c +++ b/drivers/acpi/acpica/evgpeblk.c @@ -3,7 +3,7 @@ * * Module Name: evgpeblk - GPE block creation and initialization. * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c index 70d21d5ec5f3..6effd8076dcc 100644 --- a/drivers/acpi/acpica/evgpeinit.c +++ b/drivers/acpi/acpica/evgpeinit.c @@ -3,7 +3,7 @@ * * Module Name: evgpeinit - System GPE initialization and update * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c index 917892227e09..738873e876ca 100644 --- a/drivers/acpi/acpica/evgpeutil.c +++ b/drivers/acpi/acpica/evgpeutil.c @@ -3,7 +3,7 @@ * * Module Name: evgpeutil - GPE utilities * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c index 3ef4e27995f0..5884eba047f7 100644 --- a/drivers/acpi/acpica/evhandler.c +++ b/drivers/acpi/acpica/evhandler.c @@ -3,7 +3,7 @@ * * Module Name: evhandler - Support for Address Space handlers * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c index aa98fe07cd1b..ce1eda6beb84 100644 --- a/drivers/acpi/acpica/evmisc.c +++ b/drivers/acpi/acpica/evmisc.c @@ -3,7 +3,7 @@ * * Module Name: evmisc - Miscellaneous event manager support functions * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c index 1ff126460007..738d4b231f34 100644 --- a/drivers/acpi/acpica/evregion.c +++ b/drivers/acpi/acpica/evregion.c @@ -3,7 +3,7 @@ * * Module Name: evregion - Operation Region support * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c index aee09640d710..aefc0145e583 100644 --- a/drivers/acpi/acpica/evrgnini.c +++ b/drivers/acpi/acpica/evrgnini.c @@ -3,7 +3,7 @@ * * Module Name: evrgnini- ACPI address_space (op_region) init * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c index 279ef0557aa3..e4e012297eee 100644 --- a/drivers/acpi/acpica/evxface.c +++ b/drivers/acpi/acpica/evxface.c @@ -3,7 +3,7 @@ * * Module Name: evxface - External interfaces for ACPI events * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c index e528fe56b755..1a15b0087379 100644 --- a/drivers/acpi/acpica/evxfevnt.c +++ b/drivers/acpi/acpica/evxfevnt.c @@ -3,7 +3,7 @@ * * Module Name: evxfevnt - External Interfaces, ACPI event disable/enable * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c index 04a40d563dd6..2c39ff2a7406 100644 --- a/drivers/acpi/acpica/evxfgpe.c +++ b/drivers/acpi/acpica/evxfgpe.c @@ -3,7 +3,7 @@ * * Module Name: evxfgpe - External Interfaces for General Purpose Events (GPEs) * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c index 47265b073e6f..da97fd0c6b51 100644 --- a/drivers/acpi/acpica/evxfregn.c +++ b/drivers/acpi/acpica/evxfregn.c @@ -4,7 +4,7 @@ * Module Name: evxfregn - External Interfaces, ACPI Operation Regions and * Address Spaces. * - * Copyright (C) 2000 - 2019, Intel Corp. 
+ * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exconcat.c b/drivers/acpi/acpica/exconcat.c index c7af07566b7b..43711412722f 100644 --- a/drivers/acpi/acpica/exconcat.c +++ b/drivers/acpi/acpica/exconcat.c @@ -3,7 +3,7 @@ * * Module Name: exconcat - Concatenate-type AML operators * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c index 46a8baf28bd0..68efd704e2dc 100644 --- a/drivers/acpi/acpica/exconfig.c +++ b/drivers/acpi/acpica/exconfig.c @@ -3,7 +3,7 @@ * * Module Name: exconfig - Namespace reconfiguration (Load/Unload opcodes) * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c index ca2966bacb50..50c7aad2e86d 100644 --- a/drivers/acpi/acpica/exconvrt.c +++ b/drivers/acpi/acpica/exconvrt.c @@ -3,7 +3,7 @@ * * Module Name: exconvrt - Object conversion routines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c index f376fc00064e..a17482428b46 100644 --- a/drivers/acpi/acpica/excreate.c +++ b/drivers/acpi/acpica/excreate.c @@ -3,7 +3,7 @@ * * Module Name: excreate - Named object creation * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c index b1aeec8cac55..a5223dcaee70 100644 --- a/drivers/acpi/acpica/exdebug.c +++ b/drivers/acpi/acpica/exdebug.c @@ -3,7 +3,7 @@ * * Module Name: exdebug - Support for stores to the AML Debug Object * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c index a9bc938a3b55..47a4d9a40d6b 100644 --- a/drivers/acpi/acpica/exdump.c +++ b/drivers/acpi/acpica/exdump.c @@ -3,7 +3,7 @@ * * Module Name: exdump - Interpreter debug output routines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c index d3d2dbfba680..e85eb31e5075 100644 --- a/drivers/acpi/acpica/exfield.c +++ b/drivers/acpi/acpica/exfield.c @@ -3,7 +3,7 @@ * * Module Name: exfield - AML execution - field_unit read/write * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ @@ -96,7 +96,8 @@ acpi_ex_get_protocol_buffer_length(u32 protocol_id, u32 *return_length) * RETURN: Status * * DESCRIPTION: Read from a named field. Returns either an Integer or a - * Buffer, depending on the size of the field. + * Buffer, depending on the size of the field and whether the + * field was created by the create_field() operator. 
* ******************************************************************************/ @@ -154,12 +155,17 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state, * the use of arithmetic operators on the returned value if the * field size is equal or smaller than an Integer. * + * However, all buffer fields created by the create_field() operator need to + * remain buffers to match other AML interpreter implementations. + * * Note: Field.length is in bits. */ buffer_length = (acpi_size)ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->field.bit_length); - if (buffer_length > acpi_gbl_integer_byte_width) { + if (buffer_length > acpi_gbl_integer_byte_width || + (obj_desc->common.type == ACPI_TYPE_BUFFER_FIELD && + obj_desc->buffer_field.is_create_field)) { /* Field is too large for an Integer, create a Buffer instead */ diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c index 95a0dcb4f7b9..ade35ff1c7ba 100644 --- a/drivers/acpi/acpica/exfldio.c +++ b/drivers/acpi/acpica/exfldio.c @@ -3,7 +3,7 @@ * * Module Name: exfldio - Aml Field I/O * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c index 60e854965af9..717e3998fd77 100644 --- a/drivers/acpi/acpica/exmisc.c +++ b/drivers/acpi/acpica/exmisc.c @@ -3,7 +3,7 @@ * * Module Name: exmisc - ACPI AML (p-code) execution - specific opcodes * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c index 775cd62af5b3..9ff247cba571 100644 --- a/drivers/acpi/acpica/exmutex.c +++ b/drivers/acpi/acpica/exmutex.c @@ -3,7 +3,7 @@ * * Module Name: exmutex - ASL Mutex Acquire/Release functions * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c index 6b76be5212a4..74f8b0d0452b 100644 --- a/drivers/acpi/acpica/exnames.c +++ b/drivers/acpi/acpica/exnames.c @@ -3,7 +3,7 @@ * * Module Name: exnames - interpreter/scanner name load/execute * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c index 06e35ea09823..a46d685a3ffc 100644 --- a/drivers/acpi/acpica/exoparg1.c +++ b/drivers/acpi/acpica/exoparg1.c @@ -3,7 +3,7 @@ * * Module Name: exoparg1 - AML execution - opcodes with 1 argument * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c index 5e4a31a11df4..03241d18ac1d 100644 --- a/drivers/acpi/acpica/exoparg2.c +++ b/drivers/acpi/acpica/exoparg2.c @@ -3,7 +3,7 @@ * * Module Name: exoparg2 - AML execution - opcodes with 2 arguments * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c index a4ebce417930..c8d0d75fc450 100644 --- a/drivers/acpi/acpica/exoparg3.c +++ b/drivers/acpi/acpica/exoparg3.c @@ -3,7 +3,7 @@ * * Module Name: exoparg3 - AML execution - opcodes with 3 arguments * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c index 31385a0b2dab..55d0fa056fe7 100644 --- a/drivers/acpi/acpica/exoparg6.c +++ b/drivers/acpi/acpica/exoparg6.c @@ -3,7 +3,7 @@ * * Module Name: exoparg6 - AML execution - opcodes with 6 arguments * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c index 728d752f7adc..a4e306690a21 100644 --- a/drivers/acpi/acpica/exprep.c +++ b/drivers/acpi/acpica/exprep.c @@ -3,7 +3,7 @@ * * Module Name: exprep - ACPI AML field prep utilities * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c index c08521194b29..d15a66de26c0 100644 --- a/drivers/acpi/acpica/exregion.c +++ b/drivers/acpi/acpica/exregion.c @@ -3,7 +3,7 @@ * * Module Name: exregion - ACPI default op_region (address space) handlers * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c index b223d01e6bf8..3e4018678c09 100644 --- a/drivers/acpi/acpica/exresnte.c +++ b/drivers/acpi/acpica/exresnte.c @@ -3,7 +3,7 @@ * * Module Name: exresnte - AML Interpreter object resolution * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c index 36da5c0ef69c..912a078c60a4 100644 --- a/drivers/acpi/acpica/exresolv.c +++ b/drivers/acpi/acpica/exresolv.c @@ -3,7 +3,7 @@ * * Module Name: exresolv - AML Interpreter object resolution * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c index bdfe4d33b483..4d1b22971d58 100644 --- a/drivers/acpi/acpica/exresop.c +++ b/drivers/acpi/acpica/exresop.c @@ -3,7 +3,7 @@ * * Module Name: exresop - AML Interpreter operand/object resolution * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exserial.c b/drivers/acpi/acpica/exserial.c index c5aa4b0deb70..760bc7cef55a 100644 --- a/drivers/acpi/acpica/exserial.c +++ b/drivers/acpi/acpica/exserial.c @@ -3,7 +3,7 @@ * * Module Name: exserial - field_unit support for serial address spaces * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c index 7f3c3571c292..3adc0a29d890 100644 --- a/drivers/acpi/acpica/exstore.c +++ b/drivers/acpi/acpica/exstore.c @@ -3,7 +3,7 @@ * * Module Name: exstore - AML Interpreter object store support * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c index 4e43c8277f07..8c34f4e2ab8f 100644 --- a/drivers/acpi/acpica/exstoren.c +++ b/drivers/acpi/acpica/exstoren.c @@ -4,7 +4,7 @@ * Module Name: exstoren - AML Interpreter object store support, * Store to Node (namespace object) * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c index dc9e2b1c1ad9..dc66696080a5 100644 --- a/drivers/acpi/acpica/exstorob.c +++ b/drivers/acpi/acpica/exstorob.c @@ -3,7 +3,7 @@ * * Module Name: exstorob - AML object store support, store to object * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c index a538f7799b78..f329b01672bb 100644 --- a/drivers/acpi/acpica/exsystem.c +++ b/drivers/acpi/acpica/exsystem.c @@ -3,7 +3,7 @@ * * Module Name: exsystem - Interface to OS services * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/extrace.c b/drivers/acpi/acpica/extrace.c index db7f93ca539f..832a47885b99 100644 --- a/drivers/acpi/acpica/extrace.c +++ b/drivers/acpi/acpica/extrace.c @@ -3,7 +3,7 @@ * * Module Name: extrace - Support for interpreter execution tracing * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c index 75380be1c2ef..8fefa6feac2f 100644 --- a/drivers/acpi/acpica/exutils.c +++ b/drivers/acpi/acpica/exutils.c @@ -3,7 +3,7 @@ * * Module Name: exutils - interpreter/scanner utilities * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c index 926f7e080f22..9b9aac27ff7e 100644 --- a/drivers/acpi/acpica/hwacpi.c +++ b/drivers/acpi/acpica/hwacpi.c @@ -3,7 +3,7 @@ * * Module Name: hwacpi - ACPI Hardware Initialization/Mode Interface * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c index dee3affaca49..d9be5d0545d4 100644 --- a/drivers/acpi/acpica/hwesleep.c +++ b/drivers/acpi/acpica/hwesleep.c @@ -4,7 +4,7 @@ * Name: hwesleep.c - ACPI Hardware Sleep/Wake Support functions for the * extended FADT-V5 sleep registers. * - * Copyright (C) 2000 - 2019, Intel Corp. 
+ * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c index 565bd3f29f31..1b4252bdcd0b 100644 --- a/drivers/acpi/acpica/hwgpe.c +++ b/drivers/acpi/acpica/hwgpe.c @@ -3,7 +3,7 @@ * * Module Name: hwgpe - Low level GPE enable/disable/clear functions * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c index b62db8ec446f..243a25add28f 100644 --- a/drivers/acpi/acpica/hwsleep.c +++ b/drivers/acpi/acpica/hwsleep.c @@ -4,7 +4,7 @@ * Name: hwsleep.c - ACPI Hardware Sleep/Wake Support functions for the * original/legacy sleep/PM registers. * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c index 2fb9f75d71c5..07473ddfa9a9 100644 --- a/drivers/acpi/acpica/hwtimer.c +++ b/drivers/acpi/acpica/hwtimer.c @@ -3,7 +3,7 @@ * * Name: hwtimer.c - ACPI Power Management Timer Interface * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c index cd576153257c..4d94861e6093 100644 --- a/drivers/acpi/acpica/hwvalid.c +++ b/drivers/acpi/acpica/hwvalid.c @@ -3,7 +3,7 @@ * * Module Name: hwvalid - I/O request validation * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c index c4fd97104024..134dbfadcd15 100644 --- a/drivers/acpi/acpica/hwxface.c +++ b/drivers/acpi/acpica/hwxface.c @@ -3,7 +3,7 @@ * * Module Name: hwxface - Public ACPICA hardware interfaces * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c index 2919746c9041..a4b66f4b2714 100644 --- a/drivers/acpi/acpica/hwxfsleep.c +++ b/drivers/acpi/acpica/hwxfsleep.c @@ -3,7 +3,7 @@ * * Name: hwxfsleep.c - ACPI Hardware Sleep/Wake External Interfaces * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsarguments.c b/drivers/acpi/acpica/nsarguments.c index 0e97ed38973f..d5e8405e9d8f 100644 --- a/drivers/acpi/acpica/nsarguments.c +++ b/drivers/acpi/acpica/nsarguments.c @@ -3,7 +3,7 @@ * * Module Name: nsarguments - Validation of args for ACPI predefined methods * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c index c86d0770ed6e..c86c82939ebb 100644 --- a/drivers/acpi/acpica/nsconvert.c +++ b/drivers/acpi/acpica/nsconvert.c @@ -4,7 +4,7 @@ * Module Name: nsconvert - Object conversions for objects returned by * predefined methods * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c index 9ad340f644a1..994f0b556c60 100644 --- a/drivers/acpi/acpica/nsdump.c +++ b/drivers/acpi/acpica/nsdump.c @@ -3,7 +3,7 @@ * * Module Name: nsdump - table dumping routines for debug * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c index 73e5c83c8c9f..b691fe20e384 100644 --- a/drivers/acpi/acpica/nsdumpdv.c +++ b/drivers/acpi/acpica/nsdumpdv.c @@ -3,7 +3,7 @@ * * Module Name: nsdump - table dumping routines for debug * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c index 61e9dfc9fe8c..e16f6a0c2c3f 100644 --- a/drivers/acpi/acpica/nsinit.c +++ b/drivers/acpi/acpica/nsinit.c @@ -3,7 +3,7 @@ * * Module Name: nsinit - namespace initialization * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c index d7c4d6e8e21e..9ba17891edb6 100644 --- a/drivers/acpi/acpica/nsload.c +++ b/drivers/acpi/acpica/nsload.c @@ -3,7 +3,7 @@ * * Module Name: nsload - namespace loading/expanding/contracting procedures * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c index f16cf5e4742c..7e74a765e785 100644 --- a/drivers/acpi/acpica/nsparse.c +++ b/drivers/acpi/acpica/nsparse.c @@ -3,7 +3,7 @@ * * Module Name: nsparse - namespace interface to AML parser * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c index 2f9d93122d0c..0cea9c363ace 100644 --- a/drivers/acpi/acpica/nspredef.c +++ b/drivers/acpi/acpica/nspredef.c @@ -3,7 +3,7 @@ * * Module Name: nspredef - Validation of ACPI predefined methods and objects * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c index 9a80e3b23496..237b3ddeb075 100644 --- a/drivers/acpi/acpica/nsprepkg.c +++ b/drivers/acpi/acpica/nsprepkg.c @@ -3,7 +3,7 @@ * * Module Name: nsprepkg - Validation of package objects for predefined names * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c index be86fea8e4d4..90db2d85e7f5 100644 --- a/drivers/acpi/acpica/nsrepair.c +++ b/drivers/acpi/acpica/nsrepair.c @@ -3,7 +3,7 @@ * * Module Name: nsrepair - Repair for objects returned by predefined methods * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c index 663d85e0adba..125143c41bb8 100644 --- a/drivers/acpi/acpica/nsrepair2.c +++ b/drivers/acpi/acpica/nsrepair2.c @@ -4,7 +4,7 @@ * Module Name: nsrepair2 - Repair for objects returned by specific * predefined methods * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c index b8d007c84d32..e66abdab8f31 100644 --- a/drivers/acpi/acpica/nsutils.c +++ b/drivers/acpi/acpica/nsutils.c @@ -4,7 +4,7 @@ * Module Name: nsutils - Utilities for accessing ACPI namespace, accessing * parents and siblings and Scope manipulation * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c index ceea6af79d12..b7f3e8603ad8 100644 --- a/drivers/acpi/acpica/nswalk.c +++ b/drivers/acpi/acpica/nswalk.c @@ -3,7 +3,7 @@ * * Module Name: nswalk - Functions for walking the ACPI namespace * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c index 161e60ddfb69..984129dcaa0c 100644 --- a/drivers/acpi/acpica/nsxfname.c +++ b/drivers/acpi/acpica/nsxfname.c @@ -4,7 +4,7 @@ * Module Name: nsxfname - Public interfaces to the ACPI subsystem * ACPI Namespace oriented interfaces * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c index e62c7897fdf1..3b40db4ad9f3 100644 --- a/drivers/acpi/acpica/psargs.c +++ b/drivers/acpi/acpica/psargs.c @@ -3,7 +3,7 @@ * * Module Name: psargs - Parse AML opcode arguments * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c index 207805047bc4..3cf0687b9915 100644 --- a/drivers/acpi/acpica/psloop.c +++ b/drivers/acpi/acpica/psloop.c @@ -3,7 +3,7 @@ * * Module Name: psloop - Main AML parse loop * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c index ded2779fc8ea..2480c26c5171 100644 --- a/drivers/acpi/acpica/psobject.c +++ b/drivers/acpi/acpica/psobject.c @@ -3,7 +3,7 @@ * * Module Name: psobject - Support for parse objects * - * Copyright (C) 2000 - 2019, Intel Corp. 
+ * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c index 43775c5ce17c..28af49263ebf 100644 --- a/drivers/acpi/acpica/psopcode.c +++ b/drivers/acpi/acpica/psopcode.c @@ -3,7 +3,7 @@ * * Module Name: psopcode - Parser/Interpreter opcode information table * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c index 15e7563829f1..ab9327f6a63c 100644 --- a/drivers/acpi/acpica/psopinfo.c +++ b/drivers/acpi/acpica/psopinfo.c @@ -3,7 +3,7 @@ * * Module Name: psopinfo - AML opcode information functions and dispatch tables * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c index 9b386530ffbe..c780046bf294 100644 --- a/drivers/acpi/acpica/psparse.c +++ b/drivers/acpi/acpica/psparse.c @@ -3,7 +3,7 @@ * * Module Name: psparse - Parser top level AML parse routines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c index f153ca804740..fceb311995e9 100644 --- a/drivers/acpi/acpica/psscope.c +++ b/drivers/acpi/acpica/psscope.c @@ -3,7 +3,7 @@ * * Module Name: psscope - Parser scope stack management routines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c index 22d8a2becdd0..c8aef0694864 100644 --- a/drivers/acpi/acpica/pstree.c +++ b/drivers/acpi/acpica/pstree.c @@ -3,7 +3,7 @@ * * Module Name: pstree - Parser op tree manipulation/traversal/search * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c index 2512f584fa3c..00efae2f95ba 100644 --- a/drivers/acpi/acpica/psutils.c +++ b/drivers/acpi/acpica/psutils.c @@ -3,7 +3,7 @@ * * Module Name: psutils - Parser miscellaneous utilities (Parser only) * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c index cf91841297c2..0fe3adf6b0e5 100644 --- a/drivers/acpi/acpica/pswalk.c +++ b/drivers/acpi/acpica/pswalk.c @@ -3,7 +3,7 @@ * * Module Name: pswalk - Parser routines to walk parsed op tree(s) * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c index ee2ee2c858f2..1bbfc8def388 100644 --- a/drivers/acpi/acpica/psxface.c +++ b/drivers/acpi/acpica/psxface.c @@ -3,7 +3,7 @@ * * Module Name: psxface - Parser external interfaces * - * Copyright (C) 2000 - 2019, Intel Corp. 
+ * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c index 2cf36451e46f..523b1e9b98d4 100644 --- a/drivers/acpi/acpica/tbdata.c +++ b/drivers/acpi/acpica/tbdata.c @@ -3,7 +3,7 @@ * * Module Name: tbdata - Table manager data structure functions * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c index 0041bfba9abc..907edc5edba7 100644 --- a/drivers/acpi/acpica/tbfadt.c +++ b/drivers/acpi/acpica/tbfadt.c @@ -3,7 +3,7 @@ * * Module Name: tbfadt - FADT table utilities * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c index b2abb40023a6..56d81e490a5c 100644 --- a/drivers/acpi/acpica/tbfind.c +++ b/drivers/acpi/acpica/tbfind.c @@ -3,7 +3,7 @@ * * Module Name: tbfind - find table * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c index ef1ffd36ab3f..0bb15add2245 100644 --- a/drivers/acpi/acpica/tbinstal.c +++ b/drivers/acpi/acpica/tbinstal.c @@ -3,7 +3,7 @@ * * Module Name: tbinstal - ACPI table installation and removal * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c index 4764f849cb78..0b3494ad9a70 100644 --- a/drivers/acpi/acpica/tbprint.c +++ b/drivers/acpi/acpica/tbprint.c @@ -3,7 +3,7 @@ * * Module Name: tbprint - Table output utilities * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c index c5f0b8ec70cc..dfe1ac3ae34a 100644 --- a/drivers/acpi/acpica/tbutils.c +++ b/drivers/acpi/acpica/tbutils.c @@ -3,7 +3,7 @@ * * Module Name: tbutils - ACPI Table utilities * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c index 1640685bf4ae..f8403d480318 100644 --- a/drivers/acpi/acpica/tbxface.c +++ b/drivers/acpi/acpica/tbxface.c @@ -3,7 +3,7 @@ * * Module Name: tbxface - ACPI table-oriented external interfaces * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c index 0782acf85722..bcba993d4dac 100644 --- a/drivers/acpi/acpica/tbxfload.c +++ b/drivers/acpi/acpica/tbxfload.c @@ -3,7 +3,7 @@ * * Module Name: tbxfload - Table load/unload external interfaces * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c index e2859d09ca2e..0edc6ef5d46d 100644 --- a/drivers/acpi/acpica/tbxfroot.c +++ b/drivers/acpi/acpica/tbxfroot.c @@ -3,7 +3,7 @@ * * Module Name: tbxfroot - Find the root ACPI table (RSDT) * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c index bb260376bd59..99fa48722cf6 100644 --- a/drivers/acpi/acpica/utaddress.c +++ b/drivers/acpi/acpica/utaddress.c @@ -3,7 +3,7 @@ * * Module Name: utaddress - op_region address range check * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c index d64da4d9e8d0..303ab51b4fcf 100644 --- a/drivers/acpi/acpica/utalloc.c +++ b/drivers/acpi/acpica/utalloc.c @@ -3,7 +3,7 @@ * * Module Name: utalloc - local memory allocation routines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utascii.c b/drivers/acpi/acpica/utascii.c index f6cd7d4f698b..d78656d960e8 100644 --- a/drivers/acpi/acpica/utascii.c +++ b/drivers/acpi/acpica/utascii.c @@ -3,7 +3,7 @@ * * Module Name: utascii - Utility ascii functions * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utbuffer.c b/drivers/acpi/acpica/utbuffer.c index db897af1de05..f2ec427f4e29 100644 --- a/drivers/acpi/acpica/utbuffer.c +++ b/drivers/acpi/acpica/utbuffer.c @@ -3,7 +3,7 @@ * * Module Name: utbuffer - Buffer dump routines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c index 8533fce7fa93..1b03a2747401 100644 --- a/drivers/acpi/acpica/utcache.c +++ b/drivers/acpi/acpica/utcache.c @@ -3,7 +3,7 @@ * * Module Name: utcache - local cache allocation routines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c index 1fb8327f3c3b..41bdd0278dd8 100644 --- a/drivers/acpi/acpica/utcopy.c +++ b/drivers/acpi/acpica/utcopy.c @@ -3,7 +3,7 @@ * * Module Name: utcopy - Internal to external object translation utilities * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c index 5b169b5f0f1a..0c8cb0612414 100644 --- a/drivers/acpi/acpica/utdebug.c +++ b/drivers/acpi/acpica/utdebug.c @@ -3,7 +3,7 @@ * * Module Name: utdebug - Debug print/trace routines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c index 65beaa237669..befdd13b403b 100644 --- a/drivers/acpi/acpica/utdecode.c +++ b/drivers/acpi/acpica/utdecode.c @@ -3,7 +3,7 @@ * * Module Name: utdecode - Utility decoding routines (value-to-string) * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c index 558a9f3b0678..8180d1a458f5 100644 --- a/drivers/acpi/acpica/uteval.c +++ b/drivers/acpi/acpica/uteval.c @@ -3,7 +3,7 @@ * * Module Name: uteval - Object evaluation * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c index b0622ec4bb85..e6dcbdc3fc6e 100644 --- a/drivers/acpi/acpica/utglobal.c +++ b/drivers/acpi/acpica/utglobal.c @@ -3,7 +3,7 @@ * * Module Name: utglobal - Global variables for the ACPI subsystem * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/uthex.c b/drivers/acpi/acpica/uthex.c index b6da135d5f41..0e02f12513dc 100644 --- a/drivers/acpi/acpica/uthex.c +++ b/drivers/acpi/acpica/uthex.c @@ -3,7 +3,7 @@ * * Module Name: uthex -- Hex/ASCII support functions * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c index 30198c828ab6..3bb06935a2ad 100644 --- a/drivers/acpi/acpica/utids.c +++ b/drivers/acpi/acpica/utids.c @@ -3,7 +3,7 @@ * * Module Name: utids - support for device Ids - HID, UID, CID, SUB, CLS * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c index 6f33e7c72327..fdbc397c038d 100644 --- a/drivers/acpi/acpica/utinit.c +++ b/drivers/acpi/acpica/utinit.c @@ -3,7 +3,7 @@ * * Module Name: utinit - Common ACPI subsystem initialization * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c index 8b4ff11d617a..46be549539e7 100644 --- a/drivers/acpi/acpica/utlock.c +++ b/drivers/acpi/acpica/utlock.c @@ -3,7 +3,7 @@ * * Module Name: utlock - Reader/Writer lock interfaces * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c index eee97a902696..3e60bdac2200 100644 --- a/drivers/acpi/acpica/utobject.c +++ b/drivers/acpi/acpica/utobject.c @@ -3,7 +3,7 @@ * * Module Name: utobject - ACPI object create/delete/size/cache routines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c index ad2b218039d0..0a01c08dad8a 100644 --- a/drivers/acpi/acpica/utosi.c +++ b/drivers/acpi/acpica/utosi.c @@ -3,7 +3,7 @@ * * Module Name: utosi - Support for the _OSI predefined control method * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utpredef.c b/drivers/acpi/acpica/utpredef.c index 1b0f68f5ed8c..05fe3470fb93 100644 --- a/drivers/acpi/acpica/utpredef.c +++ b/drivers/acpi/acpica/utpredef.c @@ -3,7 +3,7 @@ * * Module Name: utpredef - support functions for predefined names * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c index 5839f2fa7400..a874dac7db5c 100644 --- a/drivers/acpi/acpica/utprint.c +++ b/drivers/acpi/acpica/utprint.c @@ -3,7 +3,7 @@ * * Module Name: utprint - Formatted printing routines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c index 14de4d15e618..d366be431a84 100644 --- a/drivers/acpi/acpica/uttrack.c +++ b/drivers/acpi/acpica/uttrack.c @@ -3,7 +3,7 @@ * * Module Name: uttrack - Memory allocation tracking routines (debug only) * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utuuid.c b/drivers/acpi/acpica/utuuid.c index 0a7cf8007643..b8039954b0d1 100644 --- a/drivers/acpi/acpica/utuuid.c +++ b/drivers/acpi/acpica/utuuid.c @@ -3,7 +3,7 @@ * * Module Name: utuuid -- UUID support functions * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c index f497c4b30e65..ca7c9f0144ef 100644 --- a/drivers/acpi/acpica/utxface.c +++ b/drivers/acpi/acpica/utxface.c @@ -3,7 +3,7 @@ * * Module Name: utxface - External interfaces, miscellaneous utility functions * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c index cf769e94fe0f..653e3bb20036 100644 --- a/drivers/acpi/acpica/utxfinit.c +++ b/drivers/acpi/acpica/utxfinit.c @@ -3,7 +3,7 @@ * * Module Name: utxfinit - External interfaces for ACPICA initialization * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
 * *****************************************************************************/ diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 8906c80175e6..103acbbfcf9a 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -1180,7 +1180,7 @@ static int ghes_probe(struct platform_device *ghes_dev) switch (generic->notify.type) { case ACPI_HEST_NOTIFY_POLLED: - timer_setup(&ghes->timer, ghes_poll_func, TIMER_DEFERRABLE); + timer_setup(&ghes->timer, ghes_poll_func, 0); ghes_add_timer(ghes); break; case ACPI_HEST_NOTIFY_EXTERNAL: diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 33f71983e001..6078064684c6 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -298,6 +298,59 @@ out: return status; } +struct iort_workaround_oem_info { + char oem_id[ACPI_OEM_ID_SIZE + 1]; + char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1]; + u32 oem_revision; +}; + +static bool apply_id_count_workaround; + +static struct iort_workaround_oem_info wa_info[] __initdata = { + { + .oem_id = "HISI ", + .oem_table_id = "HIP07 ", + .oem_revision = 0, + }, { + .oem_id = "HISI ", + .oem_table_id = "HIP08 ", + .oem_revision = 0, + } +}; + +static void __init +iort_check_id_count_workaround(struct acpi_table_header *tbl) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(wa_info); i++) { + if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) && + !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) && + wa_info[i].oem_revision == tbl->oem_revision) { + apply_id_count_workaround = true; + pr_warn(FW_BUG "ID count for ID mapping entry is wrong, applying workaround\n"); + break; + } + } +} + +static inline u32 iort_get_map_max(struct acpi_iort_id_mapping *map) +{ + u32 map_max = map->input_base + map->id_count; + + /* + * The IORT specification revision D (Section 3, table 4, page 9) defines + * Number of IDs as the number of IDs in the range minus one, but the + * IORT code ignored the "minus one", and some firmware did too, so + * apply a workaround here to stay compatible with both spec-compliant + * and non-spec-compliant firmware.
+ */ + if (apply_id_count_workaround) + map_max--; + + return map_max; +} + static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in, u32 *rid_out) { @@ -314,8 +367,7 @@ static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in, return -ENXIO; } - if (rid_in < map->input_base || - (rid_in >= map->input_base + map->id_count)) + if (rid_in < map->input_base || rid_in > iort_get_map_max(map)) return -ENXIO; *rid_out = map->output_base + (rid_in - map->input_base); @@ -1631,5 +1683,6 @@ void __init acpi_iort_init(void) return; } + iort_check_id_count_workaround(iort_table); iort_init_platform_devices(); } diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 8f0e0c8d8c3d..15cc7d5a6185 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -38,6 +38,8 @@ #define PREFIX "ACPI: " #define ACPI_BATTERY_VALUE_UNKNOWN 0xFFFFFFFF +#define ACPI_BATTERY_CAPACITY_VALID(capacity) \ + ((capacity) != 0 && (capacity) != ACPI_BATTERY_VALUE_UNKNOWN) #define ACPI_BATTERY_DEVICE_NAME "Battery" @@ -192,7 +194,8 @@ static int acpi_battery_is_charged(struct acpi_battery *battery) static bool acpi_battery_is_degraded(struct acpi_battery *battery) { - return battery->full_charge_capacity && battery->design_capacity && + return ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity) && + ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity) && battery->full_charge_capacity < battery->design_capacity; } @@ -214,7 +217,7 @@ static int acpi_battery_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { - int ret = 0; + int full_capacity = ACPI_BATTERY_VALUE_UNKNOWN, ret = 0; struct acpi_battery *battery = to_acpi_battery(psy); if (acpi_battery_present(battery)) { @@ -263,14 +266,14 @@ static int acpi_battery_get_property(struct power_supply *psy, break; case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN: - if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN) + if (!ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity)) ret = -ENODEV; else val->intval = battery->design_capacity * 1000; break; case POWER_SUPPLY_PROP_CHARGE_FULL: case POWER_SUPPLY_PROP_ENERGY_FULL: - if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN) + if (!ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity)) ret = -ENODEV; else val->intval = battery->full_charge_capacity * 1000; @@ -283,11 +286,17 @@ static int acpi_battery_get_property(struct power_supply *psy, val->intval = battery->capacity_now * 1000; break; case POWER_SUPPLY_PROP_CAPACITY: - if (battery->capacity_now && battery->full_charge_capacity) - val->intval = battery->capacity_now * 100/ - battery->full_charge_capacity; + if (ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity)) + full_capacity = battery->full_charge_capacity; + else if (ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity)) + full_capacity = battery->design_capacity; + + if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN || + full_capacity == ACPI_BATTERY_VALUE_UNKNOWN) + ret = -ENODEV; else - val->intval = 0; + val->intval = battery->capacity_now * 100/ + full_capacity; break; case POWER_SUPPLY_PROP_CAPACITY_LEVEL: if (battery->state & ACPI_BATTERY_STATE_CRITICAL) @@ -333,6 +342,20 @@ static enum power_supply_property charge_battery_props[] = { POWER_SUPPLY_PROP_SERIAL_NUMBER, }; +static enum power_supply_property charge_battery_full_cap_broken_props[] = { + POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_PRESENT, + 
POWER_SUPPLY_PROP_TECHNOLOGY, + POWER_SUPPLY_PROP_CYCLE_COUNT, + POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_CURRENT_NOW, + POWER_SUPPLY_PROP_CHARGE_NOW, + POWER_SUPPLY_PROP_MODEL_NAME, + POWER_SUPPLY_PROP_MANUFACTURER, + POWER_SUPPLY_PROP_SERIAL_NUMBER, +}; + static enum power_supply_property energy_battery_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_PRESENT, @@ -794,20 +817,34 @@ static void __exit battery_hook_exit(void) static int sysfs_add_battery(struct acpi_battery *battery) { struct power_supply_config psy_cfg = { .drv_data = battery, }; + bool full_cap_broken = false; + + if (!ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity) && + !ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity)) + full_cap_broken = true; if (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) { - battery->bat_desc.properties = charge_battery_props; - battery->bat_desc.num_properties = - ARRAY_SIZE(charge_battery_props); - } else if (battery->full_charge_capacity == 0) { - battery->bat_desc.properties = - energy_battery_full_cap_broken_props; - battery->bat_desc.num_properties = - ARRAY_SIZE(energy_battery_full_cap_broken_props); + if (full_cap_broken) { + battery->bat_desc.properties = + charge_battery_full_cap_broken_props; + battery->bat_desc.num_properties = + ARRAY_SIZE(charge_battery_full_cap_broken_props); + } else { + battery->bat_desc.properties = charge_battery_props; + battery->bat_desc.num_properties = + ARRAY_SIZE(charge_battery_props); + } } else { - battery->bat_desc.properties = energy_battery_props; - battery->bat_desc.num_properties = - ARRAY_SIZE(energy_battery_props); + if (full_cap_broken) { + battery->bat_desc.properties = + energy_battery_full_cap_broken_props; + battery->bat_desc.num_properties = + ARRAY_SIZE(energy_battery_full_cap_broken_props); + } else { + battery->bat_desc.properties = energy_battery_props; + battery->bat_desc.num_properties = + ARRAY_SIZE(energy_battery_props); + } } battery->bat_desc.name = acpi_device_bid(battery->device); diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c index b758b45737f5..f6925f16c4a2 100644 --- a/drivers/acpi/button.c +++ b/drivers/acpi/button.c @@ -122,6 +122,17 @@ static const struct dmi_system_id dmi_lid_quirks[] = { }, .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN, }, + { + /* + * Razer Blade Stealth 13 late 2019, notification of the LID device + * only happens on close, not on open and _LID always returns closed. 
+ */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Razer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Razer Blade Stealth 13 Late 2019"), + }, + .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN, + }, {} }; diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index 5e4a8860a9c0..b64c62bfcea5 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -1321,6 +1321,7 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on) */ static const struct acpi_device_id special_pm_ids[] = { {"PNP0C0B", }, /* Generic ACPI fan */ + {"INT1044", }, /* Fan for Tiger Lake generation */ {"INT3404", }, /* Fan */ {} }; diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c index eb58fc475a03..387f27ef3368 100644 --- a/drivers/acpi/dptf/dptf_power.c +++ b/drivers/acpi/dptf/dptf_power.c @@ -97,6 +97,7 @@ static int dptf_power_remove(struct platform_device *pdev) } static const struct acpi_device_id int3407_device_ids[] = { + {"INT1047", 0}, {"INT3407", 0}, {"", 0}, }; diff --git a/drivers/acpi/dptf/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c index 5c7a90186e3c..1ec7b6900662 100644 --- a/drivers/acpi/dptf/int340x_thermal.c +++ b/drivers/acpi/dptf/int340x_thermal.c @@ -13,6 +13,10 @@ #define INT3401_DEVICE 0X01 static const struct acpi_device_id int340x_thermal_device_ids[] = { + {"INT1040"}, + {"INT1043"}, + {"INT1044"}, + {"INT1047"}, {"INT3400"}, {"INT3401", INT3401_DEVICE}, {"INT3402"}, diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index d05be13c1022..08bc9751fe66 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -1053,28 +1053,20 @@ void acpi_ec_unblock_transactions(void) Event Management -------------------------------------------------------------------------- */ static struct acpi_ec_query_handler * -acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler) -{ - if (handler) - kref_get(&handler->kref); - return handler; -} - -static struct acpi_ec_query_handler * acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value) { struct acpi_ec_query_handler *handler; - bool found = false; mutex_lock(&ec->mutex); list_for_each_entry(handler, &ec->list, node) { if (value == handler->query_bit) { - found = true; - break; + kref_get(&handler->kref); + mutex_unlock(&ec->mutex); + return handler; } } mutex_unlock(&ec->mutex); - return found ? 
acpi_ec_get_query_handler(handler) : NULL; + return NULL; } static void acpi_ec_query_handler_release(struct kref *kref) diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c index 816b0803f7fb..aaf4e8f348cf 100644 --- a/drivers/acpi/fan.c +++ b/drivers/acpi/fan.c @@ -25,6 +25,7 @@ static int acpi_fan_remove(struct platform_device *pdev); static const struct acpi_device_id fan_device_ids[] = { {"PNP0C0B", 0}, + {"INT1044", 0}, {"INT3404", 0}, {"", 0}, }; @@ -44,12 +45,16 @@ static const struct dev_pm_ops acpi_fan_pm = { #define FAN_PM_OPS_PTR NULL #endif +#define ACPI_FPS_NAME_LEN 20 + struct acpi_fan_fps { u64 control; u64 trip_point; u64 speed; u64 noise_level; u64 power; + char name[ACPI_FPS_NAME_LEN]; + struct device_attribute dev_attr; }; struct acpi_fan_fif { @@ -265,6 +270,39 @@ static int acpi_fan_speed_cmp(const void *a, const void *b) return fps1->speed - fps2->speed; } +static ssize_t show_state(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct acpi_fan_fps *fps = container_of(attr, struct acpi_fan_fps, dev_attr); + int count; + + if (fps->control == 0xFFFFFFFF || fps->control > 100) + count = snprintf(buf, PAGE_SIZE, "not-defined:"); + else + count = snprintf(buf, PAGE_SIZE, "%lld:", fps->control); + + if (fps->trip_point == 0xFFFFFFFF || fps->trip_point > 9) + count += snprintf(&buf[count], PAGE_SIZE, "not-defined:"); + else + count += snprintf(&buf[count], PAGE_SIZE, "%lld:", fps->trip_point); + + if (fps->speed == 0xFFFFFFFF) + count += snprintf(&buf[count], PAGE_SIZE, "not-defined:"); + else + count += snprintf(&buf[count], PAGE_SIZE, "%lld:", fps->speed); + + if (fps->noise_level == 0xFFFFFFFF) + count += snprintf(&buf[count], PAGE_SIZE, "not-defined:"); + else + count += snprintf(&buf[count], PAGE_SIZE, "%lld:", fps->noise_level * 100); + + if (fps->power == 0xFFFFFFFF) + count += snprintf(&buf[count], PAGE_SIZE, "not-defined\n"); + else + count += snprintf(&buf[count], PAGE_SIZE, "%lld\n", fps->power); + + return count; +} + static int acpi_fan_get_fps(struct acpi_device *device) { struct acpi_fan *fan = acpi_driver_data(device); @@ -295,12 +333,13 @@ static int acpi_fan_get_fps(struct acpi_device *device) } for (i = 0; i < fan->fps_count; i++) { struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" }; - struct acpi_buffer fps = { sizeof(fan->fps[i]), &fan->fps[i] }; + struct acpi_buffer fps = { offsetof(struct acpi_fan_fps, name), + &fan->fps[i] }; status = acpi_extract_package(&obj->package.elements[i + 1], &format, &fps); if (ACPI_FAILURE(status)) { dev_err(&device->dev, "Invalid _FPS element\n"); - break; + goto err; } } @@ -308,6 +347,24 @@ static int acpi_fan_get_fps(struct acpi_device *device) sort(fan->fps, fan->fps_count, sizeof(*fan->fps), acpi_fan_speed_cmp, NULL); + for (i = 0; i < fan->fps_count; ++i) { + struct acpi_fan_fps *fps = &fan->fps[i]; + + snprintf(fps->name, ACPI_FPS_NAME_LEN, "state%d", i); + fps->dev_attr.show = show_state; + fps->dev_attr.store = NULL; + fps->dev_attr.attr.name = fps->name; + fps->dev_attr.attr.mode = 0444; + status = sysfs_create_file(&device->dev.kobj, &fps->dev_attr.attr); + if (status) { + int j; + + for (j = 0; j < i; ++j) + sysfs_remove_file(&device->dev.kobj, &fan->fps[j].dev_attr.attr); + break; + } + } + err: kfree(obj); return status; @@ -330,14 +387,20 @@ static int acpi_fan_probe(struct platform_device *pdev) platform_set_drvdata(pdev, fan); if (acpi_fan_is_acpi4(device)) { - if (acpi_fan_get_fif(device) || acpi_fan_get_fps(device)) - goto end; + result = acpi_fan_get_fif(device); + if 
(result) + return result; + + result = acpi_fan_get_fps(device); + if (result) + return result; + fan->acpi4 = true; } else { result = acpi_device_update_power(device, NULL); if (result) { dev_err(&device->dev, "Failed to set initial power state\n"); - goto end; + goto err_end; } } @@ -350,7 +413,7 @@ static int acpi_fan_probe(struct platform_device *pdev) &fan_cooling_ops); if (IS_ERR(cdev)) { result = PTR_ERR(cdev); - goto end; + goto err_end; } dev_dbg(&pdev->dev, "registered as cooling_device%d\n", cdev->id); @@ -365,10 +428,21 @@ static int acpi_fan_probe(struct platform_device *pdev) result = sysfs_create_link(&cdev->device.kobj, &pdev->dev.kobj, "device"); - if (result) + if (result) { dev_err(&pdev->dev, "Failed to create sysfs link 'device'\n"); + goto err_end; + } + + return 0; + +err_end: + if (fan->acpi4) { + int i; + + for (i = 0; i < fan->fps_count; ++i) + sysfs_remove_file(&device->dev.kobj, &fan->fps[i].dev_attr.attr); + } -end: return result; } @@ -376,6 +450,13 @@ static int acpi_fan_remove(struct platform_device *pdev) { struct acpi_fan *fan = platform_get_drvdata(pdev); + if (fan->acpi4) { + struct acpi_device *device = ACPI_COMPANION(&pdev->dev); + int i; + + for (i = 0; i < fan->fps_count; ++i) + sysfs_remove_file(&device->dev.kobj, &fan->fps[i].dev_attr.attr); + } sysfs_remove_link(&pdev->dev.kobj, "thermal_cooling"); sysfs_remove_link(&fan->cdev->device.kobj, "device"); thermal_cooling_device_unregister(fan->cdev); diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c index f31544d3656e..4ae93350b70d 100644 --- a/drivers/acpi/pptt.c +++ b/drivers/acpi/pptt.c @@ -98,11 +98,11 @@ static inline bool acpi_pptt_match_type(int table_type, int type) * * Return: The cache structure and the level we terminated with. */ -static int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr, - int local_level, - struct acpi_subtable_header *res, - struct acpi_pptt_cache **found, - int level, int type) +static unsigned int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr, + unsigned int local_level, + struct acpi_subtable_header *res, + struct acpi_pptt_cache **found, + unsigned int level, int type) { struct acpi_pptt_cache *cache; @@ -119,7 +119,7 @@ static int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr, if (*found != NULL && cache != *found) pr_warn("Found duplicate cache level/type unable to determine uniqueness\n"); - pr_debug("Found cache @ level %d\n", level); + pr_debug("Found cache @ level %u\n", level); *found = cache; /* * continue looking at this node's resource list @@ -132,16 +132,17 @@ static int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr, return local_level; } -static struct acpi_pptt_cache *acpi_find_cache_level(struct acpi_table_header *table_hdr, - struct acpi_pptt_processor *cpu_node, - int *starting_level, int level, - int type) +static struct acpi_pptt_cache * +acpi_find_cache_level(struct acpi_table_header *table_hdr, + struct acpi_pptt_processor *cpu_node, + unsigned int *starting_level, unsigned int level, + int type) { struct acpi_subtable_header *res; - int number_of_levels = *starting_level; + unsigned int number_of_levels = *starting_level; int resource = 0; struct acpi_pptt_cache *ret = NULL; - int local_level; + unsigned int local_level; /* walk down from processor node */ while ((res = acpi_get_pptt_resource(table_hdr, cpu_node, resource))) { @@ -321,12 +322,12 @@ static struct acpi_pptt_cache *acpi_find_cache_node(struct acpi_table_header *ta unsigned int level, struct acpi_pptt_processor **node) { - int 
total_levels = 0; + unsigned int total_levels = 0; struct acpi_pptt_cache *found = NULL; struct acpi_pptt_processor *cpu_node; u8 acpi_type = acpi_cache_type(type); - pr_debug("Looking for CPU %d's level %d cache type %d\n", + pr_debug("Looking for CPU %d's level %u cache type %d\n", acpi_cpu_id, level, acpi_type); cpu_node = acpi_find_processor_node(table_hdr, acpi_cpu_id); diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 2ae95df2e74f..dcc289e30166 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -299,164 +299,24 @@ static int acpi_processor_get_power_info_default(struct acpi_processor *pr) static int acpi_processor_get_power_info_cst(struct acpi_processor *pr) { - acpi_status status; - u64 count; - int current_count; - int i, ret = 0; - struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; - union acpi_object *cst; + int ret; if (nocst) return -ENODEV; - current_count = 0; - - status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer); - if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n")); - return -ENODEV; - } - - cst = buffer.pointer; - - /* There must be at least 2 elements */ - if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) { - pr_err("not enough elements in _CST\n"); - ret = -EFAULT; - goto end; - } - - count = cst->package.elements[0].integer.value; + ret = acpi_processor_evaluate_cst(pr->handle, pr->id, &pr->power); + if (ret) + return ret; - /* Validate number of power states. */ - if (count < 1 || count != cst->package.count - 1) { - pr_err("count given by _CST is not valid\n"); - ret = -EFAULT; - goto end; - } + /* + * It is expected that there will be at least 2 states, C1 and + * something else (C2 or C3), so fail if that is not the case. + */ + if (pr->power.count < 2) + return -EFAULT; - /* Tell driver that at least _CST is supported. */ pr->flags.has_cst = 1; - - for (i = 1; i <= count; i++) { - union acpi_object *element; - union acpi_object *obj; - struct acpi_power_register *reg; - struct acpi_processor_cx cx; - - memset(&cx, 0, sizeof(cx)); - - element = &(cst->package.elements[i]); - if (element->type != ACPI_TYPE_PACKAGE) - continue; - - if (element->package.count != 4) - continue; - - obj = &(element->package.elements[0]); - - if (obj->type != ACPI_TYPE_BUFFER) - continue; - - reg = (struct acpi_power_register *)obj->buffer.pointer; - - if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO && - (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) - continue; - - /* There should be an easy way to extract an integer... */ - obj = &(element->package.elements[1]); - if (obj->type != ACPI_TYPE_INTEGER) - continue; - - cx.type = obj->integer.value; - /* - * Some buggy BIOSes won't list C1 in _CST - - * Let acpi_processor_get_power_info_default() handle them later - */ - if (i == 1 && cx.type != ACPI_STATE_C1) - current_count++; - - cx.address = reg->address; - cx.index = current_count + 1; - - cx.entry_method = ACPI_CSTATE_SYSTEMIO; - if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) { - if (acpi_processor_ffh_cstate_probe - (pr->id, &cx, reg) == 0) { - cx.entry_method = ACPI_CSTATE_FFH; - } else if (cx.type == ACPI_STATE_C1) { - /* - * C1 is a special case where FIXED_HARDWARE - * can be handled in non-MWAIT way as well. - * In that case, save this _CST entry info. - * Otherwise, ignore this info and continue. 
- */ - cx.entry_method = ACPI_CSTATE_HALT; - snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT"); - } else { - continue; - } - if (cx.type == ACPI_STATE_C1 && - (boot_option_idle_override == IDLE_NOMWAIT)) { - /* - * In most cases the C1 space_id obtained from - * _CST object is FIXED_HARDWARE access mode. - * But when the option of idle=halt is added, - * the entry_method type should be changed from - * CSTATE_FFH to CSTATE_HALT. - * When the option of idle=nomwait is added, - * the C1 entry_method type should be - * CSTATE_HALT. - */ - cx.entry_method = ACPI_CSTATE_HALT; - snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT"); - } - } else { - snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x", - cx.address); - } - - if (cx.type == ACPI_STATE_C1) { - cx.valid = 1; - } - - obj = &(element->package.elements[2]); - if (obj->type != ACPI_TYPE_INTEGER) - continue; - - cx.latency = obj->integer.value; - - obj = &(element->package.elements[3]); - if (obj->type != ACPI_TYPE_INTEGER) - continue; - - current_count++; - memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx)); - - /* - * We support total ACPI_PROCESSOR_MAX_POWER - 1 - * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1) - */ - if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) { - pr_warn("Limiting number of power states to max (%d)\n", - ACPI_PROCESSOR_MAX_POWER); - pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n"); - break; - } - } - - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n", - current_count)); - - /* Validate number of power states discovered */ - if (current_count < 2) - ret = -EFAULT; - - end: - kfree(buffer.pointer); - - return ret; + return 0; } static void acpi_processor_power_verify_c3(struct acpi_processor *pr, @@ -909,7 +769,6 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr) static inline void acpi_processor_cstate_first_run_checks(void) { - acpi_status status; static int first_run; if (first_run) @@ -921,13 +780,10 @@ static inline void acpi_processor_cstate_first_run_checks(void) max_cstate); first_run++; - if (acpi_gbl_FADT.cst_control && !nocst) { - status = acpi_os_write_port(acpi_gbl_FADT.smi_command, - acpi_gbl_FADT.cst_control, 8); - if (ACPI_FAILURE(status)) - ACPI_EXCEPTION((AE_INFO, status, - "Notifying BIOS of _CST ability failed")); - } + if (nocst) + return; + + acpi_processor_claim_cst_control(); } #else diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 6747a279621b..439880629839 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -61,8 +61,11 @@ static struct notifier_block tts_notifier = { static int acpi_sleep_prepare(u32 acpi_state) { #ifdef CONFIG_ACPI_SLEEP + unsigned long acpi_wakeup_address; + /* do we have a wakeup address for S2 and S3? 
*/ if (acpi_state == ACPI_STATE_S3) { + acpi_wakeup_address = acpi_get_wakeup_address(); if (!acpi_wakeup_address) return -EFAULT; acpi_set_waking_vector(acpi_wakeup_address); diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index 31014c7d3793..419f814d596a 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -303,6 +303,22 @@ static const struct dmi_system_id video_detect_dmi_table[] = { }, }, { + .callback = video_detect_force_native, + .ident = "Lenovo E41-25", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "81FS"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "Lenovo E41-45", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "82BK"), + }, + }, + { /* https://bugzilla.redhat.com/show_bug.cgi?id=1217249 */ .callback = video_detect_force_native, .ident = "Apple MacBook Pro 12,1", @@ -336,6 +352,11 @@ static const struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Precision 7510"), }, }, + + /* + * Desktops which falsely report a backlight and which our heuristics + * for this do not catch. + */ { .callback = video_detect_force_none, .ident = "Dell OptiPlex 9020M", @@ -344,6 +365,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 9020M"), }, }, + { + .callback = video_detect_force_none, + .ident = "MSI MS-7721", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MSI"), + DMI_MATCH(DMI_PRODUCT_NAME, "MS-7721"), + }, + }, { }, }; diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c index 46dc54d18f0b..2a04e8abd397 100644 --- a/drivers/ata/acard-ahci.c +++ b/drivers/ata/acard-ahci.c @@ -218,7 +218,6 @@ static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc) void *cmd_tbl; u32 opts; const u32 cmd_fis_len = 5; /* five dwords */ - unsigned int n_elem; /* * Fill in command table information. First, the header, @@ -232,9 +231,8 @@ static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc) memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len); } - n_elem = 0; if (qc->flags & ATA_QCFLAG_DMAMAP) - n_elem = acard_ahci_fill_sg(qc, cmd_tbl); + acard_ahci_fill_sg(qc, cmd_tbl); /* * Fill in command slot information. 
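The button.c and video_detect.c hunks above both extend dmi_system_id quirk tables. For context, a minimal sketch of how such a table is typically declared and queried; everything below is invented for illustration and is not part of any patch in this series, and only dmi_first_match(), DMI_MATCH() and the (void *)(long) driver_data convention mirror real kernel usage:

#include <linux/dmi.h>

#define EXAMPLE_FORCE_QUIRK 1 /* hypothetical driver_data value */

static const struct dmi_system_id example_quirks[] = {
	{
		/* Both substrings must match, which keeps the entry narrow. */
		.ident = "Example Desktop",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Board-1"),
		},
		.driver_data = (void *)(long)EXAMPLE_FORCE_QUIRK,
	},
	{} /* empty entry terminates the table */
};

static long example_get_quirk(void)
{
	/* Returns the first entry whose .matches fields all hit, or NULL. */
	const struct dmi_system_id *id = dmi_first_match(example_quirks);

	return id ? (long)id->driver_data : 0;
}

DMI_MATCH() performs a substring match against the firmware-provided DMI strings, which is why the quirks above pair DMI_SYS_VENDOR with DMI_PRODUCT_NAME rather than matching on a single field.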
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c index 66a570d0da83..6853dbb4131d 100644 --- a/drivers/ata/ahci_brcm.c +++ b/drivers/ata/ahci_brcm.c @@ -73,6 +73,7 @@ enum brcm_ahci_version { BRCM_SATA_BCM7425 = 1, BRCM_SATA_BCM7445, BRCM_SATA_NSP, + BRCM_SATA_BCM7216, }; enum brcm_ahci_quirks { @@ -337,24 +338,39 @@ static const struct ata_port_info ahci_brcm_port_info = { .port_ops = &ahci_brcm_platform_ops, }; -#ifdef CONFIG_PM_SLEEP static int brcm_ahci_suspend(struct device *dev) { struct ata_host *host = dev_get_drvdata(dev); struct ahci_host_priv *hpriv = host->private_data; struct brcm_ahci_priv *priv = hpriv->plat_data; + int ret; brcm_sata_phys_disable(priv); - return ahci_platform_suspend(dev); + if (IS_ENABLED(CONFIG_PM_SLEEP)) + ret = ahci_platform_suspend(dev); + else + ret = 0; + + if (priv->version != BRCM_SATA_BCM7216) + reset_control_assert(priv->rcdev); + + return ret; } -static int brcm_ahci_resume(struct device *dev) +static int __maybe_unused brcm_ahci_resume(struct device *dev) { struct ata_host *host = dev_get_drvdata(dev); struct ahci_host_priv *hpriv = host->private_data; struct brcm_ahci_priv *priv = hpriv->plat_data; - int ret; + int ret = 0; + + if (priv->version == BRCM_SATA_BCM7216) + ret = reset_control_reset(priv->rcdev); + else + ret = reset_control_deassert(priv->rcdev); + if (ret) + return ret; /* Make sure clocks are turned on before re-configuration */ ret = ahci_platform_enable_clks(hpriv); @@ -393,7 +409,6 @@ out_disable_phys: ahci_platform_disable_clks(hpriv); return ret; } -#endif static struct scsi_host_template ahci_platform_sht = { AHCI_SHT(DRV_NAME), @@ -404,6 +419,7 @@ static const struct of_device_id ahci_of_match[] = { {.compatible = "brcm,bcm7445-ahci", .data = (void *)BRCM_SATA_BCM7445}, {.compatible = "brcm,bcm63138-ahci", .data = (void *)BRCM_SATA_BCM7445}, {.compatible = "brcm,bcm-nsp-ahci", .data = (void *)BRCM_SATA_NSP}, + {.compatible = "brcm,bcm7216-ahci", .data = (void *)BRCM_SATA_BCM7216}, {}, }; MODULE_DEVICE_TABLE(of, ahci_of_match); @@ -412,6 +428,7 @@ static int brcm_ahci_probe(struct platform_device *pdev) { const struct of_device_id *of_id; struct device *dev = &pdev->dev; + const char *reset_name = NULL; struct brcm_ahci_priv *priv; struct ahci_host_priv *hpriv; struct resource *res; @@ -433,16 +450,19 @@ static int brcm_ahci_probe(struct platform_device *pdev) if (IS_ERR(priv->top_ctrl)) return PTR_ERR(priv->top_ctrl); - /* Reset is optional depending on platform */ - priv->rcdev = devm_reset_control_get(&pdev->dev, "ahci"); - if (!IS_ERR_OR_NULL(priv->rcdev)) - reset_control_deassert(priv->rcdev); + /* Reset is optional depending on platform and named differently */ + if (priv->version == BRCM_SATA_BCM7216) + reset_name = "rescal"; + else + reset_name = "ahci"; + + priv->rcdev = devm_reset_control_get_optional(&pdev->dev, reset_name); + if (IS_ERR(priv->rcdev)) + return PTR_ERR(priv->rcdev); hpriv = ahci_platform_get_resources(pdev, 0); - if (IS_ERR(hpriv)) { - ret = PTR_ERR(hpriv); - goto out_reset; - } + if (IS_ERR(hpriv)) + return PTR_ERR(hpriv); hpriv->plat_data = priv; hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP | AHCI_HFLAG_NO_WRITE_TO_RO; @@ -459,6 +479,13 @@ static int brcm_ahci_probe(struct platform_device *pdev) break; } + if (priv->version == BRCM_SATA_BCM7216) + ret = reset_control_reset(priv->rcdev); + else + ret = reset_control_deassert(priv->rcdev); + if (ret) + return ret; + ret = ahci_platform_enable_clks(hpriv); if (ret) goto out_reset; @@ -500,7 +527,7 @@ out_disable_phys: out_disable_clks: 
ahci_platform_disable_clks(hpriv); out_reset: - if (!IS_ERR_OR_NULL(priv->rcdev)) + if (priv->version != BRCM_SATA_BCM7216) reset_control_assert(priv->rcdev); return ret; } @@ -521,11 +548,26 @@ static int brcm_ahci_remove(struct platform_device *pdev) return 0; } +static void brcm_ahci_shutdown(struct platform_device *pdev) +{ + int ret; + + /* All resource releasing happens via devres but, unlike in a proper + * remove, the device is not going away, so explicit power management + * via brcm_ahci_suspend() is appropriate here. + */ + ret = brcm_ahci_suspend(&pdev->dev); + if (ret) + dev_err(&pdev->dev, "failed to shutdown\n"); +} + static SIMPLE_DEV_PM_OPS(ahci_brcm_pm_ops, brcm_ahci_suspend, brcm_ahci_resume); static struct platform_driver brcm_ahci_driver = { .probe = brcm_ahci_probe, .remove = brcm_ahci_remove, + .shutdown = brcm_ahci_shutdown, .driver = { .name = DRV_NAME, .of_match_table = ahci_of_match, diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c index 135173c8d138..391dff0f25a2 100644 --- a/drivers/ata/pata_arasan_cf.c +++ b/drivers/ata/pata_arasan_cf.c @@ -824,7 +824,7 @@ static int arasan_cf_probe(struct platform_device *pdev) quirk |= CF_BROKEN_MWDMA | CF_BROKEN_UDMA; acdev->pbase = res->start; - acdev->vbase = devm_ioremap_nocache(&pdev->dev, res->start, + acdev->vbase = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!acdev->vbase) { dev_warn(&pdev->dev, "ioremap fail\n"); diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c index 1bfd0154dad5..e47a28271f5b 100644 --- a/drivers/ata/pata_macio.c +++ b/drivers/ata/pata_macio.c @@ -979,7 +979,7 @@ static void pata_macio_invariants(struct pata_macio_priv *priv) priv->aapl_bus_id = bidp ? *bidp : 0; /* Fixup missing Apple bus ID in case of media-bay */ - if (priv->mediabay && bidp == 0) + if (priv->mediabay && !bidp) priv->aapl_bus_id = 1; } diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c index d3d851b014a3..bd87476ab481 100644 --- a/drivers/ata/pata_octeon_cf.c +++ b/drivers/ata/pata_octeon_cf.c @@ -891,7 +891,7 @@ static int octeon_cf_probe(struct platform_device *pdev) of_node_put(dma_node); return -EINVAL; } - cf_port->dma_base = (u64)devm_ioremap_nocache(&pdev->dev, res_dma->start, + cf_port->dma_base = (u64)devm_ioremap(&pdev->dev, res_dma->start, resource_size(res_dma)); if (!cf_port->dma_base) { of_node_put(dma_node); @@ -909,7 +909,7 @@ static int octeon_cf_probe(struct platform_device *pdev) if (!res_cs1) return -EINVAL; - cs1 = devm_ioremap_nocache(&pdev->dev, res_cs1->start, + cs1 = devm_ioremap(&pdev->dev, res_cs1->start, resource_size(res_cs1)); if (!cs1) return rv; @@ -925,7 +925,7 @@ static int octeon_cf_probe(struct platform_device *pdev) if (!res_cs0) return -EINVAL; - cs0 = devm_ioremap_nocache(&pdev->dev, res_cs0->start, + cs0 = devm_ioremap(&pdev->dev, res_cs0->start, resource_size(res_cs0)); if (!cs0) return rv; diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c index deae466395de..479c4b29b856 100644 --- a/drivers/ata/pata_rb532_cf.c +++ b/drivers/ata/pata_rb532_cf.c @@ -140,7 +140,7 @@ static int rb532_pata_driver_probe(struct platform_device *pdev) info->gpio_line = gpiod; info->irq = irq; - info->iobase = devm_ioremap_nocache(&pdev->dev, res->start, + info->iobase = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!info->iobase) return -ENOMEM; diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c index 9d0d65efcd94..17d47ad03ab7 100644 --- a/drivers/atm/eni.c +++
b/drivers/atm/eni.c @@ -31,12 +31,6 @@ #include "suni.h" #include "eni.h" -#if !defined(__i386__) && !defined(__x86_64__) -#ifndef ioremap_nocache -#define ioremap_nocache(X,Y) ioremap(X,Y) -#endif -#endif - /* * TODO: * @@ -1725,7 +1719,7 @@ static int eni_do_init(struct atm_dev *dev) } printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d,base=0x%lx,irq=%d,", dev->number,pci_dev->revision,real_base,eni_dev->irq); - if (!(base = ioremap_nocache(real_base,MAP_MAX_SIZE))) { + if (!(base = ioremap(real_base,MAP_MAX_SIZE))) { printk("\n"); printk(KERN_ERR DEV_LABEL "(itf %d): can't set up page " "mapping\n",dev->number); diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 48616f358854..16134a69bf6f 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -1006,8 +1006,10 @@ int __pm_runtime_idle(struct device *dev, int rpmflags) int retval; if (rpmflags & RPM_GET_PUT) { - if (!atomic_dec_and_test(&dev->power.usage_count)) + if (!atomic_dec_and_test(&dev->power.usage_count)) { + trace_rpm_usage_rcuidle(dev, rpmflags); return 0; + } } might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); @@ -1038,8 +1040,10 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags) int retval; if (rpmflags & RPM_GET_PUT) { - if (!atomic_dec_and_test(&dev->power.usage_count)) + if (!atomic_dec_and_test(&dev->power.usage_count)) { + trace_rpm_usage_rcuidle(dev, rpmflags); return 0; + } } might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); @@ -1101,6 +1105,7 @@ int pm_runtime_get_if_in_use(struct device *dev) retval = dev->power.disable_depth > 0 ? -EINVAL : dev->power.runtime_status == RPM_ACTIVE && atomic_inc_not_zero(&dev->power.usage_count); + trace_rpm_usage_rcuidle(dev, 0); spin_unlock_irqrestore(&dev->power.lock, flags); return retval; } @@ -1434,6 +1439,8 @@ void pm_runtime_allow(struct device *dev) dev->power.runtime_auto = true; if (atomic_dec_and_test(&dev->power.usage_count)) rpm_idle(dev, RPM_AUTO | RPM_ASYNC); + else + trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC); out: spin_unlock_irq(&dev->power.lock); @@ -1501,6 +1508,8 @@ static void update_autosuspend(struct device *dev, int old_delay, int old_use) if (!old_use || old_delay >= 0) { atomic_inc(&dev->power.usage_count); rpm_resume(dev, 0); + } else { + trace_rpm_usage_rcuidle(dev, 0); } } diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 70a9edb5f525..27f3e60608e5 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -1125,6 +1125,9 @@ static void *wakeup_sources_stats_seq_next(struct seq_file *m, break; } + if (!next_ws) + print_wakeup_source_stats(m, &deleted_ws); + return next_ws; } diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c index ac9b31c57967..008f8da69d97 100644 --- a/drivers/base/regmap/regmap-i2c.c +++ b/drivers/base/regmap/regmap-i2c.c @@ -43,7 +43,7 @@ static int regmap_smbus_byte_reg_write(void *context, unsigned int reg, return i2c_smbus_write_byte_data(i2c, reg, val); } -static struct regmap_bus regmap_smbus_byte = { +static const struct regmap_bus regmap_smbus_byte = { .reg_write = regmap_smbus_byte_reg_write, .reg_read = regmap_smbus_byte_reg_read, }; @@ -79,7 +79,7 @@ static int regmap_smbus_word_reg_write(void *context, unsigned int reg, return i2c_smbus_write_word_data(i2c, reg, val); } -static struct regmap_bus regmap_smbus_word = { +static const struct regmap_bus regmap_smbus_word = { .reg_write = regmap_smbus_word_reg_write, .reg_read = regmap_smbus_word_reg_read, }; 
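The regmap_smbus_* and regmap_i2c structures being const-ified in these regmap-i2c.c hunks are internal bus backends; drivers reach them through the generic regmap API. A minimal sketch of that client side, assuming a hypothetical 8-bit device (the register addresses, values and names are invented, not from the patch):

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>

static const struct regmap_config example_regmap_cfg = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = 0x7f,
};

static int example_setup(struct i2c_client *client)
{
	struct regmap *map;
	unsigned int chip_id;
	int ret;

	/*
	 * Picks one of the regmap_bus backends above, depending on
	 * whether the adapter supports raw I2C or only SMBus transfers.
	 */
	map = devm_regmap_init_i2c(client, &example_regmap_cfg);
	if (IS_ERR(map))
		return PTR_ERR(map);

	ret = regmap_read(map, 0x00, &chip_id); /* hypothetical ID register */
	if (ret)
		return ret;

	return regmap_write(map, 0x01, 0x01); /* hypothetical enable bit */
}

Because the backend choice happens inside devm_regmap_init_i2c(), marking the regmap_bus tables const changes nothing for callers; it only lets the structures live in read-only memory.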
@@ -115,7 +115,7 @@ static int regmap_smbus_word_write_swapped(void *context, unsigned int reg, return i2c_smbus_write_word_swapped(i2c, reg, val); } -static struct regmap_bus regmap_smbus_word_swapped = { +static const struct regmap_bus regmap_smbus_word_swapped = { .reg_write = regmap_smbus_word_write_swapped, .reg_read = regmap_smbus_word_read_swapped, }; @@ -197,7 +197,7 @@ static int regmap_i2c_read(void *context, return -EIO; } -static struct regmap_bus regmap_i2c = { +static const struct regmap_bus regmap_i2c = { .write = regmap_i2c_write, .gather_write = regmap_i2c_gather_write, .read = regmap_i2c_read, @@ -239,7 +239,7 @@ static int regmap_i2c_smbus_i2c_read(void *context, const void *reg, return -EIO; } -static struct regmap_bus regmap_i2c_smbus_i2c_block = { +static const struct regmap_bus regmap_i2c_smbus_i2c_block = { .write = regmap_i2c_smbus_i2c_write, .read = regmap_i2c_smbus_i2c_read, .max_raw_read = I2C_SMBUS_BLOCK_MAX, diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 19f57ccfbe1d..59f911e57719 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -1488,11 +1488,18 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg, WARN_ON(!map->bus); - /* Check for unwritable registers before we start */ - for (i = 0; i < val_len / map->format.val_bytes; i++) - if (!regmap_writeable(map, - reg + regmap_get_offset(map, i))) - return -EINVAL; + /* Check for unwritable or noinc registers in range + * before we start + */ + if (!regmap_writeable_noinc(map, reg)) { + for (i = 0; i < val_len / map->format.val_bytes; i++) { + unsigned int element = + reg + regmap_get_offset(map, i); + if (!regmap_writeable(map, element) || + regmap_writeable_noinc(map, element)) + return -EINVAL; + } + } if (!map->cache_bypass && map->format.parse_val) { unsigned int ival; diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c index d8d0dc0ca5ac..0b081dee1e95 100644 --- a/drivers/base/swnode.c +++ b/drivers/base/swnode.c @@ -108,10 +108,7 @@ static const void *property_get_pointer(const struct property_entry *prop) if (!prop->length) return NULL; - if (prop->is_array) - return prop->pointer; - - return &prop->value; + return prop->is_inline ? 
&prop->value : prop->pointer; } static const void *property_entry_find(const struct property_entry *props, @@ -201,92 +198,91 @@ static int property_entry_read_string_array(const struct property_entry *props, static void property_entry_free_data(const struct property_entry *p) { - const void *pointer = property_get_pointer(p); const char * const *src_str; size_t i, nval; - if (p->is_array) { - if (p->type == DEV_PROP_STRING && p->pointer) { - src_str = p->pointer; - nval = p->length / sizeof(const char *); - for (i = 0; i < nval; i++) - kfree(src_str[i]); - } - kfree(pointer); - } else if (p->type == DEV_PROP_STRING) { - kfree(p->value.str); + if (p->type == DEV_PROP_STRING) { + src_str = property_get_pointer(p); + nval = p->length / sizeof(*src_str); + for (i = 0; i < nval; i++) + kfree(src_str[i]); } + + if (!p->is_inline) + kfree(p->pointer); + kfree(p->name); } -static const char * const * -property_copy_string_array(const struct property_entry *src) +static bool property_copy_string_array(const char **dst_ptr, + const char * const *src_ptr, + size_t nval) { - const char **d; - const char * const *src_str = src->pointer; - size_t nval = src->length / sizeof(*d); int i; - d = kcalloc(nval, sizeof(*d), GFP_KERNEL); - if (!d) - return NULL; - for (i = 0; i < nval; i++) { - d[i] = kstrdup(src_str[i], GFP_KERNEL); - if (!d[i] && src_str[i]) { + dst_ptr[i] = kstrdup(src_ptr[i], GFP_KERNEL); + if (!dst_ptr[i] && src_ptr[i]) { while (--i >= 0) - kfree(d[i]); - kfree(d); - return NULL; + kfree(dst_ptr[i]); + return false; } } - return d; + return true; } static int property_entry_copy_data(struct property_entry *dst, const struct property_entry *src) { const void *pointer = property_get_pointer(src); - const void *new; - - if (src->is_array) { - if (!src->length) - return -ENODATA; - - if (src->type == DEV_PROP_STRING) { - new = property_copy_string_array(src); - if (!new) - return -ENOMEM; - } else { - new = kmemdup(pointer, src->length, GFP_KERNEL); - if (!new) - return -ENOMEM; - } + void *dst_ptr; + size_t nval; + + /* + * Properties with no data should not be marked as stored + * out of line. + */ + if (!src->is_inline && !src->length) + return -ENODATA; + + /* + * Reference properties are never stored inline as + * they are too big. 
+ */ + if (src->type == DEV_PROP_REF && src->is_inline) + return -EINVAL; - dst->is_array = true; - dst->pointer = new; - } else if (src->type == DEV_PROP_STRING) { - new = kstrdup(src->value.str, GFP_KERNEL); - if (!new && src->value.str) + if (src->length <= sizeof(dst->value)) { + dst_ptr = &dst->value; + dst->is_inline = true; + } else { + dst_ptr = kmalloc(src->length, GFP_KERNEL); + if (!dst_ptr) return -ENOMEM; + dst->pointer = dst_ptr; + } - dst->value.str = new; + if (src->type == DEV_PROP_STRING) { + nval = src->length / sizeof(const char *); + if (!property_copy_string_array(dst_ptr, pointer, nval)) { + if (!dst->is_inline) + kfree(dst->pointer); + return -ENOMEM; + } } else { - dst->value = src->value; + memcpy(dst_ptr, pointer, src->length); } dst->length = src->length; dst->type = src->type; dst->name = kstrdup(src->name, GFP_KERNEL); - if (!dst->name) - goto out_free_data; + if (!dst->name) { + property_entry_free_data(dst); + return -ENOMEM; + } return 0; - -out_free_data: - property_entry_free_data(dst); - return -ENOMEM; } /** @@ -483,31 +479,49 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode, struct fwnode_reference_args *args) { struct swnode *swnode = to_swnode(fwnode); - const struct software_node_reference *ref; + const struct software_node_ref_args *ref_array; + const struct software_node_ref_args *ref; const struct property_entry *prop; struct fwnode_handle *refnode; + u32 nargs_prop_val; + int error; int i; - if (!swnode || !swnode->node->references) + if (!swnode) return -ENOENT; - for (ref = swnode->node->references; ref->name; ref++) - if (!strcmp(ref->name, propname)) - break; + prop = property_entry_get(swnode->node->properties, propname); + if (!prop) + return -ENOENT; + + if (prop->type != DEV_PROP_REF) + return -EINVAL; - if (!ref->name || index > (ref->nrefs - 1)) + /* + * We expect that references are never stored inline, even + * single ones, as they are too big. + */ + if (prop->is_inline) + return -EINVAL; + + if (index * sizeof(*ref) >= prop->length) return -ENOENT; - refnode = software_node_fwnode(ref->refs[index].node); + ref_array = prop->pointer; + ref = &ref_array[index]; + + refnode = software_node_fwnode(ref->node); if (!refnode) return -ENOENT; if (nargs_prop) { - prop = property_entry_get(swnode->node->properties, nargs_prop); - if (!prop) - return -EINVAL; + error = property_entry_read_int_array(swnode->node->properties, + nargs_prop, sizeof(u32), + &nargs_prop_val, 1); + if (error) + return error; - nargs = prop->value.u32_data; + nargs = nargs_prop_val; } if (nargs > NR_FWNODE_REFERENCE_ARGS) @@ -517,7 +531,7 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode, args->nargs = nargs; for (i = 0; i < nargs; i++) - args->args[i] = ref->refs[index].args[i]; + args->args[i] = ref->args[i]; return 0; } diff --git a/drivers/base/test/Kconfig b/drivers/base/test/Kconfig index 86e85daa80bf..305c7751184a 100644 --- a/drivers/base/test/Kconfig +++ b/drivers/base/test/Kconfig @@ -8,3 +8,6 @@ config TEST_ASYNC_DRIVER_PROBE The module name will be test_async_driver_probe.ko If unsure say N. 
+config KUNIT_DRIVER_PE_TEST + bool "KUnit Tests for property entry API" + depends on KUNIT=y diff --git a/drivers/base/test/Makefile b/drivers/base/test/Makefile index 0f1f7277a013..3ca56367c84b 100644 --- a/drivers/base/test/Makefile +++ b/drivers/base/test/Makefile @@ -1,2 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_TEST_ASYNC_DRIVER_PROBE) += test_async_driver_probe.o + +obj-$(CONFIG_KUNIT_DRIVER_PE_TEST) += property-entry-test.o diff --git a/drivers/base/test/property-entry-test.c b/drivers/base/test/property-entry-test.c new file mode 100644 index 000000000000..abe03315180f --- /dev/null +++ b/drivers/base/test/property-entry-test.c @@ -0,0 +1,475 @@ +// SPDX-License-Identifier: GPL-2.0 +// Unit tests for property entries API +// +// Copyright 2019 Google LLC. + +#include <kunit/test.h> +#include <linux/property.h> +#include <linux/types.h> + +static void pe_test_uints(struct kunit *test) +{ + static const struct property_entry entries[] = { + PROPERTY_ENTRY_U8("prop-u8", 8), + PROPERTY_ENTRY_U16("prop-u16", 16), + PROPERTY_ENTRY_U32("prop-u32", 32), + PROPERTY_ENTRY_U64("prop-u64", 64), + { } + }; + + struct fwnode_handle *node; + u8 val_u8, array_u8[2]; + u16 val_u16, array_u16[2]; + u32 val_u32, array_u32[2]; + u64 val_u64, array_u64[2]; + int error; + + node = fwnode_create_software_node(entries, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node); + + error = fwnode_property_read_u8(node, "prop-u8", &val_u8); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)val_u8, 8); + + error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 1); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u8[0], 8); + + error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 2); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u8(node, "no-prop-u8", &val_u8); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u8_array(node, "no-prop-u8", array_u8, 1); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u16(node, "prop-u16", &val_u16); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)val_u16, 16); + + error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 1); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u16[0], 16); + + error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 2); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u16(node, "no-prop-u16", &val_u16); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u16_array(node, "no-prop-u16", array_u16, 1); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u32(node, "prop-u32", &val_u32); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)val_u32, 32); + + error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 1); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u32[0], 32); + + error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 2); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u32(node, "no-prop-u32", &val_u32); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u32_array(node, "no-prop-u32", array_u32, 1); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u64(node, "prop-u64", &val_u64); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)val_u64, 64); + + error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 1); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, 
(int)array_u64[0], 64); + + error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 2); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u64(node, "no-prop-u64", &val_u64); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u64_array(node, "no-prop-u64", array_u64, 1); + KUNIT_EXPECT_NE(test, error, 0); + + fwnode_remove_software_node(node); +} + +static void pe_test_uint_arrays(struct kunit *test) +{ + static const u8 a_u8[16] = { 8, 9 }; + static const u16 a_u16[16] = { 16, 17 }; + static const u32 a_u32[16] = { 32, 33 }; + static const u64 a_u64[16] = { 64, 65 }; + static const struct property_entry entries[] = { + PROPERTY_ENTRY_U8_ARRAY("prop-u8", a_u8), + PROPERTY_ENTRY_U16_ARRAY("prop-u16", a_u16), + PROPERTY_ENTRY_U32_ARRAY("prop-u32", a_u32), + PROPERTY_ENTRY_U64_ARRAY("prop-u64", a_u64), + { } + }; + + struct fwnode_handle *node; + u8 val_u8, array_u8[32]; + u16 val_u16, array_u16[32]; + u32 val_u32, array_u32[32]; + u64 val_u64, array_u64[32]; + int error; + + node = fwnode_create_software_node(entries, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node); + + error = fwnode_property_read_u8(node, "prop-u8", &val_u8); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)val_u8, 8); + + error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 1); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u8[0], 8); + + error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 2); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u8[0], 8); + KUNIT_EXPECT_EQ(test, (int)array_u8[1], 9); + + error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 17); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u8(node, "no-prop-u8", &val_u8); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u8_array(node, "no-prop-u8", array_u8, 1); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u16(node, "prop-u16", &val_u16); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)val_u16, 16); + + error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 1); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u16[0], 16); + + error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 2); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u16[0], 16); + KUNIT_EXPECT_EQ(test, (int)array_u16[1], 17); + + error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 17); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u16(node, "no-prop-u16", &val_u16); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u16_array(node, "no-prop-u16", array_u16, 1); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u32(node, "prop-u32", &val_u32); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)val_u32, 32); + + error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 1); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u32[0], 32); + + error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 2); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u32[0], 32); + KUNIT_EXPECT_EQ(test, (int)array_u32[1], 33); + + error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 17); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u32(node, "no-prop-u32", &val_u32); + KUNIT_EXPECT_NE(test, error, 0); + + error = 
fwnode_property_read_u32_array(node, "no-prop-u32", array_u32, 1); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u64(node, "prop-u64", &val_u64); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)val_u64, 64); + + error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 1); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u64[0], 64); + + error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 2); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u64[0], 64); + KUNIT_EXPECT_EQ(test, (int)array_u64[1], 65); + + error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 17); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u64(node, "no-prop-u64", &val_u64); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u64_array(node, "no-prop-u64", array_u64, 1); + KUNIT_EXPECT_NE(test, error, 0); + + fwnode_remove_software_node(node); +} + +static void pe_test_strings(struct kunit *test) +{ + static const char *strings[] = { + "string-a", + "string-b", + }; + + static const struct property_entry entries[] = { + PROPERTY_ENTRY_STRING("str", "single"), + PROPERTY_ENTRY_STRING("empty", ""), + PROPERTY_ENTRY_STRING_ARRAY("strs", strings), + { } + }; + + struct fwnode_handle *node; + const char *str; + const char *strs[10]; + int error; + + node = fwnode_create_software_node(entries, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node); + + error = fwnode_property_read_string(node, "str", &str); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_STREQ(test, str, "single"); + + error = fwnode_property_read_string_array(node, "str", strs, 1); + KUNIT_EXPECT_EQ(test, error, 1); + KUNIT_EXPECT_STREQ(test, strs[0], "single"); + + /* asking for more data returns what we have */ + error = fwnode_property_read_string_array(node, "str", strs, 2); + KUNIT_EXPECT_EQ(test, error, 1); + KUNIT_EXPECT_STREQ(test, strs[0], "single"); + + error = fwnode_property_read_string(node, "no-str", &str); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_string_array(node, "no-str", strs, 1); + KUNIT_EXPECT_LT(test, error, 0); + + error = fwnode_property_read_string(node, "empty", &str); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_STREQ(test, str, ""); + + error = fwnode_property_read_string_array(node, "strs", strs, 3); + KUNIT_EXPECT_EQ(test, error, 2); + KUNIT_EXPECT_STREQ(test, strs[0], "string-a"); + KUNIT_EXPECT_STREQ(test, strs[1], "string-b"); + + error = fwnode_property_read_string_array(node, "strs", strs, 1); + KUNIT_EXPECT_EQ(test, error, 1); + KUNIT_EXPECT_STREQ(test, strs[0], "string-a"); + + /* NULL argument -> returns size */ + error = fwnode_property_read_string_array(node, "strs", NULL, 0); + KUNIT_EXPECT_EQ(test, error, 2); + + /* accessing array as single value */ + error = fwnode_property_read_string(node, "strs", &str); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_STREQ(test, str, "string-a"); + + fwnode_remove_software_node(node); +} + +static void pe_test_bool(struct kunit *test) +{ + static const struct property_entry entries[] = { + PROPERTY_ENTRY_BOOL("prop"), + { } + }; + + struct fwnode_handle *node; + + node = fwnode_create_software_node(entries, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node); + + KUNIT_EXPECT_TRUE(test, fwnode_property_read_bool(node, "prop")); + KUNIT_EXPECT_FALSE(test, fwnode_property_read_bool(node, "not-prop")); + + fwnode_remove_software_node(node); +} + +/* Verifies that small U8 array is stored inline when 
property is copied */ +static void pe_test_move_inline_u8(struct kunit *test) +{ + static const u8 u8_array_small[8] = { 1, 2, 3, 4 }; + static const u8 u8_array_big[128] = { 5, 6, 7, 8 }; + static const struct property_entry entries[] = { + PROPERTY_ENTRY_U8_ARRAY("small", u8_array_small), + PROPERTY_ENTRY_U8_ARRAY("big", u8_array_big), + { } + }; + + struct property_entry *copy; + const u8 *data_ptr; + + copy = property_entries_dup(entries); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, copy); + + KUNIT_EXPECT_TRUE(test, copy[0].is_inline); + data_ptr = (u8 *)&copy[0].value; + KUNIT_EXPECT_EQ(test, (int)data_ptr[0], 1); + KUNIT_EXPECT_EQ(test, (int)data_ptr[1], 2); + + KUNIT_EXPECT_FALSE(test, copy[1].is_inline); + data_ptr = copy[1].pointer; + KUNIT_EXPECT_EQ(test, (int)data_ptr[0], 5); + KUNIT_EXPECT_EQ(test, (int)data_ptr[1], 6); + + property_entries_free(copy); +} + +/* Verifies that single string array is stored inline when property is copied */ +static void pe_test_move_inline_str(struct kunit *test) +{ + static char *str_array_small[] = { "a" }; + static char *str_array_big[] = { "b", "c", "d", "e" }; + static char *str_array_small_empty[] = { "" }; + static struct property_entry entries[] = { + PROPERTY_ENTRY_STRING_ARRAY("small", str_array_small), + PROPERTY_ENTRY_STRING_ARRAY("big", str_array_big), + PROPERTY_ENTRY_STRING_ARRAY("small-empty", str_array_small_empty), + { } + }; + + struct property_entry *copy; + const char * const *data_ptr; + + copy = property_entries_dup(entries); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, copy); + + KUNIT_EXPECT_TRUE(test, copy[0].is_inline); + KUNIT_EXPECT_STREQ(test, copy[0].value.str[0], "a"); + + KUNIT_EXPECT_FALSE(test, copy[1].is_inline); + data_ptr = copy[1].pointer; + KUNIT_EXPECT_STREQ(test, data_ptr[0], "b"); + KUNIT_EXPECT_STREQ(test, data_ptr[1], "c"); + + KUNIT_EXPECT_TRUE(test, copy[2].is_inline); + KUNIT_EXPECT_STREQ(test, copy[2].value.str[0], ""); + + property_entries_free(copy); +} + +/* Handling of reference properties */ +static void pe_test_reference(struct kunit *test) +{ + static const struct software_node nodes[] = { + { .name = "1", }, + { .name = "2", }, + { } + }; + + static const struct software_node_ref_args refs[] = { + { + .node = &nodes[0], + .nargs = 0, + }, + { + .node = &nodes[1], + .nargs = 2, + .args = { 3, 4 }, + }, + }; + + const struct property_entry entries[] = { + PROPERTY_ENTRY_REF("ref-1", &nodes[0]), + PROPERTY_ENTRY_REF("ref-2", &nodes[1], 1, 2), + PROPERTY_ENTRY_REF_ARRAY("ref-3", refs), + { } + }; + + struct fwnode_handle *node; + struct fwnode_reference_args ref; + int error; + + error = software_node_register_nodes(nodes); + KUNIT_ASSERT_EQ(test, error, 0); + + node = fwnode_create_software_node(entries, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node); + + error = fwnode_property_get_reference_args(node, "ref-1", NULL, + 0, 0, &ref); + KUNIT_ASSERT_EQ(test, error, 0); + KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[0]); + KUNIT_EXPECT_EQ(test, ref.nargs, 0U); + + /* wrong index */ + error = fwnode_property_get_reference_args(node, "ref-1", NULL, + 0, 1, &ref); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_get_reference_args(node, "ref-2", NULL, + 1, 0, &ref); + KUNIT_ASSERT_EQ(test, error, 0); + KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[1]); + KUNIT_EXPECT_EQ(test, ref.nargs, 1U); + KUNIT_EXPECT_EQ(test, ref.args[0], 1LLU); + + /* asking for more args, padded with zero data */ + error = fwnode_property_get_reference_args(node, "ref-2", NULL, + 3, 0, &ref);
+ KUNIT_ASSERT_EQ(test, error, 0); + KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[1]); + KUNIT_EXPECT_EQ(test, ref.nargs, 3U); + KUNIT_EXPECT_EQ(test, ref.args[0], 1LLU); + KUNIT_EXPECT_EQ(test, ref.args[1], 2LLU); + KUNIT_EXPECT_EQ(test, ref.args[2], 0LLU); + + /* wrong index */ + error = fwnode_property_get_reference_args(node, "ref-2", NULL, + 2, 1, &ref); + KUNIT_EXPECT_NE(test, error, 0); + + /* array of references */ + error = fwnode_property_get_reference_args(node, "ref-3", NULL, + 0, 0, &ref); + KUNIT_ASSERT_EQ(test, error, 0); + KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[0]); + KUNIT_EXPECT_EQ(test, ref.nargs, 0U); + + /* second reference in the array */ + error = fwnode_property_get_reference_args(node, "ref-3", NULL, + 2, 1, &ref); + KUNIT_ASSERT_EQ(test, error, 0); + KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[1]); + KUNIT_EXPECT_EQ(test, ref.nargs, 2U); + KUNIT_EXPECT_EQ(test, ref.args[0], 3LLU); + KUNIT_EXPECT_EQ(test, ref.args[1], 4LLU); + + /* wrong index */ + error = fwnode_property_get_reference_args(node, "ref-1", NULL, + 0, 2, &ref); + KUNIT_EXPECT_NE(test, error, 0); + + fwnode_remove_software_node(node); + software_node_unregister_nodes(nodes); +} + +static struct kunit_case property_entry_test_cases[] = { + KUNIT_CASE(pe_test_uints), + KUNIT_CASE(pe_test_uint_arrays), + KUNIT_CASE(pe_test_strings), + KUNIT_CASE(pe_test_bool), + KUNIT_CASE(pe_test_move_inline_u8), + KUNIT_CASE(pe_test_move_inline_str), + KUNIT_CASE(pe_test_reference), + { } +}; + +static struct kunit_suite property_entry_test_suite = { + .name = "property-entry", + .test_cases = property_entry_test_cases, +}; + +kunit_test_suite(property_entry_test_suite); diff --git a/drivers/bcma/driver_chipcommon_b.c b/drivers/bcma/driver_chipcommon_b.c index 57f10b58b47c..c153c96a6145 100644 --- a/drivers/bcma/driver_chipcommon_b.c +++ b/drivers/bcma/driver_chipcommon_b.c @@ -48,7 +48,7 @@ int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb) return 0; ccb->setup_done = 1; - ccb->mii = ioremap_nocache(ccb->core->addr_s[1], BCMA_CORE_SIZE); + ccb->mii = ioremap(ccb->core->addr_s[1], BCMA_CORE_SIZE); if (!ccb->mii) return -ENOMEM; diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c index c42cec7c7ecc..88a93c266c19 100644 --- a/drivers/bcma/driver_pci_host.c +++ b/drivers/bcma/driver_pci_host.c @@ -115,7 +115,7 @@ static int bcma_extpci_read_config(struct bcma_drv_pci *pc, unsigned int dev, if (unlikely(!addr)) goto out; err = -ENOMEM; - mmio = ioremap_nocache(addr, sizeof(val)); + mmio = ioremap(addr, sizeof(val)); if (!mmio) goto out; @@ -180,7 +180,7 @@ static int bcma_extpci_write_config(struct bcma_drv_pci *pc, unsigned int dev, if (unlikely(!addr)) goto out; err = -ENOMEM; - mmio = ioremap_nocache(addr, sizeof(val)); + mmio = ioremap(addr, sizeof(val)); if (!mmio) goto out; @@ -515,7 +515,7 @@ void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc) /* Ok, ready to run, register it to the system. * The following needs change, if we want to port hostmode * to non-MIPS platform. 
*/ - io_map_base = (unsigned long)ioremap_nocache(pc_host->mem_resource.start, + io_map_base = (unsigned long)ioremap(pc_host->mem_resource.start, resource_size(&pc_host->mem_resource)); pc_host->pci_controller.io_map_base = io_map_base; set_io_port_base(pc_host->pci_controller.io_map_base); diff --git a/drivers/bcma/host_soc.c b/drivers/bcma/host_soc.c index c8073b509a2b..90d5bdc12e03 100644 --- a/drivers/bcma/host_soc.c +++ b/drivers/bcma/host_soc.c @@ -172,7 +172,7 @@ int __init bcma_host_soc_register(struct bcma_soc *soc) /* iomap only first core. We have to read some register on this core * to scan the bus. */ - bus->mmio = ioremap_nocache(BCMA_ADDR_BASE, BCMA_CORE_SIZE * 1); + bus->mmio = ioremap(BCMA_ADDR_BASE, BCMA_CORE_SIZE * 1); if (!bus->mmio) return -ENOMEM; diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c index 1f2de714b401..1a942f734188 100644 --- a/drivers/bcma/scan.c +++ b/drivers/bcma/scan.c @@ -422,11 +422,11 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr, } } if (bus->hosttype == BCMA_HOSTTYPE_SOC) { - core->io_addr = ioremap_nocache(core->addr, BCMA_CORE_SIZE); + core->io_addr = ioremap(core->addr, BCMA_CORE_SIZE); if (!core->io_addr) return -ENOMEM; if (core->wrap) { - core->io_wrap = ioremap_nocache(core->wrap, + core->io_wrap = ioremap(core->wrap, BCMA_CORE_SIZE); if (!core->io_wrap) { iounmap(core->io_addr); @@ -469,7 +469,7 @@ int bcma_bus_scan(struct bcma_bus *bus) erombase = bcma_scan_read32(bus, 0, BCMA_CC_EROM); if (bus->hosttype == BCMA_HOSTTYPE_SOC) { - eromptr = ioremap_nocache(erombase, BCMA_CORE_SIZE); + eromptr = ioremap(erombase, BCMA_CORE_SIZE); if (!eromptr) return -ENOMEM; } else { diff --git a/drivers/block/umem.c b/drivers/block/umem.c index 1f3f9e0f02a8..4eaf97d7a170 100644 --- a/drivers/block/umem.c +++ b/drivers/block/umem.c @@ -827,7 +827,7 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) goto failed_req_csr; } - card->csr_remap = ioremap_nocache(csr_base, csr_len); + card->csr_remap = ioremap(csr_base, csr_len); if (!card->csr_remap) { dev_printk(KERN_ERR, &card->dev->dev, "Unable to remap memory region\n"); diff --git a/drivers/bus/fsl-mc/mc-io.c b/drivers/bus/fsl-mc/mc-io.c index d9629fc13a15..6ae48ad80409 100644 --- a/drivers/bus/fsl-mc/mc-io.c +++ b/drivers/bus/fsl-mc/mc-io.c @@ -97,12 +97,12 @@ int __must_check fsl_create_mc_io(struct device *dev, return -EBUSY; } - mc_portal_virt_addr = devm_ioremap_nocache(dev, + mc_portal_virt_addr = devm_ioremap(dev, mc_portal_phys_addr, mc_portal_size); if (!mc_portal_virt_addr) { dev_err(dev, - "devm_ioremap_nocache failed for MC portal %pa\n", + "devm_ioremap failed for MC portal %pa\n", &mc_portal_phys_addr); return -ENXIO; } diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c index ab154a75acf0..9e84239f88d4 100644 --- a/drivers/char/agp/generic.c +++ b/drivers/char/agp/generic.c @@ -941,7 +941,7 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge) bridge->gatt_table = (u32 __iomem *)table; #else - bridge->gatt_table = ioremap_nocache(virt_to_phys(table), + bridge->gatt_table = ioremap(virt_to_phys(table), (PAGE_SIZE * (1 << page_order))); bridge->driver->cache_flush(); #endif diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index c6271ce250b3..66a62d17a3f5 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -1087,7 +1087,7 @@ static void intel_i9xx_setup_flush(void) } if (intel_private.ifp_resource.start) - intel_private.i9xx_flush_page = 
ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE); + intel_private.i9xx_flush_page = ioremap(intel_private.ifp_resource.start, PAGE_SIZE); if (!intel_private.i9xx_flush_page) dev_err(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing\n"); diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c index eb108b3c619a..51121a4b82c7 100644 --- a/drivers/char/applicom.c +++ b/drivers/char/applicom.c @@ -204,7 +204,7 @@ static int __init applicom_init(void) if (pci_enable_device(dev)) return -EIO; - RamIO = ioremap_nocache(pci_resource_start(dev, 0), LEN_RAM_IO); + RamIO = ioremap(pci_resource_start(dev, 0), LEN_RAM_IO); if (!RamIO) { printk(KERN_INFO "ac.o: Failed to ioremap PCI memory " @@ -259,7 +259,7 @@ static int __init applicom_init(void) /* Now try the specified ISA cards */ for (i = 0; i < MAX_ISA_BOARD; i++) { - RamIO = ioremap_nocache(mem + (LEN_RAM_IO * i), LEN_RAM_IO); + RamIO = ioremap(mem + (LEN_RAM_IO * i), LEN_RAM_IO); if (!RamIO) { printk(KERN_INFO "ac.o: Failed to ioremap the ISA card's memory space (slot #%d)\n", i + 1); diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 8486c29d8324..914e293ba62b 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -90,7 +90,7 @@ config HW_RANDOM_BCM2835 config HW_RANDOM_IPROC_RNG200 tristate "Broadcom iProc/STB RNG200 support" - depends on ARCH_BCM_IPROC || ARCH_BRCMSTB + depends on ARCH_BCM_IPROC || ARCH_BCM2835 || ARCH_BRCMSTB default HW_RANDOM ---help--- This driver provides kernel-side support for the RNG200 diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c index 290c880266bf..9f205bd1acc0 100644 --- a/drivers/char/hw_random/intel-rng.c +++ b/drivers/char/hw_random/intel-rng.c @@ -317,7 +317,7 @@ PFX "RNG, try using the 'no_fwh_detect' option.\n"; return -EBUSY; } - intel_rng_hw->mem = ioremap_nocache(INTEL_FWH_ADDR, INTEL_FWH_ADDR_LEN); + intel_rng_hw->mem = ioremap(INTEL_FWH_ADDR, INTEL_FWH_ADDR_LEN); if (intel_rng_hw->mem == NULL) return -EBUSY; diff --git a/drivers/char/hw_random/iproc-rng200.c b/drivers/char/hw_random/iproc-rng200.c index 899ff25f4f28..32d9fe61a225 100644 --- a/drivers/char/hw_random/iproc-rng200.c +++ b/drivers/char/hw_random/iproc-rng200.c @@ -213,6 +213,7 @@ static int iproc_rng200_probe(struct platform_device *pdev) } static const struct of_device_id iproc_rng200_of_match[] = { + { .compatible = "brcm,bcm2711-rng200", }, { .compatible = "brcm,bcm7211-rng200", }, { .compatible = "brcm,bcm7278-rng200", }, { .compatible = "brcm,iproc-rng200", }, diff --git a/drivers/char/hw_random/octeon-rng.c b/drivers/char/hw_random/octeon-rng.c index 8c78aa090492..7be8067ac4e8 100644 --- a/drivers/char/hw_random/octeon-rng.c +++ b/drivers/char/hw_random/octeon-rng.c @@ -81,13 +81,13 @@ static int octeon_rng_probe(struct platform_device *pdev) return -ENOENT; - rng->control_status = devm_ioremap_nocache(&pdev->dev, + rng->control_status = devm_ioremap(&pdev->dev, res_ports->start, sizeof(u64)); if (!rng->control_status) return -ENOENT; - rng->result = devm_ioremap_nocache(&pdev->dev, + rng->result = devm_ioremap(&pdev->dev, res_result->start, sizeof(u64)); if (!rng->result) diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c index 3b53b3e5ec3e..d52bf4df0bca 100644 --- a/drivers/char/tpm/tpm-sysfs.c +++ b/drivers/char/tpm/tpm-sysfs.c @@ -310,7 +310,17 @@ static ssize_t timeouts_show(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR_RO(timeouts); 
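The ioremap_nocache() conversions threaded through the bcma, umem, fsl-mc, agp, intel-gtt, applicom, intel-rng and octeon-rng hunks above are mechanical: on the architectures these drivers target, plain ioremap() already returns an uncached mapping, so the _nocache spelling is a redundant alias on its way out of the tree. The idiom before and after, as a sketch (res stands for whichever MMIO resource a given driver maps):

    void __iomem *regs;

    /* Before: the deprecated alias. */
    regs = ioremap_nocache(res->start, resource_size(res));

    /* After: identical semantics here, same NULL-on-failure contract. */
    regs = ioremap(res->start, resource_size(res));
    if (!regs)
            return -ENOMEM;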
-static struct attribute *tpm_dev_attrs[] = { +static ssize_t tpm_version_major_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct tpm_chip *chip = to_tpm_chip(dev); + + return sprintf(buf, "%s\n", chip->flags & TPM_CHIP_FLAG_TPM2 + ? "2" : "1"); +} +static DEVICE_ATTR_RO(tpm_version_major); + +static struct attribute *tpm1_dev_attrs[] = { &dev_attr_pubek.attr, &dev_attr_pcrs.attr, &dev_attr_enabled.attr, @@ -321,18 +331,28 @@ static struct attribute *tpm_dev_attrs[] = { &dev_attr_cancel.attr, &dev_attr_durations.attr, &dev_attr_timeouts.attr, + &dev_attr_tpm_version_major.attr, NULL, }; -static const struct attribute_group tpm_dev_group = { - .attrs = tpm_dev_attrs, +static struct attribute *tpm2_dev_attrs[] = { + &dev_attr_tpm_version_major.attr, + NULL +}; + +static const struct attribute_group tpm1_dev_group = { + .attrs = tpm1_dev_attrs, +}; + +static const struct attribute_group tpm2_dev_group = { + .attrs = tpm2_dev_attrs, }; void tpm_sysfs_add_device(struct tpm_chip *chip) { - if (chip->flags & TPM_CHIP_FLAG_TPM2) - return; - WARN_ON(chip->groups_cnt != 0); - chip->groups[chip->groups_cnt++] = &tpm_dev_group; + if (chip->flags & TPM_CHIP_FLAG_TPM2) + chip->groups[chip->groups_cnt++] = &tpm2_dev_group; + else + chip->groups[chip->groups_cnt++] = &tpm1_dev_group; } diff --git a/drivers/clk/renesas/clk-rz.c b/drivers/clk/renesas/clk-rz.c index fbc34beafc78..7b703f14e20b 100644 --- a/drivers/clk/renesas/clk-rz.c +++ b/drivers/clk/renesas/clk-rz.c @@ -37,8 +37,8 @@ static u16 __init rz_cpg_read_mode_pins(void) void __iomem *ppr0, *pibc0; u16 modes; - ppr0 = ioremap_nocache(PPR0, 2); - pibc0 = ioremap_nocache(PIBC0, 2); + ppr0 = ioremap(PPR0, 2); + pibc0 = ioremap(PIBC0, 2); BUG_ON(!ppr0 || !pibc0); iowrite16(4, pibc0); /* enable input buffer */ modes = ioread16(ppr0); diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 5fdd76cb1768..cc909e465823 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -88,7 +88,7 @@ config ROCKCHIP_TIMER select TIMER_OF select CLKSRC_MMIO help - Enables the support for the rockchip timer driver. + Enables the support for the Rockchip timer driver. config ARMADA_370_XP_TIMER bool "Armada 370 and XP timer driver" if COMPILE_TEST @@ -162,13 +162,13 @@ config NPCM7XX_TIMER select CLKSRC_MMIO help Enable 24-bit TIMER0 and TIMER1 counters in the NPCM7xx architecture, - While TIMER0 serves as clockevent and TIMER1 serves as clocksource. + where TIMER0 serves as clockevent and TIMER1 serves as clocksource. config CADENCE_TTC_TIMER bool "Cadence TTC timer driver" if COMPILE_TEST depends on COMMON_CLK help - Enables support for the cadence ttc driver. + Enables support for the Cadence TTC driver. config ASM9260_TIMER bool "ASM9260 timer driver" if COMPILE_TEST @@ -190,10 +190,10 @@ config CLKSRC_DBX500_PRCMU bool "Clocksource PRCMU Timer" if COMPILE_TEST depends on HAS_IOMEM help - Use the always on PRCMU Timer as clocksource + Use the always on PRCMU Timer as clocksource. config CLPS711X_TIMER - bool "Cirrus logic timer driver" if COMPILE_TEST + bool "Cirrus Logic timer driver" if COMPILE_TEST select CLKSRC_MMIO help Enables support for the Cirrus Logic PS711 timer. @@ -205,11 +205,11 @@ config ATLAS7_TIMER Enables support for the Atlas7 timer. config MXS_TIMER - bool "Mxs timer driver" if COMPILE_TEST + bool "MXS timer driver" if COMPILE_TEST select CLKSRC_MMIO select STMP_DEVICE help - Enables support for the Mxs timer. + Enables support for the MXS timer. 
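With the attribute groups split above, TPM 1.x chips keep their existing attributes and TPM 2.0 chips, which previously exported nothing from this file, now gain one too: both carry tpm_version_major. A minimal userspace sketch reading it; the tpm0 path assumes the first registered chip:

    #include <stdio.h>

    int main(void)
    {
            /* The show() callback above prints "1" or "2" plus a newline. */
            FILE *f = fopen("/sys/class/tpm/tpm0/tpm_version_major", "r");
            int major;

            if (!f)
                    return 1;
            if (fscanf(f, "%d", &major) == 1)
                    printf("TPM %d.x chip\n", major);
            fclose(f);
            return 0;
    }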
config PRIMA2_TIMER bool "Prima2 timer driver" if COMPILE_TEST @@ -238,10 +238,10 @@ config KEYSTONE_TIMER Enables support for the Keystone timer. config INTEGRATOR_AP_TIMER - bool "Integrator-ap timer driver" if COMPILE_TEST + bool "Integrator-AP timer driver" if COMPILE_TEST select CLKSRC_MMIO help - Enables support for the Integrator-ap timer. + Enables support for the Integrator-AP timer. config CLKSRC_EFM32 bool "Clocksource for Energy Micro's EFM32 SoCs" if !ARCH_EFM32 @@ -283,8 +283,8 @@ config CLKSRC_NPS select TIMER_OF if OF help NPS400 clocksource support. - Got 64 bit counter with update rate up to 1000MHz. - This counter is accessed via couple of 32 bit memory mapped registers. + It has a 64-bit counter with an update rate of up to 1000 MHz. + This counter is accessed via a couple of 32-bit memory-mapped registers. config CLKSRC_STM32 bool "Clocksource for STM32 SoCs" if !ARCH_STM32 @@ -305,14 +305,14 @@ config ARC_TIMERS help These are legacy 32-bit TIMER0 and TIMER1 counters found on all ARC cores (ARC700 as well as ARC HS38). - TIMER0 serves as clockevent while TIMER1 provides clocksource + TIMER0 serves as clockevent while TIMER1 provides clocksource. config ARC_TIMERS_64BIT bool "Support for 64-bit counters in ARC HS38 cores" if COMPILE_TEST depends on ARC_TIMERS select TIMER_OF help - This enables 2 different 64-bit timers: RTC (for UP) and GFRC (for SMP) + This enables 2 different 64-bit timers: RTC (for UP) and GFRC (for SMP). RTC is implemented inside the core, while GFRC sits outside the core in ARConnect IP block. Driver automatically picks one of them for clocksource as appropriate. @@ -390,7 +390,7 @@ config ARM_GLOBAL_TIMER select TIMER_OF if OF depends on ARM help - This options enables support for the ARM global timer unit + This option enables support for the ARM global timer unit. config ARM_TIMER_SP804 bool "Support for Dual Timer SP804 module" if COMPILE_TEST @@ -403,14 +403,14 @@ config CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK depends on ARM_GLOBAL_TIMER default y help - Use ARM global timer clock source as sched_clock + Use ARM global timer clock source as sched_clock. config ARMV7M_SYSTICK bool "Support for the ARMv7M system time" if COMPILE_TEST select TIMER_OF if OF select CLKSRC_MMIO help - This options enables support for the ARMv7M system timer unit + This option enables support for the ARMv7M system timer unit. config ATMEL_PIT bool "Atmel PIT support" if COMPILE_TEST @@ -460,7 +460,7 @@ config VF_PIT_TIMER bool select CLKSRC_MMIO help - Support for Period Interrupt Timer on Freescale Vybrid Family SoCs. + Support for Periodic Interrupt Timer on Freescale Vybrid Family SoCs. config OXNAS_RPS_TIMER bool "Oxford Semiconductor OXNAS RPS Timers driver" if COMPILE_TEST @@ -470,7 +470,7 @@ config OXNAS_RPS_TIMER This enables support for the Oxford Semiconductor OXNAS RPS timers. config SYS_SUPPORTS_SH_CMT - bool + bool config MTK_TIMER bool "Mediatek timer driver" if COMPILE_TEST @@ -490,13 +490,13 @@ config SPRD_TIMER Enables support for the Spreadtrum timer driver. config SYS_SUPPORTS_SH_MTU2 - bool + bool config SYS_SUPPORTS_SH_TMU - bool + bool config SYS_SUPPORTS_EM_STI - bool + bool config CLKSRC_JCORE_PIT bool "J-Core PIT timer driver" if COMPILE_TEST @@ -523,7 +523,7 @@ config SH_TIMER_MTU2 help This enables build of a clockevent driver for the Multi-Function Timer Pulse Unit 2 (MTU2) hardware available on SoCs from Renesas. - This hardware comes with 16 bit-timer registers. + This hardware comes with 16-bit timer registers.
config RENESAS_OSTM bool "Renesas OSTM timer driver" if COMPILE_TEST @@ -580,7 +580,7 @@ config CLKSRC_TANGO_XTAL select TIMER_OF select CLKSRC_MMIO help - This enables the clocksource for Tango SoC + This enables the clocksource for Tango SoC. config CLKSRC_PXA bool "Clocksource for PXA or SA-11x0 platform" if COMPILE_TEST @@ -591,24 +591,24 @@ config CLKSRC_PXA platforms. config H8300_TMR8 - bool "Clockevent timer for the H8300 platform" if COMPILE_TEST - depends on HAS_IOMEM + bool "Clockevent timer for the H8300 platform" if COMPILE_TEST + depends on HAS_IOMEM help This enables the 8 bits timer for the H8300 platform. config H8300_TMR16 - bool "Clockevent timer for the H83069 platform" if COMPILE_TEST - depends on HAS_IOMEM + bool "Clockevent timer for the H83069 platform" if COMPILE_TEST + depends on HAS_IOMEM help This enables the 16 bits timer for the H8300 platform with the - H83069 cpu. + H83069 CPU. config H8300_TPU - bool "Clocksource for the H8300 platform" if COMPILE_TEST - depends on HAS_IOMEM + bool "Clocksource for the H8300 platform" if COMPILE_TEST + depends on HAS_IOMEM help This enables the clocksource for the H8300 platform with the - H8S2678 cpu. + H8S2678 CPU. config CLKSRC_IMX_GPT bool "Clocksource using i.MX GPT" if COMPILE_TEST @@ -666,8 +666,8 @@ config CSKY_MP_TIMER help Say yes here to enable C-SKY SMP timer driver used for C-SKY SMP system. - csky,mptimer is not only used in SMP system, it also could be used - single core system. It's not a mmio reg and it use mtcr/mfcr instruction. + csky,mptimer is not only used in SMP systems, it can also be used in + single core systems. It's not a mmio reg and it uses the mtcr/mfcr instructions. config GX6605S_TIMER bool "Gx6605s SOC system timer driver" if COMPILE_TEST @@ -697,4 +697,14 @@ config INGENIC_TIMER help Support for the timer/counter unit of the Ingenic JZ SoCs. +config MICROCHIP_PIT64B + bool "Microchip PIT64B support" + depends on OF || COMPILE_TEST + select CLKSRC_MMIO + help + This option enables the Microchip PIT64B timer for Atmel + based systems. It supports the oneshot and periodic + modes and high resolution. It is used as a clocksource + and a clockevent.
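The "clocksource and a clockevent" wording in the help text is literal: the new driver added later in this patch expects two PIT64B instances in the device tree, registering the first as the IRQ-driven clockevent and the second as the free-running clocksource. Condensed from the init path that follows:

    static int __init mchp_pit64b_dt_init(struct device_node *node)
    {
            static int inits;

            switch (inits++) {
            case 0: /* first instance: clockevent */
                    return mchp_pit64b_dt_init_timer(node, true);
            case 1: /* second instance: clocksource */
                    return mchp_pit64b_dt_init_timer(node, false);
            }

            return -EINVAL; /* any further instances are ignored */
    }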
+ endmenu diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index 4dfe4225ece7..713686faa549 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -88,3 +88,4 @@ obj-$(CONFIG_RISCV_TIMER) += timer-riscv.o obj-$(CONFIG_CSKY_MP_TIMER) += timer-mp-csky.o obj-$(CONFIG_GX6605S_TIMER) += timer-gx6605s.o obj-$(CONFIG_HYPERV_TIMER) += hyperv_timer.o +obj-$(CONFIG_MICROCHIP_PIT64B) += timer-microchip-pit64b.o diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c index 2b196cbfadb6..b235f446ee50 100644 --- a/drivers/clocksource/bcm2835_timer.c +++ b/drivers/clocksource/bcm2835_timer.c @@ -121,7 +121,7 @@ static int __init bcm2835_timer_init(struct device_node *node) ret = setup_irq(irq, &timer->act); if (ret) { pr_err("Can't set up timer IRQ\n"); - goto err_iounmap; + goto err_timer_free; } clockevents_config_and_register(&timer->evt, freq, 0xf, 0xffffffff); @@ -130,6 +130,9 @@ static int __init bcm2835_timer_init(struct device_node *node) return 0; +err_timer_free: + kfree(timer); + err_iounmap: iounmap(base); return ret; diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c index 9039df4f90e2..ab190dffb1ed 100644 --- a/drivers/clocksource/em_sti.c +++ b/drivers/clocksource/em_sti.c @@ -279,9 +279,7 @@ static void em_sti_register_clockevent(struct em_sti_priv *p) static int em_sti_probe(struct platform_device *pdev) { struct em_sti_priv *p; - struct resource *res; - int irq; - int ret; + int irq, ret; p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL); if (p == NULL) @@ -295,8 +293,7 @@ static int em_sti_probe(struct platform_device *pdev) return irq; /* map memory, let base point to the STI instance */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - p->base = devm_ioremap_resource(&pdev->dev, res); + p->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(p->base)) return PTR_ERR(p->base); diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c index 74cb299f5089..a267fe31ef13 100644 --- a/drivers/clocksource/exynos_mct.c +++ b/drivers/clocksource/exynos_mct.c @@ -4,7 +4,7 @@ * Copyright (c) 2011 Samsung Electronics Co., Ltd. * http://www.samsung.com * - * EXYNOS4 MCT(Multi-Core Timer) support + * Exynos4 MCT(Multi-Core Timer) support */ #include <linux/interrupt.h> diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c index 287d8d58c21a..9d808d595ca8 100644 --- a/drivers/clocksource/hyperv_timer.c +++ b/drivers/clocksource/hyperv_timer.c @@ -66,7 +66,7 @@ static int hv_ce_set_next_event(unsigned long delta, { u64 current_tick; - current_tick = hyperv_cs->read(NULL); + current_tick = hv_read_reference_counter(); current_tick += delta; hv_init_timer(0, current_tick); return 0; @@ -302,22 +302,33 @@ EXPORT_SYMBOL_GPL(hv_stimer_global_cleanup); * the other that uses the TSC reference page feature as defined in the * TLFS. The MSR version is for compatibility with old versions of * Hyper-V and 32-bit x86. The TSC reference page version is preferred. + * + * The Hyper-V clocksource ratings of 250 are chosen to be below the + * TSC clocksource rating of 300. In configurations where Hyper-V offers + * an InvariantTSC, the TSC is not marked "unstable", so the TSC clocksource + * is available and preferred. With the higher rating, it will be the + * default. On older hardware and Hyper-V versions, the TSC is marked + * "unstable", so no TSC clocksource is created and the selected Hyper-V + * clocksource will be the default. 
*/ -struct clocksource *hyperv_cs; -EXPORT_SYMBOL_GPL(hyperv_cs); +u64 (*hv_read_reference_counter)(void); +EXPORT_SYMBOL_GPL(hv_read_reference_counter); -static struct ms_hyperv_tsc_page tsc_pg __aligned(PAGE_SIZE); +static union { + struct ms_hyperv_tsc_page page; + u8 reserved[PAGE_SIZE]; +} tsc_pg __aligned(PAGE_SIZE); struct ms_hyperv_tsc_page *hv_get_tsc_page(void) { - return &tsc_pg; + return &tsc_pg.page; } EXPORT_SYMBOL_GPL(hv_get_tsc_page); -static u64 notrace read_hv_clock_tsc(struct clocksource *arg) +static u64 notrace read_hv_clock_tsc(void) { - u64 current_tick = hv_read_tsc_page(&tsc_pg); + u64 current_tick = hv_read_tsc_page(hv_get_tsc_page()); if (current_tick == U64_MAX) hv_get_time_ref_count(current_tick); @@ -325,20 +336,50 @@ static u64 notrace read_hv_clock_tsc(struct clocksource *arg) return current_tick; } +static u64 notrace read_hv_clock_tsc_cs(struct clocksource *arg) +{ + return read_hv_clock_tsc(); +} + static u64 read_hv_sched_clock_tsc(void) { - return read_hv_clock_tsc(NULL) - hv_sched_clock_offset; + return read_hv_clock_tsc() - hv_sched_clock_offset; +} + +static void suspend_hv_clock_tsc(struct clocksource *arg) +{ + u64 tsc_msr; + + /* Disable the TSC page */ + hv_get_reference_tsc(tsc_msr); + tsc_msr &= ~BIT_ULL(0); + hv_set_reference_tsc(tsc_msr); +} + + +static void resume_hv_clock_tsc(struct clocksource *arg) +{ + phys_addr_t phys_addr = virt_to_phys(&tsc_pg); + u64 tsc_msr; + + /* Re-enable the TSC page */ + hv_get_reference_tsc(tsc_msr); + tsc_msr &= GENMASK_ULL(11, 0); + tsc_msr |= BIT_ULL(0) | (u64)phys_addr; + hv_set_reference_tsc(tsc_msr); } static struct clocksource hyperv_cs_tsc = { .name = "hyperv_clocksource_tsc_page", - .rating = 400, - .read = read_hv_clock_tsc, + .rating = 250, + .read = read_hv_clock_tsc_cs, .mask = CLOCKSOURCE_MASK(64), .flags = CLOCK_SOURCE_IS_CONTINUOUS, + .suspend= suspend_hv_clock_tsc, + .resume = resume_hv_clock_tsc, }; -static u64 notrace read_hv_clock_msr(struct clocksource *arg) +static u64 notrace read_hv_clock_msr(void) { u64 current_tick; /* @@ -350,15 +391,20 @@ static u64 notrace read_hv_clock_msr(struct clocksource *arg) return current_tick; } +static u64 notrace read_hv_clock_msr_cs(struct clocksource *arg) +{ + return read_hv_clock_msr(); +} + static u64 read_hv_sched_clock_msr(void) { - return read_hv_clock_msr(NULL) - hv_sched_clock_offset; + return read_hv_clock_msr() - hv_sched_clock_offset; } static struct clocksource hyperv_cs_msr = { .name = "hyperv_clocksource_msr", - .rating = 400, - .read = read_hv_clock_msr, + .rating = 250, + .read = read_hv_clock_msr_cs, .mask = CLOCKSOURCE_MASK(64), .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; @@ -371,8 +417,8 @@ static bool __init hv_init_tsc_clocksource(void) if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE)) return false; - hyperv_cs = &hyperv_cs_tsc; - phys_addr = virt_to_phys(&tsc_pg); + hv_read_reference_counter = read_hv_clock_tsc; + phys_addr = virt_to_phys(hv_get_tsc_page()); /* * The Hyper-V TLFS specifies to preserve the value of reserved @@ -389,7 +435,7 @@ static bool __init hv_init_tsc_clocksource(void) hv_set_clocksource_vdso(hyperv_cs_tsc); clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100); - hv_sched_clock_offset = hyperv_cs->read(hyperv_cs); + hv_sched_clock_offset = hv_read_reference_counter(); hv_setup_sched_clock(read_hv_sched_clock_tsc); return true; @@ -411,10 +457,10 @@ void __init hv_init_clocksource(void) if (!(ms_hyperv.features & HV_MSR_TIME_REF_COUNT_AVAILABLE)) return; - hyperv_cs = &hyperv_cs_msr; + 
hv_read_reference_counter = read_hv_clock_msr; clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100); - hv_sched_clock_offset = hyperv_cs->read(hyperv_cs); + hv_sched_clock_offset = hv_read_reference_counter(); hv_setup_sched_clock(read_hv_sched_clock_msr); } EXPORT_SYMBOL_GPL(hv_init_clocksource); diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index 9cde50cb3220..12ac75f7571f 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c @@ -905,7 +905,7 @@ static int sh_cmt_map_memory(struct sh_cmt_device *cmt) return -ENXIO; } - cmt->mapbase = ioremap_nocache(mem->start, resource_size(mem)); + cmt->mapbase = ioremap(mem->start, resource_size(mem)); if (cmt->mapbase == NULL) { dev_err(&cmt->pdev->dev, "failed to remap I/O memory\n"); return -ENXIO; diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c index 64526e50d471..bfccb31e94ad 100644 --- a/drivers/clocksource/sh_mtu2.c +++ b/drivers/clocksource/sh_mtu2.c @@ -377,7 +377,7 @@ static int sh_mtu2_map_memory(struct sh_mtu2_device *mtu) return -ENXIO; } - mtu->mapbase = ioremap_nocache(res->start, resource_size(res)); + mtu->mapbase = ioremap(res->start, resource_size(res)); if (mtu->mapbase == NULL) return -ENXIO; diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c index d49690d15536..d41df9ba3725 100644 --- a/drivers/clocksource/sh_tmu.c +++ b/drivers/clocksource/sh_tmu.c @@ -486,7 +486,7 @@ static int sh_tmu_map_memory(struct sh_tmu_device *tmu) return -ENXIO; } - tmu->mapbase = ioremap_nocache(res->start, resource_size(res)); + tmu->mapbase = ioremap(res->start, resource_size(res)); if (tmu->mapbase == NULL) return -ENXIO; diff --git a/drivers/clocksource/timer-cadence-ttc.c b/drivers/clocksource/timer-cadence-ttc.c index 88fe2e9ba9a3..38858e141731 100644 --- a/drivers/clocksource/timer-cadence-ttc.c +++ b/drivers/clocksource/timer-cadence-ttc.c @@ -15,6 +15,8 @@ #include <linux/of_irq.h> #include <linux/slab.h> #include <linux/sched_clock.h> +#include <linux/module.h> +#include <linux/of_platform.h> /* * This driver configures the 2 16/32-bit count-up timers as follows: @@ -464,13 +466,7 @@ static int __init ttc_setup_clockevent(struct clk *clk, return 0; } -/** - * ttc_timer_init - Initialize the timer - * - * Initializes the timer hardware and register the clock source and clock event - * timers with Linux kernal timer framework - */ -static int __init ttc_timer_init(struct device_node *timer) +static int __init ttc_timer_probe(struct platform_device *pdev) { unsigned int irq; void __iomem *timer_baseaddr; @@ -478,6 +474,7 @@ static int __init ttc_timer_init(struct device_node *timer) static int initialized; int clksel, ret; u32 timer_width = 16; + struct device_node *timer = pdev->dev.of_node; if (initialized) return 0; @@ -532,4 +529,17 @@ static int __init ttc_timer_init(struct device_node *timer) return 0; } -TIMER_OF_DECLARE(ttc, "cdns,ttc", ttc_timer_init); +static const struct of_device_id ttc_timer_of_match[] = { + {.compatible = "cdns,ttc"}, + {}, +}; + +MODULE_DEVICE_TABLE(of, ttc_timer_of_match); + +static struct platform_driver ttc_timer_driver = { + .driver = { + .name = "cdns_ttc_timer", + .of_match_table = ttc_timer_of_match, + }, +}; +builtin_platform_driver_probe(ttc_timer_driver, ttc_timer_probe); diff --git a/drivers/clocksource/timer-microchip-pit64b.c b/drivers/clocksource/timer-microchip-pit64b.c new file mode 100644 index 000000000000..bd63d3484838 --- /dev/null +++ b/drivers/clocksource/timer-microchip-pit64b.c @@ -0,0 
+1,451 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * 64-bit Periodic Interval Timer driver + * + * Copyright (C) 2019 Microchip Technology Inc. and its subsidiaries + * + * Author: Claudiu Beznea <claudiu.beznea@microchip.com> + */ + +#include <linux/clk.h> +#include <linux/clockchips.h> +#include <linux/interrupt.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/sched_clock.h> +#include <linux/slab.h> + +#define MCHP_PIT64B_CR 0x00 /* Control Register */ +#define MCHP_PIT64B_CR_START BIT(0) +#define MCHP_PIT64B_CR_SWRST BIT(8) + +#define MCHP_PIT64B_MR 0x04 /* Mode Register */ +#define MCHP_PIT64B_MR_CONT BIT(0) +#define MCHP_PIT64B_MR_ONE_SHOT (0) +#define MCHP_PIT64B_MR_SGCLK BIT(3) +#define MCHP_PIT64B_MR_PRES GENMASK(11, 8) + +#define MCHP_PIT64B_LSB_PR 0x08 /* LSB Period Register */ + +#define MCHP_PIT64B_MSB_PR 0x0C /* MSB Period Register */ + +#define MCHP_PIT64B_IER 0x10 /* Interrupt Enable Register */ +#define MCHP_PIT64B_IER_PERIOD BIT(0) + +#define MCHP_PIT64B_ISR 0x1C /* Interrupt Status Register */ + +#define MCHP_PIT64B_TLSBR 0x20 /* Timer LSB Register */ + +#define MCHP_PIT64B_TMSBR 0x24 /* Timer MSB Register */ + +#define MCHP_PIT64B_PRES_MAX 0x10 +#define MCHP_PIT64B_LSBMASK GENMASK_ULL(31, 0) +#define MCHP_PIT64B_PRES_TO_MODE(p) (MCHP_PIT64B_MR_PRES & ((p) << 8)) +#define MCHP_PIT64B_MODE_TO_PRES(m) ((MCHP_PIT64B_MR_PRES & (m)) >> 8) +#define MCHP_PIT64B_DEF_CS_FREQ 5000000UL /* 5 MHz */ +#define MCHP_PIT64B_DEF_CE_FREQ 32768 /* 32 KHz */ + +#define MCHP_PIT64B_NAME "pit64b" + +/** + * struct mchp_pit64b_timer - PIT64B timer data structure + * @base: base address of PIT64B hardware block + * @pclk: PIT64B's peripheral clock + * @gclk: PIT64B's generic clock + * @mode: precomputed value for mode register + */ +struct mchp_pit64b_timer { + void __iomem *base; + struct clk *pclk; + struct clk *gclk; + u32 mode; +}; + +/** + * mchp_pit64b_clkevt - PIT64B clockevent data structure + * @timer: PIT64B timer + * @clkevt: clockevent + */ +struct mchp_pit64b_clkevt { + struct mchp_pit64b_timer timer; + struct clock_event_device clkevt; +}; + +#define to_mchp_pit64b_timer(x) \ + ((struct mchp_pit64b_timer *)container_of(x,\ + struct mchp_pit64b_clkevt, clkevt)) + +/* Base address for clocksource timer. */ +static void __iomem *mchp_pit64b_cs_base; +/* Default cycles for clockevent timer. */ +static u64 mchp_pit64b_ce_cycles; + +static inline u64 mchp_pit64b_cnt_read(void __iomem *base) +{ + unsigned long flags; + u32 low, high; + + raw_local_irq_save(flags); + + /* + * When using a 64 bit period TLSB must be read first, followed by the + * read of TMSB. This sequence generates an atomic read of the 64 bit + * timer value whatever the lapse of time between the accesses. 
+ */ + low = readl_relaxed(base + MCHP_PIT64B_TLSBR); + high = readl_relaxed(base + MCHP_PIT64B_TMSBR); + + raw_local_irq_restore(flags); + + return (((u64)high << 32) | low); +} + +static inline void mchp_pit64b_reset(struct mchp_pit64b_timer *timer, + u64 cycles, u32 mode, u32 irqs) +{ + u32 low, high; + + low = cycles & MCHP_PIT64B_LSBMASK; + high = cycles >> 32; + + writel_relaxed(MCHP_PIT64B_CR_SWRST, timer->base + MCHP_PIT64B_CR); + writel_relaxed(mode | timer->mode, timer->base + MCHP_PIT64B_MR); + writel_relaxed(high, timer->base + MCHP_PIT64B_MSB_PR); + writel_relaxed(low, timer->base + MCHP_PIT64B_LSB_PR); + writel_relaxed(irqs, timer->base + MCHP_PIT64B_IER); + writel_relaxed(MCHP_PIT64B_CR_START, timer->base + MCHP_PIT64B_CR); +} + +static u64 mchp_pit64b_clksrc_read(struct clocksource *cs) +{ + return mchp_pit64b_cnt_read(mchp_pit64b_cs_base); +} + +static u64 mchp_pit64b_sched_read_clk(void) +{ + return mchp_pit64b_cnt_read(mchp_pit64b_cs_base); +} + +static int mchp_pit64b_clkevt_shutdown(struct clock_event_device *cedev) +{ + struct mchp_pit64b_timer *timer = to_mchp_pit64b_timer(cedev); + + writel_relaxed(MCHP_PIT64B_CR_SWRST, timer->base + MCHP_PIT64B_CR); + + return 0; +} + +static int mchp_pit64b_clkevt_set_periodic(struct clock_event_device *cedev) +{ + struct mchp_pit64b_timer *timer = to_mchp_pit64b_timer(cedev); + + mchp_pit64b_reset(timer, mchp_pit64b_ce_cycles, MCHP_PIT64B_MR_CONT, + MCHP_PIT64B_IER_PERIOD); + + return 0; +} + +static int mchp_pit64b_clkevt_set_next_event(unsigned long evt, + struct clock_event_device *cedev) +{ + struct mchp_pit64b_timer *timer = to_mchp_pit64b_timer(cedev); + + mchp_pit64b_reset(timer, evt, MCHP_PIT64B_MR_ONE_SHOT, + MCHP_PIT64B_IER_PERIOD); + + return 0; +} + +static void mchp_pit64b_clkevt_suspend(struct clock_event_device *cedev) +{ + struct mchp_pit64b_timer *timer = to_mchp_pit64b_timer(cedev); + + writel_relaxed(MCHP_PIT64B_CR_SWRST, timer->base + MCHP_PIT64B_CR); + if (timer->mode & MCHP_PIT64B_MR_SGCLK) + clk_disable_unprepare(timer->gclk); + clk_disable_unprepare(timer->pclk); +} + +static void mchp_pit64b_clkevt_resume(struct clock_event_device *cedev) +{ + struct mchp_pit64b_timer *timer = to_mchp_pit64b_timer(cedev); + + clk_prepare_enable(timer->pclk); + if (timer->mode & MCHP_PIT64B_MR_SGCLK) + clk_prepare_enable(timer->gclk); +} + +static irqreturn_t mchp_pit64b_interrupt(int irq, void *dev_id) +{ + struct mchp_pit64b_clkevt *irq_data = dev_id; + + /* Need to clear the interrupt. */ + readl_relaxed(irq_data->timer.base + MCHP_PIT64B_ISR); + + irq_data->clkevt.event_handler(&irq_data->clkevt); + + return IRQ_HANDLED; +} + +static void __init mchp_pit64b_pres_compute(u32 *pres, u32 clk_rate, + u32 max_rate) +{ + u32 tmp; + + for (*pres = 0; *pres < MCHP_PIT64B_PRES_MAX; (*pres)++) { + tmp = clk_rate / (*pres + 1); + if (tmp <= max_rate) + break; + } + + /* Use the biggest prescaler if we didn't match one. */ + if (*pres == MCHP_PIT64B_PRES_MAX) + *pres = MCHP_PIT64B_PRES_MAX - 1; +} + +/** + * mchp_pit64b_init_mode - prepare PIT64B mode register value to be used at + * runtime; this includes prescaler and SGCLK bit + * + * PIT64B timer may be fed by gclk or pclk. When gclk is used its rate has to + * be at least 3 times lower than pclk's rate. pclk rate is fixed, gclk rate + * could be changed via clock APIs. The chosen clock (pclk or gclk) could be + * divided by the internal PIT64B's divider.
+ * + * This function first tries to use GCLK by requesting the desired rate from + * PMC and then using the internal PIT64B prescaler, if any, to reach the + * requested rate. If PCLK/GCLK < 3 (a condition required by the PIT64B + * hardware) then the function falls back on using PCLK as clock source for + * the PIT64B timer, choosing the highest prescaler if none matches the + * requested frequency exactly. + * + * The PIT64B block in relation to the PMC is shown below: + * + * PIT64B + * PMC +------------------------------------+ + * +----+ | +-----+ | + * | |-->gclk -->|-->| | +---------+ +-----+ | + * | | | | MUX |--->| Divider |->|timer| | + * | |-->pclk -->|-->| | +---------+ +-----+ | + * +----+ | +-----+ | + * | ^ | + * | sel | + * +------------------------------------+ + * + * Where: + * - gclk rate <= pclk rate/3 + * - gclk rate could be requested from PMC + * - pclk rate is fixed (cannot be requested from PMC) + */ +static int __init mchp_pit64b_init_mode(struct mchp_pit64b_timer *timer, + unsigned long max_rate) +{ + unsigned long pclk_rate, diff = 0, best_diff = ULONG_MAX; + long gclk_round = 0; + u32 pres, best_pres = 0; + + pclk_rate = clk_get_rate(timer->pclk); + if (!pclk_rate) + return -EINVAL; + + timer->mode = 0; + + /* Try using GCLK. */ + gclk_round = clk_round_rate(timer->gclk, max_rate); + if (gclk_round < 0) + goto pclk; + + if (pclk_rate / gclk_round < 3) + goto pclk; + + mchp_pit64b_pres_compute(&pres, gclk_round, max_rate); + best_diff = abs(gclk_round / (pres + 1) - max_rate); + best_pres = pres; + + if (!best_diff) { + timer->mode |= MCHP_PIT64B_MR_SGCLK; + goto done; + } + +pclk: + /* Check if requested rate could be obtained using PCLK. */ + mchp_pit64b_pres_compute(&pres, pclk_rate, max_rate); + diff = abs(pclk_rate / (pres + 1) - max_rate); + + if (best_diff > diff) { + /* Use PCLK. */ + best_pres = pres; + } else { + /* Use GCLK. */ + timer->mode |= MCHP_PIT64B_MR_SGCLK; + clk_set_rate(timer->gclk, gclk_round); + } + +done: + timer->mode |= MCHP_PIT64B_PRES_TO_MODE(best_pres); + + pr_info("PIT64B: using clk=%s with prescaler %u, freq=%lu [Hz]\n", + timer->mode & MCHP_PIT64B_MR_SGCLK ? "gclk" : "pclk", best_pres, + timer->mode & MCHP_PIT64B_MR_SGCLK ? + gclk_round / (best_pres + 1) : pclk_rate / (best_pres + 1)); + + return 0; +} + +static int __init mchp_pit64b_init_clksrc(struct mchp_pit64b_timer *timer, + u32 clk_rate) +{ + int ret; + + mchp_pit64b_reset(timer, ULLONG_MAX, MCHP_PIT64B_MR_CONT, 0); + + mchp_pit64b_cs_base = timer->base; + + ret = clocksource_mmio_init(timer->base, MCHP_PIT64B_NAME, clk_rate, + 210, 64, mchp_pit64b_clksrc_read); + if (ret) { + pr_debug("clksrc: Failed to register PIT64B clocksource!\n"); + + /* Stop timer.
*/ + writel_relaxed(MCHP_PIT64B_CR_SWRST, + timer->base + MCHP_PIT64B_CR); + + return ret; + } + + sched_clock_register(mchp_pit64b_sched_read_clk, 64, clk_rate); + + return 0; +} + +static int __init mchp_pit64b_init_clkevt(struct mchp_pit64b_timer *timer, + u32 clk_rate, u32 irq) +{ + struct mchp_pit64b_clkevt *ce; + int ret; + + ce = kzalloc(sizeof(*ce), GFP_KERNEL); + if (!ce) + return -ENOMEM; + + mchp_pit64b_ce_cycles = DIV_ROUND_CLOSEST(clk_rate, HZ); + + ce->timer.base = timer->base; + ce->timer.pclk = timer->pclk; + ce->timer.gclk = timer->gclk; + ce->timer.mode = timer->mode; + ce->clkevt.name = MCHP_PIT64B_NAME; + ce->clkevt.features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC; + ce->clkevt.rating = 150; + ce->clkevt.set_state_shutdown = mchp_pit64b_clkevt_shutdown; + ce->clkevt.set_state_periodic = mchp_pit64b_clkevt_set_periodic; + ce->clkevt.set_next_event = mchp_pit64b_clkevt_set_next_event; + ce->clkevt.suspend = mchp_pit64b_clkevt_suspend; + ce->clkevt.resume = mchp_pit64b_clkevt_resume; + ce->clkevt.cpumask = cpumask_of(0); + ce->clkevt.irq = irq; + + ret = request_irq(irq, mchp_pit64b_interrupt, IRQF_TIMER, + "pit64b_tick", ce); + if (ret) { + pr_debug("clkevt: Failed to setup PIT64B IRQ\n"); + kfree(ce); + return ret; + } + + clockevents_config_and_register(&ce->clkevt, clk_rate, 1, ULONG_MAX); + + return 0; +} + +static int __init mchp_pit64b_dt_init_timer(struct device_node *node, + bool clkevt) +{ + u32 freq = clkevt ? MCHP_PIT64B_DEF_CE_FREQ : MCHP_PIT64B_DEF_CS_FREQ; + struct mchp_pit64b_timer timer; + unsigned long clk_rate; + u32 irq = 0; + int ret; + + /* Parse DT node. */ + timer.pclk = of_clk_get_by_name(node, "pclk"); + if (IS_ERR(timer.pclk)) + return PTR_ERR(timer.pclk); + + timer.gclk = of_clk_get_by_name(node, "gclk"); + if (IS_ERR(timer.gclk)) + return PTR_ERR(timer.gclk); + + timer.base = of_iomap(node, 0); + if (!timer.base) + return -ENXIO; + + if (clkevt) { + irq = irq_of_parse_and_map(node, 0); + if (!irq) { + ret = -ENODEV; + goto io_unmap; + } + } + + /* Initialize mode (prescaler + SGCK bit). To be used at runtime. */ + ret = mchp_pit64b_init_mode(&timer, freq); + if (ret) + goto irq_unmap; + + ret = clk_prepare_enable(timer.pclk); + if (ret) + goto irq_unmap; + + if (timer.mode & MCHP_PIT64B_MR_SGCLK) { + ret = clk_prepare_enable(timer.gclk); + if (ret) + goto pclk_unprepare; + + clk_rate = clk_get_rate(timer.gclk); + } else { + clk_rate = clk_get_rate(timer.pclk); + } + clk_rate = clk_rate / (MCHP_PIT64B_MODE_TO_PRES(timer.mode) + 1); + + if (clkevt) + ret = mchp_pit64b_init_clkevt(&timer, clk_rate, irq); + else + ret = mchp_pit64b_init_clksrc(&timer, clk_rate); + + if (ret) + goto gclk_unprepare; + + return 0; + +gclk_unprepare: + if (timer.mode & MCHP_PIT64B_MR_SGCLK) + clk_disable_unprepare(timer.gclk); +pclk_unprepare: + clk_disable_unprepare(timer.pclk); +irq_unmap: + irq_dispose_mapping(irq); +io_unmap: + iounmap(timer.base); + + return ret; +} + +static int __init mchp_pit64b_dt_init(struct device_node *node) +{ + static int inits; + + switch (inits++) { + case 0: + /* 1st request, register clockevent. */ + return mchp_pit64b_dt_init_timer(node, true); + case 1: + /* 2nd request, register clocksource. */ + return mchp_pit64b_dt_init_timer(node, false); + } + + /* The rest, don't care. 
*/ + return -EINVAL; +} + +TIMER_OF_DECLARE(mchp_pit64b, "microchip,sam9x60-pit64b", mchp_pit64b_dt_init); diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c index 5394d9dbdfbc..269a994d6a99 100644 --- a/drivers/clocksource/timer-ti-dm.c +++ b/drivers/clocksource/timer-ti-dm.c @@ -780,7 +780,6 @@ static int omap_dm_timer_probe(struct platform_device *pdev) { unsigned long flags; struct omap_dm_timer *timer; - struct resource *mem, *irq; struct device *dev = &pdev->dev; const struct dmtimer_platform_data *pdata; int ret; @@ -796,24 +795,16 @@ static int omap_dm_timer_probe(struct platform_device *pdev) return -ENODEV; } - irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (unlikely(!irq)) { - dev_err(dev, "%s: no IRQ resource.\n", __func__); - return -ENODEV; - } - - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (unlikely(!mem)) { - dev_err(dev, "%s: no memory resource.\n", __func__); - return -ENODEV; - } - timer = devm_kzalloc(dev, sizeof(*timer), GFP_KERNEL); if (!timer) return -ENOMEM; + timer->irq = platform_get_irq(pdev, 0); + if (timer->irq < 0) + return timer->irq; + timer->fclk = ERR_PTR(-ENODEV); - timer->io_base = devm_ioremap_resource(dev, mem); + timer->io_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(timer->io_base)) return PTR_ERR(timer->io_base); @@ -836,7 +827,6 @@ static int omap_dm_timer_probe(struct platform_device *pdev) if (pdata) timer->errata = pdata->timer_errata; - timer->irq = irq->start; timer->pdev = pdev; pm_runtime_enable(dev); diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c index 77b0e5d0fb13..4f86ce2db34f 100644 --- a/drivers/cpufreq/brcmstb-avs-cpufreq.c +++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c @@ -455,6 +455,8 @@ static unsigned int brcm_avs_cpufreq_get(unsigned int cpu) struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); struct private_data *priv = policy->driver_data; + cpufreq_cpu_put(policy); + return brcm_avs_get_frequency(priv->base); } diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index 8d8da763adc5..a06777c35fc0 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -39,7 +39,7 @@ static struct cppc_cpudata **all_cpu_data; struct cppc_workaround_oem_info { - char oem_id[ACPI_OEM_ID_SIZE +1]; + char oem_id[ACPI_OEM_ID_SIZE + 1]; char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1]; u32 oem_revision; }; @@ -93,9 +93,13 @@ static void cppc_check_hisi_workaround(void) for (i = 0; i < ARRAY_SIZE(wa_info); i++) { if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) && !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) && - wa_info[i].oem_revision == tbl->oem_revision) + wa_info[i].oem_revision == tbl->oem_revision) { apply_hisi_workaround = true; + break; + } } + + acpi_put_table(tbl); } /* Callback function used to retrieve the max frequency from DMI */ diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index aba591d57c67..f2ae9cd455c1 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -109,6 +109,7 @@ static const struct of_device_id blacklist[] __initconst = { { .compatible = "fsl,imx8mq", }, { .compatible = "fsl,imx8mm", }, { .compatible = "fsl,imx8mn", }, + { .compatible = "fsl,imx8mp", }, { .compatible = "marvell,armadaxp", }, diff --git a/drivers/cpufreq/imx-cpufreq-dt.c b/drivers/cpufreq/imx-cpufreq-dt.c index 85a6efd6b68f..6cb8193421ea 100644 --- 
a/drivers/cpufreq/imx-cpufreq-dt.c +++ b/drivers/cpufreq/imx-cpufreq-dt.c @@ -35,7 +35,8 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev) if (ret) return ret; - if (of_machine_is_compatible("fsl,imx8mn")) + if (of_machine_is_compatible("fsl,imx8mn") || + of_machine_is_compatible("fsl,imx8mp")) speed_grade = (cell_value & IMX8MN_OCOTP_CFG3_SPEED_GRADE_MASK) >> OCOTP_CFG3_SPEED_GRADE_SHIFT; else @@ -54,7 +55,8 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev) if (of_machine_is_compatible("fsl,imx8mm") || of_machine_is_compatible("fsl,imx8mq")) speed_grade = 1; - if (of_machine_is_compatible("fsl,imx8mn")) + if (of_machine_is_compatible("fsl,imx8mn") || + of_machine_is_compatible("fsl,imx8mp")) speed_grade = 0xb; } diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index d2fa3e9ccd97..ad6a17cf0011 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -172,7 +172,7 @@ struct vid_data { /** * struct global_params - Global parameters, mostly tunable via sysfs. * @no_turbo: Whether or not to use turbo P-states. - * @turbo_disabled: Whethet or not turbo P-states are available at all, + * @turbo_disabled: Whether or not turbo P-states are available at all, * based on the MSR_IA32_MISC_ENABLE value and whether or * not the maximum reported turbo P-state is different from * the maximum reported non-turbo one. diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c index cb74bdc5baaa..70ad8fe1d78b 100644 --- a/drivers/cpufreq/kirkwood-cpufreq.c +++ b/drivers/cpufreq/kirkwood-cpufreq.c @@ -102,13 +102,11 @@ static struct cpufreq_driver kirkwood_cpufreq_driver = { static int kirkwood_cpufreq_probe(struct platform_device *pdev) { struct device_node *np; - struct resource *res; int err; priv.dev = &pdev->dev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv.base = devm_ioremap_resource(&pdev->dev, res); + priv.base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv.base)) return PTR_ERR(priv.base); diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c index e9caa9586982..909f40fbcde2 100644 --- a/drivers/cpufreq/loongson2_cpufreq.c +++ b/drivers/cpufreq/loongson2_cpufreq.c @@ -144,9 +144,11 @@ static void loongson2_cpu_wait(void) u32 cpu_freq; spin_lock_irqsave(&loongson2_wait_lock, flags); - cpu_freq = LOONGSON_CHIPCFG(0); - LOONGSON_CHIPCFG(0) &= ~0x7; /* Put CPU into wait mode */ - LOONGSON_CHIPCFG(0) = cpu_freq; /* Restore CPU state */ + cpu_freq = readl(LOONGSON_CHIPCFG); + /* Put CPU into wait mode */ + writel(readl(LOONGSON_CHIPCFG) & ~0x7, LOONGSON_CHIPCFG); + /* Restore CPU state */ + writel(cpu_freq, LOONGSON_CHIPCFG); spin_unlock_irqrestore(&loongson2_wait_lock, flags); local_irq_enable(); } diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c index fdc767fdbe6a..89d4fa8b65e9 100644 --- a/drivers/cpufreq/pcc-cpufreq.c +++ b/drivers/cpufreq/pcc-cpufreq.c @@ -445,7 +445,7 @@ static int __init pcc_cpufreq_probe(void) goto out_free; } - pcch_virt_addr = ioremap_nocache(mem_resource->minimum, + pcch_virt_addr = ioremap(mem_resource->minimum, mem_resource->address_length); if (pcch_virt_addr == NULL) { pr_debug("probe: could not map shared mem region\n"); diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c index 106910351c41..5c221bc90210 100644 --- a/drivers/cpufreq/s3c2416-cpufreq.c +++ b/drivers/cpufreq/s3c2416-cpufreq.c @@ -304,6 +304,7 @@ static int 
s3c2416_cpufreq_reboot_notifier_evt(struct notifier_block *this, { struct s3c2416_data *s3c_freq = &s3c2416_cpufreq; int ret; + struct cpufreq_policy *policy; mutex_lock(&cpufreq_lock); @@ -318,7 +319,16 @@ static int s3c2416_cpufreq_reboot_notifier_evt(struct notifier_block *this, */ if (s3c_freq->is_dvs) { pr_debug("cpufreq: leave dvs on reboot\n"); - ret = cpufreq_driver_target(cpufreq_cpu_get(0), FREQ_SLEEP, 0); + + policy = cpufreq_cpu_get(0); + if (!policy) { + pr_debug("cpufreq: no policy for cpu0\n"); + return NOTIFY_BAD; + } + + ret = cpufreq_driver_target(policy, FREQ_SLEEP, 0); + cpufreq_cpu_put(policy); + if (ret < 0) return NOTIFY_BAD; } diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c index 5d10030f2560..e84281e2561d 100644 --- a/drivers/cpufreq/s5pv210-cpufreq.c +++ b/drivers/cpufreq/s5pv210-cpufreq.c @@ -555,8 +555,17 @@ static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this, unsigned long event, void *ptr) { int ret; + struct cpufreq_policy *policy; + + policy = cpufreq_cpu_get(0); + if (!policy) { + pr_debug("cpufreq: no policy for cpu0\n"); + return NOTIFY_BAD; + } + + ret = cpufreq_driver_target(policy, SLEEP_FREQ, 0); + cpufreq_cpu_put(policy); - ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0); if (ret < 0) return NOTIFY_BAD; diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c index bcecb068b51b..2e233ad72758 100644 --- a/drivers/cpufreq/tegra186-cpufreq.c +++ b/drivers/cpufreq/tegra186-cpufreq.c @@ -187,7 +187,6 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev) { struct tegra186_cpufreq_data *data; struct tegra_bpmp *bpmp; - struct resource *res; unsigned int i = 0, err; data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); @@ -205,8 +204,7 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev) if (IS_ERR(bpmp)) return PTR_ERR(bpmp); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - data->regs = devm_ioremap_resource(&pdev->dev, res); + data->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(data->regs)) { err = PTR_ERR(data->regs); goto put_bpmp; diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm index a224d33dda7f..62272ecfa771 100644 --- a/drivers/cpuidle/Kconfig.arm +++ b/drivers/cpuidle/Kconfig.arm @@ -25,7 +25,7 @@ config ARM_PSCI_CPUIDLE config ARM_BIG_LITTLE_CPUIDLE bool "Support for ARM big.LITTLE processors" - depends on ARCH_VEXPRESS_TC2_PM || ARCH_EXYNOS + depends on ARCH_VEXPRESS_TC2_PM || ARCH_EXYNOS || COMPILE_TEST depends on MCPM && !ARM64 select ARM_CPU_SUSPEND select CPU_IDLE_MULTIPLE_DRIVERS @@ -51,13 +51,13 @@ config ARM_HIGHBANK_CPUIDLE config ARM_KIRKWOOD_CPUIDLE bool "CPU Idle Driver for Marvell Kirkwood SoCs" - depends on MACH_KIRKWOOD && !ARM64 + depends on (MACH_KIRKWOOD || COMPILE_TEST) && !ARM64 help This adds the CPU Idle driver for Marvell Kirkwood SoCs. config ARM_ZYNQ_CPUIDLE bool "CPU Idle Driver for Xilinx Zynq processors" - depends on ARCH_ZYNQ && !ARM64 + depends on (ARCH_ZYNQ || COMPILE_TEST) && !ARM64 help Select this to enable cpuidle on Xilinx Zynq processors. @@ -70,19 +70,19 @@ config ARM_U8500_CPUIDLE config ARM_AT91_CPUIDLE bool "Cpu Idle Driver for the AT91 processors" default y - depends on ARCH_AT91 && !ARM64 + depends on (ARCH_AT91 || COMPILE_TEST) && !ARM64 help Select this to enable cpuidle for AT91 processors.
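The two reboot-notifier fixes above (s3c2416 and s5pv210) apply the same pattern: cpufreq_cpu_get() returns a reference-counted policy that can be NULL when cpu0 has no policy yet, so the result must be NULL-checked and later released with cpufreq_cpu_put() instead of being passed straight into cpufreq_driver_target(). A minimal sketch of that corrected shape; the notifier name is hypothetical and SLEEP_FREQ stands in for the driver's own sleep frequency:

#include <linux/cpufreq.h>
#include <linux/notifier.h>

static int example_reboot_notifier_evt(struct notifier_block *this,
				       unsigned long event, void *ptr)
{
	struct cpufreq_policy *policy;
	int ret;

	/* Takes a reference on cpu0's policy; may return NULL. */
	policy = cpufreq_cpu_get(0);
	if (!policy)
		return NOTIFY_BAD;

	ret = cpufreq_driver_target(policy, SLEEP_FREQ, 0);

	/* Balance the reference taken by cpufreq_cpu_get(). */
	cpufreq_cpu_put(policy);

	return ret < 0 ? NOTIFY_BAD : NOTIFY_DONE;
}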
config ARM_EXYNOS_CPUIDLE bool "Cpu Idle Driver for the Exynos processors" - depends on ARCH_EXYNOS && !ARM64 + depends on (ARCH_EXYNOS || COMPILE_TEST) && !ARM64 select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP help Select this to enable cpuidle for Exynos processors. config ARM_MVEBU_V7_CPUIDLE bool "CPU Idle Driver for mvebu v7 family processors" - depends on ARCH_MVEBU && !ARM64 + depends on (ARCH_MVEBU || COMPILE_TEST) && !ARM64 help Select this to enable cpuidle on Armada 370, 38x and XP processors. diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c index b607278df25b..04003b90dc49 100644 --- a/drivers/cpuidle/coupled.c +++ b/drivers/cpuidle/coupled.c @@ -89,6 +89,7 @@ * @coupled_cpus: mask of cpus that are part of the coupled set * @requested_state: array of requested states for cpus in the coupled set * @ready_waiting_counts: combined count of cpus in ready or waiting loops + * @abort_barrier: synchronisation point for abort cases * @online_count: count of cpus that are online * @refcnt: reference count of cpuidle devices that are using this struct * @prevent: flag to prevent coupled idle while a cpu is hotplugging @@ -338,7 +339,7 @@ static void cpuidle_coupled_poke(int cpu) /** * cpuidle_coupled_poke_others - wake up all other cpus that may be waiting - * @dev: struct cpuidle_device for this cpu + * @this_cpu: target cpu * @coupled: the struct coupled that contains the current cpu * * Calls cpuidle_coupled_poke on all other online cpus. @@ -355,7 +356,7 @@ static void cpuidle_coupled_poke_others(int this_cpu, /** * cpuidle_coupled_set_waiting - mark this cpu as in the wait loop - * @dev: struct cpuidle_device for this cpu + * @cpu: target cpu * @coupled: the struct coupled that contains the current cpu * @next_state: the index in drv->states of the requested state for this cpu * @@ -376,7 +377,7 @@ static int cpuidle_coupled_set_waiting(int cpu, /** * cpuidle_coupled_set_not_waiting - mark this cpu as leaving the wait loop - * @dev: struct cpuidle_device for this cpu + * @cpu: target cpu * @coupled: the struct coupled that contains the current cpu * * Removes the requested idle state for the specified cpuidle device. @@ -412,7 +413,7 @@ static void cpuidle_coupled_set_done(int cpu, struct cpuidle_coupled *coupled) /** * cpuidle_coupled_clear_pokes - spin until the poke interrupt is processed - * @cpu - this cpu + * @cpu: this cpu * * Turns on interrupts and spins until any outstanding poke interrupts have * been processed and the poke bit has been cleared. 
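The coupled-cpuidle hunks just above are pure kernel-doc repairs: every @tag must name an actual parameter of the function it documents, and the stale @dev tags predate the conversion of these helpers to take a plain CPU number. Reconstructed from the hunk, the corrected comment for one of the affected helpers looks as follows (the function body is omitted, only the signature is taken from the diff); with matching names, scripts/kernel-doc stops warning about excess or missing parameters:

/**
 * cpuidle_coupled_set_waiting - mark this cpu as in the wait loop
 * @cpu: target cpu
 * @coupled: the struct coupled that contains the current cpu
 * @next_state: the index in drv->states of the requested state for this cpu
 */
static int cpuidle_coupled_set_waiting(int cpu,
		struct cpuidle_coupled *coupled, int next_state);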
diff --git a/drivers/cpuidle/cpuidle-clps711x.c b/drivers/cpuidle/cpuidle-clps711x.c index 6e36740f5719..fc22c59b6c73 100644 --- a/drivers/cpuidle/cpuidle-clps711x.c +++ b/drivers/cpuidle/cpuidle-clps711x.c @@ -37,10 +37,7 @@ static struct cpuidle_driver clps711x_idle_driver = { static int __init clps711x_cpuidle_probe(struct platform_device *pdev) { - struct resource *res; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - clps711x_halt = devm_ioremap_resource(&pdev->dev, res); + clps711x_halt = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(clps711x_halt)) return PTR_ERR(clps711x_halt); diff --git a/drivers/cpuidle/cpuidle-kirkwood.c b/drivers/cpuidle/cpuidle-kirkwood.c index d23d8f468c12..511c4f46027a 100644 --- a/drivers/cpuidle/cpuidle-kirkwood.c +++ b/drivers/cpuidle/cpuidle-kirkwood.c @@ -55,10 +55,7 @@ static struct cpuidle_driver kirkwood_idle_driver = { /* Initialize CPU idle by registering the idle states */ static int kirkwood_cpuidle_probe(struct platform_device *pdev) { - struct resource *res; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ddr_operation_base = devm_ioremap_resource(&pdev->dev, res); + ddr_operation_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ddr_operation_base)) return PTR_ERR(ddr_operation_base); diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 33d19c8eb027..de81298051b3 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -121,6 +121,9 @@ void cpuidle_use_deepest_state(u64 latency_limit_ns) * cpuidle_find_deepest_state - Find the deepest available idle state. * @drv: cpuidle driver for the given CPU. * @dev: cpuidle device for the given CPU. + * @latency_limit_ns: Idle state exit latency limit + * + * Return: the index of the deepest available idle state. */ int cpuidle_find_deepest_state(struct cpuidle_driver *drv, struct cpuidle_device *dev, @@ -572,10 +575,14 @@ static int __cpuidle_register_device(struct cpuidle_device *dev) if (!try_module_get(drv->owner)) return -EINVAL; - for (i = 0; i < drv->state_count; i++) + for (i = 0; i < drv->state_count; i++) { if (drv->states[i].flags & CPUIDLE_FLAG_UNUSABLE) dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_DRIVER; + if (drv->states[i].flags & CPUIDLE_FLAG_OFF) + dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_USER; + } + per_cpu(cpuidle_devices, dev->cpu) = dev; list_add(&dev->device_list, &cpuidle_detected_devices); diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c index ce6a5f80fb83..4070e573bf43 100644 --- a/drivers/cpuidle/driver.c +++ b/drivers/cpuidle/driver.c @@ -155,8 +155,6 @@ static void __cpuidle_driver_init(struct cpuidle_driver *drv) { int i; - drv->refcnt = 0; - /* * Use all possible CPUs as the default, because if the kernel boots * with some CPUs offline and then we online one of them, the CPU @@ -240,9 +238,6 @@ static int __cpuidle_register_driver(struct cpuidle_driver *drv) */ static void __cpuidle_unregister_driver(struct cpuidle_driver *drv) { - if (WARN_ON(drv->refcnt > 0)) - return; - if (drv->bctimer) { drv->bctimer = 0; on_each_cpu_mask(drv->cpumask, cpuidle_setup_broadcast_timer, @@ -350,47 +345,6 @@ struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev) EXPORT_SYMBOL_GPL(cpuidle_get_cpu_driver); /** - * cpuidle_driver_ref - get a reference to the driver. - * - * Increment the reference counter of the cpuidle driver associated with - * the current CPU. - * - * Returns a pointer to the driver, or NULL if the current CPU has no driver. 
- */ -struct cpuidle_driver *cpuidle_driver_ref(void) -{ - struct cpuidle_driver *drv; - - spin_lock(&cpuidle_driver_lock); - - drv = cpuidle_get_driver(); - if (drv) - drv->refcnt++; - - spin_unlock(&cpuidle_driver_lock); - return drv; -} - -/** - * cpuidle_driver_unref - puts down the refcount for the driver - * - * Decrement the reference counter of the cpuidle driver associated with - * the current CPU. - */ -void cpuidle_driver_unref(void) -{ - struct cpuidle_driver *drv; - - spin_lock(&cpuidle_driver_lock); - - drv = cpuidle_get_driver(); - if (drv && !WARN_ON(drv->refcnt <= 0)) - drv->refcnt--; - - spin_unlock(&cpuidle_driver_lock); -} - -/** * cpuidle_driver_state_disabled - Disable or enable an idle state * @drv: cpuidle driver owning the state * @idx: State index diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c index 38ef770be90d..cdeedbf02646 100644 --- a/drivers/cpuidle/sysfs.c +++ b/drivers/cpuidle/sysfs.c @@ -142,6 +142,7 @@ static struct attribute_group cpuidle_attr_group = { /** * cpuidle_add_interface - add CPU global sysfs attributes + * @dev: the target device */ int cpuidle_add_interface(struct device *dev) { @@ -153,6 +154,7 @@ int cpuidle_add_interface(struct device *dev) /** * cpuidle_remove_interface - remove CPU global sysfs attributes + * @dev: the target device */ void cpuidle_remove_interface(struct device *dev) { @@ -327,6 +329,14 @@ static ssize_t store_state_disable(struct cpuidle_state *state, return size; } +static ssize_t show_state_default_status(struct cpuidle_state *state, + struct cpuidle_state_usage *state_usage, + char *buf) +{ + return sprintf(buf, "%s\n", + state->flags & CPUIDLE_FLAG_OFF ? "disabled" : "enabled"); +} + define_one_state_ro(name, show_state_name); define_one_state_ro(desc, show_state_desc); define_one_state_ro(latency, show_state_exit_latency); @@ -337,6 +347,7 @@ define_one_state_ro(time, show_state_time); define_one_state_rw(disable, show_state_disable, store_state_disable); define_one_state_ro(above, show_state_above); define_one_state_ro(below, show_state_below); +define_one_state_ro(default_status, show_state_default_status); static struct attribute *cpuidle_state_default_attrs[] = { &attr_name.attr, @@ -349,6 +360,7 @@ static struct attribute *cpuidle_state_default_attrs[] = { &attr_disable.attr, &attr_above.attr, &attr_below.attr, + &attr_default_status.attr, NULL }; @@ -615,7 +627,7 @@ static struct kobj_type ktype_driver_cpuidle = { /** * cpuidle_add_driver_sysfs - adds the driver name sysfs attribute - * @device: the target device + * @dev: the target device */ static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev) { @@ -646,7 +658,7 @@ static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev) /** * cpuidle_remove_driver_sysfs - removes the driver name sysfs attribute - * @device: the target device + * @dev: the target device */ static void cpuidle_remove_driver_sysfs(struct cpuidle_device *dev) { diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 91eb768d4221..c2767ed54dfe 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -248,15 +248,15 @@ config CRYPTO_DEV_MARVELL_CESA This driver supports CPU offload through DMA transfers. 
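Stepping back to the cpuidle hunks above before continuing with the crypto changes: the new CPUIDLE_FLAG_OFF flag lets a driver declare an idle state disabled by default. __cpuidle_register_device() records such states as CPUIDLE_STATE_DISABLED_BY_USER, and the new default_status sysfs attribute reports them as "disabled" until the user re-enables them through the existing disable attribute. A minimal sketch of a state table using the flag; the driver name and enter callback are illustrative only, not taken from the patch:

#include <linux/cpuidle.h>
#include <linux/module.h>

static int example_enter(struct cpuidle_device *dev,
			 struct cpuidle_driver *drv, int index)
{
	/* Architecture-specific idle entry would go here. */
	return index;
}

static struct cpuidle_driver example_idle_driver = {
	.name = "example_idle",
	.owner = THIS_MODULE,
	.states = {
		{
			.name = "C1",
			.desc = "shallow state, enabled by default",
			.exit_latency = 1,
			.target_residency = 1,
			.enter = example_enter,
		},
		{
			.name = "C2",
			.desc = "deep state, starts disabled",
			.exit_latency = 200,
			.target_residency = 800,
			/* Reported as "disabled" in .../stateN/default_status. */
			.flags = CPUIDLE_FLAG_OFF,
			.enter = example_enter,
		},
	},
	.state_count = 2,
};

Registering this with cpuidle_register() would expose C2 with default_status reading "disabled" while C1 reads "enabled".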
config CRYPTO_DEV_NIAGARA2 - tristate "Niagara2 Stream Processing Unit driver" - select CRYPTO_LIB_DES - select CRYPTO_SKCIPHER - select CRYPTO_HASH - select CRYPTO_MD5 - select CRYPTO_SHA1 - select CRYPTO_SHA256 - depends on SPARC64 - help + tristate "Niagara2 Stream Processing Unit driver" + select CRYPTO_LIB_DES + select CRYPTO_SKCIPHER + select CRYPTO_HASH + select CRYPTO_MD5 + select CRYPTO_SHA1 + select CRYPTO_SHA256 + depends on SPARC64 + help Each core of a Niagara2 processor contains a Stream Processing Unit, which itself contains several cryptographic sub-units. One set provides the Modular Arithmetic Unit, @@ -357,7 +357,7 @@ config CRYPTO_DEV_OMAP depends on ARCH_OMAP2PLUS help OMAP processors have various crypto HW accelerators. Select this if - you want to use the OMAP modules for any of the crypto algorithms. + you want to use the OMAP modules for any of the crypto algorithms. if CRYPTO_DEV_OMAP @@ -430,7 +430,7 @@ config CRYPTO_DEV_SAHARA found in some Freescale i.MX chips. config CRYPTO_DEV_EXYNOS_RNG - tristate "EXYNOS HW pseudo random number generator support" + tristate "Exynos HW pseudo random number generator support" depends on ARCH_EXYNOS || COMPILE_TEST depends on HAS_IOMEM select CRYPTO_RNG @@ -618,6 +618,14 @@ config CRYPTO_DEV_QCE tristate "Qualcomm crypto engine accelerator" depends on ARCH_QCOM || COMPILE_TEST depends on HAS_IOMEM + help + This driver supports Qualcomm crypto engine accelerator + hardware. To compile this driver as a module, choose M here. The + module will be called qcrypto. + +config CRYPTO_DEV_QCE_SKCIPHER + bool + depends on CRYPTO_DEV_QCE select CRYPTO_AES select CRYPTO_LIB_DES select CRYPTO_ECB @@ -625,10 +633,57 @@ config CRYPTO_DEV_QCE select CRYPTO_XTS select CRYPTO_CTR select CRYPTO_SKCIPHER - help - This driver supports Qualcomm crypto engine accelerator - hardware. To compile this driver as a module, choose M here. The - module will be called qcrypto. + +config CRYPTO_DEV_QCE_SHA + bool + depends on CRYPTO_DEV_QCE + +choice + prompt "Algorithms enabled for QCE acceleration" + default CRYPTO_DEV_QCE_ENABLE_ALL + depends on CRYPTO_DEV_QCE + help + This option allows you to choose whether to build support for all algorithms + (default), hashes-only, or skciphers-only. + + The QCE engine does not appear to scale as well as the CPU to handle + multiple crypto requests. While the ipq40xx chips have 4-core CPUs, the + QCE handles only 2 requests in parallel. + + IPsec throughput seems to improve when disabling either family of + algorithms, sharing the load with the CPU. Enabling skciphers-only + appears to work best. + + config CRYPTO_DEV_QCE_ENABLE_ALL + bool "All supported algorithms" + select CRYPTO_DEV_QCE_SKCIPHER + select CRYPTO_DEV_QCE_SHA + help + Enable all supported algorithms: + - AES (CBC, CTR, ECB, XTS) + - 3DES (CBC, ECB) + - DES (CBC, ECB) + - SHA1, HMAC-SHA1 + - SHA256, HMAC-SHA256 + + config CRYPTO_DEV_QCE_ENABLE_SKCIPHER + bool "Symmetric-key ciphers only" + select CRYPTO_DEV_QCE_SKCIPHER + help + Enable symmetric-key ciphers only: + - AES (CBC, CTR, ECB, XTS) + - 3DES (ECB, CBC) + - DES (ECB, CBC) + + config CRYPTO_DEV_QCE_ENABLE_SHA + bool "Hash/HMAC only" + select CRYPTO_DEV_QCE_SHA + help + Enable hashes/HMAC algorithms only: + - SHA1, HMAC-SHA1 + - SHA256, HMAC-SHA256 + +endchoice config CRYPTO_DEV_QCOM_RNG tristate "Qualcomm Random Number Generator Driver" @@ -639,7 +694,7 @@ config CRYPTO_DEV_QCOM_RNG Generator hardware found on Qualcomm SoCs. To compile this driver as a module, choose M here.
The - module will be called qcom-rng. If unsure, say N. + module will be called qcom-rng. If unsure, say N. config CRYPTO_DEV_VMX bool "Support for VMX cryptographic acceleration instructions" @@ -716,7 +771,7 @@ source "drivers/crypto/stm32/Kconfig" config CRYPTO_DEV_SAFEXCEL tristate "Inside Secure's SafeXcel cryptographic engine driver" - depends on OF || PCI || COMPILE_TEST + depends on (OF || PCI || COMPILE_TEST) && HAS_IOMEM select CRYPTO_LIB_AES select CRYPTO_AUTHENC select CRYPTO_SKCIPHER diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c index cb2b0874f68f..7f22d305178e 100644 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c @@ -541,7 +541,6 @@ int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, break; default: dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen); - crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } op->keylen = keylen; diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c index 814cd12149a9..a2b67f7f8a81 100644 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c @@ -13,6 +13,7 @@ #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/of_device.h> #include <linux/platform_device.h> #include <crypto/scatterwalk.h> #include <linux/scatterlist.h> @@ -22,6 +23,14 @@ #include "sun4i-ss.h" +static const struct ss_variant ss_a10_variant = { + .sha1_in_be = false, +}; + +static const struct ss_variant ss_a33_variant = { + .sha1_in_be = true, +}; + static struct sun4i_ss_alg_template ss_algs[] = { { .type = CRYPTO_ALG_TYPE_AHASH, .mode = SS_OP_MD5, @@ -273,7 +282,7 @@ err_enable: return err; } -const struct dev_pm_ops sun4i_ss_pm_ops = { +static const struct dev_pm_ops sun4i_ss_pm_ops = { SET_RUNTIME_PM_OPS(sun4i_ss_pm_suspend, sun4i_ss_pm_resume, NULL) }; @@ -323,6 +332,12 @@ static int sun4i_ss_probe(struct platform_device *pdev) return PTR_ERR(ss->base); } + ss->variant = of_device_get_match_data(&pdev->dev); + if (!ss->variant) { + dev_err(&pdev->dev, "Missing Security System variant\n"); + return -EINVAL; + } + ss->ssclk = devm_clk_get(&pdev->dev, "mod"); if (IS_ERR(ss->ssclk)) { err = PTR_ERR(ss->ssclk); @@ -484,7 +499,12 @@ static int sun4i_ss_remove(struct platform_device *pdev) } static const struct of_device_id a20ss_crypto_of_match_table[] = { - { .compatible = "allwinner,sun4i-a10-crypto" }, + { .compatible = "allwinner,sun4i-a10-crypto", + .data = &ss_a10_variant + }, + { .compatible = "allwinner,sun8i-a33-crypto", + .data = &ss_a33_variant + }, {} }; MODULE_DEVICE_TABLE(of, a20ss_crypto_of_match_table); diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c index fdc0e6cdbb85..dc35edd90034 100644 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c @@ -479,7 +479,10 @@ hash_final: /* Get the hash from the device */ if (op->mode == SS_OP_SHA1) { for (i = 0; i < 5; i++) { - v = cpu_to_be32(readl(ss->base + SS_MD0 + i * 4)); + if (ss->variant->sha1_in_be) + v = cpu_to_le32(readl(ss->base + SS_MD0 + i * 4)); + else + v = cpu_to_be32(readl(ss->base + SS_MD0 + i * 4)); memcpy(areq->result + i * 4, &v, 4); } } else { diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h 
index 60425ac75d90..2b4c6333eb67 100644 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h @@ -131,7 +131,16 @@ #define SS_SEED_LEN 192 #define SS_DATA_LEN 160 +/* + * struct ss_variant - Describe SS hardware variant + * @sha1_in_be: The SHA1 digest is given by SS in BE, and so needs to be inverted. + */ +struct ss_variant { + bool sha1_in_be; +}; + struct sun4i_ss_ctx { + const struct ss_variant *variant; void __iomem *base; int irq; struct clk *busclk; diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c index 37d0b6c386a0..a5fd8975f3d3 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c @@ -144,11 +144,6 @@ static int sun8i_ce_cipher(struct skcipher_request *areq) cet->t_sym_ctl = cpu_to_le32(sym); cet->t_asym_ctl = 0; - chan->op_mode = ce->variant->op_mode[algt->ce_blockmode]; - chan->op_dir = rctx->op_dir; - chan->method = ce->variant->alg_cipher[algt->ce_algo_id]; - chan->keylen = op->keylen; - addr_key = dma_map_single(ce->dev, op->key, op->keylen, DMA_TO_DEVICE); cet->t_key = cpu_to_le32(addr_key); if (dma_mapping_error(ce->dev, addr_key)) { @@ -394,7 +389,6 @@ int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, break; default: dev_dbg(ce->dev, "ERROR: Invalid keylen %u\n", keylen); - crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } if (op->key) { diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c index 73a7649f915d..f72346a44e69 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c @@ -555,7 +555,7 @@ static int sun8i_ce_probe(struct platform_device *pdev) return -EINVAL; } - ce->base = devm_platform_ioremap_resource(pdev, 0);; + ce->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ce->base)) return PTR_ERR(ce->base); @@ -624,7 +624,7 @@ error_alg: error_irq: sun8i_ce_pm_exit(ce); error_pm: - sun8i_ce_free_chanlist(ce, MAXFLOW); + sun8i_ce_free_chanlist(ce, MAXFLOW - 1); return err; } @@ -638,7 +638,7 @@ static int sun8i_ce_remove(struct platform_device *pdev) debugfs_remove_recursive(ce->dbgfs_dir); #endif - sun8i_ce_free_chanlist(ce, MAXFLOW); + sun8i_ce_free_chanlist(ce, MAXFLOW - 1); sun8i_ce_pm_exit(ce); return 0; diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h index 43db49ceafe4..8f8404c84a4d 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h @@ -131,12 +131,8 @@ struct ce_task { * @engine: ptr to the crypto_engine for this flow * @bounce_iv: buffer which contain the IV * @ivlen: size of bounce_iv - * @keylen: keylen for this flow operation * @complete: completion for the current task on this flow * @status: set to 1 by interrupt if task is done - * @method: current method for flow - * @op_dir: direction (encrypt vs decrypt) of this flow - * @op_mode: op_mode for this flow * @t_phy: Physical address of task * @tl: pointer to the current ce_task for this flow * @stat_req: number of request done by this flow @@ -145,12 +141,8 @@ struct sun8i_ce_flow { struct crypto_engine *engine; void *bounce_iv; unsigned int ivlen; - unsigned int keylen; struct completion complete; int status; - u32 method; - u32 op_dir; - u32 op_mode; dma_addr_t t_phy; int timeout; struct ce_task *tl; diff --git
a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c index f222979a5623..84d52fc3a2da 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c @@ -390,7 +390,6 @@ int sun8i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, break; default: dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen); - crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } if (op->key) { @@ -416,7 +415,6 @@ int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, if (unlikely(keylen != 3 * DES_KEY_SIZE)) { dev_dbg(ss->dev, "Invalid keylen %u\n", keylen); - crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c index 90997cc509b8..6b301afffd11 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c @@ -595,7 +595,7 @@ error_alg: error_irq: sun8i_ss_pm_exit(ss); error_pm: - sun8i_ss_free_flows(ss, MAXFLOW); + sun8i_ss_free_flows(ss, MAXFLOW - 1); return err; } @@ -609,7 +609,7 @@ static int sun8i_ss_remove(struct platform_device *pdev) debugfs_remove_recursive(ss->dbgfs_dir); #endif - sun8i_ss_free_flows(ss, MAXFLOW); + sun8i_ss_free_flows(ss, MAXFLOW - 1); sun8i_ss_pm_exit(ss); diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c index a42f8619589d..f7fc0c464125 100644 --- a/drivers/crypto/amcc/crypto4xx_alg.c +++ b/drivers/crypto/amcc/crypto4xx_alg.c @@ -128,12 +128,9 @@ static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher, struct dynamic_sa_ctl *sa; int rc; - if (keylen != AES_KEYSIZE_256 && - keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_128) { - crypto_skcipher_set_flags(cipher, - CRYPTO_TFM_RES_BAD_KEY_LEN); + if (keylen != AES_KEYSIZE_256 && keylen != AES_KEYSIZE_192 && + keylen != AES_KEYSIZE_128) return -EINVAL; - } /* Create SA */ if (ctx->sa_in || ctx->sa_out) @@ -292,19 +289,11 @@ static int crypto4xx_sk_setup_fallback(struct crypto4xx_ctx *ctx, const u8 *key, unsigned int keylen) { - int rc; - crypto_sync_skcipher_clear_flags(ctx->sw_cipher.cipher, CRYPTO_TFM_REQ_MASK); crypto_sync_skcipher_set_flags(ctx->sw_cipher.cipher, crypto_skcipher_get_flags(cipher) & CRYPTO_TFM_REQ_MASK); - rc = crypto_sync_skcipher_setkey(ctx->sw_cipher.cipher, key, keylen); - crypto_skcipher_clear_flags(cipher, CRYPTO_TFM_RES_MASK); - crypto_skcipher_set_flags(cipher, - crypto_sync_skcipher_get_flags(ctx->sw_cipher.cipher) & - CRYPTO_TFM_RES_MASK); - - return rc; + return crypto_sync_skcipher_setkey(ctx->sw_cipher.cipher, key, keylen); } int crypto4xx_setkey_aes_ctr(struct crypto_skcipher *cipher, @@ -379,18 +368,10 @@ static int crypto4xx_aead_setup_fallback(struct crypto4xx_ctx *ctx, const u8 *key, unsigned int keylen) { - int rc; - crypto_aead_clear_flags(ctx->sw_cipher.aead, CRYPTO_TFM_REQ_MASK); crypto_aead_set_flags(ctx->sw_cipher.aead, crypto_aead_get_flags(cipher) & CRYPTO_TFM_REQ_MASK); - rc = crypto_aead_setkey(ctx->sw_cipher.aead, key, keylen); - crypto_aead_clear_flags(cipher, CRYPTO_TFM_RES_MASK); - crypto_aead_set_flags(cipher, - crypto_aead_get_flags(ctx->sw_cipher.aead) & - CRYPTO_TFM_RES_MASK); - - return rc; + return crypto_aead_setkey(ctx->sw_cipher.aead, key, keylen); } /** @@ -551,10 +532,8 @@ int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher, struct dynamic_sa_ctl *sa; int rc = 0; - if 
(crypto4xx_aes_gcm_validate_keylen(keylen) != 0) { - crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (crypto4xx_aes_gcm_validate_keylen(keylen) != 0) return -EINVAL; - } rc = crypto4xx_aead_setup_fallback(ctx, cipher, key, keylen); if (rc) diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index 7d6b695c4ab3..981de43ea5e2 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c @@ -169,7 +169,7 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev) int i; dev->pdr = dma_alloc_coherent(dev->core_dev->device, sizeof(struct ce_pd) * PPC4XX_NUM_PD, - &dev->pdr_pa, GFP_ATOMIC); + &dev->pdr_pa, GFP_KERNEL); if (!dev->pdr) return -ENOMEM; @@ -185,13 +185,13 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev) dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device, sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD, &dev->shadow_sa_pool_pa, - GFP_ATOMIC); + GFP_KERNEL); if (!dev->shadow_sa_pool) return -ENOMEM; dev->shadow_sr_pool = dma_alloc_coherent(dev->core_dev->device, sizeof(struct sa_state_record) * PPC4XX_NUM_PD, - &dev->shadow_sr_pool_pa, GFP_ATOMIC); + &dev->shadow_sr_pool_pa, GFP_KERNEL); if (!dev->shadow_sr_pool) return -ENOMEM; for (i = 0; i < PPC4XX_NUM_PD; i++) { @@ -277,7 +277,7 @@ static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev) { dev->gdr = dma_alloc_coherent(dev->core_dev->device, sizeof(struct ce_gd) * PPC4XX_NUM_GD, - &dev->gdr_pa, GFP_ATOMIC); + &dev->gdr_pa, GFP_KERNEL); if (!dev->gdr) return -ENOMEM; @@ -286,7 +286,8 @@ static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev) static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev) { - dma_free_coherent(dev->core_dev->device, + if (dev->gdr) + dma_free_coherent(dev->core_dev->device, sizeof(struct ce_gd) * PPC4XX_NUM_GD, dev->gdr, dev->gdr_pa); } @@ -354,20 +355,20 @@ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev) { int i; - /* alloc memory for scatter descriptor ring */ - dev->sdr = dma_alloc_coherent(dev->core_dev->device, - sizeof(struct ce_sd) * PPC4XX_NUM_SD, - &dev->sdr_pa, GFP_ATOMIC); - if (!dev->sdr) - return -ENOMEM; - dev->scatter_buffer_va = dma_alloc_coherent(dev->core_dev->device, PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD, - &dev->scatter_buffer_pa, GFP_ATOMIC); + &dev->scatter_buffer_pa, GFP_KERNEL); if (!dev->scatter_buffer_va) return -ENOMEM; + /* alloc memory for scatter descriptor ring */ + dev->sdr = dma_alloc_coherent(dev->core_dev->device, + sizeof(struct ce_sd) * PPC4XX_NUM_SD, + &dev->sdr_pa, GFP_KERNEL); + if (!dev->sdr) + return -ENOMEM; + for (i = 0; i < PPC4XX_NUM_SD; i++) { dev->sdr[i].ptr = dev->scatter_buffer_pa + PPC4XX_SD_BUFFER_SIZE * i; @@ -1439,16 +1440,15 @@ static int crypto4xx_probe(struct platform_device *ofdev) spin_lock_init(&core_dev->lock); INIT_LIST_HEAD(&core_dev->dev->alg_list); ratelimit_default_init(&core_dev->dev->aead_ratelimit); + rc = crypto4xx_build_sdr(core_dev->dev); + if (rc) + goto err_build_sdr; rc = crypto4xx_build_pdr(core_dev->dev); if (rc) - goto err_build_pdr; + goto err_build_sdr; rc = crypto4xx_build_gdr(core_dev->dev); if (rc) - goto err_build_pdr; - - rc = crypto4xx_build_sdr(core_dev->dev); - if (rc) goto err_build_sdr; /* Init tasklet for bottom half processing */ @@ -1493,7 +1493,6 @@ err_iomap: err_build_sdr: crypto4xx_destroy_sdr(core_dev->dev); crypto4xx_destroy_gdr(core_dev->dev); -err_build_pdr: crypto4xx_destroy_pdr(core_dev->dev); kfree(core_dev->dev); err_alloc_dev: diff --git 
a/drivers/crypto/amlogic/Kconfig b/drivers/crypto/amlogic/Kconfig index b90850d18965..cf9547602670 100644 --- a/drivers/crypto/amlogic/Kconfig +++ b/drivers/crypto/amlogic/Kconfig @@ -1,5 +1,6 @@ config CRYPTO_DEV_AMLOGIC_GXL tristate "Support for amlogic cryptographic offloader" + depends on HAS_IOMEM default y if ARCH_MESON select CRYPTO_SKCIPHER select CRYPTO_ENGINE diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c index e589015aac1c..9819dd50fbad 100644 --- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c +++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c @@ -366,7 +366,6 @@ int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, break; default: dev_dbg(mc->dev, "ERROR: Invalid keylen %u\n", keylen); - crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } if (op->key) { diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c index fa05fce1c0de..9d4ead2f7ebb 100644 --- a/drivers/crypto/amlogic/amlogic-gxl-core.c +++ b/drivers/crypto/amlogic/amlogic-gxl-core.c @@ -289,7 +289,7 @@ static int meson_crypto_probe(struct platform_device *pdev) error_alg: meson_unregister_algs(mc); error_flow: - meson_free_chanlist(mc, MAXFLOW); + meson_free_chanlist(mc, MAXFLOW - 1); clk_disable_unprepare(mc->busclk); return err; } @@ -304,7 +304,7 @@ static int meson_crypto_remove(struct platform_device *pdev) meson_unregister_algs(mc); - meson_free_chanlist(mc, MAXFLOW); + meson_free_chanlist(mc, MAXFLOW - 1); clk_disable_unprepare(mc->busclk); return 0; diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index 91092504bc96..a6e14491e080 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -21,6 +21,7 @@ #include <linux/platform_device.h> #include <linux/device.h> +#include <linux/dmaengine.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/interrupt.h> @@ -37,8 +38,6 @@ #include <crypto/xts.h> #include <crypto/internal/aead.h> #include <crypto/internal/skcipher.h> -#include <linux/platform_data/crypto-atmel.h> -#include <dt-bindings/dma/at91.h> #include "atmel-aes-regs.h" #include "atmel-authenc.h" @@ -89,7 +88,6 @@ struct atmel_aes_caps { bool has_dualbuff; bool has_cfb64; - bool has_ctr32; bool has_gcm; bool has_xts; bool has_authenc; @@ -122,6 +120,7 @@ struct atmel_aes_ctr_ctx { size_t offset; struct scatterlist src[2]; struct scatterlist dst[2]; + u32 blocks; }; struct atmel_aes_gcm_ctx { @@ -514,8 +513,37 @@ static void atmel_aes_set_iv_as_last_ciphertext_block(struct atmel_aes_dev *dd) } } +static inline struct atmel_aes_ctr_ctx * +atmel_aes_ctr_ctx_cast(struct atmel_aes_base_ctx *ctx) +{ + return container_of(ctx, struct atmel_aes_ctr_ctx, base); +} + +static void atmel_aes_ctr_update_req_iv(struct atmel_aes_dev *dd) +{ + struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx); + struct skcipher_request *req = skcipher_request_cast(dd->areq); + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + unsigned int ivsize = crypto_skcipher_ivsize(skcipher); + int i; + + /* + * The CTR transfer works in fragments of at most 1 MByte + * because of the 16-bit CTR counter embedded in the IP. When reaching + * here, ctx->blocks contains the number of blocks of the last fragment + * processed, so there is no need to explicitly cast it to u16.
+ */ + for (i = 0; i < ctx->blocks; i++) + crypto_inc((u8 *)ctx->iv, AES_BLOCK_SIZE); + + memcpy(req->iv, ctx->iv, ivsize); +} + static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err) { + struct skcipher_request *req = skcipher_request_cast(dd->areq); + struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req); + #if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC) if (dd->ctx->is_aead) atmel_aes_authenc_complete(dd, err); @@ -524,8 +552,13 @@ static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err) clk_disable(dd->iclk); dd->flags &= ~AES_FLAGS_BUSY; - if (!dd->ctx->is_aead) - atmel_aes_set_iv_as_last_ciphertext_block(dd); + if (!err && !dd->ctx->is_aead && + (rctx->mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_ECB) { + if ((rctx->mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_CTR) + atmel_aes_set_iv_as_last_ciphertext_block(dd); + else + atmel_aes_ctr_update_req_iv(dd); + } if (dd->is_async) dd->areq->complete(dd->areq, err); @@ -790,7 +823,6 @@ static int atmel_aes_dma_transfer_start(struct atmel_aes_dev *dd, int err; memset(&config, 0, sizeof(config)); - config.direction = dir; config.src_addr_width = addr_width; config.dst_addr_width = addr_width; config.src_maxburst = maxburst; @@ -830,27 +862,6 @@ static int atmel_aes_dma_transfer_start(struct atmel_aes_dev *dd, return 0; } -static void atmel_aes_dma_transfer_stop(struct atmel_aes_dev *dd, - enum dma_transfer_direction dir) -{ - struct atmel_aes_dma *dma; - - switch (dir) { - case DMA_MEM_TO_DEV: - dma = &dd->src; - break; - - case DMA_DEV_TO_MEM: - dma = &dd->dst; - break; - - default: - return; - } - - dmaengine_terminate_all(dma->chan); -} - static int atmel_aes_dma_start(struct atmel_aes_dev *dd, struct scatterlist *src, struct scatterlist *dst, @@ -909,25 +920,18 @@ static int atmel_aes_dma_start(struct atmel_aes_dev *dd, return -EINPROGRESS; output_transfer_stop: - atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM); + dmaengine_terminate_sync(dd->dst.chan); unmap: atmel_aes_unmap(dd); exit: return atmel_aes_complete(dd, err); } -static void atmel_aes_dma_stop(struct atmel_aes_dev *dd) -{ - atmel_aes_dma_transfer_stop(dd, DMA_MEM_TO_DEV); - atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM); - atmel_aes_unmap(dd); -} - static void atmel_aes_dma_callback(void *data) { struct atmel_aes_dev *dd = data; - atmel_aes_dma_stop(dd); + atmel_aes_unmap(dd); dd->is_async = true; (void)dd->resume(dd); } @@ -1004,19 +1008,14 @@ static int atmel_aes_start(struct atmel_aes_dev *dd) atmel_aes_transfer_complete); } -static inline struct atmel_aes_ctr_ctx * -atmel_aes_ctr_ctx_cast(struct atmel_aes_base_ctx *ctx) -{ - return container_of(ctx, struct atmel_aes_ctr_ctx, base); -} - static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd) { struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx); struct skcipher_request *req = skcipher_request_cast(dd->areq); struct scatterlist *src, *dst; - u32 ctr, blocks; size_t datalen; + u32 ctr; + u16 start, end; bool use_dma, fragmented = false; /* Check for transfer completion. */ @@ -1026,29 +1025,19 @@ static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd) /* Compute data length. */ datalen = req->cryptlen - ctx->offset; - blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE); + ctx->blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE); ctr = be32_to_cpu(ctx->iv[3]); - if (dd->caps.has_ctr32) { - /* Check 32bit counter overflow. 
*/ - u32 start = ctr; - u32 end = start + blocks - 1; - - if (end < start) { - ctr |= 0xffffffff; - datalen = AES_BLOCK_SIZE * -start; - fragmented = true; - } - } else { - /* Check 16bit counter overflow. */ - u16 start = ctr & 0xffff; - u16 end = start + (u16)blocks - 1; - - if (blocks >> 16 || end < start) { - ctr |= 0xffff; - datalen = AES_BLOCK_SIZE * (0x10000-start); - fragmented = true; - } + + /* Check 16bit counter overflow. */ + start = ctr & 0xffff; + end = start + ctx->blocks - 1; + + if (ctx->blocks >> 16 || end < start) { + ctr |= 0xffff; + datalen = AES_BLOCK_SIZE * (0x10000 - start); + fragmented = true; } + use_dma = (datalen >= ATMEL_AES_DMA_THRESHOLD); /* Jump to offset. */ @@ -1131,7 +1120,8 @@ static int atmel_aes_crypt(struct skcipher_request *req, unsigned long mode) rctx = skcipher_request_ctx(req); rctx->mode = mode; - if (!(mode & AES_FLAGS_ENCRYPT) && (req->src == req->dst)) { + if ((mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_ECB && + !(mode & AES_FLAGS_ENCRYPT) && req->src == req->dst) { unsigned int ivsize = crypto_skcipher_ivsize(skcipher); if (req->cryptlen >= ivsize) @@ -1150,10 +1140,8 @@ static int atmel_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && - keylen != AES_KEYSIZE_256) { - crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + keylen != AES_KEYSIZE_256) return -EINVAL; - } memcpy(ctx->key, key, keylen); ctx->keylen = keylen; @@ -1275,12 +1263,8 @@ static struct skcipher_alg aes_algs[] = { { .base.cra_name = "ecb(aes)", .base.cra_driver_name = "atmel-ecb-aes", - .base.cra_priority = ATMEL_AES_PRIORITY, - .base.cra_flags = CRYPTO_ALG_ASYNC, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct atmel_aes_ctx), - .base.cra_alignmask = 0xf, - .base.cra_module = THIS_MODULE, .init = atmel_aes_init_tfm, .min_keysize = AES_MIN_KEY_SIZE, @@ -1292,12 +1276,8 @@ static struct skcipher_alg aes_algs[] = { { .base.cra_name = "cbc(aes)", .base.cra_driver_name = "atmel-cbc-aes", - .base.cra_priority = ATMEL_AES_PRIORITY, - .base.cra_flags = CRYPTO_ALG_ASYNC, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct atmel_aes_ctx), - .base.cra_alignmask = 0xf, - .base.cra_module = THIS_MODULE, .init = atmel_aes_init_tfm, .min_keysize = AES_MIN_KEY_SIZE, @@ -1310,12 +1290,8 @@ static struct skcipher_alg aes_algs[] = { { .base.cra_name = "ofb(aes)", .base.cra_driver_name = "atmel-ofb-aes", - .base.cra_priority = ATMEL_AES_PRIORITY, - .base.cra_flags = CRYPTO_ALG_ASYNC, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct atmel_aes_ctx), - .base.cra_alignmask = 0xf, - .base.cra_module = THIS_MODULE, .init = atmel_aes_init_tfm, .min_keysize = AES_MIN_KEY_SIZE, @@ -1328,12 +1304,8 @@ static struct skcipher_alg aes_algs[] = { { .base.cra_name = "cfb(aes)", .base.cra_driver_name = "atmel-cfb-aes", - .base.cra_priority = ATMEL_AES_PRIORITY, - .base.cra_flags = CRYPTO_ALG_ASYNC, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct atmel_aes_ctx), - .base.cra_alignmask = 0xf, - .base.cra_module = THIS_MODULE, .init = atmel_aes_init_tfm, .min_keysize = AES_MIN_KEY_SIZE, @@ -1346,12 +1318,8 @@ static struct skcipher_alg aes_algs[] = { { .base.cra_name = "cfb32(aes)", .base.cra_driver_name = "atmel-cfb32-aes", - .base.cra_priority = ATMEL_AES_PRIORITY, - .base.cra_flags = CRYPTO_ALG_ASYNC, .base.cra_blocksize = CFB32_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct atmel_aes_ctx), - .base.cra_alignmask = 0xf, - .base.cra_module = 
THIS_MODULE, .init = atmel_aes_init_tfm, .min_keysize = AES_MIN_KEY_SIZE, @@ -1364,12 +1332,8 @@ static struct skcipher_alg aes_algs[] = { { .base.cra_name = "cfb16(aes)", .base.cra_driver_name = "atmel-cfb16-aes", - .base.cra_priority = ATMEL_AES_PRIORITY, - .base.cra_flags = CRYPTO_ALG_ASYNC, .base.cra_blocksize = CFB16_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct atmel_aes_ctx), - .base.cra_alignmask = 0xf, - .base.cra_module = THIS_MODULE, .init = atmel_aes_init_tfm, .min_keysize = AES_MIN_KEY_SIZE, @@ -1382,12 +1346,8 @@ static struct skcipher_alg aes_algs[] = { { .base.cra_name = "cfb8(aes)", .base.cra_driver_name = "atmel-cfb8-aes", - .base.cra_priority = ATMEL_AES_PRIORITY, - .base.cra_flags = CRYPTO_ALG_ASYNC, .base.cra_blocksize = CFB8_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct atmel_aes_ctx), - .base.cra_alignmask = 0xf, - .base.cra_module = THIS_MODULE, .init = atmel_aes_init_tfm, .min_keysize = AES_MIN_KEY_SIZE, @@ -1400,12 +1360,8 @@ static struct skcipher_alg aes_algs[] = { { .base.cra_name = "ctr(aes)", .base.cra_driver_name = "atmel-ctr-aes", - .base.cra_priority = ATMEL_AES_PRIORITY, - .base.cra_flags = CRYPTO_ALG_ASYNC, .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct atmel_aes_ctr_ctx), - .base.cra_alignmask = 0xf, - .base.cra_module = THIS_MODULE, .init = atmel_aes_ctr_init_tfm, .min_keysize = AES_MIN_KEY_SIZE, @@ -1420,12 +1376,8 @@ static struct skcipher_alg aes_algs[] = { static struct skcipher_alg aes_cfb64_alg = { .base.cra_name = "cfb64(aes)", .base.cra_driver_name = "atmel-cfb64-aes", - .base.cra_priority = ATMEL_AES_PRIORITY, - .base.cra_flags = CRYPTO_ALG_ASYNC, .base.cra_blocksize = CFB64_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct atmel_aes_ctx), - .base.cra_alignmask = 0xf, - .base.cra_module = THIS_MODULE, .init = atmel_aes_init_tfm, .min_keysize = AES_MIN_KEY_SIZE, @@ -1762,10 +1714,8 @@ static int atmel_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key, if (keylen != AES_KEYSIZE_256 && keylen != AES_KEYSIZE_192 && - keylen != AES_KEYSIZE_128) { - crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + keylen != AES_KEYSIZE_128) return -EINVAL; - } memcpy(ctx->key, key, keylen); ctx->keylen = keylen; @@ -1776,21 +1726,7 @@ static int atmel_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key, static int atmel_aes_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { - /* Same as crypto_gcm_authsize() from crypto/gcm.c */ - switch (authsize) { - case 4: - case 8: - case 12: - case 13: - case 14: - case 15: - case 16: - break; - default: - return -EINVAL; - } - - return 0; + return crypto_gcm_check_authsize(authsize); } static int atmel_aes_gcm_encrypt(struct aead_request *req) @@ -1825,12 +1761,8 @@ static struct aead_alg aes_gcm_alg = { .base = { .cra_name = "gcm(aes)", .cra_driver_name = "atmel-gcm-aes", - .cra_priority = ATMEL_AES_PRIORITY, - .cra_flags = CRYPTO_ALG_ASYNC, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct atmel_aes_gcm_ctx), - .cra_alignmask = 0xf, - .cra_module = THIS_MODULE, }, }; @@ -1947,12 +1879,8 @@ static int atmel_aes_xts_init_tfm(struct crypto_skcipher *tfm) static struct skcipher_alg aes_xts_alg = { .base.cra_name = "xts(aes)", .base.cra_driver_name = "atmel-xts-aes", - .base.cra_priority = ATMEL_AES_PRIORITY, - .base.cra_flags = CRYPTO_ALG_ASYNC, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct atmel_aes_xts_ctx), - .base.cra_alignmask = 0xf, - .base.cra_module = THIS_MODULE, .min_keysize = 2 * AES_MIN_KEY_SIZE, .max_keysize = 2 * AES_MAX_KEY_SIZE, @@ -2113,7 +2041,6 @@ 
static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key, { struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm); struct crypto_authenc_keys keys; - u32 flags; int err; if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) @@ -2123,11 +2050,9 @@ static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key, goto badkey; /* Save auth key. */ - flags = crypto_aead_get_flags(tfm); err = atmel_sha_authenc_setkey(ctx->auth, keys.authkey, keys.authkeylen, - &flags); - crypto_aead_set_flags(tfm, flags & CRYPTO_TFM_RES_MASK); + crypto_aead_get_flags(tfm)); if (err) { memzero_explicit(&keys, sizeof(keys)); return err; @@ -2141,7 +2066,6 @@ static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key, return 0; badkey: - crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); memzero_explicit(&keys, sizeof(keys)); return -EINVAL; } @@ -2252,12 +2176,8 @@ static struct aead_alg aes_authenc_algs[] = { .base = { .cra_name = "authenc(hmac(sha1),cbc(aes))", .cra_driver_name = "atmel-authenc-hmac-sha1-cbc-aes", - .cra_priority = ATMEL_AES_PRIORITY, - .cra_flags = CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx), - .cra_alignmask = 0xf, - .cra_module = THIS_MODULE, }, }, { @@ -2272,12 +2192,8 @@ static struct aead_alg aes_authenc_algs[] = { .base = { .cra_name = "authenc(hmac(sha224),cbc(aes))", .cra_driver_name = "atmel-authenc-hmac-sha224-cbc-aes", - .cra_priority = ATMEL_AES_PRIORITY, - .cra_flags = CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx), - .cra_alignmask = 0xf, - .cra_module = THIS_MODULE, }, }, { @@ -2292,12 +2208,8 @@ static struct aead_alg aes_authenc_algs[] = { .base = { .cra_name = "authenc(hmac(sha256),cbc(aes))", .cra_driver_name = "atmel-authenc-hmac-sha256-cbc-aes", - .cra_priority = ATMEL_AES_PRIORITY, - .cra_flags = CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx), - .cra_alignmask = 0xf, - .cra_module = THIS_MODULE, }, }, { @@ -2312,12 +2224,8 @@ static struct aead_alg aes_authenc_algs[] = { .base = { .cra_name = "authenc(hmac(sha384),cbc(aes))", .cra_driver_name = "atmel-authenc-hmac-sha384-cbc-aes", - .cra_priority = ATMEL_AES_PRIORITY, - .cra_flags = CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx), - .cra_alignmask = 0xf, - .cra_module = THIS_MODULE, }, }, { @@ -2332,12 +2240,8 @@ static struct aead_alg aes_authenc_algs[] = { .base = { .cra_name = "authenc(hmac(sha512),cbc(aes))", .cra_driver_name = "atmel-authenc-hmac-sha512-cbc-aes", - .cra_priority = ATMEL_AES_PRIORITY, - .cra_flags = CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx), - .cra_alignmask = 0xf, - .cra_module = THIS_MODULE, }, }, }; @@ -2364,47 +2268,30 @@ static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd) free_page((unsigned long)dd->buf); } -static bool atmel_aes_filter(struct dma_chan *chan, void *slave) +static int atmel_aes_dma_init(struct atmel_aes_dev *dd) { - struct at_dma_slave *sl = slave; - - if (sl && sl->dma_dev == chan->device->dev) { - chan->private = sl; - return true; - } else { - return false; - } -} - -static int atmel_aes_dma_init(struct atmel_aes_dev *dd, - struct crypto_platform_data *pdata) -{ - struct at_dma_slave *slave; - dma_cap_mask_t mask; - - dma_cap_zero(mask); - dma_cap_set(DMA_SLAVE, mask); + int ret; /* Try to grab 2 DMA channels */ - slave = 
&pdata->dma_slave->rxdata; - dd->src.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter, - slave, dd->dev, "tx"); - if (!dd->src.chan) + dd->src.chan = dma_request_chan(dd->dev, "tx"); + if (IS_ERR(dd->src.chan)) { + ret = PTR_ERR(dd->src.chan); goto err_dma_in; + } - slave = &pdata->dma_slave->txdata; - dd->dst.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter, - slave, dd->dev, "rx"); - if (!dd->dst.chan) + dd->dst.chan = dma_request_chan(dd->dev, "rx"); + if (IS_ERR(dd->dst.chan)) { + ret = PTR_ERR(dd->dst.chan); goto err_dma_out; + } return 0; err_dma_out: dma_release_channel(dd->src.chan); err_dma_in: - dev_warn(dd->dev, "no DMA channel available\n"); - return -ENODEV; + dev_err(dd->dev, "no DMA channel available\n"); + return ret; } static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd) @@ -2469,29 +2356,45 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd) crypto_unregister_skcipher(&aes_algs[i]); } +static void atmel_aes_crypto_alg_init(struct crypto_alg *alg) +{ + alg->cra_flags = CRYPTO_ALG_ASYNC; + alg->cra_alignmask = 0xf; + alg->cra_priority = ATMEL_AES_PRIORITY; + alg->cra_module = THIS_MODULE; +} + static int atmel_aes_register_algs(struct atmel_aes_dev *dd) { int err, i, j; for (i = 0; i < ARRAY_SIZE(aes_algs); i++) { + atmel_aes_crypto_alg_init(&aes_algs[i].base); + err = crypto_register_skcipher(&aes_algs[i]); if (err) goto err_aes_algs; } if (dd->caps.has_cfb64) { + atmel_aes_crypto_alg_init(&aes_cfb64_alg.base); + err = crypto_register_skcipher(&aes_cfb64_alg); if (err) goto err_aes_cfb64_alg; } if (dd->caps.has_gcm) { + atmel_aes_crypto_alg_init(&aes_gcm_alg.base); + err = crypto_register_aead(&aes_gcm_alg); if (err) goto err_aes_gcm_alg; } if (dd->caps.has_xts) { + atmel_aes_crypto_alg_init(&aes_xts_alg.base); + err = crypto_register_skcipher(&aes_xts_alg); if (err) goto err_aes_xts_alg; @@ -2500,6 +2403,8 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd) #if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC) if (dd->caps.has_authenc) { for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++) { + atmel_aes_crypto_alg_init(&aes_authenc_algs[i].base); + err = crypto_register_aead(&aes_authenc_algs[i]); if (err) goto err_aes_authenc_alg; @@ -2533,7 +2438,6 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd) { dd->caps.has_dualbuff = 0; dd->caps.has_cfb64 = 0; - dd->caps.has_ctr32 = 0; dd->caps.has_gcm = 0; dd->caps.has_xts = 0; dd->caps.has_authenc = 0; @@ -2544,7 +2448,6 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd) case 0x500: dd->caps.has_dualbuff = 1; dd->caps.has_cfb64 = 1; - dd->caps.has_ctr32 = 1; dd->caps.has_gcm = 1; dd->caps.has_xts = 1; dd->caps.has_authenc = 1; @@ -2553,7 +2456,6 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd) case 0x200: dd->caps.has_dualbuff = 1; dd->caps.has_cfb64 = 1; - dd->caps.has_ctr32 = 1; dd->caps.has_gcm = 1; dd->caps.max_burst_size = 4; break; @@ -2577,65 +2479,18 @@ static const struct of_device_id atmel_aes_dt_ids[] = { { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids); - -static struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev) -{ - struct device_node *np = pdev->dev.of_node; - struct crypto_platform_data *pdata; - - if (!np) { - dev_err(&pdev->dev, "device node not found\n"); - return ERR_PTR(-EINVAL); - } - - pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); - if (!pdata) - return ERR_PTR(-ENOMEM); - - pdata->dma_slave = devm_kzalloc(&pdev->dev, - sizeof(*(pdata->dma_slave)), - GFP_KERNEL); - if 
(!pdata->dma_slave) { - devm_kfree(&pdev->dev, pdata); - return ERR_PTR(-ENOMEM); - } - - return pdata; -} -#else -static inline struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev) -{ - return ERR_PTR(-EINVAL); -} #endif static int atmel_aes_probe(struct platform_device *pdev) { struct atmel_aes_dev *aes_dd; - struct crypto_platform_data *pdata; struct device *dev = &pdev->dev; struct resource *aes_res; int err; - pdata = pdev->dev.platform_data; - if (!pdata) { - pdata = atmel_aes_of_init(pdev); - if (IS_ERR(pdata)) { - err = PTR_ERR(pdata); - goto aes_dd_err; - } - } - - if (!pdata->dma_slave) { - err = -ENXIO; - goto aes_dd_err; - } - aes_dd = devm_kzalloc(&pdev->dev, sizeof(*aes_dd), GFP_KERNEL); - if (aes_dd == NULL) { - err = -ENOMEM; - goto aes_dd_err; - } + if (!aes_dd) + return -ENOMEM; aes_dd->dev = dev; @@ -2656,7 +2511,7 @@ static int atmel_aes_probe(struct platform_device *pdev) if (!aes_res) { dev_err(dev, "no MEM resource info\n"); err = -ENODEV; - goto res_err; + goto err_tasklet_kill; } aes_dd->phys_base = aes_res->start; @@ -2664,14 +2519,14 @@ static int atmel_aes_probe(struct platform_device *pdev) aes_dd->irq = platform_get_irq(pdev, 0); if (aes_dd->irq < 0) { err = aes_dd->irq; - goto res_err; + goto err_tasklet_kill; } err = devm_request_irq(&pdev->dev, aes_dd->irq, atmel_aes_irq, IRQF_SHARED, "atmel-aes", aes_dd); if (err) { dev_err(dev, "unable to request aes irq.\n"); - goto res_err; + goto err_tasklet_kill; } /* Initializing the clock */ @@ -2679,40 +2534,40 @@ static int atmel_aes_probe(struct platform_device *pdev) if (IS_ERR(aes_dd->iclk)) { dev_err(dev, "clock initialization failed.\n"); err = PTR_ERR(aes_dd->iclk); - goto res_err; + goto err_tasklet_kill; } aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res); if (IS_ERR(aes_dd->io_base)) { dev_err(dev, "can't ioremap\n"); err = PTR_ERR(aes_dd->io_base); - goto res_err; + goto err_tasklet_kill; } err = clk_prepare(aes_dd->iclk); if (err) - goto res_err; + goto err_tasklet_kill; err = atmel_aes_hw_version_init(aes_dd); if (err) - goto iclk_unprepare; + goto err_iclk_unprepare; atmel_aes_get_cap(aes_dd); #if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC) if (aes_dd->caps.has_authenc && !atmel_sha_authenc_is_ready()) { err = -EPROBE_DEFER; - goto iclk_unprepare; + goto err_iclk_unprepare; } #endif err = atmel_aes_buff_init(aes_dd); if (err) - goto err_aes_buff; + goto err_iclk_unprepare; - err = atmel_aes_dma_init(aes_dd, pdata); + err = atmel_aes_dma_init(aes_dd); if (err) - goto err_aes_dma; + goto err_buff_cleanup; spin_lock(&atmel_aes.lock); list_add_tail(&aes_dd->list, &atmel_aes.dev_list); @@ -2733,17 +2588,13 @@ err_algs: list_del(&aes_dd->list); spin_unlock(&atmel_aes.lock); atmel_aes_dma_cleanup(aes_dd); -err_aes_dma: +err_buff_cleanup: atmel_aes_buff_cleanup(aes_dd); -err_aes_buff: -iclk_unprepare: +err_iclk_unprepare: clk_unprepare(aes_dd->iclk); -res_err: +err_tasklet_kill: tasklet_kill(&aes_dd->done_task); tasklet_kill(&aes_dd->queue_task); -aes_dd_err: - if (err != -EPROBE_DEFER) - dev_err(dev, "initialization failed.\n"); return err; } diff --git a/drivers/crypto/atmel-authenc.h b/drivers/crypto/atmel-authenc.h index d6de810df44f..c6530a1c8c20 100644 --- a/drivers/crypto/atmel-authenc.h +++ b/drivers/crypto/atmel-authenc.h @@ -30,8 +30,7 @@ unsigned int atmel_sha_authenc_get_reqsize(void); struct atmel_sha_authenc_ctx *atmel_sha_authenc_spawn(unsigned long mode); void atmel_sha_authenc_free(struct atmel_sha_authenc_ctx *auth); int atmel_sha_authenc_setkey(struct 
atmel_sha_authenc_ctx *auth, - const u8 *key, unsigned int keylen, - u32 *flags); + const u8 *key, unsigned int keylen, u32 flags); int atmel_sha_authenc_schedule(struct ahash_request *req, struct atmel_sha_authenc_ctx *auth, diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index 8ea0e4bcde0d..e536e2a6bbd8 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c @@ -21,6 +21,7 @@ #include <linux/platform_device.h> #include <linux/device.h> +#include <linux/dmaengine.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/interrupt.h> @@ -36,10 +37,11 @@ #include <crypto/sha.h> #include <crypto/hash.h> #include <crypto/internal/hash.h> -#include <linux/platform_data/crypto-atmel.h> #include "atmel-sha-regs.h" #include "atmel-authenc.h" +#define ATMEL_SHA_PRIORITY 300 + /* SHA flags */ #define SHA_FLAGS_BUSY BIT(0) #define SHA_FLAGS_FINAL BIT(1) @@ -134,7 +136,6 @@ struct atmel_sha_dev { void __iomem *io_base; spinlock_t lock; - int err; struct tasklet_struct done_task; struct tasklet_struct queue_task; @@ -851,7 +852,7 @@ static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd) 0, final); } -static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd) +static void atmel_sha_update_dma_stop(struct atmel_sha_dev *dd) { struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); @@ -870,8 +871,6 @@ static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd) dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen + ctx->block_size, DMA_TO_DEVICE); } - - return 0; } static int atmel_sha_update_req(struct atmel_sha_dev *dd) @@ -1025,7 +1024,6 @@ static int atmel_sha_hw_init(struct atmel_sha_dev *dd) if (!(SHA_FLAGS_INIT & dd->flags)) { atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST); dd->flags |= SHA_FLAGS_INIT; - dd->err = 0; } return 0; @@ -1036,9 +1034,13 @@ static inline unsigned int atmel_sha_get_version(struct atmel_sha_dev *dd) return atmel_sha_read(dd, SHA_HW_VERSION) & 0x00000fff; } -static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd) +static int atmel_sha_hw_version_init(struct atmel_sha_dev *dd) { - atmel_sha_hw_init(dd); + int err; + + err = atmel_sha_hw_init(dd); + if (err) + return err; dd->hw_version = atmel_sha_get_version(dd); @@ -1046,6 +1048,8 @@ static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd) "version: 0x%x\n", dd->hw_version); clk_disable(dd->iclk); + + return 0; } static int atmel_sha_handle_queue(struct atmel_sha_dev *dd, @@ -1248,130 +1252,66 @@ static int atmel_sha_cra_init(struct crypto_tfm *tfm) return 0; } +static void atmel_sha_alg_init(struct ahash_alg *alg) +{ + alg->halg.base.cra_priority = ATMEL_SHA_PRIORITY; + alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC; + alg->halg.base.cra_ctxsize = sizeof(struct atmel_sha_ctx); + alg->halg.base.cra_module = THIS_MODULE; + alg->halg.base.cra_init = atmel_sha_cra_init; + + alg->halg.statesize = sizeof(struct atmel_sha_reqctx); + + alg->init = atmel_sha_init; + alg->update = atmel_sha_update; + alg->final = atmel_sha_final; + alg->finup = atmel_sha_finup; + alg->digest = atmel_sha_digest; + alg->export = atmel_sha_export; + alg->import = atmel_sha_import; +} + static struct ahash_alg sha_1_256_algs[] = { { - .init = atmel_sha_init, - .update = atmel_sha_update, - .final = atmel_sha_final, - .finup = atmel_sha_finup, - .digest = atmel_sha_digest, - .export = atmel_sha_export, - .import = atmel_sha_import, - .halg = { - .digestsize = SHA1_DIGEST_SIZE, - .statesize = sizeof(struct atmel_sha_reqctx), - .base = { - .cra_name = "sha1", - .cra_driver_name = 
"atmel-sha1", - .cra_priority = 100, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = SHA1_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct atmel_sha_ctx), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - .cra_init = atmel_sha_cra_init, - } - } + .halg.base.cra_name = "sha1", + .halg.base.cra_driver_name = "atmel-sha1", + .halg.base.cra_blocksize = SHA1_BLOCK_SIZE, + + .halg.digestsize = SHA1_DIGEST_SIZE, }, { - .init = atmel_sha_init, - .update = atmel_sha_update, - .final = atmel_sha_final, - .finup = atmel_sha_finup, - .digest = atmel_sha_digest, - .export = atmel_sha_export, - .import = atmel_sha_import, - .halg = { - .digestsize = SHA256_DIGEST_SIZE, - .statesize = sizeof(struct atmel_sha_reqctx), - .base = { - .cra_name = "sha256", - .cra_driver_name = "atmel-sha256", - .cra_priority = 100, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct atmel_sha_ctx), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - .cra_init = atmel_sha_cra_init, - } - } + .halg.base.cra_name = "sha256", + .halg.base.cra_driver_name = "atmel-sha256", + .halg.base.cra_blocksize = SHA256_BLOCK_SIZE, + + .halg.digestsize = SHA256_DIGEST_SIZE, }, }; static struct ahash_alg sha_224_alg = { - .init = atmel_sha_init, - .update = atmel_sha_update, - .final = atmel_sha_final, - .finup = atmel_sha_finup, - .digest = atmel_sha_digest, - .export = atmel_sha_export, - .import = atmel_sha_import, - .halg = { - .digestsize = SHA224_DIGEST_SIZE, - .statesize = sizeof(struct atmel_sha_reqctx), - .base = { - .cra_name = "sha224", - .cra_driver_name = "atmel-sha224", - .cra_priority = 100, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = SHA224_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct atmel_sha_ctx), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - .cra_init = atmel_sha_cra_init, - } - } + .halg.base.cra_name = "sha224", + .halg.base.cra_driver_name = "atmel-sha224", + .halg.base.cra_blocksize = SHA224_BLOCK_SIZE, + + .halg.digestsize = SHA224_DIGEST_SIZE, }; static struct ahash_alg sha_384_512_algs[] = { { - .init = atmel_sha_init, - .update = atmel_sha_update, - .final = atmel_sha_final, - .finup = atmel_sha_finup, - .digest = atmel_sha_digest, - .export = atmel_sha_export, - .import = atmel_sha_import, - .halg = { - .digestsize = SHA384_DIGEST_SIZE, - .statesize = sizeof(struct atmel_sha_reqctx), - .base = { - .cra_name = "sha384", - .cra_driver_name = "atmel-sha384", - .cra_priority = 100, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = SHA384_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct atmel_sha_ctx), - .cra_alignmask = 0x3, - .cra_module = THIS_MODULE, - .cra_init = atmel_sha_cra_init, - } - } + .halg.base.cra_name = "sha384", + .halg.base.cra_driver_name = "atmel-sha384", + .halg.base.cra_blocksize = SHA384_BLOCK_SIZE, + .halg.base.cra_alignmask = 0x3, + + .halg.digestsize = SHA384_DIGEST_SIZE, }, { - .init = atmel_sha_init, - .update = atmel_sha_update, - .final = atmel_sha_final, - .finup = atmel_sha_finup, - .digest = atmel_sha_digest, - .export = atmel_sha_export, - .import = atmel_sha_import, - .halg = { - .digestsize = SHA512_DIGEST_SIZE, - .statesize = sizeof(struct atmel_sha_reqctx), - .base = { - .cra_name = "sha512", - .cra_driver_name = "atmel-sha512", - .cra_priority = 100, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = SHA512_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct atmel_sha_ctx), - .cra_alignmask = 0x3, - .cra_module = THIS_MODULE, - .cra_init = atmel_sha_cra_init, - } - } + .halg.base.cra_name = "sha512", + .halg.base.cra_driver_name 
= "atmel-sha512", + .halg.base.cra_blocksize = SHA512_BLOCK_SIZE, + .halg.base.cra_alignmask = 0x3, + + .halg.digestsize = SHA512_DIGEST_SIZE, }, }; @@ -1395,10 +1335,6 @@ static int atmel_sha_done(struct atmel_sha_dev *dd) if (SHA_FLAGS_DMA_ACTIVE & dd->flags) { dd->flags &= ~SHA_FLAGS_DMA_ACTIVE; atmel_sha_update_dma_stop(dd); - if (dd->err) { - err = dd->err; - goto finish; - } } if (SHA_FLAGS_OUTPUT_READY & dd->flags) { /* hash or semi-hash ready */ @@ -1493,7 +1429,6 @@ static void atmel_sha_dma_callback2(void *data) struct scatterlist *sg; int nents; - dmaengine_terminate_all(dma->chan); dma_unmap_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE); sg = dma->sg; @@ -1918,12 +1853,7 @@ static int atmel_sha_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, { struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm); - if (atmel_sha_hmac_key_set(&hmac->hkey, key, keylen)) { - crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } - - return 0; + return atmel_sha_hmac_key_set(&hmac->hkey, key, keylen); } static int atmel_sha_hmac_init(struct ahash_request *req) @@ -2084,131 +2014,61 @@ static void atmel_sha_hmac_cra_exit(struct crypto_tfm *tfm) atmel_sha_hmac_key_release(&hmac->hkey); } +static void atmel_sha_hmac_alg_init(struct ahash_alg *alg) +{ + alg->halg.base.cra_priority = ATMEL_SHA_PRIORITY; + alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC; + alg->halg.base.cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx); + alg->halg.base.cra_module = THIS_MODULE; + alg->halg.base.cra_init = atmel_sha_hmac_cra_init; + alg->halg.base.cra_exit = atmel_sha_hmac_cra_exit; + + alg->halg.statesize = sizeof(struct atmel_sha_reqctx); + + alg->init = atmel_sha_hmac_init; + alg->update = atmel_sha_update; + alg->final = atmel_sha_final; + alg->digest = atmel_sha_hmac_digest; + alg->setkey = atmel_sha_hmac_setkey; + alg->export = atmel_sha_export; + alg->import = atmel_sha_import; +} + static struct ahash_alg sha_hmac_algs[] = { { - .init = atmel_sha_hmac_init, - .update = atmel_sha_update, - .final = atmel_sha_final, - .digest = atmel_sha_hmac_digest, - .setkey = atmel_sha_hmac_setkey, - .export = atmel_sha_export, - .import = atmel_sha_import, - .halg = { - .digestsize = SHA1_DIGEST_SIZE, - .statesize = sizeof(struct atmel_sha_reqctx), - .base = { - .cra_name = "hmac(sha1)", - .cra_driver_name = "atmel-hmac-sha1", - .cra_priority = 100, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = SHA1_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - .cra_init = atmel_sha_hmac_cra_init, - .cra_exit = atmel_sha_hmac_cra_exit, - } - } + .halg.base.cra_name = "hmac(sha1)", + .halg.base.cra_driver_name = "atmel-hmac-sha1", + .halg.base.cra_blocksize = SHA1_BLOCK_SIZE, + + .halg.digestsize = SHA1_DIGEST_SIZE, }, { - .init = atmel_sha_hmac_init, - .update = atmel_sha_update, - .final = atmel_sha_final, - .digest = atmel_sha_hmac_digest, - .setkey = atmel_sha_hmac_setkey, - .export = atmel_sha_export, - .import = atmel_sha_import, - .halg = { - .digestsize = SHA224_DIGEST_SIZE, - .statesize = sizeof(struct atmel_sha_reqctx), - .base = { - .cra_name = "hmac(sha224)", - .cra_driver_name = "atmel-hmac-sha224", - .cra_priority = 100, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = SHA224_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - .cra_init = atmel_sha_hmac_cra_init, - .cra_exit = atmel_sha_hmac_cra_exit, - } - } + .halg.base.cra_name = "hmac(sha224)", + 
.halg.base.cra_driver_name = "atmel-hmac-sha224", + .halg.base.cra_blocksize = SHA224_BLOCK_SIZE, + + .halg.digestsize = SHA224_DIGEST_SIZE, }, { - .init = atmel_sha_hmac_init, - .update = atmel_sha_update, - .final = atmel_sha_final, - .digest = atmel_sha_hmac_digest, - .setkey = atmel_sha_hmac_setkey, - .export = atmel_sha_export, - .import = atmel_sha_import, - .halg = { - .digestsize = SHA256_DIGEST_SIZE, - .statesize = sizeof(struct atmel_sha_reqctx), - .base = { - .cra_name = "hmac(sha256)", - .cra_driver_name = "atmel-hmac-sha256", - .cra_priority = 100, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - .cra_init = atmel_sha_hmac_cra_init, - .cra_exit = atmel_sha_hmac_cra_exit, - } - } + .halg.base.cra_name = "hmac(sha256)", + .halg.base.cra_driver_name = "atmel-hmac-sha256", + .halg.base.cra_blocksize = SHA256_BLOCK_SIZE, + + .halg.digestsize = SHA256_DIGEST_SIZE, }, { - .init = atmel_sha_hmac_init, - .update = atmel_sha_update, - .final = atmel_sha_final, - .digest = atmel_sha_hmac_digest, - .setkey = atmel_sha_hmac_setkey, - .export = atmel_sha_export, - .import = atmel_sha_import, - .halg = { - .digestsize = SHA384_DIGEST_SIZE, - .statesize = sizeof(struct atmel_sha_reqctx), - .base = { - .cra_name = "hmac(sha384)", - .cra_driver_name = "atmel-hmac-sha384", - .cra_priority = 100, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = SHA384_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - .cra_init = atmel_sha_hmac_cra_init, - .cra_exit = atmel_sha_hmac_cra_exit, - } - } + .halg.base.cra_name = "hmac(sha384)", + .halg.base.cra_driver_name = "atmel-hmac-sha384", + .halg.base.cra_blocksize = SHA384_BLOCK_SIZE, + + .halg.digestsize = SHA384_DIGEST_SIZE, }, { - .init = atmel_sha_hmac_init, - .update = atmel_sha_update, - .final = atmel_sha_final, - .digest = atmel_sha_hmac_digest, - .setkey = atmel_sha_hmac_setkey, - .export = atmel_sha_export, - .import = atmel_sha_import, - .halg = { - .digestsize = SHA512_DIGEST_SIZE, - .statesize = sizeof(struct atmel_sha_reqctx), - .base = { - .cra_name = "hmac(sha512)", - .cra_driver_name = "atmel-hmac-sha512", - .cra_priority = 100, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = SHA512_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - .cra_init = atmel_sha_hmac_cra_init, - .cra_exit = atmel_sha_hmac_cra_exit, - } - } + .halg.base.cra_name = "hmac(sha512)", + .halg.base.cra_driver_name = "atmel-hmac-sha512", + .halg.base.cra_blocksize = SHA512_BLOCK_SIZE, + + .halg.digestsize = SHA512_DIGEST_SIZE, }, }; @@ -2347,18 +2207,13 @@ void atmel_sha_authenc_free(struct atmel_sha_authenc_ctx *auth) EXPORT_SYMBOL_GPL(atmel_sha_authenc_free); int atmel_sha_authenc_setkey(struct atmel_sha_authenc_ctx *auth, - const u8 *key, unsigned int keylen, - u32 *flags) + const u8 *key, unsigned int keylen, u32 flags) { struct crypto_ahash *tfm = auth->tfm; - int err; crypto_ahash_clear_flags(tfm, CRYPTO_TFM_REQ_MASK); - crypto_ahash_set_flags(tfm, *flags & CRYPTO_TFM_REQ_MASK); - err = crypto_ahash_setkey(tfm, key, keylen); - *flags = crypto_ahash_get_flags(tfm); - - return err; + crypto_ahash_set_flags(tfm, flags & CRYPTO_TFM_REQ_MASK); + return crypto_ahash_setkey(tfm, key, keylen); } EXPORT_SYMBOL_GPL(atmel_sha_authenc_setkey); @@ -2561,12 +2416,16 @@ static int atmel_sha_register_algs(struct 
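With atmel_sha_authenc_setkey() now taking the request flags by value and reporting failures only through its return code, the caller side shrinks accordingly. A plausible caller shape, hedged because the consuming atmel-aes hunk is not shown here (example_auth_setkey is hypothetical; the setkey prototype is the one declared in atmel-authenc.h above):

#include <crypto/aead.h>
#include "atmel-authenc.h"

static int example_auth_setkey(struct atmel_sha_authenc_ctx *auth,
			       struct crypto_aead *aead,
			       const u8 *key, unsigned int keylen)
{
	/* Forward only the CRYPTO_TFM_REQ_* bits of the outer AEAD;
	 * errors now come back solely as a return code. */
	return atmel_sha_authenc_setkey(auth, key, keylen,
					crypto_aead_get_flags(aead));
}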
atmel_sha_dev *dd) int err, i, j; for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) { + atmel_sha_alg_init(&sha_1_256_algs[i]); + err = crypto_register_ahash(&sha_1_256_algs[i]); if (err) goto err_sha_1_256_algs; } if (dd->caps.has_sha224) { + atmel_sha_alg_init(&sha_224_alg); + err = crypto_register_ahash(&sha_224_alg); if (err) goto err_sha_224_algs; @@ -2574,6 +2433,8 @@ static int atmel_sha_register_algs(struct atmel_sha_dev *dd) if (dd->caps.has_sha_384_512) { for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++) { + atmel_sha_alg_init(&sha_384_512_algs[i]); + err = crypto_register_ahash(&sha_384_512_algs[i]); if (err) goto err_sha_384_512_algs; @@ -2582,6 +2443,8 @@ static int atmel_sha_register_algs(struct atmel_sha_dev *dd) if (dd->caps.has_hmac) { for (i = 0; i < ARRAY_SIZE(sha_hmac_algs); i++) { + atmel_sha_hmac_alg_init(&sha_hmac_algs[i]); + err = crypto_register_ahash(&sha_hmac_algs[i]); if (err) goto err_sha_hmac_algs; @@ -2608,35 +2471,14 @@ err_sha_1_256_algs: return err; } -static bool atmel_sha_filter(struct dma_chan *chan, void *slave) +static int atmel_sha_dma_init(struct atmel_sha_dev *dd) { - struct at_dma_slave *sl = slave; - - if (sl && sl->dma_dev == chan->device->dev) { - chan->private = sl; - return true; - } else { - return false; + dd->dma_lch_in.chan = dma_request_chan(dd->dev, "tx"); + if (IS_ERR(dd->dma_lch_in.chan)) { + dev_err(dd->dev, "DMA channel is not available\n"); + return PTR_ERR(dd->dma_lch_in.chan); } -} -static int atmel_sha_dma_init(struct atmel_sha_dev *dd, - struct crypto_platform_data *pdata) -{ - dma_cap_mask_t mask_in; - - /* Try to grab DMA channel */ - dma_cap_zero(mask_in); - dma_cap_set(DMA_SLAVE, mask_in); - - dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask_in, - atmel_sha_filter, &pdata->dma_slave->rxdata, dd->dev, "tx"); - if (!dd->dma_lch_in.chan) { - dev_warn(dd->dev, "no DMA channel available\n"); - return -ENODEV; - } - - dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV; dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base + SHA_REG_DIN(0); dd->dma_lch_in.dma_conf.src_maxburst = 1; @@ -2709,49 +2551,18 @@ static const struct of_device_id atmel_sha_dt_ids[] = { }; MODULE_DEVICE_TABLE(of, atmel_sha_dt_ids); - -static struct crypto_platform_data *atmel_sha_of_init(struct platform_device *pdev) -{ - struct device_node *np = pdev->dev.of_node; - struct crypto_platform_data *pdata; - - if (!np) { - dev_err(&pdev->dev, "device node not found\n"); - return ERR_PTR(-EINVAL); - } - - pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); - if (!pdata) - return ERR_PTR(-ENOMEM); - - pdata->dma_slave = devm_kzalloc(&pdev->dev, - sizeof(*(pdata->dma_slave)), - GFP_KERNEL); - if (!pdata->dma_slave) - return ERR_PTR(-ENOMEM); - - return pdata; -} -#else /* CONFIG_OF */ -static inline struct crypto_platform_data *atmel_sha_of_init(struct platform_device *dev) -{ - return ERR_PTR(-EINVAL); -} #endif static int atmel_sha_probe(struct platform_device *pdev) { struct atmel_sha_dev *sha_dd; - struct crypto_platform_data *pdata; struct device *dev = &pdev->dev; struct resource *sha_res; int err; sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL); - if (sha_dd == NULL) { - err = -ENOMEM; - goto sha_dd_err; - } + if (!sha_dd) + return -ENOMEM; sha_dd->dev = dev; @@ -2772,7 +2583,7 @@ static int atmel_sha_probe(struct platform_device *pdev) if (!sha_res) { dev_err(dev, "no MEM resource info\n"); err = -ENODEV; - goto res_err; + goto err_tasklet_kill; } sha_dd->phys_base = sha_res->start; @@ -2780,14 +2591,14 @@ static int 
atmel_sha_probe(struct platform_device *pdev) sha_dd->irq = platform_get_irq(pdev, 0); if (sha_dd->irq < 0) { err = sha_dd->irq; - goto res_err; + goto err_tasklet_kill; } err = devm_request_irq(&pdev->dev, sha_dd->irq, atmel_sha_irq, IRQF_SHARED, "atmel-sha", sha_dd); if (err) { dev_err(dev, "unable to request sha irq.\n"); - goto res_err; + goto err_tasklet_kill; } /* Initializing the clock */ @@ -2795,41 +2606,30 @@ static int atmel_sha_probe(struct platform_device *pdev) if (IS_ERR(sha_dd->iclk)) { dev_err(dev, "clock initialization failed.\n"); err = PTR_ERR(sha_dd->iclk); - goto res_err; + goto err_tasklet_kill; } sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res); if (IS_ERR(sha_dd->io_base)) { dev_err(dev, "can't ioremap\n"); err = PTR_ERR(sha_dd->io_base); - goto res_err; + goto err_tasklet_kill; } err = clk_prepare(sha_dd->iclk); if (err) - goto res_err; + goto err_tasklet_kill; - atmel_sha_hw_version_init(sha_dd); + err = atmel_sha_hw_version_init(sha_dd); + if (err) + goto err_iclk_unprepare; atmel_sha_get_cap(sha_dd); if (sha_dd->caps.has_dma) { - pdata = pdev->dev.platform_data; - if (!pdata) { - pdata = atmel_sha_of_init(pdev); - if (IS_ERR(pdata)) { - dev_err(&pdev->dev, "platform data not available\n"); - err = PTR_ERR(pdata); - goto iclk_unprepare; - } - } - if (!pdata->dma_slave) { - err = -ENXIO; - goto iclk_unprepare; - } - err = atmel_sha_dma_init(sha_dd, pdata); + err = atmel_sha_dma_init(sha_dd); if (err) - goto err_sha_dma; + goto err_iclk_unprepare; dev_info(dev, "using %s for DMA transfers\n", dma_chan_name(sha_dd->dma_lch_in.chan)); @@ -2855,14 +2655,11 @@ err_algs: spin_unlock(&atmel_sha.lock); if (sha_dd->caps.has_dma) atmel_sha_dma_cleanup(sha_dd); -err_sha_dma: -iclk_unprepare: +err_iclk_unprepare: clk_unprepare(sha_dd->iclk); -res_err: +err_tasklet_kill: tasklet_kill(&sha_dd->queue_task); tasklet_kill(&sha_dd->done_task); -sha_dd_err: - dev_err(dev, "initialization failed.\n"); return err; } diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c index 0c1f79b30fc1..ed40dbb98c6b 100644 --- a/drivers/crypto/atmel-tdes.c +++ b/drivers/crypto/atmel-tdes.c @@ -21,6 +21,7 @@ #include <linux/platform_device.h> #include <linux/device.h> +#include <linux/dmaengine.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/interrupt.h> @@ -30,31 +31,32 @@ #include <linux/of_device.h> #include <linux/delay.h> #include <linux/crypto.h> -#include <linux/cryptohash.h> #include <crypto/scatterwalk.h> #include <crypto/algapi.h> #include <crypto/internal/des.h> -#include <crypto/hash.h> -#include <crypto/internal/hash.h> #include <crypto/internal/skcipher.h> -#include <linux/platform_data/crypto-atmel.h> #include "atmel-tdes-regs.h" +#define ATMEL_TDES_PRIORITY 300 + /* TDES flags */ -#define TDES_FLAGS_MODE_MASK 0x00ff -#define TDES_FLAGS_ENCRYPT BIT(0) -#define TDES_FLAGS_CBC BIT(1) -#define TDES_FLAGS_CFB BIT(2) -#define TDES_FLAGS_CFB8 BIT(3) -#define TDES_FLAGS_CFB16 BIT(4) -#define TDES_FLAGS_CFB32 BIT(5) -#define TDES_FLAGS_CFB64 BIT(6) -#define TDES_FLAGS_OFB BIT(7) - -#define TDES_FLAGS_INIT BIT(16) -#define TDES_FLAGS_FAST BIT(17) -#define TDES_FLAGS_BUSY BIT(18) -#define TDES_FLAGS_DMA BIT(19) +/* Reserve bits [17:16], [13:12], [2:0] for AES Mode Register */ +#define TDES_FLAGS_ENCRYPT TDES_MR_CYPHER_ENC +#define TDES_FLAGS_OPMODE_MASK (TDES_MR_OPMOD_MASK | TDES_MR_CFBS_MASK) +#define TDES_FLAGS_ECB TDES_MR_OPMOD_ECB +#define TDES_FLAGS_CBC TDES_MR_OPMOD_CBC +#define TDES_FLAGS_OFB TDES_MR_OPMOD_OFB +#define TDES_FLAGS_CFB64 
(TDES_MR_OPMOD_CFB | TDES_MR_CFBS_64b) +#define TDES_FLAGS_CFB32 (TDES_MR_OPMOD_CFB | TDES_MR_CFBS_32b) +#define TDES_FLAGS_CFB16 (TDES_MR_OPMOD_CFB | TDES_MR_CFBS_16b) +#define TDES_FLAGS_CFB8 (TDES_MR_OPMOD_CFB | TDES_MR_CFBS_8b) + +#define TDES_FLAGS_MODE_MASK (TDES_FLAGS_OPMODE_MASK | TDES_FLAGS_ENCRYPT) + +#define TDES_FLAGS_INIT BIT(3) +#define TDES_FLAGS_FAST BIT(4) +#define TDES_FLAGS_BUSY BIT(5) +#define TDES_FLAGS_DMA BIT(6) #define ATMEL_TDES_QUEUE_LENGTH 50 @@ -100,7 +102,6 @@ struct atmel_tdes_dev { int irq; unsigned long flags; - int err; spinlock_t lock; struct crypto_queue queue; @@ -189,7 +190,7 @@ static inline void atmel_tdes_write(struct atmel_tdes_dev *dd, } static void atmel_tdes_write_n(struct atmel_tdes_dev *dd, u32 offset, - u32 *value, int count) + const u32 *value, int count) { for (; count--; value++, offset += 4) atmel_tdes_write(dd, offset, *value); @@ -226,7 +227,6 @@ static int atmel_tdes_hw_init(struct atmel_tdes_dev *dd) if (!(dd->flags & TDES_FLAGS_INIT)) { atmel_tdes_write(dd, TDES_CR, TDES_CR_SWRST); dd->flags |= TDES_FLAGS_INIT; - dd->err = 0; } return 0; @@ -237,9 +237,13 @@ static inline unsigned int atmel_tdes_get_version(struct atmel_tdes_dev *dd) return atmel_tdes_read(dd, TDES_HW_VERSION) & 0x00000fff; } -static void atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd) +static int atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd) { - atmel_tdes_hw_init(dd); + int err; + + err = atmel_tdes_hw_init(dd); + if (err) + return err; dd->hw_version = atmel_tdes_get_version(dd); @@ -247,6 +251,8 @@ static void atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd) "version: 0x%x\n", dd->hw_version); clk_disable_unprepare(dd->iclk); + + return 0; } static void atmel_tdes_dma_callback(void *data) @@ -260,7 +266,7 @@ static void atmel_tdes_dma_callback(void *data) static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd) { int err; - u32 valcr = 0, valmr = TDES_MR_SMOD_PDC; + u32 valmr = TDES_MR_SMOD_PDC; err = atmel_tdes_hw_init(dd); @@ -282,36 +288,15 @@ static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd) valmr |= TDES_MR_TDESMOD_DES; } - if (dd->flags & TDES_FLAGS_CBC) { - valmr |= TDES_MR_OPMOD_CBC; - } else if (dd->flags & TDES_FLAGS_CFB) { - valmr |= TDES_MR_OPMOD_CFB; - - if (dd->flags & TDES_FLAGS_CFB8) - valmr |= TDES_MR_CFBS_8b; - else if (dd->flags & TDES_FLAGS_CFB16) - valmr |= TDES_MR_CFBS_16b; - else if (dd->flags & TDES_FLAGS_CFB32) - valmr |= TDES_MR_CFBS_32b; - else if (dd->flags & TDES_FLAGS_CFB64) - valmr |= TDES_MR_CFBS_64b; - } else if (dd->flags & TDES_FLAGS_OFB) { - valmr |= TDES_MR_OPMOD_OFB; - } - - if ((dd->flags & TDES_FLAGS_ENCRYPT) || (dd->flags & TDES_FLAGS_OFB)) - valmr |= TDES_MR_CYPHER_ENC; + valmr |= dd->flags & TDES_FLAGS_MODE_MASK; - atmel_tdes_write(dd, TDES_CR, valcr); atmel_tdes_write(dd, TDES_MR, valmr); atmel_tdes_write_n(dd, TDES_KEY1W1R, dd->ctx->key, dd->ctx->keylen >> 2); - if (((dd->flags & TDES_FLAGS_CBC) || (dd->flags & TDES_FLAGS_CFB) || - (dd->flags & TDES_FLAGS_OFB)) && dd->req->iv) { + if (dd->req->iv && (valmr & TDES_MR_OPMOD_MASK) != TDES_MR_OPMOD_ECB) atmel_tdes_write_n(dd, TDES_IV1R, (void *)dd->req->iv, 2); - } return 0; } @@ -397,11 +382,11 @@ static void atmel_tdes_buff_cleanup(struct atmel_tdes_dev *dd) free_page((unsigned long)dd->buf_in); } -static int atmel_tdes_crypt_pdc(struct crypto_tfm *tfm, dma_addr_t dma_addr_in, - dma_addr_t dma_addr_out, int length) +static int atmel_tdes_crypt_pdc(struct atmel_tdes_dev *dd, + dma_addr_t dma_addr_in, + dma_addr_t dma_addr_out, int length) { 
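Because each of the new TDES_FLAGS_* values above is literally the Mode Register field it selects, composing a mode no longer needs an if/else ladder. An illustrative expansion, assuming the TDES_MR_* macros from atmel-tdes-regs.h used throughout this file (example_compose_mr is hypothetical):

/*
 * TDES_FLAGS_CFB16 | TDES_FLAGS_ENCRYPT
 *   == TDES_MR_OPMOD_CFB | TDES_MR_CFBS_16b | TDES_MR_CYPHER_ENC
 * so atmel_tdes_write_ctrl() boils down to a mask-and-or:
 */
static u32 example_compose_mr(u32 mode_flags)
{
	u32 valmr = TDES_MR_SMOD_PDC;

	valmr |= mode_flags & TDES_FLAGS_MODE_MASK;
	return valmr;
}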
- struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm); - struct atmel_tdes_dev *dd = ctx->dd; + struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(dd->req); int len32; dd->dma_size = length; @@ -411,12 +396,19 @@ static int atmel_tdes_crypt_pdc(struct crypto_tfm *tfm, dma_addr_t dma_addr_in, DMA_TO_DEVICE); } - if ((dd->flags & TDES_FLAGS_CFB) && (dd->flags & TDES_FLAGS_CFB8)) + switch (rctx->mode & TDES_FLAGS_OPMODE_MASK) { + case TDES_FLAGS_CFB8: len32 = DIV_ROUND_UP(length, sizeof(u8)); - else if ((dd->flags & TDES_FLAGS_CFB) && (dd->flags & TDES_FLAGS_CFB16)) + break; + + case TDES_FLAGS_CFB16: len32 = DIV_ROUND_UP(length, sizeof(u16)); - else + break; + + default: len32 = DIV_ROUND_UP(length, sizeof(u32)); + break; + } atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS); atmel_tdes_write(dd, TDES_TPR, dma_addr_in); @@ -433,13 +425,14 @@ static int atmel_tdes_crypt_pdc(struct crypto_tfm *tfm, dma_addr_t dma_addr_in, return 0; } -static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in, - dma_addr_t dma_addr_out, int length) +static int atmel_tdes_crypt_dma(struct atmel_tdes_dev *dd, + dma_addr_t dma_addr_in, + dma_addr_t dma_addr_out, int length) { - struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm); - struct atmel_tdes_dev *dd = ctx->dd; + struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(dd->req); struct scatterlist sg[2]; struct dma_async_tx_descriptor *in_desc, *out_desc; + enum dma_slave_buswidth addr_width; dd->dma_size = length; @@ -448,23 +441,23 @@ static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in, DMA_TO_DEVICE); } - if (dd->flags & TDES_FLAGS_CFB8) { - dd->dma_lch_in.dma_conf.dst_addr_width = - DMA_SLAVE_BUSWIDTH_1_BYTE; - dd->dma_lch_out.dma_conf.src_addr_width = - DMA_SLAVE_BUSWIDTH_1_BYTE; - } else if (dd->flags & TDES_FLAGS_CFB16) { - dd->dma_lch_in.dma_conf.dst_addr_width = - DMA_SLAVE_BUSWIDTH_2_BYTES; - dd->dma_lch_out.dma_conf.src_addr_width = - DMA_SLAVE_BUSWIDTH_2_BYTES; - } else { - dd->dma_lch_in.dma_conf.dst_addr_width = - DMA_SLAVE_BUSWIDTH_4_BYTES; - dd->dma_lch_out.dma_conf.src_addr_width = - DMA_SLAVE_BUSWIDTH_4_BYTES; + switch (rctx->mode & TDES_FLAGS_OPMODE_MASK) { + case TDES_FLAGS_CFB8: + addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + break; + + case TDES_FLAGS_CFB16: + addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; + break; + + default: + addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + break; } + dd->dma_lch_in.dma_conf.dst_addr_width = addr_width; + dd->dma_lch_out.dma_conf.src_addr_width = addr_width; + dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf); dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf); @@ -504,8 +497,6 @@ static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in, static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd) { - struct crypto_tfm *tfm = crypto_skcipher_tfm( - crypto_skcipher_reqtfm(dd->req)); int err, fast = 0, in, out; size_t count; dma_addr_t addr_in, addr_out; @@ -561,9 +552,9 @@ static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd) dd->total -= count; if (dd->caps.has_dma) - err = atmel_tdes_crypt_dma(tfm, addr_in, addr_out, count); + err = atmel_tdes_crypt_dma(dd, addr_in, addr_out, count); else - err = atmel_tdes_crypt_pdc(tfm, addr_in, addr_out, count); + err = atmel_tdes_crypt_pdc(dd, addr_in, addr_out, count); if (err && (dd->flags & TDES_FLAGS_FAST)) { dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); @@ -600,12 +591,14 @@ atmel_tdes_set_iv_as_last_ciphertext_block(struct atmel_tdes_dev 
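The PDC programs transfer counts rather than byte counts, so atmel_tdes_crypt_pdc() above divides the byte length by the bus width of the selected CFB variant. A worked example of that computation:

/*
 * Worked example (values only): for a 16-byte request,
 *   CFB8 : DIV_ROUND_UP(16, sizeof(u8))  = 16 byte transfers
 *   CFB16: DIV_ROUND_UP(16, sizeof(u16)) = 8  halfword transfers
 *   other: DIV_ROUND_UP(16, sizeof(u32)) = 4  word transfers
 */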
*dd) static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err) { struct skcipher_request *req = dd->req; + struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req); clk_disable_unprepare(dd->iclk); dd->flags &= ~TDES_FLAGS_BUSY; - atmel_tdes_set_iv_as_last_ciphertext_block(dd); + if (!err && (rctx->mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB) + atmel_tdes_set_iv_as_last_ciphertext_block(dd); req->base.complete(&req->base, err); } @@ -699,35 +692,44 @@ static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode) struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(skcipher); struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req); - if (mode & TDES_FLAGS_CFB8) { + switch (mode & TDES_FLAGS_OPMODE_MASK) { + case TDES_FLAGS_CFB8: if (!IS_ALIGNED(req->cryptlen, CFB8_BLOCK_SIZE)) { pr_err("request size is not exact amount of CFB8 blocks\n"); return -EINVAL; } ctx->block_size = CFB8_BLOCK_SIZE; - } else if (mode & TDES_FLAGS_CFB16) { + break; + + case TDES_FLAGS_CFB16: if (!IS_ALIGNED(req->cryptlen, CFB16_BLOCK_SIZE)) { pr_err("request size is not exact amount of CFB16 blocks\n"); return -EINVAL; } ctx->block_size = CFB16_BLOCK_SIZE; - } else if (mode & TDES_FLAGS_CFB32) { + break; + + case TDES_FLAGS_CFB32: if (!IS_ALIGNED(req->cryptlen, CFB32_BLOCK_SIZE)) { pr_err("request size is not exact amount of CFB32 blocks\n"); return -EINVAL; } ctx->block_size = CFB32_BLOCK_SIZE; - } else { + break; + + default: if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) { pr_err("request size is not exact amount of DES blocks\n"); return -EINVAL; } ctx->block_size = DES_BLOCK_SIZE; + break; } rctx->mode = mode; - if (!(mode & TDES_FLAGS_ENCRYPT) && req->src == req->dst) { + if ((mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB && + !(mode & TDES_FLAGS_ENCRYPT) && req->src == req->dst) { unsigned int ivsize = crypto_skcipher_ivsize(skcipher); if (req->cryptlen >= ivsize) @@ -739,33 +741,17 @@ static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode) return atmel_tdes_handle_queue(ctx->dd, req); } -static bool atmel_tdes_filter(struct dma_chan *chan, void *slave) +static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd) { - struct at_dma_slave *sl = slave; - - if (sl && sl->dma_dev == chan->device->dev) { - chan->private = sl; - return true; - } else { - return false; - } -} - -static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd, - struct crypto_platform_data *pdata) -{ - dma_cap_mask_t mask; - - dma_cap_zero(mask); - dma_cap_set(DMA_SLAVE, mask); + int ret; /* Try to grab 2 DMA channels */ - dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask, - atmel_tdes_filter, &pdata->dma_slave->rxdata, dd->dev, "tx"); - if (!dd->dma_lch_in.chan) + dd->dma_lch_in.chan = dma_request_chan(dd->dev, "tx"); + if (IS_ERR(dd->dma_lch_in.chan)) { + ret = PTR_ERR(dd->dma_lch_in.chan); goto err_dma_in; + } - dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV; dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base + TDES_IDATA1R; dd->dma_lch_in.dma_conf.src_maxburst = 1; @@ -776,12 +762,12 @@ static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd, DMA_SLAVE_BUSWIDTH_4_BYTES; dd->dma_lch_in.dma_conf.device_fc = false; - dd->dma_lch_out.chan = dma_request_slave_channel_compat(mask, - atmel_tdes_filter, &pdata->dma_slave->txdata, dd->dev, "rx"); - if (!dd->dma_lch_out.chan) + dd->dma_lch_out.chan = dma_request_chan(dd->dev, "rx"); + if (IS_ERR(dd->dma_lch_out.chan)) { + ret = PTR_ERR(dd->dma_lch_out.chan); goto err_dma_out; + } - dd->dma_lch_out.dma_conf.direction = 
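atmel_tdes_dma_init() above fills the same dma_slave_config fields that atmel_tdes_crypt_dma() later adjusts per mode. For reference, a minimal self-contained sketch of programming a slave channel's bus width (the FIFO address and function name are hypothetical; the dmaengine calls are standard API):

#include <linux/dmaengine.h>

static int example_slave_config(struct dma_chan *chan, dma_addr_t fifo)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo,			/* device data register */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,	/* e.g. CFB16 */
		.dst_maxburst	= 1,
	};

	return dmaengine_slave_config(chan, &cfg);
}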
DMA_DEV_TO_MEM; dd->dma_lch_out.dma_conf.src_addr = dd->phys_base + TDES_ODATA1R; dd->dma_lch_out.dma_conf.src_maxburst = 1; @@ -797,8 +783,8 @@ static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd, err_dma_out: dma_release_channel(dd->dma_lch_in.chan); err_dma_in: - dev_warn(dd->dev, "no DMA channel available\n"); - return -ENODEV; + dev_err(dd->dev, "no DMA channel available\n"); + return ret; } static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd) @@ -841,17 +827,17 @@ static int atmel_tdes_setkey(struct crypto_skcipher *tfm, const u8 *key, static int atmel_tdes_ecb_encrypt(struct skcipher_request *req) { - return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT); + return atmel_tdes_crypt(req, TDES_FLAGS_ECB | TDES_FLAGS_ENCRYPT); } static int atmel_tdes_ecb_decrypt(struct skcipher_request *req) { - return atmel_tdes_crypt(req, 0); + return atmel_tdes_crypt(req, TDES_FLAGS_ECB); } static int atmel_tdes_cbc_encrypt(struct skcipher_request *req) { - return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CBC); + return atmel_tdes_crypt(req, TDES_FLAGS_CBC | TDES_FLAGS_ENCRYPT); } static int atmel_tdes_cbc_decrypt(struct skcipher_request *req) @@ -860,50 +846,47 @@ static int atmel_tdes_cbc_decrypt(struct skcipher_request *req) } static int atmel_tdes_cfb_encrypt(struct skcipher_request *req) { - return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB); + return atmel_tdes_crypt(req, TDES_FLAGS_CFB64 | TDES_FLAGS_ENCRYPT); } static int atmel_tdes_cfb_decrypt(struct skcipher_request *req) { - return atmel_tdes_crypt(req, TDES_FLAGS_CFB); + return atmel_tdes_crypt(req, TDES_FLAGS_CFB64); } static int atmel_tdes_cfb8_encrypt(struct skcipher_request *req) { - return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB | - TDES_FLAGS_CFB8); + return atmel_tdes_crypt(req, TDES_FLAGS_CFB8 | TDES_FLAGS_ENCRYPT); } static int atmel_tdes_cfb8_decrypt(struct skcipher_request *req) { - return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB8); + return atmel_tdes_crypt(req, TDES_FLAGS_CFB8); } static int atmel_tdes_cfb16_encrypt(struct skcipher_request *req) { - return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB | - TDES_FLAGS_CFB16); + return atmel_tdes_crypt(req, TDES_FLAGS_CFB16 | TDES_FLAGS_ENCRYPT); } static int atmel_tdes_cfb16_decrypt(struct skcipher_request *req) { - return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB16); + return atmel_tdes_crypt(req, TDES_FLAGS_CFB16); } static int atmel_tdes_cfb32_encrypt(struct skcipher_request *req) { - return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB | - TDES_FLAGS_CFB32); + return atmel_tdes_crypt(req, TDES_FLAGS_CFB32 | TDES_FLAGS_ENCRYPT); } static int atmel_tdes_cfb32_decrypt(struct skcipher_request *req) { - return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB32); + return atmel_tdes_crypt(req, TDES_FLAGS_CFB32); } static int atmel_tdes_ofb_encrypt(struct skcipher_request *req) { - return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_OFB); + return atmel_tdes_crypt(req, TDES_FLAGS_OFB | TDES_FLAGS_ENCRYPT); } static int atmel_tdes_ofb_decrypt(struct skcipher_request *req) @@ -925,18 +908,23 @@ static int atmel_tdes_init_tfm(struct crypto_skcipher *tfm) return 0; } +static void atmel_tdes_skcipher_alg_init(struct skcipher_alg *alg) +{ + alg->base.cra_priority = ATMEL_TDES_PRIORITY; + alg->base.cra_flags = CRYPTO_ALG_ASYNC; + alg->base.cra_ctxsize = sizeof(struct atmel_tdes_ctx), + alg->base.cra_module = THIS_MODULE; + + alg->init = atmel_tdes_init_tfm; +} + static 
struct skcipher_alg tdes_algs[] = { { .base.cra_name = "ecb(des)", .base.cra_driver_name = "atmel-ecb-des", - .base.cra_priority = 100, - .base.cra_flags = CRYPTO_ALG_ASYNC, .base.cra_blocksize = DES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx), .base.cra_alignmask = 0x7, - .base.cra_module = THIS_MODULE, - .init = atmel_tdes_init_tfm, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .setkey = atmel_des_setkey, @@ -946,14 +934,9 @@ static struct skcipher_alg tdes_algs[] = { { .base.cra_name = "cbc(des)", .base.cra_driver_name = "atmel-cbc-des", - .base.cra_priority = 100, - .base.cra_flags = CRYPTO_ALG_ASYNC, .base.cra_blocksize = DES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx), .base.cra_alignmask = 0x7, - .base.cra_module = THIS_MODULE, - .init = atmel_tdes_init_tfm, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .ivsize = DES_BLOCK_SIZE, @@ -964,14 +947,9 @@ static struct skcipher_alg tdes_algs[] = { { .base.cra_name = "cfb(des)", .base.cra_driver_name = "atmel-cfb-des", - .base.cra_priority = 100, - .base.cra_flags = CRYPTO_ALG_ASYNC, .base.cra_blocksize = DES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx), .base.cra_alignmask = 0x7, - .base.cra_module = THIS_MODULE, - .init = atmel_tdes_init_tfm, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .ivsize = DES_BLOCK_SIZE, @@ -982,14 +960,9 @@ static struct skcipher_alg tdes_algs[] = { { .base.cra_name = "cfb8(des)", .base.cra_driver_name = "atmel-cfb8-des", - .base.cra_priority = 100, - .base.cra_flags = CRYPTO_ALG_ASYNC, .base.cra_blocksize = CFB8_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx), .base.cra_alignmask = 0, - .base.cra_module = THIS_MODULE, - .init = atmel_tdes_init_tfm, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .ivsize = DES_BLOCK_SIZE, @@ -1000,14 +973,9 @@ static struct skcipher_alg tdes_algs[] = { { .base.cra_name = "cfb16(des)", .base.cra_driver_name = "atmel-cfb16-des", - .base.cra_priority = 100, - .base.cra_flags = CRYPTO_ALG_ASYNC, .base.cra_blocksize = CFB16_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx), .base.cra_alignmask = 0x1, - .base.cra_module = THIS_MODULE, - .init = atmel_tdes_init_tfm, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .ivsize = DES_BLOCK_SIZE, @@ -1018,14 +986,9 @@ static struct skcipher_alg tdes_algs[] = { { .base.cra_name = "cfb32(des)", .base.cra_driver_name = "atmel-cfb32-des", - .base.cra_priority = 100, - .base.cra_flags = CRYPTO_ALG_ASYNC, .base.cra_blocksize = CFB32_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx), .base.cra_alignmask = 0x3, - .base.cra_module = THIS_MODULE, - .init = atmel_tdes_init_tfm, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .ivsize = DES_BLOCK_SIZE, @@ -1036,14 +999,9 @@ static struct skcipher_alg tdes_algs[] = { { .base.cra_name = "ofb(des)", .base.cra_driver_name = "atmel-ofb-des", - .base.cra_priority = 100, - .base.cra_flags = CRYPTO_ALG_ASYNC, .base.cra_blocksize = DES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx), .base.cra_alignmask = 0x7, - .base.cra_module = THIS_MODULE, - .init = atmel_tdes_init_tfm, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .ivsize = DES_BLOCK_SIZE, @@ -1054,14 +1012,9 @@ static struct skcipher_alg tdes_algs[] = { { .base.cra_name = "ecb(des3_ede)", .base.cra_driver_name = "atmel-ecb-tdes", - .base.cra_priority = 100, - .base.cra_flags = CRYPTO_ALG_ASYNC, .base.cra_blocksize = DES_BLOCK_SIZE, - .base.cra_ctxsize = 
sizeof(struct atmel_tdes_ctx), .base.cra_alignmask = 0x7, - .base.cra_module = THIS_MODULE, - .init = atmel_tdes_init_tfm, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .setkey = atmel_tdes_setkey, @@ -1071,14 +1024,9 @@ static struct skcipher_alg tdes_algs[] = { { .base.cra_name = "cbc(des3_ede)", .base.cra_driver_name = "atmel-cbc-tdes", - .base.cra_priority = 100, - .base.cra_flags = CRYPTO_ALG_ASYNC, .base.cra_blocksize = DES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx), .base.cra_alignmask = 0x7, - .base.cra_module = THIS_MODULE, - .init = atmel_tdes_init_tfm, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .setkey = atmel_tdes_setkey, @@ -1089,14 +1037,9 @@ static struct skcipher_alg tdes_algs[] = { { .base.cra_name = "ofb(des3_ede)", .base.cra_driver_name = "atmel-ofb-tdes", - .base.cra_priority = 100, - .base.cra_flags = CRYPTO_ALG_ASYNC, .base.cra_blocksize = DES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx), .base.cra_alignmask = 0x7, - .base.cra_module = THIS_MODULE, - .init = atmel_tdes_init_tfm, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .setkey = atmel_tdes_setkey, @@ -1123,8 +1066,6 @@ static void atmel_tdes_done_task(unsigned long data) else err = atmel_tdes_crypt_dma_stop(dd); - err = dd->err ? : err; - if (dd->total && !err) { if (dd->flags & TDES_FLAGS_FAST) { dd->in_sg = sg_next(dd->in_sg); @@ -1173,6 +1114,8 @@ static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd) int err, i, j; for (i = 0; i < ARRAY_SIZE(tdes_algs); i++) { + atmel_tdes_skcipher_alg_init(&tdes_algs[i]); + err = crypto_register_skcipher(&tdes_algs[i]); if (err) goto err_tdes_algs; @@ -1214,49 +1157,18 @@ static const struct of_device_id atmel_tdes_dt_ids[] = { { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, atmel_tdes_dt_ids); - -static struct crypto_platform_data *atmel_tdes_of_init(struct platform_device *pdev) -{ - struct device_node *np = pdev->dev.of_node; - struct crypto_platform_data *pdata; - - if (!np) { - dev_err(&pdev->dev, "device node not found\n"); - return ERR_PTR(-EINVAL); - } - - pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); - if (!pdata) - return ERR_PTR(-ENOMEM); - - pdata->dma_slave = devm_kzalloc(&pdev->dev, - sizeof(*(pdata->dma_slave)), - GFP_KERNEL); - if (!pdata->dma_slave) - return ERR_PTR(-ENOMEM); - - return pdata; -} -#else /* CONFIG_OF */ -static inline struct crypto_platform_data *atmel_tdes_of_init(struct platform_device *pdev) -{ - return ERR_PTR(-EINVAL); -} #endif static int atmel_tdes_probe(struct platform_device *pdev) { struct atmel_tdes_dev *tdes_dd; - struct crypto_platform_data *pdata; struct device *dev = &pdev->dev; struct resource *tdes_res; int err; tdes_dd = devm_kmalloc(&pdev->dev, sizeof(*tdes_dd), GFP_KERNEL); - if (tdes_dd == NULL) { - err = -ENOMEM; - goto tdes_dd_err; - } + if (!tdes_dd) + return -ENOMEM; tdes_dd->dev = dev; @@ -1277,7 +1189,7 @@ static int atmel_tdes_probe(struct platform_device *pdev) if (!tdes_res) { dev_err(dev, "no MEM resource info\n"); err = -ENODEV; - goto res_err; + goto err_tasklet_kill; } tdes_dd->phys_base = tdes_res->start; @@ -1285,14 +1197,14 @@ static int atmel_tdes_probe(struct platform_device *pdev) tdes_dd->irq = platform_get_irq(pdev, 0); if (tdes_dd->irq < 0) { err = tdes_dd->irq; - goto res_err; + goto err_tasklet_kill; } err = devm_request_irq(&pdev->dev, tdes_dd->irq, atmel_tdes_irq, IRQF_SHARED, "atmel-tdes", tdes_dd); if (err) { dev_err(dev, "unable to request tdes irq.\n"); - goto 
res_err; + goto err_tasklet_kill; } /* Initializing the clock */ @@ -1300,41 +1212,30 @@ static int atmel_tdes_probe(struct platform_device *pdev) if (IS_ERR(tdes_dd->iclk)) { dev_err(dev, "clock initialization failed.\n"); err = PTR_ERR(tdes_dd->iclk); - goto res_err; + goto err_tasklet_kill; } tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res); if (IS_ERR(tdes_dd->io_base)) { dev_err(dev, "can't ioremap\n"); err = PTR_ERR(tdes_dd->io_base); - goto res_err; + goto err_tasklet_kill; } - atmel_tdes_hw_version_init(tdes_dd); + err = atmel_tdes_hw_version_init(tdes_dd); + if (err) + goto err_tasklet_kill; atmel_tdes_get_cap(tdes_dd); err = atmel_tdes_buff_init(tdes_dd); if (err) - goto err_tdes_buff; + goto err_tasklet_kill; if (tdes_dd->caps.has_dma) { - pdata = pdev->dev.platform_data; - if (!pdata) { - pdata = atmel_tdes_of_init(pdev); - if (IS_ERR(pdata)) { - dev_err(&pdev->dev, "platform data not available\n"); - err = PTR_ERR(pdata); - goto err_pdata; - } - } - if (!pdata->dma_slave) { - err = -ENXIO; - goto err_pdata; - } - err = atmel_tdes_dma_init(tdes_dd, pdata); + err = atmel_tdes_dma_init(tdes_dd); if (err) - goto err_tdes_dma; + goto err_buff_cleanup; dev_info(dev, "using %s, %s for DMA transfers\n", dma_chan_name(tdes_dd->dma_lch_in.chan), @@ -1359,15 +1260,11 @@ err_algs: spin_unlock(&atmel_tdes.lock); if (tdes_dd->caps.has_dma) atmel_tdes_dma_cleanup(tdes_dd); -err_tdes_dma: -err_pdata: +err_buff_cleanup: atmel_tdes_buff_cleanup(tdes_dd); -err_tdes_buff: -res_err: +err_tasklet_kill: tasklet_kill(&tdes_dd->done_task); tasklet_kill(&tdes_dd->queue_task); -tdes_dd_err: - dev_err(dev, "initialization failed.\n"); return err; } diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c index 4b20606983a4..fcf1effc7661 100644 --- a/drivers/crypto/axis/artpec6_crypto.c +++ b/drivers/crypto/axis/artpec6_crypto.c @@ -1249,10 +1249,8 @@ static int artpec6_crypto_aead_set_key(struct crypto_aead *tfm, const u8 *key, { struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(&tfm->base); - if (len != 16 && len != 24 && len != 32) { - crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -1; - } + if (len != 16 && len != 24 && len != 32) + return -EINVAL; ctx->key_length = len; @@ -1606,8 +1604,6 @@ artpec6_crypto_cipher_set_key(struct crypto_skcipher *cipher, const u8 *key, case 32: break; default: - crypto_skcipher_set_flags(cipher, - CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } @@ -1634,8 +1630,6 @@ artpec6_crypto_xts_set_key(struct crypto_skcipher *cipher, const u8 *key, case 64: break; default: - crypto_skcipher_set_flags(cipher, - CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index 1564a6f8c9cb..c8b9408541a9 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ -1846,7 +1846,6 @@ static int aes_setkey(struct crypto_skcipher *cipher, const u8 *key, ctx->cipher_type = CIPHER_TYPE_AES256; break; default: - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } WARN_ON((ctx->max_payload != SPU_MAX_PAYLOAD_INF) && @@ -2894,13 +2893,8 @@ static int aead_authenc_setkey(struct crypto_aead *cipher, ctx->fallback_cipher->base.crt_flags |= tfm->crt_flags & CRYPTO_TFM_REQ_MASK; ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen); - if (ret) { + if (ret) flow_log(" fallback setkey() returned:%d\n", ret); - tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; - tfm->crt_flags |= - (ctx->fallback_cipher->base.crt_flags & - 
CRYPTO_TFM_RES_MASK); - } } ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen, @@ -2916,7 +2910,6 @@ badkey: ctx->authkeylen = 0; ctx->digestsize = 0; - crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } @@ -2967,13 +2960,8 @@ static int aead_gcm_ccm_setkey(struct crypto_aead *cipher, tfm->crt_flags & CRYPTO_TFM_REQ_MASK; ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen + ctx->salt_len); - if (ret) { + if (ret) flow_log(" fallback setkey() returned:%d\n", ret); - tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; - tfm->crt_flags |= - (ctx->fallback_cipher->base.crt_flags & - CRYPTO_TFM_RES_MASK); - } } ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen, @@ -2992,7 +2980,6 @@ badkey: ctx->authkeylen = 0; ctx->digestsize = 0; - crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig index 87053e46c788..fac5b2e26610 100644 --- a/drivers/crypto/caam/Kconfig +++ b/drivers/crypto/caam/Kconfig @@ -130,13 +130,13 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API scatterlist crypto API to the SEC4 via job ring. config CRYPTO_DEV_FSL_CAAM_PKC_API - bool "Register public key cryptography implementations with Crypto API" - default y - select CRYPTO_RSA - help - Selecting this will allow SEC Public key support for RSA. - Supported cryptographic primitives: encryption, decryption, - signature and verification. + bool "Register public key cryptography implementations with Crypto API" + default y + select CRYPTO_RSA + help + Selecting this will allow SEC Public key support for RSA. + Supported cryptographic primitives: encryption, decryption, + signature and verification. config CRYPTO_DEV_FSL_CAAM_RNG_API bool "Register caam device for hwrng API" diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 2912006b946b..ef1a65f4fc92 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -548,10 +548,8 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key, unsigned int ivsize = crypto_aead_ivsize(aead); unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize; - if (keylen != CHACHA_KEY_SIZE + saltlen) { - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (keylen != CHACHA_KEY_SIZE + saltlen) return -EINVAL; - } ctx->cdata.key_virt = key; ctx->cdata.keylen = keylen - saltlen; @@ -619,7 +617,6 @@ skip_split_key: memzero_explicit(&keys, sizeof(keys)); return aead_set_sh_desc(aead); badkey: - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); memzero_explicit(&keys, sizeof(keys)); return -EINVAL; } @@ -649,10 +646,8 @@ static int gcm_setkey(struct crypto_aead *aead, int err; err = aes_check_keylen(keylen); - if (err) { - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (err) return err; - } print_hex_dump_debug("key in @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); @@ -672,10 +667,8 @@ static int rfc4106_setkey(struct crypto_aead *aead, int err; err = aes_check_keylen(keylen - 4); - if (err) { - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (err) return err; - } print_hex_dump_debug("key in @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); @@ -700,10 +693,8 @@ static int rfc4543_setkey(struct crypto_aead *aead, int err; err = aes_check_keylen(keylen - 4); - if (err) { - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (err) return err; - } print_hex_dump_debug("key in @"__stringify(__LINE__)": ", 
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); @@ -762,11 +753,8 @@ static int aes_skcipher_setkey(struct crypto_skcipher *skcipher, int err; err = aes_check_keylen(keylen); - if (err) { - crypto_skcipher_set_flags(skcipher, - CRYPTO_TFM_RES_BAD_KEY_LEN); + if (err) return err; - } return skcipher_setkey(skcipher, key, keylen, 0); } @@ -786,11 +774,8 @@ static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher, keylen -= CTR_RFC3686_NONCE_SIZE; err = aes_check_keylen(keylen); - if (err) { - crypto_skcipher_set_flags(skcipher, - CRYPTO_TFM_RES_BAD_KEY_LEN); + if (err) return err; - } return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); } @@ -809,11 +794,8 @@ static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher, ctx1_iv_off = 16; err = aes_check_keylen(keylen); - if (err) { - crypto_skcipher_set_flags(skcipher, - CRYPTO_TFM_RES_BAD_KEY_LEN); + if (err) return err; - } return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); } @@ -846,7 +828,6 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, u32 *desc; if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { - crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN); dev_err(jrdev, "key size mismatch\n"); return -EINVAL; } diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index 8e3449670d2f..4a29e0ef9d63 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c @@ -268,7 +268,6 @@ skip_split_key: memzero_explicit(&keys, sizeof(keys)); return ret; badkey: - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); memzero_explicit(&keys, sizeof(keys)); return -EINVAL; } @@ -356,10 +355,8 @@ static int gcm_setkey(struct crypto_aead *aead, int ret; ret = aes_check_keylen(keylen); - if (ret) { - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (ret) return ret; - } print_hex_dump_debug("key in @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); @@ -462,10 +459,8 @@ static int rfc4106_setkey(struct crypto_aead *aead, int ret; ret = aes_check_keylen(keylen - 4); - if (ret) { - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (ret) return ret; - } print_hex_dump_debug("key in @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); @@ -570,10 +565,8 @@ static int rfc4543_setkey(struct crypto_aead *aead, int ret; ret = aes_check_keylen(keylen - 4); - if (ret) { - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (ret) return ret; - } print_hex_dump_debug("key in @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); @@ -644,7 +637,7 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, ctx->sh_desc_enc); if (ret) { dev_err(jrdev, "driver enc context update failed\n"); - goto badkey; + return -EINVAL; } } @@ -653,14 +646,11 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, ctx->sh_desc_dec); if (ret) { dev_err(jrdev, "driver dec context update failed\n"); - goto badkey; + return -EINVAL; } } return ret; -badkey: - crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; } static int aes_skcipher_setkey(struct crypto_skcipher *skcipher, @@ -669,11 +659,8 @@ static int aes_skcipher_setkey(struct crypto_skcipher *skcipher, int err; err = aes_check_keylen(keylen); - if (err) { - crypto_skcipher_set_flags(skcipher, - CRYPTO_TFM_RES_BAD_KEY_LEN); + if (err) return err; - } return skcipher_setkey(skcipher, key, keylen, 0); } @@ -693,11 +680,8 
@@ static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher, keylen -= CTR_RFC3686_NONCE_SIZE; err = aes_check_keylen(keylen); - if (err) { - crypto_skcipher_set_flags(skcipher, - CRYPTO_TFM_RES_BAD_KEY_LEN); + if (err) return err; - } return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); } @@ -716,11 +700,8 @@ static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher, ctx1_iv_off = 16; err = aes_check_keylen(keylen); - if (err) { - crypto_skcipher_set_flags(skcipher, - CRYPTO_TFM_RES_BAD_KEY_LEN); + if (err) return err; - } return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); } @@ -748,7 +729,7 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { dev_err(jrdev, "key size mismatch\n"); - goto badkey; + return -EINVAL; } ctx->cdata.keylen = keylen; @@ -765,7 +746,7 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, ctx->sh_desc_enc); if (ret) { dev_err(jrdev, "driver enc context update failed\n"); - goto badkey; + return -EINVAL; } } @@ -774,14 +755,11 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, ctx->sh_desc_dec); if (ret) { dev_err(jrdev, "driver dec context update failed\n"); - goto badkey; + return -EINVAL; } } return ret; -badkey: - crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; } /* diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index 3443f6d6dd83..28669cbecf77 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -313,7 +313,6 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key, memzero_explicit(&keys, sizeof(keys)); return aead_set_sh_desc(aead); badkey: - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); memzero_explicit(&keys, sizeof(keys)); return -EINVAL; } @@ -326,11 +325,11 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key, err = crypto_authenc_extractkeys(&keys, key, keylen); if (unlikely(err)) - goto badkey; + goto out; err = -EINVAL; if (keys.enckeylen != DES3_EDE_KEY_SIZE) - goto badkey; + goto out; err = crypto_des3_ede_verify_key(crypto_aead_tfm(aead), keys.enckey) ?: aead_setkey(aead, key, keylen); @@ -338,10 +337,6 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key, out: memzero_explicit(&keys, sizeof(keys)); return err; - -badkey: - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); - goto out; } static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, @@ -634,10 +629,8 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key, unsigned int ivsize = crypto_aead_ivsize(aead); unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize; - if (keylen != CHACHA_KEY_SIZE + saltlen) { - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (keylen != CHACHA_KEY_SIZE + saltlen) return -EINVAL; - } ctx->cdata.key_virt = key; ctx->cdata.keylen = keylen - saltlen; @@ -725,10 +718,8 @@ static int gcm_setkey(struct crypto_aead *aead, int ret; ret = aes_check_keylen(keylen); - if (ret) { - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (ret) return ret; - } print_hex_dump_debug("key in @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); @@ -822,10 +813,8 @@ static int rfc4106_setkey(struct crypto_aead *aead, int ret; ret = aes_check_keylen(keylen - 4); - if (ret) { - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (ret) return ret; - 
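These caam hunks follow the same rule as the artpec6 and bcm changes above: with CRYPTO_TFM_RES_* reporting removed from the crypto API, a setkey implementation signals a bad key purely through its return value. A minimal sketch (example_setkey is hypothetical; aes_check_keylen() is the real helper used in the hunks):

#include <crypto/aes.h>
#include <crypto/skcipher.h>

static int example_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  unsigned int keylen)
{
	int err = aes_check_keylen(keylen);

	if (err)
		return err;	/* -EINVAL; no tfm result flags to set */

	/* program the key into the engine here */
	return 0;
}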
} print_hex_dump_debug("key in @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); @@ -923,10 +912,8 @@ static int rfc4543_setkey(struct crypto_aead *aead, int ret; ret = aes_check_keylen(keylen - 4); - if (ret) { - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (ret) return ret; - } print_hex_dump_debug("key in @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); @@ -992,11 +979,8 @@ static int aes_skcipher_setkey(struct crypto_skcipher *skcipher, int err; err = aes_check_keylen(keylen); - if (err) { - crypto_skcipher_set_flags(skcipher, - CRYPTO_TFM_RES_BAD_KEY_LEN); + if (err) return err; - } return skcipher_setkey(skcipher, key, keylen, 0); } @@ -1016,11 +1000,8 @@ static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher, keylen -= CTR_RFC3686_NONCE_SIZE; err = aes_check_keylen(keylen); - if (err) { - crypto_skcipher_set_flags(skcipher, - CRYPTO_TFM_RES_BAD_KEY_LEN); + if (err) return err; - } return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); } @@ -1039,11 +1020,8 @@ static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher, ctx1_iv_off = 16; err = aes_check_keylen(keylen); - if (err) { - crypto_skcipher_set_flags(skcipher, - CRYPTO_TFM_RES_BAD_KEY_LEN); + if (err) return err; - } return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); } @@ -1051,11 +1029,8 @@ static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher, static int chacha20_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen) { - if (keylen != CHACHA_KEY_SIZE) { - crypto_skcipher_set_flags(skcipher, - CRYPTO_TFM_RES_BAD_KEY_LEN); + if (keylen != CHACHA_KEY_SIZE) return -EINVAL; - } return skcipher_setkey(skcipher, key, keylen, 0); } @@ -1084,7 +1059,6 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { dev_err(dev, "key size mismatch\n"); - crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } @@ -2481,7 +2455,7 @@ static struct caam_aead_alg driver_aeads[] = { .cra_name = "echainiv(authenc(hmac(sha256)," "cbc(des)))", .cra_driver_name = "echainiv-authenc-" - "hmac-sha256-cbc-desi-" + "hmac-sha256-cbc-des-" "caam-qi2", .cra_blocksize = DES_BLOCK_SIZE, }, @@ -2998,15 +2972,13 @@ struct caam_hash_state { dma_addr_t buf_dma; dma_addr_t ctx_dma; int ctx_dma_len; - u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; - int buflen_0; - u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; - int buflen_1; + u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; + int buflen; + int next_buflen; u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned; int (*update)(struct ahash_request *req); int (*final)(struct ahash_request *req); int (*finup)(struct ahash_request *req); - int current_buf; }; struct caam_export_state { @@ -3018,42 +2990,17 @@ struct caam_export_state { int (*finup)(struct ahash_request *req); }; -static inline void switch_buf(struct caam_hash_state *state) -{ - state->current_buf ^= 1; -} - -static inline u8 *current_buf(struct caam_hash_state *state) -{ - return state->current_buf ? state->buf_1 : state->buf_0; -} - -static inline u8 *alt_buf(struct caam_hash_state *state) -{ - return state->current_buf ? state->buf_0 : state->buf_1; -} - -static inline int *current_buflen(struct caam_hash_state *state) -{ - return state->current_buf ? 
&state->buflen_1 : &state->buflen_0; -} - -static inline int *alt_buflen(struct caam_hash_state *state) -{ - return state->current_buf ? &state->buflen_0 : &state->buflen_1; -} - /* Map current buffer in state (if length > 0) and put it in link table */ static inline int buf_map_to_qm_sg(struct device *dev, struct dpaa2_sg_entry *qm_sg, struct caam_hash_state *state) { - int buflen = *current_buflen(state); + int buflen = state->buflen; if (!buflen) return 0; - state->buf_dma = dma_map_single(dev, current_buf(state), buflen, + state->buf_dma = dma_map_single(dev, state->buf, buflen, DMA_TO_DEVICE); if (dma_mapping_error(dev, state->buf_dma)) { dev_err(dev, "unable to map buf\n"); @@ -3304,7 +3251,6 @@ static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key, return ret; bad_free_key: kfree(hashed_key); - crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } @@ -3321,7 +3267,7 @@ static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc, DMA_TO_DEVICE); if (state->buf_dma) { - dma_unmap_single(dev, state->buf_dma, *current_buflen(state), + dma_unmap_single(dev, state->buf_dma, state->buflen, DMA_TO_DEVICE); state->buf_dma = 0; } @@ -3383,9 +3329,17 @@ static void ahash_done_bi(void *cbk_ctx, u32 status) ecode = caam_qi2_strstatus(ctx->dev, status); ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL); - switch_buf(state); qi_cache_free(edesc); + scatterwalk_map_and_copy(state->buf, req->src, + req->nbytes - state->next_buflen, + state->next_buflen, 0); + state->buflen = state->next_buflen; + + print_hex_dump_debug("buf@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, state->buf, + state->buflen, 1); + print_hex_dump_debug("ctx@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, ctx->ctx_len, 1); @@ -3440,9 +3394,17 @@ static void ahash_done_ctx_dst(void *cbk_ctx, u32 status) ecode = caam_qi2_strstatus(ctx->dev, status); ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE); - switch_buf(state); qi_cache_free(edesc); + scatterwalk_map_and_copy(state->buf, req->src, + req->nbytes - state->next_buflen, + state->next_buflen, 0); + state->buflen = state->next_buflen; + + print_hex_dump_debug("buf@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, state->buf, + state->buflen, 1); + print_hex_dump_debug("ctx@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, ctx->ctx_len, 1); @@ -3464,16 +3426,14 @@ static int ahash_update_ctx(struct ahash_request *req) struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
GFP_KERNEL : GFP_ATOMIC; - u8 *buf = current_buf(state); - int *buflen = current_buflen(state); - u8 *next_buf = alt_buf(state); - int *next_buflen = alt_buflen(state), last_buflen; + u8 *buf = state->buf; + int *buflen = &state->buflen; + int *next_buflen = &state->next_buflen; int in_len = *buflen + req->nbytes, to_hash; int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index; struct ahash_edesc *edesc; int ret = 0; - last_buflen = *next_buflen; *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1); to_hash = in_len - *next_buflen; @@ -3524,10 +3484,6 @@ static int ahash_update_ctx(struct ahash_request *req) if (mapped_nents) { sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_src_index, 0); - if (*next_buflen) - scatterwalk_map_and_copy(next_buf, req->src, - to_hash - *buflen, - *next_buflen, 0); } else { dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true); @@ -3566,14 +3522,11 @@ static int ahash_update_ctx(struct ahash_request *req) scatterwalk_map_and_copy(buf + *buflen, req->src, 0, req->nbytes, 0); *buflen = *next_buflen; - *next_buflen = last_buflen; - } - print_hex_dump_debug("buf@" __stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); - print_hex_dump_debug("next buf@" __stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen, - 1); + print_hex_dump_debug("buf@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, buf, + *buflen, 1); + } return ret; unmap_ctx: @@ -3592,7 +3545,7 @@ static int ahash_final_ctx(struct ahash_request *req) struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC; - int buflen = *current_buflen(state); + int buflen = state->buflen; int qm_sg_bytes; int digestsize = crypto_ahash_digestsize(ahash); struct ahash_edesc *edesc; @@ -3663,7 +3616,7 @@ static int ahash_finup_ctx(struct ahash_request *req) struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC; - int buflen = *current_buflen(state); + int buflen = state->buflen; int qm_sg_bytes, qm_sg_src_index; int src_nents, mapped_nents; int digestsize = crypto_ahash_digestsize(ahash); @@ -3852,8 +3805,8 @@ static int ahash_final_no_ctx(struct ahash_request *req) struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC; - u8 *buf = current_buf(state); - int buflen = *current_buflen(state); + u8 *buf = state->buf; + int buflen = state->buflen; int digestsize = crypto_ahash_digestsize(ahash); struct ahash_edesc *edesc; int ret = -ENOMEM; @@ -3925,10 +3878,9 @@ static int ahash_update_no_ctx(struct ahash_request *req) struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
GFP_KERNEL : GFP_ATOMIC; - u8 *buf = current_buf(state); - int *buflen = current_buflen(state); - u8 *next_buf = alt_buf(state); - int *next_buflen = alt_buflen(state); + u8 *buf = state->buf; + int *buflen = &state->buflen; + int *next_buflen = &state->next_buflen; int in_len = *buflen + req->nbytes, to_hash; int qm_sg_bytes, src_nents, mapped_nents; struct ahash_edesc *edesc; @@ -3977,11 +3929,6 @@ static int ahash_update_no_ctx(struct ahash_request *req) sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0); - if (*next_buflen) - scatterwalk_map_and_copy(next_buf, req->src, - to_hash - *buflen, - *next_buflen, 0); - edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE); if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { @@ -4029,14 +3976,11 @@ static int ahash_update_no_ctx(struct ahash_request *req) scatterwalk_map_and_copy(buf + *buflen, req->src, 0, req->nbytes, 0); *buflen = *next_buflen; - *next_buflen = 0; - } - print_hex_dump_debug("buf@" __stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); - print_hex_dump_debug("next buf@" __stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen, - 1); + print_hex_dump_debug("buf@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, buf, + *buflen, 1); + } return ret; unmap_ctx: @@ -4055,7 +3999,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req) struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC; - int buflen = *current_buflen(state); + int buflen = state->buflen; int qm_sg_bytes, src_nents, mapped_nents; int digestsize = crypto_ahash_digestsize(ahash); struct ahash_edesc *edesc; @@ -4151,8 +4095,9 @@ static int ahash_update_first(struct ahash_request *req) struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
GFP_KERNEL : GFP_ATOMIC; - u8 *next_buf = alt_buf(state); - int *next_buflen = alt_buflen(state); + u8 *buf = state->buf; + int *buflen = &state->buflen; + int *next_buflen = &state->next_buflen; int to_hash; int src_nents, mapped_nents; struct ahash_edesc *edesc; @@ -4220,10 +4165,6 @@ static int ahash_update_first(struct ahash_request *req) dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src)); } - if (*next_buflen) - scatterwalk_map_and_copy(next_buf, req->src, to_hash, - *next_buflen, 0); - state->ctx_dma_len = ctx->ctx_len; state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, ctx->ctx_len, DMA_FROM_DEVICE); @@ -4257,14 +4198,14 @@ static int ahash_update_first(struct ahash_request *req) state->update = ahash_update_no_ctx; state->finup = ahash_finup_no_ctx; state->final = ahash_final_no_ctx; - scatterwalk_map_and_copy(next_buf, req->src, 0, + scatterwalk_map_and_copy(buf, req->src, 0, req->nbytes, 0); - switch_buf(state); - } + *buflen = *next_buflen; - print_hex_dump_debug("next buf@" __stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen, - 1); + print_hex_dump_debug("buf@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, buf, + *buflen, 1); + } return ret; unmap_ctx: @@ -4288,10 +4229,9 @@ static int ahash_init(struct ahash_request *req) state->ctx_dma = 0; state->ctx_dma_len = 0; - state->current_buf = 0; state->buf_dma = 0; - state->buflen_0 = 0; - state->buflen_1 = 0; + state->buflen = 0; + state->next_buflen = 0; return 0; } @@ -4321,16 +4261,8 @@ static int ahash_export(struct ahash_request *req, void *out) { struct caam_hash_state *state = ahash_request_ctx(req); struct caam_export_state *export = out; - int len; - u8 *buf; - - if (state->current_buf) { - buf = state->buf_1; - len = state->buflen_1; - } else { - buf = state->buf_0; - len = state->buflen_0; - } + u8 *buf = state->buf; + int len = state->buflen; memcpy(export->buf, buf, len); memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx)); @@ -4348,9 +4280,9 @@ static int ahash_import(struct ahash_request *req, const void *in) const struct caam_export_state *export = in; memset(state, 0, sizeof(*state)); - memcpy(state->buf_0, export->buf, export->buflen); + memcpy(state->buf, export->buf, export->buflen); memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx)); - state->buflen_0 = export->buflen; + state->buflen = export->buflen; state->update = export->update; state->final = export->final; state->finup = export->finup; diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 65399cb2a770..8d9143407fc5 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -107,15 +107,13 @@ struct caam_hash_state { dma_addr_t buf_dma; dma_addr_t ctx_dma; int ctx_dma_len; - u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; - int buflen_0; - u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; - int buflen_1; + u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; + int buflen; + int next_buflen; u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned; int (*update)(struct ahash_request *req); int (*final)(struct ahash_request *req); int (*finup)(struct ahash_request *req); - int current_buf; }; struct caam_export_state { @@ -127,31 +125,6 @@ struct caam_export_state { int (*finup)(struct ahash_request *req); }; -static inline void switch_buf(struct caam_hash_state *state) -{ - state->current_buf ^= 1; -} - -static inline u8 *current_buf(struct caam_hash_state *state) -{ - return state->current_buf ? 
state->buf_1 : state->buf_0; -} - -static inline u8 *alt_buf(struct caam_hash_state *state) -{ - return state->current_buf ? state->buf_0 : state->buf_1; -} - -static inline int *current_buflen(struct caam_hash_state *state) -{ - return state->current_buf ? &state->buflen_1 : &state->buflen_0; -} - -static inline int *alt_buflen(struct caam_hash_state *state) -{ - return state->current_buf ? &state->buflen_0 : &state->buflen_1; -} - static inline bool is_cmac_aes(u32 algtype) { return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) == @@ -183,12 +156,12 @@ static inline int buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg, struct caam_hash_state *state) { - int buflen = *current_buflen(state); + int buflen = state->buflen; if (!buflen) return 0; - state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen, + state->buf_dma = dma_map_single(jrdev, state->buf, buflen, DMA_TO_DEVICE); if (dma_mapping_error(jrdev, state->buf_dma)) { dev_err(jrdev, "unable to map buf\n"); @@ -500,7 +473,6 @@ static int ahash_setkey(struct crypto_ahash *ahash, return ahash_set_sh_desc(ahash); bad_free_key: kfree(hashed_key); - crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } @@ -510,10 +482,8 @@ static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key, struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); struct device *jrdev = ctx->jrdev; - if (keylen != AES_KEYSIZE_128) { - crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (keylen != AES_KEYSIZE_128) return -EINVAL; - } memcpy(ctx->key, key, keylen); dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen, @@ -533,10 +503,8 @@ static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key, int err; err = aes_check_keylen(keylen); - if (err) { - crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (err) return err; - } /* key is immediate data for all cmac shared descriptors */ ctx->adata.key_virt = key; @@ -578,7 +546,7 @@ static inline void ahash_unmap(struct device *dev, edesc->sec4_sg_bytes, DMA_TO_DEVICE); if (state->buf_dma) { - dma_unmap_single(dev, state->buf_dma, *current_buflen(state), + dma_unmap_single(dev, state->buf_dma, state->buflen, DMA_TO_DEVICE); state->buf_dma = 0; } @@ -643,9 +611,17 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err, ecode = caam_jr_strstatus(jrdev, err); ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); - switch_buf(state); kfree(edesc); + scatterwalk_map_and_copy(state->buf, req->src, + req->nbytes - state->next_buflen, + state->next_buflen, 0); + state->buflen = state->next_buflen; + + print_hex_dump_debug("buf@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, state->buf, + state->buflen, 1); + print_hex_dump_debug("ctx@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, ctx->ctx_len, 1); @@ -703,9 +679,17 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, ecode = caam_jr_strstatus(jrdev, err); ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE); - switch_buf(state); kfree(edesc); + scatterwalk_map_and_copy(state->buf, req->src, + req->nbytes - state->next_buflen, + state->next_buflen, 0); + state->buflen = state->next_buflen; + + print_hex_dump_debug("buf@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, state->buf, + state->buflen, 1); + print_hex_dump_debug("ctx@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, ctx->ctx_len, 1); @@ -786,18 +770,16 @@ static int 
ahash_update_ctx(struct ahash_request *req) struct device *jrdev = ctx->jrdev; gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC; - u8 *buf = current_buf(state); - int *buflen = current_buflen(state); - u8 *next_buf = alt_buf(state); + u8 *buf = state->buf; + int *buflen = &state->buflen; + int *next_buflen = &state->next_buflen; int blocksize = crypto_ahash_blocksize(ahash); - int *next_buflen = alt_buflen(state), last_buflen; int in_len = *buflen + req->nbytes, to_hash; u32 *desc; int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index; struct ahash_edesc *edesc; int ret = 0; - last_buflen = *next_buflen; *next_buflen = in_len & (blocksize - 1); to_hash = in_len - *next_buflen; @@ -868,10 +850,6 @@ static int ahash_update_ctx(struct ahash_request *req) sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1); - if (*next_buflen) - scatterwalk_map_and_copy(next_buf, req->src, - to_hash - *buflen, - *next_buflen, 0); desc = edesc->hw_desc; edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, @@ -901,14 +879,11 @@ static int ahash_update_ctx(struct ahash_request *req) scatterwalk_map_and_copy(buf + *buflen, req->src, 0, req->nbytes, 0); *buflen = *next_buflen; - *next_buflen = last_buflen; - } - print_hex_dump_debug("buf@"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); - print_hex_dump_debug("next buf@"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, next_buf, - *next_buflen, 1); + print_hex_dump_debug("buf@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, buf, + *buflen, 1); + } return ret; unmap_ctx: @@ -925,7 +900,7 @@ static int ahash_final_ctx(struct ahash_request *req) struct device *jrdev = ctx->jrdev; gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC; - int buflen = *current_buflen(state); + int buflen = state->buflen; u32 *desc; int sec4_sg_bytes; int digestsize = crypto_ahash_digestsize(ahash); @@ -991,7 +966,7 @@ static int ahash_finup_ctx(struct ahash_request *req) struct device *jrdev = ctx->jrdev; gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC; - int buflen = *current_buflen(state); + int buflen = state->buflen; u32 *desc; int sec4_sg_src_index; int src_nents, mapped_nents; @@ -1148,8 +1123,8 @@ static int ahash_final_no_ctx(struct ahash_request *req) struct device *jrdev = ctx->jrdev; gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC; - u8 *buf = current_buf(state); - int buflen = *current_buflen(state); + u8 *buf = state->buf; + int buflen = state->buflen; u32 *desc; int digestsize = crypto_ahash_digestsize(ahash); struct ahash_edesc *edesc; @@ -1207,11 +1182,10 @@ static int ahash_update_no_ctx(struct ahash_request *req) struct device *jrdev = ctx->jrdev; gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
GFP_KERNEL : GFP_ATOMIC; - u8 *buf = current_buf(state); - int *buflen = current_buflen(state); + u8 *buf = state->buf; + int *buflen = &state->buflen; + int *next_buflen = &state->next_buflen; int blocksize = crypto_ahash_blocksize(ahash); - u8 *next_buf = alt_buf(state); - int *next_buflen = alt_buflen(state); int in_len = *buflen + req->nbytes, to_hash; int sec4_sg_bytes, src_nents, mapped_nents; struct ahash_edesc *edesc; @@ -1278,12 +1252,6 @@ static int ahash_update_no_ctx(struct ahash_request *req) sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0); - if (*next_buflen) { - scatterwalk_map_and_copy(next_buf, req->src, - to_hash - *buflen, - *next_buflen, 0); - } - desc = edesc->hw_desc; edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, @@ -1317,14 +1285,11 @@ static int ahash_update_no_ctx(struct ahash_request *req) scatterwalk_map_and_copy(buf + *buflen, req->src, 0, req->nbytes, 0); *buflen = *next_buflen; - *next_buflen = 0; - } - print_hex_dump_debug("buf@"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); - print_hex_dump_debug("next buf@"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen, - 1); + print_hex_dump_debug("buf@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, buf, + *buflen, 1); + } return ret; unmap_ctx: @@ -1342,7 +1307,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req) struct device *jrdev = ctx->jrdev; gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC; - int buflen = *current_buflen(state); + int buflen = state->buflen; u32 *desc; int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents; int digestsize = crypto_ahash_digestsize(ahash); @@ -1428,8 +1393,9 @@ static int ahash_update_first(struct ahash_request *req) struct device *jrdev = ctx->jrdev; gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
GFP_KERNEL : GFP_ATOMIC; - u8 *next_buf = alt_buf(state); - int *next_buflen = alt_buflen(state); + u8 *buf = state->buf; + int *buflen = &state->buflen; + int *next_buflen = &state->next_buflen; int to_hash; int blocksize = crypto_ahash_blocksize(ahash); u32 *desc; @@ -1491,10 +1457,6 @@ static int ahash_update_first(struct ahash_request *req) if (ret) goto unmap_ctx; - if (*next_buflen) - scatterwalk_map_and_copy(next_buf, req->src, to_hash, - *next_buflen, 0); - desc = edesc->hw_desc; ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); @@ -1517,14 +1479,14 @@ static int ahash_update_first(struct ahash_request *req) state->update = ahash_update_no_ctx; state->finup = ahash_finup_no_ctx; state->final = ahash_final_no_ctx; - scatterwalk_map_and_copy(next_buf, req->src, 0, + scatterwalk_map_and_copy(buf, req->src, 0, req->nbytes, 0); - switch_buf(state); - } + *buflen = *next_buflen; - print_hex_dump_debug("next buf@"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen, - 1); + print_hex_dump_debug("buf@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, buf, + *buflen, 1); + } return ret; unmap_ctx: @@ -1548,10 +1510,9 @@ static int ahash_init(struct ahash_request *req) state->ctx_dma = 0; state->ctx_dma_len = 0; - state->current_buf = 0; state->buf_dma = 0; - state->buflen_0 = 0; - state->buflen_1 = 0; + state->buflen = 0; + state->next_buflen = 0; return 0; } @@ -1581,16 +1542,8 @@ static int ahash_export(struct ahash_request *req, void *out) { struct caam_hash_state *state = ahash_request_ctx(req); struct caam_export_state *export = out; - int len; - u8 *buf; - - if (state->current_buf) { - buf = state->buf_1; - len = state->buflen_1; - } else { - buf = state->buf_0; - len = state->buflen_0; - } + u8 *buf = state->buf; + int len = state->buflen; memcpy(export->buf, buf, len); memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx)); @@ -1608,9 +1561,9 @@ static int ahash_import(struct ahash_request *req, const void *in) const struct caam_export_state *export = in; memset(state, 0, sizeof(*state)); - memcpy(state->buf_0, export->buf, export->buflen); + memcpy(state->buf, export->buf, export->buflen); memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx)); - state->buflen_0 = export->buflen; + state->buflen = export->buflen; state->update = export->update; state->final = export->final; state->finup = export->finup; diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index d7c3c3805693..7139366da016 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -99,10 +99,13 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc, if (ctrlpriv->virt_en == 1 || /* - * Apparently on i.MX8MQ it doesn't matter if virt_en == 1 + * Apparently on i.MX8M{Q,M,N,P} it doesn't matter if virt_en == 1 * and the following steps should be performed regardless */ - of_machine_is_compatible("fsl,imx8mq")) { + of_machine_is_compatible("fsl,imx8mq") || + of_machine_is_compatible("fsl,imx8mm") || + of_machine_is_compatible("fsl,imx8mn") || + of_machine_is_compatible("fsl,imx8mp")) { clrsetbits_32(&ctrl->deco_rsr, 0, DECORSR_JR0); while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) && @@ -508,7 +511,7 @@ static const struct soc_device_attribute caam_imx_soc_table[] = { { .soc_id = "i.MX6UL", .data = &caam_imx6ul_data }, { .soc_id = "i.MX6*", .data = &caam_imx6_data }, { .soc_id = "i.MX7*", .data = &caam_imx7_data }, - { .soc_id = "i.MX8MQ", .data = &caam_imx7_data }, + { .soc_id = "i.MX8M*", 
.data = &caam_imx7_data }, { .family = "Freescale i.MX" }, { /* sentinel */ } }; @@ -671,11 +674,9 @@ static int caam_probe(struct platform_device *pdev) of_node_put(np); if (!ctrlpriv->mc_en) - clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR, + clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK, MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF | - MCFGR_WDENABLE | MCFGR_LARGE_BURST | - (sizeof(dma_addr_t) == sizeof(u64) ? - MCFGR_LONG_PTR : 0)); + MCFGR_WDENABLE | MCFGR_LARGE_BURST); handle_imx6_err005766(&ctrl->mcr); diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c index 1ad66677d88e..1be1adffff1d 100644 --- a/drivers/crypto/cavium/cpt/cptvf_algs.c +++ b/drivers/crypto/cavium/cpt/cptvf_algs.c @@ -295,8 +295,6 @@ static int cvm_setkey(struct crypto_skcipher *cipher, const u8 *key, memcpy(ctx->enc_key, key, keylen); return 0; } else { - crypto_skcipher_set_flags(cipher, - CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } } diff --git a/drivers/crypto/cavium/nitrox/nitrox_aead.c b/drivers/crypto/cavium/nitrox/nitrox_aead.c index 6f80cc3b5c84..dce5423a5883 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_aead.c +++ b/drivers/crypto/cavium/nitrox/nitrox_aead.c @@ -40,10 +40,8 @@ static int nitrox_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key, union fc_ctx_flags flags; aes_keylen = flexi_aes_keylen(keylen); - if (aes_keylen < 0) { - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (aes_keylen < 0) return -EINVAL; - } /* fill crypto context */ fctx = nctx->u.fctx; diff --git a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c index 97af4d50d003..18088b0a2257 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c +++ b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c @@ -200,10 +200,8 @@ static int nitrox_aes_setkey(struct crypto_skcipher *cipher, const u8 *key, int aes_keylen; aes_keylen = flexi_aes_keylen(keylen); - if (aes_keylen < 0) { - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (aes_keylen < 0) return -EINVAL; - } return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen); } @@ -351,10 +349,8 @@ static int nitrox_aes_xts_setkey(struct crypto_skcipher *cipher, keylen /= 2; aes_keylen = flexi_aes_keylen(keylen); - if (aes_keylen < 0) { - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (aes_keylen < 0) return -EINVAL; - } fctx = nctx->u.fctx; /* copy KEY2 */ @@ -382,10 +378,8 @@ static int nitrox_aes_ctr_rfc3686_setkey(struct crypto_skcipher *cipher, keylen -= CTR_RFC3686_NONCE_SIZE; aes_keylen = flexi_aes_keylen(keylen); - if (aes_keylen < 0) { - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (aes_keylen < 0) return -EINVAL; - } return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen); } diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 6b86f1e6d634..db362fe472ea 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -8,7 +8,9 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_CCP) += ccp-dev.o \ ccp-dmaengine.o ccp-$(CONFIG_CRYPTO_DEV_CCP_DEBUGFS) += ccp-debugfs.o ccp-$(CONFIG_PCI) += sp-pci.o -ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o +ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \ + sev-dev.o \ + tee-dev.o obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o ccp-crypto-objs := ccp-crypto-main.o \ diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c index 32f19f402073..5eba7ee49e81 100644 --- 
a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c +++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c @@ -276,7 +276,6 @@ static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key, ctx->u.aes.type = CCP_AES_TYPE_256; break; default: - crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } ctx->u.aes.mode = alg->mode; diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c index ff50ee80d223..9e8f07c1afac 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c +++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c @@ -42,7 +42,6 @@ static int ccp_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key, ctx->u.aes.type = CCP_AES_TYPE_256; break; default: - crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c index 33328a153225..51e12fbd1159 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes.c +++ b/drivers/crypto/ccp/ccp-crypto-aes.c @@ -51,7 +51,6 @@ static int ccp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, ctx->u.aes.type = CCP_AES_TYPE_256; break; default: - crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } ctx->u.aes.mode = alg->mode; diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c index 453b9797f93f..474e6f1a6a84 100644 --- a/drivers/crypto/ccp/ccp-crypto-sha.c +++ b/drivers/crypto/ccp/ccp-crypto-sha.c @@ -293,10 +293,8 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key, ret = crypto_shash_digest(sdesc, key, key_len, ctx->u.sha.key); - if (ret) { - crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (ret) return -EINVAL; - } key_len = digest_size; } else { diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c index 0186b3df4c87..0d5576f6ad21 100644 --- a/drivers/crypto/ccp/ccp-dev-v3.c +++ b/drivers/crypto/ccp/ccp-dev-v3.c @@ -586,6 +586,7 @@ const struct ccp_vdata ccpv3_platform = { .setup = NULL, .perform = &ccp3_actions, .offset = 0, + .rsamax = CCP_RSA_MAX_WIDTH, }; const struct ccp_vdata ccpv3 = { diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index 7ca2d3408e7a..e95e7aa5dbf1 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -2,57 +2,20 @@ /* * AMD Platform Security Processor (PSP) interface * - * Copyright (C) 2016,2018 Advanced Micro Devices, Inc. + * Copyright (C) 2016,2019 Advanced Micro Devices, Inc. 
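[Editor's note] The setkey hunks above (caam, cavium cpt/nitrox, ccp-crypto) all make the same mechanical change: the crypto API no longer expects drivers to report bad keys through CRYPTO_TFM_RES_BAD_KEY_LEN, so every error path collapses to a plain error return. A minimal sketch of the resulting shape, not taken from the patch -- my_aes_setkey() and my_program_key() are hypothetical stand-ins:

#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>

/* Hypothetical stand-in for programming the key into hardware. */
static int my_program_key(struct crypto_skcipher *tfm, const u8 *key,
			  unsigned int keylen)
{
	return 0;
}

static int my_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	int err = aes_check_keylen(keylen);

	/*
	 * Before this series, drivers also had to call
	 * crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN)
	 * here; now the error code alone carries the information.
	 */
	if (err)
		return err;

	return my_program_key(tfm, key, keylen);
}
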
* * Author: Brijesh Singh <brijesh.singh@amd.com> */ -#include <linux/module.h> #include <linux/kernel.h> -#include <linux/kthread.h> -#include <linux/sched.h> -#include <linux/interrupt.h> -#include <linux/spinlock.h> -#include <linux/spinlock_types.h> -#include <linux/types.h> -#include <linux/mutex.h> -#include <linux/delay.h> -#include <linux/hw_random.h> -#include <linux/ccp.h> -#include <linux/firmware.h> - -#include <asm/smp.h> +#include <linux/irqreturn.h> #include "sp-dev.h" #include "psp-dev.h" +#include "sev-dev.h" +#include "tee-dev.h" -#define DEVICE_NAME "sev" -#define SEV_FW_FILE "amd/sev.fw" -#define SEV_FW_NAME_SIZE 64 - -static DEFINE_MUTEX(sev_cmd_mutex); -static struct sev_misc_dev *misc_dev; -static struct psp_device *psp_master; - -static int psp_cmd_timeout = 100; -module_param(psp_cmd_timeout, int, 0644); -MODULE_PARM_DESC(psp_cmd_timeout, " default timeout value, in seconds, for PSP commands"); - -static int psp_probe_timeout = 5; -module_param(psp_probe_timeout, int, 0644); -MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe"); - -static bool psp_dead; -static int psp_timeout; - -static inline bool sev_version_greater_or_equal(u8 maj, u8 min) -{ - if (psp_master->api_major > maj) - return true; - if (psp_master->api_major == maj && psp_master->api_minor >= min) - return true; - return false; -} +struct psp_device *psp_master; static struct psp_device *psp_alloc_struct(struct sp_device *sp) { @@ -75,902 +38,95 @@ static irqreturn_t psp_irq_handler(int irq, void *data) { struct psp_device *psp = data; unsigned int status; - int reg; /* Read the interrupt status: */ status = ioread32(psp->io_regs + psp->vdata->intsts_reg); - /* Check if it is command completion: */ - if (!(status & PSP_CMD_COMPLETE)) - goto done; + /* invoke subdevice interrupt handlers */ + if (status) { + if (psp->sev_irq_handler) + psp->sev_irq_handler(irq, psp->sev_irq_data, status); - /* Check if it is SEV command completion: */ - reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg); - if (reg & PSP_CMDRESP_RESP) { - psp->sev_int_rcvd = 1; - wake_up(&psp->sev_int_queue); + if (psp->tee_irq_handler) + psp->tee_irq_handler(irq, psp->tee_irq_data, status); } -done: /* Clear the interrupt status by writing the same value we read. 
*/ iowrite32(status, psp->io_regs + psp->vdata->intsts_reg); return IRQ_HANDLED; } -static int sev_wait_cmd_ioc(struct psp_device *psp, - unsigned int *reg, unsigned int timeout) -{ - int ret; - - ret = wait_event_timeout(psp->sev_int_queue, - psp->sev_int_rcvd, timeout * HZ); - if (!ret) - return -ETIMEDOUT; - - *reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg); - - return 0; -} - -static int sev_cmd_buffer_len(int cmd) -{ - switch (cmd) { - case SEV_CMD_INIT: return sizeof(struct sev_data_init); - case SEV_CMD_PLATFORM_STATUS: return sizeof(struct sev_user_data_status); - case SEV_CMD_PEK_CSR: return sizeof(struct sev_data_pek_csr); - case SEV_CMD_PEK_CERT_IMPORT: return sizeof(struct sev_data_pek_cert_import); - case SEV_CMD_PDH_CERT_EXPORT: return sizeof(struct sev_data_pdh_cert_export); - case SEV_CMD_LAUNCH_START: return sizeof(struct sev_data_launch_start); - case SEV_CMD_LAUNCH_UPDATE_DATA: return sizeof(struct sev_data_launch_update_data); - case SEV_CMD_LAUNCH_UPDATE_VMSA: return sizeof(struct sev_data_launch_update_vmsa); - case SEV_CMD_LAUNCH_FINISH: return sizeof(struct sev_data_launch_finish); - case SEV_CMD_LAUNCH_MEASURE: return sizeof(struct sev_data_launch_measure); - case SEV_CMD_ACTIVATE: return sizeof(struct sev_data_activate); - case SEV_CMD_DEACTIVATE: return sizeof(struct sev_data_deactivate); - case SEV_CMD_DECOMMISSION: return sizeof(struct sev_data_decommission); - case SEV_CMD_GUEST_STATUS: return sizeof(struct sev_data_guest_status); - case SEV_CMD_DBG_DECRYPT: return sizeof(struct sev_data_dbg); - case SEV_CMD_DBG_ENCRYPT: return sizeof(struct sev_data_dbg); - case SEV_CMD_SEND_START: return sizeof(struct sev_data_send_start); - case SEV_CMD_SEND_UPDATE_DATA: return sizeof(struct sev_data_send_update_data); - case SEV_CMD_SEND_UPDATE_VMSA: return sizeof(struct sev_data_send_update_vmsa); - case SEV_CMD_SEND_FINISH: return sizeof(struct sev_data_send_finish); - case SEV_CMD_RECEIVE_START: return sizeof(struct sev_data_receive_start); - case SEV_CMD_RECEIVE_FINISH: return sizeof(struct sev_data_receive_finish); - case SEV_CMD_RECEIVE_UPDATE_DATA: return sizeof(struct sev_data_receive_update_data); - case SEV_CMD_RECEIVE_UPDATE_VMSA: return sizeof(struct sev_data_receive_update_vmsa); - case SEV_CMD_LAUNCH_UPDATE_SECRET: return sizeof(struct sev_data_launch_secret); - case SEV_CMD_DOWNLOAD_FIRMWARE: return sizeof(struct sev_data_download_firmware); - case SEV_CMD_GET_ID: return sizeof(struct sev_data_get_id); - default: return 0; - } - - return 0; -} - -static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret) -{ - struct psp_device *psp = psp_master; - unsigned int phys_lsb, phys_msb; - unsigned int reg, ret = 0; - - if (!psp) - return -ENODEV; - - if (psp_dead) - return -EBUSY; - - /* Get the physical address of the command buffer */ - phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0; - phys_msb = data ? 
upper_32_bits(__psp_pa(data)) : 0; - - dev_dbg(psp->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n", - cmd, phys_msb, phys_lsb, psp_timeout); - - print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data, - sev_cmd_buffer_len(cmd), false); - - iowrite32(phys_lsb, psp->io_regs + psp->vdata->cmdbuff_addr_lo_reg); - iowrite32(phys_msb, psp->io_regs + psp->vdata->cmdbuff_addr_hi_reg); - - psp->sev_int_rcvd = 0; - - reg = cmd; - reg <<= PSP_CMDRESP_CMD_SHIFT; - reg |= PSP_CMDRESP_IOC; - iowrite32(reg, psp->io_regs + psp->vdata->cmdresp_reg); - - /* wait for command completion */ - ret = sev_wait_cmd_ioc(psp, ®, psp_timeout); - if (ret) { - if (psp_ret) - *psp_ret = 0; - - dev_err(psp->dev, "sev command %#x timed out, disabling PSP \n", cmd); - psp_dead = true; - - return ret; - } - - psp_timeout = psp_cmd_timeout; - - if (psp_ret) - *psp_ret = reg & PSP_CMDRESP_ERR_MASK; - - if (reg & PSP_CMDRESP_ERR_MASK) { - dev_dbg(psp->dev, "sev command %#x failed (%#010x)\n", - cmd, reg & PSP_CMDRESP_ERR_MASK); - ret = -EIO; - } - - print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data, - sev_cmd_buffer_len(cmd), false); - - return ret; -} - -static int sev_do_cmd(int cmd, void *data, int *psp_ret) -{ - int rc; - - mutex_lock(&sev_cmd_mutex); - rc = __sev_do_cmd_locked(cmd, data, psp_ret); - mutex_unlock(&sev_cmd_mutex); - - return rc; -} - -static int __sev_platform_init_locked(int *error) -{ - struct psp_device *psp = psp_master; - int rc = 0; - - if (!psp) - return -ENODEV; - - if (psp->sev_state == SEV_STATE_INIT) - return 0; - - rc = __sev_do_cmd_locked(SEV_CMD_INIT, &psp->init_cmd_buf, error); - if (rc) - return rc; - - psp->sev_state = SEV_STATE_INIT; - - /* Prepare for first SEV guest launch after INIT */ - wbinvd_on_all_cpus(); - rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, error); - if (rc) - return rc; - - dev_dbg(psp->dev, "SEV firmware initialized\n"); - - return rc; -} - -int sev_platform_init(int *error) -{ - int rc; - - mutex_lock(&sev_cmd_mutex); - rc = __sev_platform_init_locked(error); - mutex_unlock(&sev_cmd_mutex); - - return rc; -} -EXPORT_SYMBOL_GPL(sev_platform_init); - -static int __sev_platform_shutdown_locked(int *error) -{ - int ret; - - ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error); - if (ret) - return ret; - - psp_master->sev_state = SEV_STATE_UNINIT; - dev_dbg(psp_master->dev, "SEV firmware shutdown\n"); - - return ret; -} - -static int sev_platform_shutdown(int *error) +static unsigned int psp_get_capability(struct psp_device *psp) { - int rc; - - mutex_lock(&sev_cmd_mutex); - rc = __sev_platform_shutdown_locked(NULL); - mutex_unlock(&sev_cmd_mutex); - - return rc; -} - -static int sev_get_platform_state(int *state, int *error) -{ - int rc; - - rc = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, - &psp_master->status_cmd_buf, error); - if (rc) - return rc; - - *state = psp_master->status_cmd_buf.state; - return rc; -} - -static int sev_ioctl_do_reset(struct sev_issue_cmd *argp) -{ - int state, rc; - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; + unsigned int val = ioread32(psp->io_regs + psp->vdata->feature_reg); /* - * The SEV spec requires that FACTORY_RESET must be issued in - * UNINIT state. Before we go further lets check if any guest is - * active. - * - * If FW is in WORKING state then deny the request otherwise issue - * SHUTDOWN command do INIT -> UNINIT before issuing the FACTORY_RESET. 
- * - */ - rc = sev_get_platform_state(&state, &argp->error); - if (rc) - return rc; - - if (state == SEV_STATE_WORKING) - return -EBUSY; - - if (state == SEV_STATE_INIT) { - rc = __sev_platform_shutdown_locked(&argp->error); - if (rc) - return rc; - } - - return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, NULL, &argp->error); -} - -static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp) -{ - struct sev_user_data_status *data = &psp_master->status_cmd_buf; - int ret; - - ret = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, data, &argp->error); - if (ret) - return ret; - - if (copy_to_user((void __user *)argp->data, data, sizeof(*data))) - ret = -EFAULT; - - return ret; -} - -static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp) -{ - int rc; - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - if (psp_master->sev_state == SEV_STATE_UNINIT) { - rc = __sev_platform_init_locked(&argp->error); - if (rc) - return rc; - } - - return __sev_do_cmd_locked(cmd, NULL, &argp->error); -} - -static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp) -{ - struct sev_user_data_pek_csr input; - struct sev_data_pek_csr *data; - void *blob = NULL; - int ret; - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) - return -EFAULT; - - data = kzalloc(sizeof(*data), GFP_KERNEL); - if (!data) - return -ENOMEM; - - /* userspace wants to query CSR length */ - if (!input.address || !input.length) - goto cmd; - - /* allocate a physically contiguous buffer to store the CSR blob */ - if (!access_ok(input.address, input.length) || - input.length > SEV_FW_BLOB_MAX_SIZE) { - ret = -EFAULT; - goto e_free; - } - - blob = kmalloc(input.length, GFP_KERNEL); - if (!blob) { - ret = -ENOMEM; - goto e_free; - } - - data->address = __psp_pa(blob); - data->len = input.length; - -cmd: - if (psp_master->sev_state == SEV_STATE_UNINIT) { - ret = __sev_platform_init_locked(&argp->error); - if (ret) - goto e_free_blob; - } - - ret = __sev_do_cmd_locked(SEV_CMD_PEK_CSR, data, &argp->error); - - /* If we query the CSR length, FW responded with expected data. */ - input.length = data->len; - - if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) { - ret = -EFAULT; - goto e_free_blob; - } - - if (blob) { - if (copy_to_user((void __user *)input.address, blob, input.length)) - ret = -EFAULT; - } - -e_free_blob: - kfree(blob); -e_free: - kfree(data); - return ret; -} - -void *psp_copy_user_blob(u64 __user uaddr, u32 len) -{ - if (!uaddr || !len) - return ERR_PTR(-EINVAL); - - /* verify that blob length does not exceed our limit */ - if (len > SEV_FW_BLOB_MAX_SIZE) - return ERR_PTR(-EINVAL); - - return memdup_user((void __user *)(uintptr_t)uaddr, len); -} -EXPORT_SYMBOL_GPL(psp_copy_user_blob); - -static int sev_get_api_version(void) -{ - struct sev_user_data_status *status; - int error = 0, ret; - - status = &psp_master->status_cmd_buf; - ret = sev_platform_status(status, &error); - if (ret) { - dev_err(psp_master->dev, - "SEV: failed to get status. 
Error: %#x\n", error);
- return 1;
- }
-
- psp_master->api_major = status->api_major;
- psp_master->api_minor = status->api_minor;
- psp_master->build = status->build;
- psp_master->sev_state = status->state;
-
- return 0;
-}
-
-static int sev_get_firmware(struct device *dev,
- const struct firmware **firmware)
-{
- char fw_name_specific[SEV_FW_NAME_SIZE];
- char fw_name_subset[SEV_FW_NAME_SIZE];
-
- snprintf(fw_name_specific, sizeof(fw_name_specific),
- "amd/amd_sev_fam%.2xh_model%.2xh.sbin",
- boot_cpu_data.x86, boot_cpu_data.x86_model);
-
- snprintf(fw_name_subset, sizeof(fw_name_subset),
- "amd/amd_sev_fam%.2xh_model%.1xxh.sbin",
- boot_cpu_data.x86, (boot_cpu_data.x86_model & 0xf0) >> 4);
-
- /* Check for SEV FW for a particular model.
- * Ex. amd_sev_fam17h_model00h.sbin for Family 17h Model 00h
- *
- * or
- *
- * Check for SEV FW common to a subset of models.
- * Ex. amd_sev_fam17h_model0xh.sbin for
- * Family 17h Model 00h -- Family 17h Model 0Fh
- *
- * or
- *
- * Fall-back to using generic name: sev.fw
+ * Check for access to the registers. If this read returns
+ * 0xffffffff, it's likely that the system is running a broken
+ * BIOS which disallows access to the device. Stop here and
+ * fail the PSP initialization (but not the load, as the CCP
+ * could get properly initialized).
 */
- if ((firmware_request_nowarn(firmware, fw_name_specific, dev) >= 0) ||
- (firmware_request_nowarn(firmware, fw_name_subset, dev) >= 0) ||
- (firmware_request_nowarn(firmware, SEV_FW_FILE, dev) >= 0))
+ if (val == 0xffffffff) {
+ dev_notice(psp->dev, "psp: unable to access the device: you might be running a broken BIOS.\n");
 return 0;
-
- return -ENOENT;
-}
-
-/* Don't fail if SEV FW couldn't be updated. Continue with existing SEV FW */
-static int sev_update_firmware(struct device *dev)
-{
- struct sev_data_download_firmware *data;
- const struct firmware *firmware;
- int ret, error, order;
- struct page *p;
- u64 data_size;
-
- if (sev_get_firmware(dev, &firmware) == -ENOENT) {
- dev_dbg(dev, "No SEV firmware file present\n");
- return -1;
- }
-
- /*
- * SEV FW expects the physical address given to it to be 32
- * byte aligned. Memory allocated has structure placed at the
- * beginning followed by the firmware being passed to the SEV
- * FW. Allocate enough memory for data structure + alignment
- * padding + SEV FW.
- */
- data_size = ALIGN(sizeof(struct sev_data_download_firmware), 32);
-
- order = get_order(firmware->size + data_size);
- p = alloc_pages(GFP_KERNEL, order);
- if (!p) {
- ret = -1;
- goto fw_err;
 }
-
- /*
- * Copy firmware data to a kernel allocated contiguous
- * memory region. 
- */ - data = page_address(p); - memcpy(page_address(p) + data_size, firmware->data, firmware->size); - - data->address = __psp_pa(page_address(p) + data_size); - data->len = firmware->size; - - ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error); - if (ret) - dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error); - else - dev_info(dev, "SEV firmware update successful\n"); - - __free_pages(p, order); - -fw_err: - release_firmware(firmware); - - return ret; + return val; } -static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp) +static int psp_check_sev_support(struct psp_device *psp, + unsigned int capability) { - struct sev_user_data_pek_cert_import input; - struct sev_data_pek_cert_import *data; - void *pek_blob, *oca_blob; - int ret; - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) - return -EFAULT; - - data = kzalloc(sizeof(*data), GFP_KERNEL); - if (!data) - return -ENOMEM; - - /* copy PEK certificate blobs from userspace */ - pek_blob = psp_copy_user_blob(input.pek_cert_address, input.pek_cert_len); - if (IS_ERR(pek_blob)) { - ret = PTR_ERR(pek_blob); - goto e_free; - } - - data->pek_cert_address = __psp_pa(pek_blob); - data->pek_cert_len = input.pek_cert_len; - - /* copy PEK certificate blobs from userspace */ - oca_blob = psp_copy_user_blob(input.oca_cert_address, input.oca_cert_len); - if (IS_ERR(oca_blob)) { - ret = PTR_ERR(oca_blob); - goto e_free_pek; - } - - data->oca_cert_address = __psp_pa(oca_blob); - data->oca_cert_len = input.oca_cert_len; - - /* If platform is not in INIT state then transition it to INIT */ - if (psp_master->sev_state != SEV_STATE_INIT) { - ret = __sev_platform_init_locked(&argp->error); - if (ret) - goto e_free_oca; + /* Check if device supports SEV feature */ + if (!(capability & 1)) { + dev_dbg(psp->dev, "psp does not support SEV\n"); + return -ENODEV; } - ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, data, &argp->error); - -e_free_oca: - kfree(oca_blob); -e_free_pek: - kfree(pek_blob); -e_free: - kfree(data); - return ret; + return 0; } -static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp) +static int psp_check_tee_support(struct psp_device *psp, + unsigned int capability) { - struct sev_user_data_get_id2 input; - struct sev_data_get_id *data; - void *id_blob = NULL; - int ret; - - /* SEV GET_ID is available from SEV API v0.16 and up */ - if (!sev_version_greater_or_equal(0, 16)) - return -ENOTSUPP; - - if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) - return -EFAULT; - - /* Check if we have write access to the userspace buffer */ - if (input.address && - input.length && - !access_ok(input.address, input.length)) - return -EFAULT; - - data = kzalloc(sizeof(*data), GFP_KERNEL); - if (!data) - return -ENOMEM; - - if (input.address && input.length) { - id_blob = kmalloc(input.length, GFP_KERNEL); - if (!id_blob) { - kfree(data); - return -ENOMEM; - } - - data->address = __psp_pa(id_blob); - data->len = input.length; - } - - ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error); - - /* - * Firmware will return the length of the ID value (either the minimum - * required length or the actual length written), return it to the user. 
- */ - input.length = data->len; - - if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) { - ret = -EFAULT; - goto e_free; - } - - if (id_blob) { - if (copy_to_user((void __user *)input.address, - id_blob, data->len)) { - ret = -EFAULT; - goto e_free; - } + /* Check if device supports TEE feature */ + if (!(capability & 2)) { + dev_dbg(psp->dev, "psp does not support TEE\n"); + return -ENODEV; } -e_free: - kfree(id_blob); - kfree(data); - - return ret; + return 0; } -static int sev_ioctl_do_get_id(struct sev_issue_cmd *argp) +static int psp_check_support(struct psp_device *psp, + unsigned int capability) { - struct sev_data_get_id *data; - u64 data_size, user_size; - void *id_blob, *mem; - int ret; + int sev_support = psp_check_sev_support(psp, capability); + int tee_support = psp_check_tee_support(psp, capability); - /* SEV GET_ID available from SEV API v0.16 and up */ - if (!sev_version_greater_or_equal(0, 16)) - return -ENOTSUPP; - - /* SEV FW expects the buffer it fills with the ID to be - * 8-byte aligned. Memory allocated should be enough to - * hold data structure + alignment padding + memory - * where SEV FW writes the ID. - */ - data_size = ALIGN(sizeof(struct sev_data_get_id), 8); - user_size = sizeof(struct sev_user_data_get_id); - - mem = kzalloc(data_size + user_size, GFP_KERNEL); - if (!mem) - return -ENOMEM; - - data = mem; - id_blob = mem + data_size; - - data->address = __psp_pa(id_blob); - data->len = user_size; - - ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error); - if (!ret) { - if (copy_to_user((void __user *)argp->data, id_blob, data->len)) - ret = -EFAULT; - } - - kfree(mem); + /* Return error if device neither supports SEV nor TEE */ + if (sev_support && tee_support) + return -ENODEV; - return ret; + return 0; } -static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp) +static int psp_init(struct psp_device *psp, unsigned int capability) { - struct sev_user_data_pdh_cert_export input; - void *pdh_blob = NULL, *cert_blob = NULL; - struct sev_data_pdh_cert_export *data; int ret; - /* If platform is not in INIT state then transition it to INIT. */ - if (psp_master->sev_state != SEV_STATE_INIT) { - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - ret = __sev_platform_init_locked(&argp->error); + if (!psp_check_sev_support(psp, capability)) { + ret = sev_dev_init(psp); if (ret) return ret; } - if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) - return -EFAULT; - - data = kzalloc(sizeof(*data), GFP_KERNEL); - if (!data) - return -ENOMEM; - - /* Userspace wants to query the certificate length. */ - if (!input.pdh_cert_address || - !input.pdh_cert_len || - !input.cert_chain_address) - goto cmd; - - /* Allocate a physically contiguous buffer to store the PDH blob. */ - if ((input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE) || - !access_ok(input.pdh_cert_address, input.pdh_cert_len)) { - ret = -EFAULT; - goto e_free; - } - - /* Allocate a physically contiguous buffer to store the cert chain blob. 
*/ - if ((input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE) || - !access_ok(input.cert_chain_address, input.cert_chain_len)) { - ret = -EFAULT; - goto e_free; - } - - pdh_blob = kmalloc(input.pdh_cert_len, GFP_KERNEL); - if (!pdh_blob) { - ret = -ENOMEM; - goto e_free; - } - - data->pdh_cert_address = __psp_pa(pdh_blob); - data->pdh_cert_len = input.pdh_cert_len; - - cert_blob = kmalloc(input.cert_chain_len, GFP_KERNEL); - if (!cert_blob) { - ret = -ENOMEM; - goto e_free_pdh; - } - - data->cert_chain_address = __psp_pa(cert_blob); - data->cert_chain_len = input.cert_chain_len; - -cmd: - ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, data, &argp->error); - - /* If we query the length, FW responded with expected data. */ - input.cert_chain_len = data->cert_chain_len; - input.pdh_cert_len = data->pdh_cert_len; - - if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) { - ret = -EFAULT; - goto e_free_cert; - } - - if (pdh_blob) { - if (copy_to_user((void __user *)input.pdh_cert_address, - pdh_blob, input.pdh_cert_len)) { - ret = -EFAULT; - goto e_free_cert; - } - } - - if (cert_blob) { - if (copy_to_user((void __user *)input.cert_chain_address, - cert_blob, input.cert_chain_len)) - ret = -EFAULT; - } - -e_free_cert: - kfree(cert_blob); -e_free_pdh: - kfree(pdh_blob); -e_free: - kfree(data); - return ret; -} - -static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) -{ - void __user *argp = (void __user *)arg; - struct sev_issue_cmd input; - int ret = -EFAULT; - - if (!psp_master) - return -ENODEV; - - if (ioctl != SEV_ISSUE_CMD) - return -EINVAL; - - if (copy_from_user(&input, argp, sizeof(struct sev_issue_cmd))) - return -EFAULT; - - if (input.cmd > SEV_MAX) - return -EINVAL; - - mutex_lock(&sev_cmd_mutex); - - switch (input.cmd) { - - case SEV_FACTORY_RESET: - ret = sev_ioctl_do_reset(&input); - break; - case SEV_PLATFORM_STATUS: - ret = sev_ioctl_do_platform_status(&input); - break; - case SEV_PEK_GEN: - ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PEK_GEN, &input); - break; - case SEV_PDH_GEN: - ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PDH_GEN, &input); - break; - case SEV_PEK_CSR: - ret = sev_ioctl_do_pek_csr(&input); - break; - case SEV_PEK_CERT_IMPORT: - ret = sev_ioctl_do_pek_import(&input); - break; - case SEV_PDH_CERT_EXPORT: - ret = sev_ioctl_do_pdh_export(&input); - break; - case SEV_GET_ID: - pr_warn_once("SEV_GET_ID command is deprecated, use SEV_GET_ID2\n"); - ret = sev_ioctl_do_get_id(&input); - break; - case SEV_GET_ID2: - ret = sev_ioctl_do_get_id2(&input); - break; - default: - ret = -EINVAL; - goto out; - } - - if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd))) - ret = -EFAULT; -out: - mutex_unlock(&sev_cmd_mutex); - - return ret; -} - -static const struct file_operations sev_fops = { - .owner = THIS_MODULE, - .unlocked_ioctl = sev_ioctl, -}; - -int sev_platform_status(struct sev_user_data_status *data, int *error) -{ - return sev_do_cmd(SEV_CMD_PLATFORM_STATUS, data, error); -} -EXPORT_SYMBOL_GPL(sev_platform_status); - -int sev_guest_deactivate(struct sev_data_deactivate *data, int *error) -{ - return sev_do_cmd(SEV_CMD_DEACTIVATE, data, error); -} -EXPORT_SYMBOL_GPL(sev_guest_deactivate); - -int sev_guest_activate(struct sev_data_activate *data, int *error) -{ - return sev_do_cmd(SEV_CMD_ACTIVATE, data, error); -} -EXPORT_SYMBOL_GPL(sev_guest_activate); - -int sev_guest_decommission(struct sev_data_decommission *data, int *error) -{ - return sev_do_cmd(SEV_CMD_DECOMMISSION, data, error); -} 
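[Editor's note] The sev_platform_status() and sev_guest_*() wrappers being removed here keep the same shape in their new home (sev-dev.c, added later in this patch): each exported helper is a thin funnel into sev_do_cmd(), which serializes all firmware mailbox traffic behind sev_cmd_mutex so only one command is ever in flight. A condensed sketch of that pattern, with hypothetical names:

#include <linux/mutex.h>

static DEFINE_MUTEX(my_cmd_mutex);

/* Stand-in for the locked worker that rings the mailbox doorbell,
 * waits for command completion and decodes the firmware status.
 */
static int __my_do_cmd_locked(int cmd, void *data, int *fw_error)
{
	return 0;
}

static int my_do_cmd(int cmd, void *data, int *fw_error)
{
	int rc;

	/* One firmware command at a time; callers never take the lock. */
	mutex_lock(&my_cmd_mutex);
	rc = __my_do_cmd_locked(cmd, data, fw_error);
	mutex_unlock(&my_cmd_mutex);

	return rc;
}
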
-EXPORT_SYMBOL_GPL(sev_guest_decommission); - -int sev_guest_df_flush(int *error) -{ - return sev_do_cmd(SEV_CMD_DF_FLUSH, NULL, error); -} -EXPORT_SYMBOL_GPL(sev_guest_df_flush); - -static void sev_exit(struct kref *ref) -{ - struct sev_misc_dev *misc_dev = container_of(ref, struct sev_misc_dev, refcount); - - misc_deregister(&misc_dev->misc); -} - -static int sev_misc_init(struct psp_device *psp) -{ - struct device *dev = psp->dev; - int ret; - - /* - * SEV feature support can be detected on multiple devices but the SEV - * FW commands must be issued on the master. During probe, we do not - * know the master hence we create /dev/sev on the first device probe. - * sev_do_cmd() finds the right master device to which to issue the - * command to the firmware. - */ - if (!misc_dev) { - struct miscdevice *misc; - - misc_dev = devm_kzalloc(dev, sizeof(*misc_dev), GFP_KERNEL); - if (!misc_dev) - return -ENOMEM; - - misc = &misc_dev->misc; - misc->minor = MISC_DYNAMIC_MINOR; - misc->name = DEVICE_NAME; - misc->fops = &sev_fops; - - ret = misc_register(misc); + if (!psp_check_tee_support(psp, capability)) { + ret = tee_dev_init(psp); if (ret) return ret; - - kref_init(&misc_dev->refcount); - } else { - kref_get(&misc_dev->refcount); - } - - init_waitqueue_head(&psp->sev_int_queue); - psp->sev_misc = misc_dev; - dev_dbg(dev, "registered SEV device\n"); - - return 0; -} - -static int psp_check_sev_support(struct psp_device *psp) -{ - unsigned int val = ioread32(psp->io_regs + psp->vdata->feature_reg); - - /* - * Check for a access to the registers. If this read returns - * 0xffffffff, it's likely that the system is running a broken - * BIOS which disallows access to the device. Stop here and - * fail the PSP initialization (but not the load, as the CCP - * could get properly initialized). 
- */ - if (val == 0xffffffff) { - dev_notice(psp->dev, "psp: unable to access the device: you might be running a broken BIOS.\n"); - return -ENODEV; - } - - if (!(val & 1)) { - /* Device does not support the SEV feature */ - dev_dbg(psp->dev, "psp does not support SEV\n"); - return -ENODEV; } return 0; @@ -980,6 +136,7 @@ int psp_dev_init(struct sp_device *sp) { struct device *dev = sp->dev; struct psp_device *psp; + unsigned int capability; int ret; ret = -ENOMEM; @@ -998,7 +155,11 @@ int psp_dev_init(struct sp_device *sp) psp->io_regs = sp->io_map; - ret = psp_check_sev_support(psp); + capability = psp_get_capability(psp); + if (!capability) + goto e_disable; + + ret = psp_check_support(psp, capability); if (ret) goto e_disable; @@ -1013,7 +174,7 @@ int psp_dev_init(struct sp_device *sp) goto e_err; } - ret = sev_misc_init(psp); + ret = psp_init(psp, capability); if (ret) goto e_irq; @@ -1049,83 +210,52 @@ void psp_dev_destroy(struct sp_device *sp) if (!psp) return; - if (psp->sev_misc) - kref_put(&misc_dev->refcount, sev_exit); + sev_dev_destroy(psp); + + tee_dev_destroy(psp); sp_free_psp_irq(sp, psp); } -int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd, - void *data, int *error) +void psp_set_sev_irq_handler(struct psp_device *psp, psp_irq_handler_t handler, + void *data) { - if (!filep || filep->f_op != &sev_fops) - return -EBADF; - - return sev_do_cmd(cmd, data, error); + psp->sev_irq_data = data; + psp->sev_irq_handler = handler; } -EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user); -void psp_pci_init(void) +void psp_clear_sev_irq_handler(struct psp_device *psp) { - struct sp_device *sp; - int error, rc; - - sp = sp_get_psp_master_device(); - if (!sp) - return; - - psp_master = sp->psp_data; - - psp_timeout = psp_probe_timeout; + psp_set_sev_irq_handler(psp, NULL, NULL); +} - if (sev_get_api_version()) - goto err; +void psp_set_tee_irq_handler(struct psp_device *psp, psp_irq_handler_t handler, + void *data) +{ + psp->tee_irq_data = data; + psp->tee_irq_handler = handler; +} - /* - * If platform is not in UNINIT state then firmware upgrade and/or - * platform INIT command will fail. These command require UNINIT state. - * - * In a normal boot we should never run into case where the firmware - * is not in UNINIT state on boot. But in case of kexec boot, a reboot - * may not go through a typical shutdown sequence and may leave the - * firmware in INIT or WORKING state. - */ +void psp_clear_tee_irq_handler(struct psp_device *psp) +{ + psp_set_tee_irq_handler(psp, NULL, NULL); +} - if (psp_master->sev_state != SEV_STATE_UNINIT) { - sev_platform_shutdown(NULL); - psp_master->sev_state = SEV_STATE_UNINIT; - } +struct psp_device *psp_get_master_device(void) +{ + struct sp_device *sp = sp_get_psp_master_device(); - if (sev_version_greater_or_equal(0, 15) && - sev_update_firmware(psp_master->dev) == 0) - sev_get_api_version(); + return sp ? sp->psp_data : NULL; +} - /* Initialize the platform */ - rc = sev_platform_init(&error); - if (rc && (error == SEV_RET_SECURE_DATA_INVALID)) { - /* - * INIT command returned an integrity check failure - * status code, meaning that firmware load and - * validation of SEV related persistent data has - * failed and persistent state has been erased. - * Retrying INIT command here should succeed. 
- */ - dev_dbg(sp->dev, "SEV: retrying INIT command"); - rc = sev_platform_init(&error); - } +void psp_pci_init(void) +{ + psp_master = psp_get_master_device(); - if (rc) { - dev_err(sp->dev, "SEV: failed to INIT error %#x\n", error); + if (!psp_master) return; - } - - dev_info(sp->dev, "SEV API:%d.%d build:%d\n", psp_master->api_major, - psp_master->api_minor, psp_master->build); - - return; -err: - psp_master = NULL; + sev_pci_init(); } void psp_pci_exit(void) @@ -1133,5 +263,5 @@ void psp_pci_exit(void) if (!psp_master) return; - sev_platform_shutdown(NULL); + sev_pci_exit(); } diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h index dd516b35ba86..ef38e4135d81 100644 --- a/drivers/crypto/ccp/psp-dev.h +++ b/drivers/crypto/ccp/psp-dev.h @@ -2,7 +2,7 @@ /* * AMD Platform Security Processor (PSP) interface driver * - * Copyright (C) 2017-2018 Advanced Micro Devices, Inc. + * Copyright (C) 2017-2019 Advanced Micro Devices, Inc. * * Author: Brijesh Singh <brijesh.singh@amd.com> */ @@ -11,35 +11,20 @@ #define __PSP_DEV_H__ #include <linux/device.h> -#include <linux/spinlock.h> -#include <linux/mutex.h> #include <linux/list.h> -#include <linux/wait.h> -#include <linux/dmapool.h> -#include <linux/hw_random.h> -#include <linux/bitops.h> +#include <linux/bits.h> #include <linux/interrupt.h> -#include <linux/irqreturn.h> -#include <linux/dmaengine.h> -#include <linux/psp-sev.h> -#include <linux/miscdevice.h> -#include <linux/capability.h> #include "sp-dev.h" -#define PSP_CMD_COMPLETE BIT(1) - -#define PSP_CMDRESP_CMD_SHIFT 16 -#define PSP_CMDRESP_IOC BIT(0) #define PSP_CMDRESP_RESP BIT(31) #define PSP_CMDRESP_ERR_MASK 0xffff #define MAX_PSP_NAME_LEN 16 -struct sev_misc_dev { - struct kref refcount; - struct miscdevice misc; -}; +extern struct psp_device *psp_master; + +typedef void (*psp_irq_handler_t)(int, void *, unsigned int); struct psp_device { struct list_head entry; @@ -52,16 +37,24 @@ struct psp_device { void __iomem *io_regs; - int sev_state; - unsigned int sev_int_rcvd; - wait_queue_head_t sev_int_queue; - struct sev_misc_dev *sev_misc; - struct sev_user_data_status status_cmd_buf; - struct sev_data_init init_cmd_buf; + psp_irq_handler_t sev_irq_handler; + void *sev_irq_data; + + psp_irq_handler_t tee_irq_handler; + void *tee_irq_data; - u8 api_major; - u8 api_minor; - u8 build; + void *sev_data; + void *tee_data; }; +void psp_set_sev_irq_handler(struct psp_device *psp, psp_irq_handler_t handler, + void *data); +void psp_clear_sev_irq_handler(struct psp_device *psp); + +void psp_set_tee_irq_handler(struct psp_device *psp, psp_irq_handler_t handler, + void *data); +void psp_clear_tee_irq_handler(struct psp_device *psp); + +struct psp_device *psp_get_master_device(void); + #endif /* __PSP_DEV_H */ diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c new file mode 100644 index 000000000000..e467860f797d --- /dev/null +++ b/drivers/crypto/ccp/sev-dev.c @@ -0,0 +1,1077 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * AMD Secure Encrypted Virtualization (SEV) interface + * + * Copyright (C) 2016,2019 Advanced Micro Devices, Inc. 
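psp-dev.c no longer interprets interrupts itself; it only exposes the psp_set_sev_irq_handler()/psp_set_tee_irq_handler() registration points added above, and the sub-drivers decide what a given status word means. The shared ISR is not part of this hunk; the following is a sketch of the dispatch this API implies, assuming the ISR reads and acks the vdata->intsts_reg status register:

static irqreturn_t psp_irq_handler(int irq, void *data)
{
	struct psp_device *psp = data;
	unsigned int status;

	/* Read the interrupt status and ack it. */
	status = ioread32(psp->io_regs + psp->vdata->intsts_reg);
	iowrite32(status, psp->io_regs + psp->vdata->intsts_reg);

	/* Forward the raw status to whichever sub-devices registered. */
	if (psp->sev_irq_handler)
		psp->sev_irq_handler(irq, psp->sev_irq_data, status);

	if (psp->tee_irq_handler)
		psp->tee_irq_handler(irq, psp->tee_irq_data, status);

	return IRQ_HANDLED;
}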
+ * + * Author: Brijesh Singh <brijesh.singh@amd.com> + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/kthread.h> +#include <linux/sched.h> +#include <linux/interrupt.h> +#include <linux/spinlock.h> +#include <linux/spinlock_types.h> +#include <linux/types.h> +#include <linux/mutex.h> +#include <linux/delay.h> +#include <linux/hw_random.h> +#include <linux/ccp.h> +#include <linux/firmware.h> + +#include <asm/smp.h> + +#include "psp-dev.h" +#include "sev-dev.h" + +#define DEVICE_NAME "sev" +#define SEV_FW_FILE "amd/sev.fw" +#define SEV_FW_NAME_SIZE 64 + +static DEFINE_MUTEX(sev_cmd_mutex); +static struct sev_misc_dev *misc_dev; + +static int psp_cmd_timeout = 100; +module_param(psp_cmd_timeout, int, 0644); +MODULE_PARM_DESC(psp_cmd_timeout, " default timeout value, in seconds, for PSP commands"); + +static int psp_probe_timeout = 5; +module_param(psp_probe_timeout, int, 0644); +MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe"); + +static bool psp_dead; +static int psp_timeout; + +static inline bool sev_version_greater_or_equal(u8 maj, u8 min) +{ + struct sev_device *sev = psp_master->sev_data; + + if (sev->api_major > maj) + return true; + + if (sev->api_major == maj && sev->api_minor >= min) + return true; + + return false; +} + +static void sev_irq_handler(int irq, void *data, unsigned int status) +{ + struct sev_device *sev = data; + int reg; + + /* Check if it is command completion: */ + if (!(status & SEV_CMD_COMPLETE)) + return; + + /* Check if it is SEV command completion: */ + reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg); + if (reg & PSP_CMDRESP_RESP) { + sev->int_rcvd = 1; + wake_up(&sev->int_queue); + } +} + +static int sev_wait_cmd_ioc(struct sev_device *sev, + unsigned int *reg, unsigned int timeout) +{ + int ret; + + ret = wait_event_timeout(sev->int_queue, + sev->int_rcvd, timeout * HZ); + if (!ret) + return -ETIMEDOUT; + + *reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg); + + return 0; +} + +static int sev_cmd_buffer_len(int cmd) +{ + switch (cmd) { + case SEV_CMD_INIT: return sizeof(struct sev_data_init); + case SEV_CMD_PLATFORM_STATUS: return sizeof(struct sev_user_data_status); + case SEV_CMD_PEK_CSR: return sizeof(struct sev_data_pek_csr); + case SEV_CMD_PEK_CERT_IMPORT: return sizeof(struct sev_data_pek_cert_import); + case SEV_CMD_PDH_CERT_EXPORT: return sizeof(struct sev_data_pdh_cert_export); + case SEV_CMD_LAUNCH_START: return sizeof(struct sev_data_launch_start); + case SEV_CMD_LAUNCH_UPDATE_DATA: return sizeof(struct sev_data_launch_update_data); + case SEV_CMD_LAUNCH_UPDATE_VMSA: return sizeof(struct sev_data_launch_update_vmsa); + case SEV_CMD_LAUNCH_FINISH: return sizeof(struct sev_data_launch_finish); + case SEV_CMD_LAUNCH_MEASURE: return sizeof(struct sev_data_launch_measure); + case SEV_CMD_ACTIVATE: return sizeof(struct sev_data_activate); + case SEV_CMD_DEACTIVATE: return sizeof(struct sev_data_deactivate); + case SEV_CMD_DECOMMISSION: return sizeof(struct sev_data_decommission); + case SEV_CMD_GUEST_STATUS: return sizeof(struct sev_data_guest_status); + case SEV_CMD_DBG_DECRYPT: return sizeof(struct sev_data_dbg); + case SEV_CMD_DBG_ENCRYPT: return sizeof(struct sev_data_dbg); + case SEV_CMD_SEND_START: return sizeof(struct sev_data_send_start); + case SEV_CMD_SEND_UPDATE_DATA: return sizeof(struct sev_data_send_update_data); + case SEV_CMD_SEND_UPDATE_VMSA: return sizeof(struct sev_data_send_update_vmsa); + case SEV_CMD_SEND_FINISH: return sizeof(struct 
sev_data_send_finish); + case SEV_CMD_RECEIVE_START: return sizeof(struct sev_data_receive_start); + case SEV_CMD_RECEIVE_FINISH: return sizeof(struct sev_data_receive_finish); + case SEV_CMD_RECEIVE_UPDATE_DATA: return sizeof(struct sev_data_receive_update_data); + case SEV_CMD_RECEIVE_UPDATE_VMSA: return sizeof(struct sev_data_receive_update_vmsa); + case SEV_CMD_LAUNCH_UPDATE_SECRET: return sizeof(struct sev_data_launch_secret); + case SEV_CMD_DOWNLOAD_FIRMWARE: return sizeof(struct sev_data_download_firmware); + case SEV_CMD_GET_ID: return sizeof(struct sev_data_get_id); + default: return 0; + } + + return 0; +} + +static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + unsigned int phys_lsb, phys_msb; + unsigned int reg, ret = 0; + + if (!psp || !psp->sev_data) + return -ENODEV; + + if (psp_dead) + return -EBUSY; + + sev = psp->sev_data; + + /* Get the physical address of the command buffer */ + phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0; + phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0; + + dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n", + cmd, phys_msb, phys_lsb, psp_timeout); + + print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data, + sev_cmd_buffer_len(cmd), false); + + iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + + sev->int_rcvd = 0; + + reg = cmd; + reg <<= SEV_CMDRESP_CMD_SHIFT; + reg |= SEV_CMDRESP_IOC; + iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg); + + /* wait for command completion */ + ret = sev_wait_cmd_ioc(sev, ®, psp_timeout); + if (ret) { + if (psp_ret) + *psp_ret = 0; + + dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd); + psp_dead = true; + + return ret; + } + + psp_timeout = psp_cmd_timeout; + + if (psp_ret) + *psp_ret = reg & PSP_CMDRESP_ERR_MASK; + + if (reg & PSP_CMDRESP_ERR_MASK) { + dev_dbg(sev->dev, "sev command %#x failed (%#010x)\n", + cmd, reg & PSP_CMDRESP_ERR_MASK); + ret = -EIO; + } + + print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data, + sev_cmd_buffer_len(cmd), false); + + return ret; +} + +static int sev_do_cmd(int cmd, void *data, int *psp_ret) +{ + int rc; + + mutex_lock(&sev_cmd_mutex); + rc = __sev_do_cmd_locked(cmd, data, psp_ret); + mutex_unlock(&sev_cmd_mutex); + + return rc; +} + +static int __sev_platform_init_locked(int *error) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + int rc = 0; + + if (!psp || !psp->sev_data) + return -ENODEV; + + sev = psp->sev_data; + + if (sev->state == SEV_STATE_INIT) + return 0; + + rc = __sev_do_cmd_locked(SEV_CMD_INIT, &sev->init_cmd_buf, error); + if (rc) + return rc; + + sev->state = SEV_STATE_INIT; + + /* Prepare for first SEV guest launch after INIT */ + wbinvd_on_all_cpus(); + rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, error); + if (rc) + return rc; + + dev_dbg(sev->dev, "SEV firmware initialized\n"); + + return rc; +} + +int sev_platform_init(int *error) +{ + int rc; + + mutex_lock(&sev_cmd_mutex); + rc = __sev_platform_init_locked(error); + mutex_unlock(&sev_cmd_mutex); + + return rc; +} +EXPORT_SYMBOL_GPL(sev_platform_init); + +static int __sev_platform_shutdown_locked(int *error) +{ + struct sev_device *sev = psp_master->sev_data; + int ret; + + ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error); + if (ret) + return ret; + + sev->state = SEV_STATE_UNINIT; + dev_dbg(sev->dev, "SEV firmware shutdown\n"); 
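Everything in __sev_do_cmd_locked() above funnels through a single command/response register: the command ID sits in the upper half of the word, SEV_CMDRESP_IOC (bit 0) asks the firmware to raise a completion interrupt, and on the way back PSP_CMDRESP_RESP (bit 31) flags a latched response whose low 16 bits carry the firmware status. A small encode/decode pair, restating what the function open-codes:

static u32 sev_cmdresp_encode(int cmd)
{
	/* Command ID in the high half, IOC bit requests a completion IRQ. */
	return ((u32)cmd << SEV_CMDRESP_CMD_SHIFT) | SEV_CMDRESP_IOC;
}

static int sev_cmdresp_decode(u32 reg, int *psp_ret)
{
	if (!(reg & PSP_CMDRESP_RESP))
		return -EBUSY;			/* no response latched yet */

	*psp_ret = reg & PSP_CMDRESP_ERR_MASK;	/* firmware status code */

	return *psp_ret ? -EIO : 0;
}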
+ + return ret; +} + +static int sev_platform_shutdown(int *error) +{ + int rc; + + mutex_lock(&sev_cmd_mutex); + rc = __sev_platform_shutdown_locked(NULL); + mutex_unlock(&sev_cmd_mutex); + + return rc; +} + +static int sev_get_platform_state(int *state, int *error) +{ + struct sev_device *sev = psp_master->sev_data; + int rc; + + rc = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, + &sev->status_cmd_buf, error); + if (rc) + return rc; + + *state = sev->status_cmd_buf.state; + return rc; +} + +static int sev_ioctl_do_reset(struct sev_issue_cmd *argp) +{ + int state, rc; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + /* + * The SEV spec requires that FACTORY_RESET must be issued in + * UNINIT state. Before we go further lets check if any guest is + * active. + * + * If FW is in WORKING state then deny the request otherwise issue + * SHUTDOWN command do INIT -> UNINIT before issuing the FACTORY_RESET. + * + */ + rc = sev_get_platform_state(&state, &argp->error); + if (rc) + return rc; + + if (state == SEV_STATE_WORKING) + return -EBUSY; + + if (state == SEV_STATE_INIT) { + rc = __sev_platform_shutdown_locked(&argp->error); + if (rc) + return rc; + } + + return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, NULL, &argp->error); +} + +static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp) +{ + struct sev_device *sev = psp_master->sev_data; + struct sev_user_data_status *data = &sev->status_cmd_buf; + int ret; + + ret = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, data, &argp->error); + if (ret) + return ret; + + if (copy_to_user((void __user *)argp->data, data, sizeof(*data))) + ret = -EFAULT; + + return ret; +} + +static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp) +{ + struct sev_device *sev = psp_master->sev_data; + int rc; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (sev->state == SEV_STATE_UNINIT) { + rc = __sev_platform_init_locked(&argp->error); + if (rc) + return rc; + } + + return __sev_do_cmd_locked(cmd, NULL, &argp->error); +} + +static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp) +{ + struct sev_device *sev = psp_master->sev_data; + struct sev_user_data_pek_csr input; + struct sev_data_pek_csr *data; + void *blob = NULL; + int ret; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) + return -EFAULT; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + /* userspace wants to query CSR length */ + if (!input.address || !input.length) + goto cmd; + + /* allocate a physically contiguous buffer to store the CSR blob */ + if (!access_ok(input.address, input.length) || + input.length > SEV_FW_BLOB_MAX_SIZE) { + ret = -EFAULT; + goto e_free; + } + + blob = kmalloc(input.length, GFP_KERNEL); + if (!blob) { + ret = -ENOMEM; + goto e_free; + } + + data->address = __psp_pa(blob); + data->len = input.length; + +cmd: + if (sev->state == SEV_STATE_UNINIT) { + ret = __sev_platform_init_locked(&argp->error); + if (ret) + goto e_free_blob; + } + + ret = __sev_do_cmd_locked(SEV_CMD_PEK_CSR, data, &argp->error); + + /* If we query the CSR length, FW responded with expected data. 
*/ + input.length = data->len; + + if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) { + ret = -EFAULT; + goto e_free_blob; + } + + if (blob) { + if (copy_to_user((void __user *)input.address, blob, input.length)) + ret = -EFAULT; + } + +e_free_blob: + kfree(blob); +e_free: + kfree(data); + return ret; +} + +void *psp_copy_user_blob(u64 __user uaddr, u32 len) +{ + if (!uaddr || !len) + return ERR_PTR(-EINVAL); + + /* verify that blob length does not exceed our limit */ + if (len > SEV_FW_BLOB_MAX_SIZE) + return ERR_PTR(-EINVAL); + + return memdup_user((void __user *)(uintptr_t)uaddr, len); +} +EXPORT_SYMBOL_GPL(psp_copy_user_blob); + +static int sev_get_api_version(void) +{ + struct sev_device *sev = psp_master->sev_data; + struct sev_user_data_status *status; + int error = 0, ret; + + status = &sev->status_cmd_buf; + ret = sev_platform_status(status, &error); + if (ret) { + dev_err(sev->dev, + "SEV: failed to get status. Error: %#x\n", error); + return 1; + } + + sev->api_major = status->api_major; + sev->api_minor = status->api_minor; + sev->build = status->build; + sev->state = status->state; + + return 0; +} + +static int sev_get_firmware(struct device *dev, + const struct firmware **firmware) +{ + char fw_name_specific[SEV_FW_NAME_SIZE]; + char fw_name_subset[SEV_FW_NAME_SIZE]; + + snprintf(fw_name_specific, sizeof(fw_name_specific), + "amd/amd_sev_fam%.2xh_model%.2xh.sbin", + boot_cpu_data.x86, boot_cpu_data.x86_model); + + snprintf(fw_name_subset, sizeof(fw_name_subset), + "amd/amd_sev_fam%.2xh_model%.1xxh.sbin", + boot_cpu_data.x86, (boot_cpu_data.x86_model & 0xf0) >> 4); + + /* Check for SEV FW for a particular model. + * Ex. amd_sev_fam17h_model00h.sbin for Family 17h Model 00h + * + * or + * + * Check for SEV FW common to a subset of models. + * Ex. amd_sev_fam17h_model0xh.sbin for + * Family 17h Model 00h -- Family 17h Model 0Fh + * + * or + * + * Fall-back to using generic name: sev.fw + */ + if ((firmware_request_nowarn(firmware, fw_name_specific, dev) >= 0) || + (firmware_request_nowarn(firmware, fw_name_subset, dev) >= 0) || + (firmware_request_nowarn(firmware, SEV_FW_FILE, dev) >= 0)) + return 0; + + return -ENOENT; +} + +/* Don't fail if SEV FW couldn't be updated. Continue with existing SEV FW */ +static int sev_update_firmware(struct device *dev) +{ + struct sev_data_download_firmware *data; + const struct firmware *firmware; + int ret, error, order; + struct page *p; + u64 data_size; + + if (sev_get_firmware(dev, &firmware) == -ENOENT) { + dev_dbg(dev, "No SEV firmware file present\n"); + return -1; + } + + /* + * SEV FW expects the physical address given to it to be 32 + * byte aligned. Memory allocated has structure placed at the + * beginning followed by the firmware being passed to the SEV + * FW. Allocate enough memory for data structure + alignment + * padding + SEV FW. + */ + data_size = ALIGN(sizeof(struct sev_data_download_firmware), 32); + + order = get_order(firmware->size + data_size); + p = alloc_pages(GFP_KERNEL, order); + if (!p) { + ret = -1; + goto fw_err; + } + + /* + * Copy firmware data to a kernel allocated contiguous + * memory region. 
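As a worked example of the lookup order in sev_get_firmware() above: on a family 0x17, model 0x31 part, the driver first tries amd/amd_sev_fam17h_model31h.sbin, then the model-subset name amd/amd_sev_fam17h_model3xh.sbin (since (0x31 & 0xf0) >> 4 == 0x3), and finally the generic amd/sev.fw, taking the first file firmware_request_nowarn() can load.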
+ */ + data = page_address(p); + memcpy(page_address(p) + data_size, firmware->data, firmware->size); + + data->address = __psp_pa(page_address(p) + data_size); + data->len = firmware->size; + + ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error); + if (ret) + dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error); + else + dev_info(dev, "SEV firmware update successful\n"); + + __free_pages(p, order); + +fw_err: + release_firmware(firmware); + + return ret; +} + +static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp) +{ + struct sev_device *sev = psp_master->sev_data; + struct sev_user_data_pek_cert_import input; + struct sev_data_pek_cert_import *data; + void *pek_blob, *oca_blob; + int ret; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) + return -EFAULT; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + /* copy PEK certificate blobs from userspace */ + pek_blob = psp_copy_user_blob(input.pek_cert_address, input.pek_cert_len); + if (IS_ERR(pek_blob)) { + ret = PTR_ERR(pek_blob); + goto e_free; + } + + data->pek_cert_address = __psp_pa(pek_blob); + data->pek_cert_len = input.pek_cert_len; + + /* copy PEK certificate blobs from userspace */ + oca_blob = psp_copy_user_blob(input.oca_cert_address, input.oca_cert_len); + if (IS_ERR(oca_blob)) { + ret = PTR_ERR(oca_blob); + goto e_free_pek; + } + + data->oca_cert_address = __psp_pa(oca_blob); + data->oca_cert_len = input.oca_cert_len; + + /* If platform is not in INIT state then transition it to INIT */ + if (sev->state != SEV_STATE_INIT) { + ret = __sev_platform_init_locked(&argp->error); + if (ret) + goto e_free_oca; + } + + ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, data, &argp->error); + +e_free_oca: + kfree(oca_blob); +e_free_pek: + kfree(pek_blob); +e_free: + kfree(data); + return ret; +} + +static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp) +{ + struct sev_user_data_get_id2 input; + struct sev_data_get_id *data; + void *id_blob = NULL; + int ret; + + /* SEV GET_ID is available from SEV API v0.16 and up */ + if (!sev_version_greater_or_equal(0, 16)) + return -ENOTSUPP; + + if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) + return -EFAULT; + + /* Check if we have write access to the userspace buffer */ + if (input.address && + input.length && + !access_ok(input.address, input.length)) + return -EFAULT; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + if (input.address && input.length) { + id_blob = kmalloc(input.length, GFP_KERNEL); + if (!id_blob) { + kfree(data); + return -ENOMEM; + } + + data->address = __psp_pa(id_blob); + data->len = input.length; + } + + ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error); + + /* + * Firmware will return the length of the ID value (either the minimum + * required length or the actual length written), return it to the user. 
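The length negotiation here gives userspace the usual two-call pattern: probe with no buffer so the kernel copies back the length the firmware reported, size the allocation from that, then repeat the call. A sketch of a caller, assuming the uapi definitions in <linux/psp-sev.h> (struct sev_issue_cmd and struct sev_user_data_get_id2):

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/psp-sev.h>

int main(void)
{
	struct sev_user_data_get_id2 id = { 0 };
	struct sev_issue_cmd arg = {
		.cmd  = SEV_GET_ID2,
		.data = (__u64)(unsigned long)&id,
	};
	int fd = open("/dev/sev", O_RDWR);

	if (fd < 0)
		return 1;

	/* Pass 1: no buffer; on return id.length holds the required size. */
	ioctl(fd, SEV_ISSUE_CMD, &arg);
	if (!id.length)
		return 1;

	void *buf = malloc(id.length);
	if (!buf)
		return 1;

	id.address = (__u64)(unsigned long)buf;

	/* Pass 2: the firmware writes the ID into our buffer. */
	if (ioctl(fd, SEV_ISSUE_CMD, &arg) == 0)
		printf("got %u byte ID\n", id.length);

	free(buf);
	close(fd);
	return 0;
}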
+ */ + input.length = data->len; + + if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) { + ret = -EFAULT; + goto e_free; + } + + if (id_blob) { + if (copy_to_user((void __user *)input.address, + id_blob, data->len)) { + ret = -EFAULT; + goto e_free; + } + } + +e_free: + kfree(id_blob); + kfree(data); + + return ret; +} + +static int sev_ioctl_do_get_id(struct sev_issue_cmd *argp) +{ + struct sev_data_get_id *data; + u64 data_size, user_size; + void *id_blob, *mem; + int ret; + + /* SEV GET_ID available from SEV API v0.16 and up */ + if (!sev_version_greater_or_equal(0, 16)) + return -ENOTSUPP; + + /* SEV FW expects the buffer it fills with the ID to be + * 8-byte aligned. Memory allocated should be enough to + * hold data structure + alignment padding + memory + * where SEV FW writes the ID. + */ + data_size = ALIGN(sizeof(struct sev_data_get_id), 8); + user_size = sizeof(struct sev_user_data_get_id); + + mem = kzalloc(data_size + user_size, GFP_KERNEL); + if (!mem) + return -ENOMEM; + + data = mem; + id_blob = mem + data_size; + + data->address = __psp_pa(id_blob); + data->len = user_size; + + ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error); + if (!ret) { + if (copy_to_user((void __user *)argp->data, id_blob, data->len)) + ret = -EFAULT; + } + + kfree(mem); + + return ret; +} + +static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp) +{ + struct sev_device *sev = psp_master->sev_data; + struct sev_user_data_pdh_cert_export input; + void *pdh_blob = NULL, *cert_blob = NULL; + struct sev_data_pdh_cert_export *data; + int ret; + + /* If platform is not in INIT state then transition it to INIT. */ + if (sev->state != SEV_STATE_INIT) { + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + ret = __sev_platform_init_locked(&argp->error); + if (ret) + return ret; + } + + if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) + return -EFAULT; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + /* Userspace wants to query the certificate length. */ + if (!input.pdh_cert_address || + !input.pdh_cert_len || + !input.cert_chain_address) + goto cmd; + + /* Allocate a physically contiguous buffer to store the PDH blob. */ + if ((input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE) || + !access_ok(input.pdh_cert_address, input.pdh_cert_len)) { + ret = -EFAULT; + goto e_free; + } + + /* Allocate a physically contiguous buffer to store the cert chain blob. */ + if ((input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE) || + !access_ok(input.cert_chain_address, input.cert_chain_len)) { + ret = -EFAULT; + goto e_free; + } + + pdh_blob = kmalloc(input.pdh_cert_len, GFP_KERNEL); + if (!pdh_blob) { + ret = -ENOMEM; + goto e_free; + } + + data->pdh_cert_address = __psp_pa(pdh_blob); + data->pdh_cert_len = input.pdh_cert_len; + + cert_blob = kmalloc(input.cert_chain_len, GFP_KERNEL); + if (!cert_blob) { + ret = -ENOMEM; + goto e_free_pdh; + } + + data->cert_chain_address = __psp_pa(cert_blob); + data->cert_chain_len = input.cert_chain_len; + +cmd: + ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, data, &argp->error); + + /* If we query the length, FW responded with expected data. 
*/ + input.cert_chain_len = data->cert_chain_len; + input.pdh_cert_len = data->pdh_cert_len; + + if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) { + ret = -EFAULT; + goto e_free_cert; + } + + if (pdh_blob) { + if (copy_to_user((void __user *)input.pdh_cert_address, + pdh_blob, input.pdh_cert_len)) { + ret = -EFAULT; + goto e_free_cert; + } + } + + if (cert_blob) { + if (copy_to_user((void __user *)input.cert_chain_address, + cert_blob, input.cert_chain_len)) + ret = -EFAULT; + } + +e_free_cert: + kfree(cert_blob); +e_free_pdh: + kfree(pdh_blob); +e_free: + kfree(data); + return ret; +} + +static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) +{ + void __user *argp = (void __user *)arg; + struct sev_issue_cmd input; + int ret = -EFAULT; + + if (!psp_master || !psp_master->sev_data) + return -ENODEV; + + if (ioctl != SEV_ISSUE_CMD) + return -EINVAL; + + if (copy_from_user(&input, argp, sizeof(struct sev_issue_cmd))) + return -EFAULT; + + if (input.cmd > SEV_MAX) + return -EINVAL; + + mutex_lock(&sev_cmd_mutex); + + switch (input.cmd) { + + case SEV_FACTORY_RESET: + ret = sev_ioctl_do_reset(&input); + break; + case SEV_PLATFORM_STATUS: + ret = sev_ioctl_do_platform_status(&input); + break; + case SEV_PEK_GEN: + ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PEK_GEN, &input); + break; + case SEV_PDH_GEN: + ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PDH_GEN, &input); + break; + case SEV_PEK_CSR: + ret = sev_ioctl_do_pek_csr(&input); + break; + case SEV_PEK_CERT_IMPORT: + ret = sev_ioctl_do_pek_import(&input); + break; + case SEV_PDH_CERT_EXPORT: + ret = sev_ioctl_do_pdh_export(&input); + break; + case SEV_GET_ID: + pr_warn_once("SEV_GET_ID command is deprecated, use SEV_GET_ID2\n"); + ret = sev_ioctl_do_get_id(&input); + break; + case SEV_GET_ID2: + ret = sev_ioctl_do_get_id2(&input); + break; + default: + ret = -EINVAL; + goto out; + } + + if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd))) + ret = -EFAULT; +out: + mutex_unlock(&sev_cmd_mutex); + + return ret; +} + +static const struct file_operations sev_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = sev_ioctl, +}; + +int sev_platform_status(struct sev_user_data_status *data, int *error) +{ + return sev_do_cmd(SEV_CMD_PLATFORM_STATUS, data, error); +} +EXPORT_SYMBOL_GPL(sev_platform_status); + +int sev_guest_deactivate(struct sev_data_deactivate *data, int *error) +{ + return sev_do_cmd(SEV_CMD_DEACTIVATE, data, error); +} +EXPORT_SYMBOL_GPL(sev_guest_deactivate); + +int sev_guest_activate(struct sev_data_activate *data, int *error) +{ + return sev_do_cmd(SEV_CMD_ACTIVATE, data, error); +} +EXPORT_SYMBOL_GPL(sev_guest_activate); + +int sev_guest_decommission(struct sev_data_decommission *data, int *error) +{ + return sev_do_cmd(SEV_CMD_DECOMMISSION, data, error); +} +EXPORT_SYMBOL_GPL(sev_guest_decommission); + +int sev_guest_df_flush(int *error) +{ + return sev_do_cmd(SEV_CMD_DF_FLUSH, NULL, error); +} +EXPORT_SYMBOL_GPL(sev_guest_df_flush); + +static void sev_exit(struct kref *ref) +{ + struct sev_misc_dev *misc_dev = container_of(ref, struct sev_misc_dev, refcount); + + misc_deregister(&misc_dev->misc); +} + +static int sev_misc_init(struct sev_device *sev) +{ + struct device *dev = sev->dev; + int ret; + + /* + * SEV feature support can be detected on multiple devices but the SEV + * FW commands must be issued on the master. During probe, we do not + * know the master hence we create /dev/sev on the first device probe. 
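The single refcounted /dev/sev node created in sev_misc_init() here pairs with the f_op check in sev_issue_cmd_external_user() further down: an in-kernel user (KVM's SEV ioctls are the prime example) must present a file that really is /dev/sev before commands are issued on its behalf. A sketch of such a caller, with the descriptor arriving from userspace:

static int forward_sev_cmd(int sev_fd, unsigned int cmd, void *data)
{
	struct fd f = fdget(sev_fd);
	int error, ret;

	if (!f.file)
		return -EBADF;

	/* Fails with -EBADF unless the fd was opened on /dev/sev. */
	ret = sev_issue_cmd_external_user(f.file, cmd, data, &error);

	fdput(f);
	return ret;
}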
+ * sev_do_cmd() finds the right master device to which to issue the + * command to the firmware. + */ + if (!misc_dev) { + struct miscdevice *misc; + + misc_dev = devm_kzalloc(dev, sizeof(*misc_dev), GFP_KERNEL); + if (!misc_dev) + return -ENOMEM; + + misc = &misc_dev->misc; + misc->minor = MISC_DYNAMIC_MINOR; + misc->name = DEVICE_NAME; + misc->fops = &sev_fops; + + ret = misc_register(misc); + if (ret) + return ret; + + kref_init(&misc_dev->refcount); + } else { + kref_get(&misc_dev->refcount); + } + + init_waitqueue_head(&sev->int_queue); + sev->misc = misc_dev; + dev_dbg(dev, "registered SEV device\n"); + + return 0; +} + +int sev_dev_init(struct psp_device *psp) +{ + struct device *dev = psp->dev; + struct sev_device *sev; + int ret = -ENOMEM; + + sev = devm_kzalloc(dev, sizeof(*sev), GFP_KERNEL); + if (!sev) + goto e_err; + + psp->sev_data = sev; + + sev->dev = dev; + sev->psp = psp; + + sev->io_regs = psp->io_regs; + + sev->vdata = (struct sev_vdata *)psp->vdata->sev; + if (!sev->vdata) { + ret = -ENODEV; + dev_err(dev, "sev: missing driver data\n"); + goto e_err; + } + + psp_set_sev_irq_handler(psp, sev_irq_handler, sev); + + ret = sev_misc_init(sev); + if (ret) + goto e_irq; + + dev_notice(dev, "sev enabled\n"); + + return 0; + +e_irq: + psp_clear_sev_irq_handler(psp); +e_err: + psp->sev_data = NULL; + + dev_notice(dev, "sev initialization failed\n"); + + return ret; +} + +void sev_dev_destroy(struct psp_device *psp) +{ + struct sev_device *sev = psp->sev_data; + + if (!sev) + return; + + if (sev->misc) + kref_put(&misc_dev->refcount, sev_exit); + + psp_clear_sev_irq_handler(psp); +} + +int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd, + void *data, int *error) +{ + if (!filep || filep->f_op != &sev_fops) + return -EBADF; + + return sev_do_cmd(cmd, data, error); +} +EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user); + +void sev_pci_init(void) +{ + struct sev_device *sev = psp_master->sev_data; + int error, rc; + + if (!sev) + return; + + psp_timeout = psp_probe_timeout; + + if (sev_get_api_version()) + goto err; + + /* + * If platform is not in UNINIT state then firmware upgrade and/or + * platform INIT command will fail. These command require UNINIT state. + * + * In a normal boot we should never run into case where the firmware + * is not in UNINIT state on boot. But in case of kexec boot, a reboot + * may not go through a typical shutdown sequence and may leave the + * firmware in INIT or WORKING state. + */ + + if (sev->state != SEV_STATE_UNINIT) { + sev_platform_shutdown(NULL); + sev->state = SEV_STATE_UNINIT; + } + + if (sev_version_greater_or_equal(0, 15) && + sev_update_firmware(sev->dev) == 0) + sev_get_api_version(); + + /* Initialize the platform */ + rc = sev_platform_init(&error); + if (rc && (error == SEV_RET_SECURE_DATA_INVALID)) { + /* + * INIT command returned an integrity check failure + * status code, meaning that firmware load and + * validation of SEV related persistent data has + * failed and persistent state has been erased. + * Retrying INIT command here should succeed. 
+ */ + dev_dbg(sev->dev, "SEV: retrying INIT command"); + rc = sev_platform_init(&error); + } + + if (rc) { + dev_err(sev->dev, "SEV: failed to INIT error %#x\n", error); + return; + } + + dev_info(sev->dev, "SEV API:%d.%d build:%d\n", sev->api_major, + sev->api_minor, sev->build); + + return; + +err: + psp_master->sev_data = NULL; +} + +void sev_pci_exit(void) +{ + if (!psp_master->sev_data) + return; + + sev_platform_shutdown(NULL); +} diff --git a/drivers/crypto/ccp/sev-dev.h b/drivers/crypto/ccp/sev-dev.h new file mode 100644 index 000000000000..dd5c4fe82914 --- /dev/null +++ b/drivers/crypto/ccp/sev-dev.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * AMD Platform Security Processor (PSP) interface driver + * + * Copyright (C) 2017-2019 Advanced Micro Devices, Inc. + * + * Author: Brijesh Singh <brijesh.singh@amd.com> + */ + +#ifndef __SEV_DEV_H__ +#define __SEV_DEV_H__ + +#include <linux/device.h> +#include <linux/spinlock.h> +#include <linux/mutex.h> +#include <linux/list.h> +#include <linux/wait.h> +#include <linux/dmapool.h> +#include <linux/hw_random.h> +#include <linux/bitops.h> +#include <linux/interrupt.h> +#include <linux/irqreturn.h> +#include <linux/dmaengine.h> +#include <linux/psp-sev.h> +#include <linux/miscdevice.h> +#include <linux/capability.h> + +#define SEV_CMD_COMPLETE BIT(1) +#define SEV_CMDRESP_CMD_SHIFT 16 +#define SEV_CMDRESP_IOC BIT(0) + +struct sev_misc_dev { + struct kref refcount; + struct miscdevice misc; +}; + +struct sev_device { + struct device *dev; + struct psp_device *psp; + + void __iomem *io_regs; + + struct sev_vdata *vdata; + + int state; + unsigned int int_rcvd; + wait_queue_head_t int_queue; + struct sev_misc_dev *misc; + struct sev_user_data_status status_cmd_buf; + struct sev_data_init init_cmd_buf; + + u8 api_major; + u8 api_minor; + u8 build; +}; + +int sev_dev_init(struct psp_device *psp); +void sev_dev_destroy(struct psp_device *psp); + +void sev_pci_init(void); +void sev_pci_exit(void); + +#endif /* __SEV_DEV_H */ diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h index 53c12562d31e..423594608ad1 100644 --- a/drivers/crypto/ccp/sp-dev.h +++ b/drivers/crypto/ccp/sp-dev.h @@ -2,7 +2,7 @@ /* * AMD Secure Processor driver * - * Copyright (C) 2017-2018 Advanced Micro Devices, Inc. + * Copyright (C) 2017-2019 Advanced Micro Devices, Inc. * * Author: Tom Lendacky <thomas.lendacky@amd.com> * Author: Gary R Hook <gary.hook@amd.com> @@ -39,10 +39,23 @@ struct ccp_vdata { const unsigned int rsamax; }; -struct psp_vdata { +struct sev_vdata { const unsigned int cmdresp_reg; const unsigned int cmdbuff_addr_lo_reg; const unsigned int cmdbuff_addr_hi_reg; +}; + +struct tee_vdata { + const unsigned int cmdresp_reg; + const unsigned int cmdbuff_addr_lo_reg; + const unsigned int cmdbuff_addr_hi_reg; + const unsigned int ring_wptr_reg; + const unsigned int ring_rptr_reg; +}; + +struct psp_vdata { + const struct sev_vdata *sev; + const struct tee_vdata *tee; const unsigned int feature_reg; const unsigned int inten_reg; const unsigned int intsts_reg; diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c index b29d2e663e10..56c1f61c0f84 100644 --- a/drivers/crypto/ccp/sp-pci.c +++ b/drivers/crypto/ccp/sp-pci.c @@ -2,7 +2,7 @@ /* * AMD Secure Processor device driver * - * Copyright (C) 2013,2018 Advanced Micro Devices, Inc. + * Copyright (C) 2013,2019 Advanced Micro Devices, Inc. 
* * Author: Tom Lendacky <thomas.lendacky@amd.com> * Author: Gary R Hook <gary.hook@amd.com> @@ -262,19 +262,42 @@ static int sp_pci_resume(struct pci_dev *pdev) #endif #ifdef CONFIG_CRYPTO_DEV_SP_PSP -static const struct psp_vdata pspv1 = { +static const struct sev_vdata sevv1 = { .cmdresp_reg = 0x10580, .cmdbuff_addr_lo_reg = 0x105e0, .cmdbuff_addr_hi_reg = 0x105e4, +}; + +static const struct sev_vdata sevv2 = { + .cmdresp_reg = 0x10980, + .cmdbuff_addr_lo_reg = 0x109e0, + .cmdbuff_addr_hi_reg = 0x109e4, +}; + +static const struct tee_vdata teev1 = { + .cmdresp_reg = 0x10544, + .cmdbuff_addr_lo_reg = 0x10548, + .cmdbuff_addr_hi_reg = 0x1054c, + .ring_wptr_reg = 0x10550, + .ring_rptr_reg = 0x10554, +}; + +static const struct psp_vdata pspv1 = { + .sev = &sevv1, .feature_reg = 0x105fc, .inten_reg = 0x10610, .intsts_reg = 0x10614, }; static const struct psp_vdata pspv2 = { - .cmdresp_reg = 0x10980, - .cmdbuff_addr_lo_reg = 0x109e0, - .cmdbuff_addr_hi_reg = 0x109e4, + .sev = &sevv2, + .feature_reg = 0x109fc, + .inten_reg = 0x10690, + .intsts_reg = 0x10694, +}; + +static const struct psp_vdata pspv3 = { + .tee = &teev1, .feature_reg = 0x109fc, .inten_reg = 0x10690, .intsts_reg = 0x10694, @@ -312,12 +335,22 @@ static const struct sp_dev_vdata dev_vdata[] = { .psp_vdata = &pspv2, #endif }, + { /* 4 */ + .bar = 2, +#ifdef CONFIG_CRYPTO_DEV_SP_CCP + .ccp_vdata = &ccpv5a, +#endif +#ifdef CONFIG_CRYPTO_DEV_SP_PSP + .psp_vdata = &pspv3, +#endif + }, }; static const struct pci_device_id sp_pci_table[] = { { PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&dev_vdata[0] }, { PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&dev_vdata[1] }, { PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&dev_vdata[2] }, { PCI_VDEVICE(AMD, 0x1486), (kernel_ulong_t)&dev_vdata[3] }, + { PCI_VDEVICE(AMD, 0x15DF), (kernel_ulong_t)&dev_vdata[4] }, /* Last entry must be zero */ { 0, } }; diff --git a/drivers/crypto/ccp/tee-dev.c b/drivers/crypto/ccp/tee-dev.c new file mode 100644 index 000000000000..5e697a90ea7f --- /dev/null +++ b/drivers/crypto/ccp/tee-dev.c @@ -0,0 +1,375 @@ +// SPDX-License-Identifier: MIT +/* + * AMD Trusted Execution Environment (TEE) interface + * + * Author: Rijo Thomas <Rijo-john.Thomas@amd.com> + * Author: Devaraj Rangasamy <Devaraj.Rangasamy@amd.com> + * + * Copyright 2019 Advanced Micro Devices, Inc. 
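With psp_vdata split so that .sev and .tee are optional sub-tables (pspv1/pspv2 above carry only .sev, the new pspv3 only .tee), the PCI ID now selects which mailbox register layouts exist at all, and psp_init() can fan out on capability plus vdata. Only the TEE branch of that fan-out appears in this series' psp-dev.c hunk; a sketch of the symmetric whole, assuming psp_check_sev_support()/psp_check_tee_support() return 0 when the corresponding feature bit is present:

static int psp_init(struct psp_device *psp, unsigned int capability)
{
	int ret;

	if (!psp_check_sev_support(psp, capability)) {
		ret = sev_dev_init(psp);
		if (ret)
			return ret;
	}

	if (!psp_check_tee_support(psp, capability)) {
		ret = tee_dev_init(psp);
		if (ret)
			return ret;
	}

	return 0;
}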
+ */ + +#include <linux/types.h> +#include <linux/mutex.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/gfp.h> +#include <linux/psp-sev.h> +#include <linux/psp-tee.h> + +#include "psp-dev.h" +#include "tee-dev.h" + +static bool psp_dead; + +static int tee_alloc_ring(struct psp_tee_device *tee, int ring_size) +{ + struct ring_buf_manager *rb_mgr = &tee->rb_mgr; + void *start_addr; + + if (!ring_size) + return -EINVAL; + + /* We need actual physical address instead of DMA address, since + * Trusted OS running on AMD Secure Processor will map this region + */ + start_addr = (void *)__get_free_pages(GFP_KERNEL, get_order(ring_size)); + if (!start_addr) + return -ENOMEM; + + rb_mgr->ring_start = start_addr; + rb_mgr->ring_size = ring_size; + rb_mgr->ring_pa = __psp_pa(start_addr); + mutex_init(&rb_mgr->mutex); + + return 0; +} + +static void tee_free_ring(struct psp_tee_device *tee) +{ + struct ring_buf_manager *rb_mgr = &tee->rb_mgr; + + if (!rb_mgr->ring_start) + return; + + free_pages((unsigned long)rb_mgr->ring_start, + get_order(rb_mgr->ring_size)); + + rb_mgr->ring_start = NULL; + rb_mgr->ring_size = 0; + rb_mgr->ring_pa = 0; + mutex_destroy(&rb_mgr->mutex); +} + +static int tee_wait_cmd_poll(struct psp_tee_device *tee, unsigned int timeout, + unsigned int *reg) +{ + /* ~10ms sleep per loop => nloop = timeout * 100 */ + int nloop = timeout * 100; + + while (--nloop) { + *reg = ioread32(tee->io_regs + tee->vdata->cmdresp_reg); + if (*reg & PSP_CMDRESP_RESP) + return 0; + + usleep_range(10000, 10100); + } + + dev_err(tee->dev, "tee: command timed out, disabling PSP\n"); + psp_dead = true; + + return -ETIMEDOUT; +} + +static +struct tee_init_ring_cmd *tee_alloc_cmd_buffer(struct psp_tee_device *tee) +{ + struct tee_init_ring_cmd *cmd; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) + return NULL; + + cmd->hi_addr = upper_32_bits(tee->rb_mgr.ring_pa); + cmd->low_addr = lower_32_bits(tee->rb_mgr.ring_pa); + cmd->size = tee->rb_mgr.ring_size; + + dev_dbg(tee->dev, "tee: ring address: high = 0x%x low = 0x%x size = %u\n", + cmd->hi_addr, cmd->low_addr, cmd->size); + + return cmd; +} + +static inline void tee_free_cmd_buffer(struct tee_init_ring_cmd *cmd) +{ + kfree(cmd); +} + +static int tee_init_ring(struct psp_tee_device *tee) +{ + int ring_size = MAX_RING_BUFFER_ENTRIES * sizeof(struct tee_ring_cmd); + struct tee_init_ring_cmd *cmd; + phys_addr_t cmd_buffer; + unsigned int reg; + int ret; + + BUILD_BUG_ON(sizeof(struct tee_ring_cmd) != 1024); + + ret = tee_alloc_ring(tee, ring_size); + if (ret) { + dev_err(tee->dev, "tee: ring allocation failed %d\n", ret); + return ret; + } + + tee->rb_mgr.wptr = 0; + + cmd = tee_alloc_cmd_buffer(tee); + if (!cmd) { + tee_free_ring(tee); + return -ENOMEM; + } + + cmd_buffer = __psp_pa((void *)cmd); + + /* Send command buffer details to Trusted OS by writing to + * CPU-PSP message registers + */ + + iowrite32(lower_32_bits(cmd_buffer), + tee->io_regs + tee->vdata->cmdbuff_addr_lo_reg); + iowrite32(upper_32_bits(cmd_buffer), + tee->io_regs + tee->vdata->cmdbuff_addr_hi_reg); + iowrite32(TEE_RING_INIT_CMD, + tee->io_regs + tee->vdata->cmdresp_reg); + + ret = tee_wait_cmd_poll(tee, TEE_DEFAULT_TIMEOUT, ®); + if (ret) { + dev_err(tee->dev, "tee: ring init command timed out\n"); + tee_free_ring(tee); + goto free_buf; + } + + if (reg & PSP_CMDRESP_ERR_MASK) { + dev_err(tee->dev, "tee: ring init command failed (%#010x)\n", + reg & PSP_CMDRESP_ERR_MASK); + tee_free_ring(tee); + ret = -EIO; + } + +free_buf: + tee_free_cmd_buffer(cmd); + 
+ return ret; +} + +static void tee_destroy_ring(struct psp_tee_device *tee) +{ + unsigned int reg; + int ret; + + if (!tee->rb_mgr.ring_start) + return; + + if (psp_dead) + goto free_ring; + + iowrite32(TEE_RING_DESTROY_CMD, + tee->io_regs + tee->vdata->cmdresp_reg); + + ret = tee_wait_cmd_poll(tee, TEE_DEFAULT_TIMEOUT, ®); + if (ret) { + dev_err(tee->dev, "tee: ring destroy command timed out\n"); + } else if (reg & PSP_CMDRESP_ERR_MASK) { + dev_err(tee->dev, "tee: ring destroy command failed (%#010x)\n", + reg & PSP_CMDRESP_ERR_MASK); + } + +free_ring: + tee_free_ring(tee); +} + +int tee_dev_init(struct psp_device *psp) +{ + struct device *dev = psp->dev; + struct psp_tee_device *tee; + int ret; + + ret = -ENOMEM; + tee = devm_kzalloc(dev, sizeof(*tee), GFP_KERNEL); + if (!tee) + goto e_err; + + psp->tee_data = tee; + + tee->dev = dev; + tee->psp = psp; + + tee->io_regs = psp->io_regs; + + tee->vdata = (struct tee_vdata *)psp->vdata->tee; + if (!tee->vdata) { + ret = -ENODEV; + dev_err(dev, "tee: missing driver data\n"); + goto e_err; + } + + ret = tee_init_ring(tee); + if (ret) { + dev_err(dev, "tee: failed to init ring buffer\n"); + goto e_err; + } + + dev_notice(dev, "tee enabled\n"); + + return 0; + +e_err: + psp->tee_data = NULL; + + dev_notice(dev, "tee initialization failed\n"); + + return ret; +} + +void tee_dev_destroy(struct psp_device *psp) +{ + struct psp_tee_device *tee = psp->tee_data; + + if (!tee) + return; + + tee_destroy_ring(tee); +} + +static int tee_submit_cmd(struct psp_tee_device *tee, enum tee_cmd_id cmd_id, + void *buf, size_t len, struct tee_ring_cmd **resp) +{ + struct tee_ring_cmd *cmd; + u32 rptr, wptr; + int nloop = 1000, ret = 0; + + *resp = NULL; + + mutex_lock(&tee->rb_mgr.mutex); + + wptr = tee->rb_mgr.wptr; + + /* Check if ring buffer is full */ + do { + rptr = ioread32(tee->io_regs + tee->vdata->ring_rptr_reg); + + if (!(wptr + sizeof(struct tee_ring_cmd) == rptr)) + break; + + dev_info(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n", + rptr, wptr); + + /* Wait if ring buffer is full */ + mutex_unlock(&tee->rb_mgr.mutex); + schedule_timeout_interruptible(msecs_to_jiffies(10)); + mutex_lock(&tee->rb_mgr.mutex); + + } while (--nloop); + + if (!nloop && (wptr + sizeof(struct tee_ring_cmd) == rptr)) { + dev_err(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n", + rptr, wptr); + ret = -EBUSY; + goto unlock; + } + + /* Pointer to empty data entry in ring buffer */ + cmd = (struct tee_ring_cmd *)(tee->rb_mgr.ring_start + wptr); + + /* Write command data into ring buffer */ + cmd->cmd_id = cmd_id; + cmd->cmd_state = TEE_CMD_STATE_INIT; + memset(&cmd->buf[0], 0, sizeof(cmd->buf)); + memcpy(&cmd->buf[0], buf, len); + + /* Update local copy of write pointer */ + tee->rb_mgr.wptr += sizeof(struct tee_ring_cmd); + if (tee->rb_mgr.wptr >= tee->rb_mgr.ring_size) + tee->rb_mgr.wptr = 0; + + /* Trigger interrupt to Trusted OS */ + iowrite32(tee->rb_mgr.wptr, tee->io_regs + tee->vdata->ring_wptr_reg); + + /* The response is provided by Trusted OS in same + * location as submitted data entry within ring buffer. 
+ */ + *resp = cmd; + +unlock: + mutex_unlock(&tee->rb_mgr.mutex); + + return ret; +} + +static int tee_wait_cmd_completion(struct psp_tee_device *tee, + struct tee_ring_cmd *resp, + unsigned int timeout) +{ + /* ~5ms sleep per loop => nloop = timeout * 200 */ + int nloop = timeout * 200; + + while (--nloop) { + if (resp->cmd_state == TEE_CMD_STATE_COMPLETED) + return 0; + + usleep_range(5000, 5100); + } + + dev_err(tee->dev, "tee: command 0x%x timed out, disabling PSP\n", + resp->cmd_id); + + psp_dead = true; + + return -ETIMEDOUT; +} + +int psp_tee_process_cmd(enum tee_cmd_id cmd_id, void *buf, size_t len, + u32 *status) +{ + struct psp_device *psp = psp_get_master_device(); + struct psp_tee_device *tee; + struct tee_ring_cmd *resp; + int ret; + + if (!buf || !status || !len || len > sizeof(resp->buf)) + return -EINVAL; + + *status = 0; + + if (!psp || !psp->tee_data) + return -ENODEV; + + if (psp_dead) + return -EBUSY; + + tee = psp->tee_data; + + ret = tee_submit_cmd(tee, cmd_id, buf, len, &resp); + if (ret) + return ret; + + ret = tee_wait_cmd_completion(tee, resp, TEE_DEFAULT_TIMEOUT); + if (ret) + return ret; + + memcpy(buf, &resp->buf[0], len); + *status = resp->status; + + return 0; +} +EXPORT_SYMBOL(psp_tee_process_cmd); + +int psp_check_tee_status(void) +{ + struct psp_device *psp = psp_get_master_device(); + + if (!psp || !psp->tee_data) + return -ENODEV; + + return 0; +} +EXPORT_SYMBOL(psp_check_tee_status); diff --git a/drivers/crypto/ccp/tee-dev.h b/drivers/crypto/ccp/tee-dev.h new file mode 100644 index 000000000000..f09960112115 --- /dev/null +++ b/drivers/crypto/ccp/tee-dev.h @@ -0,0 +1,110 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Author: Rijo Thomas <Rijo-john.Thomas@amd.com> + * Author: Devaraj Rangasamy <Devaraj.Rangasamy@amd.com> + * + */ + +/* This file describes the TEE communication interface between host and AMD + * Secure Processor + */ + +#ifndef __TEE_DEV_H__ +#define __TEE_DEV_H__ + +#include <linux/device.h> +#include <linux/mutex.h> + +#define TEE_DEFAULT_TIMEOUT 10 +#define MAX_BUFFER_SIZE 992 + +/** + * enum tee_ring_cmd_id - TEE interface commands for ring buffer configuration + * @TEE_RING_INIT_CMD: Initialize ring buffer + * @TEE_RING_DESTROY_CMD: Destroy ring buffer + * @TEE_RING_MAX_CMD: Maximum command id + */ +enum tee_ring_cmd_id { + TEE_RING_INIT_CMD = 0x00010000, + TEE_RING_DESTROY_CMD = 0x00020000, + TEE_RING_MAX_CMD = 0x000F0000, +}; + +/** + * struct tee_init_ring_cmd - Command to init TEE ring buffer + * @low_addr: bits [31:0] of the physical address of ring buffer + * @hi_addr: bits [63:32] of the physical address of ring buffer + * @size: size of ring buffer in bytes + */ +struct tee_init_ring_cmd { + u32 low_addr; + u32 hi_addr; + u32 size; +}; + +#define MAX_RING_BUFFER_ENTRIES 32 + +/** + * struct ring_buf_manager - Helper structure to manage ring buffer. 
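psp_tee_process_cmd() above is the entire external contract of the TEE transport: the caller's buffer (at most MAX_BUFFER_SIZE bytes) is submitted, the response overwrites it in place, and *status carries the Trusted OS verdict separately from the transport errno. A sketch of a caller; the command ID and payload layout here are hypothetical, since only the ring-management IDs are defined in this patch and real IDs come from <linux/psp-tee.h> and the trusted application ABI:

struct example_tee_req {
	u32 op;			/* hypothetical payload layout */
	u32 result;
};

static int send_example_tee_cmd(void)
{
	struct example_tee_req req = { .op = 1 };
	u32 status;
	int ret;

	/* 0x00030000 is a placeholder command ID, not from this patch. */
	ret = psp_tee_process_cmd((enum tee_cmd_id)0x00030000, &req,
				  sizeof(req), &status);
	if (ret)
		return ret;		/* transport error or timeout */

	return status ? -EIO : 0;	/* nonzero: Trusted OS rejected it */
}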
+ * @ring_start: starting address of ring buffer + * @ring_size: size of ring buffer in bytes + * @ring_pa: physical address of ring buffer + * @wptr: index to the last written entry in ring buffer + */ +struct ring_buf_manager { + struct mutex mutex; /* synchronizes access to ring buffer */ + void *ring_start; + u32 ring_size; + phys_addr_t ring_pa; + u32 wptr; +}; + +struct psp_tee_device { + struct device *dev; + struct psp_device *psp; + void __iomem *io_regs; + struct tee_vdata *vdata; + struct ring_buf_manager rb_mgr; +}; + +/** + * enum tee_cmd_state - TEE command states for the ring buffer interface + * @TEE_CMD_STATE_INIT: initial state of command when sent from host + * @TEE_CMD_STATE_PROCESS: command being processed by TEE environment + * @TEE_CMD_STATE_COMPLETED: command processing completed + */ +enum tee_cmd_state { + TEE_CMD_STATE_INIT, + TEE_CMD_STATE_PROCESS, + TEE_CMD_STATE_COMPLETED, +}; + +/** + * struct tee_ring_cmd - Structure of the command buffer in TEE ring + * @cmd_id: refers to &enum tee_cmd_id. Command id for the ring buffer + * interface + * @cmd_state: refers to &enum tee_cmd_state + * @status: status of TEE command execution + * @res0: reserved region + * @pdata: private data (currently unused) + * @res1: reserved region + * @buf: TEE command specific buffer + */ +struct tee_ring_cmd { + u32 cmd_id; + u32 cmd_state; + u32 status; + u32 res0[1]; + u64 pdata; + u32 res1[2]; + u8 buf[MAX_BUFFER_SIZE]; + + /* Total size: 1024 bytes */ +} __packed; + +int tee_dev_init(struct psp_device *psp); +void tee_dev_destroy(struct psp_device *psp); + +#endif /* __TEE_DEV_H__ */ diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c index 64d318dc0d47..2fc0e0da790b 100644 --- a/drivers/crypto/ccree/cc_aead.c +++ b/drivers/crypto/ccree/cc_aead.c @@ -237,7 +237,7 @@ static void cc_aead_complete(struct device *dev, void *cc_req, int err) * revealed the decrypted message --> zero its memory. 
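The one-line cc_aead_complete() fix that follows changes where the zeroing window starts: an AEAD destination scatterlist is laid out as assoclen bytes of associated data followed by cryptlen bytes of (now decrypted) payload, and sg_zero_buffer(sgl, nents, buflen, skip) zeroes buflen bytes starting skip bytes in. Zeroing from offset 0 therefore wiped the non-secret AAD while leaving the last assoclen bytes of revealed plaintext intact; the corrected call scrubs exactly the payload:

	/* Skip the associated data, wipe every decrypted payload byte. */
	sg_zero_buffer(areq->dst, sg_nents(areq->dst),
		       areq->cryptlen, areq->assoclen);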
*/ sg_zero_buffer(areq->dst, sg_nents(areq->dst), - areq->cryptlen, 0); + areq->cryptlen, areq->assoclen); err = -EBADMSG; } /*ENCRYPT*/ @@ -385,13 +385,13 @@ static int validate_keys_sizes(struct cc_aead_ctx *ctx) return -EINVAL; break; default: - dev_err(dev, "Invalid auth_mode=%d\n", ctx->auth_mode); + dev_dbg(dev, "Invalid auth_mode=%d\n", ctx->auth_mode); return -EINVAL; } /* Check cipher key size */ if (ctx->flow_mode == S_DIN_to_DES) { if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) { - dev_err(dev, "Invalid cipher(3DES) key size: %u\n", + dev_dbg(dev, "Invalid cipher(3DES) key size: %u\n", ctx->enc_keylen); return -EINVAL; } @@ -399,7 +399,7 @@ static int validate_keys_sizes(struct cc_aead_ctx *ctx) if (ctx->enc_keylen != AES_KEYSIZE_128 && ctx->enc_keylen != AES_KEYSIZE_192 && ctx->enc_keylen != AES_KEYSIZE_256) { - dev_err(dev, "Invalid cipher(AES) key size: %u\n", + dev_dbg(dev, "Invalid cipher(AES) key size: %u\n", ctx->enc_keylen); return -EINVAL; } @@ -562,7 +562,7 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, rc = crypto_authenc_extractkeys(&keys, key, keylen); if (rc) - goto badkey; + return rc; enckey = keys.enckey; authkey = keys.authkey; ctx->enc_keylen = keys.enckeylen; @@ -570,10 +570,9 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, if (ctx->cipher_mode == DRV_CIPHER_CTR) { /* the nonce is stored in bytes at end of key */ - rc = -EINVAL; if (ctx->enc_keylen < (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE)) - goto badkey; + return -EINVAL; /* Copy nonce from last 4 bytes in CTR key to * first 4 bytes in CTR IV */ @@ -591,7 +590,7 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, rc = validate_keys_sizes(ctx); if (rc) - goto badkey; + return rc; /* STAT_PHASE_1: Copy key to ctx */ @@ -605,7 +604,7 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */ rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen); if (rc) - goto badkey; + return rc; } /* STAT_PHASE_2: Create sequence */ @@ -622,8 +621,7 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, break; /* No auth. key setup */ default: dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode); - rc = -ENOTSUPP; - goto badkey; + return -ENOTSUPP; } /* STAT_PHASE_3: Submit sequence to HW */ @@ -632,18 +630,12 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len); if (rc) { dev_err(dev, "send_request() failed (rc=%d)\n", rc); - goto setkey_error; + return rc; } } /* Update STAT_PHASE_3 */ return rc; - -badkey: - crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); - -setkey_error: - return rc; } static int cc_des3_aead_setkey(struct crypto_aead *aead, const u8 *key, @@ -1567,7 +1559,7 @@ static int config_ccm_adata(struct aead_request *req) /* taken from crypto/ccm.c */ /* 2 <= L <= 8, so 1 <= L' <= 7. 
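The config_ccm_adata() check that follows reads the CCM size-of-length field straight from the first IV byte: the flags octet stores L' = L - 1 (the surrounding function, outside this hunk, derives l as req->iv[0] + 1), so a legal L in [2, 8] means iv[0] must fall in [1, 7]. As a concrete case, iv[0] == 3 gives L == 4, i.e. a 4-byte length field (messages up to 2^32 - 1 bytes) and a 15 - 4 = 11 byte nonce:

	u8 l = req->iv[0] + 1;	/* iv[0] carries L' = L - 1 */

	if (l < 2 || l > 8)	/* reject malformed length encodings */
		return -EINVAL;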
*/ if (l < 2 || l > 8) { - dev_err(dev, "illegal iv value %X\n", req->iv[0]); + dev_dbg(dev, "illegal iv value %X\n", req->iv[0]); return -EINVAL; } memcpy(b0, req->iv, AES_BLOCK_SIZE); @@ -1925,7 +1917,6 @@ static int cc_proc_aead(struct aead_request *req, if (validate_data_size(ctx, direct, req)) { dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n", req->cryptlen, areq_ctx->assoclen); - crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN); return -EINVAL; } @@ -2065,7 +2056,7 @@ static int cc_rfc4309_ccm_encrypt(struct aead_request *req) int rc = -EINVAL; if (!valid_assoclen(req)) { - dev_err(dev, "invalid Assoclen:%u\n", req->assoclen); + dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen); goto out; } @@ -2115,7 +2106,7 @@ static int cc_rfc4309_ccm_decrypt(struct aead_request *req) int rc = -EINVAL; if (!valid_assoclen(req)) { - dev_err(dev, "invalid Assoclen:%u\n", req->assoclen); + dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen); goto out; } @@ -2234,7 +2225,7 @@ static int cc_rfc4106_gcm_encrypt(struct aead_request *req) int rc = -EINVAL; if (!valid_assoclen(req)) { - dev_err(dev, "invalid Assoclen:%u\n", req->assoclen); + dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen); goto out; } @@ -2265,7 +2256,7 @@ static int cc_rfc4543_gcm_encrypt(struct aead_request *req) int rc = -EINVAL; if (!valid_assoclen(req)) { - dev_err(dev, "invalid Assoclen:%u\n", req->assoclen); + dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen); goto out; } @@ -2299,7 +2290,7 @@ static int cc_rfc4106_gcm_decrypt(struct aead_request *req) int rc = -EINVAL; if (!valid_assoclen(req)) { - dev_err(dev, "invalid Assoclen:%u\n", req->assoclen); + dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen); goto out; } @@ -2330,7 +2321,7 @@ static int cc_rfc4543_gcm_decrypt(struct aead_request *req) int rc = -EINVAL; if (!valid_assoclen(req)) { - dev_err(dev, "invalid Assoclen:%u\n", req->assoclen); + dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen); goto out; } diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c index 3112b58d0bb1..7d6252d892d7 100644 --- a/drivers/crypto/ccree/cc_cipher.c +++ b/drivers/crypto/ccree/cc_cipher.c @@ -291,7 +291,6 @@ static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key, /* This check the size of the protected key token */ if (keylen != sizeof(hki)) { dev_err(dev, "Unsupported protected key size %d.\n", keylen); - crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } @@ -303,8 +302,7 @@ static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key, keylen = hki.keylen; if (validate_keys_sizes(ctx_p, keylen)) { - dev_err(dev, "Unsupported key size %d.\n", keylen); - crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + dev_dbg(dev, "Unsupported key size %d.\n", keylen); return -EINVAL; } @@ -394,8 +392,7 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key, /* STAT_PHASE_0: Init and sanity checks */ if (validate_keys_sizes(ctx_p, keylen)) { - dev_err(dev, "Unsupported key size %d.\n", keylen); - crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + dev_dbg(dev, "Unsupported key size %d.\n", keylen); return -EINVAL; } @@ -523,6 +520,7 @@ static void cc_setup_readiv_desc(struct crypto_tfm *tfm, } } + static void cc_setup_state_desc(struct crypto_tfm *tfm, struct cipher_req_ctx *req_ctx, unsigned int ivsize, unsigned int nbytes, @@ -534,8 +532,6 @@ static void cc_setup_state_desc(struct crypto_tfm *tfm, int cipher_mode = ctx_p->cipher_mode; int flow_mode = 
ctx_p->flow_mode; int direction = req_ctx->gen_ctx.op_type; - dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr; - unsigned int key_len = ctx_p->keylen; dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr; unsigned int du_size = nbytes; @@ -571,6 +567,47 @@ static void cc_setup_state_desc(struct crypto_tfm *tfm, case DRV_CIPHER_XTS: case DRV_CIPHER_ESSIV: case DRV_CIPHER_BITLOCKER: + break; + default: + dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode); + } +} + + +static void cc_setup_xex_state_desc(struct crypto_tfm *tfm, + struct cipher_req_ctx *req_ctx, + unsigned int ivsize, unsigned int nbytes, + struct cc_hw_desc desc[], + unsigned int *seq_size) +{ + struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm); + struct device *dev = drvdata_to_dev(ctx_p->drvdata); + int cipher_mode = ctx_p->cipher_mode; + int flow_mode = ctx_p->flow_mode; + int direction = req_ctx->gen_ctx.op_type; + dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr; + unsigned int key_len = ctx_p->keylen; + dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr; + unsigned int du_size = nbytes; + + struct cc_crypto_alg *cc_alg = + container_of(tfm->__crt_alg, struct cc_crypto_alg, + skcipher_alg.base); + + if (cc_alg->data_unit) + du_size = cc_alg->data_unit; + + switch (cipher_mode) { + case DRV_CIPHER_ECB: + break; + case DRV_CIPHER_CBC: + case DRV_CIPHER_CBC_CTS: + case DRV_CIPHER_CTR: + case DRV_CIPHER_OFB: + break; + case DRV_CIPHER_XTS: + case DRV_CIPHER_ESSIV: + case DRV_CIPHER_BITLOCKER: /* load XEX key */ hw_desc_init(&desc[*seq_size]); set_cipher_mode(&desc[*seq_size], cipher_mode); @@ -836,8 +873,7 @@ static int cc_cipher_process(struct skcipher_request *req, /* TODO: check data length according to mode */ if (validate_data_size(ctx_p, nbytes)) { - dev_err(dev, "Unsupported data size %d.\n", nbytes); - crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN); + dev_dbg(dev, "Unsupported data size %d.\n", nbytes); rc = -EINVAL; goto exit_process; } @@ -881,12 +917,14 @@ static int cc_cipher_process(struct skcipher_request *req, /* STAT_PHASE_2: Create sequence */ - /* Setup IV and XEX key used */ + /* Setup state (IV) */ cc_setup_state_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len); /* Setup MLLI line, if needed */ cc_setup_mlli_desc(tfm, req_ctx, dst, src, nbytes, req, desc, &seq_len); /* Setup key */ cc_setup_key_desc(tfm, req_ctx, nbytes, desc, &seq_len); + /* Setup state (IV and XEX key) */ + cc_setup_xex_state_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len); /* Data processing */ cc_setup_flow_desc(tfm, req_ctx, dst, src, nbytes, desc, &seq_len); /* Read next IV */ diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c index 8b8eee513c27..532bc95a8373 100644 --- a/drivers/crypto/ccree/cc_driver.c +++ b/drivers/crypto/ccree/cc_driver.c @@ -133,7 +133,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id) u32 imr; /* STAT_OP_TYPE_GENERIC STAT_PHASE_0: Interrupt */ - /* if driver suspended return, probebly shared interrupt */ + /* if driver suspended return, probably shared interrupt */ if (cc_pm_is_dev_suspended(dev)) return IRQ_NONE; @@ -271,6 +271,7 @@ static int init_cc_resources(struct platform_device *plat_dev) const struct cc_hw_data *hw_rev; const struct of_device_id *dev_id; struct clk *clk; + int irq; int rc = 0; new_drvdata = devm_kzalloc(dev, sizeof(*new_drvdata), GFP_KERNEL); @@ -337,9 +338,9 @@ static int init_cc_resources(struct platform_device *plat_dev) &req_mem_cc_regs->start, new_drvdata->cc_base); /* Then IRQ */ - new_drvdata->irq = 
platform_get_irq(plat_dev, 0); - if (new_drvdata->irq < 0) - return new_drvdata->irq; + irq = platform_get_irq(plat_dev, 0); + if (irq < 0) + return irq; init_completion(&new_drvdata->hw_queue_avail); @@ -442,14 +443,13 @@ static int init_cc_resources(struct platform_device *plat_dev) dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X/0x%8X, Driver version %s\n", hw_rev->name, hw_rev_pidr, sig_cidr, DRV_MODULE_VERSION); /* register the driver isr function */ - rc = devm_request_irq(dev, new_drvdata->irq, cc_isr, - IRQF_SHARED, "ccree", new_drvdata); + rc = devm_request_irq(dev, irq, cc_isr, IRQF_SHARED, "ccree", + new_drvdata); if (rc) { - dev_err(dev, "Could not register to interrupt %d\n", - new_drvdata->irq); + dev_err(dev, "Could not register to interrupt %d\n", irq); goto post_clk_err; } - dev_dbg(dev, "Registered to IRQ: %d\n", new_drvdata->irq); + dev_dbg(dev, "Registered to IRQ: %d\n", irq); rc = init_cc_regs(new_drvdata, true); if (rc) { @@ -465,7 +465,7 @@ static int init_cc_resources(struct platform_device *plat_dev) rc = cc_fips_init(new_drvdata); if (rc) { - dev_err(dev, "CC_FIPS_INIT failed 0x%x\n", rc); + dev_err(dev, "cc_fips_init failed 0x%x\n", rc); goto post_debugfs_err; } rc = cc_sram_mgr_init(new_drvdata); @@ -490,13 +490,13 @@ static int init_cc_resources(struct platform_device *plat_dev) rc = cc_buffer_mgr_init(new_drvdata); if (rc) { - dev_err(dev, "buffer_mgr_init failed\n"); + dev_err(dev, "cc_buffer_mgr_init failed\n"); goto post_req_mgr_err; } rc = cc_pm_init(new_drvdata); if (rc) { - dev_err(dev, "ssi_power_mgr_init failed\n"); + dev_err(dev, "cc_pm_init failed\n"); goto post_buf_mgr_err; } diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h index ab31d4a68c80..c227718ba992 100644 --- a/drivers/crypto/ccree/cc_driver.h +++ b/drivers/crypto/ccree/cc_driver.h @@ -28,7 +28,6 @@ /* Registers definitions from shared/hw/ree_include */ #include "cc_host_regs.h" -#define CC_DEV_SHA_MAX 512 #include "cc_crypto_ctx.h" #include "cc_hw_queue_defs.h" #include "cc_sram_mgr.h" @@ -133,13 +132,11 @@ struct cc_crypto_req { /** * struct cc_drvdata - driver private data context * @cc_base: virt address of the CC registers - * @irq: device IRQ number - * @irq_mask: Interrupt mask shadow (1 for masked interrupts) + * @irq: bitmap indicating source of last interrupt */ struct cc_drvdata { void __iomem *cc_base; int irq; - u32 irq_mask; struct completion hw_queue_avail; /* wait for HW queue availability */ struct platform_device *plat_dev; cc_sram_addr_t mlli_sram_addr; @@ -161,6 +158,7 @@ struct cc_drvdata { int std_bodies; bool sec_disabled; u32 comp_mask; + bool pm_on; }; struct cc_crypto_alg { diff --git a/drivers/crypto/ccree/cc_fips.c b/drivers/crypto/ccree/cc_fips.c index 4c8bce33abcf..702aefc21447 100644 --- a/drivers/crypto/ccree/cc_fips.c +++ b/drivers/crypto/ccree/cc_fips.c @@ -120,7 +120,7 @@ static void fips_dsr(unsigned long devarg) cc_tee_handle_fips_error(drvdata); } - /* after verifing that there is nothing to do, + /* after verifying that there is nothing to do, * unmask AXI completion interrupt. 
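The removals of crypto_aead_set_flags()/crypto_tfm_set_flags()/crypto_ahash_set_flags() with CRYPTO_TFM_RES_BAD_KEY_LEN in the ccree hunks around this point track the crypto API's retirement of the CRYPTO_TFM_RES_* reporting flags: the errno returned from setkey is now the whole story. After the change, a setkey error path reduces to the following minimal sketch (program_hw_key() is a hypothetical helper, not from this patch):

static int example_setkey(struct crypto_ahash *ahash, const u8 *key,
			  unsigned int keylen)
{
	/* No tfm result flags any more; -EINVAL alone reports a bad key. */
	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256)
		return -EINVAL;

	return program_hw_key(ahash, key, keylen);	/* hypothetical */
}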
*/ val = (CC_REG(HOST_IMR) & ~irq); diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c index bc71bdf44a9f..912e5ce5079d 100644 --- a/drivers/crypto/ccree/cc_hash.c +++ b/drivers/crypto/ccree/cc_hash.c @@ -899,9 +899,6 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key, rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx); out: - if (rc) - crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN); - if (ctx->key_params.key_dma_addr) { dma_unmap_single(dev, ctx->key_params.key_dma_addr, ctx->key_params.keylen, DMA_TO_DEVICE); @@ -990,9 +987,6 @@ static int cc_xcbc_setkey(struct crypto_ahash *ahash, rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx); - if (rc) - crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN); - dma_unmap_single(dev, ctx->key_params.key_dma_addr, ctx->key_params.keylen, DMA_TO_DEVICE); dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n", @@ -2358,11 +2352,9 @@ cc_digest_len_addr(void *drvdata, u32 mode) case DRV_HASH_SHA256: case DRV_HASH_MD5: return digest_len_addr; -#if (CC_DEV_SHA_MAX > 256) case DRV_HASH_SHA384: case DRV_HASH_SHA512: return digest_len_addr + sizeof(cc_digest_len_init); -#endif default: return digest_len_addr; /*to avoid kernel crash*/ } diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c index dbc508fb719b..24c368b866f6 100644 --- a/drivers/crypto/ccree/cc_pm.c +++ b/drivers/crypto/ccree/cc_pm.c @@ -22,14 +22,8 @@ const struct dev_pm_ops ccree_pm = { int cc_pm_suspend(struct device *dev) { struct cc_drvdata *drvdata = dev_get_drvdata(dev); - int rc; dev_dbg(dev, "set HOST_POWER_DOWN_EN\n"); - rc = cc_suspend_req_queue(drvdata); - if (rc) { - dev_err(dev, "cc_suspend_req_queue (%x)\n", rc); - return rc; - } fini_cc_regs(drvdata); cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE); cc_clk_off(drvdata); @@ -48,7 +42,7 @@ int cc_pm_resume(struct device *dev) dev_err(dev, "failed getting clock back on. We're toast.\n"); return rc; } - /* wait for Crytpcell reset completion */ + /* wait for Cryptocell reset completion */ if (!cc_wait_for_reset_completion(drvdata)) { dev_err(dev, "Cryptocell reset not completed"); return -EBUSY; @@ -63,13 +57,6 @@ int cc_pm_resume(struct device *dev) /* check if tee fips error occurred during power down */ cc_tee_handle_fips_error(drvdata); - rc = cc_resume_req_queue(drvdata); - if (rc) { - dev_err(dev, "cc_resume_req_queue (%x)\n", rc); - return rc; - } - - /* must be after the queue resuming as it uses the HW queue*/ cc_init_hash_sram(drvdata); return 0; @@ -80,28 +67,20 @@ int cc_pm_get(struct device *dev) int rc = 0; struct cc_drvdata *drvdata = dev_get_drvdata(dev); - if (cc_req_queue_suspended(drvdata)) + if (drvdata->pm_on) rc = pm_runtime_get_sync(dev); - else - pm_runtime_get_noresume(dev); - return rc; + return (rc == 1 ? 
0 : rc);
 }

-int cc_pm_put_suspend(struct device *dev)
+void cc_pm_put_suspend(struct device *dev)
 {
-	int rc = 0;
 	struct cc_drvdata *drvdata = dev_get_drvdata(dev);

-	if (!cc_req_queue_suspended(drvdata)) {
+	if (drvdata->pm_on) {
 		pm_runtime_mark_last_busy(dev);
-		rc = pm_runtime_put_autosuspend(dev);
-	} else {
-		/* Something wrong happens*/
-		dev_err(dev, "request to suspend already suspended queue");
-		rc = -EBUSY;
+		pm_runtime_put_autosuspend(dev);
 	}
-	return rc;
 }

 bool cc_pm_is_dev_suspended(struct device *dev)
@@ -114,10 +93,10 @@ int cc_pm_init(struct cc_drvdata *drvdata)
 {
 	struct device *dev = drvdata_to_dev(drvdata);

-	/* must be before the enabling to avoid resdundent suspending */
+	/* must be before the enabling to avoid redundant suspending */
 	pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT);
 	pm_runtime_use_autosuspend(dev);
-	/* activate the PM module */
+	/* set us as active - note we won't do PM ops until cc_pm_go()! */
 	return pm_runtime_set_active(dev);
 }

@@ -125,9 +104,11 @@ int cc_pm_init(struct cc_drvdata *drvdata)
 void cc_pm_go(struct cc_drvdata *drvdata)
 {
 	pm_runtime_enable(drvdata_to_dev(drvdata));
+	drvdata->pm_on = true;
 }

 void cc_pm_fini(struct cc_drvdata *drvdata)
 {
 	pm_runtime_disable(drvdata_to_dev(drvdata));
+	drvdata->pm_on = false;
 }
diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h
index a7d98a5da2e1..80a18e11cae4 100644
--- a/drivers/crypto/ccree/cc_pm.h
+++ b/drivers/crypto/ccree/cc_pm.h
@@ -21,7 +21,7 @@ void cc_pm_fini(struct cc_drvdata *drvdata);
 int cc_pm_suspend(struct device *dev);
 int cc_pm_resume(struct device *dev);
 int cc_pm_get(struct device *dev);
-int cc_pm_put_suspend(struct device *dev);
+void cc_pm_put_suspend(struct device *dev);
 bool cc_pm_is_dev_suspended(struct device *dev);

 #else
@@ -35,25 +35,12 @@ static inline void cc_pm_go(struct cc_drvdata *drvdata) {}

 static inline void cc_pm_fini(struct cc_drvdata *drvdata) {}

-static inline int cc_pm_suspend(struct device *dev)
-{
-	return 0;
-}
-
-static inline int cc_pm_resume(struct device *dev)
-{
-	return 0;
-}
-
 static inline int cc_pm_get(struct device *dev)
 {
 	return 0;
 }

-static inline int cc_pm_put_suspend(struct device *dev)
-{
-	return 0;
-}
+static inline void cc_pm_put_suspend(struct device *dev) {}

 static inline bool cc_pm_is_dev_suspended(struct device *dev)
 {
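The cc_pm rework above replaces the request-queue suspend bookkeeping with a single pm_on flag. A minimal sketch of the resulting convention, assuming a driver-private flag that is only set once pm_runtime_enable() has run; the my_* names are illustrative and not part of the driver:

#include <linux/device.h>
#include <linux/pm_runtime.h>

struct my_drvdata {
	bool pm_on;	/* true only between pm_go() and pm_fini() */
};

static int my_pm_get(struct device *dev)
{
	struct my_drvdata *drvdata = dev_get_drvdata(dev);
	int rc = 0;

	if (drvdata->pm_on)
		rc = pm_runtime_get_sync(dev);

	/* pm_runtime_get_sync() returns 1 if already active; fold into 0 */
	return rc == 1 ? 0 : rc;
}

static void my_pm_put_suspend(struct device *dev)
{
	struct my_drvdata *drvdata = dev_get_drvdata(dev);

	if (drvdata->pm_on) {
		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);	/* result deliberately ignored */
	}
}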
diff --git a/drivers/crypto/ccree/cc_request_mgr.c b/drivers/crypto/ccree/cc_request_mgr.c
index a947d5a2cf35..9d61e6f12478 100644
--- a/drivers/crypto/ccree/cc_request_mgr.c
+++ b/drivers/crypto/ccree/cc_request_mgr.c
@@ -41,7 +41,6 @@ struct cc_req_mgr_handle {
 #else
 	struct tasklet_struct comptask;
 #endif
-	bool is_runtime_suspended;
 };

 struct cc_bl_item {
@@ -230,7 +229,7 @@ static int cc_queues_status(struct cc_drvdata *drvdata,
 	struct device *dev = drvdata_to_dev(drvdata);

 	/* SW queue is checked only once as it will not
-	 * be chaned during the poll because the spinlock_bh
+	 * be changed during the poll because the spinlock_bh
 	 * is held by the thread
 	 */
 	if (((req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)) ==
@@ -275,12 +274,11 @@ static int cc_queues_status(struct cc_drvdata *drvdata,
 * \param len The crypto sequence length
 * \param add_comp If "true": add an artificial dout DMA to mark completion
 *
- * \return int Returns -EINPROGRESS or error code
 */
-static int cc_do_send_request(struct cc_drvdata *drvdata,
-			      struct cc_crypto_req *cc_req,
-			      struct cc_hw_desc *desc, unsigned int len,
-			      bool add_comp)
+static void cc_do_send_request(struct cc_drvdata *drvdata,
+			       struct cc_crypto_req *cc_req,
+			       struct cc_hw_desc *desc, unsigned int len,
+			       bool add_comp)
 {
 	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
 	unsigned int used_sw_slots;
@@ -303,8 +301,8 @@ static int cc_do_send_request(struct cc_drvdata *drvdata,

 	/*
 	 * We are about to push command to the HW via the command registers
-	 * that may refernece hsot memory. We need to issue a memory barrier
-	 * to make sure there are no outstnading memory writes
+	 * that may reference host memory. We need to issue a memory barrier
+	 * to make sure there are no outstanding memory writes
 	 */
 	wmb();
@@ -328,9 +326,6 @@ static int cc_do_send_request(struct cc_drvdata *drvdata,
 		/* Update the free slots in HW queue */
 		req_mgr_h->q_free_slots -= total_seq_len;
 	}
-
-	/* Operation still in process */
-	return -EINPROGRESS;
 }

 static void cc_enqueue_backlog(struct cc_drvdata *drvdata,
@@ -390,20 +385,15 @@ static void cc_proc_backlog(struct cc_drvdata *drvdata)
 			return;
 		}

-		rc = cc_do_send_request(drvdata, &bli->creq, bli->desc,
-					bli->len, false);
-
+		cc_do_send_request(drvdata, &bli->creq, bli->desc, bli->len,
+				   false);
 		spin_unlock(&mgr->hw_lock);

-		if (rc != -EINPROGRESS) {
-			cc_pm_put_suspend(dev);
-			creq->user_cb(dev, req, rc);
-		}
-
 		/* Remove ourselves from the backlog list */
 		spin_lock(&mgr->bl_lock);
 		list_del(&bli->list);
 		--mgr->bl_len;
+		kfree(bli);
 	}

 	spin_unlock(&mgr->bl_lock);
@@ -422,7 +412,7 @@ int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,

 	rc = cc_pm_get(dev);
 	if (rc) {
-		dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
+		dev_err(dev, "cc_pm_get returned %x\n", rc);
 		return rc;
 	}
@@ -451,8 +441,10 @@ int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
 		return -EBUSY;
 	}

-	if (!rc)
-		rc = cc_do_send_request(drvdata, cc_req, desc, len, false);
+	if (!rc) {
+		cc_do_send_request(drvdata, cc_req, desc, len, false);
+		rc = -EINPROGRESS;
+	}

 	spin_unlock_bh(&mgr->hw_lock);
 	return rc;
@@ -472,7 +464,7 @@ int cc_send_sync_request(struct cc_drvdata *drvdata,

 	rc = cc_pm_get(dev);
 	if (rc) {
-		dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
+		dev_err(dev, "cc_pm_get returned %x\n", rc);
 		return rc;
 	}
@@ -492,14 +484,8 @@ int cc_send_sync_request(struct cc_drvdata *drvdata,
 		reinit_completion(&drvdata->hw_queue_avail);
 	}

-	rc = cc_do_send_request(drvdata, cc_req, desc, len, true);
+	cc_do_send_request(drvdata, cc_req, desc, len, true);
 	spin_unlock_bh(&mgr->hw_lock);
-
-	if (rc != -EINPROGRESS) {
-		cc_pm_put_suspend(dev);
-		return rc;
-	}
-
 	wait_for_completion(&cc_req->seq_compl);
 	return 0;
 }
@@ -532,8 +518,8 @@ int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,

 	/*
 	 * We are about to push command to the HW via the command registers
-	 * that may refernece hsot memory. We need to issue a memory barrier
-	 * to make sure there are no outstnading memory writes
+	 * that may reference host memory. We need to issue a memory barrier
+	 * to make sure there are no outstanding memory writes
 	 */
 	wmb();
 	enqueue_seq(drvdata, desc, len);
@@ -668,7 +654,7 @@ static void comp_handler(unsigned long devarg)
 			request_mgr_handle->axi_completed +=
 						cc_axi_comp_count(drvdata);
 	}
-	/* after verifing that there is nothing to do,
+	/* after verifying that there is nothing to do,
 	 * unmask AXI completion interrupt
 	 */
 	cc_iowrite(drvdata, CC_REG(HOST_IMR),
@@ -677,52 +663,3 @@ static void comp_handler(unsigned long devarg)
 	cc_proc_backlog(drvdata);

 	dev_dbg(dev, "Comp.
handler done.\n"); } - -/* - * resume the queue configuration - no need to take the lock as this happens - * inside the spin lock protection - */ -#if defined(CONFIG_PM) -int cc_resume_req_queue(struct cc_drvdata *drvdata) -{ - struct cc_req_mgr_handle *request_mgr_handle = - drvdata->request_mgr_handle; - - spin_lock_bh(&request_mgr_handle->hw_lock); - request_mgr_handle->is_runtime_suspended = false; - spin_unlock_bh(&request_mgr_handle->hw_lock); - - return 0; -} - -/* - * suspend the queue configuration. Since it is used for the runtime suspend - * only verify that the queue can be suspended. - */ -int cc_suspend_req_queue(struct cc_drvdata *drvdata) -{ - struct cc_req_mgr_handle *request_mgr_handle = - drvdata->request_mgr_handle; - - /* lock the send_request */ - spin_lock_bh(&request_mgr_handle->hw_lock); - if (request_mgr_handle->req_queue_head != - request_mgr_handle->req_queue_tail) { - spin_unlock_bh(&request_mgr_handle->hw_lock); - return -EBUSY; - } - request_mgr_handle->is_runtime_suspended = true; - spin_unlock_bh(&request_mgr_handle->hw_lock); - - return 0; -} - -bool cc_req_queue_suspended(struct cc_drvdata *drvdata) -{ - struct cc_req_mgr_handle *request_mgr_handle = - drvdata->request_mgr_handle; - - return request_mgr_handle->is_runtime_suspended; -} - -#endif diff --git a/drivers/crypto/ccree/cc_request_mgr.h b/drivers/crypto/ccree/cc_request_mgr.h index f46cf766fe4d..ff7746aaaf35 100644 --- a/drivers/crypto/ccree/cc_request_mgr.h +++ b/drivers/crypto/ccree/cc_request_mgr.h @@ -40,12 +40,4 @@ void complete_request(struct cc_drvdata *drvdata); void cc_req_mgr_fini(struct cc_drvdata *drvdata); -#if defined(CONFIG_PM) -int cc_resume_req_queue(struct cc_drvdata *drvdata); - -int cc_suspend_req_queue(struct cc_drvdata *drvdata); - -bool cc_req_queue_suspended(struct cc_drvdata *drvdata); -#endif - #endif /*__REQUEST_MGR_H__*/ diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig index 91e424378217..f078b2686418 100644 --- a/drivers/crypto/chelsio/Kconfig +++ b/drivers/crypto/chelsio/Kconfig @@ -23,22 +23,22 @@ config CRYPTO_DEV_CHELSIO will be called chcr. config CHELSIO_IPSEC_INLINE - bool "Chelsio IPSec XFRM Tx crypto offload" - depends on CHELSIO_T4 + bool "Chelsio IPSec XFRM Tx crypto offload" + depends on CHELSIO_T4 depends on CRYPTO_DEV_CHELSIO - depends on XFRM_OFFLOAD - depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD - default n - ---help--- - Enable support for IPSec Tx Inline. + depends on XFRM_OFFLOAD + depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD + default n + ---help--- + Enable support for IPSec Tx Inline. config CRYPTO_DEV_CHELSIO_TLS - tristate "Chelsio Crypto Inline TLS Driver" - depends on CHELSIO_T4 - depends on TLS_TOE - select CRYPTO_DEV_CHELSIO - ---help--- - Support Chelsio Inline TLS with Chelsio crypto accelerator. + tristate "Chelsio Crypto Inline TLS Driver" + depends on CHELSIO_T4 + depends on TLS_TOE + select CRYPTO_DEV_CHELSIO + ---help--- + Support Chelsio Inline TLS with Chelsio crypto accelerator. - To compile this driver as a module, choose M here: the module - will be called chtls. + To compile this driver as a module, choose M here: the module + will be called chtls. 
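One more note on the ccree request-manager hunks above, before moving on to the Chelsio driver: with cc_do_send_request() now unable to fail, only its callers translate a successful enqueue into the crypto API's return convention. A condensed model of that split, with illustrative my_* names and the locking omitted; the exact error codes in the full-queue path are an assumption based on the hunks above:

#include <linux/errno.h>
#include <linux/types.h>

struct my_queue {
	unsigned int free_slots;
	bool may_backlog;	/* request allows CRYPTO_TFM_REQ_MAY_BACKLOG */
};

static void my_do_send(struct my_queue *q)
{
	/* push the descriptor sequence to HW; cannot fail at this point */
	q->free_slots--;
}

static int my_send_request(struct my_queue *q)
{
	if (!q->free_slots)
		/* full: either the request was backlogged or the caller retries */
		return q->may_backlog ? -EBUSY : -ENOSPC;

	my_do_send(q);
	return -EINPROGRESS;	/* completion is reported from the interrupt path */
}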
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index 1b4a5664e604..b4b9b22125d1 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -870,20 +870,13 @@ static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen) { - struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher)); - int err = 0; crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK); crypto_sync_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK); - err = crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen); - tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; - tfm->crt_flags |= - crypto_sync_skcipher_get_flags(ablkctx->sw_cipher) & - CRYPTO_TFM_RES_MASK; - return err; + return crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen); } static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher, @@ -912,7 +905,6 @@ static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher, ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC; return 0; badkey_err: - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); ablkctx->enckey_len = 0; return err; @@ -943,7 +935,6 @@ static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher, return 0; badkey_err: - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); ablkctx->enckey_len = 0; return err; @@ -981,7 +972,6 @@ static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher, return 0; badkey_err: - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); ablkctx->enckey_len = 0; return err; @@ -1379,7 +1369,8 @@ static int chcr_device_init(struct chcr_context *ctx) txq_perchan = ntxq / u_ctx->lldi.nchan; spin_lock(&ctx->dev->lock_chcr_dev); ctx->tx_chan_id = ctx->dev->tx_channel_id; - ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id; + ctx->dev->tx_channel_id = + (ctx->dev->tx_channel_id + 1) % u_ctx->lldi.nchan; spin_unlock(&ctx->dev->lock_chcr_dev); rxq_idx = ctx->tx_chan_id * rxq_perchan; rxq_idx += id % rxq_perchan; @@ -2173,7 +2164,6 @@ static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key, ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS; return 0; badkey_err: - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); ablkctx->enckey_len = 0; return err; @@ -3195,9 +3185,6 @@ static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) aeadctx->mayverify = VERIFY_SW; break; default: - - crypto_tfm_set_flags((struct crypto_tfm *) tfm, - CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize); @@ -3222,8 +3209,6 @@ static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm, aeadctx->mayverify = VERIFY_HW; break; default: - crypto_tfm_set_flags((struct crypto_tfm *)tfm, - CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize); @@ -3264,8 +3249,6 @@ static int chcr_ccm_setauthsize(struct crypto_aead *tfm, aeadctx->mayverify = VERIFY_HW; break; default: - crypto_tfm_set_flags((struct crypto_tfm *)tfm, - CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize); @@ -3290,8 +3273,6 @@ static int chcr_ccm_common_setkey(struct crypto_aead *aead, ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; } else { - crypto_tfm_set_flags((struct crypto_tfm *)aead, - CRYPTO_TFM_RES_BAD_KEY_LEN); aeadctx->enckey_len 
= 0; return -EINVAL; } @@ -3314,9 +3295,6 @@ static int chcr_aead_ccm_setkey(struct crypto_aead *aead, crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) & CRYPTO_TFM_REQ_MASK); error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen); - crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK); - crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) & - CRYPTO_TFM_RES_MASK); if (error) return error; return chcr_ccm_common_setkey(aead, key, keylen); @@ -3329,8 +3307,6 @@ static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key, int error; if (keylen < 3) { - crypto_tfm_set_flags((struct crypto_tfm *)aead, - CRYPTO_TFM_RES_BAD_KEY_LEN); aeadctx->enckey_len = 0; return -EINVAL; } @@ -3338,9 +3314,6 @@ static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key, crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) & CRYPTO_TFM_REQ_MASK); error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen); - crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK); - crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) & - CRYPTO_TFM_RES_MASK); if (error) return error; keylen -= 3; @@ -3362,9 +3335,6 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key, crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) & CRYPTO_TFM_REQ_MASK); ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen); - crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK); - crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) & - CRYPTO_TFM_RES_MASK); if (ret) goto out; @@ -3380,8 +3350,6 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key, } else if (keylen == AES_KEYSIZE_256) { ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; } else { - crypto_tfm_set_flags((struct crypto_tfm *)aead, - CRYPTO_TFM_RES_BAD_KEY_LEN); pr_err("GCM: Invalid key length %d\n", keylen); ret = -EINVAL; goto out; @@ -3432,16 +3400,11 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key, crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc) & CRYPTO_TFM_REQ_MASK); err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen); - crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK); - crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher) - & CRYPTO_TFM_RES_MASK); if (err) goto out; - if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) { - crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) goto out; - } if (get_alg_config(¶m, max_authsize)) { pr_err("chcr : Unsupported digest size\n"); @@ -3562,16 +3525,12 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc, crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc) & CRYPTO_TFM_REQ_MASK); err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen); - crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK); - crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher) - & CRYPTO_TFM_RES_MASK); if (err) goto out; - if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) { - crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) goto out; - } + subtype = get_aead_subtype(authenc); if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA || subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) { diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c index 029a7354f541..e937605670ac 100644 --- 
a/drivers/crypto/chelsio/chcr_core.c +++ b/drivers/crypto/chelsio/chcr_core.c @@ -132,8 +132,6 @@ static void chcr_dev_init(struct uld_ctx *u_ctx) static int chcr_dev_move(struct uld_ctx *u_ctx) { - struct adapter *adap; - mutex_lock(&drv_data.drv_mutex); if (drv_data.last_dev == u_ctx) { if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev)) @@ -146,8 +144,6 @@ static int chcr_dev_move(struct uld_ctx *u_ctx) list_move(&u_ctx->entry, &drv_data.inact_dev); if (list_empty(&drv_data.act_dev)) drv_data.last_dev = NULL; - adap = padap(&u_ctx->dev); - memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats)); atomic_dec(&drv_data.dev_count); mutex_unlock(&drv_data.drv_mutex); @@ -299,17 +295,21 @@ static int __init chcr_crypto_init(void) static void __exit chcr_crypto_exit(void) { struct uld_ctx *u_ctx, *tmp; + struct adapter *adap; stop_crypto(); - cxgb4_unregister_uld(CXGB4_ULD_CRYPTO); /* Remove all devices from list */ mutex_lock(&drv_data.drv_mutex); list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) { + adap = padap(&u_ctx->dev); + memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats)); list_del(&u_ctx->entry); kfree(u_ctx); } list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) { + adap = padap(&u_ctx->dev); + memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats)); list_del(&u_ctx->entry); kfree(u_ctx); } diff --git a/drivers/crypto/chelsio/chtls/chtls.h b/drivers/crypto/chelsio/chtls/chtls.h index d2bc655ab931..459442704eb1 100644 --- a/drivers/crypto/chelsio/chtls/chtls.h +++ b/drivers/crypto/chelsio/chtls/chtls.h @@ -179,7 +179,10 @@ struct chtls_hws { u32 copied_seq; u64 tx_seq_no; struct tls_scmd scmd; - struct tls12_crypto_info_aes_gcm_128 crypto_info; + union { + struct tls12_crypto_info_aes_gcm_128 aes_gcm_128; + struct tls12_crypto_info_aes_gcm_256 aes_gcm_256; + } crypto_info; }; struct chtls_sock { @@ -482,7 +485,7 @@ int send_tx_flowc_wr(struct sock *sk, int compl, void chtls_tcp_push(struct sock *sk, int flags); int chtls_push_frames(struct chtls_sock *csk, int comp); int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val); -int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 mode); +int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 mode, int cipher_type); void skb_entail(struct sock *sk, struct sk_buff *skb, int flags); unsigned int keyid_to_addr(int start_addr, int keyid); void free_tls_keyid(struct sock *sk); diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c index 72e5b0f65a91..9b2745ad9e38 100644 --- a/drivers/crypto/chelsio/chtls/chtls_cm.c +++ b/drivers/crypto/chelsio/chtls/chtls_cm.c @@ -727,6 +727,14 @@ static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb) return 0; } +static void chtls_purge_wr_queue(struct sock *sk) +{ + struct sk_buff *skb; + + while ((skb = dequeue_wr(sk)) != NULL) + kfree_skb(skb); +} + static void chtls_release_resources(struct sock *sk) { struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); @@ -741,6 +749,11 @@ static void chtls_release_resources(struct sock *sk) kfree_skb(csk->txdata_skb_cache); csk->txdata_skb_cache = NULL; + if (csk->wr_credits != csk->wr_max_credits) { + chtls_purge_wr_queue(sk); + chtls_reset_wr_list(csk); + } + if (csk->l2t_entry) { cxgb4_l2t_release(csk->l2t_entry); csk->l2t_entry = NULL; @@ -1735,6 +1748,7 @@ static void chtls_peer_close(struct sock *sk, struct sk_buff *skb) else sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); } + kfree_skb(skb); } static void chtls_close_con_rpl(struct 
sock *sk, struct sk_buff *skb) @@ -1815,6 +1829,20 @@ static void send_defer_abort_rpl(struct chtls_dev *cdev, struct sk_buff *skb) kfree_skb(skb); } +/* + * Add an skb to the deferred skb queue for processing from process context. + */ +static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev, + defer_handler_t handler) +{ + DEFERRED_SKB_CB(skb)->handler = handler; + spin_lock_bh(&cdev->deferq.lock); + __skb_queue_tail(&cdev->deferq, skb); + if (skb_queue_len(&cdev->deferq) == 1) + schedule_work(&cdev->deferq_task); + spin_unlock_bh(&cdev->deferq.lock); +} + static void send_abort_rpl(struct sock *sk, struct sk_buff *skb, struct chtls_dev *cdev, int status, int queue) { @@ -1829,7 +1857,7 @@ static void send_abort_rpl(struct sock *sk, struct sk_buff *skb, if (!reply_skb) { req->status = (queue << 1); - send_defer_abort_rpl(cdev, skb); + t4_defer_reply(skb, cdev, send_defer_abort_rpl); return; } @@ -1848,20 +1876,6 @@ static void send_abort_rpl(struct sock *sk, struct sk_buff *skb, cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb); } -/* - * Add an skb to the deferred skb queue for processing from process context. - */ -static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev, - defer_handler_t handler) -{ - DEFERRED_SKB_CB(skb)->handler = handler; - spin_lock_bh(&cdev->deferq.lock); - __skb_queue_tail(&cdev->deferq, skb); - if (skb_queue_len(&cdev->deferq) == 1) - schedule_work(&cdev->deferq_task); - spin_unlock_bh(&cdev->deferq.lock); -} - static void chtls_send_abort_rpl(struct sock *sk, struct sk_buff *skb, struct chtls_dev *cdev, int status, int queue) @@ -2062,19 +2076,6 @@ rel_skb: return 0; } -static struct sk_buff *dequeue_wr(struct sock *sk) -{ - struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); - struct sk_buff *skb = csk->wr_skb_head; - - if (likely(skb)) { - /* Don't bother clearing the tail */ - csk->wr_skb_head = WR_SKB_CB(skb)->next_wr; - WR_SKB_CB(skb)->next_wr = NULL; - } - return skb; -} - static void chtls_rx_ack(struct sock *sk, struct sk_buff *skb) { struct cpl_fw4_ack *hdr = cplhdr(skb) + RSS_HDR; diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.h b/drivers/crypto/chelsio/chtls/chtls_cm.h index 129d7ac649a9..3fac0c74a41f 100644 --- a/drivers/crypto/chelsio/chtls/chtls_cm.h +++ b/drivers/crypto/chelsio/chtls/chtls_cm.h @@ -185,6 +185,12 @@ static inline void chtls_kfree_skb(struct sock *sk, struct sk_buff *skb) kfree_skb(skb); } +static inline void chtls_reset_wr_list(struct chtls_sock *csk) +{ + csk->wr_skb_head = NULL; + csk->wr_skb_tail = NULL; +} + static inline void enqueue_wr(struct chtls_sock *csk, struct sk_buff *skb) { WR_SKB_CB(skb)->next_wr = NULL; @@ -197,4 +203,19 @@ static inline void enqueue_wr(struct chtls_sock *csk, struct sk_buff *skb) WR_SKB_CB(csk->wr_skb_tail)->next_wr = skb; csk->wr_skb_tail = skb; } + +static inline struct sk_buff *dequeue_wr(struct sock *sk) +{ + struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); + struct sk_buff *skb = NULL; + + skb = csk->wr_skb_head; + + if (likely(skb)) { + /* Don't bother clearing the tail */ + csk->wr_skb_head = WR_SKB_CB(skb)->next_wr; + WR_SKB_CB(skb)->next_wr = NULL; + } + return skb; +} #endif diff --git a/drivers/crypto/chelsio/chtls/chtls_hw.c b/drivers/crypto/chelsio/chtls/chtls_hw.c index 2a34035d3cfb..f1820aca0d33 100644 --- a/drivers/crypto/chelsio/chtls/chtls_hw.c +++ b/drivers/crypto/chelsio/chtls/chtls_hw.c @@ -208,28 +208,53 @@ static void chtls_rxkey_ivauth(struct _key_ctx *kctx) static int chtls_key_info(struct chtls_sock *csk, struct _key_ctx 
*kctx,
-			  u32 keylen, u32 optname)
+			  u32 keylen, u32 optname,
+			  int cipher_type)
 {
-	unsigned char key[AES_KEYSIZE_128];
-	struct tls12_crypto_info_aes_gcm_128 *gcm_ctx;
+	unsigned char key[AES_MAX_KEY_SIZE];
+	unsigned char *key_p, *salt;
 	unsigned char ghash_h[AEAD_H_SIZE];
-	int ck_size, key_ctx_size;
+	int ck_size, key_ctx_size, kctx_mackey_size, salt_size;
 	struct crypto_aes_ctx aes;
 	int ret;

-	gcm_ctx = (struct tls12_crypto_info_aes_gcm_128 *)
-		  &csk->tlshws.crypto_info;
-
 	key_ctx_size = sizeof(struct _key_ctx) +
 		       roundup(keylen, 16) + AEAD_H_SIZE;

-	if (keylen == AES_KEYSIZE_128) {
-		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
-	} else {
+	/* GCM mode of AES supports 128 and 256 bit encryption, so
+	 * prepare key context based on GCM cipher type
+	 */
+	switch (cipher_type) {
+	case TLS_CIPHER_AES_GCM_128: {
+		struct tls12_crypto_info_aes_gcm_128 *gcm_ctx_128 =
+			(struct tls12_crypto_info_aes_gcm_128 *)
+			&csk->tlshws.crypto_info;
+		memcpy(key, gcm_ctx_128->key, keylen);
+
+		key_p = gcm_ctx_128->key;
+		salt = gcm_ctx_128->salt;
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+		salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
+		kctx_mackey_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
+		break;
+	}
+	case TLS_CIPHER_AES_GCM_256: {
+		struct tls12_crypto_info_aes_gcm_256 *gcm_ctx_256 =
+			(struct tls12_crypto_info_aes_gcm_256 *)
+			&csk->tlshws.crypto_info;
+		memcpy(key, gcm_ctx_256->key, keylen);
+
+		key_p = gcm_ctx_256->key;
+		salt = gcm_ctx_256->salt;
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+		salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
+		kctx_mackey_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
+		break;
+	}
+	default:
 		pr_err("GCM: Invalid key length %d\n", keylen);
 		return -EINVAL;
 	}
-	memcpy(key, gcm_ctx->key, keylen);

 	/* Calculate the H = CIPH(K, 0 repeated 16 times).
 	 * It will go in key context
@@ -249,20 +274,20 @@ static int chtls_key_info(struct chtls_sock *csk,
 		key_ctx = ((key_ctx_size >> 4) << 3);
 		kctx->ctx_hdr = FILL_KEY_CRX_HDR(ck_size,
-						 CHCR_KEYCTX_MAC_KEY_SIZE_128,
+						 kctx_mackey_size,
 						 0, 0, key_ctx);
 		chtls_rxkey_ivauth(kctx);
 	} else {
 		kctx->ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
-						 CHCR_KEYCTX_MAC_KEY_SIZE_128,
+						 kctx_mackey_size,
 						 0, 0, key_ctx_size >> 4);
 	}

-	memcpy(kctx->salt, gcm_ctx->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
-	memcpy(kctx->key, gcm_ctx->key, keylen);
+	memcpy(kctx->salt, salt, salt_size);
+	memcpy(kctx->key, key_p, keylen);
 	memcpy(kctx->key + keylen, ghash_h, AEAD_H_SIZE);
 	/* erase key info from driver */
-	memset(gcm_ctx->key, 0, keylen);
+	memset(key_p, 0, keylen);

 	return 0;
 }
@@ -288,7 +313,8 @@ static void chtls_set_scmd(struct chtls_sock *csk)
 		SCMD_TLS_FRAG_ENABLE_V(1);
 }

-int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 optname)
+int chtls_setkey(struct chtls_sock *csk, u32 keylen,
+		 u32 optname, int cipher_type)
 {
 	struct tls_key_req *kwr;
 	struct chtls_dev *cdev;
@@ -350,9 +376,10 @@ int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 optname)
 	kwr->sc_imm.cmd_more = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_IMM));
 	kwr->sc_imm.len = cpu_to_be32(klen);

+	lock_sock(sk);
 	/* key info */
 	kctx = (struct _key_ctx *)(kwr + 1);
-	ret = chtls_key_info(csk, kctx, keylen, optname);
+	ret = chtls_key_info(csk, kctx, keylen, optname, cipher_type);
 	if (ret)
 		goto out_notcb;

@@ -388,8 +415,10 @@ int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 optname)
 		csk->tlshws.txkey = keyid;
 	}

+	release_sock(sk);
 	return ret;
 out_notcb:
+	release_sock(sk);
 	free_tls_keyid(sk);
 out_nokey:
 	kfree_skb(skb);
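The dispatch chtls_key_info() gains above is easy to isolate: AES-GCM-128 and AES-GCM-256 share one key-context layout and differ only in key and salt sizes, which is why a single switch suffices. A self-contained reduction follows; the my_* container is hypothetical, while the TLS_CIPHER_* constants and tls12_crypto_info_* structures are the ones from the TLS uapi header:

#include <linux/errno.h>
#include <linux/tls.h>
#include <linux/types.h>

struct my_gcm_layout {			/* hypothetical summary container */
	const u8 *key, *salt;
	unsigned int keylen, saltlen;
};

static int my_gcm_layout_fill(int cipher_type, void *crypto_info,
			      struct my_gcm_layout *out)
{
	switch (cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		struct tls12_crypto_info_aes_gcm_128 *gcm = crypto_info;

		out->key = gcm->key;
		out->salt = gcm->salt;
		out->keylen = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
		out->saltlen = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
		return 0;
	}
	case TLS_CIPHER_AES_GCM_256: {
		struct tls12_crypto_info_aes_gcm_256 *gcm = crypto_info;

		out->key = gcm->key;
		out->salt = gcm->salt;
		out->keylen = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
		out->saltlen = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
		return 0;
	}
	default:
		return -EINVAL;	/* reject unknown cipher types up front */
	}
}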
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c
index 18996935d8ba..a038de90b2ea 100644
--- a/drivers/crypto/chelsio/chtls/chtls_main.c
+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
@@ -84,7 +84,6 @@ static int listen_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 static int chtls_start_listen(struct chtls_dev *cdev, struct sock *sk)
 {
 	struct chtls_listen *clisten;
-	int err;

 	if (sk->sk_protocol != IPPROTO_TCP)
 		return -EPROTONOSUPPORT;
@@ -100,10 +99,10 @@ static int chtls_start_listen(struct chtls_dev *cdev, struct sock *sk)
 	clisten->cdev = cdev;
 	clisten->sk = sk;
 	mutex_lock(&notify_mutex);
-	err = raw_notifier_call_chain(&listen_notify_list,
+	raw_notifier_call_chain(&listen_notify_list,
 				      CHTLS_LISTEN_START, clisten);
 	mutex_unlock(&notify_mutex);
-	return err;
+	return 0;
 }

 static void chtls_stop_listen(struct chtls_dev *cdev, struct sock *sk)
@@ -486,6 +485,7 @@ static int do_chtls_setsockopt(struct sock *sk, int optname,
 	struct tls_crypto_info *crypto_info, tmp_crypto_info;
 	struct chtls_sock *csk;
 	int keylen;
+	int cipher_type;
 	int rc = 0;

 	csk = rcu_dereference_sk_user_data(sk);
@@ -509,6 +509,9 @@ static int do_chtls_setsockopt(struct sock *sk, int optname,

 	crypto_info = (struct tls_crypto_info *)&csk->tlshws.crypto_info;

+	/* GCM mode of AES supports 128 and 256 bit encryption, so
+	 * copy keys from user based on GCM cipher type.
+	 */
 	switch (tmp_crypto_info.cipher_type) {
 	case TLS_CIPHER_AES_GCM_128: {
 		/* Obtain version and type from previous copy */
@@ -525,13 +528,30 @@ static int do_chtls_setsockopt(struct sock *sk, int optname,
 		}

 		keylen = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
-		rc = chtls_setkey(csk, keylen, optname);
+		cipher_type = TLS_CIPHER_AES_GCM_128;
+		break;
+	}
+	case TLS_CIPHER_AES_GCM_256: {
+		crypto_info[0] = tmp_crypto_info;
+		rc = copy_from_user((char *)crypto_info + sizeof(*crypto_info),
+				optval + sizeof(*crypto_info),
+				sizeof(struct tls12_crypto_info_aes_gcm_256)
+				- sizeof(*crypto_info));
+
+		if (rc) {
+			rc = -EFAULT;
+			goto out;
+		}
+
+		keylen = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
+		cipher_type = TLS_CIPHER_AES_GCM_256;
 		break;
 	}
 	default:
 		rc = -EINVAL;
 		goto out;
 	}
+	rc = chtls_setkey(csk, keylen, optname, cipher_type);
 out:
 	return rc;
 }
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index 73a899e6f837..f4f18bfc2247 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -110,7 +110,6 @@ static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
 			    unsigned int len)
 {
 	struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
-	unsigned int ret;

 	tctx->keylen = len;

@@ -119,11 +118,9 @@ static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
 		return 0;
 	}

-	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
+	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256)
 		/* not supported at all */
-		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
 		return -EINVAL;
-	}

 	/*
 	 * The requested key size is not supported by HW, do a fallback
@@ -132,20 +129,13 @@ static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
 	tctx->fallback.cip->base.crt_flags |=
 		(tfm->crt_flags & CRYPTO_TFM_REQ_MASK);

-	ret = crypto_cipher_setkey(tctx->fallback.cip, key, len);
-	if (ret) {
-		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
-		tfm->crt_flags |= (tctx->fallback.cip->base.crt_flags &
-				   CRYPTO_TFM_RES_MASK);
-	}
-	return ret;
+	return crypto_cipher_setkey(tctx->fallback.cip, key, len);
 }

 static int geode_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
 				 unsigned int len)
 {
 	struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
-	unsigned int ret;

 	tctx->keylen = len;

@@ -154,11 +144,9 @@ static int geode_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
 		return 0;
 	}

-	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
+	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256)
 		/* not supported at all */
-		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 		return -EINVAL;
-	}

 	/*
 	 * The requested key size is not supported by HW, do a fallback
@@ -168,11 +156,7 @@ static int geode_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
 	crypto_skcipher_set_flags(tctx->fallback.skcipher,
 				  crypto_skcipher_get_flags(tfm) &
 				  CRYPTO_TFM_REQ_MASK);
-	ret = crypto_skcipher_setkey(tctx->fallback.skcipher, key, len);
-	crypto_skcipher_set_flags(tfm,
-				  crypto_skcipher_get_flags(tctx->fallback.skcipher) &
-				  CRYPTO_TFM_RES_MASK);
-	return ret;
+	return crypto_skcipher_setkey(tctx->fallback.skcipher, key, len);
 }

 static void
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index 4e7323884ae3..354836468c5d 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -2507,7 +2507,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		addr = pci_resource_start(pdev, i);
 		size = pci_resource_len(pdev, i);

-		dev->bar[i] = ioremap_nocache(addr, size);
+		dev->bar[i] = ioremap(addr, size);
 		if (!dev->bar[i]) {
err = -ENOMEM; goto err_out_unmap_bars; diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig index c0e7a85fe129..8851161f722f 100644 --- a/drivers/crypto/hisilicon/Kconfig +++ b/drivers/crypto/hisilicon/Kconfig @@ -16,16 +16,22 @@ config CRYPTO_DEV_HISI_SEC config CRYPTO_DEV_HISI_SEC2 tristate "Support for HiSilicon SEC2 crypto block cipher accelerator" - select CRYPTO_BLKCIPHER + select CRYPTO_SKCIPHER select CRYPTO_ALGAPI select CRYPTO_LIB_DES select CRYPTO_DEV_HISI_QM + select CRYPTO_AEAD + select CRYPTO_AUTHENC + select CRYPTO_HMAC + select CRYPTO_SHA1 + select CRYPTO_SHA256 + select CRYPTO_SHA512 depends on PCI && PCI_MSI depends on ARM64 || (COMPILE_TEST && 64BIT) help Support for HiSilicon SEC Engine of version 2 in crypto subsystem. It provides AES, SM4, and 3DES algorithms with ECB - CBC, and XTS cipher mode. + CBC, and XTS cipher mode, and AEAD algorithms. To compile this as a module, choose M here: the module will be called hisi_sec2. @@ -44,7 +50,6 @@ config CRYPTO_DEV_HISI_ZIP depends on ARM64 || (COMPILE_TEST && 64BIT) depends on !CPU_BIG_ENDIAN || COMPILE_TEST select CRYPTO_DEV_HISI_QM - select SG_SPLIT help Support for HiSilicon ZIP Driver diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c index 98f037e6ea3e..5d400d69e8e4 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c +++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c @@ -123,7 +123,7 @@ static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req) ctx = hpre_req->ctx; id = hpre_alloc_req_id(ctx); - if (id < 0) + if (unlikely(id < 0)) return -EINVAL; ctx->req_list[id] = hpre_req; @@ -174,8 +174,8 @@ static struct hisi_qp *hpre_get_qp_and_start(void) } static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req, - struct scatterlist *data, unsigned int len, - int is_src, dma_addr_t *tmp) + struct scatterlist *data, unsigned int len, + int is_src, dma_addr_t *tmp) { struct hpre_ctx *ctx = hpre_req->ctx; struct device *dev = HPRE_DEV(ctx); @@ -190,7 +190,7 @@ static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req, } *tmp = dma_map_single(dev, sg_virt(data), len, dma_dir); - if (dma_mapping_error(dev, *tmp)) { + if (unlikely(dma_mapping_error(dev, *tmp))) { dev_err(dev, "dma map data err!\n"); return -ENOMEM; } @@ -199,8 +199,8 @@ static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req, } static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req, - struct scatterlist *data, unsigned int len, - int is_src, dma_addr_t *tmp) + struct scatterlist *data, unsigned int len, + int is_src, dma_addr_t *tmp) { struct hpre_ctx *ctx = hpre_req->ctx; struct device *dev = HPRE_DEV(ctx); @@ -208,11 +208,11 @@ static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req, int shift; shift = ctx->key_sz - len; - if (shift < 0) + if (unlikely(shift < 0)) return -EINVAL; ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_KERNEL); - if (!ptr) + if (unlikely(!ptr)) return -ENOMEM; if (is_src) { @@ -226,12 +226,12 @@ static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req, } static int hpre_hw_data_init(struct hpre_asym_request *hpre_req, - struct scatterlist *data, unsigned int len, - int is_src, int is_dh) + struct scatterlist *data, unsigned int len, + int is_src, int is_dh) { struct hpre_sqe *msg = &hpre_req->req; struct hpre_ctx *ctx = hpre_req->ctx; - dma_addr_t tmp; + dma_addr_t tmp = 0; int ret; /* when the data is dh's source, we should format it */ @@ -241,7 +241,7 @@ static int 
hpre_hw_data_init(struct hpre_asym_request *hpre_req, else ret = hpre_prepare_dma_buf(hpre_req, data, len, is_src, &tmp); - if (ret) + if (unlikely(ret)) return ret; if (is_src) @@ -253,15 +253,16 @@ static int hpre_hw_data_init(struct hpre_asym_request *hpre_req, } static void hpre_hw_data_clr_all(struct hpre_ctx *ctx, - struct hpre_asym_request *req, - struct scatterlist *dst, struct scatterlist *src) + struct hpre_asym_request *req, + struct scatterlist *dst, + struct scatterlist *src) { struct device *dev = HPRE_DEV(ctx); struct hpre_sqe *sqe = &req->req; dma_addr_t tmp; tmp = le64_to_cpu(sqe->in); - if (!tmp) + if (unlikely(!tmp)) return; if (src) { @@ -274,7 +275,7 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx, } tmp = le64_to_cpu(sqe->out); - if (!tmp) + if (unlikely(!tmp)) return; if (req->dst) { @@ -288,7 +289,7 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx, } static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe, - void **kreq) + void **kreq) { struct hpre_asym_request *req; int err, id, done; @@ -308,7 +309,7 @@ static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe, done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) & HREE_SQE_DONE_MASK; - if (err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE) + if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE)) return 0; return -EINVAL; @@ -375,7 +376,7 @@ static void hpre_alg_cb(struct hisi_qp *qp, void *resp) struct hpre_ctx *ctx = qp->qp_ctx; struct hpre_sqe *sqe = resp; - ctx->req_list[sqe->tag]->cb(ctx, resp); + ctx->req_list[le16_to_cpu(sqe->tag)]->cb(ctx, resp); } static int hpre_ctx_init(struct hpre_ctx *ctx) @@ -454,33 +455,30 @@ static int hpre_dh_compute_value(struct kpp_request *req) int ctr = 0; int ret; - if (!ctx) - return -EINVAL; - ret = hpre_msg_request_set(ctx, req, false); - if (ret) + if (unlikely(ret)) return ret; if (req->src) { ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 1); - if (ret) + if (unlikely(ret)) goto clear_all; } ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 1); - if (ret) + if (unlikely(ret)) goto clear_all; if (ctx->crt_g2_mode && !req->src) - msg->dw0 |= HPRE_ALG_DH_G2; + msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2); else - msg->dw0 |= HPRE_ALG_DH; + msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH); do { ret = hisi_qp_send(ctx->qp, msg); } while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES); /* success */ - if (!ret) + if (likely(!ret)) return -EINPROGRESS; clear_all: @@ -520,12 +518,12 @@ static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params) return -EINVAL; if (hpre_is_dh_params_length_valid(params->p_size << - HPRE_BITS_2_BYTES_SHIFT)) + HPRE_BITS_2_BYTES_SHIFT)) return -EINVAL; sz = ctx->key_sz = params->p_size; ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1, - &ctx->dh.dma_xa_p, GFP_KERNEL); + &ctx->dh.dma_xa_p, GFP_KERNEL); if (!ctx->dh.xa_p) return -ENOMEM; @@ -559,13 +557,12 @@ static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all) hisi_qm_stop_qp(ctx->qp); if (ctx->dh.g) { - memset(ctx->dh.g, 0, sz); dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g); ctx->dh.g = NULL; } if (ctx->dh.xa_p) { - memset(ctx->dh.xa_p, 0, sz); + memzero_explicit(ctx->dh.xa_p, sz); dma_free_coherent(dev, sz << 1, ctx->dh.xa_p, ctx->dh.dma_xa_p); ctx->dh.xa_p = NULL; @@ -661,9 +658,6 @@ static int hpre_rsa_enc(struct akcipher_request *req) int ctr = 0; int ret; - if (!ctx) - return -EINVAL; - /* For 512 and 1536 bits key size, use soft tfm instead 
*/ if (ctx->key_sz == HPRE_RSA_512BITS_KSZ || ctx->key_sz == HPRE_RSA_1536BITS_KSZ) { @@ -673,22 +667,22 @@ static int hpre_rsa_enc(struct akcipher_request *req) return ret; } - if (!ctx->rsa.pubkey) + if (unlikely(!ctx->rsa.pubkey)) return -EINVAL; ret = hpre_msg_request_set(ctx, req, true); - if (ret) + if (unlikely(ret)) return ret; - msg->dw0 |= HPRE_ALG_NC_NCRT; + msg->dw0 |= cpu_to_le32(HPRE_ALG_NC_NCRT); msg->key = cpu_to_le64((u64)ctx->rsa.dma_pubkey); ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0); - if (ret) + if (unlikely(ret)) goto clear_all; ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0); - if (ret) + if (unlikely(ret)) goto clear_all; do { @@ -696,7 +690,7 @@ static int hpre_rsa_enc(struct akcipher_request *req) } while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES); /* success */ - if (!ret) + if (likely(!ret)) return -EINPROGRESS; clear_all: @@ -716,9 +710,6 @@ static int hpre_rsa_dec(struct akcipher_request *req) int ctr = 0; int ret; - if (!ctx) - return -EINVAL; - /* For 512 and 1536 bits key size, use soft tfm instead */ if (ctx->key_sz == HPRE_RSA_512BITS_KSZ || ctx->key_sz == HPRE_RSA_1536BITS_KSZ) { @@ -728,27 +719,29 @@ static int hpre_rsa_dec(struct akcipher_request *req) return ret; } - if (!ctx->rsa.prikey) + if (unlikely(!ctx->rsa.prikey)) return -EINVAL; ret = hpre_msg_request_set(ctx, req, true); - if (ret) + if (unlikely(ret)) return ret; if (ctx->crt_g2_mode) { msg->key = cpu_to_le64((u64)ctx->rsa.dma_crt_prikey); - msg->dw0 |= HPRE_ALG_NC_CRT; + msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | + HPRE_ALG_NC_CRT); } else { msg->key = cpu_to_le64((u64)ctx->rsa.dma_prikey); - msg->dw0 |= HPRE_ALG_NC_NCRT; + msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | + HPRE_ALG_NC_NCRT); } ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0); - if (ret) + if (unlikely(ret)) goto clear_all; ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0); - if (ret) + if (unlikely(ret)) goto clear_all; do { @@ -756,7 +749,7 @@ static int hpre_rsa_dec(struct akcipher_request *req) } while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES); /* success */ - if (!ret) + if (likely(!ret)) return -EINPROGRESS; clear_all: @@ -811,10 +804,8 @@ static int hpre_rsa_set_e(struct hpre_ctx *ctx, const char *value, hpre_rsa_drop_leading_zeros(&ptr, &vlen); - if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) { - ctx->rsa.pubkey = NULL; + if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) return -EINVAL; - } memcpy(ctx->rsa.pubkey + ctx->key_sz - vlen, ptr, vlen); @@ -836,17 +827,17 @@ static int hpre_rsa_set_d(struct hpre_ctx *ctx, const char *value, return 0; } -static int hpre_crt_para_get(char *para, const char *raw, - unsigned int raw_sz, unsigned int para_size) +static int hpre_crt_para_get(char *para, size_t para_sz, + const char *raw, size_t raw_sz) { const char *ptr = raw; size_t len = raw_sz; hpre_rsa_drop_leading_zeros(&ptr, &len); - if (!len || len > para_size) + if (!len || len > para_sz) return -EINVAL; - memcpy(para + para_size - len, ptr, len); + memcpy(para + para_sz - len, ptr, len); return 0; } @@ -864,32 +855,32 @@ static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key) if (!ctx->rsa.crt_prikey) return -ENOMEM; - ret = hpre_crt_para_get(ctx->rsa.crt_prikey, rsa_key->dq, - rsa_key->dq_sz, hlf_ksz); + ret = hpre_crt_para_get(ctx->rsa.crt_prikey, hlf_ksz, + rsa_key->dq, rsa_key->dq_sz); if (ret) goto free_key; offset = hlf_ksz; - ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, rsa_key->dp, - rsa_key->dp_sz, 
hlf_ksz);
+	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
+				rsa_key->dp, rsa_key->dp_sz);
 	if (ret)
 		goto free_key;

 	offset = hlf_ksz * HPRE_CRT_Q;
-	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset,
-				rsa_key->q, rsa_key->q_sz, hlf_ksz);
+	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
+				rsa_key->q, rsa_key->q_sz);
 	if (ret)
 		goto free_key;

 	offset = hlf_ksz * HPRE_CRT_P;
-	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset,
-				rsa_key->p, rsa_key->p_sz, hlf_ksz);
+	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
+				rsa_key->p, rsa_key->p_sz);
 	if (ret)
 		goto free_key;

 	offset = hlf_ksz * HPRE_CRT_INV;
-	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset,
-				rsa_key->qinv, rsa_key->qinv_sz, hlf_ksz);
+	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
+				rsa_key->qinv, rsa_key->qinv_sz);
 	if (ret)
 		goto free_key;

@@ -899,7 +890,7 @@ static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
 free_key:
 	offset = hlf_ksz * HPRE_CRT_PRMS;
-	memset(ctx->rsa.crt_prikey, 0, offset);
+	memzero_explicit(ctx->rsa.crt_prikey, offset);
 	dma_free_coherent(dev, hlf_ksz * HPRE_CRT_PRMS, ctx->rsa.crt_prikey,
 			  ctx->rsa.dma_crt_prikey);
 	ctx->rsa.crt_prikey = NULL;
@@ -924,14 +915,15 @@ static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
 	}

 	if (ctx->rsa.crt_prikey) {
-		memset(ctx->rsa.crt_prikey, 0, half_key_sz * HPRE_CRT_PRMS);
+		memzero_explicit(ctx->rsa.crt_prikey,
+				 half_key_sz * HPRE_CRT_PRMS);
 		dma_free_coherent(dev, half_key_sz * HPRE_CRT_PRMS,
 				  ctx->rsa.crt_prikey, ctx->rsa.dma_crt_prikey);
 		ctx->rsa.crt_prikey = NULL;
 	}

 	if (ctx->rsa.prikey) {
-		memset(ctx->rsa.prikey, 0, ctx->key_sz);
+		memzero_explicit(ctx->rsa.prikey, ctx->key_sz);
 		dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.prikey,
 				  ctx->rsa.dma_prikey);
 		ctx->rsa.prikey = NULL;
@@ -1043,6 +1035,7 @@ static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm)
 static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
 {
 	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
+	int ret;

 	ctx->rsa.soft_tfm = crypto_alloc_akcipher("rsa-generic", 0, 0);
 	if (IS_ERR(ctx->rsa.soft_tfm)) {
@@ -1050,7 +1043,11 @@ static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
 		return PTR_ERR(ctx->rsa.soft_tfm);
 	}

-	return hpre_ctx_init(ctx);
+	ret = hpre_ctx_init(ctx);
+	if (ret)
+		crypto_free_akcipher(ctx->rsa.soft_tfm);
+
+	return ret;
 }

 static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
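The memset()-to-memzero_explicit() conversions above are not cosmetic: a plain memset() on a buffer that is freed immediately afterwards is a dead store the compiler may legally drop, which would leave RSA key material in memory. A minimal illustration of the pattern, with my_free_key as a hypothetical helper:

#include <linux/dma-mapping.h>
#include <linux/string.h>

static void my_free_key(struct device *dev, void *vaddr, dma_addr_t daddr,
			size_t len)
{
	/* memzero_explicit() is guaranteed not to be optimized away */
	memzero_explicit(vaddr, len);
	dma_free_coherent(dev, len, vaddr, daddr);
}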
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 34e0424410bf..401747de67a8 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -73,7 +73,7 @@
 #define HPRE_DBGFS_VAL_MAX_LEN		20
 #define HPRE_PCI_DEVICE_ID		0xa258
 #define HPRE_PCI_VF_DEVICE_ID		0xa259
-#define HPRE_ADDR(qm, offset)		(qm->io_base + (offset))
+#define HPRE_ADDR(qm, offset)		((qm)->io_base + (offset))
 #define HPRE_QM_USR_CFG_MASK		0xfffffffe
 #define HPRE_QM_AXI_CFG_MASK		0xffff
 #define HPRE_QM_VFG_AX_MASK		0xff
@@ -106,18 +106,18 @@ static const char * const hpre_debug_file_name[] = {
 };

 static const struct hpre_hw_error hpre_hw_errors[] = {
-	{ .int_msk = BIT(0), .msg = "hpre_ecc_1bitt_err" },
-	{ .int_msk = BIT(1), .msg = "hpre_ecc_2bit_err" },
-	{ .int_msk = BIT(2), .msg = "hpre_data_wr_err" },
-	{ .int_msk = BIT(3), .msg = "hpre_data_rd_err" },
-	{ .int_msk = BIT(4), .msg = "hpre_bd_rd_err" },
-	{ .int_msk = BIT(5), .msg = "hpre_ooo_2bit_ecc_err" },
-	{ .int_msk = BIT(6), .msg = "hpre_cltr1_htbt_tm_out_err" },
-	{ .int_msk = BIT(7), .msg = "hpre_cltr2_htbt_tm_out_err" },
-	{ .int_msk = BIT(8), .msg = "hpre_cltr3_htbt_tm_out_err" },
-	{ .int_msk = BIT(9), .msg = "hpre_cltr4_htbt_tm_out_err" },
-	{ .int_msk = GENMASK(15, 10), .msg = "hpre_ooo_rdrsp_err" },
-	{ .int_msk = GENMASK(21, 16), .msg = "hpre_ooo_wrrsp_err" },
+	{ .int_msk = BIT(0), .msg = "core_ecc_1bit_err_int_set" },
+	{ .int_msk = BIT(1), .msg = "core_ecc_2bit_err_int_set" },
+	{ .int_msk = BIT(2), .msg = "dat_wb_poison_int_set" },
+	{ .int_msk = BIT(3), .msg = "dat_rd_poison_int_set" },
+	{ .int_msk = BIT(4), .msg = "bd_rd_poison_int_set" },
+	{ .int_msk = BIT(5), .msg = "ooo_ecc_2bit_err_int_set" },
+	{ .int_msk = BIT(6), .msg = "cluster1_shb_timeout_int_set" },
+	{ .int_msk = BIT(7), .msg = "cluster2_shb_timeout_int_set" },
+	{ .int_msk = BIT(8), .msg = "cluster3_shb_timeout_int_set" },
+	{ .int_msk = BIT(9), .msg = "cluster4_shb_timeout_int_set" },
+	{ .int_msk = GENMASK(15, 10), .msg = "ooo_rdrsp_err_int_set" },
+	{ .int_msk = GENMASK(21, 16), .msg = "ooo_wrrsp_err_int_set" },
 	{ /* sentinel */ }
 };

@@ -490,7 +490,7 @@ static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
 		return -EINVAL;
 	}
 	spin_unlock_irq(&file->lock);
-	ret = sprintf(tbuf, "%u\n", val);
+	ret = snprintf(tbuf, HPRE_DBGFS_VAL_MAX_LEN, "%u\n", val);
 	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
 }

@@ -557,7 +557,7 @@ static const struct file_operations hpre_ctrl_debug_fops = {
 static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir,
 				    enum hpre_ctrl_dbgfs_file type, int indx)
 {
-	struct dentry *tmp, *file_dir;
+	struct dentry *file_dir;

 	if (dir)
 		file_dir = dir;
@@ -571,10 +571,8 @@ static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir,
 	dbg->files[indx].debug = dbg;
 	dbg->files[indx].type = type;
 	dbg->files[indx].index = indx;
-	tmp = debugfs_create_file(hpre_debug_file_name[type], 0600, file_dir,
-				  dbg->files + indx, &hpre_ctrl_debug_fops);
-	if (!tmp)
-		return -ENOENT;
+	debugfs_create_file(hpre_debug_file_name[type], 0600, file_dir,
+			    dbg->files + indx, &hpre_ctrl_debug_fops);

 	return 0;
 }
@@ -585,7 +583,6 @@ static int hpre_pf_comm_regs_debugfs_init(struct hpre_debug *debug)
 	struct hisi_qm *qm = &hpre->qm;
 	struct device *dev = &qm->pdev->dev;
 	struct debugfs_regset32 *regset;
-	struct dentry *tmp;

 	regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
 	if (!regset)
@@ -595,10 +592,7 @@ static int hpre_pf_comm_regs_debugfs_init(struct hpre_debug *debug)
 	regset->nregs = ARRAY_SIZE(hpre_com_dfx_regs);
 	regset->base = qm->io_base;

-	tmp = debugfs_create_regset32("regs", 0444, debug->debug_root, regset);
-	if (!tmp)
-		return -ENOENT;
-
+	debugfs_create_regset32("regs", 0444, debug->debug_root, regset);
 	return 0;
 }

@@ -609,15 +603,14 @@ static int hpre_cluster_debugfs_init(struct hpre_debug *debug)
 	struct device *dev = &qm->pdev->dev;
 	char buf[HPRE_DBGFS_VAL_MAX_LEN];
 	struct debugfs_regset32 *regset;
-	struct dentry *tmp_d, *tmp;
+	struct dentry *tmp_d;
 	int i, ret;

 	for (i = 0; i < HPRE_CLUSTERS_NUM; i++) {
-		sprintf(buf, "cluster%d", i);
-
+		ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i);
+		if (ret < 0)
+			return -EINVAL;
 		tmp_d = debugfs_create_dir(buf, debug->debug_root);
-		if (!tmp_d)
-			return -ENOENT;

 		regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
 		if (!regset)
@@ -627,9 +620,7 @@ static int hpre_cluster_debugfs_init(struct hpre_debug *debug)
 		regset->nregs = ARRAY_SIZE(hpre_cluster_dfx_regs);
 		regset->base = qm->io_base + hpre_cluster_offsets[i];

-		tmp =
debugfs_create_regset32("regs", 0444, tmp_d, regset); - if (!tmp) - return -ENOENT; + debugfs_create_regset32("regs", 0444, tmp_d, regset); ret = hpre_create_debugfs_file(debug, tmp_d, HPRE_CLUSTER_CTRL, i + HPRE_CLUSTER_CTRL); if (ret) @@ -668,9 +659,6 @@ static int hpre_debugfs_init(struct hpre *hpre) int ret; dir = debugfs_create_dir(dev_name(dev), hpre_debugfs_root); - if (!dir) - return -ENOENT; - qm->debug.debug_root = dir; ret = hisi_qm_debug_init(qm); @@ -1014,8 +1002,6 @@ static void hpre_register_debugfs(void) return; hpre_debugfs_root = debugfs_create_dir(hpre_name, NULL); - if (IS_ERR_OR_NULL(hpre_debugfs_root)) - hpre_debugfs_root = NULL; } static void hpre_unregister_debugfs(void) diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h index b846d73d9a85..13e2d8d7be94 100644 --- a/drivers/crypto/hisilicon/sec2/sec.h +++ b/drivers/crypto/hisilicon/sec2/sec.h @@ -9,10 +9,12 @@ #include "../qm.h" #include "sec_crypto.h" -/* Cipher resource per hardware SEC queue */ -struct sec_cipher_res { +/* Algorithm resource per hardware SEC queue */ +struct sec_alg_res { u8 *c_ivin; dma_addr_t c_ivin_dma; + u8 *out_mac; + dma_addr_t out_mac_dma; }; /* Cipher request of SEC private */ @@ -21,33 +23,35 @@ struct sec_cipher_req { dma_addr_t c_in_dma; struct hisi_acc_hw_sgl *c_out; dma_addr_t c_out_dma; - u8 *c_ivin; - dma_addr_t c_ivin_dma; struct skcipher_request *sk_req; u32 c_len; bool encrypt; }; +struct sec_aead_req { + u8 *out_mac; + dma_addr_t out_mac_dma; + struct aead_request *aead_req; +}; + /* SEC request of Crypto */ struct sec_req { struct sec_sqe sec_sqe; struct sec_ctx *ctx; struct sec_qp_ctx *qp_ctx; - /* Cipher supported only at present */ struct sec_cipher_req c_req; + struct sec_aead_req aead_req; + int err_type; int req_id; /* Status of the SEC request */ - atomic_t fake_busy; + bool fake_busy; }; /** * struct sec_req_op - Operations for SEC request - * @get_res: Get resources for TFM on the SEC device - * @resource_alloc: Allocate resources for queue context on the SEC device - * @resource_free: Free resources for queue context on the SEC device * @buf_map: DMA map the SGL buffers of the request * @buf_unmap: DMA unmap the SGL buffers of the request * @bd_fill: Fill the SEC queue BD @@ -56,18 +60,25 @@ struct sec_req { * @process: Main processing logic of Skcipher */ struct sec_req_op { - int (*get_res)(struct sec_ctx *ctx, struct sec_req *req); - int (*resource_alloc)(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx); - void (*resource_free)(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx); int (*buf_map)(struct sec_ctx *ctx, struct sec_req *req); void (*buf_unmap)(struct sec_ctx *ctx, struct sec_req *req); void (*do_transfer)(struct sec_ctx *ctx, struct sec_req *req); int (*bd_fill)(struct sec_ctx *ctx, struct sec_req *req); int (*bd_send)(struct sec_ctx *ctx, struct sec_req *req); - void (*callback)(struct sec_ctx *ctx, struct sec_req *req); + void (*callback)(struct sec_ctx *ctx, struct sec_req *req, int err); int (*process)(struct sec_ctx *ctx, struct sec_req *req); }; +/* SEC auth context */ +struct sec_auth_ctx { + dma_addr_t a_key_dma; + u8 *a_key; + u8 a_key_len; + u8 mac_len; + u8 a_alg; + struct crypto_shash *hash_tfm; +}; + /* SEC cipher context which cipher's relatives */ struct sec_cipher_ctx { u8 *c_key; @@ -83,9 +94,9 @@ struct sec_cipher_ctx { /* SEC queue context which defines queue's relatives */ struct sec_qp_ctx { struct hisi_qp *qp; - struct sec_req **req_list; + struct sec_req *req_list[QM_Q_DEPTH]; struct idr req_idr; - 
void *alg_meta_data; + struct sec_alg_res res[QM_Q_DEPTH]; struct sec_ctx *ctx; struct mutex req_lock; struct hisi_acc_sgl_pool *c_in_pool; @@ -93,6 +104,11 @@ struct sec_qp_ctx { atomic_t pending_reqs; }; +enum sec_alg_type { + SEC_SKCIPHER, + SEC_AEAD +}; + /* SEC Crypto TFM context which defines queue and cipher related data */ struct sec_ctx { struct sec_qp_ctx *qp_ctx; @@ -110,7 +126,10 @@ struct sec_ctx { /* Current cyclic index to select a queue for decipher */ atomic_t dec_qcyclic; + + enum sec_alg_type alg_type; struct sec_cipher_ctx c_ctx; + struct sec_auth_ctx a_ctx; }; enum sec_endian { diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 0a5391fff485..a2cfcc9ccd94 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -3,7 +3,11 @@ #include <crypto/aes.h> #include <crypto/algapi.h> +#include <crypto/authenc.h> #include <crypto/des.h> +#include <crypto/hash.h> +#include <crypto/internal/aead.h> +#include <crypto/sha.h> #include <crypto/skcipher.h> #include <crypto/xts.h> #include <linux/crypto.h> @@ -27,6 +31,10 @@ #define SEC_SRC_SGL_OFFSET 7 #define SEC_CKEY_OFFSET 9 #define SEC_CMODE_OFFSET 12 +#define SEC_AKEY_OFFSET 5 +#define SEC_AEAD_ALG_OFFSET 11 +#define SEC_AUTH_OFFSET 6 + #define SEC_FLAG_OFFSET 7 #define SEC_FLAG_MASK 0x0780 #define SEC_TYPE_MASK 0x0F @@ -35,12 +43,19 @@ #define SEC_TOTAL_IV_SZ (SEC_IV_SIZE * QM_Q_DEPTH) #define SEC_SGL_SGE_NR 128 #define SEC_CTX_DEV(ctx) (&(ctx)->sec->qm.pdev->dev) +#define SEC_CIPHER_AUTH 0xfe +#define SEC_AUTH_CIPHER 0x1 +#define SEC_MAX_MAC_LEN 64 +#define SEC_TOTAL_MAC_SZ (SEC_MAX_MAC_LEN * QM_Q_DEPTH) +#define SEC_SQE_LEN_RATE 4 +#define SEC_SQE_CFLAG 2 +#define SEC_SQE_AEAD_FLAG 3 +#define SEC_SQE_DONE 0x1 -static DEFINE_MUTEX(sec_algs_lock); -static unsigned int sec_active_devs; +static atomic_t sec_active_devs; /* Get an en/de-cipher queue cyclically to balance load over queues of TFM */ -static inline int sec_get_queue_id(struct sec_ctx *ctx, struct sec_req *req) +static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req) { if (req->c_req.encrypt) return (u32)atomic_inc_return(&ctx->enc_qcyclic) % @@ -50,7 +65,7 @@ static inline int sec_get_queue_id(struct sec_ctx *ctx, struct sec_req *req) ctx->hlf_q_num; } -static inline void sec_put_queue_id(struct sec_ctx *ctx, struct sec_req *req) +static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req) { if (req->c_req.encrypt) atomic_dec(&ctx->enc_qcyclic); @@ -67,7 +82,7 @@ static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx) req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, 0, QM_Q_DEPTH, GFP_ATOMIC); mutex_unlock(&qp_ctx->req_lock); - if (req_id < 0) { + if (unlikely(req_id < 0)) { dev_err(SEC_CTX_DEV(req->ctx), "alloc req id fail!\n"); return req_id; } @@ -82,7 +97,7 @@ static void sec_free_req_id(struct sec_req *req) struct sec_qp_ctx *qp_ctx = req->qp_ctx; int req_id = req->req_id; - if (req_id < 0 || req_id >= QM_Q_DEPTH) { + if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) { dev_err(SEC_CTX_DEV(req->ctx), "free request id invalid!\n"); return; } @@ -95,36 +110,66 @@ static void sec_free_req_id(struct sec_req *req) mutex_unlock(&qp_ctx->req_lock); } +static int sec_aead_verify(struct sec_req *req, struct sec_qp_ctx *qp_ctx) +{ + struct aead_request *aead_req = req->aead_req.aead_req; + struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req); + u8 *mac_out = qp_ctx->res[req->req_id].out_mac; + size_t
authsize = crypto_aead_authsize(tfm); + u8 *mac = mac_out + SEC_MAX_MAC_LEN; + struct scatterlist *sgl = aead_req->src; + size_t sz; + + sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac, authsize, + aead_req->cryptlen + aead_req->assoclen - + authsize); + if (unlikely(sz != authsize || memcmp(mac_out, mac, sz))) { + dev_err(SEC_CTX_DEV(req->ctx), "aead verify failure!\n"); + return -EBADMSG; + } + + return 0; +} + static void sec_req_cb(struct hisi_qp *qp, void *resp) { struct sec_qp_ctx *qp_ctx = qp->qp_ctx; struct sec_sqe *bd = resp; + struct sec_ctx *ctx; + struct sec_req *req; u16 done, flag; + int err = 0; u8 type; - struct sec_req *req; type = bd->type_cipher_auth & SEC_TYPE_MASK; - if (type == SEC_BD_TYPE2) { - req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)]; - req->err_type = bd->type2.error_type; - - done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK; - flag = (le16_to_cpu(bd->type2.done_flag) & - SEC_FLAG_MASK) >> SEC_FLAG_OFFSET; - if (req->err_type || done != 0x1 || flag != 0x2) - dev_err(SEC_CTX_DEV(req->ctx), - "err_type[%d],done[%d],flag[%d]\n", - req->err_type, done, flag); - } else { + if (unlikely(type != SEC_BD_TYPE2)) { pr_err("err bd type [%d]\n", type); return; } - atomic64_inc(&req->ctx->sec->debug.dfx.recv_cnt); + req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)]; + req->err_type = bd->type2.error_type; + ctx = req->ctx; + done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK; + flag = (le16_to_cpu(bd->type2.done_flag) & + SEC_FLAG_MASK) >> SEC_FLAG_OFFSET; + if (unlikely(req->err_type || done != SEC_SQE_DONE || + (ctx->alg_type == SEC_SKCIPHER && flag != SEC_SQE_CFLAG) || + (ctx->alg_type == SEC_AEAD && flag != SEC_SQE_AEAD_FLAG))) { + dev_err(SEC_CTX_DEV(ctx), + "err_type[%d],done[%d],flag[%d]\n", + req->err_type, done, flag); + err = -EIO; + } + + if (ctx->alg_type == SEC_AEAD && !req->c_req.encrypt) + err = sec_aead_verify(req, qp_ctx); - req->ctx->req_op->buf_unmap(req->ctx, req); + atomic64_inc(&ctx->sec->debug.dfx.recv_cnt); - req->ctx->req_op->callback(req->ctx, req); + ctx->req_op->buf_unmap(ctx, req); + + ctx->req_op->callback(ctx, req, err); } static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req) @@ -137,11 +182,11 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req) mutex_unlock(&qp_ctx->req_lock); atomic64_inc(&ctx->sec->debug.dfx.send_cnt); - if (ret == -EBUSY) + if (unlikely(ret == -EBUSY)) return -ENOBUFS; if (!ret) { - if (atomic_read(&req->fake_busy)) + if (req->fake_busy) ret = -EBUSY; else ret = -EINPROGRESS; @@ -150,6 +195,91 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req) return ret; } +/* Get DMA memory resources */ +static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res) +{ + int i; + + res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ, + &res->c_ivin_dma, GFP_KERNEL); + if (!res->c_ivin) + return -ENOMEM; + + for (i = 1; i < QM_Q_DEPTH; i++) { + res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE; + res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE; + } + + return 0; +} + +static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res) +{ + if (res->c_ivin) + dma_free_coherent(dev, SEC_TOTAL_IV_SZ, + res->c_ivin, res->c_ivin_dma); +} + +static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res) +{ + int i; + + res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ << 1, + &res->out_mac_dma, GFP_KERNEL); + if (!res->out_mac) + return -ENOMEM; + + for (i = 1; i < QM_Q_DEPTH; i++) { + res[i].out_mac_dma = res->out_mac_dma + + i * 
(SEC_MAX_MAC_LEN << 1); + res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1); + } + + return 0; +} + +static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res) +{ + if (res->out_mac) + dma_free_coherent(dev, SEC_TOTAL_MAC_SZ << 1, + res->out_mac, res->out_mac_dma); +} + +static int sec_alg_resource_alloc(struct sec_ctx *ctx, + struct sec_qp_ctx *qp_ctx) +{ + struct device *dev = SEC_CTX_DEV(ctx); + struct sec_alg_res *res = qp_ctx->res; + int ret; + + ret = sec_alloc_civ_resource(dev, res); + if (ret) + return ret; + + if (ctx->alg_type == SEC_AEAD) { + ret = sec_alloc_mac_resource(dev, res); + if (ret) + goto get_fail; + } + + return 0; +get_fail: + sec_free_civ_resource(dev, res); + + return ret; +} + +static void sec_alg_resource_free(struct sec_ctx *ctx, + struct sec_qp_ctx *qp_ctx) +{ + struct device *dev = SEC_CTX_DEV(ctx); + + sec_free_civ_resource(dev, qp_ctx->res); + + if (ctx->alg_type == SEC_AEAD) + sec_free_mac_resource(dev, qp_ctx->res); +} + static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx, int qp_ctx_id, int alg_type) { @@ -173,15 +303,11 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx, atomic_set(&qp_ctx->pending_reqs, 0); idr_init(&qp_ctx->req_idr); - qp_ctx->req_list = kcalloc(QM_Q_DEPTH, sizeof(void *), GFP_ATOMIC); - if (!qp_ctx->req_list) - goto err_destroy_idr; - qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH, SEC_SGL_SGE_NR); if (IS_ERR(qp_ctx->c_in_pool)) { dev_err(dev, "fail to create sgl pool for input!\n"); - goto err_free_req_list; + goto err_destroy_idr; } qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH, @@ -191,7 +317,7 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx, goto err_free_c_in_pool; } - ret = ctx->req_op->resource_alloc(ctx, qp_ctx); + ret = sec_alg_resource_alloc(ctx, qp_ctx); if (ret) goto err_free_c_out_pool; @@ -202,13 +328,11 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx, return 0; err_queue_free: - ctx->req_op->resource_free(ctx, qp_ctx); + sec_alg_resource_free(ctx, qp_ctx); err_free_c_out_pool: hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool); err_free_c_in_pool: hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool); -err_free_req_list: - kfree(qp_ctx->req_list); err_destroy_idr: idr_destroy(&qp_ctx->req_idr); hisi_qm_release_qp(qp); @@ -222,66 +346,42 @@ static void sec_release_qp_ctx(struct sec_ctx *ctx, struct device *dev = SEC_CTX_DEV(ctx); hisi_qm_stop_qp(qp_ctx->qp); - ctx->req_op->resource_free(ctx, qp_ctx); + sec_alg_resource_free(ctx, qp_ctx); hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool); hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool); idr_destroy(&qp_ctx->req_idr); - kfree(qp_ctx->req_list); hisi_qm_release_qp(qp_ctx->qp); } -static int sec_skcipher_init(struct crypto_skcipher *tfm) +static int sec_ctx_base_init(struct sec_ctx *ctx) { - struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); - struct sec_cipher_ctx *c_ctx; struct sec_dev *sec; - struct device *dev; - struct hisi_qm *qm; int i, ret; - crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req)); - sec = sec_find_device(cpu_to_node(smp_processor_id())); if (!sec) { - pr_err("find no Hisilicon SEC device!\n"); + pr_err("Can not find proper Hisilicon SEC device!\n"); return -ENODEV; } ctx->sec = sec; - qm = &sec->qm; - dev = &qm->pdev->dev; - ctx->hlf_q_num = sec->ctx_q_num >> 0x1; + ctx->hlf_q_num = sec->ctx_q_num >> 1; /* Half of queue depth is taken as fake requests limit in the queue. 
*/ - ctx->fake_req_limit = QM_Q_DEPTH >> 0x1; + ctx->fake_req_limit = QM_Q_DEPTH >> 1; ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx), GFP_KERNEL); if (!ctx->qp_ctx) return -ENOMEM; for (i = 0; i < sec->ctx_q_num; i++) { - ret = sec_create_qp_ctx(qm, ctx, i, 0); + ret = sec_create_qp_ctx(&sec->qm, ctx, i, 0); if (ret) goto err_sec_release_qp_ctx; } - c_ctx = &ctx->c_ctx; - c_ctx->ivsize = crypto_skcipher_ivsize(tfm); - if (c_ctx->ivsize > SEC_IV_SIZE) { - dev_err(dev, "get error iv size!\n"); - ret = -EINVAL; - goto err_sec_release_qp_ctx; - } - c_ctx->c_key = dma_alloc_coherent(dev, SEC_MAX_KEY_SIZE, - &c_ctx->c_key_dma, GFP_KERNEL); - if (!c_ctx->c_key) { - ret = -ENOMEM; - goto err_sec_release_qp_ctx; - } - return 0; - err_sec_release_qp_ctx: for (i = i - 1; i >= 0; i--) sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]); @@ -290,17 +390,9 @@ err_sec_release_qp_ctx: return ret; } -static void sec_skcipher_exit(struct crypto_skcipher *tfm) +static void sec_ctx_base_uninit(struct sec_ctx *ctx) { - struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); - struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; - int i = 0; - - if (c_ctx->c_key) { - dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE, - c_ctx->c_key, c_ctx->c_key_dma); - c_ctx->c_key = NULL; - } + int i; for (i = 0; i < ctx->sec->ctx_q_num; i++) sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]); @@ -308,6 +400,85 @@ static void sec_skcipher_exit(struct crypto_skcipher *tfm) kfree(ctx->qp_ctx); } +static int sec_cipher_init(struct sec_ctx *ctx) +{ + struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; + + c_ctx->c_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE, + &c_ctx->c_key_dma, GFP_KERNEL); + if (!c_ctx->c_key) + return -ENOMEM; + + return 0; +} + +static void sec_cipher_uninit(struct sec_ctx *ctx) +{ + struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; + + memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE); + dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE, + c_ctx->c_key, c_ctx->c_key_dma); +} + +static int sec_auth_init(struct sec_ctx *ctx) +{ + struct sec_auth_ctx *a_ctx = &ctx->a_ctx; + + a_ctx->a_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE, + &a_ctx->a_key_dma, GFP_KERNEL); + if (!a_ctx->a_key) + return -ENOMEM; + + return 0; +} + +static void sec_auth_uninit(struct sec_ctx *ctx) +{ + struct sec_auth_ctx *a_ctx = &ctx->a_ctx; + + memzero_explicit(a_ctx->a_key, SEC_MAX_KEY_SIZE); + dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE, + a_ctx->a_key, a_ctx->a_key_dma); +} + +static int sec_skcipher_init(struct crypto_skcipher *tfm) +{ + struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); + int ret; + + ctx = crypto_skcipher_ctx(tfm); + ctx->alg_type = SEC_SKCIPHER; + crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req)); + ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm); + if (ctx->c_ctx.ivsize > SEC_IV_SIZE) { + dev_err(SEC_CTX_DEV(ctx), "get error skcipher iv size!\n"); + return -EINVAL; + } + + ret = sec_ctx_base_init(ctx); + if (ret) + return ret; + + ret = sec_cipher_init(ctx); + if (ret) + goto err_cipher_init; + + return 0; +err_cipher_init: + sec_ctx_base_uninit(ctx); + + return ret; +} + +static void sec_skcipher_uninit(struct crypto_skcipher *tfm) +{ + struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); + + sec_cipher_uninit(ctx); + sec_ctx_base_uninit(ctx); +} + static int sec_skcipher_3des_setkey(struct sec_cipher_ctx *c_ctx, const u32 keylen, const enum sec_cmode c_mode) @@ -420,62 +591,8 @@ GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC) GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS) 
GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC) -static int sec_skcipher_get_res(struct sec_ctx *ctx, - struct sec_req *req) -{ - struct sec_qp_ctx *qp_ctx = req->qp_ctx; - struct sec_cipher_res *c_res = qp_ctx->alg_meta_data; - struct sec_cipher_req *c_req = &req->c_req; - int req_id = req->req_id; - - c_req->c_ivin = c_res[req_id].c_ivin; - c_req->c_ivin_dma = c_res[req_id].c_ivin_dma; - - return 0; -} - -static int sec_skcipher_resource_alloc(struct sec_ctx *ctx, - struct sec_qp_ctx *qp_ctx) -{ - struct device *dev = SEC_CTX_DEV(ctx); - struct sec_cipher_res *res; - int i; - - res = kcalloc(QM_Q_DEPTH, sizeof(struct sec_cipher_res), GFP_KERNEL); - if (!res) - return -ENOMEM; - - res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ, - &res->c_ivin_dma, GFP_KERNEL); - if (!res->c_ivin) { - kfree(res); - return -ENOMEM; - } - - for (i = 1; i < QM_Q_DEPTH; i++) { - res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE; - res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE; - } - qp_ctx->alg_meta_data = res; - - return 0; -} - -static void sec_skcipher_resource_free(struct sec_ctx *ctx, - struct sec_qp_ctx *qp_ctx) -{ - struct sec_cipher_res *res = qp_ctx->alg_meta_data; - struct device *dev = SEC_CTX_DEV(ctx); - - if (!res) - return; - - dma_free_coherent(dev, SEC_TOTAL_IV_SZ, res->c_ivin, res->c_ivin_dma); - kfree(res); -} - -static int sec_skcipher_map(struct device *dev, struct sec_req *req, - struct scatterlist *src, struct scatterlist *dst) +static int sec_cipher_map(struct device *dev, struct sec_req *req, + struct scatterlist *src, struct scatterlist *dst) { struct sec_cipher_req *c_req = &req->c_req; struct sec_qp_ctx *qp_ctx = req->qp_ctx; @@ -509,12 +626,20 @@ static int sec_skcipher_map(struct device *dev, struct sec_req *req, return 0; } +static void sec_cipher_unmap(struct device *dev, struct sec_cipher_req *req, + struct scatterlist *src, struct scatterlist *dst) +{ + if (dst != src) + hisi_acc_sg_buf_unmap(dev, src, req->c_in); + + hisi_acc_sg_buf_unmap(dev, dst, req->c_out); +} + static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req) { - struct sec_cipher_req *c_req = &req->c_req; + struct skcipher_request *sq = req->c_req.sk_req; - return sec_skcipher_map(SEC_CTX_DEV(ctx), req, - c_req->sk_req->src, c_req->sk_req->dst); + return sec_cipher_map(SEC_CTX_DEV(ctx), req, sq->src, sq->dst); } static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req) @@ -523,10 +648,127 @@ static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req) struct sec_cipher_req *c_req = &req->c_req; struct skcipher_request *sk_req = c_req->sk_req; - if (sk_req->dst != sk_req->src) - hisi_acc_sg_buf_unmap(dev, sk_req->src, c_req->c_in); + sec_cipher_unmap(dev, c_req, sk_req->src, sk_req->dst); +} + +static int sec_aead_aes_set_key(struct sec_cipher_ctx *c_ctx, + struct crypto_authenc_keys *keys) +{ + switch (keys->enckeylen) { + case AES_KEYSIZE_128: + c_ctx->c_key_len = SEC_CKEY_128BIT; + break; + case AES_KEYSIZE_192: + c_ctx->c_key_len = SEC_CKEY_192BIT; + break; + case AES_KEYSIZE_256: + c_ctx->c_key_len = SEC_CKEY_256BIT; + break; + default: + pr_err("hisi_sec2: aead aes key error!\n"); + return -EINVAL; + } + memcpy(c_ctx->c_key, keys->enckey, keys->enckeylen); + + return 0; +} + +static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx, + struct crypto_authenc_keys *keys) +{ + struct crypto_shash *hash_tfm = ctx->hash_tfm; + SHASH_DESC_ON_STACK(shash, hash_tfm); + int blocksize, ret; - hisi_acc_sg_buf_unmap(dev, sk_req->dst, c_req->c_out); 
+ if (!keys->authkeylen) { + pr_err("hisi_sec2: aead auth key error!\n"); + return -EINVAL; + } + + blocksize = crypto_shash_blocksize(hash_tfm); + if (keys->authkeylen > blocksize) { + ret = crypto_shash_digest(shash, keys->authkey, + keys->authkeylen, ctx->a_key); + if (ret) { + pr_err("hisi_sec2: aead auth digest error!\n"); + return -EINVAL; + } + ctx->a_key_len = blocksize; + } else { + memcpy(ctx->a_key, keys->authkey, keys->authkeylen); + ctx->a_key_len = keys->authkeylen; + } + + return 0; +} + +static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key, + const u32 keylen, const enum sec_hash_alg a_alg, + const enum sec_calg c_alg, + const enum sec_mac_len mac_len, + const enum sec_cmode c_mode) +{ + struct sec_ctx *ctx = crypto_aead_ctx(tfm); + struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; + struct crypto_authenc_keys keys; + int ret; + + ctx->a_ctx.a_alg = a_alg; + ctx->c_ctx.c_alg = c_alg; + ctx->a_ctx.mac_len = mac_len; + c_ctx->c_mode = c_mode; + + if (crypto_authenc_extractkeys(&keys, key, keylen)) + goto bad_key; + + ret = sec_aead_aes_set_key(c_ctx, &keys); + if (ret) { + dev_err(SEC_CTX_DEV(ctx), "set sec cipher key err!\n"); + goto bad_key; + } + + ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys); + if (ret) { + dev_err(SEC_CTX_DEV(ctx), "set sec auth key err!\n"); + goto bad_key; + } + + return 0; +bad_key: + memzero_explicit(&keys, sizeof(struct crypto_authenc_keys)); + + return -EINVAL; +} + + +#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode) \ +static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, \ + u32 keylen) \ +{ \ + return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\ +} + +GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1, + SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC) +GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256, + SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC) +GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512, + SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC) + +static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req) +{ + struct aead_request *aq = req->aead_req.aead_req; + + return sec_cipher_map(SEC_CTX_DEV(ctx), req, aq->src, aq->dst); +} + +static void sec_aead_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req) +{ + struct device *dev = SEC_CTX_DEV(ctx); + struct sec_cipher_req *cq = &req->c_req; + struct aead_request *aq = req->aead_req.aead_req; + + sec_cipher_unmap(dev, cq, aq->src, aq->dst); } static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req) @@ -534,13 +776,13 @@ static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req) int ret; ret = ctx->req_op->buf_map(ctx, req); - if (ret) + if (unlikely(ret)) return ret; ctx->req_op->do_transfer(ctx, req); ret = ctx->req_op->bd_fill(ctx, req); - if (ret) + if (unlikely(ret)) goto unmap_req_buf; return ret; @@ -559,10 +801,9 @@ static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req) static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req) { struct skcipher_request *sk_req = req->c_req.sk_req; - struct sec_cipher_req *c_req = &req->c_req; + u8 *c_ivin = req->qp_ctx->res[req->req_id].c_ivin; - c_req->c_len = sk_req->cryptlen; - memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize); + memcpy(c_ivin, sk_req->iv, ctx->c_ctx.ivsize); } static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req) @@ -570,14 +811,15 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req) struct sec_cipher_ctx *c_ctx = 
&ctx->c_ctx; struct sec_cipher_req *c_req = &req->c_req; struct sec_sqe *sec_sqe = &req->sec_sqe; - u8 de = 0; u8 scene, sa_type, da_type; u8 bd_type, cipher; + u8 de = 0; memset(sec_sqe, 0, sizeof(struct sec_sqe)); sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma); - sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma); + sec_sqe->type2.c_ivin_addr = + cpu_to_le64(req->qp_ctx->res[req->req_id].c_ivin_dma); sec_sqe->type2.data_src_addr = cpu_to_le64(c_req->c_in_dma); sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma); @@ -611,25 +853,37 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req) return 0; } -static void sec_update_iv(struct sec_req *req) +static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type) { + struct aead_request *aead_req = req->aead_req.aead_req; struct skcipher_request *sk_req = req->c_req.sk_req; u32 iv_size = req->ctx->c_ctx.ivsize; struct scatterlist *sgl; + unsigned int cryptlen; size_t sz; + u8 *iv; if (req->c_req.encrypt) - sgl = sk_req->dst; + sgl = alg_type == SEC_SKCIPHER ? sk_req->dst : aead_req->dst; else - sgl = sk_req->src; + sgl = alg_type == SEC_SKCIPHER ? sk_req->src : aead_req->src; - sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), sk_req->iv, - iv_size, sk_req->cryptlen - iv_size); - if (sz != iv_size) + if (alg_type == SEC_SKCIPHER) { + iv = sk_req->iv; + cryptlen = sk_req->cryptlen; + } else { + iv = aead_req->iv; + cryptlen = aead_req->cryptlen; + } + + sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size, + cryptlen - iv_size); + if (unlikely(sz != iv_size)) dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n"); } -static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req) +static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req, + int err) { struct skcipher_request *sk_req = req->c_req.sk_req; struct sec_qp_ctx *qp_ctx = req->qp_ctx; @@ -638,13 +892,109 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req) sec_free_req_id(req); /* IV output at encryption in CBC mode */ - if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt) - sec_update_iv(req); + if (!err && ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt) + sec_update_iv(req, SEC_SKCIPHER); - if (atomic_cmpxchg(&req->fake_busy, 1, 0) != 1) + if (req->fake_busy) sk_req->base.complete(&sk_req->base, -EINPROGRESS); - sk_req->base.complete(&sk_req->base, req->err_type); + sk_req->base.complete(&sk_req->base, err); +} + +static void sec_aead_copy_iv(struct sec_ctx *ctx, struct sec_req *req) +{ + struct aead_request *aead_req = req->aead_req.aead_req; + u8 *c_ivin = req->qp_ctx->res[req->req_id].c_ivin; + + memcpy(c_ivin, aead_req->iv, ctx->c_ctx.ivsize); +} + +static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir, + struct sec_req *req, struct sec_sqe *sec_sqe) +{ + struct sec_aead_req *a_req = &req->aead_req; + struct sec_cipher_req *c_req = &req->c_req; + struct aead_request *aq = a_req->aead_req; + + sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma); + + sec_sqe->type2.mac_key_alg = + cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE); + + sec_sqe->type2.mac_key_alg |= + cpu_to_le32((u32)((ctx->a_key_len) / + SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET); + + sec_sqe->type2.mac_key_alg |= + cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET); + + sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET; + + if (dir) + sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH; + else + sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER; + + sec_sqe->type2.alen_ivllen =
cpu_to_le32(c_req->c_len + aq->assoclen); + + sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen); + + sec_sqe->type2.mac_addr = + cpu_to_le64(req->qp_ctx->res[req->req_id].out_mac_dma); +} + +static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req) +{ + struct sec_auth_ctx *auth_ctx = &ctx->a_ctx; + struct sec_sqe *sec_sqe = &req->sec_sqe; + int ret; + + ret = sec_skcipher_bd_fill(ctx, req); + if (unlikely(ret)) { + dev_err(SEC_CTX_DEV(ctx), "skcipher bd fill is error!\n"); + return ret; + } + + sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe); + + return 0; +} + +static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err) +{ + struct aead_request *a_req = req->aead_req.aead_req; + struct crypto_aead *tfm = crypto_aead_reqtfm(a_req); + struct sec_cipher_req *c_req = &req->c_req; + size_t authsize = crypto_aead_authsize(tfm); + struct sec_qp_ctx *qp_ctx = req->qp_ctx; + size_t sz; + + atomic_dec(&qp_ctx->pending_reqs); + + if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt) + sec_update_iv(req, SEC_AEAD); + + /* Copy output mac */ + if (!err && c_req->encrypt) { + struct scatterlist *sgl = a_req->dst; + + sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl), + qp_ctx->res[req->req_id].out_mac, + authsize, a_req->cryptlen + + a_req->assoclen); + + if (unlikely(sz != authsize)) { + dev_err(SEC_CTX_DEV(req->ctx), "copy out mac err!\n"); + err = -EINVAL; + } + } + + sec_free_req_id(req); + + if (req->fake_busy) + a_req->base.complete(&a_req->base, -EINPROGRESS); + + a_req->base.complete(&a_req->base, err); } static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req) @@ -653,37 +1003,30 @@ atomic_dec(&qp_ctx->pending_reqs); sec_free_req_id(req); - sec_put_queue_id(ctx, req); + sec_free_queue_id(ctx, req); } static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req) { struct sec_qp_ctx *qp_ctx; - int issue_id, ret; + int queue_id; /* To load balance */ - issue_id = sec_get_queue_id(ctx, req); - qp_ctx = &ctx->qp_ctx[issue_id]; + queue_id = sec_alloc_queue_id(ctx, req); + qp_ctx = &ctx->qp_ctx[queue_id]; req->req_id = sec_alloc_req_id(req, qp_ctx); - if (req->req_id < 0) { - sec_put_queue_id(ctx, req); + if (unlikely(req->req_id < 0)) { + sec_free_queue_id(ctx, req); return req->req_id; } if (ctx->fake_req_limit <= atomic_inc_return(&qp_ctx->pending_reqs)) - atomic_set(&req->fake_busy, 1); + req->fake_busy = true; else - atomic_set(&req->fake_busy, 0); - - ret = ctx->req_op->get_res(ctx, req); - if (ret) { - atomic_dec(&qp_ctx->pending_reqs); - sec_request_uninit(ctx, req); - dev_err(SEC_CTX_DEV(ctx), "get resources failed!\n"); - } + req->fake_busy = false; - return ret; + return 0; } static int sec_process(struct sec_ctx *ctx, struct sec_req *req) @@ -691,20 +1034,20 @@ int ret; ret = sec_request_init(ctx, req); - if (ret) + if (unlikely(ret)) return ret; ret = sec_request_transfer(ctx, req); - if (ret) + if (unlikely(ret)) goto err_uninit_req; /* Output IV at decryption */ if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) - sec_update_iv(req); + sec_update_iv(req, ctx->alg_type); ret = ctx->req_op->bd_send(ctx, req); - if (ret != -EBUSY && ret != -EINPROGRESS) { - dev_err(SEC_CTX_DEV(ctx), "send sec request failed!\n"); + if (unlikely(ret != -EBUSY && ret != -EINPROGRESS)) { + dev_err_ratelimited(SEC_CTX_DEV(ctx), "send sec request failed!\n"); goto
err_send_req; } @@ -712,9 +1055,16 @@ static int sec_process(struct sec_ctx *ctx, struct sec_req *req) err_send_req: /* On failure, restore the IV from the user */ - if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) - memcpy(req->c_req.sk_req->iv, req->c_req.c_ivin, - ctx->c_ctx.ivsize); + if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) { + if (ctx->alg_type == SEC_SKCIPHER) + memcpy(req->c_req.sk_req->iv, + req->qp_ctx->res[req->req_id].c_ivin, + ctx->c_ctx.ivsize); + else + memcpy(req->aead_req.aead_req->iv, + req->qp_ctx->res[req->req_id].c_ivin, + ctx->c_ctx.ivsize); + } sec_request_untransfer(ctx, req); err_uninit_req: @@ -723,10 +1073,7 @@ err_uninit_req: return ret; } -static struct sec_req_op sec_req_ops_tbl = { - .get_res = sec_skcipher_get_res, - .resource_alloc = sec_skcipher_resource_alloc, - .resource_free = sec_skcipher_resource_free, +static const struct sec_req_op sec_skcipher_req_ops = { .buf_map = sec_skcipher_sgl_map, .buf_unmap = sec_skcipher_sgl_unmap, .do_transfer = sec_skcipher_copy_iv, @@ -736,39 +1083,139 @@ .bd_fill = sec_skcipher_bd_fill, .bd_send = sec_bd_send, .callback = sec_skcipher_callback, .process = sec_process, }; +static const struct sec_req_op sec_aead_req_ops = { + .buf_map = sec_aead_sgl_map, + .buf_unmap = sec_aead_sgl_unmap, + .do_transfer = sec_aead_copy_iv, + .bd_fill = sec_aead_bd_fill, + .bd_send = sec_bd_send, + .callback = sec_aead_callback, + .process = sec_process, +}; + static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm) { struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); - ctx->req_op = &sec_req_ops_tbl; + ctx->req_op = &sec_skcipher_req_ops; return sec_skcipher_init(tfm); } static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm) { - sec_skcipher_exit(tfm); + sec_skcipher_uninit(tfm); } -static int sec_skcipher_param_check(struct sec_ctx *ctx, - struct skcipher_request *sk_req) +static int sec_aead_init(struct crypto_aead *tfm) { - u8 c_alg = ctx->c_ctx.c_alg; + struct sec_ctx *ctx = crypto_aead_ctx(tfm); + int ret; + + crypto_aead_set_reqsize(tfm, sizeof(struct sec_req)); + ctx->alg_type = SEC_AEAD; + ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm); + if (ctx->c_ctx.ivsize > SEC_IV_SIZE) { + dev_err(SEC_CTX_DEV(ctx), "get error aead iv size!\n"); + return -EINVAL; + } + + ctx->req_op = &sec_aead_req_ops; + ret = sec_ctx_base_init(ctx); + if (ret) + return ret; + + ret = sec_auth_init(ctx); + if (ret) + goto err_auth_init; + + ret = sec_cipher_init(ctx); + if (ret) + goto err_cipher_init; + + return ret; + +err_cipher_init: + sec_auth_uninit(ctx); +err_auth_init: + sec_ctx_base_uninit(ctx); + + return ret; +} + +static void sec_aead_exit(struct crypto_aead *tfm) +{ + struct sec_ctx *ctx = crypto_aead_ctx(tfm); + + sec_cipher_uninit(ctx); + sec_auth_uninit(ctx); + sec_ctx_base_uninit(ctx); +} + +static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name) +{ + struct sec_ctx *ctx = crypto_aead_ctx(tfm); + struct sec_auth_ctx *auth_ctx = &ctx->a_ctx; + int ret; + + ret = sec_aead_init(tfm); + if (ret) { + pr_err("hisi_sec2: aead init error!\n"); + return ret; + } + + auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0); + if (IS_ERR(auth_ctx->hash_tfm)) { + dev_err(SEC_CTX_DEV(ctx), "aead alloc shash error!\n"); + sec_aead_exit(tfm); + return PTR_ERR(auth_ctx->hash_tfm); + } + + return 0; +} + +static void sec_aead_ctx_exit(struct crypto_aead *tfm) +{ + struct sec_ctx *ctx = crypto_aead_ctx(tfm); + + crypto_free_shash(ctx->a_ctx.hash_tfm); + sec_aead_exit(tfm); +} + +static int sec_aead_sha1_ctx_init(struct crypto_aead *tfm) +{ +
return sec_aead_ctx_init(tfm, "sha1"); +} + +static int sec_aead_sha256_ctx_init(struct crypto_aead *tfm) +{ + return sec_aead_ctx_init(tfm, "sha256"); +} + +static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm) +{ + return sec_aead_ctx_init(tfm, "sha512"); +} + +static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq) +{ + struct skcipher_request *sk_req = sreq->c_req.sk_req; struct device *dev = SEC_CTX_DEV(ctx); + u8 c_alg = ctx->c_ctx.c_alg; - if (!sk_req->src || !sk_req->dst) { + if (unlikely(!sk_req->src || !sk_req->dst)) { dev_err(dev, "skcipher input param error!\n"); return -EINVAL; } - + sreq->c_req.c_len = sk_req->cryptlen; if (c_alg == SEC_CALG_3DES) { - if (sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1)) { + if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) { dev_err(dev, "skcipher 3des input length error!\n"); return -EINVAL; } return 0; } else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) { - if (sk_req->cryptlen & (AES_BLOCK_SIZE - 1)) { + if (unlikely(sk_req->cryptlen & (AES_BLOCK_SIZE - 1))) { dev_err(dev, "skcipher aes input length error!\n"); return -EINVAL; } @@ -789,14 +1236,14 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt) if (!sk_req->cryptlen) return 0; - ret = sec_skcipher_param_check(ctx, sk_req); - if (ret) - return ret; - req->c_req.sk_req = sk_req; req->c_req.encrypt = encrypt; req->ctx = ctx; + ret = sec_skcipher_param_check(ctx, req); + if (unlikely(ret)) + return -EINVAL; + return ctx->req_op->process(ctx, req); } @@ -837,7 +1284,7 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req) SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \ sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size) -static struct skcipher_alg sec_algs[] = { +static struct skcipher_alg sec_skciphers[] = { SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb, AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, 0) @@ -867,23 +1314,133 @@ static struct skcipher_alg sec_algs[] = { AES_BLOCK_SIZE, AES_BLOCK_SIZE) }; +static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq) +{ + u8 c_alg = ctx->c_ctx.c_alg; + struct aead_request *req = sreq->aead_req.aead_req; + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + size_t authsize = crypto_aead_authsize(tfm); + + if (unlikely(!req->src || !req->dst || !req->cryptlen)) { + dev_err(SEC_CTX_DEV(ctx), "aead input param error!\n"); + return -EINVAL; + } + + /* Support AES only */ + if (unlikely(c_alg != SEC_CALG_AES)) { + dev_err(SEC_CTX_DEV(ctx), "aead crypto alg error!\n"); + return -EINVAL; + + } + if (sreq->c_req.encrypt) + sreq->c_req.c_len = req->cryptlen; + else + sreq->c_req.c_len = req->cryptlen - authsize; + + if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) { + dev_err(SEC_CTX_DEV(ctx), "aead crypto length error!\n"); + return -EINVAL; + } + + return 0; +} + +static int sec_aead_crypto(struct aead_request *a_req, bool encrypt) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(a_req); + struct sec_req *req = aead_request_ctx(a_req); + struct sec_ctx *ctx = crypto_aead_ctx(tfm); + int ret; + + req->aead_req.aead_req = a_req; + req->c_req.encrypt = encrypt; + req->ctx = ctx; + + ret = sec_aead_param_check(ctx, req); + if (unlikely(ret)) + return -EINVAL; + + return ctx->req_op->process(ctx, req); +} + +static int sec_aead_encrypt(struct aead_request *a_req) +{ + return sec_aead_crypto(a_req, true); +} + +static int sec_aead_decrypt(struct aead_request *a_req) +{ + return sec_aead_crypto(a_req, false); +} 
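For reference, the three authenc() AEADs defined just below are consumed through the generic kernel crypto API rather than called directly. A minimal sketch of a synchronous caller follows (hypothetical consumer, not part of this patch): it assumes an in-place buffer laid out as AAD followed by plaintext with spare room for the HMAC-SHA1 tag, a key blob already packed in the crypto_authenc rtattr encoding expected by crypto_aead_setkey(), and a cryptlen that is a multiple of the AES block size, as enforced by sec_aead_param_check() above.

#include <crypto/aead.h>
#include <crypto/sha.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

static int demo_authenc_sha1_encrypt(const u8 *key, unsigned int keylen,
				     u8 *buf, unsigned int assoclen,
				     unsigned int cryptlen, u8 *iv)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	/* Resolves to the "hisi_sec_" implementation below when this
	 * driver wins the priority-based algorithm selection.
	 */
	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* key/keylen: crypto_authenc-encoded blob (auth key + enc key) */
	ret = crypto_aead_setkey(tfm, key, keylen);
	if (ret)
		goto out_free_tfm;

	ret = crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
	if (ret)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	/* In-place operation: buf holds assoclen bytes of AAD, cryptlen
	 * bytes of plaintext, then SHA1_DIGEST_SIZE bytes for the tag.
	 */
	sg_init_one(&sg, buf, assoclen + cryptlen + SHA1_DIGEST_SIZE);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);
	aead_request_set_crypt(req, &sg, &sg, cryptlen, iv);
	aead_request_set_ad(req, assoclen);

	/* crypto_wait_req() absorbs the -EBUSY/-EINPROGRESS handshake that
	 * the driver's fake_busy completion path above signals.
	 */
	ret = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return ret;
}

Note how this pairs with sec_bd_send() earlier in the patch: a full hardware queue yields -ENOBUFS, a queue past fake_req_limit yields -EBUSY with a later -EINPROGRESS completion, and the normal case returns -EINPROGRESS, which is exactly the contract crypto_wait_req() expects from a backlogged request.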
+ +#define SEC_AEAD_GEN_ALG(sec_cra_name, sec_set_key, ctx_init,\ + ctx_exit, blk_size, iv_size, max_authsize)\ +{\ + .base = {\ + .cra_name = sec_cra_name,\ + .cra_driver_name = "hisi_sec_"sec_cra_name,\ + .cra_priority = SEC_PRIORITY,\ + .cra_flags = CRYPTO_ALG_ASYNC,\ + .cra_blocksize = blk_size,\ + .cra_ctxsize = sizeof(struct sec_ctx),\ + .cra_module = THIS_MODULE,\ + },\ + .init = ctx_init,\ + .exit = ctx_exit,\ + .setkey = sec_set_key,\ + .decrypt = sec_aead_decrypt,\ + .encrypt = sec_aead_encrypt,\ + .ivsize = iv_size,\ + .maxauthsize = max_authsize,\ +} + +#define SEC_AEAD_ALG(algname, keyfunc, aead_init, blksize, ivsize, authsize)\ + SEC_AEAD_GEN_ALG(algname, keyfunc, aead_init,\ + sec_aead_ctx_exit, blksize, ivsize, authsize) + +static struct aead_alg sec_aeads[] = { + SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))", + sec_setkey_aes_cbc_sha1, sec_aead_sha1_ctx_init, + AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA1_DIGEST_SIZE), + + SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))", + sec_setkey_aes_cbc_sha256, sec_aead_sha256_ctx_init, + AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA256_DIGEST_SIZE), + + SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))", + sec_setkey_aes_cbc_sha512, sec_aead_sha512_ctx_init, + AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA512_DIGEST_SIZE), +}; + int sec_register_to_crypto(void) { int ret = 0; /* To avoid repeated registration */ - mutex_lock(&sec_algs_lock); - if (++sec_active_devs == 1) - ret = crypto_register_skciphers(sec_algs, ARRAY_SIZE(sec_algs)); - mutex_unlock(&sec_algs_lock); + if (atomic_add_return(1, &sec_active_devs) == 1) { + ret = crypto_register_skciphers(sec_skciphers, + ARRAY_SIZE(sec_skciphers)); + if (ret) + return ret; + + ret = crypto_register_aeads(sec_aeads, ARRAY_SIZE(sec_aeads)); + if (ret) + goto reg_aead_fail; + } + + return ret; + +reg_aead_fail: + crypto_unregister_skciphers(sec_skciphers, ARRAY_SIZE(sec_skciphers)); return ret; } void sec_unregister_from_crypto(void) { - mutex_lock(&sec_algs_lock); - if (--sec_active_devs == 0) - crypto_unregister_skciphers(sec_algs, ARRAY_SIZE(sec_algs)); - mutex_unlock(&sec_algs_lock); + if (atomic_sub_return(1, &sec_active_devs) == 0) { + crypto_unregister_skciphers(sec_skciphers, + ARRAY_SIZE(sec_skciphers)); + crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads)); + } } diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h index 097dce828340..b2786e17d8fe 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.h +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h @@ -14,6 +14,18 @@ enum sec_calg { SEC_CALG_SM4 = 0x3, }; +enum sec_hash_alg { + SEC_A_HMAC_SHA1 = 0x10, + SEC_A_HMAC_SHA256 = 0x11, + SEC_A_HMAC_SHA512 = 0x15, +}; + +enum sec_mac_len { + SEC_HMAC_SHA1_MAC = 20, + SEC_HMAC_SHA256_MAC = 32, + SEC_HMAC_SHA512_MAC = 64, +}; + enum sec_cmode { SEC_CMODE_ECB = 0x0, SEC_CMODE_CBC = 0x1, @@ -34,6 +46,12 @@ enum sec_bd_type { SEC_BD_TYPE2 = 0x2, }; +enum sec_auth { + SEC_NO_AUTH = 0x0, + SEC_AUTH_TYPE1 = 0x1, + SEC_AUTH_TYPE2 = 0x2, +}; + enum sec_cipher_dir { SEC_CIPHER_ENC = 0x1, SEC_CIPHER_DEC = 0x2, @@ -48,8 +66,8 @@ enum sec_addr_type { struct sec_sqe_type2 { /* - * mac_len: 0~5 bits - * a_key_len: 6~10 bits + * mac_len: 0~4 bits + * a_key_len: 5~10 bits * a_alg: 11~16 bits */ __le32 mac_key_alg; diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index ab742dfbab99..2bbaf1e2dae7 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -32,6 +32,7 @@ #define SEC_PF_DEF_Q_NUM 64
#define SEC_PF_DEF_Q_BASE 0 #define SEC_CTX_Q_NUM_DEF 24 +#define SEC_CTX_Q_NUM_MAX 32 #define SEC_CTRL_CNT_CLR_CE 0x301120 #define SEC_CTRL_CNT_CLR_CE_BIT BIT(0) @@ -221,7 +222,7 @@ static int sec_ctx_q_num_set(const char *val, const struct kernel_param *kp) if (ret) return -EINVAL; - if (!ctx_q_num || ctx_q_num > QM_Q_DEPTH || ctx_q_num & 0x1) { + if (!ctx_q_num || ctx_q_num > SEC_CTX_Q_NUM_MAX || ctx_q_num & 0x1) { pr_err("ctx queue num[%u] is invalid!\n", ctx_q_num); return -EINVAL; } @@ -235,7 +236,7 @@ static const struct kernel_param_ops sec_ctx_q_num_ops = { }; static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF; module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444); -MODULE_PARM_DESC(ctx_q_num, "Number of queue in ctx (2, 4, 6, ..., 1024)"); +MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (24 default, 2, 4, ..., 32)"); static const struct pci_device_id sec_dev_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PF_PCI_DEVICE_ID) }, @@ -608,13 +609,13 @@ static const struct file_operations sec_dbg_fops = { .write = sec_debug_write, }; -static int debugfs_atomic64_t_get(void *data, u64 *val) +static int sec_debugfs_atomic64_get(void *data, u64 *val) { - *val = atomic64_read((atomic64_t *)data); - return 0; + *val = atomic64_read((atomic64_t *)data); + return 0; } -DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic64_t_ro, debugfs_atomic64_t_get, NULL, - "%lld\n"); +DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get, + NULL, "%lld\n"); static int sec_core_debug_init(struct sec_dev *sec) { @@ -636,11 +637,11 @@ static int sec_core_debug_init(struct sec_dev *sec) debugfs_create_regset32("regs", 0444, tmp_d, regset); - debugfs_create_file("send_cnt", 0444, tmp_d, &dfx->send_cnt, - &fops_atomic64_t_ro); + debugfs_create_file("send_cnt", 0444, tmp_d, + &dfx->send_cnt, &sec_atomic64_ops); - debugfs_create_file("recv_cnt", 0444, tmp_d, &dfx->recv_cnt, - &fops_atomic64_t_ro); + debugfs_create_file("recv_cnt", 0444, tmp_d, + &dfx->recv_cnt, &sec_atomic64_ops); return 0; } diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c index 012023c347b1..0e8c7e324fb4 100644 --- a/drivers/crypto/hisilicon/sgl.c +++ b/drivers/crypto/hisilicon/sgl.c @@ -202,18 +202,21 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, dma_addr_t curr_sgl_dma = 0; struct acc_hw_sge *curr_hw_sge; struct scatterlist *sg; - int i, ret, sg_n; + int i, sg_n, sg_n_mapped; if (!dev || !sgl || !pool || !hw_sgl_dma) return ERR_PTR(-EINVAL); sg_n = sg_nents(sgl); - if (sg_n > pool->sge_nr) + + sg_n_mapped = dma_map_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL); + if (!sg_n_mapped) return ERR_PTR(-EINVAL); - ret = dma_map_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL); - if (!ret) + if (sg_n_mapped > pool->sge_nr) { + dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL); return ERR_PTR(-EINVAL); + } curr_hw_sgl = acc_get_sgl(pool, index, &curr_sgl_dma); if (IS_ERR(curr_hw_sgl)) { @@ -224,7 +227,7 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, curr_hw_sgl->entry_length_in_sgl = cpu_to_le16(pool->sge_nr); curr_hw_sge = curr_hw_sgl->sge_entries; - for_each_sg(sgl, sg, sg_n, i) { + for_each_sg(sgl, sg, sg_n_mapped, i) { sg_map_to_hw_sg(sg, curr_hw_sge); inc_hw_sgl_sge(curr_hw_sgl); curr_hw_sge++; @@ -260,7 +263,3 @@ void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl, hw_sgl->entry_length_in_sgl = 0; } EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_unmap); - -MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>"); -MODULE_DESCRIPTION("HiSilicon Accelerator SGL support"); diff --git 
a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h index 79fc4dd3fe00..bc1db26598bb 100644 --- a/drivers/crypto/hisilicon/zip/zip.h +++ b/drivers/crypto/hisilicon/zip/zip.h @@ -11,6 +11,10 @@ /* hisi_zip_sqe dw3 */ #define HZIP_BD_STATUS_M GENMASK(7, 0) +/* hisi_zip_sqe dw7 */ +#define HZIP_IN_SGE_DATA_OFFSET_M GENMASK(23, 0) +/* hisi_zip_sqe dw8 */ +#define HZIP_OUT_SGE_DATA_OFFSET_M GENMASK(23, 0) /* hisi_zip_sqe dw9 */ #define HZIP_REQ_TYPE_M GENMASK(7, 0) #define HZIP_ALG_TYPE_ZLIB 0x02 diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c index 795428c1d07e..9815d5e3ccd0 100644 --- a/drivers/crypto/hisilicon/zip/zip_crypto.c +++ b/drivers/crypto/hisilicon/zip/zip_crypto.c @@ -46,10 +46,8 @@ enum hisi_zip_alg_type { struct hisi_zip_req { struct acomp_req *req; - struct scatterlist *src; - struct scatterlist *dst; - size_t slen; - size_t dlen; + int sskip; + int dskip; struct hisi_acc_hw_sgl *hw_src; struct hisi_acc_hw_sgl *hw_dst; dma_addr_t dma_src; @@ -119,13 +117,15 @@ static void hisi_zip_config_tag(struct hisi_zip_sqe *sqe, u32 tag) static void hisi_zip_fill_sqe(struct hisi_zip_sqe *sqe, u8 req_type, dma_addr_t s_addr, dma_addr_t d_addr, u32 slen, - u32 dlen) + u32 dlen, int sskip, int dskip) { memset(sqe, 0, sizeof(struct hisi_zip_sqe)); - sqe->input_data_length = slen; + sqe->input_data_length = slen - sskip; + sqe->dw7 = FIELD_PREP(HZIP_IN_SGE_DATA_OFFSET_M, sskip); + sqe->dw8 = FIELD_PREP(HZIP_OUT_SGE_DATA_OFFSET_M, dskip); sqe->dw9 = FIELD_PREP(HZIP_REQ_TYPE_M, req_type); - sqe->dest_avail_out = dlen; + sqe->dest_avail_out = dlen - dskip; sqe->source_addr_l = lower_32_bits(s_addr); sqe->source_addr_h = upper_32_bits(s_addr); sqe->dest_addr_l = lower_32_bits(d_addr); @@ -327,11 +327,6 @@ static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx, { struct hisi_zip_req_q *req_q = &qp_ctx->req_q; - if (qp_ctx->qp->alg_type == HZIP_ALG_TYPE_COMP) - kfree(req->dst); - else - kfree(req->src); - write_lock(&req_q->req_lock); clear_bit(req->req_id, req_q->req_bitmap); memset(req, 0, sizeof(struct hisi_zip_req)); @@ -359,8 +354,8 @@ static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data) } dlen = sqe->produced; - hisi_acc_sg_buf_unmap(dev, req->src, req->hw_src); - hisi_acc_sg_buf_unmap(dev, req->dst, req->hw_dst); + hisi_acc_sg_buf_unmap(dev, acomp_req->src, req->hw_src); + hisi_acc_sg_buf_unmap(dev, acomp_req->dst, req->hw_dst); head_size = (qp->alg_type == 0) ? TO_HEAD_SIZE(qp->req_type) : 0; acomp_req->dlen = dlen + head_size; @@ -454,20 +449,6 @@ static size_t get_comp_head_size(struct scatterlist *src, u8 req_type) } } -static int get_sg_skip_bytes(struct scatterlist *sgl, size_t bytes, - size_t remains, struct scatterlist **out) -{ -#define SPLIT_NUM 2 - size_t split_sizes[SPLIT_NUM]; - int out_mapped_nents[SPLIT_NUM]; - - split_sizes[0] = bytes; - split_sizes[1] = remains; - - return sg_split(sgl, 0, 0, SPLIT_NUM, split_sizes, out, - out_mapped_nents, GFP_KERNEL); -} - static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req, struct hisi_zip_qp_ctx *qp_ctx, size_t head_size, bool is_comp) @@ -475,31 +456,7 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req, struct hisi_zip_req_q *req_q = &qp_ctx->req_q; struct hisi_zip_req *q = req_q->q; struct hisi_zip_req *req_cache; - struct scatterlist *out[2]; - struct scatterlist *sgl; - size_t len; - int ret, req_id; - - /* - * remove/add zlib/gzip head, as hardware operations do not include - * comp head. 
so split req->src to get sgl without heads in acomp, or - * add comp head to req->dst ahead of that hardware output compressed - * data in sgl splited from req->dst without comp head. - */ - if (is_comp) { - sgl = req->dst; - len = req->dlen - head_size; - } else { - sgl = req->src; - len = req->slen - head_size; - } - - ret = get_sg_skip_bytes(sgl, head_size, len, out); - if (ret) - return ERR_PTR(ret); - - /* sgl for comp head is useless, so free it now */ - kfree(out[0]); + int req_id; write_lock(&req_q->req_lock); @@ -507,7 +464,6 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req, if (req_id >= req_q->size) { write_unlock(&req_q->req_lock); dev_dbg(&qp_ctx->qp->qm->pdev->dev, "req cache is full!\n"); - kfree(out[1]); return ERR_PTR(-EBUSY); } set_bit(req_id, req_q->req_bitmap); @@ -515,16 +471,13 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req, req_cache = q + req_id; req_cache->req_id = req_id; req_cache->req = req; + if (is_comp) { - req_cache->src = req->src; - req_cache->dst = out[1]; - req_cache->slen = req->slen; - req_cache->dlen = req->dlen - head_size; + req_cache->sskip = 0; + req_cache->dskip = head_size; } else { - req_cache->src = out[1]; - req_cache->dst = req->dst; - req_cache->slen = req->slen - head_size; - req_cache->dlen = req->dlen; + req_cache->sskip = head_size; + req_cache->dskip = 0; } write_unlock(&req_q->req_lock); @@ -536,6 +489,7 @@ static int hisi_zip_do_work(struct hisi_zip_req *req, struct hisi_zip_qp_ctx *qp_ctx) { struct hisi_zip_sqe *zip_sqe = &qp_ctx->zip_sqe; + struct acomp_req *a_req = req->req; struct hisi_qp *qp = qp_ctx->qp; struct device *dev = &qp->qm->pdev->dev; struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool; @@ -543,16 +497,16 @@ static int hisi_zip_do_work(struct hisi_zip_req *req, dma_addr_t output; int ret; - if (!req->src || !req->slen || !req->dst || !req->dlen) + if (!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen) return -EINVAL; - req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, req->src, pool, + req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool, req->req_id << 1, &input); if (IS_ERR(req->hw_src)) return PTR_ERR(req->hw_src); req->dma_src = input; - req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, req->dst, pool, + req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->dst, pool, (req->req_id << 1) + 1, &output); if (IS_ERR(req->hw_dst)) { @@ -561,8 +515,8 @@ static int hisi_zip_do_work(struct hisi_zip_req *req, } req->dma_dst = output; - hisi_zip_fill_sqe(zip_sqe, qp->req_type, input, output, req->slen, - req->dlen); + hisi_zip_fill_sqe(zip_sqe, qp->req_type, input, output, a_req->slen, + a_req->dlen, req->sskip, req->dskip); hisi_zip_config_buf_type(zip_sqe, HZIP_SGL); hisi_zip_config_tag(zip_sqe, req->req_id); @@ -574,9 +528,9 @@ static int hisi_zip_do_work(struct hisi_zip_req *req, return -EINPROGRESS; err_unmap_output: - hisi_acc_sg_buf_unmap(dev, req->dst, req->hw_dst); + hisi_acc_sg_buf_unmap(dev, a_req->dst, req->hw_dst); err_unmap_input: - hisi_acc_sg_buf_unmap(dev, req->src, req->hw_src); + hisi_acc_sg_buf_unmap(dev, a_req->src, req->hw_src); return ret; } diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c index fe4cc8babe1c..25d5227f74a1 100644 --- a/drivers/crypto/img-hash.c +++ b/drivers/crypto/img-hash.c @@ -332,10 +332,10 @@ static int img_hash_dma_init(struct img_hash_dev *hdev) struct dma_slave_config dma_conf; int err = -EINVAL; - hdev->dma_lch = dma_request_slave_channel(hdev->dev, "tx"); - if (!hdev->dma_lch) { + 
hdev->dma_lch = dma_request_chan(hdev->dev, "tx"); + if (IS_ERR(hdev->dma_lch)) { dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n"); - return -EBUSY; + return PTR_ERR(hdev->dma_lch); } dma_conf.direction = DMA_MEM_TO_DEV; dma_conf.dst_addr = hdev->bus_addr; diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 64894d8b442a..2cb53fbae841 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -501,8 +501,8 @@ static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv) writel(upper_32_bits(priv->ring[i].cdr.base_dma), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI); - writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 14) | - priv->config.cd_size, + writel(EIP197_xDR_DESC_MODE_64BIT | EIP197_CDR_DESC_MODE_ADCP | + (priv->config.cd_offset << 14) | priv->config.cd_size, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE); writel(((cd_fetch_cnt * (cd_size_rnd << priv->hwconfig.hwdataw)) << 16) | @@ -974,16 +974,18 @@ int safexcel_invalidate_cache(struct crypto_async_request *async, { struct safexcel_command_desc *cdesc; struct safexcel_result_desc *rdesc; + struct safexcel_token *dmmy; int ret = 0; /* Prepare command descriptor */ - cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma); + cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma, + &dmmy); if (IS_ERR(cdesc)) return PTR_ERR(cdesc); cdesc->control_data.type = EIP197_TYPE_EXTENDED; cdesc->control_data.options = 0; - cdesc->control_data.refresh = 0; + cdesc->control_data.context_lo &= ~EIP197_CONTEXT_SIZE_MASK; cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR; /* Prepare result descriptor */ @@ -1331,6 +1333,7 @@ static void safexcel_configure(struct safexcel_crypto_priv *priv) priv->config.cd_size = EIP197_CD64_FETCH_SIZE; priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask; + priv->config.cdsh_offset = (EIP197_MAX_TOKENS + mask) & ~mask; /* res token is behind the descr, but ofs must be rounded to buswdth */ priv->config.res_offset = (EIP197_RD64_FETCH_SIZE + mask) & ~mask; @@ -1341,6 +1344,7 @@ static void safexcel_configure(struct safexcel_crypto_priv *priv) /* convert dwords to bytes */ priv->config.cd_offset *= sizeof(u32); + priv->config.cdsh_offset *= sizeof(u32); priv->config.rd_offset *= sizeof(u32); priv->config.res_offset *= sizeof(u32); } diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index b4624b5687ce..94016c505abb 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h @@ -40,7 +40,8 @@ /* Static configuration */ #define EIP197_DEFAULT_RING_SIZE 400 -#define EIP197_MAX_TOKENS 19 +#define EIP197_EMB_TOKENS 4 /* Pad CD to 16 dwords */ +#define EIP197_MAX_TOKENS 16 #define EIP197_MAX_RINGS 4 #define EIP197_FETCH_DEPTH 2 #define EIP197_MAX_BATCH_SZ 64 @@ -207,6 +208,7 @@ /* EIP197_HIA_xDR_DESC_SIZE */ #define EIP197_xDR_DESC_MODE_64BIT BIT(31) +#define EIP197_CDR_DESC_MODE_ADCP BIT(30) /* EIP197_HIA_xDR_DMA_CFG */ #define EIP197_HIA_xDR_WR_RES_BUF BIT(22) @@ -277,9 +279,9 @@ #define EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(n) ((n) << 16) #define EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(n) (((n) & 0x7) << 20) #define EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(n) ((n) << 24) -#define EIP197_HIA_DFE_CFG_DIS_DEBUG (BIT(31) | BIT(29)) +#define EIP197_HIA_DFE_CFG_DIS_DEBUG GENMASK(31, 29) #define EIP197_HIA_DSE_CFG_EN_SINGLE_WR BIT(29) -#define EIP197_HIA_DSE_CFG_DIS_DEBUG 
BIT(31) +#define EIP197_HIA_DSE_CFG_DIS_DEBUG GENMASK(31, 30) /* EIP197_HIA_DFE/DSE_THR_CTRL */ #define EIP197_DxE_THR_CTRL_EN BIT(30) @@ -553,6 +555,8 @@ static inline void eip197_noop_token(struct safexcel_token *token) { token->opcode = EIP197_TOKEN_OPCODE_NOOP; token->packet_length = BIT(2); + token->stat = 0; + token->instructions = 0; } /* Instructions */ @@ -574,14 +578,13 @@ struct safexcel_control_data_desc { u16 application_id; u16 rsvd; - u8 refresh:2; - u32 context_lo:30; + u32 context_lo; u32 context_hi; u32 control0; u32 control1; - u32 token[EIP197_MAX_TOKENS]; + u32 token[EIP197_EMB_TOKENS]; } __packed; #define EIP197_OPTION_MAGIC_VALUE BIT(0) @@ -591,7 +594,10 @@ struct safexcel_control_data_desc { #define EIP197_OPTION_2_TOKEN_IV_CMD GENMASK(11, 10) #define EIP197_OPTION_4_TOKEN_IV_CMD GENMASK(11, 9) +#define EIP197_TYPE_BCLA 0x0 #define EIP197_TYPE_EXTENDED 0x3 +#define EIP197_CONTEXT_SMALL 0x2 +#define EIP197_CONTEXT_SIZE_MASK 0x3 /* Basic Command Descriptor format */ struct safexcel_command_desc { @@ -599,13 +605,16 @@ struct safexcel_command_desc { u8 rsvd0:5; u8 last_seg:1; u8 first_seg:1; - u16 additional_cdata_size:8; + u8 additional_cdata_size:8; u32 rsvd1; u32 data_lo; u32 data_hi; + u32 atok_lo; + u32 atok_hi; + struct safexcel_control_data_desc control_data; } __packed; @@ -629,15 +638,20 @@ enum eip197_fw { struct safexcel_desc_ring { void *base; + void *shbase; void *base_end; + void *shbase_end; dma_addr_t base_dma; + dma_addr_t shbase_dma; /* write and read pointers */ void *write; + void *shwrite; void *read; /* descriptor element offset */ - unsigned offset; + unsigned int offset; + unsigned int shoffset; }; enum safexcel_alg_type { @@ -652,6 +666,7 @@ struct safexcel_config { u32 cd_size; u32 cd_offset; + u32 cdsh_offset; u32 rd_size; u32 rd_offset; @@ -862,7 +877,8 @@ struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *pr bool first, bool last, dma_addr_t data, u32 len, u32 full_data_len, - dma_addr_t context); + dma_addr_t context, + struct safexcel_token **atoken); struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *priv, int ring_id, bool first, bool last, diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index c02995694b41..0c5e80c3f6e3 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -47,8 +47,12 @@ struct safexcel_cipher_ctx { u32 mode; enum safexcel_cipher_alg alg; - char aead; /* !=0=AEAD, 2=IPSec ESP AEAD, 3=IPsec ESP GMAC */ - char xcm; /* 0=authenc, 1=GCM, 2 reserved for CCM */ + u8 aead; /* !=0=AEAD, 2=IPSec ESP AEAD, 3=IPsec ESP GMAC */ + u8 xcm; /* 0=authenc, 1=GCM, 2 reserved for CCM */ + u8 aadskip; + u8 blocksz; + u32 ivmask; + u32 ctrinit; __le32 key[16]; u32 nonce; @@ -72,251 +76,298 @@ struct safexcel_cipher_req { int nr_src, nr_dst; }; -static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv, - struct safexcel_command_desc *cdesc) +static int safexcel_skcipher_iv(struct safexcel_cipher_ctx *ctx, u8 *iv, + struct safexcel_command_desc *cdesc) { - u32 block_sz = 0; - - if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD || - ctx->aead & EIP197_AEAD_TYPE_IPSEC_ESP) { /* _ESP and _ESP_GMAC */ + if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD) { cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD; - /* 32 bit nonce */ cdesc->control_data.token[0] = ctx->nonce; /* 64 bit IV part */ memcpy(&cdesc->control_data.token[1], iv, 8); - - 
if (ctx->alg == SAFEXCEL_CHACHA20 || - ctx->xcm == EIP197_XCM_MODE_CCM) { - /* 32 bit counter, starting at 0 */ - cdesc->control_data.token[3] = 0; - } else { - /* 32 bit counter, start at 1 (big endian!) */ - cdesc->control_data.token[3] = - (__force u32)cpu_to_be32(1); - } - - return; - } else if (ctx->xcm == EIP197_XCM_MODE_GCM || - (ctx->aead && ctx->alg == SAFEXCEL_CHACHA20)) { - cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD; - - /* 96 bit IV part */ - memcpy(&cdesc->control_data.token[0], iv, 12); - - if (ctx->alg == SAFEXCEL_CHACHA20) { - /* 32 bit counter, starting at 0 */ - cdesc->control_data.token[3] = 0; - } else { - /* 32 bit counter, start at 1 (big endian!) */ - *(__be32 *)&cdesc->control_data.token[3] = - cpu_to_be32(1); - } - - return; - } else if (ctx->alg == SAFEXCEL_CHACHA20) { + /* 32 bit counter, start at 0 or 1 (big endian!) */ + cdesc->control_data.token[3] = + (__force u32)cpu_to_be32(ctx->ctrinit); + return 4; + } + if (ctx->alg == SAFEXCEL_CHACHA20) { cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD; - /* 96 bit nonce part */ memcpy(&cdesc->control_data.token[0], &iv[4], 12); /* 32 bit counter */ cdesc->control_data.token[3] = *(u32 *)iv; - - return; - } else if (ctx->xcm == EIP197_XCM_MODE_CCM) { - cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD; - - /* Variable length IV part */ - memcpy(&cdesc->control_data.token[0], iv, 15 - iv[0]); - /* Start variable length counter at 0 */ - memset((u8 *)&cdesc->control_data.token[0] + 15 - iv[0], - 0, iv[0] + 1); - - return; + return 4; } - if (ctx->mode != CONTEXT_CONTROL_CRYPTO_MODE_ECB) { - switch (ctx->alg) { - case SAFEXCEL_DES: - block_sz = DES_BLOCK_SIZE; - cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD; - break; - case SAFEXCEL_3DES: - block_sz = DES3_EDE_BLOCK_SIZE; - cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD; - break; - case SAFEXCEL_SM4: - block_sz = SM4_BLOCK_SIZE; - cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD; - break; - case SAFEXCEL_AES: - block_sz = AES_BLOCK_SIZE; - cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD; - break; - default: - break; - } - memcpy(cdesc->control_data.token, iv, block_sz); - } + cdesc->control_data.options |= ctx->ivmask; + memcpy(cdesc->control_data.token, iv, ctx->blocksz); + return ctx->blocksz / sizeof(u32); } static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv, struct safexcel_command_desc *cdesc, + struct safexcel_token *atoken, u32 length) { struct safexcel_token *token; + int ivlen; - safexcel_cipher_token(ctx, iv, cdesc); + ivlen = safexcel_skcipher_iv(ctx, iv, cdesc); + if (ivlen == 4) { + /* No space in cdesc, instruction moves to atoken */ + cdesc->additional_cdata_size = 1; + token = atoken; + } else { + /* Everything fits in cdesc */ + token = (struct safexcel_token *)(cdesc->control_data.token + 2); + /* Need to pad with NOP */ + eip197_noop_token(&token[1]); + } - /* skip over worst case IV of 4 dwords, no need to be exact */ - token = (struct safexcel_token *)(cdesc->control_data.token + 4); + token->opcode = EIP197_TOKEN_OPCODE_DIRECTION; + token->packet_length = length; + token->stat = EIP197_TOKEN_STAT_LAST_PACKET | + EIP197_TOKEN_STAT_LAST_HASH; + token->instructions = EIP197_TOKEN_INS_LAST | + EIP197_TOKEN_INS_TYPE_CRYPTO | + EIP197_TOKEN_INS_TYPE_OUTPUT; +} - token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION; - token[0].packet_length = length; - token[0].stat = EIP197_TOKEN_STAT_LAST_PACKET | - EIP197_TOKEN_STAT_LAST_HASH; - 
token[0].instructions = EIP197_TOKEN_INS_LAST | - EIP197_TOKEN_INS_TYPE_CRYPTO | - EIP197_TOKEN_INS_TYPE_OUTPUT; +static void safexcel_aead_iv(struct safexcel_cipher_ctx *ctx, u8 *iv, + struct safexcel_command_desc *cdesc) +{ + if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD || + ctx->aead & EIP197_AEAD_TYPE_IPSEC_ESP) { /* _ESP and _ESP_GMAC */ + /* 32 bit nonce */ + cdesc->control_data.token[0] = ctx->nonce; + /* 64 bit IV part */ + memcpy(&cdesc->control_data.token[1], iv, 8); + /* 32 bit counter, start at 0 or 1 (big endian!) */ + cdesc->control_data.token[3] = + (__force u32)cpu_to_be32(ctx->ctrinit); + return; + } + if (ctx->xcm == EIP197_XCM_MODE_GCM || ctx->alg == SAFEXCEL_CHACHA20) { + /* 96 bit IV part */ + memcpy(&cdesc->control_data.token[0], iv, 12); + /* 32 bit counter, start at 0 or 1 (big endian!) */ + cdesc->control_data.token[3] = + (__force u32)cpu_to_be32(ctx->ctrinit); + return; + } + /* CBC */ + memcpy(cdesc->control_data.token, iv, ctx->blocksz); } static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv, struct safexcel_command_desc *cdesc, + struct safexcel_token *atoken, enum safexcel_cipher_direction direction, u32 cryptlen, u32 assoclen, u32 digestsize) { - struct safexcel_token *token; + struct safexcel_token *aadref; + int atoksize = 2; /* Start with minimum size */ + int assocadj = assoclen - ctx->aadskip, aadalign; - safexcel_cipher_token(ctx, iv, cdesc); + /* Always 4 dwords of embedded IV for AEAD modes */ + cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD; - if (direction == SAFEXCEL_ENCRYPT) { - /* align end of instruction sequence to end of token */ - token = (struct safexcel_token *)(cdesc->control_data.token + - EIP197_MAX_TOKENS - 14); - - token[13].opcode = EIP197_TOKEN_OPCODE_INSERT; - token[13].packet_length = digestsize; - token[13].stat = EIP197_TOKEN_STAT_LAST_HASH | - EIP197_TOKEN_STAT_LAST_PACKET; - token[13].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT | - EIP197_TOKEN_INS_INSERT_HASH_DIGEST; - } else { + if (direction == SAFEXCEL_DECRYPT) cryptlen -= digestsize; - /* align end of instruction sequence to end of token */ - token = (struct safexcel_token *)(cdesc->control_data.token + - EIP197_MAX_TOKENS - 15); - - token[13].opcode = EIP197_TOKEN_OPCODE_RETRIEVE; - token[13].packet_length = digestsize; - token[13].stat = EIP197_TOKEN_STAT_LAST_HASH | - EIP197_TOKEN_STAT_LAST_PACKET; - token[13].instructions = EIP197_TOKEN_INS_INSERT_HASH_DIGEST; - - token[14].opcode = EIP197_TOKEN_OPCODE_VERIFY; - token[14].packet_length = digestsize | - EIP197_TOKEN_HASH_RESULT_VERIFY; - token[14].stat = EIP197_TOKEN_STAT_LAST_HASH | - EIP197_TOKEN_STAT_LAST_PACKET; - token[14].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT; - } - - if (ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP) { - /* For ESP mode (and not GMAC), skip over the IV */ - token[8].opcode = EIP197_TOKEN_OPCODE_DIRECTION; - token[8].packet_length = EIP197_AEAD_IPSEC_IV_SIZE; - - assoclen -= EIP197_AEAD_IPSEC_IV_SIZE; - } + if (unlikely(ctx->xcm == EIP197_XCM_MODE_CCM)) { + /* Construct IV block B0 for the CBC-MAC */ + u8 *final_iv = (u8 *)cdesc->control_data.token; + u8 *cbcmaciv = (u8 *)&atoken[1]; + __le32 *aadlen = (__le32 *)&atoken[5]; + + if (ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP) { + /* Length + nonce */ + cdesc->control_data.token[0] = ctx->nonce; + /* Fixup flags byte */ + *(__le32 *)cbcmaciv = + cpu_to_le32(ctx->nonce | + ((assocadj > 0) << 6) | + ((digestsize - 2) << 2)); + /* 64 bit IV part */ + memcpy(&cdesc->control_data.token[1], iv, 8); + memcpy(cbcmaciv 
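/* Both CCM branches above fix up the flags octet of the CBC-MAC B0
 * block with ((assocadj > 0) << 6) | ((digestsize - 2) << 2). That is
 * RFC 3610's encoding: Adata in bit 6, M' = (taglen - 2) / 2 in bits
 * 5..3 (since taglen is even, ((taglen - 2) / 2) << 3 equals
 * (taglen - 2) << 2, the form the driver uses), and L' = q - 1 in bits
 * 2..0, which here comes from iv[0]. Cross-check sketch: */
#include <stdint.h>

static uint8_t ccm_b0_flags(int have_aad, unsigned int taglen,
			    uint8_t lprime)
{
	return (uint8_t)(((have_aad ? 1 : 0) << 6) |
			 (((taglen - 2) / 2) << 3) |
			 (lprime & 0x7));
}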
+ 4, iv, 8); + /* Start counter at 0 */ + cdesc->control_data.token[3] = 0; + /* Message length */ + *(__be32 *)(cbcmaciv + 12) = cpu_to_be32(cryptlen); + } else { + /* Variable length IV part */ + memcpy(final_iv, iv, 15 - iv[0]); + memcpy(cbcmaciv, iv, 15 - iv[0]); + /* Start variable length counter at 0 */ + memset(final_iv + 15 - iv[0], 0, iv[0] + 1); + memset(cbcmaciv + 15 - iv[0], 0, iv[0] - 1); + /* fixup flags byte */ + cbcmaciv[0] |= ((assocadj > 0) << 6) | + ((digestsize - 2) << 2); + /* insert lower 2 bytes of message length */ + cbcmaciv[14] = cryptlen >> 8; + cbcmaciv[15] = cryptlen & 255; + } - token[6].opcode = EIP197_TOKEN_OPCODE_DIRECTION; - token[6].packet_length = assoclen; - token[6].instructions = EIP197_TOKEN_INS_LAST | - EIP197_TOKEN_INS_TYPE_HASH; + atoken->opcode = EIP197_TOKEN_OPCODE_INSERT; + atoken->packet_length = AES_BLOCK_SIZE + + ((assocadj > 0) << 1); + atoken->stat = 0; + atoken->instructions = EIP197_TOKEN_INS_ORIGIN_TOKEN | + EIP197_TOKEN_INS_TYPE_HASH; + + if (likely(assocadj)) { + *aadlen = cpu_to_le32((assocadj >> 8) | + (assocadj & 255) << 8); + atoken += 6; + atoksize += 7; + } else { + atoken += 5; + atoksize += 6; + } - if (likely(cryptlen || ctx->alg == SAFEXCEL_CHACHA20)) { - token[11].opcode = EIP197_TOKEN_OPCODE_DIRECTION; - token[11].packet_length = cryptlen; - token[11].stat = EIP197_TOKEN_STAT_LAST_HASH; - if (unlikely(ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP_GMAC)) { - token[6].instructions = EIP197_TOKEN_INS_TYPE_HASH; - /* Do not send to crypt engine in case of GMAC */ - token[11].instructions = EIP197_TOKEN_INS_LAST | - EIP197_TOKEN_INS_TYPE_HASH | - EIP197_TOKEN_INS_TYPE_OUTPUT; + /* Process AAD data */ + aadref = atoken; + atoken->opcode = EIP197_TOKEN_OPCODE_DIRECTION; + atoken->packet_length = assocadj; + atoken->stat = 0; + atoken->instructions = EIP197_TOKEN_INS_TYPE_HASH; + atoken++; + + /* For CCM only, align AAD data towards hash engine */ + atoken->opcode = EIP197_TOKEN_OPCODE_INSERT; + aadalign = (assocadj + 2) & 15; + atoken->packet_length = assocadj && aadalign ? + 16 - aadalign : + 0; + if (likely(cryptlen)) { + atoken->stat = 0; + atoken->instructions = EIP197_TOKEN_INS_TYPE_HASH; } else { - token[11].instructions = EIP197_TOKEN_INS_LAST | - EIP197_TOKEN_INS_TYPE_CRYPTO | - EIP197_TOKEN_INS_TYPE_HASH | - EIP197_TOKEN_INS_TYPE_OUTPUT; + atoken->stat = EIP197_TOKEN_STAT_LAST_HASH; + atoken->instructions = EIP197_TOKEN_INS_LAST | + EIP197_TOKEN_INS_TYPE_HASH; } - } else if (ctx->xcm != EIP197_XCM_MODE_CCM) { - token[6].stat = EIP197_TOKEN_STAT_LAST_HASH; + } else { + safexcel_aead_iv(ctx, iv, cdesc); + + /* Process AAD data */ + aadref = atoken; + atoken->opcode = EIP197_TOKEN_OPCODE_DIRECTION; + atoken->packet_length = assocadj; + atoken->stat = EIP197_TOKEN_STAT_LAST_HASH; + atoken->instructions = EIP197_TOKEN_INS_LAST | + EIP197_TOKEN_INS_TYPE_HASH; } + atoken++; - if (!ctx->xcm) - return; + if (ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP) { + /* For ESP mode (and not GMAC), skip over the IV */ + atoken->opcode = EIP197_TOKEN_OPCODE_DIRECTION; + atoken->packet_length = EIP197_AEAD_IPSEC_IV_SIZE; + atoken->stat = 0; + atoken->instructions = 0; + atoken++; + atoksize++; + } else if (unlikely(ctx->alg == SAFEXCEL_CHACHA20 && + direction == SAFEXCEL_DECRYPT)) { + /* Poly-chacha decryption needs a dummy NOP here ... 
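/* CCM AAD framing as done above: two big-endian length bytes precede
 * the AAD, and the total (assocadj + 2) is zero-padded to the 16-byte
 * CBC-MAC block; the byte-swapped expression is how a 16-bit
 * big-endian value lands in a token word stored with cpu_to_le32().
 * Standalone arithmetic only: */
#include <stdint.h>

static unsigned int ccm_aad_pad(unsigned int assocadj)
{
	unsigned int aadalign = (assocadj + 2) & 15; /* +2: length field */

	return (assocadj && aadalign) ? 16 - aadalign : 0;
}

static uint32_t ccm_aadlen_word(unsigned int assocadj)
{
	/* 16-bit big-endian AAD length, low byte first in memory */
	return (uint32_t)((assocadj >> 8) | ((assocadj & 255) << 8));
}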
*/ + atoken->opcode = EIP197_TOKEN_OPCODE_INSERT; + atoken->packet_length = 16; /* According to Op Manual */ + atoken->stat = 0; + atoken->instructions = 0; + atoken++; + atoksize++; + } - token[9].opcode = EIP197_TOKEN_OPCODE_INSERT_REMRES; - token[9].packet_length = 0; - token[9].instructions = AES_BLOCK_SIZE; + if (ctx->xcm) { + /* For GCM and CCM, obtain enc(Y0) */ + atoken->opcode = EIP197_TOKEN_OPCODE_INSERT_REMRES; + atoken->packet_length = 0; + atoken->stat = 0; + atoken->instructions = AES_BLOCK_SIZE; + atoken++; + + atoken->opcode = EIP197_TOKEN_OPCODE_INSERT; + atoken->packet_length = AES_BLOCK_SIZE; + atoken->stat = 0; + atoken->instructions = EIP197_TOKEN_INS_TYPE_OUTPUT | + EIP197_TOKEN_INS_TYPE_CRYPTO; + atoken++; + atoksize += 2; + } - token[10].opcode = EIP197_TOKEN_OPCODE_INSERT; - token[10].packet_length = AES_BLOCK_SIZE; - token[10].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT | - EIP197_TOKEN_INS_TYPE_CRYPTO; + if (likely(cryptlen || ctx->alg == SAFEXCEL_CHACHA20)) { + /* Fixup stat field for AAD direction instruction */ + aadref->stat = 0; - if (ctx->xcm != EIP197_XCM_MODE_GCM) { - u8 *final_iv = (u8 *)cdesc->control_data.token; - u8 *cbcmaciv = (u8 *)&token[1]; - __le32 *aadlen = (__le32 *)&token[5]; + /* Process crypto data */ + atoken->opcode = EIP197_TOKEN_OPCODE_DIRECTION; + atoken->packet_length = cryptlen; - /* Construct IV block B0 for the CBC-MAC */ - token[0].opcode = EIP197_TOKEN_OPCODE_INSERT; - token[0].packet_length = AES_BLOCK_SIZE + - ((assoclen > 0) << 1); - token[0].instructions = EIP197_TOKEN_INS_ORIGIN_TOKEN | - EIP197_TOKEN_INS_TYPE_HASH; - /* Variable length IV part */ - memcpy(cbcmaciv, final_iv, 15 - final_iv[0]); - /* fixup flags byte */ - cbcmaciv[0] |= ((assoclen > 0) << 6) | ((digestsize - 2) << 2); - /* Clear upper bytes of variable message length to 0 */ - memset(cbcmaciv + 15 - final_iv[0], 0, final_iv[0] - 1); - /* insert lower 2 bytes of message length */ - cbcmaciv[14] = cryptlen >> 8; - cbcmaciv[15] = cryptlen & 255; + if (unlikely(ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP_GMAC)) { + /* Fixup instruction field for AAD dir instruction */ + aadref->instructions = EIP197_TOKEN_INS_TYPE_HASH; - if (assoclen) { - *aadlen = cpu_to_le32((assoclen >> 8) | - ((assoclen & 0xff) << 8)); - assoclen += 2; + /* Do not send to crypt engine in case of GMAC */ + atoken->instructions = EIP197_TOKEN_INS_LAST | + EIP197_TOKEN_INS_TYPE_HASH | + EIP197_TOKEN_INS_TYPE_OUTPUT; + } else { + atoken->instructions = EIP197_TOKEN_INS_LAST | + EIP197_TOKEN_INS_TYPE_CRYPTO | + EIP197_TOKEN_INS_TYPE_HASH | + EIP197_TOKEN_INS_TYPE_OUTPUT; } - token[6].instructions = EIP197_TOKEN_INS_TYPE_HASH; - - /* Align AAD data towards hash engine */ - token[7].opcode = EIP197_TOKEN_OPCODE_INSERT; - assoclen &= 15; - token[7].packet_length = assoclen ? 16 - assoclen : 0; - - if (likely(cryptlen)) { - token[7].instructions = EIP197_TOKEN_INS_TYPE_HASH; - - /* Align crypto data towards hash engine */ - token[11].stat = 0; - - token[12].opcode = EIP197_TOKEN_OPCODE_INSERT; - cryptlen &= 15; - token[12].packet_length = cryptlen ? 
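/* The payload DIRECTION instruction above always routes data to the
 * hash engine, but to the cipher core only when the mode is not GMAC:
 * GMAC authenticates the payload without encrypting it. A sketch of
 * that flag selection, with made-up flag values standing in for the
 * EIP197_TOKEN_INS_* constants: */
#include <stdint.h>

#define INS_LAST	(1u << 0)
#define INS_HASH	(1u << 1)
#define INS_CRYPTO	(1u << 2)
#define INS_OUTPUT	(1u << 3)

static uint32_t payload_instructions(int is_gmac)
{
	uint32_t ins = INS_LAST | INS_HASH | INS_OUTPUT;

	if (!is_gmac)
		ins |= INS_CRYPTO;	/* normal AEAD: hash and encrypt */
	return ins;			/* GMAC: hash + passthrough only */
}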
16 - cryptlen : 0; - token[12].stat = EIP197_TOKEN_STAT_LAST_HASH; - token[12].instructions = EIP197_TOKEN_INS_TYPE_HASH; + cryptlen &= 15; + if (unlikely(ctx->xcm == EIP197_XCM_MODE_CCM && cryptlen)) { + atoken->stat = 0; + /* For CCM only, pad crypto data to the hash engine */ + atoken++; + atoksize++; + atoken->opcode = EIP197_TOKEN_OPCODE_INSERT; + atoken->packet_length = 16 - cryptlen; + atoken->stat = EIP197_TOKEN_STAT_LAST_HASH; + atoken->instructions = EIP197_TOKEN_INS_TYPE_HASH; } else { - token[7].stat = EIP197_TOKEN_STAT_LAST_HASH; - token[7].instructions = EIP197_TOKEN_INS_LAST | - EIP197_TOKEN_INS_TYPE_HASH; + atoken->stat = EIP197_TOKEN_STAT_LAST_HASH; } + atoken++; + atoksize++; } + + if (direction == SAFEXCEL_ENCRYPT) { + /* Append ICV */ + atoken->opcode = EIP197_TOKEN_OPCODE_INSERT; + atoken->packet_length = digestsize; + atoken->stat = EIP197_TOKEN_STAT_LAST_HASH | + EIP197_TOKEN_STAT_LAST_PACKET; + atoken->instructions = EIP197_TOKEN_INS_TYPE_OUTPUT | + EIP197_TOKEN_INS_INSERT_HASH_DIGEST; + } else { + /* Extract ICV */ + atoken->opcode = EIP197_TOKEN_OPCODE_RETRIEVE; + atoken->packet_length = digestsize; + atoken->stat = EIP197_TOKEN_STAT_LAST_HASH | + EIP197_TOKEN_STAT_LAST_PACKET; + atoken->instructions = EIP197_TOKEN_INS_INSERT_HASH_DIGEST; + atoken++; + atoksize++; + + /* Verify ICV */ + atoken->opcode = EIP197_TOKEN_OPCODE_VERIFY; + atoken->packet_length = digestsize | + EIP197_TOKEN_HASH_RESULT_VERIFY; + atoken->stat = EIP197_TOKEN_STAT_LAST_HASH | + EIP197_TOKEN_STAT_LAST_PACKET; + atoken->instructions = EIP197_TOKEN_INS_TYPE_OUTPUT; + } + + /* Fixup length of the token in the command descriptor */ + cdesc->additional_cdata_size = atoksize; } static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm, @@ -329,10 +380,8 @@ static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm, int ret, i; ret = aes_expandkey(&aes, key, len); - if (ret) { - crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (ret) return ret; - } if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) { for (i = 0; i < len / sizeof(u32); i++) { @@ -382,12 +431,12 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key, case SAFEXCEL_DES: err = verify_aead_des_key(ctfm, keys.enckey, keys.enckeylen); if (unlikely(err)) - goto badkey_expflags; + goto badkey; break; case SAFEXCEL_3DES: err = verify_aead_des3_key(ctfm, keys.enckey, keys.enckeylen); if (unlikely(err)) - goto badkey_expflags; + goto badkey; break; case SAFEXCEL_AES: err = aes_expandkey(&aes, keys.enckey, keys.enckeylen); @@ -450,9 +499,6 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key, goto badkey; } - crypto_aead_set_flags(ctfm, crypto_aead_get_flags(ctfm) & - CRYPTO_TFM_RES_MASK); - if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma && (memcmp(ctx->ipad, istate.state, ctx->state_sz) || memcmp(ctx->opad, ostate.state, ctx->state_sz))) @@ -470,8 +516,6 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key, return 0; badkey: - crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); -badkey_expflags: memzero_explicit(&keys, sizeof(keys)); return err; } @@ -656,6 +700,7 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring, unsigned int totlen; unsigned int totlen_src = cryptlen + assoclen; unsigned int totlen_dst = totlen_src; + struct safexcel_token *atoken; int n_cdesc = 0, n_rdesc = 0; int queued, i, ret = 0; bool first = true; @@ -730,56 +775,60 @@ static int safexcel_send_req(struct 
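/* The token program ends with the ICV handling above: one INSERT on
 * encrypt, a RETRIEVE plus VERIFY pair on decrypt, which is also why
 * atoksize grows by one more in the decrypt branch. Modelled
 * minimally, with symbolic opcodes rather than real encodings: */
enum icv_op { OP_INSERT_DIGEST, OP_RETRIEVE_DIGEST, OP_VERIFY_DIGEST };

static int icv_program(enum icv_op prog[2], int encrypt)
{
	if (encrypt) {
		prog[0] = OP_INSERT_DIGEST;	/* append ICV to output */
		return 1;			/* one instruction */
	}
	prog[0] = OP_RETRIEVE_DIGEST;	/* pull ICV bytes from the input */
	prog[1] = OP_VERIFY_DIGEST;	/* engine compares, flags mismatch */
	return 2;			/* decrypt needs two */
}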
crypto_async_request *base, int ring, memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len); - /* The EIP cannot deal with zero length input packets! */ - if (totlen == 0) - totlen = 1; + if (!totlen) { + /* + * The EIP97 cannot deal with zero length input packets! + * So stuff a dummy command descriptor indicating a 1 byte + * (dummy) input packet, using the context record as source. + */ + first_cdesc = safexcel_add_cdesc(priv, ring, + 1, 1, ctx->base.ctxr_dma, + 1, 1, ctx->base.ctxr_dma, + &atoken); + if (IS_ERR(first_cdesc)) { + /* No space left in the command descriptor ring */ + ret = PTR_ERR(first_cdesc); + goto cdesc_rollback; + } + n_cdesc = 1; + goto skip_cdesc; + } /* command descriptors */ for_each_sg(src, sg, sreq->nr_src, i) { int len = sg_dma_len(sg); /* Do not overflow the request */ - if (queued - len < 0) + if (queued < len) len = queued; cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, !(queued - len), sg_dma_address(sg), len, totlen, - ctx->base.ctxr_dma); + ctx->base.ctxr_dma, &atoken); if (IS_ERR(cdesc)) { /* No space left in the command descriptor ring */ ret = PTR_ERR(cdesc); goto cdesc_rollback; } - n_cdesc++; - if (n_cdesc == 1) { + if (!n_cdesc) first_cdesc = cdesc; - } + n_cdesc++; queued -= len; if (!queued) break; } - - if (unlikely(!n_cdesc)) { - /* - * Special case: zero length input buffer. - * The engine always needs the 1st command descriptor, however! - */ - first_cdesc = safexcel_add_cdesc(priv, ring, 1, 1, 0, 0, totlen, - ctx->base.ctxr_dma); - n_cdesc = 1; - } - +skip_cdesc: /* Add context control words and token to first command descriptor */ safexcel_context_control(ctx, base, sreq, first_cdesc); if (ctx->aead) - safexcel_aead_token(ctx, iv, first_cdesc, + safexcel_aead_token(ctx, iv, first_cdesc, atoken, sreq->direction, cryptlen, assoclen, digestsize); else - safexcel_skcipher_token(ctx, iv, first_cdesc, + safexcel_skcipher_token(ctx, iv, first_cdesc, atoken, cryptlen); /* result descriptors */ @@ -1166,6 +1215,8 @@ static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm) ctx->base.send = safexcel_skcipher_send; ctx->base.handle_result = safexcel_skcipher_handle_result; + ctx->ivmask = EIP197_OPTION_4_TOKEN_IV_CMD; + ctx->ctrinit = 1; return 0; } @@ -1230,6 +1281,8 @@ static int safexcel_skcipher_aes_ecb_cra_init(struct crypto_tfm *tfm) safexcel_skcipher_cra_init(tfm); ctx->alg = SAFEXCEL_AES; ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB; + ctx->blocksz = 0; + ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD; return 0; } @@ -1264,6 +1317,7 @@ static int safexcel_skcipher_aes_cbc_cra_init(struct crypto_tfm *tfm) safexcel_skcipher_cra_init(tfm); ctx->alg = SAFEXCEL_AES; + ctx->blocksz = AES_BLOCK_SIZE; ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC; return 0; } @@ -1300,6 +1354,7 @@ static int safexcel_skcipher_aes_cfb_cra_init(struct crypto_tfm *tfm) safexcel_skcipher_cra_init(tfm); ctx->alg = SAFEXCEL_AES; + ctx->blocksz = AES_BLOCK_SIZE; ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CFB; return 0; } @@ -1336,6 +1391,7 @@ static int safexcel_skcipher_aes_ofb_cra_init(struct crypto_tfm *tfm) safexcel_skcipher_cra_init(tfm); ctx->alg = SAFEXCEL_AES; + ctx->blocksz = AES_BLOCK_SIZE; ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_OFB; return 0; } @@ -1381,10 +1437,8 @@ static int safexcel_skcipher_aesctr_setkey(struct crypto_skcipher *ctfm, /* exclude the nonce here */ keylen = len - CTR_RFC3686_NONCE_SIZE; ret = aes_expandkey(&aes, key, keylen); - if (ret) { - crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (ret) return ret; - } if (priv->flags & 
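/* safexcel_send_req() above now sends a single dummy descriptor for
 * empty payloads (1 byte, sourced from the context record, because the
 * engine hangs on zero-length input), and the segment clamp in the
 * descriptor loop was rewritten from the sign trick
 * `queued - len < 0` to a plain comparison. The clamp, standalone: */
static int clamp_to_queued(int queued, int seg_len)
{
	/* equivalent to the old signed test, and still correct should
	 * the operands ever become unsigned */
	return seg_len < queued ? seg_len : queued;
}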
EIP197_TRC_CACHE && ctx->base.ctxr_dma) { for (i = 0; i < keylen / sizeof(u32); i++) { @@ -1410,6 +1464,7 @@ static int safexcel_skcipher_aes_ctr_cra_init(struct crypto_tfm *tfm) safexcel_skcipher_cra_init(tfm); ctx->alg = SAFEXCEL_AES; + ctx->blocksz = AES_BLOCK_SIZE; ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; return 0; } @@ -1445,6 +1500,7 @@ static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key, unsigned int len) { struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm); + struct safexcel_crypto_priv *priv = ctx->priv; int ret; ret = verify_skcipher_des_key(ctfm, key); @@ -1452,7 +1508,7 @@ static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key, return ret; /* if context exits and key changed, need to invalidate it */ - if (ctx->base.ctxr_dma) + if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) if (memcmp(ctx->key, key, len)) ctx->base.needs_inv = true; @@ -1468,6 +1524,8 @@ static int safexcel_skcipher_des_cbc_cra_init(struct crypto_tfm *tfm) safexcel_skcipher_cra_init(tfm); ctx->alg = SAFEXCEL_DES; + ctx->blocksz = DES_BLOCK_SIZE; + ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD; ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC; return 0; } @@ -1505,6 +1563,8 @@ static int safexcel_skcipher_des_ecb_cra_init(struct crypto_tfm *tfm) safexcel_skcipher_cra_init(tfm); ctx->alg = SAFEXCEL_DES; ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB; + ctx->blocksz = 0; + ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD; return 0; } @@ -1537,6 +1597,7 @@ static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm, const u8 *key, unsigned int len) { struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm); + struct safexcel_crypto_priv *priv = ctx->priv; int err; err = verify_skcipher_des3_key(ctfm, key); @@ -1544,7 +1605,7 @@ static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm, return err; /* if context exits and key changed, need to invalidate it */ - if (ctx->base.ctxr_dma) + if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) if (memcmp(ctx->key, key, len)) ctx->base.needs_inv = true; @@ -1560,6 +1621,8 @@ static int safexcel_skcipher_des3_cbc_cra_init(struct crypto_tfm *tfm) safexcel_skcipher_cra_init(tfm); ctx->alg = SAFEXCEL_3DES; + ctx->blocksz = DES3_EDE_BLOCK_SIZE; + ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD; ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC; return 0; } @@ -1597,6 +1660,8 @@ static int safexcel_skcipher_des3_ecb_cra_init(struct crypto_tfm *tfm) safexcel_skcipher_cra_init(tfm); ctx->alg = SAFEXCEL_3DES; ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB; + ctx->blocksz = 0; + ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD; return 0; } @@ -1652,6 +1717,9 @@ static int safexcel_aead_cra_init(struct crypto_tfm *tfm) ctx->priv = tmpl->priv; ctx->alg = SAFEXCEL_AES; /* default */ + ctx->blocksz = AES_BLOCK_SIZE; + ctx->ivmask = EIP197_OPTION_4_TOKEN_IV_CMD; + ctx->ctrinit = 1; ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC; /* default */ ctx->aead = true; ctx->base.send = safexcel_aead_send; @@ -1840,6 +1908,8 @@ static int safexcel_aead_sha1_des3_cra_init(struct crypto_tfm *tfm) safexcel_aead_sha1_cra_init(tfm); ctx->alg = SAFEXCEL_3DES; /* override default */ + ctx->blocksz = DES3_EDE_BLOCK_SIZE; + ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD; return 0; } @@ -1874,6 +1944,8 @@ static int safexcel_aead_sha256_des3_cra_init(struct crypto_tfm *tfm) safexcel_aead_sha256_cra_init(tfm); ctx->alg = SAFEXCEL_3DES; /* override default */ + ctx->blocksz = DES3_EDE_BLOCK_SIZE; + ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD; return 0; } @@ 
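/* Both DES setkey paths above now flag a live context for invalidation
 * only when the engine actually has a transform record cache
 * (EIP197_TRC_CACHE); without the cache there is no stale record to
 * flush. The pattern, with stand-in fields for the driver's context
 * and priv structures: */
#include <stdbool.h>
#include <string.h>

struct sketch_ctx {
	unsigned char key[24];	/* large enough for 3DES here */
	bool have_ctxr;		/* context record already programmed */
	bool needs_inv;		/* consumed by the send path */
};

static void note_key_change(struct sketch_ctx *ctx, bool have_trc_cache,
			    const unsigned char *key, unsigned int len)
{
	if (have_trc_cache && ctx->have_ctxr && memcmp(ctx->key, key, len))
		ctx->needs_inv = true;
	memcpy(ctx->key, key, len);
}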
-1908,6 +1980,8 @@ static int safexcel_aead_sha224_des3_cra_init(struct crypto_tfm *tfm) safexcel_aead_sha224_cra_init(tfm); ctx->alg = SAFEXCEL_3DES; /* override default */ + ctx->blocksz = DES3_EDE_BLOCK_SIZE; + ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD; return 0; } @@ -1942,6 +2016,8 @@ static int safexcel_aead_sha512_des3_cra_init(struct crypto_tfm *tfm) safexcel_aead_sha512_cra_init(tfm); ctx->alg = SAFEXCEL_3DES; /* override default */ + ctx->blocksz = DES3_EDE_BLOCK_SIZE; + ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD; return 0; } @@ -1976,6 +2052,8 @@ static int safexcel_aead_sha384_des3_cra_init(struct crypto_tfm *tfm) safexcel_aead_sha384_cra_init(tfm); ctx->alg = SAFEXCEL_3DES; /* override default */ + ctx->blocksz = DES3_EDE_BLOCK_SIZE; + ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD; return 0; } @@ -2010,6 +2088,8 @@ static int safexcel_aead_sha1_des_cra_init(struct crypto_tfm *tfm) safexcel_aead_sha1_cra_init(tfm); ctx->alg = SAFEXCEL_DES; /* override default */ + ctx->blocksz = DES_BLOCK_SIZE; + ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD; return 0; } @@ -2044,6 +2124,8 @@ static int safexcel_aead_sha256_des_cra_init(struct crypto_tfm *tfm) safexcel_aead_sha256_cra_init(tfm); ctx->alg = SAFEXCEL_DES; /* override default */ + ctx->blocksz = DES_BLOCK_SIZE; + ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD; return 0; } @@ -2078,6 +2160,8 @@ static int safexcel_aead_sha224_des_cra_init(struct crypto_tfm *tfm) safexcel_aead_sha224_cra_init(tfm); ctx->alg = SAFEXCEL_DES; /* override default */ + ctx->blocksz = DES_BLOCK_SIZE; + ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD; return 0; } @@ -2112,6 +2196,8 @@ static int safexcel_aead_sha512_des_cra_init(struct crypto_tfm *tfm) safexcel_aead_sha512_cra_init(tfm); ctx->alg = SAFEXCEL_DES; /* override default */ + ctx->blocksz = DES_BLOCK_SIZE; + ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD; return 0; } @@ -2146,6 +2232,8 @@ static int safexcel_aead_sha384_des_cra_init(struct crypto_tfm *tfm) safexcel_aead_sha384_cra_init(tfm); ctx->alg = SAFEXCEL_DES; /* override default */ + ctx->blocksz = DES_BLOCK_SIZE; + ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD; return 0; } @@ -2362,10 +2450,8 @@ static int safexcel_skcipher_aesxts_setkey(struct crypto_skcipher *ctfm, /* Only half of the key data is cipher key */ keylen = (len >> 1); ret = aes_expandkey(&aes, key, keylen); - if (ret) { - crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (ret) return ret; - } if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) { for (i = 0; i < keylen / sizeof(u32); i++) { @@ -2381,10 +2467,8 @@ static int safexcel_skcipher_aesxts_setkey(struct crypto_skcipher *ctfm, /* The other half is the tweak key */ ret = aes_expandkey(&aes, (u8 *)(key + keylen), keylen); - if (ret) { - crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (ret) return ret; - } if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) { for (i = 0; i < keylen / sizeof(u32); i++) { @@ -2412,6 +2496,7 @@ static int safexcel_skcipher_aes_xts_cra_init(struct crypto_tfm *tfm) safexcel_skcipher_cra_init(tfm); ctx->alg = SAFEXCEL_AES; + ctx->blocksz = AES_BLOCK_SIZE; ctx->xts = 1; ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XTS; return 0; @@ -2472,7 +2557,6 @@ static int safexcel_aead_gcm_setkey(struct crypto_aead *ctfm, const u8 *key, ret = aes_expandkey(&aes, key, len); if (ret) { - crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); memzero_explicit(&aes, sizeof(aes)); return ret; } @@ -2496,8 +2580,6 @@ static int safexcel_aead_gcm_setkey(struct crypto_aead *ctfm, const u8 
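/* safexcel_skcipher_aesxts_setkey() below treats the supplied key
 * material as two equal halves, cipher key then tweak key, expanding
 * each separately. Sketched here with a function pointer standing in
 * for aes_expandkey(): */
#include <stdint.h>

typedef int (*expand_fn)(const uint8_t *key, unsigned int len);

static int xts_split_setkey(const uint8_t *key, unsigned int len,
			    expand_fn expand)
{
	unsigned int half = len >> 1;
	int ret;

	ret = expand(key, half);		/* data-encryption key */
	if (ret)
		return ret;
	return expand(key + half, half);	/* tweak-encryption key */
}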
*key, crypto_cipher_set_flags(ctx->hkaes, crypto_aead_get_flags(ctfm) & CRYPTO_TFM_REQ_MASK); ret = crypto_cipher_setkey(ctx->hkaes, key, len); - crypto_aead_set_flags(ctfm, crypto_cipher_get_flags(ctx->hkaes) & - CRYPTO_TFM_RES_MASK); if (ret) return ret; @@ -2532,10 +2614,7 @@ static int safexcel_aead_gcm_cra_init(struct crypto_tfm *tfm) ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XCM; /* override default */ ctx->hkaes = crypto_alloc_cipher("aes", 0, 0); - if (IS_ERR(ctx->hkaes)) - return PTR_ERR(ctx->hkaes); - - return 0; + return PTR_ERR_OR_ZERO(ctx->hkaes); } static void safexcel_aead_gcm_cra_exit(struct crypto_tfm *tfm) @@ -2589,7 +2668,6 @@ static int safexcel_aead_ccm_setkey(struct crypto_aead *ctfm, const u8 *key, ret = aes_expandkey(&aes, key, len); if (ret) { - crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); memzero_explicit(&aes, sizeof(aes)); return ret; } @@ -2632,6 +2710,7 @@ static int safexcel_aead_ccm_cra_init(struct crypto_tfm *tfm) ctx->state_sz = 3 * AES_BLOCK_SIZE; ctx->xcm = EIP197_XCM_MODE_CCM; ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XCM; /* override default */ + ctx->ctrinit = 0; return 0; } @@ -2719,10 +2798,9 @@ static int safexcel_skcipher_chacha20_setkey(struct crypto_skcipher *ctfm, { struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm); - if (len != CHACHA_KEY_SIZE) { - crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (len != CHACHA_KEY_SIZE) return -EINVAL; - } + safexcel_chacha20_setkey(ctx, key); return 0; @@ -2734,6 +2812,7 @@ static int safexcel_skcipher_chacha20_cra_init(struct crypto_tfm *tfm) safexcel_skcipher_cra_init(tfm); ctx->alg = SAFEXCEL_CHACHA20; + ctx->ctrinit = 0; ctx->mode = CONTEXT_CONTROL_CHACHA20_MODE_256_32; return 0; } @@ -2775,10 +2854,9 @@ static int safexcel_aead_chachapoly_setkey(struct crypto_aead *ctfm, len -= EIP197_AEAD_IPSEC_NONCE_SIZE; ctx->nonce = *(u32 *)(key + len); } - if (len != CHACHA_KEY_SIZE) { - crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (len != CHACHA_KEY_SIZE) return -EINVAL; - } + safexcel_chacha20_setkey(ctx, key); return 0; @@ -2885,6 +2963,7 @@ static int safexcel_aead_chachapoly_cra_init(struct crypto_tfm *tfm) ctx->alg = SAFEXCEL_CHACHA20; ctx->mode = CONTEXT_CONTROL_CHACHA20_MODE_256_32 | CONTEXT_CONTROL_CHACHA20_MODE_CALC_OTK; + ctx->ctrinit = 0; ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_POLY1305; ctx->state_sz = 0; /* Precomputed by HW */ return 0; @@ -2933,6 +3012,7 @@ static int safexcel_aead_chachapolyesp_cra_init(struct crypto_tfm *tfm) ret = safexcel_aead_chachapoly_cra_init(tfm); ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP; + ctx->aadskip = EIP197_AEAD_IPSEC_IV_SIZE; return ret; } @@ -2971,10 +3051,8 @@ static int safexcel_skcipher_sm4_setkey(struct crypto_skcipher *ctfm, struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); struct safexcel_crypto_priv *priv = ctx->priv; - if (len != SM4_KEY_SIZE) { - crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (len != SM4_KEY_SIZE) return -EINVAL; - } if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) if (memcmp(ctx->key, key, SM4_KEY_SIZE)) @@ -3013,6 +3091,8 @@ static int safexcel_skcipher_sm4_ecb_cra_init(struct crypto_tfm *tfm) safexcel_skcipher_cra_init(tfm); ctx->alg = SAFEXCEL_SM4; ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB; + ctx->blocksz = 0; + ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD; return 0; } @@ -3047,6 +3127,7 @@ static int safexcel_skcipher_sm4_cbc_cra_init(struct crypto_tfm *tfm) safexcel_skcipher_cra_init(tfm); ctx->alg = SAFEXCEL_SM4; + ctx->blocksz = SM4_BLOCK_SIZE; 
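/* The GCM setkey above keys a software AES cipher ("hkaes") with the
 * same key; GCM's hash subkey H is the AES encryption of the all-zero
 * block, which is presumably what that cipher is used to derive.
 * Sketch with an assumed one-block AES primitive (not a real kernel
 * API): */
#include <stdint.h>
#include <string.h>

void aes_encrypt_block(const void *key_schedule, uint8_t out[16],
		       const uint8_t in[16]);	/* assumed primitive */

static void derive_ghash_key(const void *key_schedule, uint8_t h[16])
{
	uint8_t zero[16] = { 0 };

	aes_encrypt_block(key_schedule, h, zero);	/* H = E_K(0^128) */
}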
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC; return 0; } @@ -3083,6 +3164,7 @@ static int safexcel_skcipher_sm4_ofb_cra_init(struct crypto_tfm *tfm) safexcel_skcipher_cra_init(tfm); ctx->alg = SAFEXCEL_SM4; + ctx->blocksz = SM4_BLOCK_SIZE; ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_OFB; return 0; } @@ -3119,6 +3201,7 @@ static int safexcel_skcipher_sm4_cfb_cra_init(struct crypto_tfm *tfm) safexcel_skcipher_cra_init(tfm); ctx->alg = SAFEXCEL_SM4; + ctx->blocksz = SM4_BLOCK_SIZE; ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CFB; return 0; } @@ -3169,6 +3252,7 @@ static int safexcel_skcipher_sm4_ctr_cra_init(struct crypto_tfm *tfm) safexcel_skcipher_cra_init(tfm); ctx->alg = SAFEXCEL_SM4; + ctx->blocksz = SM4_BLOCK_SIZE; ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; return 0; } @@ -3228,6 +3312,7 @@ static int safexcel_aead_sm4cbc_sha1_cra_init(struct crypto_tfm *tfm) safexcel_aead_cra_init(tfm); ctx->alg = SAFEXCEL_SM4; + ctx->blocksz = SM4_BLOCK_SIZE; ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1; ctx->state_sz = SHA1_DIGEST_SIZE; return 0; @@ -3335,6 +3420,7 @@ static int safexcel_aead_sm4cbc_sm3_cra_init(struct crypto_tfm *tfm) safexcel_aead_fallback_cra_init(tfm); ctx->alg = SAFEXCEL_SM4; + ctx->blocksz = SM4_BLOCK_SIZE; ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SM3; ctx->state_sz = SM3_DIGEST_SIZE; return 0; @@ -3473,6 +3559,7 @@ static int safexcel_rfc4106_gcm_cra_init(struct crypto_tfm *tfm) ret = safexcel_aead_gcm_cra_init(tfm); ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP; + ctx->aadskip = EIP197_AEAD_IPSEC_IV_SIZE; return ret; } @@ -3607,6 +3694,7 @@ static int safexcel_rfc4309_ccm_cra_init(struct crypto_tfm *tfm) ret = safexcel_aead_ccm_cra_init(tfm); ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP; + ctx->aadskip = EIP197_AEAD_IPSEC_IV_SIZE; return ret; } diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index 2134daef24f6..43962bc709c6 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -87,12 +87,14 @@ static void safexcel_hash_token(struct safexcel_command_desc *cdesc, input_length &= 15; if (unlikely(cbcmac && input_length)) { + token[0].stat = 0; token[1].opcode = EIP197_TOKEN_OPCODE_INSERT; token[1].packet_length = 16 - input_length; token[1].stat = EIP197_TOKEN_STAT_LAST_HASH; token[1].instructions = EIP197_TOKEN_INS_TYPE_HASH; } else { token[0].stat = EIP197_TOKEN_STAT_LAST_HASH; + eip197_noop_token(&token[1]); } token[2].opcode = EIP197_TOKEN_OPCODE_INSERT; @@ -101,6 +103,8 @@ static void safexcel_hash_token(struct safexcel_command_desc *cdesc, token[2].packet_length = result_length; token[2].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT | EIP197_TOKEN_INS_INSERT_HASH_DIGEST; + + eip197_noop_token(&token[3]); } static void safexcel_context_control(struct safexcel_ahash_ctx *ctx, @@ -111,6 +115,7 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx, u64 count = 0; cdesc->control_data.control0 = ctx->alg; + cdesc->control_data.control1 = 0; /* * Copy the input digest if needed, and setup the context @@ -277,7 +282,8 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, sreq->processed = sreq->block_sz; sreq->hmac = 0; - ctx->base.needs_inv = true; + if (priv->flags & EIP197_TRC_CACHE) + ctx->base.needs_inv = true; areq->nbytes = 0; safexcel_ahash_enqueue(areq); @@ -314,6 +320,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, struct safexcel_command_desc *cdesc, *first_cdesc = NULL; struct 
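/* In safexcel_hash_token() below, when a CBC-MAC input ends in a
 * partial block, token[0] loses its LAST_HASH status (token[0].stat is
 * now explicitly zeroed) and a zero-pad INSERT of 16 - input_length
 * bytes carries it instead. The pad arithmetic, standalone: */
static unsigned int cbcmac_pad(unsigned int input_length)
{
	unsigned int rem = input_length & 15;

	return rem ? 16 - rem : 0;	/* zero bytes to close the block */
}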
safexcel_result_desc *rdesc; struct scatterlist *sg; + struct safexcel_token *dmmy; int i, extra = 0, n_cdesc = 0, ret = 0, cache_len, skip = 0; u64 queued, len; @@ -397,7 +404,8 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, first_cdesc = safexcel_add_cdesc(priv, ring, 1, (cache_len == len), req->cache_dma, cache_len, - len, ctx->base.ctxr_dma); + len, ctx->base.ctxr_dma, + &dmmy); if (IS_ERR(first_cdesc)) { ret = PTR_ERR(first_cdesc); goto unmap_cache; @@ -436,7 +444,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, !(queued - sglen), sg_dma_address(sg) + skip, sglen, - len, ctx->base.ctxr_dma); + len, ctx->base.ctxr_dma, &dmmy); if (IS_ERR(cdesc)) { ret = PTR_ERR(cdesc); goto unmap_sg; @@ -1911,10 +1919,8 @@ static int safexcel_crc32_setkey(struct crypto_ahash *tfm, const u8 *key, { struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); - if (keylen != sizeof(u32)) { - crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (keylen != sizeof(u32)) return -EINVAL; - } memcpy(ctx->ipad, key, sizeof(u32)); return 0; @@ -1987,10 +1993,8 @@ static int safexcel_cbcmac_setkey(struct crypto_ahash *tfm, const u8 *key, int ret, i; ret = aes_expandkey(&aes, key, len); - if (ret) { - crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (ret) return ret; - } memset(ctx->ipad, 0, 2 * AES_BLOCK_SIZE); for (i = 0; i < len / sizeof(u32); i++) @@ -2057,18 +2061,14 @@ static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key, int ret, i; ret = aes_expandkey(&aes, key, len); - if (ret) { - crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (ret) return ret; - } /* precompute the XCBC key material */ crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK); crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) & CRYPTO_TFM_REQ_MASK); ret = crypto_cipher_setkey(ctx->kaes, key, len); - crypto_ahash_set_flags(tfm, crypto_cipher_get_flags(ctx->kaes) & - CRYPTO_TFM_RES_MASK); if (ret) return ret; @@ -2088,8 +2088,6 @@ static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key, ret = crypto_cipher_setkey(ctx->kaes, (u8 *)key_tmp + 2 * AES_BLOCK_SIZE, AES_MIN_KEY_SIZE); - crypto_ahash_set_flags(tfm, crypto_cipher_get_flags(ctx->kaes) & - CRYPTO_TFM_RES_MASK); if (ret) return ret; @@ -2160,10 +2158,8 @@ static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key, int ret, i; ret = aes_expandkey(&aes, key, len); - if (ret) { - crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (ret) return ret; - } for (i = 0; i < len / sizeof(u32); i++) ctx->ipad[i + 8] = @@ -2174,8 +2170,6 @@ static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key, crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) & CRYPTO_TFM_REQ_MASK); ret = crypto_cipher_setkey(ctx->kaes, key, len); - crypto_ahash_set_flags(tfm, crypto_cipher_get_flags(ctx->kaes) & - CRYPTO_TFM_RES_MASK); if (ret) return ret; diff --git a/drivers/crypto/inside-secure/safexcel_ring.c b/drivers/crypto/inside-secure/safexcel_ring.c index 9237ba745c2f..e454c3d44f07 100644 --- a/drivers/crypto/inside-secure/safexcel_ring.c +++ b/drivers/crypto/inside-secure/safexcel_ring.c @@ -14,6 +14,11 @@ int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv, struct safexcel_desc_ring *cdr, struct safexcel_desc_ring *rdr) { + int i; + struct safexcel_command_desc *cdesc; + dma_addr_t atok; + + /* Actual command descriptor ring 
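/* safexcel_xcbcmac_setkey() below "precomputes the XCBC key material"
 * in software. Per RFC 3566 the three subkeys are AES encryptions of
 * constant blocks; the driver then re-keys its software cipher with
 * one of the derived blocks (the second crypto_cipher_setkey() call).
 * Derivation sketch with an assumed one-block primitive: */
#include <stdint.h>
#include <string.h>

void aes_enc_block(const void *key_sched, uint8_t out[16],
		   const uint8_t in[16]);	/* assumed primitive */

static void xcbc_subkeys(const void *key_sched,
			 uint8_t k1[16], uint8_t k2[16], uint8_t k3[16])
{
	uint8_t c[16];

	memset(c, 0x01, 16); aes_enc_block(key_sched, k1, c);
	memset(c, 0x02, 16); aes_enc_block(key_sched, k2, c);
	memset(c, 0x03, 16); aes_enc_block(key_sched, k3, c);
}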
*/ cdr->offset = priv->config.cd_offset; cdr->base = dmam_alloc_coherent(priv->dev, cdr->offset * EIP197_DEFAULT_RING_SIZE, @@ -24,7 +29,34 @@ int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv, cdr->base_end = cdr->base + cdr->offset * (EIP197_DEFAULT_RING_SIZE - 1); cdr->read = cdr->base; + /* Command descriptor shadow ring for storing additional token data */ + cdr->shoffset = priv->config.cdsh_offset; + cdr->shbase = dmam_alloc_coherent(priv->dev, + cdr->shoffset * + EIP197_DEFAULT_RING_SIZE, + &cdr->shbase_dma, GFP_KERNEL); + if (!cdr->shbase) + return -ENOMEM; + cdr->shwrite = cdr->shbase; + cdr->shbase_end = cdr->shbase + cdr->shoffset * + (EIP197_DEFAULT_RING_SIZE - 1); + + /* + * Populate command descriptors with physical pointers to shadow descs. + * Note that we only need to do this once if we don't overwrite them. + */ + cdesc = cdr->base; + atok = cdr->shbase_dma; + for (i = 0; i < EIP197_DEFAULT_RING_SIZE; i++) { + cdesc->atok_lo = lower_32_bits(atok); + cdesc->atok_hi = upper_32_bits(atok); + cdesc = (void *)cdesc + cdr->offset; + atok += cdr->shoffset; + } + rdr->offset = priv->config.rd_offset; + /* Use shoffset for result token offset here */ + rdr->shoffset = priv->config.res_offset; rdr->base = dmam_alloc_coherent(priv->dev, rdr->offset * EIP197_DEFAULT_RING_SIZE, &rdr->base_dma, GFP_KERNEL); @@ -42,11 +74,40 @@ inline int safexcel_select_ring(struct safexcel_crypto_priv *priv) return (atomic_inc_return(&priv->ring_used) % priv->config.rings); } -static void *safexcel_ring_next_wptr(struct safexcel_crypto_priv *priv, - struct safexcel_desc_ring *ring) +static void *safexcel_ring_next_cwptr(struct safexcel_crypto_priv *priv, + struct safexcel_desc_ring *ring, + bool first, + struct safexcel_token **atoken) { void *ptr = ring->write; + if (first) + *atoken = ring->shwrite; + + if ((ring->write == ring->read - ring->offset) || + (ring->read == ring->base && ring->write == ring->base_end)) + return ERR_PTR(-ENOMEM); + + if (ring->write == ring->base_end) { + ring->write = ring->base; + ring->shwrite = ring->shbase; + } else { + ring->write += ring->offset; + ring->shwrite += ring->shoffset; + } + + return ptr; +} + +static void *safexcel_ring_next_rwptr(struct safexcel_crypto_priv *priv, + struct safexcel_desc_ring *ring, + struct result_data_desc **rtoken) +{ + void *ptr = ring->write; + + /* Result token at relative offset shoffset */ + *rtoken = ring->write + ring->shoffset; + if ((ring->write == ring->read - ring->offset) || (ring->read == ring->base && ring->write == ring->base_end)) return ERR_PTR(-ENOMEM); @@ -106,10 +167,13 @@ void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv, if (ring->write == ring->read) return; - if (ring->write == ring->base) + if (ring->write == ring->base) { ring->write = ring->base_end; - else + ring->shwrite = ring->shbase_end; + } else { ring->write -= ring->offset; + ring->shwrite -= ring->shoffset; + } } struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv, @@ -117,26 +181,26 @@ struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *pr bool first, bool last, dma_addr_t data, u32 data_len, u32 full_data_len, - dma_addr_t context) { + dma_addr_t context, + struct safexcel_token **atoken) +{ struct safexcel_command_desc *cdesc; - int i; - cdesc = safexcel_ring_next_wptr(priv, &priv->ring[ring_id].cdr); + cdesc = safexcel_ring_next_cwptr(priv, &priv->ring[ring_id].cdr, + first, atoken); if (IS_ERR(cdesc)) return cdesc; - memset(cdesc, 0, sizeof(struct 
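/* Because each command descriptor's atok_lo/atok_hi permanently points
 * at the matching slot of the shadow ring, the init loop above writes
 * the pointers exactly once. The stride walk, standalone with stub
 * types (only the pointer arithmetic matters here): */
#include <stdint.h>

struct cdesc_stub { uint32_t atok_lo, atok_hi; };

static void link_shadow(void *cdr_base, unsigned int cd_off,
			uint64_t shadow_dma, unsigned int sh_off,
			unsigned int ring_size)
{
	struct cdesc_stub *cd = cdr_base;
	uint64_t atok = shadow_dma;
	unsigned int i;

	for (i = 0; i < ring_size; i++) {
		cd->atok_lo = (uint32_t)atok;		/* lower_32_bits() */
		cd->atok_hi = (uint32_t)(atok >> 32);	/* upper_32_bits() */
		cd = (void *)((char *)cd + cd_off);	/* next cmd desc */
		atok += sh_off;				/* matching slot */
	}
}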
safexcel_command_desc)); - - cdesc->first_seg = first; - cdesc->last_seg = last; cdesc->particle_size = data_len; + cdesc->rsvd0 = 0; + cdesc->last_seg = last; + cdesc->first_seg = first; + cdesc->additional_cdata_size = 0; + cdesc->rsvd1 = 0; cdesc->data_lo = lower_32_bits(data); cdesc->data_hi = upper_32_bits(data); - if (first && context) { - struct safexcel_token *token = - (struct safexcel_token *)cdesc->control_data.token; - + if (first) { /* * Note that the length here MUST be >0 or else the EIP(1)97 * may hang. Newer EIP197 firmware actually incorporates this @@ -146,20 +210,12 @@ struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *pr cdesc->control_data.packet_length = full_data_len ?: 1; cdesc->control_data.options = EIP197_OPTION_MAGIC_VALUE | EIP197_OPTION_64BIT_CTX | - EIP197_OPTION_CTX_CTRL_IN_CMD; - cdesc->control_data.context_lo = - (lower_32_bits(context) & GENMASK(31, 2)) >> 2; + EIP197_OPTION_CTX_CTRL_IN_CMD | + EIP197_OPTION_RC_AUTO; + cdesc->control_data.type = EIP197_TYPE_BCLA; + cdesc->control_data.context_lo = lower_32_bits(context) | + EIP197_CONTEXT_SMALL; cdesc->control_data.context_hi = upper_32_bits(context); - - if (priv->version == EIP197B_MRVL || - priv->version == EIP197D_MRVL) - cdesc->control_data.options |= EIP197_OPTION_RC_AUTO; - - /* TODO: large xform HMAC with SHA-384/512 uses refresh = 3 */ - cdesc->control_data.refresh = 2; - - for (i = 0; i < EIP197_MAX_TOKENS; i++) - eip197_noop_token(&token[i]); } return cdesc; @@ -171,19 +227,27 @@ struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *pri dma_addr_t data, u32 len) { struct safexcel_result_desc *rdesc; + struct result_data_desc *rtoken; - rdesc = safexcel_ring_next_wptr(priv, &priv->ring[ring_id].rdr); + rdesc = safexcel_ring_next_rwptr(priv, &priv->ring[ring_id].rdr, + &rtoken); if (IS_ERR(rdesc)) return rdesc; - memset(rdesc, 0, sizeof(struct safexcel_result_desc)); - - rdesc->first_seg = first; + rdesc->particle_size = len; + rdesc->rsvd0 = 0; + rdesc->descriptor_overflow = 0; + rdesc->buffer_overflow = 0; rdesc->last_seg = last; + rdesc->first_seg = first; rdesc->result_size = EIP197_RD64_RESULT_SIZE; - rdesc->particle_size = len; + rdesc->rsvd1 = 0; rdesc->data_lo = lower_32_bits(data); rdesc->data_hi = upper_32_bits(data); + /* Clear length & error code in result token */ + rtoken->packet_length = 0; + rtoken->error_code = 0; + return rdesc; } diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index 391e3b4df364..ad73fc946682 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c @@ -740,7 +740,7 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt, u32 keylen_cfg = 0; struct ix_sa_dir *dir; struct ixp_ctx *ctx = crypto_tfm_ctx(tfm); - u32 *flags = &tfm->crt_flags; + int err; dir = encrypt ? 
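/* context_lo above now carries the full low word of the context DMA
 * address with a size code OR'd into its two least-significant bits
 * (EIP197_CONTEXT_SMALL), replacing the old right-shifted-by-2 form;
 * this presumes the record is at least 4-byte aligned so those bits
 * are free. Sketch of the packing only: */
#include <stdint.h>

static uint32_t ctx_lo_word(uint64_t ctx_dma, uint32_t size_code)
{
	/* bits 1:0 of an aligned pointer are reused as the size code */
	return (uint32_t)ctx_dma | (size_code & 0x3u);
}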
&ctx->encrypt : &ctx->decrypt; cinfo = dir->npe_ctx; @@ -757,12 +757,13 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt, case 24: keylen_cfg = MOD_AES192; break; case 32: keylen_cfg = MOD_AES256; break; default: - *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; return -EINVAL; } cipher_cfg |= keylen_cfg; } else { - crypto_des_verify_key(tfm, key); + err = crypto_des_verify_key(tfm, key); + if (err) + return err; } /* write cfg word to cryptinfo */ *(u32*)cinfo = cpu_to_be32(cipher_cfg); @@ -819,7 +820,6 @@ static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int key_len) { struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm); - u32 *flags = &tfm->base.crt_flags; int ret; init_completion(&ctx->completion); @@ -835,16 +835,6 @@ static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key, if (ret) goto out; ret = setup_cipher(&tfm->base, 1, key, key_len); - if (ret) - goto out; - - if (*flags & CRYPTO_TFM_RES_WEAK_KEY) { - if (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) { - ret = -EINVAL; - } else { - *flags &= ~CRYPTO_TFM_RES_WEAK_KEY; - } - } out: if (!atomic_dec_and_test(&ctx->configuring)) wait_for_completion(&ctx->completion); @@ -1096,7 +1086,6 @@ free_buf_src: static int aead_setup(struct crypto_aead *tfm, unsigned int authsize) { struct ixp_ctx *ctx = crypto_aead_ctx(tfm); - u32 *flags = &tfm->base.crt_flags; unsigned digest_len = crypto_aead_maxauthsize(tfm); int ret; @@ -1120,17 +1109,6 @@ static int aead_setup(struct crypto_aead *tfm, unsigned int authsize) goto out; ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey, ctx->authkey_len, digest_len); - if (ret) - goto out; - - if (*flags & CRYPTO_TFM_RES_WEAK_KEY) { - if (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) { - ret = -EINVAL; - goto out; - } else { - *flags &= ~CRYPTO_TFM_RES_WEAK_KEY; - } - } out: if (!atomic_dec_and_test(&ctx->configuring)) wait_for_completion(&ctx->completion); @@ -1169,7 +1147,6 @@ static int aead_setkey(struct crypto_aead *tfm, const u8 *key, memzero_explicit(&keys, sizeof(keys)); return aead_setup(tfm, crypto_aead_authsize(tfm)); badkey: - crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); memzero_explicit(&keys, sizeof(keys)); return -EINVAL; } diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c index d8e8c857770c..c24f34a48cef 100644 --- a/drivers/crypto/marvell/cipher.c +++ b/drivers/crypto/marvell/cipher.c @@ -255,10 +255,8 @@ static int mv_cesa_aes_setkey(struct crypto_skcipher *cipher, const u8 *key, int i; ret = aes_expandkey(&ctx->aes, key, len); - if (ret) { - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (ret) return ret; - } remaining = (ctx->aes.key_length - 16) / 4; offset = ctx->aes.key_length + 24 - remaining; diff --git a/drivers/crypto/mediatek/mtk-aes.c b/drivers/crypto/mediatek/mtk-aes.c index 90880a81c534..78d660d963e2 100644 --- a/drivers/crypto/mediatek/mtk-aes.c +++ b/drivers/crypto/mediatek/mtk-aes.c @@ -652,7 +652,6 @@ static int mtk_aes_setkey(struct crypto_skcipher *tfm, break; default: - crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } @@ -1022,7 +1021,6 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key, break; default: - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } @@ -1033,8 +1031,6 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key, crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) & CRYPTO_TFM_REQ_MASK); err = crypto_skcipher_setkey(ctr, key, keylen); - crypto_aead_set_flags(aead, 
crypto_skcipher_get_flags(ctr) & - CRYPTO_TFM_RES_MASK); if (err) return err; diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c index f438b425c655..435ac1c83df9 100644 --- a/drivers/crypto/mxs-dcp.c +++ b/drivers/crypto/mxs-dcp.c @@ -492,7 +492,6 @@ static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int len) { struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm); - unsigned int ret; /* * AES 128 is supposed by the hardware, store key into temporary @@ -513,16 +512,7 @@ static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, crypto_sync_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK); crypto_sync_skcipher_set_flags(actx->fallback, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); - - ret = crypto_sync_skcipher_setkey(actx->fallback, key, len); - if (!ret) - return 0; - - tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK; - tfm->base.crt_flags |= crypto_sync_skcipher_get_flags(actx->fallback) & - CRYPTO_TFM_RES_MASK; - - return ret; + return crypto_sync_skcipher_setkey(actx->fallback, key, len); } static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm) diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index 63bd565048f4..f5c468f2cc82 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c @@ -746,7 +746,6 @@ static int n2_aes_setkey(struct crypto_skcipher *skcipher, const u8 *key, ctx->enc_type |= ENC_TYPE_ALG_AES256; break; default: - crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } diff --git a/drivers/crypto/omap-aes-gcm.c b/drivers/crypto/omap-aes-gcm.c index 9bbedbccfadf..32dc00dc570b 100644 --- a/drivers/crypto/omap-aes-gcm.c +++ b/drivers/crypto/omap-aes-gcm.c @@ -13,6 +13,7 @@ #include <linux/dmaengine.h> #include <linux/omap-dma.h> #include <linux/interrupt.h> +#include <linux/pm_runtime.h> #include <crypto/aes.h> #include <crypto/gcm.h> #include <crypto/scatterwalk.h> @@ -29,11 +30,13 @@ static void omap_aes_gcm_finish_req(struct omap_aes_dev *dd, int ret) { struct aead_request *req = dd->aead_req; - dd->flags &= ~FLAGS_BUSY; dd->in_sg = NULL; dd->out_sg = NULL; - req->base.complete(&req->base, ret); + crypto_finalize_aead_request(dd->engine, req, ret); + + pm_runtime_mark_last_busy(dd->dev); + pm_runtime_put_autosuspend(dd->dev); } static void omap_aes_gcm_done_task(struct omap_aes_dev *dd) @@ -81,7 +84,6 @@ static void omap_aes_gcm_done_task(struct omap_aes_dev *dd) } omap_aes_gcm_finish_req(dd, ret); - omap_aes_gcm_handle_queue(dd, NULL); } static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd, @@ -120,11 +122,16 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd, OMAP_CRYPTO_FORCE_SINGLE_ENTRY, FLAGS_ASSOC_DATA_ST_SHIFT, &dd->flags); + if (ret) + return ret; } if (cryptlen) { tmp = scatterwalk_ffwd(sg_arr, req->src, req->assoclen); + if (nsg) + sg_unmark_end(dd->in_sgl); + ret = omap_crypto_align_sg(&tmp, cryptlen, AES_BLOCK_SIZE, &dd->in_sgl[nsg], OMAP_CRYPTO_COPY_DATA | @@ -132,6 +139,8 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd, OMAP_CRYPTO_FORCE_SINGLE_ENTRY, FLAGS_IN_DATA_ST_SHIFT, &dd->flags); + if (ret) + return ret; } dd->in_sg = dd->in_sgl; @@ -142,18 +151,20 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd, dd->out_sg = req->dst; dd->orig_out = req->dst; - dd->out_sg = scatterwalk_ffwd(sg_arr, req->dst, assoclen); + dd->out_sg = scatterwalk_ffwd(sg_arr, req->dst, req->assoclen); flags = 0; if (req->src == req->dst || dd->out_sg == sg_arr) flags |= OMAP_CRYPTO_FORCE_COPY; - 
ret = omap_crypto_align_sg(&dd->out_sg, cryptlen, - AES_BLOCK_SIZE, &dd->out_sgl, - flags, - FLAGS_OUT_DATA_ST_SHIFT, &dd->flags); - if (ret) - return ret; + if (cryptlen) { + ret = omap_crypto_align_sg(&dd->out_sg, cryptlen, + AES_BLOCK_SIZE, &dd->out_sgl, + flags, + FLAGS_OUT_DATA_ST_SHIFT, &dd->flags); + if (ret) + return ret; + } dd->in_sg_len = sg_nents_for_len(dd->in_sg, alen + clen); dd->out_sg_len = sg_nents_for_len(dd->out_sg, clen); @@ -161,62 +172,12 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd, return 0; } -static void omap_aes_gcm_complete(struct crypto_async_request *req, int err) -{ - struct omap_aes_gcm_result *res = req->data; - - if (err == -EINPROGRESS) - return; - - res->err = err; - complete(&res->completion); -} - static int do_encrypt_iv(struct aead_request *req, u32 *tag, u32 *iv) { - struct scatterlist iv_sg, tag_sg; - struct skcipher_request *sk_req; - struct omap_aes_gcm_result result; - struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); - int ret = 0; - - sk_req = skcipher_request_alloc(ctx->ctr, GFP_KERNEL); - if (!sk_req) { - pr_err("skcipher: Failed to allocate request\n"); - return -ENOMEM; - } - - init_completion(&result.completion); - - sg_init_one(&iv_sg, iv, AES_BLOCK_SIZE); - sg_init_one(&tag_sg, tag, AES_BLOCK_SIZE); - skcipher_request_set_callback(sk_req, CRYPTO_TFM_REQ_MAY_BACKLOG, - omap_aes_gcm_complete, &result); - ret = crypto_skcipher_setkey(ctx->ctr, (u8 *)ctx->key, ctx->keylen); - skcipher_request_set_crypt(sk_req, &iv_sg, &tag_sg, AES_BLOCK_SIZE, - NULL); - ret = crypto_skcipher_encrypt(sk_req); - switch (ret) { - case 0: - break; - case -EINPROGRESS: - case -EBUSY: - ret = wait_for_completion_interruptible(&result.completion); - if (!ret) { - ret = result.err; - if (!ret) { - reinit_completion(&result.completion); - break; - } - } - /* fall through */ - default: - pr_err("Encryption of IV failed for GCM mode\n"); - break; - } + struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); - skcipher_request_free(sk_req); - return ret; + aes_encrypt(&ctx->actx, (u8 *)tag, (u8 *)iv); + return 0; } void omap_aes_gcm_dma_out_callback(void *data) @@ -246,37 +207,21 @@ void omap_aes_gcm_dma_out_callback(void *data) static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd, struct aead_request *req) { - struct omap_aes_ctx *ctx; - struct aead_request *backlog; - struct omap_aes_reqctx *rctx; - unsigned long flags; - int err, ret = 0; - - spin_lock_irqsave(&dd->lock, flags); if (req) - ret = aead_enqueue_request(&dd->aead_queue, req); - if (dd->flags & FLAGS_BUSY) { - spin_unlock_irqrestore(&dd->lock, flags); - return ret; - } - - backlog = aead_get_backlog(&dd->aead_queue); - req = aead_dequeue_request(&dd->aead_queue); - if (req) - dd->flags |= FLAGS_BUSY; - spin_unlock_irqrestore(&dd->lock, flags); - - if (!req) - return ret; + return crypto_transfer_aead_request_to_engine(dd->engine, req); - if (backlog) - backlog->base.complete(&backlog->base, -EINPROGRESS); + return 0; +} - ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); - rctx = aead_request_ctx(req); +static int omap_aes_gcm_prepare_req(struct crypto_engine *engine, void *areq) +{ + struct aead_request *req = container_of(areq, struct aead_request, + base); + struct omap_aes_reqctx *rctx = aead_request_ctx(req); + struct omap_aes_dev *dd = rctx->dd; + struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); + int err; - dd->ctx = ctx; - rctx->dd = dd; dd->aead_req = req; rctx->mode &= FLAGS_MODE_MASK; @@ -286,16 +231,9 @@ 
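/* do_encrypt_iv() above now reduces to one synchronous aes_encrypt()
 * library call computing E_K(J0). For reference, GCM's transmitted tag
 * is GHASH xor E_K(J0); the sketch below shows that final step with an
 * assumed one-block AES primitive (the XOR half is assumed to happen
 * elsewhere in the driver): */
#include <stdint.h>

void aes_encrypt_block(const void *key_schedule, uint8_t out[16],
		       const uint8_t in[16]);	/* assumed primitive */

static void gcm_final_tag(const void *key_schedule, const uint8_t j0[16],
			  const uint8_t ghash[16], uint8_t tag[16])
{
	uint8_t ek_j0[16];
	int i;

	aes_encrypt_block(key_schedule, ek_j0, j0);
	for (i = 0; i < 16; i++)
		tag[i] = ghash[i] ^ ek_j0[i];	/* T = GHASH ^ E_K(J0) */
}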
static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd, if (err) return err; - err = omap_aes_write_ctrl(dd); - if (!err) - err = omap_aes_crypt_dma_start(dd); + dd->ctx = &ctx->octx; - if (err) { - omap_aes_gcm_finish_req(dd, err); - omap_aes_gcm_handle_queue(dd, NULL); - } - - return ret; + return omap_aes_write_ctrl(dd); } static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode) @@ -350,36 +288,39 @@ int omap_aes_gcm_decrypt(struct aead_request *req) int omap_aes_4106gcm_encrypt(struct aead_request *req) { - struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); + struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); struct omap_aes_reqctx *rctx = aead_request_ctx(req); - memcpy(rctx->iv, ctx->nonce, 4); + memcpy(rctx->iv, ctx->octx.nonce, 4); memcpy(rctx->iv + 4, req->iv, 8); - return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM | + return crypto_ipsec_check_assoclen(req->assoclen) ?: + omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM | FLAGS_RFC4106_GCM); } int omap_aes_4106gcm_decrypt(struct aead_request *req) { - struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); + struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); struct omap_aes_reqctx *rctx = aead_request_ctx(req); - memcpy(rctx->iv, ctx->nonce, 4); + memcpy(rctx->iv, ctx->octx.nonce, 4); memcpy(rctx->iv + 4, req->iv, 8); - return omap_aes_gcm_crypt(req, FLAGS_GCM | FLAGS_RFC4106_GCM); + return crypto_ipsec_check_assoclen(req->assoclen) ?: + omap_aes_gcm_crypt(req, FLAGS_GCM | FLAGS_RFC4106_GCM); } int omap_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) { - struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm); + struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm); + int ret; - if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && - keylen != AES_KEYSIZE_256) - return -EINVAL; + ret = aes_expandkey(&ctx->actx, key, keylen); + if (ret) + return ret; - memcpy(ctx->key, key, keylen); - ctx->keylen = keylen; + memcpy(ctx->octx.key, key, keylen); + ctx->octx.keylen = keylen; return 0; } @@ -387,19 +328,63 @@ int omap_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key, int omap_aes_4106gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) { - struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm); + struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm); + int ret; if (keylen < 4) return -EINVAL; - keylen -= 4; - if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && - keylen != AES_KEYSIZE_256) - return -EINVAL; - memcpy(ctx->key, key, keylen); - memcpy(ctx->nonce, key + keylen, 4); - ctx->keylen = keylen; + ret = aes_expandkey(&ctx->actx, key, keylen); + if (ret) + return ret; + + memcpy(ctx->octx.key, key, keylen); + memcpy(ctx->octx.nonce, key + keylen, 4); + ctx->octx.keylen = keylen; + + return 0; +} + +int omap_aes_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) +{ + return crypto_gcm_check_authsize(authsize); +} + +int omap_aes_4106gcm_setauthsize(struct crypto_aead *parent, + unsigned int authsize) +{ + return crypto_rfc4106_check_authsize(authsize); +} + +static int omap_aes_gcm_crypt_req(struct crypto_engine *engine, void *areq) +{ + struct aead_request *req = container_of(areq, struct aead_request, + base); + struct omap_aes_reqctx *rctx = aead_request_ctx(req); + struct omap_aes_dev *dd = rctx->dd; + int ret = 0; + + if (!dd) + return -ENODEV; + + if (dd->in_sg_len) + ret = omap_aes_crypt_dma_start(dd); + else + omap_aes_gcm_dma_out_callback(dd); + + return 
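/* omap_aes_4106gcm_{en,de}crypt() above assemble the GCM IV from the
 * 4-byte salt saved at setkey time (ctx->octx.nonce) and the 8-byte
 * explicit IV carried in the request, per RFC 4106. Standalone: */
#include <stdint.h>
#include <string.h>

static void rfc4106_build_iv(uint8_t iv_out[12], const uint8_t salt[4],
			     const uint8_t explicit_iv[8])
{
	memcpy(iv_out, salt, 4);		/* tail of the key blob */
	memcpy(iv_out + 4, explicit_iv, 8);	/* per-request, req->iv */
}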
ret; +} + +int omap_aes_gcm_cra_init(struct crypto_aead *tfm) +{ + struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm); + + ctx->enginectx.op.prepare_request = omap_aes_gcm_prepare_req; + ctx->enginectx.op.unprepare_request = NULL; + ctx->enginectx.op.do_one_request = omap_aes_gcm_crypt_req; + + crypto_aead_set_reqsize(tfm, sizeof(struct omap_aes_reqctx)); return 0; } diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index a1fc03ed01f3..824ddf2a66ff 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -269,13 +269,14 @@ static int omap_aes_crypt_dma(struct omap_aes_dev *dd, struct scatterlist *out_sg, int in_sg_len, int out_sg_len) { - struct dma_async_tx_descriptor *tx_in, *tx_out; + struct dma_async_tx_descriptor *tx_in, *tx_out = NULL, *cb_desc; struct dma_slave_config cfg; int ret; if (dd->pio_only) { scatterwalk_start(&dd->in_walk, dd->in_sg); - scatterwalk_start(&dd->out_walk, dd->out_sg); + if (out_sg_len) + scatterwalk_start(&dd->out_walk, dd->out_sg); /* Enable DATAIN interrupt and let it take care of the rest */ @@ -312,34 +313,45 @@ static int omap_aes_crypt_dma(struct omap_aes_dev *dd, /* No callback necessary */ tx_in->callback_param = dd; + tx_in->callback = NULL; /* OUT */ - ret = dmaengine_slave_config(dd->dma_lch_out, &cfg); - if (ret) { - dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n", - ret); - return ret; - } + if (out_sg_len) { + ret = dmaengine_slave_config(dd->dma_lch_out, &cfg); + if (ret) { + dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n", + ret); + return ret; + } - tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, out_sg_len, - DMA_DEV_TO_MEM, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); - if (!tx_out) { - dev_err(dd->dev, "OUT prep_slave_sg() failed\n"); - return -EINVAL; + tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, + out_sg_len, + DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!tx_out) { + dev_err(dd->dev, "OUT prep_slave_sg() failed\n"); + return -EINVAL; + } + + cb_desc = tx_out; + } else { + cb_desc = tx_in; } if (dd->flags & FLAGS_GCM) - tx_out->callback = omap_aes_gcm_dma_out_callback; + cb_desc->callback = omap_aes_gcm_dma_out_callback; else - tx_out->callback = omap_aes_dma_out_callback; - tx_out->callback_param = dd; + cb_desc->callback = omap_aes_dma_out_callback; + cb_desc->callback_param = dd; + dmaengine_submit(tx_in); - dmaengine_submit(tx_out); + if (tx_out) + dmaengine_submit(tx_out); dma_async_issue_pending(dd->dma_lch_in); - dma_async_issue_pending(dd->dma_lch_out); + if (out_sg_len) + dma_async_issue_pending(dd->dma_lch_out); /* start DMA */ dd->pdata->trigger(dd, dd->total); @@ -361,11 +373,13 @@ int omap_aes_crypt_dma_start(struct omap_aes_dev *dd) return -EINVAL; } - err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len, - DMA_FROM_DEVICE); - if (!err) { - dev_err(dd->dev, "dma_map_sg() error\n"); - return -EINVAL; + if (dd->out_sg_len) { + err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len, + DMA_FROM_DEVICE); + if (!err) { + dev_err(dd->dev, "dma_map_sg() error\n"); + return -EINVAL; + } } } @@ -373,8 +387,9 @@ int omap_aes_crypt_dma_start(struct omap_aes_dev *dd) dd->out_sg_len); if (err && !dd->pio_only) { dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE); - dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, - DMA_FROM_DEVICE); + if (dd->out_sg_len) + dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, + DMA_FROM_DEVICE); } return err; @@ -479,6 +494,14 @@ static int omap_aes_crypt_req(struct crypto_engine *engine, return 
omap_aes_crypt_dma_start(dd); } +static void omap_aes_copy_ivout(struct omap_aes_dev *dd, u8 *ivbuf) +{ + int i; + + for (i = 0; i < 4; i++) + ((u32 *)ivbuf)[i] = omap_aes_read(dd, AES_REG_IV(dd, i)); +} + static void omap_aes_done_task(unsigned long data) { struct omap_aes_dev *dd = (struct omap_aes_dev *)data; @@ -494,12 +517,16 @@ static void omap_aes_done_task(unsigned long data) omap_aes_crypt_dma_stop(dd); } - omap_crypto_cleanup(dd->in_sgl, NULL, 0, dd->total_save, + omap_crypto_cleanup(dd->in_sg, NULL, 0, dd->total_save, FLAGS_IN_DATA_ST_SHIFT, dd->flags); - omap_crypto_cleanup(&dd->out_sgl, dd->orig_out, 0, dd->total_save, + omap_crypto_cleanup(dd->out_sg, dd->orig_out, 0, dd->total_save, FLAGS_OUT_DATA_ST_SHIFT, dd->flags); + /* Update IV output */ + if (dd->flags & (FLAGS_CBC | FLAGS_CTR)) + omap_aes_copy_ivout(dd, dd->req->iv); + omap_aes_finish_req(dd, 0); pr_debug("exit\n"); @@ -513,6 +540,9 @@ static int omap_aes_crypt(struct skcipher_request *req, unsigned long mode) struct omap_aes_dev *dd; int ret; + if ((req->cryptlen % AES_BLOCK_SIZE) && !(mode & FLAGS_CTR)) + return -EINVAL; + pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->cryptlen, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC)); @@ -627,36 +657,6 @@ static int omap_aes_init_tfm(struct crypto_skcipher *tfm) return 0; } -static int omap_aes_gcm_cra_init(struct crypto_aead *tfm) -{ - struct omap_aes_dev *dd = NULL; - struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm); - int err; - - /* Find AES device, currently picks the first device */ - spin_lock_bh(&list_lock); - list_for_each_entry(dd, &dev_list, list) { - break; - } - spin_unlock_bh(&list_lock); - - err = pm_runtime_get_sync(dd->dev); - if (err < 0) { - dev_err(dd->dev, "%s: failed to get_sync(%d)\n", - __func__, err); - return err; - } - - tfm->reqsize = sizeof(struct omap_aes_reqctx); - ctx->ctr = crypto_alloc_skcipher("ecb(aes)", 0, 0); - if (IS_ERR(ctx->ctr)) { - pr_warn("could not load aes driver for encrypting IV\n"); - return PTR_ERR(ctx->ctr); - } - - return 0; -} - static void omap_aes_exit_tfm(struct crypto_skcipher *tfm) { struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm); @@ -667,19 +667,6 @@ static void omap_aes_exit_tfm(struct crypto_skcipher *tfm) ctx->fallback = NULL; } -static void omap_aes_gcm_cra_exit(struct crypto_aead *tfm) -{ - struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm); - - if (ctx->fallback) - crypto_free_sync_skcipher(ctx->fallback); - - ctx->fallback = NULL; - - if (ctx->ctr) - crypto_free_skcipher(ctx->ctr); -} - /* ********************** ALGS ************************************ */ static struct skcipher_alg algs_ecb_cbc[] = { @@ -732,7 +719,7 @@ static struct skcipher_alg algs_ctr[] = { .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, - .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct omap_aes_ctx), .base.cra_module = THIS_MODULE, @@ -763,15 +750,15 @@ static struct aead_alg algs_aead_gcm[] = { .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct omap_aes_ctx), + .cra_ctxsize = sizeof(struct omap_aes_gcm_ctx), .cra_alignmask = 0xf, .cra_module = THIS_MODULE, }, .init = omap_aes_gcm_cra_init, - .exit = omap_aes_gcm_cra_exit, .ivsize = GCM_AES_IV_SIZE, .maxauthsize = AES_BLOCK_SIZE, .setkey = omap_aes_gcm_setkey, + .setauthsize = omap_aes_gcm_setauthsize, .encrypt = omap_aes_gcm_encrypt, .decrypt = omap_aes_gcm_decrypt, }, @@ -783,15 +770,15 @@ static struct aead_alg algs_aead_gcm[] 
= { .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct omap_aes_ctx), + .cra_ctxsize = sizeof(struct omap_aes_gcm_ctx), .cra_alignmask = 0xf, .cra_module = THIS_MODULE, }, .init = omap_aes_gcm_cra_init, - .exit = omap_aes_gcm_cra_exit, .maxauthsize = AES_BLOCK_SIZE, .ivsize = GCM_RFC4106_IV_SIZE, .setkey = omap_aes_4106gcm_setkey, + .setauthsize = omap_aes_4106gcm_setauthsize, .encrypt = omap_aes_4106gcm_encrypt, .decrypt = omap_aes_4106gcm_decrypt, }, @@ -1296,7 +1283,8 @@ static int omap_aes_remove(struct platform_device *pdev) tasklet_kill(&dd->done_task); omap_aes_dma_cleanup(dd); pm_runtime_disable(dd->dev); - dd = NULL; + + sysfs_remove_group(&dd->dev->kobj, &omap_aes_attr_group); return 0; } diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h index 2d3575231e31..2d111bf906e1 100644 --- a/drivers/crypto/omap-aes.h +++ b/drivers/crypto/omap-aes.h @@ -9,6 +9,7 @@ #ifndef __OMAP_AES_H__ #define __OMAP_AES_H__ +#include <crypto/aes.h> #include <crypto/engine.h> #define DST_MAXBURST 4 @@ -79,7 +80,6 @@ #define FLAGS_INIT BIT(5) #define FLAGS_FAST BIT(6) -#define FLAGS_BUSY BIT(7) #define FLAGS_IN_DATA_ST_SHIFT 8 #define FLAGS_OUT_DATA_ST_SHIFT 10 @@ -98,7 +98,11 @@ struct omap_aes_ctx { u32 key[AES_KEYSIZE_256 / sizeof(u32)]; u8 nonce[4]; struct crypto_sync_skcipher *fallback; - struct crypto_skcipher *ctr; +}; + +struct omap_aes_gcm_ctx { + struct omap_aes_ctx octx; + struct crypto_aes_ctx actx; }; struct omap_aes_reqctx { @@ -202,8 +206,12 @@ int omap_aes_4106gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen); int omap_aes_gcm_encrypt(struct aead_request *req); int omap_aes_gcm_decrypt(struct aead_request *req); +int omap_aes_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize); int omap_aes_4106gcm_encrypt(struct aead_request *req); int omap_aes_4106gcm_decrypt(struct aead_request *req); +int omap_aes_4106gcm_setauthsize(struct crypto_aead *parent, + unsigned int authsize); +int omap_aes_gcm_cra_init(struct crypto_aead *tfm); int omap_aes_write_ctrl(struct omap_aes_dev *dd); int omap_aes_crypt_dma_start(struct omap_aes_dev *dd); int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd); diff --git a/drivers/crypto/omap-crypto.c b/drivers/crypto/omap-crypto.c index 7d592d93bb1c..cc88b7362bc2 100644 --- a/drivers/crypto/omap-crypto.c +++ b/drivers/crypto/omap-crypto.c @@ -154,6 +154,41 @@ int omap_crypto_align_sg(struct scatterlist **sg, int total, int bs, } EXPORT_SYMBOL_GPL(omap_crypto_align_sg); +static void omap_crypto_copy_data(struct scatterlist *src, + struct scatterlist *dst, + int offset, int len) +{ + int amt; + void *srcb, *dstb; + int srco = 0, dsto = offset; + + while (src && dst && len) { + if (srco >= src->length) { + srco -= src->length; + src = sg_next(src); + continue; + } + + if (dsto >= dst->length) { + dsto -= dst->length; + dst = sg_next(dst); + continue; + } + + amt = min(src->length - srco, dst->length - dsto); + amt = min(len, amt); + + srcb = sg_virt(src) + srco; + dstb = sg_virt(dst) + dsto; + + memcpy(dstb, srcb, amt); + + srco += amt; + dsto += amt; + len -= amt; + } +} + void omap_crypto_cleanup(struct scatterlist *sg, struct scatterlist *orig, int offset, int len, u8 flags_shift, unsigned long flags) @@ -171,7 +206,7 @@ void omap_crypto_cleanup(struct scatterlist *sg, struct scatterlist *orig, pages = get_order(len); if (orig && (flags & OMAP_CRYPTO_COPY_MASK)) - scatterwalk_map_and_copy(buf, orig, offset, len, 1); + omap_crypto_copy_data(sg, orig, offset, 
len); if (flags & OMAP_CRYPTO_DATA_COPIED) free_pages((unsigned long)buf, pages); diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c index 4c4dbc2b377e..8eda43319204 100644 --- a/drivers/crypto/omap-des.c +++ b/drivers/crypto/omap-des.c @@ -597,6 +597,7 @@ static int omap_des_crypt_req(struct crypto_engine *engine, static void omap_des_done_task(unsigned long data) { struct omap_des_dev *dd = (struct omap_des_dev *)data; + int i; pr_debug("enter done_task\n"); @@ -615,6 +616,11 @@ static void omap_des_done_task(unsigned long data) omap_crypto_cleanup(&dd->out_sgl, dd->orig_out, 0, dd->total_save, FLAGS_OUT_DATA_ST_SHIFT, dd->flags); + if ((dd->flags & FLAGS_CBC) && dd->req->iv) + for (i = 0; i < 2; i++) + ((u32 *)dd->req->iv)[i] = + omap_des_read(dd, DES_REG_IV(dd, i)); + omap_des_finish_req(dd, 0); pr_debug("exit\n"); @@ -631,10 +637,11 @@ static int omap_des_crypt(struct skcipher_request *req, unsigned long mode) !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC)); - if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) { - pr_err("request size is not exact amount of DES blocks\n"); + if (!req->cryptlen) + return 0; + + if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) return -EINVAL; - } dd = omap_des_find_dev(ctx); if (!dd) diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index ac80bc6af093..4f915a4ef5b0 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -112,6 +112,8 @@ #define FLAGS_BE32_SHA1 8 #define FLAGS_SGS_COPIED 9 #define FLAGS_SGS_ALLOCED 10 +#define FLAGS_HUGE 11 + /* context flags */ #define FLAGS_FINUP 16 @@ -136,6 +138,8 @@ #define BUFLEN SHA512_BLOCK_SIZE #define OMAP_SHA_DMA_THRESHOLD 256 +#define OMAP_SHA_MAX_DMA_LEN (1024 * 2048) + struct omap_sham_dev; struct omap_sham_reqctx { @@ -644,6 +648,8 @@ static int omap_sham_copy_sg_lists(struct omap_sham_reqctx *ctx, struct scatterlist *tmp; int offset = ctx->offset; + ctx->total = new_len; + if (ctx->bufcnt) n++; @@ -661,15 +667,16 @@ static int omap_sham_copy_sg_lists(struct omap_sham_reqctx *ctx, sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt); tmp = sg_next(tmp); ctx->sg_len++; + new_len -= ctx->bufcnt; } while (sg && new_len) { int len = sg->length - offset; - if (offset) { + if (len <= 0) { offset -= sg->length; - if (offset < 0) - offset = 0; + sg = sg_next(sg); + continue; } if (new_len < len) @@ -677,33 +684,37 @@ static int omap_sham_copy_sg_lists(struct omap_sham_reqctx *ctx, if (len > 0) { new_len -= len; - sg_set_page(tmp, sg_page(sg), len, sg->offset); + sg_set_page(tmp, sg_page(sg), len, sg->offset + offset); + offset = 0; + ctx->offset = 0; + ctx->sg_len++; if (new_len <= 0) - sg_mark_end(tmp); + break; tmp = sg_next(tmp); - ctx->sg_len++; } sg = sg_next(sg); } + if (tmp) + sg_mark_end(tmp); + set_bit(FLAGS_SGS_ALLOCED, &ctx->dd->flags); + ctx->offset += new_len - ctx->bufcnt; ctx->bufcnt = 0; return 0; } static int omap_sham_copy_sgs(struct omap_sham_reqctx *ctx, - struct scatterlist *sg, int bs, int new_len) + struct scatterlist *sg, int bs, + unsigned int new_len) { int pages; void *buf; - int len; - - len = new_len + ctx->bufcnt; - pages = get_order(ctx->total); + pages = get_order(new_len); buf = (void *)__get_free_pages(GFP_ATOMIC, pages); if (!buf) { @@ -715,14 +726,15 @@ static int omap_sham_copy_sgs(struct omap_sham_reqctx *ctx, memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt); scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->offset, - ctx->total - ctx->bufcnt, 0); + min(new_len, ctx->total) - ctx->bufcnt, 0); sg_init_table(ctx->sgl, 1); - 
sg_set_buf(ctx->sgl, buf, len); + sg_set_buf(ctx->sgl, buf, new_len); ctx->sg = ctx->sgl; set_bit(FLAGS_SGS_COPIED, &ctx->dd->flags); ctx->sg_len = 1; + ctx->offset += new_len - ctx->bufcnt; ctx->bufcnt = 0; - ctx->offset = 0; + ctx->total = new_len; return 0; } @@ -737,6 +749,7 @@ static int omap_sham_align_sgs(struct scatterlist *sg, struct scatterlist *sg_tmp = sg; int new_len; int offset = rctx->offset; + int bufcnt = rctx->bufcnt; if (!sg || !sg->length || !nbytes) return 0; @@ -751,12 +764,28 @@ static int omap_sham_align_sgs(struct scatterlist *sg, else new_len = (new_len - 1) / bs * bs; + if (!new_len) + return 0; + if (nbytes != new_len) list_ok = false; while (nbytes > 0 && sg_tmp) { n++; + if (bufcnt) { + if (!IS_ALIGNED(bufcnt, bs)) { + aligned = false; + break; + } + nbytes -= bufcnt; + bufcnt = 0; + if (!nbytes) + list_ok = false; + + continue; + } + #ifdef CONFIG_ZONE_DMA if (page_zonenum(sg_page(sg_tmp)) != ZONE_DMA) { aligned = false; @@ -794,13 +823,27 @@ static int omap_sham_align_sgs(struct scatterlist *sg, } } + if (new_len > OMAP_SHA_MAX_DMA_LEN) { + new_len = OMAP_SHA_MAX_DMA_LEN; + aligned = false; + } + if (!aligned) return omap_sham_copy_sgs(rctx, sg, bs, new_len); else if (!list_ok) return omap_sham_copy_sg_lists(rctx, sg, bs, new_len); + rctx->total = new_len; + rctx->offset += new_len; rctx->sg_len = n; - rctx->sg = sg; + if (rctx->bufcnt) { + sg_init_table(rctx->sgl, 2); + sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, rctx->bufcnt); + sg_chain(rctx->sgl, 2, sg); + rctx->sg = rctx->sgl; + } else { + rctx->sg = sg; + } return 0; } @@ -810,31 +853,35 @@ static int omap_sham_prepare_request(struct ahash_request *req, bool update) struct omap_sham_reqctx *rctx = ahash_request_ctx(req); int bs; int ret; - int nbytes; + unsigned int nbytes; bool final = rctx->flags & BIT(FLAGS_FINUP); - int xmit_len, hash_later; + int hash_later; bs = get_block_size(rctx); + nbytes = rctx->bufcnt; + if (update) - nbytes = req->nbytes; - else - nbytes = 0; + nbytes += req->nbytes - rctx->offset; - rctx->total = nbytes + rctx->bufcnt; + dev_dbg(rctx->dd->dev, + "%s: nbytes=%d, bs=%d, total=%d, offset=%d, bufcnt=%d\n", + __func__, nbytes, bs, rctx->total, rctx->offset, + rctx->bufcnt); - if (!rctx->total) + if (!nbytes) return 0; - if (nbytes && (!IS_ALIGNED(rctx->bufcnt, bs))) { + rctx->total = nbytes; + + if (update && req->nbytes && (!IS_ALIGNED(rctx->bufcnt, bs))) { int len = bs - rctx->bufcnt % bs; - if (len > nbytes) - len = nbytes; + if (len > req->nbytes) + len = req->nbytes; scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, req->src, 0, len, 0); rctx->bufcnt += len; - nbytes -= len; rctx->offset = len; } @@ -845,64 +892,25 @@ static int omap_sham_prepare_request(struct ahash_request *req, bool update) if (ret) return ret; - xmit_len = rctx->total; - - if (!IS_ALIGNED(xmit_len, bs)) { - if (final) - xmit_len = DIV_ROUND_UP(xmit_len, bs) * bs; - else - xmit_len = xmit_len / bs * bs; - } else if (!final) { - xmit_len -= bs; - } - - hash_later = rctx->total - xmit_len; + hash_later = nbytes - rctx->total; if (hash_later < 0) hash_later = 0; - if (rctx->bufcnt && nbytes) { - /* have data from previous operation and current */ - sg_init_table(rctx->sgl, 2); - sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, rctx->bufcnt); - - sg_chain(rctx->sgl, 2, req->src); - - rctx->sg = rctx->sgl; - - rctx->sg_len++; - } else if (rctx->bufcnt) { - /* have buffered data only */ - sg_init_table(rctx->sgl, 1); - sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, xmit_len); - - rctx->sg = rctx->sgl; - - rctx->sg_len 
= 1; - } - if (hash_later) { - int offset = 0; - - if (hash_later > req->nbytes) { - memcpy(rctx->buffer, rctx->buffer + xmit_len, - hash_later - req->nbytes); - offset = hash_later - req->nbytes; - } - - if (req->nbytes) { - scatterwalk_map_and_copy(rctx->buffer + offset, - req->src, - offset + req->nbytes - - hash_later, hash_later, 0); - } + scatterwalk_map_and_copy(rctx->buffer, + req->src, + req->nbytes - hash_later, + hash_later, 0); rctx->bufcnt = hash_later; } else { rctx->bufcnt = 0; } - if (!final) - rctx->total = xmit_len; + if (hash_later > rctx->buflen) + set_bit(FLAGS_HUGE, &rctx->dd->flags); + + rctx->total = min(nbytes, rctx->total); return 0; } @@ -998,10 +1006,11 @@ static int omap_sham_update_req(struct omap_sham_dev *dd) struct ahash_request *req = dd->req; struct omap_sham_reqctx *ctx = ahash_request_ctx(req); int err; - bool final = ctx->flags & BIT(FLAGS_FINUP); + bool final = (ctx->flags & BIT(FLAGS_FINUP)) && + !(dd->flags & BIT(FLAGS_HUGE)); - dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n", - ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0); + dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, final: %d", + ctx->total, ctx->digcnt, final); if (ctx->total < get_block_size(ctx) || ctx->total < dd->fallback_sz) @@ -1024,6 +1033,9 @@ static int omap_sham_final_req(struct omap_sham_dev *dd) struct omap_sham_reqctx *ctx = ahash_request_ctx(req); int err = 0, use_dma = 1; + if (dd->flags & BIT(FLAGS_HUGE)) + return 0; + if ((ctx->total <= get_block_size(ctx)) || dd->polling_mode) /* * faster to handle last block with cpu or @@ -1083,7 +1095,7 @@ static void omap_sham_finish_req(struct ahash_request *req, int err) if (test_bit(FLAGS_SGS_COPIED, &dd->flags)) free_pages((unsigned long)sg_virt(ctx->sg), - get_order(ctx->sg->length + ctx->bufcnt)); + get_order(ctx->sg->length)); if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags)) kfree(ctx->sg); @@ -1092,6 +1104,21 @@ static void omap_sham_finish_req(struct ahash_request *req, int err) dd->flags &= ~(BIT(FLAGS_SGS_ALLOCED) | BIT(FLAGS_SGS_COPIED)); + if (dd->flags & BIT(FLAGS_HUGE)) { + dd->flags &= ~(BIT(FLAGS_CPU) | BIT(FLAGS_DMA_READY) | + BIT(FLAGS_OUTPUT_READY) | BIT(FLAGS_HUGE)); + omap_sham_prepare_request(req, ctx->op == OP_UPDATE); + if (ctx->op == OP_UPDATE || (dd->flags & BIT(FLAGS_HUGE))) { + err = omap_sham_update_req(dd); + if (err != -EINPROGRESS && + (ctx->flags & BIT(FLAGS_FINUP))) + err = omap_sham_final_req(dd); + } else if (ctx->op == OP_FINAL) { + omap_sham_final_req(dd); + } + return; + } + if (!err) { dd->pdata->copy_hash(req, 1); if (test_bit(FLAGS_FINAL, &dd->flags)) @@ -1107,6 +1134,8 @@ static void omap_sham_finish_req(struct ahash_request *req, int err) pm_runtime_mark_last_busy(dd->dev); pm_runtime_put_autosuspend(dd->dev); + ctx->offset = 0; + if (req->base.complete) req->base.complete(&req->base, err); } @@ -1158,7 +1187,7 @@ retry: /* request has changed - restore hash */ dd->pdata->copy_hash(req, 0); - if (ctx->op == OP_UPDATE) { + if (ctx->op == OP_UPDATE || (dd->flags & BIT(FLAGS_HUGE))) { err = omap_sham_update_req(dd); if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP))) /* no final() after finup() */ @@ -1730,6 +1759,8 @@ static void omap_sham_done_task(unsigned long data) struct omap_sham_dev *dd = (struct omap_sham_dev *)data; int err = 0; + dev_dbg(dd->dev, "%s: flags=%lx\n", __func__, dd->flags); + if (!test_bit(FLAGS_BUSY, &dd->flags)) { omap_sham_handle_queue(dd, NULL); return; @@ -2223,6 +2254,8 @@ static int omap_sham_remove(struct 
platform_device *pdev) if (!dd->polling_mode) dma_release_channel(dd->dma_lch); + sysfs_remove_group(&dd->dev->kobj, &omap_sham_attr_group); + return 0; } diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index c5b60f50e1b5..594d6b1695d5 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -108,14 +108,11 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, { struct aes_ctx *ctx = aes_ctx(tfm); const __le32 *key = (const __le32 *)in_key; - u32 *flags = &tfm->crt_flags; struct crypto_aes_ctx gen_aes; int cpu; - if (key_len % 8) { - *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + if (key_len % 8) return -EINVAL; - } /* * If the hardware is capable of generating the extended key @@ -146,10 +143,8 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, ctx->cword.encrypt.keygen = 1; ctx->cword.decrypt.keygen = 1; - if (aes_expandkey(&gen_aes, in_key, key_len)) { - *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + if (aes_expandkey(&gen_aes, in_key, key_len)) return -EINVAL; - } memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH); memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH); diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c index ddf1b549fdca..c826abe79e79 100644 --- a/drivers/crypto/padlock-sha.c +++ b/drivers/crypto/padlock-sha.c @@ -190,13 +190,11 @@ static int padlock_sha256_final(struct shash_desc *desc, u8 *out) return padlock_sha256_finup(desc, buf, 0, out); } -static int padlock_cra_init(struct crypto_tfm *tfm) +static int padlock_init_tfm(struct crypto_shash *hash) { - struct crypto_shash *hash = __crypto_shash_cast(tfm); - const char *fallback_driver_name = crypto_tfm_alg_name(tfm); - struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm); + const char *fallback_driver_name = crypto_shash_alg_name(hash); + struct padlock_sha_ctx *ctx = crypto_shash_ctx(hash); struct crypto_shash *fallback_tfm; - int err = -ENOMEM; /* Allocate a fallback and abort if it failed. 
*/ fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0, @@ -204,21 +202,17 @@ static int padlock_cra_init(struct crypto_tfm *tfm) if (IS_ERR(fallback_tfm)) { printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n", fallback_driver_name); - err = PTR_ERR(fallback_tfm); - goto out; + return PTR_ERR(fallback_tfm); } ctx->fallback = fallback_tfm; hash->descsize += crypto_shash_descsize(fallback_tfm); return 0; - -out: - return err; } -static void padlock_cra_exit(struct crypto_tfm *tfm) +static void padlock_exit_tfm(struct crypto_shash *hash) { - struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm); + struct padlock_sha_ctx *ctx = crypto_shash_ctx(hash); crypto_free_shash(ctx->fallback); } @@ -231,6 +225,8 @@ static struct shash_alg sha1_alg = { .final = padlock_sha1_final, .export = padlock_sha_export, .import = padlock_sha_import, + .init_tfm = padlock_init_tfm, + .exit_tfm = padlock_exit_tfm, .descsize = sizeof(struct padlock_sha_desc), .statesize = sizeof(struct sha1_state), .base = { @@ -241,8 +237,6 @@ static struct shash_alg sha1_alg = { .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct padlock_sha_ctx), .cra_module = THIS_MODULE, - .cra_init = padlock_cra_init, - .cra_exit = padlock_cra_exit, } }; @@ -254,6 +248,8 @@ static struct shash_alg sha256_alg = { .final = padlock_sha256_final, .export = padlock_sha_export, .import = padlock_sha_import, + .init_tfm = padlock_init_tfm, + .exit_tfm = padlock_exit_tfm, .descsize = sizeof(struct padlock_sha_desc), .statesize = sizeof(struct sha256_state), .base = { @@ -264,8 +260,6 @@ static struct shash_alg sha256_alg = { .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct padlock_sha_ctx), .cra_module = THIS_MODULE, - .cra_init = padlock_cra_init, - .cra_exit = padlock_cra_exit, } }; diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c index 29da449b3e9e..7384e91c8b32 100644 --- a/drivers/crypto/picoxcell_crypto.c +++ b/drivers/crypto/picoxcell_crypto.c @@ -465,9 +465,6 @@ static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key, crypto_aead_set_flags(ctx->sw_cipher, crypto_aead_get_flags(tfm) & CRYPTO_TFM_REQ_MASK); err = crypto_aead_setkey(ctx->sw_cipher, key, keylen); - crypto_aead_clear_flags(tfm, CRYPTO_TFM_RES_MASK); - crypto_aead_set_flags(tfm, crypto_aead_get_flags(ctx->sw_cipher) & - CRYPTO_TFM_RES_MASK); if (err) return err; @@ -490,7 +487,6 @@ static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key, return 0; badkey: - crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); memzero_explicit(&keys, sizeof(keys)); return -EINVAL; } @@ -780,10 +776,8 @@ static int spacc_aes_setkey(struct crypto_skcipher *cipher, const u8 *key, struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm); int err = 0; - if (len > AES_MAX_KEY_SIZE) { - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (len > AES_MAX_KEY_SIZE) return -EINVAL; - } /* * IPSec engine only supports 128 and 256 bit AES keys. 
If we get a @@ -805,12 +799,6 @@ static int spacc_aes_setkey(struct crypto_skcipher *cipher, const u8 *key, CRYPTO_TFM_REQ_MASK); err = crypto_sync_skcipher_setkey(ctx->sw_cipher, key, len); - - tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; - tfm->crt_flags |= - crypto_sync_skcipher_get_flags(ctx->sw_cipher) & - CRYPTO_TFM_RES_MASK; - if (err) goto sw_setkey_failed; } @@ -830,7 +818,6 @@ static int spacc_kasumi_f8_setkey(struct crypto_skcipher *cipher, int err = 0; if (len > AES_MAX_KEY_SIZE) { - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); err = -EINVAL; goto out; } @@ -1595,6 +1582,11 @@ static const struct of_device_id spacc_of_id_table[] = { MODULE_DEVICE_TABLE(of, spacc_of_id_table); #endif /* CONFIG_OF */ +static void spacc_tasklet_kill(void *data) +{ + tasklet_kill(data); +} + static int spacc_probe(struct platform_device *pdev) { int i, err, ret; @@ -1637,6 +1629,14 @@ static int spacc_probe(struct platform_device *pdev) return -ENXIO; } + tasklet_init(&engine->complete, spacc_spacc_complete, + (unsigned long)engine); + + ret = devm_add_action(&pdev->dev, spacc_tasklet_kill, + &engine->complete); + if (ret) + return ret; + if (devm_request_irq(&pdev->dev, irq->start, spacc_spacc_irq, 0, engine->name, engine)) { dev_err(engine->dev, "failed to request IRQ\n"); @@ -1694,8 +1694,6 @@ static int spacc_probe(struct platform_device *pdev) INIT_LIST_HEAD(&engine->completed); INIT_LIST_HEAD(&engine->in_progress); engine->in_flight = 0; - tasklet_init(&engine->complete, spacc_spacc_complete, - (unsigned long)engine); platform_set_drvdata(pdev, engine); diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index 35bca76b640f..833bb1d3a11b 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -570,7 +570,6 @@ static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key, memzero_explicit(&keys, sizeof(keys)); return 0; bad_key: - crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); memzero_explicit(&keys, sizeof(keys)); return -EINVAL; error: @@ -586,14 +585,11 @@ static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx, int alg; if (qat_alg_validate_key(keylen, &alg, mode)) - goto bad_key; + return -EINVAL; qat_alg_skcipher_init_enc(ctx, alg, key, keylen, mode); qat_alg_skcipher_init_dec(ctx, alg, key, keylen, mode); return 0; -bad_key: - crypto_skcipher_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; } static int qat_alg_aead_rekey(struct crypto_aead *tfm, const uint8_t *key, diff --git a/drivers/crypto/qce/Makefile b/drivers/crypto/qce/Makefile index 8caa04e1ec43..14ade8a7d664 100644 --- a/drivers/crypto/qce/Makefile +++ b/drivers/crypto/qce/Makefile @@ -2,6 +2,7 @@ obj-$(CONFIG_CRYPTO_DEV_QCE) += qcrypto.o qcrypto-objs := core.o \ common.o \ - dma.o \ - sha.o \ - skcipher.o + dma.o + +qcrypto-$(CONFIG_CRYPTO_DEV_QCE_SHA) += sha.o +qcrypto-$(CONFIG_CRYPTO_DEV_QCE_SKCIPHER) += skcipher.o diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c index da1188abc9ba..629e7f34dc09 100644 --- a/drivers/crypto/qce/common.c +++ b/drivers/crypto/qce/common.c @@ -45,52 +45,56 @@ qce_clear_array(struct qce_device *qce, u32 offset, unsigned int len) qce_write(qce, offset + i * sizeof(u32), 0); } -static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size) +static u32 qce_config_reg(struct qce_device *qce, int little) { - u32 cfg = 0; + u32 beats = (qce->burst_size >> 3) - 1; + u32 pipe_pair = qce->pipe_pair_id; + u32 config; - 
if (IS_AES(flags)) { - if (aes_key_size == AES_KEYSIZE_128) - cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT; - else if (aes_key_size == AES_KEYSIZE_256) - cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT; - } + config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK; + config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) | + BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT); + config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK; + config &= ~HIGH_SPD_EN_N_SHIFT; - if (IS_AES(flags)) - cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT; - else if (IS_DES(flags) || IS_3DES(flags)) - cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT; + if (little) + config |= BIT(LITTLE_ENDIAN_MODE_SHIFT); - if (IS_DES(flags)) - cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT; + return config; +} - if (IS_3DES(flags)) - cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT; +void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len) +{ + __be32 *d = dst; + const u8 *s = src; + unsigned int n; - switch (flags & QCE_MODE_MASK) { - case QCE_MODE_ECB: - cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT; - break; - case QCE_MODE_CBC: - cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT; - break; - case QCE_MODE_CTR: - cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT; - break; - case QCE_MODE_XTS: - cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT; - break; - case QCE_MODE_CCM: - cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT; - cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT; - break; - default: - return ~0; + n = len / sizeof(u32); + for (; n > 0; n--) { + *d = cpu_to_be32p((const __u32 *) s); + s += sizeof(__u32); + d++; } +} - return cfg; +static void qce_setup_config(struct qce_device *qce) +{ + u32 config; + + /* get big endianness */ + config = qce_config_reg(qce, 0); + + /* clear status */ + qce_write(qce, REG_STATUS, 0); + qce_write(qce, REG_CONFIG, config); +} + +static inline void qce_crypto_go(struct qce_device *qce) +{ + qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT)); } +#ifdef CONFIG_CRYPTO_DEV_QCE_SHA static u32 qce_auth_cfg(unsigned long flags, u32 key_size) { u32 cfg = 0; @@ -137,88 +141,6 @@ static u32 qce_auth_cfg(unsigned long flags, u32 key_size) return cfg; } -static u32 qce_config_reg(struct qce_device *qce, int little) -{ - u32 beats = (qce->burst_size >> 3) - 1; - u32 pipe_pair = qce->pipe_pair_id; - u32 config; - - config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK; - config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) | - BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT); - config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK; - config &= ~HIGH_SPD_EN_N_SHIFT; - - if (little) - config |= BIT(LITTLE_ENDIAN_MODE_SHIFT); - - return config; -} - -void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len) -{ - __be32 *d = dst; - const u8 *s = src; - unsigned int n; - - n = len / sizeof(u32); - for (; n > 0; n--) { - *d = cpu_to_be32p((const __u32 *) s); - s += sizeof(__u32); - d++; - } -} - -static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize) -{ - u8 swap[QCE_AES_IV_LENGTH]; - u32 i, j; - - if (ivsize > QCE_AES_IV_LENGTH) - return; - - memset(swap, 0, QCE_AES_IV_LENGTH); - - for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1; - i < QCE_AES_IV_LENGTH; i++, j--) - swap[i] = src[j]; - - qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH); -} - -static void qce_xtskey(struct qce_device *qce, const u8 *enckey, - unsigned int enckeylen, unsigned int cryptlen) -{ - u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0}; - unsigned int xtsklen = 
enckeylen / (2 * sizeof(u32)); - unsigned int xtsdusize; - - qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2, - enckeylen / 2); - qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen); - - /* xts du size 512B */ - xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen); - qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize); -} - -static void qce_setup_config(struct qce_device *qce) -{ - u32 config; - - /* get big endianness */ - config = qce_config_reg(qce, 0); - - /* clear status */ - qce_write(qce, REG_STATUS, 0); - qce_write(qce, REG_CONFIG, config); -} - -static inline void qce_crypto_go(struct qce_device *qce) -{ - qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT)); -} - static int qce_setup_regs_ahash(struct crypto_async_request *async_req, u32 totallen, u32 offset) { @@ -303,6 +225,87 @@ go_proc: return 0; } +#endif + +#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER +static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size) +{ + u32 cfg = 0; + + if (IS_AES(flags)) { + if (aes_key_size == AES_KEYSIZE_128) + cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT; + else if (aes_key_size == AES_KEYSIZE_256) + cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT; + } + + if (IS_AES(flags)) + cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT; + else if (IS_DES(flags) || IS_3DES(flags)) + cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT; + + if (IS_DES(flags)) + cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT; + + if (IS_3DES(flags)) + cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT; + + switch (flags & QCE_MODE_MASK) { + case QCE_MODE_ECB: + cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT; + break; + case QCE_MODE_CBC: + cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT; + break; + case QCE_MODE_CTR: + cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT; + break; + case QCE_MODE_XTS: + cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT; + break; + case QCE_MODE_CCM: + cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT; + cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT; + break; + default: + return ~0; + } + + return cfg; +} + +static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize) +{ + u8 swap[QCE_AES_IV_LENGTH]; + u32 i, j; + + if (ivsize > QCE_AES_IV_LENGTH) + return; + + memset(swap, 0, QCE_AES_IV_LENGTH); + + for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1; + i < QCE_AES_IV_LENGTH; i++, j--) + swap[i] = src[j]; + + qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH); +} + +static void qce_xtskey(struct qce_device *qce, const u8 *enckey, + unsigned int enckeylen, unsigned int cryptlen) +{ + u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0}; + unsigned int xtsklen = enckeylen / (2 * sizeof(u32)); + unsigned int xtsdusize; + + qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2, + enckeylen / 2); + qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen); + + /* xts du size 512B */ + xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen); + qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize); +} static int qce_setup_regs_skcipher(struct crypto_async_request *async_req, u32 totallen, u32 offset) @@ -384,15 +387,20 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req, return 0; } +#endif int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen, u32 offset) { switch (type) { +#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER case CRYPTO_ALG_TYPE_SKCIPHER: return qce_setup_regs_skcipher(async_req, totallen, offset); +#endif +#ifdef CONFIG_CRYPTO_DEV_QCE_SHA case CRYPTO_ALG_TYPE_AHASH: return qce_setup_regs_ahash(async_req, totallen, offset); +#endif default: return -EINVAL; } diff --git 
a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c index 0a44a6eeacf5..cb6d61eb7302 100644 --- a/drivers/crypto/qce/core.c +++ b/drivers/crypto/qce/core.c @@ -22,8 +22,12 @@ #define QCE_QUEUE_LENGTH 1 static const struct qce_algo_ops *qce_ops[] = { +#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER &skcipher_ops, +#endif +#ifdef CONFIG_CRYPTO_DEV_QCE_SHA &ahash_ops, +#endif }; static void qce_unregister_algs(struct qce_device *qce) diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c index 40a59214d2e1..7da893dc00e7 100644 --- a/drivers/crypto/qce/dma.c +++ b/drivers/crypto/qce/dma.c @@ -47,7 +47,8 @@ void qce_dma_release(struct qce_dma_data *dma) } struct scatterlist * -qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl) +qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl, + int max_ents) { struct scatterlist *sg = sgt->sgl, *sg_last = NULL; @@ -60,12 +61,13 @@ qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl) if (!sg) return ERR_PTR(-EINVAL); - while (new_sgl && sg) { + while (new_sgl && sg && max_ents) { sg_set_page(sg, sg_page(new_sgl), new_sgl->length, new_sgl->offset); sg_last = sg; sg = sg_next(sg); new_sgl = sg_next(new_sgl); + max_ents--; } return sg_last; diff --git a/drivers/crypto/qce/dma.h b/drivers/crypto/qce/dma.h index 1e25a9e0e6f8..ed25a0d9829e 100644 --- a/drivers/crypto/qce/dma.h +++ b/drivers/crypto/qce/dma.h @@ -42,6 +42,7 @@ int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *sg_in, void qce_dma_issue_pending(struct qce_dma_data *dma); int qce_dma_terminate_all(struct qce_dma_data *dma); struct scatterlist * -qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add); +qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add, + int max_ents); #endif /* _DMA_H_ */ diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c index 95ab16fc8fd6..1ab62e7d5f3c 100644 --- a/drivers/crypto/qce/sha.c +++ b/drivers/crypto/qce/sha.c @@ -396,8 +396,6 @@ static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, ahash_request_set_crypt(req, &sg, ctx->authkey, keylen); ret = crypto_wait_req(crypto_ahash_digest(req), &wait); - if (ret) - crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); kfree(buf); err_free_req: diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c index fee07323f8f9..4217b745f124 100644 --- a/drivers/crypto/qce/skcipher.c +++ b/drivers/crypto/qce/skcipher.c @@ -21,6 +21,7 @@ static void qce_skcipher_done(void *data) struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req); struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req)); struct qce_device *qce = tmpl->qce; + struct qce_result_dump *result_buf = qce->dma.result_buf; enum dma_data_direction dir_src, dir_dst; u32 status; int error; @@ -45,6 +46,7 @@ static void qce_skcipher_done(void *data) if (error < 0) dev_dbg(qce->dev, "skcipher operation error (%x)\n", status); + memcpy(rctx->iv, result_buf->encr_cntr_iv, rctx->ivsize); qce->async_req_done(tmpl->qce, error); } @@ -95,13 +97,13 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req) sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ); - sg = qce_sgtable_add(&rctx->dst_tbl, req->dst); + sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, rctx->dst_nents - 1); if (IS_ERR(sg)) { ret = PTR_ERR(sg); goto error_free; } - sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg); + sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg, 1); if (IS_ERR(sg)) { ret = PTR_ERR(sg); goto 
error_free; @@ -154,12 +156,13 @@ static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key, { struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk); struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + unsigned long flags = to_cipher_tmpl(ablk)->alg_flags; int ret; if (!key || !keylen) return -EINVAL; - switch (keylen) { + switch (IS_XTS(flags) ? keylen >> 1 : keylen) { case AES_KEYSIZE_128: case AES_KEYSIZE_256: break; @@ -213,13 +216,15 @@ static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt) struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req); struct qce_alg_template *tmpl = to_cipher_tmpl(tfm); + int keylen; int ret; rctx->flags = tmpl->alg_flags; rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT; + keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen; - if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 && - ctx->enc_keylen != AES_KEYSIZE_256) { + if (IS_AES(rctx->flags) && keylen != AES_KEYSIZE_128 && + keylen != AES_KEYSIZE_256) { SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); skcipher_request_set_sync_tfm(subreq, ctx->fallback); @@ -252,7 +257,14 @@ static int qce_skcipher_init(struct crypto_skcipher *tfm) memset(ctx, 0, sizeof(*ctx)); crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx)); + return 0; +} + +static int qce_skcipher_init_fallback(struct crypto_skcipher *tfm) +{ + struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + qce_skcipher_init(tfm); ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base), 0, CRYPTO_ALG_NEED_FALLBACK); return PTR_ERR_OR_ZERO(ctx->fallback); @@ -270,6 +282,7 @@ struct qce_skcipher_def { const char *name; const char *drv_name; unsigned int blocksize; + unsigned int chunksize; unsigned int ivsize; unsigned int min_keysize; unsigned int max_keysize; @@ -298,7 +311,8 @@ static const struct qce_skcipher_def skcipher_def[] = { .flags = QCE_ALG_AES | QCE_MODE_CTR, .name = "ctr(aes)", .drv_name = "ctr-aes-qce", - .blocksize = AES_BLOCK_SIZE, + .blocksize = 1, + .chunksize = AES_BLOCK_SIZE, .ivsize = AES_BLOCK_SIZE, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, @@ -309,8 +323,8 @@ static const struct qce_skcipher_def skcipher_def[] = { .drv_name = "xts-aes-qce", .blocksize = AES_BLOCK_SIZE, .ivsize = AES_BLOCK_SIZE, - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, + .min_keysize = AES_MIN_KEY_SIZE * 2, + .max_keysize = AES_MAX_KEY_SIZE * 2, }, { .flags = QCE_ALG_DES | QCE_MODE_ECB, @@ -368,6 +382,7 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def, def->drv_name); alg->base.cra_blocksize = def->blocksize; + alg->chunksize = def->chunksize; alg->ivsize = def->ivsize; alg->min_keysize = def->min_keysize; alg->max_keysize = def->max_keysize; @@ -379,14 +394,18 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def, alg->base.cra_priority = 300; alg->base.cra_flags = CRYPTO_ALG_ASYNC | - CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_KERN_DRIVER_ONLY; alg->base.cra_ctxsize = sizeof(struct qce_cipher_ctx); alg->base.cra_alignmask = 0; alg->base.cra_module = THIS_MODULE; - alg->init = qce_skcipher_init; - alg->exit = qce_skcipher_exit; + if (IS_AES(def->flags)) { + alg->base.cra_flags |= CRYPTO_ALG_NEED_FALLBACK; + alg->init = qce_skcipher_init_fallback; + alg->exit = qce_skcipher_exit; + } else { + alg->init = qce_skcipher_init; + } INIT_LIST_HEAD(&tmpl->entry); tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_SKCIPHER; diff 
--git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c index ca4de4ddfe1f..4a75c8e1fa6c 100644 --- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c +++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c @@ -34,10 +34,8 @@ static int rk_aes_setkey(struct crypto_skcipher *cipher, struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm); if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && - keylen != AES_KEYSIZE_256) { - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + keylen != AES_KEYSIZE_256) return -EINVAL; - } ctx->keylen = keylen; memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, key, keylen); return 0; diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index d4ea2f11ca68..466e30bd529c 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c @@ -601,7 +601,6 @@ static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm); - int ret; ctx->keylen = keylen; @@ -621,13 +620,7 @@ static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, crypto_sync_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK); crypto_sync_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); - - ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen); - - tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK; - tfm->base.crt_flags |= crypto_sync_skcipher_get_flags(ctx->fallback) & - CRYPTO_TFM_RES_MASK; - return ret; + return crypto_sync_skcipher_setkey(ctx->fallback, key, keylen); } static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode) diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig index 1aba9372cd23..4ef3eb11361c 100644 --- a/drivers/crypto/stm32/Kconfig +++ b/drivers/crypto/stm32/Kconfig @@ -4,7 +4,7 @@ config CRYPTO_DEV_STM32_CRC depends on ARCH_STM32 select CRYPTO_HASH help - This enables support for the CRC32 hw accelerator which can be found + This enables support for the CRC32 hw accelerator which can be found on STMicroelectronics STM32 SOC. config CRYPTO_DEV_STM32_HASH @@ -17,7 +17,7 @@ config CRYPTO_DEV_STM32_HASH select CRYPTO_SHA256 select CRYPTO_ENGINE help - This enables support for the HASH hw accelerator which can be found + This enables support for the HASH hw accelerator which can be found on STMicroelectronics STM32 SOC. config CRYPTO_DEV_STM32_CRYP @@ -27,5 +27,5 @@ config CRYPTO_DEV_STM32_CRYP select CRYPTO_ENGINE select CRYPTO_LIB_DES help - This enables support for the CRYP (AES/DES/TDES) hw accelerator which + This enables support for the CRYP (AES/DES/TDES) hw accelerator which can be found on STMicroelectronics STM32 SOC. 
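The setkey conversions running through these crypto drivers all share one shape: the API has dropped CRYPTO_TFM_RES_BAD_KEY_LEN (and CRYPTO_TFM_RES_MASK propagation from fallbacks), so a ->setkey() now rejects a bad key length with a bare -EINVAL and the per-tfm flag bookkeeping disappears. A minimal sketch of the resulting form, modeled on the rk3288 setkey above — the demo_* names and the context struct are illustrative, not taken from any one driver in this patch:

#include <linux/errno.h>
#include <linux/string.h>
#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>

/* Hypothetical per-tfm context, standing in for rk_cipher_ctx and friends. */
struct demo_cipher_ctx {
	unsigned int keylen;
	u8 key[AES_MAX_KEY_SIZE];
};

static int demo_aes_setkey(struct crypto_skcipher *cipher,
			   const u8 *key, unsigned int keylen)
{
	struct demo_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);

	/*
	 * Previously this branch also did
	 * crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	 * with the flag gone, -EINVAL alone reports the bad length.
	 */
	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	ctx->keylen = keylen;
	memcpy(ctx->key, key, keylen);
	return 0;
}

The crypto core reports the failure to the caller either way, so the per-driver flagging added nothing.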
diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c index 9e11c3480353..8e92e4ac79f1 100644 --- a/drivers/crypto/stm32/stm32-crc32.c +++ b/drivers/crypto/stm32/stm32-crc32.c @@ -85,10 +85,8 @@ static int stm32_crc_setkey(struct crypto_shash *tfm, const u8 *key, { struct stm32_crc_ctx *mctx = crypto_shash_ctx(tfm); - if (keylen != sizeof(u32)) { - crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (keylen != sizeof(u32)) return -EINVAL; - } mctx->key = get_unaligned_le32(key); return 0; diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c index cfc8e0e37bee..167b80eec437 100644 --- a/drivers/crypto/stm32/stm32-hash.c +++ b/drivers/crypto/stm32/stm32-hash.c @@ -518,10 +518,10 @@ static int stm32_hash_dma_init(struct stm32_hash_dev *hdev) dma_conf.dst_maxburst = hdev->dma_maxburst; dma_conf.device_fc = false; - hdev->dma_lch = dma_request_slave_channel(hdev->dev, "in"); - if (!hdev->dma_lch) { + hdev->dma_lch = dma_request_chan(hdev->dev, "in"); + if (IS_ERR(hdev->dma_lch)) { dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n"); - return -EBUSY; + return PTR_ERR(hdev->dma_lch); } err = dmaengine_slave_config(hdev->dma_lch, &dma_conf); diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index d71d65846e47..9c6db7f698c4 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -914,7 +914,6 @@ static int aead_setkey(struct crypto_aead *authenc, return 0; badkey: - crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN); memzero_explicit(&keys, sizeof(keys)); return -EINVAL; } @@ -929,11 +928,11 @@ static int aead_des3_setkey(struct crypto_aead *authenc, err = crypto_authenc_extractkeys(&keys, key, keylen); if (unlikely(err)) - goto badkey; + goto out; err = -EINVAL; if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE) - goto badkey; + goto out; err = verify_aead_des3_key(authenc, keys.enckey, keys.enckeylen); if (err) @@ -954,10 +953,6 @@ static int aead_des3_setkey(struct crypto_aead *authenc, out: memzero_explicit(&keys, sizeof(keys)); return err; - -badkey: - crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN); - goto out; } static void talitos_sg_unmap(struct device *dev, @@ -1528,8 +1523,6 @@ static int skcipher_aes_setkey(struct crypto_skcipher *cipher, keylen == AES_KEYSIZE_256) return skcipher_setkey(cipher, key, keylen); - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; } @@ -2234,10 +2227,8 @@ static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key, /* Must get the hash of the long key */ ret = keyhash(tfm, key, keylen, hash); - if (ret) { - crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (ret) return -EINVAL; - } keysize = digestsize; memcpy(ctx->key, hash, digestsize); diff --git a/drivers/crypto/ux500/Kconfig b/drivers/crypto/ux500/Kconfig index b731895aa241..f56d65c56ccf 100644 --- a/drivers/crypto/ux500/Kconfig +++ b/drivers/crypto/ux500/Kconfig @@ -11,18 +11,18 @@ config CRYPTO_DEV_UX500_CRYP select CRYPTO_SKCIPHER select CRYPTO_LIB_DES help - This selects the crypto driver for the UX500_CRYP hardware. It supports - AES-ECB, CBC and CTR with keys sizes of 128, 192 and 256 bit sizes. + This selects the crypto driver for the UX500_CRYP hardware. It supports + AES-ECB, CBC and CTR with key sizes of 128, 192 and 256 bits.
config CRYPTO_DEV_UX500_HASH - tristate "UX500 crypto driver for HASH block" - depends on CRYPTO_DEV_UX500 - select CRYPTO_HASH + tristate "UX500 crypto driver for HASH block" + depends on CRYPTO_DEV_UX500 + select CRYPTO_HASH select CRYPTO_SHA1 select CRYPTO_SHA256 - help - This selects the hash driver for the UX500_HASH hardware. - Depends on UX500/STM DMA if running in DMA mode. + help + This selects the hash driver for the UX500_HASH hardware. + Depends on UX500/STM DMA if running in DMA mode. config CRYPTO_DEV_UX500_DEBUG bool "Activate ux500 platform debug-mode for crypto and hash block" diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c index 95fb694a2667..800dfc4d16c4 100644 --- a/drivers/crypto/ux500/cryp/cryp_core.c +++ b/drivers/crypto/ux500/cryp/cryp_core.c @@ -951,7 +951,6 @@ static int aes_skcipher_setkey(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen) { struct cryp_ctx *ctx = crypto_skcipher_ctx(cipher); - u32 *flags = &cipher->base.crt_flags; pr_debug(DEV_DBG_NAME " [%s]", __func__); @@ -970,7 +969,6 @@ static int aes_skcipher_setkey(struct crypto_skcipher *cipher, default: pr_err(DEV_DBG_NAME "[%s]: Unknown keylen!", __func__); - *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; return -EINVAL; } diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c index 4b71e80951b7..fd045e64972a 100644 --- a/drivers/crypto/virtio/virtio_crypto_algs.c +++ b/drivers/crypto/virtio/virtio_crypto_algs.c @@ -272,11 +272,11 @@ static int virtio_crypto_alg_skcipher_init_sessions( if (keylen > vcrypto->max_cipher_key_len) { pr_err("virtio_crypto: the key is too long\n"); - goto bad_key; + return -EINVAL; } if (virtio_crypto_alg_validate_key(keylen, &alg)) - goto bad_key; + return -EINVAL; /* Create encryption session */ ret = virtio_crypto_alg_skcipher_init_session(ctx, @@ -291,10 +291,6 @@ static int virtio_crypto_alg_skcipher_init_sessions( return ret; } return 0; - -bad_key: - crypto_skcipher_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; } /* Note: kernel crypto API realization */ diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c index d59e736882f6..9fee1b1532a4 100644 --- a/drivers/crypto/vmx/aes_xts.c +++ b/drivers/crypto/vmx/aes_xts.c @@ -84,6 +84,9 @@ static int p8_aes_xts_crypt(struct skcipher_request *req, int enc) u8 tweak[AES_BLOCK_SIZE]; int ret; + if (req->cryptlen < AES_BLOCK_SIZE) + return -EINVAL; + if (!crypto_simd_usable() || (req->cryptlen % XTS_BLOCK_SIZE) != 0) { struct skcipher_request *subreq = skcipher_request_ctx(req); diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig index 35535833b6f7..0b1df12e0f21 100644 --- a/drivers/devfreq/Kconfig +++ b/drivers/devfreq/Kconfig @@ -77,7 +77,7 @@ config DEVFREQ_GOV_PASSIVE comment "DEVFREQ Drivers" config ARM_EXYNOS_BUS_DEVFREQ - tristate "ARM EXYNOS Generic Memory Bus DEVFREQ Driver" + tristate "ARM Exynos Generic Memory Bus DEVFREQ Driver" depends on ARCH_EXYNOS || COMPILE_TEST select DEVFREQ_GOV_SIMPLE_ONDEMAND select DEVFREQ_GOV_PASSIVE @@ -91,6 +91,16 @@ config ARM_EXYNOS_BUS_DEVFREQ and adjusts the operating frequencies and voltages with OPP support. This does not yet operate with optimal voltages. 
+config ARM_IMX8M_DDRC_DEVFREQ + tristate "i.MX8M DDRC DEVFREQ Driver" + depends on (ARCH_MXC && HAVE_ARM_SMCCC) || \ + (COMPILE_TEST && HAVE_ARM_SMCCC) + select DEVFREQ_GOV_SIMPLE_ONDEMAND + select DEVFREQ_GOV_USERSPACE + help + This adds the DEVFREQ driver for the i.MX8M DDR Controller. It allows + adjusting DRAM frequency. + config ARM_TEGRA_DEVFREQ tristate "NVIDIA Tegra30/114/124/210 DEVFREQ Driver" depends on ARCH_TEGRA_3x_SOC || ARCH_TEGRA_114_SOC || \ @@ -115,14 +125,15 @@ config ARM_TEGRA20_DEVFREQ config ARM_RK3399_DMC_DEVFREQ tristate "ARM RK3399 DMC DEVFREQ Driver" - depends on ARCH_ROCKCHIP + depends on (ARCH_ROCKCHIP && HAVE_ARM_SMCCC) || \ + (COMPILE_TEST && HAVE_ARM_SMCCC) select DEVFREQ_EVENT_ROCKCHIP_DFI select DEVFREQ_GOV_SIMPLE_ONDEMAND select PM_DEVFREQ_EVENT help - This adds the DEVFREQ driver for the RK3399 DMC(Dynamic Memory Controller). - It sets the frequency for the memory controller and reads the usage counts - from hardware. + This adds the DEVFREQ driver for the RK3399 DMC (Dynamic Memory Controller). + It sets the frequency for the memory controller and reads the usage counts + from hardware. source "drivers/devfreq/event/Kconfig" diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile index 338ae8440db6..3eb4d5e6635c 100644 --- a/drivers/devfreq/Makefile +++ b/drivers/devfreq/Makefile @@ -9,6 +9,7 @@ obj-$(CONFIG_DEVFREQ_GOV_PASSIVE) += governor_passive.o # DEVFREQ Drivers obj-$(CONFIG_ARM_EXYNOS_BUS_DEVFREQ) += exynos-bus.o +obj-$(CONFIG_ARM_IMX8M_DDRC_DEVFREQ) += imx8m-ddrc.o obj-$(CONFIG_ARM_RK3399_DMC_DEVFREQ) += rk3399_dmc.o obj-$(CONFIG_ARM_TEGRA_DEVFREQ) += tegra30-devfreq.o obj-$(CONFIG_ARM_TEGRA20_DEVFREQ) += tegra20-devfreq.o diff --git a/drivers/devfreq/devfreq-event.c b/drivers/devfreq/devfreq-event.c index 3dc5fd6065a3..8c31b0f2e28f 100644 --- a/drivers/devfreq/devfreq-event.c +++ b/drivers/devfreq/devfreq-event.c @@ -346,9 +346,9 @@ EXPORT_SYMBOL_GPL(devfreq_event_add_edev); /** * devfreq_event_remove_edev() - Remove the devfreq-event device registered. - * @dev : the devfreq-event device + * @edev : the devfreq-event device * - * Note that this function remove the registered devfreq-event device. + * Note that this function removes the registered devfreq-event device. */ int devfreq_event_remove_edev(struct devfreq_event_dev *edev) { diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index 57f6944d65a6..cceee8bc3c2f 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -10,6 +10,7 @@ #include <linux/kernel.h> #include <linux/kmod.h> #include <linux/sched.h> +#include <linux/debugfs.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/init.h> @@ -33,6 +34,7 @@ #define HZ_PER_KHZ 1000 static struct class *devfreq_class; +static struct dentry *devfreq_debugfs; /* * devfreq core provides delayed work based load monitoring helper @@ -209,10 +211,10 @@ static int set_freq_table(struct devfreq *devfreq) int devfreq_update_status(struct devfreq *devfreq, unsigned long freq) { int lev, prev_lev, ret = 0; - unsigned long cur_time; + u64 cur_time; lockdep_assert_held(&devfreq->lock); - cur_time = jiffies; + cur_time = get_jiffies_64(); /* Immediately exit if previous_freq is not initialized yet.
*/ if (!devfreq->previous_freq) @@ -224,8 +226,8 @@ int devfreq_update_status(struct devfreq *devfreq, unsigned long freq) goto out; } - devfreq->time_in_state[prev_lev] += - cur_time - devfreq->last_stat_updated; + devfreq->stats.time_in_state[prev_lev] += + cur_time - devfreq->stats.last_update; lev = devfreq_get_freq_level(devfreq, freq); if (lev < 0) { @@ -234,13 +236,13 @@ int devfreq_update_status(struct devfreq *devfreq, unsigned long freq) } if (lev != prev_lev) { - devfreq->trans_table[(prev_lev * - devfreq->profile->max_state) + lev]++; - devfreq->total_trans++; + devfreq->stats.trans_table[ + (prev_lev * devfreq->profile->max_state) + lev]++; + devfreq->stats.total_trans++; } out: - devfreq->last_stat_updated = cur_time; + devfreq->stats.last_update = cur_time; return ret; } EXPORT_SYMBOL(devfreq_update_status); @@ -535,7 +537,7 @@ void devfreq_monitor_resume(struct devfreq *devfreq) msecs_to_jiffies(devfreq->profile->polling_ms)); out_update: - devfreq->last_stat_updated = jiffies; + devfreq->stats.last_update = get_jiffies_64(); devfreq->stop_polling = false; if (devfreq->profile->get_cur_freq && @@ -807,28 +809,29 @@ struct devfreq *devfreq_add_device(struct device *dev, goto err_out; } - devfreq->trans_table = devm_kzalloc(&devfreq->dev, + devfreq->stats.trans_table = devm_kzalloc(&devfreq->dev, array3_size(sizeof(unsigned int), devfreq->profile->max_state, devfreq->profile->max_state), GFP_KERNEL); - if (!devfreq->trans_table) { + if (!devfreq->stats.trans_table) { mutex_unlock(&devfreq->lock); err = -ENOMEM; goto err_devfreq; } - devfreq->time_in_state = devm_kcalloc(&devfreq->dev, + devfreq->stats.time_in_state = devm_kcalloc(&devfreq->dev, devfreq->profile->max_state, - sizeof(unsigned long), + sizeof(*devfreq->stats.time_in_state), GFP_KERNEL); - if (!devfreq->time_in_state) { + if (!devfreq->stats.time_in_state) { mutex_unlock(&devfreq->lock); err = -ENOMEM; goto err_devfreq; } - devfreq->last_stat_updated = jiffies; + devfreq->stats.total_trans = 0; + devfreq->stats.last_update = get_jiffies_64(); srcu_init_notifier_head(&devfreq->transition_notifier_list); @@ -1259,6 +1262,14 @@ err_out: } EXPORT_SYMBOL(devfreq_remove_governor); +static ssize_t name_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct devfreq *devfreq = to_devfreq(dev); + return sprintf(buf, "%s\n", dev_name(devfreq->dev.parent)); +} +static DEVICE_ATTR_RO(name); + static ssize_t governor_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1461,6 +1472,7 @@ static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr, return sprintf(buf, "%lu\n", min_freq); } +static DEVICE_ATTR_RW(min_freq); static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -1501,7 +1513,6 @@ static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr, return count; } -static DEVICE_ATTR_RW(min_freq); static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -1580,18 +1591,47 @@ static ssize_t trans_stat_show(struct device *dev, devfreq->profile->freq_table[i]); for (j = 0; j < max_state; j++) len += sprintf(buf + len, "%10u", - devfreq->trans_table[(i * max_state) + j]); - len += sprintf(buf + len, "%10u\n", - jiffies_to_msecs(devfreq->time_in_state[i])); + devfreq->stats.trans_table[(i * max_state) + j]); + + len += sprintf(buf + len, "%10llu\n", (u64) + jiffies64_to_msecs(devfreq->stats.time_in_state[i])); } len += sprintf(buf + len, "Total 
transition : %u\n", - devfreq->total_trans); + devfreq->stats.total_trans); return len; } -static DEVICE_ATTR_RO(trans_stat); + +static ssize_t trans_stat_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct devfreq *df = to_devfreq(dev); + int err, value; + + if (df->profile->max_state == 0) + return count; + + err = kstrtoint(buf, 10, &value); + if (err || value != 0) + return -EINVAL; + + mutex_lock(&df->lock); + memset(df->stats.time_in_state, 0, (df->profile->max_state * + sizeof(*df->stats.time_in_state))); + memset(df->stats.trans_table, 0, array3_size(sizeof(unsigned int), + df->profile->max_state, + df->profile->max_state)); + df->stats.total_trans = 0; + df->stats.last_update = get_jiffies_64(); + mutex_unlock(&df->lock); + + return count; +} +static DEVICE_ATTR_RW(trans_stat); static struct attribute *devfreq_attrs[] = { + &dev_attr_name.attr, &dev_attr_governor.attr, &dev_attr_available_governors.attr, &dev_attr_cur_freq.attr, @@ -1605,6 +1645,81 @@ static struct attribute *devfreq_attrs[] = { }; ATTRIBUTE_GROUPS(devfreq); +/** + * devfreq_summary_show() - Show the summary of the devfreq devices + * @s: seq_file instance to show the summary of devfreq devices + * @data: not used + * + * Show the summary of the devfreq devices via 'devfreq_summary' debugfs file. + * It helps the user to know detailed information about the devfreq devices. + * + * Return 0 always, because it only shows the information without changing any data. + */ +static int devfreq_summary_show(struct seq_file *s, void *data) +{ + struct devfreq *devfreq; + struct devfreq *p_devfreq = NULL; + unsigned long cur_freq, min_freq, max_freq; + unsigned int polling_ms; + + seq_printf(s, "%-30s %-10s %-10s %-15s %10s %12s %12s %12s\n", + "dev_name", + "dev", + "parent_dev", + "governor", + "polling_ms", + "cur_freq_Hz", + "min_freq_Hz", + "max_freq_Hz"); + seq_printf(s, "%30s %10s %10s %15s %10s %12s %12s %12s\n", + "------------------------------", + "----------", + "----------", + "---------------", + "----------", + "------------", + "------------", + "------------"); + + mutex_lock(&devfreq_list_lock); + + list_for_each_entry_reverse(devfreq, &devfreq_list, node) { +#if IS_ENABLED(CONFIG_DEVFREQ_GOV_PASSIVE) + if (!strncmp(devfreq->governor_name, DEVFREQ_GOV_PASSIVE, + DEVFREQ_NAME_LEN)) { + struct devfreq_passive_data *data = devfreq->data; + + if (data) + p_devfreq = data->parent; + } else { + p_devfreq = NULL; + } +#endif + + mutex_lock(&devfreq->lock); + cur_freq = devfreq->previous_freq; + get_freq_range(devfreq, &min_freq, &max_freq); + polling_ms = devfreq->profile->polling_ms; + mutex_unlock(&devfreq->lock); + + seq_printf(s, + "%-30s %-10s %-10s %-15s %10d %12ld %12ld %12ld\n", + dev_name(devfreq->dev.parent), + dev_name(&devfreq->dev), + p_devfreq ?
dev_name(&p_devfreq->dev) : "null", + devfreq->governor_name, + polling_ms, + cur_freq, + min_freq, + max_freq); + } + + mutex_unlock(&devfreq_list_lock); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(devfreq_summary); + static int __init devfreq_init(void) { devfreq_class = class_create(THIS_MODULE, "devfreq"); @@ -1621,6 +1736,11 @@ static int __init devfreq_init(void) } devfreq_class->dev_groups = devfreq_groups; + devfreq_debugfs = debugfs_create_dir("devfreq", NULL); + debugfs_create_file("devfreq_summary", 0444, + devfreq_debugfs, NULL, + &devfreq_summary_fops); + return 0; } subsys_initcall(devfreq_init); @@ -1814,7 +1934,7 @@ static void devm_devfreq_notifier_release(struct device *dev, void *res) /** * devm_devfreq_register_notifier() - - Resource-managed devfreq_register_notifier() + * - Resource-managed devfreq_register_notifier() * @dev: The devfreq user device. (parent of devfreq) * @devfreq: The devfreq object. * @nb: The notifier block to be unregistered. @@ -1850,7 +1970,7 @@ EXPORT_SYMBOL(devm_devfreq_register_notifier); /** * devm_devfreq_unregister_notifier() - - Resource-managed devfreq_unregister_notifier() + * - Resource-managed devfreq_unregister_notifier() * @dev: The devfreq user device. (parent of devfreq) * @devfreq: The devfreq object. * @nb: The notifier block to be unregistered. diff --git a/drivers/devfreq/event/Kconfig b/drivers/devfreq/event/Kconfig index cef2cf5347ca..878825372f6f 100644 --- a/drivers/devfreq/event/Kconfig +++ b/drivers/devfreq/event/Kconfig @@ -15,7 +15,7 @@ menuconfig PM_DEVFREQ_EVENT if PM_DEVFREQ_EVENT config DEVFREQ_EVENT_EXYNOS_NOCP - tristate "EXYNOS NoC (Network On Chip) Probe DEVFREQ event Driver" + tristate "Exynos NoC (Network On Chip) Probe DEVFREQ event Driver" depends on ARCH_EXYNOS || COMPILE_TEST select PM_OPP select REGMAP_MMIO @@ -24,7 +24,7 @@ config DEVFREQ_EVENT_EXYNOS_NOCP (Network on Chip) Probe counters to measure the bandwidth of AXI bus. config DEVFREQ_EVENT_EXYNOS_PPMU - tristate "EXYNOS PPMU (Platform Performance Monitoring Unit) DEVFREQ event Driver" + tristate "Exynos PPMU (Platform Performance Monitoring Unit) DEVFREQ event Driver" depends on ARCH_EXYNOS || COMPILE_TEST select PM_OPP help @@ -34,7 +34,7 @@ config DEVFREQ_EVENT_EXYNOS_PPMU config DEVFREQ_EVENT_ROCKCHIP_DFI tristate "ROCKCHIP DFI DEVFREQ event Driver" - depends on ARCH_ROCKCHIP + depends on ARCH_ROCKCHIP || COMPILE_TEST help This add the devfreq-event driver for Rockchip SoC. It provides DFI (DDR Monitor Module) driver to count ddr load. diff --git a/drivers/devfreq/event/exynos-nocp.c b/drivers/devfreq/event/exynos-nocp.c index 1c565926db9f..ccc531ee6938 100644 --- a/drivers/devfreq/event/exynos-nocp.c +++ b/drivers/devfreq/event/exynos-nocp.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * exynos-nocp.c - EXYNOS NoC (Network On Chip) Probe support + * exynos-nocp.c - Exynos NoC (Network On Chip) Probe support * * Copyright (c) 2016 Samsung Electronics Co., Ltd. * Author : Chanwoo Choi <cw00.choi@samsung.com> diff --git a/drivers/devfreq/event/exynos-nocp.h b/drivers/devfreq/event/exynos-nocp.h index 55cc96284a36..2d6f08cfd0c5 100644 --- a/drivers/devfreq/event/exynos-nocp.h +++ b/drivers/devfreq/event/exynos-nocp.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * exynos-nocp.h - EXYNOS NoC (Network on Chip) Probe header file + * exynos-nocp.h - Exynos NoC (Network on Chip) Probe header file * * Copyright (c) 2016 Samsung Electronics Co., Ltd. 
* Author : Chanwoo Choi <cw00.choi@samsung.com> diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c index 85c7a77bf3f0..17ed980d9099 100644 --- a/drivers/devfreq/event/exynos-ppmu.c +++ b/drivers/devfreq/event/exynos-ppmu.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * exynos_ppmu.c - EXYNOS PPMU (Platform Performance Monitoring Unit) support + * exynos_ppmu.c - Exynos PPMU (Platform Performance Monitoring Unit) support * * Copyright (c) 2014-2015 Samsung Electronics Co., Ltd. * Author : Chanwoo Choi <cw00.choi@samsung.com> @@ -101,17 +101,22 @@ static struct __exynos_ppmu_events { PPMU_EVENT(dmc1_1), }; -static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev) +static int __exynos_ppmu_find_ppmu_id(const char *edev_name) { int i; for (i = 0; i < ARRAY_SIZE(ppmu_events); i++) - if (!strcmp(edev->desc->name, ppmu_events[i].name)) + if (!strcmp(edev_name, ppmu_events[i].name)) return ppmu_events[i].id; return -EINVAL; } +static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev) +{ + return __exynos_ppmu_find_ppmu_id(edev->desc->name); +} + /* * The devfreq-event ops structure for PPMU v1.1 */ @@ -556,13 +561,11 @@ static int of_get_devfreq_events(struct device_node *np, * use default if not. */ if (info->ppmu_type == EXYNOS_TYPE_PPMU_V2) { - struct devfreq_event_dev edev; int id; /* Not all registers take the same value for * read+write data count. */ - edev.desc = &desc[j]; - id = exynos_ppmu_find_ppmu_id(&edev); + id = __exynos_ppmu_find_ppmu_id(desc[j].name); switch (id) { case PPMU_PMNCNT0: diff --git a/drivers/devfreq/event/exynos-ppmu.h b/drivers/devfreq/event/exynos-ppmu.h index 284420047455..97f667d0cbdd 100644 --- a/drivers/devfreq/event/exynos-ppmu.h +++ b/drivers/devfreq/event/exynos-ppmu.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * exynos_ppmu.h - EXYNOS PPMU header file + * exynos_ppmu.h - Exynos PPMU header file * * Copyright (c) 2015 Samsung Electronics Co., Ltd. 
* Author : Chanwoo Choi <cw00.choi@samsung.com> diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c index 5d1042188727..9a88faaf8b27 100644 --- a/drivers/devfreq/event/rockchip-dfi.c +++ b/drivers/devfreq/event/rockchip-dfi.c @@ -177,7 +177,6 @@ static int rockchip_dfi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct rockchip_dfi *data; - struct resource *res; struct devfreq_event_desc *desc; struct device_node *np = pdev->dev.of_node, *node; @@ -185,8 +184,7 @@ static int rockchip_dfi_probe(struct platform_device *pdev) if (!data) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - data->regs = devm_ioremap_resource(&pdev->dev, res); + data->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(data->regs)) return PTR_ERR(data->regs); @@ -200,6 +198,7 @@ static int rockchip_dfi_probe(struct platform_device *pdev) node = of_parse_phandle(np, "rockchip,pmu", 0); if (node) { data->regmap_pmu = syscon_node_to_regmap(node); + of_node_put(node); if (IS_ERR(data->regmap_pmu)) return PTR_ERR(data->regmap_pmu); } diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c index c832673273a2..8fa8eb541373 100644 --- a/drivers/devfreq/exynos-bus.c +++ b/drivers/devfreq/exynos-bus.c @@ -15,11 +15,10 @@ #include <linux/device.h> #include <linux/export.h> #include <linux/module.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/pm_opp.h> #include <linux/platform_device.h> #include <linux/regulator/consumer.h> -#include <linux/slab.h> #define DEFAULT_SATURATION_RATIO 40 @@ -127,6 +126,7 @@ static int exynos_bus_get_dev_status(struct device *dev, ret = exynos_bus_get_event(bus, &edata); if (ret < 0) { + dev_err(dev, "failed to get event from devfreq-event devices\n"); stat->total_time = stat->busy_time = 0; goto err; } @@ -287,52 +287,12 @@ err_clk: return ret; } -static int exynos_bus_probe(struct platform_device *pdev) +static int exynos_bus_profile_init(struct exynos_bus *bus, + struct devfreq_dev_profile *profile) { - struct device *dev = &pdev->dev; - struct device_node *np = dev->of_node, *node; - struct devfreq_dev_profile *profile; + struct device *dev = bus->dev; struct devfreq_simple_ondemand_data *ondemand_data; - struct devfreq_passive_data *passive_data; - struct devfreq *parent_devfreq; - struct exynos_bus *bus; - int ret, max_state; - unsigned long min_freq, max_freq; - bool passive = false; - - if (!np) { - dev_err(dev, "failed to find devicetree node\n"); - return -EINVAL; - } - - bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL); - if (!bus) - return -ENOMEM; - mutex_init(&bus->lock); - bus->dev = &pdev->dev; - platform_set_drvdata(pdev, bus); - - profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL); - if (!profile) - return -ENOMEM; - - node = of_parse_phandle(dev->of_node, "devfreq", 0); - if (node) { - of_node_put(node); - passive = true; - } else { - ret = exynos_bus_parent_parse_of(np, bus); - if (ret < 0) - return ret; - } - - /* Parse the device-tree to get the resource information */ - ret = exynos_bus_parse_of(np, bus); - if (ret < 0) - goto err_reg; - - if (passive) - goto passive; + int ret; /* Initialize the struct profile and governor data for parent device */ profile->polling_ms = 50; @@ -341,10 +301,9 @@ static int exynos_bus_probe(struct platform_device *pdev) profile->exit = exynos_bus_exit; ondemand_data = devm_kzalloc(dev, sizeof(*ondemand_data), GFP_KERNEL); - if (!ondemand_data) { - ret = -ENOMEM; - goto err; - } + if 
(!ondemand_data) + return -ENOMEM; + ondemand_data->upthreshold = 40; ondemand_data->downdifferential = 5; @@ -354,15 +313,14 @@ static int exynos_bus_probe(struct platform_device *pdev) ondemand_data); if (IS_ERR(bus->devfreq)) { dev_err(dev, "failed to add devfreq device\n"); - ret = PTR_ERR(bus->devfreq); - goto err; + return PTR_ERR(bus->devfreq); } /* Register opp_notifier to catch the change of OPP */ ret = devm_devfreq_register_opp_notifier(dev, bus->devfreq); if (ret < 0) { dev_err(dev, "failed to register opp notifier\n"); - goto err; + return ret; } /* @@ -372,33 +330,44 @@ static int exynos_bus_probe(struct platform_device *pdev) ret = exynos_bus_enable_edev(bus); if (ret < 0) { dev_err(dev, "failed to enable devfreq-event devices\n"); - goto err; + return ret; } ret = exynos_bus_set_event(bus); if (ret < 0) { dev_err(dev, "failed to set event to devfreq-event devices\n"); - goto err; + goto err_edev; } - goto out; -passive: + return 0; + +err_edev: + if (exynos_bus_disable_edev(bus)) + dev_warn(dev, "failed to disable the devfreq-event devices\n"); + + return ret; +} + +static int exynos_bus_profile_init_passive(struct exynos_bus *bus, + struct devfreq_dev_profile *profile) +{ + struct device *dev = bus->dev; + struct devfreq_passive_data *passive_data; + struct devfreq *parent_devfreq; + /* Initialize the struct profile and governor data for passive device */ profile->target = exynos_bus_target; profile->exit = exynos_bus_passive_exit; /* Get the instance of parent devfreq device */ parent_devfreq = devfreq_get_devfreq_by_phandle(dev, 0); - if (IS_ERR(parent_devfreq)) { - ret = -EPROBE_DEFER; - goto err; - } + if (IS_ERR(parent_devfreq)) + return -EPROBE_DEFER; passive_data = devm_kzalloc(dev, sizeof(*passive_data), GFP_KERNEL); - if (!passive_data) { - ret = -ENOMEM; - goto err; - } + if (!passive_data) + return -ENOMEM; + passive_data->parent = parent_devfreq; /* Add devfreq device for exynos bus with passive governor */ @@ -407,11 +376,61 @@ passive: if (IS_ERR(bus->devfreq)) { dev_err(dev, "failed to add devfreq dev with passive governor\n"); - ret = PTR_ERR(bus->devfreq); - goto err; + return PTR_ERR(bus->devfreq); + } + + return 0; +} + +static int exynos_bus_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node, *node; + struct devfreq_dev_profile *profile; + struct exynos_bus *bus; + int ret, max_state; + unsigned long min_freq, max_freq; + bool passive = false; + + if (!np) { + dev_err(dev, "failed to find devicetree node\n"); + return -EINVAL; } -out: + bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL); + if (!bus) + return -ENOMEM; + mutex_init(&bus->lock); + bus->dev = &pdev->dev; + platform_set_drvdata(pdev, bus); + + profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL); + if (!profile) + return -ENOMEM; + + node = of_parse_phandle(dev->of_node, "devfreq", 0); + if (node) { + of_node_put(node); + passive = true; + } else { + ret = exynos_bus_parent_parse_of(np, bus); + if (ret < 0) + return ret; + } + + /* Parse the device-tree to get the resource information */ + ret = exynos_bus_parse_of(np, bus); + if (ret < 0) + goto err_reg; + + if (passive) + ret = exynos_bus_profile_init_passive(bus, profile); + else + ret = exynos_bus_profile_init(bus, profile); + + if (ret < 0) + goto err; + max_state = bus->devfreq->profile->max_state; min_freq = (bus->devfreq->profile->freq_table[0] / 1000); max_freq = (bus->devfreq->profile->freq_table[max_state - 1] / 1000); diff --git 
a/drivers/devfreq/imx8m-ddrc.c b/drivers/devfreq/imx8m-ddrc.c new file mode 100644 index 000000000000..bc82d3653bff --- /dev/null +++ b/drivers/devfreq/imx8m-ddrc.c @@ -0,0 +1,471 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2019 NXP + */ + +#include <linux/module.h> +#include <linux/device.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/devfreq.h> +#include <linux/pm_opp.h> +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/arm-smccc.h> + +#define IMX_SIP_DDR_DVFS 0xc2000004 + +/* Query available frequencies. */ +#define IMX_SIP_DDR_DVFS_GET_FREQ_COUNT 0x10 +#define IMX_SIP_DDR_DVFS_GET_FREQ_INFO 0x11 + +/* + * This should be in a 1:1 mapping with devicetree OPPs but + * firmware provides additional info. + */ +struct imx8m_ddrc_freq { + unsigned long rate; + unsigned long smcarg; + int dram_core_parent_index; + int dram_alt_parent_index; + int dram_apb_parent_index; +}; + +/* Hardware limitation */ +#define IMX8M_DDRC_MAX_FREQ_COUNT 4 + +/* + * i.MX8M DRAM Controller clocks have the following structure (abridged): + * + * +----------+ |\ +------+ + * | dram_pll |-------|M| dram_core | | + * +----------+ |U|---------->| D | + * /--|X| | D | + * dram_alt_root | |/ | R | + * | | C | + * +---------+ | | + * |FIX DIV/4| | | + * +---------+ | | + * composite: | | | + * +----------+ | | | + * | dram_alt |----/ | | + * +----------+ | | + * | dram_apb |-------------------->| | + * +----------+ +------+ + * + * The dram_pll is used for higher rates and dram_alt is used for lower rates. + * + * Frequency switching is implemented in TF-A (via SMC call) and can change the + * configuration of the clocks, including mux parents. The dram_alt and + * dram_apb clocks are "imx composite" and their parent can change too. + * + * We need to prepare/enable the new mux parents ahead of switching and update + * their information afterwards. + */ +struct imx8m_ddrc { + struct devfreq_dev_profile profile; + struct devfreq *devfreq; + + /* For frequency switching: */ + struct clk *dram_core; + struct clk *dram_pll; + struct clk *dram_alt; + struct clk *dram_apb; + + int freq_count; + struct imx8m_ddrc_freq freq_table[IMX8M_DDRC_MAX_FREQ_COUNT]; +}; + +static struct imx8m_ddrc_freq *imx8m_ddrc_find_freq(struct imx8m_ddrc *priv, + unsigned long rate) +{ + struct imx8m_ddrc_freq *freq; + int i; + + /* + * Firmware reports values in MT/s, so we round down from Hz. + * Rounding is extra generous to ensure a match. + */ + rate = DIV_ROUND_CLOSEST(rate, 250000); + for (i = 0; i < priv->freq_count; ++i) { + freq = &priv->freq_table[i]; + if (freq->rate == rate || + freq->rate + 1 == rate || + freq->rate - 1 == rate) + return freq; + } + + return NULL; +} + +static void imx8m_ddrc_smc_set_freq(int target_freq) +{ + struct arm_smccc_res res; + u32 online_cpus = 0; + int cpu; + + local_irq_disable(); + + for_each_online_cpu(cpu) + online_cpus |= (1 << (cpu * 8)); + + /* change the DDR frequency */ + arm_smccc_smc(IMX_SIP_DDR_DVFS, target_freq, online_cpus, + 0, 0, 0, 0, 0, &res); + + local_irq_enable(); +} + +static struct clk *clk_get_parent_by_index(struct clk *clk, int index) +{ + struct clk_hw *hw; + + hw = clk_hw_get_parent_by_index(__clk_get_hw(clk), index); + + return hw ? 
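/* map the clk_hw parent back to the consumer-visible struct clk */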
hw->clk : NULL; +} + +static int imx8m_ddrc_set_freq(struct device *dev, struct imx8m_ddrc_freq *freq) +{ + struct imx8m_ddrc *priv = dev_get_drvdata(dev); + struct clk *new_dram_core_parent; + struct clk *new_dram_alt_parent; + struct clk *new_dram_apb_parent; + int ret; + + /* + * Fetch new parents + * + * new_dram_alt_parent and new_dram_apb_parent are optional but + * new_dram_core_parent is not. + */ + new_dram_core_parent = clk_get_parent_by_index( + priv->dram_core, freq->dram_core_parent_index - 1); + if (!new_dram_core_parent) { + dev_err(dev, "failed to fetch new dram_core parent\n"); + return -EINVAL; + } + if (freq->dram_alt_parent_index) { + new_dram_alt_parent = clk_get_parent_by_index( + priv->dram_alt, + freq->dram_alt_parent_index - 1); + if (!new_dram_alt_parent) { + dev_err(dev, "failed to fetch new dram_alt parent\n"); + return -EINVAL; + } + } else + new_dram_alt_parent = NULL; + + if (freq->dram_apb_parent_index) { + new_dram_apb_parent = clk_get_parent_by_index( + priv->dram_apb, + freq->dram_apb_parent_index - 1); + if (!new_dram_apb_parent) { + dev_err(dev, "failed to fetch new dram_apb parent\n"); + return -EINVAL; + } + } else + new_dram_apb_parent = NULL; + + /* increase reference counts and ensure clks are ON before switch */ + ret = clk_prepare_enable(new_dram_core_parent); + if (ret) { + dev_err(dev, "failed to enable new dram_core parent: %d\n", + ret); + goto out; + } + ret = clk_prepare_enable(new_dram_alt_parent); + if (ret) { + dev_err(dev, "failed to enable new dram_alt parent: %d\n", + ret); + goto out_disable_core_parent; + } + ret = clk_prepare_enable(new_dram_apb_parent); + if (ret) { + dev_err(dev, "failed to enable new dram_apb parent: %d\n", + ret); + goto out_disable_alt_parent; + } + + imx8m_ddrc_smc_set_freq(freq->smcarg); + + /* update parents in clk tree after switch. */ + ret = clk_set_parent(priv->dram_core, new_dram_core_parent); + if (ret) + dev_warn(dev, "failed to set dram_core parent: %d\n", ret); + if (new_dram_alt_parent) { + ret = clk_set_parent(priv->dram_alt, new_dram_alt_parent); + if (ret) + dev_warn(dev, "failed to set dram_alt parent: %d\n", + ret); + } + if (new_dram_apb_parent) { + ret = clk_set_parent(priv->dram_apb, new_dram_apb_parent); + if (ret) + dev_warn(dev, "failed to set dram_apb parent: %d\n", + ret); + } + + /* + * Explicitly refresh dram PLL rate. + * + * Even if it's marked with CLK_GET_RATE_NOCACHE, the rate will not be + * automatically refreshed when clk_get_rate is called on children. + */ + clk_get_rate(priv->dram_pll); + + /* + * clk_set_parent transfers the reference count from the old parent; 
+ * now we drop extra reference counts used during the switch + */ + clk_disable_unprepare(new_dram_apb_parent); +out_disable_alt_parent: + clk_disable_unprepare(new_dram_alt_parent); +out_disable_core_parent: + clk_disable_unprepare(new_dram_core_parent); +out: + return ret; +} + +static int imx8m_ddrc_target(struct device *dev, unsigned long *freq, u32 flags) +{ + struct imx8m_ddrc *priv = dev_get_drvdata(dev); + struct imx8m_ddrc_freq *freq_info; + struct dev_pm_opp *new_opp; + unsigned long old_freq, new_freq; + int ret; + + new_opp = devfreq_recommended_opp(dev, freq, flags); + if (IS_ERR(new_opp)) { + ret = PTR_ERR(new_opp); + dev_err(dev, "failed to get recommended opp: %d\n", ret); + return ret; + } + dev_pm_opp_put(new_opp); + + old_freq = clk_get_rate(priv->dram_core); + if (*freq == old_freq) + return 0; + + freq_info = imx8m_ddrc_find_freq(priv, *freq); + if (!freq_info) + return -EINVAL; + + /* + * Read back the clk rate to verify switch was correct and so that + * we can report it on all error paths. + */ + ret = imx8m_ddrc_set_freq(dev, freq_info); + + new_freq = clk_get_rate(priv->dram_core); + if (ret) + dev_err(dev, "ddrc failed freq switch to %lu from %lu: error %d. now at %lu\n", + *freq, old_freq, ret, new_freq); + else if (*freq != new_freq) + dev_err(dev, "ddrc failed freq update to %lu from %lu, now at %lu\n", + *freq, old_freq, new_freq); + else + dev_dbg(dev, "ddrc freq set to %lu (was %lu)\n", + *freq, old_freq); + + return ret; +} + +static int imx8m_ddrc_get_cur_freq(struct device *dev, unsigned long *freq) +{ + struct imx8m_ddrc *priv = dev_get_drvdata(dev); + + *freq = clk_get_rate(priv->dram_core); + + return 0; +} + +static int imx8m_ddrc_get_dev_status(struct device *dev, + struct devfreq_dev_status *stat) +{ + struct imx8m_ddrc *priv = dev_get_drvdata(dev); + + stat->busy_time = 0; + stat->total_time = 0; + stat->current_frequency = clk_get_rate(priv->dram_core); + + return 0; +} + +static int imx8m_ddrc_init_freq_info(struct device *dev) +{ + struct imx8m_ddrc *priv = dev_get_drvdata(dev); + struct arm_smccc_res res; + int index; + + /* An error here means DDR DVFS API not supported by firmware */ + arm_smccc_smc(IMX_SIP_DDR_DVFS, IMX_SIP_DDR_DVFS_GET_FREQ_COUNT, + 0, 0, 0, 0, 0, 0, &res); + priv->freq_count = res.a0; + if (priv->freq_count <= 0 || + priv->freq_count > IMX8M_DDRC_MAX_FREQ_COUNT) + return -ENODEV; + + for (index = 0; index < priv->freq_count; ++index) { + struct imx8m_ddrc_freq *freq = &priv->freq_table[index]; + + arm_smccc_smc(IMX_SIP_DDR_DVFS, IMX_SIP_DDR_DVFS_GET_FREQ_INFO, + index, 0, 0, 0, 0, 0, &res); + /* Result should be strictly positive */ + if ((long)res.a0 <= 0) + return -ENODEV; + + freq->rate = res.a0; + freq->smcarg = index; + freq->dram_core_parent_index = res.a1; + freq->dram_alt_parent_index = res.a2; + freq->dram_apb_parent_index = res.a3; + + /* dram_core has 2 options: dram_pll or dram_alt_root */ + if (freq->dram_core_parent_index != 1 && + freq->dram_core_parent_index != 2) + return -ENODEV; + /* dram_apb and dram_alt have exactly 8 possible parents */ + if (freq->dram_alt_parent_index > 8 || + freq->dram_apb_parent_index > 8) + return -ENODEV; + /* dram_core from alt requires explicit dram_alt parent */ + if (freq->dram_core_parent_index == 2 && + freq->dram_alt_parent_index == 0) + return -ENODEV; + } + + return 0; +} + +static int imx8m_ddrc_check_opps(struct device *dev) +{ + struct imx8m_ddrc *priv = dev_get_drvdata(dev); + struct imx8m_ddrc_freq *freq_info; + struct dev_pm_opp *opp; + unsigned long freq; + 
int i, opp_count; + + /* Enumerate DT OPPs and disable those not supported by firmware */ + opp_count = dev_pm_opp_get_opp_count(dev); + if (opp_count < 0) + return opp_count; + for (i = 0, freq = 0; i < opp_count; ++i, ++freq) { + opp = dev_pm_opp_find_freq_ceil(dev, &freq); + if (IS_ERR(opp)) { + dev_err(dev, "Failed enumerating OPPs: %ld\n", + PTR_ERR(opp)); + return PTR_ERR(opp); + } + dev_pm_opp_put(opp); + + freq_info = imx8m_ddrc_find_freq(priv, freq); + if (!freq_info) { + dev_info(dev, "Disable unsupported OPP %luHz %luMT/s\n", + freq, DIV_ROUND_CLOSEST(freq, 250000)); + dev_pm_opp_disable(dev, freq); + } + } + + return 0; +} + +static void imx8m_ddrc_exit(struct device *dev) +{ + dev_pm_opp_of_remove_table(dev); +} + +static int imx8m_ddrc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct imx8m_ddrc *priv; + const char *gov = DEVFREQ_GOV_USERSPACE; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + platform_set_drvdata(pdev, priv); + + ret = imx8m_ddrc_init_freq_info(dev); + if (ret) { + dev_err(dev, "failed to init firmware freq info: %d\n", ret); + return ret; + } + + priv->dram_core = devm_clk_get(dev, "core"); + if (IS_ERR(priv->dram_core)) { + ret = PTR_ERR(priv->dram_core); + dev_err(dev, "failed to fetch core clock: %d\n", ret); + return ret; + } + priv->dram_pll = devm_clk_get(dev, "pll"); + if (IS_ERR(priv->dram_pll)) { + ret = PTR_ERR(priv->dram_pll); + dev_err(dev, "failed to fetch pll clock: %d\n", ret); + return ret; + } + priv->dram_alt = devm_clk_get(dev, "alt"); + if (IS_ERR(priv->dram_alt)) { + ret = PTR_ERR(priv->dram_alt); + dev_err(dev, "failed to fetch alt clock: %d\n", ret); + return ret; + } + priv->dram_apb = devm_clk_get(dev, "apb"); + if (IS_ERR(priv->dram_apb)) { + ret = PTR_ERR(priv->dram_apb); + dev_err(dev, "failed to fetch apb clock: %d\n", ret); + return ret; + } + + ret = dev_pm_opp_of_add_table(dev); + if (ret < 0) { + dev_err(dev, "failed to get OPP table\n"); + return ret; + } + + ret = imx8m_ddrc_check_opps(dev); + if (ret < 0) + goto err; + + priv->profile.polling_ms = 1000; + priv->profile.target = imx8m_ddrc_target; + priv->profile.get_dev_status = imx8m_ddrc_get_dev_status; + priv->profile.exit = imx8m_ddrc_exit; + priv->profile.get_cur_freq = imx8m_ddrc_get_cur_freq; + priv->profile.initial_freq = clk_get_rate(priv->dram_core); + + priv->devfreq = devm_devfreq_add_device(dev, &priv->profile, + gov, NULL); + if (IS_ERR(priv->devfreq)) { + ret = PTR_ERR(priv->devfreq); + dev_err(dev, "failed to add devfreq device: %d\n", ret); + goto err; + } + + return 0; + +err: + dev_pm_opp_of_remove_table(dev); + return ret; +} + +static const struct of_device_id imx8m_ddrc_of_match[] = { + { .compatible = "fsl,imx8m-ddrc", }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, imx8m_ddrc_of_match); + +static struct platform_driver imx8m_ddrc_platdrv = { + .probe = imx8m_ddrc_probe, + .driver = { + .name = "imx8m-ddrc-devfreq", + .of_match_table = of_match_ptr(imx8m_ddrc_of_match), + }, +}; +module_platform_driver(imx8m_ddrc_platdrv); + +MODULE_DESCRIPTION("i.MX8M DDR Controller frequency driver"); +MODULE_AUTHOR("Leonard Crestez <leonard.crestez@nxp.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c index 2e65d7279d79..24f04f78285b 100644 --- a/drivers/devfreq/rk3399_dmc.c +++ b/drivers/devfreq/rk3399_dmc.c @@ -364,7 +364,8 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev) if (res.a0) { dev_err(dev, 
"Failed to set dram param: %ld\n", res.a0); - return -EINVAL; + ret = -EINVAL; + goto err_edev; } } } @@ -372,8 +373,11 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev) node = of_parse_phandle(np, "rockchip,pmu", 0); if (node) { data->regmap_pmu = syscon_node_to_regmap(node); - if (IS_ERR(data->regmap_pmu)) - return PTR_ERR(data->regmap_pmu); + of_node_put(node); + if (IS_ERR(data->regmap_pmu)) { + ret = PTR_ERR(data->regmap_pmu); + goto err_edev; + } } regmap_read(data->regmap_pmu, RK3399_PMUGRF_OS_REG2, &val); @@ -391,7 +395,8 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev) data->odt_dis_freq = data->timing.lpddr4_odt_dis_freq; break; default: - return -EINVAL; + ret = -EINVAL; + goto err_edev; }; arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, 0, 0, @@ -425,7 +430,8 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev) */ if (dev_pm_opp_of_add_table(dev)) { dev_err(dev, "Invalid operating-points in device tree.\n"); - return -EINVAL; + ret = -EINVAL; + goto err_edev; } of_property_read_u32(np, "upthreshold", @@ -465,6 +471,9 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev) err_free_opp: dev_pm_opp_of_remove_table(&pdev->dev); +err_edev: + devfreq_event_disable_edev(data->edev); + return ret; } diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 6fa1eba9d477..5142da401db3 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -239,6 +239,14 @@ config FSL_RAID the capability to offload memcpy, xor and pq computation for raid5/6. +config HISI_DMA + tristate "HiSilicon DMA Engine support" + depends on ARM64 || (COMPILE_TEST && PCI_MSI) + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + help + Support HiSilicon Kunpeng DMA engine. + config IMG_MDC_DMA tristate "IMG MDC support" depends on MIPS || COMPILE_TEST @@ -273,6 +281,19 @@ config INTEL_IDMA64 Enable DMA support for Intel Low Power Subsystem such as found on Intel Skylake PCH. +config INTEL_IDXD + tristate "Intel Data Accelerators support" + depends on PCI && X86_64 + select DMA_ENGINE + select SBITMAP + help + Enable support for the Intel(R) data accelerators present + in Intel Xeon CPU. + + Say Y if you have such a platform. + + If unsure, say N. + config INTEL_IOATDMA tristate "Intel I/OAT DMA support" depends on PCI && X86_64 @@ -497,6 +518,15 @@ config PXA_DMA 16 to 32 channels for peripheral to memory or memory to memory transfers. +config PLX_DMA + tristate "PLX ExpressLane PEX Switch DMA Engine Support" + depends on PCI + select DMA_ENGINE + help + Some PLX ExpressLane PCI Switches support additional DMA engines. + These are exposed via extra functions on the switch's + upstream port. Each function exposes one DMA channel. 
+ config SIRF_DMA tristate "CSR SiRFprimaII/SiRFmarco DMA support" depends on ARCH_SIRF diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 42d7e2fc64fa..1d908394fbea 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -35,12 +35,14 @@ obj-$(CONFIG_FSL_EDMA) += fsl-edma.o fsl-edma-common.o obj-$(CONFIG_MCF_EDMA) += mcf-edma.o fsl-edma-common.o obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o obj-$(CONFIG_FSL_RAID) += fsl_raid.o +obj-$(CONFIG_HISI_DMA) += hisi_dma.o obj-$(CONFIG_HSU_DMA) += hsu/ obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o obj-$(CONFIG_IMX_DMA) += imx-dma.o obj-$(CONFIG_IMX_SDMA) += imx-sdma.o obj-$(CONFIG_INTEL_IDMA64) += idma64.o obj-$(CONFIG_INTEL_IOATDMA) += ioat/ +obj-$(CONFIG_INTEL_IDXD) += idxd/ obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o obj-$(CONFIG_K3_DMA) += k3dma.o @@ -59,6 +61,7 @@ obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o obj-$(CONFIG_OWL_DMA) += owl-dma.o obj-$(CONFIG_PCH_DMA) += pch_dma.o obj-$(CONFIG_PL330_DMA) += pl330.o +obj-$(CONFIG_PLX_DMA) += plx_dma.o obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/ obj-$(CONFIG_PXA_DMA) += pxa_dma.o obj-$(CONFIG_RENESAS_DMA) += sh/ diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c index 832aefbe7af9..539e785039ca 100644 --- a/drivers/dma/altera-msgdma.c +++ b/drivers/dma/altera-msgdma.c @@ -772,10 +772,10 @@ static int request_and_map(struct platform_device *pdev, const char *name, return -EBUSY; } - *ptr = devm_ioremap_nocache(device, region->start, + *ptr = devm_ioremap(device, region->start, resource_size(region)); if (*ptr == NULL) { - dev_err(device, "ioremap_nocache of %s failed!", name); + dev_err(device, "ioremap of %s failed!", name); return -ENOMEM; } diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index e4c593f48575..4768ef26013b 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c @@ -797,10 +797,7 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan) /* stop DMA activity */ if (c->desc) { - if (c->desc->vd.tx.flags & DMA_PREP_INTERRUPT) - vchan_terminate_vdesc(&c->desc->vd); - else - vchan_vdesc_fini(&c->desc->vd); + vchan_terminate_vdesc(&c->desc->vd); c->desc = NULL; bcm2835_dma_abort(c); } diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c index a0ee404b736e..f1d149e32839 100644 --- a/drivers/dma/dma-axi-dmac.c +++ b/drivers/dma/dma-axi-dmac.c @@ -830,6 +830,7 @@ static int axi_dmac_probe(struct platform_device *pdev) struct dma_device *dma_dev; struct axi_dmac *dmac; struct resource *res; + struct regmap *regmap; int ret; dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL); @@ -921,10 +922,17 @@ static int axi_dmac_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dmac); - devm_regmap_init_mmio(&pdev->dev, dmac->base, &axi_dmac_regmap_config); + regmap = devm_regmap_init_mmio(&pdev->dev, dmac->base, + &axi_dmac_regmap_config); + if (IS_ERR(regmap)) { + ret = PTR_ERR(regmap); + goto err_free_irq; + } return 0; +err_free_irq: + free_irq(dmac->irq, dmac); err_unregister_of: of_dma_controller_free(pdev->dev.of_node); err_unregister_device: diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c index 44af435628f8..448f663da89c 100644 --- a/drivers/dma/dma-jz4780.c +++ b/drivers/dma/dma-jz4780.c @@ -1021,12 +1021,19 @@ static const struct jz4780_dma_soc_data x1000_dma_soc_data = { .flags = JZ_SOC_DATA_PROGRAMMABLE_DMA, }; +static const struct jz4780_dma_soc_data x1830_dma_soc_data = { + .nb_channels = 32, + .transfer_ord_max = 7, + .flags = 
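/* same programmable request path as the X1000 above */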
JZ_SOC_DATA_PROGRAMMABLE_DMA, +}; + static const struct of_device_id jz4780_dma_dt_match[] = { { .compatible = "ingenic,jz4740-dma", .data = &jz4740_dma_soc_data }, { .compatible = "ingenic,jz4725b-dma", .data = &jz4725b_dma_soc_data }, { .compatible = "ingenic,jz4770-dma", .data = &jz4770_dma_soc_data }, { .compatible = "ingenic,jz4780-dma", .data = &jz4780_dma_soc_data }, { .compatible = "ingenic,x1000-dma", .data = &x1000_dma_soc_data }, + { .compatible = "ingenic,x1830-dma", .data = &x1830_dma_soc_data }, {}, }; MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match); diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 03ac4b96117c..f3ef4edd4de1 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -60,6 +60,8 @@ static long dmaengine_ref_count; /* --- sysfs implementation --- */ +#define DMA_SLAVE_NAME "slave" + /** * dev_to_dma_chan - convert a device pointer to its sysfs container object * @dev - device node @@ -164,11 +166,152 @@ static struct class dma_devclass = { /* --- client and device registration --- */ -#define dma_device_satisfies_mask(device, mask) \ - __dma_device_satisfies_mask((device), &(mask)) -static int -__dma_device_satisfies_mask(struct dma_device *device, - const dma_cap_mask_t *want) +/** + * dma_cap_mask_all - enable iteration over all operation types + */ +static dma_cap_mask_t dma_cap_mask_all; + +/** + * dma_chan_tbl_ent - tracks channel allocations per core/operation + * @chan - associated channel for this entry + */ +struct dma_chan_tbl_ent { + struct dma_chan *chan; +}; + +/** + * channel_table - percpu lookup table for memory-to-memory offload providers + */ +static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END]; + +static int __init dma_channel_table_init(void) +{ + enum dma_transaction_type cap; + int err = 0; + + bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END); + + /* 'interrupt', 'private', and 'slave' are channel capabilities, + * but are not associated with an operation so they do not need + * an entry in the channel_table + */ + clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits); + clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits); + clear_bit(DMA_SLAVE, dma_cap_mask_all.bits); + + for_each_dma_cap_mask(cap, dma_cap_mask_all) { + channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent); + if (!channel_table[cap]) { + err = -ENOMEM; + break; + } + } + + if (err) { + pr_err("dmaengine dma_channel_table_init failure: %d\n", err); + for_each_dma_cap_mask(cap, dma_cap_mask_all) + free_percpu(channel_table[cap]); + } + + return err; +} +arch_initcall(dma_channel_table_init); + +/** + * dma_chan_is_local - returns true if the channel is in the same numa-node as + * the cpu + */ +static bool dma_chan_is_local(struct dma_chan *chan, int cpu) +{ + int node = dev_to_node(chan->device->dev); + return node == NUMA_NO_NODE || + cpumask_test_cpu(cpu, cpumask_of_node(node)); +} + +/** + * min_chan - returns the channel with min count and in the same numa-node as + * the cpu + * @cap: capability to match + * @cpu: cpu index which the channel should be close to + * + * If some channels are close to the given cpu, the one with the lowest + * reference count is returned. Otherwise, cpu is ignored and only the + * reference count is taken into account. + * Must be called under dma_list_mutex. 
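+ * (dma_channel_rebalance() below uses this to pick each CPU's channel)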
+ */ +static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu) +{ + struct dma_device *device; + struct dma_chan *chan; + struct dma_chan *min = NULL; + struct dma_chan *localmin = NULL; + + list_for_each_entry(device, &dma_device_list, global_node) { + if (!dma_has_cap(cap, device->cap_mask) || + dma_has_cap(DMA_PRIVATE, device->cap_mask)) + continue; + list_for_each_entry(chan, &device->channels, device_node) { + if (!chan->client_count) + continue; + if (!min || chan->table_count < min->table_count) + min = chan; + + if (dma_chan_is_local(chan, cpu)) + if (!localmin || + chan->table_count < localmin->table_count) + localmin = chan; + } + } + + chan = localmin ? localmin : min; + + if (chan) + chan->table_count++; + + return chan; +} + +/** + * dma_channel_rebalance - redistribute the available channels + * + * Optimize for cpu isolation (each cpu gets a dedicated channel for an + * operation type) in the SMP case, and operation isolation (avoid + * multi-tasking channels) in the non-SMP case. Must be called under + * dma_list_mutex. + */ +static void dma_channel_rebalance(void) +{ + struct dma_chan *chan; + struct dma_device *device; + int cpu; + int cap; + + /* undo the last distribution */ + for_each_dma_cap_mask(cap, dma_cap_mask_all) + for_each_possible_cpu(cpu) + per_cpu_ptr(channel_table[cap], cpu)->chan = NULL; + + list_for_each_entry(device, &dma_device_list, global_node) { + if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) + continue; + list_for_each_entry(chan, &device->channels, device_node) + chan->table_count = 0; + } + + /* don't populate the channel_table if no clients are available */ + if (!dmaengine_ref_count) + return; + + /* redistribute available channels */ + for_each_dma_cap_mask(cap, dma_cap_mask_all) + for_each_online_cpu(cpu) { + chan = min_chan(cap, cpu); + per_cpu_ptr(channel_table[cap], cpu)->chan = chan; + } +} + +static int dma_device_satisfies_mask(struct dma_device *device, + const dma_cap_mask_t *want) { dma_cap_mask_t has; @@ -179,7 +322,7 @@ __dma_device_satisfies_mask(struct dma_device *device, static struct module *dma_chan_to_owner(struct dma_chan *chan) { - return chan->device->dev->driver->owner; + return chan->device->owner; } /** @@ -198,6 +341,23 @@ static void balance_ref_count(struct dma_chan *chan) } } +static void dma_device_release(struct kref *ref) +{ + struct dma_device *device = container_of(ref, struct dma_device, ref); + + list_del_rcu(&device->global_node); + dma_channel_rebalance(); + + if (device->device_release) + device->device_release(device); +} + +static void dma_device_put(struct dma_device *device) +{ + lockdep_assert_held(&dma_list_mutex); + kref_put(&device->ref, dma_device_release); +} + /** * dma_chan_get - try to grab a dma channel's parent driver module * @chan - channel to grab @@ -218,6 +378,12 @@ static int dma_chan_get(struct dma_chan *chan) if (!try_module_get(owner)) return -ENODEV; + ret = kref_get_unless_zero(&chan->device->ref); + if (!ret) { + ret = -ENODEV; + goto module_put_out; + } + /* allocate upon first client reference */ if (chan->device->device_alloc_chan_resources) { ret = chan->device->device_alloc_chan_resources(chan); @@ -233,6 +399,8 @@ out: return 0; err_out: + dma_device_put(chan->device); +module_put_out: module_put(owner); return ret; } @@ -250,7 +418,6 @@ static void dma_chan_put(struct dma_chan *chan) return; chan->client_count--; - module_put(dma_chan_to_owner(chan)); /* This channel is not in use anymore, free it */ if (!chan->client_count && 
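/* the free hook is optional */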
chan->device->device_free_chan_resources) { @@ -265,6 +432,9 @@ static void dma_chan_put(struct dma_chan *chan) chan->router = NULL; chan->route_data = NULL; } + + dma_device_put(chan->device); + module_put(dma_chan_to_owner(chan)); } enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) @@ -289,57 +459,6 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) EXPORT_SYMBOL(dma_sync_wait); /** - * dma_cap_mask_all - enable iteration over all operation types - */ -static dma_cap_mask_t dma_cap_mask_all; - -/** - * dma_chan_tbl_ent - tracks channel allocations per core/operation - * @chan - associated channel for this entry - */ -struct dma_chan_tbl_ent { - struct dma_chan *chan; -}; - -/** - * channel_table - percpu lookup table for memory-to-memory offload providers - */ -static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END]; - -static int __init dma_channel_table_init(void) -{ - enum dma_transaction_type cap; - int err = 0; - - bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END); - - /* 'interrupt', 'private', and 'slave' are channel capabilities, - * but are not associated with an operation so they do not need - * an entry in the channel_table - */ - clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits); - clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits); - clear_bit(DMA_SLAVE, dma_cap_mask_all.bits); - - for_each_dma_cap_mask(cap, dma_cap_mask_all) { - channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent); - if (!channel_table[cap]) { - err = -ENOMEM; - break; - } - } - - if (err) { - pr_err("initialization failure\n"); - for_each_dma_cap_mask(cap, dma_cap_mask_all) - free_percpu(channel_table[cap]); - } - - return err; -} -arch_initcall(dma_channel_table_init); - -/** * dma_find_channel - find a channel to carry out the operation * @tx_type: transaction type */ @@ -369,97 +488,6 @@ void dma_issue_pending_all(void) } EXPORT_SYMBOL(dma_issue_pending_all); -/** - * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu - */ -static bool dma_chan_is_local(struct dma_chan *chan, int cpu) -{ - int node = dev_to_node(chan->device->dev); - return node == NUMA_NO_NODE || - cpumask_test_cpu(cpu, cpumask_of_node(node)); -} - -/** - * min_chan - returns the channel with min count and in the same numa-node as the cpu - * @cap: capability to match - * @cpu: cpu index which the channel should be close to - * - * If some channels are close to the given cpu, the one with the lowest - * reference count is returned. Otherwise, cpu is ignored and only the - * reference count is taken into account. - * Must be called under dma_list_mutex. - */ -static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu) -{ - struct dma_device *device; - struct dma_chan *chan; - struct dma_chan *min = NULL; - struct dma_chan *localmin = NULL; - - list_for_each_entry(device, &dma_device_list, global_node) { - if (!dma_has_cap(cap, device->cap_mask) || - dma_has_cap(DMA_PRIVATE, device->cap_mask)) - continue; - list_for_each_entry(chan, &device->channels, device_node) { - if (!chan->client_count) - continue; - if (!min || chan->table_count < min->table_count) - min = chan; - - if (dma_chan_is_local(chan, cpu)) - if (!localmin || - chan->table_count < localmin->table_count) - localmin = chan; - } - } - - chan = localmin ? 
localmin : min; - - if (chan) - chan->table_count++; - - return chan; -} - -/** - * dma_channel_rebalance - redistribute the available channels - * - * Optimize for cpu isolation (each cpu gets a dedicated channel for an - * operation type) in the SMP case, and operation isolation (avoid - * multi-tasking channels) in the non-SMP case. Must be called under - * dma_list_mutex. - */ -static void dma_channel_rebalance(void) -{ - struct dma_chan *chan; - struct dma_device *device; - int cpu; - int cap; - - /* undo the last distribution */ - for_each_dma_cap_mask(cap, dma_cap_mask_all) - for_each_possible_cpu(cpu) - per_cpu_ptr(channel_table[cap], cpu)->chan = NULL; - - list_for_each_entry(device, &dma_device_list, global_node) { - if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) - continue; - list_for_each_entry(chan, &device->channels, device_node) - chan->table_count = 0; - } - - /* don't populate the channel_table if no clients are available */ - if (!dmaengine_ref_count) - return; - - /* redistribute available channels */ - for_each_dma_cap_mask(cap, dma_cap_mask_all) - for_each_online_cpu(cpu) { - chan = min_chan(cap, cpu); - per_cpu_ptr(channel_table[cap], cpu)->chan = chan; - } -} - int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps) { struct dma_device *device; @@ -502,7 +530,7 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask, { struct dma_chan *chan; - if (mask && !__dma_device_satisfies_mask(dev, mask)) { + if (mask && !dma_device_satisfies_mask(dev, mask)) { dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__); return NULL; } @@ -704,11 +732,11 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name) if (has_acpi_companion(dev) && !chan) chan = acpi_dma_request_slave_chan_by_name(dev, name); - if (chan) { - /* Valid channel found or requester needs to be deferred */ - if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER) - return chan; - } + if (PTR_ERR(chan) == -EPROBE_DEFER) + return chan; + + if (!IS_ERR_OR_NULL(chan)) + goto found; /* Try to find the channel via the DMA filter map(s) */ mutex_lock(&dma_list_mutex); @@ -728,7 +756,23 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name) } mutex_unlock(&dma_list_mutex); - return chan ? 
chan : ERR_PTR(-EPROBE_DEFER); + if (!IS_ERR_OR_NULL(chan)) + goto found; + + return ERR_PTR(-EPROBE_DEFER); + +found: + chan->slave = dev; + chan->name = kasprintf(GFP_KERNEL, "dma:%s", name); + if (!chan->name) + return ERR_PTR(-ENOMEM); + + if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj, + DMA_SLAVE_NAME)) + dev_err(dev, "Cannot create DMA %s symlink\n", DMA_SLAVE_NAME); + if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name)) + dev_err(dev, "Cannot create DMA %s symlink\n", chan->name); + return chan; } EXPORT_SYMBOL_GPL(dma_request_chan); @@ -786,6 +830,13 @@ void dma_release_channel(struct dma_chan *chan) /* drop PRIVATE cap enabled by __dma_request_channel() */ if (--chan->device->privatecnt == 0) dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask); + if (chan->slave) { + sysfs_remove_link(&chan->slave->kobj, chan->name); + kfree(chan->name); + chan->name = NULL; + chan->slave = NULL; + } + sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME); mutex_unlock(&dma_list_mutex); } EXPORT_SYMBOL_GPL(dma_release_channel); @@ -834,14 +885,14 @@ EXPORT_SYMBOL(dmaengine_get); */ void dmaengine_put(void) { - struct dma_device *device; + struct dma_device *device, *_d; struct dma_chan *chan; mutex_lock(&dma_list_mutex); dmaengine_ref_count--; BUG_ON(dmaengine_ref_count < 0); /* drop channel references */ - list_for_each_entry(device, &dma_device_list, global_node) { + list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) continue; list_for_each_entry(chan, &device->channels, device_node) @@ -900,15 +951,115 @@ static int get_dma_id(struct dma_device *device) return 0; } +static int __dma_async_device_channel_register(struct dma_device *device, + struct dma_chan *chan, + int chan_id) +{ + int rc = 0; + int chancnt = device->chancnt; + atomic_t *idr_ref; + struct dma_chan *tchan; + + tchan = list_first_entry_or_null(&device->channels, + struct dma_chan, device_node); + if (tchan->dev) { + idr_ref = tchan->dev->idr_ref; + } else { + idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL); + if (!idr_ref) + return -ENOMEM; + atomic_set(idr_ref, 0); + } + + chan->local = alloc_percpu(typeof(*chan->local)); + if (!chan->local) + goto err_out; + chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL); + if (!chan->dev) { + free_percpu(chan->local); + chan->local = NULL; + goto err_out; + } + + /* + * When the chan_id is a negative value, we are dynamically adding + * the channel. Otherwise we are static enumerating. + */ + chan->chan_id = chan_id < 0 ? 
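/* dynamic add: append after the existing channels */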
chancnt : chan_id; + chan->dev->device.class = &dma_devclass; + chan->dev->device.parent = device->dev; + chan->dev->chan = chan; + chan->dev->idr_ref = idr_ref; + chan->dev->dev_id = device->dev_id; + atomic_inc(idr_ref); + dev_set_name(&chan->dev->device, "dma%dchan%d", + device->dev_id, chan->chan_id); + + rc = device_register(&chan->dev->device); + if (rc) + goto err_out; + chan->client_count = 0; + device->chancnt = chan->chan_id + 1; + + return 0; + + err_out: + free_percpu(chan->local); + kfree(chan->dev); + if (atomic_dec_return(idr_ref) == 0) + kfree(idr_ref); + return rc; +} + +int dma_async_device_channel_register(struct dma_device *device, + struct dma_chan *chan) +{ + int rc; + + rc = __dma_async_device_channel_register(device, chan, -1); + if (rc < 0) + return rc; + + dma_channel_rebalance(); + return 0; +} +EXPORT_SYMBOL_GPL(dma_async_device_channel_register); + +static void __dma_async_device_channel_unregister(struct dma_device *device, + struct dma_chan *chan) +{ + WARN_ONCE(!device->device_release && chan->client_count, + "%s called while %d clients hold a reference\n", + __func__, chan->client_count); + mutex_lock(&dma_list_mutex); + list_del(&chan->device_node); + device->chancnt--; + chan->dev->chan = NULL; + mutex_unlock(&dma_list_mutex); + device_unregister(&chan->dev->device); + free_percpu(chan->local); +} + +void dma_async_device_channel_unregister(struct dma_device *device, + struct dma_chan *chan) +{ + __dma_async_device_channel_unregister(device, chan); + dma_channel_rebalance(); +} +EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister); + /** * dma_async_device_register - registers DMA devices found * @device: &dma_device + * + * After calling this routine the structure should not be freed except in the + * device_release() callback which will be called after + * dma_async_device_unregister() is called and no further references are taken. */ int dma_async_device_register(struct dma_device *device) { - int chancnt = 0, rc; + int rc, i = 0; struct dma_chan* chan; - atomic_t *idr_ref; if (!device) return -ENODEV; @@ -919,6 +1070,8 @@ int dma_async_device_register(struct dma_device *device) return -EIO; } + device->owner = device->dev->driver->owner; + if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) { dev_err(device->dev, "Device claims capability %s, but op is not defined\n", @@ -994,65 +1147,29 @@ int dma_async_device_register(struct dma_device *device) return -EIO; } + if (!device->device_release) + dev_warn(device->dev, + "WARN: Device release is not defined so it is not safe to unbind this driver while in use\n"); + + kref_init(&device->ref); + /* note: this only matters in the * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case */ if (device_has_all_tx_types(device)) dma_cap_set(DMA_ASYNC_TX, device->cap_mask); - idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL); - if (!idr_ref) - return -ENOMEM; rc = get_dma_id(device); - if (rc != 0) { - kfree(idr_ref); + if (rc != 0) return rc; - } - - atomic_set(idr_ref, 0); /* represent channels in sysfs. 
Probably want devs too */ list_for_each_entry(chan, &device->channels, device_node) { - rc = -ENOMEM; - chan->local = alloc_percpu(typeof(*chan->local)); - if (chan->local == NULL) - goto err_out; - chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL); - if (chan->dev == NULL) { - free_percpu(chan->local); - chan->local = NULL; + rc = __dma_async_device_channel_register(device, chan, i++); + if (rc < 0) goto err_out; - } - - chan->chan_id = chancnt++; - chan->dev->device.class = &dma_devclass; - chan->dev->device.parent = device->dev; - chan->dev->chan = chan; - chan->dev->idr_ref = idr_ref; - chan->dev->dev_id = device->dev_id; - atomic_inc(idr_ref); - dev_set_name(&chan->dev->device, "dma%dchan%d", - device->dev_id, chan->chan_id); - - rc = device_register(&chan->dev->device); - if (rc) { - free_percpu(chan->local); - chan->local = NULL; - kfree(chan->dev); - atomic_dec(idr_ref); - goto err_out; - } - chan->client_count = 0; - } - - if (!chancnt) { - dev_err(device->dev, "%s: device has no channels!\n", __func__); - rc = -ENODEV; - goto err_out; } - device->chancnt = chancnt; - mutex_lock(&dma_list_mutex); /* take references on public channels */ if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask)) @@ -1080,9 +1197,8 @@ int dma_async_device_register(struct dma_device *device) err_out: /* if we never registered a channel just release the idr */ - if (atomic_read(idr_ref) == 0) { + if (!device->chancnt) { ida_free(&dma_ida, device->dev_id); - kfree(idr_ref); return rc; } @@ -1108,23 +1224,20 @@ EXPORT_SYMBOL(dma_async_device_register); */ void dma_async_device_unregister(struct dma_device *device) { - struct dma_chan *chan; + struct dma_chan *chan, *n; + + list_for_each_entry_safe(chan, n, &device->channels, device_node) + __dma_async_device_channel_unregister(device, chan); mutex_lock(&dma_list_mutex); - list_del_rcu(&device->global_node); + /* + * setting DMA_PRIVATE ensures the device being torn down will not + * be used in the channel_table + */ + dma_cap_set(DMA_PRIVATE, device->cap_mask); dma_channel_rebalance(); + dma_device_put(device); mutex_unlock(&dma_list_mutex); - - list_for_each_entry(chan, &device->channels, device_node) { - WARN_ONCE(chan->client_count, - "%s called while %d clients hold a reference\n", - __func__, chan->client_count); - mutex_lock(&dma_list_mutex); - chan->dev->chan = NULL; - mutex_unlock(&dma_list_mutex); - device_unregister(&chan->dev->device); - free_percpu(chan->local); - } } EXPORT_SYMBOL(dma_async_device_unregister); @@ -1302,6 +1415,79 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, } EXPORT_SYMBOL(dma_async_tx_descriptor_init); +static inline int desc_check_and_set_metadata_mode( + struct dma_async_tx_descriptor *desc, enum dma_desc_metadata_mode mode) +{ + /* Make sure that the metadata mode is not mixed */ + if (!desc->desc_metadata_mode) { + if (dmaengine_is_metadata_mode_supported(desc->chan, mode)) + desc->desc_metadata_mode = mode; + else + return -ENOTSUPP; + } else if (desc->desc_metadata_mode != mode) { + return -EINVAL; + } + + return 0; +} + +int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc, + void *data, size_t len) +{ + int ret; + + if (!desc) + return -EINVAL; + + ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_CLIENT); + if (ret) + return ret; + + if (!desc->metadata_ops || !desc->metadata_ops->attach) + return -ENOTSUPP; + + return desc->metadata_ops->attach(desc, data, len); +} +EXPORT_SYMBOL_GPL(dmaengine_desc_attach_metadata); + +void 
*dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc, + size_t *payload_len, size_t *max_len) +{ + int ret; + + if (!desc) + return ERR_PTR(-EINVAL); + + ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE); + if (ret) + return ERR_PTR(ret); + + if (!desc->metadata_ops || !desc->metadata_ops->get_ptr) + return ERR_PTR(-ENOTSUPP); + + return desc->metadata_ops->get_ptr(desc, payload_len, max_len); +} +EXPORT_SYMBOL_GPL(dmaengine_desc_get_metadata_ptr); + +int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc, + size_t payload_len) +{ + int ret; + + if (!desc) + return -EINVAL; + + ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE); + if (ret) + return ret; + + if (!desc->metadata_ops || !desc->metadata_ops->set_len) + return -ENOTSUPP; + + return desc->metadata_ops->set_len(desc, payload_len); +} +EXPORT_SYMBOL_GPL(dmaengine_desc_set_metadata_len); + /* dma_wait_for_async_tx - spin wait for a transaction to complete * @tx: in-flight transaction to wait on */ @@ -1373,5 +1559,3 @@ static int __init dma_bus_init(void) return class_register(&dma_devclass); } arch_initcall(dma_bus_init); - - diff --git a/drivers/dma/dmaengine.h b/drivers/dma/dmaengine.h index 501c0b063f85..e8a320c9e57c 100644 --- a/drivers/dma/dmaengine.h +++ b/drivers/dma/dmaengine.h @@ -77,6 +77,7 @@ static inline enum dma_status dma_cookie_status(struct dma_chan *chan, state->last = complete; state->used = used; state->residue = 0; + state->in_flight_bytes = 0; } return dma_async_is_complete(cookie, complete, used); } @@ -87,6 +88,13 @@ static inline void dma_set_residue(struct dma_tx_state *state, u32 residue) state->residue = residue; } +static inline void dma_set_in_flight_bytes(struct dma_tx_state *state, + u32 in_flight_bytes) +{ + if (state) + state->in_flight_bytes = in_flight_bytes; +} + struct dmaengine_desc_callback { dma_async_tx_callback callback; dma_async_tx_callback_result callback_result; @@ -171,4 +179,7 @@ dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb) return (cb->callback) ? true : false; } +struct dma_chan *dma_get_slave_channel(struct dma_chan *chan); +struct dma_chan *dma_get_any_slave_channel(struct dma_device *device); + #endif diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c index a1ce307c502f..14c1ac26f866 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c @@ -636,14 +636,10 @@ static int dma_chan_terminate_all(struct dma_chan *dchan) vchan_get_all_descriptors(&chan->vc, &head); - /* - * As vchan_dma_desc_free_list can access to desc_allocated list - * we need to call it in vc.lock context. 
- */ - vchan_dma_desc_free_list(&chan->vc, &head); - spin_unlock_irqrestore(&chan->vc.lock, flags); + vchan_dma_desc_free_list(&chan->vc, &head); + dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan)); return 0; diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c index b1a7ca91701a..5697c3622699 100644 --- a/drivers/dma/fsl-edma-common.c +++ b/drivers/dma/fsl-edma-common.c @@ -109,10 +109,15 @@ void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan, u32 ch = fsl_chan->vchan.chan.chan_id; void __iomem *muxaddr; unsigned int chans_per_mux, ch_off; + int endian_diff[4] = {3, 1, -1, -3}; u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs; chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr; ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux; + + if (fsl_chan->edma->drvdata->mux_swap) + ch_off += endian_diff[ch_off % 4]; + muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux]; slot = EDMAMUX_CHCFG_SOURCE(slot); diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h index 5eaa2902ed39..67e422590c9a 100644 --- a/drivers/dma/fsl-edma-common.h +++ b/drivers/dma/fsl-edma-common.h @@ -147,6 +147,7 @@ struct fsl_edma_drvdata { enum edma_version version; u32 dmamuxs; bool has_dmaclk; + bool mux_swap; int (*setup_irq)(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma); }; diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c index b626c06ac2e0..eff7ebd8cf35 100644 --- a/drivers/dma/fsl-edma.c +++ b/drivers/dma/fsl-edma.c @@ -233,6 +233,13 @@ static struct fsl_edma_drvdata vf610_data = { .setup_irq = fsl_edma_irq_init, }; +static struct fsl_edma_drvdata ls1028a_data = { + .version = v1, + .dmamuxs = DMAMUX_NR, + .mux_swap = true, + .setup_irq = fsl_edma_irq_init, +}; + static struct fsl_edma_drvdata imx7ulp_data = { .version = v3, .dmamuxs = 1, @@ -242,6 +249,7 @@ static struct fsl_edma_drvdata imx7ulp_data = { static const struct of_device_id fsl_edma_dt_ids[] = { { .compatible = "fsl,vf610-edma", .data = &vf610_data}, + { .compatible = "fsl,ls1028a-edma", .data = &ls1028a_data}, { .compatible = "fsl,imx7ulp-edma", .data = &imx7ulp_data}, { /* sentinel */ } }; diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c index 89792083d62c..95cc0256b387 100644 --- a/drivers/dma/fsl-qdma.c +++ b/drivers/dma/fsl-qdma.c @@ -304,7 +304,7 @@ static void fsl_qdma_free_chan_resources(struct dma_chan *chan) vchan_dma_desc_free_list(&fsl_chan->vchan, &head); - if (!fsl_queue->comp_pool && !fsl_queue->comp_pool) + if (!fsl_queue->comp_pool && !fsl_queue->desc_pool) return; list_for_each_entry_safe(comp_temp, _comp_temp, diff --git a/drivers/dma/hisi_dma.c b/drivers/dma/hisi_dma.c new file mode 100644 index 000000000000..ed3619266a48 --- /dev/null +++ b/drivers/dma/hisi_dma.c @@ -0,0 +1,611 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2019 HiSilicon Limited. 
*/ +#include <linux/bitfield.h> +#include <linux/dmaengine.h> +#include <linux/init.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/spinlock.h> +#include "virt-dma.h" + +#define HISI_DMA_SQ_BASE_L 0x0 +#define HISI_DMA_SQ_BASE_H 0x4 +#define HISI_DMA_SQ_DEPTH 0x8 +#define HISI_DMA_SQ_TAIL_PTR 0xc +#define HISI_DMA_CQ_BASE_L 0x10 +#define HISI_DMA_CQ_BASE_H 0x14 +#define HISI_DMA_CQ_DEPTH 0x18 +#define HISI_DMA_CQ_HEAD_PTR 0x1c +#define HISI_DMA_CTRL0 0x20 +#define HISI_DMA_CTRL0_QUEUE_EN_S 0 +#define HISI_DMA_CTRL0_QUEUE_PAUSE_S 4 +#define HISI_DMA_CTRL1 0x24 +#define HISI_DMA_CTRL1_QUEUE_RESET_S 0 +#define HISI_DMA_Q_FSM_STS 0x30 +#define HISI_DMA_FSM_STS_MASK GENMASK(3, 0) +#define HISI_DMA_INT_STS 0x40 +#define HISI_DMA_INT_STS_MASK GENMASK(12, 0) +#define HISI_DMA_INT_MSK 0x44 +#define HISI_DMA_MODE 0x217c +#define HISI_DMA_OFFSET 0x100 + +#define HISI_DMA_MSI_NUM 30 +#define HISI_DMA_CHAN_NUM 30 +#define HISI_DMA_Q_DEPTH_VAL 1024 + +#define PCI_BAR_2 2 + +enum hisi_dma_mode { + EP = 0, + RC, +}; + +enum hisi_dma_chan_status { + DISABLE = -1, + IDLE = 0, + RUN, + CPL, + PAUSE, + HALT, + ABORT, + WAIT, + BUFFCLR, +}; + +struct hisi_dma_sqe { + __le32 dw0; +#define OPCODE_MASK GENMASK(3, 0) +#define OPCODE_SMALL_PACKAGE 0x1 +#define OPCODE_M2M 0x4 +#define LOCAL_IRQ_EN BIT(8) +#define ATTR_SRC_MASK GENMASK(14, 12) + __le32 dw1; + __le32 dw2; +#define ATTR_DST_MASK GENMASK(26, 24) + __le32 length; + __le64 src_addr; + __le64 dst_addr; +}; + +struct hisi_dma_cqe { + __le32 rsv0; + __le32 rsv1; + __le16 sq_head; + __le16 rsv2; + __le16 rsv3; + __le16 w0; +#define STATUS_MASK GENMASK(15, 1) +#define STATUS_SUCC 0x0 +#define VALID_BIT BIT(0) +}; + +struct hisi_dma_desc { + struct virt_dma_desc vd; + struct hisi_dma_sqe sqe; +}; + +struct hisi_dma_chan { + struct virt_dma_chan vc; + struct hisi_dma_dev *hdma_dev; + struct hisi_dma_sqe *sq; + struct hisi_dma_cqe *cq; + dma_addr_t sq_dma; + dma_addr_t cq_dma; + u32 sq_tail; + u32 cq_head; + u32 qp_num; + enum hisi_dma_chan_status status; + struct hisi_dma_desc *desc; +}; + +struct hisi_dma_dev { + struct pci_dev *pdev; + void __iomem *base; + struct dma_device dma_dev; + u32 chan_num; + u32 chan_depth; + struct hisi_dma_chan chan[]; +}; + +static inline struct hisi_dma_chan *to_hisi_dma_chan(struct dma_chan *c) +{ + return container_of(c, struct hisi_dma_chan, vc.chan); +} + +static inline struct hisi_dma_desc *to_hisi_dma_desc(struct virt_dma_desc *vd) +{ + return container_of(vd, struct hisi_dma_desc, vd); +} + +static inline void hisi_dma_chan_write(void __iomem *base, u32 reg, u32 index, + u32 val) +{ + writel_relaxed(val, base + reg + index * HISI_DMA_OFFSET); +} + +static inline void hisi_dma_update_bit(void __iomem *addr, u32 pos, bool val) +{ + u32 tmp; + + tmp = readl_relaxed(addr); + tmp = val ? 
tmp | BIT(pos) : tmp & ~BIT(pos); + writel_relaxed(tmp, addr); +} + +static void hisi_dma_free_irq_vectors(void *data) +{ + pci_free_irq_vectors(data); +} + +static void hisi_dma_pause_dma(struct hisi_dma_dev *hdma_dev, u32 index, + bool pause) +{ + void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL0 + index * + HISI_DMA_OFFSET; + + hisi_dma_update_bit(addr, HISI_DMA_CTRL0_QUEUE_PAUSE_S, pause); +} + +static void hisi_dma_enable_dma(struct hisi_dma_dev *hdma_dev, u32 index, + bool enable) +{ + void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL0 + index * + HISI_DMA_OFFSET; + + hisi_dma_update_bit(addr, HISI_DMA_CTRL0_QUEUE_EN_S, enable); +} + +static void hisi_dma_mask_irq(struct hisi_dma_dev *hdma_dev, u32 qp_index) +{ + hisi_dma_chan_write(hdma_dev->base, HISI_DMA_INT_MSK, qp_index, + HISI_DMA_INT_STS_MASK); +} + +static void hisi_dma_unmask_irq(struct hisi_dma_dev *hdma_dev, u32 qp_index) +{ + void __iomem *base = hdma_dev->base; + + hisi_dma_chan_write(base, HISI_DMA_INT_STS, qp_index, + HISI_DMA_INT_STS_MASK); + hisi_dma_chan_write(base, HISI_DMA_INT_MSK, qp_index, 0); +} + +static void hisi_dma_do_reset(struct hisi_dma_dev *hdma_dev, u32 index) +{ + void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL1 + index * + HISI_DMA_OFFSET; + + hisi_dma_update_bit(addr, HISI_DMA_CTRL1_QUEUE_RESET_S, 1); +} + +static void hisi_dma_reset_qp_point(struct hisi_dma_dev *hdma_dev, u32 index) +{ + hisi_dma_chan_write(hdma_dev->base, HISI_DMA_SQ_TAIL_PTR, index, 0); + hisi_dma_chan_write(hdma_dev->base, HISI_DMA_CQ_HEAD_PTR, index, 0); +} + +static void hisi_dma_reset_hw_chan(struct hisi_dma_chan *chan) +{ + struct hisi_dma_dev *hdma_dev = chan->hdma_dev; + u32 index = chan->qp_num, tmp; + int ret; + + hisi_dma_pause_dma(hdma_dev, index, true); + hisi_dma_enable_dma(hdma_dev, index, false); + hisi_dma_mask_irq(hdma_dev, index); + + ret = readl_relaxed_poll_timeout(hdma_dev->base + + HISI_DMA_Q_FSM_STS + index * HISI_DMA_OFFSET, tmp, + FIELD_GET(HISI_DMA_FSM_STS_MASK, tmp) != RUN, 10, 1000); + if (ret) { + dev_err(&hdma_dev->pdev->dev, "disable channel timeout!\n"); + WARN_ON(1); + } + + hisi_dma_do_reset(hdma_dev, index); + hisi_dma_reset_qp_point(hdma_dev, index); + hisi_dma_pause_dma(hdma_dev, index, false); + hisi_dma_enable_dma(hdma_dev, index, true); + hisi_dma_unmask_irq(hdma_dev, index); + + ret = readl_relaxed_poll_timeout(hdma_dev->base + + HISI_DMA_Q_FSM_STS + index * HISI_DMA_OFFSET, tmp, + FIELD_GET(HISI_DMA_FSM_STS_MASK, tmp) == IDLE, 10, 1000); + if (ret) { + dev_err(&hdma_dev->pdev->dev, "reset channel timeout!\n"); + WARN_ON(1); + } +} + +static void hisi_dma_free_chan_resources(struct dma_chan *c) +{ + struct hisi_dma_chan *chan = to_hisi_dma_chan(c); + struct hisi_dma_dev *hdma_dev = chan->hdma_dev; + + hisi_dma_reset_hw_chan(chan); + vchan_free_chan_resources(&chan->vc); + + memset(chan->sq, 0, sizeof(struct hisi_dma_sqe) * hdma_dev->chan_depth); + memset(chan->cq, 0, sizeof(struct hisi_dma_cqe) * hdma_dev->chan_depth); + chan->sq_tail = 0; + chan->cq_head = 0; + chan->status = DISABLE; +} + +static void hisi_dma_desc_free(struct virt_dma_desc *vd) +{ + kfree(to_hisi_dma_desc(vd)); +} + +static struct dma_async_tx_descriptor * +hisi_dma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dst, dma_addr_t src, + size_t len, unsigned long flags) +{ + struct hisi_dma_chan *chan = to_hisi_dma_chan(c); + struct hisi_dma_desc *desc; + + desc = kzalloc(sizeof(*desc), GFP_NOWAIT); + if (!desc) + return NULL; + + desc->sqe.length = cpu_to_le32(len); + desc->sqe.src_addr = cpu_to_le64(src); + 
desc->sqe.dst_addr = cpu_to_le64(dst); + + return vchan_tx_prep(&chan->vc, &desc->vd, flags); +} + +static enum dma_status +hisi_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + return dma_cookie_status(c, cookie, txstate); +} + +static void hisi_dma_start_transfer(struct hisi_dma_chan *chan) +{ + struct hisi_dma_sqe *sqe = chan->sq + chan->sq_tail; + struct hisi_dma_dev *hdma_dev = chan->hdma_dev; + struct hisi_dma_desc *desc; + struct virt_dma_desc *vd; + + vd = vchan_next_desc(&chan->vc); + if (!vd) { + dev_err(&hdma_dev->pdev->dev, "no issued task!\n"); + chan->desc = NULL; + return; + } + list_del(&vd->node); + desc = to_hisi_dma_desc(vd); + chan->desc = desc; + + memcpy(sqe, &desc->sqe, sizeof(struct hisi_dma_sqe)); + + /* update other field in sqe */ + sqe->dw0 = cpu_to_le32(FIELD_PREP(OPCODE_MASK, OPCODE_M2M)); + sqe->dw0 |= cpu_to_le32(LOCAL_IRQ_EN); + + /* make sure data has been updated in sqe */ + wmb(); + + /* update sq tail, point to new sqe position */ + chan->sq_tail = (chan->sq_tail + 1) % hdma_dev->chan_depth; + + /* update sq_tail to trigger a new task */ + hisi_dma_chan_write(hdma_dev->base, HISI_DMA_SQ_TAIL_PTR, chan->qp_num, + chan->sq_tail); +} + +static void hisi_dma_issue_pending(struct dma_chan *c) +{ + struct hisi_dma_chan *chan = to_hisi_dma_chan(c); + unsigned long flags; + + spin_lock_irqsave(&chan->vc.lock, flags); + + if (vchan_issue_pending(&chan->vc)) + hisi_dma_start_transfer(chan); + + spin_unlock_irqrestore(&chan->vc.lock, flags); +} + +static int hisi_dma_terminate_all(struct dma_chan *c) +{ + struct hisi_dma_chan *chan = to_hisi_dma_chan(c); + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&chan->vc.lock, flags); + + hisi_dma_pause_dma(chan->hdma_dev, chan->qp_num, true); + if (chan->desc) { + vchan_terminate_vdesc(&chan->desc->vd); + chan->desc = NULL; + } + + vchan_get_all_descriptors(&chan->vc, &head); + + spin_unlock_irqrestore(&chan->vc.lock, flags); + + vchan_dma_desc_free_list(&chan->vc, &head); + hisi_dma_pause_dma(chan->hdma_dev, chan->qp_num, false); + + return 0; +} + +static void hisi_dma_synchronize(struct dma_chan *c) +{ + struct hisi_dma_chan *chan = to_hisi_dma_chan(c); + + vchan_synchronize(&chan->vc); +} + +static int hisi_dma_alloc_qps_mem(struct hisi_dma_dev *hdma_dev) +{ + size_t sq_size = sizeof(struct hisi_dma_sqe) * hdma_dev->chan_depth; + size_t cq_size = sizeof(struct hisi_dma_cqe) * hdma_dev->chan_depth; + struct device *dev = &hdma_dev->pdev->dev; + struct hisi_dma_chan *chan; + int i; + + for (i = 0; i < hdma_dev->chan_num; i++) { + chan = &hdma_dev->chan[i]; + chan->sq = dmam_alloc_coherent(dev, sq_size, &chan->sq_dma, + GFP_KERNEL); + if (!chan->sq) + return -ENOMEM; + + chan->cq = dmam_alloc_coherent(dev, cq_size, &chan->cq_dma, + GFP_KERNEL); + if (!chan->cq) + return -ENOMEM; + } + + return 0; +} + +static void hisi_dma_init_hw_qp(struct hisi_dma_dev *hdma_dev, u32 index) +{ + struct hisi_dma_chan *chan = &hdma_dev->chan[index]; + u32 hw_depth = hdma_dev->chan_depth - 1; + void __iomem *base = hdma_dev->base; + + /* set sq, cq base */ + hisi_dma_chan_write(base, HISI_DMA_SQ_BASE_L, index, + lower_32_bits(chan->sq_dma)); + hisi_dma_chan_write(base, HISI_DMA_SQ_BASE_H, index, + upper_32_bits(chan->sq_dma)); + hisi_dma_chan_write(base, HISI_DMA_CQ_BASE_L, index, + lower_32_bits(chan->cq_dma)); + hisi_dma_chan_write(base, HISI_DMA_CQ_BASE_H, index, + upper_32_bits(chan->cq_dma)); + + /* set sq, cq depth */ + hisi_dma_chan_write(base, HISI_DMA_SQ_DEPTH, index, 
hw_depth); + hisi_dma_chan_write(base, HISI_DMA_CQ_DEPTH, index, hw_depth); + + /* init sq tail and cq head */ + hisi_dma_chan_write(base, HISI_DMA_SQ_TAIL_PTR, index, 0); + hisi_dma_chan_write(base, HISI_DMA_CQ_HEAD_PTR, index, 0); +} + +static void hisi_dma_enable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index) +{ + hisi_dma_init_hw_qp(hdma_dev, qp_index); + hisi_dma_unmask_irq(hdma_dev, qp_index); + hisi_dma_enable_dma(hdma_dev, qp_index, true); +} + +static void hisi_dma_disable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index) +{ + hisi_dma_reset_hw_chan(&hdma_dev->chan[qp_index]); +} + +static void hisi_dma_enable_qps(struct hisi_dma_dev *hdma_dev) +{ + int i; + + for (i = 0; i < hdma_dev->chan_num; i++) { + hdma_dev->chan[i].qp_num = i; + hdma_dev->chan[i].hdma_dev = hdma_dev; + hdma_dev->chan[i].vc.desc_free = hisi_dma_desc_free; + vchan_init(&hdma_dev->chan[i].vc, &hdma_dev->dma_dev); + hisi_dma_enable_qp(hdma_dev, i); + } +} + +static void hisi_dma_disable_qps(struct hisi_dma_dev *hdma_dev) +{ + int i; + + for (i = 0; i < hdma_dev->chan_num; i++) { + hisi_dma_disable_qp(hdma_dev, i); + tasklet_kill(&hdma_dev->chan[i].vc.task); + } +} + +static irqreturn_t hisi_dma_irq(int irq, void *data) +{ + struct hisi_dma_chan *chan = data; + struct hisi_dma_dev *hdma_dev = chan->hdma_dev; + struct hisi_dma_desc *desc; + struct hisi_dma_cqe *cqe; + unsigned long flags; + + spin_lock_irqsave(&chan->vc.lock, flags); + + desc = chan->desc; + cqe = chan->cq + chan->cq_head; + if (desc) { + if (FIELD_GET(STATUS_MASK, cqe->w0) == STATUS_SUCC) { + chan->cq_head = (chan->cq_head + 1) % + hdma_dev->chan_depth; + hisi_dma_chan_write(hdma_dev->base, + HISI_DMA_CQ_HEAD_PTR, chan->qp_num, + chan->cq_head); + vchan_cookie_complete(&desc->vd); + } else { + dev_err(&hdma_dev->pdev->dev, "task error!\n"); + } + + chan->desc = NULL; + } + + spin_unlock_irqrestore(&chan->vc.lock, flags); + + return IRQ_HANDLED; +} + +static int hisi_dma_request_qps_irq(struct hisi_dma_dev *hdma_dev) +{ + struct pci_dev *pdev = hdma_dev->pdev; + int i, ret; + + for (i = 0; i < hdma_dev->chan_num; i++) { + ret = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i), + hisi_dma_irq, IRQF_SHARED, "hisi_dma", + &hdma_dev->chan[i]); + if (ret) + return ret; + } + + return 0; +} + +/* This function enables all hw channels in a device */ +static int hisi_dma_enable_hw_channels(struct hisi_dma_dev *hdma_dev) +{ + int ret; + + ret = hisi_dma_alloc_qps_mem(hdma_dev); + if (ret) { + dev_err(&hdma_dev->pdev->dev, "fail to allocate qp memory!\n"); + return ret; + } + + ret = hisi_dma_request_qps_irq(hdma_dev); + if (ret) { + dev_err(&hdma_dev->pdev->dev, "fail to request qp irq!\n"); + return ret; + } + + hisi_dma_enable_qps(hdma_dev); + + return 0; +} + +static void hisi_dma_disable_hw_channels(void *data) +{ + hisi_dma_disable_qps(data); +} + +static void hisi_dma_set_mode(struct hisi_dma_dev *hdma_dev, + enum hisi_dma_mode mode) +{ + writel_relaxed(mode == RC ? 
1 : 0, hdma_dev->base + HISI_DMA_MODE); +} + +static int hisi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct device *dev = &pdev->dev; + struct hisi_dma_dev *hdma_dev; + struct dma_device *dma_dev; + size_t dev_size; + int ret; + + ret = pcim_enable_device(pdev); + if (ret) { + dev_err(dev, "failed to enable device mem!\n"); + return ret; + } + + ret = pcim_iomap_regions(pdev, 1 << PCI_BAR_2, pci_name(pdev)); + if (ret) { + dev_err(dev, "failed to remap I/O region!\n"); + return ret; + } + + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (ret) + return ret; + + ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (ret) + return ret; + + dev_size = sizeof(struct hisi_dma_chan) * HISI_DMA_CHAN_NUM + + sizeof(*hdma_dev); + hdma_dev = devm_kzalloc(dev, dev_size, GFP_KERNEL); + if (!hdma_dev) + return -EINVAL; + + hdma_dev->base = pcim_iomap_table(pdev)[PCI_BAR_2]; + hdma_dev->pdev = pdev; + hdma_dev->chan_num = HISI_DMA_CHAN_NUM; + hdma_dev->chan_depth = HISI_DMA_Q_DEPTH_VAL; + + pci_set_drvdata(pdev, hdma_dev); + pci_set_master(pdev); + + ret = pci_alloc_irq_vectors(pdev, HISI_DMA_MSI_NUM, HISI_DMA_MSI_NUM, + PCI_IRQ_MSI); + if (ret < 0) { + dev_err(dev, "Failed to allocate MSI vectors!\n"); + return ret; + } + + ret = devm_add_action_or_reset(dev, hisi_dma_free_irq_vectors, pdev); + if (ret) + return ret; + + dma_dev = &hdma_dev->dma_dev; + dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); + dma_dev->device_free_chan_resources = hisi_dma_free_chan_resources; + dma_dev->device_prep_dma_memcpy = hisi_dma_prep_dma_memcpy; + dma_dev->device_tx_status = hisi_dma_tx_status; + dma_dev->device_issue_pending = hisi_dma_issue_pending; + dma_dev->device_terminate_all = hisi_dma_terminate_all; + dma_dev->device_synchronize = hisi_dma_synchronize; + dma_dev->directions = BIT(DMA_MEM_TO_MEM); + dma_dev->dev = dev; + INIT_LIST_HEAD(&dma_dev->channels); + + hisi_dma_set_mode(hdma_dev, RC); + + ret = hisi_dma_enable_hw_channels(hdma_dev); + if (ret < 0) { + dev_err(dev, "failed to enable hw channel!\n"); + return ret; + } + + ret = devm_add_action_or_reset(dev, hisi_dma_disable_hw_channels, + hdma_dev); + if (ret) + return ret; + + ret = dmaenginem_async_device_register(dma_dev); + if (ret < 0) + dev_err(dev, "failed to register device!\n"); + + return ret; +} + +static const struct pci_device_id hisi_dma_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa122) }, + { 0, } +}; + +static struct pci_driver hisi_dma_pci_driver = { + .name = "hisi_dma", + .id_table = hisi_dma_pci_tbl, + .probe = hisi_dma_probe, +}; + +module_pci_driver(hisi_dma_pci_driver); + +MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>"); +MODULE_AUTHOR("Zhenfa Qiu <qiuzhenfa@hisilicon.com>"); +MODULE_DESCRIPTION("HiSilicon Kunpeng DMA controller driver"); +MODULE_LICENSE("GPL v2"); +MODULE_DEVICE_TABLE(pci, hisi_dma_pci_tbl); diff --git a/drivers/dma/idxd/Makefile b/drivers/dma/idxd/Makefile new file mode 100644 index 000000000000..8978b898d777 --- /dev/null +++ b/drivers/dma/idxd/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_INTEL_IDXD) += idxd.o +idxd-y := init.o irq.o device.o sysfs.o submit.o dma.o cdev.o diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c new file mode 100644 index 000000000000..1d7347825b95 --- /dev/null +++ b/drivers/dma/idxd/cdev.c @@ -0,0 +1,302 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2019 Intel Corporation. All rights rsvd. 
*/
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/sched/task.h>
+#include <linux/intel-svm.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <uapi/linux/idxd.h>
+#include "registers.h"
+#include "idxd.h"
+
+struct idxd_cdev_context {
+	const char *name;
+	dev_t devt;
+	struct ida minor_ida;
+};
+
+/*
+ * ictx is an array based off of accelerator types. enum idxd_type
+ * is used as index
+ */
+static struct idxd_cdev_context ictx[IDXD_TYPE_MAX] = {
+	{ .name = "dsa" },
+};
+
+struct idxd_user_context {
+	struct idxd_wq *wq;
+	struct task_struct *task;
+	unsigned int flags;
+};
+
+enum idxd_cdev_cleanup {
+	CDEV_NORMAL = 0,
+	CDEV_FAILED,
+};
+
+static void idxd_cdev_dev_release(struct device *dev)
+{
+	dev_dbg(dev, "releasing cdev device\n");
+	kfree(dev);
+}
+
+static struct device_type idxd_cdev_device_type = {
+	.name = "idxd_cdev",
+	.release = idxd_cdev_dev_release,
+};
+
+static inline struct idxd_cdev *inode_idxd_cdev(struct inode *inode)
+{
+	struct cdev *cdev = inode->i_cdev;
+
+	return container_of(cdev, struct idxd_cdev, cdev);
+}
+
+static inline struct idxd_wq *idxd_cdev_wq(struct idxd_cdev *idxd_cdev)
+{
+	return container_of(idxd_cdev, struct idxd_wq, idxd_cdev);
+}
+
+static inline struct idxd_wq *inode_wq(struct inode *inode)
+{
+	return idxd_cdev_wq(inode_idxd_cdev(inode));
+}
+
+static int idxd_cdev_open(struct inode *inode, struct file *filp)
+{
+	struct idxd_user_context *ctx;
+	struct idxd_device *idxd;
+	struct idxd_wq *wq;
+	struct device *dev;
+	struct idxd_cdev *idxd_cdev;
+
+	wq = inode_wq(inode);
+	idxd = wq->idxd;
+	dev = &idxd->pdev->dev;
+	idxd_cdev = &wq->idxd_cdev;
+
+	dev_dbg(dev, "%s called\n", __func__);
+
+	if (idxd_wq_refcount(wq) > 1 && wq_dedicated(wq))
+		return -EBUSY;
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->wq = wq;
+	filp->private_data = ctx;
+	idxd_wq_get(wq);
+	return 0;
+}
+
+static int idxd_cdev_release(struct inode *node, struct file *filep)
+{
+	struct idxd_user_context *ctx = filep->private_data;
+	struct idxd_wq *wq = ctx->wq;
+	struct idxd_device *idxd = wq->idxd;
+	struct device *dev = &idxd->pdev->dev;
+
+	dev_dbg(dev, "%s called\n", __func__);
+	filep->private_data = NULL;
+
+	kfree(ctx);
+	idxd_wq_put(wq);
+	return 0;
+}
+
+static int check_vma(struct idxd_wq *wq, struct vm_area_struct *vma,
+		     const char *func)
+{
+	struct device *dev = &wq->idxd->pdev->dev;
+
+	if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
+		dev_info_ratelimited(dev,
+				     "%s: %s: mapping too large: %lu\n",
+				     current->comm, func,
+				     vma->vm_end - vma->vm_start);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct idxd_user_context *ctx = filp->private_data;
+	struct idxd_wq *wq = ctx->wq;
+	struct idxd_device *idxd = wq->idxd;
+	struct pci_dev *pdev = idxd->pdev;
+	phys_addr_t base = pci_resource_start(pdev, IDXD_WQ_BAR);
+	unsigned long pfn;
+	int rc;
+
+	dev_dbg(&pdev->dev, "%s called\n", __func__);
+	rc = check_vma(wq, vma, __func__);
+	if (rc < 0)
+		return rc;
+
+	vma->vm_flags |= VM_DONTCOPY;
+	pfn = (base + idxd_get_wq_portal_full_offset(wq->id,
+				IDXD_PORTAL_LIMITED)) >> PAGE_SHIFT;
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	vma->vm_private_data = ctx;
+
+	return io_remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE,
+			vma->vm_page_prot);
+}
+
+static __poll_t 
idxd_cdev_poll(struct file *filp, + struct poll_table_struct *wait) +{ + struct idxd_user_context *ctx = filp->private_data; + struct idxd_wq *wq = ctx->wq; + struct idxd_device *idxd = wq->idxd; + struct idxd_cdev *idxd_cdev = &wq->idxd_cdev; + unsigned long flags; + __poll_t out = 0; + + poll_wait(filp, &idxd_cdev->err_queue, wait); + spin_lock_irqsave(&idxd->dev_lock, flags); + if (idxd->sw_err.valid) + out = EPOLLIN | EPOLLRDNORM; + spin_unlock_irqrestore(&idxd->dev_lock, flags); + + return out; +} + +static const struct file_operations idxd_cdev_fops = { + .owner = THIS_MODULE, + .open = idxd_cdev_open, + .release = idxd_cdev_release, + .mmap = idxd_cdev_mmap, + .poll = idxd_cdev_poll, +}; + +int idxd_cdev_get_major(struct idxd_device *idxd) +{ + return MAJOR(ictx[idxd->type].devt); +} + +static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq) +{ + struct idxd_device *idxd = wq->idxd; + struct idxd_cdev *idxd_cdev = &wq->idxd_cdev; + struct idxd_cdev_context *cdev_ctx; + struct device *dev; + int minor, rc; + + idxd_cdev->dev = kzalloc(sizeof(*idxd_cdev->dev), GFP_KERNEL); + if (!idxd_cdev->dev) + return -ENOMEM; + + dev = idxd_cdev->dev; + dev->parent = &idxd->pdev->dev; + dev_set_name(dev, "%s/wq%u.%u", idxd_get_dev_name(idxd), + idxd->id, wq->id); + dev->bus = idxd_get_bus_type(idxd); + + cdev_ctx = &ictx[wq->idxd->type]; + minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL); + if (minor < 0) { + rc = minor; + goto ida_err; + } + + dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor); + dev->type = &idxd_cdev_device_type; + rc = device_register(dev); + if (rc < 0) { + dev_err(&idxd->pdev->dev, "device register failed\n"); + put_device(dev); + goto dev_reg_err; + } + idxd_cdev->minor = minor; + + return 0; + + dev_reg_err: + ida_simple_remove(&cdev_ctx->minor_ida, MINOR(dev->devt)); + ida_err: + kfree(dev); + idxd_cdev->dev = NULL; + return rc; +} + +static void idxd_wq_cdev_cleanup(struct idxd_wq *wq, + enum idxd_cdev_cleanup cdev_state) +{ + struct idxd_cdev *idxd_cdev = &wq->idxd_cdev; + struct idxd_cdev_context *cdev_ctx; + + cdev_ctx = &ictx[wq->idxd->type]; + if (cdev_state == CDEV_NORMAL) + cdev_del(&idxd_cdev->cdev); + device_unregister(idxd_cdev->dev); + /* + * The device_type->release() will be called on the device and free + * the allocated struct device. We can just forget it. 
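+ * (idxd_cdev_dev_release() performs the actual kfree() once the last
+ * reference to the device is dropped.)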
+ */ + ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor); + idxd_cdev->dev = NULL; + idxd_cdev->minor = -1; +} + +int idxd_wq_add_cdev(struct idxd_wq *wq) +{ + struct idxd_cdev *idxd_cdev = &wq->idxd_cdev; + struct cdev *cdev = &idxd_cdev->cdev; + struct device *dev; + int rc; + + rc = idxd_wq_cdev_dev_setup(wq); + if (rc < 0) + return rc; + + dev = idxd_cdev->dev; + cdev_init(cdev, &idxd_cdev_fops); + cdev_set_parent(cdev, &dev->kobj); + rc = cdev_add(cdev, dev->devt, 1); + if (rc) { + dev_dbg(&wq->idxd->pdev->dev, "cdev_add failed: %d\n", rc); + idxd_wq_cdev_cleanup(wq, CDEV_FAILED); + return rc; + } + + init_waitqueue_head(&idxd_cdev->err_queue); + return 0; +} + +void idxd_wq_del_cdev(struct idxd_wq *wq) +{ + idxd_wq_cdev_cleanup(wq, CDEV_NORMAL); +} + +int idxd_cdev_register(void) +{ + int rc, i; + + for (i = 0; i < IDXD_TYPE_MAX; i++) { + ida_init(&ictx[i].minor_ida); + rc = alloc_chrdev_region(&ictx[i].devt, 0, MINORMASK, + ictx[i].name); + if (rc) + return rc; + } + + return 0; +} + +void idxd_cdev_remove(void) +{ + int i; + + for (i = 0; i < IDXD_TYPE_MAX; i++) { + unregister_chrdev_region(ictx[i].devt, MINORMASK); + ida_destroy(&ictx[i].minor_ida); + } +} diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c new file mode 100644 index 000000000000..ada69e722f84 --- /dev/null +++ b/drivers/dma/idxd/device.c @@ -0,0 +1,693 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/dmaengine.h> +#include <uapi/linux/idxd.h> +#include "../dmaengine.h" +#include "idxd.h" +#include "registers.h" + +static int idxd_cmd_wait(struct idxd_device *idxd, u32 *status, int timeout); +static int idxd_cmd_send(struct idxd_device *idxd, int cmd_code, u32 operand); + +/* Interrupt control bits */ +int idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id) +{ + struct pci_dev *pdev = idxd->pdev; + int msixcnt = pci_msix_vec_count(pdev); + union msix_perm perm; + u32 offset; + + if (vec_id < 0 || vec_id >= msixcnt) + return -EINVAL; + + offset = idxd->msix_perm_offset + vec_id * 8; + perm.bits = ioread32(idxd->reg_base + offset); + perm.ignore = 1; + iowrite32(perm.bits, idxd->reg_base + offset); + + return 0; +} + +void idxd_mask_msix_vectors(struct idxd_device *idxd) +{ + struct pci_dev *pdev = idxd->pdev; + int msixcnt = pci_msix_vec_count(pdev); + int i, rc; + + for (i = 0; i < msixcnt; i++) { + rc = idxd_mask_msix_vector(idxd, i); + if (rc < 0) + dev_warn(&pdev->dev, + "Failed disabling msix vec %d\n", i); + } +} + +int idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id) +{ + struct pci_dev *pdev = idxd->pdev; + int msixcnt = pci_msix_vec_count(pdev); + union msix_perm perm; + u32 offset; + + if (vec_id < 0 || vec_id >= msixcnt) + return -EINVAL; + + offset = idxd->msix_perm_offset + vec_id * 8; + perm.bits = ioread32(idxd->reg_base + offset); + perm.ignore = 0; + iowrite32(perm.bits, idxd->reg_base + offset); + + return 0; +} + +void idxd_unmask_error_interrupts(struct idxd_device *idxd) +{ + union genctrl_reg genctrl; + + genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET); + genctrl.softerr_int_en = 1; + iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET); +} + +void idxd_mask_error_interrupts(struct idxd_device *idxd) +{ + union genctrl_reg genctrl; + + genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET); + 
genctrl.softerr_int_en = 0; + iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET); +} + +static void free_hw_descs(struct idxd_wq *wq) +{ + int i; + + for (i = 0; i < wq->num_descs; i++) + kfree(wq->hw_descs[i]); + + kfree(wq->hw_descs); +} + +static int alloc_hw_descs(struct idxd_wq *wq, int num) +{ + struct device *dev = &wq->idxd->pdev->dev; + int i; + int node = dev_to_node(dev); + + wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *), + GFP_KERNEL, node); + if (!wq->hw_descs) + return -ENOMEM; + + for (i = 0; i < num; i++) { + wq->hw_descs[i] = kzalloc_node(sizeof(*wq->hw_descs[i]), + GFP_KERNEL, node); + if (!wq->hw_descs[i]) { + free_hw_descs(wq); + return -ENOMEM; + } + } + + return 0; +} + +static void free_descs(struct idxd_wq *wq) +{ + int i; + + for (i = 0; i < wq->num_descs; i++) + kfree(wq->descs[i]); + + kfree(wq->descs); +} + +static int alloc_descs(struct idxd_wq *wq, int num) +{ + struct device *dev = &wq->idxd->pdev->dev; + int i; + int node = dev_to_node(dev); + + wq->descs = kcalloc_node(num, sizeof(struct idxd_desc *), + GFP_KERNEL, node); + if (!wq->descs) + return -ENOMEM; + + for (i = 0; i < num; i++) { + wq->descs[i] = kzalloc_node(sizeof(*wq->descs[i]), + GFP_KERNEL, node); + if (!wq->descs[i]) { + free_descs(wq); + return -ENOMEM; + } + } + + return 0; +} + +/* WQ control bits */ +int idxd_wq_alloc_resources(struct idxd_wq *wq) +{ + struct idxd_device *idxd = wq->idxd; + struct idxd_group *group = wq->group; + struct device *dev = &idxd->pdev->dev; + int rc, num_descs, i; + + if (wq->type != IDXD_WQT_KERNEL) + return 0; + + num_descs = wq->size + + idxd->hw.gen_cap.max_descs_per_engine * group->num_engines; + wq->num_descs = num_descs; + + rc = alloc_hw_descs(wq, num_descs); + if (rc < 0) + return rc; + + wq->compls_size = num_descs * sizeof(struct dsa_completion_record); + wq->compls = dma_alloc_coherent(dev, wq->compls_size, + &wq->compls_addr, GFP_KERNEL); + if (!wq->compls) { + rc = -ENOMEM; + goto fail_alloc_compls; + } + + rc = alloc_descs(wq, num_descs); + if (rc < 0) + goto fail_alloc_descs; + + rc = sbitmap_init_node(&wq->sbmap, num_descs, -1, GFP_KERNEL, + dev_to_node(dev)); + if (rc < 0) + goto fail_sbitmap_init; + + for (i = 0; i < num_descs; i++) { + struct idxd_desc *desc = wq->descs[i]; + + desc->hw = wq->hw_descs[i]; + desc->completion = &wq->compls[i]; + desc->compl_dma = wq->compls_addr + + sizeof(struct dsa_completion_record) * i; + desc->id = i; + desc->wq = wq; + + dma_async_tx_descriptor_init(&desc->txd, &wq->dma_chan); + desc->txd.tx_submit = idxd_dma_tx_submit; + } + + return 0; + + fail_sbitmap_init: + free_descs(wq); + fail_alloc_descs: + dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr); + fail_alloc_compls: + free_hw_descs(wq); + return rc; +} + +void idxd_wq_free_resources(struct idxd_wq *wq) +{ + struct device *dev = &wq->idxd->pdev->dev; + + if (wq->type != IDXD_WQT_KERNEL) + return; + + free_hw_descs(wq); + free_descs(wq); + dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr); + sbitmap_free(&wq->sbmap); +} + +int idxd_wq_enable(struct idxd_wq *wq) +{ + struct idxd_device *idxd = wq->idxd; + struct device *dev = &idxd->pdev->dev; + u32 status; + int rc; + + lockdep_assert_held(&idxd->dev_lock); + + if (wq->state == IDXD_WQ_ENABLED) { + dev_dbg(dev, "WQ %d already enabled\n", wq->id); + return -ENXIO; + } + + rc = idxd_cmd_send(idxd, IDXD_CMD_ENABLE_WQ, wq->id); + if (rc < 0) + return rc; + rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT); + if (rc < 0) + return rc; + + if 
(status != IDXD_CMDSTS_SUCCESS && + status != IDXD_CMDSTS_ERR_WQ_ENABLED) { + dev_dbg(dev, "WQ enable failed: %#x\n", status); + return -ENXIO; + } + + wq->state = IDXD_WQ_ENABLED; + dev_dbg(dev, "WQ %d enabled\n", wq->id); + return 0; +} + +int idxd_wq_disable(struct idxd_wq *wq) +{ + struct idxd_device *idxd = wq->idxd; + struct device *dev = &idxd->pdev->dev; + u32 status, operand; + int rc; + + lockdep_assert_held(&idxd->dev_lock); + dev_dbg(dev, "Disabling WQ %d\n", wq->id); + + if (wq->state != IDXD_WQ_ENABLED) { + dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state); + return 0; + } + + operand = BIT(wq->id % 16) | ((wq->id / 16) << 16); + rc = idxd_cmd_send(idxd, IDXD_CMD_DISABLE_WQ, operand); + if (rc < 0) + return rc; + rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT); + if (rc < 0) + return rc; + + if (status != IDXD_CMDSTS_SUCCESS) { + dev_dbg(dev, "WQ disable failed: %#x\n", status); + return -ENXIO; + } + + wq->state = IDXD_WQ_DISABLED; + dev_dbg(dev, "WQ %d disabled\n", wq->id); + return 0; +} + +int idxd_wq_map_portal(struct idxd_wq *wq) +{ + struct idxd_device *idxd = wq->idxd; + struct pci_dev *pdev = idxd->pdev; + struct device *dev = &pdev->dev; + resource_size_t start; + + start = pci_resource_start(pdev, IDXD_WQ_BAR); + start = start + wq->id * IDXD_PORTAL_SIZE; + + wq->dportal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE); + if (!wq->dportal) + return -ENOMEM; + dev_dbg(dev, "wq %d portal mapped at %p\n", wq->id, wq->dportal); + + return 0; +} + +void idxd_wq_unmap_portal(struct idxd_wq *wq) +{ + struct device *dev = &wq->idxd->pdev->dev; + + devm_iounmap(dev, wq->dportal); +} + +/* Device control bits */ +static inline bool idxd_is_enabled(struct idxd_device *idxd) +{ + union gensts_reg gensts; + + gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET); + + if (gensts.state == IDXD_DEVICE_STATE_ENABLED) + return true; + return false; +} + +static int idxd_cmd_wait(struct idxd_device *idxd, u32 *status, int timeout) +{ + u32 sts, to = timeout; + + lockdep_assert_held(&idxd->dev_lock); + sts = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET); + while (sts & IDXD_CMDSTS_ACTIVE && --to) { + cpu_relax(); + sts = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET); + } + + if (to == 0 && sts & IDXD_CMDSTS_ACTIVE) { + dev_warn(&idxd->pdev->dev, "%s timed out!\n", __func__); + *status = 0; + return -EBUSY; + } + + *status = sts; + return 0; +} + +static int idxd_cmd_send(struct idxd_device *idxd, int cmd_code, u32 operand) +{ + union idxd_command_reg cmd; + int rc; + u32 status; + + lockdep_assert_held(&idxd->dev_lock); + rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT); + if (rc < 0) + return rc; + + memset(&cmd, 0, sizeof(cmd)); + cmd.cmd = cmd_code; + cmd.operand = operand; + dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n", + __func__, cmd_code, operand); + iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET); + + return 0; +} + +int idxd_device_enable(struct idxd_device *idxd) +{ + struct device *dev = &idxd->pdev->dev; + int rc; + u32 status; + + lockdep_assert_held(&idxd->dev_lock); + if (idxd_is_enabled(idxd)) { + dev_dbg(dev, "Device already enabled\n"); + return -ENXIO; + } + + rc = idxd_cmd_send(idxd, IDXD_CMD_ENABLE_DEVICE, 0); + if (rc < 0) + return rc; + rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT); + if (rc < 0) + return rc; + + /* If the command is successful or if the device was enabled */ + if (status != IDXD_CMDSTS_SUCCESS && + status != IDXD_CMDSTS_ERR_DEV_ENABLED) { + dev_dbg(dev, "%s: err_code: %#x\n", __func__, status); + 
return -ENXIO; + } + + idxd->state = IDXD_DEV_ENABLED; + return 0; +} + +int idxd_device_disable(struct idxd_device *idxd) +{ + struct device *dev = &idxd->pdev->dev; + int rc; + u32 status; + + lockdep_assert_held(&idxd->dev_lock); + if (!idxd_is_enabled(idxd)) { + dev_dbg(dev, "Device is not enabled\n"); + return 0; + } + + rc = idxd_cmd_send(idxd, IDXD_CMD_DISABLE_DEVICE, 0); + if (rc < 0) + return rc; + rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT); + if (rc < 0) + return rc; + + /* If the command is successful or if the device was disabled */ + if (status != IDXD_CMDSTS_SUCCESS && + !(status & IDXD_CMDSTS_ERR_DIS_DEV_EN)) { + dev_dbg(dev, "%s: err_code: %#x\n", __func__, status); + rc = -ENXIO; + return rc; + } + + idxd->state = IDXD_DEV_CONF_READY; + return 0; +} + +int __idxd_device_reset(struct idxd_device *idxd) +{ + u32 status; + int rc; + + rc = idxd_cmd_send(idxd, IDXD_CMD_RESET_DEVICE, 0); + if (rc < 0) + return rc; + rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT); + if (rc < 0) + return rc; + + return 0; +} + +int idxd_device_reset(struct idxd_device *idxd) +{ + unsigned long flags; + int rc; + + spin_lock_irqsave(&idxd->dev_lock, flags); + rc = __idxd_device_reset(idxd); + spin_unlock_irqrestore(&idxd->dev_lock, flags); + return rc; +} + +/* Device configuration bits */ +static void idxd_group_config_write(struct idxd_group *group) +{ + struct idxd_device *idxd = group->idxd; + struct device *dev = &idxd->pdev->dev; + int i; + u32 grpcfg_offset; + + dev_dbg(dev, "Writing group %d cfg registers\n", group->id); + + /* setup GRPWQCFG */ + for (i = 0; i < 4; i++) { + grpcfg_offset = idxd->grpcfg_offset + + group->id * 64 + i * sizeof(u64); + iowrite64(group->grpcfg.wqs[i], + idxd->reg_base + grpcfg_offset); + dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n", + group->id, i, grpcfg_offset, + ioread64(idxd->reg_base + grpcfg_offset)); + } + + /* setup GRPENGCFG */ + grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 32; + iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset); + dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id, + grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset)); + + /* setup GRPFLAGS */ + grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 40; + iowrite32(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset); + dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n", + group->id, grpcfg_offset, + ioread32(idxd->reg_base + grpcfg_offset)); +} + +static int idxd_groups_config_write(struct idxd_device *idxd) + +{ + union gencfg_reg reg; + int i; + struct device *dev = &idxd->pdev->dev; + + /* Setup bandwidth token limit */ + if (idxd->token_limit) { + reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET); + reg.token_limit = idxd->token_limit; + iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET); + } + + dev_dbg(dev, "GENCFG(%#x): %#x\n", IDXD_GENCFG_OFFSET, + ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET)); + + for (i = 0; i < idxd->max_groups; i++) { + struct idxd_group *group = &idxd->groups[i]; + + idxd_group_config_write(group); + } + + return 0; +} + +static int idxd_wq_config_write(struct idxd_wq *wq) +{ + struct idxd_device *idxd = wq->idxd; + struct device *dev = &idxd->pdev->dev; + u32 wq_offset; + int i; + + if (!wq->group) + return 0; + + memset(&wq->wqcfg, 0, sizeof(union wqcfg)); + + /* byte 0-3 */ + wq->wqcfg.wq_size = wq->size; + + if (wq->size == 0) { + dev_warn(dev, "Incorrect work queue size: 0\n"); + return -EINVAL; + } + + /* bytes 4-7 */ + wq->wqcfg.wq_thresh = wq->threshold; + + /* byte 8-11 */ + 
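	/* mode 1 selects a dedicated WQ; priv marks kernel-owned WQs */
+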
wq->wqcfg.priv = !!(wq->type == IDXD_WQT_KERNEL); + wq->wqcfg.mode = 1; + + wq->wqcfg.priority = wq->priority; + + /* bytes 12-15 */ + wq->wqcfg.max_xfer_shift = idxd->hw.gen_cap.max_xfer_shift; + wq->wqcfg.max_batch_shift = idxd->hw.gen_cap.max_batch_shift; + + dev_dbg(dev, "WQ %d CFGs\n", wq->id); + for (i = 0; i < 8; i++) { + wq_offset = idxd->wqcfg_offset + wq->id * 32 + i * sizeof(u32); + iowrite32(wq->wqcfg.bits[i], idxd->reg_base + wq_offset); + dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n", + wq->id, i, wq_offset, + ioread32(idxd->reg_base + wq_offset)); + } + + return 0; +} + +static int idxd_wqs_config_write(struct idxd_device *idxd) +{ + int i, rc; + + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + + rc = idxd_wq_config_write(wq); + if (rc < 0) + return rc; + } + + return 0; +} + +static void idxd_group_flags_setup(struct idxd_device *idxd) +{ + int i; + + /* TC-A 0 and TC-B 1 should be defaults */ + for (i = 0; i < idxd->max_groups; i++) { + struct idxd_group *group = &idxd->groups[i]; + + if (group->tc_a == -1) + group->grpcfg.flags.tc_a = 0; + else + group->grpcfg.flags.tc_a = group->tc_a; + if (group->tc_b == -1) + group->grpcfg.flags.tc_b = 1; + else + group->grpcfg.flags.tc_b = group->tc_b; + group->grpcfg.flags.use_token_limit = group->use_token_limit; + group->grpcfg.flags.tokens_reserved = group->tokens_reserved; + if (group->tokens_allowed) + group->grpcfg.flags.tokens_allowed = + group->tokens_allowed; + else + group->grpcfg.flags.tokens_allowed = idxd->max_tokens; + } +} + +static int idxd_engines_setup(struct idxd_device *idxd) +{ + int i, engines = 0; + struct idxd_engine *eng; + struct idxd_group *group; + + for (i = 0; i < idxd->max_groups; i++) { + group = &idxd->groups[i]; + group->grpcfg.engines = 0; + } + + for (i = 0; i < idxd->max_engines; i++) { + eng = &idxd->engines[i]; + group = eng->group; + + if (!group) + continue; + + group->grpcfg.engines |= BIT(eng->id); + engines++; + } + + if (!engines) + return -EINVAL; + + return 0; +} + +static int idxd_wqs_setup(struct idxd_device *idxd) +{ + struct idxd_wq *wq; + struct idxd_group *group; + int i, j, configured = 0; + struct device *dev = &idxd->pdev->dev; + + for (i = 0; i < idxd->max_groups; i++) { + group = &idxd->groups[i]; + for (j = 0; j < 4; j++) + group->grpcfg.wqs[j] = 0; + } + + for (i = 0; i < idxd->max_wqs; i++) { + wq = &idxd->wqs[i]; + group = wq->group; + + if (!wq->group) + continue; + if (!wq->size) + continue; + + if (!wq_dedicated(wq)) { + dev_warn(dev, "No shared workqueue support.\n"); + return -EINVAL; + } + + group->grpcfg.wqs[wq->id / 64] |= BIT(wq->id % 64); + configured++; + } + + if (configured == 0) + return -EINVAL; + + return 0; +} + +int idxd_device_config(struct idxd_device *idxd) +{ + int rc; + + lockdep_assert_held(&idxd->dev_lock); + rc = idxd_wqs_setup(idxd); + if (rc < 0) + return rc; + + rc = idxd_engines_setup(idxd); + if (rc < 0) + return rc; + + idxd_group_flags_setup(idxd); + + rc = idxd_wqs_config_write(idxd); + if (rc < 0) + return rc; + + rc = idxd_groups_config_write(idxd); + if (rc < 0) + return rc; + + return 0; +} diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c new file mode 100644 index 000000000000..c64c1429d160 --- /dev/null +++ b/drivers/dma/idxd/dma.c @@ -0,0 +1,217 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2019 Intel Corporation. All rights rsvd. 
*/ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/device.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/dmaengine.h> +#include <uapi/linux/idxd.h> +#include "../dmaengine.h" +#include "registers.h" +#include "idxd.h" + +static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c) +{ + return container_of(c, struct idxd_wq, dma_chan); +} + +void idxd_dma_complete_txd(struct idxd_desc *desc, + enum idxd_complete_type comp_type) +{ + struct dma_async_tx_descriptor *tx; + struct dmaengine_result res; + int complete = 1; + + if (desc->completion->status == DSA_COMP_SUCCESS) + res.result = DMA_TRANS_NOERROR; + else if (desc->completion->status) + res.result = DMA_TRANS_WRITE_FAILED; + else if (comp_type == IDXD_COMPLETE_ABORT) + res.result = DMA_TRANS_ABORTED; + else + complete = 0; + + tx = &desc->txd; + if (complete && tx->cookie) { + dma_cookie_complete(tx); + dma_descriptor_unmap(tx); + dmaengine_desc_get_callback_invoke(tx, &res); + tx->callback = NULL; + tx->callback_result = NULL; + } +} + +static void op_flag_setup(unsigned long flags, u32 *desc_flags) +{ + *desc_flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR; + if (flags & DMA_PREP_INTERRUPT) + *desc_flags |= IDXD_OP_FLAG_RCI; +} + +static inline void set_completion_address(struct idxd_desc *desc, + u64 *compl_addr) +{ + *compl_addr = desc->compl_dma; +} + +static inline void idxd_prep_desc_common(struct idxd_wq *wq, + struct dsa_hw_desc *hw, char opcode, + u64 addr_f1, u64 addr_f2, u64 len, + u64 compl, u32 flags) +{ + struct idxd_device *idxd = wq->idxd; + + hw->flags = flags; + hw->opcode = opcode; + hw->src_addr = addr_f1; + hw->dst_addr = addr_f2; + hw->xfer_size = len; + hw->priv = !!(wq->type == IDXD_WQT_KERNEL); + hw->completion_addr = compl; + + /* + * Descriptor completion vectors are 1-8 for MSIX. We will round + * robin through the 8 vectors. + */ + wq->vec_ptr = (wq->vec_ptr % idxd->num_wq_irqs) + 1; + hw->int_handle = wq->vec_ptr; +} + +static struct dma_async_tx_descriptor * +idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest, + dma_addr_t dma_src, size_t len, unsigned long flags) +{ + struct idxd_wq *wq = to_idxd_wq(c); + u32 desc_flags; + struct idxd_device *idxd = wq->idxd; + struct idxd_desc *desc; + + if (wq->state != IDXD_WQ_ENABLED) + return NULL; + + if (len > idxd->max_xfer_bytes) + return NULL; + + op_flag_setup(flags, &desc_flags); + desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK); + if (IS_ERR(desc)) + return NULL; + + idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_MEMMOVE, + dma_src, dma_dest, len, desc->compl_dma, + desc_flags); + + desc->txd.flags = flags; + + return &desc->txd; +} + +static int idxd_dma_alloc_chan_resources(struct dma_chan *chan) +{ + struct idxd_wq *wq = to_idxd_wq(chan); + struct device *dev = &wq->idxd->pdev->dev; + + idxd_wq_get(wq); + dev_dbg(dev, "%s: client_count: %d\n", __func__, + idxd_wq_refcount(wq)); + return 0; +} + +static void idxd_dma_free_chan_resources(struct dma_chan *chan) +{ + struct idxd_wq *wq = to_idxd_wq(chan); + struct device *dev = &wq->idxd->pdev->dev; + + idxd_wq_put(wq); + dev_dbg(dev, "%s: client_count: %d\n", __func__, + idxd_wq_refcount(wq)); +} + +static enum dma_status idxd_dma_tx_status(struct dma_chan *dma_chan, + dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + return dma_cookie_status(dma_chan, cookie, txstate); +} + +/* + * issue_pending() does not need to do anything since tx_submit() does the job + * already. 
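 * tx_submit() maps to idxd_dma_tx_submit(), which pushes the descriptor
 * to the device through idxd_submit_desc() at submission time.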
+ */ +static void idxd_dma_issue_pending(struct dma_chan *dma_chan) +{ +} + +dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct dma_chan *c = tx->chan; + struct idxd_wq *wq = to_idxd_wq(c); + dma_cookie_t cookie; + int rc; + struct idxd_desc *desc = container_of(tx, struct idxd_desc, txd); + + cookie = dma_cookie_assign(tx); + + rc = idxd_submit_desc(wq, desc); + if (rc < 0) { + idxd_free_desc(wq, desc); + return rc; + } + + return cookie; +} + +static void idxd_dma_release(struct dma_device *device) +{ +} + +int idxd_register_dma_device(struct idxd_device *idxd) +{ + struct dma_device *dma = &idxd->dma_dev; + + INIT_LIST_HEAD(&dma->channels); + dma->dev = &idxd->pdev->dev; + + dma->device_release = idxd_dma_release; + + if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) { + dma_cap_set(DMA_MEMCPY, dma->cap_mask); + dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy; + } + + dma->device_tx_status = idxd_dma_tx_status; + dma->device_issue_pending = idxd_dma_issue_pending; + dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources; + dma->device_free_chan_resources = idxd_dma_free_chan_resources; + + return dma_async_device_register(&idxd->dma_dev); +} + +void idxd_unregister_dma_device(struct idxd_device *idxd) +{ + dma_async_device_unregister(&idxd->dma_dev); +} + +int idxd_register_dma_channel(struct idxd_wq *wq) +{ + struct idxd_device *idxd = wq->idxd; + struct dma_device *dma = &idxd->dma_dev; + struct dma_chan *chan = &wq->dma_chan; + int rc; + + memset(&wq->dma_chan, 0, sizeof(struct dma_chan)); + chan->device = dma; + list_add_tail(&chan->device_node, &dma->channels); + rc = dma_async_device_channel_register(dma, chan); + if (rc < 0) + return rc; + + return 0; +} + +void idxd_unregister_dma_channel(struct idxd_wq *wq) +{ + dma_async_device_channel_unregister(&wq->idxd->dma_dev, &wq->dma_chan); +} diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h new file mode 100644 index 000000000000..b8f8a363b4a7 --- /dev/null +++ b/drivers/dma/idxd/idxd.h @@ -0,0 +1,316 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2019 Intel Corporation. All rights rsvd. 
*/ +#ifndef _IDXD_H_ +#define _IDXD_H_ + +#include <linux/sbitmap.h> +#include <linux/dmaengine.h> +#include <linux/percpu-rwsem.h> +#include <linux/wait.h> +#include <linux/cdev.h> +#include "registers.h" + +#define IDXD_DRIVER_VERSION "1.00" + +extern struct kmem_cache *idxd_desc_pool; + +#define IDXD_REG_TIMEOUT 50 +#define IDXD_DRAIN_TIMEOUT 5000 + +enum idxd_type { + IDXD_TYPE_UNKNOWN = -1, + IDXD_TYPE_DSA = 0, + IDXD_TYPE_MAX +}; + +#define IDXD_NAME_SIZE 128 + +struct idxd_device_driver { + struct device_driver drv; +}; + +struct idxd_irq_entry { + struct idxd_device *idxd; + int id; + struct llist_head pending_llist; + struct list_head work_list; +}; + +struct idxd_group { + struct device conf_dev; + struct idxd_device *idxd; + struct grpcfg grpcfg; + int id; + int num_engines; + int num_wqs; + bool use_token_limit; + u8 tokens_allowed; + u8 tokens_reserved; + int tc_a; + int tc_b; +}; + +#define IDXD_MAX_PRIORITY 0xf + +enum idxd_wq_state { + IDXD_WQ_DISABLED = 0, + IDXD_WQ_ENABLED, +}; + +enum idxd_wq_flag { + WQ_FLAG_DEDICATED = 0, +}; + +enum idxd_wq_type { + IDXD_WQT_NONE = 0, + IDXD_WQT_KERNEL, + IDXD_WQT_USER, +}; + +struct idxd_cdev { + struct cdev cdev; + struct device *dev; + int minor; + struct wait_queue_head err_queue; +}; + +#define IDXD_ALLOCATED_BATCH_SIZE 128U +#define WQ_NAME_SIZE 1024 +#define WQ_TYPE_SIZE 10 + +enum idxd_op_type { + IDXD_OP_BLOCK = 0, + IDXD_OP_NONBLOCK = 1, +}; + +enum idxd_complete_type { + IDXD_COMPLETE_NORMAL = 0, + IDXD_COMPLETE_ABORT, +}; + +struct idxd_wq { + void __iomem *dportal; + struct device conf_dev; + struct idxd_cdev idxd_cdev; + struct idxd_device *idxd; + int id; + enum idxd_wq_type type; + struct idxd_group *group; + int client_count; + struct mutex wq_lock; /* mutex for workqueue */ + u32 size; + u32 threshold; + u32 priority; + enum idxd_wq_state state; + unsigned long flags; + union wqcfg wqcfg; + atomic_t dq_count; /* dedicated queue flow control */ + u32 vec_ptr; /* interrupt steering */ + struct dsa_hw_desc **hw_descs; + int num_descs; + struct dsa_completion_record *compls; + dma_addr_t compls_addr; + int compls_size; + struct idxd_desc **descs; + struct sbitmap sbmap; + struct dma_chan dma_chan; + struct percpu_rw_semaphore submit_lock; + wait_queue_head_t submit_waitq; + char name[WQ_NAME_SIZE + 1]; +}; + +struct idxd_engine { + struct device conf_dev; + int id; + struct idxd_group *group; + struct idxd_device *idxd; +}; + +/* shadow registers */ +struct idxd_hw { + u32 version; + union gen_cap_reg gen_cap; + union wq_cap_reg wq_cap; + union group_cap_reg group_cap; + union engine_cap_reg engine_cap; + struct opcap opcap; +}; + +enum idxd_device_state { + IDXD_DEV_HALTED = -1, + IDXD_DEV_DISABLED = 0, + IDXD_DEV_CONF_READY, + IDXD_DEV_ENABLED, +}; + +enum idxd_device_flag { + IDXD_FLAG_CONFIGURABLE = 0, +}; + +struct idxd_device { + enum idxd_type type; + struct device conf_dev; + struct list_head list; + struct idxd_hw hw; + enum idxd_device_state state; + unsigned long flags; + int id; + int major; + + struct pci_dev *pdev; + void __iomem *reg_base; + + spinlock_t dev_lock; /* spinlock for device */ + struct idxd_group *groups; + struct idxd_wq *wqs; + struct idxd_engine *engines; + + int num_groups; + + u32 msix_perm_offset; + u32 wqcfg_offset; + u32 grpcfg_offset; + u32 perfmon_offset; + + u64 max_xfer_bytes; + u32 max_batch_size; + int max_groups; + int max_engines; + int max_tokens; + int max_wqs; + int max_wq_size; + int token_limit; + int nr_tokens; /* non-reserved tokens */ + + union sw_err_reg sw_err; + + 
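	/* MSI-X interrupt plumbing */
+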
struct msix_entry *msix_entries; + int num_wq_irqs; + struct idxd_irq_entry *irq_entries; + + struct dma_device dma_dev; +}; + +/* IDXD software descriptor */ +struct idxd_desc { + struct dsa_hw_desc *hw; + dma_addr_t desc_dma; + struct dsa_completion_record *completion; + dma_addr_t compl_dma; + struct dma_async_tx_descriptor txd; + struct llist_node llnode; + struct list_head list; + int id; + struct idxd_wq *wq; +}; + +#define confdev_to_idxd(dev) container_of(dev, struct idxd_device, conf_dev) +#define confdev_to_wq(dev) container_of(dev, struct idxd_wq, conf_dev) + +extern struct bus_type dsa_bus_type; + +static inline bool wq_dedicated(struct idxd_wq *wq) +{ + return test_bit(WQ_FLAG_DEDICATED, &wq->flags); +} + +enum idxd_portal_prot { + IDXD_PORTAL_UNLIMITED = 0, + IDXD_PORTAL_LIMITED, +}; + +static inline int idxd_get_wq_portal_offset(enum idxd_portal_prot prot) +{ + return prot * 0x1000; +} + +static inline int idxd_get_wq_portal_full_offset(int wq_id, + enum idxd_portal_prot prot) +{ + return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot); +} + +static inline void idxd_set_type(struct idxd_device *idxd) +{ + struct pci_dev *pdev = idxd->pdev; + + if (pdev->device == PCI_DEVICE_ID_INTEL_DSA_SPR0) + idxd->type = IDXD_TYPE_DSA; + else + idxd->type = IDXD_TYPE_UNKNOWN; +} + +static inline void idxd_wq_get(struct idxd_wq *wq) +{ + wq->client_count++; +} + +static inline void idxd_wq_put(struct idxd_wq *wq) +{ + wq->client_count--; +} + +static inline int idxd_wq_refcount(struct idxd_wq *wq) +{ + return wq->client_count; +}; + +const char *idxd_get_dev_name(struct idxd_device *idxd); +int idxd_register_bus_type(void); +void idxd_unregister_bus_type(void); +int idxd_setup_sysfs(struct idxd_device *idxd); +void idxd_cleanup_sysfs(struct idxd_device *idxd); +int idxd_register_driver(void); +void idxd_unregister_driver(void); +struct bus_type *idxd_get_bus_type(struct idxd_device *idxd); + +/* device interrupt control */ +irqreturn_t idxd_irq_handler(int vec, void *data); +irqreturn_t idxd_misc_thread(int vec, void *data); +irqreturn_t idxd_wq_thread(int irq, void *data); +void idxd_mask_error_interrupts(struct idxd_device *idxd); +void idxd_unmask_error_interrupts(struct idxd_device *idxd); +void idxd_mask_msix_vectors(struct idxd_device *idxd); +int idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id); +int idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id); + +/* device control */ +int idxd_device_enable(struct idxd_device *idxd); +int idxd_device_disable(struct idxd_device *idxd); +int idxd_device_reset(struct idxd_device *idxd); +int __idxd_device_reset(struct idxd_device *idxd); +void idxd_device_cleanup(struct idxd_device *idxd); +int idxd_device_config(struct idxd_device *idxd); +void idxd_device_wqs_clear_state(struct idxd_device *idxd); + +/* work queue control */ +int idxd_wq_alloc_resources(struct idxd_wq *wq); +void idxd_wq_free_resources(struct idxd_wq *wq); +int idxd_wq_enable(struct idxd_wq *wq); +int idxd_wq_disable(struct idxd_wq *wq); +int idxd_wq_map_portal(struct idxd_wq *wq); +void idxd_wq_unmap_portal(struct idxd_wq *wq); + +/* submission */ +int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc); +struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype); +void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc); + +/* dmaengine */ +int idxd_register_dma_device(struct idxd_device *idxd); +void idxd_unregister_dma_device(struct idxd_device *idxd); +int idxd_register_dma_channel(struct idxd_wq 
*wq); +void idxd_unregister_dma_channel(struct idxd_wq *wq); +void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res); +void idxd_dma_complete_txd(struct idxd_desc *desc, + enum idxd_complete_type comp_type); +dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx); + +/* cdev */ +int idxd_cdev_register(void); +void idxd_cdev_remove(void); +int idxd_cdev_get_major(struct idxd_device *idxd); +int idxd_wq_add_cdev(struct idxd_wq *wq); +void idxd_wq_del_cdev(struct idxd_wq *wq); + +#endif diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c new file mode 100644 index 000000000000..7778c05deb5d --- /dev/null +++ b/drivers/dma/idxd/init.c @@ -0,0 +1,533 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/workqueue.h> +#include <linux/aer.h> +#include <linux/fs.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/device.h> +#include <linux/idr.h> +#include <uapi/linux/idxd.h> +#include <linux/dmaengine.h> +#include "../dmaengine.h" +#include "registers.h" +#include "idxd.h" + +MODULE_VERSION(IDXD_DRIVER_VERSION); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Intel Corporation"); + +#define DRV_NAME "idxd" + +static struct idr idxd_idrs[IDXD_TYPE_MAX]; +static struct mutex idxd_idr_lock; + +static struct pci_device_id idxd_pci_tbl[] = { + /* DSA ver 1.0 platforms */ + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_DSA_SPR0) }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, idxd_pci_tbl); + +static char *idxd_name[] = { + "dsa", +}; + +const char *idxd_get_dev_name(struct idxd_device *idxd) +{ + return idxd_name[idxd->type]; +} + +static int idxd_setup_interrupts(struct idxd_device *idxd) +{ + struct pci_dev *pdev = idxd->pdev; + struct device *dev = &pdev->dev; + struct msix_entry *msix; + struct idxd_irq_entry *irq_entry; + int i, msixcnt; + int rc = 0; + + msixcnt = pci_msix_vec_count(pdev); + if (msixcnt < 0) { + dev_err(dev, "Not MSI-X interrupt capable.\n"); + goto err_no_irq; + } + + idxd->msix_entries = devm_kzalloc(dev, sizeof(struct msix_entry) * + msixcnt, GFP_KERNEL); + if (!idxd->msix_entries) { + rc = -ENOMEM; + goto err_no_irq; + } + + for (i = 0; i < msixcnt; i++) + idxd->msix_entries[i].entry = i; + + rc = pci_enable_msix_exact(pdev, idxd->msix_entries, msixcnt); + if (rc) { + dev_err(dev, "Failed enabling %d MSIX entries.\n", msixcnt); + goto err_no_irq; + } + dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt); + + /* + * We implement 1 completion list per MSI-X entry except for + * entry 0, which is for errors and others. 
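 * Each of those per-WQ entries carries a pending_llist and a work_list
 * for the threaded idxd_wq_thread() handler to process.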
+ */ + idxd->irq_entries = devm_kcalloc(dev, msixcnt, + sizeof(struct idxd_irq_entry), + GFP_KERNEL); + if (!idxd->irq_entries) { + rc = -ENOMEM; + goto err_no_irq; + } + + for (i = 0; i < msixcnt; i++) { + idxd->irq_entries[i].id = i; + idxd->irq_entries[i].idxd = idxd; + } + + msix = &idxd->msix_entries[0]; + irq_entry = &idxd->irq_entries[0]; + rc = devm_request_threaded_irq(dev, msix->vector, idxd_irq_handler, + idxd_misc_thread, 0, "idxd-misc", + irq_entry); + if (rc < 0) { + dev_err(dev, "Failed to allocate misc interrupt.\n"); + goto err_no_irq; + } + + dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n", + msix->vector); + + /* first MSI-X entry is not for wq interrupts */ + idxd->num_wq_irqs = msixcnt - 1; + + for (i = 1; i < msixcnt; i++) { + msix = &idxd->msix_entries[i]; + irq_entry = &idxd->irq_entries[i]; + + init_llist_head(&idxd->irq_entries[i].pending_llist); + INIT_LIST_HEAD(&idxd->irq_entries[i].work_list); + rc = devm_request_threaded_irq(dev, msix->vector, + idxd_irq_handler, + idxd_wq_thread, 0, + "idxd-portal", irq_entry); + if (rc < 0) { + dev_err(dev, "Failed to allocate irq %d.\n", + msix->vector); + goto err_no_irq; + } + dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n", + i, msix->vector); + } + + idxd_unmask_error_interrupts(idxd); + + return 0; + + err_no_irq: + /* Disable error interrupt generation */ + idxd_mask_error_interrupts(idxd); + pci_disable_msix(pdev); + dev_err(dev, "No usable interrupts\n"); + return rc; +} + +static void idxd_wqs_free_lock(struct idxd_device *idxd) +{ + int i; + + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + + percpu_free_rwsem(&wq->submit_lock); + } +} + +static int idxd_setup_internals(struct idxd_device *idxd) +{ + struct device *dev = &idxd->pdev->dev; + int i; + + idxd->groups = devm_kcalloc(dev, idxd->max_groups, + sizeof(struct idxd_group), GFP_KERNEL); + if (!idxd->groups) + return -ENOMEM; + + for (i = 0; i < idxd->max_groups; i++) { + idxd->groups[i].idxd = idxd; + idxd->groups[i].id = i; + idxd->groups[i].tc_a = -1; + idxd->groups[i].tc_b = -1; + } + + idxd->wqs = devm_kcalloc(dev, idxd->max_wqs, sizeof(struct idxd_wq), + GFP_KERNEL); + if (!idxd->wqs) + return -ENOMEM; + + idxd->engines = devm_kcalloc(dev, idxd->max_engines, + sizeof(struct idxd_engine), GFP_KERNEL); + if (!idxd->engines) + return -ENOMEM; + + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + int rc; + + wq->id = i; + wq->idxd = idxd; + mutex_init(&wq->wq_lock); + atomic_set(&wq->dq_count, 0); + init_waitqueue_head(&wq->submit_waitq); + wq->idxd_cdev.minor = -1; + rc = percpu_init_rwsem(&wq->submit_lock); + if (rc < 0) { + idxd_wqs_free_lock(idxd); + return rc; + } + } + + for (i = 0; i < idxd->max_engines; i++) { + idxd->engines[i].idxd = idxd; + idxd->engines[i].id = i; + } + + return 0; +} + +static void idxd_read_table_offsets(struct idxd_device *idxd) +{ + union offsets_reg offsets; + struct device *dev = &idxd->pdev->dev; + + offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET); + offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET + + sizeof(u64)); + idxd->grpcfg_offset = offsets.grpcfg * 0x100; + dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset); + idxd->wqcfg_offset = offsets.wqcfg * 0x100; + dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n", + idxd->wqcfg_offset); + idxd->msix_perm_offset = offsets.msix_perm * 0x100; + dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n", + idxd->msix_perm_offset); + idxd->perfmon_offset = 
offsets.perfmon * 0x100; + dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset); +} + +static void idxd_read_caps(struct idxd_device *idxd) +{ + struct device *dev = &idxd->pdev->dev; + int i; + + /* reading generic capabilities */ + idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET); + dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits); + idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift; + dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes); + idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift; + dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size); + if (idxd->hw.gen_cap.config_en) + set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags); + + /* reading group capabilities */ + idxd->hw.group_cap.bits = + ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET); + dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits); + idxd->max_groups = idxd->hw.group_cap.num_groups; + dev_dbg(dev, "max groups: %u\n", idxd->max_groups); + idxd->max_tokens = idxd->hw.group_cap.total_tokens; + dev_dbg(dev, "max tokens: %u\n", idxd->max_tokens); + idxd->nr_tokens = idxd->max_tokens; + + /* read engine capabilities */ + idxd->hw.engine_cap.bits = + ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET); + dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits); + idxd->max_engines = idxd->hw.engine_cap.num_engines; + dev_dbg(dev, "max engines: %u\n", idxd->max_engines); + + /* read workqueue capabilities */ + idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET); + dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits); + idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size; + dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size); + idxd->max_wqs = idxd->hw.wq_cap.num_wqs; + dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs); + + /* reading operation capabilities */ + for (i = 0; i < 4; i++) { + idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base + + IDXD_OPCAP_OFFSET + i * sizeof(u64)); + dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]); + } +} + +static struct idxd_device *idxd_alloc(struct pci_dev *pdev, + void __iomem * const *iomap) +{ + struct device *dev = &pdev->dev; + struct idxd_device *idxd; + + idxd = devm_kzalloc(dev, sizeof(struct idxd_device), GFP_KERNEL); + if (!idxd) + return NULL; + + idxd->pdev = pdev; + idxd->reg_base = iomap[IDXD_MMIO_BAR]; + spin_lock_init(&idxd->dev_lock); + + return idxd; +} + +static int idxd_probe(struct idxd_device *idxd) +{ + struct pci_dev *pdev = idxd->pdev; + struct device *dev = &pdev->dev; + int rc; + + dev_dbg(dev, "%s entered and resetting device\n", __func__); + rc = idxd_device_reset(idxd); + if (rc < 0) + return rc; + dev_dbg(dev, "IDXD reset complete\n"); + + idxd_read_caps(idxd); + idxd_read_table_offsets(idxd); + + rc = idxd_setup_internals(idxd); + if (rc) + goto err_setup; + + rc = idxd_setup_interrupts(idxd); + if (rc) + goto err_setup; + + dev_dbg(dev, "IDXD interrupt setup complete.\n"); + + mutex_lock(&idxd_idr_lock); + idxd->id = idr_alloc(&idxd_idrs[idxd->type], idxd, 0, 0, GFP_KERNEL); + mutex_unlock(&idxd_idr_lock); + if (idxd->id < 0) { + rc = -ENOMEM; + goto err_idr_fail; + } + + idxd->major = idxd_cdev_get_major(idxd); + + dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id); + return 0; + + err_idr_fail: + idxd_mask_error_interrupts(idxd); + idxd_mask_msix_vectors(idxd); + err_setup: + return rc; +} + +static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + void __iomem * const *iomap; + struct device *dev 
= &pdev->dev; + struct idxd_device *idxd; + int rc; + unsigned int mask; + + rc = pcim_enable_device(pdev); + if (rc) + return rc; + + dev_dbg(dev, "Mapping BARs\n"); + mask = (1 << IDXD_MMIO_BAR); + rc = pcim_iomap_regions(pdev, mask, DRV_NAME); + if (rc) + return rc; + + iomap = pcim_iomap_table(pdev); + if (!iomap) + return -ENOMEM; + + dev_dbg(dev, "Set DMA masks\n"); + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (rc) + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) + return rc; + + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (rc) + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) + return rc; + + dev_dbg(dev, "Alloc IDXD context\n"); + idxd = idxd_alloc(pdev, iomap); + if (!idxd) + return -ENOMEM; + + idxd_set_type(idxd); + + dev_dbg(dev, "Set PCI master\n"); + pci_set_master(pdev); + pci_set_drvdata(pdev, idxd); + + idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET); + rc = idxd_probe(idxd); + if (rc) { + dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n"); + return -ENODEV; + } + + rc = idxd_setup_sysfs(idxd); + if (rc) { + dev_err(dev, "IDXD sysfs setup failed\n"); + return -ENODEV; + } + + idxd->state = IDXD_DEV_CONF_READY; + + dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n", + idxd->hw.version); + + return 0; +} + +static void idxd_flush_pending_llist(struct idxd_irq_entry *ie) +{ + struct idxd_desc *desc, *itr; + struct llist_node *head; + + head = llist_del_all(&ie->pending_llist); + if (!head) + return; + + llist_for_each_entry_safe(desc, itr, head, llnode) { + idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT); + idxd_free_desc(desc->wq, desc); + } +} + +static void idxd_flush_work_list(struct idxd_irq_entry *ie) +{ + struct idxd_desc *desc, *iter; + + list_for_each_entry_safe(desc, iter, &ie->work_list, list) { + list_del(&desc->list); + idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT); + idxd_free_desc(desc->wq, desc); + } +} + +static void idxd_shutdown(struct pci_dev *pdev) +{ + struct idxd_device *idxd = pci_get_drvdata(pdev); + int rc, i; + struct idxd_irq_entry *irq_entry; + int msixcnt = pci_msix_vec_count(pdev); + unsigned long flags; + + spin_lock_irqsave(&idxd->dev_lock, flags); + rc = idxd_device_disable(idxd); + spin_unlock_irqrestore(&idxd->dev_lock, flags); + if (rc) + dev_err(&pdev->dev, "Disabling device failed\n"); + + dev_dbg(&pdev->dev, "%s called\n", __func__); + idxd_mask_msix_vectors(idxd); + idxd_mask_error_interrupts(idxd); + + for (i = 0; i < msixcnt; i++) { + irq_entry = &idxd->irq_entries[i]; + synchronize_irq(idxd->msix_entries[i].vector); + if (i == 0) + continue; + idxd_flush_pending_llist(irq_entry); + idxd_flush_work_list(irq_entry); + } +} + +static void idxd_remove(struct pci_dev *pdev) +{ + struct idxd_device *idxd = pci_get_drvdata(pdev); + + dev_dbg(&pdev->dev, "%s called\n", __func__); + idxd_cleanup_sysfs(idxd); + idxd_shutdown(pdev); + idxd_wqs_free_lock(idxd); + mutex_lock(&idxd_idr_lock); + idr_remove(&idxd_idrs[idxd->type], idxd->id); + mutex_unlock(&idxd_idr_lock); +} + +static struct pci_driver idxd_pci_driver = { + .name = DRV_NAME, + .id_table = idxd_pci_tbl, + .probe = idxd_pci_probe, + .remove = idxd_remove, + .shutdown = idxd_shutdown, +}; + +static int __init idxd_init_module(void) +{ + int err, i; + + /* + * If the CPU does not support write512, there's no point in + * enumerating the device. We can not utilize it. 
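+	 * (Descriptor submission issues a single 64-byte write with
+	 * iosubmit_cmds512(), which relies on MOVDIR64B; see
+	 * idxd_submit_desc() in submit.c.)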
+ */ + if (!boot_cpu_has(X86_FEATURE_MOVDIR64B)) { + pr_warn("idxd driver failed to load without MOVDIR64B.\n"); + return -ENODEV; + } + + pr_info("%s: Intel(R) Accelerator Devices Driver %s\n", + DRV_NAME, IDXD_DRIVER_VERSION); + + mutex_init(&idxd_idr_lock); + for (i = 0; i < IDXD_TYPE_MAX; i++) + idr_init(&idxd_idrs[i]); + + err = idxd_register_bus_type(); + if (err < 0) + return err; + + err = idxd_register_driver(); + if (err < 0) + goto err_idxd_driver_register; + + err = idxd_cdev_register(); + if (err) + goto err_cdev_register; + + err = pci_register_driver(&idxd_pci_driver); + if (err) + goto err_pci_register; + + return 0; + +err_pci_register: + idxd_cdev_remove(); +err_cdev_register: + idxd_unregister_driver(); +err_idxd_driver_register: + idxd_unregister_bus_type(); + return err; +} +module_init(idxd_init_module); + +static void __exit idxd_exit_module(void) +{ + pci_unregister_driver(&idxd_pci_driver); + idxd_cdev_remove(); + idxd_unregister_bus_type(); +} +module_exit(idxd_exit_module); diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c new file mode 100644 index 000000000000..d6fcd2e60103 --- /dev/null +++ b/drivers/dma/idxd/irq.c @@ -0,0 +1,261 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/dmaengine.h> +#include <uapi/linux/idxd.h> +#include "../dmaengine.h" +#include "idxd.h" +#include "registers.h" + +void idxd_device_wqs_clear_state(struct idxd_device *idxd) +{ + int i; + + lockdep_assert_held(&idxd->dev_lock); + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + + wq->state = IDXD_WQ_DISABLED; + } +} + +static int idxd_restart(struct idxd_device *idxd) +{ + int i, rc; + + lockdep_assert_held(&idxd->dev_lock); + + rc = __idxd_device_reset(idxd); + if (rc < 0) + goto out; + + rc = idxd_device_config(idxd); + if (rc < 0) + goto out; + + rc = idxd_device_enable(idxd); + if (rc < 0) + goto out; + + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + + if (wq->state == IDXD_WQ_ENABLED) { + rc = idxd_wq_enable(wq); + if (rc < 0) { + dev_warn(&idxd->pdev->dev, + "Unable to re-enable wq %s\n", + dev_name(&wq->conf_dev)); + } + } + } + + return 0; + + out: + idxd_device_wqs_clear_state(idxd); + idxd->state = IDXD_DEV_HALTED; + return rc; +} + +irqreturn_t idxd_irq_handler(int vec, void *data) +{ + struct idxd_irq_entry *irq_entry = data; + struct idxd_device *idxd = irq_entry->idxd; + + idxd_mask_msix_vector(idxd, irq_entry->id); + return IRQ_WAKE_THREAD; +} + +irqreturn_t idxd_misc_thread(int vec, void *data) +{ + struct idxd_irq_entry *irq_entry = data; + struct idxd_device *idxd = irq_entry->idxd; + struct device *dev = &idxd->pdev->dev; + union gensts_reg gensts; + u32 cause, val = 0; + int i, rc; + bool err = false; + + cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET); + + if (cause & IDXD_INTC_ERR) { + spin_lock_bh(&idxd->dev_lock); + for (i = 0; i < 4; i++) + idxd->sw_err.bits[i] = ioread64(idxd->reg_base + + IDXD_SWERR_OFFSET + i * sizeof(u64)); + iowrite64(IDXD_SWERR_ACK, idxd->reg_base + IDXD_SWERR_OFFSET); + + if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) { + int id = idxd->sw_err.wq_idx; + struct idxd_wq *wq = &idxd->wqs[id]; + + if (wq->type == IDXD_WQT_USER) + wake_up_interruptible(&wq->idxd_cdev.err_queue); + } else { + int i; + + for (i = 0; i < idxd->max_wqs; i++) { + struct 
idxd_wq *wq = &idxd->wqs[i];
+
+				if (wq->type == IDXD_WQT_USER)
+					wake_up_interruptible(&wq->idxd_cdev.err_queue);
+			}
+		}
+
+		spin_unlock_bh(&idxd->dev_lock);
+		val |= IDXD_INTC_ERR;
+
+		for (i = 0; i < 4; i++)
+			dev_warn(dev, "err[%d]: %#16.16llx\n",
+				 i, idxd->sw_err.bits[i]);
+		err = true;
+	}
+
+	if (cause & IDXD_INTC_CMD) {
+		/* Driver does use command interrupts */
+		val |= IDXD_INTC_CMD;
+	}
+
+	if (cause & IDXD_INTC_OCCUPY) {
+		/* Driver does not utilize occupancy interrupt */
+		val |= IDXD_INTC_OCCUPY;
+	}
+
+	if (cause & IDXD_INTC_PERFMON_OVFL) {
+		/*
+		 * Driver does not utilize perfmon counter overflow interrupt
+		 * yet.
+		 */
+		val |= IDXD_INTC_PERFMON_OVFL;
+	}
+
+	val ^= cause;
+	if (val)
+		dev_warn_once(dev, "Unexpected interrupt cause bits set: %#x\n",
+			      val);
+
+	iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+	if (!err)
+		return IRQ_HANDLED;
+
+	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
+	if (gensts.state == IDXD_DEVICE_STATE_HALT) {
+		spin_lock_bh(&idxd->dev_lock);
+		if (gensts.reset_type == IDXD_DEVICE_RESET_SOFTWARE) {
+			rc = idxd_restart(idxd);
+			if (rc < 0)
+				dev_err(&idxd->pdev->dev,
+					"idxd restart failed, device halted.\n");
+		} else {
+			idxd_device_wqs_clear_state(idxd);
+			idxd->state = IDXD_DEV_HALTED;
+			dev_err(&idxd->pdev->dev,
+				"idxd halted, need %s.\n",
+				gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
+				"FLR" : "system reset");
+		}
+		spin_unlock_bh(&idxd->dev_lock);
+	}
+
+	idxd_unmask_msix_vector(idxd, irq_entry->id);
+	return IRQ_HANDLED;
+}
+
+static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
+				     int *processed)
+{
+	struct idxd_desc *desc, *t;
+	struct llist_node *head;
+	int queued = 0;
+
+	head = llist_del_all(&irq_entry->pending_llist);
+	if (!head)
+		return 0;
+
+	llist_for_each_entry_safe(desc, t, head, llnode) {
+		if (desc->completion->status) {
+			idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
+			idxd_free_desc(desc->wq, desc);
+			(*processed)++;
+		} else {
+			list_add_tail(&desc->list, &irq_entry->work_list);
+			queued++;
+		}
+	}
+
+	return queued;
+}
+
+static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
+				 int *processed)
+{
+	struct list_head *node, *next;
+	int queued = 0;
+
+	if (list_empty(&irq_entry->work_list))
+		return 0;
+
+	list_for_each_safe(node, next, &irq_entry->work_list) {
+		struct idxd_desc *desc =
+			container_of(node, struct idxd_desc, list);
+
+		if (desc->completion->status) {
+			list_del(&desc->list);
+			/* process and callback */
+			idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
+			idxd_free_desc(desc->wq, desc);
+			(*processed)++;
+		} else {
+			queued++;
+		}
+	}
+
+	return queued;
+}
+
+irqreturn_t idxd_wq_thread(int irq, void *data)
+{
+	struct idxd_irq_entry *irq_entry = data;
+	int rc, processed = 0, retry = 0;
+
+	/*
+	 * There are two lists we are processing. The pending_llist is where
+	 * the submitter adds each submitted descriptor after sending it to
+	 * the workqueue. It's a lockless singly-linked list. The work_list
+	 * is the common Linux doubly-linked list. We are in a scenario of
+	 * multiple producers and a single consumer. The producers are all
+	 * the kernel submitters of descriptors, and the consumer is the
+	 * kernel irq handler thread for the MSI-X vector when using threaded
+	 * irq. To work within the restrictions of llist and remain lockless,
+	 * we take the following steps:
+	 * 1. Iterate through the work_list and process any completed
+	 *    descriptors. Delete the completed entries during iteration.
+	 * 2. llist_del_all() from the pending list.
+ * 3. Iterate through the llist that was deleted from the pending list + * and process the completed entries. + * 4. If the entry is still waiting on hardware, list_add_tail() to + * the work_list. + * 5. Repeat until no more descriptors. + */ + do { + rc = irq_process_work_list(irq_entry, &processed); + if (rc != 0) { + retry++; + continue; + } + + rc = irq_process_pending_llist(irq_entry, &processed); + } while (rc != 0 && retry != 10); + + idxd_unmask_msix_vector(irq_entry->idxd, irq_entry->id); + + if (processed == 0) + return IRQ_NONE; + + return IRQ_HANDLED; +} diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h new file mode 100644 index 000000000000..a39e7ae6b3d9 --- /dev/null +++ b/drivers/dma/idxd/registers.h @@ -0,0 +1,336 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */ +#ifndef _IDXD_REGISTERS_H_ +#define _IDXD_REGISTERS_H_ + +/* PCI Config */ +#define PCI_DEVICE_ID_INTEL_DSA_SPR0 0x0b25 + +#define IDXD_MMIO_BAR 0 +#define IDXD_WQ_BAR 2 +#define IDXD_PORTAL_SIZE 0x4000 + +/* MMIO Device BAR0 Registers */ +#define IDXD_VER_OFFSET 0x00 +#define IDXD_VER_MAJOR_MASK 0xf0 +#define IDXD_VER_MINOR_MASK 0x0f +#define GET_IDXD_VER_MAJOR(x) (((x) & IDXD_VER_MAJOR_MASK) >> 4) +#define GET_IDXD_VER_MINOR(x) ((x) & IDXD_VER_MINOR_MASK) + +union gen_cap_reg { + struct { + u64 block_on_fault:1; + u64 overlap_copy:1; + u64 cache_control_mem:1; + u64 cache_control_cache:1; + u64 rsvd:3; + u64 int_handle_req:1; + u64 dest_readback:1; + u64 drain_readback:1; + u64 rsvd2:6; + u64 max_xfer_shift:5; + u64 max_batch_shift:4; + u64 max_ims_mult:6; + u64 config_en:1; + u64 max_descs_per_engine:8; + u64 rsvd3:24; + }; + u64 bits; +} __packed; +#define IDXD_GENCAP_OFFSET 0x10 + +union wq_cap_reg { + struct { + u64 total_wq_size:16; + u64 num_wqs:8; + u64 rsvd:24; + u64 shared_mode:1; + u64 dedicated_mode:1; + u64 rsvd2:1; + u64 priority:1; + u64 occupancy:1; + u64 occupancy_int:1; + u64 rsvd3:10; + }; + u64 bits; +} __packed; +#define IDXD_WQCAP_OFFSET 0x20 + +union group_cap_reg { + struct { + u64 num_groups:8; + u64 total_tokens:8; + u64 token_en:1; + u64 token_limit:1; + u64 rsvd:46; + }; + u64 bits; +} __packed; +#define IDXD_GRPCAP_OFFSET 0x30 + +union engine_cap_reg { + struct { + u64 num_engines:8; + u64 rsvd:56; + }; + u64 bits; +} __packed; + +#define IDXD_ENGCAP_OFFSET 0x38 + +#define IDXD_OPCAP_NOOP 0x0001 +#define IDXD_OPCAP_BATCH 0x0002 +#define IDXD_OPCAP_MEMMOVE 0x0008 +struct opcap { + u64 bits[4]; +}; + +#define IDXD_OPCAP_OFFSET 0x40 + +#define IDXD_TABLE_OFFSET 0x60 +union offsets_reg { + struct { + u64 grpcfg:16; + u64 wqcfg:16; + u64 msix_perm:16; + u64 ims:16; + u64 perfmon:16; + u64 rsvd:48; + }; + u64 bits[2]; +} __packed; + +#define IDXD_GENCFG_OFFSET 0x80 +union gencfg_reg { + struct { + u32 token_limit:8; + u32 rsvd:4; + u32 user_int_en:1; + u32 rsvd2:19; + }; + u32 bits; +} __packed; + +#define IDXD_GENCTRL_OFFSET 0x88 +union genctrl_reg { + struct { + u32 softerr_int_en:1; + u32 rsvd:31; + }; + u32 bits; +} __packed; + +#define IDXD_GENSTATS_OFFSET 0x90 +union gensts_reg { + struct { + u32 state:2; + u32 reset_type:2; + u32 rsvd:28; + }; + u32 bits; +} __packed; + +enum idxd_device_status_state { + IDXD_DEVICE_STATE_DISABLED = 0, + IDXD_DEVICE_STATE_ENABLED, + IDXD_DEVICE_STATE_DRAIN, + IDXD_DEVICE_STATE_HALT, +}; + +enum idxd_device_reset_type { + IDXD_DEVICE_RESET_SOFTWARE = 0, + IDXD_DEVICE_RESET_FLR, + IDXD_DEVICE_RESET_WARM, + IDXD_DEVICE_RESET_COLD, +}; + +#define IDXD_INTCAUSE_OFFSET 0x98 
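+/*
+ * Interrupt cause bits, reported at IDXD_INTCAUSE_OFFSET. The misc
+ * interrupt thread acknowledges a cause by writing the handled bits
+ * back to the same register (see idxd_misc_thread() in irq.c).
+ */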
+#define IDXD_INTC_ERR 0x01 +#define IDXD_INTC_CMD 0x02 +#define IDXD_INTC_OCCUPY 0x04 +#define IDXD_INTC_PERFMON_OVFL 0x08 + +#define IDXD_CMD_OFFSET 0xa0 +union idxd_command_reg { + struct { + u32 operand:20; + u32 cmd:5; + u32 rsvd:6; + u32 int_req:1; + }; + u32 bits; +} __packed; + +enum idxd_cmd { + IDXD_CMD_ENABLE_DEVICE = 1, + IDXD_CMD_DISABLE_DEVICE, + IDXD_CMD_DRAIN_ALL, + IDXD_CMD_ABORT_ALL, + IDXD_CMD_RESET_DEVICE, + IDXD_CMD_ENABLE_WQ, + IDXD_CMD_DISABLE_WQ, + IDXD_CMD_DRAIN_WQ, + IDXD_CMD_ABORT_WQ, + IDXD_CMD_RESET_WQ, + IDXD_CMD_DRAIN_PASID, + IDXD_CMD_ABORT_PASID, + IDXD_CMD_REQUEST_INT_HANDLE, +}; + +#define IDXD_CMDSTS_OFFSET 0xa8 +union cmdsts_reg { + struct { + u8 err; + u16 result; + u8 rsvd:7; + u8 active:1; + }; + u32 bits; +} __packed; +#define IDXD_CMDSTS_ACTIVE 0x80000000 + +enum idxd_cmdsts_err { + IDXD_CMDSTS_SUCCESS = 0, + IDXD_CMDSTS_INVAL_CMD, + IDXD_CMDSTS_INVAL_WQIDX, + IDXD_CMDSTS_HW_ERR, + /* enable device errors */ + IDXD_CMDSTS_ERR_DEV_ENABLED = 0x10, + IDXD_CMDSTS_ERR_CONFIG, + IDXD_CMDSTS_ERR_BUSMASTER_EN, + IDXD_CMDSTS_ERR_PASID_INVAL, + IDXD_CMDSTS_ERR_WQ_SIZE_ERANGE, + IDXD_CMDSTS_ERR_GRP_CONFIG, + IDXD_CMDSTS_ERR_GRP_CONFIG2, + IDXD_CMDSTS_ERR_GRP_CONFIG3, + IDXD_CMDSTS_ERR_GRP_CONFIG4, + /* enable wq errors */ + IDXD_CMDSTS_ERR_DEV_NOTEN = 0x20, + IDXD_CMDSTS_ERR_WQ_ENABLED, + IDXD_CMDSTS_ERR_WQ_SIZE, + IDXD_CMDSTS_ERR_WQ_PRIOR, + IDXD_CMDSTS_ERR_WQ_MODE, + IDXD_CMDSTS_ERR_BOF_EN, + IDXD_CMDSTS_ERR_PASID_EN, + IDXD_CMDSTS_ERR_MAX_BATCH_SIZE, + IDXD_CMDSTS_ERR_MAX_XFER_SIZE, + /* disable device errors */ + IDXD_CMDSTS_ERR_DIS_DEV_EN = 0x31, + /* disable WQ, drain WQ, abort WQ, reset WQ */ + IDXD_CMDSTS_ERR_DEV_NOT_EN, + /* request interrupt handle */ + IDXD_CMDSTS_ERR_INVAL_INT_IDX = 0x41, + IDXD_CMDSTS_ERR_NO_HANDLE, +}; + +#define IDXD_SWERR_OFFSET 0xc0 +#define IDXD_SWERR_VALID 0x00000001 +#define IDXD_SWERR_OVERFLOW 0x00000002 +#define IDXD_SWERR_ACK (IDXD_SWERR_VALID | IDXD_SWERR_OVERFLOW) +union sw_err_reg { + struct { + u64 valid:1; + u64 overflow:1; + u64 desc_valid:1; + u64 wq_idx_valid:1; + u64 batch:1; + u64 fault_rw:1; + u64 priv:1; + u64 rsvd:1; + u64 error:8; + u64 wq_idx:8; + u64 rsvd2:8; + u64 operation:8; + u64 pasid:20; + u64 rsvd3:4; + + u64 batch_idx:16; + u64 rsvd4:16; + u64 invalid_flags:32; + + u64 fault_addr; + + u64 rsvd5; + }; + u64 bits[4]; +} __packed; + +union msix_perm { + struct { + u32 rsvd:2; + u32 ignore:1; + u32 pasid_en:1; + u32 rsvd2:8; + u32 pasid:20; + }; + u32 bits; +} __packed; + +union group_flags { + struct { + u32 tc_a:3; + u32 tc_b:3; + u32 rsvd:1; + u32 use_token_limit:1; + u32 tokens_reserved:8; + u32 rsvd2:4; + u32 tokens_allowed:8; + u32 rsvd3:4; + }; + u32 bits; +} __packed; + +struct grpcfg { + u64 wqs[4]; + u64 engines; + union group_flags flags; +} __packed; + +union wqcfg { + struct { + /* bytes 0-3 */ + u16 wq_size; + u16 rsvd; + + /* bytes 4-7 */ + u16 wq_thresh; + u16 rsvd1; + + /* bytes 8-11 */ + u32 mode:1; /* shared or dedicated */ + u32 bof:1; /* block on fault */ + u32 rsvd2:2; + u32 priority:4; + u32 pasid:20; + u32 pasid_en:1; + u32 priv:1; + u32 rsvd3:2; + + /* bytes 12-15 */ + u32 max_xfer_shift:5; + u32 max_batch_shift:4; + u32 rsvd4:23; + + /* bytes 16-19 */ + u16 occupancy_inth; + u16 occupancy_table_sel:1; + u16 rsvd5:15; + + /* bytes 20-23 */ + u16 occupancy_limit; + u16 occupancy_int_en:1; + u16 rsvd6:15; + + /* bytes 24-27 */ + u16 occupancy; + u16 occupancy_int:1; + u16 rsvd7:12; + u16 mode_support:1; + u16 wq_state:2; + + /* bytes 28-31 */ + u32 rsvd8; + }; + u32 bits[8]; +} 
__packed; +#endif diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c new file mode 100644 index 000000000000..45a0c5869a0a --- /dev/null +++ b/drivers/dma/idxd/submit.c @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <uapi/linux/idxd.h> +#include "idxd.h" +#include "registers.h" + +struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype) +{ + struct idxd_desc *desc; + int idx; + struct idxd_device *idxd = wq->idxd; + + if (idxd->state != IDXD_DEV_ENABLED) + return ERR_PTR(-EIO); + + if (optype == IDXD_OP_BLOCK) + percpu_down_read(&wq->submit_lock); + else if (!percpu_down_read_trylock(&wq->submit_lock)) + return ERR_PTR(-EBUSY); + + if (!atomic_add_unless(&wq->dq_count, 1, wq->size)) { + int rc; + + if (optype == IDXD_OP_NONBLOCK) { + percpu_up_read(&wq->submit_lock); + return ERR_PTR(-EAGAIN); + } + + percpu_up_read(&wq->submit_lock); + percpu_down_write(&wq->submit_lock); + rc = wait_event_interruptible(wq->submit_waitq, + atomic_add_unless(&wq->dq_count, + 1, wq->size) || + idxd->state != IDXD_DEV_ENABLED); + percpu_up_write(&wq->submit_lock); + if (rc < 0) + return ERR_PTR(-EINTR); + if (idxd->state != IDXD_DEV_ENABLED) + return ERR_PTR(-EIO); + } else { + percpu_up_read(&wq->submit_lock); + } + + idx = sbitmap_get(&wq->sbmap, 0, false); + if (idx < 0) { + atomic_dec(&wq->dq_count); + return ERR_PTR(-EAGAIN); + } + + desc = wq->descs[idx]; + memset(desc->hw, 0, sizeof(struct dsa_hw_desc)); + memset(desc->completion, 0, sizeof(struct dsa_completion_record)); + return desc; +} + +void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc) +{ + atomic_dec(&wq->dq_count); + + sbitmap_clear_bit(&wq->sbmap, desc->id); + wake_up(&wq->submit_waitq); +} + +int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc) +{ + struct idxd_device *idxd = wq->idxd; + int vec = desc->hw->int_handle; + void __iomem *portal; + + if (idxd->state != IDXD_DEV_ENABLED) + return -EIO; + + portal = wq->dportal + idxd_get_wq_portal_offset(IDXD_PORTAL_UNLIMITED); + /* + * The wmb() flushes writes to coherent DMA data before possibly + * triggering a DMA read. The wmb() is necessary even on UP because + * the recipient is a device. + */ + wmb(); + iosubmit_cmds512(portal, desc->hw, 1); + + /* + * Pending the descriptor to the lockless list for the irq_entry + * that we designated the descriptor to. + */ + if (desc->hw->flags & IDXD_OP_FLAG_RCI) + llist_add(&desc->llnode, + &idxd->irq_entries[vec].pending_llist); + + return 0; +} diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c new file mode 100644 index 000000000000..849c50ab939a --- /dev/null +++ b/drivers/dma/idxd/sysfs.c @@ -0,0 +1,1528 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2019 Intel Corporation. All rights rsvd. 
*/ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/device.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <uapi/linux/idxd.h> +#include "registers.h" +#include "idxd.h" + +static char *idxd_wq_type_names[] = { + [IDXD_WQT_NONE] = "none", + [IDXD_WQT_KERNEL] = "kernel", + [IDXD_WQT_USER] = "user", +}; + +static void idxd_conf_device_release(struct device *dev) +{ + dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev)); +} + +static struct device_type idxd_group_device_type = { + .name = "group", + .release = idxd_conf_device_release, +}; + +static struct device_type idxd_wq_device_type = { + .name = "wq", + .release = idxd_conf_device_release, +}; + +static struct device_type idxd_engine_device_type = { + .name = "engine", + .release = idxd_conf_device_release, +}; + +static struct device_type dsa_device_type = { + .name = "dsa", + .release = idxd_conf_device_release, +}; + +static inline bool is_dsa_dev(struct device *dev) +{ + return dev ? dev->type == &dsa_device_type : false; +} + +static inline bool is_idxd_dev(struct device *dev) +{ + return is_dsa_dev(dev); +} + +static inline bool is_idxd_wq_dev(struct device *dev) +{ + return dev ? dev->type == &idxd_wq_device_type : false; +} + +static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq) +{ + if (wq->type == IDXD_WQT_KERNEL && + strcmp(wq->name, "dmaengine") == 0) + return true; + return false; +} + +static inline bool is_idxd_wq_cdev(struct idxd_wq *wq) +{ + return wq->type == IDXD_WQT_USER ? true : false; +} + +static int idxd_config_bus_match(struct device *dev, + struct device_driver *drv) +{ + int matched = 0; + + if (is_idxd_dev(dev)) { + struct idxd_device *idxd = confdev_to_idxd(dev); + + if (idxd->state != IDXD_DEV_CONF_READY) + return 0; + matched = 1; + } else if (is_idxd_wq_dev(dev)) { + struct idxd_wq *wq = confdev_to_wq(dev); + struct idxd_device *idxd = wq->idxd; + + if (idxd->state < IDXD_DEV_CONF_READY) + return 0; + + if (wq->state != IDXD_WQ_DISABLED) { + dev_dbg(dev, "%s not disabled\n", dev_name(dev)); + return 0; + } + matched = 1; + } + + if (matched) + dev_dbg(dev, "%s matched\n", dev_name(dev)); + + return matched; +} + +static int idxd_config_bus_probe(struct device *dev) +{ + int rc; + unsigned long flags; + + dev_dbg(dev, "%s called\n", __func__); + + if (is_idxd_dev(dev)) { + struct idxd_device *idxd = confdev_to_idxd(dev); + + if (idxd->state != IDXD_DEV_CONF_READY) { + dev_warn(dev, "Device not ready for config\n"); + return -EBUSY; + } + + if (!try_module_get(THIS_MODULE)) + return -ENXIO; + + spin_lock_irqsave(&idxd->dev_lock, flags); + + /* Perform IDXD configuration and enabling */ + rc = idxd_device_config(idxd); + if (rc < 0) { + spin_unlock_irqrestore(&idxd->dev_lock, flags); + dev_warn(dev, "Device config failed: %d\n", rc); + return rc; + } + + /* start device */ + rc = idxd_device_enable(idxd); + if (rc < 0) { + spin_unlock_irqrestore(&idxd->dev_lock, flags); + dev_warn(dev, "Device enable failed: %d\n", rc); + return rc; + } + + spin_unlock_irqrestore(&idxd->dev_lock, flags); + dev_info(dev, "Device %s enabled\n", dev_name(dev)); + + rc = idxd_register_dma_device(idxd); + if (rc < 0) { + spin_unlock_irqrestore(&idxd->dev_lock, flags); + dev_dbg(dev, "Failed to register dmaengine device\n"); + return rc; + } + return 0; + } else if (is_idxd_wq_dev(dev)) { + struct idxd_wq *wq = confdev_to_wq(dev); + struct idxd_device *idxd = wq->idxd; + + mutex_lock(&wq->wq_lock); + + if (idxd->state != IDXD_DEV_ENABLED) { + 
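+			/* A wq cannot be enabled before its device is enabled. */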
mutex_unlock(&wq->wq_lock);
+			dev_warn(dev, "Enabling while device not enabled.\n");
+			return -EPERM;
+		}
+
+		if (wq->state != IDXD_WQ_DISABLED) {
+			mutex_unlock(&wq->wq_lock);
+			dev_warn(dev, "WQ %d already enabled.\n", wq->id);
+			return -EBUSY;
+		}
+
+		if (!wq->group) {
+			mutex_unlock(&wq->wq_lock);
+			dev_warn(dev, "WQ not attached to group.\n");
+			return -EINVAL;
+		}
+
+		if (strlen(wq->name) == 0) {
+			mutex_unlock(&wq->wq_lock);
+			dev_warn(dev, "WQ name not set.\n");
+			return -EINVAL;
+		}
+
+		rc = idxd_wq_alloc_resources(wq);
+		if (rc < 0) {
+			mutex_unlock(&wq->wq_lock);
+			dev_warn(dev, "WQ resource alloc failed\n");
+			return rc;
+		}
+
+		spin_lock_irqsave(&idxd->dev_lock, flags);
+		rc = idxd_device_config(idxd);
+		if (rc < 0) {
+			spin_unlock_irqrestore(&idxd->dev_lock, flags);
+			mutex_unlock(&wq->wq_lock);
+			dev_warn(dev, "Writing WQ %d config failed: %d\n",
+				 wq->id, rc);
+			return rc;
+		}
+
+		rc = idxd_wq_enable(wq);
+		if (rc < 0) {
+			spin_unlock_irqrestore(&idxd->dev_lock, flags);
+			mutex_unlock(&wq->wq_lock);
+			dev_warn(dev, "WQ %d enabling failed: %d\n",
+				 wq->id, rc);
+			return rc;
+		}
+		spin_unlock_irqrestore(&idxd->dev_lock, flags);
+
+		rc = idxd_wq_map_portal(wq);
+		if (rc < 0) {
+			dev_warn(dev, "wq portal mapping failed: %d\n", rc);
+			rc = idxd_wq_disable(wq);
+			if (rc < 0)
+				dev_warn(dev, "IDXD wq disable failed\n");
+			mutex_unlock(&wq->wq_lock);
+			return rc;
+		}
+
+		wq->client_count = 0;
+
+		dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));
+
+		if (is_idxd_wq_dmaengine(wq)) {
+			rc = idxd_register_dma_channel(wq);
+			if (rc < 0) {
+				dev_dbg(dev, "DMA channel register failed\n");
+				mutex_unlock(&wq->wq_lock);
+				return rc;
+			}
+		} else if (is_idxd_wq_cdev(wq)) {
+			rc = idxd_wq_add_cdev(wq);
+			if (rc < 0) {
+				dev_dbg(dev, "Cdev creation failed\n");
+				mutex_unlock(&wq->wq_lock);
+				return rc;
+			}
+		}
+
+		mutex_unlock(&wq->wq_lock);
+		return 0;
+	}
+
+	return -ENODEV;
+}
+
+static void disable_wq(struct idxd_wq *wq)
+{
+	struct idxd_device *idxd = wq->idxd;
+	struct device *dev = &idxd->pdev->dev;
+	unsigned long flags;
+	int rc;
+
+	mutex_lock(&wq->wq_lock);
+	dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
+	if (wq->state == IDXD_WQ_DISABLED) {
+		mutex_unlock(&wq->wq_lock);
+		return;
+	}
+
+	if (is_idxd_wq_dmaengine(wq))
+		idxd_unregister_dma_channel(wq);
+	else if (is_idxd_wq_cdev(wq))
+		idxd_wq_del_cdev(wq);
+
+	if (idxd_wq_refcount(wq))
+		dev_warn(dev, "Clients still have a claim on wq %d: %d\n",
+			 wq->id, idxd_wq_refcount(wq));
+
+	idxd_wq_unmap_portal(wq);
+
+	spin_lock_irqsave(&idxd->dev_lock, flags);
+	rc = idxd_wq_disable(wq);
+	spin_unlock_irqrestore(&idxd->dev_lock, flags);
+
+	idxd_wq_free_resources(wq);
+	wq->client_count = 0;
+	mutex_unlock(&wq->wq_lock);
+
+	if (rc < 0)
+		dev_warn(dev, "Failed to disable %s: %d\n",
+			 dev_name(&wq->conf_dev), rc);
+	else
+		dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
+}
+
+static int idxd_config_bus_remove(struct device *dev)
+{
+	int rc;
+	unsigned long flags;
+
+	dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));
+
+	/* disable workqueue here */
+	if (is_idxd_wq_dev(dev)) {
+		struct idxd_wq *wq = confdev_to_wq(dev);
+
+		disable_wq(wq);
+	} else if (is_idxd_dev(dev)) {
+		struct idxd_device *idxd = confdev_to_idxd(dev);
+		int i;
+
+		dev_dbg(dev, "%s removing dev %s\n", __func__,
+			dev_name(&idxd->conf_dev));
+		for (i = 0; i < idxd->max_wqs; i++) {
+			struct idxd_wq *wq = &idxd->wqs[i];
+
+			if (wq->state == IDXD_WQ_DISABLED)
+				continue;
+
dev_warn(dev, "Active wq %d on disable %s.\n", i, + dev_name(&idxd->conf_dev)); + device_release_driver(&wq->conf_dev); + } + + idxd_unregister_dma_device(idxd); + spin_lock_irqsave(&idxd->dev_lock, flags); + rc = idxd_device_disable(idxd); + spin_unlock_irqrestore(&idxd->dev_lock, flags); + module_put(THIS_MODULE); + if (rc < 0) + dev_warn(dev, "Device disable failed\n"); + else + dev_info(dev, "Device %s disabled\n", dev_name(dev)); + + } + + return 0; +} + +static void idxd_config_bus_shutdown(struct device *dev) +{ + dev_dbg(dev, "%s called\n", __func__); +} + +struct bus_type dsa_bus_type = { + .name = "dsa", + .match = idxd_config_bus_match, + .probe = idxd_config_bus_probe, + .remove = idxd_config_bus_remove, + .shutdown = idxd_config_bus_shutdown, +}; + +static struct bus_type *idxd_bus_types[] = { + &dsa_bus_type +}; + +static struct idxd_device_driver dsa_drv = { + .drv = { + .name = "dsa", + .bus = &dsa_bus_type, + .owner = THIS_MODULE, + .mod_name = KBUILD_MODNAME, + }, +}; + +static struct idxd_device_driver *idxd_drvs[] = { + &dsa_drv +}; + +struct bus_type *idxd_get_bus_type(struct idxd_device *idxd) +{ + return idxd_bus_types[idxd->type]; +} + +static struct device_type *idxd_get_device_type(struct idxd_device *idxd) +{ + if (idxd->type == IDXD_TYPE_DSA) + return &dsa_device_type; + else + return NULL; +} + +/* IDXD generic driver setup */ +int idxd_register_driver(void) +{ + int i, rc; + + for (i = 0; i < IDXD_TYPE_MAX; i++) { + rc = driver_register(&idxd_drvs[i]->drv); + if (rc < 0) + goto drv_fail; + } + + return 0; + +drv_fail: + for (; i > 0; i--) + driver_unregister(&idxd_drvs[i]->drv); + return rc; +} + +void idxd_unregister_driver(void) +{ + int i; + + for (i = 0; i < IDXD_TYPE_MAX; i++) + driver_unregister(&idxd_drvs[i]->drv); +} + +/* IDXD engine attributes */ +static ssize_t engine_group_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_engine *engine = + container_of(dev, struct idxd_engine, conf_dev); + + if (engine->group) + return sprintf(buf, "%d\n", engine->group->id); + else + return sprintf(buf, "%d\n", -1); +} + +static ssize_t engine_group_id_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct idxd_engine *engine = + container_of(dev, struct idxd_engine, conf_dev); + struct idxd_device *idxd = engine->idxd; + long id; + int rc; + struct idxd_group *prevg, *group; + + rc = kstrtol(buf, 10, &id); + if (rc < 0) + return -EINVAL; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (id > idxd->max_groups - 1 || id < -1) + return -EINVAL; + + if (id == -1) { + if (engine->group) { + engine->group->num_engines--; + engine->group = NULL; + } + return count; + } + + group = &idxd->groups[id]; + prevg = engine->group; + + if (prevg) + prevg->num_engines--; + engine->group = &idxd->groups[id]; + engine->group->num_engines++; + + return count; +} + +static struct device_attribute dev_attr_engine_group = + __ATTR(group_id, 0644, engine_group_id_show, + engine_group_id_store); + +static struct attribute *idxd_engine_attributes[] = { + &dev_attr_engine_group.attr, + NULL, +}; + +static const struct attribute_group idxd_engine_attribute_group = { + .attrs = idxd_engine_attributes, +}; + +static const struct attribute_group *idxd_engine_attribute_groups[] = { + &idxd_engine_attribute_group, + NULL, +}; + +/* Group attributes */ + +static void idxd_set_free_tokens(struct idxd_device *idxd) +{ + int i, tokens; + + for (i = 0, tokens = 0; i < 
idxd->max_groups; i++) { + struct idxd_group *g = &idxd->groups[i]; + + tokens += g->tokens_reserved; + } + + idxd->nr_tokens = idxd->max_tokens - tokens; +} + +static ssize_t group_tokens_reserved_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + + return sprintf(buf, "%u\n", group->tokens_reserved); +} + +static ssize_t group_tokens_reserved_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + struct idxd_device *idxd = group->idxd; + unsigned long val; + int rc; + + rc = kstrtoul(buf, 10, &val); + if (rc < 0) + return -EINVAL; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (idxd->state == IDXD_DEV_ENABLED) + return -EPERM; + + if (idxd->token_limit == 0) + return -EPERM; + + if (val > idxd->max_tokens) + return -EINVAL; + + if (val > idxd->nr_tokens) + return -EINVAL; + + group->tokens_reserved = val; + idxd_set_free_tokens(idxd); + return count; +} + +static struct device_attribute dev_attr_group_tokens_reserved = + __ATTR(tokens_reserved, 0644, group_tokens_reserved_show, + group_tokens_reserved_store); + +static ssize_t group_tokens_allowed_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + + return sprintf(buf, "%u\n", group->tokens_allowed); +} + +static ssize_t group_tokens_allowed_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + struct idxd_device *idxd = group->idxd; + unsigned long val; + int rc; + + rc = kstrtoul(buf, 10, &val); + if (rc < 0) + return -EINVAL; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (idxd->state == IDXD_DEV_ENABLED) + return -EPERM; + + if (idxd->token_limit == 0) + return -EPERM; + if (val < 4 * group->num_engines || + val > group->tokens_reserved + idxd->nr_tokens) + return -EINVAL; + + group->tokens_allowed = val; + return count; +} + +static struct device_attribute dev_attr_group_tokens_allowed = + __ATTR(tokens_allowed, 0644, group_tokens_allowed_show, + group_tokens_allowed_store); + +static ssize_t group_use_token_limit_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + + return sprintf(buf, "%u\n", group->use_token_limit); +} + +static ssize_t group_use_token_limit_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + struct idxd_device *idxd = group->idxd; + unsigned long val; + int rc; + + rc = kstrtoul(buf, 10, &val); + if (rc < 0) + return -EINVAL; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (idxd->state == IDXD_DEV_ENABLED) + return -EPERM; + + if (idxd->token_limit == 0) + return -EPERM; + + group->use_token_limit = !!val; + return count; +} + +static struct device_attribute dev_attr_group_use_token_limit = + __ATTR(use_token_limit, 0644, group_use_token_limit_show, + group_use_token_limit_store); + +static ssize_t group_engines_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_group *group = + container_of(dev, struct 
idxd_group, conf_dev); + int i, rc = 0; + char *tmp = buf; + struct idxd_device *idxd = group->idxd; + + for (i = 0; i < idxd->max_engines; i++) { + struct idxd_engine *engine = &idxd->engines[i]; + + if (!engine->group) + continue; + + if (engine->group->id == group->id) + rc += sprintf(tmp + rc, "engine%d.%d ", + idxd->id, engine->id); + } + + rc--; + rc += sprintf(tmp + rc, "\n"); + + return rc; +} + +static struct device_attribute dev_attr_group_engines = + __ATTR(engines, 0444, group_engines_show, NULL); + +static ssize_t group_work_queues_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + int i, rc = 0; + char *tmp = buf; + struct idxd_device *idxd = group->idxd; + + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + + if (!wq->group) + continue; + + if (wq->group->id == group->id) + rc += sprintf(tmp + rc, "wq%d.%d ", + idxd->id, wq->id); + } + + rc--; + rc += sprintf(tmp + rc, "\n"); + + return rc; +} + +static struct device_attribute dev_attr_group_work_queues = + __ATTR(work_queues, 0444, group_work_queues_show, NULL); + +static ssize_t group_traffic_class_a_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + + return sprintf(buf, "%d\n", group->tc_a); +} + +static ssize_t group_traffic_class_a_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + struct idxd_device *idxd = group->idxd; + long val; + int rc; + + rc = kstrtol(buf, 10, &val); + if (rc < 0) + return -EINVAL; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (idxd->state == IDXD_DEV_ENABLED) + return -EPERM; + + if (val < 0 || val > 7) + return -EINVAL; + + group->tc_a = val; + return count; +} + +static struct device_attribute dev_attr_group_traffic_class_a = + __ATTR(traffic_class_a, 0644, group_traffic_class_a_show, + group_traffic_class_a_store); + +static ssize_t group_traffic_class_b_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + + return sprintf(buf, "%d\n", group->tc_b); +} + +static ssize_t group_traffic_class_b_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + struct idxd_device *idxd = group->idxd; + long val; + int rc; + + rc = kstrtol(buf, 10, &val); + if (rc < 0) + return -EINVAL; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (idxd->state == IDXD_DEV_ENABLED) + return -EPERM; + + if (val < 0 || val > 7) + return -EINVAL; + + group->tc_b = val; + return count; +} + +static struct device_attribute dev_attr_group_traffic_class_b = + __ATTR(traffic_class_b, 0644, group_traffic_class_b_show, + group_traffic_class_b_store); + +static struct attribute *idxd_group_attributes[] = { + &dev_attr_group_work_queues.attr, + &dev_attr_group_engines.attr, + &dev_attr_group_use_token_limit.attr, + &dev_attr_group_tokens_allowed.attr, + &dev_attr_group_tokens_reserved.attr, + &dev_attr_group_traffic_class_a.attr, + &dev_attr_group_traffic_class_b.attr, + NULL, +}; + +static const struct attribute_group idxd_group_attribute_group = { + .attrs = idxd_group_attributes, +}; + +static 
const struct attribute_group *idxd_group_attribute_groups[] = { + &idxd_group_attribute_group, + NULL, +}; + +/* IDXD work queue attribs */ +static ssize_t wq_clients_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + + return sprintf(buf, "%d\n", wq->client_count); +} + +static struct device_attribute dev_attr_wq_clients = + __ATTR(clients, 0444, wq_clients_show, NULL); + +static ssize_t wq_state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + + switch (wq->state) { + case IDXD_WQ_DISABLED: + return sprintf(buf, "disabled\n"); + case IDXD_WQ_ENABLED: + return sprintf(buf, "enabled\n"); + } + + return sprintf(buf, "unknown\n"); +} + +static struct device_attribute dev_attr_wq_state = + __ATTR(state, 0444, wq_state_show, NULL); + +static ssize_t wq_group_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + + if (wq->group) + return sprintf(buf, "%u\n", wq->group->id); + else + return sprintf(buf, "-1\n"); +} + +static ssize_t wq_group_id_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + struct idxd_device *idxd = wq->idxd; + long id; + int rc; + struct idxd_group *prevg, *group; + + rc = kstrtol(buf, 10, &id); + if (rc < 0) + return -EINVAL; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (wq->state != IDXD_WQ_DISABLED) + return -EPERM; + + if (id > idxd->max_groups - 1 || id < -1) + return -EINVAL; + + if (id == -1) { + if (wq->group) { + wq->group->num_wqs--; + wq->group = NULL; + } + return count; + } + + group = &idxd->groups[id]; + prevg = wq->group; + + if (prevg) + prevg->num_wqs--; + wq->group = group; + group->num_wqs++; + return count; +} + +static struct device_attribute dev_attr_wq_group_id = + __ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store); + +static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + + return sprintf(buf, "%s\n", + wq_dedicated(wq) ? 
"dedicated" : "shared"); +} + +static ssize_t wq_mode_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + struct idxd_device *idxd = wq->idxd; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (wq->state != IDXD_WQ_DISABLED) + return -EPERM; + + if (sysfs_streq(buf, "dedicated")) { + set_bit(WQ_FLAG_DEDICATED, &wq->flags); + wq->threshold = 0; + } else { + return -EINVAL; + } + + return count; +} + +static struct device_attribute dev_attr_wq_mode = + __ATTR(mode, 0644, wq_mode_show, wq_mode_store); + +static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + + return sprintf(buf, "%u\n", wq->size); +} + +static ssize_t wq_size_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + unsigned long size; + struct idxd_device *idxd = wq->idxd; + int rc; + + rc = kstrtoul(buf, 10, &size); + if (rc < 0) + return -EINVAL; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (wq->state != IDXD_WQ_DISABLED) + return -EPERM; + + if (size > idxd->max_wq_size) + return -EINVAL; + + wq->size = size; + return count; +} + +static struct device_attribute dev_attr_wq_size = + __ATTR(size, 0644, wq_size_show, wq_size_store); + +static ssize_t wq_priority_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + + return sprintf(buf, "%u\n", wq->priority); +} + +static ssize_t wq_priority_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + unsigned long prio; + struct idxd_device *idxd = wq->idxd; + int rc; + + rc = kstrtoul(buf, 10, &prio); + if (rc < 0) + return -EINVAL; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (wq->state != IDXD_WQ_DISABLED) + return -EPERM; + + if (prio > IDXD_MAX_PRIORITY) + return -EINVAL; + + wq->priority = prio; + return count; +} + +static struct device_attribute dev_attr_wq_priority = + __ATTR(priority, 0644, wq_priority_show, wq_priority_store); + +static ssize_t wq_type_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + + switch (wq->type) { + case IDXD_WQT_KERNEL: + return sprintf(buf, "%s\n", + idxd_wq_type_names[IDXD_WQT_KERNEL]); + case IDXD_WQT_USER: + return sprintf(buf, "%s\n", + idxd_wq_type_names[IDXD_WQT_USER]); + case IDXD_WQT_NONE: + default: + return sprintf(buf, "%s\n", + idxd_wq_type_names[IDXD_WQT_NONE]); + } + + return -EINVAL; +} + +static ssize_t wq_type_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + enum idxd_wq_type old_type; + + if (wq->state != IDXD_WQ_DISABLED) + return -EPERM; + + old_type = wq->type; + if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL])) + wq->type = IDXD_WQT_KERNEL; + else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER])) + wq->type = IDXD_WQT_USER; + else + wq->type = IDXD_WQT_NONE; + + /* If we are changing queue type, clear the name */ + if (wq->type != old_type) + memset(wq->name, 0, WQ_NAME_SIZE + 1); 
+ + return count; +} + +static struct device_attribute dev_attr_wq_type = + __ATTR(type, 0644, wq_type_show, wq_type_store); + +static ssize_t wq_name_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + + return sprintf(buf, "%s\n", wq->name); +} + +static ssize_t wq_name_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + + if (wq->state != IDXD_WQ_DISABLED) + return -EPERM; + + if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0) + return -EINVAL; + + memset(wq->name, 0, WQ_NAME_SIZE + 1); + strncpy(wq->name, buf, WQ_NAME_SIZE); + strreplace(wq->name, '\n', '\0'); + return count; +} + +static struct device_attribute dev_attr_wq_name = + __ATTR(name, 0644, wq_name_show, wq_name_store); + +static ssize_t wq_cdev_minor_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + + return sprintf(buf, "%d\n", wq->idxd_cdev.minor); +} + +static struct device_attribute dev_attr_wq_cdev_minor = + __ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL); + +static struct attribute *idxd_wq_attributes[] = { + &dev_attr_wq_clients.attr, + &dev_attr_wq_state.attr, + &dev_attr_wq_group_id.attr, + &dev_attr_wq_mode.attr, + &dev_attr_wq_size.attr, + &dev_attr_wq_priority.attr, + &dev_attr_wq_type.attr, + &dev_attr_wq_name.attr, + &dev_attr_wq_cdev_minor.attr, + NULL, +}; + +static const struct attribute_group idxd_wq_attribute_group = { + .attrs = idxd_wq_attributes, +}; + +static const struct attribute_group *idxd_wq_attribute_groups[] = { + &idxd_wq_attribute_group, + NULL, +}; + +/* IDXD device attribs */ +static ssize_t max_work_queues_size_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%u\n", idxd->max_wq_size); +} +static DEVICE_ATTR_RO(max_work_queues_size); + +static ssize_t max_groups_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%u\n", idxd->max_groups); +} +static DEVICE_ATTR_RO(max_groups); + +static ssize_t max_work_queues_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%u\n", idxd->max_wqs); +} +static DEVICE_ATTR_RO(max_work_queues); + +static ssize_t max_engines_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%u\n", idxd->max_engines); +} +static DEVICE_ATTR_RO(max_engines); + +static ssize_t numa_node_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%d\n", dev_to_node(&idxd->pdev->dev)); +} +static DEVICE_ATTR_RO(numa_node); + +static ssize_t max_batch_size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%u\n", idxd->max_batch_size); +} +static DEVICE_ATTR_RO(max_batch_size); + +static ssize_t max_transfer_size_show(struct device *dev, + struct 
device_attribute *attr, + char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%llu\n", idxd->max_xfer_bytes); +} +static DEVICE_ATTR_RO(max_transfer_size); + +static ssize_t op_cap_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]); +} +static DEVICE_ATTR_RO(op_cap); + +static ssize_t configurable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%u\n", + test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)); +} +static DEVICE_ATTR_RO(configurable); + +static ssize_t clients_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + unsigned long flags; + int count = 0, i; + + spin_lock_irqsave(&idxd->dev_lock, flags); + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + + count += wq->client_count; + } + spin_unlock_irqrestore(&idxd->dev_lock, flags); + + return sprintf(buf, "%d\n", count); +} +static DEVICE_ATTR_RO(clients); + +static ssize_t state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + switch (idxd->state) { + case IDXD_DEV_DISABLED: + case IDXD_DEV_CONF_READY: + return sprintf(buf, "disabled\n"); + case IDXD_DEV_ENABLED: + return sprintf(buf, "enabled\n"); + case IDXD_DEV_HALTED: + return sprintf(buf, "halted\n"); + } + + return sprintf(buf, "unknown\n"); +} +static DEVICE_ATTR_RO(state); + +static ssize_t errors_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + int i, out = 0; + unsigned long flags; + + spin_lock_irqsave(&idxd->dev_lock, flags); + for (i = 0; i < 4; i++) + out += sprintf(buf + out, "%#018llx ", idxd->sw_err.bits[i]); + spin_unlock_irqrestore(&idxd->dev_lock, flags); + out--; + out += sprintf(buf + out, "\n"); + return out; +} +static DEVICE_ATTR_RO(errors); + +static ssize_t max_tokens_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%u\n", idxd->max_tokens); +} +static DEVICE_ATTR_RO(max_tokens); + +static ssize_t token_limit_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%u\n", idxd->token_limit); +} + +static ssize_t token_limit_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + unsigned long val; + int rc; + + rc = kstrtoul(buf, 10, &val); + if (rc < 0) + return -EINVAL; + + if (idxd->state == IDXD_DEV_ENABLED) + return -EPERM; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (!idxd->hw.group_cap.token_limit) + return -EPERM; + + if (val > idxd->hw.group_cap.total_tokens) + return -EINVAL; + + idxd->token_limit = val; + return count; +} +static DEVICE_ATTR_RW(token_limit); + +static ssize_t cdev_major_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + 
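+	/* Char dev major for this device, cached from idxd_cdev_get_major() at probe. */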
struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%u\n", idxd->major); +} +static DEVICE_ATTR_RO(cdev_major); + +static struct attribute *idxd_device_attributes[] = { + &dev_attr_max_groups.attr, + &dev_attr_max_work_queues.attr, + &dev_attr_max_work_queues_size.attr, + &dev_attr_max_engines.attr, + &dev_attr_numa_node.attr, + &dev_attr_max_batch_size.attr, + &dev_attr_max_transfer_size.attr, + &dev_attr_op_cap.attr, + &dev_attr_configurable.attr, + &dev_attr_clients.attr, + &dev_attr_state.attr, + &dev_attr_errors.attr, + &dev_attr_max_tokens.attr, + &dev_attr_token_limit.attr, + &dev_attr_cdev_major.attr, + NULL, +}; + +static const struct attribute_group idxd_device_attribute_group = { + .attrs = idxd_device_attributes, +}; + +static const struct attribute_group *idxd_attribute_groups[] = { + &idxd_device_attribute_group, + NULL, +}; + +static int idxd_setup_engine_sysfs(struct idxd_device *idxd) +{ + struct device *dev = &idxd->pdev->dev; + int i, rc; + + for (i = 0; i < idxd->max_engines; i++) { + struct idxd_engine *engine = &idxd->engines[i]; + + engine->conf_dev.parent = &idxd->conf_dev; + dev_set_name(&engine->conf_dev, "engine%d.%d", + idxd->id, engine->id); + engine->conf_dev.bus = idxd_get_bus_type(idxd); + engine->conf_dev.groups = idxd_engine_attribute_groups; + engine->conf_dev.type = &idxd_engine_device_type; + dev_dbg(dev, "Engine device register: %s\n", + dev_name(&engine->conf_dev)); + rc = device_register(&engine->conf_dev); + if (rc < 0) { + put_device(&engine->conf_dev); + goto cleanup; + } + } + + return 0; + +cleanup: + while (i--) { + struct idxd_engine *engine = &idxd->engines[i]; + + device_unregister(&engine->conf_dev); + } + return rc; +} + +static int idxd_setup_group_sysfs(struct idxd_device *idxd) +{ + struct device *dev = &idxd->pdev->dev; + int i, rc; + + for (i = 0; i < idxd->max_groups; i++) { + struct idxd_group *group = &idxd->groups[i]; + + group->conf_dev.parent = &idxd->conf_dev; + dev_set_name(&group->conf_dev, "group%d.%d", + idxd->id, group->id); + group->conf_dev.bus = idxd_get_bus_type(idxd); + group->conf_dev.groups = idxd_group_attribute_groups; + group->conf_dev.type = &idxd_group_device_type; + dev_dbg(dev, "Group device register: %s\n", + dev_name(&group->conf_dev)); + rc = device_register(&group->conf_dev); + if (rc < 0) { + put_device(&group->conf_dev); + goto cleanup; + } + } + + return 0; + +cleanup: + while (i--) { + struct idxd_group *group = &idxd->groups[i]; + + device_unregister(&group->conf_dev); + } + return rc; +} + +static int idxd_setup_wq_sysfs(struct idxd_device *idxd) +{ + struct device *dev = &idxd->pdev->dev; + int i, rc; + + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + + wq->conf_dev.parent = &idxd->conf_dev; + dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id); + wq->conf_dev.bus = idxd_get_bus_type(idxd); + wq->conf_dev.groups = idxd_wq_attribute_groups; + wq->conf_dev.type = &idxd_wq_device_type; + dev_dbg(dev, "WQ device register: %s\n", + dev_name(&wq->conf_dev)); + rc = device_register(&wq->conf_dev); + if (rc < 0) { + put_device(&wq->conf_dev); + goto cleanup; + } + } + + return 0; + +cleanup: + while (i--) { + struct idxd_wq *wq = &idxd->wqs[i]; + + device_unregister(&wq->conf_dev); + } + return rc; +} + +static int idxd_setup_device_sysfs(struct idxd_device *idxd) +{ + struct device *dev = &idxd->pdev->dev; + int rc; + char devname[IDXD_NAME_SIZE]; + + sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id); + 
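+	/* Construct the device name, e.g. "dsa0" for the first DSA instance. */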
idxd->conf_dev.parent = dev; + dev_set_name(&idxd->conf_dev, "%s", devname); + idxd->conf_dev.bus = idxd_get_bus_type(idxd); + idxd->conf_dev.groups = idxd_attribute_groups; + idxd->conf_dev.type = idxd_get_device_type(idxd); + + dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev)); + rc = device_register(&idxd->conf_dev); + if (rc < 0) { + put_device(&idxd->conf_dev); + return rc; + } + + return 0; +} + +int idxd_setup_sysfs(struct idxd_device *idxd) +{ + struct device *dev = &idxd->pdev->dev; + int rc; + + rc = idxd_setup_device_sysfs(idxd); + if (rc < 0) { + dev_dbg(dev, "Device sysfs registering failed: %d\n", rc); + return rc; + } + + rc = idxd_setup_wq_sysfs(idxd); + if (rc < 0) { + /* unregister conf dev */ + dev_dbg(dev, "Work Queue sysfs registering failed: %d\n", rc); + return rc; + } + + rc = idxd_setup_group_sysfs(idxd); + if (rc < 0) { + /* unregister conf dev */ + dev_dbg(dev, "Group sysfs registering failed: %d\n", rc); + return rc; + } + + rc = idxd_setup_engine_sysfs(idxd); + if (rc < 0) { + /* unregister conf dev */ + dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc); + return rc; + } + + return 0; +} + +void idxd_cleanup_sysfs(struct idxd_device *idxd) +{ + int i; + + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + + device_unregister(&wq->conf_dev); + } + + for (i = 0; i < idxd->max_engines; i++) { + struct idxd_engine *engine = &idxd->engines[i]; + + device_unregister(&engine->conf_dev); + } + + for (i = 0; i < idxd->max_groups; i++) { + struct idxd_group *group = &idxd->groups[i]; + + device_unregister(&group->conf_dev); + } + + device_unregister(&idxd->conf_dev); +} + +int idxd_register_bus_type(void) +{ + int i, rc; + + for (i = 0; i < IDXD_TYPE_MAX; i++) { + rc = bus_register(idxd_bus_types[i]); + if (rc < 0) + goto bus_err; + } + + return 0; + +bus_err: + for (; i > 0; i--) + bus_unregister(idxd_bus_types[i]); + return rc; +} + +void idxd_unregister_bus_type(void) +{ + int i; + + for (i = 0; i < IDXD_TYPE_MAX; i++) + bus_unregister(idxd_bus_types[i]); +} diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index c27e206a764c..066b21a32232 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -760,12 +760,8 @@ static void sdma_start_desc(struct sdma_channel *sdmac) return; } sdmac->desc = desc = to_sdma_desc(&vd->tx); - /* - * Do not delete the node in desc_issued list in cyclic mode, otherwise - * the desc allocated will never be freed in vchan_dma_desc_free_list - */ - if (!(sdmac->flags & IMX_DMA_SG_LOOP)) - list_del(&vd->node); + + list_del(&vd->node); sdma->channel_control[channel].base_bd_ptr = desc->bd_phys; sdma->channel_control[channel].current_bd_ptr = desc->bd_phys; @@ -1071,20 +1067,27 @@ static void sdma_channel_terminate_work(struct work_struct *work) spin_lock_irqsave(&sdmac->vc.lock, flags); vchan_get_all_descriptors(&sdmac->vc, &head); - sdmac->desc = NULL; spin_unlock_irqrestore(&sdmac->vc.lock, flags); vchan_dma_desc_free_list(&sdmac->vc, &head); sdmac->context_loaded = false; } -static int sdma_disable_channel_async(struct dma_chan *chan) +static int sdma_terminate_all(struct dma_chan *chan) { struct sdma_channel *sdmac = to_sdma_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&sdmac->vc.lock, flags); sdma_disable_channel(chan); - if (sdmac->desc) + if (sdmac->desc) { + vchan_terminate_vdesc(&sdmac->desc->vd); + sdmac->desc = NULL; schedule_work(&sdmac->terminate_worker); + } + + spin_unlock_irqrestore(&sdmac->vc.lock, flags); return 0; } @@ -1324,7 +1327,7 @@ 
static void sdma_free_chan_resources(struct dma_chan *chan) struct sdma_channel *sdmac = to_sdma_chan(chan); struct sdma_engine *sdma = sdmac->sdma; - sdma_disable_channel_async(chan); + sdma_terminate_all(chan); sdma_channel_synchronize(chan); @@ -1648,7 +1651,7 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan, struct dma_tx_state *txstate) { struct sdma_channel *sdmac = to_sdma_chan(chan); - struct sdma_desc *desc; + struct sdma_desc *desc = NULL; u32 residue; struct virt_dma_desc *vd; enum dma_status ret; @@ -1659,19 +1662,23 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan, return ret; spin_lock_irqsave(&sdmac->vc.lock, flags); + vd = vchan_find_desc(&sdmac->vc, cookie); - if (vd) { + if (vd) desc = to_sdma_desc(&vd->tx); + else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie) + desc = sdmac->desc; + + if (desc) { if (sdmac->flags & IMX_DMA_SG_LOOP) residue = (desc->num_bd - desc->buf_ptail) * desc->period_len - desc->chn_real_count; else residue = desc->chn_count - desc->chn_real_count; - } else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie) { - residue = sdmac->desc->chn_count - sdmac->desc->chn_real_count; } else { residue = 0; } + spin_unlock_irqrestore(&sdmac->vc.lock, flags); dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, @@ -2103,7 +2110,7 @@ static int sdma_probe(struct platform_device *pdev) sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg; sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic; sdma->dma_device.device_config = sdma_config; - sdma->dma_device.device_terminate_all = sdma_disable_channel_async; + sdma->dma_device.device_terminate_all = sdma_terminate_all; sdma->dma_device.device_synchronize = sdma_channel_synchronize; sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS; sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS; diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c index a6a6dc432db8..60e9afbb896c 100644 --- a/drivers/dma/ioat/init.c +++ b/drivers/dma/ioat/init.c @@ -556,10 +556,6 @@ static void ioat_dma_remove(struct ioatdma_device *ioat_dma) ioat_kobject_del(ioat_dma); dma_async_device_unregister(dma); - - dma_pool_destroy(ioat_dma->completion_pool); - - INIT_LIST_HEAD(&dma->channels); } /** @@ -589,7 +585,7 @@ static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma) dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log); for (i = 0; i < dma->chancnt; i++) { - ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL); + ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL); if (!ioat_chan) break; @@ -624,12 +620,16 @@ static void ioat_free_chan_resources(struct dma_chan *c) return; ioat_stop(ioat_chan); - ioat_reset_hw(ioat_chan); - /* Put LTR to idle */ - if (ioat_dma->version >= IOAT_VER_3_4) - writeb(IOAT_CHAN_LTR_SWSEL_IDLE, - ioat_chan->reg_base + IOAT_CHAN_LTR_SWSEL_OFFSET); + if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state)) { + ioat_reset_hw(ioat_chan); + + /* Put LTR to idle */ + if (ioat_dma->version >= IOAT_VER_3_4) + writeb(IOAT_CHAN_LTR_SWSEL_IDLE, + ioat_chan->reg_base + + IOAT_CHAN_LTR_SWSEL_OFFSET); + } spin_lock_bh(&ioat_chan->cleanup_lock); spin_lock_bh(&ioat_chan->prep_lock); @@ -1322,16 +1322,28 @@ static struct pci_driver ioat_pci_driver = { .err_handler = &ioat_err_handler, }; +static void release_ioatdma(struct dma_device *device) +{ + struct ioatdma_device *d = to_ioatdma_device(device); + int i; + + for (i = 0; i < IOAT_MAX_CHANS; i++) + kfree(d->idx[i]); + + dma_pool_destroy(d->completion_pool); + kfree(d); +} + 
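The ioat conversion in these hunks is a lifetime fix worth pausing on: allocations move from devm_kzalloc() to plain kzalloc(), and the new release_ioatdma() callback frees them only when the dmaengine core drops the last reference to the dma_device. A minimal sketch of that pattern, assuming hypothetical foo_* names that are not part of this patch:

#include <linux/dmaengine.h>
#include <linux/slab.h>

struct foo_dma_dev {			/* hypothetical driver state */
	struct dma_device dma_dev;
	void *chan_state;
};

static void foo_dma_release(struct dma_device *dma_dev)
{
	struct foo_dma_dev *fd = container_of(dma_dev, struct foo_dma_dev,
					      dma_dev);

	/* Called by the dmaengine core once the last reference is gone. */
	kfree(fd->chan_state);
	kfree(fd);
}

static struct foo_dma_dev *foo_dma_alloc(void)
{
	/*
	 * Plain kzalloc(), not devm_kzalloc(): devm memory is released at
	 * driver detach, which can happen while a dmaengine client still
	 * holds a reference to the dma_device.
	 */
	struct foo_dma_dev *fd = kzalloc(sizeof(*fd), GFP_KERNEL);

	if (!fd)
		return NULL;

	fd->dma_dev.device_release = foo_dma_release;
	return fd;
}

The new plx_dma driver later in this diff wires up the same idea through plx_dma_release().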
static struct ioatdma_device * alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase) { - struct device *dev = &pdev->dev; - struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL); + struct ioatdma_device *d = kzalloc(sizeof(*d), GFP_KERNEL); if (!d) return NULL; d->pdev = pdev; d->reg_base = iobase; + d->dma_dev.device_release = release_ioatdma; return d; } @@ -1400,6 +1412,8 @@ static void ioat_remove(struct pci_dev *pdev) if (!device) return; + ioat_shutdown(pdev); + dev_err(&pdev->dev, "Removing dma and dca services\n"); if (device->dca) { unregister_dca_provider(device->dca, &pdev->dev); diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c index c20e6bd4e298..29f1223b285a 100644 --- a/drivers/dma/mediatek/mtk-uart-apdma.c +++ b/drivers/dma/mediatek/mtk-uart-apdma.c @@ -430,9 +430,10 @@ static int mtk_uart_apdma_terminate_all(struct dma_chan *chan) spin_lock_irqsave(&c->vc.lock, flags); vchan_get_all_descriptors(&c->vc, &head); - vchan_dma_desc_free_list(&c->vc, &head); spin_unlock_irqrestore(&c->vc.lock, flags); + vchan_dma_desc_free_list(&c->vc, &head); + return 0; } diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c index c2d779daa4b5..b2c2b5e8093c 100644 --- a/drivers/dma/of-dma.c +++ b/drivers/dma/of-dma.c @@ -15,6 +15,8 @@ #include <linux/of.h> #include <linux/of_dma.h> +#include "dmaengine.h" + static LIST_HEAD(of_dma_list); static DEFINE_MUTEX(of_dma_lock); diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c index 023f951189a7..c683051257fd 100644 --- a/drivers/dma/owl-dma.c +++ b/drivers/dma/owl-dma.c @@ -674,10 +674,11 @@ static int owl_dma_terminate_all(struct dma_chan *chan) } vchan_get_all_descriptors(&vchan->vc, &head); - vchan_dma_desc_free_list(&vchan->vc, &head); spin_unlock_irqrestore(&vchan->vc.lock, flags); + vchan_dma_desc_free_list(&vchan->vc, &head); + return 0; } diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 6cce9ef61b29..88b884cbb7c1 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2961,12 +2961,7 @@ static int __maybe_unused pl330_suspend(struct device *dev) { struct amba_device *pcdev = to_amba_device(dev); - pm_runtime_disable(dev); - - if (!pm_runtime_status_suspended(dev)) { - /* amba did not disable the clock */ - amba_pclk_disable(pcdev); - } + pm_runtime_force_suspend(dev); amba_pclk_unprepare(pcdev); return 0; @@ -2981,15 +2976,14 @@ static int __maybe_unused pl330_resume(struct device *dev) if (ret) return ret; - if (!pm_runtime_status_suspended(dev)) - ret = amba_pclk_enable(pcdev); - - pm_runtime_enable(dev); + pm_runtime_force_resume(dev); return ret; } -static SIMPLE_DEV_PM_OPS(pl330_pm, pl330_suspend, pl330_resume); +static const struct dev_pm_ops pl330_pm = { + SET_LATE_SYSTEM_SLEEP_PM_OPS(pl330_suspend, pl330_resume) +}; static int pl330_probe(struct amba_device *adev, const struct amba_id *id) diff --git a/drivers/dma/plx_dma.c b/drivers/dma/plx_dma.c new file mode 100644 index 000000000000..db4c5fd453a9 --- /dev/null +++ b/drivers/dma/plx_dma.c @@ -0,0 +1,639 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Microsemi Switchtec(tm) PCIe Management Driver + * Copyright (c) 2019, Logan Gunthorpe <logang@deltatee.com> + * Copyright (c) 2019, GigaIO Networks, Inc + */ + +#include "dmaengine.h" + +#include <linux/circ_buf.h> +#include <linux/dmaengine.h> +#include <linux/kref.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/pci.h> + +MODULE_DESCRIPTION("PLX ExpressLane PEX PCI Switch DMA Engine"); +MODULE_VERSION("0.1"); 
+MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Logan Gunthorpe"); + +#define PLX_REG_DESC_RING_ADDR 0x214 +#define PLX_REG_DESC_RING_ADDR_HI 0x218 +#define PLX_REG_DESC_RING_NEXT_ADDR 0x21C +#define PLX_REG_DESC_RING_COUNT 0x220 +#define PLX_REG_DESC_RING_LAST_ADDR 0x224 +#define PLX_REG_DESC_RING_LAST_SIZE 0x228 +#define PLX_REG_PREF_LIMIT 0x234 +#define PLX_REG_CTRL 0x238 +#define PLX_REG_CTRL2 0x23A +#define PLX_REG_INTR_CTRL 0x23C +#define PLX_REG_INTR_STATUS 0x23E + +#define PLX_REG_PREF_LIMIT_PREF_FOUR 8 + +#define PLX_REG_CTRL_GRACEFUL_PAUSE BIT(0) +#define PLX_REG_CTRL_ABORT BIT(1) +#define PLX_REG_CTRL_WRITE_BACK_EN BIT(2) +#define PLX_REG_CTRL_START BIT(3) +#define PLX_REG_CTRL_RING_STOP_MODE BIT(4) +#define PLX_REG_CTRL_DESC_MODE_BLOCK (0 << 5) +#define PLX_REG_CTRL_DESC_MODE_ON_CHIP (1 << 5) +#define PLX_REG_CTRL_DESC_MODE_OFF_CHIP (2 << 5) +#define PLX_REG_CTRL_DESC_INVALID BIT(8) +#define PLX_REG_CTRL_GRACEFUL_PAUSE_DONE BIT(9) +#define PLX_REG_CTRL_ABORT_DONE BIT(10) +#define PLX_REG_CTRL_IMM_PAUSE_DONE BIT(12) +#define PLX_REG_CTRL_IN_PROGRESS BIT(30) + +#define PLX_REG_CTRL_RESET_VAL (PLX_REG_CTRL_DESC_INVALID | \ + PLX_REG_CTRL_GRACEFUL_PAUSE_DONE | \ + PLX_REG_CTRL_ABORT_DONE | \ + PLX_REG_CTRL_IMM_PAUSE_DONE) + +#define PLX_REG_CTRL_START_VAL (PLX_REG_CTRL_WRITE_BACK_EN | \ + PLX_REG_CTRL_DESC_MODE_OFF_CHIP | \ + PLX_REG_CTRL_START | \ + PLX_REG_CTRL_RESET_VAL) + +#define PLX_REG_CTRL2_MAX_TXFR_SIZE_64B 0 +#define PLX_REG_CTRL2_MAX_TXFR_SIZE_128B 1 +#define PLX_REG_CTRL2_MAX_TXFR_SIZE_256B 2 +#define PLX_REG_CTRL2_MAX_TXFR_SIZE_512B 3 +#define PLX_REG_CTRL2_MAX_TXFR_SIZE_1KB 4 +#define PLX_REG_CTRL2_MAX_TXFR_SIZE_2KB 5 +#define PLX_REG_CTRL2_MAX_TXFR_SIZE_4B 7 + +#define PLX_REG_INTR_CRTL_ERROR_EN BIT(0) +#define PLX_REG_INTR_CRTL_INV_DESC_EN BIT(1) +#define PLX_REG_INTR_CRTL_ABORT_DONE_EN BIT(3) +#define PLX_REG_INTR_CRTL_PAUSE_DONE_EN BIT(4) +#define PLX_REG_INTR_CRTL_IMM_PAUSE_DONE_EN BIT(5) + +#define PLX_REG_INTR_STATUS_ERROR BIT(0) +#define PLX_REG_INTR_STATUS_INV_DESC BIT(1) +#define PLX_REG_INTR_STATUS_DESC_DONE BIT(2) +#define PLX_REG_INTR_CRTL_ABORT_DONE BIT(3) + +struct plx_dma_hw_std_desc { + __le32 flags_and_size; + __le16 dst_addr_hi; + __le16 src_addr_hi; + __le32 dst_addr_lo; + __le32 src_addr_lo; +}; + +#define PLX_DESC_SIZE_MASK 0x7ffffff +#define PLX_DESC_FLAG_VALID BIT(31) +#define PLX_DESC_FLAG_INT_WHEN_DONE BIT(30) + +#define PLX_DESC_WB_SUCCESS BIT(30) +#define PLX_DESC_WB_RD_FAIL BIT(29) +#define PLX_DESC_WB_WR_FAIL BIT(28) + +#define PLX_DMA_RING_COUNT 2048 + +struct plx_dma_desc { + struct dma_async_tx_descriptor txd; + struct plx_dma_hw_std_desc *hw; + u32 orig_size; +}; + +struct plx_dma_dev { + struct dma_device dma_dev; + struct dma_chan dma_chan; + struct pci_dev __rcu *pdev; + void __iomem *bar; + struct tasklet_struct desc_task; + + spinlock_t ring_lock; + bool ring_active; + int head; + int tail; + struct plx_dma_hw_std_desc *hw_ring; + dma_addr_t hw_ring_dma; + struct plx_dma_desc **desc_ring; +}; + +static struct plx_dma_dev *chan_to_plx_dma_dev(struct dma_chan *c) +{ + return container_of(c, struct plx_dma_dev, dma_chan); +} + +static struct plx_dma_desc *to_plx_desc(struct dma_async_tx_descriptor *txd) +{ + return container_of(txd, struct plx_dma_desc, txd); +} + +static struct plx_dma_desc *plx_dma_get_desc(struct plx_dma_dev *plxdev, int i) +{ + return plxdev->desc_ring[i & (PLX_DMA_RING_COUNT - 1)]; +} + +static void plx_dma_process_desc(struct plx_dma_dev *plxdev) +{ + struct dmaengine_result res; + struct plx_dma_desc *desc; + u32 
flags; + + spin_lock_bh(&plxdev->ring_lock); + + while (plxdev->tail != plxdev->head) { + desc = plx_dma_get_desc(plxdev, plxdev->tail); + + flags = le32_to_cpu(READ_ONCE(desc->hw->flags_and_size)); + + if (flags & PLX_DESC_FLAG_VALID) + break; + + res.residue = desc->orig_size - (flags & PLX_DESC_SIZE_MASK); + + if (flags & PLX_DESC_WB_SUCCESS) + res.result = DMA_TRANS_NOERROR; + else if (flags & PLX_DESC_WB_WR_FAIL) + res.result = DMA_TRANS_WRITE_FAILED; + else + res.result = DMA_TRANS_READ_FAILED; + + dma_cookie_complete(&desc->txd); + dma_descriptor_unmap(&desc->txd); + dmaengine_desc_get_callback_invoke(&desc->txd, &res); + desc->txd.callback = NULL; + desc->txd.callback_result = NULL; + + plxdev->tail++; + } + + spin_unlock_bh(&plxdev->ring_lock); +} + +static void plx_dma_abort_desc(struct plx_dma_dev *plxdev) +{ + struct dmaengine_result res; + struct plx_dma_desc *desc; + + plx_dma_process_desc(plxdev); + + spin_lock_bh(&plxdev->ring_lock); + + while (plxdev->tail != plxdev->head) { + desc = plx_dma_get_desc(plxdev, plxdev->tail); + + res.residue = desc->orig_size; + res.result = DMA_TRANS_ABORTED; + + dma_cookie_complete(&desc->txd); + dma_descriptor_unmap(&desc->txd); + dmaengine_desc_get_callback_invoke(&desc->txd, &res); + desc->txd.callback = NULL; + desc->txd.callback_result = NULL; + + plxdev->tail++; + } + + spin_unlock_bh(&plxdev->ring_lock); +} + +static void __plx_dma_stop(struct plx_dma_dev *plxdev) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(1000); + u32 val; + + val = readl(plxdev->bar + PLX_REG_CTRL); + if (!(val & ~PLX_REG_CTRL_GRACEFUL_PAUSE)) + return; + + writel(PLX_REG_CTRL_RESET_VAL | PLX_REG_CTRL_GRACEFUL_PAUSE, + plxdev->bar + PLX_REG_CTRL); + + while (!time_after(jiffies, timeout)) { + val = readl(plxdev->bar + PLX_REG_CTRL); + if (val & PLX_REG_CTRL_GRACEFUL_PAUSE_DONE) + break; + + cpu_relax(); + } + + if (!(val & PLX_REG_CTRL_GRACEFUL_PAUSE_DONE)) + dev_err(plxdev->dma_dev.dev, + "Timeout waiting for graceful pause!\n"); + + writel(PLX_REG_CTRL_RESET_VAL | PLX_REG_CTRL_GRACEFUL_PAUSE, + plxdev->bar + PLX_REG_CTRL); + + writel(0, plxdev->bar + PLX_REG_DESC_RING_COUNT); + writel(0, plxdev->bar + PLX_REG_DESC_RING_ADDR); + writel(0, plxdev->bar + PLX_REG_DESC_RING_ADDR_HI); + writel(0, plxdev->bar + PLX_REG_DESC_RING_NEXT_ADDR); +} + +static void plx_dma_stop(struct plx_dma_dev *plxdev) +{ + rcu_read_lock(); + if (!rcu_dereference(plxdev->pdev)) { + rcu_read_unlock(); + return; + } + + __plx_dma_stop(plxdev); + + rcu_read_unlock(); +} + +static void plx_dma_desc_task(unsigned long data) +{ + struct plx_dma_dev *plxdev = (void *)data; + + plx_dma_process_desc(plxdev); +} + +static struct dma_async_tx_descriptor *plx_dma_prep_memcpy(struct dma_chan *c, + dma_addr_t dma_dst, dma_addr_t dma_src, size_t len, + unsigned long flags) + __acquires(plxdev->ring_lock) +{ + struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(c); + struct plx_dma_desc *plxdesc; + + spin_lock_bh(&plxdev->ring_lock); + if (!plxdev->ring_active) + goto err_unlock; + + if (!CIRC_SPACE(plxdev->head, plxdev->tail, PLX_DMA_RING_COUNT)) + goto err_unlock; + + if (len > PLX_DESC_SIZE_MASK) + goto err_unlock; + + plxdesc = plx_dma_get_desc(plxdev, plxdev->head); + plxdev->head++; + + plxdesc->hw->dst_addr_lo = cpu_to_le32(lower_32_bits(dma_dst)); + plxdesc->hw->dst_addr_hi = cpu_to_le16(upper_32_bits(dma_dst)); + plxdesc->hw->src_addr_lo = cpu_to_le32(lower_32_bits(dma_src)); + plxdesc->hw->src_addr_hi = cpu_to_le16(upper_32_bits(dma_src)); + + plxdesc->orig_size = len; + + if (flags & 
DMA_PREP_INTERRUPT) + len |= PLX_DESC_FLAG_INT_WHEN_DONE; + + plxdesc->hw->flags_and_size = cpu_to_le32(len); + plxdesc->txd.flags = flags; + + /* return with the lock held, it will be released in tx_submit */ + + return &plxdesc->txd; + +err_unlock: + /* + * Keep sparse happy by restoring an even lock count on + * this lock. + */ + __acquire(plxdev->ring_lock); + + spin_unlock_bh(&plxdev->ring_lock); + return NULL; +} + +static dma_cookie_t plx_dma_tx_submit(struct dma_async_tx_descriptor *desc) + __releases(plxdev->ring_lock) +{ + struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(desc->chan); + struct plx_dma_desc *plxdesc = to_plx_desc(desc); + dma_cookie_t cookie; + + cookie = dma_cookie_assign(desc); + + /* + * Ensure the descriptor updates are visible to the dma device + * before setting the valid bit. + */ + wmb(); + + plxdesc->hw->flags_and_size |= cpu_to_le32(PLX_DESC_FLAG_VALID); + + spin_unlock_bh(&plxdev->ring_lock); + + return cookie; +} + +static enum dma_status plx_dma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, struct dma_tx_state *txstate) +{ + struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan); + enum dma_status ret; + + ret = dma_cookie_status(chan, cookie, txstate); + if (ret == DMA_COMPLETE) + return ret; + + plx_dma_process_desc(plxdev); + + return dma_cookie_status(chan, cookie, txstate); +} + +static void plx_dma_issue_pending(struct dma_chan *chan) +{ + struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan); + + rcu_read_lock(); + if (!rcu_dereference(plxdev->pdev)) { + rcu_read_unlock(); + return; + } + + /* + * Ensure the valid bits are visible before starting the + * DMA engine. + */ + wmb(); + + writew(PLX_REG_CTRL_START_VAL, plxdev->bar + PLX_REG_CTRL); + + rcu_read_unlock(); +} + +static irqreturn_t plx_dma_isr(int irq, void *devid) +{ + struct plx_dma_dev *plxdev = devid; + u32 status; + + status = readw(plxdev->bar + PLX_REG_INTR_STATUS); + + if (!status) + return IRQ_NONE; + + if (status & PLX_REG_INTR_STATUS_DESC_DONE && plxdev->ring_active) + tasklet_schedule(&plxdev->desc_task); + + writew(status, plxdev->bar + PLX_REG_INTR_STATUS); + + return IRQ_HANDLED; +} + +static int plx_dma_alloc_desc(struct plx_dma_dev *plxdev) +{ + struct plx_dma_desc *desc; + int i; + + plxdev->desc_ring = kcalloc(PLX_DMA_RING_COUNT, + sizeof(*plxdev->desc_ring), GFP_KERNEL); + if (!plxdev->desc_ring) + return -ENOMEM; + + for (i = 0; i < PLX_DMA_RING_COUNT; i++) { + desc = kzalloc(sizeof(*desc), GFP_KERNEL); + if (!desc) + goto free_and_exit; + + dma_async_tx_descriptor_init(&desc->txd, &plxdev->dma_chan); + desc->txd.tx_submit = plx_dma_tx_submit; + desc->hw = &plxdev->hw_ring[i]; + + plxdev->desc_ring[i] = desc; + } + + return 0; + +free_and_exit: + for (i = 0; i < PLX_DMA_RING_COUNT; i++) + kfree(plxdev->desc_ring[i]); + kfree(plxdev->desc_ring); + return -ENOMEM; +} + +static int plx_dma_alloc_chan_resources(struct dma_chan *chan) +{ + struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan); + size_t ring_sz = PLX_DMA_RING_COUNT * sizeof(*plxdev->hw_ring); + int rc; + + plxdev->head = plxdev->tail = 0; + plxdev->hw_ring = dma_alloc_coherent(plxdev->dma_dev.dev, ring_sz, + &plxdev->hw_ring_dma, GFP_KERNEL); + if (!plxdev->hw_ring) + return -ENOMEM; + + rc = plx_dma_alloc_desc(plxdev); + if (rc) + goto out_free_hw_ring; + + rcu_read_lock(); + if (!rcu_dereference(plxdev->pdev)) { + rcu_read_unlock(); + rc = -ENODEV; + goto out_free_hw_ring; + } + + writel(PLX_REG_CTRL_RESET_VAL, plxdev->bar + PLX_REG_CTRL); + writel(lower_32_bits(plxdev->hw_ring_dma), + 
plxdev->bar + PLX_REG_DESC_RING_ADDR); + writel(upper_32_bits(plxdev->hw_ring_dma), + plxdev->bar + PLX_REG_DESC_RING_ADDR_HI); + writel(lower_32_bits(plxdev->hw_ring_dma), + plxdev->bar + PLX_REG_DESC_RING_NEXT_ADDR); + writel(PLX_DMA_RING_COUNT, plxdev->bar + PLX_REG_DESC_RING_COUNT); + writel(PLX_REG_PREF_LIMIT_PREF_FOUR, plxdev->bar + PLX_REG_PREF_LIMIT); + + plxdev->ring_active = true; + + rcu_read_unlock(); + + return PLX_DMA_RING_COUNT; + +out_free_hw_ring: + dma_free_coherent(plxdev->dma_dev.dev, ring_sz, plxdev->hw_ring, + plxdev->hw_ring_dma); + return rc; +} + +static void plx_dma_free_chan_resources(struct dma_chan *chan) +{ + struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan); + size_t ring_sz = PLX_DMA_RING_COUNT * sizeof(*plxdev->hw_ring); + struct pci_dev *pdev; + int irq = -1; + int i; + + spin_lock_bh(&plxdev->ring_lock); + plxdev->ring_active = false; + spin_unlock_bh(&plxdev->ring_lock); + + plx_dma_stop(plxdev); + + rcu_read_lock(); + pdev = rcu_dereference(plxdev->pdev); + if (pdev) + irq = pci_irq_vector(pdev, 0); + rcu_read_unlock(); + + if (irq > 0) + synchronize_irq(irq); + + tasklet_kill(&plxdev->desc_task); + + plx_dma_abort_desc(plxdev); + + for (i = 0; i < PLX_DMA_RING_COUNT; i++) + kfree(plxdev->desc_ring[i]); + + kfree(plxdev->desc_ring); + dma_free_coherent(plxdev->dma_dev.dev, ring_sz, plxdev->hw_ring, + plxdev->hw_ring_dma); + +} + +static void plx_dma_release(struct dma_device *dma_dev) +{ + struct plx_dma_dev *plxdev = + container_of(dma_dev, struct plx_dma_dev, dma_dev); + + put_device(dma_dev->dev); + kfree(plxdev); +} + +static int plx_dma_create(struct pci_dev *pdev) +{ + struct plx_dma_dev *plxdev; + struct dma_device *dma; + struct dma_chan *chan; + int rc; + + plxdev = kzalloc(sizeof(*plxdev), GFP_KERNEL); + if (!plxdev) + return -ENOMEM; + + rc = request_irq(pci_irq_vector(pdev, 0), plx_dma_isr, 0, + KBUILD_MODNAME, plxdev); + if (rc) { + kfree(plxdev); + return rc; + } + + spin_lock_init(&plxdev->ring_lock); + tasklet_init(&plxdev->desc_task, plx_dma_desc_task, + (unsigned long)plxdev); + + RCU_INIT_POINTER(plxdev->pdev, pdev); + plxdev->bar = pcim_iomap_table(pdev)[0]; + + dma = &plxdev->dma_dev; + dma->chancnt = 1; + INIT_LIST_HEAD(&dma->channels); + dma_cap_set(DMA_MEMCPY, dma->cap_mask); + dma->copy_align = DMAENGINE_ALIGN_1_BYTE; + dma->dev = get_device(&pdev->dev); + + dma->device_alloc_chan_resources = plx_dma_alloc_chan_resources; + dma->device_free_chan_resources = plx_dma_free_chan_resources; + dma->device_prep_dma_memcpy = plx_dma_prep_memcpy; + dma->device_issue_pending = plx_dma_issue_pending; + dma->device_tx_status = plx_dma_tx_status; + dma->device_release = plx_dma_release; + + chan = &plxdev->dma_chan; + chan->device = dma; + dma_cookie_init(chan); + list_add_tail(&chan->device_node, &dma->channels); + + rc = dma_async_device_register(dma); + if (rc) { + pci_err(pdev, "Failed to register dma device: %d\n", rc); + free_irq(pci_irq_vector(pdev, 0), plxdev); + kfree(plxdev); + return rc; + } + + pci_set_drvdata(pdev, plxdev); + + return 0; +} + +static int plx_dma_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + int rc; + + rc = pcim_enable_device(pdev); + if (rc) + return rc; + + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(48)); + if (rc) + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) + return rc; + + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48)); + if (rc) + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) + return rc; + + rc = pcim_iomap_regions(pdev, 1, 
KBUILD_MODNAME); + if (rc) + return rc; + + rc = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); + if (rc <= 0) + return rc; + + pci_set_master(pdev); + + rc = plx_dma_create(pdev); + if (rc) + goto err_free_irq_vectors; + + pci_info(pdev, "PLX DMA Channel Registered\n"); + + return 0; + +err_free_irq_vectors: + pci_free_irq_vectors(pdev); + return rc; +} + +static void plx_dma_remove(struct pci_dev *pdev) +{ + struct plx_dma_dev *plxdev = pci_get_drvdata(pdev); + + free_irq(pci_irq_vector(pdev, 0), plxdev); + + rcu_assign_pointer(plxdev->pdev, NULL); + synchronize_rcu(); + + spin_lock_bh(&plxdev->ring_lock); + plxdev->ring_active = false; + spin_unlock_bh(&plxdev->ring_lock); + + __plx_dma_stop(plxdev); + plx_dma_abort_desc(plxdev); + + plxdev->bar = NULL; + dma_async_device_unregister(&plxdev->dma_dev); + + pci_free_irq_vectors(pdev); +} + +static const struct pci_device_id plx_dma_pci_tbl[] = { + { + .vendor = PCI_VENDOR_ID_PLX, + .device = 0x87D0, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = PCI_CLASS_SYSTEM_OTHER << 8, + .class_mask = 0xFFFFFFFF, + }, + {0} +}; +MODULE_DEVICE_TABLE(pci, plx_dma_pci_tbl); + +static struct pci_driver plx_dma_pci_driver = { + .name = KBUILD_MODNAME, + .id_table = plx_dma_pci_tbl, + .probe = plx_dma_probe, + .remove = plx_dma_remove, +}; +module_pci_driver(plx_dma_pci_driver); diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c index 43da8eeb18ef..8e14c72d03f0 100644 --- a/drivers/dma/s3c24xx-dma.c +++ b/drivers/dma/s3c24xx-dma.c @@ -519,15 +519,6 @@ static void s3c24xx_dma_start_next_txd(struct s3c24xx_dma_chan *s3cchan) s3c24xx_dma_start_next_sg(s3cchan, txd); } -static void s3c24xx_dma_free_txd_list(struct s3c24xx_dma_engine *s3cdma, - struct s3c24xx_dma_chan *s3cchan) -{ - LIST_HEAD(head); - - vchan_get_all_descriptors(&s3cchan->vc, &head); - vchan_dma_desc_free_list(&s3cchan->vc, &head); -} - /* * Try to allocate a physical channel. When successful, assign it to * this virtual channel, and initiate the next descriptor. 
The @@ -709,8 +700,9 @@ static int s3c24xx_dma_terminate_all(struct dma_chan *chan) { struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); struct s3c24xx_dma_engine *s3cdma = s3cchan->host; + LIST_HEAD(head); unsigned long flags; - int ret = 0; + int ret; spin_lock_irqsave(&s3cchan->vc.lock, flags); @@ -734,7 +726,15 @@ static int s3c24xx_dma_terminate_all(struct dma_chan *chan) } /* Dequeue jobs not yet fired as well */ - s3c24xx_dma_free_txd_list(s3cdma, s3cchan); + + vchan_get_all_descriptors(&s3cchan->vc, &head); + + spin_unlock_irqrestore(&s3cchan->vc.lock, flags); + + vchan_dma_desc_free_list(&s3cchan->vc, &head); + + return 0; + unlock: spin_unlock_irqrestore(&s3cchan->vc.lock, flags); @@ -1198,7 +1198,7 @@ static int s3c24xx_dma_probe(struct platform_device *pdev) /* Basic sanity check */ if (pdata->num_phy_channels > MAX_DMA_CHANNELS) { - dev_err(&pdev->dev, "to many dma channels %d, max %d\n", + dev_err(&pdev->dev, "too many dma channels %d, max %d\n", pdata->num_phy_channels, MAX_DMA_CHANNELS); return -EINVAL; } diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c index 465256fe8b1f..6d0bec947636 100644 --- a/drivers/dma/sf-pdma/sf-pdma.c +++ b/drivers/dma/sf-pdma/sf-pdma.c @@ -155,9 +155,9 @@ static void sf_pdma_free_chan_resources(struct dma_chan *dchan) kfree(chan->desc); chan->desc = NULL; vchan_get_all_descriptors(&chan->vchan, &head); - vchan_dma_desc_free_list(&chan->vchan, &head); sf_pdma_disclaim_chan(chan); spin_unlock_irqrestore(&chan->vchan.lock, flags); + vchan_dma_desc_free_list(&chan->vchan, &head); } static size_t sf_pdma_desc_residue(struct sf_pdma_chan *chan, @@ -220,8 +220,8 @@ static int sf_pdma_terminate_all(struct dma_chan *dchan) chan->desc = NULL; chan->xfer_err = false; vchan_get_all_descriptors(&chan->vchan, &head); - vchan_dma_desc_free_list(&chan->vchan, &head); spin_unlock_irqrestore(&chan->vchan.lock, flags); + vchan_dma_desc_free_list(&chan->vchan, &head); return 0; } diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c index e397a50058c8..bbc2bda3b902 100644 --- a/drivers/dma/sun4i-dma.c +++ b/drivers/dma/sun4i-dma.c @@ -669,43 +669,41 @@ sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len, dma_addr_t src, dest; u32 endpoints; int nr_periods, offset, plength, i; + u8 ram_type, io_mode, linear_mode; if (!is_slave_direction(dir)) { dev_err(chan2dev(chan), "Invalid DMA direction\n"); return NULL; } - if (vchan->is_dedicated) { - /* - * As we are using this just for audio data, we need to use - * normal DMA. There is nothing stopping us from supporting - * dedicated DMA here as well, so if a client comes up and - * requires it, it will be simple to implement it. 
- */ - dev_err(chan2dev(chan), - "Cyclic transfers are only supported on Normal DMA\n"); - return NULL; - } - contract = generate_dma_contract(); if (!contract) return NULL; contract->is_cyclic = 1; - /* Figure out the endpoints and the address we need */ + if (vchan->is_dedicated) { + io_mode = SUN4I_DDMA_ADDR_MODE_IO; + linear_mode = SUN4I_DDMA_ADDR_MODE_LINEAR; + ram_type = SUN4I_DDMA_DRQ_TYPE_SDRAM; + } else { + io_mode = SUN4I_NDMA_ADDR_MODE_IO; + linear_mode = SUN4I_NDMA_ADDR_MODE_LINEAR; + ram_type = SUN4I_NDMA_DRQ_TYPE_SDRAM; + } + if (dir == DMA_MEM_TO_DEV) { src = buf; dest = sconfig->dst_addr; - endpoints = SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM) | - SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) | - SUN4I_DMA_CFG_DST_ADDR_MODE(SUN4I_NDMA_ADDR_MODE_IO); + endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) | + SUN4I_DMA_CFG_DST_ADDR_MODE(io_mode) | + SUN4I_DMA_CFG_SRC_DRQ_TYPE(ram_type); } else { src = sconfig->src_addr; dest = buf; - endpoints = SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) | - SUN4I_DMA_CFG_SRC_ADDR_MODE(SUN4I_NDMA_ADDR_MODE_IO) | - SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM); + endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(ram_type) | + SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) | + SUN4I_DMA_CFG_SRC_ADDR_MODE(io_mode); } /* @@ -747,8 +745,13 @@ sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len, dest = buf + offset; /* Make the promise */ - promise = generate_ndma_promise(chan, src, dest, - plength, sconfig, dir); + if (vchan->is_dedicated) + promise = generate_ddma_promise(chan, src, dest, + plength, sconfig); + else + promise = generate_ndma_promise(chan, src, dest, + plength, sconfig, dir); + if (!promise) { /* TODO: should we free everything? */ return NULL; @@ -885,12 +888,13 @@ static int sun4i_dma_terminate_all(struct dma_chan *chan) } spin_lock_irqsave(&vchan->vc.lock, flags); - vchan_dma_desc_free_list(&vchan->vc, &head); /* Clear these so the vchan is usable again */ vchan->processing = NULL; vchan->pchan = NULL; spin_unlock_irqrestore(&vchan->vc.lock, flags); + vchan_dma_desc_free_list(&vchan->vc, &head); + return 0; } diff --git a/drivers/dma/ti/Kconfig b/drivers/dma/ti/Kconfig index d507c24fbf31..f76e06651f80 100644 --- a/drivers/dma/ti/Kconfig +++ b/drivers/dma/ti/Kconfig @@ -34,5 +34,29 @@ config DMA_OMAP Enable support for the TI sDMA (System DMA or DMA4) controller. This DMA engine is found on OMAP and DRA7xx parts. +config TI_K3_UDMA + bool "Texas Instruments UDMA support" + depends on ARCH_K3 || COMPILE_TEST + depends on TI_SCI_PROTOCOL + depends on TI_SCI_INTA_IRQCHIP + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + select TI_K3_RINGACC + select TI_K3_PSIL + help + Enable support for the TI UDMA (Unified DMA) controller. This + DMA engine is used in AM65x and j721e. + +config TI_K3_UDMA_GLUE_LAYER + bool "Texas Instruments UDMA Glue layer for non DMAengine users" + depends on ARCH_K3 || COMPILE_TEST + depends on TI_K3_UDMA + help + Say y here to support the K3 NAVSS DMA glue interface + If unsure, say N. 
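For context, the glue layer this option builds (k3-udma-glue.c, added further down) is called directly by client drivers such as networking, bypassing the dmaengine request API. A rough TX-side consumer sketch: only the k3_udma_glue_* calls come from this diff; the "tx0" name, the ring sizes, foo_setup_tx() itself, and the elided descriptor setup are illustrative, and other ring parameters are omitted:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma/k3-udma-glue.h>

static int foo_setup_tx(struct device *dev)	/* hypothetical client */
{
	struct k3_udma_glue_tx_channel_cfg cfg = { };
	struct k3_udma_glue_tx_channel *tx_chn;
	int irq, ret;

	cfg.swdata_size = 16;		/* per-descriptor swdata, example value */
	cfg.tx_cfg.size = 128;		/* TX submit ring elements, example value */
	cfg.txcq_cfg.size = 128;	/* TX completion ring elements, example value */

	/* "tx0" must match an entry in this device's dma-names property. */
	tx_chn = k3_udma_glue_request_tx_chn(dev, "tx0", &cfg);
	if (IS_ERR(tx_chn))
		return PTR_ERR(tx_chn);

	irq = k3_udma_glue_tx_get_irq(tx_chn);
	if (irq <= 0) {
		k3_udma_glue_release_tx_chn(tx_chn);
		return irq ? irq : -ENXIO;
	}

	/* ...request_irq(), build cppi5 descriptors, then start the channel... */
	ret = k3_udma_glue_enable_tx_chn(tx_chn);
	if (ret)
		k3_udma_glue_release_tx_chn(tx_chn);

	return ret;
}

Submission and completion then go through k3_udma_glue_push_tx_chn() and k3_udma_glue_pop_tx_chn() on the two rings, as defined later in this diff.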
+ +config TI_K3_PSIL + bool + config TI_DMA_CROSSBAR bool diff --git a/drivers/dma/ti/Makefile b/drivers/dma/ti/Makefile index 113e59ec9c32..9a29a107e374 100644 --- a/drivers/dma/ti/Makefile +++ b/drivers/dma/ti/Makefile @@ -2,4 +2,7 @@ obj-$(CONFIG_TI_CPPI41) += cppi41.o obj-$(CONFIG_TI_EDMA) += edma.o obj-$(CONFIG_DMA_OMAP) += omap-dma.o +obj-$(CONFIG_TI_K3_UDMA) += k3-udma.o +obj-$(CONFIG_TI_K3_UDMA_GLUE_LAYER) += k3-udma-glue.o +obj-$(CONFIG_TI_K3_PSIL) += k3-psil.o k3-psil-am654.o k3-psil-j721e.o obj-$(CONFIG_TI_DMA_CROSSBAR) += dma-crossbar.o diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c index 756a3c951dc7..03a7f647f7b2 100644 --- a/drivers/dma/ti/edma.c +++ b/drivers/dma/ti/edma.c @@ -2289,13 +2289,6 @@ static int edma_probe(struct platform_device *pdev) if (!info) return -ENODEV; - pm_runtime_enable(dev); - ret = pm_runtime_get_sync(dev); - if (ret < 0) { - dev_err(dev, "pm_runtime_get_sync() failed\n"); - return ret; - } - ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); if (ret) return ret; @@ -2326,27 +2319,33 @@ static int edma_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ecc); + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + dev_err(dev, "pm_runtime_get_sync() failed\n"); + pm_runtime_disable(dev); + return ret; + } + /* Get eDMA3 configuration from IP */ ret = edma_setup_from_hw(dev, info, ecc); if (ret) - return ret; + goto err_disable_pm; /* Allocate memory based on the information we got from the IP */ ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels, sizeof(*ecc->slave_chans), GFP_KERNEL); - if (!ecc->slave_chans) - return -ENOMEM; ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots), sizeof(unsigned long), GFP_KERNEL); - if (!ecc->slot_inuse) - return -ENOMEM; ecc->channels_mask = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_channels), sizeof(unsigned long), GFP_KERNEL); - if (!ecc->channels_mask) - return -ENOMEM; + if (!ecc->slave_chans || !ecc->slot_inuse || !ecc->channels_mask) { + ret = -ENOMEM; + goto err_disable_pm; + } /* Mark all channels available initially */ bitmap_fill(ecc->channels_mask, ecc->num_channels); @@ -2388,7 +2387,7 @@ static int edma_probe(struct platform_device *pdev) ecc); if (ret) { dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret); - return ret; + goto err_disable_pm; } ecc->ccint = irq; } @@ -2404,7 +2403,7 @@ static int edma_probe(struct platform_device *pdev) ecc); if (ret) { dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret); - return ret; + goto err_disable_pm; } ecc->ccerrint = irq; } @@ -2412,7 +2411,8 @@ static int edma_probe(struct platform_device *pdev) ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY); if (ecc->dummy_slot < 0) { dev_err(dev, "Can't allocate PaRAM dummy slot\n"); - return ecc->dummy_slot; + ret = ecc->dummy_slot; + goto err_disable_pm; } queue_priority_mapping = info->queue_priority_mapping; @@ -2512,6 +2512,9 @@ static int edma_probe(struct platform_device *pdev) err_reg1: edma_free_slot(ecc, ecc->dummy_slot); +err_disable_pm: + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); return ret; } @@ -2542,6 +2545,8 @@ static int edma_remove(struct platform_device *pdev) if (ecc->dma_memcpy) dma_async_device_unregister(ecc->dma_memcpy); edma_free_slot(ecc, ecc->dummy_slot); + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); return 0; } diff --git a/drivers/dma/ti/k3-psil-am654.c b/drivers/dma/ti/k3-psil-am654.c new file mode 100644 index 000000000000..a896a15908cf --- /dev/null +++ b/drivers/dma/ti/k3-psil-am654.c @@ 
-0,0 +1,175 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + * Author: Peter Ujfalusi <peter.ujfalusi@ti.com> + */ + +#include <linux/kernel.h> + +#include "k3-psil-priv.h" + +#define PSIL_PDMA_XY_TR(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_PDMA_XY, \ + }, \ + } + +#define PSIL_PDMA_XY_PKT(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_PDMA_XY, \ + .pkt_mode = 1, \ + }, \ + } + +#define PSIL_ETHERNET(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_NATIVE, \ + .pkt_mode = 1, \ + .needs_epib = 1, \ + .psd_size = 16, \ + }, \ + } + +#define PSIL_SA2UL(x, tx) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_NATIVE, \ + .pkt_mode = 1, \ + .needs_epib = 1, \ + .psd_size = 64, \ + .notdpkt = tx, \ + }, \ + } + +/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */ +static struct psil_ep am654_src_ep_map[] = { + /* SA2UL */ + PSIL_SA2UL(0x4000, 0), + PSIL_SA2UL(0x4001, 0), + PSIL_SA2UL(0x4002, 0), + PSIL_SA2UL(0x4003, 0), + /* PRU_ICSSG0 */ + PSIL_ETHERNET(0x4100), + PSIL_ETHERNET(0x4101), + PSIL_ETHERNET(0x4102), + PSIL_ETHERNET(0x4103), + /* PRU_ICSSG1 */ + PSIL_ETHERNET(0x4200), + PSIL_ETHERNET(0x4201), + PSIL_ETHERNET(0x4202), + PSIL_ETHERNET(0x4203), + /* PRU_ICSSG2 */ + PSIL_ETHERNET(0x4300), + PSIL_ETHERNET(0x4301), + PSIL_ETHERNET(0x4302), + PSIL_ETHERNET(0x4303), + /* PDMA0 - McASPs */ + PSIL_PDMA_XY_TR(0x4400), + PSIL_PDMA_XY_TR(0x4401), + PSIL_PDMA_XY_TR(0x4402), + /* PDMA1 - SPI0-4 */ + PSIL_PDMA_XY_PKT(0x4500), + PSIL_PDMA_XY_PKT(0x4501), + PSIL_PDMA_XY_PKT(0x4502), + PSIL_PDMA_XY_PKT(0x4503), + PSIL_PDMA_XY_PKT(0x4504), + PSIL_PDMA_XY_PKT(0x4505), + PSIL_PDMA_XY_PKT(0x4506), + PSIL_PDMA_XY_PKT(0x4507), + PSIL_PDMA_XY_PKT(0x4508), + PSIL_PDMA_XY_PKT(0x4509), + PSIL_PDMA_XY_PKT(0x450a), + PSIL_PDMA_XY_PKT(0x450b), + PSIL_PDMA_XY_PKT(0x450c), + PSIL_PDMA_XY_PKT(0x450d), + PSIL_PDMA_XY_PKT(0x450e), + PSIL_PDMA_XY_PKT(0x450f), + PSIL_PDMA_XY_PKT(0x4510), + PSIL_PDMA_XY_PKT(0x4511), + PSIL_PDMA_XY_PKT(0x4512), + PSIL_PDMA_XY_PKT(0x4513), + /* PDMA1 - USART0-2 */ + PSIL_PDMA_XY_PKT(0x4514), + PSIL_PDMA_XY_PKT(0x4515), + PSIL_PDMA_XY_PKT(0x4516), + /* CPSW0 */ + PSIL_ETHERNET(0x7000), + /* MCU_PDMA0 - ADCs */ + PSIL_PDMA_XY_TR(0x7100), + PSIL_PDMA_XY_TR(0x7101), + PSIL_PDMA_XY_TR(0x7102), + PSIL_PDMA_XY_TR(0x7103), + /* MCU_PDMA1 - MCU_SPI0-2 */ + PSIL_PDMA_XY_PKT(0x7200), + PSIL_PDMA_XY_PKT(0x7201), + PSIL_PDMA_XY_PKT(0x7202), + PSIL_PDMA_XY_PKT(0x7203), + PSIL_PDMA_XY_PKT(0x7204), + PSIL_PDMA_XY_PKT(0x7205), + PSIL_PDMA_XY_PKT(0x7206), + PSIL_PDMA_XY_PKT(0x7207), + PSIL_PDMA_XY_PKT(0x7208), + PSIL_PDMA_XY_PKT(0x7209), + PSIL_PDMA_XY_PKT(0x720a), + PSIL_PDMA_XY_PKT(0x720b), + /* MCU_PDMA1 - MCU_USART0 */ + PSIL_PDMA_XY_PKT(0x7212), +}; + +/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */ +static struct psil_ep am654_dst_ep_map[] = { + /* SA2UL */ + PSIL_SA2UL(0xc000, 1), + PSIL_SA2UL(0xc001, 1), + /* PRU_ICSSG0 */ + PSIL_ETHERNET(0xc100), + PSIL_ETHERNET(0xc101), + PSIL_ETHERNET(0xc102), + PSIL_ETHERNET(0xc103), + PSIL_ETHERNET(0xc104), + PSIL_ETHERNET(0xc105), + PSIL_ETHERNET(0xc106), + PSIL_ETHERNET(0xc107), + /* PRU_ICSSG1 */ + PSIL_ETHERNET(0xc200), + PSIL_ETHERNET(0xc201), + PSIL_ETHERNET(0xc202), + PSIL_ETHERNET(0xc203), + PSIL_ETHERNET(0xc204), + PSIL_ETHERNET(0xc205), + PSIL_ETHERNET(0xc206), + PSIL_ETHERNET(0xc207), + /* PRU_ICSSG2 */ + PSIL_ETHERNET(0xc300), + PSIL_ETHERNET(0xc301), + 
PSIL_ETHERNET(0xc302), + PSIL_ETHERNET(0xc303), + PSIL_ETHERNET(0xc304), + PSIL_ETHERNET(0xc305), + PSIL_ETHERNET(0xc306), + PSIL_ETHERNET(0xc307), + /* CPSW0 */ + PSIL_ETHERNET(0xf000), + PSIL_ETHERNET(0xf001), + PSIL_ETHERNET(0xf002), + PSIL_ETHERNET(0xf003), + PSIL_ETHERNET(0xf004), + PSIL_ETHERNET(0xf005), + PSIL_ETHERNET(0xf006), + PSIL_ETHERNET(0xf007), +}; + +struct psil_ep_map am654_ep_map = { + .name = "am654", + .src = am654_src_ep_map, + .src_count = ARRAY_SIZE(am654_src_ep_map), + .dst = am654_dst_ep_map, + .dst_count = ARRAY_SIZE(am654_dst_ep_map), +}; diff --git a/drivers/dma/ti/k3-psil-j721e.c b/drivers/dma/ti/k3-psil-j721e.c new file mode 100644 index 000000000000..e3cfd5f66842 --- /dev/null +++ b/drivers/dma/ti/k3-psil-j721e.c @@ -0,0 +1,222 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + * Author: Peter Ujfalusi <peter.ujfalusi@ti.com> + */ + +#include <linux/kernel.h> + +#include "k3-psil-priv.h" + +#define PSIL_PDMA_XY_TR(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_PDMA_XY, \ + }, \ + } + +#define PSIL_PDMA_XY_PKT(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_PDMA_XY, \ + .pkt_mode = 1, \ + }, \ + } + +#define PSIL_PDMA_MCASP(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_PDMA_XY, \ + .pdma_acc32 = 1, \ + .pdma_burst = 1, \ + }, \ + } + +#define PSIL_ETHERNET(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_NATIVE, \ + .pkt_mode = 1, \ + .needs_epib = 1, \ + .psd_size = 16, \ + }, \ + } + +#define PSIL_SA2UL(x, tx) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_NATIVE, \ + .pkt_mode = 1, \ + .needs_epib = 1, \ + .psd_size = 64, \ + .notdpkt = tx, \ + }, \ + } + +/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */ +static struct psil_ep j721e_src_ep_map[] = { + /* SA2UL */ + PSIL_SA2UL(0x4000, 0), + PSIL_SA2UL(0x4001, 0), + PSIL_SA2UL(0x4002, 0), + PSIL_SA2UL(0x4003, 0), + /* PRU_ICSSG0 */ + PSIL_ETHERNET(0x4100), + PSIL_ETHERNET(0x4101), + PSIL_ETHERNET(0x4102), + PSIL_ETHERNET(0x4103), + /* PRU_ICSSG1 */ + PSIL_ETHERNET(0x4200), + PSIL_ETHERNET(0x4201), + PSIL_ETHERNET(0x4202), + PSIL_ETHERNET(0x4203), + /* PDMA6 (PSIL_PDMA_MCASP_G0) - McASP0-2 */ + PSIL_PDMA_MCASP(0x4400), + PSIL_PDMA_MCASP(0x4401), + PSIL_PDMA_MCASP(0x4402), + /* PDMA7 (PSIL_PDMA_MCASP_G1) - McASP3-11 */ + PSIL_PDMA_MCASP(0x4500), + PSIL_PDMA_MCASP(0x4501), + PSIL_PDMA_MCASP(0x4502), + PSIL_PDMA_MCASP(0x4503), + PSIL_PDMA_MCASP(0x4504), + PSIL_PDMA_MCASP(0x4505), + PSIL_PDMA_MCASP(0x4506), + PSIL_PDMA_MCASP(0x4507), + PSIL_PDMA_MCASP(0x4508), + /* PDMA8 (PDMA_MISC_G0) - SPI0-1 */ + PSIL_PDMA_XY_PKT(0x4600), + PSIL_PDMA_XY_PKT(0x4601), + PSIL_PDMA_XY_PKT(0x4602), + PSIL_PDMA_XY_PKT(0x4603), + PSIL_PDMA_XY_PKT(0x4604), + PSIL_PDMA_XY_PKT(0x4605), + PSIL_PDMA_XY_PKT(0x4606), + PSIL_PDMA_XY_PKT(0x4607), + /* PDMA9 (PDMA_MISC_G1) - SPI2-3 */ + PSIL_PDMA_XY_PKT(0x460c), + PSIL_PDMA_XY_PKT(0x460d), + PSIL_PDMA_XY_PKT(0x460e), + PSIL_PDMA_XY_PKT(0x460f), + PSIL_PDMA_XY_PKT(0x4610), + PSIL_PDMA_XY_PKT(0x4611), + PSIL_PDMA_XY_PKT(0x4612), + PSIL_PDMA_XY_PKT(0x4613), + /* PDMA10 (PDMA_MISC_G2) - SPI4-5 */ + PSIL_PDMA_XY_PKT(0x4618), + PSIL_PDMA_XY_PKT(0x4619), + PSIL_PDMA_XY_PKT(0x461a), + PSIL_PDMA_XY_PKT(0x461b), + PSIL_PDMA_XY_PKT(0x461c), + PSIL_PDMA_XY_PKT(0x461d), + PSIL_PDMA_XY_PKT(0x461e), + PSIL_PDMA_XY_PKT(0x461f), + /* PDMA11 (PDMA_MISC_G3) */ + PSIL_PDMA_XY_PKT(0x4624), + PSIL_PDMA_XY_PKT(0x4625), + 
PSIL_PDMA_XY_PKT(0x4626), + PSIL_PDMA_XY_PKT(0x4627), + PSIL_PDMA_XY_PKT(0x4628), + PSIL_PDMA_XY_PKT(0x4629), + PSIL_PDMA_XY_PKT(0x4630), + PSIL_PDMA_XY_PKT(0x463a), + /* PDMA13 (PDMA_USART_G0) - UART0-1 */ + PSIL_PDMA_XY_PKT(0x4700), + PSIL_PDMA_XY_PKT(0x4701), + /* PDMA14 (PDMA_USART_G1) - UART2-3 */ + PSIL_PDMA_XY_PKT(0x4702), + PSIL_PDMA_XY_PKT(0x4703), + /* PDMA15 (PDMA_USART_G2) - UART4-9 */ + PSIL_PDMA_XY_PKT(0x4704), + PSIL_PDMA_XY_PKT(0x4705), + PSIL_PDMA_XY_PKT(0x4706), + PSIL_PDMA_XY_PKT(0x4707), + PSIL_PDMA_XY_PKT(0x4708), + PSIL_PDMA_XY_PKT(0x4709), + /* CPSW9 */ + PSIL_ETHERNET(0x4a00), + /* CPSW0 */ + PSIL_ETHERNET(0x7000), + /* MCU_PDMA0 (MCU_PDMA_MISC_G0) - SPI0 */ + PSIL_PDMA_XY_PKT(0x7100), + PSIL_PDMA_XY_PKT(0x7101), + PSIL_PDMA_XY_PKT(0x7102), + PSIL_PDMA_XY_PKT(0x7103), + /* MCU_PDMA1 (MCU_PDMA_MISC_G1) - SPI1-2 */ + PSIL_PDMA_XY_PKT(0x7200), + PSIL_PDMA_XY_PKT(0x7201), + PSIL_PDMA_XY_PKT(0x7202), + PSIL_PDMA_XY_PKT(0x7203), + PSIL_PDMA_XY_PKT(0x7204), + PSIL_PDMA_XY_PKT(0x7205), + PSIL_PDMA_XY_PKT(0x7206), + PSIL_PDMA_XY_PKT(0x7207), + /* MCU_PDMA2 (MCU_PDMA_MISC_G2) - UART0 */ + PSIL_PDMA_XY_PKT(0x7300), + /* MCU_PDMA_ADC - ADC0-1 */ + PSIL_PDMA_XY_TR(0x7400), + PSIL_PDMA_XY_TR(0x7401), + PSIL_PDMA_XY_TR(0x7402), + PSIL_PDMA_XY_TR(0x7403), + /* SA2UL */ + PSIL_SA2UL(0x7500, 0), + PSIL_SA2UL(0x7501, 0), +}; + +/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */ +static struct psil_ep j721e_dst_ep_map[] = { + /* SA2UL */ + PSIL_SA2UL(0xc000, 1), + PSIL_SA2UL(0xc001, 1), + /* PRU_ICSSG0 */ + PSIL_ETHERNET(0xc100), + PSIL_ETHERNET(0xc101), + PSIL_ETHERNET(0xc102), + PSIL_ETHERNET(0xc103), + PSIL_ETHERNET(0xc104), + PSIL_ETHERNET(0xc105), + PSIL_ETHERNET(0xc106), + PSIL_ETHERNET(0xc107), + /* PRU_ICSSG1 */ + PSIL_ETHERNET(0xc200), + PSIL_ETHERNET(0xc201), + PSIL_ETHERNET(0xc202), + PSIL_ETHERNET(0xc203), + PSIL_ETHERNET(0xc204), + PSIL_ETHERNET(0xc205), + PSIL_ETHERNET(0xc206), + PSIL_ETHERNET(0xc207), + /* CPSW9 */ + PSIL_ETHERNET(0xca00), + PSIL_ETHERNET(0xca01), + PSIL_ETHERNET(0xca02), + PSIL_ETHERNET(0xca03), + PSIL_ETHERNET(0xca04), + PSIL_ETHERNET(0xca05), + PSIL_ETHERNET(0xca06), + PSIL_ETHERNET(0xca07), + /* CPSW0 */ + PSIL_ETHERNET(0xf000), + PSIL_ETHERNET(0xf001), + PSIL_ETHERNET(0xf002), + PSIL_ETHERNET(0xf003), + PSIL_ETHERNET(0xf004), + PSIL_ETHERNET(0xf005), + PSIL_ETHERNET(0xf006), + PSIL_ETHERNET(0xf007), + /* SA2UL */ + PSIL_SA2UL(0xf500, 1), +}; + +struct psil_ep_map j721e_ep_map = { + .name = "j721e", + .src = j721e_src_ep_map, + .src_count = ARRAY_SIZE(j721e_src_ep_map), + .dst = j721e_dst_ep_map, + .dst_count = ARRAY_SIZE(j721e_dst_ep_map), +}; diff --git a/drivers/dma/ti/k3-psil-priv.h b/drivers/dma/ti/k3-psil-priv.h new file mode 100644 index 000000000000..a1f389ca371e --- /dev/null +++ b/drivers/dma/ti/k3-psil-priv.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + */ + +#ifndef K3_PSIL_PRIV_H_ +#define K3_PSIL_PRIV_H_ + +#include <linux/dma/k3-psil.h> + +struct psil_ep { + u32 thread_id; + struct psil_endpoint_config ep_config; +}; + +/** + * struct psil_ep_map - PSI-L thread ID configuration maps + * @name: Name of the map, set it to the name of the SoC + * @src: Array of source PSI-L thread configurations + * @src_count: Number of entries in the src array + * @dst: Array of destination PSI-L thread configurations + * @dst_count: Number of entries in the dst array + * + * In case of symmetric configuration for a matching src/dst thread (for 
example
+ * 0x4400 and 0xc400) only the src
+ * configuration can be present. If no dst
+ * configuration is found, the code will look for (dst_thread_id & ~0x8000)
+ * to find the symmetric match.
+ */
+struct psil_ep_map {
+	char *name;
+	struct psil_ep *src;
+	int src_count;
+	struct psil_ep *dst;
+	int dst_count;
+};
+
+struct psil_endpoint_config *psil_get_ep_config(u32 thread_id);
+
+/* SoC PSI-L endpoint maps */
+extern struct psil_ep_map am654_ep_map;
+extern struct psil_ep_map j721e_ep_map;
+
+#endif /* K3_PSIL_PRIV_H_ */
diff --git a/drivers/dma/ti/k3-psil.c b/drivers/dma/ti/k3-psil.c
new file mode 100644
index 000000000000..d7b965049ccb
--- /dev/null
+++ b/drivers/dma/ti/k3-psil.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+
+#include "k3-psil-priv.h"
+
+static DEFINE_MUTEX(ep_map_mutex);
+static struct psil_ep_map *soc_ep_map;
+
+struct psil_endpoint_config *psil_get_ep_config(u32 thread_id)
+{
+	int i;
+
+	mutex_lock(&ep_map_mutex);
+	if (!soc_ep_map) {
+		if (of_machine_is_compatible("ti,am654")) {
+			soc_ep_map = &am654_ep_map;
+		} else if (of_machine_is_compatible("ti,j721e")) {
+			soc_ep_map = &j721e_ep_map;
+		} else {
+			pr_err("PSIL: No compatible machine found for map\n");
+			mutex_unlock(&ep_map_mutex);
+			return ERR_PTR(-ENOTSUPP);
+		}
+		pr_debug("%s: Using map for %s\n", __func__, soc_ep_map->name);
+	}
+	mutex_unlock(&ep_map_mutex);
+
+	if (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET && soc_ep_map->dst) {
+		/* check in destination thread map */
+		for (i = 0; i < soc_ep_map->dst_count; i++) {
+			if (soc_ep_map->dst[i].thread_id == thread_id)
+				return &soc_ep_map->dst[i].ep_config;
+		}
+	}
+
+	thread_id &= ~K3_PSIL_DST_THREAD_ID_OFFSET;
+	if (soc_ep_map->src) {
+		for (i = 0; i < soc_ep_map->src_count; i++) {
+			if (soc_ep_map->src[i].thread_id == thread_id)
+				return &soc_ep_map->src[i].ep_config;
+		}
+	}
+
+	return ERR_PTR(-ENOENT);
+}
+EXPORT_SYMBOL_GPL(psil_get_ep_config);
+
+int psil_set_new_ep_config(struct device *dev, const char *name,
+			   struct psil_endpoint_config *ep_config)
+{
+	struct psil_endpoint_config *dst_ep_config;
+	struct of_phandle_args dma_spec;
+	u32 thread_id;
+	int index;
+
+	if (!dev || !dev->of_node)
+		return -EINVAL;
+
+	index = of_property_match_string(dev->of_node, "dma-names", name);
+	if (index < 0)
+		return index;
+
+	if (of_parse_phandle_with_args(dev->of_node, "dmas", "#dma-cells",
+				       index, &dma_spec))
+		return -ENOENT;
+
+	thread_id = dma_spec.args[0];
+
+	dst_ep_config = psil_get_ep_config(thread_id);
+	if (IS_ERR(dst_ep_config)) {
+		pr_err("PSIL: thread ID 0x%04x not defined in map\n",
+		       thread_id);
+		of_node_put(dma_spec.np);
+		return PTR_ERR(dst_ep_config);
+	}
+
+	memcpy(dst_ep_config, ep_config, sizeof(*dst_ep_config));
+
+	of_node_put(dma_spec.np);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(psil_set_new_ep_config);
diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c
new file mode 100644
index 000000000000..c1511298ece2
--- /dev/null
+++ b/drivers/dma/ti/k3-udma-glue.c
@@ -0,0 +1,1198 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * K3 NAVSS DMA glue interface
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ *
+ */
+
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include
<linux/of.h> +#include <linux/platform_device.h> +#include <linux/soc/ti/k3-ringacc.h> +#include <linux/dma/ti-cppi5.h> +#include <linux/dma/k3-udma-glue.h> + +#include "k3-udma.h" +#include "k3-psil-priv.h" + +struct k3_udma_glue_common { + struct device *dev; + struct udma_dev *udmax; + const struct udma_tisci_rm *tisci_rm; + struct k3_ringacc *ringacc; + u32 src_thread; + u32 dst_thread; + + u32 hdesc_size; + bool epib; + u32 psdata_size; + u32 swdata_size; +}; + +struct k3_udma_glue_tx_channel { + struct k3_udma_glue_common common; + + struct udma_tchan *udma_tchanx; + int udma_tchan_id; + + struct k3_ring *ringtx; + struct k3_ring *ringtxcq; + + bool psil_paired; + + int virq; + + atomic_t free_pkts; + bool tx_pause_on_err; + bool tx_filt_einfo; + bool tx_filt_pswords; + bool tx_supr_tdpkt; +}; + +struct k3_udma_glue_rx_flow { + struct udma_rflow *udma_rflow; + int udma_rflow_id; + struct k3_ring *ringrx; + struct k3_ring *ringrxfdq; + + int virq; +}; + +struct k3_udma_glue_rx_channel { + struct k3_udma_glue_common common; + + struct udma_rchan *udma_rchanx; + int udma_rchan_id; + bool remote; + + bool psil_paired; + + u32 swdata_size; + int flow_id_base; + + struct k3_udma_glue_rx_flow *flows; + u32 flow_num; + u32 flows_ready; +}; + +#define K3_UDMAX_TDOWN_TIMEOUT_US 1000 + +static int of_k3_udma_glue_parse(struct device_node *udmax_np, + struct k3_udma_glue_common *common) +{ + common->ringacc = of_k3_ringacc_get_by_phandle(udmax_np, + "ti,ringacc"); + if (IS_ERR(common->ringacc)) + return PTR_ERR(common->ringacc); + + common->udmax = of_xudma_dev_get(udmax_np, NULL); + if (IS_ERR(common->udmax)) + return PTR_ERR(common->udmax); + + common->tisci_rm = xudma_dev_get_tisci_rm(common->udmax); + + return 0; +} + +static int of_k3_udma_glue_parse_chn(struct device_node *chn_np, + const char *name, struct k3_udma_glue_common *common, + bool tx_chn) +{ + struct psil_endpoint_config *ep_config; + struct of_phandle_args dma_spec; + u32 thread_id; + int ret = 0; + int index; + + if (unlikely(!name)) + return -EINVAL; + + index = of_property_match_string(chn_np, "dma-names", name); + if (index < 0) + return index; + + if (of_parse_phandle_with_args(chn_np, "dmas", "#dma-cells", index, + &dma_spec)) + return -ENOENT; + + thread_id = dma_spec.args[0]; + + if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) { + ret = -EINVAL; + goto out_put_spec; + } + + if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) { + ret = -EINVAL; + goto out_put_spec; + } + + /* get psil endpoint config */ + ep_config = psil_get_ep_config(thread_id); + if (IS_ERR(ep_config)) { + dev_err(common->dev, + "No configuration for psi-l thread 0x%04x\n", + thread_id); + ret = PTR_ERR(ep_config); + goto out_put_spec; + } + + common->epib = ep_config->needs_epib; + common->psdata_size = ep_config->psd_size; + + if (tx_chn) + common->dst_thread = thread_id; + else + common->src_thread = thread_id; + + ret = of_k3_udma_glue_parse(dma_spec.np, common); + +out_put_spec: + of_node_put(dma_spec.np); + return ret; +}; + +static void k3_udma_glue_dump_tx_chn(struct k3_udma_glue_tx_channel *tx_chn) +{ + struct device *dev = tx_chn->common.dev; + + dev_dbg(dev, "dump_tx_chn:\n" + "udma_tchan_id: %d\n" + "src_thread: %08x\n" + "dst_thread: %08x\n", + tx_chn->udma_tchan_id, + tx_chn->common.src_thread, + tx_chn->common.dst_thread); +} + +static void k3_udma_glue_dump_tx_rt_chn(struct k3_udma_glue_tx_channel *chn, + char *mark) +{ + struct device *dev = chn->common.dev; + + dev_dbg(dev, "=== dump ===> %s\n", mark); + 
dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_CTL_REG, + xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG)); + dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_PEER_RT_EN_REG, + xudma_tchanrt_read(chn->udma_tchanx, + UDMA_TCHAN_RT_PEER_RT_EN_REG)); + dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_PCNT_REG, + xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_PCNT_REG)); + dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_BCNT_REG, + xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_BCNT_REG)); + dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_SBCNT_REG, + xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_SBCNT_REG)); +} + +static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn) +{ + const struct udma_tisci_rm *tisci_rm = tx_chn->common.tisci_rm; + struct ti_sci_msg_rm_udmap_tx_ch_cfg req; + + memset(&req, 0, sizeof(req)); + + req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID; + req.nav_id = tisci_rm->tisci_dev_id; + req.index = tx_chn->udma_tchan_id; + if (tx_chn->tx_pause_on_err) + req.tx_pause_on_err = 1; + if (tx_chn->tx_filt_einfo) + req.tx_filt_einfo = 1; + if (tx_chn->tx_filt_pswords) + req.tx_filt_pswords = 1; + req.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR; + if (tx_chn->tx_supr_tdpkt) + req.tx_supr_tdpkt = 1; + req.tx_fetch_size = tx_chn->common.hdesc_size >> 2; + req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq); + + return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req); +} + +struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev, + const char *name, struct k3_udma_glue_tx_channel_cfg *cfg) +{ + struct k3_udma_glue_tx_channel *tx_chn; + int ret; + + tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL); + if (!tx_chn) + return ERR_PTR(-ENOMEM); + + tx_chn->common.dev = dev; + tx_chn->common.swdata_size = cfg->swdata_size; + tx_chn->tx_pause_on_err = cfg->tx_pause_on_err; + tx_chn->tx_filt_einfo = cfg->tx_filt_einfo; + tx_chn->tx_filt_pswords = cfg->tx_filt_pswords; + tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt; + + /* parse of udmap channel */ + ret = of_k3_udma_glue_parse_chn(dev->of_node, name, + &tx_chn->common, true); + if (ret) + goto err; + + tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib, + tx_chn->common.psdata_size, + tx_chn->common.swdata_size); + + /* request and cfg UDMAP TX channel */ + tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax, -1); + if (IS_ERR(tx_chn->udma_tchanx)) { + ret = PTR_ERR(tx_chn->udma_tchanx); + dev_err(dev, "UDMAX tchanx get err %d\n", ret); + goto err; + } + tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx); + + atomic_set(&tx_chn->free_pkts, cfg->txcq_cfg.size); + + /* request and cfg rings */ + tx_chn->ringtx = k3_ringacc_request_ring(tx_chn->common.ringacc, + tx_chn->udma_tchan_id, 0); + if (!tx_chn->ringtx) { + ret = -ENODEV; + dev_err(dev, "Failed to get TX ring %u\n", + tx_chn->udma_tchan_id); + goto err; + } + + tx_chn->ringtxcq = k3_ringacc_request_ring(tx_chn->common.ringacc, + -1, 0); + if (!tx_chn->ringtxcq) { + ret = -ENODEV; + dev_err(dev, "Failed to get TXCQ ring\n"); + goto err; + } + + ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg); + if (ret) { + dev_err(dev, "Failed to cfg ringtx 
%d\n", ret); + goto err; + } + + ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg); + if (ret) { + dev_err(dev, "Failed to cfg ringtx %d\n", ret); + goto err; + } + + /* request and cfg psi-l */ + tx_chn->common.src_thread = + xudma_dev_get_psil_base(tx_chn->common.udmax) + + tx_chn->udma_tchan_id; + + ret = k3_udma_glue_cfg_tx_chn(tx_chn); + if (ret) { + dev_err(dev, "Failed to cfg tchan %d\n", ret); + goto err; + } + + ret = xudma_navss_psil_pair(tx_chn->common.udmax, + tx_chn->common.src_thread, + tx_chn->common.dst_thread); + if (ret) { + dev_err(dev, "PSI-L request err %d\n", ret); + goto err; + } + + tx_chn->psil_paired = true; + + /* reset TX RT registers */ + k3_udma_glue_disable_tx_chn(tx_chn); + + k3_udma_glue_dump_tx_chn(tx_chn); + + return tx_chn; + +err: + k3_udma_glue_release_tx_chn(tx_chn); + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn); + +void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn) +{ + if (tx_chn->psil_paired) { + xudma_navss_psil_unpair(tx_chn->common.udmax, + tx_chn->common.src_thread, + tx_chn->common.dst_thread); + tx_chn->psil_paired = false; + } + + if (!IS_ERR_OR_NULL(tx_chn->udma_tchanx)) + xudma_tchan_put(tx_chn->common.udmax, + tx_chn->udma_tchanx); + + if (tx_chn->ringtxcq) + k3_ringacc_ring_free(tx_chn->ringtxcq); + + if (tx_chn->ringtx) + k3_ringacc_ring_free(tx_chn->ringtx); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_release_tx_chn); + +int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn, + struct cppi5_host_desc_t *desc_tx, + dma_addr_t desc_dma) +{ + u32 ringtxcq_id; + + if (!atomic_add_unless(&tx_chn->free_pkts, -1, 0)) + return -ENOMEM; + + ringtxcq_id = k3_ringacc_get_ring_id(tx_chn->ringtxcq); + cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, ringtxcq_id); + + return k3_ringacc_ring_push(tx_chn->ringtx, &desc_dma); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_push_tx_chn); + +int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn, + dma_addr_t *desc_dma) +{ + int ret; + + ret = k3_ringacc_ring_pop(tx_chn->ringtxcq, desc_dma); + if (!ret) + atomic_inc(&tx_chn->free_pkts); + + return ret; +} +EXPORT_SYMBOL_GPL(k3_udma_glue_pop_tx_chn); + +int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn) +{ + u32 txrt_ctl; + + txrt_ctl = UDMA_PEER_RT_EN_ENABLE; + xudma_tchanrt_write(tx_chn->udma_tchanx, + UDMA_TCHAN_RT_PEER_RT_EN_REG, + txrt_ctl); + + txrt_ctl = xudma_tchanrt_read(tx_chn->udma_tchanx, + UDMA_TCHAN_RT_CTL_REG); + txrt_ctl |= UDMA_CHAN_RT_CTL_EN; + xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG, + txrt_ctl); + + k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn en"); + return 0; +} +EXPORT_SYMBOL_GPL(k3_udma_glue_enable_tx_chn); + +void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn) +{ + k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis1"); + + xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG, 0); + + xudma_tchanrt_write(tx_chn->udma_tchanx, + UDMA_TCHAN_RT_PEER_RT_EN_REG, 0); + k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis2"); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_disable_tx_chn); + +void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn, + bool sync) +{ + int i = 0; + u32 val; + + k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown1"); + + xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_EN | UDMA_CHAN_RT_CTL_TDOWN); + + val = xudma_tchanrt_read(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG); + + while (sync && (val & UDMA_CHAN_RT_CTL_EN)) { + val = 
xudma_tchanrt_read(tx_chn->udma_tchanx, + UDMA_TCHAN_RT_CTL_REG); + udelay(1); + if (i > K3_UDMAX_TDOWN_TIMEOUT_US) { + dev_err(tx_chn->common.dev, "TX tdown timeout\n"); + break; + } + i++; + } + + val = xudma_tchanrt_read(tx_chn->udma_tchanx, + UDMA_TCHAN_RT_PEER_RT_EN_REG); + if (sync && (val & UDMA_PEER_RT_EN_ENABLE)) + dev_err(tx_chn->common.dev, "TX tdown peer not stopped\n"); + k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown2"); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_tx_chn); + +void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn, + void *data, + void (*cleanup)(void *data, dma_addr_t desc_dma)) +{ + dma_addr_t desc_dma; + int occ_tx, i, ret; + + /* reset TXCQ as it is not input for udma - expected to be empty */ + if (tx_chn->ringtxcq) + k3_ringacc_ring_reset(tx_chn->ringtxcq); + + /* + * TXQ reset need to be special way as it is input for udma and its + * state cached by udma, so: + * 1) save TXQ occ + * 2) clean up TXQ and call callback .cleanup() for each desc + * 3) reset TXQ in a special way + */ + occ_tx = k3_ringacc_ring_get_occ(tx_chn->ringtx); + dev_dbg(tx_chn->common.dev, "TX reset occ_tx %u\n", occ_tx); + + for (i = 0; i < occ_tx; i++) { + ret = k3_ringacc_ring_pop(tx_chn->ringtx, &desc_dma); + if (ret) { + dev_err(tx_chn->common.dev, "TX reset pop %d\n", ret); + break; + } + cleanup(data, desc_dma); + } + + k3_ringacc_ring_reset_dma(tx_chn->ringtx, occ_tx); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_reset_tx_chn); + +u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn) +{ + return tx_chn->common.hdesc_size; +} +EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_hdesc_size); + +u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn) +{ + return k3_ringacc_get_ring_id(tx_chn->ringtxcq); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_txcq_id); + +int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn) +{ + tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq); + + return tx_chn->virq; +} +EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_irq); + +static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn) +{ + const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm; + struct ti_sci_msg_rm_udmap_rx_ch_cfg req; + int ret; + + memset(&req, 0, sizeof(req)); + + req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID; + + req.nav_id = tisci_rm->tisci_dev_id; + req.index = rx_chn->udma_rchan_id; + req.rx_fetch_size = rx_chn->common.hdesc_size >> 2; + /* + * TODO: we can't support rxcq_qnum/RCHAN[a]_RCQ cfg with current sysfw + * and udmax impl, so just configure it to invalid value. 
+ * req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx); + */ + req.rxcq_qnum = 0xFFFF; + if (rx_chn->flow_num && rx_chn->flow_id_base != rx_chn->udma_rchan_id) { + /* Default flow + extra ones */ + req.flowid_start = rx_chn->flow_id_base; + req.flowid_cnt = rx_chn->flow_num; + } + req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR; + + ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req); + if (ret) + dev_err(rx_chn->common.dev, "rchan%d cfg failed %d\n", + rx_chn->udma_rchan_id, ret); + + return ret; +} + +static void k3_udma_glue_release_rx_flow(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_num) +{ + struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num]; + + if (IS_ERR_OR_NULL(flow->udma_rflow)) + return; + + if (flow->ringrxfdq) + k3_ringacc_ring_free(flow->ringrxfdq); + + if (flow->ringrx) + k3_ringacc_ring_free(flow->ringrx); + + xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow); + flow->udma_rflow = NULL; + rx_chn->flows_ready--; +} + +static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_idx, + struct k3_udma_glue_rx_flow_cfg *flow_cfg) +{ + struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx]; + const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm; + struct device *dev = rx_chn->common.dev; + struct ti_sci_msg_rm_udmap_flow_cfg req; + int rx_ring_id; + int rx_ringfdq_id; + int ret = 0; + + flow->udma_rflow = xudma_rflow_get(rx_chn->common.udmax, + flow->udma_rflow_id); + if (IS_ERR(flow->udma_rflow)) { + ret = PTR_ERR(flow->udma_rflow); + dev_err(dev, "UDMAX rflow get err %d\n", ret); + goto err; + } + + if (flow->udma_rflow_id != xudma_rflow_get_id(flow->udma_rflow)) { + xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow); + return -ENODEV; + } + + /* request and cfg rings */ + flow->ringrx = k3_ringacc_request_ring(rx_chn->common.ringacc, + flow_cfg->ring_rxq_id, 0); + if (!flow->ringrx) { + ret = -ENODEV; + dev_err(dev, "Failed to get RX ring\n"); + goto err; + } + + flow->ringrxfdq = k3_ringacc_request_ring(rx_chn->common.ringacc, + flow_cfg->ring_rxfdq0_id, 0); + if (!flow->ringrxfdq) { + ret = -ENODEV; + dev_err(dev, "Failed to get RXFDQ ring\n"); + goto err; + } + + ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg); + if (ret) { + dev_err(dev, "Failed to cfg ringrx %d\n", ret); + goto err; + } + + ret = k3_ringacc_ring_cfg(flow->ringrxfdq, &flow_cfg->rxfdq_cfg); + if (ret) { + dev_err(dev, "Failed to cfg ringrxfdq %d\n", ret); + goto err; + } + + if (rx_chn->remote) { + rx_ring_id = TI_SCI_RESOURCE_NULL; + rx_ringfdq_id = TI_SCI_RESOURCE_NULL; + } else { + rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx); + rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq); + } + + memset(&req, 0, sizeof(req)); + + req.valid_params = + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID; + req.nav_id = tisci_rm->tisci_dev_id; + 
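+	/*
+	 * A single FDQ ring backs all four free descriptor queue slots of
+	 * the flow (FDQ0..FDQ3) below, i.e. size based free queue selection
+	 * is not used and every packet size is served from the same free
+	 * descriptor ring.
+	 */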
req.flow_index = flow->udma_rflow_id; + if (rx_chn->common.epib) + req.rx_einfo_present = 1; + if (rx_chn->common.psdata_size) + req.rx_psinfo_present = 1; + if (flow_cfg->rx_error_handling) + req.rx_error_handling = 1; + req.rx_desc_type = 0; + req.rx_dest_qnum = rx_ring_id; + req.rx_src_tag_hi_sel = 0; + req.rx_src_tag_lo_sel = flow_cfg->src_tag_lo_sel; + req.rx_dest_tag_hi_sel = 0; + req.rx_dest_tag_lo_sel = 0; + req.rx_fdq0_sz0_qnum = rx_ringfdq_id; + req.rx_fdq1_qnum = rx_ringfdq_id; + req.rx_fdq2_qnum = rx_ringfdq_id; + req.rx_fdq3_qnum = rx_ringfdq_id; + + ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req); + if (ret) { + dev_err(dev, "flow%d config failed: %d\n", flow->udma_rflow_id, + ret); + goto err; + } + + rx_chn->flows_ready++; + dev_dbg(dev, "flow%d config done. ready:%d\n", + flow->udma_rflow_id, rx_chn->flows_ready); + + return 0; +err: + k3_udma_glue_release_rx_flow(rx_chn, flow_idx); + return ret; +} + +static void k3_udma_glue_dump_rx_chn(struct k3_udma_glue_rx_channel *chn) +{ + struct device *dev = chn->common.dev; + + dev_dbg(dev, "dump_rx_chn:\n" + "udma_rchan_id: %d\n" + "src_thread: %08x\n" + "dst_thread: %08x\n" + "epib: %d\n" + "hdesc_size: %u\n" + "psdata_size: %u\n" + "swdata_size: %u\n" + "flow_id_base: %d\n" + "flow_num: %d\n", + chn->udma_rchan_id, + chn->common.src_thread, + chn->common.dst_thread, + chn->common.epib, + chn->common.hdesc_size, + chn->common.psdata_size, + chn->common.swdata_size, + chn->flow_id_base, + chn->flow_num); +} + +static void k3_udma_glue_dump_rx_rt_chn(struct k3_udma_glue_rx_channel *chn, + char *mark) +{ + struct device *dev = chn->common.dev; + + dev_dbg(dev, "=== dump ===> %s\n", mark); + + dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_CTL_REG, + xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG)); + dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_PEER_RT_EN_REG, + xudma_rchanrt_read(chn->udma_rchanx, + UDMA_RCHAN_RT_PEER_RT_EN_REG)); + dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_PCNT_REG, + xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_PCNT_REG)); + dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_BCNT_REG, + xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_BCNT_REG)); + dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_SBCNT_REG, + xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_SBCNT_REG)); +} + +static int +k3_udma_glue_allocate_rx_flows(struct k3_udma_glue_rx_channel *rx_chn, + struct k3_udma_glue_rx_channel_cfg *cfg) +{ + int ret; + + /* default rflow */ + if (cfg->flow_id_use_rxchan_id) + return 0; + + /* not a GP rflows */ + if (rx_chn->flow_id_base != -1 && + !xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base)) + return 0; + + /* Allocate range of GP rflows */ + ret = xudma_alloc_gp_rflow_range(rx_chn->common.udmax, + rx_chn->flow_id_base, + rx_chn->flow_num); + if (ret < 0) { + dev_err(rx_chn->common.dev, "UDMAX reserve_rflow %d cnt:%d err: %d\n", + rx_chn->flow_id_base, rx_chn->flow_num, ret); + return ret; + } + rx_chn->flow_id_base = ret; + + return 0; +} + +static struct k3_udma_glue_rx_channel * +k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name, + struct k3_udma_glue_rx_channel_cfg *cfg) +{ + struct k3_udma_glue_rx_channel *rx_chn; + int ret, i; + + if (cfg->flow_id_num <= 0) + return ERR_PTR(-EINVAL); + + if (cfg->flow_id_num != 1 && + (cfg->def_flow_cfg || cfg->flow_id_use_rxchan_id)) + return ERR_PTR(-EINVAL); + + rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL); + if (!rx_chn) + return ERR_PTR(-ENOMEM); + + rx_chn->common.dev = dev; + 
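+	/*
+	 * Setup order for a local RX channel: parse the "dmas"/"dma-names"
+	 * entries, size the CPPI5 host descriptor, reserve a UDMAP RX
+	 * channel plus its flow range, configure the channel (and the
+	 * default flow, if any) through TI-SCI and finally pair the PSI-L
+	 * threads.
+	 */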
rx_chn->common.swdata_size = cfg->swdata_size; + rx_chn->remote = false; + + /* parse of udmap channel */ + ret = of_k3_udma_glue_parse_chn(dev->of_node, name, + &rx_chn->common, false); + if (ret) + goto err; + + rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib, + rx_chn->common.psdata_size, + rx_chn->common.swdata_size); + + /* request and cfg UDMAP RX channel */ + rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax, -1); + if (IS_ERR(rx_chn->udma_rchanx)) { + ret = PTR_ERR(rx_chn->udma_rchanx); + dev_err(dev, "UDMAX rchanx get err %d\n", ret); + goto err; + } + rx_chn->udma_rchan_id = xudma_rchan_get_id(rx_chn->udma_rchanx); + + rx_chn->flow_num = cfg->flow_id_num; + rx_chn->flow_id_base = cfg->flow_id_base; + + /* Use RX channel id as flow id: target dev can't generate flow_id */ + if (cfg->flow_id_use_rxchan_id) + rx_chn->flow_id_base = rx_chn->udma_rchan_id; + + rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num, + sizeof(*rx_chn->flows), GFP_KERNEL); + if (!rx_chn->flows) { + ret = -ENOMEM; + goto err; + } + + ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg); + if (ret) + goto err; + + for (i = 0; i < rx_chn->flow_num; i++) + rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i; + + /* request and cfg psi-l */ + rx_chn->common.dst_thread = + xudma_dev_get_psil_base(rx_chn->common.udmax) + + rx_chn->udma_rchan_id; + + ret = k3_udma_glue_cfg_rx_chn(rx_chn); + if (ret) { + dev_err(dev, "Failed to cfg rchan %d\n", ret); + goto err; + } + + /* init default RX flow only if flow_num = 1 */ + if (cfg->def_flow_cfg) { + ret = k3_udma_glue_cfg_rx_flow(rx_chn, 0, cfg->def_flow_cfg); + if (ret) + goto err; + } + + ret = xudma_navss_psil_pair(rx_chn->common.udmax, + rx_chn->common.src_thread, + rx_chn->common.dst_thread); + if (ret) { + dev_err(dev, "PSI-L request err %d\n", ret); + goto err; + } + + rx_chn->psil_paired = true; + + /* reset RX RT registers */ + k3_udma_glue_disable_rx_chn(rx_chn); + + k3_udma_glue_dump_rx_chn(rx_chn); + + return rx_chn; + +err: + k3_udma_glue_release_rx_chn(rx_chn); + return ERR_PTR(ret); +} + +static struct k3_udma_glue_rx_channel * +k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name, + struct k3_udma_glue_rx_channel_cfg *cfg) +{ + struct k3_udma_glue_rx_channel *rx_chn; + int ret, i; + + if (cfg->flow_id_num <= 0 || + cfg->flow_id_use_rxchan_id || + cfg->def_flow_cfg || + cfg->flow_id_base < 0) + return ERR_PTR(-EINVAL); + + /* + * Remote RX channel is under control of Remote CPU core, so + * Linux can only request and manipulate by dedicated RX flows + */ + + rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL); + if (!rx_chn) + return ERR_PTR(-ENOMEM); + + rx_chn->common.dev = dev; + rx_chn->common.swdata_size = cfg->swdata_size; + rx_chn->remote = true; + rx_chn->udma_rchan_id = -1; + rx_chn->flow_num = cfg->flow_id_num; + rx_chn->flow_id_base = cfg->flow_id_base; + rx_chn->psil_paired = false; + + /* parse of udmap channel */ + ret = of_k3_udma_glue_parse_chn(dev->of_node, name, + &rx_chn->common, false); + if (ret) + goto err; + + rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib, + rx_chn->common.psdata_size, + rx_chn->common.swdata_size); + + rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num, + sizeof(*rx_chn->flows), GFP_KERNEL); + if (!rx_chn->flows) { + ret = -ENOMEM; + goto err; + } + + ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg); + if (ret) + goto err; + + for (i = 0; i < rx_chn->flow_num; i++) + rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i; + + 
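+	/*
+	 * No rchan reservation, TI-SCI channel configuration or PSI-L
+	 * pairing happens in the remote case: the remote core owns the
+	 * channel, so only the flows and their rings are set up here.
+	 */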
k3_udma_glue_dump_rx_chn(rx_chn); + + return rx_chn; + +err: + k3_udma_glue_release_rx_chn(rx_chn); + return ERR_PTR(ret); +} + +struct k3_udma_glue_rx_channel * +k3_udma_glue_request_rx_chn(struct device *dev, const char *name, + struct k3_udma_glue_rx_channel_cfg *cfg) +{ + if (cfg->remote) + return k3_udma_glue_request_remote_rx_chn(dev, name, cfg); + else + return k3_udma_glue_request_rx_chn_priv(dev, name, cfg); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_request_rx_chn); + +void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn) +{ + int i; + + if (IS_ERR_OR_NULL(rx_chn->common.udmax)) + return; + + if (rx_chn->psil_paired) { + xudma_navss_psil_unpair(rx_chn->common.udmax, + rx_chn->common.src_thread, + rx_chn->common.dst_thread); + rx_chn->psil_paired = false; + } + + for (i = 0; i < rx_chn->flow_num; i++) + k3_udma_glue_release_rx_flow(rx_chn, i); + + if (xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base)) + xudma_free_gp_rflow_range(rx_chn->common.udmax, + rx_chn->flow_id_base, + rx_chn->flow_num); + + if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx)) + xudma_rchan_put(rx_chn->common.udmax, + rx_chn->udma_rchanx); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_release_rx_chn); + +int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_idx, + struct k3_udma_glue_rx_flow_cfg *flow_cfg) +{ + if (flow_idx >= rx_chn->flow_num) + return -EINVAL; + + return k3_udma_glue_cfg_rx_flow(rx_chn, flow_idx, flow_cfg); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_init); + +u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_idx) +{ + struct k3_udma_glue_rx_flow *flow; + + if (flow_idx >= rx_chn->flow_num) + return -EINVAL; + + flow = &rx_chn->flows[flow_idx]; + + return k3_ringacc_get_ring_id(flow->ringrxfdq); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_get_fdq_id); + +u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn) +{ + return rx_chn->flow_id_base; +} +EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_flow_id_base); + +int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_idx) +{ + struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx]; + const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm; + struct device *dev = rx_chn->common.dev; + struct ti_sci_msg_rm_udmap_flow_cfg req; + int rx_ring_id; + int rx_ringfdq_id; + int ret = 0; + + if (!rx_chn->remote) + return -EINVAL; + + rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx); + rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq); + + memset(&req, 0, sizeof(req)); + + req.valid_params = + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID; + req.nav_id = tisci_rm->tisci_dev_id; + req.flow_index = flow->udma_rflow_id; + req.rx_dest_qnum = rx_ring_id; + req.rx_fdq0_sz0_qnum = rx_ringfdq_id; + req.rx_fdq1_qnum = rx_ringfdq_id; + req.rx_fdq2_qnum = rx_ringfdq_id; + req.rx_fdq3_qnum = rx_ringfdq_id; + + ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req); + if (ret) { + dev_err(dev, "flow%d enable failed: %d\n", flow->udma_rflow_id, + ret); + } + + return ret; +} +EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_enable); + +int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_idx) +{ + struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx]; + const struct udma_tisci_rm *tisci_rm = 
rx_chn->common.tisci_rm;
+	struct device *dev = rx_chn->common.dev;
+	struct ti_sci_msg_rm_udmap_flow_cfg req;
+	int ret = 0;
+
+	if (!rx_chn->remote)
+		return -EINVAL;
+
+	memset(&req, 0, sizeof(req));
+	req.valid_params =
+			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
+			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
+			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
+			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
+			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
+	req.nav_id = tisci_rm->tisci_dev_id;
+	req.flow_index = flow->udma_rflow_id;
+	req.rx_dest_qnum = TI_SCI_RESOURCE_NULL;
+	req.rx_fdq0_sz0_qnum = TI_SCI_RESOURCE_NULL;
+	req.rx_fdq1_qnum = TI_SCI_RESOURCE_NULL;
+	req.rx_fdq2_qnum = TI_SCI_RESOURCE_NULL;
+	req.rx_fdq3_qnum = TI_SCI_RESOURCE_NULL;
+
+	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
+	if (ret) {
+		dev_err(dev, "flow%d disable failed: %d\n", flow->udma_rflow_id,
+			ret);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_disable);
+
+int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
+{
+	u32 rxrt_ctl;
+
+	if (rx_chn->remote)
+		return -EINVAL;
+
+	if (rx_chn->flows_ready < rx_chn->flow_num)
+		return -EINVAL;
+
+	rxrt_ctl = xudma_rchanrt_read(rx_chn->udma_rchanx,
+				      UDMA_RCHAN_RT_CTL_REG);
+	rxrt_ctl |= UDMA_CHAN_RT_CTL_EN;
+	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG,
+			    rxrt_ctl);
+
+	xudma_rchanrt_write(rx_chn->udma_rchanx,
+			    UDMA_RCHAN_RT_PEER_RT_EN_REG,
+			    UDMA_PEER_RT_EN_ENABLE);
+
+	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt en");
+	return 0;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_enable_rx_chn);
+
+void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
+{
+	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis1");
+
+	xudma_rchanrt_write(rx_chn->udma_rchanx,
+			    UDMA_RCHAN_RT_PEER_RT_EN_REG,
+			    0);
+	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG, 0);
+
+	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis2");
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_disable_rx_chn);
+
+void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+			       bool sync)
+{
+	int i = 0;
+	u32 val;
+
+	if (rx_chn->remote)
+		return;
+
+	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown1");
+
+	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_RCHAN_RT_PEER_RT_EN_REG,
+			    UDMA_PEER_RT_EN_ENABLE | UDMA_PEER_RT_EN_TEARDOWN);
+
+	val = xudma_rchanrt_read(rx_chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG);
+
+	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
+		val = xudma_rchanrt_read(rx_chn->udma_rchanx,
+					 UDMA_RCHAN_RT_CTL_REG);
+		udelay(1);
+		if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
+			dev_err(rx_chn->common.dev, "RX tdown timeout\n");
+			break;
+		}
+		i++;
+	}
+
+	val = xudma_rchanrt_read(rx_chn->udma_rchanx,
+				 UDMA_RCHAN_RT_PEER_RT_EN_REG);
+	if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
+		dev_err(rx_chn->common.dev, "RX tdown peer not stopped\n");
+	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown2");
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_rx_chn);
+
+void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+		u32 flow_num, void *data,
+		void (*cleanup)(void *data, dma_addr_t desc_dma), bool skip_fdq)
+{
+	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
+	struct device *dev = rx_chn->common.dev;
+	dma_addr_t desc_dma;
+	int occ_rx, i, ret;
+
+	/* reset RXCQ as it is not an input for UDMA - expected to be empty */
+	occ_rx = k3_ringacc_ring_get_occ(flow->ringrx);
+	dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx);
+	if (flow->ringrx)
+		k3_ringacc_ring_reset(flow->ringrx);
+
+	/*
Skip RX FDQ in case one FDQ is used for the set of flows */ + if (skip_fdq) + return; + + /* + * RX FDQ reset need to be special way as it is input for udma and its + * state cached by udma, so: + * 1) save RX FDQ occ + * 2) clean up RX FDQ and call callback .cleanup() for each desc + * 3) reset RX FDQ in a special way + */ + occ_rx = k3_ringacc_ring_get_occ(flow->ringrxfdq); + dev_dbg(dev, "RX reset flow %u occ_rx_fdq %u\n", flow_num, occ_rx); + + for (i = 0; i < occ_rx; i++) { + ret = k3_ringacc_ring_pop(flow->ringrxfdq, &desc_dma); + if (ret) { + dev_err(dev, "RX reset pop %d\n", ret); + break; + } + cleanup(data, desc_dma); + } + + k3_ringacc_ring_reset_dma(flow->ringrxfdq, occ_rx); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_reset_rx_chn); + +int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_num, struct cppi5_host_desc_t *desc_rx, + dma_addr_t desc_dma) +{ + struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num]; + + return k3_ringacc_ring_push(flow->ringrxfdq, &desc_dma); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_push_rx_chn); + +int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_num, dma_addr_t *desc_dma) +{ + struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num]; + + return k3_ringacc_ring_pop(flow->ringrx, desc_dma); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_pop_rx_chn); + +int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_num) +{ + struct k3_udma_glue_rx_flow *flow; + + flow = &rx_chn->flows[flow_num]; + + flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx); + + return flow->virq; +} +EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_irq); diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c new file mode 100644 index 000000000000..0b8f3dd6b146 --- /dev/null +++ b/drivers/dma/ti/k3-udma-private.c @@ -0,0 +1,133 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + * Author: Peter Ujfalusi <peter.ujfalusi@ti.com> + */ + +int xudma_navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread) +{ + return navss_psil_pair(ud, src_thread, dst_thread); +} +EXPORT_SYMBOL(xudma_navss_psil_pair); + +int xudma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread, u32 dst_thread) +{ + return navss_psil_unpair(ud, src_thread, dst_thread); +} +EXPORT_SYMBOL(xudma_navss_psil_unpair); + +struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property) +{ + struct device_node *udma_node = np; + struct platform_device *pdev; + struct udma_dev *ud; + + if (property) { + udma_node = of_parse_phandle(np, property, 0); + if (!udma_node) { + pr_err("UDMA node is not found\n"); + return ERR_PTR(-ENODEV); + } + } + + pdev = of_find_device_by_node(udma_node); + if (!pdev) { + pr_debug("UDMA device not found\n"); + return ERR_PTR(-EPROBE_DEFER); + } + + if (np != udma_node) + of_node_put(udma_node); + + ud = platform_get_drvdata(pdev); + if (!ud) { + pr_debug("UDMA has not been probed\n"); + return ERR_PTR(-EPROBE_DEFER); + } + + return ud; +} +EXPORT_SYMBOL(of_xudma_dev_get); + +u32 xudma_dev_get_psil_base(struct udma_dev *ud) +{ + return ud->psil_base; +} +EXPORT_SYMBOL(xudma_dev_get_psil_base); + +struct udma_tisci_rm *xudma_dev_get_tisci_rm(struct udma_dev *ud) +{ + return &ud->tisci_rm; +} +EXPORT_SYMBOL(xudma_dev_get_tisci_rm); + +int xudma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt) +{ + return __udma_alloc_gp_rflow_range(ud, from, cnt); +} +EXPORT_SYMBOL(xudma_alloc_gp_rflow_range); + +int 
xudma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt) +{ + return __udma_free_gp_rflow_range(ud, from, cnt); +} +EXPORT_SYMBOL(xudma_free_gp_rflow_range); + +bool xudma_rflow_is_gp(struct udma_dev *ud, int id) +{ + return !test_bit(id, ud->rflow_gp_map); +} +EXPORT_SYMBOL(xudma_rflow_is_gp); + +#define XUDMA_GET_PUT_RESOURCE(res) \ +struct udma_##res *xudma_##res##_get(struct udma_dev *ud, int id) \ +{ \ + return __udma_reserve_##res(ud, false, id); \ +} \ +EXPORT_SYMBOL(xudma_##res##_get); \ + \ +void xudma_##res##_put(struct udma_dev *ud, struct udma_##res *p) \ +{ \ + clear_bit(p->id, ud->res##_map); \ +} \ +EXPORT_SYMBOL(xudma_##res##_put) +XUDMA_GET_PUT_RESOURCE(tchan); +XUDMA_GET_PUT_RESOURCE(rchan); + +struct udma_rflow *xudma_rflow_get(struct udma_dev *ud, int id) +{ + return __udma_get_rflow(ud, id); +} +EXPORT_SYMBOL(xudma_rflow_get); + +void xudma_rflow_put(struct udma_dev *ud, struct udma_rflow *p) +{ + __udma_put_rflow(ud, p); +} +EXPORT_SYMBOL(xudma_rflow_put); + +#define XUDMA_GET_RESOURCE_ID(res) \ +int xudma_##res##_get_id(struct udma_##res *p) \ +{ \ + return p->id; \ +} \ +EXPORT_SYMBOL(xudma_##res##_get_id) +XUDMA_GET_RESOURCE_ID(tchan); +XUDMA_GET_RESOURCE_ID(rchan); +XUDMA_GET_RESOURCE_ID(rflow); + +/* Exported register access functions */ +#define XUDMA_RT_IO_FUNCTIONS(res) \ +u32 xudma_##res##rt_read(struct udma_##res *p, int reg) \ +{ \ + return udma_##res##rt_read(p, reg); \ +} \ +EXPORT_SYMBOL(xudma_##res##rt_read); \ + \ +void xudma_##res##rt_write(struct udma_##res *p, int reg, u32 val) \ +{ \ + udma_##res##rt_write(p, reg, val); \ +} \ +EXPORT_SYMBOL(xudma_##res##rt_write) +XUDMA_RT_IO_FUNCTIONS(tchan); +XUDMA_RT_IO_FUNCTIONS(rchan); diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c new file mode 100644 index 000000000000..ea79c2df28e0 --- /dev/null +++ b/drivers/dma/ti/k3-udma.c @@ -0,0 +1,3432 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + * Author: Peter Ujfalusi <peter.ujfalusi@ti.com> + */ + +#include <linux/kernel.h> +#include <linux/dmaengine.h> +#include <linux/dma-mapping.h> +#include <linux/dmapool.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/of.h> +#include <linux/of_dma.h> +#include <linux/of_device.h> +#include <linux/of_irq.h> +#include <linux/workqueue.h> +#include <linux/completion.h> +#include <linux/soc/ti/k3-ringacc.h> +#include <linux/soc/ti/ti_sci_protocol.h> +#include <linux/soc/ti/ti_sci_inta_msi.h> +#include <linux/dma/ti-cppi5.h> + +#include "../virt-dma.h" +#include "k3-udma.h" +#include "k3-psil-priv.h" + +struct udma_static_tr { + u8 elsize; /* RPSTR0 */ + u16 elcnt; /* RPSTR0 */ + u16 bstcnt; /* RPSTR1 */ +}; + +#define K3_UDMA_MAX_RFLOWS 1024 +#define K3_UDMA_DEFAULT_RING_SIZE 16 + +/* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */ +#define UDMA_RFLOW_SRCTAG_NONE 0 +#define UDMA_RFLOW_SRCTAG_CFG_TAG 1 +#define UDMA_RFLOW_SRCTAG_FLOW_ID 2 +#define UDMA_RFLOW_SRCTAG_SRC_TAG 4 + +#define UDMA_RFLOW_DSTTAG_NONE 0 +#define UDMA_RFLOW_DSTTAG_CFG_TAG 1 +#define UDMA_RFLOW_DSTTAG_FLOW_ID 2 +#define UDMA_RFLOW_DSTTAG_DST_TAG_LO 4 +#define UDMA_RFLOW_DSTTAG_DST_TAG_HI 5 + +struct udma_chan; + +enum udma_mmr { + MMR_GCFG = 0, + MMR_RCHANRT, + MMR_TCHANRT, + MMR_LAST, +}; + +static const char * const mmr_names[] = { "gcfg", "rchanrt", "tchanrt" }; + 
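+/*
+ * Resource model: a transfer uses a TX channel (udma_tchan) with a transmit
+ * ring and a transmit completion ring, and/or an RX channel (udma_rchan)
+ * whose data path is described by an RX flow (udma_rflow) carrying a free
+ * descriptor ring and a receive ring. MEM_TO_MEM transfers pair a tchan and
+ * an rchan with the same index.
+ */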
+struct udma_tchan { + void __iomem *reg_rt; + + int id; + struct k3_ring *t_ring; /* Transmit ring */ + struct k3_ring *tc_ring; /* Transmit Completion ring */ +}; + +struct udma_rflow { + int id; + struct k3_ring *fd_ring; /* Free Descriptor ring */ + struct k3_ring *r_ring; /* Receive ring */ +}; + +struct udma_rchan { + void __iomem *reg_rt; + + int id; +}; + +#define UDMA_FLAG_PDMA_ACC32 BIT(0) +#define UDMA_FLAG_PDMA_BURST BIT(1) + +struct udma_match_data { + u32 psil_base; + bool enable_memcpy_support; + u32 flags; + u32 statictr_z_mask; + u32 rchan_oes_offset; + + u8 tpl_levels; + u32 level_start_idx[]; +}; + +struct udma_dev { + struct dma_device ddev; + struct device *dev; + void __iomem *mmrs[MMR_LAST]; + const struct udma_match_data *match_data; + + size_t desc_align; /* alignment to use for descriptors */ + + struct udma_tisci_rm tisci_rm; + + struct k3_ringacc *ringacc; + + struct work_struct purge_work; + struct list_head desc_to_purge; + spinlock_t lock; + + int tchan_cnt; + int echan_cnt; + int rchan_cnt; + int rflow_cnt; + unsigned long *tchan_map; + unsigned long *rchan_map; + unsigned long *rflow_gp_map; + unsigned long *rflow_gp_map_allocated; + unsigned long *rflow_in_use; + + struct udma_tchan *tchans; + struct udma_rchan *rchans; + struct udma_rflow *rflows; + + struct udma_chan *channels; + u32 psil_base; +}; + +struct udma_hwdesc { + size_t cppi5_desc_size; + void *cppi5_desc_vaddr; + dma_addr_t cppi5_desc_paddr; + + /* TR descriptor internal pointers */ + void *tr_req_base; + struct cppi5_tr_resp_t *tr_resp_base; +}; + +struct udma_desc { + struct virt_dma_desc vd; + + bool terminated; + + enum dma_transfer_direction dir; + + struct udma_static_tr static_tr; + u32 residue; + + unsigned int sglen; + unsigned int desc_idx; /* Only used for cyclic in packet mode */ + unsigned int tr_idx; + + u32 metadata_size; + void *metadata; /* pointer to provided metadata buffer (EPIP, PSdata) */ + + unsigned int hwdesc_count; + struct udma_hwdesc hwdesc[0]; +}; + +enum udma_chan_state { + UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */ + UDMA_CHAN_IS_ACTIVE, /* Normal operation */ + UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */ +}; + +struct udma_tx_drain { + struct delayed_work work; + unsigned long jiffie; + u32 residue; +}; + +struct udma_chan_config { + bool pkt_mode; /* TR or packet */ + bool needs_epib; /* EPIB is needed for the communication or not */ + u32 psd_size; /* size of Protocol Specific Data */ + u32 metadata_size; /* (needs_epib ? 
16:0) + psd_size */ + u32 hdesc_size; /* Size of a packet descriptor in packet mode */ + bool notdpkt; /* Suppress sending TDC packet */ + int remote_thread_id; + u32 src_thread; + u32 dst_thread; + enum psil_endpoint_type ep_type; + bool enable_acc32; + bool enable_burst; + enum udma_tp_level channel_tpl; /* Channel Throughput Level */ + + enum dma_transfer_direction dir; +}; + +struct udma_chan { + struct virt_dma_chan vc; + struct dma_slave_config cfg; + struct udma_dev *ud; + struct udma_desc *desc; + struct udma_desc *terminated_desc; + struct udma_static_tr static_tr; + char *name; + + struct udma_tchan *tchan; + struct udma_rchan *rchan; + struct udma_rflow *rflow; + + bool psil_paired; + + int irq_num_ring; + int irq_num_udma; + + bool cyclic; + bool paused; + + enum udma_chan_state state; + struct completion teardown_completed; + + struct udma_tx_drain tx_drain; + + u32 bcnt; /* number of bytes completed since the start of the channel */ + u32 in_ring_cnt; /* number of descriptors in flight */ + + /* Channel configuration parameters */ + struct udma_chan_config config; + + /* dmapool for packet mode descriptors */ + bool use_dma_pool; + struct dma_pool *hdesc_pool; + + u32 id; +}; + +static inline struct udma_dev *to_udma_dev(struct dma_device *d) +{ + return container_of(d, struct udma_dev, ddev); +} + +static inline struct udma_chan *to_udma_chan(struct dma_chan *c) +{ + return container_of(c, struct udma_chan, vc.chan); +} + +static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t) +{ + return container_of(t, struct udma_desc, vd.tx); +} + +/* Generic register access functions */ +static inline u32 udma_read(void __iomem *base, int reg) +{ + return readl(base + reg); +} + +static inline void udma_write(void __iomem *base, int reg, u32 val) +{ + writel(val, base + reg); +} + +static inline void udma_update_bits(void __iomem *base, int reg, + u32 mask, u32 val) +{ + u32 tmp, orig; + + orig = readl(base + reg); + tmp = orig & ~mask; + tmp |= (val & mask); + + if (tmp != orig) + writel(tmp, base + reg); +} + +/* TCHANRT */ +static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg) +{ + if (!tchan) + return 0; + return udma_read(tchan->reg_rt, reg); +} + +static inline void udma_tchanrt_write(struct udma_tchan *tchan, int reg, + u32 val) +{ + if (!tchan) + return; + udma_write(tchan->reg_rt, reg, val); +} + +static inline void udma_tchanrt_update_bits(struct udma_tchan *tchan, int reg, + u32 mask, u32 val) +{ + if (!tchan) + return; + udma_update_bits(tchan->reg_rt, reg, mask, val); +} + +/* RCHANRT */ +static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg) +{ + if (!rchan) + return 0; + return udma_read(rchan->reg_rt, reg); +} + +static inline void udma_rchanrt_write(struct udma_rchan *rchan, int reg, + u32 val) +{ + if (!rchan) + return; + udma_write(rchan->reg_rt, reg, val); +} + +static inline void udma_rchanrt_update_bits(struct udma_rchan *rchan, int reg, + u32 mask, u32 val) +{ + if (!rchan) + return; + udma_update_bits(rchan->reg_rt, reg, mask, val); +} + +static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread) +{ + struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; + + dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; + return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci, + tisci_rm->tisci_navss_dev_id, + src_thread, dst_thread); +} + +static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread, + u32 dst_thread) +{ + struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; + + dst_thread |= 
K3_PSIL_DST_THREAD_ID_OFFSET; + return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci, + tisci_rm->tisci_navss_dev_id, + src_thread, dst_thread); +} + +static void udma_reset_uchan(struct udma_chan *uc) +{ + memset(&uc->config, 0, sizeof(uc->config)); + uc->config.remote_thread_id = -1; + uc->state = UDMA_CHAN_IS_IDLE; +} + +static void udma_dump_chan_stdata(struct udma_chan *uc) +{ + struct device *dev = uc->ud->dev; + u32 offset; + int i; + + if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) { + dev_dbg(dev, "TCHAN State data:\n"); + for (i = 0; i < 32; i++) { + offset = UDMA_TCHAN_RT_STDATA_REG + i * 4; + dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i, + udma_tchanrt_read(uc->tchan, offset)); + } + } + + if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) { + dev_dbg(dev, "RCHAN State data:\n"); + for (i = 0; i < 32; i++) { + offset = UDMA_RCHAN_RT_STDATA_REG + i * 4; + dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i, + udma_rchanrt_read(uc->rchan, offset)); + } + } +} + +static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d, + int idx) +{ + return d->hwdesc[idx].cppi5_desc_paddr; +} + +static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx) +{ + return d->hwdesc[idx].cppi5_desc_vaddr; +} + +static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc, + dma_addr_t paddr) +{ + struct udma_desc *d = uc->terminated_desc; + + if (d) { + dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d, + d->desc_idx); + + if (desc_paddr != paddr) + d = NULL; + } + + if (!d) { + d = uc->desc; + if (d) { + dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d, + d->desc_idx); + + if (desc_paddr != paddr) + d = NULL; + } + } + + return d; +} + +static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d) +{ + if (uc->use_dma_pool) { + int i; + + for (i = 0; i < d->hwdesc_count; i++) { + if (!d->hwdesc[i].cppi5_desc_vaddr) + continue; + + dma_pool_free(uc->hdesc_pool, + d->hwdesc[i].cppi5_desc_vaddr, + d->hwdesc[i].cppi5_desc_paddr); + + d->hwdesc[i].cppi5_desc_vaddr = NULL; + } + } else if (d->hwdesc[0].cppi5_desc_vaddr) { + struct udma_dev *ud = uc->ud; + + dma_free_coherent(ud->dev, d->hwdesc[0].cppi5_desc_size, + d->hwdesc[0].cppi5_desc_vaddr, + d->hwdesc[0].cppi5_desc_paddr); + + d->hwdesc[0].cppi5_desc_vaddr = NULL; + } +} + +static void udma_purge_desc_work(struct work_struct *work) +{ + struct udma_dev *ud = container_of(work, typeof(*ud), purge_work); + struct virt_dma_desc *vd, *_vd; + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&ud->lock, flags); + list_splice_tail_init(&ud->desc_to_purge, &head); + spin_unlock_irqrestore(&ud->lock, flags); + + list_for_each_entry_safe(vd, _vd, &head, node) { + struct udma_chan *uc = to_udma_chan(vd->tx.chan); + struct udma_desc *d = to_udma_desc(&vd->tx); + + udma_free_hwdesc(uc, d); + list_del(&vd->node); + kfree(d); + } + + /* If more to purge, schedule the work again */ + if (!list_empty(&ud->desc_to_purge)) + schedule_work(&ud->purge_work); +} + +static void udma_desc_free(struct virt_dma_desc *vd) +{ + struct udma_dev *ud = to_udma_dev(vd->tx.chan->device); + struct udma_chan *uc = to_udma_chan(vd->tx.chan); + struct udma_desc *d = to_udma_desc(&vd->tx); + unsigned long flags; + + if (uc->terminated_desc == d) + uc->terminated_desc = NULL; + + if (uc->use_dma_pool) { + udma_free_hwdesc(uc, d); + kfree(d); + return; + } + + spin_lock_irqsave(&ud->lock, flags); + list_add_tail(&vd->node, &ud->desc_to_purge); + 
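+	/*
+	 * The hardware descriptors are dma_alloc_coherent() memory which
+	 * cannot be freed from this (potentially atomic) context, so the
+	 * descriptor is only queued here; udma_purge_desc_work() does the
+	 * actual freeing.
+	 */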
spin_unlock_irqrestore(&ud->lock, flags); + + schedule_work(&ud->purge_work); +} + +static bool udma_is_chan_running(struct udma_chan *uc) +{ + u32 trt_ctl = 0; + u32 rrt_ctl = 0; + + if (uc->tchan) + trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG); + if (uc->rchan) + rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG); + + if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN) + return true; + + return false; +} + +static bool udma_is_chan_paused(struct udma_chan *uc) +{ + u32 val, pause_mask; + + switch (uc->desc->dir) { + case DMA_DEV_TO_MEM: + val = udma_rchanrt_read(uc->rchan, + UDMA_RCHAN_RT_PEER_RT_EN_REG); + pause_mask = UDMA_PEER_RT_EN_PAUSE; + break; + case DMA_MEM_TO_DEV: + val = udma_tchanrt_read(uc->tchan, + UDMA_TCHAN_RT_PEER_RT_EN_REG); + pause_mask = UDMA_PEER_RT_EN_PAUSE; + break; + case DMA_MEM_TO_MEM: + val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG); + pause_mask = UDMA_CHAN_RT_CTL_PAUSE; + break; + default: + return false; + } + + if (val & pause_mask) + return true; + + return false; +} + +static void udma_sync_for_device(struct udma_chan *uc, int idx) +{ + struct udma_desc *d = uc->desc; + + if (uc->cyclic && uc->config.pkt_mode) { + dma_sync_single_for_device(uc->ud->dev, + d->hwdesc[idx].cppi5_desc_paddr, + d->hwdesc[idx].cppi5_desc_size, + DMA_TO_DEVICE); + } else { + int i; + + for (i = 0; i < d->hwdesc_count; i++) { + if (!d->hwdesc[i].cppi5_desc_vaddr) + continue; + + dma_sync_single_for_device(uc->ud->dev, + d->hwdesc[i].cppi5_desc_paddr, + d->hwdesc[i].cppi5_desc_size, + DMA_TO_DEVICE); + } + } +} + +static int udma_push_to_ring(struct udma_chan *uc, int idx) +{ + struct udma_desc *d = uc->desc; + + struct k3_ring *ring = NULL; + int ret = -EINVAL; + + switch (uc->config.dir) { + case DMA_DEV_TO_MEM: + ring = uc->rflow->fd_ring; + break; + case DMA_MEM_TO_DEV: + case DMA_MEM_TO_MEM: + ring = uc->tchan->t_ring; + break; + default: + break; + } + + if (ring) { + dma_addr_t desc_addr = udma_curr_cppi5_desc_paddr(d, idx); + + wmb(); /* Ensure that writes are not moved over this point */ + udma_sync_for_device(uc, idx); + ret = k3_ringacc_ring_push(ring, &desc_addr); + uc->in_ring_cnt++; + } + + return ret; +} + +static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr) +{ + struct k3_ring *ring = NULL; + int ret = -ENOENT; + + switch (uc->config.dir) { + case DMA_DEV_TO_MEM: + ring = uc->rflow->r_ring; + break; + case DMA_MEM_TO_DEV: + case DMA_MEM_TO_MEM: + ring = uc->tchan->tc_ring; + break; + default: + break; + } + + if (ring && k3_ringacc_ring_get_occ(ring)) { + struct udma_desc *d = NULL; + + ret = k3_ringacc_ring_pop(ring, addr); + if (ret) + return ret; + + /* Teardown completion */ + if (cppi5_desc_is_tdcm(*addr)) + return ret; + + d = udma_udma_desc_from_paddr(uc, *addr); + + if (d) + dma_sync_single_for_cpu(uc->ud->dev, *addr, + d->hwdesc[0].cppi5_desc_size, + DMA_FROM_DEVICE); + rmb(); /* Ensure that reads are not moved before this point */ + + if (!ret) + uc->in_ring_cnt--; + } + + return ret; +} + +static void udma_reset_rings(struct udma_chan *uc) +{ + struct k3_ring *ring1 = NULL; + struct k3_ring *ring2 = NULL; + + switch (uc->config.dir) { + case DMA_DEV_TO_MEM: + if (uc->rchan) { + ring1 = uc->rflow->fd_ring; + ring2 = uc->rflow->r_ring; + } + break; + case DMA_MEM_TO_DEV: + case DMA_MEM_TO_MEM: + if (uc->tchan) { + ring1 = uc->tchan->t_ring; + ring2 = uc->tchan->tc_ring; + } + break; + default: + break; + } + + if (ring1) + k3_ringacc_ring_reset_dma(ring1, + 
k3_ringacc_ring_get_occ(ring1)); + if (ring2) + k3_ringacc_ring_reset(ring2); + + /* make sure we are not leaking memory by stalled descriptor */ + if (uc->terminated_desc) { + udma_desc_free(&uc->terminated_desc->vd); + uc->terminated_desc = NULL; + } + + uc->in_ring_cnt = 0; +} + +static void udma_reset_counters(struct udma_chan *uc) +{ + u32 val; + + if (uc->tchan) { + val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val); + + val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val); + + val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val); + + val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val); + } + + if (uc->rchan) { + val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG); + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val); + + val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG); + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val); + + val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG); + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val); + + val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG); + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val); + } + + uc->bcnt = 0; +} + +static int udma_reset_chan(struct udma_chan *uc, bool hard) +{ + switch (uc->config.dir) { + case DMA_DEV_TO_MEM: + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0); + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0); + break; + case DMA_MEM_TO_DEV: + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0); + break; + case DMA_MEM_TO_MEM: + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0); + break; + default: + return -EINVAL; + } + + /* Reset all counters */ + udma_reset_counters(uc); + + /* Hard reset: re-initialize the channel to reset */ + if (hard) { + struct udma_chan_config ucc_backup; + int ret; + + memcpy(&ucc_backup, &uc->config, sizeof(uc->config)); + uc->ud->ddev.device_free_chan_resources(&uc->vc.chan); + + /* restore the channel configuration */ + memcpy(&uc->config, &ucc_backup, sizeof(uc->config)); + ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan); + if (ret) + return ret; + + /* + * Setting forced teardown after forced reset helps recovering + * the rchan. 
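+	 * The FTDOWN bit below requests a forced teardown, i.e. one that
+	 * completes without waiting for the remote peer to respond.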
+ */ + if (uc->config.dir == DMA_DEV_TO_MEM) + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_EN | + UDMA_CHAN_RT_CTL_TDOWN | + UDMA_CHAN_RT_CTL_FTDOWN); + } + uc->state = UDMA_CHAN_IS_IDLE; + + return 0; +} + +static void udma_start_desc(struct udma_chan *uc) +{ + struct udma_chan_config *ucc = &uc->config; + + if (ucc->pkt_mode && (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) { + int i; + + /* Push all descriptors to ring for packet mode cyclic or RX */ + for (i = 0; i < uc->desc->sglen; i++) + udma_push_to_ring(uc, i); + } else { + udma_push_to_ring(uc, 0); + } +} + +static bool udma_chan_needs_reconfiguration(struct udma_chan *uc) +{ + /* Only PDMAs have staticTR */ + if (uc->config.ep_type == PSIL_EP_NATIVE) + return false; + + /* Check if the staticTR configuration has changed for TX */ + if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr))) + return true; + + return false; +} + +static int udma_start(struct udma_chan *uc) +{ + struct virt_dma_desc *vd = vchan_next_desc(&uc->vc); + + if (!vd) { + uc->desc = NULL; + return -ENOENT; + } + + list_del(&vd->node); + + uc->desc = to_udma_desc(&vd->tx); + + /* Channel is already running and does not need reconfiguration */ + if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) { + udma_start_desc(uc); + goto out; + } + + /* Make sure that we clear the teardown bit, if it is set */ + udma_reset_chan(uc, false); + + /* Push descriptors before we start the channel */ + udma_start_desc(uc); + + switch (uc->desc->dir) { + case DMA_DEV_TO_MEM: + /* Config remote TR */ + if (uc->config.ep_type == PSIL_EP_PDMA_XY) { + u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) | + PDMA_STATIC_TR_X(uc->desc->static_tr.elsize); + const struct udma_match_data *match_data = + uc->ud->match_data; + + if (uc->config.enable_acc32) + val |= PDMA_STATIC_TR_XY_ACC32; + if (uc->config.enable_burst) + val |= PDMA_STATIC_TR_XY_BURST; + + udma_rchanrt_write(uc->rchan, + UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG, val); + + udma_rchanrt_write(uc->rchan, + UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG, + PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt, + match_data->statictr_z_mask)); + + /* save the current staticTR configuration */ + memcpy(&uc->static_tr, &uc->desc->static_tr, + sizeof(uc->static_tr)); + } + + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_EN); + + /* Enable remote */ + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_ENABLE); + + break; + case DMA_MEM_TO_DEV: + /* Config remote TR */ + if (uc->config.ep_type == PSIL_EP_PDMA_XY) { + u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) | + PDMA_STATIC_TR_X(uc->desc->static_tr.elsize); + + if (uc->config.enable_acc32) + val |= PDMA_STATIC_TR_XY_ACC32; + if (uc->config.enable_burst) + val |= PDMA_STATIC_TR_XY_BURST; + + udma_tchanrt_write(uc->tchan, + UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG, val); + + /* save the current staticTR configuration */ + memcpy(&uc->static_tr, &uc->desc->static_tr, + sizeof(uc->static_tr)); + } + + /* Enable remote */ + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_ENABLE); + + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_EN); + + break; + case DMA_MEM_TO_MEM: + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_EN); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_EN); + + break; + default: + return -EINVAL; + } + + uc->state = UDMA_CHAN_IS_ACTIVE; +out: + + return 0; +} + +static int 
udma_stop(struct udma_chan *uc) +{ + enum udma_chan_state old_state = uc->state; + + uc->state = UDMA_CHAN_IS_TERMINATING; + reinit_completion(&uc->teardown_completed); + + switch (uc->config.dir) { + case DMA_DEV_TO_MEM: + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_ENABLE | + UDMA_PEER_RT_EN_TEARDOWN); + break; + case DMA_MEM_TO_DEV: + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_ENABLE | + UDMA_PEER_RT_EN_FLUSH); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_EN | + UDMA_CHAN_RT_CTL_TDOWN); + break; + case DMA_MEM_TO_MEM: + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_EN | + UDMA_CHAN_RT_CTL_TDOWN); + break; + default: + uc->state = old_state; + complete_all(&uc->teardown_completed); + return -EINVAL; + } + + return 0; +} + +static void udma_cyclic_packet_elapsed(struct udma_chan *uc) +{ + struct udma_desc *d = uc->desc; + struct cppi5_host_desc_t *h_desc; + + h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr; + cppi5_hdesc_reset_to_original(h_desc); + udma_push_to_ring(uc, d->desc_idx); + d->desc_idx = (d->desc_idx + 1) % d->sglen; +} + +static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d) +{ + struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr; + + memcpy(d->metadata, h_desc->epib, d->metadata_size); +} + +static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d) +{ + u32 peer_bcnt, bcnt; + + /* Only TX towards PDMA is affected */ + if (uc->config.ep_type == PSIL_EP_NATIVE || + uc->config.dir != DMA_MEM_TO_DEV) + return true; + + peer_bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG); + bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG); + + if (peer_bcnt < bcnt) { + uc->tx_drain.residue = bcnt - peer_bcnt; + uc->tx_drain.jiffie = jiffies; + return false; + } + + return true; +} + +static void udma_check_tx_completion(struct work_struct *work) +{ + struct udma_chan *uc = container_of(work, typeof(*uc), + tx_drain.work.work); + bool desc_done = true; + u32 residue_diff; + unsigned long jiffie_diff, delay; + + if (uc->desc) { + residue_diff = uc->tx_drain.residue; + jiffie_diff = uc->tx_drain.jiffie; + desc_done = udma_is_desc_really_done(uc, uc->desc); + } + + if (!desc_done) { + jiffie_diff = uc->tx_drain.jiffie - jiffie_diff; + residue_diff -= uc->tx_drain.residue; + if (residue_diff) { + /* Try to guess when we should check next time */ + residue_diff /= jiffie_diff; + delay = uc->tx_drain.residue / residue_diff / 3; + if (jiffies_to_msecs(delay) < 5) + delay = 0; + } else { + /* No progress, check again in 1 second */ + delay = HZ; + } + + schedule_delayed_work(&uc->tx_drain.work, delay); + } else if (uc->desc) { + struct udma_desc *d = uc->desc; + + uc->bcnt += d->residue; + udma_start(uc); + vchan_cookie_complete(&d->vd); + } +} + +static irqreturn_t udma_ring_irq_handler(int irq, void *data) +{ + struct udma_chan *uc = data; + struct udma_desc *d; + unsigned long flags; + dma_addr_t paddr = 0; + + if (udma_pop_from_ring(uc, &paddr) || !paddr) + return IRQ_HANDLED; + + spin_lock_irqsave(&uc->vc.lock, flags); + + /* Teardown completion message */ + if (cppi5_desc_is_tdcm(paddr)) { + /* Compensate our internal pop/push counter */ + uc->in_ring_cnt++; + + complete_all(&uc->teardown_completed); + + if (uc->terminated_desc) { + udma_desc_free(&uc->terminated_desc->vd); + uc->terminated_desc = NULL; + } + + if (!uc->desc) + udma_start(uc); + + goto out; + } + + d = 
udma_udma_desc_from_paddr(uc, paddr);
+
+	if (d) {
+		dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
+								   d->desc_idx);
+		if (desc_paddr != paddr) {
+			dev_err(uc->ud->dev, "not matching descriptors!\n");
+			goto out;
+		}
+
+		if (uc->cyclic) {
+			/* push the descriptor back to the ring */
+			if (d == uc->desc) {
+				udma_cyclic_packet_elapsed(uc);
+				vchan_cyclic_callback(&d->vd);
+			}
+		} else {
+			bool desc_done = false;
+
+			if (d == uc->desc) {
+				desc_done = udma_is_desc_really_done(uc, d);
+
+				if (desc_done) {
+					uc->bcnt += d->residue;
+					udma_start(uc);
+				} else {
+					schedule_delayed_work(&uc->tx_drain.work,
+							      0);
+				}
+			}
+
+			if (desc_done)
+				vchan_cookie_complete(&d->vd);
+		}
+	}
+out:
+	spin_unlock_irqrestore(&uc->vc.lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t udma_udma_irq_handler(int irq, void *data)
+{
+	struct udma_chan *uc = data;
+	struct udma_desc *d;
+	unsigned long flags;
+
+	spin_lock_irqsave(&uc->vc.lock, flags);
+	d = uc->desc;
+	if (d) {
+		d->tr_idx = (d->tr_idx + 1) % d->sglen;
+
+		if (uc->cyclic) {
+			vchan_cyclic_callback(&d->vd);
+		} else {
+			/* TODO: figure out the real amount of data */
+			uc->bcnt += d->residue;
+			udma_start(uc);
+			vchan_cookie_complete(&d->vd);
+		}
+	}
+
+	spin_unlock_irqrestore(&uc->vc.lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
+ * @ud: UDMA device
+ * @from: Start the search from this flow id number
+ * @cnt: Number of consecutive flow ids to allocate
+ *
+ * Allocate a range of RX flow ids for future use; these flows can be
+ * requested only by explicit flow id number. If @from is set to -1, the
+ * first free range is used. If @from is non-negative, allocation is forced
+ * at exactly the specified range of flows.
+ *
+ * Returns -ENOMEM if no free range can be found,
+ * -EEXIST if the requested range is busy,
+ * -EINVAL if invalid input values are passed in,
+ * or the first flow id of the range on success.
+ */
+static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
+{
+	int start, tmp_from;
+	DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);
+
+	tmp_from = from;
+	if (tmp_from < 0)
+		tmp_from = ud->rchan_cnt;
+	/* default flows can't be allocated and are accessible only by id */
+	if (tmp_from < ud->rchan_cnt)
+		return -EINVAL;
+
+	if (tmp_from + cnt > ud->rflow_cnt)
+		return -EINVAL;
+
+	bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
+		  ud->rflow_cnt);
+
+	start = bitmap_find_next_zero_area(tmp, ud->rflow_cnt,
+					   tmp_from, cnt, 0);
+	if (start >= ud->rflow_cnt)
+		return -ENOMEM;
+
+	if (from >= 0 && start != from)
+		return -EEXIST;
+
+	bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
+	return start;
+}
+
+static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
+{
+	if (from < ud->rchan_cnt)
+		return -EINVAL;
+	if (from + cnt > ud->rflow_cnt)
+		return -EINVAL;
+
+	bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
+	return 0;
+}
+
+static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
+{
+	/*
+	 * An attempt to request an rflow by ID can be made for any rflow that
+	 * is not in use, with the assumption that the caller knows what it is
+	 * doing. TI-SCI FW will perform an additional permission check
+	 * anyway, so it is safe.
+	 */
+
+	if (id < 0 || id >= ud->rflow_cnt)
+		return ERR_PTR(-ENOENT);
+
+	if (test_bit(id, ud->rflow_in_use))
+		return ERR_PTR(-ENOENT);
+
+	/* GP rflow has to be allocated first */
+	if (!test_bit(id, ud->rflow_gp_map) &&
+	    !test_bit(id, ud->rflow_gp_map_allocated))
+		return ERR_PTR(-EINVAL);
+
+	dev_dbg(ud->dev, "get rflow%d\n", id);
+	set_bit(id, ud->rflow_in_use);
+	return &ud->rflows[id];
+}
+
+static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow)
+{
+	if (!test_bit(rflow->id, ud->rflow_in_use)) {
+		dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id);
+		return;
+	}
+
+	dev_dbg(ud->dev, "put rflow%d\n", rflow->id);
+	clear_bit(rflow->id, ud->rflow_in_use);
+}
+
+#define UDMA_RESERVE_RESOURCE(res)					\
+static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud,	\
+					       enum udma_tp_level tpl,	\
+					       int id)			\
+{									\
+	if (id >= 0) {							\
+		if (test_bit(id, ud->res##_map)) {			\
+			dev_err(ud->dev, "%s%d is in use\n", #res, id);	\
+			return ERR_PTR(-ENOENT);			\
+		}							\
+	} else {							\
+		int start;						\
+									\
+		if (tpl >= ud->match_data->tpl_levels)			\
+			tpl = ud->match_data->tpl_levels - 1;		\
+									\
+		start = ud->match_data->level_start_idx[tpl];		\
+									\
+		id = find_next_zero_bit(ud->res##_map, ud->res##_cnt,	\
+					start);				\
+		if (id == ud->res##_cnt) {				\
+			return ERR_PTR(-ENOENT);			\
+		}							\
+	}								\
+									\
+	set_bit(id, ud->res##_map);					\
+	return &ud->res##s[id];						\
+}
+
+UDMA_RESERVE_RESOURCE(tchan);
+UDMA_RESERVE_RESOURCE(rchan);
+
+static int udma_get_tchan(struct udma_chan *uc)
+{
+	struct udma_dev *ud = uc->ud;
+
+	if (uc->tchan) {
+		dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
+			uc->id, uc->tchan->id);
+		return 0;
+	}
+
+	uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, -1);
+	if (IS_ERR(uc->tchan))
+		return PTR_ERR(uc->tchan);
+
+	return 0;
+}
+
+static int udma_get_rchan(struct udma_chan *uc)
+{
+	struct udma_dev *ud = uc->ud;
+
+	if (uc->rchan) {
+		dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
+			uc->id, uc->rchan->id);
+		return 0;
+	}
+
+	uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, -1);
+	if (IS_ERR(uc->rchan))
+		return PTR_ERR(uc->rchan);
+
+	return 0;
+}
+
+static int udma_get_chan_pair(struct udma_chan *uc)
+{
+	struct udma_dev *ud = uc->ud;
+	const struct udma_match_data *match_data = ud->match_data;
+	int chan_id, end;
+
+	if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
+		dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
+			 uc->id, uc->tchan->id);
+		return 0;
+	}
+
+	if (uc->tchan) {
+		dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
+			uc->id, uc->tchan->id);
+		return -EBUSY;
+	} else if (uc->rchan) {
+		dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
+			uc->id, uc->rchan->id);
+		return -EBUSY;
+	}
+
+	/* Can be optimized, but let's have it like this for now */
+	end = min(ud->tchan_cnt, ud->rchan_cnt);
+	/* Try to use the highest TPL channel pair for MEM_TO_MEM channels */
+	chan_id = match_data->level_start_idx[match_data->tpl_levels - 1];
+	for (; chan_id < end; chan_id++) {
+		if (!test_bit(chan_id, ud->tchan_map) &&
+		    !test_bit(chan_id, ud->rchan_map))
+			break;
+	}
+
+	if (chan_id == end)
+		return -ENOENT;
+
+	set_bit(chan_id, ud->tchan_map);
+	set_bit(chan_id, ud->rchan_map);
+	uc->tchan = &ud->tchans[chan_id];
+	uc->rchan = &ud->rchans[chan_id];
+
+	return 0;
+}
+
+static int udma_get_rflow(struct udma_chan *uc, int flow_id)
+{
+	struct udma_dev *ud = uc->ud;
+
+	if (!uc->rchan) {
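+		/*
+		 * An rflow is only meaningful together with its backing
+		 * rchan: udma_alloc_rx_resources() requests the default flow
+		 * with flow_id == rchan->id, so a missing rchan here is a
+		 * driver bug rather than a runtime condition.
+		 */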
dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id); + return -EINVAL; + } + + if (uc->rflow) { + dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n", + uc->id, uc->rflow->id); + return 0; + } + + uc->rflow = __udma_get_rflow(ud, flow_id); + if (IS_ERR(uc->rflow)) + return PTR_ERR(uc->rflow); + + return 0; +} + +static void udma_put_rchan(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + + if (uc->rchan) { + dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id, + uc->rchan->id); + clear_bit(uc->rchan->id, ud->rchan_map); + uc->rchan = NULL; + } +} + +static void udma_put_tchan(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + + if (uc->tchan) { + dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id, + uc->tchan->id); + clear_bit(uc->tchan->id, ud->tchan_map); + uc->tchan = NULL; + } +} + +static void udma_put_rflow(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + + if (uc->rflow) { + dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id, + uc->rflow->id); + __udma_put_rflow(ud, uc->rflow); + uc->rflow = NULL; + } +} + +static void udma_free_tx_resources(struct udma_chan *uc) +{ + if (!uc->tchan) + return; + + k3_ringacc_ring_free(uc->tchan->t_ring); + k3_ringacc_ring_free(uc->tchan->tc_ring); + uc->tchan->t_ring = NULL; + uc->tchan->tc_ring = NULL; + + udma_put_tchan(uc); +} + +static int udma_alloc_tx_resources(struct udma_chan *uc) +{ + struct k3_ring_cfg ring_cfg; + struct udma_dev *ud = uc->ud; + int ret; + + ret = udma_get_tchan(uc); + if (ret) + return ret; + + uc->tchan->t_ring = k3_ringacc_request_ring(ud->ringacc, + uc->tchan->id, 0); + if (!uc->tchan->t_ring) { + ret = -EBUSY; + goto err_tx_ring; + } + + uc->tchan->tc_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0); + if (!uc->tchan->tc_ring) { + ret = -EBUSY; + goto err_txc_ring; + } + + memset(&ring_cfg, 0, sizeof(ring_cfg)); + ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; + ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8; + ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE; + + ret = k3_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg); + ret |= k3_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg); + + if (ret) + goto err_ringcfg; + + return 0; + +err_ringcfg: + k3_ringacc_ring_free(uc->tchan->tc_ring); + uc->tchan->tc_ring = NULL; +err_txc_ring: + k3_ringacc_ring_free(uc->tchan->t_ring); + uc->tchan->t_ring = NULL; +err_tx_ring: + udma_put_tchan(uc); + + return ret; +} + +static void udma_free_rx_resources(struct udma_chan *uc) +{ + if (!uc->rchan) + return; + + if (uc->rflow) { + struct udma_rflow *rflow = uc->rflow; + + k3_ringacc_ring_free(rflow->fd_ring); + k3_ringacc_ring_free(rflow->r_ring); + rflow->fd_ring = NULL; + rflow->r_ring = NULL; + + udma_put_rflow(uc); + } + + udma_put_rchan(uc); +} + +static int udma_alloc_rx_resources(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + struct k3_ring_cfg ring_cfg; + struct udma_rflow *rflow; + int fd_ring_id; + int ret; + + ret = udma_get_rchan(uc); + if (ret) + return ret; + + /* For MEM_TO_MEM we don't need rflow or rings */ + if (uc->config.dir == DMA_MEM_TO_MEM) + return 0; + + ret = udma_get_rflow(uc, uc->rchan->id); + if (ret) { + ret = -EBUSY; + goto err_rflow; + } + + rflow = uc->rflow; + fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id; + rflow->fd_ring = k3_ringacc_request_ring(ud->ringacc, fd_ring_id, 0); + if (!rflow->fd_ring) { + ret = -EBUSY; + goto err_rx_ring; + } + + rflow->r_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0); + if (!rflow->r_ring) { + ret = -EBUSY; + goto err_rxc_ring; + } + + memset(&ring_cfg, 0, 
sizeof(ring_cfg)); + + if (uc->config.pkt_mode) + ring_cfg.size = SG_MAX_SEGMENTS; + else + ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; + + ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8; + ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE; + + ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg); + ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; + ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg); + + if (ret) + goto err_ringcfg; + + return 0; + +err_ringcfg: + k3_ringacc_ring_free(rflow->r_ring); + rflow->r_ring = NULL; +err_rxc_ring: + k3_ringacc_ring_free(rflow->fd_ring); + rflow->fd_ring = NULL; +err_rx_ring: + udma_put_rflow(uc); +err_rflow: + udma_put_rchan(uc); + + return ret; +} + +#define TISCI_TCHAN_VALID_PARAMS ( \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID) + +#define TISCI_RCHAN_VALID_PARAMS ( \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID) + +static int udma_tisci_m2m_channel_config(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; + const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; + struct udma_tchan *tchan = uc->tchan; + struct udma_rchan *rchan = uc->rchan; + int ret = 0; + + /* Non synchronized - mem to mem type of transfer */ + int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring); + struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 }; + struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 }; + + req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS; + req_tx.nav_id = tisci_rm->tisci_dev_id; + req_tx.index = tchan->id; + req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR; + req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2; + req_tx.txcq_qnum = tc_ring; + + ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); + if (ret) { + dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret); + return ret; + } + + req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS; + req_rx.nav_id = tisci_rm->tisci_dev_id; + req_rx.index = rchan->id; + req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2; + req_rx.rxcq_qnum = tc_ring; + req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR; + + ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); + if (ret) + dev_err(ud->dev, "rchan%d alloc failed %d\n", rchan->id, ret); + + return ret; +} + +static int udma_tisci_tx_channel_config(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; + const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; + struct udma_tchan *tchan = uc->tchan; + int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring); + struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 }; + u32 mode, fetch_size; + int ret = 0; + + if (uc->config.pkt_mode) { + mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR; + fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib, + uc->config.psd_size, 
						   0);
+	} else {
+		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
+		fetch_size = sizeof(struct cppi5_desc_hdr_t);
+	}
+
+	req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
+	req_tx.nav_id = tisci_rm->tisci_dev_id;
+	req_tx.index = tchan->id;
+	req_tx.tx_chan_type = mode;
+	req_tx.tx_supr_tdpkt = uc->config.notdpkt;
+	req_tx.tx_fetch_size = fetch_size >> 2;
+	req_tx.txcq_qnum = tc_ring;
+
+	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
+	if (ret)
+		dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
+
+	return ret;
+}
+
+static int udma_tisci_rx_channel_config(struct udma_chan *uc)
+{
+	struct udma_dev *ud = uc->ud;
+	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
+	struct udma_rchan *rchan = uc->rchan;
+	int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring);
+	int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring);
+	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
+	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
+	u32 mode, fetch_size;
+	int ret = 0;
+
+	if (uc->config.pkt_mode) {
+		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
+		fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
+						   uc->config.psd_size, 0);
+	} else {
+		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
+		fetch_size = sizeof(struct cppi5_desc_hdr_t);
+	}
+
+	req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
+	req_rx.nav_id = tisci_rm->tisci_dev_id;
+	req_rx.index = rchan->id;
+	req_rx.rx_fetch_size = fetch_size >> 2;
+	req_rx.rxcq_qnum = rx_ring;
+	req_rx.rx_chan_type = mode;
+
+	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
+	if (ret) {
+		dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
+		return ret;
+	}
+
+	flow_req.valid_params =
+		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
+		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
+		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
+		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
+		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
+		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
+		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
+		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
+		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
+		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
+		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
+		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
+		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
+
+	flow_req.nav_id = tisci_rm->tisci_dev_id;
+	flow_req.flow_index = rchan->id;
+
+	if (uc->config.needs_epib)
+		flow_req.rx_einfo_present = 1;
+	else
+		flow_req.rx_einfo_present = 0;
+	if (uc->config.psd_size)
+		flow_req.rx_psinfo_present = 1;
+	else
+		flow_req.rx_psinfo_present = 0;
+	flow_req.rx_error_handling = 1;
+	flow_req.rx_dest_qnum = rx_ring;
+	flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE;
+	flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG;
+	flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI;
+	flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO;
+	flow_req.rx_fdq0_sz0_qnum = fd_ring;
+	flow_req.rx_fdq1_qnum = fd_ring;
+	flow_req.rx_fdq2_qnum = fd_ring;
+	flow_req.rx_fdq3_qnum = fd_ring;
+
+	ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
+
+	if (ret)
+		dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret);
+
+	return ret;
+}
+
+static int udma_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct udma_chan *uc = to_udma_chan(chan);
+	struct udma_dev *ud = to_udma_dev(chan->device);
+	const struct udma_match_data *match_data = ud->match_data;
+	struct k3_ring *irq_ring;
+	u32 irq_udma_idx;
+	int ret;
+
+	if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) {
+		uc->use_dma_pool = true;
+		/* in case of MEM_TO_MEM we have maximum of two TRs */
+		if (uc->config.dir == DMA_MEM_TO_MEM) {
+			uc->config.hdesc_size = cppi5_trdesc_calc_size(
+					sizeof(struct cppi5_tr_type15_t), 2);
+			uc->config.pkt_mode = false;
+		}
+	}
+
+	if (uc->use_dma_pool) {
+		uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
+						 uc->config.hdesc_size,
+						 ud->desc_align,
+						 0);
+		if (!uc->hdesc_pool) {
+			dev_err(ud->ddev.dev,
+				"Descriptor pool allocation failed\n");
+			uc->use_dma_pool = false;
+			return -ENOMEM;
+		}
+	}
+
+	/*
+	 * Make sure that the completion is in a known state:
+	 * No teardown, the channel is idle
+	 */
+	reinit_completion(&uc->teardown_completed);
+	complete_all(&uc->teardown_completed);
+	uc->state = UDMA_CHAN_IS_IDLE;
+
+	switch (uc->config.dir) {
+	case DMA_MEM_TO_MEM:
+		/* Non synchronized - mem to mem type of transfer */
+		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
+			uc->id);
+
+		ret = udma_get_chan_pair(uc);
+		if (ret)
+			return ret;
+
+		ret = udma_alloc_tx_resources(uc);
+		if (ret)
+			return ret;
+
+		ret = udma_alloc_rx_resources(uc);
+		if (ret) {
+			udma_free_tx_resources(uc);
+			return ret;
+		}
+
+		uc->config.src_thread = ud->psil_base + uc->tchan->id;
+		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
+					K3_PSIL_DST_THREAD_ID_OFFSET;
+
+		irq_ring = uc->tchan->tc_ring;
+		irq_udma_idx = uc->tchan->id;
+
+		ret = udma_tisci_m2m_channel_config(uc);
+		break;
+	case DMA_MEM_TO_DEV:
+		/* Slave transfer synchronized - mem to dev (TX) transfer */
+		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
+			uc->id);
+
+		ret = udma_alloc_tx_resources(uc);
+		if (ret) {
+			uc->config.remote_thread_id = -1;
+			return ret;
+		}
+
+		uc->config.src_thread = ud->psil_base + uc->tchan->id;
+		uc->config.dst_thread = uc->config.remote_thread_id;
+		uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
+
+		irq_ring = uc->tchan->tc_ring;
+		irq_udma_idx = uc->tchan->id;
+
+		ret = udma_tisci_tx_channel_config(uc);
+		break;
+	case DMA_DEV_TO_MEM:
+		/* Slave transfer synchronized - dev to mem (RX) transfer */
+		dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
+			uc->id);
+
+		ret = udma_alloc_rx_resources(uc);
+		if (ret) {
+			uc->config.remote_thread_id = -1;
+			return ret;
+		}
+
+		uc->config.src_thread = uc->config.remote_thread_id;
+		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
+					K3_PSIL_DST_THREAD_ID_OFFSET;
+
+		irq_ring = uc->rflow->r_ring;
+		irq_udma_idx = match_data->rchan_oes_offset + uc->rchan->id;
+
+		ret = udma_tisci_rx_channel_config(uc);
+		break;
+	default:
+		/* Cannot happen */
+		dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
+			__func__, uc->id, uc->config.dir);
+		return -EINVAL;
+	}
+
+	/* check if the channel configuration was successful */
+	if (ret)
+		goto err_res_free;
+
+	if (udma_is_chan_running(uc)) {
+		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
+		udma_stop(uc);
+		if (udma_is_chan_running(uc)) {
+			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
+			ret = -EBUSY;
+			goto err_res_free;
+		}
+	}
+
+	/* PSI-L pairing */
+	ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
+	if (ret) {
+		dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
+			uc->config.src_thread, uc->config.dst_thread);
+		goto err_res_free;
+	}
+
+	uc->psil_paired = true;
+
+	uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring);
+	if (uc->irq_num_ring <= 0) {
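+		/*
+		 * Without a valid ring interrupt the channel has no way to
+		 * signal completions, so unwind the PSI-L pairing and fail
+		 * the allocation.
+		 */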
dev_err(ud->dev, "Failed to get ring irq (index: %u)\n", + k3_ringacc_get_ring_id(irq_ring)); + ret = -EINVAL; + goto err_psi_free; + } + + ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler, + IRQF_TRIGGER_HIGH, uc->name, uc); + if (ret) { + dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id); + goto err_irq_free; + } + + /* Event from UDMA (TR events) only needed for slave TR mode channels */ + if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) { + uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev, + irq_udma_idx); + if (uc->irq_num_udma <= 0) { + dev_err(ud->dev, "Failed to get udma irq (index: %u)\n", + irq_udma_idx); + free_irq(uc->irq_num_ring, uc); + ret = -EINVAL; + goto err_irq_free; + } + + ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0, + uc->name, uc); + if (ret) { + dev_err(ud->dev, "chan%d: UDMA irq request failed\n", + uc->id); + free_irq(uc->irq_num_ring, uc); + goto err_irq_free; + } + } else { + uc->irq_num_udma = 0; + } + + udma_reset_rings(uc); + + INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work, + udma_check_tx_completion); + return 0; + +err_irq_free: + uc->irq_num_ring = 0; + uc->irq_num_udma = 0; +err_psi_free: + navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread); + uc->psil_paired = false; +err_res_free: + udma_free_tx_resources(uc); + udma_free_rx_resources(uc); + + udma_reset_uchan(uc); + + if (uc->use_dma_pool) { + dma_pool_destroy(uc->hdesc_pool); + uc->use_dma_pool = false; + } + + return ret; +} + +static int udma_slave_config(struct dma_chan *chan, + struct dma_slave_config *cfg) +{ + struct udma_chan *uc = to_udma_chan(chan); + + memcpy(&uc->cfg, cfg, sizeof(uc->cfg)); + + return 0; +} + +static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc, + size_t tr_size, int tr_count, + enum dma_transfer_direction dir) +{ + struct udma_hwdesc *hwdesc; + struct cppi5_desc_hdr_t *tr_desc; + struct udma_desc *d; + u32 reload_count = 0; + u32 ring_id; + + switch (tr_size) { + case 16: + case 32: + case 64: + case 128: + break; + default: + dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size); + return NULL; + } + + /* We have only one descriptor containing multiple TRs */ + d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT); + if (!d) + return NULL; + + d->sglen = tr_count; + + d->hwdesc_count = 1; + hwdesc = &d->hwdesc[0]; + + /* Allocate memory for DMA ring descriptor */ + if (uc->use_dma_pool) { + hwdesc->cppi5_desc_size = uc->config.hdesc_size; + hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool, + GFP_NOWAIT, + &hwdesc->cppi5_desc_paddr); + } else { + hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, + tr_count); + hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size, + uc->ud->desc_align); + hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev, + hwdesc->cppi5_desc_size, + &hwdesc->cppi5_desc_paddr, + GFP_NOWAIT); + } + + if (!hwdesc->cppi5_desc_vaddr) { + kfree(d); + return NULL; + } + + /* Start of the TR req records */ + hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size; + /* Start address of the TR response array */ + hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count; + + tr_desc = hwdesc->cppi5_desc_vaddr; + + if (uc->cyclic) + reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE; + + if (dir == DMA_DEV_TO_MEM) + ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring); + else + ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring); + + cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count); + cppi5_desc_set_pktids(tr_desc, uc->id, 
+			      CPPI5_INFO1_DESC_FLOWID_DEFAULT);
+	cppi5_desc_set_retpolicy(tr_desc, 0, ring_id);
+
+	return d;
+}
+
+static struct udma_desc *
+udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
+		      unsigned int sglen, enum dma_transfer_direction dir,
+		      unsigned long tx_flags, void *context)
+{
+	enum dma_slave_buswidth dev_width;
+	struct scatterlist *sgent;
+	struct udma_desc *d;
+	size_t tr_size;
+	struct cppi5_tr_type1_t *tr_req = NULL;
+	unsigned int i;
+	u32 burst;
+
+	if (dir == DMA_DEV_TO_MEM) {
+		dev_width = uc->cfg.src_addr_width;
+		burst = uc->cfg.src_maxburst;
+	} else if (dir == DMA_MEM_TO_DEV) {
+		dev_width = uc->cfg.dst_addr_width;
+		burst = uc->cfg.dst_maxburst;
+	} else {
+		dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
+		return NULL;
+	}
+
+	if (!burst)
+		burst = 1;
+
+	/* Now allocate and setup the descriptor. */
+	tr_size = sizeof(struct cppi5_tr_type1_t);
+	d = udma_alloc_tr_desc(uc, tr_size, sglen, dir);
+	if (!d)
+		return NULL;
+
+	d->sglen = sglen;
+
+	tr_req = d->hwdesc[0].tr_req_base;
+	for_each_sg(sgl, sgent, sglen, i) {
+		d->residue += sg_dma_len(sgent);
+
+		cppi5_tr_init(&tr_req[i].flags, CPPI5_TR_TYPE1, false, false,
+			      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+		cppi5_tr_csf_set(&tr_req[i].flags, CPPI5_TR_CSF_SUPR_EVT);
+
+		tr_req[i].addr = sg_dma_address(sgent);
+		tr_req[i].icnt0 = burst * dev_width;
+		tr_req[i].dim1 = burst * dev_width;
+		tr_req[i].icnt1 = sg_dma_len(sgent) / tr_req[i].icnt0;
+	}
+
+	cppi5_tr_csf_set(&tr_req[i - 1].flags, CPPI5_TR_CSF_EOP);
+
+	return d;
+}
+
+static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
+				   enum dma_slave_buswidth dev_width,
+				   u16 elcnt)
+{
+	if (uc->config.ep_type != PSIL_EP_PDMA_XY)
+		return 0;
+
+	/* Bus width translates to the element size (ES) */
+	switch (dev_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		d->static_tr.elsize = 0;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		d->static_tr.elsize = 1;
+		break;
+	case DMA_SLAVE_BUSWIDTH_3_BYTES:
+		d->static_tr.elsize = 2;
+		break;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		d->static_tr.elsize = 3;
+		break;
+	case DMA_SLAVE_BUSWIDTH_8_BYTES:
+		d->static_tr.elsize = 4;
+		break;
+	default: /* not reached */
+		return -EINVAL;
+	}
+
+	d->static_tr.elcnt = elcnt;
+
+	/*
+	 * PDMA must close the packet when the channel is in packet mode.
+	 * For TR mode, when the channel is not cyclic, we also need PDMA to
+	 * close the packet, otherwise the transfer will stall because PDMA
+	 * holds on to the data it has received from the peripheral.
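+	 *
+	 * As an illustration (hypothetical numbers): with a 2-byte wide
+	 * peripheral (dev_width = 2), elcnt = 8 and a 4096-byte non-cyclic
+	 * transfer, the code below computes bstcnt = 4096 / (2 * 8) = 256.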
+	 */
+	if (uc->config.pkt_mode || !uc->cyclic) {
+		unsigned int div = dev_width * elcnt;
+
+		if (uc->cyclic)
+			d->static_tr.bstcnt = d->residue / d->sglen / div;
+		else
+			d->static_tr.bstcnt = d->residue / div;
+
+		if (uc->config.dir == DMA_DEV_TO_MEM &&
+		    d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
+			return -EINVAL;
+	} else {
+		d->static_tr.bstcnt = 0;
+	}
+
+	return 0;
+}
+
+static struct udma_desc *
+udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
+		       unsigned int sglen, enum dma_transfer_direction dir,
+		       unsigned long tx_flags, void *context)
+{
+	struct scatterlist *sgent;
+	struct cppi5_host_desc_t *h_desc = NULL;
+	struct udma_desc *d;
+	u32 ring_id;
+	unsigned int i;
+
+	d = kzalloc(sizeof(*d) + sglen * sizeof(d->hwdesc[0]), GFP_NOWAIT);
+	if (!d)
+		return NULL;
+
+	d->sglen = sglen;
+	d->hwdesc_count = sglen;
+
+	if (dir == DMA_DEV_TO_MEM)
+		ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
+	else
+		ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
+
+	for_each_sg(sgl, sgent, sglen, i) {
+		struct udma_hwdesc *hwdesc = &d->hwdesc[i];
+		dma_addr_t sg_addr = sg_dma_address(sgent);
+		struct cppi5_host_desc_t *desc;
+		size_t sg_len = sg_dma_len(sgent);
+
+		hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
+						GFP_NOWAIT,
+						&hwdesc->cppi5_desc_paddr);
+		if (!hwdesc->cppi5_desc_vaddr) {
+			dev_err(uc->ud->dev,
+				"descriptor%d allocation failed\n", i);
+
+			udma_free_hwdesc(uc, d);
+			kfree(d);
+			return NULL;
+		}
+
+		d->residue += sg_len;
+		hwdesc->cppi5_desc_size = uc->config.hdesc_size;
+		desc = hwdesc->cppi5_desc_vaddr;
+
+		if (i == 0) {
+			cppi5_hdesc_init(desc, 0, 0);
+			/* Flow and Packet ID */
+			cppi5_desc_set_pktids(&desc->hdr, uc->id,
+					      CPPI5_INFO1_DESC_FLOWID_DEFAULT);
+			cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id);
+		} else {
+			cppi5_hdesc_reset_hbdesc(desc);
+			cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff);
+		}
+
+		/* attach the sg buffer to the descriptor */
+		cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len);
+
+		/* Attach link as host buffer descriptor */
+		if (h_desc)
+			cppi5_hdesc_link_hbdesc(h_desc,
+						hwdesc->cppi5_desc_paddr);
+
+		if (dir == DMA_MEM_TO_DEV)
+			h_desc = desc;
+	}
+
+	if (d->residue >= SZ_4M) {
+		dev_err(uc->ud->dev,
+			"%s: Transfer size %u is over the supported 4M range\n",
+			__func__, d->residue);
+		udma_free_hwdesc(uc, d);
+		kfree(d);
+		return NULL;
+	}
+
+	h_desc = d->hwdesc[0].cppi5_desc_vaddr;
+	cppi5_hdesc_set_pktlen(h_desc, d->residue);
+
+	return d;
+}
+
+static int udma_attach_metadata(struct dma_async_tx_descriptor *desc,
+				void *data, size_t len)
+{
+	struct udma_desc *d = to_udma_desc(desc);
+	struct udma_chan *uc = to_udma_chan(desc->chan);
+	struct cppi5_host_desc_t *h_desc;
+	u32 psd_size = len;
+	u32 flags = 0;
+
+	if (!uc->config.pkt_mode || !uc->config.metadata_size)
+		return -ENOTSUPP;
+
+	if (!data || len > uc->config.metadata_size)
+		return -EINVAL;
+
+	if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE)
+		return -EINVAL;
+
+	h_desc = d->hwdesc[0].cppi5_desc_vaddr;
+	if (d->dir == DMA_MEM_TO_DEV)
+		memcpy(h_desc->epib, data, len);
+
+	if (uc->config.needs_epib)
+		psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
+
+	d->metadata = data;
+	d->metadata_size = len;
+	if (uc->config.needs_epib)
+		flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
+
+	cppi5_hdesc_update_flags(h_desc, flags);
+	cppi5_hdesc_update_psdata_size(h_desc, psd_size);
+
+	return 0;
+}
+
+static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
+				   size_t *payload_len, size_t *max_len)
+{
+	struct 
udma_desc *d = to_udma_desc(desc); + struct udma_chan *uc = to_udma_chan(desc->chan); + struct cppi5_host_desc_t *h_desc; + + if (!uc->config.pkt_mode || !uc->config.metadata_size) + return ERR_PTR(-ENOTSUPP); + + h_desc = d->hwdesc[0].cppi5_desc_vaddr; + + *max_len = uc->config.metadata_size; + + *payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ? + CPPI5_INFO0_HDESC_EPIB_SIZE : 0; + *payload_len += cppi5_hdesc_get_psdata_size(h_desc); + + return h_desc->epib; +} + +static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc, + size_t payload_len) +{ + struct udma_desc *d = to_udma_desc(desc); + struct udma_chan *uc = to_udma_chan(desc->chan); + struct cppi5_host_desc_t *h_desc; + u32 psd_size = payload_len; + u32 flags = 0; + + if (!uc->config.pkt_mode || !uc->config.metadata_size) + return -ENOTSUPP; + + if (payload_len > uc->config.metadata_size) + return -EINVAL; + + if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE) + return -EINVAL; + + h_desc = d->hwdesc[0].cppi5_desc_vaddr; + + if (uc->config.needs_epib) { + psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE; + flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT; + } + + cppi5_hdesc_update_flags(h_desc, flags); + cppi5_hdesc_update_psdata_size(h_desc, psd_size); + + return 0; +} + +static struct dma_descriptor_metadata_ops metadata_ops = { + .attach = udma_attach_metadata, + .get_ptr = udma_get_metadata_ptr, + .set_len = udma_set_metadata_len, +}; + +static struct dma_async_tx_descriptor * +udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sglen, enum dma_transfer_direction dir, + unsigned long tx_flags, void *context) +{ + struct udma_chan *uc = to_udma_chan(chan); + enum dma_slave_buswidth dev_width; + struct udma_desc *d; + u32 burst; + + if (dir != uc->config.dir) { + dev_err(chan->device->dev, + "%s: chan%d is for %s, not supporting %s\n", + __func__, uc->id, + dmaengine_get_direction_text(uc->config.dir), + dmaengine_get_direction_text(dir)); + return NULL; + } + + if (dir == DMA_DEV_TO_MEM) { + dev_width = uc->cfg.src_addr_width; + burst = uc->cfg.src_maxburst; + } else if (dir == DMA_MEM_TO_DEV) { + dev_width = uc->cfg.dst_addr_width; + burst = uc->cfg.dst_maxburst; + } else { + dev_err(chan->device->dev, "%s: bad direction?\n", __func__); + return NULL; + } + + if (!burst) + burst = 1; + + if (uc->config.pkt_mode) + d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags, + context); + else + d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags, + context); + + if (!d) + return NULL; + + d->dir = dir; + d->desc_idx = 0; + d->tr_idx = 0; + + /* static TR for remote PDMA */ + if (udma_configure_statictr(uc, d, dev_width, burst)) { + dev_err(uc->ud->dev, + "%s: StaticTR Z is limited to maximum 4095 (%u)\n", + __func__, d->static_tr.bstcnt); + + udma_free_hwdesc(uc, d); + kfree(d); + return NULL; + } + + if (uc->config.metadata_size) + d->vd.tx.metadata_ops = &metadata_ops; + + return vchan_tx_prep(&uc->vc, &d->vd, tx_flags); +} + +static struct udma_desc * +udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr, + size_t buf_len, size_t period_len, + enum dma_transfer_direction dir, unsigned long flags) +{ + enum dma_slave_buswidth dev_width; + struct udma_desc *d; + size_t tr_size; + struct cppi5_tr_type1_t *tr_req; + unsigned int i; + unsigned int periods = buf_len / period_len; + u32 burst; + + if (dir == DMA_DEV_TO_MEM) { + dev_width = uc->cfg.src_addr_width; + burst = uc->cfg.src_maxburst; + } else if (dir == DMA_MEM_TO_DEV) { + dev_width = 
uc->cfg.dst_addr_width;
+		burst = uc->cfg.dst_maxburst;
+	} else {
+		dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
+		return NULL;
+	}
+
+	if (!burst)
+		burst = 1;
+
+	/* Now allocate and setup the descriptor. */
+	tr_size = sizeof(struct cppi5_tr_type1_t);
+	d = udma_alloc_tr_desc(uc, tr_size, periods, dir);
+	if (!d)
+		return NULL;
+
+	tr_req = d->hwdesc[0].tr_req_base;
+	for (i = 0; i < periods; i++) {
+		cppi5_tr_init(&tr_req[i].flags, CPPI5_TR_TYPE1, false, false,
+			      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+
+		tr_req[i].addr = buf_addr + period_len * i;
+		tr_req[i].icnt0 = dev_width;
+		tr_req[i].icnt1 = period_len / dev_width;
+		tr_req[i].dim1 = dev_width;
+
+		if (!(flags & DMA_PREP_INTERRUPT))
+			cppi5_tr_csf_set(&tr_req[i].flags,
+					 CPPI5_TR_CSF_SUPR_EVT);
+	}
+
+	return d;
+}
+
+static struct udma_desc *
+udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr,
+			 size_t buf_len, size_t period_len,
+			 enum dma_transfer_direction dir, unsigned long flags)
+{
+	struct udma_desc *d;
+	u32 ring_id;
+	int i;
+	int periods = buf_len / period_len;
+
+	if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1))
+		return NULL;
+
+	if (period_len >= SZ_4M)
+		return NULL;
+
+	d = kzalloc(sizeof(*d) + periods * sizeof(d->hwdesc[0]), GFP_NOWAIT);
+	if (!d)
+		return NULL;
+
+	d->hwdesc_count = periods;
+
+	/* TODO: re-check this... */
+	if (dir == DMA_DEV_TO_MEM)
+		ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
+	else
+		ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
+
+	for (i = 0; i < periods; i++) {
+		struct udma_hwdesc *hwdesc = &d->hwdesc[i];
+		dma_addr_t period_addr = buf_addr + (period_len * i);
+		struct cppi5_host_desc_t *h_desc;
+
+		hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
+						GFP_NOWAIT,
+						&hwdesc->cppi5_desc_paddr);
+		if (!hwdesc->cppi5_desc_vaddr) {
+			dev_err(uc->ud->dev,
+				"descriptor%d allocation failed\n", i);
+
+			udma_free_hwdesc(uc, d);
+			kfree(d);
+			return NULL;
+		}
+
+		hwdesc->cppi5_desc_size = uc->config.hdesc_size;
+		h_desc = hwdesc->cppi5_desc_vaddr;
+
+		cppi5_hdesc_init(h_desc, 0, 0);
+		cppi5_hdesc_set_pktlen(h_desc, period_len);
+
+		/* Flow and Packet ID */
+		cppi5_desc_set_pktids(&h_desc->hdr, uc->id,
+				      CPPI5_INFO1_DESC_FLOWID_DEFAULT);
+		cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id);
+
+		/* attach each period to a new descriptor */
+		cppi5_hdesc_attach_buf(h_desc,
+				       period_addr, period_len,
+				       period_addr, period_len);
+	}
+
+	return d;
+}
+
+static struct dma_async_tx_descriptor *
+udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+		     size_t period_len, enum dma_transfer_direction dir,
+		     unsigned long flags)
+{
+	struct udma_chan *uc = to_udma_chan(chan);
+	enum dma_slave_buswidth dev_width;
+	struct udma_desc *d;
+	u32 burst;
+
+	if (dir != uc->config.dir) {
+		dev_err(chan->device->dev,
+			"%s: chan%d is for %s, not supporting %s\n",
+			__func__, uc->id,
+			dmaengine_get_direction_text(uc->config.dir),
+			dmaengine_get_direction_text(dir));
+		return NULL;
+	}
+
+	uc->cyclic = true;
+
+	if (dir == DMA_DEV_TO_MEM) {
+		dev_width = uc->cfg.src_addr_width;
+		burst = uc->cfg.src_maxburst;
+	} else if (dir == DMA_MEM_TO_DEV) {
+		dev_width = uc->cfg.dst_addr_width;
+		burst = uc->cfg.dst_maxburst;
+	} else {
+		dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
+		return NULL;
+	}
+
+	if (!burst)
+		burst = 1;
+
+	if (uc->config.pkt_mode)
+		d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len,
+					     dir, flags);
+	else
+		d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len,
+					    dir, flags);
+
+	if (!d)
return NULL; + + d->sglen = buf_len / period_len; + + d->dir = dir; + d->residue = buf_len; + + /* static TR for remote PDMA */ + if (udma_configure_statictr(uc, d, dev_width, burst)) { + dev_err(uc->ud->dev, + "%s: StaticTR Z is limited to maximum 4095 (%u)\n", + __func__, d->static_tr.bstcnt); + + udma_free_hwdesc(uc, d); + kfree(d); + return NULL; + } + + if (uc->config.metadata_size) + d->vd.tx.metadata_ops = &metadata_ops; + + return vchan_tx_prep(&uc->vc, &d->vd, flags); +} + +static struct dma_async_tx_descriptor * +udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, + size_t len, unsigned long tx_flags) +{ + struct udma_chan *uc = to_udma_chan(chan); + struct udma_desc *d; + struct cppi5_tr_type15_t *tr_req; + int num_tr; + size_t tr_size = sizeof(struct cppi5_tr_type15_t); + u16 tr0_cnt0, tr0_cnt1, tr1_cnt0; + + if (uc->config.dir != DMA_MEM_TO_MEM) { + dev_err(chan->device->dev, + "%s: chan%d is for %s, not supporting %s\n", + __func__, uc->id, + dmaengine_get_direction_text(uc->config.dir), + dmaengine_get_direction_text(DMA_MEM_TO_MEM)); + return NULL; + } + + if (len < SZ_64K) { + num_tr = 1; + tr0_cnt0 = len; + tr0_cnt1 = 1; + } else { + unsigned long align_to = __ffs(src | dest); + + if (align_to > 3) + align_to = 3; + /* + * Keep simple: tr0: SZ_64K-alignment blocks, + * tr1: the remaining + */ + num_tr = 2; + tr0_cnt0 = (SZ_64K - BIT(align_to)); + if (len / tr0_cnt0 >= SZ_64K) { + dev_err(uc->ud->dev, "size %zu is not supported\n", + len); + return NULL; + } + + tr0_cnt1 = len / tr0_cnt0; + tr1_cnt0 = len % tr0_cnt0; + } + + d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM); + if (!d) + return NULL; + + d->dir = DMA_MEM_TO_MEM; + d->desc_idx = 0; + d->tr_idx = 0; + d->residue = len; + + tr_req = d->hwdesc[0].tr_req_base; + + cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true, + CPPI5_TR_EVENT_SIZE_COMPLETION, 0); + cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT); + + tr_req[0].addr = src; + tr_req[0].icnt0 = tr0_cnt0; + tr_req[0].icnt1 = tr0_cnt1; + tr_req[0].icnt2 = 1; + tr_req[0].icnt3 = 1; + tr_req[0].dim1 = tr0_cnt0; + + tr_req[0].daddr = dest; + tr_req[0].dicnt0 = tr0_cnt0; + tr_req[0].dicnt1 = tr0_cnt1; + tr_req[0].dicnt2 = 1; + tr_req[0].dicnt3 = 1; + tr_req[0].ddim1 = tr0_cnt0; + + if (num_tr == 2) { + cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true, + CPPI5_TR_EVENT_SIZE_COMPLETION, 0); + cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT); + + tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0; + tr_req[1].icnt0 = tr1_cnt0; + tr_req[1].icnt1 = 1; + tr_req[1].icnt2 = 1; + tr_req[1].icnt3 = 1; + + tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0; + tr_req[1].dicnt0 = tr1_cnt0; + tr_req[1].dicnt1 = 1; + tr_req[1].dicnt2 = 1; + tr_req[1].dicnt3 = 1; + } + + cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP); + + if (uc->config.metadata_size) + d->vd.tx.metadata_ops = &metadata_ops; + + return vchan_tx_prep(&uc->vc, &d->vd, tx_flags); +} + +static void udma_issue_pending(struct dma_chan *chan) +{ + struct udma_chan *uc = to_udma_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&uc->vc.lock, flags); + + /* If we have something pending and no active descriptor, then */ + if (vchan_issue_pending(&uc->vc) && !uc->desc) { + /* + * start a descriptor if the channel is NOT [marked as + * terminating _and_ it is still running (teardown has not + * completed yet)]. 
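+		 * In other words: start now if the channel is idle, or if a
+		 * previous teardown has already finished; a channel that is
+		 * still tearing down is restarted from the TDCM handling in
+		 * udma_ring_irq_handler() instead.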
+ */ + if (!(uc->state == UDMA_CHAN_IS_TERMINATING && + udma_is_chan_running(uc))) + udma_start(uc); + } + + spin_unlock_irqrestore(&uc->vc.lock, flags); +} + +static enum dma_status udma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct udma_chan *uc = to_udma_chan(chan); + enum dma_status ret; + unsigned long flags; + + spin_lock_irqsave(&uc->vc.lock, flags); + + ret = dma_cookie_status(chan, cookie, txstate); + + if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc)) + ret = DMA_PAUSED; + + if (ret == DMA_COMPLETE || !txstate) + goto out; + + if (uc->desc && uc->desc->vd.tx.cookie == cookie) { + u32 peer_bcnt = 0; + u32 bcnt = 0; + u32 residue = uc->desc->residue; + u32 delay = 0; + + if (uc->desc->dir == DMA_MEM_TO_DEV) { + bcnt = udma_tchanrt_read(uc->tchan, + UDMA_TCHAN_RT_SBCNT_REG); + + if (uc->config.ep_type != PSIL_EP_NATIVE) { + peer_bcnt = udma_tchanrt_read(uc->tchan, + UDMA_TCHAN_RT_PEER_BCNT_REG); + + if (bcnt > peer_bcnt) + delay = bcnt - peer_bcnt; + } + } else if (uc->desc->dir == DMA_DEV_TO_MEM) { + bcnt = udma_rchanrt_read(uc->rchan, + UDMA_RCHAN_RT_BCNT_REG); + + if (uc->config.ep_type != PSIL_EP_NATIVE) { + peer_bcnt = udma_rchanrt_read(uc->rchan, + UDMA_RCHAN_RT_PEER_BCNT_REG); + + if (peer_bcnt > bcnt) + delay = peer_bcnt - bcnt; + } + } else { + bcnt = udma_tchanrt_read(uc->tchan, + UDMA_TCHAN_RT_BCNT_REG); + } + + bcnt -= uc->bcnt; + if (bcnt && !(bcnt % uc->desc->residue)) + residue = 0; + else + residue -= bcnt % uc->desc->residue; + + if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) { + ret = DMA_COMPLETE; + delay = 0; + } + + dma_set_residue(txstate, residue); + dma_set_in_flight_bytes(txstate, delay); + + } else { + ret = DMA_COMPLETE; + } + +out: + spin_unlock_irqrestore(&uc->vc.lock, flags); + return ret; +} + +static int udma_pause(struct dma_chan *chan) +{ + struct udma_chan *uc = to_udma_chan(chan); + + if (!uc->desc) + return -EINVAL; + + /* pause the channel */ + switch (uc->desc->dir) { + case DMA_DEV_TO_MEM: + udma_rchanrt_update_bits(uc->rchan, + UDMA_RCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_PAUSE, + UDMA_PEER_RT_EN_PAUSE); + break; + case DMA_MEM_TO_DEV: + udma_tchanrt_update_bits(uc->tchan, + UDMA_TCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_PAUSE, + UDMA_PEER_RT_EN_PAUSE); + break; + case DMA_MEM_TO_MEM: + udma_tchanrt_update_bits(uc->tchan, UDMA_TCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_PAUSE, + UDMA_CHAN_RT_CTL_PAUSE); + break; + default: + return -EINVAL; + } + + return 0; +} + +static int udma_resume(struct dma_chan *chan) +{ + struct udma_chan *uc = to_udma_chan(chan); + + if (!uc->desc) + return -EINVAL; + + /* resume the channel */ + switch (uc->desc->dir) { + case DMA_DEV_TO_MEM: + udma_rchanrt_update_bits(uc->rchan, + UDMA_RCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_PAUSE, 0); + + break; + case DMA_MEM_TO_DEV: + udma_tchanrt_update_bits(uc->tchan, + UDMA_TCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_PAUSE, 0); + break; + case DMA_MEM_TO_MEM: + udma_tchanrt_update_bits(uc->tchan, UDMA_TCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_PAUSE, 0); + break; + default: + return -EINVAL; + } + + return 0; +} + +static int udma_terminate_all(struct dma_chan *chan) +{ + struct udma_chan *uc = to_udma_chan(chan); + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&uc->vc.lock, flags); + + if (udma_is_chan_running(uc)) + udma_stop(uc); + + if (uc->desc) { + uc->terminated_desc = uc->desc; + uc->desc = NULL; + uc->terminated_desc->terminated = true; + cancel_delayed_work(&uc->tx_drain.work); + } + 
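+	/*
+	 * Note: the terminated descriptor cannot be freed here, the hardware
+	 * may still be draining it. It is released when the teardown
+	 * completion message arrives in udma_ring_irq_handler().
+	 */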
+ uc->paused = false; + + vchan_get_all_descriptors(&uc->vc, &head); + spin_unlock_irqrestore(&uc->vc.lock, flags); + vchan_dma_desc_free_list(&uc->vc, &head); + + return 0; +} + +static void udma_synchronize(struct dma_chan *chan) +{ + struct udma_chan *uc = to_udma_chan(chan); + unsigned long timeout = msecs_to_jiffies(1000); + + vchan_synchronize(&uc->vc); + + if (uc->state == UDMA_CHAN_IS_TERMINATING) { + timeout = wait_for_completion_timeout(&uc->teardown_completed, + timeout); + if (!timeout) { + dev_warn(uc->ud->dev, "chan%d teardown timeout!\n", + uc->id); + udma_dump_chan_stdata(uc); + udma_reset_chan(uc, true); + } + } + + udma_reset_chan(uc, false); + if (udma_is_chan_running(uc)) + dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id); + + cancel_delayed_work_sync(&uc->tx_drain.work); + udma_reset_rings(uc); +} + +static void udma_desc_pre_callback(struct virt_dma_chan *vc, + struct virt_dma_desc *vd, + struct dmaengine_result *result) +{ + struct udma_chan *uc = to_udma_chan(&vc->chan); + struct udma_desc *d; + + if (!vd) + return; + + d = to_udma_desc(&vd->tx); + + if (d->metadata_size) + udma_fetch_epib(uc, d); + + /* Provide residue information for the client */ + if (result) { + void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx); + + if (cppi5_desc_get_type(desc_vaddr) == + CPPI5_INFO0_DESC_TYPE_VAL_HOST) { + result->residue = d->residue - + cppi5_hdesc_get_pktlen(desc_vaddr); + if (result->residue) + result->result = DMA_TRANS_ABORTED; + else + result->result = DMA_TRANS_NOERROR; + } else { + result->residue = 0; + result->result = DMA_TRANS_NOERROR; + } + } +} + +/* + * This tasklet handles the completion of a DMA descriptor by + * calling its callback and freeing it. + */ +static void udma_vchan_complete(unsigned long arg) +{ + struct virt_dma_chan *vc = (struct virt_dma_chan *)arg; + struct virt_dma_desc *vd, *_vd; + struct dmaengine_desc_callback cb; + LIST_HEAD(head); + + spin_lock_irq(&vc->lock); + list_splice_tail_init(&vc->desc_completed, &head); + vd = vc->cyclic; + if (vd) { + vc->cyclic = NULL; + dmaengine_desc_get_callback(&vd->tx, &cb); + } else { + memset(&cb, 0, sizeof(cb)); + } + spin_unlock_irq(&vc->lock); + + udma_desc_pre_callback(vc, vd, NULL); + dmaengine_desc_callback_invoke(&cb, NULL); + + list_for_each_entry_safe(vd, _vd, &head, node) { + struct dmaengine_result result; + + dmaengine_desc_get_callback(&vd->tx, &cb); + + list_del(&vd->node); + + udma_desc_pre_callback(vc, vd, &result); + dmaengine_desc_callback_invoke(&cb, &result); + + vchan_vdesc_fini(vd); + } +} + +static void udma_free_chan_resources(struct dma_chan *chan) +{ + struct udma_chan *uc = to_udma_chan(chan); + struct udma_dev *ud = to_udma_dev(chan->device); + + udma_terminate_all(chan); + if (uc->terminated_desc) { + udma_reset_chan(uc, false); + udma_reset_rings(uc); + } + + cancel_delayed_work_sync(&uc->tx_drain.work); + destroy_delayed_work_on_stack(&uc->tx_drain.work); + + if (uc->irq_num_ring > 0) { + free_irq(uc->irq_num_ring, uc); + + uc->irq_num_ring = 0; + } + if (uc->irq_num_udma > 0) { + free_irq(uc->irq_num_udma, uc); + + uc->irq_num_udma = 0; + } + + /* Release PSI-L pairing */ + if (uc->psil_paired) { + navss_psil_unpair(ud, uc->config.src_thread, + uc->config.dst_thread); + uc->psil_paired = false; + } + + vchan_free_chan_resources(&uc->vc); + tasklet_kill(&uc->vc.task); + + udma_free_tx_resources(uc); + udma_free_rx_resources(uc); + udma_reset_uchan(uc); + + if (uc->use_dma_pool) { + dma_pool_destroy(uc->hdesc_pool); + uc->use_dma_pool = false; + } +} 
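+/*
+ * For reference, a minimal sketch of how a dmaengine client drives the slave
+ * path implemented above. The channel name, buffer and its DMA mapping are
+ * hypothetical and error handling is omitted:
+ *
+ *	chan = dma_request_chan(dev, "tx");     // resolved via udma_of_xlate()
+ *	dmaengine_slave_config(chan, &cfg);     // stored by udma_slave_config()
+ *	desc = dmaengine_prep_slave_single(chan, buf_dma, len,
+ *					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
+ *	cookie = dmaengine_submit(desc);
+ *	dma_async_issue_pending(chan);          // udma_issue_pending() starts HW
+ *
+ * Completion is then reported through udma_vchan_complete() and the
+ * descriptor callback, or can be polled via udma_tx_status().
+ */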
+ +static struct platform_driver udma_driver; + +static bool udma_dma_filter_fn(struct dma_chan *chan, void *param) +{ + struct udma_chan_config *ucc; + struct psil_endpoint_config *ep_config; + struct udma_chan *uc; + struct udma_dev *ud; + u32 *args; + + if (chan->device->dev->driver != &udma_driver.driver) + return false; + + uc = to_udma_chan(chan); + ucc = &uc->config; + ud = uc->ud; + args = param; + + ucc->remote_thread_id = args[0]; + + if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET) + ucc->dir = DMA_MEM_TO_DEV; + else + ucc->dir = DMA_DEV_TO_MEM; + + ep_config = psil_get_ep_config(ucc->remote_thread_id); + if (IS_ERR(ep_config)) { + dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n", + ucc->remote_thread_id); + ucc->dir = DMA_MEM_TO_MEM; + ucc->remote_thread_id = -1; + return false; + } + + ucc->pkt_mode = ep_config->pkt_mode; + ucc->channel_tpl = ep_config->channel_tpl; + ucc->notdpkt = ep_config->notdpkt; + ucc->ep_type = ep_config->ep_type; + + if (ucc->ep_type != PSIL_EP_NATIVE) { + const struct udma_match_data *match_data = ud->match_data; + + if (match_data->flags & UDMA_FLAG_PDMA_ACC32) + ucc->enable_acc32 = ep_config->pdma_acc32; + if (match_data->flags & UDMA_FLAG_PDMA_BURST) + ucc->enable_burst = ep_config->pdma_burst; + } + + ucc->needs_epib = ep_config->needs_epib; + ucc->psd_size = ep_config->psd_size; + ucc->metadata_size = + (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) + + ucc->psd_size; + + if (ucc->pkt_mode) + ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) + + ucc->metadata_size, ud->desc_align); + + dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id, + ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir)); + + return true; +} + +static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct udma_dev *ud = ofdma->of_dma_data; + dma_cap_mask_t mask = ud->ddev.cap_mask; + struct dma_chan *chan; + + if (dma_spec->args_count != 1) + return NULL; + + chan = __dma_request_channel(&mask, udma_dma_filter_fn, + &dma_spec->args[0], ofdma->of_node); + if (!chan) { + dev_err(ud->dev, "get channel fail in %s.\n", __func__); + return ERR_PTR(-EINVAL); + } + + return chan; +} + +static struct udma_match_data am654_main_data = { + .psil_base = 0x1000, + .enable_memcpy_support = true, + .statictr_z_mask = GENMASK(11, 0), + .rchan_oes_offset = 0x2000, + .tpl_levels = 2, + .level_start_idx = { + [0] = 8, /* Normal channels */ + [1] = 0, /* High Throughput channels */ + }, +}; + +static struct udma_match_data am654_mcu_data = { + .psil_base = 0x6000, + .enable_memcpy_support = true, /* TEST: DMA domains */ + .statictr_z_mask = GENMASK(11, 0), + .rchan_oes_offset = 0x2000, + .tpl_levels = 2, + .level_start_idx = { + [0] = 2, /* Normal channels */ + [1] = 0, /* High Throughput channels */ + }, +}; + +static struct udma_match_data j721e_main_data = { + .psil_base = 0x1000, + .enable_memcpy_support = true, + .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST, + .statictr_z_mask = GENMASK(23, 0), + .rchan_oes_offset = 0x400, + .tpl_levels = 3, + .level_start_idx = { + [0] = 16, /* Normal channels */ + [1] = 4, /* High Throughput channels */ + [2] = 0, /* Ultra High Throughput channels */ + }, +}; + +static struct udma_match_data j721e_mcu_data = { + .psil_base = 0x6000, + .enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */ + .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST, + .statictr_z_mask = GENMASK(23, 0), + .rchan_oes_offset = 0x400, + .tpl_levels = 2, + 
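+	/*
+	 * Lower channel indices host the higher-throughput channels, so
+	 * level_start_idx[tpl] is the first channel id usable at the given
+	 * throughput level (consumed by the __udma_reserve_* helpers and
+	 * udma_get_chan_pair()).
+	 */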
.level_start_idx = { + [0] = 2, /* Normal channels */ + [1] = 0, /* High Throughput channels */ + }, +}; + +static const struct of_device_id udma_of_match[] = { + { + .compatible = "ti,am654-navss-main-udmap", + .data = &am654_main_data, + }, + { + .compatible = "ti,am654-navss-mcu-udmap", + .data = &am654_mcu_data, + }, { + .compatible = "ti,j721e-navss-main-udmap", + .data = &j721e_main_data, + }, { + .compatible = "ti,j721e-navss-mcu-udmap", + .data = &j721e_mcu_data, + }, + { /* Sentinel */ }, +}; + +static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud) +{ + struct resource *res; + int i; + + for (i = 0; i < MMR_LAST; i++) { + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + mmr_names[i]); + ud->mmrs[i] = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(ud->mmrs[i])) + return PTR_ERR(ud->mmrs[i]); + } + + return 0; +} + +static int udma_setup_resources(struct udma_dev *ud) +{ + struct device *dev = ud->dev; + int ch_count, ret, i, j; + u32 cap2, cap3; + struct ti_sci_resource_desc *rm_desc; + struct ti_sci_resource *rm_res, irq_res; + struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; + static const char * const range_names[] = { "ti,sci-rm-range-tchan", + "ti,sci-rm-range-rchan", + "ti,sci-rm-range-rflow" }; + + cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28); + cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c); + + ud->rflow_cnt = cap3 & 0x3fff; + ud->tchan_cnt = cap2 & 0x1ff; + ud->echan_cnt = (cap2 >> 9) & 0x1ff; + ud->rchan_cnt = (cap2 >> 18) & 0x1ff; + ch_count = ud->tchan_cnt + ud->rchan_cnt; + + ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt), + sizeof(unsigned long), GFP_KERNEL); + ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans), + GFP_KERNEL); + ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt), + sizeof(unsigned long), GFP_KERNEL); + ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans), + GFP_KERNEL); + ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt), + sizeof(unsigned long), + GFP_KERNEL); + ud->rflow_gp_map_allocated = devm_kcalloc(dev, + BITS_TO_LONGS(ud->rflow_cnt), + sizeof(unsigned long), + GFP_KERNEL); + ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt), + sizeof(unsigned long), + GFP_KERNEL); + ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows), + GFP_KERNEL); + + if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map || + !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans || + !ud->rflows || !ud->rflow_in_use) + return -ENOMEM; + + /* + * RX flows with the same Ids as RX channels are reserved to be used + * as default flows if remote HW can't generate flow_ids. Those + * RX flows can be requested only explicitly by id. 
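+	 * (This is also why __udma_alloc_gp_rflow_range() rejects any range
+	 * that starts below rchan_cnt.)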
+ */ + bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt); + + /* by default no GP rflows are assigned to Linux */ + bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt); + + /* Get resource ranges from tisci */ + for (i = 0; i < RM_RANGE_LAST; i++) + tisci_rm->rm_ranges[i] = + devm_ti_sci_get_of_resource(tisci_rm->tisci, dev, + tisci_rm->tisci_dev_id, + (char *)range_names[i]); + + /* tchan ranges */ + rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; + if (IS_ERR(rm_res)) { + bitmap_zero(ud->tchan_map, ud->tchan_cnt); + } else { + bitmap_fill(ud->tchan_map, ud->tchan_cnt); + for (i = 0; i < rm_res->sets; i++) { + rm_desc = &rm_res->desc[i]; + bitmap_clear(ud->tchan_map, rm_desc->start, + rm_desc->num); + dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n", + rm_desc->start, rm_desc->num); + } + } + irq_res.sets = rm_res->sets; + + /* rchan and matching default flow ranges */ + rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; + if (IS_ERR(rm_res)) { + bitmap_zero(ud->rchan_map, ud->rchan_cnt); + } else { + bitmap_fill(ud->rchan_map, ud->rchan_cnt); + for (i = 0; i < rm_res->sets; i++) { + rm_desc = &rm_res->desc[i]; + bitmap_clear(ud->rchan_map, rm_desc->start, + rm_desc->num); + dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n", + rm_desc->start, rm_desc->num); + } + } + + irq_res.sets += rm_res->sets; + irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL); + rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; + for (i = 0; i < rm_res->sets; i++) { + irq_res.desc[i].start = rm_res->desc[i].start; + irq_res.desc[i].num = rm_res->desc[i].num; + } + rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; + for (j = 0; j < rm_res->sets; j++, i++) { + irq_res.desc[i].start = rm_res->desc[j].start + + ud->match_data->rchan_oes_offset; + irq_res.desc[i].num = rm_res->desc[j].num; + } + ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res); + kfree(irq_res.desc); + if (ret) { + dev_err(ud->dev, "Failed to allocate MSI interrupts\n"); + return ret; + } + + /* GP rflow ranges */ + rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW]; + if (IS_ERR(rm_res)) { + /* all gp flows are assigned exclusively to Linux */ + bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt, + ud->rflow_cnt - ud->rchan_cnt); + } else { + for (i = 0; i < rm_res->sets; i++) { + rm_desc = &rm_res->desc[i]; + bitmap_clear(ud->rflow_gp_map, rm_desc->start, + rm_desc->num); + dev_dbg(dev, "ti-sci-res: rflow: %d:%d\n", + rm_desc->start, rm_desc->num); + } + } + + ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt); + ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt); + if (!ch_count) + return -ENODEV; + + ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels), + GFP_KERNEL); + if (!ud->channels) + return -ENOMEM; + + dev_info(dev, "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n", + ch_count, + ud->tchan_cnt - bitmap_weight(ud->tchan_map, ud->tchan_cnt), + ud->rchan_cnt - bitmap_weight(ud->rchan_map, ud->rchan_cnt), + ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map, + ud->rflow_cnt)); + + return ch_count; +} + +#define TI_UDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)) + +static int udma_probe(struct platform_device *pdev) +{ + struct device_node *navss_node = pdev->dev.parent->of_node; + struct device *dev = &pdev->dev; + struct udma_dev *ud; + const struct of_device_id *match; + int i, ret; + int ch_count; + + ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48)); + if (ret) + 
dev_err(dev, "failed to set dma mask stuff\n"); + + ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL); + if (!ud) + return -ENOMEM; + + ret = udma_get_mmrs(pdev, ud); + if (ret) + return ret; + + ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci"); + if (IS_ERR(ud->tisci_rm.tisci)) + return PTR_ERR(ud->tisci_rm.tisci); + + ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id", + &ud->tisci_rm.tisci_dev_id); + if (ret) { + dev_err(dev, "ti,sci-dev-id read failure %d\n", ret); + return ret; + } + pdev->id = ud->tisci_rm.tisci_dev_id; + + ret = of_property_read_u32(navss_node, "ti,sci-dev-id", + &ud->tisci_rm.tisci_navss_dev_id); + if (ret) { + dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret); + return ret; + } + + ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops; + ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops; + + ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc"); + if (IS_ERR(ud->ringacc)) + return PTR_ERR(ud->ringacc); + + dev->msi_domain = of_msi_get_domain(dev, dev->of_node, + DOMAIN_BUS_TI_SCI_INTA_MSI); + if (!dev->msi_domain) { + dev_err(dev, "Failed to get MSI domain\n"); + return -EPROBE_DEFER; + } + + match = of_match_node(udma_of_match, dev->of_node); + if (!match) { + dev_err(dev, "No compatible match found\n"); + return -ENODEV; + } + ud->match_data = match->data; + + dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask); + dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask); + + ud->ddev.device_alloc_chan_resources = udma_alloc_chan_resources; + ud->ddev.device_config = udma_slave_config; + ud->ddev.device_prep_slave_sg = udma_prep_slave_sg; + ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic; + ud->ddev.device_issue_pending = udma_issue_pending; + ud->ddev.device_tx_status = udma_tx_status; + ud->ddev.device_pause = udma_pause; + ud->ddev.device_resume = udma_resume; + ud->ddev.device_terminate_all = udma_terminate_all; + ud->ddev.device_synchronize = udma_synchronize; + + ud->ddev.device_free_chan_resources = udma_free_chan_resources; + ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS; + ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS; + ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; + ud->ddev.copy_align = DMAENGINE_ALIGN_8_BYTES; + ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT | + DESC_METADATA_ENGINE; + if (ud->match_data->enable_memcpy_support) { + dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask); + ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy; + ud->ddev.directions |= BIT(DMA_MEM_TO_MEM); + } + + ud->ddev.dev = dev; + ud->dev = dev; + ud->psil_base = ud->match_data->psil_base; + + INIT_LIST_HEAD(&ud->ddev.channels); + INIT_LIST_HEAD(&ud->desc_to_purge); + + ch_count = udma_setup_resources(ud); + if (ch_count <= 0) + return ch_count; + + spin_lock_init(&ud->lock); + INIT_WORK(&ud->purge_work, udma_purge_desc_work); + + ud->desc_align = 64; + if (ud->desc_align < dma_get_cache_alignment()) + ud->desc_align = dma_get_cache_alignment(); + + for (i = 0; i < ud->tchan_cnt; i++) { + struct udma_tchan *tchan = &ud->tchans[i]; + + tchan->id = i; + tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000; + } + + for (i = 0; i < ud->rchan_cnt; i++) { + struct udma_rchan *rchan = &ud->rchans[i]; + + rchan->id = i; + rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000; + } + + for (i = 0; i < ud->rflow_cnt; i++) { + struct udma_rflow *rflow = &ud->rflows[i]; + + rflow->id = i; + } + + for (i = 0; i < ch_count; i++) { + struct 
udma_chan *uc = &ud->channels[i]; + + uc->ud = ud; + uc->vc.desc_free = udma_desc_free; + uc->id = i; + uc->tchan = NULL; + uc->rchan = NULL; + uc->config.remote_thread_id = -1; + uc->config.dir = DMA_MEM_TO_MEM; + uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d", + dev_name(dev), i); + + vchan_init(&uc->vc, &ud->ddev); + /* Use custom vchan completion handling */ + tasklet_init(&uc->vc.task, udma_vchan_complete, + (unsigned long)&uc->vc); + init_completion(&uc->teardown_completed); + } + + ret = dma_async_device_register(&ud->ddev); + if (ret) { + dev_err(dev, "failed to register slave DMA engine: %d\n", ret); + return ret; + } + + platform_set_drvdata(pdev, ud); + + ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud); + if (ret) { + dev_err(dev, "failed to register of_dma controller\n"); + dma_async_device_unregister(&ud->ddev); + } + + return ret; +} + +static struct platform_driver udma_driver = { + .driver = { + .name = "ti-udma", + .of_match_table = udma_of_match, + .suppress_bind_attrs = true, + }, + .probe = udma_probe, +}; +builtin_platform_driver(udma_driver); + +/* Private interfaces to UDMA */ +#include "k3-udma-private.c" diff --git a/drivers/dma/ti/k3-udma.h b/drivers/dma/ti/k3-udma.h new file mode 100644 index 000000000000..128d8744a435 --- /dev/null +++ b/drivers/dma/ti/k3-udma.h @@ -0,0 +1,151 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + */ + +#ifndef K3_UDMA_H_ +#define K3_UDMA_H_ + +#include <linux/soc/ti/ti_sci_protocol.h> + +/* Global registers */ +#define UDMA_REV_REG 0x0 +#define UDMA_PERF_CTL_REG 0x4 +#define UDMA_EMU_CTL_REG 0x8 +#define UDMA_PSIL_TO_REG 0x10 +#define UDMA_UTC_CTL_REG 0x1c +#define UDMA_CAP_REG(i) (0x20 + ((i) * 4)) +#define UDMA_RX_FLOW_ID_FW_OES_REG 0x80 +#define UDMA_RX_FLOW_ID_FW_STATUS_REG 0x88 + +/* TX chan RT regs */ +#define UDMA_TCHAN_RT_CTL_REG 0x0 +#define UDMA_TCHAN_RT_SWTRIG_REG 0x8 +#define UDMA_TCHAN_RT_STDATA_REG 0x80 + +#define UDMA_TCHAN_RT_PEER_REG(i) (0x200 + ((i) * 0x4)) +#define UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG \ + UDMA_TCHAN_RT_PEER_REG(0) /* PSI-L: 0x400 */ +#define UDMA_TCHAN_RT_PEER_STATIC_TR_Z_REG \ + UDMA_TCHAN_RT_PEER_REG(1) /* PSI-L: 0x401 */ +#define UDMA_TCHAN_RT_PEER_BCNT_REG \ + UDMA_TCHAN_RT_PEER_REG(4) /* PSI-L: 0x404 */ +#define UDMA_TCHAN_RT_PEER_RT_EN_REG \ + UDMA_TCHAN_RT_PEER_REG(8) /* PSI-L: 0x408 */ + +#define UDMA_TCHAN_RT_PCNT_REG 0x400 +#define UDMA_TCHAN_RT_BCNT_REG 0x408 +#define UDMA_TCHAN_RT_SBCNT_REG 0x410 + +/* RX chan RT regs */ +#define UDMA_RCHAN_RT_CTL_REG 0x0 +#define UDMA_RCHAN_RT_SWTRIG_REG 0x8 +#define UDMA_RCHAN_RT_STDATA_REG 0x80 + +#define UDMA_RCHAN_RT_PEER_REG(i) (0x200 + ((i) * 0x4)) +#define UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG \ + UDMA_RCHAN_RT_PEER_REG(0) /* PSI-L: 0x400 */ +#define UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG \ + UDMA_RCHAN_RT_PEER_REG(1) /* PSI-L: 0x401 */ +#define UDMA_RCHAN_RT_PEER_BCNT_REG \ + UDMA_RCHAN_RT_PEER_REG(4) /* PSI-L: 0x404 */ +#define UDMA_RCHAN_RT_PEER_RT_EN_REG \ + UDMA_RCHAN_RT_PEER_REG(8) /* PSI-L: 0x408 */ + +#define UDMA_RCHAN_RT_PCNT_REG 0x400 +#define UDMA_RCHAN_RT_BCNT_REG 0x408 +#define UDMA_RCHAN_RT_SBCNT_REG 0x410 + +/* UDMA_TCHAN_RT_CTL_REG/UDMA_RCHAN_RT_CTL_REG */ +#define UDMA_CHAN_RT_CTL_EN BIT(31) +#define UDMA_CHAN_RT_CTL_TDOWN BIT(30) +#define UDMA_CHAN_RT_CTL_PAUSE BIT(29) +#define UDMA_CHAN_RT_CTL_FTDOWN BIT(28) +#define UDMA_CHAN_RT_CTL_ERROR BIT(0) + +/* UDMA_TCHAN_RT_PEER_RT_EN_REG/UDMA_RCHAN_RT_PEER_RT_EN_REG (PSI-L: 0x408) */ 
+#define UDMA_PEER_RT_EN_ENABLE BIT(31) +#define UDMA_PEER_RT_EN_TEARDOWN BIT(30) +#define UDMA_PEER_RT_EN_PAUSE BIT(29) +#define UDMA_PEER_RT_EN_FLUSH BIT(28) +#define UDMA_PEER_RT_EN_IDLE BIT(1) + +/* + * UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG / + * UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG + */ +#define PDMA_STATIC_TR_X_MASK GENMASK(26, 24) +#define PDMA_STATIC_TR_X_SHIFT (24) +#define PDMA_STATIC_TR_Y_MASK GENMASK(11, 0) +#define PDMA_STATIC_TR_Y_SHIFT (0) + +#define PDMA_STATIC_TR_Y(x) \ + (((x) << PDMA_STATIC_TR_Y_SHIFT) & PDMA_STATIC_TR_Y_MASK) +#define PDMA_STATIC_TR_X(x) \ + (((x) << PDMA_STATIC_TR_X_SHIFT) & PDMA_STATIC_TR_X_MASK) + +#define PDMA_STATIC_TR_XY_ACC32 BIT(30) +#define PDMA_STATIC_TR_XY_BURST BIT(31) + +/* + * UDMA_TCHAN_RT_PEER_STATIC_TR_Z_REG / + * UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG + */ +#define PDMA_STATIC_TR_Z(x, mask) ((x) & (mask)) + +struct udma_dev; +struct udma_tchan; +struct udma_rchan; +struct udma_rflow; + +enum udma_rm_range { + RM_RANGE_TCHAN = 0, + RM_RANGE_RCHAN, + RM_RANGE_RFLOW, + RM_RANGE_LAST, +}; + +struct udma_tisci_rm { + const struct ti_sci_handle *tisci; + const struct ti_sci_rm_udmap_ops *tisci_udmap_ops; + u32 tisci_dev_id; + + /* tisci information for PSI-L thread pairing/unpairing */ + const struct ti_sci_rm_psil_ops *tisci_psil_ops; + u32 tisci_navss_dev_id; + + struct ti_sci_resource *rm_ranges[RM_RANGE_LAST]; +}; + +/* Direct access to UDMA low level resources for the glue layer */ +int xudma_navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread); +int xudma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread, + u32 dst_thread); + +struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property); +void xudma_dev_put(struct udma_dev *ud); +u32 xudma_dev_get_psil_base(struct udma_dev *ud); +struct udma_tisci_rm *xudma_dev_get_tisci_rm(struct udma_dev *ud); + +int xudma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt); +int xudma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt); + +struct udma_tchan *xudma_tchan_get(struct udma_dev *ud, int id); +struct udma_rchan *xudma_rchan_get(struct udma_dev *ud, int id); +struct udma_rflow *xudma_rflow_get(struct udma_dev *ud, int id); + +void xudma_tchan_put(struct udma_dev *ud, struct udma_tchan *p); +void xudma_rchan_put(struct udma_dev *ud, struct udma_rchan *p); +void xudma_rflow_put(struct udma_dev *ud, struct udma_rflow *p); + +int xudma_tchan_get_id(struct udma_tchan *p); +int xudma_rchan_get_id(struct udma_rchan *p); +int xudma_rflow_get_id(struct udma_rflow *p); + +u32 xudma_tchanrt_read(struct udma_tchan *tchan, int reg); +void xudma_tchanrt_write(struct udma_tchan *tchan, int reg, u32 val); +u32 xudma_rchanrt_read(struct udma_rchan *rchan, int reg); +void xudma_rchanrt_write(struct udma_rchan *rchan, int reg, u32 val); +bool xudma_rflow_is_gp(struct udma_dev *ud, int id); + +#endif /* K3_UDMA_H_ */ diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c index 256fc662c500..23e33a85f033 100644 --- a/drivers/dma/virt-dma.c +++ b/drivers/dma/virt-dma.c @@ -114,13 +114,8 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head) struct virt_dma_desc *vd, *_vd; list_for_each_entry_safe(vd, _vd, head, node) { - if (dmaengine_desc_test_reuse(&vd->tx)) { - list_move_tail(&vd->node, &vc->desc_allocated); - } else { - dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd); - list_del(&vd->node); - vc->desc_free(vd); - } + list_del(&vd->node); + vchan_vdesc_fini(vd); } } EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list); @@ -134,6
+129,7 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev) INIT_LIST_HEAD(&vc->desc_submitted); INIT_LIST_HEAD(&vc->desc_issued); INIT_LIST_HEAD(&vc->desc_completed); + INIT_LIST_HEAD(&vc->desc_terminated); tasklet_init(&vc->task, vchan_complete, (unsigned long)vc); diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h index ab158bac03a7..e9f5250fbe4d 100644 --- a/drivers/dma/virt-dma.h +++ b/drivers/dma/virt-dma.h @@ -31,9 +31,9 @@ struct virt_dma_chan { struct list_head desc_submitted; struct list_head desc_issued; struct list_head desc_completed; + struct list_head desc_terminated; struct virt_dma_desc *cyclic; - struct virt_dma_desc *vd_terminated; }; static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan) @@ -113,10 +113,15 @@ static inline void vchan_vdesc_fini(struct virt_dma_desc *vd) { struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); - if (dmaengine_desc_test_reuse(&vd->tx)) + if (dmaengine_desc_test_reuse(&vd->tx)) { + unsigned long flags; + + spin_lock_irqsave(&vc->lock, flags); list_add(&vd->node, &vc->desc_allocated); - else + spin_unlock_irqrestore(&vc->lock, flags); + } else { vc->desc_free(vd); + } } /** @@ -141,11 +146,8 @@ static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd) { struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); - /* free up stuck descriptor */ - if (vc->vd_terminated) - vchan_vdesc_fini(vc->vd_terminated); + list_add_tail(&vd->node, &vc->desc_terminated); - vc->vd_terminated = vd; if (vc->cyclic == vd) vc->cyclic = NULL; } @@ -179,6 +181,7 @@ static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc, list_splice_tail_init(&vc->desc_submitted, head); list_splice_tail_init(&vc->desc_issued, head); list_splice_tail_init(&vc->desc_completed, head); + list_splice_tail_init(&vc->desc_terminated, head); } static inline void vchan_free_chan_resources(struct virt_dma_chan *vc) @@ -207,16 +210,18 @@ static inline void vchan_free_chan_resources(struct virt_dma_chan *vc) */ static inline void vchan_synchronize(struct virt_dma_chan *vc) { + LIST_HEAD(head); unsigned long flags; tasklet_kill(&vc->task); spin_lock_irqsave(&vc->lock, flags); - if (vc->vd_terminated) { - vchan_vdesc_fini(vc->vd_terminated); - vc->vd_terminated = NULL; - } + + list_splice_tail_init(&vc->desc_terminated, &head); + spin_unlock_irqrestore(&vc->lock, flags); + + vchan_dma_desc_free_list(vc, &head); } #endif diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c index 9c845c07b107..d47749a35863 100644 --- a/drivers/dma/xilinx/zynqmp_dma.c +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -123,10 +123,12 @@ /* Max transfer size per descriptor */ #define ZYNQMP_DMA_MAX_TRANS_LEN 0x40000000 +/* Max burst lengths */ +#define ZYNQMP_DMA_MAX_DST_BURST_LEN 32768U +#define ZYNQMP_DMA_MAX_SRC_BURST_LEN 32768U + /* Reset values for data attributes */ #define ZYNQMP_DMA_AXCACHE_VAL 0xF -#define ZYNQMP_DMA_ARLEN_RST_VAL 0xF -#define ZYNQMP_DMA_AWLEN_RST_VAL 0xF #define ZYNQMP_DMA_SRC_ISSUE_RST_VAL 0x1F @@ -534,17 +536,19 @@ static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status) static void zynqmp_dma_config(struct zynqmp_dma_chan *chan) { - u32 val; + u32 val, burst_val; val = readl(chan->regs + ZYNQMP_DMA_CTRL0); val |= ZYNQMP_DMA_POINT_TYPE_SG; writel(val, chan->regs + ZYNQMP_DMA_CTRL0); val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR); + burst_val = __ilog2_u32(chan->src_burst_len); val = (val & ~ZYNQMP_DMA_ARLEN) | - (chan->src_burst_len << ZYNQMP_DMA_ARLEN_OFST); + ((burst_val << 
ZYNQMP_DMA_ARLEN_OFST) & ZYNQMP_DMA_ARLEN); + burst_val = __ilog2_u32(chan->dst_burst_len); val = (val & ~ZYNQMP_DMA_AWLEN) | - (chan->dst_burst_len << ZYNQMP_DMA_AWLEN_OFST); + ((burst_val << ZYNQMP_DMA_AWLEN_OFST) & ZYNQMP_DMA_AWLEN); writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR); } @@ -560,8 +564,10 @@ static int zynqmp_dma_device_config(struct dma_chan *dchan, { struct zynqmp_dma_chan *chan = to_chan(dchan); - chan->src_burst_len = config->src_maxburst; - chan->dst_burst_len = config->dst_maxburst; + chan->src_burst_len = clamp(config->src_maxburst, 1U, + ZYNQMP_DMA_MAX_SRC_BURST_LEN); + chan->dst_burst_len = clamp(config->dst_maxburst, 1U, + ZYNQMP_DMA_MAX_DST_BURST_LEN); return 0; } @@ -887,8 +893,8 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev, return PTR_ERR(chan->regs); chan->bus_width = ZYNQMP_DMA_BUS_WIDTH_64; - chan->dst_burst_len = ZYNQMP_DMA_AWLEN_RST_VAL; - chan->src_burst_len = ZYNQMP_DMA_ARLEN_RST_VAL; + chan->dst_burst_len = ZYNQMP_DMA_MAX_DST_BURST_LEN; + chan->src_burst_len = ZYNQMP_DMA_MAX_SRC_BURST_LEN; err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width); if (err < 0) { dev_err(&pdev->dev, "missing xlnx,bus-width property\n"); diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index 5c8272329a65..b3c99bb5fe77 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -491,8 +491,7 @@ config EDAC_TI tristate "Texas Instruments DDR3 ECC Controller" depends on ARCH_KEYSTONE || SOC_DRA7XX help - Support for error detection and correction on the - TI SoCs. + Support for error detection and correction on the TI SoCs. config EDAC_QCOM tristate "QCOM EDAC Controller" diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 428ce98f6776..9fbad908a854 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -214,7 +214,7 @@ static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate) scrubval = scrubrates[i].scrubval; - if (pvt->fam == 0x17 || pvt->fam == 0x18) { + if (pvt->umc) { __f17h_set_scrubval(pvt, scrubval); } else if (pvt->fam == 0x15 && pvt->model == 0x60) { f15h_select_dct(pvt, 0); @@ -256,18 +256,7 @@ static int get_scrub_rate(struct mem_ctl_info *mci) int i, retval = -EINVAL; u32 scrubval = 0; - switch (pvt->fam) { - case 0x15: - /* Erratum #505 */ - if (pvt->model < 0x10) - f15h_select_dct(pvt, 0); - - if (pvt->model == 0x60) - amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval); - break; - - case 0x17: - case 0x18: + if (pvt->umc) { amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval); if (scrubval & BIT(0)) { amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval); @@ -276,11 +265,15 @@ static int get_scrub_rate(struct mem_ctl_info *mci) } else { scrubval = 0; } - break; + } else if (pvt->fam == 0x15) { + /* Erratum #505 */ + if (pvt->model < 0x10) + f15h_select_dct(pvt, 0); - default: + if (pvt->model == 0x60) + amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval); + } else { amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval); - break; } scrubval = scrubval & 0x001F; @@ -1055,6 +1048,16 @@ static void determine_memory_type(struct amd64_pvt *pvt) { u32 dram_ctrl, dcsm; + if (pvt->umc) { + if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5)) + pvt->dram_type = MEM_LRDDR4; + else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4)) + pvt->dram_type = MEM_RDDR4; + else + pvt->dram_type = MEM_DDR4; + return; + } + switch (pvt->fam) { case 0xf: if (pvt->ext_model >= K8_REV_F) @@ -1100,16 +1103,6 @@ static void determine_memory_type(struct 
amd64_pvt *pvt) case 0x16: goto ddr3; - case 0x17: - case 0x18: - if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5)) - pvt->dram_type = MEM_LRDDR4; - else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4)) - pvt->dram_type = MEM_RDDR4; - else - pvt->dram_type = MEM_DDR4; - return; - default: WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam); pvt->dram_type = MEM_EMPTY; @@ -2336,6 +2329,16 @@ static struct amd64_family_type family_types[] = { .dbam_to_cs = f17_addr_mask_to_cs_size, } }, + [F19_CPUS] = { + .ctl_name = "F19h", + .f0_id = PCI_DEVICE_ID_AMD_19H_DF_F0, + .f6_id = PCI_DEVICE_ID_AMD_19H_DF_F6, + .max_mcs = 8, + .ops = { + .early_channel_count = f17_early_channel_count, + .dbam_to_cs = f17_addr_mask_to_cs_size, + } + }, }; /* @@ -3368,6 +3371,12 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt) family_types[F17_CPUS].ctl_name = "F18h"; break; + case 0x19: + fam_type = &family_types[F19_CPUS]; + pvt->ops = &family_types[F19_CPUS].ops; + family_types[F19_CPUS].ctl_name = "F19h"; + break; + default: amd64_err("Unsupported family!\n"); return NULL; @@ -3573,9 +3582,6 @@ static void remove_one_instance(unsigned int nid) struct mem_ctl_info *mci; struct amd64_pvt *pvt; - mci = find_mci_by_dev(&F3->dev); - WARN_ON(!mci); - /* Remove from EDAC CORE tracking list */ mci = edac_mc_del_mc(&F3->dev); if (!mci) @@ -3626,6 +3632,7 @@ static const struct x86_cpu_id amd64_cpuids[] = { { X86_VENDOR_AMD, 0x16, X86_MODEL_ANY, X86_FEATURE_ANY, 0 }, { X86_VENDOR_AMD, 0x17, X86_MODEL_ANY, X86_FEATURE_ANY, 0 }, { X86_VENDOR_HYGON, 0x18, X86_MODEL_ANY, X86_FEATURE_ANY, 0 }, + { X86_VENDOR_AMD, 0x19, X86_MODEL_ANY, X86_FEATURE_ANY, 0 }, { } }; MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids); diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h index 9be31688110b..abbf3c274d74 100644 --- a/drivers/edac/amd64_edac.h +++ b/drivers/edac/amd64_edac.h @@ -122,6 +122,8 @@ #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F6 0x1496 #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F0 0x1440 #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F6 0x1446 +#define PCI_DEVICE_ID_AMD_19H_DF_F0 0x1650 +#define PCI_DEVICE_ID_AMD_19H_DF_F6 0x1656 /* * Function 1 - Address Map @@ -292,6 +294,7 @@ enum amd_families { F17_M10H_CPUS, F17_M30H_CPUS, F17_M70H_CPUS, + F19_CPUS, NUM_FAMILIES, }; diff --git a/drivers/edac/aspeed_edac.c b/drivers/edac/aspeed_edac.c index 09a9e3de9595..b194658b8b5c 100644 --- a/drivers/edac/aspeed_edac.c +++ b/drivers/edac/aspeed_edac.c @@ -243,7 +243,7 @@ static int init_csrows(struct mem_ctl_info *mci) if (!np) { dev_err(mci->pdev, "dt: missing /memory node\n"); return -ENODEV; - }; + } rc = of_address_to_resource(np, 0, &r); @@ -252,7 +252,7 @@ static int init_csrows(struct mem_ctl_info *mci) if (rc) { dev_err(mci->pdev, "dt: failed requesting resource for /memory node\n"); return rc; - }; + } dev_dbg(mci->pdev, "dt: /memory node resources: first page r.start=0x%x, resource_size=0x%x, PAGE_SHIFT macro=0x%x\n", r.start, resource_size(&r), PAGE_SHIFT); diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c index f564a4a8a4ae..5c1eea96230c 100644 --- a/drivers/edac/i3000_edac.c +++ b/drivers/edac/i3000_edac.c @@ -324,7 +324,7 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx) pci_read_config_dword(pdev, I3000_MCHBAR, (u32 *) & mchbar); mchbar &= I3000_MCHBAR_MASK; - window = ioremap_nocache(mchbar, I3000_MMR_WINDOW_SIZE); + window = ioremap(mchbar, I3000_MMR_WINDOW_SIZE); if (!window) { printk(KERN_ERR "i3000: cannot map mmio space at 0x%lx\n", mchbar); 
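The ioremap_nocache() -> ioremap() conversions repeated across the EDAC drivers in this series are mechanical: ioremap() already returns an uncached MMIO mapping on every architecture, which made the _nocache variant a redundant alias. A minimal sketch of the shared pattern, assuming a hypothetical example_map_mchbar() helper with caller-supplied base and size (none of these names come from the patches above):

#include <linux/io.h>
#include <linux/printk.h>

/*
 * Hypothetical helper, for illustration only: map a small MCHBAR-style
 * register window. ioremap() yields an uncached mapping, so the behavior
 * is identical to the ioremap_nocache() calls being replaced above.
 */
static void __iomem *example_map_mchbar(resource_size_t base, size_t size)
{
	void __iomem *window;

	window = ioremap(base, size);
	if (!window)
		pr_err("example: cannot map mmio space at 0x%llx\n",
		       (unsigned long long)base);

	return window;	/* caller must iounmap() the window when done */
}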
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c index 432b375a4075..a8988db6d423 100644 --- a/drivers/edac/i3200_edac.c +++ b/drivers/edac/i3200_edac.c @@ -280,7 +280,7 @@ static void __iomem *i3200_map_mchbar(struct pci_dev *pdev) return NULL; } - window = ioremap_nocache(u.mchbar, I3200_MMR_WINDOW_SIZE); + window = ioremap(u.mchbar, I3200_MMR_WINDOW_SIZE); if (!window) printk(KERN_ERR "i3200: cannot map mmio space at 0x%llx\n", (unsigned long long)u.mchbar); diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c index 0ddc41e47a96..191aa7c19ded 100644 --- a/drivers/edac/i5100_edac.c +++ b/drivers/edac/i5100_edac.c @@ -259,11 +259,6 @@ static inline u32 i5100_nrecmemb_ras(u32 a) return a & ((1 << 16) - 1); } -static inline u32 i5100_redmemb_ecc_locator(u32 a) -{ - return a & ((1 << 18) - 1); -} - static inline u32 i5100_recmema_merr(u32 a) { return i5100_nrecmema_merr(a); @@ -486,7 +481,6 @@ static void i5100_read_log(struct mem_ctl_info *mci, int chan, u32 dw; u32 dw2; unsigned syndrome = 0; - unsigned ecc_loc = 0; unsigned merr; unsigned bank; unsigned rank; @@ -499,7 +493,6 @@ static void i5100_read_log(struct mem_ctl_info *mci, int chan, pci_read_config_dword(pdev, I5100_REDMEMA, &dw2); syndrome = dw2; pci_read_config_dword(pdev, I5100_REDMEMB, &dw2); - ecc_loc = i5100_redmemb_ecc_locator(dw2); } if (i5100_validlog_recmemvalid(dw)) { diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c index 7c6a2d4d2360..6be99e0d850d 100644 --- a/drivers/edac/i82975x_edac.c +++ b/drivers/edac/i82975x_edac.c @@ -485,7 +485,7 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx) goto fail0; } mchbar &= 0xffffc000; /* bits 31:14 used for 16K window */ - mch_window = ioremap_nocache(mchbar, 0x1000); + mch_window = ioremap(mchbar, 0x1000); if (!mch_window) { edac_dbg(3, "error ioremapping MCHBAR!\n"); goto fail0; diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c index 4f65073f230b..d68346a8e141 100644 --- a/drivers/edac/ie31200_edac.c +++ b/drivers/edac/ie31200_edac.c @@ -357,7 +357,7 @@ static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev) return NULL; } - window = ioremap_nocache(u.mchbar, IE31200_MMR_WINDOW_SIZE); + window = ioremap(u.mchbar, IE31200_MMR_WINDOW_SIZE); if (!window) ie31200_printk(KERN_ERR, "Cannot map mmio space at 0x%llx\n", (unsigned long long)u.mchbar); diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c index ea622c6f3a39..ea980c556f2e 100644 --- a/drivers/edac/mce_amd.c +++ b/drivers/edac/mce_amd.c @@ -6,7 +6,7 @@ #include "mce_amd.h" -static struct amd_decoder_ops *fam_ops; +static struct amd_decoder_ops fam_ops; static u8 xec_mask = 0xf; @@ -175,6 +175,33 @@ static const char * const smca_ls_mce_desc[] = { "L2 Fill Data error", }; +static const char * const smca_ls2_mce_desc[] = { + "An ECC error was detected on a data cache read by a probe or victimization", + "An ECC error or L2 poison was detected on a data cache read by a load", + "An ECC error was detected on a data cache read-modify-write by a store", + "An ECC error or poison bit mismatch was detected on a tag read by a probe or victimization", + "An ECC error or poison bit mismatch was detected on a tag read by a load", + "An ECC error or poison bit mismatch was detected on a tag read by a store", + "An ECC error was detected on an EMEM read by a load", + "An ECC error was detected on an EMEM read-modify-write by a store", + "A parity error was detected in an L1 TLB entry by any access", + "A parity error was detected in an L2 
TLB entry by any access", + "A parity error was detected in a PWC entry by any access", + "A parity error was detected in an STQ entry by any access", + "A parity error was detected in an LDQ entry by any access", + "A parity error was detected in a MAB entry by any access", + "A parity error was detected in an SCB entry state field by any access", + "A parity error was detected in an SCB entry address field by any access", + "A parity error was detected in an SCB entry data field by any access", + "A parity error was detected in a WCB entry by any access", + "A poisoned line was detected in an SCB entry by any access", + "A SystemReadDataError error was reported on read data returned from L2 for a load", + "A SystemReadDataError error was reported on read data returned from L2 for an SCB store", + "A SystemReadDataError error was reported on read data returned from L2 for a WCB store", + "A hardware assertion error was reported", + "A parity error was detected in an STLF, SCB EMEM entry or SRB store data by any access", +}; + static const char * const smca_if_mce_desc[] = { "Op Cache Microtag Probe Port Parity Error", "IC Microtag or Full Tag Multi-hit Error", @@ -378,6 +405,7 @@ struct smca_mce_desc { static struct smca_mce_desc smca_mce_descs[] = { [SMCA_LS] = { smca_ls_mce_desc, ARRAY_SIZE(smca_ls_mce_desc) }, + [SMCA_LS_V2] = { smca_ls2_mce_desc, ARRAY_SIZE(smca_ls2_mce_desc) }, [SMCA_IF] = { smca_if_mce_desc, ARRAY_SIZE(smca_if_mce_desc) }, [SMCA_L2_CACHE] = { smca_l2_mce_desc, ARRAY_SIZE(smca_l2_mce_desc) }, [SMCA_DE] = { smca_de_mce_desc, ARRAY_SIZE(smca_de_mce_desc) }, @@ -555,7 +583,7 @@ static void decode_mc0_mce(struct mce *m) : (xec ? "multimatch" : "parity"))); return; } - } else if (fam_ops->mc0_mce(ec, xec)) + } else if (fam_ops.mc0_mce(ec, xec)) ; else pr_emerg(HW_ERR "Corrupted MC0 MCE info?\n"); @@ -669,7 +697,7 @@ static void decode_mc1_mce(struct mce *m) pr_cont("Hardware Assert.\n"); else goto wrong_mc1_mce; - } else if (fam_ops->mc1_mce(ec, xec)) + } else if (fam_ops.mc1_mce(ec, xec)) ; else goto wrong_mc1_mce; @@ -803,7 +831,7 @@ static void decode_mc2_mce(struct mce *m) pr_emerg(HW_ERR "MC2 Error: "); - if (!fam_ops->mc2_mce(ec, xec)) + if (!fam_ops.mc2_mce(ec, xec)) pr_cont(HW_ERR "Corrupted MC2 MCE info?\n"); } @@ -1102,7 +1130,8 @@ amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data) if (m->tsc) pr_emerg(HW_ERR "TSC: %llu\n", m->tsc); - if (!fam_ops) + /* Doesn't matter which member to test. 
*/ + if (!fam_ops.mc0_mce) goto err_code; switch (m->bank) { @@ -1157,80 +1186,73 @@ static int __init mce_amd_init(void) c->x86_vendor != X86_VENDOR_HYGON) return -ENODEV; - fam_ops = kzalloc(sizeof(struct amd_decoder_ops), GFP_KERNEL); - if (!fam_ops) - return -ENOMEM; + if (boot_cpu_has(X86_FEATURE_SMCA)) { + xec_mask = 0x3f; + goto out; + } switch (c->x86) { case 0xf: - fam_ops->mc0_mce = k8_mc0_mce; - fam_ops->mc1_mce = k8_mc1_mce; - fam_ops->mc2_mce = k8_mc2_mce; + fam_ops.mc0_mce = k8_mc0_mce; + fam_ops.mc1_mce = k8_mc1_mce; + fam_ops.mc2_mce = k8_mc2_mce; break; case 0x10: - fam_ops->mc0_mce = f10h_mc0_mce; - fam_ops->mc1_mce = k8_mc1_mce; - fam_ops->mc2_mce = k8_mc2_mce; + fam_ops.mc0_mce = f10h_mc0_mce; + fam_ops.mc1_mce = k8_mc1_mce; + fam_ops.mc2_mce = k8_mc2_mce; break; case 0x11: - fam_ops->mc0_mce = k8_mc0_mce; - fam_ops->mc1_mce = k8_mc1_mce; - fam_ops->mc2_mce = k8_mc2_mce; + fam_ops.mc0_mce = k8_mc0_mce; + fam_ops.mc1_mce = k8_mc1_mce; + fam_ops.mc2_mce = k8_mc2_mce; break; case 0x12: - fam_ops->mc0_mce = f12h_mc0_mce; - fam_ops->mc1_mce = k8_mc1_mce; - fam_ops->mc2_mce = k8_mc2_mce; + fam_ops.mc0_mce = f12h_mc0_mce; + fam_ops.mc1_mce = k8_mc1_mce; + fam_ops.mc2_mce = k8_mc2_mce; break; case 0x14: - fam_ops->mc0_mce = cat_mc0_mce; - fam_ops->mc1_mce = cat_mc1_mce; - fam_ops->mc2_mce = k8_mc2_mce; + fam_ops.mc0_mce = cat_mc0_mce; + fam_ops.mc1_mce = cat_mc1_mce; + fam_ops.mc2_mce = k8_mc2_mce; break; case 0x15: xec_mask = c->x86_model == 0x60 ? 0x3f : 0x1f; - fam_ops->mc0_mce = f15h_mc0_mce; - fam_ops->mc1_mce = f15h_mc1_mce; - fam_ops->mc2_mce = f15h_mc2_mce; + fam_ops.mc0_mce = f15h_mc0_mce; + fam_ops.mc1_mce = f15h_mc1_mce; + fam_ops.mc2_mce = f15h_mc2_mce; break; case 0x16: xec_mask = 0x1f; - fam_ops->mc0_mce = cat_mc0_mce; - fam_ops->mc1_mce = cat_mc1_mce; - fam_ops->mc2_mce = f16h_mc2_mce; + fam_ops.mc0_mce = cat_mc0_mce; + fam_ops.mc1_mce = cat_mc1_mce; + fam_ops.mc2_mce = f16h_mc2_mce; break; case 0x17: case 0x18: - xec_mask = 0x3f; - if (!boot_cpu_has(X86_FEATURE_SMCA)) { - printk(KERN_WARNING "Decoding supported only on Scalable MCA processors.\n"); - goto err_out; - } - break; + pr_warn("Decoding supported only on Scalable MCA processors.\n"); + return -EINVAL; default: printk(KERN_WARNING "Huh? 
What family is it: 0x%x?!\n", c->x86); - goto err_out; + return -EINVAL; } +out: pr_info("MCE: In-kernel MCE decoding enabled.\n"); mce_register_decode_chain(&amd_mce_dec_nb); return 0; - -err_out: - kfree(fam_ops); - fam_ops = NULL; - return -EINVAL; } early_initcall(mce_amd_init); @@ -1238,7 +1260,6 @@ early_initcall(mce_amd_init); static void __exit mce_amd_exit(void) { mce_unregister_decode_chain(&amd_mce_dec_nb); - kfree(fam_ops); } MODULE_DESCRIPTION("AMD MCE decoder"); diff --git a/drivers/edac/sifive_edac.c b/drivers/edac/sifive_edac.c index c0cc72a3b2be..3a3dcb14ed99 100644 --- a/drivers/edac/sifive_edac.c +++ b/drivers/edac/sifive_edac.c @@ -54,8 +54,8 @@ static int ecc_register(struct platform_device *pdev) p->dci = edac_device_alloc_ctl_info(0, "sifive_ecc", 1, "sifive_ecc", 1, 1, NULL, 0, edac_device_alloc_index()); - if (IS_ERR(p->dci)) - return PTR_ERR(p->dci); + if (!p->dci) + return -ENOMEM; p->dci->dev = &pdev->dev; p->dci->mod_name = "Sifive ECC Manager"; diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c index 95662a4ff4c4..99bbaf629b8d 100644 --- a/drivers/edac/skx_common.c +++ b/drivers/edac/skx_common.c @@ -256,7 +256,7 @@ int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm) pdev = pci_get_device(PCI_VENDOR_ID_INTEL, did, NULL); if (!pdev) { - skx_printk(KERN_ERR, "Can't get tolm/tohm\n"); + edac_dbg(2, "Can't get tolm/tohm\n"); return -ENODEV; } diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c index cc779f3f9e2d..a65e2f78a402 100644 --- a/drivers/edac/x38_edac.c +++ b/drivers/edac/x38_edac.c @@ -266,7 +266,7 @@ static void __iomem *x38_map_mchbar(struct pci_dev *pdev) return NULL; } - window = ioremap_nocache(u.mchbar, X38_MMR_WINDOW_SIZE); + window = ioremap(u.mchbar, X38_MMR_WINDOW_SIZE); if (!window) printk(KERN_ERR "x38: cannot map mmio space at 0x%llx\n", (unsigned long long)u.mchbar); diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c index 0cc746673677..6ca2f5ab6c57 100644 --- a/drivers/firewire/nosy.c +++ b/drivers/firewire/nosy.c @@ -551,7 +551,7 @@ add_card(struct pci_dev *dev, const struct pci_device_id *unused) INIT_LIST_HEAD(&lynx->client_list); kref_init(&lynx->kref); - lynx->registers = ioremap_nocache(pci_resource_start(dev, 0), + lynx->registers = ioremap(pci_resource_start(dev, 0), PCILYNX_MAX_REGISTER); if (lynx->registers == NULL) { dev_err(&dev->dev, "Failed to map registers\n"); diff --git a/drivers/firmware/broadcom/bcm47xx_nvram.c b/drivers/firmware/broadcom/bcm47xx_nvram.c index da04fdae62a1..835ece9c00f1 100644 --- a/drivers/firmware/broadcom/bcm47xx_nvram.c +++ b/drivers/firmware/broadcom/bcm47xx_nvram.c @@ -120,7 +120,7 @@ int bcm47xx_nvram_init_from_mem(u32 base, u32 lim) void __iomem *iobase; int err; - iobase = ioremap_nocache(base, lim); + iobase = ioremap(base, lim); if (!iobase) return -ENOMEM; diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index bcc378c19ebe..ecc83e2f032c 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig @@ -215,6 +215,28 @@ config EFI_RCI2_TABLE Say Y here for Dell EMC PowerEdge systems. +config EFI_DISABLE_PCI_DMA + bool "Clear Busmaster bit on PCI bridges during ExitBootServices()" + help + Disable the busmaster bit in the control register on all PCI bridges + while calling ExitBootServices() and passing control to the runtime + kernel. System firmware may configure the IOMMU to prevent malicious + PCI devices from being able to attack the OS via DMA. 
However, since + firmware can't guarantee that the OS is IOMMU-aware, it will tear + down IOMMU configuration when ExitBootServices() is called. This + leaves a window in which a hostile device could still cause + damage before Linux configures the IOMMU again. + + If you say Y here, the EFI stub will clear the busmaster bit on all + PCI bridges before ExitBootServices() is called. This will prevent + any malicious PCI devices from being able to perform DMA until the + kernel reenables busmastering after configuring the IOMMU. + + This option will cause failures with some poorly behaved hardware + and should not be enabled without testing. The kernel commandline + options "efi=disable_early_pci_dma" or "efi=no_disable_early_pci_dma" + may be used to override this option. + endmenu config UEFI_CPER diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c index 904fa09e6a6b..d99f5b0c8a09 100644 --- a/drivers/firmware/efi/arm-init.c +++ b/drivers/firmware/efi/arm-init.c @@ -10,10 +10,12 @@ #define pr_fmt(fmt) "efi: " fmt #include <linux/efi.h> +#include <linux/fwnode.h> #include <linux/init.h> #include <linux/memblock.h> #include <linux/mm_types.h> #include <linux/of.h> +#include <linux/of_address.h> #include <linux/of_fdt.h> #include <linux/platform_device.h> #include <linux/screen_info.h> @@ -276,15 +278,112 @@ void __init efi_init(void) efi_memmap_unmap(); } +static bool efifb_overlaps_pci_range(const struct of_pci_range *range) +{ + u64 fb_base = screen_info.lfb_base; + + if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE) + fb_base |= (u64)(unsigned long)screen_info.ext_lfb_base << 32; + + return fb_base >= range->cpu_addr && + fb_base < (range->cpu_addr + range->size); +} + +static struct device_node *find_pci_overlap_node(void) +{ + struct device_node *np; + + for_each_node_by_type(np, "pci") { + struct of_pci_range_parser parser; + struct of_pci_range range; + int err; + + err = of_pci_range_parser_init(&parser, np); + if (err) { + pr_warn("of_pci_range_parser_init() failed: %d\n", err); + continue; + } + + for_each_of_pci_range(&parser, &range) + if (efifb_overlaps_pci_range(&range)) + return np; + } + return NULL; +} + +/* + * If the efifb framebuffer is backed by a PCI graphics controller, we have + * to ensure that this relation is expressed using a device link when + * running in DT mode, or the probe order may be reversed, resulting in a + * resource reservation conflict on the memory window that the efifb + * framebuffer steals from the PCIe host bridge. + */ +static int efifb_add_links(const struct fwnode_handle *fwnode, + struct device *dev) +{ + struct device_node *sup_np; + struct device *sup_dev; + + sup_np = find_pci_overlap_node(); + + /* + * If there's no PCI graphics controller backing the efifb, we are + * done here. + */ + if (!sup_np) + return 0; + + sup_dev = get_dev_from_fwnode(&sup_np->fwnode); + of_node_put(sup_np); + + /* + * Return -ENODEV if the PCI graphics controller device hasn't been + * registered yet. This ensures that efifb isn't allowed to probe + * and this function is retried again when new devices are + * registered. + */ + if (!sup_dev) + return -ENODEV; + + /* + * If this fails, retrying this function at a later point won't + * change anything. So, don't return an error after this.
+ */ + if (!device_link_add(dev, sup_dev, 0)) + dev_warn(dev, "device_link_add() failed\n"); + + put_device(sup_dev); + + return 0; +} + +static const struct fwnode_operations efifb_fwnode_ops = { + .add_links = efifb_add_links, +}; + +static struct fwnode_handle efifb_fwnode = { + .ops = &efifb_fwnode_ops, +}; + static int __init register_gop_device(void) { - void *pd; + struct platform_device *pd; + int err; if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI) return 0; - pd = platform_device_register_data(NULL, "efi-framebuffer", 0, - &screen_info, sizeof(screen_info)); - return PTR_ERR_OR_ZERO(pd); + pd = platform_device_alloc("efi-framebuffer", 0); + if (!pd) + return -ENOMEM; + + if (IS_ENABLED(CONFIG_PCI)) + pd->dev.fwnode = &efifb_fwnode; + + err = platform_device_add_data(pd, &screen_info, sizeof(screen_info)); + if (err) + return err; + + return platform_device_add(pd); } subsys_initcall(register_gop_device); diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c index b1395133389e..d3067cbd5114 100644 --- a/drivers/firmware/efi/capsule-loader.c +++ b/drivers/firmware/efi/capsule-loader.c @@ -11,6 +11,7 @@ #include <linux/module.h> #include <linux/miscdevice.h> #include <linux/highmem.h> +#include <linux/io.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/efi.h> diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 2b02cb165f16..621220ab3d0e 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -908,7 +908,7 @@ u64 efi_mem_attributes(unsigned long phys_addr) * * Search in the EFI memory map for the region covering @phys_addr. * Returns the EFI memory type if the region was found in the memory - * map, EFI_RESERVED_TYPE (zero) otherwise. + * map, -EINVAL otherwise. 
*/ int efi_mem_type(unsigned long phys_addr) { diff --git a/drivers/firmware/efi/fake_mem.c b/drivers/firmware/efi/fake_mem.c index bb9fc70d0cfa..6e0f34a38171 100644 --- a/drivers/firmware/efi/fake_mem.c +++ b/drivers/firmware/efi/fake_mem.c @@ -34,46 +34,45 @@ static int __init cmp_fake_mem(const void *x1, const void *x2) return 0; } -void __init efi_fake_memmap(void) +static void __init efi_fake_range(struct efi_mem_range *efi_range) { + struct efi_memory_map_data data = { 0 }; int new_nr_map = efi.memmap.nr_map; efi_memory_desc_t *md; - phys_addr_t new_memmap_phy; void *new_memmap; - int i; - - if (!efi_enabled(EFI_MEMMAP) || !nr_fake_mem) - return; /* count up the number of EFI memory descriptor */ - for (i = 0; i < nr_fake_mem; i++) { - for_each_efi_memory_desc(md) { - struct range *r = &efi_fake_mems[i].range; - - new_nr_map += efi_memmap_split_count(md, r); - } - } + for_each_efi_memory_desc(md) + new_nr_map += efi_memmap_split_count(md, &efi_range->range); /* allocate memory for new EFI memmap */ - new_memmap_phy = efi_memmap_alloc(new_nr_map); - if (!new_memmap_phy) + if (efi_memmap_alloc(new_nr_map, &data) != 0) return; /* create new EFI memmap */ - new_memmap = early_memremap(new_memmap_phy, - efi.memmap.desc_size * new_nr_map); + new_memmap = early_memremap(data.phys_map, data.size); if (!new_memmap) { - memblock_free(new_memmap_phy, efi.memmap.desc_size * new_nr_map); + __efi_memmap_free(data.phys_map, data.size, data.flags); return; } - for (i = 0; i < nr_fake_mem; i++) - efi_memmap_insert(&efi.memmap, new_memmap, &efi_fake_mems[i]); + efi_memmap_insert(&efi.memmap, new_memmap, efi_range); /* swap into new EFI memmap */ - early_memunmap(new_memmap, efi.memmap.desc_size * new_nr_map); + early_memunmap(new_memmap, data.size); + + efi_memmap_install(&data); +} + +void __init efi_fake_memmap(void) +{ + int i; - efi_memmap_install(new_memmap_phy, new_nr_map); + if (!efi_enabled(EFI_MEMMAP) || !nr_fake_mem) + return; + + for (i = 0; i < nr_fake_mem; i++) + efi_fake_range(&efi_fake_mems[i]); /* print new EFI memmap */ efi_print_memmap(); diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index c35f893897e1..98a81576213d 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -39,7 +39,7 @@ OBJECT_FILES_NON_STANDARD := y KCOV_INSTRUMENT := n lib-y := efi-stub-helper.o gop.o secureboot.o tpm.o \ - random.o + random.o pci.o # include the stub's generic dependencies from lib/ when building for ARM/arm64 arm-deps-y := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c index 817237ce2420..7bbef4a67350 100644 --- a/drivers/firmware/efi/libstub/arm-stub.c +++ b/drivers/firmware/efi/libstub/arm-stub.c @@ -37,16 +37,14 @@ static u64 virtmap_base = EFI_RT_VIRTUAL_BASE; -void efi_char16_printk(efi_system_table_t *sys_table_arg, - efi_char16_t *str) -{ - struct efi_simple_text_output_protocol *out; +static efi_system_table_t *__efistub_global sys_table; - out = (struct efi_simple_text_output_protocol *)sys_table_arg->con_out; - out->output_string(out, str); +__pure efi_system_table_t *efi_system_table(void) +{ + return sys_table; } -static struct screen_info *setup_graphics(efi_system_table_t *sys_table_arg) +static struct screen_info *setup_graphics(void) { efi_guid_t gop_proto = EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID; efi_status_t status; @@ -55,27 +53,27 @@ static struct screen_info 
*setup_graphics(efi_system_table_t *sys_table_arg) struct screen_info *si = NULL; size = 0; - status = efi_call_early(locate_handle, EFI_LOCATE_BY_PROTOCOL, - &gop_proto, NULL, &size, gop_handle); + status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, + &gop_proto, NULL, &size, gop_handle); if (status == EFI_BUFFER_TOO_SMALL) { - si = alloc_screen_info(sys_table_arg); + si = alloc_screen_info(); if (!si) return NULL; - efi_setup_gop(sys_table_arg, si, &gop_proto, size); + efi_setup_gop(si, &gop_proto, size); } return si; } -void install_memreserve_table(efi_system_table_t *sys_table_arg) +void install_memreserve_table(void) { struct linux_efi_memreserve *rsv; efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID; efi_status_t status; - status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv), - (void **)&rsv); + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv), + (void **)&rsv); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table_arg, "Failed to allocate memreserve entry!\n"); + pr_efi_err("Failed to allocate memreserve entry!\n"); return; } @@ -83,11 +81,10 @@ void install_memreserve_table(efi_system_table_t *sys_table_arg) rsv->size = 0; atomic_set(&rsv->count, 0); - status = efi_call_early(install_configuration_table, - &memreserve_table_guid, - rsv); + status = efi_bs_call(install_configuration_table, + &memreserve_table_guid, rsv); if (status != EFI_SUCCESS) - pr_efi_err(sys_table_arg, "Failed to install memreserve config table!\n"); + pr_efi_err("Failed to install memreserve config table!\n"); } @@ -97,8 +94,7 @@ void install_memreserve_table(efi_system_table_t *sys_table_arg) * must be reserved. On failure it is required to free all * allocations it has made. */ -efi_status_t handle_kernel_image(efi_system_table_t *sys_table, - unsigned long *image_addr, +efi_status_t handle_kernel_image(unsigned long *image_addr, unsigned long *image_size, unsigned long *reserve_addr, unsigned long *reserve_size, @@ -110,7 +106,7 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table, * for both architectures, with the arch-specific code provided in the * handle_kernel_image() function. */ -unsigned long efi_entry(void *handle, efi_system_table_t *sys_table, +unsigned long efi_entry(void *handle, efi_system_table_t *sys_table_arg, unsigned long *image_addr) { efi_loaded_image_t *image; @@ -131,11 +127,13 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table, enum efi_secureboot_mode secure_boot; struct screen_info *si; + sys_table = sys_table_arg; + /* Check if we were booted by the EFI firmware */ if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) goto fail; - status = check_platform_features(sys_table); + status = check_platform_features(); if (status != EFI_SUCCESS) goto fail; @@ -147,13 +145,13 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table, status = sys_table->boottime->handle_protocol(handle, &loaded_image_proto, (void *)&image); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table, "Failed to get loaded image protocol\n"); + pr_efi_err("Failed to get loaded image protocol\n"); goto fail; } - dram_base = get_dram_base(sys_table); + dram_base = get_dram_base(); if (dram_base == EFI_ERROR) { - pr_efi_err(sys_table, "Failed to find DRAM base\n"); + pr_efi_err("Failed to find DRAM base\n"); goto fail; } @@ -162,9 +160,9 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table, * protocol.
We are going to copy the command line into the * device tree, so this can be allocated anywhere. */ - cmdline_ptr = efi_convert_cmdline(sys_table, image, &cmdline_size); + cmdline_ptr = efi_convert_cmdline(image, &cmdline_size); if (!cmdline_ptr) { - pr_efi_err(sys_table, "getting command line via LOADED_IMAGE_PROTOCOL\n"); + pr_efi_err("getting command line via LOADED_IMAGE_PROTOCOL\n"); goto fail; } @@ -176,25 +174,25 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table, if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && cmdline_size > 0) efi_parse_options(cmdline_ptr); - pr_efi(sys_table, "Booting Linux Kernel...\n"); + pr_efi("Booting Linux Kernel...\n"); - si = setup_graphics(sys_table); + si = setup_graphics(); - status = handle_kernel_image(sys_table, image_addr, &image_size, + status = handle_kernel_image(image_addr, &image_size, &reserve_addr, &reserve_size, dram_base, image); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table, "Failed to relocate kernel\n"); + pr_efi_err("Failed to relocate kernel\n"); goto fail_free_cmdline; } - efi_retrieve_tpm2_eventlog(sys_table); + efi_retrieve_tpm2_eventlog(); /* Ask the firmware to clear memory on unclean shutdown */ - efi_enable_reset_attack_mitigation(sys_table); + efi_enable_reset_attack_mitigation(); - secure_boot = efi_get_secureboot(sys_table); + secure_boot = efi_get_secureboot(); /* * Unauthenticated device tree data is a security hazard, so ignore @@ -204,39 +202,38 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table, if (!IS_ENABLED(CONFIG_EFI_ARMSTUB_DTB_LOADER) || secure_boot != efi_secureboot_mode_disabled) { if (strstr(cmdline_ptr, "dtb=")) - pr_efi(sys_table, "Ignoring DTB from command line.\n"); + pr_efi("Ignoring DTB from command line.\n"); } else { - status = handle_cmdline_files(sys_table, image, cmdline_ptr, - "dtb=", + status = handle_cmdline_files(image, cmdline_ptr, "dtb=", ~0UL, &fdt_addr, &fdt_size); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table, "Failed to load device tree!\n"); + pr_efi_err("Failed to load device tree!\n"); goto fail_free_image; } } if (fdt_addr) { - pr_efi(sys_table, "Using DTB from command line\n"); + pr_efi("Using DTB from command line\n"); } else { /* Look for a device tree configuration table entry. 
*/ - fdt_addr = (uintptr_t)get_fdt(sys_table, &fdt_size); + fdt_addr = (uintptr_t)get_fdt(&fdt_size); if (fdt_addr) - pr_efi(sys_table, "Using DTB from configuration table\n"); + pr_efi("Using DTB from configuration table\n"); } if (!fdt_addr) - pr_efi(sys_table, "Generating empty DTB\n"); + pr_efi("Generating empty DTB\n"); - status = handle_cmdline_files(sys_table, image, cmdline_ptr, "initrd=", + status = handle_cmdline_files(image, cmdline_ptr, "initrd=", efi_get_max_initrd_addr(dram_base, *image_addr), (unsigned long *)&initrd_addr, (unsigned long *)&initrd_size); if (status != EFI_SUCCESS) - pr_efi_err(sys_table, "Failed initrd from command line!\n"); + pr_efi_err("Failed initrd from command line!\n"); - efi_random_get_seed(sys_table); + efi_random_get_seed(); /* hibernation expects the runtime regions to stay in the same place */ if (!IS_ENABLED(CONFIG_HIBERNATION) && !nokaslr()) { @@ -251,18 +248,17 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table, EFI_RT_VIRTUAL_SIZE; u32 rnd; - status = efi_get_random_bytes(sys_table, sizeof(rnd), - (u8 *)&rnd); + status = efi_get_random_bytes(sizeof(rnd), (u8 *)&rnd); if (status == EFI_SUCCESS) { virtmap_base = EFI_RT_VIRTUAL_BASE + (((headroom >> 21) * rnd) >> (32 - 21)); } } - install_memreserve_table(sys_table); + install_memreserve_table(); new_fdt_addr = fdt_addr; - status = allocate_new_fdt_and_exit_boot(sys_table, handle, + status = allocate_new_fdt_and_exit_boot(handle, &new_fdt_addr, efi_get_max_fdt_addr(dram_base), initrd_addr, initrd_size, cmdline_ptr, fdt_addr, fdt_size); @@ -275,17 +271,17 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table, if (status == EFI_SUCCESS) return new_fdt_addr; - pr_efi_err(sys_table, "Failed to update FDT and exit boot services\n"); + pr_efi_err("Failed to update FDT and exit boot services\n"); - efi_free(sys_table, initrd_size, initrd_addr); - efi_free(sys_table, fdt_size, fdt_addr); + efi_free(initrd_size, initrd_addr); + efi_free(fdt_size, fdt_addr); fail_free_image: - efi_free(sys_table, image_size, *image_addr); - efi_free(sys_table, reserve_size, reserve_addr); + efi_free(image_size, *image_addr); + efi_free(reserve_size, reserve_addr); fail_free_cmdline: - free_screen_info(sys_table, si); - efi_free(sys_table, cmdline_size, (unsigned long)cmdline_ptr); + free_screen_info(si); + efi_free(cmdline_size, (unsigned long)cmdline_ptr); fail: return EFI_ERROR; } diff --git a/drivers/firmware/efi/libstub/arm32-stub.c b/drivers/firmware/efi/libstub/arm32-stub.c index 4566640de650..7b2a6382b647 100644 --- a/drivers/firmware/efi/libstub/arm32-stub.c +++ b/drivers/firmware/efi/libstub/arm32-stub.c @@ -7,7 +7,7 @@ #include "efistub.h" -efi_status_t check_platform_features(efi_system_table_t *sys_table_arg) +efi_status_t check_platform_features(void) { int block; @@ -18,7 +18,7 @@ efi_status_t check_platform_features(efi_system_table_t *sys_table_arg) /* LPAE kernels need compatible hardware */ block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0); if (block < 5) { - pr_efi_err(sys_table_arg, "This LPAE kernel is not supported by your CPU\n"); + pr_efi_err("This LPAE kernel is not supported by your CPU\n"); return EFI_UNSUPPORTED; } return EFI_SUCCESS; @@ -26,7 +26,7 @@ efi_status_t check_platform_features(efi_system_table_t *sys_table_arg) static efi_guid_t screen_info_guid = LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID; -struct screen_info *alloc_screen_info(efi_system_table_t *sys_table_arg) +struct screen_info *alloc_screen_info(void) { struct screen_info *si; efi_status_t 
status; @@ -37,32 +37,31 @@ struct screen_info *alloc_screen_info(efi_system_table_t *sys_table_arg) * its contents while we hand over to the kernel proper from the * decompressor. */ - status = efi_call_early(allocate_pool, EFI_RUNTIME_SERVICES_DATA, - sizeof(*si), (void **)&si); + status = efi_bs_call(allocate_pool, EFI_RUNTIME_SERVICES_DATA, + sizeof(*si), (void **)&si); if (status != EFI_SUCCESS) return NULL; - status = efi_call_early(install_configuration_table, - &screen_info_guid, si); + status = efi_bs_call(install_configuration_table, + &screen_info_guid, si); if (status == EFI_SUCCESS) return si; - efi_call_early(free_pool, si); + efi_bs_call(free_pool, si); return NULL; } -void free_screen_info(efi_system_table_t *sys_table_arg, struct screen_info *si) +void free_screen_info(struct screen_info *si) { if (!si) return; - efi_call_early(install_configuration_table, &screen_info_guid, NULL); - efi_call_early(free_pool, si); + efi_bs_call(install_configuration_table, &screen_info_guid, NULL); + efi_bs_call(free_pool, si); } -static efi_status_t reserve_kernel_base(efi_system_table_t *sys_table_arg, - unsigned long dram_base, +static efi_status_t reserve_kernel_base(unsigned long dram_base, unsigned long *reserve_addr, unsigned long *reserve_size) { @@ -92,8 +91,8 @@ static efi_status_t reserve_kernel_base(efi_system_table_t *sys_table_arg, */ alloc_addr = dram_base + MAX_UNCOMP_KERNEL_SIZE; nr_pages = MAX_UNCOMP_KERNEL_SIZE / EFI_PAGE_SIZE; - status = efi_call_early(allocate_pages, EFI_ALLOCATE_MAX_ADDRESS, - EFI_BOOT_SERVICES_DATA, nr_pages, &alloc_addr); + status = efi_bs_call(allocate_pages, EFI_ALLOCATE_MAX_ADDRESS, + EFI_BOOT_SERVICES_DATA, nr_pages, &alloc_addr); if (status == EFI_SUCCESS) { if (alloc_addr == dram_base) { *reserve_addr = alloc_addr; @@ -119,10 +118,9 @@ static efi_status_t reserve_kernel_base(efi_system_table_t *sys_table_arg, * released to the OS after ExitBootServices(), the decompressor can * safely overwrite them. 
*/ - status = efi_get_memory_map(sys_table_arg, &map); + status = efi_get_memory_map(&map); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table_arg, - "reserve_kernel_base(): Unable to retrieve memory map.\n"); + pr_efi_err("reserve_kernel_base(): Unable to retrieve memory map.\n"); return status; } @@ -158,14 +156,13 @@ static efi_status_t reserve_kernel_base(efi_system_table_t *sys_table_arg, start = max(start, (u64)dram_base); end = min(end, (u64)dram_base + MAX_UNCOMP_KERNEL_SIZE); - status = efi_call_early(allocate_pages, - EFI_ALLOCATE_ADDRESS, - EFI_LOADER_DATA, - (end - start) / EFI_PAGE_SIZE, - &start); + status = efi_bs_call(allocate_pages, + EFI_ALLOCATE_ADDRESS, + EFI_LOADER_DATA, + (end - start) / EFI_PAGE_SIZE, + &start); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table_arg, - "reserve_kernel_base(): alloc failed.\n"); + pr_efi_err("reserve_kernel_base(): alloc failed.\n"); goto out; } break; @@ -188,12 +185,11 @@ static efi_status_t reserve_kernel_base(efi_system_table_t *sys_table_arg, status = EFI_SUCCESS; out: - efi_call_early(free_pool, memory_map); + efi_bs_call(free_pool, memory_map); return status; } -efi_status_t handle_kernel_image(efi_system_table_t *sys_table, - unsigned long *image_addr, +efi_status_t handle_kernel_image(unsigned long *image_addr, unsigned long *image_size, unsigned long *reserve_addr, unsigned long *reserve_size, @@ -221,10 +217,9 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table, */ kernel_base += TEXT_OFFSET - 5 * PAGE_SIZE; - status = reserve_kernel_base(sys_table, kernel_base, reserve_addr, - reserve_size); + status = reserve_kernel_base(kernel_base, reserve_addr, reserve_size); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table, "Unable to allocate memory for uncompressed kernel.\n"); + pr_efi_err("Unable to allocate memory for uncompressed kernel.\n"); return status; } @@ -233,12 +228,11 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table, * memory window. */ *image_size = image->image_size; - status = efi_relocate_kernel(sys_table, image_addr, *image_size, - *image_size, + status = efi_relocate_kernel(image_addr, *image_size, *image_size, kernel_base + MAX_UNCOMP_KERNEL_SIZE, 0, 0); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table, "Failed to relocate kernel.\n"); - efi_free(sys_table, *reserve_size, *reserve_addr); + pr_efi_err("Failed to relocate kernel.\n"); + efi_free(*reserve_size, *reserve_addr); *reserve_size = 0; return status; } @@ -249,10 +243,10 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table, * address at which the zImage is loaded. 
*/ if (*image_addr + *image_size > dram_base + ZIMAGE_OFFSET_LIMIT) { - pr_efi_err(sys_table, "Failed to relocate kernel, no low memory available.\n"); - efi_free(sys_table, *reserve_size, *reserve_addr); + pr_efi_err("Failed to relocate kernel, no low memory available.\n"); + efi_free(*reserve_size, *reserve_addr); *reserve_size = 0; - efi_free(sys_table, *image_size, *image_addr); + efi_free(*image_size, *image_addr); *image_size = 0; return EFI_LOAD_ERROR; } diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c index 1550d244e996..2915b44132e6 100644 --- a/drivers/firmware/efi/libstub/arm64-stub.c +++ b/drivers/firmware/efi/libstub/arm64-stub.c @@ -21,7 +21,7 @@ #include "efistub.h" -efi_status_t check_platform_features(efi_system_table_t *sys_table_arg) +efi_status_t check_platform_features(void) { u64 tg; @@ -32,16 +32,15 @@ efi_status_t check_platform_features(efi_system_table_t *sys_table_arg) tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_TGRAN_SHIFT) & 0xf; if (tg != ID_AA64MMFR0_TGRAN_SUPPORTED) { if (IS_ENABLED(CONFIG_ARM64_64K_PAGES)) - pr_efi_err(sys_table_arg, "This 64 KB granular kernel is not supported by your CPU\n"); + pr_efi_err("This 64 KB granular kernel is not supported by your CPU\n"); else - pr_efi_err(sys_table_arg, "This 16 KB granular kernel is not supported by your CPU\n"); + pr_efi_err("This 16 KB granular kernel is not supported by your CPU\n"); return EFI_UNSUPPORTED; } return EFI_SUCCESS; } -efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg, - unsigned long *image_addr, +efi_status_t handle_kernel_image(unsigned long *image_addr, unsigned long *image_size, unsigned long *reserve_addr, unsigned long *reserve_size, @@ -56,17 +55,16 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg, if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { if (!nokaslr()) { - status = efi_get_random_bytes(sys_table_arg, - sizeof(phys_seed), + status = efi_get_random_bytes(sizeof(phys_seed), (u8 *)&phys_seed); if (status == EFI_NOT_FOUND) { - pr_efi(sys_table_arg, "EFI_RNG_PROTOCOL unavailable, no randomness supplied\n"); + pr_efi("EFI_RNG_PROTOCOL unavailable, no randomness supplied\n"); } else if (status != EFI_SUCCESS) { - pr_efi_err(sys_table_arg, "efi_get_random_bytes() failed\n"); + pr_efi_err("efi_get_random_bytes() failed\n"); return status; } } else { - pr_efi(sys_table_arg, "KASLR disabled on kernel command line\n"); + pr_efi("KASLR disabled on kernel command line\n"); } } @@ -108,7 +106,7 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg, * locate the kernel at a randomized offset in physical memory. 
*/ *reserve_size = kernel_memsize + offset; - status = efi_random_alloc(sys_table_arg, *reserve_size, + status = efi_random_alloc(*reserve_size, MIN_KIMG_ALIGN, reserve_addr, (u32)phys_seed); @@ -131,19 +129,19 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg, *image_addr = *reserve_addr = preferred_offset; *reserve_size = round_up(kernel_memsize, EFI_ALLOC_ALIGN); - status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS, - EFI_LOADER_DATA, - *reserve_size / EFI_PAGE_SIZE, - (efi_physical_addr_t *)reserve_addr); + status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS, + EFI_LOADER_DATA, + *reserve_size / EFI_PAGE_SIZE, + (efi_physical_addr_t *)reserve_addr); } if (status != EFI_SUCCESS) { *reserve_size = kernel_memsize + TEXT_OFFSET; - status = efi_low_alloc(sys_table_arg, *reserve_size, + status = efi_low_alloc(*reserve_size, MIN_KIMG_ALIGN, reserve_addr); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table_arg, "Failed to relocate kernel\n"); + pr_efi_err("Failed to relocate kernel\n"); *reserve_size = 0; return status; } diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c index e02579907f2e..74ddfb496140 100644 --- a/drivers/firmware/efi/libstub/efi-stub-helper.c +++ b/drivers/firmware/efi/libstub/efi-stub-helper.c @@ -27,24 +27,26 @@ */ #define EFI_READ_CHUNK_SIZE (1024 * 1024) -static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE; +static unsigned long efi_chunk_size = EFI_READ_CHUNK_SIZE; -static int __section(.data) __nokaslr; -static int __section(.data) __quiet; -static int __section(.data) __novamap; -static bool __section(.data) efi_nosoftreserve; +static bool __efistub_global efi_nokaslr; +static bool __efistub_global efi_quiet; +static bool __efistub_global efi_novamap; +static bool __efistub_global efi_nosoftreserve; +static bool __efistub_global efi_disable_pci_dma = + IS_ENABLED(CONFIG_EFI_DISABLE_PCI_DMA); -int __pure nokaslr(void) +bool __pure nokaslr(void) { - return __nokaslr; + return efi_nokaslr; } -int __pure is_quiet(void) +bool __pure is_quiet(void) { - return __quiet; + return efi_quiet; } -int __pure novamap(void) +bool __pure novamap(void) { - return __novamap; + return efi_novamap; } bool __pure __efi_soft_reserve_enabled(void) { @@ -58,7 +60,7 @@ struct file_info { u64 size; }; -void efi_printk(efi_system_table_t *sys_table_arg, char *str) +void efi_printk(char *str) { char *s8; @@ -68,10 +70,10 @@ void efi_printk(efi_system_table_t *sys_table_arg, char *str) ch[0] = *s8; if (*s8 == '\n') { efi_char16_t nl[2] = { '\r', 0 }; - efi_char16_printk(sys_table_arg, nl); + efi_char16_printk(nl); } - efi_char16_printk(sys_table_arg, ch); + efi_char16_printk(ch); } } @@ -84,8 +86,7 @@ static inline bool mmap_has_headroom(unsigned long buff_size, return slack / desc_size >= EFI_MMAP_NR_SLACK_SLOTS; } -efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg, - struct efi_boot_memmap *map) +efi_status_t efi_get_memory_map(struct efi_boot_memmap *map) { efi_memory_desc_t *m = NULL; efi_status_t status; @@ -96,19 +97,19 @@ efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg, *map->map_size = *map->desc_size * 32; *map->buff_size = *map->map_size; again: - status = efi_call_early(allocate_pool, EFI_LOADER_DATA, - *map->map_size, (void **)&m); + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, + *map->map_size, (void **)&m); if (status != EFI_SUCCESS) goto fail; *map->desc_size = 0; key = 0; - status = efi_call_early(get_memory_map, map->map_size, m, - &key, 
map->desc_size, &desc_version); + status = efi_bs_call(get_memory_map, map->map_size, m, + &key, map->desc_size, &desc_version); if (status == EFI_BUFFER_TOO_SMALL || !mmap_has_headroom(*map->buff_size, *map->map_size, *map->desc_size)) { - efi_call_early(free_pool, m); + efi_bs_call(free_pool, m); /* * Make sure there is some entries of headroom so that the * buffer can be reused for a new map after allocations are @@ -122,7 +123,7 @@ again: } if (status != EFI_SUCCESS) - efi_call_early(free_pool, m); + efi_bs_call(free_pool, m); if (map->key_ptr && status == EFI_SUCCESS) *map->key_ptr = key; @@ -135,7 +136,7 @@ fail: } -unsigned long get_dram_base(efi_system_table_t *sys_table_arg) +unsigned long get_dram_base(void) { efi_status_t status; unsigned long map_size, buff_size; @@ -151,7 +152,7 @@ unsigned long get_dram_base(efi_system_table_t *sys_table_arg) boot_map.key_ptr = NULL; boot_map.buff_size = &buff_size; - status = efi_get_memory_map(sys_table_arg, &boot_map); + status = efi_get_memory_map(&boot_map); if (status != EFI_SUCCESS) return membase; @@ -164,7 +165,7 @@ unsigned long get_dram_base(efi_system_table_t *sys_table_arg) } } - efi_call_early(free_pool, map.map); + efi_bs_call(free_pool, map.map); return membase; } @@ -172,8 +173,7 @@ unsigned long get_dram_base(efi_system_table_t *sys_table_arg) /* * Allocate at the highest possible address that is not above 'max'. */ -efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg, - unsigned long size, unsigned long align, +efi_status_t efi_high_alloc(unsigned long size, unsigned long align, unsigned long *addr, unsigned long max) { unsigned long map_size, desc_size, buff_size; @@ -191,7 +191,7 @@ efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg, boot_map.key_ptr = NULL; boot_map.buff_size = &buff_size; - status = efi_get_memory_map(sys_table_arg, &boot_map); + status = efi_get_memory_map(&boot_map); if (status != EFI_SUCCESS) goto fail; @@ -251,9 +251,8 @@ again: if (!max_addr) status = EFI_NOT_FOUND; else { - status = efi_call_early(allocate_pages, - EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA, - nr_pages, &max_addr); + status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS, + EFI_LOADER_DATA, nr_pages, &max_addr); if (status != EFI_SUCCESS) { max = max_addr; max_addr = 0; @@ -263,7 +262,7 @@ again: *addr = max_addr; } - efi_call_early(free_pool, map); + efi_bs_call(free_pool, map); fail: return status; } @@ -271,8 +270,7 @@ fail: /* * Allocate at the lowest possible address that is not below 'min'. 
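The comment above introduces efi_low_alloc_above(), whose converted body follows: walk the memory map and take the lowest region in which an aligned allocation of the requested size fits. The same scan reduced to a self-contained sketch, where struct desc stands in for efi_memory_desc_t and align is assumed to be a power of two:

#include <stdint.h>
#include <stddef.h>

struct desc { uint64_t start, end; };	/* trimmed-down memory descriptor */

/* Return the first aligned address >= min where size bytes fit,
 * or 0 if no region works. */
static uint64_t low_alloc_above(const struct desc *map, size_t n,
				uint64_t size, uint64_t align, uint64_t min)
{
	for (size_t i = 0; i < n; i++) {
		uint64_t start = map[i].start < min ? min : map[i].start;

		start = (start + align - 1) & ~(align - 1);	/* round up */
		if (start + size <= map[i].end)
			return start;
	}
	return 0;
}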
*/ -efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg, - unsigned long size, unsigned long align, +efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align, unsigned long *addr, unsigned long min) { unsigned long map_size, desc_size, buff_size; @@ -289,7 +287,7 @@ efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg, boot_map.key_ptr = NULL; boot_map.buff_size = &buff_size; - status = efi_get_memory_map(sys_table_arg, &boot_map); + status = efi_get_memory_map(&boot_map); if (status != EFI_SUCCESS) goto fail; @@ -331,9 +329,8 @@ efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg, if ((start + size) > end) continue; - status = efi_call_early(allocate_pages, - EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA, - nr_pages, &start); + status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS, + EFI_LOADER_DATA, nr_pages, &start); if (status == EFI_SUCCESS) { *addr = start; break; @@ -343,13 +340,12 @@ efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg, if (i == map_size / desc_size) status = EFI_NOT_FOUND; - efi_call_early(free_pool, map); + efi_bs_call(free_pool, map); fail: return status; } -void efi_free(efi_system_table_t *sys_table_arg, unsigned long size, - unsigned long addr) +void efi_free(unsigned long size, unsigned long addr) { unsigned long nr_pages; @@ -357,12 +353,11 @@ void efi_free(efi_system_table_t *sys_table_arg, unsigned long size, return; nr_pages = round_up(size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE; - efi_call_early(free_pages, addr, nr_pages); + efi_bs_call(free_pages, addr, nr_pages); } -static efi_status_t efi_file_size(efi_system_table_t *sys_table_arg, void *__fh, - efi_char16_t *filename_16, void **handle, - u64 *file_sz) +static efi_status_t efi_file_size(void *__fh, efi_char16_t *filename_16, + void **handle, u64 *file_sz) { efi_file_handle_t *h, *fh = __fh; efi_file_info_t *info; @@ -370,81 +365,75 @@ static efi_status_t efi_file_size(efi_system_table_t *sys_table_arg, void *__fh, efi_guid_t info_guid = EFI_FILE_INFO_ID; unsigned long info_sz; - status = efi_call_proto(efi_file_handle, open, fh, &h, filename_16, - EFI_FILE_MODE_READ, (u64)0); + status = fh->open(fh, &h, filename_16, EFI_FILE_MODE_READ, 0); if (status != EFI_SUCCESS) { - efi_printk(sys_table_arg, "Failed to open file: "); - efi_char16_printk(sys_table_arg, filename_16); - efi_printk(sys_table_arg, "\n"); + efi_printk("Failed to open file: "); + efi_char16_printk(filename_16); + efi_printk("\n"); return status; } *handle = h; info_sz = 0; - status = efi_call_proto(efi_file_handle, get_info, h, &info_guid, - &info_sz, NULL); + status = h->get_info(h, &info_guid, &info_sz, NULL); if (status != EFI_BUFFER_TOO_SMALL) { - efi_printk(sys_table_arg, "Failed to get file info size\n"); + efi_printk("Failed to get file info size\n"); return status; } grow: - status = efi_call_early(allocate_pool, EFI_LOADER_DATA, - info_sz, (void **)&info); + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, info_sz, + (void **)&info); if (status != EFI_SUCCESS) { - efi_printk(sys_table_arg, "Failed to alloc mem for file info\n"); + efi_printk("Failed to alloc mem for file info\n"); return status; } - status = efi_call_proto(efi_file_handle, get_info, h, &info_guid, - &info_sz, info); + status = h->get_info(h, &info_guid, &info_sz, info); if (status == EFI_BUFFER_TOO_SMALL) { - efi_call_early(free_pool, info); + efi_bs_call(free_pool, info); goto grow; } *file_sz = info->file_size; - efi_call_early(free_pool, info); + efi_bs_call(free_pool, info); if (status 
!= EFI_SUCCESS) - efi_printk(sys_table_arg, "Failed to get initrd info\n"); + efi_printk("Failed to get initrd info\n"); return status; } -static efi_status_t efi_file_read(void *handle, unsigned long *size, void *addr) +static efi_status_t efi_file_read(efi_file_handle_t *handle, + unsigned long *size, void *addr) { - return efi_call_proto(efi_file_handle, read, handle, size, addr); + return handle->read(handle, size, addr); } -static efi_status_t efi_file_close(void *handle) +static efi_status_t efi_file_close(efi_file_handle_t *handle) { - return efi_call_proto(efi_file_handle, close, handle); + return handle->close(handle); } -static efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, - efi_loaded_image_t *image, +static efi_status_t efi_open_volume(efi_loaded_image_t *image, efi_file_handle_t **__fh) { efi_file_io_interface_t *io; efi_file_handle_t *fh; efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID; efi_status_t status; - void *handle = (void *)(unsigned long)efi_table_attr(efi_loaded_image, - device_handle, - image); + efi_handle_t handle = image->device_handle; - status = efi_call_early(handle_protocol, handle, - &fs_proto, (void **)&io); + status = efi_bs_call(handle_protocol, handle, &fs_proto, (void **)&io); if (status != EFI_SUCCESS) { - efi_printk(sys_table_arg, "Failed to handle fs_proto\n"); + efi_printk("Failed to handle fs_proto\n"); return status; } - status = efi_call_proto(efi_file_io_interface, open_volume, io, &fh); + status = io->open_volume(io, &fh); if (status != EFI_SUCCESS) - efi_printk(sys_table_arg, "Failed to open volume\n"); + efi_printk("Failed to open volume\n"); else *__fh = fh; @@ -465,11 +454,11 @@ efi_status_t efi_parse_options(char const *cmdline) str = strstr(cmdline, "nokaslr"); if (str == cmdline || (str && str > cmdline && *(str - 1) == ' ')) - __nokaslr = 1; + efi_nokaslr = true; str = strstr(cmdline, "quiet"); if (str == cmdline || (str && str > cmdline && *(str - 1) == ' ')) - __quiet = 1; + efi_quiet = true; /* * If no EFI parameters were specified on the cmdline we've got @@ -489,18 +478,28 @@ efi_status_t efi_parse_options(char const *cmdline) while (*str && *str != ' ') { if (!strncmp(str, "nochunk", 7)) { str += strlen("nochunk"); - __chunk_size = -1UL; + efi_chunk_size = -1UL; } if (!strncmp(str, "novamap", 7)) { str += strlen("novamap"); - __novamap = 1; + efi_novamap = true; } if (IS_ENABLED(CONFIG_EFI_SOFT_RESERVE) && !strncmp(str, "nosoftreserve", 7)) { str += strlen("nosoftreserve"); - efi_nosoftreserve = 1; + efi_nosoftreserve = true; + } + + if (!strncmp(str, "disable_early_pci_dma", 21)) { + str += strlen("disable_early_pci_dma"); + efi_disable_pci_dma = true; + } + + if (!strncmp(str, "no_disable_early_pci_dma", 24)) { + str += strlen("no_disable_early_pci_dma"); + efi_disable_pci_dma = false; } /* Group words together, delimited by "," */ @@ -520,8 +519,7 @@ efi_status_t efi_parse_options(char const *cmdline) * We only support loading a file from the same filesystem as * the kernel image. 
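Also visible above is the __chunk_size to efi_chunk_size rename; handle_cmdline_files() below reads files through that fixed window so firmware that mishandles very large single reads still works. The loop's shape as a stand-alone sketch, with read_fn as a hypothetical stand-in for efi_file_read():

/* Read size bytes into dst through a fixed-size window. Returns 0 on
 * success, -1 on the first failed read. */
static int read_in_chunks(int (*read_fn)(void *dst, unsigned long *len),
			  unsigned char *dst, unsigned long size,
			  unsigned long chunk)
{
	while (size) {
		unsigned long n = size > chunk ? chunk : size;

		if (read_fn(dst, &n))
			return -1;	/* caller frees the buffer */
		dst += n;
		size -= n;
	}
	return 0;
}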
*/ -efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg, - efi_loaded_image_t *image, +efi_status_t handle_cmdline_files(efi_loaded_image_t *image, char *cmd_line, char *option_string, unsigned long max_addr, unsigned long *load_addr, @@ -570,10 +568,10 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg, if (!nr_files) return EFI_SUCCESS; - status = efi_call_early(allocate_pool, EFI_LOADER_DATA, - nr_files * sizeof(*files), (void **)&files); + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, + nr_files * sizeof(*files), (void **)&files); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table_arg, "Failed to alloc mem for file handle list\n"); + pr_efi_err("Failed to alloc mem for file handle list\n"); goto fail; } @@ -612,13 +610,13 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg, /* Only open the volume once. */ if (!i) { - status = efi_open_volume(sys_table_arg, image, &fh); + status = efi_open_volume(image, &fh); if (status != EFI_SUCCESS) goto free_files; } - status = efi_file_size(sys_table_arg, fh, filename_16, - (void **)&file->handle, &file->size); + status = efi_file_size(fh, filename_16, (void **)&file->handle, + &file->size); if (status != EFI_SUCCESS) goto close_handles; @@ -633,16 +631,16 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg, * so allocate enough memory for all the files. This is used * for loading multiple files. */ - status = efi_high_alloc(sys_table_arg, file_size_total, 0x1000, - &file_addr, max_addr); + status = efi_high_alloc(file_size_total, 0x1000, &file_addr, + max_addr); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table_arg, "Failed to alloc highmem for files\n"); + pr_efi_err("Failed to alloc highmem for files\n"); goto close_handles; } /* We've run out of free low memory. */ if (file_addr > max_addr) { - pr_efi_err(sys_table_arg, "We've run out of free low memory\n"); + pr_efi_err("We've run out of free low memory\n"); status = EFI_INVALID_PARAMETER; goto free_file_total; } @@ -655,8 +653,8 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg, while (size) { unsigned long chunksize; - if (IS_ENABLED(CONFIG_X86) && size > __chunk_size) - chunksize = __chunk_size; + if (IS_ENABLED(CONFIG_X86) && size > efi_chunk_size) + chunksize = efi_chunk_size; else chunksize = size; @@ -664,7 +662,7 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg, &chunksize, (void *)addr); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table_arg, "Failed to read file\n"); + pr_efi_err("Failed to read file\n"); goto free_file_total; } addr += chunksize; @@ -676,7 +674,7 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg, } - efi_call_early(free_pool, files); + efi_bs_call(free_pool, files); *load_addr = file_addr; *load_size = file_size_total; @@ -684,13 +682,13 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg, return status; free_file_total: - efi_free(sys_table_arg, file_size_total, file_addr); + efi_free(file_size_total, file_addr); close_handles: for (k = j; k < i; k++) efi_file_close(files[k].handle); free_files: - efi_call_early(free_pool, files); + efi_bs_call(free_pool, files); fail: *load_addr = 0; *load_size = 0; @@ -707,8 +705,7 @@ fail: * address is not available the lowest available address will * be used. 
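The comment above states efi_relocate_kernel()'s policy: try the preferred physical address first, then fall back to the lowest address above the minimum. That two-step control flow, with the boot-services calls abstracted into hypothetical callbacks:

#include <stdint.h>

typedef int status_t;
#define STATUS_OK 0	/* stand-in for EFI_SUCCESS */

static status_t relocate(status_t (*alloc_fixed)(uint64_t addr),
			 status_t (*alloc_low)(uint64_t *addr),
			 uint64_t preferred, uint64_t *out)
{
	if (alloc_fixed(preferred) == STATUS_OK) {
		*out = preferred;	/* got the preferred slot */
		return STATUS_OK;
	}
	*out = 0;
	return alloc_low(out);	/* lowest usable address above the minimum */
}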
*/ -efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg, - unsigned long *image_addr, +efi_status_t efi_relocate_kernel(unsigned long *image_addr, unsigned long image_size, unsigned long alloc_size, unsigned long preferred_addr, @@ -737,20 +734,19 @@ efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg, * as possible while respecting the required alignment. */ nr_pages = round_up(alloc_size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE; - status = efi_call_early(allocate_pages, - EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA, - nr_pages, &efi_addr); + status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS, + EFI_LOADER_DATA, nr_pages, &efi_addr); new_addr = efi_addr; /* * If preferred address allocation failed allocate as low as * possible. */ if (status != EFI_SUCCESS) { - status = efi_low_alloc_above(sys_table_arg, alloc_size, - alignment, &new_addr, min_addr); + status = efi_low_alloc_above(alloc_size, alignment, &new_addr, + min_addr); } if (status != EFI_SUCCESS) { - pr_efi_err(sys_table_arg, "Failed to allocate usable memory for kernel.\n"); + pr_efi_err("Failed to allocate usable memory for kernel.\n"); return status; } @@ -824,8 +820,7 @@ static u8 *efi_utf16_to_utf8(u8 *dst, const u16 *src, int n) * Size of memory allocated return in *cmd_line_len. * Returns NULL on error. */ -char *efi_convert_cmdline(efi_system_table_t *sys_table_arg, - efi_loaded_image_t *image, +char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len) { const u16 *s2; @@ -854,8 +849,8 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg, options_bytes++; /* NUL termination */ - status = efi_high_alloc(sys_table_arg, options_bytes, 0, - &cmdline_addr, MAX_CMDLINE_ADDRESS); + status = efi_high_alloc(options_bytes, 0, &cmdline_addr, + MAX_CMDLINE_ADDRESS); if (status != EFI_SUCCESS) return NULL; @@ -877,24 +872,26 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg, * specific structure may be passed to the function via priv. The client * function may be called multiple times. */ -efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table_arg, - void *handle, +efi_status_t efi_exit_boot_services(void *handle, struct efi_boot_memmap *map, void *priv, efi_exit_boot_map_processing priv_func) { efi_status_t status; - status = efi_get_memory_map(sys_table_arg, map); + status = efi_get_memory_map(map); if (status != EFI_SUCCESS) goto fail; - status = priv_func(sys_table_arg, map, priv); + status = priv_func(map, priv); if (status != EFI_SUCCESS) goto free_map; - status = efi_call_early(exit_boot_services, handle, *map->key_ptr); + if (efi_disable_pci_dma) + efi_pci_disable_bridge_busmaster(); + + status = efi_bs_call(exit_boot_services, handle, *map->key_ptr); if (status == EFI_INVALID_PARAMETER) { /* @@ -911,23 +908,23 @@ efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table_arg, * to get_memory_map() is expected to succeed here. 
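The quoted comment covers the one retry efi_exit_boot_services() allows itself: when ExitBootServices() fails with EFI_INVALID_PARAMETER because the map key went stale, the key may be refreshed in place (no allocations permitted) and the call retried exactly once. A sketch of that control flow with hypothetical callbacks:

/* Returns 0 on success, nonzero on failure. refresh_key must not
 * allocate: after the first ExitBootServices() attempt, boot services
 * memory routines are off limits. */
static int exit_boot_once(int (*refresh_key)(unsigned long *key),
			  int (*exit_bs)(unsigned long key))
{
	unsigned long key;

	if (refresh_key(&key))
		return -1;
	if (exit_bs(key) == 0)
		return 0;

	/* Map changed under us: refresh the key in place, retry once. */
	if (refresh_key(&key))
		return -1;
	return exit_bs(key);
}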
*/ *map->map_size = *map->buff_size; - status = efi_call_early(get_memory_map, - map->map_size, - *map->map, - map->key_ptr, - map->desc_size, - map->desc_ver); + status = efi_bs_call(get_memory_map, + map->map_size, + *map->map, + map->key_ptr, + map->desc_size, + map->desc_ver); /* exit_boot_services() was called, thus cannot free */ if (status != EFI_SUCCESS) goto fail; - status = priv_func(sys_table_arg, map, priv); + status = priv_func(map, priv); /* exit_boot_services() was called, thus cannot free */ if (status != EFI_SUCCESS) goto fail; - status = efi_call_early(exit_boot_services, handle, *map->key_ptr); + status = efi_bs_call(exit_boot_services, handle, *map->key_ptr); } /* exit_boot_services() was called, thus cannot free */ @@ -937,38 +934,31 @@ efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table_arg, return EFI_SUCCESS; free_map: - efi_call_early(free_pool, *map->map); + efi_bs_call(free_pool, *map->map); fail: return status; } -#define GET_EFI_CONFIG_TABLE(bits) \ -static void *get_efi_config_table##bits(efi_system_table_t *_sys_table, \ - efi_guid_t guid) \ -{ \ - efi_system_table_##bits##_t *sys_table; \ - efi_config_table_##bits##_t *tables; \ - int i; \ - \ - sys_table = (typeof(sys_table))_sys_table; \ - tables = (typeof(tables))(unsigned long)sys_table->tables; \ - \ - for (i = 0; i < sys_table->nr_tables; i++) { \ - if (efi_guidcmp(tables[i].guid, guid) != 0) \ - continue; \ - \ - return (void *)(unsigned long)tables[i].table; \ - } \ - \ - return NULL; \ +void *get_efi_config_table(efi_guid_t guid) +{ + unsigned long tables = efi_table_attr(efi_system_table(), tables); + int nr_tables = efi_table_attr(efi_system_table(), nr_tables); + int i; + + for (i = 0; i < nr_tables; i++) { + efi_config_table_t *t = (void *)tables; + + if (efi_guidcmp(t->guid, guid) == 0) + return efi_table_attr(t, table); + + tables += efi_is_native() ? 
sizeof(efi_config_table_t) + : sizeof(efi_config_table_32_t); + } + return NULL; } -GET_EFI_CONFIG_TABLE(32) -GET_EFI_CONFIG_TABLE(64) -void *get_efi_config_table(efi_system_table_t *sys_table, efi_guid_t guid) +void efi_char16_printk(efi_char16_t *str) { - if (efi_is_64bit()) - return get_efi_config_table64(sys_table, guid); - else - return get_efi_config_table32(sys_table, guid); + efi_call_proto(efi_table_attr(efi_system_table(), con_out), + output_string, str); } diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h index 05739ae013c8..c244b165005e 100644 --- a/drivers/firmware/efi/libstub/efistub.h +++ b/drivers/firmware/efi/libstub/efistub.h @@ -25,22 +25,30 @@ #define EFI_ALLOC_ALIGN EFI_PAGE_SIZE #endif -extern int __pure nokaslr(void); -extern int __pure is_quiet(void); -extern int __pure novamap(void); +#ifdef CONFIG_ARM +#define __efistub_global __section(.data) +#else +#define __efistub_global +#endif + +extern bool __pure nokaslr(void); +extern bool __pure is_quiet(void); +extern bool __pure novamap(void); + +extern __pure efi_system_table_t *efi_system_table(void); -#define pr_efi(sys_table, msg) do { \ - if (!is_quiet()) efi_printk(sys_table, "EFI stub: "msg); \ +#define pr_efi(msg) do { \ + if (!is_quiet()) efi_printk("EFI stub: "msg); \ } while (0) -#define pr_efi_err(sys_table, msg) efi_printk(sys_table, "EFI stub: ERROR: "msg) +#define pr_efi_err(msg) efi_printk("EFI stub: ERROR: "msg) -void efi_char16_printk(efi_system_table_t *, efi_char16_t *); +void efi_char16_printk(efi_char16_t *); -unsigned long get_dram_base(efi_system_table_t *sys_table_arg); +unsigned long get_dram_base(void); -efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, - void *handle, +efi_status_t allocate_new_fdt_and_exit_boot(void *handle, unsigned long *new_fdt_addr, unsigned long max_addr, u64 initrd_addr, u64 initrd_size, @@ -48,22 +56,20 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, unsigned long fdt_addr, unsigned long fdt_size); -void *get_fdt(efi_system_table_t *sys_table, unsigned long *fdt_size); +void *get_fdt(unsigned long *fdt_size); void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size, unsigned long desc_size, efi_memory_desc_t *runtime_map, int *count); -efi_status_t efi_get_random_bytes(efi_system_table_t *sys_table, - unsigned long size, u8 *out); +efi_status_t efi_get_random_bytes(unsigned long size, u8 *out); -efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg, - unsigned long size, unsigned long align, +efi_status_t efi_random_alloc(unsigned long size, unsigned long align, unsigned long *addr, unsigned long random_seed); -efi_status_t check_platform_features(efi_system_table_t *sys_table_arg); +efi_status_t check_platform_features(void); -void *get_efi_config_table(efi_system_table_t *sys_table, efi_guid_t guid); +void *get_efi_config_table(efi_guid_t guid); /* Helper macros for the usual case of using simple C variables: */ #ifndef fdt_setprop_inplace_var @@ -76,4 +82,12 @@ void *get_efi_config_table(efi_system_table_t *sys_table, efi_guid_t guid); fdt_setprop((fdt), (node_offset), (name), &(var), sizeof(var)) #endif +#define get_efi_var(name, vendor, ...) \ + efi_rt_call(get_variable, (efi_char16_t *)(name), \ + (efi_guid_t *)(vendor), __VA_ARGS__) + +#define set_efi_var(name, vendor, ...) 
\ + efi_rt_call(set_variable, (efi_char16_t *)(name), \ + (efi_guid_t *)(vendor), __VA_ARGS__) + #endif diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c index 0bf0190917e0..0a91e5232127 100644 --- a/drivers/firmware/efi/libstub/fdt.c +++ b/drivers/firmware/efi/libstub/fdt.c @@ -16,7 +16,7 @@ #define EFI_DT_ADDR_CELLS_DEFAULT 2 #define EFI_DT_SIZE_CELLS_DEFAULT 2 -static void fdt_update_cell_size(efi_system_table_t *sys_table, void *fdt) +static void fdt_update_cell_size(void *fdt) { int offset; @@ -27,8 +27,7 @@ static void fdt_update_cell_size(efi_system_table_t *sys_table, void *fdt) fdt_setprop_u32(fdt, offset, "#size-cells", EFI_DT_SIZE_CELLS_DEFAULT); } -static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, - unsigned long orig_fdt_size, +static efi_status_t update_fdt(void *orig_fdt, unsigned long orig_fdt_size, void *fdt, int new_fdt_size, char *cmdline_ptr, u64 initrd_addr, u64 initrd_size) { @@ -40,7 +39,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, /* Do some checks on provided FDT, if it exists: */ if (orig_fdt) { if (fdt_check_header(orig_fdt)) { - pr_efi_err(sys_table, "Device Tree header not valid!\n"); + pr_efi_err("Device Tree header not valid!\n"); return EFI_LOAD_ERROR; } /* @@ -48,7 +47,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, * configuration table: */ if (orig_fdt_size && fdt_totalsize(orig_fdt) > orig_fdt_size) { - pr_efi_err(sys_table, "Truncated device tree! foo!\n"); + pr_efi_err("Truncated device tree! foo!\n"); return EFI_LOAD_ERROR; } } @@ -62,7 +61,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, * Any failure from the following function is * non-critical: */ - fdt_update_cell_size(sys_table, fdt); + fdt_update_cell_size(fdt); } } @@ -111,7 +110,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, /* Add FDT entries for EFI runtime services in chosen node. */ node = fdt_subnode_offset(fdt, 0, "chosen"); - fdt_val64 = cpu_to_fdt64((u64)(unsigned long)sys_table); + fdt_val64 = cpu_to_fdt64((u64)(unsigned long)efi_system_table()); status = fdt_setprop_var(fdt, node, "linux,uefi-system-table", fdt_val64); if (status) @@ -140,7 +139,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { efi_status_t efi_status; - efi_status = efi_get_random_bytes(sys_table, sizeof(fdt_val64), + efi_status = efi_get_random_bytes(sizeof(fdt_val64), (u8 *)&fdt_val64); if (efi_status == EFI_SUCCESS) { status = fdt_setprop_var(fdt, node, "kaslr-seed", fdt_val64); @@ -210,8 +209,7 @@ struct exit_boot_struct { void *new_fdt_addr; }; -static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg, - struct efi_boot_memmap *map, +static efi_status_t exit_boot_func(struct efi_boot_memmap *map, void *priv) { struct exit_boot_struct *p = priv; @@ -244,8 +242,7 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg, * with the final memory map in it. */ -efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, - void *handle, +efi_status_t allocate_new_fdt_and_exit_boot(void *handle, unsigned long *new_fdt_addr, unsigned long max_addr, u64 initrd_addr, u64 initrd_size, @@ -275,19 +272,19 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, * subsequent allocations adding entries, since they could not affect * the number of EFI_MEMORY_RUNTIME regions. 
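That comment is the reason for the double efi_get_memory_map() call in allocate_new_fdt_and_exit_boot(): the virtual map handed to SetVirtualAddressMap() needs one slot per EFI_MEMORY_RUNTIME region, and later allocations cannot grow that count. Counting such regions, as a sketch in which struct md trims efi_memory_desc_t down to the one field used:

#include <stdint.h>
#include <stddef.h>

#define MEM_RUNTIME (1ULL << 63)	/* EFI_MEMORY_RUNTIME attribute bit */

struct md { uint64_t attribute; };

static size_t count_runtime_regions(const struct md *map, size_t nr)
{
	size_t n = 0;

	for (size_t i = 0; i < nr; i++)
		if (map[i].attribute & MEM_RUNTIME)
			n++;
	return n;
}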
*/ - status = efi_get_memory_map(sys_table, &map); + status = efi_get_memory_map(&map); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table, "Unable to retrieve UEFI memory map.\n"); + pr_efi_err("Unable to retrieve UEFI memory map.\n"); return status; } - pr_efi(sys_table, "Exiting boot services and installing virtual address map...\n"); + pr_efi("Exiting boot services and installing virtual address map...\n"); map.map = &memory_map; - status = efi_high_alloc(sys_table, MAX_FDT_SIZE, EFI_FDT_ALIGN, + status = efi_high_alloc(MAX_FDT_SIZE, EFI_FDT_ALIGN, new_fdt_addr, max_addr); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table, "Unable to allocate memory for new device tree.\n"); + pr_efi_err("Unable to allocate memory for new device tree.\n"); goto fail; } @@ -295,16 +292,16 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, * Now that we have done our final memory allocation (and free) * we can get the memory map key needed for exit_boot_services(). */ - status = efi_get_memory_map(sys_table, &map); + status = efi_get_memory_map(&map); if (status != EFI_SUCCESS) goto fail_free_new_fdt; - status = update_fdt(sys_table, (void *)fdt_addr, fdt_size, + status = update_fdt((void *)fdt_addr, fdt_size, (void *)*new_fdt_addr, MAX_FDT_SIZE, cmdline_ptr, initrd_addr, initrd_size); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table, "Unable to construct new device tree.\n"); + pr_efi_err("Unable to construct new device tree.\n"); goto fail_free_new_fdt; } @@ -313,7 +310,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, priv.runtime_entry_count = &runtime_entry_count; priv.new_fdt_addr = (void *)*new_fdt_addr; - status = efi_exit_boot_services(sys_table, handle, &map, &priv, exit_boot_func); + status = efi_exit_boot_services(handle, &map, &priv, exit_boot_func); if (status == EFI_SUCCESS) { efi_set_virtual_address_map_t *svam; @@ -322,7 +319,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, return EFI_SUCCESS; /* Install the new virtual address map */ - svam = sys_table->runtime->set_virtual_address_map; + svam = efi_system_table()->runtime->set_virtual_address_map; status = svam(runtime_entry_count * desc_size, desc_size, desc_ver, runtime_map); @@ -350,28 +347,28 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, return EFI_SUCCESS; } - pr_efi_err(sys_table, "Exit boot services failed.\n"); + pr_efi_err("Exit boot services failed.\n"); fail_free_new_fdt: - efi_free(sys_table, MAX_FDT_SIZE, *new_fdt_addr); + efi_free(MAX_FDT_SIZE, *new_fdt_addr); fail: - sys_table->boottime->free_pool(runtime_map); + efi_system_table()->boottime->free_pool(runtime_map); return EFI_LOAD_ERROR; } -void *get_fdt(efi_system_table_t *sys_table, unsigned long *fdt_size) +void *get_fdt(unsigned long *fdt_size) { void *fdt; - fdt = get_efi_config_table(sys_table, DEVICE_TREE_GUID); + fdt = get_efi_config_table(DEVICE_TREE_GUID); if (!fdt) return NULL; if (fdt_check_header(fdt) != 0) { - pr_efi_err(sys_table, "Invalid header detected on UEFI supplied FDT, ignoring ...\n"); + pr_efi_err("Invalid header detected on UEFI supplied FDT, ignoring ...\n"); return NULL; } *fdt_size = fdt_totalsize(fdt); diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c index b7bf1e993b8b..55e6b3f286fe 100644 --- a/drivers/firmware/efi/libstub/gop.c +++ b/drivers/firmware/efi/libstub/gop.c @@ -10,6 +10,8 @@ #include <asm/efi.h> #include <asm/setup.h> +#include "efistub.h" + static void 
find_bits(unsigned long mask, u8 *pos, u8 *size) { u8 first, len; @@ -35,7 +37,7 @@ static void find_bits(unsigned long mask, u8 *pos, u8 *size) static void setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line, - struct efi_pixel_bitmask pixel_info, int pixel_format) + efi_pixel_bitmask_t pixel_info, int pixel_format) { if (pixel_format == PIXEL_RGB_RESERVED_8BIT_PER_COLOR) { si->lfb_depth = 32; @@ -83,48 +85,42 @@ setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line, } } -static efi_status_t -setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si, - efi_guid_t *proto, unsigned long size, void **gop_handle) +static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto, + unsigned long size, void **handles) { - struct efi_graphics_output_protocol_32 *gop32, *first_gop; - unsigned long nr_gops; + efi_graphics_output_protocol_t *gop, *first_gop; u16 width, height; u32 pixels_per_scan_line; u32 ext_lfb_base; - u64 fb_base; - struct efi_pixel_bitmask pixel_info; + efi_physical_addr_t fb_base; + efi_pixel_bitmask_t pixel_info; int pixel_format; efi_status_t status; - u32 *handles = (u32 *)(unsigned long)gop_handle; + efi_handle_t h; int i; first_gop = NULL; - gop32 = NULL; + gop = NULL; - nr_gops = size / sizeof(u32); - for (i = 0; i < nr_gops; i++) { - struct efi_graphics_output_protocol_mode_32 *mode; - struct efi_graphics_output_mode_info *info = NULL; + for_each_efi_handle(h, handles, size, i) { + efi_graphics_output_protocol_mode_t *mode; + efi_graphics_output_mode_info_t *info = NULL; efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID; bool conout_found = false; void *dummy = NULL; - efi_handle_t h = (efi_handle_t)(unsigned long)handles[i]; - u64 current_fb_base; + efi_physical_addr_t current_fb_base; - status = efi_call_early(handle_protocol, h, - proto, (void **)&gop32); + status = efi_bs_call(handle_protocol, h, proto, (void **)&gop); if (status != EFI_SUCCESS) continue; - status = efi_call_early(handle_protocol, h, - &conout_proto, &dummy); + status = efi_bs_call(handle_protocol, h, &conout_proto, &dummy); if (status == EFI_SUCCESS) conout_found = true; - mode = (void *)(unsigned long)gop32->mode; - info = (void *)(unsigned long)mode->info; - current_fb_base = mode->frame_buffer_base; + mode = efi_table_attr(gop, mode); + info = efi_table_attr(mode, info); + current_fb_base = efi_table_attr(mode, frame_buffer_base); if ((!first_gop || conout_found) && info->pixel_format != PIXEL_BLT_ONLY) { @@ -146,104 +142,7 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si, * Once we've found a GOP supporting ConOut, * don't bother looking any further. */ - first_gop = gop32; - if (conout_found) - break; - } - } - - /* Did we find any GOPs? 
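The merged setup_gop() keeps the Console Splitter workaround described in the quoted comment: prefer a GOP that also implements ConOut, otherwise settle for the first one with a real framebuffer. The selection rule in isolation, with struct gop_view as a hypothetical flattening of the protocol data:

struct gop_view { int has_conout; int blt_only; };

/* Returns the index of the chosen GOP, or -1 if none is usable. */
static int pick_gop(const struct gop_view *g, int n)
{
	int first = -1;

	for (int i = 0; i < n; i++) {
		if (g[i].blt_only)
			continue;	/* no linear framebuffer */
		if (first < 0)
			first = i;	/* fallback candidate */
		if (g[i].has_conout)
			return i;	/* ConOut wins outright */
	}
	return first;
}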
*/ - if (!first_gop) - return EFI_NOT_FOUND; - - /* EFI framebuffer */ - si->orig_video_isVGA = VIDEO_TYPE_EFI; - - si->lfb_width = width; - si->lfb_height = height; - si->lfb_base = fb_base; - - ext_lfb_base = (u64)(unsigned long)fb_base >> 32; - if (ext_lfb_base) { - si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE; - si->ext_lfb_base = ext_lfb_base; - } - - si->pages = 1; - - setup_pixel_info(si, pixels_per_scan_line, pixel_info, pixel_format); - - si->lfb_size = si->lfb_linelength * si->lfb_height; - - si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS; - - return EFI_SUCCESS; -} - -static efi_status_t -setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si, - efi_guid_t *proto, unsigned long size, void **gop_handle) -{ - struct efi_graphics_output_protocol_64 *gop64, *first_gop; - unsigned long nr_gops; - u16 width, height; - u32 pixels_per_scan_line; - u32 ext_lfb_base; - u64 fb_base; - struct efi_pixel_bitmask pixel_info; - int pixel_format; - efi_status_t status; - u64 *handles = (u64 *)(unsigned long)gop_handle; - int i; - - first_gop = NULL; - gop64 = NULL; - - nr_gops = size / sizeof(u64); - for (i = 0; i < nr_gops; i++) { - struct efi_graphics_output_protocol_mode_64 *mode; - struct efi_graphics_output_mode_info *info = NULL; - efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID; - bool conout_found = false; - void *dummy = NULL; - efi_handle_t h = (efi_handle_t)(unsigned long)handles[i]; - u64 current_fb_base; - - status = efi_call_early(handle_protocol, h, - proto, (void **)&gop64); - if (status != EFI_SUCCESS) - continue; - - status = efi_call_early(handle_protocol, h, - &conout_proto, &dummy); - if (status == EFI_SUCCESS) - conout_found = true; - - mode = (void *)(unsigned long)gop64->mode; - info = (void *)(unsigned long)mode->info; - current_fb_base = mode->frame_buffer_base; - - if ((!first_gop || conout_found) && - info->pixel_format != PIXEL_BLT_ONLY) { - /* - * Systems that use the UEFI Console Splitter may - * provide multiple GOP devices, not all of which are - * backed by real hardware. The workaround is to search - * for a GOP implementing the ConOut protocol, and if - * one isn't found, to just fall back to the first GOP. - */ - width = info->horizontal_resolution; - height = info->vertical_resolution; - pixel_format = info->pixel_format; - pixel_info = info->pixel_information; - pixels_per_scan_line = info->pixels_per_scan_line; - fb_base = current_fb_base; - - /* - * Once we've found a GOP supporting ConOut, - * don't bother looking any further. 
- */ - first_gop = gop64; + first_gop = gop; if (conout_found) break; } } @@ -280,33 +179,25 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si, /* * See if we have Graphics Output Protocol */ -efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg, - struct screen_info *si, efi_guid_t *proto, +efi_status_t efi_setup_gop(struct screen_info *si, efi_guid_t *proto, unsigned long size) { efi_status_t status; void **gop_handle = NULL; - status = efi_call_early(allocate_pool, EFI_LOADER_DATA, - size, (void **)&gop_handle); + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size, + (void **)&gop_handle); if (status != EFI_SUCCESS) return status; - status = efi_call_early(locate_handle, - EFI_LOCATE_BY_PROTOCOL, - proto, NULL, &size, gop_handle); + status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, proto, NULL, + &size, gop_handle); if (status != EFI_SUCCESS) goto free_handle; - if (efi_is_64bit()) { - status = setup_gop64(sys_table_arg, si, proto, size, - gop_handle); - } else { - status = setup_gop32(sys_table_arg, si, proto, size, - gop_handle); - } + status = setup_gop(si, proto, size, gop_handle); free_handle: - efi_call_early(free_pool, gop_handle); + efi_bs_call(free_pool, gop_handle); return status; } diff --git a/drivers/firmware/efi/libstub/pci.c b/drivers/firmware/efi/libstub/pci.c new file mode 100644 index 000000000000..b025e59b94df --- /dev/null +++ b/drivers/firmware/efi/libstub/pci.c @@ -0,0 +1,114 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCI-related functions used by the EFI stub on multiple + * architectures. + * + * Copyright 2019 Google, LLC + */ + +#include <linux/efi.h> +#include <linux/pci.h> + +#include <asm/efi.h> + +#include "efistub.h" + +void efi_pci_disable_bridge_busmaster(void) +{ + efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID; + unsigned long pci_handle_size = 0; + efi_handle_t *pci_handle = NULL; + efi_handle_t handle; + efi_status_t status; + u16 class, command; + int i; + + status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, &pci_proto, + NULL, &pci_handle_size, NULL); + + if (status != EFI_BUFFER_TOO_SMALL) { + if (status != EFI_SUCCESS && status != EFI_NOT_FOUND) + pr_efi_err("Failed to locate PCI I/O handles\n"); + return; + } + + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, pci_handle_size, + (void **)&pci_handle); + if (status != EFI_SUCCESS) { + pr_efi_err("Failed to allocate memory for 'pci_handle'\n"); + return; + } + + status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, &pci_proto, + NULL, &pci_handle_size, pci_handle); + if (status != EFI_SUCCESS) { + pr_efi_err("Failed to locate PCI I/O handles\n"); + goto free_handle; + } + + for_each_efi_handle(handle, pci_handle, pci_handle_size, i) { + efi_pci_io_protocol_t *pci; + unsigned long segment_nr, bus_nr, device_nr, func_nr; + + status = efi_bs_call(handle_protocol, handle, &pci_proto, + (void **)&pci); + if (status != EFI_SUCCESS) + continue; + + /* + * Disregard devices living on bus 0 - these are not behind a + * bridge so no point in disconnecting them from their drivers. + */ + status = efi_call_proto(pci, get_location, &segment_nr, &bus_nr, + &device_nr, &func_nr); + if (status != EFI_SUCCESS || bus_nr == 0) + continue; + + /* + * Don't disconnect VGA controllers so we don't risk losing + * access to the framebuffer. 
Drivers for true PCIe graphics + * controllers that are behind a PCIe root port do not use + * DMA to implement the GOP framebuffer anyway [although they + * may use it in their implementation of Gop->Blt()], and so + * disabling DMA in the PCI bridge should not interfere with + * normal operation of the device. + */ + status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16, + PCI_CLASS_DEVICE, 1, &class); + if (status != EFI_SUCCESS || class == PCI_CLASS_DISPLAY_VGA) + continue; + + /* Disconnect this handle from all its drivers */ + efi_bs_call(disconnect_controller, handle, NULL, NULL); + } + + for_each_efi_handle(handle, pci_handle, pci_handle_size, i) { + efi_pci_io_protocol_t *pci; + + status = efi_bs_call(handle_protocol, handle, &pci_proto, + (void **)&pci); + if (status != EFI_SUCCESS || !pci) + continue; + + status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16, + PCI_CLASS_DEVICE, 1, &class); + + if (status != EFI_SUCCESS || class != PCI_CLASS_BRIDGE_PCI) + continue; + + /* Disable busmastering */ + status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16, + PCI_COMMAND, 1, &command); + if (status != EFI_SUCCESS || !(command & PCI_COMMAND_MASTER)) + continue; + + command &= ~PCI_COMMAND_MASTER; + status = efi_call_proto(pci, pci.write, EfiPciIoWidthUint16, + PCI_COMMAND, 1, &command); + if (status != EFI_SUCCESS) + pr_efi_err("Failed to disable PCI busmastering\n"); + } + +free_handle: + efi_bs_call(free_pool, pci_handle); +} diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c index 97378cf96a2e..316ce9ff0193 100644 --- a/drivers/firmware/efi/libstub/random.c +++ b/drivers/firmware/efi/libstub/random.c @@ -9,38 +9,34 @@ #include "efistub.h" -typedef struct efi_rng_protocol efi_rng_protocol_t; - -typedef struct { - u32 get_info; - u32 get_rng; -} efi_rng_protocol_32_t; - -typedef struct { - u64 get_info; - u64 get_rng; -} efi_rng_protocol_64_t; - -struct efi_rng_protocol { - efi_status_t (*get_info)(struct efi_rng_protocol *, - unsigned long *, efi_guid_t *); - efi_status_t (*get_rng)(struct efi_rng_protocol *, - efi_guid_t *, unsigned long, u8 *out); +typedef union efi_rng_protocol efi_rng_protocol_t; + +union efi_rng_protocol { + struct { + efi_status_t (__efiapi *get_info)(efi_rng_protocol_t *, + unsigned long *, + efi_guid_t *); + efi_status_t (__efiapi *get_rng)(efi_rng_protocol_t *, + efi_guid_t *, unsigned long, + u8 *out); + }; + struct { + u32 get_info; + u32 get_rng; + } mixed_mode; }; -efi_status_t efi_get_random_bytes(efi_system_table_t *sys_table_arg, - unsigned long size, u8 *out) +efi_status_t efi_get_random_bytes(unsigned long size, u8 *out) { efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID; efi_status_t status; - struct efi_rng_protocol *rng = NULL; + efi_rng_protocol_t *rng = NULL; - status = efi_call_early(locate_protocol, &rng_proto, NULL, - (void **)&rng); + status = efi_bs_call(locate_protocol, &rng_proto, NULL, (void **)&rng); if (status != EFI_SUCCESS) return status; - return efi_call_proto(efi_rng_protocol, get_rng, rng, NULL, size, out); + return efi_call_proto(rng, get_rng, NULL, size, out); } /* @@ -81,8 +77,7 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md, */ #define MD_NUM_SLOTS(md) ((md)->virt_addr) -efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg, - unsigned long size, +efi_status_t efi_random_alloc(unsigned long size, unsigned long align, unsigned long *addr, unsigned long random_seed) @@ -101,7 +96,7 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg, 
map.key_ptr = NULL; map.buff_size = &buff_size; - status = efi_get_memory_map(sys_table_arg, &map); + status = efi_get_memory_map(&map); if (status != EFI_SUCCESS) return status; @@ -145,39 +140,38 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg, target = round_up(md->phys_addr, align) + target_slot * align; pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE; - status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS, - EFI_LOADER_DATA, pages, &target); + status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS, + EFI_LOADER_DATA, pages, &target); if (status == EFI_SUCCESS) *addr = target; break; } - efi_call_early(free_pool, memory_map); + efi_bs_call(free_pool, memory_map); return status; } -efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg) +efi_status_t efi_random_get_seed(void) { efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID; efi_guid_t rng_algo_raw = EFI_RNG_ALGORITHM_RAW; efi_guid_t rng_table_guid = LINUX_EFI_RANDOM_SEED_TABLE_GUID; - struct efi_rng_protocol *rng = NULL; + efi_rng_protocol_t *rng = NULL; struct linux_efi_random_seed *seed = NULL; efi_status_t status; - status = efi_call_early(locate_protocol, &rng_proto, NULL, - (void **)&rng); + status = efi_bs_call(locate_protocol, &rng_proto, NULL, (void **)&rng); if (status != EFI_SUCCESS) return status; - status = efi_call_early(allocate_pool, EFI_RUNTIME_SERVICES_DATA, - sizeof(*seed) + EFI_RANDOM_SEED_SIZE, - (void **)&seed); + status = efi_bs_call(allocate_pool, EFI_RUNTIME_SERVICES_DATA, + sizeof(*seed) + EFI_RANDOM_SEED_SIZE, + (void **)&seed); if (status != EFI_SUCCESS) return status; - status = efi_call_proto(efi_rng_protocol, get_rng, rng, &rng_algo_raw, + status = efi_call_proto(rng, get_rng, &rng_algo_raw, EFI_RANDOM_SEED_SIZE, seed->bits); if (status == EFI_UNSUPPORTED) @@ -185,21 +179,20 @@ efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg) * Use whatever algorithm we have available if the raw algorithm * is not implemented. */ - status = efi_call_proto(efi_rng_protocol, get_rng, rng, NULL, - EFI_RANDOM_SEED_SIZE, seed->bits); + status = efi_call_proto(rng, get_rng, NULL, + EFI_RANDOM_SEED_SIZE, seed->bits); if (status != EFI_SUCCESS) goto err_freepool; seed->size = EFI_RANDOM_SEED_SIZE; - status = efi_call_early(install_configuration_table, &rng_table_guid, - seed); + status = efi_bs_call(install_configuration_table, &rng_table_guid, seed); if (status != EFI_SUCCESS) goto err_freepool; return EFI_SUCCESS; err_freepool: - efi_call_early(free_pool, seed); + efi_bs_call(free_pool, seed); return status; } diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c index edba5e7a3743..a765378ad18c 100644 --- a/drivers/firmware/efi/libstub/secureboot.c +++ b/drivers/firmware/efi/libstub/secureboot.c @@ -21,18 +21,13 @@ static const efi_char16_t efi_SetupMode_name[] = L"SetupMode"; static const efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID; static const efi_char16_t shim_MokSBState_name[] = L"MokSBState"; -#define get_efi_var(name, vendor, ...) \ - efi_call_runtime(get_variable, \ - (efi_char16_t *)(name), (efi_guid_t *)(vendor), \ - __VA_ARGS__); - /* * Determine whether we're in secure boot mode. * * Please keep the logic in sync with * arch/x86/xen/efi.c:xen_efi_get_secureboot(). 
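efi_get_secureboot() below distills three EFI variables into one verdict: SecureBoot clear or SetupMode set means disabled, and shim's MokSBState can veto an otherwise enabled result. The decision table pulled out as a sketch; the values are illustrative reads, not variable-services calls:

enum sb_mode { SB_DISABLED, SB_ENABLED };

static enum sb_mode classify_secureboot(unsigned char secboot,
					unsigned char setupmode,
					int have_moksbstate,
					unsigned char moksbstate)
{
	if (secboot == 0 || setupmode == 1)
		return SB_DISABLED;
	/* MokSBState == 1: shim will not verify images, report off. */
	if (have_moksbstate && moksbstate == 1)
		return SB_DISABLED;
	return SB_ENABLED;
}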
*/ -enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table_arg) +enum efi_secureboot_mode efi_get_secureboot(void) { u32 attr; u8 secboot, setupmode, moksbstate; @@ -72,10 +67,10 @@ enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table_arg) return efi_secureboot_mode_disabled; secure_boot_enabled: - pr_efi(sys_table_arg, "UEFI Secure Boot is enabled.\n"); + pr_efi("UEFI Secure Boot is enabled.\n"); return efi_secureboot_mode_enabled; out_efi_err: - pr_efi_err(sys_table_arg, "Could not determine UEFI Secure Boot status.\n"); + pr_efi_err("Could not determine UEFI Secure Boot status.\n"); return efi_secureboot_mode_unknown; } diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c index eb9af83e4d59..1d59e103a2e3 100644 --- a/drivers/firmware/efi/libstub/tpm.c +++ b/drivers/firmware/efi/libstub/tpm.c @@ -20,23 +20,13 @@ static const efi_char16_t efi_MemoryOverWriteRequest_name[] = #define MEMORY_ONLY_RESET_CONTROL_GUID \ EFI_GUID(0xe20939be, 0x32d4, 0x41be, 0xa1, 0x50, 0x89, 0x7f, 0x85, 0xd4, 0x98, 0x29) -#define get_efi_var(name, vendor, ...) \ - efi_call_runtime(get_variable, \ - (efi_char16_t *)(name), (efi_guid_t *)(vendor), \ - __VA_ARGS__) - -#define set_efi_var(name, vendor, ...) \ - efi_call_runtime(set_variable, \ - (efi_char16_t *)(name), (efi_guid_t *)(vendor), \ - __VA_ARGS__) - /* * Enable reboot attack mitigation. This requests that the firmware clear the * RAM on next reboot before proceeding with boot, ensuring that any secrets * are cleared. If userland has ensured that all secrets have been removed * from RAM before reboot it can simply reset this variable. */ -void efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg) +void efi_enable_reset_attack_mitigation(void) { u8 val = 1; efi_guid_t var_guid = MEMORY_ONLY_RESET_CONTROL_GUID; @@ -57,7 +47,7 @@ void efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg) #endif -void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg) +void efi_retrieve_tpm2_eventlog(void) { efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID; efi_guid_t linux_eventlog_guid = LINUX_EFI_TPM_EVENT_LOG_GUID; @@ -69,23 +59,22 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg) size_t log_size, last_entry_size; efi_bool_t truncated; int version = EFI_TCG2_EVENT_LOG_FORMAT_TCG_2; - void *tcg2_protocol = NULL; + efi_tcg2_protocol_t *tcg2_protocol = NULL; int final_events_size = 0; - status = efi_call_early(locate_protocol, &tcg2_guid, NULL, - &tcg2_protocol); + status = efi_bs_call(locate_protocol, &tcg2_guid, NULL, + (void **)&tcg2_protocol); if (status != EFI_SUCCESS) return; - status = efi_call_proto(efi_tcg2_protocol, get_event_log, - tcg2_protocol, version, &log_location, - &log_last_entry, &truncated); + status = efi_call_proto(tcg2_protocol, get_event_log, version, + &log_location, &log_last_entry, &truncated); if (status != EFI_SUCCESS || !log_location) { version = EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2; - status = efi_call_proto(efi_tcg2_protocol, get_event_log, - tcg2_protocol, version, &log_location, - &log_last_entry, &truncated); + status = efi_call_proto(tcg2_protocol, get_event_log, version, + &log_location, &log_last_entry, + &truncated); if (status != EFI_SUCCESS || !log_location) return; @@ -126,13 +115,11 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg) } /* Allocate space for the logs and copy them. 
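Before the allocation that the quoted comment announces, efi_retrieve_tpm2_eventlog() must size the log: the TCG2 protocol reports the first and last entry addresses, so the total is their distance plus the final entry's own length, which is format-dependent (hence the callback in this sketch):

#include <stdint.h>
#include <stddef.h>

/* entry_size is a hypothetical parser for the version-specific entry
 * header at the given address. Returns 0 for an empty/malformed log. */
static size_t tpm_log_size(uintptr_t first, uintptr_t last,
			   size_t (*entry_size)(uintptr_t entry))
{
	if (!last || last < first)
		return 0;
	return (size_t)(last - first) + entry_size(last);
}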
*/ - status = efi_call_early(allocate_pool, EFI_LOADER_DATA, - sizeof(*log_tbl) + log_size, - (void **) &log_tbl); + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, + sizeof(*log_tbl) + log_size, (void **)&log_tbl); if (status != EFI_SUCCESS) { - efi_printk(sys_table_arg, - "Unable to allocate memory for event log\n"); + efi_printk("Unable to allocate memory for event log\n"); return; } @@ -140,8 +127,7 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg) * Figure out whether any events have already been logged to the * final events structure, and if so how much space they take up */ - final_events_table = get_efi_config_table(sys_table_arg, - LINUX_EFI_TPM_FINAL_LOG_GUID); + final_events_table = get_efi_config_table(LINUX_EFI_TPM_FINAL_LOG_GUID); if (final_events_table && final_events_table->nr_events) { struct tcg_pcr_event2_head *header; int offset; @@ -169,12 +155,12 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg) log_tbl->version = version; memcpy(log_tbl->log, (void *) first_entry_addr, log_size); - status = efi_call_early(install_configuration_table, - &linux_eventlog_guid, log_tbl); + status = efi_bs_call(install_configuration_table, + &linux_eventlog_guid, log_tbl); if (status != EFI_SUCCESS) goto err_free; return; err_free: - efi_call_early(free_pool, log_tbl); + efi_bs_call(free_pool, log_tbl); } diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c index 38b686c67b17..2ff1883dc788 100644 --- a/drivers/firmware/efi/memmap.c +++ b/drivers/firmware/efi/memmap.c @@ -29,9 +29,32 @@ static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size) return PFN_PHYS(page_to_pfn(p)); } +void __init __efi_memmap_free(u64 phys, unsigned long size, unsigned long flags) +{ + if (flags & EFI_MEMMAP_MEMBLOCK) { + if (slab_is_available()) + memblock_free_late(phys, size); + else + memblock_free(phys, size); + } else if (flags & EFI_MEMMAP_SLAB) { + struct page *p = pfn_to_page(PHYS_PFN(phys)); + unsigned int order = get_order(size); + + free_pages((unsigned long) page_address(p), order); + } +} + +static void __init efi_memmap_free(void) +{ + __efi_memmap_free(efi.memmap.phys_map, + efi.memmap.desc_size * efi.memmap.nr_map, + efi.memmap.flags); +} + /** * efi_memmap_alloc - Allocate memory for the EFI memory map * @num_entries: Number of entries in the allocated map. + * @data: efi memmap installation parameters * * Depending on whether mm_init() has already been invoked or not, * either memblock or "normal" page allocation is used. @@ -39,34 +62,47 @@ static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size) * Returns the physical address of the allocated memory map on * success, zero on failure. 
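efi_memmap_alloc() now records how the backing store was obtained, because __efi_memmap_free() must release it the same way. The early/late split in isolation; the allocator callbacks are hypothetical stand-ins for memblock and the page allocator, and the flag values are illustrative:

#define MAP_MEMBLOCK	0x1	/* allocated before mm_init() */
#define MAP_SLAB	0x2	/* allocated from the page allocator */

static unsigned long memmap_alloc(unsigned long size, int late,
				  unsigned long (*early_alloc)(unsigned long),
				  unsigned long (*late_alloc)(unsigned long),
				  unsigned int *flags)
{
	if (late) {
		*flags |= MAP_SLAB;
		return late_alloc(size);
	}
	*flags |= MAP_MEMBLOCK;
	return early_alloc(size);
}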
*/ -phys_addr_t __init efi_memmap_alloc(unsigned int num_entries) +int __init efi_memmap_alloc(unsigned int num_entries, + struct efi_memory_map_data *data) { - unsigned long size = num_entries * efi.memmap.desc_size; - - if (slab_is_available()) - return __efi_memmap_alloc_late(size); + /* Expect allocation parameters are zero initialized */ + WARN_ON(data->phys_map || data->size); + + data->size = num_entries * efi.memmap.desc_size; + data->desc_version = efi.memmap.desc_version; + data->desc_size = efi.memmap.desc_size; + data->flags &= ~(EFI_MEMMAP_SLAB | EFI_MEMMAP_MEMBLOCK); + data->flags |= efi.memmap.flags & EFI_MEMMAP_LATE; + + if (slab_is_available()) { + data->flags |= EFI_MEMMAP_SLAB; + data->phys_map = __efi_memmap_alloc_late(data->size); + } else { + data->flags |= EFI_MEMMAP_MEMBLOCK; + data->phys_map = __efi_memmap_alloc_early(data->size); + } - return __efi_memmap_alloc_early(size); + if (!data->phys_map) + return -ENOMEM; + return 0; } /** * __efi_memmap_init - Common code for mapping the EFI memory map * @data: EFI memory map data - * @late: Use early or late mapping function? * * This function takes care of figuring out which function to use to * map the EFI memory map in efi.memmap based on how far into the boot * we are. * - * During bootup @late should be %false since we only have access to - * the early_memremap*() functions as the vmalloc space isn't setup. - * Once the kernel is fully booted we can fallback to the more robust - * memremap*() API. + * During bootup EFI_MEMMAP_LATE in data->flags should be clear since we + * only have access to the early_memremap*() functions as the vmalloc + * space isn't setup. Once the kernel is fully booted we can fallback + * to the more robust memremap*() API. * * Returns zero on success, a negative error code on failure. 
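With the bool late parameter gone, __efi_memmap_init() reads the same facts out of data->flags. Two small predicates capture what the flag word encodes; names and values here are illustrative, mirroring EFI_MEMMAP_LATE and friends:

#include <stdbool.h>

#define MAP_MEMBLOCK	0x1
#define MAP_SLAB	0x2
#define MAP_LATE	0x4	/* vmalloc is up: memremap() is legal */

static bool can_use_memremap(unsigned int flags)
{
	return flags & MAP_LATE;	/* else: early_memremap() only */
}

static bool owns_backing_store(unsigned int flags)
{
	/* a map we did not allocate must not be freed on replace */
	return flags & (MAP_MEMBLOCK | MAP_SLAB);
}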
*/ -static int __init -__efi_memmap_init(struct efi_memory_map_data *data, bool late) +static int __init __efi_memmap_init(struct efi_memory_map_data *data) { struct efi_memory_map map; phys_addr_t phys_map; @@ -76,7 +112,7 @@ __efi_memmap_init(struct efi_memory_map_data *data, bool late) phys_map = data->phys_map; - if (late) + if (data->flags & EFI_MEMMAP_LATE) map.map = memremap(phys_map, data->size, MEMREMAP_WB); else map.map = early_memremap(phys_map, data->size); @@ -86,13 +122,16 @@ __efi_memmap_init(struct efi_memory_map_data *data, bool late) return -ENOMEM; } + /* NOP if data->flags & (EFI_MEMMAP_MEMBLOCK | EFI_MEMMAP_SLAB) == 0 */ + efi_memmap_free(); + map.phys_map = data->phys_map; map.nr_map = data->size / data->desc_size; map.map_end = map.map + data->size; map.desc_version = data->desc_version; map.desc_size = data->desc_size; - map.late = late; + map.flags = data->flags; set_bit(EFI_MEMMAP, &efi.flags); @@ -111,9 +150,10 @@ __efi_memmap_init(struct efi_memory_map_data *data, bool late) int __init efi_memmap_init_early(struct efi_memory_map_data *data) { /* Cannot go backwards */ - WARN_ON(efi.memmap.late); + WARN_ON(efi.memmap.flags & EFI_MEMMAP_LATE); - return __efi_memmap_init(data, false); + data->flags = 0; + return __efi_memmap_init(data); } void __init efi_memmap_unmap(void) @@ -121,7 +161,7 @@ void __init efi_memmap_unmap(void) if (!efi_enabled(EFI_MEMMAP)) return; - if (!efi.memmap.late) { + if (!(efi.memmap.flags & EFI_MEMMAP_LATE)) { unsigned long size; size = efi.memmap.desc_size * efi.memmap.nr_map; @@ -162,13 +202,14 @@ int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size) struct efi_memory_map_data data = { .phys_map = addr, .size = size, + .flags = EFI_MEMMAP_LATE, }; /* Did we forget to unmap the early EFI memmap? */ WARN_ON(efi.memmap.map); /* Were we already called? */ - WARN_ON(efi.memmap.late); + WARN_ON(efi.memmap.flags & EFI_MEMMAP_LATE); /* * It makes no sense to allow callers to register different @@ -178,13 +219,12 @@ int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size) data.desc_version = efi.memmap.desc_version; data.desc_size = efi.memmap.desc_size; - return __efi_memmap_init(&data, true); + return __efi_memmap_init(&data); } /** * efi_memmap_install - Install a new EFI memory map in efi.memmap - * @addr: Physical address of the memory map - * @nr_map: Number of entries in the memory map + * @ctx: map allocation parameters (address, size, flags) * * Unlike efi_memmap_init_*(), this function does not allow the caller * to switch from early to late mappings. It simply uses the existing @@ -192,18 +232,11 @@ int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size) * * Returns zero on success, a negative error code on failure. */ -int __init efi_memmap_install(phys_addr_t addr, unsigned int nr_map) +int __init efi_memmap_install(struct efi_memory_map_data *data) { - struct efi_memory_map_data data; - efi_memmap_unmap(); - data.phys_map = addr; - data.size = efi.memmap.desc_size * nr_map; - data.desc_version = efi.memmap.desc_version; - data.desc_size = efi.memmap.desc_size; - - return __efi_memmap_init(&data, efi.memmap.late); + return __efi_memmap_init(data); } /** diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 4b6d2ef15c39..f57d95a3db02 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -479,6 +479,15 @@ config GPIO_SAMA5D2_PIOBU The difference from regular GPIOs is that they maintain their value during backup/self-refresh. 
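The GPIO_SIFIVE entry added below selects REGMAP_MMIO because the driver keeps four separate interrupt-enable registers, one per trigger type, and programs a line's bit into whichever of them the requested trigger calls for (see sifive_gpio_set_ie() further down). That mapping in isolation, where write_reg is a hypothetical stand-in for the driver's regmap_update_bits() calls and the IRQ_* values follow the kernel's IRQ_TYPE_* encoding:

#include <stdint.h>

#define IRQ_EDGE_RISING		0x1
#define IRQ_EDGE_FALLING	0x2
#define IRQ_LEVEL_HIGH		0x4
#define IRQ_LEVEL_LOW		0x8

enum ie_reg { RISE_IE, FALL_IE, HIGH_IE, LOW_IE };

static void set_line_trigger(void (*write_reg)(enum ie_reg reg,
					       uint32_t mask, uint32_t val),
			     unsigned int line, unsigned int trigger)
{
	uint32_t bit = 1U << line;

	/* Set the line's bit only in the registers its trigger asks for. */
	write_reg(RISE_IE, bit, (trigger & IRQ_EDGE_RISING)  ? bit : 0);
	write_reg(FALL_IE, bit, (trigger & IRQ_EDGE_FALLING) ? bit : 0);
	write_reg(HIGH_IE, bit, (trigger & IRQ_LEVEL_HIGH)   ? bit : 0);
	write_reg(LOW_IE,  bit, (trigger & IRQ_LEVEL_LOW)    ? bit : 0);
}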
+config GPIO_SIFIVE + bool "SiFive GPIO support" + depends on OF_GPIO && IRQ_DOMAIN_HIERARCHY + select GPIO_GENERIC + select GPIOLIB_IRQCHIP + select REGMAP_MMIO + help + Say yes here to support the GPIO device on SiFive SoCs. + config GPIO_SIOX tristate "SIOX GPIO support" depends on SIOX diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index 34eb8b2b12dd..11eeeebbde0d 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -124,6 +124,7 @@ obj-$(CONFIG_ARCH_SA1100) += gpio-sa1100.o obj-$(CONFIG_GPIO_SAMA5D2_PIOBU) += gpio-sama5d2-piobu.o obj-$(CONFIG_GPIO_SCH311X) += gpio-sch311x.o obj-$(CONFIG_GPIO_SCH) += gpio-sch.o +obj-$(CONFIG_GPIO_SIFIVE) += gpio-sifive.o obj-$(CONFIG_GPIO_SIOX) += gpio-siox.o obj-$(CONFIG_GPIO_SODAVILLE) += gpio-sodaville.o obj-$(CONFIG_GPIO_SPEAR_SPICS) += gpio-spear-spics.o diff --git a/drivers/gpio/gpio-sifive.c b/drivers/gpio/gpio-sifive.c new file mode 100644 index 000000000000..147a1bd04515 --- /dev/null +++ b/drivers/gpio/gpio-sifive.c @@ -0,0 +1,252 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 SiFive + */ + +#include <linux/bitops.h> +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/of_irq.h> +#include <linux/gpio/driver.h> +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/regmap.h> + +#define SIFIVE_GPIO_INPUT_VAL 0x00 +#define SIFIVE_GPIO_INPUT_EN 0x04 +#define SIFIVE_GPIO_OUTPUT_EN 0x08 +#define SIFIVE_GPIO_OUTPUT_VAL 0x0C +#define SIFIVE_GPIO_RISE_IE 0x18 +#define SIFIVE_GPIO_RISE_IP 0x1C +#define SIFIVE_GPIO_FALL_IE 0x20 +#define SIFIVE_GPIO_FALL_IP 0x24 +#define SIFIVE_GPIO_HIGH_IE 0x28 +#define SIFIVE_GPIO_HIGH_IP 0x2C +#define SIFIVE_GPIO_LOW_IE 0x30 +#define SIFIVE_GPIO_LOW_IP 0x34 +#define SIFIVE_GPIO_OUTPUT_XOR 0x40 + +#define SIFIVE_GPIO_MAX 32 +#define SIFIVE_GPIO_IRQ_OFFSET 7 + +struct sifive_gpio { + void __iomem *base; + struct gpio_chip gc; + struct regmap *regs; + u32 irq_state; + unsigned int trigger[SIFIVE_GPIO_MAX]; + unsigned int irq_parent[SIFIVE_GPIO_MAX]; +}; + +static void sifive_gpio_set_ie(struct sifive_gpio *chip, unsigned int offset) +{ + unsigned long flags; + unsigned int trigger; + + spin_lock_irqsave(&chip->gc.bgpio_lock, flags); + trigger = (chip->irq_state & BIT(offset)) ? chip->trigger[offset] : 0; + regmap_update_bits(chip->regs, SIFIVE_GPIO_RISE_IE, BIT(offset), + (trigger & IRQ_TYPE_EDGE_RISING) ? BIT(offset) : 0); + regmap_update_bits(chip->regs, SIFIVE_GPIO_FALL_IE, BIT(offset), + (trigger & IRQ_TYPE_EDGE_FALLING) ? BIT(offset) : 0); + regmap_update_bits(chip->regs, SIFIVE_GPIO_HIGH_IE, BIT(offset), + (trigger & IRQ_TYPE_LEVEL_HIGH) ? BIT(offset) : 0); + regmap_update_bits(chip->regs, SIFIVE_GPIO_LOW_IE, BIT(offset), + (trigger & IRQ_TYPE_LEVEL_LOW) ? 
BIT(offset) : 0); + spin_unlock_irqrestore(&chip->gc.bgpio_lock, flags); +} + +static int sifive_gpio_irq_set_type(struct irq_data *d, unsigned int trigger) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct sifive_gpio *chip = gpiochip_get_data(gc); + int offset = irqd_to_hwirq(d); + + if (offset < 0 || offset >= gc->ngpio) + return -EINVAL; + + chip->trigger[offset] = trigger; + sifive_gpio_set_ie(chip, offset); + return 0; +} + +static void sifive_gpio_irq_enable(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct sifive_gpio *chip = gpiochip_get_data(gc); + int offset = irqd_to_hwirq(d) % SIFIVE_GPIO_MAX; + u32 bit = BIT(offset); + unsigned long flags; + + irq_chip_enable_parent(d); + + /* Switch to input */ + gc->direction_input(gc, offset); + + spin_lock_irqsave(&gc->bgpio_lock, flags); + /* Clear any sticky pending interrupts */ + regmap_write(chip->regs, SIFIVE_GPIO_RISE_IP, bit); + regmap_write(chip->regs, SIFIVE_GPIO_FALL_IP, bit); + regmap_write(chip->regs, SIFIVE_GPIO_HIGH_IP, bit); + regmap_write(chip->regs, SIFIVE_GPIO_LOW_IP, bit); + spin_unlock_irqrestore(&gc->bgpio_lock, flags); + + /* Enable interrupts */ + assign_bit(offset, (unsigned long *)&chip->irq_state, 1); + sifive_gpio_set_ie(chip, offset); +} + +static void sifive_gpio_irq_disable(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct sifive_gpio *chip = gpiochip_get_data(gc); + int offset = irqd_to_hwirq(d) % SIFIVE_GPIO_MAX; + + assign_bit(offset, (unsigned long *)&chip->irq_state, 0); + sifive_gpio_set_ie(chip, offset); + irq_chip_disable_parent(d); +} + +static void sifive_gpio_irq_eoi(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct sifive_gpio *chip = gpiochip_get_data(gc); + int offset = irqd_to_hwirq(d) % SIFIVE_GPIO_MAX; + u32 bit = BIT(offset); + unsigned long flags; + + spin_lock_irqsave(&gc->bgpio_lock, flags); + /* Clear all pending interrupts */ + regmap_write(chip->regs, SIFIVE_GPIO_RISE_IP, bit); + regmap_write(chip->regs, SIFIVE_GPIO_FALL_IP, bit); + regmap_write(chip->regs, SIFIVE_GPIO_HIGH_IP, bit); + regmap_write(chip->regs, SIFIVE_GPIO_LOW_IP, bit); + spin_unlock_irqrestore(&gc->bgpio_lock, flags); + + irq_chip_eoi_parent(d); +} + +static struct irq_chip sifive_gpio_irqchip = { + .name = "sifive-gpio", + .irq_set_type = sifive_gpio_irq_set_type, + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent, + .irq_enable = sifive_gpio_irq_enable, + .irq_disable = sifive_gpio_irq_disable, + .irq_eoi = sifive_gpio_irq_eoi, +}; + +static int sifive_gpio_child_to_parent_hwirq(struct gpio_chip *gc, + unsigned int child, + unsigned int child_type, + unsigned int *parent, + unsigned int *parent_type) +{ + *parent_type = IRQ_TYPE_NONE; + *parent = child + SIFIVE_GPIO_IRQ_OFFSET; + return 0; +} + +static const struct regmap_config sifive_gpio_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .fast_io = true, + .disable_locking = true, +}; + +static int sifive_gpio_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = pdev->dev.of_node; + struct device_node *irq_parent; + struct irq_domain *parent; + struct gpio_irq_chip *girq; + struct sifive_gpio *chip; + int ret, ngpio; + + chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + chip->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(chip->base)) { + dev_err(dev, "failed to allocate device 
memory\n"); + return PTR_ERR(chip->base); + } + + chip->regs = devm_regmap_init_mmio(dev, chip->base, + &sifive_gpio_regmap_config); + if (IS_ERR(chip->regs)) + return PTR_ERR(chip->regs); + + ngpio = of_irq_count(node); + if (ngpio >= SIFIVE_GPIO_MAX) { + dev_err(dev, "Too many GPIO interrupts (max=%d)\n", + SIFIVE_GPIO_MAX); + return -ENXIO; + } + + irq_parent = of_irq_find_parent(node); + if (!irq_parent) { + dev_err(dev, "no IRQ parent node\n"); + return -ENODEV; + } + parent = irq_find_host(irq_parent); + if (!parent) { + dev_err(dev, "no IRQ parent domain\n"); + return -ENODEV; + } + + ret = bgpio_init(&chip->gc, dev, 4, + chip->base + SIFIVE_GPIO_INPUT_VAL, + chip->base + SIFIVE_GPIO_OUTPUT_VAL, + NULL, + chip->base + SIFIVE_GPIO_OUTPUT_EN, + chip->base + SIFIVE_GPIO_INPUT_EN, + 0); + if (ret) { + dev_err(dev, "unable to init generic GPIO\n"); + return ret; + } + + /* Disable all GPIO interrupts before enabling parent interrupts */ + regmap_write(chip->regs, SIFIVE_GPIO_RISE_IE, 0); + regmap_write(chip->regs, SIFIVE_GPIO_FALL_IE, 0); + regmap_write(chip->regs, SIFIVE_GPIO_HIGH_IE, 0); + regmap_write(chip->regs, SIFIVE_GPIO_LOW_IE, 0); + chip->irq_state = 0; + + chip->gc.base = -1; + chip->gc.ngpio = ngpio; + chip->gc.label = dev_name(dev); + chip->gc.parent = dev; + chip->gc.owner = THIS_MODULE; + girq = &chip->gc.irq; + girq->chip = &sifive_gpio_irqchip; + girq->fwnode = of_node_to_fwnode(node); + girq->parent_domain = parent; + girq->child_to_parent_hwirq = sifive_gpio_child_to_parent_hwirq; + girq->handler = handle_bad_irq; + girq->default_type = IRQ_TYPE_NONE; + + platform_set_drvdata(pdev, chip); + return gpiochip_add_data(&chip->gc, chip); +} + +static const struct of_device_id sifive_gpio_match[] = { + { .compatible = "sifive,gpio0" }, + { .compatible = "sifive,fu540-c000-gpio" }, + { }, +}; + +static struct platform_driver sifive_gpio_driver = { + .probe = sifive_gpio_probe, + .driver = { + .name = "sifive_gpio", + .of_match_table = of_match_ptr(sifive_gpio_match), + }, +}; +builtin_platform_driver(sifive_gpio_driver) diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index b696e4598a24..1b3f217a35e2 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -132,27 +132,6 @@ static void of_gpio_flags_quirks(struct device_node *np, int index) { /* - * Handle MMC "cd-inverted" and "wp-inverted" semantics. - */ - if (IS_ENABLED(CONFIG_MMC)) { - /* - * Active low is the default according to the - * SDHCI specification and the device tree - * bindings. However the code in the current - * kernel was written such that the phandle - * flags were always respected, and "cd-inverted" - * would invert the flag from the device phandle. - */ - if (!strcmp(propname, "cd-gpios")) { - if (of_property_read_bool(np, "cd-inverted")) - *flags ^= OF_GPIO_ACTIVE_LOW; - } - if (!strcmp(propname, "wp-gpios")) { - if (of_property_read_bool(np, "wp-inverted")) - *flags ^= OF_GPIO_ACTIVE_LOW; - } - } - /* * Some GPIO fixed regulator quirks. * Note that active low is the default. 
*/ diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 78a16e42f222..bcfbfded9ba3 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -3371,6 +3371,17 @@ int gpiod_is_active_low(const struct gpio_desc *desc) } EXPORT_SYMBOL_GPL(gpiod_is_active_low); +/** + * gpiod_toggle_active_low - toggle whether a GPIO is active-low or not + * @desc: the gpio descriptor to change + */ +void gpiod_toggle_active_low(struct gpio_desc *desc) +{ + VALIDATE_DESC_VOID(desc); + change_bit(FLAG_ACTIVE_LOW, &desc->flags); +} +EXPORT_SYMBOL_GPL(gpiod_toggle_active_low); + /* I/O calls are only valid after configuration completed; the relevant * "is this a valid GPIO" error checks should already have been done. * diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c index afaf4bea21cf..9278bcfad1bf 100644 --- a/drivers/gpu/drm/gma500/gtt.c +++ b/drivers/gpu/drm/gma500/gtt.c @@ -503,7 +503,7 @@ int psb_gtt_init(struct drm_device *dev, int resume) * Map the GTT and the stolen memory area */ if (!resume) - dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start, + dev_priv->gtt_map = ioremap(pg->gtt_phys_start, gtt_pages << PAGE_SHIFT); if (!dev_priv->gtt_map) { dev_err(dev->dev, "Failure to map gtt.\n"); diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index 7005f8f69c68..0900052fc484 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -256,7 +256,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags) PSB_AUX_RESOURCE); resource_len = pci_resource_len(dev_priv->aux_pdev, PSB_AUX_RESOURCE); - dev_priv->aux_reg = ioremap_nocache(resource_start, + dev_priv->aux_reg = ioremap(resource_start, resource_len); if (!dev_priv->aux_reg) goto out_err; diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c index 2fd4ca91a62d..8dd5a43e5486 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c @@ -211,7 +211,7 @@ static int hibmc_hw_map(struct hibmc_drm_private *priv) ioaddr = pci_resource_start(pdev, 1); iosize = pci_resource_len(pdev, 1); - priv->mmio = devm_ioremap_nocache(dev->dev, ioaddr, iosize); + priv->mmio = devm_ioremap(dev->dev, ioaddr, iosize); if (!priv->mmio) { DRM_ERROR("Cannot map mmio region\n"); return -ENOMEM; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 44727806dfd7..d6ce57d30958 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2849,7 +2849,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) * readback check when writing GTT PTE entries. 
*/ if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) - ggtt->gsm = ioremap_nocache(phys_addr, size); + ggtt->gsm = ioremap(phys_addr, size); else ggtt->gsm = ioremap_wc(phys_addr, size); if (!ggtt->gsm) { diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index c84f0a8b3f2c..ac678ace09a3 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -138,7 +138,7 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name, size = resource_size(res); - ptr = devm_ioremap_nocache(&pdev->dev, res->start, size); + ptr = devm_ioremap(&pdev->dev, res->start, size); if (!ptr) { DRM_DEV_ERROR(&pdev->dev, "failed to ioremap: %s\n", name); return ERR_PTR(-ENOMEM); diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 098bc9f40b98..15bf8a207cb0 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -443,7 +443,7 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_ mem->bus.size); else mem->bus.addr = - ioremap_nocache(mem->bus.base + mem->bus.offset, + ioremap(mem->bus.base + mem->bus.offset, mem->bus.size); if (!mem->bus.addr) return -ENOMEM; diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c index 68289b0b063a..68261c7f8c5f 100644 --- a/drivers/gpu/drm/sti/sti_dvo.c +++ b/drivers/gpu/drm/sti/sti_dvo.c @@ -534,7 +534,7 @@ static int sti_dvo_probe(struct platform_device *pdev) DRM_ERROR("Invalid dvo resource\n"); return -ENOMEM; } - dvo->regs = devm_ioremap_nocache(dev, res->start, + dvo->regs = devm_ioremap(dev, res->start, resource_size(res)); if (!dvo->regs) return -ENOMEM; diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c index 8f7bf33815fd..2bb32009d117 100644 --- a/drivers/gpu/drm/sti/sti_hda.c +++ b/drivers/gpu/drm/sti/sti_hda.c @@ -759,14 +759,14 @@ static int sti_hda_probe(struct platform_device *pdev) DRM_ERROR("Invalid hda resource\n"); return -ENOMEM; } - hda->regs = devm_ioremap_nocache(dev, res->start, resource_size(res)); + hda->regs = devm_ioremap(dev, res->start, resource_size(res)); if (!hda->regs) return -ENOMEM; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "video-dacs-ctrl"); if (res) { - hda->video_dacs_ctrl = devm_ioremap_nocache(dev, res->start, + hda->video_dacs_ctrl = devm_ioremap(dev, res->start, resource_size(res)); if (!hda->video_dacs_ctrl) return -ENOMEM; diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c index 814560ead4e1..64ed102033c8 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.c +++ b/drivers/gpu/drm/sti/sti_hdmi.c @@ -1393,7 +1393,7 @@ static int sti_hdmi_probe(struct platform_device *pdev) ret = -ENOMEM; goto release_adapter; } - hdmi->regs = devm_ioremap_nocache(dev, res->start, resource_size(res)); + hdmi->regs = devm_ioremap(dev, res->start, resource_size(res)); if (!hdmi->regs) { ret = -ENOMEM; goto release_adapter; diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c index 5767e93dd1cd..c36a8da373cb 100644 --- a/drivers/gpu/drm/sti/sti_tvout.c +++ b/drivers/gpu/drm/sti/sti_tvout.c @@ -860,7 +860,7 @@ static int sti_tvout_probe(struct platform_device *pdev) DRM_ERROR("Invalid glue resource\n"); return -ENOMEM; } - tvout->regs = devm_ioremap_nocache(dev, res->start, resource_size(res)); + tvout->regs = devm_ioremap(dev, res->start, resource_size(res)); if (!tvout->regs) return -ENOMEM; diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c index 
0b17ac8a3faa..5e5f82b6a5d9 100644 --- a/drivers/gpu/drm/sti/sti_vtg.c +++ b/drivers/gpu/drm/sti/sti_vtg.c @@ -393,7 +393,7 @@ static int vtg_probe(struct platform_device *pdev) DRM_ERROR("Get memory resource failed\n"); return -ENOMEM; } - vtg->regs = devm_ioremap_nocache(dev, res->start, resource_size(res)); + vtg->regs = devm_ioremap(dev, res->start, resource_size(res)); if (!vtg->regs) { DRM_ERROR("failed to remap I/O memory\n"); return -ENOMEM; diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c index 2a9e67597375..a3612369750f 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c @@ -256,7 +256,7 @@ static int tilcdc_init(struct drm_driver *ddrv, struct device *dev) goto init_failed; } - priv->mmio = ioremap_nocache(res->start, resource_size(res)); + priv->mmio = ioremap(res->start, resource_size(res)); if (!priv->mmio) { dev_err(dev, "failed to ioremap\n"); ret = -ENOMEM; diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 6b0883a1776e..97fd1dafc3e8 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -218,7 +218,7 @@ static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *m if (mem->placement & TTM_PL_FLAG_WC) addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size); else - addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size); + addr = ioremap(mem->bus.base + mem->bus.offset, mem->bus.size); if (!addr) { (void) ttm_mem_io_lock(man, false); ttm_mem_io_free(bdev, mem); @@ -565,7 +565,7 @@ static int ttm_bo_ioremap(struct ttm_buffer_object *bo, map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset, size); else - map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset, + map->virtual = ioremap(bo->mem.bus.base + bo->mem.bus.offset + offset, size); } return (!map->virtual) ? 
-ENOMEM : 0; diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index cd9193078525..70e1cb928bf0 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -49,6 +49,10 @@ MODULE_PARM_DESC(disable_tap_to_click, #define HIDPP_REPORT_LONG_LENGTH 20 #define HIDPP_REPORT_VERY_LONG_MAX_LENGTH 64 +#define HIDPP_REPORT_SHORT_SUPPORTED BIT(0) +#define HIDPP_REPORT_LONG_SUPPORTED BIT(1) +#define HIDPP_REPORT_VERY_LONG_SUPPORTED BIT(2) + #define HIDPP_SUB_ID_CONSUMER_VENDOR_KEYS 0x03 #define HIDPP_SUB_ID_ROLLER 0x05 #define HIDPP_SUB_ID_MOUSE_EXTRA_BTNS 0x06 @@ -87,6 +91,7 @@ MODULE_PARM_DESC(disable_tap_to_click, #define HIDPP_CAPABILITY_HIDPP20_BATTERY BIT(1) #define HIDPP_CAPABILITY_BATTERY_MILEAGE BIT(2) #define HIDPP_CAPABILITY_BATTERY_LEVEL_STATUS BIT(3) +#define HIDPP_CAPABILITY_BATTERY_VOLTAGE BIT(4) /* * There are two hidpp protocols in use, the first version hidpp10 is known @@ -135,12 +140,15 @@ struct hidpp_report { struct hidpp_battery { u8 feature_index; u8 solar_feature_index; + u8 voltage_feature_index; struct power_supply_desc desc; struct power_supply *ps; char name[64]; int status; int capacity; int level; + int voltage; + int charge_type; bool online; }; @@ -183,9 +191,12 @@ struct hidpp_device { unsigned long quirks; unsigned long capabilities; + u8 supported_reports; struct hidpp_battery battery; struct hidpp_scroll_counter vertical_wheel_counter; + + u8 wireless_feature_index; }; /* HID++ 1.0 error codes */ @@ -340,6 +351,11 @@ static int hidpp_send_rap_command_sync(struct hidpp_device *hidpp_dev, struct hidpp_report *message; int ret, max_count; + /* Send as long report if short reports are not supported. */ + if (report_id == REPORT_ID_HIDPP_SHORT && + !(hidpp_dev->supported_reports & HIDPP_REPORT_SHORT_SUPPORTED)) + report_id = REPORT_ID_HIDPP_LONG; + switch (report_id) { case REPORT_ID_HIDPP_SHORT: max_count = HIDPP_REPORT_SHORT_LENGTH - 4; @@ -393,10 +409,13 @@ static inline bool hidpp_match_error(struct hidpp_report *question, (answer->fap.params[0] == question->fap.funcindex_clientid); } -static inline bool hidpp_report_is_connect_event(struct hidpp_report *report) +static inline bool hidpp_report_is_connect_event(struct hidpp_device *hidpp, + struct hidpp_report *report) { - return (report->report_id == REPORT_ID_HIDPP_SHORT) && - (report->rap.sub_id == 0x41); + return (hidpp->wireless_feature_index && + (report->fap.feature_index == hidpp->wireless_feature_index)) || + ((report->report_id == REPORT_ID_HIDPP_SHORT) && + (report->rap.sub_id == 0x41)); } /** @@ -1222,6 +1241,144 @@ static int hidpp20_battery_event(struct hidpp_device *hidpp, return 0; } +/* -------------------------------------------------------------------------- */ +/* 0x1001: Battery voltage */ +/* -------------------------------------------------------------------------- */ + +#define HIDPP_PAGE_BATTERY_VOLTAGE 0x1001 + +#define CMD_BATTERY_VOLTAGE_GET_BATTERY_VOLTAGE 0x00 + +#define EVENT_BATTERY_VOLTAGE_STATUS_BROADCAST 0x00 + +static int hidpp20_battery_map_status_voltage(u8 data[3], int *voltage, + int *level, int *charge_type) +{ + int status; + + long charge_sts = (long)data[2]; + + *level = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN; + switch (data[2] & 0xe0) { + case 0x00: + status = POWER_SUPPLY_STATUS_CHARGING; + break; + case 0x20: + status = POWER_SUPPLY_STATUS_FULL; + *level = POWER_SUPPLY_CAPACITY_LEVEL_FULL; + break; + case 0x40: + status = POWER_SUPPLY_STATUS_DISCHARGING; + break; + case 0xe0: + status = 
POWER_SUPPLY_STATUS_NOT_CHARGING; + break; + default: + status = POWER_SUPPLY_STATUS_UNKNOWN; + } + + *charge_type = POWER_SUPPLY_CHARGE_TYPE_STANDARD; + if (test_bit(3, &charge_sts)) { + *charge_type = POWER_SUPPLY_CHARGE_TYPE_FAST; + } + if (test_bit(4, &charge_sts)) { + *charge_type = POWER_SUPPLY_CHARGE_TYPE_TRICKLE; + } + + if (test_bit(5, &charge_sts)) { + *level = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL; + } + + *voltage = get_unaligned_be16(data); + + return status; +} + +static int hidpp20_battery_get_battery_voltage(struct hidpp_device *hidpp, + u8 feature_index, + int *status, int *voltage, + int *level, int *charge_type) +{ + struct hidpp_report response; + int ret; + u8 *params = (u8 *)response.fap.params; + + ret = hidpp_send_fap_command_sync(hidpp, feature_index, + CMD_BATTERY_VOLTAGE_GET_BATTERY_VOLTAGE, + NULL, 0, &response); + + if (ret > 0) { + hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n", + __func__, ret); + return -EPROTO; + } + if (ret) + return ret; + + hidpp->capabilities |= HIDPP_CAPABILITY_BATTERY_VOLTAGE; + + *status = hidpp20_battery_map_status_voltage(params, voltage, + level, charge_type); + + return 0; +} + +static int hidpp20_query_battery_voltage_info(struct hidpp_device *hidpp) +{ + u8 feature_type; + int ret; + int status, voltage, level, charge_type; + + if (hidpp->battery.voltage_feature_index == 0xff) { + ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_BATTERY_VOLTAGE, + &hidpp->battery.voltage_feature_index, + &feature_type); + if (ret) + return ret; + } + + ret = hidpp20_battery_get_battery_voltage(hidpp, + hidpp->battery.voltage_feature_index, + &status, &voltage, &level, &charge_type); + + if (ret) + return ret; + + hidpp->battery.status = status; + hidpp->battery.voltage = voltage; + hidpp->battery.level = level; + hidpp->battery.charge_type = charge_type; + hidpp->battery.online = status != POWER_SUPPLY_STATUS_NOT_CHARGING; + + return 0; +} + +static int hidpp20_battery_voltage_event(struct hidpp_device *hidpp, + u8 *data, int size) +{ + struct hidpp_report *report = (struct hidpp_report *)data; + int status, voltage, level, charge_type; + + if (report->fap.feature_index != hidpp->battery.voltage_feature_index || + report->fap.funcindex_clientid != EVENT_BATTERY_VOLTAGE_STATUS_BROADCAST) + return 0; + + status = hidpp20_battery_map_status_voltage(report->fap.params, &voltage, + &level, &charge_type); + + hidpp->battery.online = status != POWER_SUPPLY_STATUS_NOT_CHARGING; + + if (voltage != hidpp->battery.voltage || status != hidpp->battery.status) { + hidpp->battery.voltage = voltage; + hidpp->battery.status = status; + hidpp->battery.level = level; + hidpp->battery.charge_type = charge_type; + if (hidpp->battery.ps) + power_supply_changed(hidpp->battery.ps); + } + return 0; +} + static enum power_supply_property hidpp_battery_props[] = { POWER_SUPPLY_PROP_ONLINE, POWER_SUPPLY_PROP_STATUS, @@ -1231,6 +1388,7 @@ static enum power_supply_property hidpp_battery_props[] = { POWER_SUPPLY_PROP_SERIAL_NUMBER, 0, /* placeholder for POWER_SUPPLY_PROP_CAPACITY, */ 0, /* placeholder for POWER_SUPPLY_PROP_CAPACITY_LEVEL, */ + 0, /* placeholder for POWER_SUPPLY_PROP_VOLTAGE_NOW, */ }; static int hidpp_battery_get_property(struct power_supply *psy, @@ -1268,6 +1426,13 @@ static int hidpp_battery_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_SERIAL_NUMBER: val->strval = hidpp->hid_dev->uniq; break; + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + /* hardware reports voltage in mV.
sysfs expects uV */ + val->intval = hidpp->battery.voltage * 1000; + break; + case POWER_SUPPLY_PROP_CHARGE_TYPE: + val->intval = hidpp->battery.charge_type; + break; default: ret = -EINVAL; break; @@ -1277,6 +1442,24 @@ static int hidpp_battery_get_property(struct power_supply *psy, } /* -------------------------------------------------------------------------- */ +/* 0x1d4b: Wireless device status */ +/* -------------------------------------------------------------------------- */ +#define HIDPP_PAGE_WIRELESS_DEVICE_STATUS 0x1d4b + +static int hidpp_set_wireless_feature_index(struct hidpp_device *hidpp) +{ + u8 feature_type; + int ret; + + ret = hidpp_root_get_feature(hidpp, + HIDPP_PAGE_WIRELESS_DEVICE_STATUS, + &hidpp->wireless_feature_index, + &feature_type); + + return ret; +} + +/* -------------------------------------------------------------------------- */ /* 0x2120: Hi-resolution scrolling */ /* -------------------------------------------------------------------------- */ @@ -3091,7 +3274,7 @@ static int hidpp_raw_hidpp_event(struct hidpp_device *hidpp, u8 *data, } } - if (unlikely(hidpp_report_is_connect_event(report))) { + if (unlikely(hidpp_report_is_connect_event(hidpp, report))) { atomic_set(&hidpp->connected, !(report->rap.params[0] & (1 << 6))); if (schedule_work(&hidpp->work) == 0) @@ -3106,6 +3289,9 @@ static int hidpp_raw_hidpp_event(struct hidpp_device *hidpp, u8 *data, ret = hidpp_solar_battery_event(hidpp, data, size); if (ret != 0) return ret; + ret = hidpp20_battery_voltage_event(hidpp, data, size); + if (ret != 0) + return ret; } if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP10_BATTERY) { @@ -3227,12 +3413,16 @@ static int hidpp_initialize_battery(struct hidpp_device *hidpp) hidpp->battery.feature_index = 0xff; hidpp->battery.solar_feature_index = 0xff; + hidpp->battery.voltage_feature_index = 0xff; if (hidpp->protocol_major >= 2) { if (hidpp->quirks & HIDPP_QUIRK_CLASS_K750) ret = hidpp_solar_request_battery_event(hidpp); - else - ret = hidpp20_query_battery_info(hidpp); + else { + ret = hidpp20_query_battery_voltage_info(hidpp); + if (ret) + ret = hidpp20_query_battery_info(hidpp); + } if (ret) return ret; @@ -3257,7 +3447,7 @@ static int hidpp_initialize_battery(struct hidpp_device *hidpp) if (!battery_props) return -ENOMEM; - num_battery_props = ARRAY_SIZE(hidpp_battery_props) - 2; + num_battery_props = ARRAY_SIZE(hidpp_battery_props) - 3; if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_MILEAGE) battery_props[num_battery_props++] = @@ -3267,6 +3457,10 @@ static int hidpp_initialize_battery(struct hidpp_device *hidpp) battery_props[num_battery_props++] = POWER_SUPPLY_PROP_CAPACITY_LEVEL; + if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_VOLTAGE) + battery_props[num_battery_props++] = + POWER_SUPPLY_PROP_VOLTAGE_NOW; + battery = &hidpp->battery; n = atomic_inc_return(&battery_no) - 1; @@ -3430,7 +3624,10 @@ static void hidpp_connect_event(struct hidpp_device *hidpp) else hidpp10_query_battery_status(hidpp); } else if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP20_BATTERY) { - hidpp20_query_battery_info(hidpp); + if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_VOLTAGE) + hidpp20_query_battery_voltage_info(hidpp); + else + hidpp20_query_battery_info(hidpp); } if (hidpp->battery.ps) power_supply_changed(hidpp->battery.ps); @@ -3481,10 +3678,11 @@ static int hidpp_get_report_length(struct hid_device *hdev, int id) return report->field[0]->report_count + 1; } -static bool hidpp_validate_device(struct hid_device *hdev) +static u8 
hidpp_validate_device(struct hid_device *hdev) { struct hidpp_device *hidpp = hid_get_drvdata(hdev); - int id, report_length, supported_reports = 0; + int id, report_length; + u8 supported_reports = 0; id = REPORT_ID_HIDPP_SHORT; report_length = hidpp_get_report_length(hdev, id); @@ -3492,7 +3690,7 @@ static bool hidpp_validate_device(struct hid_device *hdev) if (report_length < HIDPP_REPORT_SHORT_LENGTH) goto bad_device; - supported_reports++; + supported_reports |= HIDPP_REPORT_SHORT_SUPPORTED; } id = REPORT_ID_HIDPP_LONG; @@ -3501,7 +3699,7 @@ static bool hidpp_validate_device(struct hid_device *hdev) if (report_length < HIDPP_REPORT_LONG_LENGTH) goto bad_device; - supported_reports++; + supported_reports |= HIDPP_REPORT_LONG_SUPPORTED; } id = REPORT_ID_HIDPP_VERY_LONG; @@ -3511,7 +3709,7 @@ static bool hidpp_validate_device(struct hid_device *hdev) report_length > HIDPP_REPORT_VERY_LONG_MAX_LENGTH) goto bad_device; - supported_reports++; + supported_reports |= HIDPP_REPORT_VERY_LONG_SUPPORTED; hidpp->very_long_report_length = report_length; } @@ -3560,7 +3758,9 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) /* * Make sure the device is HID++ capable, otherwise treat as generic HID */ - if (!hidpp_validate_device(hdev)) { + hidpp->supported_reports = hidpp_validate_device(hdev); + + if (!hidpp->supported_reports) { hid_set_drvdata(hdev, NULL); devm_kfree(&hdev->dev, hidpp); return hid_hw_start(hdev, HID_CONNECT_DEFAULT); @@ -3617,7 +3817,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) if (ret < 0) { dev_err(&hdev->dev, "%s:hid_hw_open returned error:%d\n", __func__, ret); - hid_hw_stop(hdev); goto hid_hw_open_fail; } @@ -3639,6 +3838,14 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) hidpp_overwrite_name(hdev); } + if (connected && hidpp->protocol_major >= 2) { + ret = hidpp_set_wireless_feature_index(hidpp); + if (ret == -ENOENT) + hidpp->wireless_feature_index = 0; + else if (ret) + goto hid_hw_init_fail; + } + if (connected && (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)) { ret = wtp_get_config(hidpp); if (ret) @@ -3752,6 +3959,8 @@ static const struct hid_device_id hidpp_devices[] = { { LDJ_DEVICE(0x4071), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, { /* Mouse Logitech MX Master 2S */ LDJ_DEVICE(0x4069), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, + { /* Mouse Logitech MX Master 3 */ + LDJ_DEVICE(0x4082), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, { /* Mouse Logitech Performance MX */ LDJ_DEVICE(0x101a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 }, { /* Keyboard logitech K400 */ @@ -3808,6 +4017,14 @@ static const struct hid_device_id hidpp_devices[] = { { /* MX5500 keyboard over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb30b), .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS }, + { /* MX Master mouse over Bluetooth */ + HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb012), + .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01e), + .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, + { /* MX Master 3 mouse over Bluetooth */ + HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb023), + .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, {} }; diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index 7a75aff78388..2eee5e31c2b7 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c @@ -451,6 +451,15 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd, -EFAULT : len; break; 
} + + if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGRAWUNIQ(0))) { + int len = strlen(hid->uniq) + 1; + if (len > _IOC_SIZE(cmd)) + len = _IOC_SIZE(cmd); + ret = copy_to_user(user_arg, hid->uniq, len) ? + -EFAULT : len; + break; + } } ret = -ENOTTY; diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c index 766bd8457346..296f9098c9e4 100644 --- a/drivers/hv/hv_util.c +++ b/drivers/hv/hv_util.c @@ -211,7 +211,7 @@ static struct timespec64 hv_get_adj_host_time(void) unsigned long flags; spin_lock_irqsave(&host_ts.lock, flags); - reftime = hyperv_cs->read(hyperv_cs); + reftime = hv_read_reference_counter(); newtime = host_ts.host_time + (reftime - host_ts.ref_time); ts = ns_to_timespec64((newtime - WLTIMEDELTA) * 100); spin_unlock_irqrestore(&host_ts.lock, flags); @@ -250,7 +250,7 @@ static inline void adj_guesttime(u64 hosttime, u64 reftime, u8 adj_flags) */ spin_lock_irqsave(&host_ts.lock, flags); - cur_reftime = hyperv_cs->read(hyperv_cs); + cur_reftime = hv_read_reference_counter(); host_ts.host_time = hosttime; host_ts.ref_time = cur_reftime; @@ -315,7 +315,7 @@ static void timesync_onchannelcallback(void *context) sizeof(struct vmbuspipe_hdr) + sizeof(struct icmsg_hdr)]; adj_guesttime(timedatap->parenttime, - hyperv_cs->read(hyperv_cs), + hv_read_reference_counter(), timedatap->flags); } } @@ -524,7 +524,7 @@ static struct ptp_clock *hv_ptp_clock; static int hv_timesync_init(struct hv_util_service *srv) { /* TimeSync requires Hyper-V clocksource. */ - if (!hyperv_cs) + if (!hv_read_reference_counter) return -ENODEV; spin_lock_init(&host_ts.lock); diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 23dfe848979a..47ac20aee06f 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -164,6 +164,16 @@ config SENSORS_ADM1031 This driver can also be built as a module. If so, the module will be called adm1031. +config SENSORS_ADM1177 + tristate "Analog Devices ADM1177 and compatibles" + depends on I2C + help + If you say yes here you get support for Analog Devices ADM1177 + sensor chips. + + This driver can also be built as a module. If so, the module + will be called adm1177. + config SENSORS_ADM9240 tristate "Analog Devices ADM9240 and compatibles" depends on I2C @@ -385,6 +395,16 @@ config SENSORS_ATXP1 This driver can also be built as a module. If so, the module will be called atxp1. +config SENSORS_DRIVETEMP + tristate "Hard disk drives with temperature sensors" + depends on SCSI && ATA + help + If you say yes you get support for the temperature sensor on + hard disk drives. + + This driver can also be built as a module. If so, the module + will be called drivetemp. + config SENSORS_DS620 tristate "Dallas Semiconductor DS620" depends on I2C @@ -889,7 +909,7 @@ config SENSORS_MAX197 will be called max197. config SENSORS_MAX31722 -tristate "MAX31722 temperature sensor" + tristate "MAX31722 temperature sensor" depends on SPI help Support for the Maxim Integrated MAX31722/MAX31723 digital @@ -898,6 +918,16 @@ tristate "MAX31722 temperature sensor" This driver can also be built as a module. If so, the module will be called max31722. +config SENSORS_MAX31730 + tristate "MAX31730 temperature sensor" + depends on I2C + help + Support for the Maxim Integrated MAX31730 3-Channel Remote + Temperature Sensor. + + This driver can also be built as a module. If so, the module + will be called max31730. + config SENSORS_MAX6621 tristate "Maxim MAX6621 sensor chip" depends on I2C @@ -1905,7 +1935,7 @@ config SENSORS_W83627HF will be called w83627hf.
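Returning to the hidraw hunk above: HIDIOCGRAWUNIQ rounds out the string-returning ioctls alongside HIDIOCGRAWNAME and HIDIOCGRAWPHYS. A small userspace sketch, assuming a kernel with this patch and a <linux/hidraw.h> that defines HIDIOCGRAWUNIQ (the device node path is arbitrary):

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/hidraw.h>

	int main(void)
	{
		char uniq[64] = "";
		int fd = open("/dev/hidraw0", O_RDONLY);

		if (fd < 0)
			return 1;
		/* on success the ioctl returns the number of bytes copied */
		if (ioctl(fd, HIDIOCGRAWUNIQ(sizeof(uniq)), uniq) >= 0)
			printf("uniq: %s\n", uniq);
		close(fd);
		return 0;
	}

As with the neighboring ioctls, the kernel truncates the string to the length encoded in the ioctl number, which is why the buffer size is passed through the HIDIOCGRAWUNIQ() macro.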
config SENSORS_W83627EHF - tristate "Winbond W83627EHF/EHG/DHG/UHG, W83667HG, NCT6775F, NCT6776F" + tristate "Winbond W83627EHF/EHG/DHG/UHG, W83667HG" depends on !PPC select HWMON_VID help @@ -1918,8 +1948,7 @@ config SENSORS_W83627EHF the Core 2 Duo. And also the W83627UHG, which is a stripped down version of the W83627DHG (as far as hardware monitoring goes.) - This driver also supports Nuvoton W83667HG, W83667HG-B, NCT6775F - (also known as W83667HG-I), and NCT6776F. + This driver also supports Nuvoton W83667HG and W83667HG-B. This driver can also be built as a module. If so, the module will be called w83627ehf. diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index 6db5db9cdc29..613f50987965 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -34,6 +34,7 @@ obj-$(CONFIG_SENSORS_ADM1025) += adm1025.o obj-$(CONFIG_SENSORS_ADM1026) += adm1026.o obj-$(CONFIG_SENSORS_ADM1029) += adm1029.o obj-$(CONFIG_SENSORS_ADM1031) += adm1031.o +obj-$(CONFIG_SENSORS_ADM1177) += adm1177.o obj-$(CONFIG_SENSORS_ADM9240) += adm9240.o obj-$(CONFIG_SENSORS_ADS7828) += ads7828.o obj-$(CONFIG_SENSORS_ADS7871) += ads7871.o @@ -56,6 +57,7 @@ obj-$(CONFIG_SENSORS_DA9052_ADC)+= da9052-hwmon.o obj-$(CONFIG_SENSORS_DA9055)+= da9055-hwmon.o obj-$(CONFIG_SENSORS_DELL_SMM) += dell-smm-hwmon.o obj-$(CONFIG_SENSORS_DME1737) += dme1737.o +obj-$(CONFIG_SENSORS_DRIVETEMP) += drivetemp.o obj-$(CONFIG_SENSORS_DS620) += ds620.o obj-$(CONFIG_SENSORS_DS1621) += ds1621.o obj-$(CONFIG_SENSORS_EMC1403) += emc1403.o @@ -123,6 +125,7 @@ obj-$(CONFIG_SENSORS_MAX1619) += max1619.o obj-$(CONFIG_SENSORS_MAX1668) += max1668.o obj-$(CONFIG_SENSORS_MAX197) += max197.o obj-$(CONFIG_SENSORS_MAX31722) += max31722.o +obj-$(CONFIG_SENSORS_MAX31730) += max31730.o obj-$(CONFIG_SENSORS_MAX6621) += max6621.o obj-$(CONFIG_SENSORS_MAX6639) += max6639.o obj-$(CONFIG_SENSORS_MAX6642) += max6642.o diff --git a/drivers/hwmon/adm1177.c b/drivers/hwmon/adm1177.c new file mode 100644 index 000000000000..d314223a404a --- /dev/null +++ b/drivers/hwmon/adm1177.c @@ -0,0 +1,288 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ADM1177 Hot Swap Controller and Digital Power Monitor with Soft Start Pin + * + * Copyright 2015-2019 Analog Devices Inc. 
+ */ + +#include <linux/bits.h> +#include <linux/device.h> +#include <linux/hwmon.h> +#include <linux/i2c.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/regulator/consumer.h> + +/* Command Byte Operations */ +#define ADM1177_CMD_V_CONT BIT(0) +#define ADM1177_CMD_I_CONT BIT(2) +#define ADM1177_CMD_VRANGE BIT(4) + +/* Extended Register */ +#define ADM1177_REG_ALERT_TH 2 + +#define ADM1177_BITS 12 + +/** + * struct adm1177_state - driver instance specific data + * @client: pointer to i2c client + * @reg: regulator info for the power supply of the device + * @r_sense_uohm: current sense resistor value + * @alert_threshold_ua: current limit for shutdown + * @vrange_high: internal voltage divider + */ +struct adm1177_state { + struct i2c_client *client; + struct regulator *reg; + u32 r_sense_uohm; + u32 alert_threshold_ua; + bool vrange_high; +}; + +static int adm1177_read_raw(struct adm1177_state *st, u8 num, u8 *data) +{ + return i2c_master_recv(st->client, data, num); +} + +static int adm1177_write_cmd(struct adm1177_state *st, u8 cmd) +{ + return i2c_smbus_write_byte(st->client, cmd); +} + +static int adm1177_write_alert_thr(struct adm1177_state *st, + u32 alert_threshold_ua) +{ + u64 val; + int ret; + + val = 0xFFULL * alert_threshold_ua * st->r_sense_uohm; + val = div_u64(val, 105840000U); + val = div_u64(val, 1000U); + if (val > 0xFF) + val = 0xFF; + + ret = i2c_smbus_write_byte_data(st->client, ADM1177_REG_ALERT_TH, + val); + if (ret) + return ret; + + st->alert_threshold_ua = alert_threshold_ua; + return 0; +} + +static int adm1177_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + struct adm1177_state *st = dev_get_drvdata(dev); + u8 data[3]; + long dummy; + int ret; + + switch (type) { + case hwmon_curr: + switch (attr) { + case hwmon_curr_input: + ret = adm1177_read_raw(st, 3, data); + if (ret < 0) + return ret; + dummy = (data[1] << 4) | (data[2] & 0xF); + /* + * convert to milliamperes + * ((105.84mV / 4096) x raw) / senseResistor(ohm) + */ + *val = div_u64((105840000ull * dummy), + 4096 * st->r_sense_uohm); + return 0; + case hwmon_curr_max_alarm: + *val = st->alert_threshold_ua; + return 0; + default: + return -EOPNOTSUPP; + } + case hwmon_in: + ret = adm1177_read_raw(st, 3, data); + if (ret < 0) + return ret; + dummy = (data[0] << 4) | (data[2] >> 4); + /* + * convert to millivolts based on resistor division + * (V_fullscale / 4096) * raw + */ + if (st->vrange_high) + dummy *= 26350; + else + dummy *= 6650; + + *val = DIV_ROUND_CLOSEST(dummy, 4096); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int adm1177_write(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long val) +{ + struct adm1177_state *st = dev_get_drvdata(dev); + + switch (type) { + case hwmon_curr: + switch (attr) { + case hwmon_curr_max_alarm: + adm1177_write_alert_thr(st, val); + return 0; + default: + return -EOPNOTSUPP; + } + default: + return -EOPNOTSUPP; + } +} + +static umode_t adm1177_is_visible(const void *data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + const struct adm1177_state *st = data; + + switch (type) { + case hwmon_in: + switch (attr) { + case hwmon_in_input: + return 0444; + } + break; + case hwmon_curr: + switch (attr) { + case hwmon_curr_input: + if (st->r_sense_uohm) + return 0444; + return 0; + case hwmon_curr_max_alarm: + if (st->r_sense_uohm) + return 0644; + return 0; + } + break; + default: + break; + } + return 0; +} + +static const struct hwmon_channel_info
*adm1177_info[] = { + HWMON_CHANNEL_INFO(curr, + HWMON_C_INPUT | HWMON_C_MAX_ALARM), + HWMON_CHANNEL_INFO(in, + HWMON_I_INPUT), + NULL +}; + +static const struct hwmon_ops adm1177_hwmon_ops = { + .is_visible = adm1177_is_visible, + .read = adm1177_read, + .write = adm1177_write, +}; + +static const struct hwmon_chip_info adm1177_chip_info = { + .ops = &adm1177_hwmon_ops, + .info = adm1177_info, +}; + +static void adm1177_remove(void *data) +{ + struct adm1177_state *st = data; + + regulator_disable(st->reg); +} + +static int adm1177_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + struct device *hwmon_dev; + struct adm1177_state *st; + u32 alert_threshold_ua; + int ret; + + st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL); + if (!st) + return -ENOMEM; + + st->client = client; + + st->reg = devm_regulator_get_optional(&client->dev, "vref"); + if (IS_ERR(st->reg)) { + if (PTR_ERR(st->reg) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + st->reg = NULL; + } else { + ret = regulator_enable(st->reg); + if (ret) + return ret; + ret = devm_add_action_or_reset(&client->dev, adm1177_remove, + st); + if (ret) + return ret; + } + + if (device_property_read_u32(dev, "shunt-resistor-micro-ohms", + &st->r_sense_uohm)) + st->r_sense_uohm = 0; + if (device_property_read_u32(dev, "adi,shutdown-threshold-microamp", + &alert_threshold_ua)) { + if (st->r_sense_uohm) + /* + * set maximum default value from datasheet based on + * shunt-resistor + */ + alert_threshold_ua = div_u64(105840000000, + st->r_sense_uohm); + else + alert_threshold_ua = 0; + } + st->vrange_high = device_property_read_bool(dev, + "adi,vrange-high-enable"); + if (alert_threshold_ua && st->r_sense_uohm) + adm1177_write_alert_thr(st, alert_threshold_ua); + + ret = adm1177_write_cmd(st, ADM1177_CMD_V_CONT | + ADM1177_CMD_I_CONT | + (st->vrange_high ? 0 : ADM1177_CMD_VRANGE)); + if (ret) + return ret; + + hwmon_dev = + devm_hwmon_device_register_with_info(dev, client->name, st, + &adm1177_chip_info, NULL); + return PTR_ERR_OR_ZERO(hwmon_dev); +} + +static const struct i2c_device_id adm1177_id[] = { + {"adm1177", 0}, + {} +}; +MODULE_DEVICE_TABLE(i2c, adm1177_id); + +static const struct of_device_id adm1177_dt_ids[] = { + { .compatible = "adi,adm1177" }, + {}, +}; +MODULE_DEVICE_TABLE(of, adm1177_dt_ids); + +static struct i2c_driver adm1177_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "adm1177", + .of_match_table = adm1177_dt_ids, + }, + .probe = adm1177_probe, + .id_table = adm1177_id, +}; +module_i2c_driver(adm1177_driver); + +MODULE_AUTHOR("Beniamin Bia <beniamin.bia@analog.com>"); +MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>"); +MODULE_DESCRIPTION("Analog Devices ADM1177 ADC driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/hwmon/drivetemp.c b/drivers/hwmon/drivetemp.c new file mode 100644 index 000000000000..370d0c74eb01 --- /dev/null +++ b/drivers/hwmon/drivetemp.c @@ -0,0 +1,574 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Hwmon client for disk and solid state drives with temperature sensors + * Copyright (C) 2019 Zodiac Inflight Innovations + * + * With input from: + * Hwmon client for S.M.A.R.T. hard disk drives with temperature sensors. + * (C) 2018 Linus Walleij + * + * hwmon: Driver for SCSI/ATA temperature sensors + * by Constantin Baranov <const@mimas.ru>, submitted September 2009 + * + * This driver supports reporting the temperature of SATA drives. It can be + * easily extended to report the temperature of SCSI drives.
+ * + * The primary means to read drive temperatures and temperature limits + * for ATA drives is the SCT Command Transport feature set as specified in + * ATA8-ACS. + * It can be used to read the current drive temperature, temperature limits, + * and historic minimum and maximum temperatures. The SCT Command Transport + * feature set is documented in "AT Attachment 8 - ATA/ATAPI Command Set + * (ATA8-ACS)". + * + * If the SCT Command Transport feature set is not available, drive temperatures + * may be readable through SMART attributes. Since SMART attributes are not well + * defined, this method is only used as a fallback mechanism. + * + * There are three SMART attributes which may report drive temperatures. + * Those are defined as follows (from + * http://www.cropel.com/library/smart-attribute-list.aspx). + * + * 190 Temperature Temperature, monitored by a sensor somewhere inside + * the drive. Raw value typically holds the actual + * temperature (hexadecimal) in its rightmost two digits. + * + * 194 Temperature Temperature, monitored by a sensor somewhere inside + * the drive. Raw value typically holds the actual + * temperature (hexadecimal) in its rightmost two digits. + * + * 231 Temperature Temperature, monitored by a sensor somewhere inside + * the drive. Raw value typically holds the actual + * temperature (hexadecimal) in its rightmost two digits. + * + * Wikipedia defines attributes a bit differently. + * + * 190 Temperature Value is equal to (100-temp. °C), allowing manufacturer + * Difference or to set a minimum threshold which corresponds to a + * Airflow maximum temperature. This also follows the convention of + * Temperature 100 being a best-case value and lower values being + * undesirable. However, some older drives may instead + * report raw Temperature (identical to 0xC2) or + * Temperature minus 50 here. + * 194 Temperature or Indicates the device temperature, if the appropriate + * Temperature sensor is fitted. Lowest byte of the raw value contains + * Celsius the exact temperature value (Celsius degrees). + * 231 Life Left Indicates the approximate SSD life left, in terms of + * (SSDs) or program/erase cycles or available reserved blocks. + * Temperature A normalized value of 100 represents a new drive, with + * a threshold value at 10 indicating a need for + * replacement. A value of 0 may mean that the drive is + * operating in read-only mode to allow data recovery. + * Previously (pre-2010) occasionally used for Drive + * Temperature (more typically reported at 0xC2). + * + * Common denominator is that the first raw byte reports the temperature + * in degrees C on almost all drives. Some drives may report a fractional + * temperature in the second raw byte. + * + * Known exceptions (from libatasmart): + * - SAMSUNG SV0412H and SAMSUNG SV1204H report the temperature in tenths + * of a degree C in the first two raw bytes. + * - A few Maxtor drives report an unknown or bad value in attribute 194. + * - Certain Apple SSD drives report an unknown value in attribute 190. + * Only certain firmware versions are affected. + * + * Those exceptions affect older ATA drives and are currently ignored. + * Also, the second raw byte (possibly reporting the fractional temperature) + * is currently ignored. + * + * Many drives also report temperature limits in additional SMART data raw + * bytes. The format of those is not well defined and varies widely. + * The driver does not currently attempt to report those limits.
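+ * + * As a worked example of the common case (hypothetical drive, not from + * the sources above): if SMART attribute 194 carries a first raw byte of + * 0x28, the drive temperature is 0x28 = 40 degrees C, which this driver + * reports through hwmon as 40000 (millidegrees Celsius).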
+ * + * According to data in smartmontools, attribute 231 is rarely used to report + * drive temperatures. At the same time, several drives report SSD life left + * in attribute 231, but do not support temperature sensors. For this reason, + * attribute 231 is currently ignored. + * + * Following above definitions, temperatures are reported as follows. + * If SCT Command Transport is supported, it is used to read the + * temperature and, if available, temperature limits. + * - Otherwise, if SMART attribute 194 is supported, it is used to read + * the temperature. + * - Otherwise, if SMART attribute 190 is supported, it is used to read + * the temperature. + */ + +#include <linux/ata.h> +#include <linux/bits.h> +#include <linux/device.h> +#include <linux/hwmon.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_driver.h> +#include <scsi/scsi_proto.h> + +struct drivetemp_data { + struct list_head list; /* list of instantiated devices */ + struct mutex lock; /* protect data buffer accesses */ + struct scsi_device *sdev; /* SCSI device */ + struct device *dev; /* instantiating device */ + struct device *hwdev; /* hardware monitoring device */ + u8 smartdata[ATA_SECT_SIZE]; /* local buffer */ + int (*get_temp)(struct drivetemp_data *st, u32 attr, long *val); + bool have_temp_lowest; /* lowest temp in SCT status */ + bool have_temp_highest; /* highest temp in SCT status */ + bool have_temp_min; /* have min temp */ + bool have_temp_max; /* have max temp */ + bool have_temp_lcrit; /* have lower critical limit */ + bool have_temp_crit; /* have critical limit */ + int temp_min; /* min temp */ + int temp_max; /* max temp */ + int temp_lcrit; /* lower critical limit */ + int temp_crit; /* critical limit */ +}; + +static LIST_HEAD(drivetemp_devlist); + +#define ATA_MAX_SMART_ATTRS 30 +#define SMART_TEMP_PROP_190 190 +#define SMART_TEMP_PROP_194 194 + +#define SCT_STATUS_REQ_ADDR 0xe0 +#define SCT_STATUS_VERSION_LOW 0 /* log byte offsets */ +#define SCT_STATUS_VERSION_HIGH 1 +#define SCT_STATUS_TEMP 200 +#define SCT_STATUS_TEMP_LOWEST 201 +#define SCT_STATUS_TEMP_HIGHEST 202 +#define SCT_READ_LOG_ADDR 0xe1 +#define SMART_READ_LOG 0xd5 +#define SMART_WRITE_LOG 0xd6 + +#define INVALID_TEMP 0x80 + +#define temp_is_valid(temp) ((temp) != INVALID_TEMP) +#define temp_from_sct(temp) (((s8)(temp)) * 1000) + +static inline bool ata_id_smart_supported(u16 *id) +{ + return id[ATA_ID_COMMAND_SET_1] & BIT(0); +} + +static inline bool ata_id_smart_enabled(u16 *id) +{ + return id[ATA_ID_CFS_ENABLE_1] & BIT(0); +} + +static int drivetemp_scsi_command(struct drivetemp_data *st, + u8 ata_command, u8 feature, + u8 lba_low, u8 lba_mid, u8 lba_high) +{ + u8 scsi_cmd[MAX_COMMAND_SIZE]; + int data_dir; + + memset(scsi_cmd, 0, sizeof(scsi_cmd)); + scsi_cmd[0] = ATA_16; + if (ata_command == ATA_CMD_SMART && feature == SMART_WRITE_LOG) { + scsi_cmd[1] = (5 << 1); /* PIO Data-out */ + /* + * No off.line or cc, write to dev, block count in sector count + * field. + */ + scsi_cmd[2] = 0x06; + data_dir = DMA_TO_DEVICE; + } else { + scsi_cmd[1] = (4 << 1); /* PIO Data-in */ + /* + * No off.line or cc, read from dev, block count in sector count + * field. 
+ */ + scsi_cmd[2] = 0x0e; + data_dir = DMA_FROM_DEVICE; + } + scsi_cmd[4] = feature; + scsi_cmd[6] = 1; /* 1 sector */ + scsi_cmd[8] = lba_low; + scsi_cmd[10] = lba_mid; + scsi_cmd[12] = lba_high; + scsi_cmd[14] = ata_command; + + return scsi_execute_req(st->sdev, scsi_cmd, data_dir, + st->smartdata, ATA_SECT_SIZE, NULL, HZ, 5, + NULL); +} + +static int drivetemp_ata_command(struct drivetemp_data *st, u8 feature, + u8 select) +{ + return drivetemp_scsi_command(st, ATA_CMD_SMART, feature, select, + ATA_SMART_LBAM_PASS, ATA_SMART_LBAH_PASS); +} + +static int drivetemp_get_smarttemp(struct drivetemp_data *st, u32 attr, + long *temp) +{ + u8 *buf = st->smartdata; + bool have_temp = false; + u8 temp_raw; + u8 csum; + int err; + int i; + + err = drivetemp_ata_command(st, ATA_SMART_READ_VALUES, 0); + if (err) + return err; + + /* Checksum the read value table */ + csum = 0; + for (i = 0; i < ATA_SECT_SIZE; i++) + csum += buf[i]; + if (csum) { + dev_dbg(&st->sdev->sdev_gendev, + "checksum error reading SMART values\n"); + return -EIO; + } + + for (i = 0; i < ATA_MAX_SMART_ATTRS; i++) { + u8 *attr = buf + i * 12; + int id = attr[2]; + + if (!id) + continue; + + if (id == SMART_TEMP_PROP_190) { + temp_raw = attr[7]; + have_temp = true; + } + if (id == SMART_TEMP_PROP_194) { + temp_raw = attr[7]; + have_temp = true; + break; + } + } + + if (have_temp) { + *temp = temp_raw * 1000; + return 0; + } + + return -ENXIO; +} + +static int drivetemp_get_scttemp(struct drivetemp_data *st, u32 attr, long *val) +{ + u8 *buf = st->smartdata; + int err; + + err = drivetemp_ata_command(st, SMART_READ_LOG, SCT_STATUS_REQ_ADDR); + if (err) + return err; + switch (attr) { + case hwmon_temp_input: + *val = temp_from_sct(buf[SCT_STATUS_TEMP]); + break; + case hwmon_temp_lowest: + *val = temp_from_sct(buf[SCT_STATUS_TEMP_LOWEST]); + break; + case hwmon_temp_highest: + *val = temp_from_sct(buf[SCT_STATUS_TEMP_HIGHEST]); + break; + default: + err = -EINVAL; + break; + } + return err; +} + +static int drivetemp_identify_sata(struct drivetemp_data *st) +{ + struct scsi_device *sdev = st->sdev; + u8 *buf = st->smartdata; + struct scsi_vpd *vpd; + bool is_ata, is_sata; + bool have_sct_data_table; + bool have_sct_temp; + bool have_smart; + bool have_sct; + u16 *ata_id; + u16 version; + long temp; + int err; + + /* SCSI-ATA Translation present? */ + rcu_read_lock(); + vpd = rcu_dereference(sdev->vpd_pg89); + + /* + * Verify that ATA IDENTIFY DEVICE data is included in ATA Information + * VPD and that the drive implements the SATA protocol. 
+ */ + if (!vpd || vpd->len < 572 || vpd->data[56] != ATA_CMD_ID_ATA || + vpd->data[36] != 0x34) { + rcu_read_unlock(); + return -ENODEV; + } + ata_id = (u16 *)&vpd->data[60]; + is_ata = ata_id_is_ata(ata_id); + is_sata = ata_id_is_sata(ata_id); + have_sct = ata_id_sct_supported(ata_id); + have_sct_data_table = ata_id_sct_data_tables(ata_id); + have_smart = ata_id_smart_supported(ata_id) && + ata_id_smart_enabled(ata_id); + + rcu_read_unlock(); + + /* bail out if this is not a SATA device */ + if (!is_ata || !is_sata) + return -ENODEV; + if (!have_sct) + goto skip_sct; + + err = drivetemp_ata_command(st, SMART_READ_LOG, SCT_STATUS_REQ_ADDR); + if (err) + goto skip_sct; + + version = (buf[SCT_STATUS_VERSION_HIGH] << 8) | + buf[SCT_STATUS_VERSION_LOW]; + if (version != 2 && version != 3) + goto skip_sct; + + have_sct_temp = temp_is_valid(buf[SCT_STATUS_TEMP]); + if (!have_sct_temp) + goto skip_sct; + + st->have_temp_lowest = temp_is_valid(buf[SCT_STATUS_TEMP_LOWEST]); + st->have_temp_highest = temp_is_valid(buf[SCT_STATUS_TEMP_HIGHEST]); + + if (!have_sct_data_table) + goto skip_sct; + + /* Request and read temperature history table */ + memset(buf, '\0', sizeof(st->smartdata)); + buf[0] = 5; /* data table command */ + buf[2] = 1; /* read table */ + buf[4] = 2; /* temperature history table */ + + err = drivetemp_ata_command(st, SMART_WRITE_LOG, SCT_STATUS_REQ_ADDR); + if (err) + goto skip_sct_data; + + err = drivetemp_ata_command(st, SMART_READ_LOG, SCT_READ_LOG_ADDR); + if (err) + goto skip_sct_data; + + /* + * Temperature limits per AT Attachment 8 - + * ATA/ATAPI Command Set (ATA8-ACS) + */ + st->have_temp_max = temp_is_valid(buf[6]); + st->have_temp_crit = temp_is_valid(buf[7]); + st->have_temp_min = temp_is_valid(buf[8]); + st->have_temp_lcrit = temp_is_valid(buf[9]); + + st->temp_max = temp_from_sct(buf[6]); + st->temp_crit = temp_from_sct(buf[7]); + st->temp_min = temp_from_sct(buf[8]); + st->temp_lcrit = temp_from_sct(buf[9]); + +skip_sct_data: + if (have_sct_temp) { + st->get_temp = drivetemp_get_scttemp; + return 0; + } +skip_sct: + if (!have_smart) + return -ENODEV; + st->get_temp = drivetemp_get_smarttemp; + return drivetemp_get_smarttemp(st, hwmon_temp_input, &temp); +} + +static int drivetemp_identify(struct drivetemp_data *st) +{ + struct scsi_device *sdev = st->sdev; + + /* Bail out immediately if there is no inquiry data */ + if (!sdev->inquiry || sdev->inquiry_len < 16) + return -ENODEV; + + /* Disk device? 
*/ + if (sdev->type != TYPE_DISK && sdev->type != TYPE_ZBC) + return -ENODEV; + + return drivetemp_identify_sata(st); +} + +static int drivetemp_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + struct drivetemp_data *st = dev_get_drvdata(dev); + int err = 0; + + if (type != hwmon_temp) + return -EINVAL; + + switch (attr) { + case hwmon_temp_input: + case hwmon_temp_lowest: + case hwmon_temp_highest: + mutex_lock(&st->lock); + err = st->get_temp(st, attr, val); + mutex_unlock(&st->lock); + break; + case hwmon_temp_lcrit: + *val = st->temp_lcrit; + break; + case hwmon_temp_min: + *val = st->temp_min; + break; + case hwmon_temp_max: + *val = st->temp_max; + break; + case hwmon_temp_crit: + *val = st->temp_crit; + break; + default: + err = -EINVAL; + break; + } + return err; +} + +static umode_t drivetemp_is_visible(const void *data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + const struct drivetemp_data *st = data; + + switch (type) { + case hwmon_temp: + switch (attr) { + case hwmon_temp_input: + return 0444; + case hwmon_temp_lowest: + if (st->have_temp_lowest) + return 0444; + break; + case hwmon_temp_highest: + if (st->have_temp_highest) + return 0444; + break; + case hwmon_temp_min: + if (st->have_temp_min) + return 0444; + break; + case hwmon_temp_max: + if (st->have_temp_max) + return 0444; + break; + case hwmon_temp_lcrit: + if (st->have_temp_lcrit) + return 0444; + break; + case hwmon_temp_crit: + if (st->have_temp_crit) + return 0444; + break; + default: + break; + } + break; + default: + break; + } + return 0; +} + +static const struct hwmon_channel_info *drivetemp_info[] = { + HWMON_CHANNEL_INFO(chip, + HWMON_C_REGISTER_TZ), + HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | + HWMON_T_LOWEST | HWMON_T_HIGHEST | + HWMON_T_MIN | HWMON_T_MAX | + HWMON_T_LCRIT | HWMON_T_CRIT), + NULL +}; + +static const struct hwmon_ops drivetemp_ops = { + .is_visible = drivetemp_is_visible, + .read = drivetemp_read, +}; + +static const struct hwmon_chip_info drivetemp_chip_info = { + .ops = &drivetemp_ops, + .info = drivetemp_info, +}; + +/* + * The device argument points to sdev->sdev_dev. Its parent is + * sdev->sdev_gendev, which we can use to get the scsi_device pointer. 
+ */ +static int drivetemp_add(struct device *dev, struct class_interface *intf) +{ + struct scsi_device *sdev = to_scsi_device(dev->parent); + struct drivetemp_data *st; + int err; + + st = kzalloc(sizeof(*st), GFP_KERNEL); + if (!st) + return -ENOMEM; + + st->sdev = sdev; + st->dev = dev; + mutex_init(&st->lock); + + if (drivetemp_identify(st)) { + err = -ENODEV; + goto abort; + } + + st->hwdev = hwmon_device_register_with_info(dev->parent, "drivetemp", + st, &drivetemp_chip_info, + NULL); + if (IS_ERR(st->hwdev)) { + err = PTR_ERR(st->hwdev); + goto abort; + } + + list_add(&st->list, &drivetemp_devlist); + return 0; + +abort: + kfree(st); + return err; +} + +static void drivetemp_remove(struct device *dev, struct class_interface *intf) +{ + struct drivetemp_data *st, *tmp; + + list_for_each_entry_safe(st, tmp, &drivetemp_devlist, list) { + if (st->dev == dev) { + list_del(&st->list); + hwmon_device_unregister(st->hwdev); + kfree(st); + break; + } + } +} + +static struct class_interface drivetemp_interface = { + .add_dev = drivetemp_add, + .remove_dev = drivetemp_remove, +}; + +static int __init drivetemp_init(void) +{ + return scsi_register_interface(&drivetemp_interface); +} + +static void __exit drivetemp_exit(void) +{ + scsi_unregister_interface(&drivetemp_interface); +} + +module_init(drivetemp_init); +module_exit(drivetemp_exit); + +MODULE_AUTHOR("Guenter Roeck <linus@roeck-us.net>"); +MODULE_DESCRIPTION("Hard drive temperature monitor"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c index d018b20089ec..6a30fb453f7a 100644 --- a/drivers/hwmon/hwmon.c +++ b/drivers/hwmon/hwmon.c @@ -188,7 +188,7 @@ static int hwmon_thermal_add_sensor(struct device *dev, int index) static int hwmon_attr_base(enum hwmon_sensor_types type) { - if (type == hwmon_in) + if (type == hwmon_in || type == hwmon_intrusion) return 0; return 1; } @@ -343,6 +343,7 @@ static const char * const hwmon_chip_attrs[] = { }; static const char * const hwmon_temp_attr_templates[] = { + [hwmon_temp_enable] = "temp%d_enable", [hwmon_temp_input] = "temp%d_input", [hwmon_temp_type] = "temp%d_type", [hwmon_temp_lcrit] = "temp%d_lcrit", @@ -370,6 +371,7 @@ static const char * const hwmon_temp_attr_templates[] = { }; static const char * const hwmon_in_attr_templates[] = { + [hwmon_in_enable] = "in%d_enable", [hwmon_in_input] = "in%d_input", [hwmon_in_min] = "in%d_min", [hwmon_in_max] = "in%d_max", @@ -385,10 +387,10 @@ static const char * const hwmon_in_attr_templates[] = { [hwmon_in_max_alarm] = "in%d_max_alarm", [hwmon_in_lcrit_alarm] = "in%d_lcrit_alarm", [hwmon_in_crit_alarm] = "in%d_crit_alarm", - [hwmon_in_enable] = "in%d_enable", }; static const char * const hwmon_curr_attr_templates[] = { + [hwmon_curr_enable] = "curr%d_enable", [hwmon_curr_input] = "curr%d_input", [hwmon_curr_min] = "curr%d_min", [hwmon_curr_max] = "curr%d_max", @@ -407,6 +409,7 @@ static const char * const hwmon_curr_attr_templates[] = { }; static const char * const hwmon_power_attr_templates[] = { + [hwmon_power_enable] = "power%d_enable", [hwmon_power_average] = "power%d_average", [hwmon_power_average_interval] = "power%d_average_interval", [hwmon_power_average_interval_max] = "power%d_interval_max", @@ -438,11 +441,13 @@ static const char * const hwmon_power_attr_templates[] = { }; static const char * const hwmon_energy_attr_templates[] = { + [hwmon_energy_enable] = "energy%d_enable", [hwmon_energy_input] = "energy%d_input", [hwmon_energy_label] = "energy%d_label", }; static const char * const 
hwmon_humidity_attr_templates[] = { + [hwmon_humidity_enable] = "humidity%d_enable", [hwmon_humidity_input] = "humidity%d_input", [hwmon_humidity_label] = "humidity%d_label", [hwmon_humidity_min] = "humidity%d_min", @@ -454,6 +459,7 @@ static const char * const hwmon_humidity_attr_templates[] = { }; static const char * const hwmon_fan_attr_templates[] = { + [hwmon_fan_enable] = "fan%d_enable", [hwmon_fan_input] = "fan%d_input", [hwmon_fan_label] = "fan%d_label", [hwmon_fan_min] = "fan%d_min", @@ -474,6 +480,11 @@ static const char * const hwmon_pwm_attr_templates[] = { [hwmon_pwm_freq] = "pwm%d_freq", }; +static const char * const hwmon_intrusion_attr_templates[] = { + [hwmon_intrusion_alarm] = "intrusion%d_alarm", + [hwmon_intrusion_beep] = "intrusion%d_beep", +}; + static const char * const *__templates[] = { [hwmon_chip] = hwmon_chip_attrs, [hwmon_temp] = hwmon_temp_attr_templates, @@ -484,6 +495,7 @@ static const char * const *__templates[] = { [hwmon_humidity] = hwmon_humidity_attr_templates, [hwmon_fan] = hwmon_fan_attr_templates, [hwmon_pwm] = hwmon_pwm_attr_templates, + [hwmon_intrusion] = hwmon_intrusion_attr_templates, }; static const int __templates_size[] = { @@ -496,6 +508,7 @@ static const int __templates_size[] = { [hwmon_humidity] = ARRAY_SIZE(hwmon_humidity_attr_templates), [hwmon_fan] = ARRAY_SIZE(hwmon_fan_attr_templates), [hwmon_pwm] = ARRAY_SIZE(hwmon_pwm_attr_templates), + [hwmon_intrusion] = ARRAY_SIZE(hwmon_intrusion_attr_templates), }; static int hwmon_num_channel_attrs(const struct hwmon_channel_info *info) diff --git a/drivers/hwmon/i5k_amb.c b/drivers/hwmon/i5k_amb.c index b09c39abd3a8..eeac4b04df27 100644 --- a/drivers/hwmon/i5k_amb.c +++ b/drivers/hwmon/i5k_amb.c @@ -528,7 +528,7 @@ static int i5k_amb_probe(struct platform_device *pdev) goto err; } - data->amb_mmio = ioremap_nocache(data->amb_base, data->amb_len); + data->amb_mmio = ioremap(data->amb_base, data->amb_len); if (!data->amb_mmio) { res = -EBUSY; goto err_map_failed; diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index 5c1dddde193c..e39354ffe973 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c @@ -1,13 +1,29 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * k10temp.c - AMD Family 10h/11h/12h/14h/15h/16h processor hardware monitoring + * k10temp.c - AMD Family 10h/11h/12h/14h/15h/16h/17h + * processor hardware monitoring * * Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de> + * Copyright (c) 2020 Guenter Roeck <linux@roeck-us.net> + * + * Implementation notes: + * - CCD register address information as well as the calculation to + * convert raw register values is from https://github.com/ocerman/zenpower. + * The information is not confirmed from chip datasheets, but experiments + * suggest that it provides reasonable temperature values. + * - Register addresses to read chip voltage and current are also from + * https://github.com/ocerman/zenpower, and not confirmed from chip + * datasheets. Current calibration is board specific and not typically + * shared by board vendors. For this reason, current values are + * normalized to report 1A/LSB for core current and 0.25A/LSB for SoC + * current. Reported values can be adjusted using the sensors configuration + * file.
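+ * For example, under the 1A/LSB normalization a raw core-current + * reading of 25 is reported as 25 A; a board whose actual scale differs + * can correct the value from userspace, e.g. with a "compute" statement + * in the sensors configuration file (the exact factor is board specific + * and not known to the driver).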
*/ +#include <linux/bitops.h> +#include <linux/debugfs.h> #include <linux/err.h> #include <linux/hwmon.h> -#include <linux/hwmon-sysfs.h> #include <linux/init.h> #include <linux/module.h> #include <linux/pci.h> @@ -31,22 +47,22 @@ static DEFINE_MUTEX(nb_smu_ind_mutex); #endif /* CPUID function 0x80000001, ebx */ -#define CPUID_PKGTYPE_MASK 0xf0000000 +#define CPUID_PKGTYPE_MASK GENMASK(31, 28) #define CPUID_PKGTYPE_F 0x00000000 #define CPUID_PKGTYPE_AM2R2_AM3 0x10000000 /* DRAM controller (PCI function 2) */ #define REG_DCT0_CONFIG_HIGH 0x094 -#define DDR3_MODE 0x00000100 +#define DDR3_MODE BIT(8) /* miscellaneous (PCI function 3) */ #define REG_HARDWARE_THERMAL_CONTROL 0x64 -#define HTC_ENABLE 0x00000001 +#define HTC_ENABLE BIT(0) #define REG_REPORTED_TEMPERATURE 0xa4 #define REG_NORTHBRIDGE_CAPABILITIES 0xe8 -#define NB_CAP_HTC 0x00000400 +#define NB_CAP_HTC BIT(10) /* * For F15h M60h and M70h, REG_HARDWARE_THERMAL_CONTROL @@ -60,6 +76,20 @@ static DEFINE_MUTEX(nb_smu_ind_mutex); /* F17h M01h Access through SMN */ #define F17H_M01H_REPORTED_TEMP_CTRL_OFFSET 0x00059800 +#define F17H_M70H_CCD_TEMP(x) (0x00059954 + ((x) * 4)) +#define F17H_M70H_CCD_TEMP_VALID BIT(11) +#define F17H_M70H_CCD_TEMP_MASK GENMASK(10, 0) + +#define F17H_M01H_SVI 0x0005A000 +#define F17H_M01H_SVI_TEL_PLANE0 (F17H_M01H_SVI + 0xc) +#define F17H_M01H_SVI_TEL_PLANE1 (F17H_M01H_SVI + 0x10) + +#define CUR_TEMP_SHIFT 21 +#define CUR_TEMP_RANGE_SEL_MASK BIT(19) + +#define CFACTOR_ICORE 1000000 /* 1A / LSB */ +#define CFACTOR_ISOC 250000 /* 0.25A / LSB */ + struct k10temp_data { struct pci_dev *pdev; void (*read_htcreg)(struct pci_dev *pdev, u32 *regval); @@ -67,6 +97,10 @@ struct k10temp_data { int temp_offset; u32 temp_adjust_mask; bool show_tdie; + u32 show_tccd; + u32 svi_addr[2]; + bool show_current; + int cfactor[2]; }; struct tctl_offset { @@ -84,6 +118,16 @@ static const struct tctl_offset tctl_offset_table[] = { { 0x17, "AMD Ryzen Threadripper 29", 27000 }, /* 29{20,50,70,90}[W]X */ }; +static bool is_threadripper(void) +{ + return strstr(boot_cpu_data.x86_model_id, "Threadripper"); +} + +static bool is_epyc(void) +{ + return strstr(boot_cpu_data.x86_model_id, "EPYC"); +} + static void read_htcreg_pci(struct pci_dev *pdev, u32 *regval) { pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL, regval); @@ -123,130 +167,237 @@ static void read_tempreg_nb_f17(struct pci_dev *pdev, u32 *regval) F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval); } -static unsigned int get_raw_temp(struct k10temp_data *data) +static long get_raw_temp(struct k10temp_data *data) { - unsigned int temp; u32 regval; + long temp; data->read_tempreg(data->pdev, &regval); - temp = (regval >> 21) * 125; + temp = (regval >> CUR_TEMP_SHIFT) * 125; if (regval & data->temp_adjust_mask) temp -= 49000; return temp; } -static ssize_t temp1_input_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct k10temp_data *data = dev_get_drvdata(dev); - unsigned int temp = get_raw_temp(data); +const char *k10temp_temp_label[] = { + "Tdie", + "Tctl", + "Tccd1", + "Tccd2", + "Tccd3", + "Tccd4", + "Tccd5", + "Tccd6", + "Tccd7", + "Tccd8", +}; - if (temp > data->temp_offset) - temp -= data->temp_offset; - else - temp = 0; +const char *k10temp_in_label[] = { + "Vcore", + "Vsoc", +}; - return sprintf(buf, "%u\n", temp); -} +const char *k10temp_curr_label[] = { + "Icore", + "Isoc", +}; -static ssize_t temp2_input_show(struct device *dev, - struct device_attribute *devattr, char *buf) +static int k10temp_read_labels(struct device *dev, + enum
hwmon_sensor_types type, + u32 attr, int channel, const char **str) { - struct k10temp_data *data = dev_get_drvdata(dev); - unsigned int temp = get_raw_temp(data); - - return sprintf(buf, "%u\n", temp); + switch (type) { + case hwmon_temp: + *str = k10temp_temp_label[channel]; + break; + case hwmon_in: + *str = k10temp_in_label[channel]; + break; + case hwmon_curr: + *str = k10temp_curr_label[channel]; + break; + default: + return -EOPNOTSUPP; + } + return 0; } -static ssize_t temp_label_show(struct device *dev, - struct device_attribute *devattr, char *buf) +static int k10temp_read_curr(struct device *dev, u32 attr, int channel, + long *val) { - struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + struct k10temp_data *data = dev_get_drvdata(dev); + u32 regval; - return sprintf(buf, "%s\n", attr->index ? "Tctl" : "Tdie"); + switch (attr) { + case hwmon_curr_input: + amd_smn_read(amd_pci_dev_to_node_id(data->pdev), + data->svi_addr[channel], &regval); + *val = DIV_ROUND_CLOSEST(data->cfactor[channel] * + (regval & 0xff), + 1000); + break; + default: + return -EOPNOTSUPP; + } + return 0; } -static ssize_t temp1_max_show(struct device *dev, - struct device_attribute *attr, char *buf) +static int k10temp_read_in(struct device *dev, u32 attr, int channel, long *val) { - return sprintf(buf, "%d\n", 70 * 1000); + struct k10temp_data *data = dev_get_drvdata(dev); + u32 regval; + + switch (attr) { + case hwmon_in_input: + amd_smn_read(amd_pci_dev_to_node_id(data->pdev), + data->svi_addr[channel], &regval); + regval = (regval >> 16) & 0xff; + *val = DIV_ROUND_CLOSEST(155000 - regval * 625, 100); + break; + default: + return -EOPNOTSUPP; + } + return 0; } -static ssize_t temp_crit_show(struct device *dev, - struct device_attribute *devattr, char *buf) +static int k10temp_read_temp(struct device *dev, u32 attr, int channel, + long *val) { - struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct k10temp_data *data = dev_get_drvdata(dev); - int show_hyst = attr->index; u32 regval; - int value; - data->read_htcreg(data->pdev, &regval); - value = ((regval >> 16) & 0x7f) * 500 + 52000; - if (show_hyst) - value -= ((regval >> 24) & 0xf) * 500; - return sprintf(buf, "%d\n", value); + switch (attr) { + case hwmon_temp_input: + switch (channel) { + case 0: /* Tdie */ + *val = get_raw_temp(data) - data->temp_offset; + if (*val < 0) + *val = 0; + break; + case 1: /* Tctl */ + *val = get_raw_temp(data); + if (*val < 0) + *val = 0; + break; + case 2 ...
9: /* Tccd{1-8} */ + amd_smn_read(amd_pci_dev_to_node_id(data->pdev), + F17H_M70H_CCD_TEMP(channel - 2), &regval); + *val = (regval & F17H_M70H_CCD_TEMP_MASK) * 125 - 49000; + break; + default: + return -EOPNOTSUPP; + } + break; + case hwmon_temp_max: + *val = 70 * 1000; + break; + case hwmon_temp_crit: + data->read_htcreg(data->pdev, &regval); + *val = ((regval >> 16) & 0x7f) * 500 + 52000; + break; + case hwmon_temp_crit_hyst: + data->read_htcreg(data->pdev, &regval); + *val = (((regval >> 16) & 0x7f) + - ((regval >> 24) & 0xf)) * 500 + 52000; + break; + default: + return -EOPNOTSUPP; + } + return 0; } -static DEVICE_ATTR_RO(temp1_input); -static DEVICE_ATTR_RO(temp1_max); -static SENSOR_DEVICE_ATTR_RO(temp1_crit, temp_crit, 0); -static SENSOR_DEVICE_ATTR_RO(temp1_crit_hyst, temp_crit, 1); - -static SENSOR_DEVICE_ATTR_RO(temp1_label, temp_label, 0); -static DEVICE_ATTR_RO(temp2_input); -static SENSOR_DEVICE_ATTR_RO(temp2_label, temp_label, 1); +static int k10temp_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + switch (type) { + case hwmon_temp: + return k10temp_read_temp(dev, attr, channel, val); + case hwmon_in: + return k10temp_read_in(dev, attr, channel, val); + case hwmon_curr: + return k10temp_read_curr(dev, attr, channel, val); + default: + return -EOPNOTSUPP; + } +} -static umode_t k10temp_is_visible(struct kobject *kobj, - struct attribute *attr, int index) +static umode_t k10temp_is_visible(const void *_data, + enum hwmon_sensor_types type, + u32 attr, int channel) { - struct device *dev = container_of(kobj, struct device, kobj); - struct k10temp_data *data = dev_get_drvdata(dev); + const struct k10temp_data *data = _data; struct pci_dev *pdev = data->pdev; u32 reg; - switch (index) { - case 0 ... 1: /* temp1_input, temp1_max */ - default: - break; - case 2 ... 3: /* temp1_crit, temp1_crit_hyst */ - if (!data->read_htcreg) - return 0; - - pci_read_config_dword(pdev, REG_NORTHBRIDGE_CAPABILITIES, - &reg); - if (!(reg & NB_CAP_HTC)) - return 0; - - data->read_htcreg(data->pdev, &reg); - if (!(reg & HTC_ENABLE)) + switch (type) { + case hwmon_temp: + switch (attr) { + case hwmon_temp_input: + switch (channel) { + case 0: /* Tdie, or Tctl if we don't show it */ + break; + case 1: /* Tctl */ + if (!data->show_tdie) + return 0; + break; + case 2 ... 9: /* Tccd{1-8} */ + if (!(data->show_tccd & BIT(channel - 2))) + return 0; + break; + default: + return 0; + } + break; + case hwmon_temp_max: + if (channel || data->show_tdie) + return 0; + break; + case hwmon_temp_crit: + case hwmon_temp_crit_hyst: + if (channel || !data->read_htcreg) + return 0; + + pci_read_config_dword(pdev, + REG_NORTHBRIDGE_CAPABILITIES, + &reg); + if (!(reg & NB_CAP_HTC)) + return 0; + + data->read_htcreg(data->pdev, &reg); + if (!(reg & HTC_ENABLE)) + return 0; + break; + case hwmon_temp_label: + /* No labels if we don't show the die temperature */ + if (!data->show_tdie) + return 0; + switch (channel) { + case 0: /* Tdie */ + case 1: /* Tctl */ + break; + case 2 ... 9: /* Tccd{1-8} */ + if (!(data->show_tccd & BIT(channel - 2))) + return 0; + break; + default: + return 0; + } + break; + default: + return 0; + } break; - case 4 ...
6: /* temp1_label, temp2_input, temp2_label */ - if (!data->show_tdie) + case hwmon_in: + case hwmon_curr: + if (!data->show_current) return 0; break; + default: + return 0; } - return attr->mode; + return 0444; } -static struct attribute *k10temp_attrs[] = { - &dev_attr_temp1_input.attr, - &dev_attr_temp1_max.attr, - &sensor_dev_attr_temp1_crit.dev_attr.attr, - &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, - &sensor_dev_attr_temp1_label.dev_attr.attr, - &dev_attr_temp2_input.attr, - &sensor_dev_attr_temp2_label.dev_attr.attr, - NULL -}; - -static const struct attribute_group k10temp_group = { - .attrs = k10temp_attrs, - .is_visible = k10temp_is_visible, -}; -__ATTRIBUTE_GROUPS(k10temp); - static bool has_erratum_319(struct pci_dev *pdev) { u32 pkg_type, reg_dram_cfg; @@ -281,8 +432,125 @@ static bool has_erratum_319(struct pci_dev *pdev) (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_stepping <= 2); } -static int k10temp_probe(struct pci_dev *pdev, - const struct pci_device_id *id) +#ifdef CONFIG_DEBUG_FS + +static void k10temp_smn_regs_show(struct seq_file *s, struct pci_dev *pdev, + u32 addr, int count) +{ + u32 reg; + int i; + + for (i = 0; i < count; i++) { + if (!(i & 3)) + seq_printf(s, "0x%06x: ", addr + i * 4); + amd_smn_read(amd_pci_dev_to_node_id(pdev), addr + i * 4, &reg); + seq_printf(s, "%08x ", reg); + if ((i & 3) == 3) + seq_puts(s, "\n"); + } +} + +static int svi_show(struct seq_file *s, void *unused) +{ + struct k10temp_data *data = s->private; + + k10temp_smn_regs_show(s, data->pdev, F17H_M01H_SVI, 32); + return 0; +} +DEFINE_SHOW_ATTRIBUTE(svi); + +static int thm_show(struct seq_file *s, void *unused) +{ + struct k10temp_data *data = s->private; + + k10temp_smn_regs_show(s, data->pdev, + F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, 256); + return 0; +} +DEFINE_SHOW_ATTRIBUTE(thm); + +static void k10temp_debugfs_cleanup(void *ddir) +{ + debugfs_remove_recursive(ddir); +} + +static void k10temp_init_debugfs(struct k10temp_data *data) +{ + struct dentry *debugfs; + char name[32]; + + /* Only show debugfs data for Family 17h/18h CPUs */ + if (!data->show_tdie) + return; + + scnprintf(name, sizeof(name), "k10temp-%s", pci_name(data->pdev)); + + debugfs = debugfs_create_dir(name, NULL); + if (debugfs) { + debugfs_create_file("svi", 0444, debugfs, data, &svi_fops); + debugfs_create_file("thm", 0444, debugfs, data, &thm_fops); + devm_add_action_or_reset(&data->pdev->dev, + k10temp_debugfs_cleanup, debugfs); + } +} + +#else + +static void k10temp_init_debugfs(struct k10temp_data *data) +{ +} + +#endif + +static const struct hwmon_channel_info *k10temp_info[] = { + HWMON_CHANNEL_INFO(temp, + HWMON_T_INPUT | HWMON_T_MAX | + HWMON_T_CRIT | HWMON_T_CRIT_HYST | + HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL), + HWMON_CHANNEL_INFO(in, + HWMON_I_INPUT | HWMON_I_LABEL, + HWMON_I_INPUT | HWMON_I_LABEL), + HWMON_CHANNEL_INFO(curr, + HWMON_C_INPUT | HWMON_C_LABEL, + HWMON_C_INPUT | HWMON_C_LABEL), + NULL +}; + +static const struct hwmon_ops k10temp_hwmon_ops = { + .is_visible = k10temp_is_visible, + .read = k10temp_read, + .read_string = k10temp_read_labels, +}; + +static const struct hwmon_chip_info k10temp_chip_info = { + .ops = &k10temp_hwmon_ops, + .info = k10temp_info, +}; + +static void k10temp_get_ccd_support(struct pci_dev
*pdev, + struct k10temp_data *data, int limit) +{ + u32 regval; + int i; + + for (i = 0; i < limit; i++) { + amd_smn_read(amd_pci_dev_to_node_id(pdev), + F17H_M70H_CCD_TEMP(i), &regval); + if (regval & F17H_M70H_CCD_TEMP_VALID) + data->show_tccd |= BIT(i); + } +} + +static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int unreliable = has_erratum_319(pdev); struct device *dev = &pdev->dev; @@ -312,9 +580,32 @@ static int k10temp_probe(struct pci_dev *pdev, data->read_htcreg = read_htcreg_nb_f15; data->read_tempreg = read_tempreg_nb_f15; } else if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) { - data->temp_adjust_mask = 0x80000; + data->temp_adjust_mask = CUR_TEMP_RANGE_SEL_MASK; data->read_tempreg = read_tempreg_nb_f17; data->show_tdie = true; + + switch (boot_cpu_data.x86_model) { + case 0x1: /* Zen */ + case 0x8: /* Zen+ */ + case 0x11: /* Zen APU */ + case 0x18: /* Zen+ APU */ + data->show_current = !is_threadripper() && !is_epyc(); + data->svi_addr[0] = F17H_M01H_SVI_TEL_PLANE0; + data->svi_addr[1] = F17H_M01H_SVI_TEL_PLANE1; + data->cfactor[0] = CFACTOR_ICORE; + data->cfactor[1] = CFACTOR_ISOC; + k10temp_get_ccd_support(pdev, data, 4); + break; + case 0x31: /* Zen2 Threadripper */ + case 0x71: /* Zen2 */ + data->show_current = !is_threadripper() && !is_epyc(); + data->cfactor[0] = CFACTOR_ICORE; + data->cfactor[1] = CFACTOR_ISOC; + data->svi_addr[0] = F17H_M01H_SVI_TEL_PLANE1; + data->svi_addr[1] = F17H_M01H_SVI_TEL_PLANE0; + k10temp_get_ccd_support(pdev, data, 8); + break; + } } else { data->read_htcreg = read_htcreg_pci; data->read_tempreg = read_tempreg_pci; @@ -330,9 +621,15 @@ static int k10temp_probe(struct pci_dev *pdev, } } - hwmon_dev = devm_hwmon_device_register_with_groups(dev, "k10temp", data, - k10temp_groups); - return PTR_ERR_OR_ZERO(hwmon_dev); + hwmon_dev = devm_hwmon_device_register_with_info(dev, "k10temp", data, + &k10temp_chip_info, + NULL); + if (IS_ERR(hwmon_dev)) + return PTR_ERR(hwmon_dev); + + k10temp_init_debugfs(data); + + return 0; } static const struct pci_device_id k10temp_id_table[] = { diff --git a/drivers/hwmon/max31730.c b/drivers/hwmon/max31730.c new file mode 100644 index 000000000000..eb22a34dc36b --- /dev/null +++ b/drivers/hwmon/max31730.c @@ -0,0 +1,440 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Driver for MAX31730 3-Channel Remote Temperature Sensor + * + * Copyright (c) 2019 Guenter Roeck <linux@roeck-us.net> + */ + +#include <linux/bits.h> +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/init.h> +#include <linux/hwmon.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/of.h> +#include <linux/slab.h> + +/* Addresses scanned */ +static const unsigned short normal_i2c[] = { 0x1c, 0x1d, 0x1e, 0x1f, 0x4c, + 0x4d, 0x4e, 0x4f, I2C_CLIENT_END }; + +/* The MAX31730 registers */ +#define MAX31730_REG_TEMP 0x00 +#define MAX31730_REG_CONF 0x13 +#define MAX31730_STOP BIT(7) +#define MAX31730_EXTRANGE BIT(1) +#define MAX31730_REG_TEMP_OFFSET 0x16 +#define MAX31730_TEMP_OFFSET_BASELINE 0x77 +#define MAX31730_REG_OFFSET_ENABLE 0x17 +#define MAX31730_REG_TEMP_MAX 0x20 +#define MAX31730_REG_TEMP_MIN 0x30 +#define MAX31730_REG_STATUS_HIGH 0x32 +#define MAX31730_REG_STATUS_LOW 0x33 +#define MAX31730_REG_CHANNEL_ENABLE 0x35 +#define MAX31730_REG_TEMP_FAULT 0x36 + +#define MAX31730_REG_MFG_ID 0x50 +#define MAX31730_MFG_ID 0x4d +#define MAX31730_REG_MFG_REV 0x51 +#define MAX31730_MFG_REV 0x01 + +#define MAX31730_TEMP_MIN (-128000) +#define MAX31730_TEMP_MAX 127937 + +/*
Each client has this additional data */ +struct max31730_data { + struct i2c_client *client; + u8 orig_conf; + u8 current_conf; + u8 offset_enable; + u8 channel_enable; +}; + +/*-----------------------------------------------------------------------*/ + +static inline long max31730_reg_to_mc(s16 temp) +{ + return DIV_ROUND_CLOSEST((temp >> 4) * 1000, 16); +} + +static int max31730_write_config(struct max31730_data *data, u8 set_mask, + u8 clr_mask) +{ + u8 value; + + clr_mask |= MAX31730_EXTRANGE; + value = data->current_conf & ~clr_mask; + value |= set_mask; + + if (data->current_conf != value) { + s32 err; + + err = i2c_smbus_write_byte_data(data->client, MAX31730_REG_CONF, + value); + if (err) + return err; + data->current_conf = value; + } + return 0; +} + +static int max31730_set_enable(struct i2c_client *client, int reg, + u8 *confdata, int channel, bool enable) +{ + u8 regval = *confdata; + int err; + + if (enable) + regval |= BIT(channel); + else + regval &= ~BIT(channel); + + if (regval != *confdata) { + err = i2c_smbus_write_byte_data(client, reg, regval); + if (err) + return err; + *confdata = regval; + } + return 0; +} + +static int max31730_set_offset_enable(struct max31730_data *data, int channel, + bool enable) +{ + return max31730_set_enable(data->client, MAX31730_REG_OFFSET_ENABLE, + &data->offset_enable, channel, enable); +} + +static int max31730_set_channel_enable(struct max31730_data *data, int channel, + bool enable) +{ + return max31730_set_enable(data->client, MAX31730_REG_CHANNEL_ENABLE, + &data->channel_enable, channel, enable); +} + +static int max31730_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + struct max31730_data *data = dev_get_drvdata(dev); + int regval, reg, offset; + + if (type != hwmon_temp) + return -EINVAL; + + switch (attr) { + case hwmon_temp_input: + if (!(data->channel_enable & BIT(channel))) + return -ENODATA; + reg = MAX31730_REG_TEMP + (channel * 2); + break; + case hwmon_temp_max: + reg = MAX31730_REG_TEMP_MAX + (channel * 2); + break; + case hwmon_temp_min: + reg = MAX31730_REG_TEMP_MIN; + break; + case hwmon_temp_enable: + *val = !!(data->channel_enable & BIT(channel)); + return 0; + case hwmon_temp_offset: + if (!channel) + return -EINVAL; + if (!(data->offset_enable & BIT(channel))) { + *val = 0; + return 0; + } + offset = i2c_smbus_read_byte_data(data->client, + MAX31730_REG_TEMP_OFFSET); + if (offset < 0) + return offset; + *val = (offset - MAX31730_TEMP_OFFSET_BASELINE) * 125; + return 0; + case hwmon_temp_fault: + regval = i2c_smbus_read_byte_data(data->client, + MAX31730_REG_TEMP_FAULT); + if (regval < 0) + return regval; + *val = !!(regval & BIT(channel)); + return 0; + case hwmon_temp_min_alarm: + regval = i2c_smbus_read_byte_data(data->client, + MAX31730_REG_STATUS_LOW); + if (regval < 0) + return regval; + *val = !!(regval & BIT(channel)); + return 0; + case hwmon_temp_max_alarm: + regval = i2c_smbus_read_byte_data(data->client, + MAX31730_REG_STATUS_HIGH); + if (regval < 0) + return regval; + *val = !!(regval & BIT(channel)); + return 0; + default: + return -EINVAL; + } + regval = i2c_smbus_read_word_swapped(data->client, reg); + if (regval < 0) + return regval; + + *val = max31730_reg_to_mc(regval); + + return 0; +} + +static int max31730_write(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long val) +{ + struct max31730_data *data = dev_get_drvdata(dev); + int reg, err; + + if (type != hwmon_temp) + return -EINVAL; + + switch (attr) { + case 
hwmon_temp_max: + reg = MAX31730_REG_TEMP_MAX + channel * 2; + break; + case hwmon_temp_min: + reg = MAX31730_REG_TEMP_MIN; + break; + case hwmon_temp_enable: + if (val != 0 && val != 1) + return -EINVAL; + return max31730_set_channel_enable(data, channel, val); + case hwmon_temp_offset: + val = clamp_val(val, -14875, 17000) + 14875; + val = DIV_ROUND_CLOSEST(val, 125); + err = max31730_set_offset_enable(data, channel, + val != MAX31730_TEMP_OFFSET_BASELINE); + if (err) + return err; + return i2c_smbus_write_byte_data(data->client, + MAX31730_REG_TEMP_OFFSET, val); + default: + return -EINVAL; + } + + val = clamp_val(val, MAX31730_TEMP_MIN, MAX31730_TEMP_MAX); + val = DIV_ROUND_CLOSEST(val << 4, 1000) << 4; + + return i2c_smbus_write_word_swapped(data->client, reg, (u16)val); +} + +static umode_t max31730_is_visible(const void *data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + switch (type) { + case hwmon_temp: + switch (attr) { + case hwmon_temp_input: + case hwmon_temp_min_alarm: + case hwmon_temp_max_alarm: + case hwmon_temp_fault: + return 0444; + case hwmon_temp_min: + return channel ? 0444 : 0644; + case hwmon_temp_offset: + case hwmon_temp_enable: + case hwmon_temp_max: + return 0644; + } + break; + default: + break; + } + return 0; +} + +static const struct hwmon_channel_info *max31730_info[] = { + HWMON_CHANNEL_INFO(chip, + HWMON_C_REGISTER_TZ), + HWMON_CHANNEL_INFO(temp, + HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX | + HWMON_T_ENABLE | + HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM, + HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX | + HWMON_T_OFFSET | HWMON_T_ENABLE | + HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM | + HWMON_T_FAULT, + HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX | + HWMON_T_OFFSET | HWMON_T_ENABLE | + HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM | + HWMON_T_FAULT, + HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX | + HWMON_T_OFFSET | HWMON_T_ENABLE | + HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM | + HWMON_T_FAULT + ), + NULL +}; + +static const struct hwmon_ops max31730_hwmon_ops = { + .is_visible = max31730_is_visible, + .read = max31730_read, + .write = max31730_write, +}; + +static const struct hwmon_chip_info max31730_chip_info = { + .ops = &max31730_hwmon_ops, + .info = max31730_info, +}; + +static void max31730_remove(void *data) +{ + struct max31730_data *max31730 = data; + struct i2c_client *client = max31730->client; + + i2c_smbus_write_byte_data(client, MAX31730_REG_CONF, + max31730->orig_conf); +} + +static int +max31730_probe(struct i2c_client *client, const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + struct device *hwmon_dev; + struct max31730_data *data; + int status, err; + + if (!i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA)) + return -EIO; + + data = devm_kzalloc(dev, sizeof(struct max31730_data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->client = client; + + /* Cache original configuration and enable status */ + status = i2c_smbus_read_byte_data(client, MAX31730_REG_CHANNEL_ENABLE); + if (status < 0) + return status; + data->channel_enable = status; + + status = i2c_smbus_read_byte_data(client, MAX31730_REG_OFFSET_ENABLE); + if (status < 0) + return status; + data->offset_enable = status; + + status = i2c_smbus_read_byte_data(client, MAX31730_REG_CONF); + if (status < 0) + return status; + data->orig_conf = status; + data->current_conf = status; + + err = max31730_write_config(data, + data->channel_enable ? 0 : MAX31730_STOP, + data->channel_enable ? 
MAX31730_STOP : 0); + if (err) + return err; + + dev_set_drvdata(dev, data); + + err = devm_add_action_or_reset(dev, max31730_remove, data); + if (err) + return err; + + hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, + data, + &max31730_chip_info, + NULL); + return PTR_ERR_OR_ZERO(hwmon_dev); +} + +static const struct i2c_device_id max31730_ids[] = { + { "max31730", 0, }, + { } +}; +MODULE_DEVICE_TABLE(i2c, max31730_ids); + +static const struct of_device_id __maybe_unused max31730_of_match[] = { + { + .compatible = "maxim,max31730", + }, + { }, +}; +MODULE_DEVICE_TABLE(of, max31730_of_match); + +static bool max31730_check_reg_temp(struct i2c_client *client, + int reg) +{ + int regval; + + regval = i2c_smbus_read_byte_data(client, reg + 1); + return regval < 0 || (regval & 0x0f); +} + +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int max31730_detect(struct i2c_client *client, + struct i2c_board_info *info) +{ + struct i2c_adapter *adapter = client->adapter; + int regval; + int i; + + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | + I2C_FUNC_SMBUS_WORD_DATA)) + return -ENODEV; + + regval = i2c_smbus_read_byte_data(client, MAX31730_REG_MFG_ID); + if (regval != MAX31730_MFG_ID) + return -ENODEV; + regval = i2c_smbus_read_byte_data(client, MAX31730_REG_MFG_REV); + if (regval != MAX31730_MFG_REV) + return -ENODEV; + + /* lower 4 bit of temperature and limit registers must be 0 */ + if (max31730_check_reg_temp(client, MAX31730_REG_TEMP_MIN)) + return -ENODEV; + + for (i = 0; i < 4; i++) { + if (max31730_check_reg_temp(client, MAX31730_REG_TEMP + i * 2)) + return -ENODEV; + if (max31730_check_reg_temp(client, + MAX31730_REG_TEMP_MAX + i * 2)) + return -ENODEV; + } + + strlcpy(info->type, "max31730", I2C_NAME_SIZE); + + return 0; +} + +static int __maybe_unused max31730_suspend(struct device *dev) +{ + struct max31730_data *data = dev_get_drvdata(dev); + + return max31730_write_config(data, MAX31730_STOP, 0); +} + +static int __maybe_unused max31730_resume(struct device *dev) +{ + struct max31730_data *data = dev_get_drvdata(dev); + + return max31730_write_config(data, 0, MAX31730_STOP); +} + +static SIMPLE_DEV_PM_OPS(max31730_pm_ops, max31730_suspend, max31730_resume); + +static struct i2c_driver max31730_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "max31730", + .of_match_table = of_match_ptr(max31730_of_match), + .pm = &max31730_pm_ops, + }, + .probe = max31730_probe, + .id_table = max31730_ids, + .detect = max31730_detect, + .address_list = normal_i2c, +}; + +module_i2c_driver(max31730_driver); + +MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>"); +MODULE_DESCRIPTION("MAX31730 driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig index 59859979571d..a9ea06204767 100644 --- a/drivers/hwmon/pmbus/Kconfig +++ b/drivers/hwmon/pmbus/Kconfig @@ -20,8 +20,8 @@ config SENSORS_PMBUS help If you say yes here you get hardware monitoring support for generic PMBus devices, including but not limited to ADP4000, BMR453, BMR454, - MDT040, NCP4200, NCP4208, PDT003, PDT006, PDT012, TPS40400, TPS544B20, - TPS544B25, TPS544C20, TPS544C25, and UDT020. + MAX20796, MDT040, NCP4200, NCP4208, PDT003, PDT006, PDT012, TPS40400, + TPS544B20, TPS544B25, TPS544C20, TPS544C25, and UDT020. This driver can also be built as a module. If so, the module will be called pmbus. @@ -145,6 +145,15 @@ config SENSORS_MAX16064 This driver can also be built as a module. 
If so, the module will be called max16064. +config SENSORS_MAX20730 + tristate "Maxim MAX20730, MAX20734, MAX20743" + help + If you say yes here you get hardware monitoring support for Maxim + MAX20730, MAX20734, and MAX20743. + + This driver can also be built as a module. If so, the module will + be called max20730. + config SENSORS_MAX20751 tristate "Maxim MAX20751" help @@ -200,20 +209,20 @@ config SENSORS_TPS40422 be called tps40422. config SENSORS_TPS53679 - tristate "TI TPS53679" + tristate "TI TPS53679, TPS53688" help If you say yes here you get hardware monitoring support for TI - TPS53679. + TPS53679 and TPS53688. This driver can also be built as a module. If so, the module will be called tps53679. config SENSORS_UCD9000 - tristate "TI UCD90120, UCD90124, UCD90160, UCD9090, UCD90910" + tristate "TI UCD90120, UCD90124, UCD90160, UCD90320, UCD9090, UCD90910" help If you say yes here you get hardware monitoring support for TI - UCD90120, UCD90124, UCD90160, UCD9090, UCD90910, Sequencer and System - Health Controllers. + UCD90120, UCD90124, UCD90160, UCD90320, UCD9090, UCD90910, Sequencer + and System Health Controllers. This driver can also be built as a module. If so, the module will be called ucd9000. @@ -228,6 +237,15 @@ config SENSORS_UCD9200 This driver can also be built as a module. If so, the module will be called ucd9200. +config SENSORS_XDPE122 + tristate "Infineon XDPE122 family" + help + If you say yes here you get hardware monitoring support for Infineon + XDPE12254 and XDPE12284 devices. + + This driver can also be built as a module. If so, the module will + be called xdpe12284. + config SENSORS_ZL6100 tristate "Intersil ZL6100 and compatibles" help diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile index 3f8c1014938b..5feb45806123 100644 --- a/drivers/hwmon/pmbus/Makefile +++ b/drivers/hwmon/pmbus/Makefile @@ -17,6 +17,7 @@ obj-$(CONFIG_SENSORS_LM25066) += lm25066.o obj-$(CONFIG_SENSORS_LTC2978) += ltc2978.o obj-$(CONFIG_SENSORS_LTC3815) += ltc3815.o obj-$(CONFIG_SENSORS_MAX16064) += max16064.o +obj-$(CONFIG_SENSORS_MAX20730) += max20730.o obj-$(CONFIG_SENSORS_MAX20751) += max20751.o obj-$(CONFIG_SENSORS_MAX31785) += max31785.o obj-$(CONFIG_SENSORS_MAX34440) += max34440.o @@ -26,4 +27,5 @@ obj-$(CONFIG_SENSORS_TPS40422) += tps40422.o obj-$(CONFIG_SENSORS_TPS53679) += tps53679.o obj-$(CONFIG_SENSORS_UCD9000) += ucd9000.o obj-$(CONFIG_SENSORS_UCD9200) += ucd9200.o +obj-$(CONFIG_SENSORS_XDPE122) += xdpe12284.o obj-$(CONFIG_SENSORS_ZL6100) += zl6100.o diff --git a/drivers/hwmon/pmbus/ibm-cffps.c b/drivers/hwmon/pmbus/ibm-cffps.c index d359b76bcb36..3795fe55b84f 100644 --- a/drivers/hwmon/pmbus/ibm-cffps.c +++ b/drivers/hwmon/pmbus/ibm-cffps.c @@ -20,12 +20,15 @@ #define CFFPS_FRU_CMD 0x9A #define CFFPS_PN_CMD 0x9B +#define CFFPS_HEADER_CMD 0x9C #define CFFPS_SN_CMD 0x9E +#define CFFPS_MAX_POWER_OUT_CMD 0xA7 #define CFFPS_CCIN_CMD 0xBD #define CFFPS_FW_CMD 0xFA #define CFFPS1_FW_NUM_BYTES 4 #define CFFPS2_FW_NUM_WORDS 3 #define CFFPS_SYS_CONFIG_CMD 0xDA +#define CFFPS_12VCS_VOUT_CMD 0xDE #define CFFPS_INPUT_HISTORY_CMD 0xD6 #define CFFPS_INPUT_HISTORY_SIZE 100 @@ -44,22 +47,21 @@ #define CFFPS_MFR_VAUX_FAULT BIT(6) #define CFFPS_MFR_CURRENT_SHARE_WARNING BIT(7) -/* - * LED off state actually relinquishes LED control to PSU firmware, so it can - * turn on the LED for faults.
- */ -#define CFFPS_LED_OFF 0 #define CFFPS_LED_BLINK BIT(0) #define CFFPS_LED_ON BIT(1) +#define CFFPS_LED_OFF BIT(2) #define CFFPS_BLINK_RATE_MS 250 enum { CFFPS_DEBUGFS_INPUT_HISTORY = 0, CFFPS_DEBUGFS_FRU, CFFPS_DEBUGFS_PN, + CFFPS_DEBUGFS_HEADER, CFFPS_DEBUGFS_SN, + CFFPS_DEBUGFS_MAX_POWER_OUT, CFFPS_DEBUGFS_CCIN, CFFPS_DEBUGFS_FW, + CFFPS_DEBUGFS_ON_OFF_CONFIG, CFFPS_DEBUGFS_NUM_ENTRIES }; @@ -136,15 +138,15 @@ static ssize_t ibm_cffps_read_input_history(struct ibm_cffps *psu, psu->input_history.byte_count); } -static ssize_t ibm_cffps_debugfs_op(struct file *file, char __user *buf, - size_t count, loff_t *ppos) +static ssize_t ibm_cffps_debugfs_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) { u8 cmd; int i, rc; int *idxp = file->private_data; int idx = *idxp; struct ibm_cffps *psu = to_psu(idxp, idx); - char data[I2C_SMBUS_BLOCK_MAX] = { 0 }; + char data[I2C_SMBUS_BLOCK_MAX + 2] = { 0 }; pmbus_set_page(psu->client, 0); @@ -157,9 +159,20 @@ static ssize_t ibm_cffps_debugfs_op(struct file *file, char __user *buf, case CFFPS_DEBUGFS_PN: cmd = CFFPS_PN_CMD; break; + case CFFPS_DEBUGFS_HEADER: + cmd = CFFPS_HEADER_CMD; + break; case CFFPS_DEBUGFS_SN: cmd = CFFPS_SN_CMD; break; + case CFFPS_DEBUGFS_MAX_POWER_OUT: + rc = i2c_smbus_read_word_swapped(psu->client, + CFFPS_MAX_POWER_OUT_CMD); + if (rc < 0) + return rc; + + rc = snprintf(data, I2C_SMBUS_BLOCK_MAX, "%d", rc); + goto done; case CFFPS_DEBUGFS_CCIN: rc = i2c_smbus_read_word_swapped(psu->client, CFFPS_CCIN_CMD); if (rc < 0) @@ -199,6 +212,14 @@ static ssize_t ibm_cffps_debugfs_op(struct file *file, char __user *buf, return -EOPNOTSUPP; } goto done; + case CFFPS_DEBUGFS_ON_OFF_CONFIG: + rc = i2c_smbus_read_byte_data(psu->client, + PMBUS_ON_OFF_CONFIG); + if (rc < 0) + return rc; + + rc = snprintf(data, 3, "%02x", rc); + goto done; default: return -EINVAL; } @@ -214,9 +235,42 @@ done: return simple_read_from_buffer(buf, count, ppos, data, rc); } +static ssize_t ibm_cffps_debugfs_write(struct file *file, + const char __user *buf, size_t count, + loff_t *ppos) +{ + u8 data; + ssize_t rc; + int *idxp = file->private_data; + int idx = *idxp; + struct ibm_cffps *psu = to_psu(idxp, idx); + + switch (idx) { + case CFFPS_DEBUGFS_ON_OFF_CONFIG: + pmbus_set_page(psu->client, 0); + + rc = simple_write_to_buffer(&data, 1, ppos, buf, count); + if (rc <= 0) + return rc; + + rc = i2c_smbus_write_byte_data(psu->client, + PMBUS_ON_OFF_CONFIG, data); + if (rc) + return rc; + + rc = 1; + break; + default: + return -EINVAL; + } + + return rc; +} + static const struct file_operations ibm_cffps_fops = { .llseek = noop_llseek, - .read = ibm_cffps_debugfs_op, + .read = ibm_cffps_debugfs_read, + .write = ibm_cffps_debugfs_write, .open = simple_open, }; @@ -293,6 +347,9 @@ static int ibm_cffps_read_word_data(struct i2c_client *client, int page, if (mfr & CFFPS_MFR_PS_KILL) rc |= PB_STATUS_OFF; break; + case PMBUS_VIRT_READ_VMON: + rc = pmbus_read_word_data(client, page, CFFPS_12VCS_VOUT_CMD); + break; default: rc = -ENODATA; break; @@ -375,6 +432,9 @@ static void ibm_cffps_create_led_class(struct ibm_cffps *psu) rc = devm_led_classdev_register(dev, &psu->led); if (rc) dev_warn(dev, "failed to register led class: %d\n", rc); + else + i2c_smbus_write_byte_data(client, CFFPS_SYS_CONFIG_CMD, + CFFPS_LED_OFF); } static struct pmbus_driver_info ibm_cffps_info[] = { @@ -396,7 +456,7 @@ static struct pmbus_driver_info ibm_cffps_info[] = { PMBUS_HAVE_TEMP2 | PMBUS_HAVE_TEMP3 | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT | 
PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_STATUS_TEMP | - PMBUS_HAVE_STATUS_FAN12, + PMBUS_HAVE_STATUS_FAN12 | PMBUS_HAVE_VMON, .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT | PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 | PMBUS_HAVE_TEMP3 | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT, @@ -486,15 +546,24 @@ static int ibm_cffps_probe(struct i2c_client *client, debugfs_create_file("part_number", 0444, ibm_cffps_dir, &psu->debugfs_entries[CFFPS_DEBUGFS_PN], &ibm_cffps_fops); + debugfs_create_file("header", 0444, ibm_cffps_dir, + &psu->debugfs_entries[CFFPS_DEBUGFS_HEADER], + &ibm_cffps_fops); debugfs_create_file("serial_number", 0444, ibm_cffps_dir, &psu->debugfs_entries[CFFPS_DEBUGFS_SN], &ibm_cffps_fops); + debugfs_create_file("max_power_out", 0444, ibm_cffps_dir, + &psu->debugfs_entries[CFFPS_DEBUGFS_MAX_POWER_OUT], + &ibm_cffps_fops); debugfs_create_file("ccin", 0444, ibm_cffps_dir, &psu->debugfs_entries[CFFPS_DEBUGFS_CCIN], &ibm_cffps_fops); debugfs_create_file("fw_version", 0444, ibm_cffps_dir, &psu->debugfs_entries[CFFPS_DEBUGFS_FW], &ibm_cffps_fops); + debugfs_create_file("on_off_config", 0644, ibm_cffps_dir, + &psu->debugfs_entries[CFFPS_DEBUGFS_ON_OFF_CONFIG], + &ibm_cffps_fops); return 0; } diff --git a/drivers/hwmon/pmbus/max20730.c b/drivers/hwmon/pmbus/max20730.c new file mode 100644 index 000000000000..294e2212f61e --- /dev/null +++ b/drivers/hwmon/pmbus/max20730.c @@ -0,0 +1,372 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Driver for MAX20730, MAX20734, and MAX20743 Integrated, Step-Down + * Switching Regulators + * + * Copyright 2019 Google LLC. + */ + +#include <linux/bits.h> +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of_device.h> +#include <linux/pmbus.h> +#include <linux/util_macros.h> +#include "pmbus.h" + +enum chips { + max20730, + max20734, + max20743 +}; + +struct max20730_data { + enum chips id; + struct pmbus_driver_info info; + struct mutex lock; /* Used to protect against parallel writes */ + u16 mfr_devset1; +}; + +#define to_max20730_data(x) container_of(x, struct max20730_data, info) + +#define MAX20730_MFR_DEVSET1 0xd2 + +/* + * Convert discrete value to direct data format. Strictly speaking, all passed + * values are constants, so we could do that calculation manually. On the + * downside, that would make the driver more difficult to maintain, so let's + * use this approach. + */ +static u16 val_to_direct(int v, enum pmbus_sensor_classes class, + const struct pmbus_driver_info *info) +{ + int R = info->R[class] - 3; /* take milli-units into account */ + int b = info->b[class] * 1000; + long d; + + d = v * info->m[class] + b; + /* + * R < 0 is true for all callers, so we don't need to bother + * about the R > 0 case.
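+ * + * Worked example, using the MAX20730 temperature coefficients below + * (m = 21, b = 5887, R = -1): for v = 150000 (150 degrees C in + * milli-units), d = 150000 * 21 + 5887000 = 9037000, and dividing by + * 10 four times (R - 3 = -4) yields a direct-format word of 904.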
+ */ + while (R < 0) { + d = DIV_ROUND_CLOSEST(d, 10); + R++; + } + return (u16)d; +} + +static long direct_to_val(u16 w, enum pmbus_sensor_classes class, + const struct pmbus_driver_info *info) +{ + int R = info->R[class] - 3; + int b = info->b[class] * 1000; + int m = info->m[class]; + long d = (s16)w; + + if (m == 0) + return 0; + + while (R < 0) { + d *= 10; + R++; + } + d = (d - b) / m; + return d; +} + +static u32 max_current[][5] = { + [max20730] = { 13000, 16600, 20100, 23600 }, + [max20734] = { 21000, 27000, 32000, 38000 }, + [max20743] = { 18900, 24100, 29200, 34100 }, +}; + +static int max20730_read_word_data(struct i2c_client *client, int page, int reg) +{ + const struct pmbus_driver_info *info = pmbus_get_driver_info(client); + const struct max20730_data *data = to_max20730_data(info); + int ret = 0; + u32 max_c; + + switch (reg) { + case PMBUS_OT_FAULT_LIMIT: + switch ((data->mfr_devset1 >> 11) & 0x3) { + case 0x0: + ret = val_to_direct(150000, PSC_TEMPERATURE, info); + break; + case 0x1: + ret = val_to_direct(130000, PSC_TEMPERATURE, info); + break; + default: + ret = -ENODATA; + break; + } + break; + case PMBUS_IOUT_OC_FAULT_LIMIT: + max_c = max_current[data->id][(data->mfr_devset1 >> 5) & 0x3]; + ret = val_to_direct(max_c, PSC_CURRENT_OUT, info); + break; + default: + ret = -ENODATA; + break; + } + return ret; +} + +static int max20730_write_word_data(struct i2c_client *client, int page, + int reg, u16 word) +{ + struct pmbus_driver_info *info; + struct max20730_data *data; + u16 devset1; + int ret = 0; + int idx; + + info = (struct pmbus_driver_info *)pmbus_get_driver_info(client); + data = to_max20730_data(info); + + mutex_lock(&data->lock); + devset1 = data->mfr_devset1; + + switch (reg) { + case PMBUS_OT_FAULT_LIMIT: + devset1 &= ~(BIT(11) | BIT(12)); + if (direct_to_val(word, PSC_TEMPERATURE, info) < 140000) + devset1 |= BIT(11); + break; + case PMBUS_IOUT_OC_FAULT_LIMIT: + devset1 &= ~(BIT(5) | BIT(6)); + + idx = find_closest(direct_to_val(word, PSC_CURRENT_OUT, info), + max_current[data->id], 4); + devset1 |= (idx << 5); + break; + default: + ret = -ENODATA; + break; + } + + if (!ret && devset1 != data->mfr_devset1) { + ret = i2c_smbus_write_word_data(client, MAX20730_MFR_DEVSET1, + devset1); + if (!ret) { + data->mfr_devset1 = devset1; + pmbus_clear_cache(client); + } + } + mutex_unlock(&data->lock); + return ret; +} + +static const struct pmbus_driver_info max20730_info[] = { + [max20730] = { + .pages = 1, + .read_word_data = max20730_read_word_data, + .write_word_data = max20730_write_word_data, + + /* Source : Maxim AN6042 */ + .format[PSC_TEMPERATURE] = direct, + .m[PSC_TEMPERATURE] = 21, + .b[PSC_TEMPERATURE] = 5887, + .R[PSC_TEMPERATURE] = -1, + + .format[PSC_VOLTAGE_IN] = direct, + .m[PSC_VOLTAGE_IN] = 3609, + .b[PSC_VOLTAGE_IN] = 0, + .R[PSC_VOLTAGE_IN] = -2, + + /* + * Values in the datasheet are adjusted for temperature and + * for the relationship between Vin and Vout. + * Unfortunately, the data sheet suggests that Vout measurement + * may be scaled with a resistor array. This is indeed the case + * at least on the evaluation boards. As a result, any in-driver + * adjustments would either be wrong or require elaborate means + * to configure the scaling. Instead of doing that, just report + * raw values and let userspace handle adjustments.
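+ * + * (A typical userspace correction, assuming a hypothetical 2:1 output + * divider, would be a sensors.conf entry such as + * "compute in1 2*@, @/2"; the label and factor here are illustrative, + * not taken from a datasheet.)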
+ */ + .format[PSC_CURRENT_OUT] = direct, + .m[PSC_CURRENT_OUT] = 153, + .b[PSC_CURRENT_OUT] = 4976, + .R[PSC_CURRENT_OUT] = -1, + + .format[PSC_VOLTAGE_OUT] = linear, + + .func[0] = PMBUS_HAVE_VIN | + PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | + PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | + PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + }, + [max20734] = { + .pages = 1, + .read_word_data = max20730_read_word_data, + .write_word_data = max20730_write_word_data, + + /* Source : Maxim AN6209 */ + .format[PSC_TEMPERATURE] = direct, + .m[PSC_TEMPERATURE] = 21, + .b[PSC_TEMPERATURE] = 5887, + .R[PSC_TEMPERATURE] = -1, + + .format[PSC_VOLTAGE_IN] = direct, + .m[PSC_VOLTAGE_IN] = 3592, + .b[PSC_VOLTAGE_IN] = 0, + .R[PSC_VOLTAGE_IN] = -2, + + .format[PSC_CURRENT_OUT] = direct, + .m[PSC_CURRENT_OUT] = 111, + .b[PSC_CURRENT_OUT] = 3461, + .R[PSC_CURRENT_OUT] = -1, + + .format[PSC_VOLTAGE_OUT] = linear, + + .func[0] = PMBUS_HAVE_VIN | + PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | + PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | + PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + }, + [max20743] = { + .pages = 1, + .read_word_data = max20730_read_word_data, + .write_word_data = max20730_write_word_data, + + /* Source : Maxim AN6042 */ + .format[PSC_TEMPERATURE] = direct, + .m[PSC_TEMPERATURE] = 21, + .b[PSC_TEMPERATURE] = 5887, + .R[PSC_TEMPERATURE] = -1, + + .format[PSC_VOLTAGE_IN] = direct, + .m[PSC_VOLTAGE_IN] = 3597, + .b[PSC_VOLTAGE_IN] = 0, + .R[PSC_VOLTAGE_IN] = -2, + + .format[PSC_CURRENT_OUT] = direct, + .m[PSC_CURRENT_OUT] = 95, + .b[PSC_CURRENT_OUT] = 5014, + .R[PSC_CURRENT_OUT] = -1, + + .format[PSC_VOLTAGE_OUT] = linear, + + .func[0] = PMBUS_HAVE_VIN | + PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | + PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | + PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + }, +}; + +static int max20730_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + u8 buf[I2C_SMBUS_BLOCK_MAX + 1]; + struct max20730_data *data; + enum chips chip_id; + int ret; + + if (!i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_READ_BYTE_DATA | + I2C_FUNC_SMBUS_READ_WORD_DATA | + I2C_FUNC_SMBUS_BLOCK_DATA)) + return -ENODEV; + + ret = i2c_smbus_read_block_data(client, PMBUS_MFR_ID, buf); + if (ret < 0) { + dev_err(&client->dev, "Failed to read Manufacturer ID\n"); + return ret; + } + if (ret != 5 || strncmp(buf, "MAXIM", 5)) { + buf[ret] = '\0'; + dev_err(dev, "Unsupported Manufacturer ID '%s'\n", buf); + return -ENODEV; + } + + /* + * The chips support reading PMBUS_MFR_MODEL. On both MAX20730 + * and MAX20734, reading it returns M20743. Presumably that is + * the reason why the command is not documented. Unfortunately, + * that means that there is no reliable means to detect the chip. + * However, we can at least detect the chip series. Compare + * the returned value against 'M20743' and bail out if there is + * a mismatch. If that doesn't work for all chips, we may have + * to remove this check. 
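+ * + * (i2c_smbus_read_block_data() returns the number of bytes read, so a + * matching chip is expected to return 6 here, with buf holding + * "M20743".)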
+ */ + ret = i2c_smbus_read_block_data(client, PMBUS_MFR_MODEL, buf); + if (ret < 0) { + dev_err(dev, "Failed to read Manufacturer Model\n"); + return ret; + } + if (ret != 6 || strncmp(buf, "M20743", 6)) { + buf[ret] = '\0'; + dev_err(dev, "Unsupported Manufacturer Model '%s'\n", buf); + return -ENODEV; + } + + ret = i2c_smbus_read_block_data(client, PMBUS_MFR_REVISION, buf); + if (ret < 0) { + dev_err(dev, "Failed to read Manufacturer Revision\n"); + return ret; + } + if (ret != 1 || buf[0] != 'F') { + buf[ret] = '\0'; + dev_err(dev, "Unsupported Manufacturer Revision '%s'\n", buf); + return -ENODEV; + } + + if (client->dev.of_node) + chip_id = (enum chips)of_device_get_match_data(dev); + else + chip_id = id->driver_data; + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + data->id = chip_id; + mutex_init(&data->lock); + memcpy(&data->info, &max20730_info[chip_id], sizeof(data->info)); + + ret = i2c_smbus_read_word_data(client, MAX20730_MFR_DEVSET1); + if (ret < 0) + return ret; + data->mfr_devset1 = ret; + + return pmbus_do_probe(client, id, &data->info); +} + +static const struct i2c_device_id max20730_id[] = { + { "max20730", max20730 }, + { "max20734", max20734 }, + { "max20743", max20743 }, + { }, +}; + +MODULE_DEVICE_TABLE(i2c, max20730_id); + +static const struct of_device_id max20730_of_match[] = { + { .compatible = "maxim,max20730", .data = (void *)max20730 }, + { .compatible = "maxim,max20734", .data = (void *)max20734 }, + { .compatible = "maxim,max20743", .data = (void *)max20743 }, + { }, +}; + +MODULE_DEVICE_TABLE(of, max20730_of_match); + +static struct i2c_driver max20730_driver = { + .driver = { + .name = "max20730", + .of_match_table = max20730_of_match, + }, + .probe = max20730_probe, + .remove = pmbus_do_remove, + .id_table = max20730_id, +}; + +module_i2c_driver(max20730_driver); + +MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>"); +MODULE_DESCRIPTION("PMBus driver for Maxim MAX20730 / MAX20734 / MAX20743"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/pmbus/max20751.c b/drivers/hwmon/pmbus/max20751.c index ee5f0cdbde06..da3c38cb9a5c 100644 --- a/drivers/hwmon/pmbus/max20751.c +++ b/drivers/hwmon/pmbus/max20751.c @@ -16,7 +16,7 @@ static struct pmbus_driver_info max20751_info = { .pages = 1, .format[PSC_VOLTAGE_IN] = linear, .format[PSC_VOLTAGE_OUT] = vid, - .vrm_version = vr12, + .vrm_version[0] = vr12, .format[PSC_TEMPERATURE] = linear, .format[PSC_CURRENT_OUT] = linear, .format[PSC_POWER] = linear, diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c index c0bc43d01018..51e8312b6c2d 100644 --- a/drivers/hwmon/pmbus/pmbus.c +++ b/drivers/hwmon/pmbus/pmbus.c @@ -115,7 +115,7 @@ static int pmbus_identify(struct i2c_client *client, } if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE)) { - int vout_mode; + int vout_mode, i; vout_mode = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE); if (vout_mode >= 0 && vout_mode != 0xff) { @@ -124,7 +124,8 @@ static int pmbus_identify(struct i2c_client *client, break; case 1: info->format[PSC_VOLTAGE_OUT] = vid; - info->vrm_version = vr11; + for (i = 0; i < info->pages; i++) + info->vrm_version[i] = vr11; break; case 2: info->format[PSC_VOLTAGE_OUT] = direct; @@ -210,6 +211,7 @@ static const struct i2c_device_id pmbus_id[] = { {"dps460", (kernel_ulong_t)&pmbus_info_one_skip}, {"dps650ab", (kernel_ulong_t)&pmbus_info_one_skip}, {"dps800", (kernel_ulong_t)&pmbus_info_one_skip}, + {"max20796", (kernel_ulong_t)&pmbus_info_one}, {"mdt040", 
(kernel_ulong_t)&pmbus_info_one}, {"ncp4200", (kernel_ulong_t)&pmbus_info_one}, {"ncp4208", (kernel_ulong_t)&pmbus_info_one}, diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h index d198af3a92b6..13b34bd67f23 100644 --- a/drivers/hwmon/pmbus/pmbus.h +++ b/drivers/hwmon/pmbus/pmbus.h @@ -22,6 +22,8 @@ enum pmbus_regs { PMBUS_CLEAR_FAULTS = 0x03, PMBUS_PHASE = 0x04, + PMBUS_WRITE_PROTECT = 0x10, + PMBUS_CAPABILITY = 0x19, PMBUS_QUERY = 0x1A, @@ -226,6 +228,15 @@ enum pmbus_regs { #define PB_OPERATION_CONTROL_ON BIT(7) /* + * WRITE_PROTECT + */ +#define PB_WP_ALL BIT(7) /* all but WRITE_PROTECT */ +#define PB_WP_OP BIT(6) /* all but WP, OPERATION, PAGE */ +#define PB_WP_VOUT BIT(5) /* all but WP, OPERATION, PAGE, VOUT, ON_OFF */ + +#define PB_WP_ANY (PB_WP_ALL | PB_WP_OP | PB_WP_VOUT) + +/* * CAPABILITY */ #define PB_CAPABILITY_SMBALERT BIT(4) @@ -377,12 +388,12 @@ enum pmbus_sensor_classes { #define PMBUS_PAGE_VIRTUAL BIT(31) enum pmbus_data_format { linear = 0, direct, vid }; -enum vrm_version { vr11 = 0, vr12, vr13 }; +enum vrm_version { vr11 = 0, vr12, vr13, imvp9, amd625mv }; struct pmbus_driver_info { int pages; /* Total number of pages */ enum pmbus_data_format format[PSC_NUM_CLASSES]; - enum vrm_version vrm_version; + enum vrm_version vrm_version[PMBUS_PAGES]; /* vrm version per page */ /* * Support one set of coefficients for each sensor type * Used for chips providing data in direct mode. diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c index 8470097907bc..d9c17feb7b4a 100644 --- a/drivers/hwmon/pmbus/pmbus_core.c +++ b/drivers/hwmon/pmbus/pmbus_core.c @@ -696,7 +696,7 @@ static long pmbus_reg2data_vid(struct pmbus_data *data, long val = sensor->data; long rv = 0; - switch (data->info->vrm_version) { + switch (data->info->vrm_version[sensor->page]) { case vr11: if (val >= 0x02 && val <= 0xb2) rv = DIV_ROUND_CLOSEST(160000 - (val - 2) * 625, 100); @@ -709,6 +709,14 @@ static long pmbus_reg2data_vid(struct pmbus_data *data, if (val >= 0x01) rv = 500 + (val - 1) * 10; break; + case imvp9: + if (val >= 0x01) + rv = 200 + (val - 1) * 10; + break; + case amd625mv: + if (val >= 0x0 && val <= 0xd8) + rv = DIV_ROUND_CLOSEST(155000 - val * 625, 100); + break; } return rv; } @@ -1088,6 +1096,9 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data, snprintf(sensor->name, sizeof(sensor->name), "%s%d", name, seq); + if (data->flags & PMBUS_WRITE_PROTECTED) + readonly = true; + sensor->page = page; sensor->reg = reg; sensor->class = class; @@ -2141,6 +2152,15 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data, if (ret >= 0 && (ret & PB_CAPABILITY_ERROR_CHECK)) client->flags |= I2C_CLIENT_PEC; + /* + * Check if the chip is write protected. If it is, we can not clear + * faults, and we should not try it. Also, in that case, writes into + * limit registers need to be disabled. 
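+ * PB_WP_ANY covers the three defined protection levels (PB_WP_ALL, + * PB_WP_OP and PB_WP_VOUT); any of them is treated as full write + * protection as far as this driver is concerned.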
+ */ + ret = i2c_smbus_read_byte_data(client, PMBUS_WRITE_PROTECT); + if (ret > 0 && (ret & PB_WP_ANY)) + data->flags |= PMBUS_WRITE_PROTECTED | PMBUS_SKIP_STATUS_CHECK; + if (data->info->pages) pmbus_clear_faults(client); else diff --git a/drivers/hwmon/pmbus/pxe1610.c b/drivers/hwmon/pmbus/pxe1610.c index ebe3f023f840..517584cff3de 100644 --- a/drivers/hwmon/pmbus/pxe1610.c +++ b/drivers/hwmon/pmbus/pxe1610.c @@ -19,26 +19,30 @@ static int pxe1610_identify(struct i2c_client *client, struct pmbus_driver_info *info) { - if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE)) { - u8 vout_mode; - int ret; - - /* Read the register with VOUT scaling value.*/ - ret = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE); - if (ret < 0) - return ret; - - vout_mode = ret & GENMASK(4, 0); - - switch (vout_mode) { - case 1: - info->vrm_version = vr12; - break; - case 2: - info->vrm_version = vr13; - break; - default: - return -ENODEV; + int i; + + for (i = 0; i < PXE1610_NUM_PAGES; i++) { + if (pmbus_check_byte_register(client, i, PMBUS_VOUT_MODE)) { + u8 vout_mode; + int ret; + + /* Read the register with VOUT scaling value.*/ + ret = pmbus_read_byte_data(client, i, PMBUS_VOUT_MODE); + if (ret < 0) + return ret; + + vout_mode = ret & GENMASK(4, 0); + + switch (vout_mode) { + case 1: + info->vrm_version[i] = vr12; + break; + case 2: + info->vrm_version[i] = vr13; + break; + default: + return -ENODEV; + } } } diff --git a/drivers/hwmon/pmbus/tps53679.c b/drivers/hwmon/pmbus/tps53679.c index 86bb3aca09ed..9c22e9013dd7 100644 --- a/drivers/hwmon/pmbus/tps53679.c +++ b/drivers/hwmon/pmbus/tps53679.c @@ -24,27 +24,29 @@ static int tps53679_identify(struct i2c_client *client, struct pmbus_driver_info *info) { u8 vout_params; - int ret; - - /* Read the register with VOUT scaling value.*/ - ret = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE); - if (ret < 0) - return ret; - - vout_params = ret & GENMASK(4, 0); - - switch (vout_params) { - case TPS53679_PROT_VR13_10MV: - case TPS53679_PROT_VR12_5_10MV: - info->vrm_version = vr13; - break; - case TPS53679_PROT_VR13_5MV: - case TPS53679_PROT_VR12_5MV: - case TPS53679_PROT_IMVP8_5MV: - info->vrm_version = vr12; - break; - default: - return -EINVAL; + int i, ret; + + for (i = 0; i < TPS53679_PAGE_NUM; i++) { + /* Read the register with VOUT scaling value.*/ + ret = pmbus_read_byte_data(client, i, PMBUS_VOUT_MODE); + if (ret < 0) + return ret; + + vout_params = ret & GENMASK(4, 0); + + switch (vout_params) { + case TPS53679_PROT_VR13_10MV: + case TPS53679_PROT_VR12_5_10MV: + info->vrm_version[i] = vr13; + break; + case TPS53679_PROT_VR13_5MV: + case TPS53679_PROT_VR12_5MV: + case TPS53679_PROT_IMVP8_5MV: + info->vrm_version[i] = vr12; + break; + default: + return -EINVAL; + } } return 0; @@ -83,6 +85,7 @@ static int tps53679_probe(struct i2c_client *client, static const struct i2c_device_id tps53679_id[] = { {"tps53679", 0}, + {"tps53688", 0}, {} }; @@ -90,6 +93,7 @@ MODULE_DEVICE_TABLE(i2c, tps53679_id); static const struct of_device_id __maybe_unused tps53679_of_match[] = { {.compatible = "ti,tps53679"}, + {.compatible = "ti,tps53688"}, {} }; MODULE_DEVICE_TABLE(of, tps53679_of_match); diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c index a9229c6b0e84..23ea3415f166 100644 --- a/drivers/hwmon/pmbus/ucd9000.c +++ b/drivers/hwmon/pmbus/ucd9000.c @@ -18,7 +18,8 @@ #include <linux/gpio/driver.h> #include "pmbus.h" -enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd9090, ucd90910 }; +enum chips { ucd9000, ucd90120, ucd90124, ucd90160, 
ucd90320, ucd9090, + ucd90910 }; #define UCD9000_MONITOR_CONFIG 0xd5 #define UCD9000_NUM_PAGES 0xd6 @@ -38,7 +39,7 @@ enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd9090, ucd90910 }; #define UCD9000_GPIO_OUTPUT 1 #define UCD9000_MON_TYPE(x) (((x) >> 5) & 0x07) -#define UCD9000_MON_PAGE(x) ((x) & 0x0f) +#define UCD9000_MON_PAGE(x) ((x) & 0x1f) #define UCD9000_MON_VOLTAGE 1 #define UCD9000_MON_TEMPERATURE 2 @@ -50,10 +51,12 @@ enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd9090, ucd90910 }; #define UCD9000_GPIO_NAME_LEN 16 #define UCD9090_NUM_GPIOS 23 #define UCD901XX_NUM_GPIOS 26 +#define UCD90320_NUM_GPIOS 84 #define UCD90910_NUM_GPIOS 26 #define UCD9000_DEBUGFS_NAME_LEN 24 #define UCD9000_GPI_COUNT 8 +#define UCD90320_GPI_COUNT 32 struct ucd9000_data { u8 fan_data[UCD9000_NUM_FAN][I2C_SMBUS_BLOCK_MAX]; @@ -131,6 +134,7 @@ static const struct i2c_device_id ucd9000_id[] = { {"ucd90120", ucd90120}, {"ucd90124", ucd90124}, {"ucd90160", ucd90160}, + {"ucd90320", ucd90320}, {"ucd9090", ucd9090}, {"ucd90910", ucd90910}, {} @@ -155,6 +159,10 @@ static const struct of_device_id __maybe_unused ucd9000_of_match[] = { .data = (void *)ucd90160 }, { + .compatible = "ti,ucd90320", + .data = (void *)ucd90320 + }, + { .compatible = "ti,ucd9090", .data = (void *)ucd9090 }, @@ -322,6 +330,9 @@ static void ucd9000_probe_gpio(struct i2c_client *client, case ucd90160: data->gpio.ngpio = UCD901XX_NUM_GPIOS; break; + case ucd90320: + data->gpio.ngpio = UCD90320_NUM_GPIOS; + break; case ucd90910: data->gpio.ngpio = UCD90910_NUM_GPIOS; break; @@ -372,17 +383,18 @@ static int ucd9000_debugfs_show_mfr_status_bit(void *data, u64 *val) struct ucd9000_debugfs_entry *entry = data; struct i2c_client *client = entry->client; u8 buffer[I2C_SMBUS_BLOCK_MAX]; - int ret; + int ret, i; ret = ucd9000_get_mfr_status(client, buffer); if (ret < 0) return ret; /* - * Attribute only created for devices with gpi fault bits at bits - * 16-23, which is the second byte of the response. + * GPI fault bits are in sets of 8, two bytes from end of response. */ - *val = !!(buffer[1] & BIT(entry->index)); + i = ret - 3 - entry->index / 8; + if (i >= 0) + *val = !!(buffer[i] & BIT(entry->index % 8)); return 0; } @@ -422,7 +434,7 @@ static int ucd9000_init_debugfs(struct i2c_client *client, { struct dentry *debugfs; struct ucd9000_debugfs_entry *entries; - int i; + int i, gpi_count; char name[UCD9000_DEBUGFS_NAME_LEN]; debugfs = pmbus_get_debugfs_dir(client); @@ -435,18 +447,21 @@ static int ucd9000_init_debugfs(struct i2c_client *client, /* * Of the chips this driver supports, only the UCD9090, UCD90160, - * and UCD90910 report GPI faults in their MFR_STATUS register, so only - * create the GPI fault debugfs attributes for those chips. + * UCD90320, and UCD90910 report GPI faults in their MFR_STATUS + * register, so only create the GPI fault debugfs attributes for those + * chips. */ if (mid->driver_data == ucd9090 || mid->driver_data == ucd90160 || - mid->driver_data == ucd90910) { + mid->driver_data == ucd90320 || mid->driver_data == ucd90910) { + gpi_count = mid->driver_data == ucd90320 ? 
UCD90320_GPI_COUNT
+						 : UCD9000_GPI_COUNT;
 		entries = devm_kcalloc(&client->dev,
-				       UCD9000_GPI_COUNT, sizeof(*entries),
+				       gpi_count, sizeof(*entries),
 				       GFP_KERNEL);
 		if (!entries)
 			return -ENOMEM;
-		for (i = 0; i < UCD9000_GPI_COUNT; i++) {
+		for (i = 0; i < gpi_count; i++) {
 			entries[i].client = client;
 			entries[i].index = i;
 			scnprintf(name, UCD9000_DEBUGFS_NAME_LEN,
diff --git a/drivers/hwmon/pmbus/xdpe12284.c b/drivers/hwmon/pmbus/xdpe12284.c
new file mode 100644
index 000000000000..3d47806ff4d3
--- /dev/null
+++ b/drivers/hwmon/pmbus/xdpe12284.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Hardware monitoring driver for Infineon Multi-phase Digital VR Controllers
+ *
+ * Copyright (c) 2020 Mellanox Technologies. All rights reserved.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include "pmbus.h"
+
+#define XDPE122_PROT_VR12_5MV		0x01 /* VR12.0 mode, 5-mV DAC */
+#define XDPE122_PROT_VR12_5_10MV	0x02 /* VR12.5 mode, 10-mV DAC */
+#define XDPE122_PROT_IMVP9_10MV		0x03 /* IMVP9 mode, 10-mV DAC */
+#define XDPE122_AMD_625MV		0x10 /* AMD mode 6.25mV */
+#define XDPE122_PAGE_NUM		2
+
+static int xdpe122_identify(struct i2c_client *client,
+			    struct pmbus_driver_info *info)
+{
+	u8 vout_params;
+	int i, ret;
+
+	for (i = 0; i < XDPE122_PAGE_NUM; i++) {
+		/* Read the register with VOUT scaling value. */
+		ret = pmbus_read_byte_data(client, i, PMBUS_VOUT_MODE);
+		if (ret < 0)
+			return ret;
+
+		vout_params = ret & GENMASK(4, 0);
+
+		switch (vout_params) {
+		case XDPE122_PROT_VR12_5_10MV:
+			info->vrm_version[i] = vr13;
+			break;
+		case XDPE122_PROT_VR12_5MV:
+			info->vrm_version[i] = vr12;
+			break;
+		case XDPE122_PROT_IMVP9_10MV:
+			info->vrm_version[i] = imvp9;
+			break;
+		case XDPE122_AMD_625MV:
+			info->vrm_version[i] = amd625mv;
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static struct pmbus_driver_info xdpe122_info = {
+	.pages = XDPE122_PAGE_NUM,
+	.format[PSC_VOLTAGE_IN] = linear,
+	.format[PSC_VOLTAGE_OUT] = vid,
+	.format[PSC_TEMPERATURE] = linear,
+	.format[PSC_CURRENT_IN] = linear,
+	.format[PSC_CURRENT_OUT] = linear,
+	.format[PSC_POWER] = linear,
+	.func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+		PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+		PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP |
+		PMBUS_HAVE_POUT | PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT,
+	.func[1] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+		PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+		PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP |
+		PMBUS_HAVE_POUT | PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT,
+	.identify = xdpe122_identify,
+};
+
+static int xdpe122_probe(struct i2c_client *client,
+			 const struct i2c_device_id *id)
+{
+	struct pmbus_driver_info *info;
+
+	info = devm_kmemdup(&client->dev, &xdpe122_info, sizeof(*info),
+			    GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	return pmbus_do_probe(client, id, info);
+}
+
+static const struct i2c_device_id xdpe122_id[] = {
+	{"xdpe12254", 0},
+	{"xdpe12284", 0},
+	{}
+};
+
+MODULE_DEVICE_TABLE(i2c, xdpe122_id);
+
+static const struct of_device_id __maybe_unused xdpe122_of_match[] = {
+	{.compatible = "infineon,xdpe12254"},
+	{.compatible = "infineon,xdpe12284"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, xdpe122_of_match);
+
+static struct i2c_driver xdpe122_driver = {
+	.driver = {
+		.name = "xdpe12284",
+		.of_match_table = of_match_ptr(xdpe122_of_match),
+	},
+	.probe = xdpe122_probe,
+	.remove =
pmbus_do_remove, + .id_table = xdpe122_id, +}; + +module_i2c_driver(xdpe122_driver); + +MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>"); +MODULE_DESCRIPTION("PMBus driver for Infineon XDPE122 family"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c index 42ffd2e5182d..30b7b3ea8836 100644 --- a/drivers/hwmon/pwm-fan.c +++ b/drivers/hwmon/pwm-fan.c @@ -390,8 +390,7 @@ static int pwm_fan_probe(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM_SLEEP -static int pwm_fan_suspend(struct device *dev) +static int pwm_fan_disable(struct device *dev) { struct pwm_fan_ctx *ctx = dev_get_drvdata(dev); struct pwm_args args; @@ -418,6 +417,17 @@ static int pwm_fan_suspend(struct device *dev) return 0; } +static void pwm_fan_shutdown(struct platform_device *pdev) +{ + pwm_fan_disable(&pdev->dev); +} + +#ifdef CONFIG_PM_SLEEP +static int pwm_fan_suspend(struct device *dev) +{ + return pwm_fan_disable(dev); +} + static int pwm_fan_resume(struct device *dev) { struct pwm_fan_ctx *ctx = dev_get_drvdata(dev); @@ -455,6 +465,7 @@ MODULE_DEVICE_TABLE(of, of_pwm_fan_match); static struct platform_driver pwm_fan_driver = { .probe = pwm_fan_probe, + .shutdown = pwm_fan_shutdown, .driver = { .name = "pwm-fan", .pm = &pwm_fan_pm, diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c index eb171d15ac48..7ffadc2da57b 100644 --- a/drivers/hwmon/w83627ehf.c +++ b/drivers/hwmon/w83627ehf.c @@ -28,8 +28,6 @@ * w83627uhg 8 2 2 3 0xa230 0xc1 0x5ca3 * w83667hg 9 5 3 3 0xa510 0xc1 0x5ca3 * w83667hg-b 9 5 3 4 0xb350 0xc1 0x5ca3 - * nct6775f 9 4 3 9 0xb470 0xc1 0x5ca3 - * nct6776f 9 5 3 9 0xC330 0xc1 0x5ca3 */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -50,7 +48,7 @@ enum kinds { w83627ehf, w83627dhg, w83627dhg_p, w83627uhg, - w83667hg, w83667hg_b, nct6775, nct6776, + w83667hg, w83667hg_b, }; /* used to set data->name = w83627ehf_device_names[data->sio_kind] */ @@ -61,18 +59,12 @@ static const char * const w83627ehf_device_names[] = { "w83627uhg", "w83667hg", "w83667hg", - "nct6775", - "nct6776", }; static unsigned short force_id; module_param(force_id, ushort, 0); MODULE_PARM_DESC(force_id, "Override the detected device ID"); -static unsigned short fan_debounce; -module_param(fan_debounce, ushort, 0); -MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal"); - #define DRVNAME "w83627ehf" /* @@ -97,8 +89,6 @@ MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal"); #define SIO_W83627UHG_ID 0xa230 #define SIO_W83667HG_ID 0xa510 #define SIO_W83667HG_B_ID 0xb350 -#define SIO_NCT6775_ID 0xb470 -#define SIO_NCT6776_ID 0xc330 #define SIO_ID_MASK 0xFFF0 static inline void @@ -187,11 +177,6 @@ static const u16 W83627EHF_REG_TEMP_CONFIG[] = { 0, 0x152, 0x252, 0 }; #define W83627EHF_REG_DIODE 0x59 #define W83627EHF_REG_SMI_OVT 0x4C -/* NCT6775F has its own fan divider registers */ -#define NCT6775_REG_FANDIV1 0x506 -#define NCT6775_REG_FANDIV2 0x507 -#define NCT6775_REG_FAN_DEBOUNCE 0xf0 - #define W83627EHF_REG_ALARM1 0x459 #define W83627EHF_REG_ALARM2 0x45A #define W83627EHF_REG_ALARM3 0x45B @@ -235,28 +220,6 @@ static const u16 W83627EHF_REG_FAN_STEP_OUTPUT_W83667_B[] static const u16 W83627EHF_REG_TEMP_OFFSET[] = { 0x454, 0x455, 0x456 }; -static const u16 NCT6775_REG_TARGET[] = { 0x101, 0x201, 0x301 }; -static const u16 NCT6775_REG_FAN_MODE[] = { 0x102, 0x202, 0x302 }; -static const u16 NCT6775_REG_FAN_STOP_OUTPUT[] = { 0x105, 0x205, 0x305 }; -static const u16 NCT6775_REG_FAN_START_OUTPUT[] = { 0x106, 0x206, 0x306 }; -static const 
u16 NCT6775_REG_FAN_STOP_TIME[] = { 0x107, 0x207, 0x307 }; -static const u16 NCT6775_REG_PWM[] = { 0x109, 0x209, 0x309 }; -static const u16 NCT6775_REG_FAN_MAX_OUTPUT[] = { 0x10a, 0x20a, 0x30a }; -static const u16 NCT6775_REG_FAN_STEP_OUTPUT[] = { 0x10b, 0x20b, 0x30b }; -static const u16 NCT6775_REG_FAN[] = { 0x630, 0x632, 0x634, 0x636, 0x638 }; -static const u16 NCT6776_REG_FAN_MIN[] = { 0x63a, 0x63c, 0x63e, 0x640, 0x642}; - -static const u16 NCT6775_REG_TEMP[] - = { 0x27, 0x150, 0x250, 0x73, 0x75, 0x77, 0x62b, 0x62c, 0x62d }; -static const u16 NCT6775_REG_TEMP_CONFIG[] - = { 0, 0x152, 0x252, 0, 0, 0, 0x628, 0x629, 0x62A }; -static const u16 NCT6775_REG_TEMP_HYST[] - = { 0x3a, 0x153, 0x253, 0, 0, 0, 0x673, 0x678, 0x67D }; -static const u16 NCT6775_REG_TEMP_OVER[] - = { 0x39, 0x155, 0x255, 0, 0, 0, 0x672, 0x677, 0x67C }; -static const u16 NCT6775_REG_TEMP_SOURCE[] - = { 0x621, 0x622, 0x623, 0x100, 0x200, 0x300, 0x624, 0x625, 0x626 }; - static const char *const w83667hg_b_temp_label[] = { "SYSTIN", "CPUTIN", @@ -268,57 +231,7 @@ static const char *const w83667hg_b_temp_label[] = { "PECI Agent 4" }; -static const char *const nct6775_temp_label[] = { - "", - "SYSTIN", - "CPUTIN", - "AUXTIN", - "AMD SB-TSI", - "PECI Agent 0", - "PECI Agent 1", - "PECI Agent 2", - "PECI Agent 3", - "PECI Agent 4", - "PECI Agent 5", - "PECI Agent 6", - "PECI Agent 7", - "PCH_CHIP_CPU_MAX_TEMP", - "PCH_CHIP_TEMP", - "PCH_CPU_TEMP", - "PCH_MCH_TEMP", - "PCH_DIM0_TEMP", - "PCH_DIM1_TEMP", - "PCH_DIM2_TEMP", - "PCH_DIM3_TEMP" -}; - -static const char *const nct6776_temp_label[] = { - "", - "SYSTIN", - "CPUTIN", - "AUXTIN", - "SMBUSMASTER 0", - "SMBUSMASTER 1", - "SMBUSMASTER 2", - "SMBUSMASTER 3", - "SMBUSMASTER 4", - "SMBUSMASTER 5", - "SMBUSMASTER 6", - "SMBUSMASTER 7", - "PECI Agent 0", - "PECI Agent 1", - "PCH_CHIP_CPU_MAX_TEMP", - "PCH_CHIP_TEMP", - "PCH_CPU_TEMP", - "PCH_MCH_TEMP", - "PCH_DIM0_TEMP", - "PCH_DIM1_TEMP", - "PCH_DIM2_TEMP", - "PCH_DIM3_TEMP", - "BYTE_TEMP" -}; - -#define NUM_REG_TEMP ARRAY_SIZE(NCT6775_REG_TEMP) +#define NUM_REG_TEMP ARRAY_SIZE(W83627EHF_REG_TEMP) static int is_word_sized(u16 reg) { @@ -358,31 +271,6 @@ static unsigned int fan_from_reg8(u16 reg, unsigned int divreg) return 1350000U / (reg << divreg); } -static unsigned int fan_from_reg13(u16 reg, unsigned int divreg) -{ - if ((reg & 0xff1f) == 0xff1f) - return 0; - - reg = (reg & 0x1f) | ((reg & 0xff00) >> 3); - - if (reg == 0) - return 0; - - return 1350000U / reg; -} - -static unsigned int fan_from_reg16(u16 reg, unsigned int divreg) -{ - if (reg == 0 || reg == 0xffff) - return 0; - - /* - * Even though the registers are 16 bit wide, the fan divisor - * still applies. 
- */ - return 1350000U / (reg << divreg); -} - static inline unsigned int div_from_reg(u8 reg) { @@ -418,7 +306,6 @@ struct w83627ehf_data { int addr; /* IO base of hw monitor block */ const char *name; - struct device *hwmon_dev; struct mutex lock; u16 reg_temp[NUM_REG_TEMP]; @@ -428,20 +315,10 @@ struct w83627ehf_data { u8 temp_src[NUM_REG_TEMP]; const char * const *temp_label; - const u16 *REG_PWM; - const u16 *REG_TARGET; - const u16 *REG_FAN; - const u16 *REG_FAN_MIN; - const u16 *REG_FAN_START_OUTPUT; - const u16 *REG_FAN_STOP_OUTPUT; - const u16 *REG_FAN_STOP_TIME; const u16 *REG_FAN_MAX_OUTPUT; const u16 *REG_FAN_STEP_OUTPUT; const u16 *scale_in; - unsigned int (*fan_from_reg)(u16 reg, unsigned int divreg); - unsigned int (*fan_from_reg_min)(u16 reg, unsigned int divreg); - struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ @@ -457,7 +334,6 @@ struct w83627ehf_data { u8 fan_div[5]; u8 has_fan; /* some fan inputs can be disabled */ u8 has_fan_min; /* some fans don't have min register */ - bool has_fan_div; u8 temp_type[3]; s8 temp_offset[3]; s16 temp[9]; @@ -494,6 +370,7 @@ struct w83627ehf_data { u16 have_temp_offset; u8 in6_skip:1; u8 temp3_val_only:1; + u8 have_vid:1; #ifdef CONFIG_PM /* Remember extra register values over suspend/resume */ @@ -584,35 +461,6 @@ static int w83627ehf_write_temp(struct w83627ehf_data *data, u16 reg, } /* This function assumes that the caller holds data->update_lock */ -static void nct6775_write_fan_div(struct w83627ehf_data *data, int nr) -{ - u8 reg; - - switch (nr) { - case 0: - reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV1) & 0x70) - | (data->fan_div[0] & 0x7); - w83627ehf_write_value(data, NCT6775_REG_FANDIV1, reg); - break; - case 1: - reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV1) & 0x7) - | ((data->fan_div[1] << 4) & 0x70); - w83627ehf_write_value(data, NCT6775_REG_FANDIV1, reg); - break; - case 2: - reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV2) & 0x70) - | (data->fan_div[2] & 0x7); - w83627ehf_write_value(data, NCT6775_REG_FANDIV2, reg); - break; - case 3: - reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV2) & 0x7) - | ((data->fan_div[3] << 4) & 0x70); - w83627ehf_write_value(data, NCT6775_REG_FANDIV2, reg); - break; - } -} - -/* This function assumes that the caller holds data->update_lock */ static void w83627ehf_write_fan_div(struct w83627ehf_data *data, int nr) { u8 reg; @@ -663,32 +511,6 @@ static void w83627ehf_write_fan_div(struct w83627ehf_data *data, int nr) } } -static void w83627ehf_write_fan_div_common(struct device *dev, - struct w83627ehf_data *data, int nr) -{ - struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev); - - if (sio_data->kind == nct6776) - ; /* no dividers, do nothing */ - else if (sio_data->kind == nct6775) - nct6775_write_fan_div(data, nr); - else - w83627ehf_write_fan_div(data, nr); -} - -static void nct6775_update_fan_div(struct w83627ehf_data *data) -{ - u8 i; - - i = w83627ehf_read_value(data, NCT6775_REG_FANDIV1); - data->fan_div[0] = i & 0x7; - data->fan_div[1] = (i & 0x70) >> 4; - i = w83627ehf_read_value(data, NCT6775_REG_FANDIV2); - data->fan_div[2] = i & 0x7; - if (data->has_fan & (1<<3)) - data->fan_div[3] = (i & 0x70) >> 4; -} - static void w83627ehf_update_fan_div(struct w83627ehf_data *data) { int i; @@ -714,37 +536,6 @@ static void w83627ehf_update_fan_div(struct w83627ehf_data *data) } } -static void w83627ehf_update_fan_div_common(struct device *dev, - struct w83627ehf_data *data) -{ - 
struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev); - - if (sio_data->kind == nct6776) - ; /* no dividers, do nothing */ - else if (sio_data->kind == nct6775) - nct6775_update_fan_div(data); - else - w83627ehf_update_fan_div(data); -} - -static void nct6775_update_pwm(struct w83627ehf_data *data) -{ - int i; - int pwmcfg, fanmodecfg; - - for (i = 0; i < data->pwm_num; i++) { - pwmcfg = w83627ehf_read_value(data, - W83627EHF_REG_PWM_ENABLE[i]); - fanmodecfg = w83627ehf_read_value(data, - NCT6775_REG_FAN_MODE[i]); - data->pwm_mode[i] = - ((pwmcfg >> W83627EHF_PWM_MODE_SHIFT[i]) & 1) ? 0 : 1; - data->pwm_enable[i] = ((fanmodecfg >> 4) & 7) + 1; - data->tolerance[i] = fanmodecfg & 0x0f; - data->pwm[i] = w83627ehf_read_value(data, data->REG_PWM[i]); - } -} - static void w83627ehf_update_pwm(struct w83627ehf_data *data) { int i; @@ -765,28 +556,15 @@ static void w83627ehf_update_pwm(struct w83627ehf_data *data) ((pwmcfg >> W83627EHF_PWM_MODE_SHIFT[i]) & 1) ? 0 : 1; data->pwm_enable[i] = ((pwmcfg >> W83627EHF_PWM_ENABLE_SHIFT[i]) & 3) + 1; - data->pwm[i] = w83627ehf_read_value(data, data->REG_PWM[i]); + data->pwm[i] = w83627ehf_read_value(data, W83627EHF_REG_PWM[i]); data->tolerance[i] = (tolerance >> (i == 1 ? 4 : 0)) & 0x0f; } } -static void w83627ehf_update_pwm_common(struct device *dev, - struct w83627ehf_data *data) -{ - struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev); - - if (sio_data->kind == nct6775 || sio_data->kind == nct6776) - nct6775_update_pwm(data); - else - w83627ehf_update_pwm(data); -} - static struct w83627ehf_data *w83627ehf_update_device(struct device *dev) { struct w83627ehf_data *data = dev_get_drvdata(dev); - struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev); - int i; mutex_lock(&data->update_lock); @@ -794,7 +572,7 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev) if (time_after(jiffies, data->last_updated + HZ + HZ/2) || !data->valid) { /* Fan clock dividers */ - w83627ehf_update_fan_div_common(dev, data); + w83627ehf_update_fan_div(data); /* Measured voltages and limits */ for (i = 0; i < data->in_num; i++) { @@ -816,40 +594,36 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev) if (!(data->has_fan & (1 << i))) continue; - reg = w83627ehf_read_value(data, data->REG_FAN[i]); - data->rpm[i] = data->fan_from_reg(reg, - data->fan_div[i]); + reg = w83627ehf_read_value(data, W83627EHF_REG_FAN[i]); + data->rpm[i] = fan_from_reg8(reg, data->fan_div[i]); if (data->has_fan_min & (1 << i)) data->fan_min[i] = w83627ehf_read_value(data, - data->REG_FAN_MIN[i]); + W83627EHF_REG_FAN_MIN[i]); /* * If we failed to measure the fan speed and clock * divider can be increased, let's try that for next * time */ - if (data->has_fan_div - && (reg >= 0xff || (sio_data->kind == nct6775 - && reg == 0x00)) - && data->fan_div[i] < 0x07) { + if (reg >= 0xff && data->fan_div[i] < 0x07) { dev_dbg(dev, "Increasing fan%d clock divider from %u to %u\n", i + 1, div_from_reg(data->fan_div[i]), div_from_reg(data->fan_div[i] + 1)); data->fan_div[i]++; - w83627ehf_write_fan_div_common(dev, data, i); + w83627ehf_write_fan_div(data, i); /* Preserve min limit if possible */ if ((data->has_fan_min & (1 << i)) && data->fan_min[i] >= 2 && data->fan_min[i] != 255) w83627ehf_write_value(data, - data->REG_FAN_MIN[i], + W83627EHF_REG_FAN_MIN[i], (data->fan_min[i] /= 2)); } } - w83627ehf_update_pwm_common(dev, data); + w83627ehf_update_pwm(data); for (i = 0; i < data->pwm_num; i++) { if (!(data->has_fan & (1 << i))) @@ -857,13 +631,13 @@ 
static struct w83627ehf_data *w83627ehf_update_device(struct device *dev) data->fan_start_output[i] = w83627ehf_read_value(data, - data->REG_FAN_START_OUTPUT[i]); + W83627EHF_REG_FAN_START_OUTPUT[i]); data->fan_stop_output[i] = w83627ehf_read_value(data, - data->REG_FAN_STOP_OUTPUT[i]); + W83627EHF_REG_FAN_STOP_OUTPUT[i]); data->fan_stop_time[i] = w83627ehf_read_value(data, - data->REG_FAN_STOP_TIME[i]); + W83627EHF_REG_FAN_STOP_TIME[i]); if (data->REG_FAN_MAX_OUTPUT && data->REG_FAN_MAX_OUTPUT[i] != 0xff) @@ -879,7 +653,7 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev) data->target_temp[i] = w83627ehf_read_value(data, - data->REG_TARGET[i]) & + W83627EHF_REG_TARGET[i]) & (data->pwm_mode[i] == 1 ? 0x7f : 0xff); } @@ -923,199 +697,61 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev) return data; } -/* - * Sysfs callback functions - */ -#define show_in_reg(reg) \ -static ssize_t \ -show_##reg(struct device *dev, struct device_attribute *attr, \ - char *buf) \ -{ \ - struct w83627ehf_data *data = w83627ehf_update_device(dev); \ - struct sensor_device_attribute *sensor_attr = \ - to_sensor_dev_attr(attr); \ - int nr = sensor_attr->index; \ - return sprintf(buf, "%ld\n", in_from_reg(data->reg[nr], nr, \ - data->scale_in)); \ -} -show_in_reg(in) -show_in_reg(in_min) -show_in_reg(in_max) - #define store_in_reg(REG, reg) \ -static ssize_t \ -store_in_##reg(struct device *dev, struct device_attribute *attr, \ - const char *buf, size_t count) \ +static int \ +store_in_##reg(struct device *dev, struct w83627ehf_data *data, int channel, \ + long val) \ { \ - struct w83627ehf_data *data = dev_get_drvdata(dev); \ - struct sensor_device_attribute *sensor_attr = \ - to_sensor_dev_attr(attr); \ - int nr = sensor_attr->index; \ - unsigned long val; \ - int err; \ - err = kstrtoul(buf, 10, &val); \ - if (err < 0) \ - return err; \ + if (val < 0) \ + return -EINVAL; \ mutex_lock(&data->update_lock); \ - data->in_##reg[nr] = in_to_reg(val, nr, data->scale_in); \ - w83627ehf_write_value(data, W83627EHF_REG_IN_##REG(nr), \ - data->in_##reg[nr]); \ + data->in_##reg[channel] = in_to_reg(val, channel, data->scale_in); \ + w83627ehf_write_value(data, W83627EHF_REG_IN_##REG(channel), \ + data->in_##reg[channel]); \ mutex_unlock(&data->update_lock); \ - return count; \ + return 0; \ } store_in_reg(MIN, min) store_in_reg(MAX, max) -static ssize_t show_alarm(struct device *dev, struct device_attribute *attr, - char *buf) +static int +store_fan_min(struct device *dev, struct w83627ehf_data *data, int channel, + long val) { - struct w83627ehf_data *data = w83627ehf_update_device(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - int nr = sensor_attr->index; - return sprintf(buf, "%u\n", (data->alarms >> nr) & 0x01); -} - -static struct sensor_device_attribute sda_in_input[] = { - SENSOR_ATTR(in0_input, S_IRUGO, show_in, NULL, 0), - SENSOR_ATTR(in1_input, S_IRUGO, show_in, NULL, 1), - SENSOR_ATTR(in2_input, S_IRUGO, show_in, NULL, 2), - SENSOR_ATTR(in3_input, S_IRUGO, show_in, NULL, 3), - SENSOR_ATTR(in4_input, S_IRUGO, show_in, NULL, 4), - SENSOR_ATTR(in5_input, S_IRUGO, show_in, NULL, 5), - SENSOR_ATTR(in6_input, S_IRUGO, show_in, NULL, 6), - SENSOR_ATTR(in7_input, S_IRUGO, show_in, NULL, 7), - SENSOR_ATTR(in8_input, S_IRUGO, show_in, NULL, 8), - SENSOR_ATTR(in9_input, S_IRUGO, show_in, NULL, 9), -}; - -static struct sensor_device_attribute sda_in_alarm[] = { - SENSOR_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0), - 
SENSOR_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1), - SENSOR_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2), - SENSOR_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3), - SENSOR_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8), - SENSOR_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 21), - SENSOR_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 20), - SENSOR_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 16), - SENSOR_ATTR(in8_alarm, S_IRUGO, show_alarm, NULL, 17), - SENSOR_ATTR(in9_alarm, S_IRUGO, show_alarm, NULL, 19), -}; - -static struct sensor_device_attribute sda_in_min[] = { - SENSOR_ATTR(in0_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 0), - SENSOR_ATTR(in1_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 1), - SENSOR_ATTR(in2_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 2), - SENSOR_ATTR(in3_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 3), - SENSOR_ATTR(in4_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 4), - SENSOR_ATTR(in5_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 5), - SENSOR_ATTR(in6_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 6), - SENSOR_ATTR(in7_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 7), - SENSOR_ATTR(in8_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 8), - SENSOR_ATTR(in9_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 9), -}; - -static struct sensor_device_attribute sda_in_max[] = { - SENSOR_ATTR(in0_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 0), - SENSOR_ATTR(in1_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 1), - SENSOR_ATTR(in2_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 2), - SENSOR_ATTR(in3_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 3), - SENSOR_ATTR(in4_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 4), - SENSOR_ATTR(in5_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 5), - SENSOR_ATTR(in6_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 6), - SENSOR_ATTR(in7_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 7), - SENSOR_ATTR(in8_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 8), - SENSOR_ATTR(in9_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 9), -}; - -static ssize_t -show_fan(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct w83627ehf_data *data = w83627ehf_update_device(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - int nr = sensor_attr->index; - return sprintf(buf, "%d\n", data->rpm[nr]); -} - -static ssize_t -show_fan_min(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct w83627ehf_data *data = w83627ehf_update_device(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - int nr = sensor_attr->index; - return sprintf(buf, "%d\n", - data->fan_from_reg_min(data->fan_min[nr], - data->fan_div[nr])); -} - -static ssize_t -show_fan_div(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct w83627ehf_data *data = w83627ehf_update_device(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - int nr = sensor_attr->index; - return sprintf(buf, "%u\n", div_from_reg(data->fan_div[nr])); -} - -static ssize_t -store_fan_min(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct w83627ehf_data *data = dev_get_drvdata(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - int nr = sensor_attr->index; - unsigned long val; - int err; unsigned int reg; u8 new_div; - err = kstrtoul(buf, 10, &val); - if (err < 0) - return err; + if (val < 0) + return -EINVAL; 
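As context for the rewritten limit logic below: the tachometer reading obeys rpm = 1350000 / (reg << divreg), as in fan_from_reg8() above, so storing a minimum RPM means picking the clock divider that brings the count into the 8-bit register. A minimal standalone C sketch of that selection, for illustration only; the 192 threshold is an assumption about the driver's target window, and the clamping of out-of-range limits handled in the surrounding hunk is omitted:

#include <stdio.h>

/*
 * Sketch of the divider selection: halve the 1350000/rpm count until
 * it fits the 8-bit register, counting how many halvings were needed.
 * The 192 upper bound is an assumed target window, not taken verbatim
 * from the patch.
 */
static unsigned int rpm_to_reg(unsigned long rpm, unsigned int *div)
{
	unsigned long reg = 1350000UL / rpm;

	*div = 0;
	while (reg > 192 && *div < 7) {	/* divider 128 == (1 << 7) is the max */
		reg >>= 1;
		(*div)++;
	}
	return (unsigned int)reg;
}

int main(void)
{
	unsigned int div;
	unsigned int reg = rpm_to_reg(1200, &div);	/* hypothetical 1200 RPM limit */

	printf("reg=%u div=%u -> reads back as %u RPM\n",
	       reg, div, 1350000U / (reg << div));
	return 0;
}

With a 1200 RPM limit this yields reg=140, div=3, which reads back as about 1205 RPM, showing the quantization inherent in the 8-bit count.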
mutex_lock(&data->update_lock); - if (!data->has_fan_div) { - /* - * Only NCT6776F for now, so we know that this is a 13 bit - * register - */ - if (!val) { - val = 0xff1f; - } else { - if (val > 1350000U) - val = 135000U; - val = 1350000U / val; - val = (val & 0x1f) | ((val << 3) & 0xff00); - } - data->fan_min[nr] = val; - goto done; /* Leave fan divider alone */ - } if (!val) { /* No min limit, alarm disabled */ - data->fan_min[nr] = 255; - new_div = data->fan_div[nr]; /* No change */ - dev_info(dev, "fan%u low limit and alarm disabled\n", nr + 1); + data->fan_min[channel] = 255; + new_div = data->fan_div[channel]; /* No change */ + dev_info(dev, "fan%u low limit and alarm disabled\n", + channel + 1); } else if ((reg = 1350000U / val) >= 128 * 255) { /* * Speed below this value cannot possibly be represented, * even with the highest divider (128) */ - data->fan_min[nr] = 254; + data->fan_min[channel] = 254; new_div = 7; /* 128 == (1 << 7) */ dev_warn(dev, "fan%u low limit %lu below minimum %u, set to minimum\n", - nr + 1, val, data->fan_from_reg_min(254, 7)); + channel + 1, val, fan_from_reg8(254, 7)); } else if (!reg) { /* * Speed above this value cannot possibly be represented, * even with the lowest divider (1) */ - data->fan_min[nr] = 1; + data->fan_min[channel] = 1; new_div = 0; /* 1 == (1 << 0) */ dev_warn(dev, "fan%u low limit %lu above maximum %u, set to maximum\n", - nr + 1, val, data->fan_from_reg_min(1, 0)); + channel + 1, val, fan_from_reg8(1, 0)); } else { /* * Automatically pick the best divider, i.e. the one such @@ -1127,362 +763,117 @@ store_fan_min(struct device *dev, struct device_attribute *attr, reg >>= 1; new_div++; } - data->fan_min[nr] = reg; + data->fan_min[channel] = reg; } /* * Write both the fan clock divider (if it changed) and the new * fan min (unconditionally) */ - if (new_div != data->fan_div[nr]) { + if (new_div != data->fan_div[channel]) { dev_dbg(dev, "fan%u clock divider changed from %u to %u\n", - nr + 1, div_from_reg(data->fan_div[nr]), + channel + 1, div_from_reg(data->fan_div[channel]), div_from_reg(new_div)); - data->fan_div[nr] = new_div; - w83627ehf_write_fan_div_common(dev, data, nr); + data->fan_div[channel] = new_div; + w83627ehf_write_fan_div(data, channel); /* Give the chip time to sample a new speed value */ data->last_updated = jiffies; } -done: - w83627ehf_write_value(data, data->REG_FAN_MIN[nr], - data->fan_min[nr]); - mutex_unlock(&data->update_lock); - return count; -} - -static struct sensor_device_attribute sda_fan_input[] = { - SENSOR_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0), - SENSOR_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1), - SENSOR_ATTR(fan3_input, S_IRUGO, show_fan, NULL, 2), - SENSOR_ATTR(fan4_input, S_IRUGO, show_fan, NULL, 3), - SENSOR_ATTR(fan5_input, S_IRUGO, show_fan, NULL, 4), -}; - -static struct sensor_device_attribute sda_fan_alarm[] = { - SENSOR_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6), - SENSOR_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7), - SENSOR_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL, 11), - SENSOR_ATTR(fan4_alarm, S_IRUGO, show_alarm, NULL, 10), - SENSOR_ATTR(fan5_alarm, S_IRUGO, show_alarm, NULL, 23), -}; - -static struct sensor_device_attribute sda_fan_min[] = { - SENSOR_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan_min, - store_fan_min, 0), - SENSOR_ATTR(fan2_min, S_IWUSR | S_IRUGO, show_fan_min, - store_fan_min, 1), - SENSOR_ATTR(fan3_min, S_IWUSR | S_IRUGO, show_fan_min, - store_fan_min, 2), - SENSOR_ATTR(fan4_min, S_IWUSR | S_IRUGO, show_fan_min, - store_fan_min, 3), - 
SENSOR_ATTR(fan5_min, S_IWUSR | S_IRUGO, show_fan_min, - store_fan_min, 4), -}; - -static struct sensor_device_attribute sda_fan_div[] = { - SENSOR_ATTR(fan1_div, S_IRUGO, show_fan_div, NULL, 0), - SENSOR_ATTR(fan2_div, S_IRUGO, show_fan_div, NULL, 1), - SENSOR_ATTR(fan3_div, S_IRUGO, show_fan_div, NULL, 2), - SENSOR_ATTR(fan4_div, S_IRUGO, show_fan_div, NULL, 3), - SENSOR_ATTR(fan5_div, S_IRUGO, show_fan_div, NULL, 4), -}; - -static ssize_t -show_temp_label(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct w83627ehf_data *data = w83627ehf_update_device(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - int nr = sensor_attr->index; - return sprintf(buf, "%s\n", data->temp_label[data->temp_src[nr]]); -} + w83627ehf_write_value(data, W83627EHF_REG_FAN_MIN[channel], + data->fan_min[channel]); + mutex_unlock(&data->update_lock); -#define show_temp_reg(addr, reg) \ -static ssize_t \ -show_##reg(struct device *dev, struct device_attribute *attr, \ - char *buf) \ -{ \ - struct w83627ehf_data *data = w83627ehf_update_device(dev); \ - struct sensor_device_attribute *sensor_attr = \ - to_sensor_dev_attr(attr); \ - int nr = sensor_attr->index; \ - return sprintf(buf, "%d\n", LM75_TEMP_FROM_REG(data->reg[nr])); \ + return 0; } -show_temp_reg(reg_temp, temp); -show_temp_reg(reg_temp_over, temp_max); -show_temp_reg(reg_temp_hyst, temp_max_hyst); #define store_temp_reg(addr, reg) \ -static ssize_t \ -store_##reg(struct device *dev, struct device_attribute *attr, \ - const char *buf, size_t count) \ +static int \ +store_##reg(struct device *dev, struct w83627ehf_data *data, int channel, \ + long val) \ { \ - struct w83627ehf_data *data = dev_get_drvdata(dev); \ - struct sensor_device_attribute *sensor_attr = \ - to_sensor_dev_attr(attr); \ - int nr = sensor_attr->index; \ - int err; \ - long val; \ - err = kstrtol(buf, 10, &val); \ - if (err < 0) \ - return err; \ mutex_lock(&data->update_lock); \ - data->reg[nr] = LM75_TEMP_TO_REG(val); \ - w83627ehf_write_temp(data, data->addr[nr], data->reg[nr]); \ + data->reg[channel] = LM75_TEMP_TO_REG(val); \ + w83627ehf_write_temp(data, data->addr[channel], data->reg[channel]); \ mutex_unlock(&data->update_lock); \ - return count; \ + return 0; \ } store_temp_reg(reg_temp_over, temp_max); store_temp_reg(reg_temp_hyst, temp_max_hyst); -static ssize_t -show_temp_offset(struct device *dev, struct device_attribute *attr, char *buf) +static int +store_temp_offset(struct device *dev, struct w83627ehf_data *data, int channel, + long val) { - struct w83627ehf_data *data = w83627ehf_update_device(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - - return sprintf(buf, "%d\n", - data->temp_offset[sensor_attr->index] * 1000); -} - -static ssize_t -store_temp_offset(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct w83627ehf_data *data = dev_get_drvdata(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - int nr = sensor_attr->index; - long val; - int err; - - err = kstrtol(buf, 10, &val); - if (err < 0) - return err; - val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127); mutex_lock(&data->update_lock); - data->temp_offset[nr] = val; - w83627ehf_write_value(data, W83627EHF_REG_TEMP_OFFSET[nr], val); + data->temp_offset[channel] = val; + w83627ehf_write_value(data, W83627EHF_REG_TEMP_OFFSET[channel], val); mutex_unlock(&data->update_lock); - return count; -} - -static ssize_t -show_temp_type(struct device *dev, 
struct device_attribute *attr, char *buf) -{ - struct w83627ehf_data *data = w83627ehf_update_device(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - int nr = sensor_attr->index; - return sprintf(buf, "%d\n", (int)data->temp_type[nr]); -} - -static struct sensor_device_attribute sda_temp_input[] = { - SENSOR_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0), - SENSOR_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1), - SENSOR_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2), - SENSOR_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 3), - SENSOR_ATTR(temp5_input, S_IRUGO, show_temp, NULL, 4), - SENSOR_ATTR(temp6_input, S_IRUGO, show_temp, NULL, 5), - SENSOR_ATTR(temp7_input, S_IRUGO, show_temp, NULL, 6), - SENSOR_ATTR(temp8_input, S_IRUGO, show_temp, NULL, 7), - SENSOR_ATTR(temp9_input, S_IRUGO, show_temp, NULL, 8), -}; - -static struct sensor_device_attribute sda_temp_label[] = { - SENSOR_ATTR(temp1_label, S_IRUGO, show_temp_label, NULL, 0), - SENSOR_ATTR(temp2_label, S_IRUGO, show_temp_label, NULL, 1), - SENSOR_ATTR(temp3_label, S_IRUGO, show_temp_label, NULL, 2), - SENSOR_ATTR(temp4_label, S_IRUGO, show_temp_label, NULL, 3), - SENSOR_ATTR(temp5_label, S_IRUGO, show_temp_label, NULL, 4), - SENSOR_ATTR(temp6_label, S_IRUGO, show_temp_label, NULL, 5), - SENSOR_ATTR(temp7_label, S_IRUGO, show_temp_label, NULL, 6), - SENSOR_ATTR(temp8_label, S_IRUGO, show_temp_label, NULL, 7), - SENSOR_ATTR(temp9_label, S_IRUGO, show_temp_label, NULL, 8), -}; - -static struct sensor_device_attribute sda_temp_max[] = { - SENSOR_ATTR(temp1_max, S_IRUGO | S_IWUSR, show_temp_max, - store_temp_max, 0), - SENSOR_ATTR(temp2_max, S_IRUGO | S_IWUSR, show_temp_max, - store_temp_max, 1), - SENSOR_ATTR(temp3_max, S_IRUGO | S_IWUSR, show_temp_max, - store_temp_max, 2), - SENSOR_ATTR(temp4_max, S_IRUGO | S_IWUSR, show_temp_max, - store_temp_max, 3), - SENSOR_ATTR(temp5_max, S_IRUGO | S_IWUSR, show_temp_max, - store_temp_max, 4), - SENSOR_ATTR(temp6_max, S_IRUGO | S_IWUSR, show_temp_max, - store_temp_max, 5), - SENSOR_ATTR(temp7_max, S_IRUGO | S_IWUSR, show_temp_max, - store_temp_max, 6), - SENSOR_ATTR(temp8_max, S_IRUGO | S_IWUSR, show_temp_max, - store_temp_max, 7), - SENSOR_ATTR(temp9_max, S_IRUGO | S_IWUSR, show_temp_max, - store_temp_max, 8), -}; - -static struct sensor_device_attribute sda_temp_max_hyst[] = { - SENSOR_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, - store_temp_max_hyst, 0), - SENSOR_ATTR(temp2_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, - store_temp_max_hyst, 1), - SENSOR_ATTR(temp3_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, - store_temp_max_hyst, 2), - SENSOR_ATTR(temp4_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, - store_temp_max_hyst, 3), - SENSOR_ATTR(temp5_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, - store_temp_max_hyst, 4), - SENSOR_ATTR(temp6_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, - store_temp_max_hyst, 5), - SENSOR_ATTR(temp7_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, - store_temp_max_hyst, 6), - SENSOR_ATTR(temp8_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, - store_temp_max_hyst, 7), - SENSOR_ATTR(temp9_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, - store_temp_max_hyst, 8), -}; - -static struct sensor_device_attribute sda_temp_alarm[] = { - SENSOR_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4), - SENSOR_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 5), - SENSOR_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 13), -}; - -static struct sensor_device_attribute sda_temp_type[] = { - SENSOR_ATTR(temp1_type, S_IRUGO, 
show_temp_type, NULL, 0), - SENSOR_ATTR(temp2_type, S_IRUGO, show_temp_type, NULL, 1), - SENSOR_ATTR(temp3_type, S_IRUGO, show_temp_type, NULL, 2), -}; - -static struct sensor_device_attribute sda_temp_offset[] = { - SENSOR_ATTR(temp1_offset, S_IRUGO | S_IWUSR, show_temp_offset, - store_temp_offset, 0), - SENSOR_ATTR(temp2_offset, S_IRUGO | S_IWUSR, show_temp_offset, - store_temp_offset, 1), - SENSOR_ATTR(temp3_offset, S_IRUGO | S_IWUSR, show_temp_offset, - store_temp_offset, 2), -}; - -#define show_pwm_reg(reg) \ -static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \ - char *buf) \ -{ \ - struct w83627ehf_data *data = w83627ehf_update_device(dev); \ - struct sensor_device_attribute *sensor_attr = \ - to_sensor_dev_attr(attr); \ - int nr = sensor_attr->index; \ - return sprintf(buf, "%d\n", data->reg[nr]); \ + return 0; } -show_pwm_reg(pwm_mode) -show_pwm_reg(pwm_enable) -show_pwm_reg(pwm) - -static ssize_t -store_pwm_mode(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +static int +store_pwm_mode(struct device *dev, struct w83627ehf_data *data, int channel, + long val) { - struct w83627ehf_data *data = dev_get_drvdata(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev); - int nr = sensor_attr->index; - unsigned long val; - int err; u16 reg; - err = kstrtoul(buf, 10, &val); - if (err < 0) - return err; - - if (val > 1) - return -EINVAL; - - /* On NCT67766F, DC mode is only supported for pwm1 */ - if (sio_data->kind == nct6776 && nr && val != 1) + if (val < 0 || val > 1) return -EINVAL; mutex_lock(&data->update_lock); - reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]); - data->pwm_mode[nr] = val; - reg &= ~(1 << W83627EHF_PWM_MODE_SHIFT[nr]); + reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[channel]); + data->pwm_mode[channel] = val; + reg &= ~(1 << W83627EHF_PWM_MODE_SHIFT[channel]); if (!val) - reg |= 1 << W83627EHF_PWM_MODE_SHIFT[nr]; - w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[nr], reg); + reg |= 1 << W83627EHF_PWM_MODE_SHIFT[channel]; + w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[channel], reg); mutex_unlock(&data->update_lock); - return count; + return 0; } -static ssize_t -store_pwm(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +static int +store_pwm(struct device *dev, struct w83627ehf_data *data, int channel, + long val) { - struct w83627ehf_data *data = dev_get_drvdata(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - int nr = sensor_attr->index; - unsigned long val; - int err; - - err = kstrtoul(buf, 10, &val); - if (err < 0) - return err; - val = clamp_val(val, 0, 255); mutex_lock(&data->update_lock); - data->pwm[nr] = val; - w83627ehf_write_value(data, data->REG_PWM[nr], val); + data->pwm[channel] = val; + w83627ehf_write_value(data, W83627EHF_REG_PWM[channel], val); mutex_unlock(&data->update_lock); - return count; + return 0; } -static ssize_t -store_pwm_enable(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +static int +store_pwm_enable(struct device *dev, struct w83627ehf_data *data, int channel, + long val) { - struct w83627ehf_data *data = dev_get_drvdata(dev); - struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - int nr = sensor_attr->index; - unsigned long val; - int err; u16 reg; - err = 
kstrtoul(buf, 10, &val); - if (err < 0) - return err; - - if (!val || (val > 4 && val != data->pwm_enable_orig[nr])) - return -EINVAL; - /* SmartFan III mode is not supported on NCT6776F */ - if (sio_data->kind == nct6776 && val == 4) + if (!val || val < 0 || + (val > 4 && val != data->pwm_enable_orig[channel])) return -EINVAL; mutex_lock(&data->update_lock); - data->pwm_enable[nr] = val; - if (sio_data->kind == nct6775 || sio_data->kind == nct6776) { - reg = w83627ehf_read_value(data, - NCT6775_REG_FAN_MODE[nr]); - reg &= 0x0f; - reg |= (val - 1) << 4; - w83627ehf_write_value(data, - NCT6775_REG_FAN_MODE[nr], reg); - } else { - reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]); - reg &= ~(0x03 << W83627EHF_PWM_ENABLE_SHIFT[nr]); - reg |= (val - 1) << W83627EHF_PWM_ENABLE_SHIFT[nr]; - w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[nr], reg); - } + data->pwm_enable[channel] = val; + reg = w83627ehf_read_value(data, + W83627EHF_REG_PWM_ENABLE[channel]); + reg &= ~(0x03 << W83627EHF_PWM_ENABLE_SHIFT[channel]); + reg |= (val - 1) << W83627EHF_PWM_ENABLE_SHIFT[channel]; + w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[channel], + reg); mutex_unlock(&data->update_lock); - return count; + return 0; } - #define show_tol_temp(reg) \ static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ - struct w83627ehf_data *data = w83627ehf_update_device(dev); \ + struct w83627ehf_data *data = w83627ehf_update_device(dev->parent); \ struct sensor_device_attribute *sensor_attr = \ to_sensor_dev_attr(attr); \ int nr = sensor_attr->index; \ @@ -1510,7 +901,7 @@ store_target_temp(struct device *dev, struct device_attribute *attr, mutex_lock(&data->update_lock); data->target_temp[nr] = val; - w83627ehf_write_value(data, data->REG_TARGET[nr], val); + w83627ehf_write_value(data, W83627EHF_REG_TARGET[nr], val); mutex_unlock(&data->update_lock); return count; } @@ -1520,7 +911,6 @@ store_tolerance(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct w83627ehf_data *data = dev_get_drvdata(dev); - struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev); struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; u16 reg; @@ -1535,76 +925,34 @@ store_tolerance(struct device *dev, struct device_attribute *attr, val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 15); mutex_lock(&data->update_lock); - if (sio_data->kind == nct6775 || sio_data->kind == nct6776) { - /* Limit tolerance further for NCT6776F */ - if (sio_data->kind == nct6776 && val > 7) - val = 7; - reg = w83627ehf_read_value(data, NCT6775_REG_FAN_MODE[nr]); + reg = w83627ehf_read_value(data, W83627EHF_REG_TOLERANCE[nr]); + if (nr == 1) + reg = (reg & 0x0f) | (val << 4); + else reg = (reg & 0xf0) | val; - w83627ehf_write_value(data, NCT6775_REG_FAN_MODE[nr], reg); - } else { - reg = w83627ehf_read_value(data, W83627EHF_REG_TOLERANCE[nr]); - if (nr == 1) - reg = (reg & 0x0f) | (val << 4); - else - reg = (reg & 0xf0) | val; - w83627ehf_write_value(data, W83627EHF_REG_TOLERANCE[nr], reg); - } + w83627ehf_write_value(data, W83627EHF_REG_TOLERANCE[nr], reg); data->tolerance[nr] = val; mutex_unlock(&data->update_lock); return count; } -static struct sensor_device_attribute sda_pwm[] = { - SENSOR_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 0), - SENSOR_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 1), - SENSOR_ATTR(pwm3, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 2), - SENSOR_ATTR(pwm4, S_IWUSR | S_IRUGO, show_pwm, 
store_pwm, 3), -}; - -static struct sensor_device_attribute sda_pwm_mode[] = { - SENSOR_ATTR(pwm1_mode, S_IWUSR | S_IRUGO, show_pwm_mode, - store_pwm_mode, 0), - SENSOR_ATTR(pwm2_mode, S_IWUSR | S_IRUGO, show_pwm_mode, - store_pwm_mode, 1), - SENSOR_ATTR(pwm3_mode, S_IWUSR | S_IRUGO, show_pwm_mode, - store_pwm_mode, 2), - SENSOR_ATTR(pwm4_mode, S_IWUSR | S_IRUGO, show_pwm_mode, - store_pwm_mode, 3), -}; - -static struct sensor_device_attribute sda_pwm_enable[] = { - SENSOR_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, show_pwm_enable, - store_pwm_enable, 0), - SENSOR_ATTR(pwm2_enable, S_IWUSR | S_IRUGO, show_pwm_enable, - store_pwm_enable, 1), - SENSOR_ATTR(pwm3_enable, S_IWUSR | S_IRUGO, show_pwm_enable, - store_pwm_enable, 2), - SENSOR_ATTR(pwm4_enable, S_IWUSR | S_IRUGO, show_pwm_enable, - store_pwm_enable, 3), -}; - -static struct sensor_device_attribute sda_target_temp[] = { - SENSOR_ATTR(pwm1_target, S_IWUSR | S_IRUGO, show_target_temp, - store_target_temp, 0), - SENSOR_ATTR(pwm2_target, S_IWUSR | S_IRUGO, show_target_temp, - store_target_temp, 1), - SENSOR_ATTR(pwm3_target, S_IWUSR | S_IRUGO, show_target_temp, - store_target_temp, 2), - SENSOR_ATTR(pwm4_target, S_IWUSR | S_IRUGO, show_target_temp, - store_target_temp, 3), -}; - -static struct sensor_device_attribute sda_tolerance[] = { - SENSOR_ATTR(pwm1_tolerance, S_IWUSR | S_IRUGO, show_tolerance, - store_tolerance, 0), - SENSOR_ATTR(pwm2_tolerance, S_IWUSR | S_IRUGO, show_tolerance, - store_tolerance, 1), - SENSOR_ATTR(pwm3_tolerance, S_IWUSR | S_IRUGO, show_tolerance, - store_tolerance, 2), - SENSOR_ATTR(pwm4_tolerance, S_IWUSR | S_IRUGO, show_tolerance, - store_tolerance, 3), -}; +static SENSOR_DEVICE_ATTR(pwm1_target, 0644, show_target_temp, + store_target_temp, 0); +static SENSOR_DEVICE_ATTR(pwm2_target, 0644, show_target_temp, + store_target_temp, 1); +static SENSOR_DEVICE_ATTR(pwm3_target, 0644, show_target_temp, + store_target_temp, 2); +static SENSOR_DEVICE_ATTR(pwm4_target, 0644, show_target_temp, + store_target_temp, 3); + +static SENSOR_DEVICE_ATTR(pwm1_tolerance, 0644, show_tolerance, + store_tolerance, 0); +static SENSOR_DEVICE_ATTR(pwm2_tolerance, 0644, show_tolerance, + store_tolerance, 1); +static SENSOR_DEVICE_ATTR(pwm3_tolerance, 0644, show_tolerance, + store_tolerance, 2); +static SENSOR_DEVICE_ATTR(pwm4_tolerance, 0644, show_tolerance, + store_tolerance, 3); /* Smart Fan registers */ @@ -1612,7 +960,7 @@ static struct sensor_device_attribute sda_tolerance[] = { static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ - struct w83627ehf_data *data = w83627ehf_update_device(dev); \ + struct w83627ehf_data *data = w83627ehf_update_device(dev->parent); \ struct sensor_device_attribute *sensor_attr = \ to_sensor_dev_attr(attr); \ int nr = sensor_attr->index; \ @@ -1634,21 +982,21 @@ store_##reg(struct device *dev, struct device_attribute *attr, \ val = clamp_val(val, 1, 255); \ mutex_lock(&data->update_lock); \ data->reg[nr] = val; \ - w83627ehf_write_value(data, data->REG_##REG[nr], val); \ + w83627ehf_write_value(data, REG[nr], val); \ mutex_unlock(&data->update_lock); \ return count; \ } -fan_functions(fan_start_output, FAN_START_OUTPUT) -fan_functions(fan_stop_output, FAN_STOP_OUTPUT) -fan_functions(fan_max_output, FAN_MAX_OUTPUT) -fan_functions(fan_step_output, FAN_STEP_OUTPUT) +fan_functions(fan_start_output, W83627EHF_REG_FAN_START_OUTPUT) +fan_functions(fan_stop_output, W83627EHF_REG_FAN_STOP_OUTPUT) +fan_functions(fan_max_output, data->REG_FAN_MAX_OUTPUT) 
+fan_functions(fan_step_output, data->REG_FAN_STEP_OUTPUT) #define fan_time_functions(reg, REG) \ static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ - struct w83627ehf_data *data = w83627ehf_update_device(dev); \ + struct w83627ehf_data *data = w83627ehf_update_device(dev->parent); \ struct sensor_device_attribute *sensor_attr = \ to_sensor_dev_attr(attr); \ int nr = sensor_attr->index; \ @@ -1673,78 +1021,61 @@ store_##reg(struct device *dev, struct device_attribute *attr, \ val = step_time_to_reg(val, data->pwm_mode[nr]); \ mutex_lock(&data->update_lock); \ data->reg[nr] = val; \ - w83627ehf_write_value(data, data->REG_##REG[nr], val); \ + w83627ehf_write_value(data, REG[nr], val); \ mutex_unlock(&data->update_lock); \ return count; \ } \ -fan_time_functions(fan_stop_time, FAN_STOP_TIME) - -static ssize_t name_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct w83627ehf_data *data = dev_get_drvdata(dev); - - return sprintf(buf, "%s\n", data->name); -} -static DEVICE_ATTR_RO(name); - -static struct sensor_device_attribute sda_sf3_arrays_fan4[] = { - SENSOR_ATTR(pwm4_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time, - store_fan_stop_time, 3), - SENSOR_ATTR(pwm4_start_output, S_IWUSR | S_IRUGO, show_fan_start_output, - store_fan_start_output, 3), - SENSOR_ATTR(pwm4_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output, - store_fan_stop_output, 3), - SENSOR_ATTR(pwm4_max_output, S_IWUSR | S_IRUGO, show_fan_max_output, - store_fan_max_output, 3), - SENSOR_ATTR(pwm4_step_output, S_IWUSR | S_IRUGO, show_fan_step_output, - store_fan_step_output, 3), -}; - -static struct sensor_device_attribute sda_sf3_arrays_fan3[] = { - SENSOR_ATTR(pwm3_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time, - store_fan_stop_time, 2), - SENSOR_ATTR(pwm3_start_output, S_IWUSR | S_IRUGO, show_fan_start_output, - store_fan_start_output, 2), - SENSOR_ATTR(pwm3_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output, - store_fan_stop_output, 2), -}; - -static struct sensor_device_attribute sda_sf3_arrays[] = { - SENSOR_ATTR(pwm1_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time, - store_fan_stop_time, 0), - SENSOR_ATTR(pwm2_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time, - store_fan_stop_time, 1), - SENSOR_ATTR(pwm1_start_output, S_IWUSR | S_IRUGO, show_fan_start_output, - store_fan_start_output, 0), - SENSOR_ATTR(pwm2_start_output, S_IWUSR | S_IRUGO, show_fan_start_output, - store_fan_start_output, 1), - SENSOR_ATTR(pwm1_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output, - store_fan_stop_output, 0), - SENSOR_ATTR(pwm2_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output, - store_fan_stop_output, 1), -}; +fan_time_functions(fan_stop_time, W83627EHF_REG_FAN_STOP_TIME) + +static SENSOR_DEVICE_ATTR(pwm4_stop_time, 0644, show_fan_stop_time, + store_fan_stop_time, 3); +static SENSOR_DEVICE_ATTR(pwm4_start_output, 0644, show_fan_start_output, + store_fan_start_output, 3); +static SENSOR_DEVICE_ATTR(pwm4_stop_output, 0644, show_fan_stop_output, + store_fan_stop_output, 3); +static SENSOR_DEVICE_ATTR(pwm4_max_output, 0644, show_fan_max_output, + store_fan_max_output, 3); +static SENSOR_DEVICE_ATTR(pwm4_step_output, 0644, show_fan_step_output, + store_fan_step_output, 3); + +static SENSOR_DEVICE_ATTR(pwm3_stop_time, 0644, show_fan_stop_time, + store_fan_stop_time, 2); +static SENSOR_DEVICE_ATTR(pwm3_start_output, 0644, show_fan_start_output, + store_fan_start_output, 2); +static SENSOR_DEVICE_ATTR(pwm3_stop_output, 0644, show_fan_stop_output, + 
store_fan_stop_output, 2); + +static SENSOR_DEVICE_ATTR(pwm1_stop_time, 0644, show_fan_stop_time, + store_fan_stop_time, 0); +static SENSOR_DEVICE_ATTR(pwm2_stop_time, 0644, show_fan_stop_time, + store_fan_stop_time, 1); +static SENSOR_DEVICE_ATTR(pwm1_start_output, 0644, show_fan_start_output, + store_fan_start_output, 0); +static SENSOR_DEVICE_ATTR(pwm2_start_output, 0644, show_fan_start_output, + store_fan_start_output, 1); +static SENSOR_DEVICE_ATTR(pwm1_stop_output, 0644, show_fan_stop_output, + store_fan_stop_output, 0); +static SENSOR_DEVICE_ATTR(pwm2_stop_output, 0644, show_fan_stop_output, + store_fan_stop_output, 1); /* * pwm1 and pwm3 don't support max and step settings on all chips. * Need to check support while generating/removing attribute files. */ -static struct sensor_device_attribute sda_sf3_max_step_arrays[] = { - SENSOR_ATTR(pwm1_max_output, S_IWUSR | S_IRUGO, show_fan_max_output, - store_fan_max_output, 0), - SENSOR_ATTR(pwm1_step_output, S_IWUSR | S_IRUGO, show_fan_step_output, - store_fan_step_output, 0), - SENSOR_ATTR(pwm2_max_output, S_IWUSR | S_IRUGO, show_fan_max_output, - store_fan_max_output, 1), - SENSOR_ATTR(pwm2_step_output, S_IWUSR | S_IRUGO, show_fan_step_output, - store_fan_step_output, 1), - SENSOR_ATTR(pwm3_max_output, S_IWUSR | S_IRUGO, show_fan_max_output, - store_fan_max_output, 2), - SENSOR_ATTR(pwm3_step_output, S_IWUSR | S_IRUGO, show_fan_step_output, - store_fan_step_output, 2), -}; +static SENSOR_DEVICE_ATTR(pwm1_max_output, 0644, show_fan_max_output, + store_fan_max_output, 0); +static SENSOR_DEVICE_ATTR(pwm1_step_output, 0644, show_fan_step_output, + store_fan_step_output, 0); +static SENSOR_DEVICE_ATTR(pwm2_max_output, 0644, show_fan_max_output, + store_fan_max_output, 1); +static SENSOR_DEVICE_ATTR(pwm2_step_output, 0644, show_fan_step_output, + store_fan_step_output, 1); +static SENSOR_DEVICE_ATTR(pwm3_max_output, 0644, show_fan_max_output, + store_fan_max_output, 2); +static SENSOR_DEVICE_ATTR(pwm3_step_output, 0644, show_fan_step_output, + store_fan_step_output, 2); static ssize_t cpu0_vid_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -1752,33 +1083,20 @@ cpu0_vid_show(struct device *dev, struct device_attribute *attr, char *buf) struct w83627ehf_data *data = dev_get_drvdata(dev); return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm)); } -static DEVICE_ATTR_RO(cpu0_vid); +DEVICE_ATTR_RO(cpu0_vid); /* Case open detection */ - -static ssize_t -show_caseopen(struct device *dev, struct device_attribute *attr, char *buf) +static int +clear_caseopen(struct device *dev, struct w83627ehf_data *data, int channel, + long val) { - struct w83627ehf_data *data = w83627ehf_update_device(dev); - - return sprintf(buf, "%d\n", - !!(data->caseopen & to_sensor_dev_attr_2(attr)->index)); -} - -static ssize_t -clear_caseopen(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct w83627ehf_data *data = dev_get_drvdata(dev); - unsigned long val; - u16 reg, mask; + const u16 mask = 0x80; + u16 reg; - if (kstrtoul(buf, 10, &val) || val != 0) + if (val != 0 || channel != 0) return -EINVAL; - mask = to_sensor_dev_attr_2(attr)->nr; - mutex_lock(&data->update_lock); reg = w83627ehf_read_value(data, W83627EHF_REG_CASEOPEN_CLR); w83627ehf_write_value(data, W83627EHF_REG_CASEOPEN_CLR, reg | mask); @@ -1786,85 +1104,116 @@ clear_caseopen(struct device *dev, struct device_attribute *attr, data->valid = 0; /* Force cache refresh */ mutex_unlock(&data->update_lock); - return count; + return 0; } 
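The hunk that follows drops the driver's hand-rolled attribute bookkeeping in favor of a static attribute table gated by an .is_visible callback (w83627ehf_attrs_visible()), so per-chip variation is decided once at registration time. A rough standalone C sketch of that filtering pattern, with hypothetical names rather than the kernel's sysfs types:

#include <stdio.h>

/* Hypothetical stand-ins for sysfs attributes; not the kernel types. */
struct attr {
	const char *name;
	int channel;		/* fan/pwm index the attribute refers to */
};

/* Stand-in for .is_visible: return 0 to hide, else the file mode. */
static unsigned int is_visible(const struct attr *a, unsigned int has_fan)
{
	return (has_fan >> a->channel) & 1 ? 0644 : 0;
}

int main(void)
{
	static const struct attr attrs[] = {
		{ "pwm1_stop_time", 0 },
		{ "pwm2_stop_time", 1 },
		{ "pwm3_stop_time", 2 },
		{ "pwm4_stop_time", 3 },
	};
	unsigned int has_fan = 0x0b;	/* say, fans 1, 2 and 4 wired up */
	unsigned int i;

	for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++) {
		unsigned int mode = is_visible(&attrs[i], has_fan);

		if (mode)	/* only visible attributes get created */
			printf("create %s (mode %o)\n", attrs[i].name, mode);
	}
	return 0;
}

The design point mirrors the driver: the table itself never changes, and one predicate replaces the many conditional device_create_file()/device_remove_file() calls being deleted below.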
-static struct sensor_device_attribute_2 sda_caseopen[] = { - SENSOR_ATTR_2(intrusion0_alarm, S_IWUSR | S_IRUGO, show_caseopen, - clear_caseopen, 0x80, 0x10), - SENSOR_ATTR_2(intrusion1_alarm, S_IWUSR | S_IRUGO, show_caseopen, - clear_caseopen, 0x40, 0x40), -}; - -/* - * Driver and device management - */ - -static void w83627ehf_device_remove_files(struct device *dev) +static umode_t w83627ehf_attrs_visible(struct kobject *kobj, + struct attribute *a, int n) { - /* - * some entries in the following arrays may not have been used in - * device_create_file(), but device_remove_file() will ignore them - */ - int i; + struct device *dev = container_of(kobj, struct device, kobj); struct w83627ehf_data *data = dev_get_drvdata(dev); + struct device_attribute *devattr; + struct sensor_device_attribute *sda; + + devattr = container_of(a, struct device_attribute, attr); + + /* Not sensor */ + if (devattr->show == cpu0_vid_show && data->have_vid) + return a->mode; + + sda = (struct sensor_device_attribute *)devattr; + + if (sda->index < 2 && + (devattr->show == show_fan_stop_time || + devattr->show == show_fan_start_output || + devattr->show == show_fan_stop_output)) + return a->mode; + + if (sda->index < 3 && + (devattr->show == show_fan_max_output || + devattr->show == show_fan_step_output) && + data->REG_FAN_STEP_OUTPUT && + data->REG_FAN_STEP_OUTPUT[sda->index] != 0xff) + return a->mode; + + /* if fan3 and fan4 are enabled create the files for them */ + if (sda->index == 2 && + (data->has_fan & (1 << 2)) && data->pwm_num >= 3 && + (devattr->show == show_fan_stop_time || + devattr->show == show_fan_start_output || + devattr->show == show_fan_stop_output)) + return a->mode; + + if (sda->index == 3 && + (data->has_fan & (1 << 3)) && data->pwm_num >= 4 && + (devattr->show == show_fan_stop_time || + devattr->show == show_fan_start_output || + devattr->show == show_fan_stop_output || + devattr->show == show_fan_max_output || + devattr->show == show_fan_step_output)) + return a->mode; + + if ((devattr->show == show_target_temp || + devattr->show == show_tolerance) && + (data->has_fan & (1 << sda->index)) && + sda->index < data->pwm_num) + return a->mode; - for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays); i++) - device_remove_file(dev, &sda_sf3_arrays[i].dev_attr); - for (i = 0; i < ARRAY_SIZE(sda_sf3_max_step_arrays); i++) { - struct sensor_device_attribute *attr = - &sda_sf3_max_step_arrays[i]; - if (data->REG_FAN_STEP_OUTPUT && - data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff) - device_remove_file(dev, &attr->dev_attr); - } - for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan3); i++) - device_remove_file(dev, &sda_sf3_arrays_fan3[i].dev_attr); - for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan4); i++) - device_remove_file(dev, &sda_sf3_arrays_fan4[i].dev_attr); - for (i = 0; i < data->in_num; i++) { - if ((i == 6) && data->in6_skip) - continue; - device_remove_file(dev, &sda_in_input[i].dev_attr); - device_remove_file(dev, &sda_in_alarm[i].dev_attr); - device_remove_file(dev, &sda_in_min[i].dev_attr); - device_remove_file(dev, &sda_in_max[i].dev_attr); - } - for (i = 0; i < 5; i++) { - device_remove_file(dev, &sda_fan_input[i].dev_attr); - device_remove_file(dev, &sda_fan_alarm[i].dev_attr); - device_remove_file(dev, &sda_fan_div[i].dev_attr); - device_remove_file(dev, &sda_fan_min[i].dev_attr); - } - for (i = 0; i < data->pwm_num; i++) { - device_remove_file(dev, &sda_pwm[i].dev_attr); - device_remove_file(dev, &sda_pwm_mode[i].dev_attr); - device_remove_file(dev, &sda_pwm_enable[i].dev_attr); - 
device_remove_file(dev, &sda_target_temp[i].dev_attr); - device_remove_file(dev, &sda_tolerance[i].dev_attr); - } - for (i = 0; i < NUM_REG_TEMP; i++) { - if (!(data->have_temp & (1 << i))) - continue; - device_remove_file(dev, &sda_temp_input[i].dev_attr); - device_remove_file(dev, &sda_temp_label[i].dev_attr); - if (i == 2 && data->temp3_val_only) - continue; - device_remove_file(dev, &sda_temp_max[i].dev_attr); - device_remove_file(dev, &sda_temp_max_hyst[i].dev_attr); - if (i > 2) - continue; - device_remove_file(dev, &sda_temp_alarm[i].dev_attr); - device_remove_file(dev, &sda_temp_type[i].dev_attr); - device_remove_file(dev, &sda_temp_offset[i].dev_attr); - } + return 0; +} + +/* These groups handle non-standard attributes used in this device */ +static struct attribute *w83627ehf_attrs[] = { + + &sensor_dev_attr_pwm1_stop_time.dev_attr.attr, + &sensor_dev_attr_pwm1_start_output.dev_attr.attr, + &sensor_dev_attr_pwm1_stop_output.dev_attr.attr, + &sensor_dev_attr_pwm1_max_output.dev_attr.attr, + &sensor_dev_attr_pwm1_step_output.dev_attr.attr, + &sensor_dev_attr_pwm1_target.dev_attr.attr, + &sensor_dev_attr_pwm1_tolerance.dev_attr.attr, + + &sensor_dev_attr_pwm2_stop_time.dev_attr.attr, + &sensor_dev_attr_pwm2_start_output.dev_attr.attr, + &sensor_dev_attr_pwm2_stop_output.dev_attr.attr, + &sensor_dev_attr_pwm2_max_output.dev_attr.attr, + &sensor_dev_attr_pwm2_step_output.dev_attr.attr, + &sensor_dev_attr_pwm2_target.dev_attr.attr, + &sensor_dev_attr_pwm2_tolerance.dev_attr.attr, + + &sensor_dev_attr_pwm3_stop_time.dev_attr.attr, + &sensor_dev_attr_pwm3_start_output.dev_attr.attr, + &sensor_dev_attr_pwm3_stop_output.dev_attr.attr, + &sensor_dev_attr_pwm3_max_output.dev_attr.attr, + &sensor_dev_attr_pwm3_step_output.dev_attr.attr, + &sensor_dev_attr_pwm3_target.dev_attr.attr, + &sensor_dev_attr_pwm3_tolerance.dev_attr.attr, + + &sensor_dev_attr_pwm4_stop_time.dev_attr.attr, + &sensor_dev_attr_pwm4_start_output.dev_attr.attr, + &sensor_dev_attr_pwm4_stop_output.dev_attr.attr, + &sensor_dev_attr_pwm4_max_output.dev_attr.attr, + &sensor_dev_attr_pwm4_step_output.dev_attr.attr, + &sensor_dev_attr_pwm4_target.dev_attr.attr, + &sensor_dev_attr_pwm4_tolerance.dev_attr.attr, + + &dev_attr_cpu0_vid.attr, + NULL +}; - device_remove_file(dev, &sda_caseopen[0].dev_attr); - device_remove_file(dev, &sda_caseopen[1].dev_attr); +static const struct attribute_group w83627ehf_group = { + .attrs = w83627ehf_attrs, + .is_visible = w83627ehf_attrs_visible, +}; - device_remove_file(dev, &dev_attr_name); - device_remove_file(dev, &dev_attr_cpu0_vid); -} +static const struct attribute_group *w83627ehf_groups[] = { + &w83627ehf_group, + NULL +}; + +/* + * Driver and device management + */ /* Get the monitoring functions started */ static inline void w83627ehf_init_device(struct w83627ehf_data *data, @@ -1927,16 +1276,6 @@ static inline void w83627ehf_init_device(struct w83627ehf_data *data, } } -static void w82627ehf_swap_tempreg(struct w83627ehf_data *data, - int r1, int r2) -{ - swap(data->temp_src[r1], data->temp_src[r2]); - swap(data->reg_temp[r1], data->reg_temp[r2]); - swap(data->reg_temp_over[r1], data->reg_temp_over[r2]); - swap(data->reg_temp_hyst[r1], data->reg_temp_hyst[r2]); - swap(data->reg_temp_config[r1], data->reg_temp_config[r2]); -} - static void w83627ehf_set_temp_reg_ehf(struct w83627ehf_data *data, int n_temp) { @@ -1954,7 +1293,7 @@ static void w83627ehf_check_fan_inputs(const struct w83627ehf_sio_data *sio_data, struct w83627ehf_data *data) { - int fan3pin, fan4pin, fan4min, fan5pin, 
regval; + int fan3pin, fan4pin, fan5pin, regval; /* The W83627UHG is simple, only two fan inputs, no config */ if (sio_data->kind == w83627uhg) { @@ -1964,77 +1303,392 @@ w83627ehf_check_fan_inputs(const struct w83627ehf_sio_data *sio_data, } /* fan4 and fan5 share some pins with the GPIO and serial flash */ - if (sio_data->kind == nct6775) { - /* On NCT6775, fan4 shares pins with the fdc interface */ - fan3pin = 1; - fan4pin = !(superio_inb(sio_data->sioreg, 0x2A) & 0x80); - fan4min = 0; - fan5pin = 0; - } else if (sio_data->kind == nct6776) { - bool gpok = superio_inb(sio_data->sioreg, 0x27) & 0x80; - - superio_select(sio_data->sioreg, W83627EHF_LD_HWM); - regval = superio_inb(sio_data->sioreg, SIO_REG_ENABLE); - - if (regval & 0x80) - fan3pin = gpok; - else - fan3pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x40); - - if (regval & 0x40) - fan4pin = gpok; - else - fan4pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x01); - - if (regval & 0x20) - fan5pin = gpok; - else - fan5pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x02); - - fan4min = fan4pin; - } else if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) { + if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) { fan3pin = 1; fan4pin = superio_inb(sio_data->sioreg, 0x27) & 0x40; fan5pin = superio_inb(sio_data->sioreg, 0x27) & 0x20; - fan4min = fan4pin; } else { fan3pin = 1; fan4pin = !(superio_inb(sio_data->sioreg, 0x29) & 0x06); fan5pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x02); - fan4min = fan4pin; } data->has_fan = data->has_fan_min = 0x03; /* fan1 and fan2 */ data->has_fan |= (fan3pin << 2); data->has_fan_min |= (fan3pin << 2); - if (sio_data->kind == nct6775 || sio_data->kind == nct6776) { - /* - * NCT6775F and NCT6776F don't have the W83627EHF_REG_FANDIV1 - * register - */ - data->has_fan |= (fan4pin << 3) | (fan5pin << 4); - data->has_fan_min |= (fan4min << 3) | (fan5pin << 4); - } else { - /* - * It looks like fan4 and fan5 pins can be alternatively used - * as fan on/off switches, but fan5 control is write only :/ - * We assume that if the serial interface is disabled, designers - * connected fan5 as input unless they are emitting log 1, which - * is not the default. - */ - regval = w83627ehf_read_value(data, W83627EHF_REG_FANDIV1); - if ((regval & (1 << 2)) && fan4pin) { - data->has_fan |= (1 << 3); - data->has_fan_min |= (1 << 3); + /* + * It looks like fan4 and fan5 pins can be alternatively used + * as fan on/off switches, but fan5 control is write only :/ + * We assume that if the serial interface is disabled, designers + * connected fan5 as input unless they are emitting log 1, which + * is not the default. + */ + regval = w83627ehf_read_value(data, W83627EHF_REG_FANDIV1); + if ((regval & (1 << 2)) && fan4pin) { + data->has_fan |= (1 << 3); + data->has_fan_min |= (1 << 3); + } + if (!(regval & (1 << 1)) && fan5pin) { + data->has_fan |= (1 << 4); + data->has_fan_min |= (1 << 4); + } +} + +static umode_t +w83627ehf_is_visible(const void *drvdata, enum hwmon_sensor_types type, + u32 attr, int channel) +{ + const struct w83627ehf_data *data = drvdata; + + switch (type) { + case hwmon_temp: + /* channel 0.., name 1.. 
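+		 * (channels are counted from 0 in the code but from 1 in the
+		 * sysfs names: temp channel 0 backs temp1_input and friends)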
*/ + if (!(data->have_temp & (1 << channel))) + return 0; + if (attr == hwmon_temp_input || attr == hwmon_temp_label) + return 0444; + if (channel == 2 && data->temp3_val_only) + return 0; + if (attr == hwmon_temp_max) { + if (data->reg_temp_over[channel]) + return 0644; + else + return 0; + } + if (attr == hwmon_temp_max_hyst) { + if (data->reg_temp_hyst[channel]) + return 0644; + else + return 0; + } + if (channel > 2) + return 0; + if (attr == hwmon_temp_alarm || attr == hwmon_temp_type) + return 0444; + if (attr == hwmon_temp_offset) { + if (data->have_temp_offset & (1 << channel)) + return 0644; + else + return 0; + } + break; + + case hwmon_fan: + /* channel 0.., name 1.. */ + if (!(data->has_fan & (1 << channel))) + return 0; + if (attr == hwmon_fan_input || attr == hwmon_fan_alarm) + return 0444; + if (attr == hwmon_fan_div) { + return 0444; } - if (!(regval & (1 << 1)) && fan5pin) { - data->has_fan |= (1 << 4); - data->has_fan_min |= (1 << 4); + if (attr == hwmon_fan_min) { + if (data->has_fan_min & (1 << channel)) + return 0644; + else + return 0; } + break; + + case hwmon_in: + /* channel 0.., name 0.. */ + if (channel >= data->in_num) + return 0; + if (channel == 6 && data->in6_skip) + return 0; + if (attr == hwmon_in_alarm || attr == hwmon_in_input) + return 0444; + if (attr == hwmon_in_min || attr == hwmon_in_max) + return 0644; + break; + + case hwmon_pwm: + /* channel 0.., name 1.. */ + if (!(data->has_fan & (1 << channel)) || + channel >= data->pwm_num) + return 0; + if (attr == hwmon_pwm_mode || attr == hwmon_pwm_enable || + attr == hwmon_pwm_input) + return 0644; + break; + + case hwmon_intrusion: + return 0644; + + default: /* Shouldn't happen */ + return 0; } + + return 0; /* Shouldn't happen */ } +static int +w83627ehf_do_read_temp(struct w83627ehf_data *data, u32 attr, + int channel, long *val) +{ + switch (attr) { + case hwmon_temp_input: + *val = LM75_TEMP_FROM_REG(data->temp[channel]); + return 0; + case hwmon_temp_max: + *val = LM75_TEMP_FROM_REG(data->temp_max[channel]); + return 0; + case hwmon_temp_max_hyst: + *val = LM75_TEMP_FROM_REG(data->temp_max_hyst[channel]); + return 0; + case hwmon_temp_offset: + *val = data->temp_offset[channel] * 1000; + return 0; + case hwmon_temp_type: + *val = (int)data->temp_type[channel]; + return 0; + case hwmon_temp_alarm: + if (channel < 3) { + int bit[] = { 4, 5, 13 }; + *val = (data->alarms >> bit[channel]) & 1; + return 0; + } + break; + + default: + break; + } + + return -EOPNOTSUPP; +} + +static int +w83627ehf_do_read_in(struct w83627ehf_data *data, u32 attr, + int channel, long *val) +{ + switch (attr) { + case hwmon_in_input: + *val = in_from_reg(data->in[channel], channel, data->scale_in); + return 0; + case hwmon_in_min: + *val = in_from_reg(data->in_min[channel], channel, + data->scale_in); + return 0; + case hwmon_in_max: + *val = in_from_reg(data->in_max[channel], channel, + data->scale_in); + return 0; + case hwmon_in_alarm: + if (channel < 10) { + int bit[] = { 0, 1, 2, 3, 8, 21, 20, 16, 17, 19 }; + *val = (data->alarms >> bit[channel]) & 1; + return 0; + } + break; + default: + break; + } + return -EOPNOTSUPP; +} + +static int +w83627ehf_do_read_fan(struct w83627ehf_data *data, u32 attr, + int channel, long *val) +{ + switch (attr) { + case hwmon_fan_input: + *val = data->rpm[channel]; + return 0; + case hwmon_fan_min: + *val = fan_from_reg8(data->fan_min[channel], + data->fan_div[channel]); + return 0; + case hwmon_fan_div: + *val = div_from_reg(data->fan_div[channel]); + return 0; + case hwmon_fan_alarm: + 
if (channel < 5) { + int bit[] = { 6, 7, 11, 10, 23 }; + *val = (data->alarms >> bit[channel]) & 1; + return 0; + } + break; + default: + break; + } + return -EOPNOTSUPP; +} + +static int +w83627ehf_do_read_pwm(struct w83627ehf_data *data, u32 attr, + int channel, long *val) +{ + switch (attr) { + case hwmon_pwm_input: + *val = data->pwm[channel]; + return 0; + case hwmon_pwm_enable: + *val = data->pwm_enable[channel]; + return 0; + case hwmon_pwm_mode: + *val = data->pwm_enable[channel]; + return 0; + default: + break; + } + return -EOPNOTSUPP; +} + +static int +w83627ehf_do_read_intrusion(struct w83627ehf_data *data, u32 attr, + int channel, long *val) +{ + if (attr != hwmon_intrusion_alarm || channel != 0) + return -EOPNOTSUPP; /* shouldn't happen */ + + *val = !!(data->caseopen & 0x10); + return 0; +} + +static int +w83627ehf_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + struct w83627ehf_data *data = w83627ehf_update_device(dev->parent); + + switch (type) { + case hwmon_fan: + return w83627ehf_do_read_fan(data, attr, channel, val); + + case hwmon_in: + return w83627ehf_do_read_in(data, attr, channel, val); + + case hwmon_pwm: + return w83627ehf_do_read_pwm(data, attr, channel, val); + + case hwmon_temp: + return w83627ehf_do_read_temp(data, attr, channel, val); + + case hwmon_intrusion: + return w83627ehf_do_read_intrusion(data, attr, channel, val); + + default: + break; + } + + return -EOPNOTSUPP; +} + +static int +w83627ehf_read_string(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, const char **str) +{ + struct w83627ehf_data *data = dev_get_drvdata(dev); + + switch (type) { + case hwmon_temp: + if (attr == hwmon_temp_label) { + *str = data->temp_label[data->temp_src[channel]]; + return 0; + } + break; + + default: + break; + } + /* Nothing else should be read as a string */ + return -EOPNOTSUPP; +} + +static int +w83627ehf_write(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long val) +{ + struct w83627ehf_data *data = dev_get_drvdata(dev); + + if (type == hwmon_in && attr == hwmon_in_min) + return store_in_min(dev, data, channel, val); + if (type == hwmon_in && attr == hwmon_in_max) + return store_in_max(dev, data, channel, val); + + if (type == hwmon_fan && attr == hwmon_fan_min) + return store_fan_min(dev, data, channel, val); + + if (type == hwmon_temp && attr == hwmon_temp_max) + return store_temp_max(dev, data, channel, val); + if (type == hwmon_temp && attr == hwmon_temp_max_hyst) + return store_temp_max_hyst(dev, data, channel, val); + if (type == hwmon_temp && attr == hwmon_temp_offset) + return store_temp_offset(dev, data, channel, val); + + if (type == hwmon_pwm && attr == hwmon_pwm_mode) + return store_pwm_mode(dev, data, channel, val); + if (type == hwmon_pwm && attr == hwmon_pwm_enable) + return store_pwm_enable(dev, data, channel, val); + if (type == hwmon_pwm && attr == hwmon_pwm_input) + return store_pwm(dev, data, channel, val); + + if (type == hwmon_intrusion && attr == hwmon_intrusion_alarm) + return clear_caseopen(dev, data, channel, val); + + return -EOPNOTSUPP; +} + +static const struct hwmon_ops w83627ehf_ops = { + .is_visible = w83627ehf_is_visible, + .read = w83627ehf_read, + .read_string = w83627ehf_read_string, + .write = w83627ehf_write, +}; + +static const struct hwmon_channel_info *w83627ehf_info[] = { + HWMON_CHANNEL_INFO(fan, + HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN, + HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN, 
+ HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN, + HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN, + HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN), + HWMON_CHANNEL_INFO(in, + HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN, + HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN, + HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN, + HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN, + HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN, + HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN, + HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN, + HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN, + HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN, + HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN), + HWMON_CHANNEL_INFO(pwm, + HWMON_PWM_ENABLE | HWMON_PWM_INPUT | HWMON_PWM_MODE, + HWMON_PWM_ENABLE | HWMON_PWM_INPUT | HWMON_PWM_MODE, + HWMON_PWM_ENABLE | HWMON_PWM_INPUT | HWMON_PWM_MODE, + HWMON_PWM_ENABLE | HWMON_PWM_INPUT | HWMON_PWM_MODE), + HWMON_CHANNEL_INFO(temp, + HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX | + HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE, + HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX | + HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE, + HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX | + HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE, + HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX | + HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE, + HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX | + HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE, + HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX | + HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE, + HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX | + HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE, + HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX | + HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE, + HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX | + HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE), + HWMON_CHANNEL_INFO(intrusion, + HWMON_INTRUSION_ALARM), + NULL +}; + +static const struct hwmon_chip_info w83627ehf_chip_info = { + .ops = &w83627ehf_ops, + .info = w83627ehf_info, +}; + static int w83627ehf_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -2043,6 +1697,7 @@ static int w83627ehf_probe(struct platform_device *pdev) struct resource *res; u8 en_vrm10; int i, err = 0; + struct device *hwmon_dev; res = platform_get_resource(pdev, IORESOURCE_IO, 0); if (!request_region(res->start, IOREGION_LENGTH, DRVNAME)) { @@ -2069,15 +1724,13 @@ static int w83627ehf_probe(struct platform_device *pdev) /* 627EHG and 627EHF have 10 voltage inputs; 627DHG and 667HG have 9 */ data->in_num = (sio_data->kind == w83627ehf) ? 10 : 9; - /* 667HG, NCT6775F, and NCT6776F have 3 pwms, and 627UHG has only 2 */ + /* 667HG has 3 pwms, and 627UHG has only 2 */ switch (sio_data->kind) { default: data->pwm_num = 4; break; case w83667hg: case w83667hg_b: - case nct6775: - case nct6776: data->pwm_num = 3; break; case w83627uhg: @@ -2089,83 +1742,7 @@ static int w83627ehf_probe(struct platform_device *pdev) data->have_temp = 0x07; /* Deal with temperature register setup first. */ - if (sio_data->kind == nct6775 || sio_data->kind == nct6776) { - int mask = 0; - - /* - * Display temperature sensor output only if it monitors - * a source other than one already reported. 
Always display - * first three temperature registers, though. - */ - for (i = 0; i < NUM_REG_TEMP; i++) { - u8 src; - - data->reg_temp[i] = NCT6775_REG_TEMP[i]; - data->reg_temp_over[i] = NCT6775_REG_TEMP_OVER[i]; - data->reg_temp_hyst[i] = NCT6775_REG_TEMP_HYST[i]; - data->reg_temp_config[i] = NCT6775_REG_TEMP_CONFIG[i]; - - src = w83627ehf_read_value(data, - NCT6775_REG_TEMP_SOURCE[i]); - src &= 0x1f; - if (src && !(mask & (1 << src))) { - data->have_temp |= 1 << i; - mask |= 1 << src; - } - - data->temp_src[i] = src; - - /* - * Now do some register swapping if index 0..2 don't - * point to SYSTIN(1), CPUIN(2), and AUXIN(3). - * Idea is to have the first three attributes - * report SYSTIN, CPUIN, and AUXIN if possible - * without overriding the basic system configuration. - */ - if (i > 0 && data->temp_src[0] != 1 - && data->temp_src[i] == 1) - w82627ehf_swap_tempreg(data, 0, i); - if (i > 1 && data->temp_src[1] != 2 - && data->temp_src[i] == 2) - w82627ehf_swap_tempreg(data, 1, i); - if (i > 2 && data->temp_src[2] != 3 - && data->temp_src[i] == 3) - w82627ehf_swap_tempreg(data, 2, i); - } - if (sio_data->kind == nct6776) { - /* - * On NCT6776, AUXTIN and VIN3 pins are shared. - * Only way to detect it is to check if AUXTIN is used - * as a temperature source, and if that source is - * enabled. - * - * If that is the case, disable in6, which reports VIN3. - * Otherwise disable temp3. - */ - if (data->temp_src[2] == 3) { - u8 reg; - - if (data->reg_temp_config[2]) - reg = w83627ehf_read_value(data, - data->reg_temp_config[2]); - else - reg = 0; /* Assume AUXTIN is used */ - - if (reg & 0x01) - data->have_temp &= ~(1 << 2); - else - data->in6_skip = 1; - } - data->temp_label = nct6776_temp_label; - } else { - data->temp_label = nct6775_temp_label; - } - data->have_temp_offset = data->have_temp & 0x07; - for (i = 0; i < 3; i++) { - if (data->temp_src[i] > 3) - data->have_temp_offset &= ~(1 << i); - } - } else if (sio_data->kind == w83667hg_b) { + if (sio_data->kind == w83667hg_b) { u8 reg; w83627ehf_set_temp_reg_ehf(data, 4); @@ -2275,56 +1852,12 @@ static int w83627ehf_probe(struct platform_device *pdev) data->have_temp_offset = data->have_temp & 0x07; } - if (sio_data->kind == nct6775) { - data->has_fan_div = true; - data->fan_from_reg = fan_from_reg16; - data->fan_from_reg_min = fan_from_reg8; - data->REG_PWM = NCT6775_REG_PWM; - data->REG_TARGET = NCT6775_REG_TARGET; - data->REG_FAN = NCT6775_REG_FAN; - data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN; - data->REG_FAN_START_OUTPUT = NCT6775_REG_FAN_START_OUTPUT; - data->REG_FAN_STOP_OUTPUT = NCT6775_REG_FAN_STOP_OUTPUT; - data->REG_FAN_STOP_TIME = NCT6775_REG_FAN_STOP_TIME; - data->REG_FAN_MAX_OUTPUT = NCT6775_REG_FAN_MAX_OUTPUT; - data->REG_FAN_STEP_OUTPUT = NCT6775_REG_FAN_STEP_OUTPUT; - } else if (sio_data->kind == nct6776) { - data->has_fan_div = false; - data->fan_from_reg = fan_from_reg13; - data->fan_from_reg_min = fan_from_reg13; - data->REG_PWM = NCT6775_REG_PWM; - data->REG_TARGET = NCT6775_REG_TARGET; - data->REG_FAN = NCT6775_REG_FAN; - data->REG_FAN_MIN = NCT6776_REG_FAN_MIN; - data->REG_FAN_START_OUTPUT = NCT6775_REG_FAN_START_OUTPUT; - data->REG_FAN_STOP_OUTPUT = NCT6775_REG_FAN_STOP_OUTPUT; - data->REG_FAN_STOP_TIME = NCT6775_REG_FAN_STOP_TIME; - } else if (sio_data->kind == w83667hg_b) { - data->has_fan_div = true; - data->fan_from_reg = fan_from_reg8; - data->fan_from_reg_min = fan_from_reg8; - data->REG_PWM = W83627EHF_REG_PWM; - data->REG_TARGET = W83627EHF_REG_TARGET; - data->REG_FAN = W83627EHF_REG_FAN; - 
data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN; - data->REG_FAN_START_OUTPUT = W83627EHF_REG_FAN_START_OUTPUT; - data->REG_FAN_STOP_OUTPUT = W83627EHF_REG_FAN_STOP_OUTPUT; - data->REG_FAN_STOP_TIME = W83627EHF_REG_FAN_STOP_TIME; + if (sio_data->kind == w83667hg_b) { data->REG_FAN_MAX_OUTPUT = W83627EHF_REG_FAN_MAX_OUTPUT_W83667_B; data->REG_FAN_STEP_OUTPUT = W83627EHF_REG_FAN_STEP_OUTPUT_W83667_B; } else { - data->has_fan_div = true; - data->fan_from_reg = fan_from_reg8; - data->fan_from_reg_min = fan_from_reg8; - data->REG_PWM = W83627EHF_REG_PWM; - data->REG_TARGET = W83627EHF_REG_TARGET; - data->REG_FAN = W83627EHF_REG_FAN; - data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN; - data->REG_FAN_START_OUTPUT = W83627EHF_REG_FAN_START_OUTPUT; - data->REG_FAN_STOP_OUTPUT = W83627EHF_REG_FAN_STOP_OUTPUT; - data->REG_FAN_STOP_TIME = W83627EHF_REG_FAN_STOP_TIME; data->REG_FAN_MAX_OUTPUT = W83627EHF_REG_FAN_MAX_OUTPUT_COMMON; data->REG_FAN_STEP_OUTPUT = @@ -2347,8 +1880,7 @@ static int w83627ehf_probe(struct platform_device *pdev) goto exit_release; /* Read VID value */ - if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b || - sio_data->kind == nct6775 || sio_data->kind == nct6776) { + if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) { /* * W83667HG has different pins for VID input and output, so * we can get the VID input values directly at logical device D @@ -2356,11 +1888,7 @@ static int w83627ehf_probe(struct platform_device *pdev) */ superio_select(sio_data->sioreg, W83667HG_LD_VID); data->vid = superio_inb(sio_data->sioreg, 0xe3); - err = device_create_file(dev, &dev_attr_cpu0_vid); - if (err) { - superio_exit(sio_data->sioreg); - goto exit_release; - } + data->have_vid = true; } else if (sio_data->kind != w83627uhg) { superio_select(sio_data->sioreg, W83627EHF_LD_HWM); if (superio_inb(sio_data->sioreg, SIO_REG_VID_CTRL) & 0x80) { @@ -2394,190 +1922,33 @@ static int w83627ehf_probe(struct platform_device *pdev) SIO_REG_VID_DATA); if (sio_data->kind == w83627ehf) /* 6 VID pins only */ data->vid &= 0x3f; - - err = device_create_file(dev, &dev_attr_cpu0_vid); - if (err) { - superio_exit(sio_data->sioreg); - goto exit_release; - } + data->have_vid = true; } else { dev_info(dev, "VID pins in output mode, CPU VID not available\n"); } } - if (fan_debounce && - (sio_data->kind == nct6775 || sio_data->kind == nct6776)) { - u8 tmp; - - superio_select(sio_data->sioreg, W83627EHF_LD_HWM); - tmp = superio_inb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE); - if (sio_data->kind == nct6776) - superio_outb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE, - 0x3e | tmp); - else - superio_outb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE, - 0x1e | tmp); - pr_info("Enabled fan debounce for chip %s\n", data->name); - } - w83627ehf_check_fan_inputs(sio_data, data); superio_exit(sio_data->sioreg); /* Read fan clock dividers immediately */ - w83627ehf_update_fan_div_common(dev, data); + w83627ehf_update_fan_div(data); /* Read pwm data to save original values */ - w83627ehf_update_pwm_common(dev, data); + w83627ehf_update_pwm(data); for (i = 0; i < data->pwm_num; i++) data->pwm_enable_orig[i] = data->pwm_enable[i]; - /* Register sysfs hooks */ - for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays); i++) { - err = device_create_file(dev, &sda_sf3_arrays[i].dev_attr); - if (err) - goto exit_remove; - } - - for (i = 0; i < ARRAY_SIZE(sda_sf3_max_step_arrays); i++) { - struct sensor_device_attribute *attr = - &sda_sf3_max_step_arrays[i]; - if (data->REG_FAN_STEP_OUTPUT && - data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff) { 
- err = device_create_file(dev, &attr->dev_attr); - if (err) - goto exit_remove; - } - } - /* if fan3 and fan4 are enabled create the sf3 files for them */ - if ((data->has_fan & (1 << 2)) && data->pwm_num >= 3) - for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan3); i++) { - err = device_create_file(dev, - &sda_sf3_arrays_fan3[i].dev_attr); - if (err) - goto exit_remove; - } - if ((data->has_fan & (1 << 3)) && data->pwm_num >= 4) - for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan4); i++) { - err = device_create_file(dev, - &sda_sf3_arrays_fan4[i].dev_attr); - if (err) - goto exit_remove; - } - - for (i = 0; i < data->in_num; i++) { - if ((i == 6) && data->in6_skip) - continue; - if ((err = device_create_file(dev, &sda_in_input[i].dev_attr)) - || (err = device_create_file(dev, - &sda_in_alarm[i].dev_attr)) - || (err = device_create_file(dev, - &sda_in_min[i].dev_attr)) - || (err = device_create_file(dev, - &sda_in_max[i].dev_attr))) - goto exit_remove; - } - - for (i = 0; i < 5; i++) { - if (data->has_fan & (1 << i)) { - if ((err = device_create_file(dev, - &sda_fan_input[i].dev_attr)) - || (err = device_create_file(dev, - &sda_fan_alarm[i].dev_attr))) - goto exit_remove; - if (sio_data->kind != nct6776) { - err = device_create_file(dev, - &sda_fan_div[i].dev_attr); - if (err) - goto exit_remove; - } - if (data->has_fan_min & (1 << i)) { - err = device_create_file(dev, - &sda_fan_min[i].dev_attr); - if (err) - goto exit_remove; - } - if (i < data->pwm_num && - ((err = device_create_file(dev, - &sda_pwm[i].dev_attr)) - || (err = device_create_file(dev, - &sda_pwm_mode[i].dev_attr)) - || (err = device_create_file(dev, - &sda_pwm_enable[i].dev_attr)) - || (err = device_create_file(dev, - &sda_target_temp[i].dev_attr)) - || (err = device_create_file(dev, - &sda_tolerance[i].dev_attr)))) - goto exit_remove; - } - } + hwmon_dev = devm_hwmon_device_register_with_info(&pdev->dev, + data->name, + data, + &w83627ehf_chip_info, + w83627ehf_groups); - for (i = 0; i < NUM_REG_TEMP; i++) { - if (!(data->have_temp & (1 << i))) - continue; - err = device_create_file(dev, &sda_temp_input[i].dev_attr); - if (err) - goto exit_remove; - if (data->temp_label) { - err = device_create_file(dev, - &sda_temp_label[i].dev_attr); - if (err) - goto exit_remove; - } - if (i == 2 && data->temp3_val_only) - continue; - if (data->reg_temp_over[i]) { - err = device_create_file(dev, - &sda_temp_max[i].dev_attr); - if (err) - goto exit_remove; - } - if (data->reg_temp_hyst[i]) { - err = device_create_file(dev, - &sda_temp_max_hyst[i].dev_attr); - if (err) - goto exit_remove; - } - if (i > 2) - continue; - if ((err = device_create_file(dev, - &sda_temp_alarm[i].dev_attr)) - || (err = device_create_file(dev, - &sda_temp_type[i].dev_attr))) - goto exit_remove; - if (data->have_temp_offset & (1 << i)) { - err = device_create_file(dev, - &sda_temp_offset[i].dev_attr); - if (err) - goto exit_remove; - } - } - - err = device_create_file(dev, &sda_caseopen[0].dev_attr); - if (err) - goto exit_remove; - - if (sio_data->kind == nct6776) { - err = device_create_file(dev, &sda_caseopen[1].dev_attr); - if (err) - goto exit_remove; - } - - err = device_create_file(dev, &dev_attr_name); - if (err) - goto exit_remove; - - data->hwmon_dev = hwmon_device_register(dev); - if (IS_ERR(data->hwmon_dev)) { - err = PTR_ERR(data->hwmon_dev); - goto exit_remove; - } + return PTR_ERR_OR_ZERO(hwmon_dev); - return 0; - -exit_remove: - w83627ehf_device_remove_files(dev); exit_release: release_region(res->start, IOREGION_LENGTH); exit: @@ -2588,8 +1959,6 @@ 
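Registration now goes through devm_hwmon_device_register_with_info(), so the hwmon device is device-managed and the remove() hunk below can drop its manual teardown. A minimal hedged sketch of the registration idiom, with hypothetical demo_* names:

#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct demo_data *data = demo_init(pdev);	/* assumed setup helper */
	struct device *hwmon_dev;

	/* demo_chip_info bundles the hwmon_ops callbacks and channel table */
	hwmon_dev = devm_hwmon_device_register_with_info(&pdev->dev, "demo",
							 data, &demo_chip_info,
							 NULL);
	return PTR_ERR_OR_ZERO(hwmon_dev);
}

The final argument is the optional array of extra sysfs groups; w83627ehf passes w83627ehf_groups there so its non-standard pwm tuning attributes ride along with the standard ones.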
static int w83627ehf_remove(struct platform_device *pdev) { struct w83627ehf_data *data = platform_get_drvdata(pdev); - hwmon_device_unregister(data->hwmon_dev); - w83627ehf_device_remove_files(&pdev->dev); release_region(data->addr, IOREGION_LENGTH); return 0; @@ -2599,14 +1968,9 @@ static int w83627ehf_remove(struct platform_device *pdev) static int w83627ehf_suspend(struct device *dev) { struct w83627ehf_data *data = w83627ehf_update_device(dev); - struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev); mutex_lock(&data->update_lock); data->vbat = w83627ehf_read_value(data, W83627EHF_REG_VBAT); - if (sio_data->kind == nct6775) { - data->fandiv1 = w83627ehf_read_value(data, NCT6775_REG_FANDIV1); - data->fandiv2 = w83627ehf_read_value(data, NCT6775_REG_FANDIV2); - } mutex_unlock(&data->update_lock); return 0; @@ -2615,7 +1979,6 @@ static int w83627ehf_suspend(struct device *dev) static int w83627ehf_resume(struct device *dev) { struct w83627ehf_data *data = dev_get_drvdata(dev); - struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev); int i; mutex_lock(&data->update_lock); @@ -2636,7 +1999,7 @@ static int w83627ehf_resume(struct device *dev) if (!(data->has_fan_min & (1 << i))) continue; - w83627ehf_write_value(data, data->REG_FAN_MIN[i], + w83627ehf_write_value(data, W83627EHF_REG_FAN_MIN[i], data->fan_min[i]); } @@ -2660,10 +2023,6 @@ static int w83627ehf_resume(struct device *dev) /* Restore other settings */ w83627ehf_write_value(data, W83627EHF_REG_VBAT, data->vbat); - if (sio_data->kind == nct6775) { - w83627ehf_write_value(data, NCT6775_REG_FANDIV1, data->fandiv1); - w83627ehf_write_value(data, NCT6775_REG_FANDIV2, data->fandiv2); - } /* Force re-reading all values */ data->valid = 0; @@ -2704,8 +2063,6 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr, static const char sio_name_W83627UHG[] __initconst = "W83627UHG"; static const char sio_name_W83667HG[] __initconst = "W83667HG"; static const char sio_name_W83667HG_B[] __initconst = "W83667HG-B"; - static const char sio_name_NCT6775[] __initconst = "NCT6775F"; - static const char sio_name_NCT6776[] __initconst = "NCT6776F"; u16 val; const char *sio_name; @@ -2749,14 +2106,6 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr, sio_data->kind = w83667hg_b; sio_name = sio_name_W83667HG_B; break; - case SIO_NCT6775_ID: - sio_data->kind = nct6775; - sio_name = sio_name_NCT6775; - break; - case SIO_NCT6776_ID: - sio_data->kind = nct6776; - sio_name = sio_name_NCT6776; - break; default: if (val != 0xffff) pr_debug("unsupported chip ID: 0x%04x\n", val); diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c index ff340d7ae2e5..abfe3094c047 100644 --- a/drivers/i2c/busses/i2c-highlander.c +++ b/drivers/i2c/busses/i2c-highlander.c @@ -369,7 +369,7 @@ static int highlander_i2c_probe(struct platform_device *pdev) if (unlikely(!dev)) return -ENOMEM; - dev->base = ioremap_nocache(res->start, resource_size(res)); + dev->base = ioremap(res->start, resource_size(res)); if (unlikely(!dev->base)) { ret = -ENXIO; goto err; diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c index 0829cb696d9d..4fde74eb34a7 100644 --- a/drivers/i2c/busses/i2c-pmcmsp.c +++ b/drivers/i2c/busses/i2c-pmcmsp.c @@ -281,7 +281,7 @@ static int pmcmsptwi_probe(struct platform_device *pldev) } /* remap the memory */ - pmcmsptwi_data.iobase = ioremap_nocache(res->start, + pmcmsptwi_data.iobase = ioremap(res->start, resource_size(res)); if (!pmcmsptwi_data.iobase) { 
dev_err(&pldev->dev, diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c index 043691656245..7f8f896fa0c3 100644 --- a/drivers/i3c/master.c +++ b/drivers/i3c/master.c @@ -527,8 +527,8 @@ static const struct device_type i3c_masterdev_type = { .groups = i3c_masterdev_groups, }; -int i3c_bus_set_mode(struct i3c_bus *i3cbus, enum i3c_bus_mode mode, - unsigned long max_i2c_scl_rate) +static int i3c_bus_set_mode(struct i3c_bus *i3cbus, enum i3c_bus_mode mode, + unsigned long max_i2c_scl_rate) { struct i3c_master_controller *master = i3c_bus_to_i3c_master(i3cbus); diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c index b0ff0e12d84c..bd26c3b9634e 100644 --- a/drivers/i3c/master/dw-i3c-master.c +++ b/drivers/i3c/master/dw-i3c-master.c @@ -899,6 +899,22 @@ static int dw_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev, struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev); struct i3c_master_controller *m = i3c_dev_get_master(dev); struct dw_i3c_master *master = to_dw_i3c_master(m); + int pos; + + pos = dw_i3c_master_get_free_pos(master); + + if (data->index > pos && pos > 0) { + writel(0, + master->regs + + DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); + + master->addrs[data->index] = 0; + master->free_pos |= BIT(data->index); + + data->index = pos; + master->addrs[pos] = dev->info.dyn_addr; + master->free_pos &= ~BIT(pos); + } writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr), master->regs + @@ -1100,15 +1116,13 @@ static const struct i3c_master_controller_ops dw_mipi_i3c_ops = { static int dw_i3c_probe(struct platform_device *pdev) { struct dw_i3c_master *master; - struct resource *res; int ret, irq; master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL); if (!master) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - master->regs = devm_ioremap_resource(&pdev->dev, res); + master->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(master->regs)) return PTR_ERR(master->regs); diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c index 10db0bf0655a..54712793709e 100644 --- a/drivers/i3c/master/i3c-master-cdns.c +++ b/drivers/i3c/master/i3c-master-cdns.c @@ -22,6 +22,7 @@ #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/workqueue.h> +#include <linux/of_device.h> #define DEV_ID 0x0 #define DEV_ID_I3C_MASTER 0x5034 @@ -60,6 +61,7 @@ #define CTRL_HALT_EN BIT(30) #define CTRL_MCS BIT(29) #define CTRL_MCS_EN BIT(28) +#define CTRL_THD_DELAY(x) (((x) << 24) & GENMASK(25, 24)) #define CTRL_HJ_DISEC BIT(8) #define CTRL_MST_ACK BIT(7) #define CTRL_HJ_ACK BIT(6) @@ -70,6 +72,7 @@ #define CTRL_MIXED_FAST_BUS_MODE 2 #define CTRL_MIXED_SLOW_BUS_MODE 3 #define CTRL_BUS_MODE_MASK GENMASK(1, 0) +#define THD_DELAY_MAX 3 #define PRESCL_CTRL0 0x14 #define PRESCL_CTRL0_I2C(x) ((x) << 16) @@ -388,6 +391,10 @@ struct cdns_i3c_xfer { struct cdns_i3c_cmd cmds[0]; }; +struct cdns_i3c_data { + u8 thd_delay_ns; +}; + struct cdns_i3c_master { struct work_struct hj_work; struct i3c_master_controller base; @@ -408,6 +415,7 @@ struct cdns_i3c_master { struct clk *pclk; struct cdns_i3c_master_caps caps; unsigned long i3c_scl_lim; + const struct cdns_i3c_data *devdata; }; static inline struct cdns_i3c_master * @@ -1181,6 +1189,20 @@ static int cdns_i3c_master_do_daa(struct i3c_master_controller *m) return 0; } +static u8 cdns_i3c_master_calculate_thd_delay(struct cdns_i3c_master *master) +{ + unsigned long sysclk_rate = clk_get_rate(master->sysclk); + u8 thd_delay = 
DIV_ROUND_UP(master->devdata->thd_delay_ns, + (NSEC_PER_SEC / sysclk_rate)); + + /* Every value greater than 3 is not valid. */ + if (thd_delay > THD_DELAY_MAX) + thd_delay = THD_DELAY_MAX; + + /* CTLR_THD_DEL value is encoded. */ + return (THD_DELAY_MAX - thd_delay); +} + static int cdns_i3c_master_bus_init(struct i3c_master_controller *m) { struct cdns_i3c_master *master = to_cdns_i3c_master(m); @@ -1264,6 +1286,15 @@ static int cdns_i3c_master_bus_init(struct i3c_master_controller *m) * We will issue ENTDAA afterwards from the threaded IRQ handler. */ ctrl |= CTRL_HJ_ACK | CTRL_HJ_DISEC | CTRL_HALT_EN | CTRL_MCS_EN; + + /* + * Configure data hold delay based on device-specific data. + * + * MIPI I3C Specification 1.0 defines non-zero minimal tHD_PP timing on + * master output. This setting allows to meet this timing on master's + * SoC outputs, regardless of PCB balancing. + */ + ctrl |= CTRL_THD_DELAY(cdns_i3c_master_calculate_thd_delay(master)); writel(ctrl, master->regs + CTRL); cdns_i3c_master_enable(master); @@ -1521,10 +1552,18 @@ static void cdns_i3c_master_hj(struct work_struct *work) i3c_master_do_daa(&master->base); } +static struct cdns_i3c_data cdns_i3c_devdata = { + .thd_delay_ns = 10, +}; + +static const struct of_device_id cdns_i3c_master_of_ids[] = { + { .compatible = "cdns,i3c-master", .data = &cdns_i3c_devdata }, + { /* sentinel */ }, +}; + static int cdns_i3c_master_probe(struct platform_device *pdev) { struct cdns_i3c_master *master; - struct resource *res; int ret, irq; u32 val; @@ -1532,8 +1571,11 @@ static int cdns_i3c_master_probe(struct platform_device *pdev) if (!master) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - master->regs = devm_ioremap_resource(&pdev->dev, res); + master->devdata = of_device_get_match_data(&pdev->dev); + if (!master->devdata) + return -EINVAL; + + master->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(master->regs)) return PTR_ERR(master->regs); @@ -1631,11 +1673,6 @@ static int cdns_i3c_master_remove(struct platform_device *pdev) return 0; } -static const struct of_device_id cdns_i3c_master_of_ids[] = { - { .compatible = "cdns,i3c-master" }, - { /* sentinel */ }, -}; - static struct platform_driver cdns_i3c_master = { .probe = cdns_i3c_master_probe, .remove = cdns_i3c_master_remove, diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 75fd2a7b0842..7833e650789f 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -41,6 +41,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/acpi.h> #include <linux/kernel.h> #include <linux/cpuidle.h> #include <linux/tick.h> @@ -79,6 +80,7 @@ struct idle_cpu { unsigned long auto_demotion_disable_flags; bool byt_auto_demotion_disable_flag; bool disable_promotion_to_c1e; + bool use_acpi; }; static const struct idle_cpu *icpu; @@ -90,6 +92,11 @@ static void intel_idle_s2idle(struct cpuidle_device *dev, static struct cpuidle_state *cpuidle_state_table; /* + * Enable this state by default even if the ACPI _CST does not list it. + */ +#define CPUIDLE_FLAG_ALWAYS_ENABLE BIT(15) + +/* * Set this flag for states where the HW flushes the TLB for us * and so we don't need cross-calls to keep it consistent. 
* If this flag is set, SW flushes the TLB, so even if the @@ -124,7 +131,7 @@ static struct cpuidle_state nehalem_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -161,7 +168,7 @@ static struct cpuidle_state snb_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -296,7 +303,7 @@ static struct cpuidle_state ivb_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -341,7 +348,7 @@ static struct cpuidle_state ivt_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 80, .enter = &intel_idle, @@ -378,7 +385,7 @@ static struct cpuidle_state ivt_cstates_4s[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 250, .enter = &intel_idle, @@ -415,7 +422,7 @@ static struct cpuidle_state ivt_cstates_8s[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 500, .enter = &intel_idle, @@ -452,7 +459,7 @@ static struct cpuidle_state hsw_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -520,7 +527,7 @@ static struct cpuidle_state bdw_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -589,7 +596,7 @@ static struct cpuidle_state skl_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -658,7 +665,7 @@ static struct cpuidle_state skx_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -808,7 +815,7 @@ static struct cpuidle_state bxt_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -869,7 +876,7 @@ static struct cpuidle_state dnv_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -944,37 +951,19 @@ static void intel_idle_s2idle(struct cpuidle_device *dev, mwait_idle_with_hints(eax, ecx); } -static void __setup_broadcast_timer(bool on) -{ - if (on) - tick_broadcast_enable(); - else - tick_broadcast_disable(); -} - -static void auto_demotion_disable(void) -{ - unsigned long long msr_bits; - - rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits); - msr_bits &= 
~(icpu->auto_demotion_disable_flags); - wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits); -} -static void c1e_promotion_disable(void) -{ - unsigned long long msr_bits; - - rdmsrl(MSR_IA32_POWER_CTL, msr_bits); - msr_bits &= ~0x2; - wrmsrl(MSR_IA32_POWER_CTL, msr_bits); -} - static const struct idle_cpu idle_cpu_nehalem = { .state_table = nehalem_cstates, .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE, .disable_promotion_to_c1e = true, }; +static const struct idle_cpu idle_cpu_nhx = { + .state_table = nehalem_cstates, + .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE, + .disable_promotion_to_c1e = true, + .use_acpi = true, +}; + static const struct idle_cpu idle_cpu_atom = { .state_table = atom_cstates, }; @@ -993,6 +982,12 @@ static const struct idle_cpu idle_cpu_snb = { .disable_promotion_to_c1e = true, }; +static const struct idle_cpu idle_cpu_snx = { + .state_table = snb_cstates, + .disable_promotion_to_c1e = true, + .use_acpi = true, +}; + static const struct idle_cpu idle_cpu_byt = { .state_table = byt_cstates, .disable_promotion_to_c1e = true, @@ -1013,6 +1008,7 @@ static const struct idle_cpu idle_cpu_ivb = { static const struct idle_cpu idle_cpu_ivt = { .state_table = ivt_cstates, .disable_promotion_to_c1e = true, + .use_acpi = true, }; static const struct idle_cpu idle_cpu_hsw = { @@ -1020,11 +1016,23 @@ static const struct idle_cpu idle_cpu_hsw = { .disable_promotion_to_c1e = true, }; +static const struct idle_cpu idle_cpu_hsx = { + .state_table = hsw_cstates, + .disable_promotion_to_c1e = true, + .use_acpi = true, +}; + static const struct idle_cpu idle_cpu_bdw = { .state_table = bdw_cstates, .disable_promotion_to_c1e = true, }; +static const struct idle_cpu idle_cpu_bdx = { + .state_table = bdw_cstates, + .disable_promotion_to_c1e = true, + .use_acpi = true, +}; + static const struct idle_cpu idle_cpu_skl = { .state_table = skl_cstates, .disable_promotion_to_c1e = true, @@ -1033,15 +1041,18 @@ static const struct idle_cpu idle_cpu_skl = { static const struct idle_cpu idle_cpu_skx = { .state_table = skx_cstates, .disable_promotion_to_c1e = true, + .use_acpi = true, }; static const struct idle_cpu idle_cpu_avn = { .state_table = avn_cstates, .disable_promotion_to_c1e = true, + .use_acpi = true, }; static const struct idle_cpu idle_cpu_knl = { .state_table = knl_cstates, + .use_acpi = true, }; static const struct idle_cpu idle_cpu_bxt = { @@ -1052,20 +1063,21 @@ static const struct idle_cpu idle_cpu_bxt = { static const struct idle_cpu idle_cpu_dnv = { .state_table = dnv_cstates, .disable_promotion_to_c1e = true, + .use_acpi = true, }; static const struct x86_cpu_id intel_idle_ids[] __initconst = { - INTEL_CPU_FAM6(NEHALEM_EP, idle_cpu_nehalem), + INTEL_CPU_FAM6(NEHALEM_EP, idle_cpu_nhx), INTEL_CPU_FAM6(NEHALEM, idle_cpu_nehalem), INTEL_CPU_FAM6(NEHALEM_G, idle_cpu_nehalem), INTEL_CPU_FAM6(WESTMERE, idle_cpu_nehalem), - INTEL_CPU_FAM6(WESTMERE_EP, idle_cpu_nehalem), - INTEL_CPU_FAM6(NEHALEM_EX, idle_cpu_nehalem), + INTEL_CPU_FAM6(WESTMERE_EP, idle_cpu_nhx), + INTEL_CPU_FAM6(NEHALEM_EX, idle_cpu_nhx), INTEL_CPU_FAM6(ATOM_BONNELL, idle_cpu_atom), INTEL_CPU_FAM6(ATOM_BONNELL_MID, idle_cpu_lincroft), - INTEL_CPU_FAM6(WESTMERE_EX, idle_cpu_nehalem), + INTEL_CPU_FAM6(WESTMERE_EX, idle_cpu_nhx), INTEL_CPU_FAM6(SANDYBRIDGE, idle_cpu_snb), - INTEL_CPU_FAM6(SANDYBRIDGE_X, idle_cpu_snb), + INTEL_CPU_FAM6(SANDYBRIDGE_X, idle_cpu_snx), INTEL_CPU_FAM6(ATOM_SALTWELL, idle_cpu_atom), INTEL_CPU_FAM6(ATOM_SILVERMONT, idle_cpu_byt), 
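	/*
	 * Each INTEL_CPU_FAM6() entry in this table binds a CPU model to an
	 * idle_cpu descriptor through the match table's driver_data field.
	 * The init path below recovers it roughly like this (sketch, assuming
	 * the match succeeded):
	 *
	 *	const struct x86_cpu_id *id = x86_match_cpu(intel_idle_ids);
	 *	const struct idle_cpu *icpu = (void *)id->driver_data;
	 *
	 *	cpuidle_state_table = icpu->state_table;
	 */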
INTEL_CPU_FAM6(ATOM_SILVERMONT_MID, idle_cpu_tangier), @@ -1073,14 +1085,14 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = { INTEL_CPU_FAM6(IVYBRIDGE, idle_cpu_ivb), INTEL_CPU_FAM6(IVYBRIDGE_X, idle_cpu_ivt), INTEL_CPU_FAM6(HASWELL, idle_cpu_hsw), - INTEL_CPU_FAM6(HASWELL_X, idle_cpu_hsw), + INTEL_CPU_FAM6(HASWELL_X, idle_cpu_hsx), INTEL_CPU_FAM6(HASWELL_L, idle_cpu_hsw), INTEL_CPU_FAM6(HASWELL_G, idle_cpu_hsw), INTEL_CPU_FAM6(ATOM_SILVERMONT_D, idle_cpu_avn), INTEL_CPU_FAM6(BROADWELL, idle_cpu_bdw), INTEL_CPU_FAM6(BROADWELL_G, idle_cpu_bdw), - INTEL_CPU_FAM6(BROADWELL_X, idle_cpu_bdw), - INTEL_CPU_FAM6(BROADWELL_D, idle_cpu_bdw), + INTEL_CPU_FAM6(BROADWELL_X, idle_cpu_bdx), + INTEL_CPU_FAM6(BROADWELL_D, idle_cpu_bdx), INTEL_CPU_FAM6(SKYLAKE_L, idle_cpu_skl), INTEL_CPU_FAM6(SKYLAKE, idle_cpu_skl), INTEL_CPU_FAM6(KABYLAKE_L, idle_cpu_skl), @@ -1095,76 +1107,169 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = { {} }; -/* - * intel_idle_probe() +#define INTEL_CPU_FAM6_MWAIT \ + { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_MWAIT, 0 } + +static const struct x86_cpu_id intel_mwait_ids[] __initconst = { + INTEL_CPU_FAM6_MWAIT, + {} +}; + +static bool __init intel_idle_max_cstate_reached(int cstate) +{ + if (cstate + 1 > max_cstate) { + pr_info("max_cstate %d reached\n", max_cstate); + return true; + } + return false; +} + +#ifdef CONFIG_ACPI_PROCESSOR_CSTATE +#include <acpi/processor.h> + +static bool no_acpi __read_mostly; +module_param(no_acpi, bool, 0444); +MODULE_PARM_DESC(no_acpi, "Do not use ACPI _CST for building the idle states list"); + +static struct acpi_processor_power acpi_state_table __initdata; + +/** + * intel_idle_cst_usable - Check if the _CST information can be used. + * + * Check if all of the C-states listed by _CST in the max_cstate range are + * ACPI_CSTATE_FFH, which means that they should be entered via MWAIT. 
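+ * A single state with a different entry method (for instance an
+ * ACPI_CSTATE_SYSTEMIO state entered through an I/O port read) makes the
+ * whole table unusable for this driver, since intel_idle only knows how
+ * to enter idle states via MWAIT.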
*/ -static int __init intel_idle_probe(void) +static bool __init intel_idle_cst_usable(void) { - unsigned int eax, ebx, ecx; - const struct x86_cpu_id *id; + int cstate, limit; - if (max_cstate == 0) { - pr_debug("disabled\n"); - return -EPERM; - } + limit = min_t(int, min_t(int, CPUIDLE_STATE_MAX, max_cstate + 1), + acpi_state_table.count); - id = x86_match_cpu(intel_idle_ids); - if (!id) { - if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && - boot_cpu_data.x86 == 6) - pr_debug("does not run on family %d model %d\n", - boot_cpu_data.x86, boot_cpu_data.x86_model); - return -ENODEV; + for (cstate = 1; cstate < limit; cstate++) { + struct acpi_processor_cx *cx = &acpi_state_table.states[cstate]; + + if (cx->entry_method != ACPI_CSTATE_FFH) + return false; } - if (!boot_cpu_has(X86_FEATURE_MWAIT)) { - pr_debug("Please enable MWAIT in BIOS SETUP\n"); - return -ENODEV; + return true; +} + +static bool __init intel_idle_acpi_cst_extract(void) +{ + unsigned int cpu; + + if (no_acpi) { + pr_debug("Not allowed to use ACPI _CST\n"); + return false; } - if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) - return -ENODEV; + for_each_possible_cpu(cpu) { + struct acpi_processor *pr = per_cpu(processors, cpu); - cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates); + if (!pr) + continue; - if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) || - !(ecx & CPUID5_ECX_INTERRUPT_BREAK) || - !mwait_substates) - return -ENODEV; + if (acpi_processor_evaluate_cst(pr->handle, cpu, &acpi_state_table)) + continue; - pr_debug("MWAIT substates: 0x%x\n", mwait_substates); + acpi_state_table.count++; - icpu = (const struct idle_cpu *)id->driver_data; - cpuidle_state_table = icpu->state_table; + if (!intel_idle_cst_usable()) + continue; - pr_debug("v" INTEL_IDLE_VERSION " model 0x%X\n", - boot_cpu_data.x86_model); + if (!acpi_processor_claim_cst_control()) { + acpi_state_table.count = 0; + return false; + } - return 0; + return true; + } + + pr_debug("ACPI _CST not found or not usable\n"); + return false; } -/* - * intel_idle_cpuidle_devices_uninit() - * Unregisters the cpuidle devices. - */ -static void intel_idle_cpuidle_devices_uninit(void) +static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { - int i; - struct cpuidle_device *dev; + int cstate, limit = min_t(int, CPUIDLE_STATE_MAX, acpi_state_table.count); + + /* + * If limit > 0, intel_idle_cst_usable() has returned 'true', so all of + * the interesting states are ACPI_CSTATE_FFH. + */ + for (cstate = 1; cstate < limit; cstate++) { + struct acpi_processor_cx *cx; + struct cpuidle_state *state; - for_each_online_cpu(i) { - dev = per_cpu_ptr(intel_idle_cpuidle_devices, i); - cpuidle_unregister_device(dev); + if (intel_idle_max_cstate_reached(cstate)) + break; + + cx = &acpi_state_table.states[cstate]; + + state = &drv->states[drv->state_count++]; + + snprintf(state->name, CPUIDLE_NAME_LEN, "C%d_ACPI", cstate); + strlcpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); + state->exit_latency = cx->latency; + /* + * For C1-type C-states use the same number for both the exit + * latency and target residency, because that is the case for + * C1 in the majority of the static C-states tables above. + * For the other types of C-states, however, set the target + * residency to 3 times the exit latency which should lead to + * a reasonable balance between energy-efficiency and + * performance in the majority of interesting cases. 
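+		 * For example, a C3-type _CST entry reporting a 100 us exit
+		 * latency is registered with target_residency = 300 us,
+		 * while a C1-type entry keeps 100 us for both fields.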
+ */ + state->target_residency = cx->latency; + if (cx->type > ACPI_STATE_C1) + state->target_residency *= 3; + + state->flags = MWAIT2flg(cx->address); + if (cx->type > ACPI_STATE_C2) + state->flags |= CPUIDLE_FLAG_TLB_FLUSHED; + + state->enter = intel_idle; + state->enter_s2idle = intel_idle_s2idle; } } +static bool __init intel_idle_off_by_default(u32 mwait_hint) +{ + int cstate, limit; + + /* + * If there are no _CST C-states, do not disable any C-states by + * default. + */ + if (!acpi_state_table.count) + return false; + + limit = min_t(int, CPUIDLE_STATE_MAX, acpi_state_table.count); + /* + * If limit > 0, intel_idle_cst_usable() has returned 'true', so all of + * the interesting states are ACPI_CSTATE_FFH. + */ + for (cstate = 1; cstate < limit; cstate++) { + if (acpi_state_table.states[cstate].address == mwait_hint) + return false; + } + return true; +} +#else /* !CONFIG_ACPI_PROCESSOR_CSTATE */ +static inline bool intel_idle_acpi_cst_extract(void) { return false; } +static inline void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { } +static inline bool intel_idle_off_by_default(u32 mwait_hint) { return false; } +#endif /* !CONFIG_ACPI_PROCESSOR_CSTATE */ + /* * ivt_idle_state_table_update(void) * * Tune IVT multi-socket targets * Assumption: num_sockets == (max_package_num + 1) */ -static void ivt_idle_state_table_update(void) +static void __init ivt_idle_state_table_update(void) { /* IVT uses a different table for 1-2, 3-4, and > 4 sockets */ int cpu, package_num, num_sockets = 1; @@ -1187,15 +1292,17 @@ static void ivt_idle_state_table_update(void) /* else, 1 and 2 socket systems use default ivt_cstates */ } -/* - * Translate IRTL (Interrupt Response Time Limit) MSR to usec +/** + * irtl_2_usec - IRTL to microseconds conversion. + * @irtl: IRTL MSR value. + * + * Translate the IRTL (Interrupt Response Time Limit) MSR value to microseconds. */ - -static unsigned int irtl_ns_units[] = { - 1, 32, 1024, 32768, 1048576, 33554432, 0, 0 }; - -static unsigned long long irtl_2_usec(unsigned long long irtl) +static unsigned long long __init irtl_2_usec(unsigned long long irtl) { + static const unsigned int irtl_ns_units[] __initconst = { + 1, 32, 1024, 32768, 1048576, 33554432, 0, 0 + }; unsigned long long ns; if (!irtl) @@ -1203,15 +1310,16 @@ static unsigned long long irtl_2_usec(unsigned long long irtl) ns = irtl_ns_units[(irtl >> 10) & 0x7]; - return div64_u64((irtl & 0x3FF) * ns, 1000); + return div_u64((irtl & 0x3FF) * ns, NSEC_PER_USEC); } + /* * bxt_idle_state_table_update(void) * * On BXT, we trust the IRTL to show the definitive maximum latency * We use the same value for target_residency. 
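 * Example decode via irtl_2_usec() above: an IRTL MSR value of 0x883 has
 * time-unit field (0x883 >> 10) & 0x7 == 2, i.e. 1024 ns per unit, and
 * limit 0x883 & 0x3FF == 0x83 == 131 units, so the resulting latency is
 * 131 * 1024 / 1000 = 134 us.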
*/ -static void bxt_idle_state_table_update(void) +static void __init bxt_idle_state_table_update(void) { unsigned long long msr; unsigned int usec; @@ -1258,7 +1366,7 @@ static void bxt_idle_state_table_update(void) * On SKL-H (model 0x5e) disable C8 and C9 if: * C10 is enabled and SGX disabled */ -static void sklh_idle_state_table_update(void) +static void __init sklh_idle_state_table_update(void) { unsigned long long msr; unsigned int eax, ebx, ecx, edx; @@ -1284,7 +1392,7 @@ static void sklh_idle_state_table_update(void) /* if SGX is present */ if (ebx & (1 << 2)) { - rdmsrl(MSR_IA32_FEATURE_CONTROL, msr); + rdmsrl(MSR_IA32_FEAT_CTL, msr); /* if SGX is enabled */ if (msr & (1 << 18)) @@ -1294,16 +1402,28 @@ static void sklh_idle_state_table_update(void) skl_cstates[5].flags |= CPUIDLE_FLAG_UNUSABLE; /* C8-SKL */ skl_cstates[6].flags |= CPUIDLE_FLAG_UNUSABLE; /* C9-SKL */ } -/* - * intel_idle_state_table_update() - * - * Update the default state_table for this CPU-id - */ -static void intel_idle_state_table_update(void) +static bool __init intel_idle_verify_cstate(unsigned int mwait_hint) { - switch (boot_cpu_data.x86_model) { + unsigned int mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint) + 1; + unsigned int num_substates = (mwait_substates >> mwait_cstate * 4) & + MWAIT_SUBSTATE_MASK; + + /* Ignore the C-state if there are NO sub-states in CPUID for it. */ + if (num_substates == 0) + return false; + + if (mwait_cstate > 2 && !boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) + mark_tsc_unstable("TSC halts in idle states deeper than C2"); + return true; +} + +static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv) +{ + int cstate; + + switch (boot_cpu_data.x86_model) { case INTEL_FAM6_IVYBRIDGE_X: ivt_idle_state_table_update(); break; @@ -1315,62 +1435,36 @@ static void intel_idle_state_table_update(void) sklh_idle_state_table_update(); break; } -} - -/* - * intel_idle_cpuidle_driver_init() - * allocate, initialize cpuidle_states - */ -static void __init intel_idle_cpuidle_driver_init(void) -{ - int cstate; - struct cpuidle_driver *drv = &intel_idle_driver; - - intel_idle_state_table_update(); - - cpuidle_poll_state_init(drv); - drv->state_count = 1; for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) { - int num_substates, mwait_hint, mwait_cstate; + unsigned int mwait_hint; - if ((cpuidle_state_table[cstate].enter == NULL) && - (cpuidle_state_table[cstate].enter_s2idle == NULL)) + if (intel_idle_max_cstate_reached(cstate)) break; - if (cstate + 1 > max_cstate) { - pr_info("max_cstate %d reached\n", max_cstate); + if (!cpuidle_state_table[cstate].enter && + !cpuidle_state_table[cstate].enter_s2idle) break; - } - - mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags); - mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint); - - /* number of sub-states for this state in CPUID.MWAIT */ - num_substates = (mwait_substates >> ((mwait_cstate + 1) * 4)) - & MWAIT_SUBSTATE_MASK; - - /* if NO sub-states for this state in CPUID, skip it */ - if (num_substates == 0) - continue; - /* if state marked as disabled, skip it */ + /* If marked as unusable, skip this state. */ if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_UNUSABLE) { pr_debug("state %s is disabled\n", cpuidle_state_table[cstate].name); continue; } + mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags); + if (!intel_idle_verify_cstate(mwait_hint)) + continue; - if (((mwait_cstate + 1) > 2) && - !boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) - mark_tsc_unstable("TSC halts in idle" - " states deeper than C2"); + /* Structure copy. 
*/ + drv->states[drv->state_count] = cpuidle_state_table[cstate]; - drv->states[drv->state_count] = /* structure copy */ - cpuidle_state_table[cstate]; + if (icpu->use_acpi && intel_idle_off_by_default(mwait_hint) && + !(cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_ALWAYS_ENABLE)) + drv->states[drv->state_count].flags |= CPUIDLE_FLAG_OFF; - drv->state_count += 1; + drv->state_count++; } if (icpu->byt_auto_demotion_disable_flag) { @@ -1379,6 +1473,38 @@ static void __init intel_idle_cpuidle_driver_init(void) } } +/* + * intel_idle_cpuidle_driver_init() + * allocate, initialize cpuidle_states + */ +static void __init intel_idle_cpuidle_driver_init(struct cpuidle_driver *drv) +{ + cpuidle_poll_state_init(drv); + drv->state_count = 1; + + if (icpu) + intel_idle_init_cstates_icpu(drv); + else + intel_idle_init_cstates_acpi(drv); +} + +static void auto_demotion_disable(void) +{ + unsigned long long msr_bits; + + rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits); + msr_bits &= ~(icpu->auto_demotion_disable_flags); + wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits); +} + +static void c1e_promotion_disable(void) +{ + unsigned long long msr_bits; + + rdmsrl(MSR_IA32_POWER_CTL, msr_bits); + msr_bits &= ~0x2; + wrmsrl(MSR_IA32_POWER_CTL, msr_bits); +} /* * intel_idle_cpu_init() @@ -1397,6 +1523,9 @@ static int intel_idle_cpu_init(unsigned int cpu) return -EIO; } + if (!icpu) + return 0; + if (icpu->auto_demotion_disable_flags) auto_demotion_disable(); @@ -1411,7 +1540,7 @@ static int intel_idle_cpu_online(unsigned int cpu) struct cpuidle_device *dev; if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) - __setup_broadcast_timer(true); + tick_broadcast_enable(); /* * Some systems can hotplug a cpu at runtime after @@ -1425,23 +1554,74 @@ static int intel_idle_cpu_online(unsigned int cpu) return 0; } +/** + * intel_idle_cpuidle_devices_uninit - Unregister all cpuidle devices. 
+ */ +static void __init intel_idle_cpuidle_devices_uninit(void) +{ + int i; + + for_each_online_cpu(i) + cpuidle_unregister_device(per_cpu_ptr(intel_idle_cpuidle_devices, i)); +} + static int __init intel_idle_init(void) { + const struct x86_cpu_id *id; + unsigned int eax, ebx, ecx; int retval; /* Do not load intel_idle at all for now if idle= is passed */ if (boot_option_idle_override != IDLE_NO_OVERRIDE) return -ENODEV; - retval = intel_idle_probe(); - if (retval) - return retval; + if (max_cstate == 0) { + pr_debug("disabled\n"); + return -EPERM; + } + + id = x86_match_cpu(intel_idle_ids); + if (id) { + if (!boot_cpu_has(X86_FEATURE_MWAIT)) { + pr_debug("Please enable MWAIT in BIOS SETUP\n"); + return -ENODEV; + } + } else { + id = x86_match_cpu(intel_mwait_ids); + if (!id) + return -ENODEV; + } + + if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) + return -ENODEV; + + cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates); + + if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) || + !(ecx & CPUID5_ECX_INTERRUPT_BREAK) || + !mwait_substates) + return -ENODEV; + + pr_debug("MWAIT substates: 0x%x\n", mwait_substates); + + icpu = (const struct idle_cpu *)id->driver_data; + if (icpu) { + cpuidle_state_table = icpu->state_table; + if (icpu->use_acpi) + intel_idle_acpi_cst_extract(); + } else if (!intel_idle_acpi_cst_extract()) { + return -ENODEV; + } + + pr_debug("v" INTEL_IDLE_VERSION " model 0x%X\n", + boot_cpu_data.x86_model); intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device); - if (intel_idle_cpuidle_devices == NULL) + if (!intel_idle_cpuidle_devices) return -ENOMEM; - intel_idle_cpuidle_driver_init(); + intel_idle_cpuidle_driver_init(&intel_idle_driver); + retval = cpuidle_register_driver(&intel_idle_driver); if (retval) { struct cpuidle_driver *drv = cpuidle_get_driver(); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index 4d07d22bfa7b..020f70e6865e 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -442,7 +442,7 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq, goto fail; } /* Unconditionally map 8 bytes to support 57500 series */ - nq->bar_reg_iomem = ioremap_nocache(nq_base + nq->bar_reg_off, 8); + nq->bar_reg_iomem = ioremap(nq_base + nq->bar_reg_off, 8); if (!nq->bar_reg_iomem) { rc = -ENOMEM; goto fail; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index 5cdfa84faf85..1291b12287a5 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c @@ -717,7 +717,7 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, if (!res_base) return -ENOMEM; - rcfw->cmdq_bar_reg_iomem = ioremap_nocache(res_base + + rcfw->cmdq_bar_reg_iomem = ioremap(res_base + RCFW_COMM_BASE_OFFSET, RCFW_COMM_SIZE); if (!rcfw->cmdq_bar_reg_iomem) { @@ -739,7 +739,7 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, "CREQ BAR region %d resc start is 0!\n", rcfw->creq_bar_reg); /* Unconditionally map 8 bytes to support 57500 series */ - rcfw->creq_bar_reg_iomem = ioremap_nocache(res_base + cp_bar_reg_off, + rcfw->creq_bar_reg_iomem = ioremap(res_base + cp_bar_reg_off, 8); if (!rcfw->creq_bar_reg_iomem) { dev_err(&rcfw->pdev->dev, "CREQ BAR region %d mapping failed\n", diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c index bdbde8e22420..60ea1b924b67 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.c +++ 
b/drivers/infiniband/hw/bnxt_re/qplib_res.c @@ -704,7 +704,7 @@ static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res, return -ENOMEM; } - dpit->dbr_bar_reg_iomem = ioremap_nocache(bar_reg_base + dbr_offset, + dpit->dbr_bar_reg_iomem = ioremap(bar_reg_base + dbr_offset, dbr_len); if (!dpit->dbr_bar_reg_iomem) { dev_err(&res->pdev->dev, diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c index 61362bd6d3ce..1a6268d61977 100644 --- a/drivers/infiniband/hw/hfi1/pcie.c +++ b/drivers/infiniband/hw/hfi1/pcie.c @@ -161,7 +161,7 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev) return -EINVAL; } - dd->kregbase1 = ioremap_nocache(addr, RCV_ARRAY); + dd->kregbase1 = ioremap(addr, RCV_ARRAY); if (!dd->kregbase1) { dd_dev_err(dd, "UC mapping of kregbase1 failed\n"); return -ENOMEM; @@ -179,7 +179,7 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev) dd_dev_info(dd, "RcvArray count: %u\n", rcv_array_count); dd->base2_start = RCV_ARRAY + rcv_array_count * 8; - dd->kregbase2 = ioremap_nocache( + dd->kregbase2 = ioremap( addr + dd->base2_start, TXE_PIO_SEND - dd->base2_start); if (!dd->kregbase2) { diff --git a/drivers/infiniband/hw/hfi1/trace_tid.h b/drivers/infiniband/hw/hfi1/trace_tid.h index 343fb9894a82..985ffa9cc958 100644 --- a/drivers/infiniband/hw/hfi1/trace_tid.h +++ b/drivers/infiniband/hw/hfi1/trace_tid.h @@ -138,10 +138,10 @@ TRACE_EVENT(/* put_tid */ TP_ARGS(dd, index, type, pa, order), TP_STRUCT__entry(/* entry */ DD_DEV_ENTRY(dd) - __field(unsigned long, pa); - __field(u32, index); - __field(u32, type); - __field(u16, order); + __field(unsigned long, pa) + __field(u32, index) + __field(u32, type) + __field(u16, order) ), TP_fast_assign(/* assign */ DD_DEV_ASSIGN(dd); diff --git a/drivers/infiniband/hw/hfi1/trace_tx.h b/drivers/infiniband/hw/hfi1/trace_tx.h index 09eb0c9ada00..769e5e4710c6 100644 --- a/drivers/infiniband/hw/hfi1/trace_tx.h +++ b/drivers/infiniband/hw/hfi1/trace_tx.h @@ -588,7 +588,7 @@ TRACE_EVENT(hfi1_sdma_user_reqinfo, TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 *i), TP_ARGS(dd, ctxt, subctxt, i), TP_STRUCT__entry( - DD_DEV_ENTRY(dd); + DD_DEV_ENTRY(dd) __field(u16, ctxt) __field(u8, subctxt) __field(u8, ver_opcode) diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 904d508cc823..ab4ec33409b8 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -40,7 +40,7 @@ #include <linux/slab.h> #include <linux/bitmap.h> #if defined(CONFIG_X86) -#include <asm/pat.h> +#include <asm/memtype.h> #endif #include <linux/sched.h> #include <linux/sched/mm.h> diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index dd4843379f51..91d64dd71a8a 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -6630,7 +6630,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd) /* vl15 buffers start just after the 4k buffers */ vl15off = dd->physaddr + (dd->piobufbase >> 32) + dd->piobcnt4k * dd->align4k; - dd->piovl15base = ioremap_nocache(vl15off, + dd->piovl15base = ioremap(vl15off, NUM_VL15_BUFS * dd->align4k); if (!dd->piovl15base) { ret = -ENOMEM; diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c index d4fd8a6cff7b..43c8ee1f46e0 100644 --- a/drivers/infiniband/hw/qib/qib_init.c +++ b/drivers/infiniband/hw/qib/qib_init.c @@ -1759,7 +1759,7 @@ int init_chip_wc_pat(struct qib_devdata *dd, u32 
vl15buflen) qib_userlen = dd->ureg_align * dd->cfgctxts; /* Sanity checks passed, now create the new mappings */ - qib_kregbase = ioremap_nocache(qib_physaddr, qib_kreglen); + qib_kregbase = ioremap(qib_physaddr, qib_kreglen); if (!qib_kregbase) goto bail; @@ -1768,7 +1768,7 @@ int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen) goto bail_kregbase; if (qib_userlen) { - qib_userbase = ioremap_nocache(qib_physaddr + dd->uregbase, + qib_userbase = ioremap(qib_physaddr + dd->uregbase, qib_userlen); if (!qib_userbase) goto bail_piobase; diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c index 864f2af171f7..3dc6ce033319 100644 --- a/drivers/infiniband/hw/qib/qib_pcie.c +++ b/drivers/infiniband/hw/qib/qib_pcie.c @@ -145,7 +145,7 @@ int qib_pcie_ddinit(struct qib_devdata *dd, struct pci_dev *pdev, addr = pci_resource_start(pdev, 0); len = pci_resource_len(pdev, 0); - dd->kregbase = ioremap_nocache(addr, len); + dd->kregbase = ioremap(addr, len); if (!dd->kregbase) return -ENOMEM; diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index a1a035270cab..b273e421e910 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -2575,17 +2575,6 @@ isert_wait4logout(struct isert_conn *isert_conn) } } -static void -isert_wait4cmds(struct iscsi_conn *conn) -{ - isert_info("iscsi_conn %p\n", conn); - - if (conn->sess) { - target_sess_cmd_list_set_waiting(conn->sess->se_sess); - target_wait_for_sess_cmds(conn->sess->se_sess); - } -} - /** * isert_put_unsol_pending_cmds() - Drop commands waiting for * unsolicitate dataout @@ -2633,7 +2622,6 @@ static void isert_wait_conn(struct iscsi_conn *conn) ib_drain_qp(isert_conn->qp); isert_put_unsol_pending_cmds(conn); - isert_wait4cmds(conn); isert_wait4logout(isert_conn); queue_work(isert_release_wq, &isert_conn->release_work); diff --git a/drivers/input/keyboard/pxa930_rotary.c b/drivers/input/keyboard/pxa930_rotary.c index f7414091d94e..2fe9dcfe0a6f 100644 --- a/drivers/input/keyboard/pxa930_rotary.c +++ b/drivers/input/keyboard/pxa930_rotary.c @@ -107,7 +107,7 @@ static int pxa930_rotary_probe(struct platform_device *pdev) if (!r) return -ENOMEM; - r->mmio_base = ioremap_nocache(res->start, resource_size(res)); + r->mmio_base = ioremap(res->start, resource_size(res)); if (r->mmio_base == NULL) { dev_err(&pdev->dev, "failed to remap IO memory\n"); err = -ENXIO; diff --git a/drivers/input/keyboard/sh_keysc.c b/drivers/input/keyboard/sh_keysc.c index 27ad73f43451..c155adebf96e 100644 --- a/drivers/input/keyboard/sh_keysc.c +++ b/drivers/input/keyboard/sh_keysc.c @@ -195,7 +195,7 @@ static int sh_keysc_probe(struct platform_device *pdev) memcpy(&priv->pdata, dev_get_platdata(&pdev->dev), sizeof(priv->pdata)); pdata = &priv->pdata; - priv->iomem_base = ioremap_nocache(res->start, resource_size(res)); + priv->iomem_base = ioremap(res->start, resource_size(res)); if (priv->iomem_base == NULL) { dev_err(&pdev->dev, "failed to remap I/O memory\n"); error = -ENXIO; diff --git a/drivers/input/mouse/pxa930_trkball.c b/drivers/input/mouse/pxa930_trkball.c index 41acde60b60f..3332b77eef2a 100644 --- a/drivers/input/mouse/pxa930_trkball.c +++ b/drivers/input/mouse/pxa930_trkball.c @@ -167,7 +167,7 @@ static int pxa930_trkball_probe(struct platform_device *pdev) goto failed; } - trkball->mmio_base = ioremap_nocache(res->start, resource_size(res)); + trkball->mmio_base = ioremap(res->start, resource_size(res)); if (!trkball->mmio_base) { 
dev_err(&pdev->dev, "failed to ioremap registers\n"); error = -ENXIO; diff --git a/drivers/input/serio/gscps2.c b/drivers/input/serio/gscps2.c index 96f9b5397367..2f9775de3c5b 100644 --- a/drivers/input/serio/gscps2.c +++ b/drivers/input/serio/gscps2.c @@ -349,7 +349,7 @@ static int __init gscps2_probe(struct parisc_device *dev) ps2port->port = serio; ps2port->padev = dev; - ps2port->addr = ioremap_nocache(hpa, GSC_STATUS + 4); + ps2port->addr = ioremap(hpa, GSC_STATUS + 4); spin_lock_init(&ps2port->lock); gscps2_reset(ps2port); diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 483f7bc379fa..823cc4ef51fd 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -440,7 +440,7 @@ static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end) return NULL; } - return (u8 __iomem *)ioremap_nocache(address, end); + return (u8 __iomem *)ioremap(address, end); } static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu) diff --git a/drivers/ipack/carriers/tpci200.c b/drivers/ipack/carriers/tpci200.c index d246d74ec3a5..23445ebfda5c 100644 --- a/drivers/ipack/carriers/tpci200.c +++ b/drivers/ipack/carriers/tpci200.c @@ -298,7 +298,7 @@ static int tpci200_register(struct tpci200_board *tpci200) /* Map internal tpci200 driver user space */ tpci200->info->interface_regs = - ioremap_nocache(pci_resource_start(tpci200->info->pdev, + ioremap(pci_resource_start(tpci200->info->pdev, TPCI200_IP_INTERFACE_BAR), TPCI200_IFACE_SIZE); if (!tpci200->info->interface_regs) { @@ -541,7 +541,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev, ret = -EBUSY; goto out_err_pci_request; } - tpci200->info->cfg_regs = ioremap_nocache( + tpci200->info->cfg_regs = ioremap( pci_resource_start(pdev, TPCI200_CFG_MEM_BAR), pci_resource_len(pdev, TPCI200_CFG_MEM_BAR)); if (!tpci200->info->cfg_regs) { diff --git a/drivers/ipack/devices/ipoctal.c b/drivers/ipack/devices/ipoctal.c index 9c2a4b5d30cf..d480a514c983 100644 --- a/drivers/ipack/devices/ipoctal.c +++ b/drivers/ipack/devices/ipoctal.c @@ -276,7 +276,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr, ipoctal->board_id = ipoctal->dev->id_device; region = &ipoctal->dev->region[IPACK_IO_SPACE]; - addr = devm_ioremap_nocache(&ipoctal->dev->dev, + addr = devm_ioremap(&ipoctal->dev->dev, region->start, region->size); if (!addr) { dev_err(&ipoctal->dev->dev, @@ -292,7 +292,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr, region = &ipoctal->dev->region[IPACK_INT_SPACE]; ipoctal->int_space = - devm_ioremap_nocache(&ipoctal->dev->dev, + devm_ioremap(&ipoctal->dev->dev, region->start, region->size); if (!ipoctal->int_space) { dev_err(&ipoctal->dev->dev, @@ -303,7 +303,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr, region = &ipoctal->dev->region[IPACK_MEM8_SPACE]; ipoctal->mem8_space = - devm_ioremap_nocache(&ipoctal->dev->dev, + devm_ioremap(&ipoctal->dev->dev, region->start, 0x8000); if (!ipoctal->mem8_space) { dev_err(&ipoctal->dev->dev, diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 697e6a8ccaae..1006c694d9fb 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -457,6 +457,12 @@ config IMX_IRQSTEER help Support for the i.MX IRQSTEER interrupt multiplexer/remapper. +config IMX_INTMUX + def_bool y if ARCH_MXC + select IRQ_DOMAIN + help + Support for the i.MX INTMUX interrupt multiplexer. 
+ config LS1X_IRQ bool "Loongson-1 Interrupt Controller" depends on MACH_LOONGSON32 @@ -490,6 +496,7 @@ config TI_SCI_INTA_IRQCHIP config SIFIVE_PLIC bool "SiFive Platform-Level Interrupt Controller" depends on RISCV + select IRQ_DOMAIN_HIERARCHY help This enables support for the PLIC chip found in SiFive (and potentially other) RISC-V systems. The PLIC controls devices @@ -499,4 +506,11 @@ config SIFIVE_PLIC If you don't know what to do here, say Y. +config EXYNOS_IRQ_COMBINER + bool "Samsung Exynos IRQ combiner support" if COMPILE_TEST + depends on (ARCH_EXYNOS && ARM) || COMPILE_TEST + help + Say yes here to add support for the IRQ combiner devices embedded + in Samsung Exynos chips. + endmenu diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index e806dda690ea..eae0d78cbf22 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -9,7 +9,7 @@ obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2836.o obj-$(CONFIG_DAVINCI_AINTC) += irq-davinci-aintc.o obj-$(CONFIG_DAVINCI_CP_INTC) += irq-davinci-cp-intc.o -obj-$(CONFIG_ARCH_EXYNOS) += exynos-combiner.o +obj-$(CONFIG_EXYNOS_IRQ_COMBINER) += exynos-combiner.o obj-$(CONFIG_FARADAY_FTINTC010) += irq-ftintc010.o obj-$(CONFIG_ARCH_HIP04) += irq-hip04.o obj-$(CONFIG_ARCH_LPC32XX) += irq-lpc32xx.o @@ -87,7 +87,7 @@ obj-$(CONFIG_MVEBU_SEI) += irq-mvebu-sei.o obj-$(CONFIG_LS_EXTIRQ) += irq-ls-extirq.o obj-$(CONFIG_LS_SCFG_MSI) += irq-ls-scfg-msi.o obj-$(CONFIG_EZNPS_GIC) += irq-eznps.o -obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o +obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o irq-aspeed-scu-ic.o obj-$(CONFIG_STM32_EXTI) += irq-stm32-exti.o obj-$(CONFIG_QCOM_IRQ_COMBINER) += qcom-irq-combiner.o obj-$(CONFIG_IRQ_UNIPHIER_AIDET) += irq-uniphier-aidet.o @@ -100,6 +100,7 @@ obj-$(CONFIG_CSKY_MPINTC) += irq-csky-mpintc.o obj-$(CONFIG_CSKY_APB_INTC) += irq-csky-apb-intc.o obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o obj-$(CONFIG_IMX_IRQSTEER) += irq-imx-irqsteer.o +obj-$(CONFIG_IMX_INTMUX) += irq-imx-intmux.o obj-$(CONFIG_MADERA_IRQ) += irq-madera.o obj-$(CONFIG_LS1X_IRQ) += irq-ls1x.o obj-$(CONFIG_TI_SCI_INTR_IRQCHIP) += irq-ti-sci-intr.o diff --git a/drivers/irqchip/irq-aspeed-scu-ic.c b/drivers/irqchip/irq-aspeed-scu-ic.c new file mode 100644 index 000000000000..c90a3346b985 --- /dev/null +++ b/drivers/irqchip/irq-aspeed-scu-ic.c @@ -0,0 +1,239 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Aspeed AST24XX, AST25XX, and AST26XX SCU Interrupt Controller + * Copyright 2019 IBM Corporation + * + * Eddie James <eajames@linux.ibm.com> + */ + +#include <linux/bitops.h> +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/mfd/syscon.h> +#include <linux/of_irq.h> +#include <linux/regmap.h> + +#define ASPEED_SCU_IC_REG 0x018 +#define ASPEED_SCU_IC_SHIFT 0 +#define ASPEED_SCU_IC_ENABLE GENMASK(6, ASPEED_SCU_IC_SHIFT) +#define ASPEED_SCU_IC_NUM_IRQS 7 +#define ASPEED_SCU_IC_STATUS_SHIFT 16 + +#define ASPEED_AST2600_SCU_IC0_REG 0x560 +#define ASPEED_AST2600_SCU_IC0_SHIFT 0 +#define ASPEED_AST2600_SCU_IC0_ENABLE \ + GENMASK(5, ASPEED_AST2600_SCU_IC0_SHIFT) +#define ASPEED_AST2600_SCU_IC0_NUM_IRQS 6 + +#define ASPEED_AST2600_SCU_IC1_REG 0x570 +#define ASPEED_AST2600_SCU_IC1_SHIFT 4 +#define ASPEED_AST2600_SCU_IC1_ENABLE \ + GENMASK(5, ASPEED_AST2600_SCU_IC1_SHIFT) +#define ASPEED_AST2600_SCU_IC1_NUM_IRQS 2 + +struct aspeed_scu_ic { + unsigned long irq_enable; + unsigned 
long irq_shift; + unsigned int num_irqs; + unsigned int reg; + struct regmap *scu; + struct irq_domain *irq_domain; +}; + +static void aspeed_scu_ic_irq_handler(struct irq_desc *desc) +{ + unsigned int irq; + unsigned int sts; + unsigned long bit; + unsigned long enabled; + unsigned long max; + unsigned long status; + struct aspeed_scu_ic *scu_ic = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + unsigned int mask = scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT; + + chained_irq_enter(chip, desc); + + /* + * The SCU IC has just one register to control its operation and read + * status. The interrupt enable bits occupy the lower 16 bits of the + * register, while the interrupt status bits occupy the upper 16 bits. + * The status bit for a given interrupt is always 16 bits shifted from + * the enable bit for the same interrupt. + * Therefore, perform the IRQ operations in the enable bit space by + * shifting the status down to get the mapping and then back up to + * clear the bit. + */ + regmap_read(scu_ic->scu, scu_ic->reg, &sts); + enabled = sts & scu_ic->irq_enable; + status = (sts >> ASPEED_SCU_IC_STATUS_SHIFT) & enabled; + + bit = scu_ic->irq_shift; + max = scu_ic->num_irqs + bit; + + for_each_set_bit_from(bit, &status, max) { + irq = irq_find_mapping(scu_ic->irq_domain, + bit - scu_ic->irq_shift); + generic_handle_irq(irq); + + regmap_update_bits(scu_ic->scu, scu_ic->reg, mask, + BIT(bit + ASPEED_SCU_IC_STATUS_SHIFT)); + } + + chained_irq_exit(chip, desc); +} + +static void aspeed_scu_ic_irq_mask(struct irq_data *data) +{ + struct aspeed_scu_ic *scu_ic = irq_data_get_irq_chip_data(data); + unsigned int mask = BIT(data->hwirq + scu_ic->irq_shift) | + (scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT); + + /* + * Status bits are cleared by writing 1. In order to prevent the mask + * operation from clearing the status bits, they should be under the + * mask and written with 0. + */ + regmap_update_bits(scu_ic->scu, scu_ic->reg, mask, 0); +} + +static void aspeed_scu_ic_irq_unmask(struct irq_data *data) +{ + struct aspeed_scu_ic *scu_ic = irq_data_get_irq_chip_data(data); + unsigned int bit = BIT(data->hwirq + scu_ic->irq_shift); + unsigned int mask = bit | + (scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT); + + /* + * Status bits are cleared by writing 1. In order to prevent the unmask + * operation from clearing the status bits, they should be under the + * mask and written with 0. 
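+ *
+ * Illustrative numbers, assumed rather than taken from the patch: with
+ * the AST2400/AST2500 layout (irq_shift == 0, irq_enable == GENMASK(6, 0)),
+ * unmasking hwirq 3 gives
+ *
+ *	bit  = BIT(3)             = 0x00000008
+ *	mask = bit | (0x7f << 16) = 0x007f0008
+ *
+ * so the update below sets only enable bit 3 and writes 0 to the
+ * write-1-to-clear status bits 16-22, leaving pending status untouched.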
+ */ + regmap_update_bits(scu_ic->scu, scu_ic->reg, mask, bit); +} + +static int aspeed_scu_ic_irq_set_affinity(struct irq_data *data, + const struct cpumask *dest, + bool force) +{ + return -EINVAL; +} + +static struct irq_chip aspeed_scu_ic_chip = { + .name = "aspeed-scu-ic", + .irq_mask = aspeed_scu_ic_irq_mask, + .irq_unmask = aspeed_scu_ic_irq_unmask, + .irq_set_affinity = aspeed_scu_ic_irq_set_affinity, +}; + +static int aspeed_scu_ic_map(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &aspeed_scu_ic_chip, handle_level_irq); + irq_set_chip_data(irq, domain->host_data); + + return 0; +} + +static const struct irq_domain_ops aspeed_scu_ic_domain_ops = { + .map = aspeed_scu_ic_map, +}; + +static int aspeed_scu_ic_of_init_common(struct aspeed_scu_ic *scu_ic, + struct device_node *node) +{ + int irq; + int rc = 0; + + if (!node->parent) { + rc = -ENODEV; + goto err; + } + + scu_ic->scu = syscon_node_to_regmap(node->parent); + if (IS_ERR(scu_ic->scu)) { + rc = PTR_ERR(scu_ic->scu); + goto err; + } + + irq = irq_of_parse_and_map(node, 0); + if (!irq) { + rc = -EINVAL; + goto err; + } + + scu_ic->irq_domain = irq_domain_add_linear(node, scu_ic->num_irqs, + &aspeed_scu_ic_domain_ops, + scu_ic); + if (!scu_ic->irq_domain) { + rc = -ENOMEM; + goto err; + } + + irq_set_chained_handler_and_data(irq, aspeed_scu_ic_irq_handler, + scu_ic); + + return 0; + +err: + kfree(scu_ic); + + return rc; +} + +static int __init aspeed_scu_ic_of_init(struct device_node *node, + struct device_node *parent) +{ + struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL); + + if (!scu_ic) + return -ENOMEM; + + scu_ic->irq_enable = ASPEED_SCU_IC_ENABLE; + scu_ic->irq_shift = ASPEED_SCU_IC_SHIFT; + scu_ic->num_irqs = ASPEED_SCU_IC_NUM_IRQS; + scu_ic->reg = ASPEED_SCU_IC_REG; + + return aspeed_scu_ic_of_init_common(scu_ic, node); +} + +static int __init aspeed_ast2600_scu_ic0_of_init(struct device_node *node, + struct device_node *parent) +{ + struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL); + + if (!scu_ic) + return -ENOMEM; + + scu_ic->irq_enable = ASPEED_AST2600_SCU_IC0_ENABLE; + scu_ic->irq_shift = ASPEED_AST2600_SCU_IC0_SHIFT; + scu_ic->num_irqs = ASPEED_AST2600_SCU_IC0_NUM_IRQS; + scu_ic->reg = ASPEED_AST2600_SCU_IC0_REG; + + return aspeed_scu_ic_of_init_common(scu_ic, node); +} + +static int __init aspeed_ast2600_scu_ic1_of_init(struct device_node *node, + struct device_node *parent) +{ + struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL); + + if (!scu_ic) + return -ENOMEM; + + scu_ic->irq_enable = ASPEED_AST2600_SCU_IC1_ENABLE; + scu_ic->irq_shift = ASPEED_AST2600_SCU_IC1_SHIFT; + scu_ic->num_irqs = ASPEED_AST2600_SCU_IC1_NUM_IRQS; + scu_ic->reg = ASPEED_AST2600_SCU_IC1_REG; + + return aspeed_scu_ic_of_init_common(scu_ic, node); +} + +IRQCHIP_DECLARE(ast2400_scu_ic, "aspeed,ast2400-scu-ic", aspeed_scu_ic_of_init); +IRQCHIP_DECLARE(ast2500_scu_ic, "aspeed,ast2500-scu-ic", aspeed_scu_ic_of_init); +IRQCHIP_DECLARE(ast2600_scu_ic0, "aspeed,ast2600-scu-ic0", + aspeed_ast2600_scu_ic0_of_init); +IRQCHIP_DECLARE(ast2600_scu_ic1, "aspeed,ast2600-scu-ic1", + aspeed_ast2600_scu_ic1_of_init); diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index e05673bcd52b..f71758632f8d 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -106,6 +106,7 @@ struct its_node { u64 typer; u64 cbaser_save; u32 ctlr_save; + u32 mpidr; struct list_head its_device_list; u64 
flags; unsigned long list_nr; @@ -116,12 +117,22 @@ struct its_node { }; #define is_v4(its) (!!((its)->typer & GITS_TYPER_VLPIS)) +#define is_v4_1(its) (!!((its)->typer & GITS_TYPER_VMAPP)) #define device_ids(its) (FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1) #define ITS_ITT_ALIGN SZ_256 /* The maximum number of VPEID bits supported by VLPI commands */ -#define ITS_MAX_VPEID_BITS (16) +#define ITS_MAX_VPEID_BITS \ + ({ \ + int nvpeid = 16; \ + if (gic_rdists->has_rvpeid && \ + gic_rdists->gicd_typer2 & GICD_TYPER2_VIL) \ + nvpeid = 1 + (gic_rdists->gicd_typer2 & \ + GICD_TYPER2_VID); \ + \ + nvpeid; \ + }) #define ITS_MAX_VPEID (1 << (ITS_MAX_VPEID_BITS)) /* Convert page order to size in bytes */ @@ -216,11 +227,27 @@ static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev, return &its_dev->event_map.vlpi_maps[event]; } -static struct its_collection *irq_to_col(struct irq_data *d) +static struct its_vlpi_map *get_vlpi_map(struct irq_data *d) +{ + if (irqd_is_forwarded_to_vcpu(d)) { + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + + return dev_event_to_vlpi_map(its_dev, event); + } + + return NULL; +} + +static int irq_to_cpuid(struct irq_data *d) { struct its_device *its_dev = irq_data_get_irq_chip_data(d); + struct its_vlpi_map *map = get_vlpi_map(d); - return dev_event_to_col(its_dev, its_get_event_id(d)); + if (map) + return map->vpe->col_idx; + + return its_dev->event_map.col_map[its_get_event_id(d)]; } static struct its_collection *valid_col(struct its_collection *col) @@ -322,6 +349,10 @@ struct its_cmd_desc { u16 seq_num; u16 its_list; } its_vmovp_cmd; + + struct { + struct its_vpe *vpe; + } its_invdb_cmd; }; }; @@ -438,6 +469,38 @@ static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size) its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0); } +static void its_encode_vconf_addr(struct its_cmd_block *cmd, u64 vconf_pa) +{ + its_mask_encode(&cmd->raw_cmd[0], vconf_pa >> 16, 51, 16); +} + +static void its_encode_alloc(struct its_cmd_block *cmd, bool alloc) +{ + its_mask_encode(&cmd->raw_cmd[0], alloc, 8, 8); +} + +static void its_encode_ptz(struct its_cmd_block *cmd, bool ptz) +{ + its_mask_encode(&cmd->raw_cmd[0], ptz, 9, 9); +} + +static void its_encode_vmapp_default_db(struct its_cmd_block *cmd, + u32 vpe_db_lpi) +{ + its_mask_encode(&cmd->raw_cmd[1], vpe_db_lpi, 31, 0); +} + +static void its_encode_vmovp_default_db(struct its_cmd_block *cmd, + u32 vpe_db_lpi) +{ + its_mask_encode(&cmd->raw_cmd[3], vpe_db_lpi, 31, 0); +} + +static void its_encode_db(struct its_cmd_block *cmd, bool db) +{ + its_mask_encode(&cmd->raw_cmd[2], db, 63, 63); +} + static inline void its_fixup_cmd(struct its_cmd_block *cmd) { /* Let's fixup BE commands */ @@ -621,19 +684,45 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its, struct its_cmd_block *cmd, struct its_cmd_desc *desc) { - unsigned long vpt_addr; + unsigned long vpt_addr, vconf_addr; u64 target; - - vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page)); - target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset; + bool alloc; its_encode_cmd(cmd, GITS_CMD_VMAPP); its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id); its_encode_valid(cmd, desc->its_vmapp_cmd.valid); + + if (!desc->its_vmapp_cmd.valid) { + if (is_v4_1(its)) { + alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count); + its_encode_alloc(cmd, alloc); + } + + goto out; + } + + vpt_addr = 
virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page)); + target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset; + its_encode_target(cmd, target); its_encode_vpt_addr(cmd, vpt_addr); its_encode_vpt_size(cmd, LPI_NRBITS - 1); + if (!is_v4_1(its)) + goto out; + + vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page)); + + alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count); + + its_encode_alloc(cmd, alloc); + + /* We can only signal PTZ when alloc==1. Why do we have two bits? */ + its_encode_ptz(cmd, alloc); + its_encode_vconf_addr(cmd, vconf_addr); + its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi); + +out: its_fixup_cmd(cmd); return valid_vpe(its, desc->its_vmapp_cmd.vpe); @@ -645,7 +734,7 @@ static struct its_vpe *its_build_vmapti_cmd(struct its_node *its, { u32 db; - if (desc->its_vmapti_cmd.db_enabled) + if (!is_v4_1(its) && desc->its_vmapti_cmd.db_enabled) db = desc->its_vmapti_cmd.vpe->vpe_db_lpi; else db = 1023; @@ -668,7 +757,7 @@ static struct its_vpe *its_build_vmovi_cmd(struct its_node *its, { u32 db; - if (desc->its_vmovi_cmd.db_enabled) + if (!is_v4_1(its) && desc->its_vmovi_cmd.db_enabled) db = desc->its_vmovi_cmd.vpe->vpe_db_lpi; else db = 1023; @@ -698,6 +787,11 @@ static struct its_vpe *its_build_vmovp_cmd(struct its_node *its, its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id); its_encode_target(cmd, target); + if (is_v4_1(its)) { + its_encode_db(cmd, true); + its_encode_vmovp_default_db(cmd, desc->its_vmovp_cmd.vpe->vpe_db_lpi); + } + its_fixup_cmd(cmd); return valid_vpe(its, desc->its_vmovp_cmd.vpe); @@ -757,6 +851,21 @@ static struct its_vpe *its_build_vclear_cmd(struct its_node *its, return valid_vpe(its, map->vpe); } +static struct its_vpe *its_build_invdb_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + if (WARN_ON(!is_v4_1(its))) + return NULL; + + its_encode_cmd(cmd, GITS_CMD_INVDB); + its_encode_vpeid(cmd, desc->its_invdb_cmd.vpe->vpe_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_invdb_cmd.vpe); +} + static u64 its_cmd_ptr_to_offset(struct its_node *its, struct its_cmd_block *ptr) { @@ -1165,20 +1274,17 @@ static void its_send_vclear(struct its_device *dev, u32 event_id) its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc); } -/* - * irqchip functions - assumes MSI, mostly. - */ -static struct its_vlpi_map *get_vlpi_map(struct irq_data *d) +static void its_send_invdb(struct its_node *its, struct its_vpe *vpe) { - struct its_device *its_dev = irq_data_get_irq_chip_data(d); - u32 event = its_get_event_id(d); - - if (!irqd_is_forwarded_to_vcpu(d)) - return NULL; + struct its_cmd_desc desc; - return dev_event_to_vlpi_map(its_dev, event); + desc.its_invdb_cmd.vpe = vpe; + its_send_single_vcommand(its, its_build_invdb_cmd, &desc); } +/* + * irqchip functions - assumes MSI, mostly. 
+ */ static void lpi_write_config(struct irq_data *d, u8 clr, u8 set) { struct its_vlpi_map *map = get_vlpi_map(d); @@ -1221,13 +1327,25 @@ static void wait_for_syncr(void __iomem *rdbase) static void direct_lpi_inv(struct irq_data *d) { - struct its_collection *col; + struct its_vlpi_map *map = get_vlpi_map(d); void __iomem *rdbase; + u64 val; + + if (map) { + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + + WARN_ON(!is_v4_1(its_dev->its)); + + val = GICR_INVLPIR_V; + val |= FIELD_PREP(GICR_INVLPIR_VPEID, map->vpe->vpe_id); + val |= FIELD_PREP(GICR_INVLPIR_INTID, map->vintid); + } else { + val = d->hwirq; + } /* Target the redistributor this LPI is currently routed to */ - col = irq_to_col(d); - rdbase = per_cpu_ptr(gic_rdists->rdist, col->col_id)->rd_base; - gic_write_lpir(d->hwirq, rdbase + GICR_INVLPIR); + rdbase = per_cpu_ptr(gic_rdists->rdist, irq_to_cpuid(d))->rd_base; + gic_write_lpir(val, rdbase + GICR_INVLPIR); wait_for_syncr(rdbase); } @@ -1237,7 +1355,8 @@ static void lpi_update_config(struct irq_data *d, u8 clr, u8 set) { struct its_device *its_dev = irq_data_get_irq_chip_data(d); lpi_write_config(d, clr, set); - if (gic_rdists->has_direct_lpi && !irqd_is_forwarded_to_vcpu(d)) + if (gic_rdists->has_direct_lpi && + (is_v4_1(its_dev->its) || !irqd_is_forwarded_to_vcpu(d))) direct_lpi_inv(d); else if (!irqd_is_forwarded_to_vcpu(d)) its_send_inv(its_dev, its_get_event_id(d)); @@ -1251,6 +1370,13 @@ static void its_vlpi_set_doorbell(struct irq_data *d, bool enable) u32 event = its_get_event_id(d); struct its_vlpi_map *map; + /* + * GICv4.1 does away with the per-LPI nonsense, nothing to do + * here. + */ + if (is_v4_1(its_dev->its)) + return; + map = dev_event_to_vlpi_map(its_dev, event); if (map->db_enabled == enable) @@ -2090,6 +2216,65 @@ static bool its_parse_indirect_baser(struct its_node *its, return indirect; } +static u32 compute_common_aff(u64 val) +{ + u32 aff, clpiaff; + + aff = FIELD_GET(GICR_TYPER_AFFINITY, val); + clpiaff = FIELD_GET(GICR_TYPER_COMMON_LPI_AFF, val); + + return aff & ~(GENMASK(31, 0) >> (clpiaff * 8)); +} + +static u32 compute_its_aff(struct its_node *its) +{ + u64 val; + u32 svpet; + + /* + * Reencode the ITS SVPET and MPIDR as a GICR_TYPER, and compute + * the resulting affinity. We then use that to see if this matches + * our own affinity. 
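+ *
+ * Worked example with invented values: CommonLPIAff == 2 keeps
+ * Aff3.Aff2 and zeroes the lower bytes, so an affinity of 0x01020304
+ * reduces to
+ *
+ *	0x01020304 & ~(GENMASK(31, 0) >> (2 * 8)) == 0x01020000
+ *
+ * and two components share a vPE table iff they agree on Aff3.Aff2.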
+ */ + svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer); + val = FIELD_PREP(GICR_TYPER_COMMON_LPI_AFF, svpet); + val |= FIELD_PREP(GICR_TYPER_AFFINITY, its->mpidr); + return compute_common_aff(val); +} + +static struct its_node *find_sibling_its(struct its_node *cur_its) +{ + struct its_node *its; + u32 aff; + + if (!FIELD_GET(GITS_TYPER_SVPET, cur_its->typer)) + return NULL; + + aff = compute_its_aff(cur_its); + + list_for_each_entry(its, &its_nodes, entry) { + u64 baser; + + if (!is_v4_1(its) || its == cur_its) + continue; + + if (!FIELD_GET(GITS_TYPER_SVPET, its->typer)) + continue; + + if (aff != compute_its_aff(its)) + continue; + + /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */ + baser = its->tables[2].val; + if (!(baser & GITS_BASER_VALID)) + continue; + + return its; + } + + return NULL; +} + static void its_free_tables(struct its_node *its) { int i; @@ -2132,6 +2317,17 @@ static int its_alloc_tables(struct its_node *its) break; case GITS_BASER_TYPE_VCPU: + if (is_v4_1(its)) { + struct its_node *sibling; + + WARN_ON(i != 2); + if ((sibling = find_sibling_its(its))) { + *baser = sibling->tables[2]; + its_write_baser(its, baser, baser->val); + continue; + } + } + indirect = its_parse_indirect_baser(its, baser, psz, &order, ITS_MAX_VPEID_BITS); @@ -2153,6 +2349,220 @@ static int its_alloc_tables(struct its_node *its) return 0; } +static u64 inherit_vpe_l1_table_from_its(void) +{ + struct its_node *its; + u64 val; + u32 aff; + + val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); + aff = compute_common_aff(val); + + list_for_each_entry(its, &its_nodes, entry) { + u64 baser, addr; + + if (!is_v4_1(its)) + continue; + + if (!FIELD_GET(GITS_TYPER_SVPET, its->typer)) + continue; + + if (aff != compute_its_aff(its)) + continue; + + /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */ + baser = its->tables[2].val; + if (!(baser & GITS_BASER_VALID)) + continue; + + /* We have a winner! */ + val = GICR_VPROPBASER_4_1_VALID; + if (baser & GITS_BASER_INDIRECT) + val |= GICR_VPROPBASER_4_1_INDIRECT; + val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, + FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)); + switch (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)) { + case GIC_PAGE_SIZE_64K: + addr = GITS_BASER_ADDR_48_to_52(baser); + break; + default: + addr = baser & GENMASK_ULL(47, 12); + break; + } + val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12); + val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK, + FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser)); + val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK, + FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser)); + val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1); + + return val; + } + + return 0; +} + +static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask) +{ + u32 aff; + u64 val; + int cpu; + + val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); + aff = compute_common_aff(val); + + for_each_possible_cpu(cpu) { + void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base; + u32 tmp; + + if (!base || cpu == smp_processor_id()) + continue; + + val = gic_read_typer(base + GICR_TYPER); + tmp = compute_common_aff(val); + if (tmp != aff) + continue; + + /* + * At this point, we have a victim. This particular CPU + * has already booted, and has an affinity that matches + * ours wrt CommonLPIAff. Let's use its own VPROPBASER. + * Make sure we don't write the Z bit in that case. 
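+ * (As we read the architecture, the Z bit declares the table freshly
+ * zeroed, which is only true at allocation time; asserting it for a
+ * table this other CPU already uses could invalidate its live entries,
+ * hence the explicit clear below.)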
+ */ + val = gits_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER); + val &= ~GICR_VPROPBASER_4_1_Z; + + *mask = gic_data_rdist_cpu(cpu)->vpe_table_mask; + + return val; + } + + return 0; +} + +static int allocate_vpe_l1_table(void) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val, gpsz, npg, pa; + unsigned int psz = SZ_64K; + unsigned int np, epp, esz; + struct page *page; + + if (!gic_rdists->has_rvpeid) + return 0; + + /* + * if VPENDBASER.Valid is set, disable any previously programmed + * VPE by setting PendingLast while clearing Valid. This has the + * effect of making sure no doorbell will be generated and we can + * then safely clear VPROPBASER.Valid. + */ + if (gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER) & GICR_VPENDBASER_Valid) + gits_write_vpendbaser(GICR_VPENDBASER_PendingLast, + vlpi_base + GICR_VPENDBASER); + + /* + * If we can inherit the configuration from another RD, let's do + * so. Otherwise, we have to go through the allocation process. We + * assume that all RDs have the exact same requirements, as + * nothing will work otherwise. + */ + val = inherit_vpe_l1_table_from_rd(&gic_data_rdist()->vpe_table_mask); + if (val & GICR_VPROPBASER_4_1_VALID) + goto out; + + gic_data_rdist()->vpe_table_mask = kzalloc(sizeof(cpumask_t), GFP_KERNEL); + if (!gic_data_rdist()->vpe_table_mask) + return -ENOMEM; + + val = inherit_vpe_l1_table_from_its(); + if (val & GICR_VPROPBASER_4_1_VALID) + goto out; + + /* First probe the page size */ + val = FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, GIC_PAGE_SIZE_64K); + gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); + val = gits_read_vpropbaser(vlpi_base + GICR_VPROPBASER); + gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val); + esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val); + + switch (gpsz) { + default: + gpsz = GIC_PAGE_SIZE_4K; + /* fall through */ + case GIC_PAGE_SIZE_4K: + psz = SZ_4K; + break; + case GIC_PAGE_SIZE_16K: + psz = SZ_16K; + break; + case GIC_PAGE_SIZE_64K: + psz = SZ_64K; + break; + } + + /* + * Start populating the register from scratch, including RO fields + * (which we want to print in debug cases...) + */ + val = 0; + val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, gpsz); + val |= FIELD_PREP(GICR_VPROPBASER_4_1_ENTRY_SIZE, esz); + + /* How many entries per GIC page? */ + esz++; + epp = psz / (esz * SZ_8); + + /* + * If we need more than just a single L1 page, flag the table + * as indirect and compute the number of required L1 pages. 
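+ *
+ * Numeric sketch with assumed values: psz == SZ_4K and a 16-byte vPE
+ * entry give epp == 4096 / 16 == 256 entries per page. With a 16-bit
+ * VPEID space (65536 vPEs) this works out to
+ *
+ *	nl2 = DIV_ROUND_UP(65536, 256)    = 256 L2 pages
+ *	npg = DIV_ROUND_UP(256 * 8, 4096) = 1 L1 page
+ *
+ * i.e. a single indirect L1 page whose first 256 descriptors point at
+ * the L2 pages.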
+ */ + if (epp < ITS_MAX_VPEID) { + int nl2; + + val |= GICR_VPROPBASER_4_1_INDIRECT; + + /* Number of L2 pages required to cover the VPEID space */ + nl2 = DIV_ROUND_UP(ITS_MAX_VPEID, epp); + + /* Number of L1 pages to point to the L2 pages */ + npg = DIV_ROUND_UP(nl2 * SZ_8, psz); + } else { + npg = 1; + } + + val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, npg); + + /* Right, that's the number of CPU pages we need for L1 */ + np = DIV_ROUND_UP(npg * psz, PAGE_SIZE); + + pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n", + np, npg, psz, epp, esz); + page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(np * PAGE_SIZE)); + if (!page) + return -ENOMEM; + + gic_data_rdist()->vpe_l1_page = page; + pa = virt_to_phys(page_address(page)); + WARN_ON(!IS_ALIGNED(pa, psz)); + + val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12); + val |= GICR_VPROPBASER_RaWb; + val |= GICR_VPROPBASER_InnerShareable; + val |= GICR_VPROPBASER_4_1_Z; + val |= GICR_VPROPBASER_4_1_VALID; + +out: + gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); + cpumask_set_cpu(smp_processor_id(), gic_data_rdist()->vpe_table_mask); + + pr_debug("CPU%d: VPROPBASER = %llx %*pbl\n", + smp_processor_id(), val, + cpumask_pr_args(gic_data_rdist()->vpe_table_mask)); + + return 0; +} + static int its_alloc_collections(struct its_node *its) { int i; @@ -2244,7 +2654,7 @@ static int __init allocate_lpi_tables(void) return 0; } -static u64 its_clear_vpend_valid(void __iomem *vlpi_base) +static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set) { u32 count = 1000000; /* 1s! */ bool clean; @@ -2252,6 +2662,8 @@ static u64 its_clear_vpend_valid(void __iomem *vlpi_base) val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); val &= ~GICR_VPENDBASER_Valid; + val &= ~clr; + val |= set; gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); do { @@ -2264,6 +2676,11 @@ static u64 its_clear_vpend_valid(void __iomem *vlpi_base) } } while (!clean && count); + if (unlikely(val & GICR_VPENDBASER_Dirty)) { + pr_err_ratelimited("ITS virtual pending table not cleaning\n"); + val |= GICR_VPENDBASER_PendingLast; + } + return val; } @@ -2352,7 +2769,7 @@ static void its_cpu_init_lpis(void) val |= GICR_CTLR_ENABLE_LPIS; writel_relaxed(val, rbase + GICR_CTLR); - if (gic_rdists->has_vlpis) { + if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) { void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); /* @@ -2372,10 +2789,20 @@ static void its_cpu_init_lpis(void) * ancient programming gets left in and has possibility of * corrupting memory. */ - val = its_clear_vpend_valid(vlpi_base); + val = its_clear_vpend_valid(vlpi_base, 0, 0); WARN_ON(val & GICR_VPENDBASER_Dirty); } + if (allocate_vpe_l1_table()) { + /* + * If the allocation has failed, we're in massive trouble. + * Disable direct injection, and pray that no VM was + * already running... + */ + gic_rdists->has_rvpeid = false; + gic_rdists->has_vlpis = false; + } + /* Make sure the GIC has seen the above */ dsb(sy); out: @@ -2859,7 +3286,7 @@ static const struct irq_domain_ops its_domain_ops = { /* * This is insane. * - * If a GICv4 doesn't implement Direct LPIs (which is extremely + * If a GICv4.0 doesn't implement Direct LPIs (which is extremely * likely), the only way to perform an invalidate is to use a fake * device to issue an INV command, implying that the LPI has first * been mapped to some event on that device. 
Since this is not exactly @@ -2867,9 +3294,20 @@ static const struct irq_domain_ops its_domain_ops = { * only issue an UNMAP if we're short on available slots. * * Broken by design(tm). + * + * GICv4.1, on the other hand, mandates that we're able to invalidate + * by writing to a MMIO register. It doesn't implement the whole of + * DirectLPI, but that's good enough. And most of the time, we don't + * even have to invalidate anything, as the redistributor can be told + * whether to generate a doorbell or not (we thus leave it enabled, + * always). */ static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe) { + /* GICv4.1 doesn't use a proxy, so nothing to do here */ + if (gic_rdists->has_rvpeid) + return; + /* Already unmapped? */ if (vpe->vpe_proxy_event == -1) return; @@ -2892,6 +3330,10 @@ static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe) static void its_vpe_db_proxy_unmap(struct its_vpe *vpe) { + /* GICv4.1 doesn't use a proxy, so nothing to do here */ + if (gic_rdists->has_rvpeid) + return; + if (!gic_rdists->has_direct_lpi) { unsigned long flags; @@ -2903,6 +3345,10 @@ static void its_vpe_db_proxy_unmap(struct its_vpe *vpe) static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe) { + /* GICv4.1 doesn't use a proxy, so nothing to do here */ + if (gic_rdists->has_rvpeid) + return; + /* Already mapped? */ if (vpe->vpe_proxy_event != -1) return; @@ -2925,6 +3371,10 @@ static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to) unsigned long flags; struct its_collection *target_col; + /* GICv4.1 doesn't use a proxy, so nothing to do here */ + if (gic_rdists->has_rvpeid) + return; + if (gic_rdists->has_direct_lpi) { void __iomem *rdbase; @@ -2951,7 +3401,7 @@ static int its_vpe_set_affinity(struct irq_data *d, bool force) { struct its_vpe *vpe = irq_data_get_irq_chip_data(d); - int cpu = cpumask_first(mask_val); + int from, cpu = cpumask_first(mask_val); /* * Changing affinity is mega expensive, so let's be as lazy as @@ -2959,14 +3409,24 @@ static int its_vpe_set_affinity(struct irq_data *d, * into the proxy device, we need to move the doorbell * interrupt to its new location. */ - if (vpe->col_idx != cpu) { - int from = vpe->col_idx; + if (vpe->col_idx == cpu) + goto out; - vpe->col_idx = cpu; - its_send_vmovp(vpe); - its_vpe_db_proxy_move(vpe, from, cpu); - } + from = vpe->col_idx; + vpe->col_idx = cpu; + /* + * GICv4.1 allows us to skip VMOVP if moving to a cpu whose RD + * is sharing its VPE table with the current one. 
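+ * (For example, assuming CommonLPIAff groups redistributors per socket,
+ * moving a vPE between two CPUs of the same socket is caught by the
+ * cpumask test below and skips the VMOVP entirely; only cross-socket
+ * moves still pay for it. The per-socket grouping is an assumption,
+ * not something the patch guarantees.)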
+ */ + if (gic_data_rdist_cpu(cpu)->vpe_table_mask && + cpumask_test_cpu(from, gic_data_rdist_cpu(cpu)->vpe_table_mask)) + goto out; + + its_send_vmovp(vpe); + its_vpe_db_proxy_move(vpe, from, cpu); + +out: irq_data_update_effective_affinity(d, cpumask_of(cpu)); return IRQ_SET_MASK_OK_DONE; @@ -3009,16 +3469,10 @@ static void its_vpe_deschedule(struct its_vpe *vpe) void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); u64 val; - val = its_clear_vpend_valid(vlpi_base); + val = its_clear_vpend_valid(vlpi_base, 0, 0); - if (unlikely(val & GICR_VPENDBASER_Dirty)) { - pr_err_ratelimited("ITS virtual pending table not cleaning\n"); - vpe->idai = false; - vpe->pending_last = true; - } else { - vpe->idai = !!(val & GICR_VPENDBASER_IDAI); - vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); - } + vpe->idai = !!(val & GICR_VPENDBASER_IDAI); + vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); } static void its_vpe_invall(struct its_vpe *vpe) @@ -3151,6 +3605,139 @@ static struct irq_chip its_vpe_irq_chip = { .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity, }; +static struct its_node *find_4_1_its(void) +{ + static struct its_node *its = NULL; + + if (!its) { + list_for_each_entry(its, &its_nodes, entry) { + if (is_v4_1(its)) + return its; + } + + /* Oops? */ + its = NULL; + } + + return its; +} + +static void its_vpe_4_1_send_inv(struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_node *its; + + /* + * GICv4.1 wants doorbells to be invalidated using the + * INVDB command in order to be broadcast to all RDs. Send + * it to the first valid ITS, and let the HW do its magic. + */ + its = find_4_1_its(); + if (its) + its_send_invdb(its, vpe); +} + +static void its_vpe_4_1_mask_irq(struct irq_data *d) +{ + lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0); + its_vpe_4_1_send_inv(d); +} + +static void its_vpe_4_1_unmask_irq(struct irq_data *d) +{ + lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED); + its_vpe_4_1_send_inv(d); +} + +static void its_vpe_4_1_schedule(struct its_vpe *vpe, + struct its_cmd_info *info) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val = 0; + + /* Schedule the VPE */ + val |= GICR_VPENDBASER_Valid; + val |= info->g0en ? GICR_VPENDBASER_4_1_VGRP0EN : 0; + val |= info->g1en ? GICR_VPENDBASER_4_1_VGRP1EN : 0; + val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id); + + gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); +} + +static void its_vpe_4_1_deschedule(struct its_vpe *vpe, + struct its_cmd_info *info) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val; + + if (info->req_db) { + /* + * vPE is going to block: make the vPE non-resident with + * PendingLast clear and DB set. The GIC guarantees that if + * we read-back PendingLast clear, then a doorbell will be + * delivered when an interrupt comes. + */ + val = its_clear_vpend_valid(vlpi_base, + GICR_VPENDBASER_PendingLast, + GICR_VPENDBASER_4_1_DB); + vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); + } else { + /* + * We're not blocking, so just make the vPE non-resident + * with PendingLast set, indicating that we'll be back. 
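+ * (A plausible caller, not named here: a hypervisor briefly
+ * descheduling a still-runnable vCPU, where a wake-up doorbell would
+ * be pointless because the vPE is about to be made resident again.)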
+ */ + val = its_clear_vpend_valid(vlpi_base, + 0, + GICR_VPENDBASER_PendingLast); + vpe->pending_last = true; + } +} + +static void its_vpe_4_1_invall(struct its_vpe *vpe) +{ + void __iomem *rdbase; + u64 val; + + val = GICR_INVALLR_V; + val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id); + + /* Target the redistributor this vPE is currently known on */ + rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; + gic_write_lpir(val, rdbase + GICR_INVALLR); +} + +static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_cmd_info *info = vcpu_info; + + switch (info->cmd_type) { + case SCHEDULE_VPE: + its_vpe_4_1_schedule(vpe, info); + return 0; + + case DESCHEDULE_VPE: + its_vpe_4_1_deschedule(vpe, info); + return 0; + + case INVALL_VPE: + its_vpe_4_1_invall(vpe); + return 0; + + default: + return -EINVAL; + } +} + +static struct irq_chip its_vpe_4_1_irq_chip = { + .name = "GICv4.1-vpe", + .irq_mask = its_vpe_4_1_mask_irq, + .irq_unmask = its_vpe_4_1_unmask_irq, + .irq_eoi = irq_chip_eoi_parent, + .irq_set_affinity = its_vpe_set_affinity, + .irq_set_vcpu_affinity = its_vpe_4_1_set_vcpu_affinity, +}; + static int its_vpe_id_alloc(void) { return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL); @@ -3186,7 +3773,10 @@ static int its_vpe_init(struct its_vpe *vpe) vpe->vpe_id = vpe_id; vpe->vpt_page = vpt_page; - vpe->vpe_proxy_event = -1; + if (gic_rdists->has_rvpeid) + atomic_set(&vpe->vmapp_count, 0); + else + vpe->vpe_proxy_event = -1; return 0; } @@ -3228,6 +3818,7 @@ static void its_vpe_irq_domain_free(struct irq_domain *domain, static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *args) { + struct irq_chip *irqchip = &its_vpe_irq_chip; struct its_vm *vm = args; unsigned long *bitmap; struct page *vprop_page; @@ -3255,6 +3846,9 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq vm->nr_db_lpis = nr_ids; vm->vprop_page = vprop_page; + if (gic_rdists->has_rvpeid) + irqchip = &its_vpe_4_1_irq_chip; + for (i = 0; i < nr_irqs; i++) { vm->vpes[i]->vpe_db_lpi = base + i; err = its_vpe_init(vm->vpes[i]); @@ -3265,7 +3859,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq if (err) break; irq_domain_set_hwirq_and_chip(domain, virq + i, i, - &its_vpe_irq_chip, vm->vpes[i]); + irqchip, vm->vpes[i]); set_bit(i, bitmap); } @@ -3778,6 +4372,14 @@ static int __init its_probe_one(struct resource *res, } else { pr_info("ITS@%pa: Single VMOVP capable\n", &res->start); } + + if (is_v4_1(its)) { + u32 svpet = FIELD_GET(GITS_TYPER_SVPET, typer); + its->mpidr = readl_relaxed(its_base + GITS_MPIDR); + + pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n", + &res->start, its->mpidr, svpet); + } } its->numa_node = numa_node; @@ -4138,6 +4740,8 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists, bool has_v4 = false; int err; + gic_rdists = rdists; + its_parent = parent_domain; of_node = to_of_node(handle); if (of_node) @@ -4150,8 +4754,6 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists, return -ENXIO; } - gic_rdists = rdists; - err = allocate_lpi_tables(); if (err) return err; diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index d6218012097b..286f98222878 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -858,8 +858,21 @@ static int __gic_update_rdist_properties(struct redist_region 
*region, void __iomem *ptr) { u64 typer = gic_read_typer(ptr + GICR_TYPER); + gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS); - gic_data.rdists.has_direct_lpi &= !!(typer & GICR_TYPER_DirectLPIS); + + /* RVPEID implies some form of DirectLPI, no matter what the doc says... :-/ */ + gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID); + gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) | + gic_data.rdists.has_rvpeid); + + /* Detect non-sensical configurations */ + if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) { + gic_data.rdists.has_direct_lpi = false; + gic_data.rdists.has_vlpis = false; + gic_data.rdists.has_rvpeid = false; + } + gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr); return 1; @@ -872,9 +885,10 @@ static void gic_update_rdist_properties(void) if (WARN_ON(gic_data.ppi_nr == UINT_MAX)) gic_data.ppi_nr = 0; pr_info("%d PPIs implemented\n", gic_data.ppi_nr); - pr_info("%sVLPI support, %sdirect LPI support\n", + pr_info("%sVLPI support, %sdirect LPI support, %sRVPEID support\n", !gic_data.rdists.has_vlpis ? "no " : "", - !gic_data.rdists.has_direct_lpi ? "no " : ""); + !gic_data.rdists.has_direct_lpi ? "no " : "", + !gic_data.rdists.has_rvpeid ? "no " : ""); } /* Check whether it's single security state view */ @@ -1562,10 +1576,14 @@ static int __init gic_init_bases(void __iomem *dist_base, pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32); pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR); + + gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2); + gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops, &gic_data); irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED); gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist)); + gic_data.rdists.has_rvpeid = true; gic_data.rdists.has_vlpis = true; gic_data.rdists.has_direct_lpi = true; diff --git a/drivers/irqchip/irq-imx-intmux.c b/drivers/irqchip/irq-imx-intmux.c new file mode 100644 index 000000000000..c27577c81126 --- /dev/null +++ b/drivers/irqchip/irq-imx-intmux.c @@ -0,0 +1,309 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright 2017 NXP + +/* INTMUX Block Diagram + * + * ________________ + * interrupt source # 0 +---->| | + * | | | + * interrupt source # 1 +++-->| | + * ... | | | channel # 0 |--------->interrupt out # 0 + * ... | | | | + * ... | | | | + * interrupt source # X-1 +++-->|________________| + * | | | + * | | | + * | | | ________________ + * +---->| | + * | | | | | + * | +-->| | + * | | | | channel # 1 |--------->interrupt out # 1 + * | | +>| | + * | | | | | + * | | | |________________| + * | | | + * | | | + * | | | ... + * | | | ... + * | | | + * | | | ________________ + * +---->| | + * | | | | + * +-->| | + * | | channel # N |--------->interrupt out # N + * +>| | + * | | + * |________________| + * + * + * N: Interrupt Channel Instance Number (N=7) + * X: Interrupt Source Number for each channel (X=32) + * + * The INTMUX interrupt multiplexer has 8 channels, each channel receives 32 + * interrupt sources and generates 1 interrupt output. 
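+ *
+ * Illustrative consumer wiring, assumed from imx_intmux_irq_xlate()
+ * below (cell 0 = source number, cell 1 = channel index): a device on
+ * source 5 of channel 2 would use
+ *
+ *	interrupts = <5 2>;
+ *
+ * and its enable bit is bit 5 of CHANIER(2), i.e. register offset
+ * 0x10 + 0x40 * 2 == 0x90.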
+ * + */ + +#include <linux/clk.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/kernel.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> +#include <linux/spinlock.h> + +#define CHANIER(n) (0x10 + (0x40 * n)) +#define CHANIPR(n) (0x20 + (0x40 * n)) + +#define CHAN_MAX_NUM 0x8 + +struct intmux_irqchip_data { + int chanidx; + int irq; + struct irq_domain *domain; +}; + +struct intmux_data { + raw_spinlock_t lock; + void __iomem *regs; + struct clk *ipg_clk; + int channum; + struct intmux_irqchip_data irqchip_data[]; +}; + +static void imx_intmux_irq_mask(struct irq_data *d) +{ + struct intmux_irqchip_data *irqchip_data = d->chip_data; + int idx = irqchip_data->chanidx; + struct intmux_data *data = container_of(irqchip_data, struct intmux_data, + irqchip_data[idx]); + unsigned long flags; + void __iomem *reg; + u32 val; + + raw_spin_lock_irqsave(&data->lock, flags); + reg = data->regs + CHANIER(idx); + val = readl_relaxed(reg); + /* disable the interrupt source of this channel */ + val &= ~BIT(d->hwirq); + writel_relaxed(val, reg); + raw_spin_unlock_irqrestore(&data->lock, flags); +} + +static void imx_intmux_irq_unmask(struct irq_data *d) +{ + struct intmux_irqchip_data *irqchip_data = d->chip_data; + int idx = irqchip_data->chanidx; + struct intmux_data *data = container_of(irqchip_data, struct intmux_data, + irqchip_data[idx]); + unsigned long flags; + void __iomem *reg; + u32 val; + + raw_spin_lock_irqsave(&data->lock, flags); + reg = data->regs + CHANIER(idx); + val = readl_relaxed(reg); + /* enable the interrupt source of this channel */ + val |= BIT(d->hwirq); + writel_relaxed(val, reg); + raw_spin_unlock_irqrestore(&data->lock, flags); +} + +static struct irq_chip imx_intmux_irq_chip = { + .name = "intmux", + .irq_mask = imx_intmux_irq_mask, + .irq_unmask = imx_intmux_irq_unmask, +}; + +static int imx_intmux_irq_map(struct irq_domain *h, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_chip_data(irq, h->host_data); + irq_set_chip_and_handler(irq, &imx_intmux_irq_chip, handle_level_irq); + + return 0; +} + +static int imx_intmux_irq_xlate(struct irq_domain *d, struct device_node *node, + const u32 *intspec, unsigned int intsize, + unsigned long *out_hwirq, unsigned int *out_type) +{ + struct intmux_irqchip_data *irqchip_data = d->host_data; + int idx = irqchip_data->chanidx; + struct intmux_data *data = container_of(irqchip_data, struct intmux_data, + irqchip_data[idx]); + + /* + * two cells needed in interrupt specifier: + * the 1st cell: hw interrupt number + * the 2nd cell: channel index + */ + if (WARN_ON(intsize != 2)) + return -EINVAL; + + if (WARN_ON(intspec[1] >= data->channum)) + return -EINVAL; + + *out_hwirq = intspec[0]; + *out_type = IRQ_TYPE_LEVEL_HIGH; + + return 0; +} + +static int imx_intmux_irq_select(struct irq_domain *d, struct irq_fwspec *fwspec, + enum irq_domain_bus_token bus_token) +{ + struct intmux_irqchip_data *irqchip_data = d->host_data; + + /* Not for us */ + if (fwspec->fwnode != d->fwnode) + return false; + + return irqchip_data->chanidx == fwspec->param[1]; +} + +static const struct irq_domain_ops imx_intmux_domain_ops = { + .map = imx_intmux_irq_map, + .xlate = imx_intmux_irq_xlate, + .select = imx_intmux_irq_select, +}; + +static void imx_intmux_irq_handler(struct irq_desc *desc) +{ + struct intmux_irqchip_data *irqchip_data = irq_desc_get_handler_data(desc); + int idx = irqchip_data->chanidx; + struct intmux_data *data = 
container_of(irqchip_data, struct intmux_data, + irqchip_data[idx]); + unsigned long irqstat; + int pos, virq; + + chained_irq_enter(irq_desc_get_chip(desc), desc); + + /* read the interrupt source pending status of this channel */ + irqstat = readl_relaxed(data->regs + CHANIPR(idx)); + + for_each_set_bit(pos, &irqstat, 32) { + virq = irq_find_mapping(irqchip_data->domain, pos); + if (virq) + generic_handle_irq(virq); + } + + chained_irq_exit(irq_desc_get_chip(desc), desc); +} + +static int imx_intmux_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct irq_domain *domain; + struct intmux_data *data; + int channum; + int i, ret; + + channum = platform_irq_count(pdev); + if (channum == -EPROBE_DEFER) { + return -EPROBE_DEFER; + } else if (channum > CHAN_MAX_NUM) { + dev_err(&pdev->dev, "supports up to %d multiplex channels\n", + CHAN_MAX_NUM); + return -EINVAL; + } + + data = devm_kzalloc(&pdev->dev, sizeof(*data) + + channum * sizeof(data->irqchip_data[0]), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(data->regs)) { + dev_err(&pdev->dev, "failed to initialize reg\n"); + return PTR_ERR(data->regs); + } + + data->ipg_clk = devm_clk_get(&pdev->dev, "ipg"); + if (IS_ERR(data->ipg_clk)) { + ret = PTR_ERR(data->ipg_clk); + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, "failed to get ipg clk: %d\n", ret); + return ret; + } + + data->channum = channum; + raw_spin_lock_init(&data->lock); + + ret = clk_prepare_enable(data->ipg_clk); + if (ret) { + dev_err(&pdev->dev, "failed to enable ipg clk: %d\n", ret); + return ret; + } + + for (i = 0; i < channum; i++) { + data->irqchip_data[i].chanidx = i; + + data->irqchip_data[i].irq = irq_of_parse_and_map(np, i); + if (data->irqchip_data[i].irq <= 0) { + ret = -EINVAL; + dev_err(&pdev->dev, "failed to get irq\n"); + goto out; + } + + domain = irq_domain_add_linear(np, 32, &imx_intmux_domain_ops, + &data->irqchip_data[i]); + if (!domain) { + ret = -ENOMEM; + dev_err(&pdev->dev, "failed to create IRQ domain\n"); + goto out; + } + data->irqchip_data[i].domain = domain; + + /* disable all interrupt sources of this channel firstly */ + writel_relaxed(0, data->regs + CHANIER(i)); + + irq_set_chained_handler_and_data(data->irqchip_data[i].irq, + imx_intmux_irq_handler, + &data->irqchip_data[i]); + } + + platform_set_drvdata(pdev, data); + + return 0; +out: + clk_disable_unprepare(data->ipg_clk); + return ret; +} + +static int imx_intmux_remove(struct platform_device *pdev) +{ + struct intmux_data *data = platform_get_drvdata(pdev); + int i; + + for (i = 0; i < data->channum; i++) { + /* disable all interrupt sources of this channel */ + writel_relaxed(0, data->regs + CHANIER(i)); + + irq_set_chained_handler_and_data(data->irqchip_data[i].irq, + NULL, NULL); + + irq_domain_remove(data->irqchip_data[i].domain); + } + + clk_disable_unprepare(data->ipg_clk); + + return 0; +} + +static const struct of_device_id imx_intmux_id[] = { + { .compatible = "fsl,imx-intmux", }, + { /* sentinel */ }, +}; + +static struct platform_driver imx_intmux_driver = { + .driver = { + .name = "imx-intmux", + .of_match_table = imx_intmux_id, + }, + .probe = imx_intmux_probe, + .remove = imx_intmux_remove, +}; +builtin_platform_driver(imx_intmux_driver); diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c index 3f09f658e8e2..6b566bba263b 100644 --- a/drivers/irqchip/irq-mbigen.c +++ b/drivers/irqchip/irq-mbigen.c @@ -374,6 +374,7 @@ static struct 
platform_driver mbigen_platform_driver = { .name = "Hisilicon MBIGEN-V2", .of_match_table = mbigen_of_match, .acpi_match_table = ACPI_PTR(mbigen_acpi_match), + .suppress_bind_attrs = true, }, .probe = mbigen_device_probe, }; diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c index 829084b568fa..ccc7f823911b 100644 --- a/drivers/irqchip/irq-meson-gpio.c +++ b/drivers/irqchip/irq-meson-gpio.c @@ -24,50 +24,101 @@ #define REG_PIN_47_SEL 0x08 #define REG_FILTER_SEL 0x0c +/* use for A1 like chips */ +#define REG_PIN_A1_SEL 0x04 + /* * Note: The S905X3 datasheet reports that BOTH_EDGE is controlled by * bits 24 to 31. Tests on the actual HW show that these bits are * stuck at 0. Bits 8 to 15 are responsive and have the expected * effect. */ -#define REG_EDGE_POL_EDGE(x) BIT(x) -#define REG_EDGE_POL_LOW(x) BIT(16 + (x)) -#define REG_BOTH_EDGE(x) BIT(8 + (x)) -#define REG_EDGE_POL_MASK(x) ( \ - REG_EDGE_POL_EDGE(x) | \ - REG_EDGE_POL_LOW(x) | \ - REG_BOTH_EDGE(x)) +#define REG_EDGE_POL_EDGE(params, x) BIT((params)->edge_single_offset + (x)) +#define REG_EDGE_POL_LOW(params, x) BIT((params)->pol_low_offset + (x)) +#define REG_BOTH_EDGE(params, x) BIT((params)->edge_both_offset + (x)) +#define REG_EDGE_POL_MASK(params, x) ( \ + REG_EDGE_POL_EDGE(params, x) | \ + REG_EDGE_POL_LOW(params, x) | \ + REG_BOTH_EDGE(params, x)) #define REG_PIN_SEL_SHIFT(x) (((x) % 4) * 8) #define REG_FILTER_SEL_SHIFT(x) ((x) * 4) +struct meson_gpio_irq_controller; +static void meson8_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl, + unsigned int channel, unsigned long hwirq); +static void meson_gpio_irq_init_dummy(struct meson_gpio_irq_controller *ctl); +static void meson_a1_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl, + unsigned int channel, + unsigned long hwirq); +static void meson_a1_gpio_irq_init(struct meson_gpio_irq_controller *ctl); + +struct irq_ctl_ops { + void (*gpio_irq_sel_pin)(struct meson_gpio_irq_controller *ctl, + unsigned int channel, unsigned long hwirq); + void (*gpio_irq_init)(struct meson_gpio_irq_controller *ctl); +}; + struct meson_gpio_irq_params { unsigned int nr_hwirq; bool support_edge_both; + unsigned int edge_both_offset; + unsigned int edge_single_offset; + unsigned int pol_low_offset; + unsigned int pin_sel_mask; + struct irq_ctl_ops ops; }; +#define INIT_MESON_COMMON(irqs, init, sel) \ + .nr_hwirq = irqs, \ + .ops = { \ + .gpio_irq_init = init, \ + .gpio_irq_sel_pin = sel, \ + }, + +#define INIT_MESON8_COMMON_DATA(irqs) \ + INIT_MESON_COMMON(irqs, meson_gpio_irq_init_dummy, \ + meson8_gpio_irq_sel_pin) \ + .edge_single_offset = 0, \ + .pol_low_offset = 16, \ + .pin_sel_mask = 0xff, \ + +#define INIT_MESON_A1_COMMON_DATA(irqs) \ + INIT_MESON_COMMON(irqs, meson_a1_gpio_irq_init, \ + meson_a1_gpio_irq_sel_pin) \ + .support_edge_both = true, \ + .edge_both_offset = 16, \ + .edge_single_offset = 8, \ + .pol_low_offset = 0, \ + .pin_sel_mask = 0x7f, \ + static const struct meson_gpio_irq_params meson8_params = { - .nr_hwirq = 134, + INIT_MESON8_COMMON_DATA(134) }; static const struct meson_gpio_irq_params meson8b_params = { - .nr_hwirq = 119, + INIT_MESON8_COMMON_DATA(119) }; static const struct meson_gpio_irq_params gxbb_params = { - .nr_hwirq = 133, + INIT_MESON8_COMMON_DATA(133) }; static const struct meson_gpio_irq_params gxl_params = { - .nr_hwirq = 110, + INIT_MESON8_COMMON_DATA(110) }; static const struct meson_gpio_irq_params axg_params = { - .nr_hwirq = 100, + INIT_MESON8_COMMON_DATA(100) }; static const struct meson_gpio_irq_params 
sm1_params = { - .nr_hwirq = 100, + INIT_MESON8_COMMON_DATA(100) .support_edge_both = true, + .edge_both_offset = 8, +}; + +static const struct meson_gpio_irq_params a1_params = { + INIT_MESON_A1_COMMON_DATA(62) }; static const struct of_device_id meson_irq_gpio_matches[] = { @@ -78,6 +129,7 @@ static const struct of_device_id meson_irq_gpio_matches[] = { { .compatible = "amlogic,meson-axg-gpio-intc", .data = &axg_params }, { .compatible = "amlogic,meson-g12a-gpio-intc", .data = &axg_params }, { .compatible = "amlogic,meson-sm1-gpio-intc", .data = &sm1_params }, + { .compatible = "amlogic,meson-a1-gpio-intc", .data = &a1_params }, { } }; @@ -100,9 +152,43 @@ static void meson_gpio_irq_update_bits(struct meson_gpio_irq_controller *ctl, writel_relaxed(tmp, ctl->base + reg); } -static unsigned int meson_gpio_irq_channel_to_reg(unsigned int channel) +static void meson_gpio_irq_init_dummy(struct meson_gpio_irq_controller *ctl) +{ +} + +static void meson8_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl, + unsigned int channel, unsigned long hwirq) +{ + unsigned int reg_offset; + unsigned int bit_offset; + + reg_offset = (channel < 4) ? REG_PIN_03_SEL : REG_PIN_47_SEL; + bit_offset = REG_PIN_SEL_SHIFT(channel); + + meson_gpio_irq_update_bits(ctl, reg_offset, + ctl->params->pin_sel_mask << bit_offset, + hwirq << bit_offset); +} + +static void meson_a1_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl, + unsigned int channel, + unsigned long hwirq) { - return (channel < 4) ? REG_PIN_03_SEL : REG_PIN_47_SEL; + unsigned int reg_offset; + unsigned int bit_offset; + + bit_offset = ((channel % 2) == 0) ? 0 : 16; + reg_offset = REG_PIN_A1_SEL + ((channel / 2) << 2); + + meson_gpio_irq_update_bits(ctl, reg_offset, + ctl->params->pin_sel_mask << bit_offset, + hwirq << bit_offset); +} + +/* For A1 and later chips there is a switch to enable/disable the irq */ +static void meson_a1_gpio_irq_init(struct meson_gpio_irq_controller *ctl) +{ + meson_gpio_irq_update_bits(ctl, REG_EDGE_POL, BIT(31), BIT(31)); } static int @@ -110,7 +196,7 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl, unsigned long hwirq, u32 **channel_hwirq) { - unsigned int reg, idx; + unsigned int idx; spin_lock(&ctl->lock); @@ -129,10 +215,7 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl, * Setup the mux of the channel to route the signal of the pad * to the appropriate input of the GIC */ - reg = meson_gpio_irq_channel_to_reg(idx); - meson_gpio_irq_update_bits(ctl, reg, - 0xff << REG_PIN_SEL_SHIFT(idx), - hwirq << REG_PIN_SEL_SHIFT(idx)); + ctl->params->ops.gpio_irq_sel_pin(ctl, idx, hwirq); /* * Get the hwirq number assigned to this channel through @@ -173,7 +256,9 @@ static int meson_gpio_irq_type_setup(struct meson_gpio_irq_controller *ctl, { u32 val = 0; unsigned int idx; + const struct meson_gpio_irq_params *params; + params = ctl->params; idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq); /* @@ -190,22 +275,22 @@ static int meson_gpio_irq_type_setup(struct meson_gpio_irq_controller *ctl, * precedence over the other edge/polarity settings */ if (type == IRQ_TYPE_EDGE_BOTH) { - if (!ctl->params->support_edge_both) + if (!params->support_edge_both) return -EINVAL; - val |= REG_BOTH_EDGE(idx); + val |= REG_BOTH_EDGE(params, idx); } else { if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) - val |= REG_EDGE_POL_EDGE(idx); + val |= REG_EDGE_POL_EDGE(params, idx); if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) - val |= REG_EDGE_POL_LOW(idx); + val |=
REG_EDGE_POL_LOW(params, idx); } spin_lock(&ctl->lock); meson_gpio_irq_update_bits(ctl, REG_EDGE_POL, - REG_EDGE_POL_MASK(idx), val); + REG_EDGE_POL_MASK(params, idx), val); spin_unlock(&ctl->lock); @@ -371,6 +456,8 @@ static int __init meson_gpio_irq_parse_dt(struct device_node *node, return ret; } + ctl->params->ops.gpio_irq_init(ctl); + return 0; } diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index f3985469c221..d70507133c1d 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -716,7 +716,7 @@ static int __init gic_of_init(struct device_node *node, __sync(); } - mips_gic_base = ioremap_nocache(gic_base, gic_len); + mips_gic_base = ioremap(gic_base, gic_len); gicconfig = read_gic_config(); gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS; diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c index a166d30deea2..f747e2209ea9 100644 --- a/drivers/irqchip/irq-nvic.c +++ b/drivers/irqchip/irq-nvic.c @@ -45,17 +45,6 @@ nvic_handle_irq(irq_hw_number_t hwirq, struct pt_regs *regs) handle_IRQ(irq, regs); } -static int nvic_irq_domain_translate(struct irq_domain *d, - struct irq_fwspec *fwspec, - unsigned long *hwirq, unsigned int *type) -{ - if (WARN_ON(fwspec->param_count < 1)) - return -EINVAL; - *hwirq = fwspec->param[0]; - *type = IRQ_TYPE_NONE; - return 0; -} - static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *arg) { @@ -64,7 +53,7 @@ static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int type = IRQ_TYPE_NONE; struct irq_fwspec *fwspec = arg; - ret = nvic_irq_domain_translate(domain, fwspec, &hwirq, &type); + ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type); if (ret) return ret; @@ -75,7 +64,7 @@ static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, } static const struct irq_domain_ops nvic_irq_domain_ops = { - .translate = nvic_irq_domain_translate, + .translate = irq_domain_translate_onecell, .alloc = nvic_irq_domain_alloc, .free = irq_domain_free_irqs_top, }; diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c index f82bc60a6793..6e5e3172796b 100644 --- a/drivers/irqchip/irq-renesas-intc-irqpin.c +++ b/drivers/irqchip/irq-renesas-intc-irqpin.c @@ -460,7 +460,7 @@ static int intc_irqpin_probe(struct platform_device *pdev) goto err0; } - i->iomem = devm_ioremap_nocache(dev, io[k]->start, + i->iomem = devm_ioremap(dev, io[k]->start, resource_size(io[k])); if (!i->iomem) { dev_err(dev, "failed to remap IOMEM\n"); diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c index 0aca5807a119..aa4af886e43a 100644 --- a/drivers/irqchip/irq-sifive-plic.c +++ b/drivers/irqchip/irq-sifive-plic.c @@ -154,15 +154,37 @@ static struct irq_chip plic_chip = { static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq) { - irq_set_chip_and_handler(irq, &plic_chip, handle_fasteoi_irq); - irq_set_chip_data(irq, NULL); + irq_domain_set_info(d, irq, hwirq, &plic_chip, d->host_data, + handle_fasteoi_irq, NULL, NULL); irq_set_noprobe(irq); return 0; } +static int plic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + int i, ret; + irq_hw_number_t hwirq; + unsigned int type; + struct irq_fwspec *fwspec = arg; + + ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type); + if (ret) + return ret; + + for (i = 0; i < nr_irqs; i++) { + 
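/* each virq in the range gets its own consecutive hwirq */ +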
ret = plic_irqdomain_map(domain, virq + i, hwirq + i); + if (ret) + return ret; + } + + return 0; +} + static const struct irq_domain_ops plic_irqdomain_ops = { - .map = plic_irqdomain_map, - .xlate = irq_domain_xlate_onecell, + .translate = irq_domain_translate_onecell, + .alloc = plic_irq_domain_alloc, + .free = irq_domain_free_irqs_top, }; static struct irq_domain *plic_irqdomain; diff --git a/drivers/lightnvm/pblk-trace.h b/drivers/lightnvm/pblk-trace.h index 9534503b69d9..47b67c6bff7a 100644 --- a/drivers/lightnvm/pblk-trace.h +++ b/drivers/lightnvm/pblk-trace.h @@ -46,7 +46,7 @@ TRACE_EVENT(pblk_chunk_reset, TP_STRUCT__entry( __string(name, name) __field(u64, ppa) - __field(int, state); + __field(int, state) ), TP_fast_assign( @@ -72,7 +72,7 @@ TRACE_EVENT(pblk_chunk_state, TP_STRUCT__entry( __string(name, name) __field(u64, ppa) - __field(int, state); + __field(int, state) ), TP_fast_assign( @@ -98,7 +98,7 @@ TRACE_EVENT(pblk_line_state, TP_STRUCT__entry( __string(name, name) __field(int, line) - __field(int, state); + __field(int, state) ), TP_fast_assign( @@ -121,7 +121,7 @@ TRACE_EVENT(pblk_state, TP_STRUCT__entry( __string(name, name) - __field(int, state); + __field(int, state) ), TP_fast_assign( diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 9198c1b480d9..adf26a21fcd1 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -301,6 +301,7 @@ struct cached_dev { struct block_device *bdev; struct cache_sb sb; + struct cache_sb_disk *sb_disk; struct bio sb_bio; struct bio_vec sb_bv[1]; struct closure sb_write; @@ -403,6 +404,7 @@ enum alloc_reserve { struct cache { struct cache_set *set; struct cache_sb sb; + struct cache_sb_disk *sb_disk; struct bio sb_bio; struct bio_vec sb_bv[1]; diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c index cffcdc9feefb..4385303836d8 100644 --- a/drivers/md/bcache/bset.c +++ b/drivers/md/bcache/bset.c @@ -1257,6 +1257,11 @@ static void __btree_sort(struct btree_keys *b, struct btree_iter *iter, * Our temporary buffer is the same size as the btree node's * buffer, we can just swap buffers instead of doing a big * memcpy() + * + * Don't worry even if 'out' is allocated from the mempool, it + * can still be swapped here, because state->pool is a page + * mempool created by mempool_init_page_pool(), which indeed + * allocates pages with alloc_pages().
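+ * + * A rough sketch of the pool setup this relies on (a hedged + * reconstruction, not necessarily the exact call site): + * + * mempool_init_page_pool(&state->pool, 1, page_order); + * + * so swapping 'out' with the node's buffer just trades one + * alloc_pages() page for another.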
*/ out->magic = b->set->data->magic; diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 14d6c33b0957..fa872df4e770 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -734,34 +734,32 @@ static unsigned long bch_mca_scan(struct shrinker *shrink, i = 0; btree_cache_used = c->btree_cache_used; - list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) { + list_for_each_entry_safe_reverse(b, t, &c->btree_cache_freeable, list) { if (nr <= 0) goto out; - if (++i > 3 && - !mca_reap(b, 0, false)) { + if (!mca_reap(b, 0, false)) { mca_data_free(b); rw_unlock(true, b); freed++; } nr--; + i++; } - for (; (nr--) && i < btree_cache_used; i++) { - if (list_empty(&c->btree_cache)) + list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) { + if (nr <= 0 || i >= btree_cache_used) goto out; - b = list_first_entry(&c->btree_cache, struct btree, list); - list_rotate_left(&c->btree_cache); - - if (!b->accessed && - !mca_reap(b, 0, false)) { + if (!mca_reap(b, 0, false)) { mca_bucket_free(b); mca_data_free(b); rw_unlock(true, b); freed++; - } else - b->accessed = 0; + } + + nr--; + i++; } out: mutex_unlock(&c->bucket_lock); @@ -1069,7 +1067,6 @@ retry: BUG_ON(!b->written); b->parent = parent; - b->accessed = 1; for (; i <= b->keys.nsets && b->keys.set[i].size; i++) { prefetch(b->keys.set[i].tree); @@ -1160,7 +1157,6 @@ retry: goto retry; } - b->accessed = 1; b->parent = parent; bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb)); diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h index 76cfd121a486..f4dcca449391 100644 --- a/drivers/md/bcache/btree.h +++ b/drivers/md/bcache/btree.h @@ -121,8 +121,6 @@ struct btree { /* Key/pointer for this btree node */ BKEY_PADDED(key); - /* Single bit - set when accessed, cleared by shrinker */ - unsigned long accessed; unsigned long seq; struct rw_semaphore lock; struct cache_set *c; diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index be2a2a201603..33ddc5269e8d 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -417,10 +417,14 @@ err: /* Journalling */ +#define nr_to_fifo_front(p, front_p, mask) (((p) - (front_p)) & (mask)) + static void btree_flush_write(struct cache_set *c) { struct btree *b, *t, *btree_nodes[BTREE_FLUSH_NR]; - unsigned int i, n; + unsigned int i, nr, ref_nr; + atomic_t *fifo_front_p, *now_fifo_front_p; + size_t mask; if (c->journal.btree_flushing) return; @@ -433,12 +437,50 @@ static void btree_flush_write(struct cache_set *c) c->journal.btree_flushing = true; spin_unlock(&c->journal.flush_write_lock); + /* get the oldest journal entry and check its refcount */ + spin_lock(&c->journal.lock); + fifo_front_p = &fifo_front(&c->journal.pin); + ref_nr = atomic_read(fifo_front_p); + if (ref_nr <= 0) { + /* + * do nothing if no btree node references + * the oldest journal entry + */ + spin_unlock(&c->journal.lock); + goto out; + } + spin_unlock(&c->journal.lock); + + mask = c->journal.pin.mask; + nr = 0; atomic_long_inc(&c->flush_write); memset(btree_nodes, 0, sizeof(btree_nodes)); - n = 0; mutex_lock(&c->bucket_lock); list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) { + /* + * It is safe to get now_fifo_front_p without holding + * c->journal.lock here, because we don't need to know + * the exactly accurate value, just check whether the + * front pointer of c->journal.pin is changed. 
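+ * A stale read is harmless here: at worst the loop scans one + * extra btree node before the comparison below catches the + * change and quits.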
+ */ + now_fifo_front_p = &fifo_front(&c->journal.pin); + /* + * If the oldest journal entry is reclaimed and the front + * pointer of c->journal.pin changes, it is unnecessary + * to scan c->btree_cache anymore, just quit the loop and + * flush out what we have already. + */ + if (now_fifo_front_p != fifo_front_p) + break; + /* + * quit this loop if all matching btree nodes are + * scanned and recorded in btree_nodes[] already. + */ + ref_nr = atomic_read(fifo_front_p); + if (nr >= ref_nr) + break; + if (btree_node_journal_flush(b)) pr_err("BUG: flush_write bit should not be set here!"); @@ -454,17 +496,44 @@ static void btree_flush_write(struct cache_set *c) continue; } + /* + * Only select the btree node which exactly references + * the oldest journal entry. + * + * If the journal entry pointed to by fifo_front_p is + * reclaimed in parallel, don't worry: + * - the list_for_each_xxx loop will quit when checking + * the next now_fifo_front_p. + * - If there are matched nodes recorded in btree_nodes[], + * they are clean now (this is why and how the oldest + * journal entry can be reclaimed). These selected nodes + * will be ignored and skipped in the following for-loop. + */ + if (nr_to_fifo_front(btree_current_write(b)->journal, + fifo_front_p, + mask) != 0) { + mutex_unlock(&b->write_lock); + continue; + } + set_btree_node_journal_flush(b); mutex_unlock(&b->write_lock); - btree_nodes[n++] = b; - if (n == BTREE_FLUSH_NR) + btree_nodes[nr++] = b; + /* + * To avoid holding c->bucket_lock for too long, only + * scan for BTREE_FLUSH_NR matched btree nodes at most. + * If there are more btree nodes referencing the oldest + * journal entry, try to flush them next time when + * btree_flush_write() is called. + */ + if (nr == BTREE_FLUSH_NR) break; } mutex_unlock(&c->bucket_lock); - for (i = 0; i < n; i++) { + for (i = 0; i < nr; i++) { b = btree_nodes[i]; if (!b) { pr_err("BUG: btree_nodes[%d] is NULL", i); @@ -497,6 +566,7 @@ static void btree_flush_write(struct cache_set *c) mutex_unlock(&b->write_lock); } +out: spin_lock(&c->journal.flush_write_lock); c->journal.btree_flushing = false; spin_unlock(&c->journal.flush_write_lock); diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 77e9869345e7..3dea1d5acd5c 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -15,7 +15,6 @@ #include "writeback.h" #include <linux/blkdev.h> -#include <linux/buffer_head.h> #include <linux/debugfs.h> #include <linux/genhd.h> #include <linux/idr.h> @@ -60,17 +59,18 @@ struct workqueue_struct *bch_journal_wq; /* Superblock */ static const char *read_super(struct cache_sb *sb, struct block_device *bdev, - struct page **res) + struct cache_sb_disk **res) { const char *err; - struct cache_sb *s; - struct buffer_head *bh = __bread(bdev, 1, SB_SIZE); + struct cache_sb_disk *s; + struct page *page; unsigned int i; - if (!bh) + page = read_cache_page_gfp(bdev->bd_inode->i_mapping, + SB_OFFSET >> PAGE_SHIFT, GFP_KERNEL); + if (IS_ERR(page)) return "IO error"; - - s = (struct cache_sb *) bh->b_data; + s = page_address(page) + offset_in_page(SB_OFFSET); sb->offset = le64_to_cpu(s->offset); sb->version = le64_to_cpu(s->version); @@ -188,12 +188,10 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev, } sb->last_mount = (u32)ktime_get_real_seconds(); - err = NULL; - - get_page(bh->b_page); - *res = bh->b_page; + *res = s; + return NULL; err: - put_bh(bh); + put_page(page); return err; } @@ -207,15 +205,15 @@ static void write_bdev_super_endio(struct bio *bio)
closure_put(&dc->sb_write); } -static void __write_super(struct cache_sb *sb, struct bio *bio) +static void __write_super(struct cache_sb *sb, struct cache_sb_disk *out, + struct bio *bio) { - struct cache_sb *out = page_address(bio_first_page_all(bio)); unsigned int i; + bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_META; bio->bi_iter.bi_sector = SB_SECTOR; - bio->bi_iter.bi_size = SB_SIZE; - bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META); - bch_bio_map(bio, NULL); + __bio_add_page(bio, virt_to_page(out), SB_SIZE, + offset_in_page(out)); out->offset = cpu_to_le64(sb->offset); out->version = cpu_to_le64(sb->version); @@ -257,14 +255,14 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent) down(&dc->sb_write_mutex); closure_init(cl, parent); - bio_reset(bio); + bio_init(bio, dc->sb_bv, 1); bio_set_dev(bio, dc->bdev); bio->bi_end_io = write_bdev_super_endio; bio->bi_private = dc; closure_get(cl); /* I/O request sent to backing device */ - __write_super(&dc->sb, bio); + __write_super(&dc->sb, dc->sb_disk, bio); closure_return_with_destructor(cl, bch_write_bdev_super_unlock); } @@ -306,13 +304,13 @@ void bcache_write_super(struct cache_set *c) SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb)); - bio_reset(bio); + bio_init(bio, ca->sb_bv, 1); bio_set_dev(bio, ca->bdev); bio->bi_end_io = write_super_endio; bio->bi_private = ca; closure_get(cl); - __write_super(&ca->sb, bio); + __write_super(&ca->sb, ca->sb_disk, bio); } closure_return_with_destructor(cl, bcache_write_super_unlock); @@ -1275,6 +1273,9 @@ static void cached_dev_free(struct closure *cl) mutex_unlock(&bch_register_lock); + if (dc->sb_disk) + put_page(virt_to_page(dc->sb_disk)); + if (!IS_ERR_OR_NULL(dc->bdev)) blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); @@ -1350,7 +1351,7 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size) /* Cached device - bcache superblock */ -static int register_bdev(struct cache_sb *sb, struct page *sb_page, +static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk, struct block_device *bdev, struct cached_dev *dc) { @@ -1362,11 +1363,7 @@ static int register_bdev(struct cache_sb *sb, struct page *sb_page, memcpy(&dc->sb, sb, sizeof(struct cache_sb)); dc->bdev = bdev; dc->bdev->bd_holder = dc; - - bio_init(&dc->sb_bio, dc->sb_bio.bi_inline_vecs, 1); - bio_first_bvec_all(&dc->sb_bio)->bv_page = sb_page; - get_page(sb_page); - + dc->sb_disk = sb_disk; if (cached_dev_init(dc, sb->block_size << 9)) goto err; @@ -2136,8 +2133,8 @@ void bch_cache_release(struct kobject *kobj) for (i = 0; i < RESERVE_NR; i++) free_fifo(&ca->free[i]); - if (ca->sb_bio.bi_inline_vecs[0].bv_page) - put_page(bio_first_page_all(&ca->sb_bio)); + if (ca->sb_disk) + put_page(virt_to_page(ca->sb_disk)); if (!IS_ERR_OR_NULL(ca->bdev)) blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); @@ -2259,7 +2256,7 @@ err_free: return ret; } -static int register_cache(struct cache_sb *sb, struct page *sb_page, +static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk, struct block_device *bdev, struct cache *ca) { const char *err = NULL; /* must be set for any error case */ @@ -2269,10 +2266,7 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page, memcpy(&ca->sb, sb, sizeof(struct cache_sb)); ca->bdev = bdev; ca->bdev->bd_holder = ca; - - bio_init(&ca->sb_bio, ca->sb_bio.bi_inline_vecs, 1); - bio_first_bvec_all(&ca->sb_bio)->bv_page = sb_page; - get_page(sb_page); + ca->sb_disk = sb_disk; if (blk_queue_discard(bdev_get_queue(bdev))) 
ca->discard = CACHE_DISCARD(&ca->sb); @@ -2372,29 +2366,35 @@ static bool bch_is_open(struct block_device *bdev) static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, const char *buffer, size_t size) { - ssize_t ret = -EINVAL; - const char *err = "cannot allocate memory"; + const char *err; char *path = NULL; - struct cache_sb *sb = NULL; - struct block_device *bdev = NULL; - struct page *sb_page = NULL; + struct cache_sb *sb; + struct cache_sb_disk *sb_disk; + struct block_device *bdev; + ssize_t ret; + ret = -EBUSY; + err = "failed to reference bcache module"; if (!try_module_get(THIS_MODULE)) - return -EBUSY; + goto out; /* For latest state of bcache_is_reboot */ smp_mb(); + err = "bcache is in reboot"; if (bcache_is_reboot) - return -EBUSY; + goto out_module_put; + ret = -ENOMEM; + err = "cannot allocate memory"; path = kstrndup(buffer, size, GFP_KERNEL); if (!path) - goto err; + goto out_module_put; sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL); if (!sb) - goto err; + goto out_free_path; + ret = -EINVAL; err = "failed to open device"; bdev = blkdev_get_by_path(strim(path), FMODE_READ|FMODE_WRITE|FMODE_EXCL, @@ -2411,57 +2411,63 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, if (!IS_ERR(bdev)) bdput(bdev); if (attr == &ksysfs_register_quiet) - goto quiet_out; + goto done; } - goto err; + goto out_free_sb; } err = "failed to set blocksize"; if (set_blocksize(bdev, 4096)) - goto err_close; + goto out_blkdev_put; - err = read_super(sb, bdev, &sb_page); + err = read_super(sb, bdev, &sb_disk); if (err) - goto err_close; + goto out_blkdev_put; err = "failed to register device"; if (SB_IS_BDEV(sb)) { struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL); if (!dc) - goto err_close; + goto out_put_sb_page; mutex_lock(&bch_register_lock); - ret = register_bdev(sb, sb_page, bdev, dc); + ret = register_bdev(sb, sb_disk, bdev, dc); mutex_unlock(&bch_register_lock); /* blkdev_put() will be called in cached_dev_free() */ if (ret < 0) - goto err; + goto out_free_sb; } else { struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL); if (!ca) - goto err_close; + goto out_put_sb_page; /* blkdev_put() will be called in bch_cache_release() */ - if (register_cache(sb, sb_page, bdev, ca) != 0) - goto err; + if (register_cache(sb, sb_disk, bdev, ca) != 0) + goto out_free_sb; } -quiet_out: - ret = size; -out: - if (sb_page) - put_page(sb_page); + +done: kfree(sb); kfree(path); module_put(THIS_MODULE); - return ret; + return size; -err_close: - blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); -err: - pr_info("error %s: %s", path, err); - goto out; +out_put_sb_page: + put_page(virt_to_page(sb_disk)); +out_blkdev_put: + blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); +out_free_sb: + kfree(sb); +out_free_path: + kfree(path); + path = NULL; +out_module_put: + module_put(THIS_MODULE); +out: + pr_info("error %s: %s", path?path:"", err); + return ret; } diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c index 3ad18246fcb3..e230052c2107 100644 --- a/drivers/md/md-bitmap.c +++ b/drivers/md/md-bitmap.c @@ -1019,8 +1019,6 @@ void md_bitmap_unplug(struct bitmap *bitmap) /* look at each page to see if there are any set bits that need to be * flushed out to disk */ for (i = 0; i < bitmap->storage.file_pages; i++) { - if (!bitmap->storage.filemap) - return; dirty = test_and_clear_page_attr(bitmap, i, BITMAP_PAGE_DIRTY); need_write = test_and_clear_page_attr(bitmap, i, BITMAP_PAGE_NEEDWRITE); @@ -1338,7 +1336,8 @@ void 
md_bitmap_daemon_work(struct mddev *mddev) BITMAP_PAGE_DIRTY)) /* bitmap_unplug will handle the rest */ break; - if (test_and_clear_page_attr(bitmap, j, + if (bitmap->storage.filemap && + test_and_clear_page_attr(bitmap, j, BITMAP_PAGE_NEEDWRITE)) { write_page(bitmap, bitmap->storage.filemap[j], 0); } @@ -1790,8 +1789,8 @@ void md_bitmap_destroy(struct mddev *mddev) return; md_bitmap_wait_behind_writes(mddev); - mempool_destroy(mddev->wb_info_pool); - mddev->wb_info_pool = NULL; + if (!mddev->serialize_policy) + mddev_destroy_serial_pool(mddev, NULL, true); mutex_lock(&mddev->bitmap_info.mutex); spin_lock(&mddev->lock); @@ -1908,7 +1907,7 @@ int md_bitmap_load(struct mddev *mddev) goto out; rdev_for_each(rdev, mddev) - mddev_create_wb_pool(mddev, rdev, true); + mddev_create_serial_pool(mddev, rdev, true); if (mddev_is_clustered(mddev)) md_cluster_ops->load_bitmaps(mddev, mddev->bitmap_info.nodes); @@ -2475,16 +2474,16 @@ backlog_store(struct mddev *mddev, const char *buf, size_t len) if (backlog > COUNTER_MAX) return -EINVAL; mddev->bitmap_info.max_write_behind = backlog; - if (!backlog && mddev->wb_info_pool) { - /* wb_info_pool is not needed if backlog is zero */ - mempool_destroy(mddev->wb_info_pool); - mddev->wb_info_pool = NULL; - } else if (backlog && !mddev->wb_info_pool) { - /* wb_info_pool is needed since backlog is not zero */ + if (!backlog && mddev->serial_info_pool) { + /* serial_info_pool is not needed if backlog is zero */ + if (!mddev->serialize_policy) + mddev_destroy_serial_pool(mddev, NULL, false); + } else if (backlog && !mddev->serial_info_pool) { + /* serial_info_pool is needed since backlog is not zero */ struct md_rdev *rdev; rdev_for_each(rdev, mddev) - mddev_create_wb_pool(mddev, rdev, false); + mddev_create_serial_pool(mddev, rdev, false); } if (old_mwb != backlog) md_bitmap_update_sb(mddev->bitmap); diff --git a/drivers/md/md.c b/drivers/md/md.c index 4e7c9f398bc6..4824d50526fa 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -125,74 +125,165 @@ static inline int speed_max(struct mddev *mddev) mddev->sync_speed_max : sysctl_speed_limit_max; } -static int rdev_init_wb(struct md_rdev *rdev) +static void rdev_uninit_serial(struct md_rdev *rdev) { - if (rdev->bdev->bd_queue->nr_hw_queues == 1) + if (!test_and_clear_bit(CollisionCheck, &rdev->flags)) + return; + + kvfree(rdev->serial); + rdev->serial = NULL; +} + +static void rdevs_uninit_serial(struct mddev *mddev) +{ + struct md_rdev *rdev; + + rdev_for_each(rdev, mddev) + rdev_uninit_serial(rdev); +} + +static int rdev_init_serial(struct md_rdev *rdev) +{ + /* serial_nums equals with BARRIER_BUCKETS_NR */ + int i, serial_nums = 1 << ((PAGE_SHIFT - ilog2(sizeof(atomic_t)))); + struct serial_in_rdev *serial = NULL; + + if (test_bit(CollisionCheck, &rdev->flags)) return 0; - spin_lock_init(&rdev->wb_list_lock); - INIT_LIST_HEAD(&rdev->wb_list); - init_waitqueue_head(&rdev->wb_io_wait); - set_bit(WBCollisionCheck, &rdev->flags); + serial = kvmalloc(sizeof(struct serial_in_rdev) * serial_nums, + GFP_KERNEL); + if (!serial) + return -ENOMEM; - return 1; + for (i = 0; i < serial_nums; i++) { + struct serial_in_rdev *serial_tmp = &serial[i]; + + spin_lock_init(&serial_tmp->serial_lock); + serial_tmp->serial_rb = RB_ROOT_CACHED; + init_waitqueue_head(&serial_tmp->serial_io_wait); + } + + rdev->serial = serial; + set_bit(CollisionCheck, &rdev->flags); + + return 0; +} + +static int rdevs_init_serial(struct mddev *mddev) +{ + struct md_rdev *rdev; + int ret = 0; + + rdev_for_each(rdev, mddev) { + ret = 
rdev_init_serial(rdev); + if (ret) + break; + } + + /* Free all resources if the pool does not exist */ + if (ret && !mddev->serial_info_pool) + rdevs_uninit_serial(mddev); + + return ret; } /* - * Create wb_info_pool if rdev is the first multi-queue device flaged - * with writemostly, also write-behind mode is enabled. + * rdev needs serialization if it meets both conditions: + * 1. it is a multi-queue device flagged with writemostly. + * 2. the write-behind mode is enabled. */ -void mddev_create_wb_pool(struct mddev *mddev, struct md_rdev *rdev, - bool is_suspend) +static int rdev_need_serial(struct md_rdev *rdev) { - if (mddev->bitmap_info.max_write_behind == 0) - return; + return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 && + rdev->bdev->bd_queue->nr_hw_queues != 1 && + test_bit(WriteMostly, &rdev->flags)); +} + +/* + * Init resources for rdev(s), then create serial_info_pool if: + * 1. rdev is the first device which returns true from rdev_need_serial(). + * 2. rdev is NULL, meaning we want to enable serialization for all rdevs. + */ +void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev, + bool is_suspend) +{ + int ret = 0; - if (!test_bit(WriteMostly, &rdev->flags) || !rdev_init_wb(rdev)) + if (rdev && !rdev_need_serial(rdev) && + !test_bit(CollisionCheck, &rdev->flags)) return; - if (mddev->wb_info_pool == NULL) { + if (!is_suspend) + mddev_suspend(mddev); + + if (!rdev) + ret = rdevs_init_serial(mddev); + else + ret = rdev_init_serial(rdev); + if (ret) + goto abort; + + if (mddev->serial_info_pool == NULL) { unsigned int noio_flag; - if (!is_suspend) - mddev_suspend(mddev); noio_flag = memalloc_noio_save(); - mddev->wb_info_pool = mempool_create_kmalloc_pool(NR_WB_INFOS, - sizeof(struct wb_info)); + mddev->serial_info_pool = + mempool_create_kmalloc_pool(NR_SERIAL_INFOS, + sizeof(struct serial_info)); memalloc_noio_restore(noio_flag); - if (!mddev->wb_info_pool) - pr_err("can't alloc memory pool for writemostly\n"); - if (!is_suspend) - mddev_resume(mddev); + if (!mddev->serial_info_pool) { + rdevs_uninit_serial(mddev); + pr_err("can't alloc memory pool for serialization\n"); + } } + +abort: + if (!is_suspend) + mddev_resume(mddev); } -EXPORT_SYMBOL_GPL(mddev_create_wb_pool); /* - * destroy wb_info_pool if rdev is the last device flaged with WBCollisionCheck. + * Free resources from rdev(s), and destroy serial_info_pool under conditions: + * 1. rdev is the last device flagged with CollisionCheck. + * 2. when bitmap is destroyed while policy is not enabled. + * 3. for disable policy, the pool is destroyed only when no rdev needs it. */ -static void mddev_destroy_wb_pool(struct mddev *mddev, struct md_rdev *rdev) +void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev, + bool is_suspend) { - if (!test_and_clear_bit(WBCollisionCheck, &rdev->flags)) + if (rdev && !test_bit(CollisionCheck, &rdev->flags)) return; - if (mddev->wb_info_pool) { + if (mddev->serial_info_pool) { struct md_rdev *temp; - int num = 0; + int num = 0; /* used to track if other rdevs need the pool */ - /* - * Check if other rdevs need wb_info_pool.
- */ - rdev_for_each(temp, mddev) - if (temp != rdev && - test_bit(WBCollisionCheck, &temp->flags)) + if (!is_suspend) + mddev_suspend(mddev); + rdev_for_each(temp, mddev) { + if (!rdev) { + if (!mddev->serialize_policy || + !rdev_need_serial(temp)) + rdev_uninit_serial(temp); + else + num++; + } else if (temp != rdev && + test_bit(CollisionCheck, &temp->flags)) num++; - if (!num) { - mddev_suspend(rdev->mddev); - mempool_destroy(mddev->wb_info_pool); - mddev->wb_info_pool = NULL; - mddev_resume(rdev->mddev); } + + if (rdev) + rdev_uninit_serial(rdev); + + if (num) + pr_info("The mempool could be used by other devices\n"); + else { + mempool_destroy(mddev->serial_info_pool); + mddev->serial_info_pool = NULL; + } + if (!is_suspend) + mddev_resume(mddev); } } @@ -2337,7 +2428,7 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev) pr_debug("md: bind<%s>\n", b); if (mddev->raid_disks) - mddev_create_wb_pool(mddev, rdev, false); + mddev_create_serial_pool(mddev, rdev, false); if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b))) goto fail; @@ -2375,7 +2466,7 @@ static void unbind_rdev_from_array(struct md_rdev *rdev) bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk); list_del_rcu(&rdev->same_set); pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b)); - mddev_destroy_wb_pool(rdev->mddev, rdev); + mddev_destroy_serial_pool(rdev->mddev, rdev, false); rdev->mddev = NULL; sysfs_remove_link(&rdev->kobj, "block"); sysfs_put(rdev->sysfs_state); @@ -2888,10 +2979,10 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len) } } else if (cmd_match(buf, "writemostly")) { set_bit(WriteMostly, &rdev->flags); - mddev_create_wb_pool(rdev->mddev, rdev, false); + mddev_create_serial_pool(rdev->mddev, rdev, false); err = 0; } else if (cmd_match(buf, "-writemostly")) { - mddev_destroy_wb_pool(rdev->mddev, rdev); + mddev_destroy_serial_pool(rdev->mddev, rdev, false); clear_bit(WriteMostly, &rdev->flags); err = 0; } else if (cmd_match(buf, "blocked")) { @@ -5277,6 +5368,57 @@ static struct md_sysfs_entry md_fail_last_dev = __ATTR(fail_last_dev, S_IRUGO | S_IWUSR, fail_last_dev_show, fail_last_dev_store); +static ssize_t serialize_policy_show(struct mddev *mddev, char *page) +{ + if (mddev->pers == NULL || (mddev->pers->level != 1)) + return sprintf(page, "n/a\n"); + else + return sprintf(page, "%d\n", mddev->serialize_policy); +} + +/* + * Setting serialize_policy to true enforces that write IO is not + * reordered for raid1.
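+ * + * Usage via sysfs (the device name is just an example): + * + * echo 1 > /sys/block/md0/md/serialize_policy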
+ */ +static ssize_t +serialize_policy_store(struct mddev *mddev, const char *buf, size_t len) +{ + int err; + bool value; + + err = kstrtobool(buf, &value); + if (err) + return err; + + if (value == mddev->serialize_policy) + return len; + + err = mddev_lock(mddev); + if (err) + return err; + if (mddev->pers == NULL || (mddev->pers->level != 1)) { + pr_err("md: serialize_policy is only effective for raid1\n"); + err = -EINVAL; + goto unlock; + } + + mddev_suspend(mddev); + if (value) + mddev_create_serial_pool(mddev, NULL, true); + else + mddev_destroy_serial_pool(mddev, NULL, true); + mddev->serialize_policy = value; + mddev_resume(mddev); +unlock: + mddev_unlock(mddev); + return err ?: len; +} + +static struct md_sysfs_entry md_serialize_policy = +__ATTR(serialize_policy, S_IRUGO | S_IWUSR, serialize_policy_show, + serialize_policy_store); + + static struct attribute *md_default_attrs[] = { &md_level.attr, &md_layout.attr, @@ -5294,6 +5436,7 @@ static struct attribute *md_default_attrs[] = { &max_corr_read_errors.attr, &md_consistency_policy.attr, &md_fail_last_dev.attr, + &md_serialize_policy.attr, NULL, }; @@ -5769,18 +5912,18 @@ int md_run(struct mddev *mddev) goto bitmap_abort; if (mddev->bitmap_info.max_write_behind > 0) { - bool creat_pool = false; + bool create_pool = false; rdev_for_each(rdev, mddev) { if (test_bit(WriteMostly, &rdev->flags) && - rdev_init_wb(rdev)) - creat_pool = true; - } - if (creat_pool && mddev->wb_info_pool == NULL) { - mddev->wb_info_pool = - mempool_create_kmalloc_pool(NR_WB_INFOS, - sizeof(struct wb_info)); - if (!mddev->wb_info_pool) { + rdev_init_serial(rdev)) + create_pool = true; + } + if (create_pool && mddev->serial_info_pool == NULL) { + mddev->serial_info_pool = + mempool_create_kmalloc_pool(NR_SERIAL_INFOS, + sizeof(struct serial_info)); + if (!mddev->serial_info_pool) { err = -ENOMEM; goto bitmap_abort; } @@ -6025,8 +6168,9 @@ static void __md_stop_writes(struct mddev *mddev) mddev->in_sync = 1; md_update_sb(mddev, 1); } - mempool_destroy(mddev->wb_info_pool); - mddev->wb_info_pool = NULL; + /* disable policy to guarantee rdevs free resources for serialization */ + mddev->serialize_policy = 0; + mddev_destroy_serial_pool(mddev, NULL, true); } void md_stop_writes(struct mddev *mddev) diff --git a/drivers/md/md.h b/drivers/md/md.h index 5f86f8adb0a4..acd681939112 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -32,6 +32,16 @@ * be retried. */ #define MD_FAILFAST (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT) + +/* + * The struct embedded in rdev is used to serialize IO. + */ +struct serial_in_rdev { + struct rb_root_cached serial_rb; + spinlock_t serial_lock; + wait_queue_head_t serial_io_wait; +}; + /* * MD's 'extended' device */ @@ -110,12 +120,7 @@ struct md_rdev { * in superblock. */ - /* - * The members for check collision of write behind IOs. - */ - struct list_head wb_list; - spinlock_t wb_list_lock; - wait_queue_head_t wb_io_wait; + struct serial_in_rdev *serial; /* used for raid1 io serialization */ struct work_struct del_work; /* used for delayed sysfs removal */ @@ -201,9 +206,9 @@ enum flag_bits { * it didn't fail, so don't use FailFast * any more for metadata */ - WBCollisionCheck, /* - * multiqueue device should check if there - * is collision between write behind bios. + CollisionCheck, /* + * check if there is collision between raid1 + * serial bios. 
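+ * (set by rdev_init_serial(); raid1 tests this bit before + * wait_for_serialization() and remove_serial())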
*/ }; @@ -263,12 +268,13 @@ enum mddev_sb_flags { MD_SB_NEED_REWRITE, /* metadata write needs to be repeated */ }; -#define NR_WB_INFOS 8 -/* record current range of write behind IOs */ -struct wb_info { - sector_t lo; - sector_t hi; - struct list_head list; +#define NR_SERIAL_INFOS 8 +/* record current range of serialize IOs */ +struct serial_info { + struct rb_node node; + sector_t start; /* start sector of rb node */ + sector_t last; /* end sector of rb node */ + sector_t _subtree_last; /* highest sector in subtree of rb node */ }; struct mddev { @@ -487,13 +493,14 @@ struct mddev { */ struct work_struct flush_work; struct work_struct event_work; /* used by dm to report failure event */ - mempool_t *wb_info_pool; + mempool_t *serial_info_pool; void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev); struct md_cluster_info *cluster_info; unsigned int good_device_nr; /* good device num within cluster raid */ bool has_superblocks:1; bool fail_last_dev:1; + bool serialize_policy:1; }; enum recovery_flags { @@ -737,8 +744,10 @@ extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, extern void md_reload_sb(struct mddev *mddev, int raid_disk); extern void md_update_sb(struct mddev *mddev, int force); extern void md_kick_rdev_from_array(struct md_rdev * rdev); -extern void mddev_create_wb_pool(struct mddev *mddev, struct md_rdev *rdev, - bool is_suspend); +extern void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev, + bool is_suspend); +extern void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev, + bool is_suspend); struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr); struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 201fd8aec59a..cd810e195086 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -29,6 +29,7 @@ #include <linux/module.h> #include <linux/seq_file.h> #include <linux/ratelimit.h> +#include <linux/interval_tree_generic.h> #include <trace/events/block.h> @@ -50,55 +51,71 @@ static void lower_barrier(struct r1conf *conf, sector_t sector_nr); #include "raid1-10.c" -static int check_and_add_wb(struct md_rdev *rdev, sector_t lo, sector_t hi) +#define START(node) ((node)->start) +#define LAST(node) ((node)->last) +INTERVAL_TREE_DEFINE(struct serial_info, node, sector_t, _subtree_last, + START, LAST, static inline, raid1_rb); + +static int check_and_add_serial(struct md_rdev *rdev, struct r1bio *r1_bio, + struct serial_info *si, int idx) { - struct wb_info *wi, *temp_wi; unsigned long flags; int ret = 0; - struct mddev *mddev = rdev->mddev; - - wi = mempool_alloc(mddev->wb_info_pool, GFP_NOIO); - - spin_lock_irqsave(&rdev->wb_list_lock, flags); - list_for_each_entry(temp_wi, &rdev->wb_list, list) { - /* collision happened */ - if (hi > temp_wi->lo && lo < temp_wi->hi) { - ret = -EBUSY; - break; - } + sector_t lo = r1_bio->sector; + sector_t hi = lo + r1_bio->sectors; + struct serial_in_rdev *serial = &rdev->serial[idx]; + + spin_lock_irqsave(&serial->serial_lock, flags); + /* collision happened */ + if (raid1_rb_iter_first(&serial->serial_rb, lo, hi)) + ret = -EBUSY; + else { + si->start = lo; + si->last = hi; + raid1_rb_insert(si, &serial->serial_rb); } - - if (!ret) { - wi->lo = lo; - wi->hi = hi; - list_add(&wi->list, &rdev->wb_list); - } else - mempool_free(wi, mddev->wb_info_pool); - spin_unlock_irqrestore(&rdev->wb_list_lock, flags); + spin_unlock_irqrestore(&serial->serial_lock, flags); return ret; } -static void remove_wb(struct 
md_rdev *rdev, sector_t lo, sector_t hi) +static void wait_for_serialization(struct md_rdev *rdev, struct r1bio *r1_bio) +{ + struct mddev *mddev = rdev->mddev; + struct serial_info *si; + int idx = sector_to_idx(r1_bio->sector); + struct serial_in_rdev *serial = &rdev->serial[idx]; + + if (WARN_ON(!mddev->serial_info_pool)) + return; + si = mempool_alloc(mddev->serial_info_pool, GFP_NOIO); + wait_event(serial->serial_io_wait, + check_and_add_serial(rdev, r1_bio, si, idx) == 0); +} + +static void remove_serial(struct md_rdev *rdev, sector_t lo, sector_t hi) { - struct wb_info *wi; + struct serial_info *si; unsigned long flags; int found = 0; struct mddev *mddev = rdev->mddev; - - spin_lock_irqsave(&rdev->wb_list_lock, flags); - list_for_each_entry(wi, &rdev->wb_list, list) - if (hi == wi->hi && lo == wi->lo) { - list_del(&wi->list); - mempool_free(wi, mddev->wb_info_pool); + int idx = sector_to_idx(lo); + struct serial_in_rdev *serial = &rdev->serial[idx]; + + spin_lock_irqsave(&serial->serial_lock, flags); + for (si = raid1_rb_iter_first(&serial->serial_rb, lo, hi); + si; si = raid1_rb_iter_next(si, lo, hi)) { + if (si->start == lo && si->last == hi) { + raid1_rb_remove(si, &serial->serial_rb); + mempool_free(si, mddev->serial_info_pool); found = 1; break; } - + } if (!found) - WARN(1, "The write behind IO is not recorded\n"); - spin_unlock_irqrestore(&rdev->wb_list_lock, flags); - wake_up(&rdev->wb_io_wait); + WARN(1, "The write IO is not recorded for serialization\n"); + spin_unlock_irqrestore(&serial->serial_lock, flags); + wake_up(&serial->serial_io_wait); } /* @@ -430,6 +447,8 @@ static void raid1_end_write_request(struct bio *bio) int mirror = find_bio_disk(r1_bio, bio); struct md_rdev *rdev = conf->mirrors[mirror].rdev; bool discard_error; + sector_t lo = r1_bio->sector; + sector_t hi = r1_bio->sector + r1_bio->sectors; discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD; @@ -499,12 +518,8 @@ static void raid1_end_write_request(struct bio *bio) } if (behind) { - if (test_bit(WBCollisionCheck, &rdev->flags)) { - sector_t lo = r1_bio->sector; - sector_t hi = r1_bio->sector + r1_bio->sectors; - - remove_wb(rdev, lo, hi); - } + if (test_bit(CollisionCheck, &rdev->flags)) + remove_serial(rdev, lo, hi); if (test_bit(WriteMostly, &rdev->flags)) atomic_dec(&r1_bio->behind_remaining); @@ -527,7 +542,8 @@ static void raid1_end_write_request(struct bio *bio) call_bio_endio(r1_bio); } } - } + } else if (rdev->mddev->serialize_policy) + remove_serial(rdev, lo, hi); if (r1_bio->bios[mirror] == NULL) rdev_dec_pending(rdev, conf->mddev); @@ -1479,6 +1495,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, for (i = 0; i < disks; i++) { struct bio *mbio = NULL; + struct md_rdev *rdev = conf->mirrors[i].rdev; if (!r1_bio->bios[i]) continue; @@ -1506,18 +1523,12 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set); if (r1_bio->behind_master_bio) { - struct md_rdev *rdev = conf->mirrors[i].rdev; - - if (test_bit(WBCollisionCheck, &rdev->flags)) { - sector_t lo = r1_bio->sector; - sector_t hi = r1_bio->sector + r1_bio->sectors; - - wait_event(rdev->wb_io_wait, - check_and_add_wb(rdev, lo, hi) == 0); - } + if (test_bit(CollisionCheck, &rdev->flags)) + wait_for_serialization(rdev, r1_bio); if (test_bit(WriteMostly, &rdev->flags)) atomic_inc(&r1_bio->behind_remaining); - } + } else if (mddev->serialize_policy) + wait_for_serialization(rdev, r1_bio); r1_bio->bios[i] = mbio; diff --git 
a/drivers/md/raid5.c b/drivers/md/raid5.c index d4d3b67ffbba..ba00e9877f02 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -6598,7 +6598,6 @@ raid5_show_group_thread_cnt(struct mddev *mddev, char *page) static int alloc_thread_groups(struct r5conf *conf, int cnt, int *group_cnt, - int *worker_cnt_per_group, struct r5worker_group **worker_groups); static ssize_t raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) @@ -6607,7 +6606,7 @@ raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) unsigned int new; int err; struct r5worker_group *new_groups, *old_groups; - int group_cnt, worker_cnt_per_group; + int group_cnt; if (len >= PAGE_SIZE) return -EINVAL; @@ -6630,13 +6629,11 @@ raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) if (old_groups) flush_workqueue(raid5_wq); - err = alloc_thread_groups(conf, new, - &group_cnt, &worker_cnt_per_group, - &new_groups); + err = alloc_thread_groups(conf, new, &group_cnt, &new_groups); if (!err) { spin_lock_irq(&conf->device_lock); conf->group_cnt = group_cnt; - conf->worker_cnt_per_group = worker_cnt_per_group; + conf->worker_cnt_per_group = new; conf->worker_groups = new_groups; spin_unlock_irq(&conf->device_lock); @@ -6672,16 +6669,13 @@ static struct attribute_group raid5_attrs_group = { .attrs = raid5_attrs, }; -static int alloc_thread_groups(struct r5conf *conf, int cnt, - int *group_cnt, - int *worker_cnt_per_group, +static int alloc_thread_groups(struct r5conf *conf, int cnt, int *group_cnt, struct r5worker_group **worker_groups) { int i, j, k; ssize_t size; struct r5worker *workers; - *worker_cnt_per_group = cnt; if (cnt == 0) { *group_cnt = 0; *worker_groups = NULL; @@ -6882,7 +6876,7 @@ static struct r5conf *setup_conf(struct mddev *mddev) struct disk_info *disk; char pers_name[6]; int i; - int group_cnt, worker_cnt_per_group; + int group_cnt; struct r5worker_group *new_group; int ret; @@ -6928,10 +6922,9 @@ static struct r5conf *setup_conf(struct mddev *mddev) for (i = 0; i < PENDING_IO_MAX; i++) list_add(&conf->pending_data[i].sibling, &conf->free_list); /* Don't enable multi-threading by default*/ - if (!alloc_thread_groups(conf, 0, &group_cnt, &worker_cnt_per_group, - &new_group)) { + if (!alloc_thread_groups(conf, 0, &group_cnt, &new_group)) { conf->group_cnt = group_cnt; - conf->worker_cnt_per_group = worker_cnt_per_group; + conf->worker_cnt_per_group = 0; conf->worker_groups = new_group; } else goto abort; diff --git a/drivers/media/common/videobuf2/videobuf2-vmalloc.c b/drivers/media/common/videobuf2/videobuf2-vmalloc.c index 04d51ca63223..4c8c96a35282 100644 --- a/drivers/media/common/videobuf2/videobuf2-vmalloc.c +++ b/drivers/media/common/videobuf2/videobuf2-vmalloc.c @@ -105,7 +105,7 @@ static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr, if (nums[i-1] + 1 != nums[i]) goto fail_map; buf->vaddr = (__force void *) - ioremap_nocache(__pfn_to_phys(nums[0]), size + offset); + ioremap(__pfn_to_phys(nums[0]), size + offset); } else { buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1, PAGE_KERNEL); diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c index fd47bd07ffd8..2f1eeeb6e7c7 100644 --- a/drivers/media/pci/cx18/cx18-driver.c +++ b/drivers/media/pci/cx18/cx18-driver.c @@ -938,7 +938,7 @@ static int cx18_probe(struct pci_dev *pci_dev, /* map io memory */ CX18_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n", (u64)cx->base_addr + CX18_MEM_OFFSET, CX18_MEM_SIZE); - 
cx->enc_mem = ioremap_nocache(cx->base_addr + CX18_MEM_OFFSET, + cx->enc_mem = ioremap(cx->base_addr + CX18_MEM_OFFSET, CX18_MEM_SIZE); if (!cx->enc_mem) { CX18_ERR("ioremap failed. Can't get a window into CX23418 memory and register space\n"); diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c index 3f3f40ea890b..1f79700a6307 100644 --- a/drivers/media/pci/ivtv/ivtv-driver.c +++ b/drivers/media/pci/ivtv/ivtv-driver.c @@ -1042,7 +1042,7 @@ static int ivtv_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) /* map io memory */ IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n", (u64)itv->base_addr + IVTV_ENCODER_OFFSET, IVTV_ENCODER_SIZE); - itv->enc_mem = ioremap_nocache(itv->base_addr + IVTV_ENCODER_OFFSET, + itv->enc_mem = ioremap(itv->base_addr + IVTV_ENCODER_OFFSET, IVTV_ENCODER_SIZE); if (!itv->enc_mem) { IVTV_ERR("ioremap failed. Can't get a window into CX23415/6 encoder memory\n"); @@ -1056,7 +1056,7 @@ static int ivtv_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) if (itv->has_cx23415) { IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n", (u64)itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE); - itv->dec_mem = ioremap_nocache(itv->base_addr + IVTV_DECODER_OFFSET, + itv->dec_mem = ioremap(itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE); if (!itv->dec_mem) { IVTV_ERR("ioremap failed. Can't get a window into CX23415 decoder memory\n"); @@ -1075,7 +1075,7 @@ static int ivtv_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n", (u64)itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE); itv->reg_mem = - ioremap_nocache(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE); + ioremap(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE); if (!itv->reg_mem) { IVTV_ERR("ioremap failed. 
Can't get a window into CX23415/6 register space\n"); IVTV_ERR("Each capture card with a CX23415/6 needs 64 kB of vmalloc address space for this window\n"); diff --git a/drivers/media/pci/ivtv/ivtvfb.c b/drivers/media/pci/ivtv/ivtvfb.c index 95a56cce9b65..1daf9e07cad7 100644 --- a/drivers/media/pci/ivtv/ivtvfb.c +++ b/drivers/media/pci/ivtv/ivtvfb.c @@ -37,7 +37,7 @@ #include <linux/ivtvfb.h> #ifdef CONFIG_X86_64 -#include <asm/pat.h> +#include <asm/memtype.h> #endif /* card parameters */ diff --git a/drivers/media/platform/davinci/dm355_ccdc.c b/drivers/media/platform/davinci/dm355_ccdc.c index f299baf7cbe0..e06d113dfe96 100644 --- a/drivers/media/platform/davinci/dm355_ccdc.c +++ b/drivers/media/platform/davinci/dm355_ccdc.c @@ -883,7 +883,7 @@ static int dm355_ccdc_probe(struct platform_device *pdev) goto fail_nores; } - ccdc_cfg.base_addr = ioremap_nocache(res->start, resource_size(res)); + ccdc_cfg.base_addr = ioremap(res->start, resource_size(res)); if (!ccdc_cfg.base_addr) { status = -ENOMEM; goto fail_nomem; diff --git a/drivers/media/platform/davinci/dm644x_ccdc.c b/drivers/media/platform/davinci/dm644x_ccdc.c index 2fc6c9c38f9c..c6378c4e0074 100644 --- a/drivers/media/platform/davinci/dm644x_ccdc.c +++ b/drivers/media/platform/davinci/dm644x_ccdc.c @@ -817,7 +817,7 @@ static int dm644x_ccdc_probe(struct platform_device *pdev) goto fail_nores; } - ccdc_cfg.base_addr = ioremap_nocache(res->start, resource_size(res)); + ccdc_cfg.base_addr = ioremap(res->start, resource_size(res)); if (!ccdc_cfg.base_addr) { status = -ENOMEM; goto fail_nomem; diff --git a/drivers/media/platform/davinci/isif.c b/drivers/media/platform/davinci/isif.c index e2e7ab7b7f45..b49378b18e5d 100644 --- a/drivers/media/platform/davinci/isif.c +++ b/drivers/media/platform/davinci/isif.c @@ -1045,7 +1045,7 @@ static int isif_probe(struct platform_device *pdev) status = -EBUSY; goto fail_nobase_res; } - addr = ioremap_nocache(res->start, resource_size(res)); + addr = ioremap(res->start, resource_size(res)); if (!addr) { status = -ENOMEM; goto fail_base_iomap; diff --git a/drivers/media/platform/tegra-cec/tegra_cec.c b/drivers/media/platform/tegra-cec/tegra_cec.c index a99caac59f44..1ac0c70a5981 100644 --- a/drivers/media/platform/tegra-cec/tegra_cec.c +++ b/drivers/media/platform/tegra-cec/tegra_cec.c @@ -351,7 +351,7 @@ static int tegra_cec_probe(struct platform_device *pdev) if (cec->tegra_cec_irq <= 0) return -EBUSY; - cec->cec_base = devm_ioremap_nocache(&pdev->dev, res->start, + cec->cec_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!cec->cec_base) { diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c index fd7b2167103d..06038b325b02 100644 --- a/drivers/misc/cardreader/rtsx_pcr.c +++ b/drivers/misc/cardreader/rtsx_pcr.c @@ -1512,7 +1512,7 @@ static int rtsx_pci_probe(struct pci_dev *pcidev, bar = 1; len = pci_resource_len(pcidev, bar); base = pci_resource_start(pcidev, bar); - pcr->remap_addr = ioremap_nocache(base, len); + pcr->remap_addr = ioremap(base, len); if (!pcr->remap_addr) { ret = -ENOMEM; goto free_handle; diff --git a/drivers/misc/mic/scif/scif_nodeqp.c b/drivers/misc/mic/scif/scif_nodeqp.c index c25fd40f3bd0..fcd999f50d14 100644 --- a/drivers/misc/mic/scif/scif_nodeqp.c +++ b/drivers/misc/mic/scif/scif_nodeqp.c @@ -788,7 +788,7 @@ scif_node_add(struct scif_dev *scifdev, struct scifmsg *msg) "failed to setup interrupts for %d\n", msg->src.node); goto interrupt_setup_error; } - newdev->mmio.va = ioremap_nocache(msg->payload[1], 
sdev->mmio->len); + newdev->mmio.va = ioremap(msg->payload[1], sdev->mmio->len); if (!newdev->mmio.va) { dev_err(&scifdev->sdev->dev, "failed to map mmio for %d\n", msg->src.node); diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c index 359c5bab45ac..063e4419cd7e 100644 --- a/drivers/misc/pti.c +++ b/drivers/misc/pti.c @@ -834,7 +834,7 @@ static int pti_pci_probe(struct pci_dev *pdev, } drv_data->aperture_base = drv_data->pti_addr+APERTURE_14; drv_data->pti_ioaddr = - ioremap_nocache((u32)drv_data->aperture_base, + ioremap((u32)drv_data->aperture_base, APERTURE_LEN); if (!drv_data->pti_ioaddr) { retval = -ENOMEM; diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c index 5e6be1527571..b837e7eba5f7 100644 --- a/drivers/misc/vmw_balloon.c +++ b/drivers/misc/vmw_balloon.c @@ -17,6 +17,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/types.h> +#include <linux/io.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/vmalloc.h> diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 95b41c0891d0..663d87924e5e 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -1107,7 +1107,7 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) card->erase_arg == MMC_TRIM_ARG ? INAND_CMD38_ARG_TRIM : INAND_CMD38_ARG_ERASE, - 0); + card->ext_csd.generic_cmd6_time); } if (!err) err = mmc_erase(card, from, nr, card->erase_arg); @@ -1149,7 +1149,7 @@ retry: arg == MMC_SECURE_TRIM1_ARG ? INAND_CMD38_ARG_SECTRIM1 : INAND_CMD38_ARG_SECERASE, - 0); + card->ext_csd.generic_cmd6_time); if (err) goto out_retry; } @@ -1167,7 +1167,7 @@ retry: err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, INAND_CMD38_ARG_EXT_CSD, INAND_CMD38_ARG_SECTRIM2, - 0); + card->ext_csd.generic_cmd6_time); if (err) goto out_retry; } diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index abf8f5eb0a1c..aa54d359dab7 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -2330,7 +2330,13 @@ void mmc_rescan(struct work_struct *work) } for (i = 0; i < ARRAY_SIZE(freqs); i++) { - if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min))) + unsigned int freq = freqs[i]; + if (freq > host->f_max) { + if (i + 1 < ARRAY_SIZE(freqs)) + continue; + freq = host->f_max; + } + if (!mmc_rescan_try_freq(host, max(freq, host->f_min))) break; if (freqs[i] <= host->f_min) break; @@ -2344,7 +2350,7 @@ void mmc_rescan(struct work_struct *work) void mmc_start_host(struct mmc_host *host) { - host->f_init = max(freqs[0], host->f_min); + host->f_init = max(min(freqs[0], host->f_max), host->f_min); host->rescan_disable = 0; host->ios.power_mode = MMC_POWER_UNDEFINED; diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index 105b7a7c0251..c8768726d925 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -175,8 +175,6 @@ int mmc_of_parse(struct mmc_host *host) struct device *dev = host->parent; u32 bus_width, drv_type, cd_debounce_delay_ms; int ret; - bool cd_cap_invert, cd_gpio_invert = false; - bool ro_cap_invert, ro_gpio_invert = false; if (!dev || !dev_fwnode(dev)) return 0; @@ -219,10 +217,12 @@ int mmc_of_parse(struct mmc_host *host) */ /* Parse Card Detection */ + if (device_property_read_bool(dev, "non-removable")) { host->caps |= MMC_CAP_NONREMOVABLE; } else { - cd_cap_invert = device_property_read_bool(dev, "cd-inverted"); + if (device_property_read_bool(dev, "cd-inverted")) + host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH; if (device_property_read_u32(dev, "cd-debounce-delay-ms", &cd_debounce_delay_ms)) @@ 
-232,32 +232,19 @@ int mmc_of_parse(struct mmc_host *host) host->caps |= MMC_CAP_NEEDS_POLL; ret = mmc_gpiod_request_cd(host, "cd", 0, false, - cd_debounce_delay_ms * 1000, - &cd_gpio_invert); + cd_debounce_delay_ms * 1000); if (!ret) dev_info(host->parent, "Got CD GPIO\n"); else if (ret != -ENOENT && ret != -ENOSYS) return ret; - - /* - * There are two ways to flag that the CD line is inverted: - * through the cd-inverted flag and by the GPIO line itself - * being inverted from the GPIO subsystem. This is a leftover - * from the times when the GPIO subsystem did not make it - * possible to flag a line as inverted. - * - * If the capability on the host AND the GPIO line are - * both inverted, the end result is that the CD line is - * not inverted. - */ - if (cd_cap_invert ^ cd_gpio_invert) - host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH; } /* Parse Write Protection */ - ro_cap_invert = device_property_read_bool(dev, "wp-inverted"); - ret = mmc_gpiod_request_ro(host, "wp", 0, 0, &ro_gpio_invert); + if (device_property_read_bool(dev, "wp-inverted")) + host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; + + ret = mmc_gpiod_request_ro(host, "wp", 0, 0); if (!ret) dev_info(host->parent, "Got WP GPIO\n"); else if (ret != -ENOENT && ret != -ENOSYS) @@ -266,10 +253,6 @@ int mmc_of_parse(struct mmc_host *host) if (device_property_read_bool(dev, "disable-wp")) host->caps2 |= MMC_CAP2_NO_WRITE_PROTECT; - /* See the comment on CD inversion above */ - if (ro_cap_invert ^ ro_gpio_invert) - host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; - if (device_property_read_bool(dev, "cap-sd-highspeed")) host->caps |= MMC_CAP_SD_HIGHSPEED; if (device_property_read_bool(dev, "cap-mmc-highspeed")) diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index 09113b9ad679..da425ee2d9bf 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c @@ -19,7 +19,9 @@ #include "host.h" #include "mmc_ops.h" -#define MMC_OPS_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */ +#define MMC_OPS_TIMEOUT_MS (10 * 60 * 1000) /* 10min*/ +#define MMC_BKOPS_TIMEOUT_MS (120 * 1000) /* 120s */ +#define MMC_CACHE_FLUSH_TIMEOUT_MS (30 * 1000) /* 30s */ static const u8 tuning_blk_pattern_4bit[] = { 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc, @@ -458,10 +460,6 @@ static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms, bool expired = false; bool busy = false; - /* We have an unspecified cmd timeout, use the fallback value. */ - if (!timeout_ms) - timeout_ms = MMC_OPS_TIMEOUT_MS; - /* * In cases when not allowed to poll by using CMD13 or because we aren't * capable of polling by using ->card_busy(), then rely on waiting the @@ -534,14 +532,19 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, mmc_retune_hold(host); + if (!timeout_ms) { + pr_warn("%s: unspecified timeout for CMD6 - use generic\n", + mmc_hostname(host)); + timeout_ms = card->ext_csd.generic_cmd6_time; + } + /* - * If the cmd timeout and the max_busy_timeout of the host are both - * specified, let's validate them. A failure means we need to prevent - * the host from doing hw busy detection, which is done by converting - * to a R1 response instead of a R1B. + * If the max_busy_timeout of the host is specified, make sure it's + * enough to fit the used timeout_ms. In case it's not, let's instruct + * the host to avoid HW busy detection, by converting to a R1 response + * instead of a R1B. 
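
The comment removed above encodes a small truth table that is easy to lose in the diff: the legacy cd-inverted property and a GPIO line already flagged as inverted by gpiolib cancel each other out. A minimal restatement of the pre-rework rule, with an illustrative helper name:

#include <linux/types.h>

/* Pre-rework semantics: the CD line is treated as active-high only when
 * exactly one of the two inversion sources is set. */
static bool cd_active_high(bool cd_cap_invert, bool cd_gpio_invert)
{
        return cd_cap_invert ^ cd_gpio_invert;
}

cd_active_high(true, true) is false: both inversions together describe a plain active-low line. The rework retires this dance by translating cd-inverted into MMC_CAP2_CD_ACTIVE_HIGH up front and letting gpiolib own the line polarity.
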
*/ - if (timeout_ms && host->max_busy_timeout && - (timeout_ms > host->max_busy_timeout)) + if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout)) use_r1b_resp = false; cmd.opcode = MMC_SWITCH; @@ -552,10 +555,6 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, cmd.flags = MMC_CMD_AC; if (use_r1b_resp) { cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B; - /* - * A busy_timeout of zero means the host can decide to use - * whatever value it finds suitable. - */ cmd.busy_timeout = timeout_ms; } else { cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1; @@ -941,7 +940,7 @@ void mmc_run_bkops(struct mmc_card *card) * urgent levels by using an asynchronous background task, when idle. */ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, - EXT_CSD_BKOPS_START, 1, MMC_OPS_TIMEOUT_MS); + EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS); if (err) pr_warn("%s: Error %d starting bkops\n", mmc_hostname(card->host), err); @@ -961,7 +960,8 @@ int mmc_flush_cache(struct mmc_card *card) (card->ext_csd.cache_size > 0) && (card->ext_csd.cache_ctrl & 1)) { err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, - EXT_CSD_FLUSH_CACHE, 1, 0); + EXT_CSD_FLUSH_CACHE, 1, + MMC_CACHE_FLUSH_TIMEOUT_MS); if (err) pr_err("%s: cache flush error %d\n", mmc_hostname(card->host), err); diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c index da2596c5fa28..05e907451df9 100644 --- a/drivers/mmc/core/slot-gpio.c +++ b/drivers/mmc/core/slot-gpio.c @@ -19,7 +19,6 @@ struct mmc_gpio { struct gpio_desc *ro_gpio; struct gpio_desc *cd_gpio; - bool override_cd_active_level; irqreturn_t (*cd_gpio_isr)(int irq, void *dev_id); char *ro_label; char *cd_label; @@ -80,13 +79,6 @@ int mmc_gpio_get_cd(struct mmc_host *host) return -ENOSYS; cansleep = gpiod_cansleep(ctx->cd_gpio); - if (ctx->override_cd_active_level) { - int value = cansleep ? - gpiod_get_raw_value_cansleep(ctx->cd_gpio) : - gpiod_get_raw_value(ctx->cd_gpio); - return !value ^ !!(host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH); - } - return cansleep ? gpiod_get_value_cansleep(ctx->cd_gpio) : gpiod_get_value(ctx->cd_gpio); @@ -168,8 +160,6 @@ EXPORT_SYMBOL(mmc_gpio_set_cd_isr); * @idx: index of the GPIO to obtain in the consumer * @override_active_level: ignore %GPIO_ACTIVE_LOW flag * @debounce: debounce time in microseconds - * @gpio_invert: will return whether the GPIO line is inverted or not, set - * to NULL to ignore * * Note that this must be called prior to mmc_add_host() * otherwise the caller must also call mmc_gpiod_request_cd_irq(). @@ -178,7 +168,7 @@ EXPORT_SYMBOL(mmc_gpio_set_cd_isr); */ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id, unsigned int idx, bool override_active_level, - unsigned int debounce, bool *gpio_invert) + unsigned int debounce) { struct mmc_gpio *ctx = host->slot.handler_priv; struct gpio_desc *desc; @@ -194,10 +184,14 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id, ctx->cd_debounce_delay_ms = debounce / 1000; } - if (gpio_invert) - *gpio_invert = !gpiod_is_active_low(desc); + /* override forces default (active-low) polarity ... */ + if (override_active_level && !gpiod_is_active_low(desc)) + gpiod_toggle_active_low(desc); + + /* ... 
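
The __mmc_switch() hunks above change two decisions: an unspecified CMD6 timeout now falls back to the card's generic_cmd6_time instead of a blanket 10-minute value, and an R1B response (hardware busy detection) is kept only when the host can actually time the whole wait. A compact sketch of that decision, under illustrative names:

#include <linux/types.h>

static bool use_r1b_resp(unsigned int *timeout_ms,
                         unsigned int max_busy_timeout,
                         unsigned int generic_cmd6_time)
{
        if (!*timeout_ms)       /* unspecified: use the generic CMD6 time */
                *timeout_ms = generic_cmd6_time;

        /* HW busy detection only if the host can wait that long. */
        return !max_busy_timeout || *timeout_ms <= max_busy_timeout;
}
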
or active-high */ + if (host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH) + gpiod_toggle_active_low(desc); - ctx->override_cd_active_level = override_active_level; ctx->cd_gpio = desc; return 0; @@ -218,14 +212,11 @@ EXPORT_SYMBOL(mmc_can_gpio_cd); * @con_id: function within the GPIO consumer * @idx: index of the GPIO to obtain in the consumer * @debounce: debounce time in microseconds - * @gpio_invert: will return whether the GPIO line is inverted or not, - * set to NULL to ignore * * Returns zero on success, else an error. */ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id, - unsigned int idx, - unsigned int debounce, bool *gpio_invert) + unsigned int idx, unsigned int debounce) { struct mmc_gpio *ctx = host->slot.handler_priv; struct gpio_desc *desc; @@ -241,8 +232,8 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id, return ret; } - if (gpio_invert) - *gpio_invert = !gpiod_is_active_low(desc); + if (host->caps2 & MMC_CAP2_RO_ACTIVE_HIGH) + gpiod_toggle_active_low(desc); ctx->ro_gpio = desc; diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index d06b2dfe3c95..3a5089f0332c 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -501,6 +501,7 @@ config MMC_SDHCI_MSM depends on ARCH_QCOM || (ARM && COMPILE_TEST) depends on MMC_SDHCI_PLTFM select MMC_SDHCI_IO_ACCESSORS + select MMC_CQHCI help This selects the Secure Digital Host Controller Interface (SDHCI) support present in Qualcomm SOCs. The controller supports @@ -990,6 +991,7 @@ config MMC_SDHCI_BRCMSTB tristate "Broadcom SDIO/SD/MMC support" depends on ARCH_BRCMSTB || BMIPS_GENERIC depends on MMC_SDHCI_PLTFM + select MMC_CQHCI default y help This selects support for the SDIO/SD/MMC Host Controller on @@ -1010,6 +1012,7 @@ config MMC_SDHCI_OMAP depends on MMC_SDHCI_PLTFM && OF select THERMAL imply TI_SOC_THERMAL + select MMC_SDHCI_EXTERNAL_DMA if DMA_ENGINE help This selects the Secure Digital Host Controller Interface (SDHCI) support present in TI's DRA7 SOCs. The controller supports @@ -1040,3 +1043,6 @@ config MMC_OWL help This selects support for the SD/MMC Host Controller on Actions Semi Owl SoCs. 
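
With the slot-gpio rework above, line polarity is folded into the GPIO descriptor once at request time, so mmc_gpio_get_cd() no longer needs the raw-value special case that was deleted. The CD normalization in isolation, assuming the same two inputs the new code consults:

#include <linux/gpio/consumer.h>
#include <linux/types.h>

static void normalize_cd_polarity(struct gpio_desc *desc,
                                  bool override_active_level,
                                  bool cap2_cd_active_high)
{
        /* the override forces the default (active-low) polarity first ... */
        if (override_active_level && !gpiod_is_active_low(desc))
                gpiod_toggle_active_low(desc);

        /* ... and MMC_CAP2_CD_ACTIVE_HIGH then flips it to active-high */
        if (cap2_cd_active_high)
                gpiod_toggle_active_low(desc);
}

The write-protect path is the same idea with MMC_CAP2_RO_ACTIVE_HIGH and no override input.
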
+ +config MMC_SDHCI_EXTERNAL_DMA + bool diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index 6f065bb5c55a..aeaaa5314924 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -2645,7 +2645,7 @@ static int atmci_runtime_resume(struct device *dev) { struct atmel_mci *host = dev_get_drvdata(dev); - pinctrl_pm_select_default_state(dev); + pinctrl_select_default_state(dev); return clk_prepare_enable(host->mck); } diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c index bc8aeb47a7b4..8823680ca42c 100644 --- a/drivers/mmc/host/au1xmmc.c +++ b/drivers/mmc/host/au1xmmc.c @@ -984,12 +984,9 @@ static int au1xmmc_probe(struct platform_device *pdev) goto out2; } - r = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!r) { - dev_err(&pdev->dev, "no IRQ defined\n"); + host->irq = platform_get_irq(pdev, 0); + if (host->irq < 0) goto out3; - } - host->irq = r->start; mmc->ops = &au1xmmc_ops; diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c index 99f61fd2a658..c3d949847cbd 100644 --- a/drivers/mmc/host/bcm2835.c +++ b/drivers/mmc/host/bcm2835.c @@ -1393,7 +1393,17 @@ static int bcm2835_probe(struct platform_device *pdev) host->dma_chan = NULL; host->dma_desc = NULL; - host->dma_chan_rxtx = dma_request_slave_channel(dev, "rx-tx"); + host->dma_chan_rxtx = dma_request_chan(dev, "rx-tx"); + if (IS_ERR(host->dma_chan_rxtx)) { + ret = PTR_ERR(host->dma_chan_rxtx); + host->dma_chan_rxtx = NULL; + + if (ret == -EPROBE_DEFER) + goto err; + + /* Ignore errors to fall back to PIO mode */ + } + clk = devm_clk_get(dev, NULL); if (IS_ERR(clk)) { diff --git a/drivers/mmc/host/cavium-thunderx.c b/drivers/mmc/host/cavium-thunderx.c index eee08d81b242..76013bbbcff3 100644 --- a/drivers/mmc/host/cavium-thunderx.c +++ b/drivers/mmc/host/cavium-thunderx.c @@ -76,8 +76,10 @@ static int thunder_mmc_probe(struct pci_dev *pdev, return ret; host->base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0)); - if (!host->base) - return -EINVAL; + if (!host->base) { + ret = -EINVAL; + goto error; + } /* On ThunderX these are identical */ host->dma_base = host->base; @@ -86,12 +88,14 @@ static int thunder_mmc_probe(struct pci_dev *pdev, host->reg_off_dma = 0x160; host->clk = devm_clk_get(dev, NULL); - if (IS_ERR(host->clk)) - return PTR_ERR(host->clk); + if (IS_ERR(host->clk)) { + ret = PTR_ERR(host->clk); + goto error; + } ret = clk_prepare_enable(host->clk); if (ret) - return ret; + goto error; host->sys_freq = clk_get_rate(host->clk); spin_lock_init(&host->irq_handler_lock); @@ -157,6 +161,7 @@ error: } } clk_disable_unprepare(host->clk); + pci_release_regions(pdev); return ret; } @@ -175,6 +180,7 @@ static void thunder_mmc_remove(struct pci_dev *pdev) writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host)); clk_disable_unprepare(host->clk); + pci_release_regions(pdev); } static const struct pci_device_id thunder_mmc_id_table[] = { diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c index ebfaeb33bc8c..f01fecd75833 100644 --- a/drivers/mmc/host/davinci_mmc.c +++ b/drivers/mmc/host/davinci_mmc.c @@ -1174,13 +1174,13 @@ static int mmc_davinci_parse_pdata(struct mmc_host *mmc) mmc->caps |= pdata->caps; /* Register a cd gpio, if there is not one, enable polling */ - ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL); + ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0); if (ret == -EPROBE_DEFER) return ret; else if (ret) mmc->caps |= MMC_CAP_NEEDS_POLL; - ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL); + ret = 
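
Several conversions in this series (bcm2835 above, dw_mmc and mxcmmc below) share one shape: dma_request_chan() returns ERR_PTR() codes, so probe can propagate -EPROBE_DEFER while treating any other failure as "fall back to PIO". A minimal sketch of the pattern; foo_request_dma and the channel pointer are illustrative:

#include <linux/dmaengine.h>
#include <linux/err.h>

static int foo_request_dma(struct device *dev, struct dma_chan **chan)
{
        *chan = dma_request_chan(dev, "rx-tx");
        if (IS_ERR(*chan)) {
                int ret = PTR_ERR(*chan);

                *chan = NULL;
                if (ret == -EPROBE_DEFER)
                        return ret;     /* provider not ready: retry probe */
                /* any other error: silently fall back to PIO mode */
        }
        return 0;
}

The old dma_request_slave_channel() returned NULL on every failure, which made deferred probing indistinguishable from a genuinely absent channel.
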
mmc_gpiod_request_ro(mmc, "wp", 0, 0); if (ret == -EPROBE_DEFER) return ret; diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index fc9d4d000f97..bc5278ab5707 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -833,12 +833,14 @@ static int dw_mci_edmac_init(struct dw_mci *host) if (!host->dms) return -ENOMEM; - host->dms->ch = dma_request_slave_channel(host->dev, "rx-tx"); - if (!host->dms->ch) { + host->dms->ch = dma_request_chan(host->dev, "rx-tx"); + if (IS_ERR(host->dms->ch)) { + int ret = PTR_ERR(host->dms->ch); + dev_err(host->dev, "Failed to get external DMA channel.\n"); kfree(host->dms); host->dms = NULL; - return -ENXIO; + return ret; } return 0; diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c index 78383f60a3dc..fbae87d1f017 100644 --- a/drivers/mmc/host/jz4740_mmc.c +++ b/drivers/mmc/host/jz4740_mmc.c @@ -1108,7 +1108,7 @@ static int jz4740_mmc_suspend(struct device *dev) static int jz4740_mmc_resume(struct device *dev) { - return pinctrl_pm_select_default_state(dev); + return pinctrl_select_default_state(dev); } static SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend, diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index e712315c7e8d..35400cf2a2e4 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c @@ -161,7 +161,6 @@ struct meson_host { bool dram_access_quirk; struct pinctrl *pinctrl; - struct pinctrl_state *pins_default; struct pinctrl_state *pins_clk_gate; unsigned int bounce_buf_size; @@ -327,7 +326,7 @@ static void meson_mmc_clk_ungate(struct meson_host *host) u32 cfg; if (host->pins_clk_gate) - pinctrl_select_state(host->pinctrl, host->pins_default); + pinctrl_select_default_state(host->dev); /* Make sure the clock is not stopped in the controller */ cfg = readl(host->regs + SD_EMMC_CFG); @@ -1101,13 +1100,6 @@ static int meson_mmc_probe(struct platform_device *pdev) goto free_host; } - host->pins_default = pinctrl_lookup_state(host->pinctrl, - PINCTRL_STATE_DEFAULT); - if (IS_ERR(host->pins_default)) { - ret = PTR_ERR(host->pins_default); - goto free_host; - } - host->pins_clk_gate = pinctrl_lookup_state(host->pinctrl, "clk-gate"); if (IS_ERR(host->pins_clk_gate)) { diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c index ba9a63db73da..8b038e7b2cd3 100644 --- a/drivers/mmc/host/meson-mx-sdio.c +++ b/drivers/mmc/host/meson-mx-sdio.c @@ -638,7 +638,6 @@ static int meson_mx_mmc_probe(struct platform_device *pdev) struct platform_device *slot_pdev; struct mmc_host *mmc; struct meson_mx_mmc_host *host; - struct resource *res; int ret, irq; u32 conf; @@ -663,8 +662,7 @@ static int meson_mx_mmc_probe(struct platform_device *pdev) platform_set_drvdata(pdev, host); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - host->base = devm_ioremap_resource(host->controller_dev, res); + host->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(host->base)) { ret = PTR_ERR(host->base); goto error_free_mmc; diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index 74c6cfbf9172..951f76dc1ddd 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -1134,17 +1134,22 @@ static void mmc_spi_initsequence(struct mmc_spi_host *host) * SPI protocol. Another is that when chipselect is released while * the card returns BUSY status, the clock must issue several cycles * with chipselect high before the card will stop driving its output. + * + * SPI_CS_HIGH means "asserted" here. 
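
The mmc_spi comment above is the heart of that hunk: with a GPIO chip select, gpiolib may already have set SPI_CS_HIGH in spi->mode, so OR-ing the bit in and masking it out afterwards would clobber the line's real polarity. Two XORs are self-inverse whatever the starting value. A sketch, omitting the spi_setup() calls the driver makes after each flip:

#include <linux/spi/spi.h>

static void assert_cs_high_briefly(struct spi_device *spi)
{
        spi->mode ^= SPI_CS_HIGH;       /* flip, whatever the default was */
        /* ... clock out the init sequence (18 dummy bytes here) ... */
        spi->mode ^= SPI_CS_HIGH;       /* second XOR restores the original */
}
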
In some cases like when using + * GPIOs for chip select, SPI_CS_HIGH is set but this will be logically + * inverted by gpiolib, so if we want to ascertain to drive it high + * we should toggle the default with an XOR as we do here. */ - host->spi->mode |= SPI_CS_HIGH; + host->spi->mode ^= SPI_CS_HIGH; if (spi_setup(host->spi) != 0) { /* Just warn; most cards work without it. */ dev_warn(&host->spi->dev, "can't change chip-select polarity\n"); - host->spi->mode &= ~SPI_CS_HIGH; + host->spi->mode ^= SPI_CS_HIGH; } else { mmc_spi_readbytes(host, 18); - host->spi->mode &= ~SPI_CS_HIGH; + host->spi->mode ^= SPI_CS_HIGH; if (spi_setup(host->spi) != 0) { /* Wot, we can't get the same setup we had before? */ dev_err(&host->spi->dev, @@ -1421,7 +1426,7 @@ static int mmc_spi_probe(struct spi_device *spi) * Index 0 is card detect * Old boardfiles were specifying 1 ms as debounce */ - status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000, NULL); + status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000); if (status == -EPROBE_DEFER) goto fail_add_host; if (!status) { @@ -1436,7 +1441,7 @@ static int mmc_spi_probe(struct spi_device *spi) mmc_detect_change(mmc, 0); /* Index 1 is write protect/read only */ - status = mmc_gpiod_request_ro(mmc, NULL, 1, 0, NULL); + status = mmc_gpiod_request_ro(mmc, NULL, 1, 0); if (status == -EPROBE_DEFER) goto fail_add_host; if (!status) diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 40e72c30ea84..e9ffce8d41ea 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -169,6 +169,8 @@ static struct variant_data variant_ux500 = { .cmdreg_srsp = MCI_CPSM_RESPONSE, .datalength_bits = 24, .datactrl_blocksz = 11, + .datactrl_any_blocksz = true, + .dma_power_of_2 = true, .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN, .st_sdio = true, .st_clkdiv = true, @@ -202,6 +204,8 @@ static struct variant_data variant_ux500v2 = { .datactrl_mask_ddrmode = MCI_DPSM_ST_DDRMODE, .datalength_bits = 24, .datactrl_blocksz = 11, + .datactrl_any_blocksz = true, + .dma_power_of_2 = true, .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN, .st_sdio = true, .st_clkdiv = true, @@ -261,6 +265,7 @@ static struct variant_data variant_stm32_sdmmc = { .datacnt_useless = true, .datalength_bits = 25, .datactrl_blocksz = 14, + .datactrl_any_blocksz = true, .stm32_idmabsize_mask = GENMASK(12, 5), .busy_timeout = true, .busy_detect = true, @@ -284,6 +289,7 @@ static struct variant_data variant_qcom = { .data_cmd_enable = MCI_CPSM_QCOM_DATCMD, .datalength_bits = 24, .datactrl_blocksz = 11, + .datactrl_any_blocksz = true, .pwrreg_powerup = MCI_PWR_UP, .f_max = 208000000, .explicit_mclk_control = true, @@ -452,10 +458,11 @@ static void mmci_dma_setup(struct mmci_host *host) static int mmci_validate_data(struct mmci_host *host, struct mmc_data *data) { + struct variant_data *variant = host->variant; + if (!data) return 0; - - if (!is_power_of_2(data->blksz)) { + if (!is_power_of_2(data->blksz) && !variant->datactrl_any_blocksz) { dev_err(mmc_dev(host->mmc), "unsupported block size (%d bytes)\n", data->blksz); return -EINVAL; @@ -520,7 +527,9 @@ static int mmci_dma_start(struct mmci_host *host, unsigned int datactrl) "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n", data->sg_len, data->blksz, data->blocks, data->flags); - host->ops->dma_start(host, &datactrl); + ret = host->ops->dma_start(host, &datactrl); + if (ret) + return ret; /* Trigger the DMA transfer */ mmci_write_datactrlreg(host, datactrl); @@ -706,10 +715,20 @@ int mmci_dmae_setup(struct mmci_host *host) host->dma_priv 
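
mmci_validate_data() above now gates non-power-of-two block sizes on a per-variant capability instead of rejecting them outright, which is what lets odd-sized SDIO packets through on the variants that can handle them. The rule in isolation:

#include <linux/log2.h>
#include <linux/types.h>

static bool mmci_blksz_ok(unsigned int blksz, bool any_blocksz)
{
        return is_power_of_2(blksz) || any_blocksz;
}

mmci_blksz_ok(512, false) passes; mmci_blksz_ok(6, false) fails and the request errors out with -EINVAL, as before the change.
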
= dmae; - dmae->rx_channel = dma_request_slave_channel(mmc_dev(host->mmc), - "rx"); - dmae->tx_channel = dma_request_slave_channel(mmc_dev(host->mmc), - "tx"); + dmae->rx_channel = dma_request_chan(mmc_dev(host->mmc), "rx"); + if (IS_ERR(dmae->rx_channel)) { + int ret = PTR_ERR(dmae->rx_channel); + dmae->rx_channel = NULL; + return ret; + } + + dmae->tx_channel = dma_request_chan(mmc_dev(host->mmc), "tx"); + if (IS_ERR(dmae->tx_channel)) { + if (PTR_ERR(dmae->tx_channel) == -EPROBE_DEFER) + dev_warn(mmc_dev(host->mmc), + "Deferred probe for TX channel ignored\n"); + dmae->tx_channel = NULL; + } /* * If only an RX channel is specified, the driver will @@ -888,6 +907,18 @@ static int _mmci_dmae_prep_data(struct mmci_host *host, struct mmc_data *data, if (data->blksz * data->blocks <= variant->fifosize) return -EINVAL; + /* + * This is necessary to get SDIO working on the Ux500. We do not yet + * know if this is a bug in: + * - The Ux500 DMA controller (DMA40) + * - The MMCI DMA interface on the Ux500 + * some power of two blocks (such as 64 bytes) are sent regularly + * during SDIO traffic and those work fine so for these we enable DMA + * transfers. + */ + if (host->variant->dma_power_of_2 && !is_power_of_2(data->blksz)) + return -EINVAL; + device = chan->device; nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, mmc_get_dma_dir(data)); @@ -938,9 +969,14 @@ int mmci_dmae_prep_data(struct mmci_host *host, int mmci_dmae_start(struct mmci_host *host, unsigned int *datactrl) { struct mmci_dmae_priv *dmae = host->dma_priv; + int ret; host->dma_in_progress = true; - dmaengine_submit(dmae->desc_current); + ret = dma_submit_error(dmaengine_submit(dmae->desc_current)); + if (ret < 0) { + host->dma_in_progress = false; + return ret; + } dma_async_issue_pending(dmae->cur); *datactrl |= MCI_DPSM_DMAENABLE; @@ -1321,6 +1357,7 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd, } else if (host->variant->busy_timeout && busy_resp && status & MCI_DATATIMEOUT) { cmd->error = -ETIMEDOUT; + host->irq_action = IRQ_WAKE_THREAD; } else { cmd->resp[0] = readl(base + MMCIRESPONSE0); cmd->resp[1] = readl(base + MMCIRESPONSE1); @@ -1339,7 +1376,10 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd, return; } } - mmci_request_end(host, host->mrq); + + if (host->irq_action != IRQ_WAKE_THREAD) + mmci_request_end(host, host->mrq); + } else if (sbc) { mmci_start_command(host, host->mrq->cmd, 0); } else if (!host->variant->datactrl_first && @@ -1532,9 +1572,9 @@ static irqreturn_t mmci_irq(int irq, void *dev_id) { struct mmci_host *host = dev_id; u32 status; - int ret = 0; spin_lock(&host->lock); + host->irq_action = IRQ_HANDLED; do { status = readl(host->base + MMCISTATUS); @@ -1574,12 +1614,41 @@ static irqreturn_t mmci_irq(int irq, void *dev_id) if (host->variant->busy_detect_flag) status &= ~host->variant->busy_detect_flag; - ret = 1; } while (status); spin_unlock(&host->lock); - return IRQ_RETVAL(ret); + return host->irq_action; +} + +/* + * mmci_irq_thread() - A threaded IRQ handler that manages a reset of the HW. + * + * A reset is needed for some variants, where a datatimeout for a R1B request + * causes the DPSM to stay busy (non-functional). 
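
mmci_dmae_start() above now checks the cookie from dmaengine_submit() rather than assuming success, and clears dma_in_progress again on failure. The core of the pattern, as a standalone helper:

#include <linux/dmaengine.h>

static int submit_and_issue(struct dma_chan *chan,
                            struct dma_async_tx_descriptor *desc)
{
        int ret = dma_submit_error(dmaengine_submit(desc));

        if (ret < 0)
                return ret;     /* caller must unwind its in-progress state */

        dma_async_issue_pending(chan);
        return 0;
}
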
+ */ +static irqreturn_t mmci_irq_thread(int irq, void *dev_id) +{ + struct mmci_host *host = dev_id; + unsigned long flags; + + if (host->rst) { + reset_control_assert(host->rst); + udelay(2); + reset_control_deassert(host->rst); + } + + spin_lock_irqsave(&host->lock, flags); + writel(host->clk_reg, host->base + MMCICLOCK); + writel(host->pwr_reg, host->base + MMCIPOWER); + writel(MCI_IRQENABLE | host->variant->start_err, + host->base + MMCIMASK0); + + host->irq_action = IRQ_HANDLED; + mmci_request_end(host, host->mrq); + spin_unlock_irqrestore(&host->lock, flags); + + return host->irq_action; } static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq) @@ -1704,7 +1773,7 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) pinctrl_select_state(host->pinctrl, host->pins_opendrain); else - pinctrl_select_state(host->pinctrl, host->pins_default); + pinctrl_select_default_state(mmc_dev(mmc)); } /* @@ -1877,14 +1946,6 @@ static int mmci_probe(struct amba_device *dev, goto host_free; } - host->pins_default = pinctrl_lookup_state(host->pinctrl, - PINCTRL_STATE_DEFAULT); - if (IS_ERR(host->pins_default)) { - dev_err(mmc_dev(mmc), "Can't select default pins\n"); - ret = PTR_ERR(host->pins_default); - goto host_free; - } - host->pins_opendrain = pinctrl_lookup_state(host->pinctrl, MMCI_PINCTRL_STATE_OPENDRAIN); if (IS_ERR(host->pins_opendrain)) { @@ -2062,17 +2123,18 @@ static int mmci_probe(struct amba_device *dev, * silently of these do not exist */ if (!np) { - ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL); + ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0); if (ret == -EPROBE_DEFER) goto clk_disable; - ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL); + ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0); if (ret == -EPROBE_DEFER) goto clk_disable; } - ret = devm_request_irq(&dev->dev, dev->irq[0], mmci_irq, IRQF_SHARED, - DRIVER_NAME " (cmd)", host); + ret = devm_request_threaded_irq(&dev->dev, dev->irq[0], mmci_irq, + mmci_irq_thread, IRQF_SHARED, + DRIVER_NAME " (cmd)", host); if (ret) goto clk_disable; @@ -2203,7 +2265,7 @@ static int mmci_runtime_resume(struct device *dev) struct mmci_host *host = mmc_priv(mmc); clk_prepare_enable(host->clk); mmci_restore(host); - pinctrl_pm_select_default_state(dev); + pinctrl_select_default_state(dev); } return 0; diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h index 158e1231aa23..ea6a0b5779d4 100644 --- a/drivers/mmc/host/mmci.h +++ b/drivers/mmc/host/mmci.h @@ -279,7 +279,11 @@ struct mmci_host; * @stm32_clkdiv: true if using a STM32-specific clock divider algorithm * @datactrl_mask_ddrmode: ddr mode mask in datactrl register. * @datactrl_mask_sdio: SDIO enable mask in datactrl register - * @datactrl_blksz: block size in power of two + * @datactrl_blocksz: block size in power of two + * @datactrl_any_blocksz: true if block any block sizes are accepted by + * hardware, such as with some SDIO traffic that send + * odd packets. + * @dma_power_of_2: DMA only works with blocks that are a power of 2. 
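
The new mmci_irq_thread() completes a split that runs through several hunks above: the hard handler merely marks a fatal R1B data timeout by returning IRQ_WAKE_THREAD, and the thread performs the controller reset and finishes the request. The shape of that split, with illustrative names and a stubbed-out timeout check:

#include <linux/interrupt.h>
#include <linux/types.h>

static bool foo_fatal_timeout(void *dev_id)
{
        return false;   /* stub for illustration */
}

static irqreturn_t foo_hardirq(int irq, void *dev_id)
{
        if (foo_fatal_timeout(dev_id))
                return IRQ_WAKE_THREAD; /* recover outside the hard handler */
        return IRQ_HANDLED;
}

static irqreturn_t foo_thread(int irq, void *dev_id)
{
        /* assert/deassert the reset line, reprogram clock/power/mask */
        return IRQ_HANDLED;
}

Registration then uses devm_request_threaded_irq(dev, irq, foo_hardirq, foo_thread, IRQF_SHARED, name, host), matching the probe hunk below.
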
* @datactrl_first: true if data must be setup before send command * @datacnt_useless: true if you could not use datacnt register to read * remaining data @@ -326,6 +330,8 @@ struct variant_data { unsigned int datactrl_mask_ddrmode; unsigned int datactrl_mask_sdio; unsigned int datactrl_blocksz; + u8 datactrl_any_blocksz:1; + u8 dma_power_of_2:1; u8 datactrl_first:1; u8 datacnt_useless:1; u8 st_sdio:1; @@ -404,7 +410,6 @@ struct mmci_host { struct mmci_host_ops *ops; struct variant_data *variant; struct pinctrl *pinctrl; - struct pinctrl_state *pins_default; struct pinctrl_state *pins_opendrain; u8 hw_designer; @@ -412,6 +417,7 @@ struct mmci_host { struct timer_list timer; unsigned int oldstat; + u32 irq_action; /* pio stuff */ struct sg_mapping_iter sg_miter; diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index 010fe29a4888..7726dcf48f2c 100644 --- a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c @@ -2194,8 +2194,7 @@ static int msdc_drv_probe(struct platform_device *pdev) if (ret) goto host_free; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - host->base = devm_ioremap_resource(&pdev->dev, res); + host->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(host->base)) { ret = PTR_ERR(host->base); goto host_free; diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c index 74a0a7fbbf7f..203b61712601 100644 --- a/drivers/mmc/host/mvsdio.c +++ b/drivers/mmc/host/mvsdio.c @@ -696,16 +696,14 @@ static int mvsd_probe(struct platform_device *pdev) struct mmc_host *mmc = NULL; struct mvsd_host *host = NULL; const struct mbus_dram_target_info *dram; - struct resource *r; int ret, irq; if (!np) { dev_err(&pdev->dev, "no DT node\n"); return -ENODEV; } - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); - if (!r || irq < 0) + if (irq < 0) return -ENXIO; mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev); @@ -758,7 +756,7 @@ static int mvsd_probe(struct platform_device *pdev) spin_lock_init(&host->lock); - host->base = devm_ioremap_resource(&pdev->dev, r); + host->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(host->base)) { ret = PTR_ERR(host->base); goto out; diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c index 011b59a3602e..b3d654c688e5 100644 --- a/drivers/mmc/host/mxcmmc.c +++ b/drivers/mmc/host/mxcmmc.c @@ -1121,7 +1121,16 @@ static int mxcmci_probe(struct platform_device *pdev) mxcmci_writel(host, host->default_irq_mask, MMC_REG_INT_CNTR); if (!host->pdata) { - host->dma = dma_request_slave_channel(&pdev->dev, "rx-tx"); + host->dma = dma_request_chan(&pdev->dev, "rx-tx"); + if (IS_ERR(host->dma)) { + if (PTR_ERR(host->dma) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto out_clk_put; + } + + /* Ignore errors to fall back to PIO mode */ + host->dma = NULL; + } } else { res = platform_get_resource(pdev, IORESOURCE_DMA, 0); if (res) { diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c index 4031217d21c3..d82674aed447 100644 --- a/drivers/mmc/host/mxs-mmc.c +++ b/drivers/mmc/host/mxs-mmc.c @@ -623,11 +623,11 @@ static int mxs_mmc_probe(struct platform_device *pdev) goto out_clk_disable; } - ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx"); - if (!ssp->dmach) { + ssp->dmach = dma_request_chan(&pdev->dev, "rx-tx"); + if (IS_ERR(ssp->dmach)) { dev_err(mmc_dev(host->mmc), "%s: failed to request dma\n", __func__); - ret = -ENODEV; + ret = PTR_ERR(ssp->dmach); goto out_clk_disable; } diff --git a/drivers/mmc/host/omap_hsmmc.c 
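
Several probes in this series (mtk-sd and mvsdio above, sdhci-milbeaut and sdhci-msm below) collapse a platform_get_resource() plus devm_ioremap_resource() pair into a single call. The equivalent shape, as an illustrative helper:

#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_map_regs(struct platform_device *pdev, void __iomem **base)
{
        *base = devm_platform_ioremap_resource(pdev, 0);

        /* failures come back as ERR_PTR(); 0 means mapped */
        return PTR_ERR_OR_ZERO(*base);
}
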
b/drivers/mmc/host/omap_hsmmc.c index 767e964ca5a2..a379c45b985c 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -1605,12 +1605,6 @@ static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host) ret = PTR_ERR(p); goto err_free_irq; } - if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT))) { - dev_info(host->dev, "missing default pinctrl state\n"); - devm_pinctrl_put(p); - ret = -EINVAL; - goto err_free_irq; - } if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_IDLE))) { dev_info(host->dev, "missing idle pinctrl state\n"); @@ -2153,14 +2147,14 @@ static int omap_hsmmc_runtime_resume(struct device *dev) if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) && (host->flags & HSMMC_SDIO_IRQ_ENABLED)) { - pinctrl_pm_select_default_state(host->dev); + pinctrl_select_default_state(host->dev); /* irq lost, if pinmux incorrect */ OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); OMAP_HSMMC_WRITE(host->base, ISE, CIRQ_EN); OMAP_HSMMC_WRITE(host->base, IE, CIRQ_EN); } else { - pinctrl_pm_select_default_state(host->dev); + pinctrl_select_default_state(host->dev); } spin_unlock_irqrestore(&host->irq_lock, flags); return 0; diff --git a/drivers/mmc/host/owl-mmc.c b/drivers/mmc/host/owl-mmc.c index 771e3d00f1bb..01ffe51f413d 100644 --- a/drivers/mmc/host/owl-mmc.c +++ b/drivers/mmc/host/owl-mmc.c @@ -616,10 +616,10 @@ static int owl_mmc_probe(struct platform_device *pdev) pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; - owl_host->dma = dma_request_slave_channel(&pdev->dev, "mmc"); - if (!owl_host->dma) { + owl_host->dma = dma_request_chan(&pdev->dev, "mmc"); + if (IS_ERR(owl_host->dma)) { dev_err(owl_host->dev, "Failed to get external DMA channel.\n"); - ret = -ENXIO; + ret = PTR_ERR(owl_host->dma); goto err_free_host; } diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index 024acc1b0a2e..3a9333475a2b 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c @@ -710,17 +710,19 @@ static int pxamci_probe(struct platform_device *pdev) platform_set_drvdata(pdev, mmc); - host->dma_chan_rx = dma_request_slave_channel(dev, "rx"); - if (host->dma_chan_rx == NULL) { + host->dma_chan_rx = dma_request_chan(dev, "rx"); + if (IS_ERR(host->dma_chan_rx)) { dev_err(dev, "unable to request rx dma channel\n"); - ret = -ENODEV; + ret = PTR_ERR(host->dma_chan_rx); + host->dma_chan_rx = NULL; goto out; } - host->dma_chan_tx = dma_request_slave_channel(dev, "tx"); - if (host->dma_chan_tx == NULL) { + host->dma_chan_tx = dma_request_chan(dev, "tx"); + if (IS_ERR(host->dma_chan_tx)) { dev_err(dev, "unable to request tx dma channel\n"); - ret = -ENODEV; + ret = PTR_ERR(host->dma_chan_tx); + host->dma_chan_tx = NULL; goto out; } @@ -734,22 +736,22 @@ static int pxamci_probe(struct platform_device *pdev) } /* FIXME: should we pass detection delay to debounce? */ - ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL); + ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0); if (ret && ret != -ENOENT) { dev_err(dev, "Failed requesting gpio_cd\n"); goto out; } - ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL); + if (!host->pdata->gpio_card_ro_invert) + mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; + + ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0); if (ret && ret != -ENOENT) { dev_err(dev, "Failed requesting gpio_ro\n"); goto out; } - if (!ret) { + if (!ret) host->use_ro_gpio = true; - mmc->caps2 |= host->pdata->gpio_card_ro_invert ? 
- 0 : MMC_CAP2_RO_ACTIVE_HIGH; - } if (host->pdata->init) host->pdata->init(dev, pxamci_detect_irq, mmc); diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h index c0504aa90857..f524251d5113 100644 --- a/drivers/mmc/host/renesas_sdhi.h +++ b/drivers/mmc/host/renesas_sdhi.h @@ -14,8 +14,8 @@ struct renesas_sdhi_scc { unsigned long clk_rate; /* clock rate for SDR104 */ - u32 tap; /* sampling clock position for SDR104 */ - u32 tap_hs400; /* sampling clock position for HS400 */ + u32 tap; /* sampling clock position for SDR104/HS400 (8 TAP) */ + u32 tap_hs400_4tap; /* sampling clock position for HS400 (4 TAP) */ }; struct renesas_sdhi_of_data { @@ -33,6 +33,11 @@ struct renesas_sdhi_of_data { unsigned short max_segs; }; +struct renesas_sdhi_quirks { + bool hs400_disabled; + bool hs400_4taps; +}; + struct tmio_mmc_dma { enum dma_slave_buswidth dma_buswidth; bool (*filter)(struct dma_chan *chan, void *arg); @@ -46,6 +51,7 @@ struct renesas_sdhi { struct clk *clk_cd; struct tmio_mmc_data mmc_data; struct tmio_mmc_dma dma_priv; + const struct renesas_sdhi_quirks *quirks; struct pinctrl *pinctrl; struct pinctrl_state *pins_default, *pins_uhs; void __iomem *scc_ctl; diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index 234551a68739..35cb24cd45b4 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -46,11 +46,6 @@ #define SDHI_VER_GEN3_SD 0xcc10 #define SDHI_VER_GEN3_SDMMC 0xcd10 -struct renesas_sdhi_quirks { - bool hs400_disabled; - bool hs400_4taps; -}; - static void renesas_sdhi_sdbuf_width(struct tmio_mmc_host *host, int width) { u32 val; @@ -355,7 +350,7 @@ static void renesas_sdhi_hs400_complete(struct tmio_mmc_host *host) 0x4 << SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT); - if (host->pdata->flags & TMIO_MMC_HAVE_4TAP_HS400) + if (priv->quirks && priv->quirks->hs400_4taps) sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, host->tap_set / 2); @@ -493,7 +488,7 @@ static int renesas_sdhi_select_tuning(struct tmio_mmc_host *host) static bool renesas_sdhi_check_scc_error(struct tmio_mmc_host *host) { struct renesas_sdhi *priv = host_to_priv(host); - bool use_4tap = host->pdata->flags & TMIO_MMC_HAVE_4TAP_HS400; + bool use_4tap = priv->quirks && priv->quirks->hs400_4taps; /* * Skip checking SCC errors when running on 4 taps in HS400 mode as @@ -627,10 +622,10 @@ static const struct renesas_sdhi_quirks sdhi_quirks_nohs400 = { }; static const struct soc_device_attribute sdhi_quirks_match[] = { + { .soc_id = "r8a774a1", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 }, { .soc_id = "r8a7795", .revision = "ES1.*", .data = &sdhi_quirks_4tap_nohs400 }, { .soc_id = "r8a7795", .revision = "ES2.0", .data = &sdhi_quirks_4tap }, { .soc_id = "r8a7796", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 }, - { .soc_id = "r8a774a1", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 }, { .soc_id = "r8a77980", .data = &sdhi_quirks_nohs400 }, { /* Sentinel. */ }, }; @@ -665,6 +660,7 @@ int renesas_sdhi_probe(struct platform_device *pdev, if (!priv) return -ENOMEM; + priv->quirks = quirks; mmc_data = &priv->mmc_data; dma_priv = &priv->dma_priv; @@ -724,9 +720,6 @@ int renesas_sdhi_probe(struct platform_device *pdev, if (quirks && quirks->hs400_disabled) host->mmc->caps2 &= ~(MMC_CAP2_HS400 | MMC_CAP2_HS400_ES); - if (quirks && quirks->hs400_4taps) - mmc_data->flags |= TMIO_MMC_HAVE_4TAP_HS400; - /* For some SoC, we disable internal WP. 
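
The renesas_sdhi rework above stops signalling the 4-tap case through a TMIO flag and instead resolves quirks once at probe with soc_device_match(), keyed on SoC id plus an ES-revision glob. The lookup pattern, with illustrative quirk data:

#include <linux/sys_soc.h>
#include <linux/types.h>

struct foo_quirks {
        bool hs400_4taps;
};

static const struct foo_quirks es1_quirks = { .hs400_4taps = true };

static const struct soc_device_attribute foo_quirk_match[] = {
        { .soc_id = "r8a7795", .revision = "ES1.*", .data = &es1_quirks },
        { /* sentinel */ }
};

static const struct foo_quirks *foo_lookup_quirks(void)
{
        const struct soc_device_attribute *attr =
                soc_device_match(foo_quirk_match);

        return attr ? attr->data : NULL;        /* NULL: no quirks apply */
}
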
GPIO may override this */ if (mmc_can_gpio_ro(host->mmc)) mmc_data->capabilities2 &= ~MMC_CAP2_NO_WRITE_PROTECT; @@ -800,20 +793,23 @@ int renesas_sdhi_probe(struct platform_device *pdev, host->mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V))) { const struct renesas_sdhi_scc *taps = of_data->taps; + bool use_4tap = priv->quirks && priv->quirks->hs400_4taps; bool hit = false; for (i = 0; i < of_data->taps_num; i++) { if (taps[i].clk_rate == 0 || taps[i].clk_rate == host->mmc->f_max) { priv->scc_tappos = taps->tap; - priv->scc_tappos_hs400 = taps->tap_hs400; + priv->scc_tappos_hs400 = use_4tap ? + taps->tap_hs400_4tap : + taps->tap; hit = true; break; } } if (!hit) - dev_warn(&host->pdev->dev, "Unknown clock rate for SDR104\n"); + dev_warn(&host->pdev->dev, "Unknown clock rate for tuning\n"); host->init_tuning = renesas_sdhi_init_tuning; host->prepare_tuning = renesas_sdhi_prepare_tuning; diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c index 18839a10594c..47ac53e91241 100644 --- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c @@ -82,7 +82,7 @@ static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = { { .clk_rate = 0, .tap = 0x00000300, - .tap_hs400 = 0x00000704, + .tap_hs400_4tap = 0x00000100, }, }; @@ -298,38 +298,23 @@ static const struct tmio_mmc_dma_ops renesas_sdhi_internal_dmac_dma_ops = { * Whitelist of specific R-Car Gen3 SoC ES versions to use this DMAC * implementation as others may use a different implementation. */ -static const struct soc_device_attribute soc_whitelist[] = { - /* specific ones */ +static const struct soc_device_attribute soc_dma_quirks[] = { { .soc_id = "r7s9210", .data = (void *)BIT(SDHI_INTERNAL_DMAC_ADDR_MODE_FIXED_ONLY) }, { .soc_id = "r8a7795", .revision = "ES1.*", .data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) }, { .soc_id = "r8a7796", .revision = "ES1.0", .data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) }, - /* generic ones */ - { .soc_id = "r8a774a1" }, - { .soc_id = "r8a774b1" }, - { .soc_id = "r8a774c0" }, - { .soc_id = "r8a77470" }, - { .soc_id = "r8a7795" }, - { .soc_id = "r8a7796" }, - { .soc_id = "r8a77965" }, - { .soc_id = "r8a77970" }, - { .soc_id = "r8a77980" }, - { .soc_id = "r8a77990" }, - { .soc_id = "r8a77995" }, { /* sentinel */ } }; static int renesas_sdhi_internal_dmac_probe(struct platform_device *pdev) { - const struct soc_device_attribute *soc = soc_device_match(soc_whitelist); + const struct soc_device_attribute *soc = soc_device_match(soc_dma_quirks); struct device *dev = &pdev->dev; - if (!soc) - return -ENODEV; - - global_flags |= (unsigned long)soc->data; + if (soc) + global_flags |= (unsigned long)soc->data; dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL); if (!dev->dma_parms) diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c index bce9c33bc4b5..1e616ae56b13 100644 --- a/drivers/mmc/host/s3cmci.c +++ b/drivers/mmc/host/s3cmci.c @@ -1505,14 +1505,14 @@ static int s3cmci_probe_pdata(struct s3cmci_host *host) mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; /* If we get -ENOENT we have no card detect GPIO line */ - ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL); + ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0); if (ret != -ENOENT) { dev_err(&pdev->dev, "error requesting GPIO for CD %d\n", ret); return ret; } - ret = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0, NULL); + ret = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0); if (ret != -ENOENT) { dev_err(&pdev->dev, 
"error requesting GPIO for WP %d\n", ret); diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index 105e73d4a3b9..9651dca6863e 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c @@ -719,7 +719,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev) goto err_free; } - host->ioaddr = devm_ioremap_nocache(dev, iomem->start, + host->ioaddr = devm_ioremap(dev, iomem->start, resource_size(iomem)); if (host->ioaddr == NULL) { err = -ENOMEM; @@ -752,7 +752,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev) if (sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD)) { bool v = sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL); - err = mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0, NULL); + err = mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0); if (err) { if (err == -EPROBE_DEFER) goto err_free; diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c index 73bb440aaf93..ad01f6451a95 100644 --- a/drivers/mmc/host/sdhci-brcmstb.c +++ b/drivers/mmc/host/sdhci-brcmstb.c @@ -9,29 +9,236 @@ #include <linux/mmc/host.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/bitops.h> +#include <linux/delay.h> #include "sdhci-pltfm.h" +#include "cqhci.h" -static const struct sdhci_ops sdhci_brcmstb_ops = { +#define SDHCI_VENDOR 0x78 +#define SDHCI_VENDOR_ENHANCED_STRB 0x1 + +#define BRCMSTB_PRIV_FLAGS_NO_64BIT BIT(0) +#define BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT BIT(1) + +#define SDHCI_ARASAN_CQE_BASE_ADDR 0x200 + +struct sdhci_brcmstb_priv { + void __iomem *cfg_regs; + bool has_cqe; +}; + +struct brcmstb_match_priv { + void (*hs400es)(struct mmc_host *mmc, struct mmc_ios *ios); + struct sdhci_ops *ops; + unsigned int flags; +}; + +static void sdhci_brcmstb_hs400es(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct sdhci_host *host = mmc_priv(mmc); + + u32 reg; + + dev_dbg(mmc_dev(mmc), "%s(): Setting HS400-Enhanced-Strobe mode\n", + __func__); + reg = readl(host->ioaddr + SDHCI_VENDOR); + if (ios->enhanced_strobe) + reg |= SDHCI_VENDOR_ENHANCED_STRB; + else + reg &= ~SDHCI_VENDOR_ENHANCED_STRB; + writel(reg, host->ioaddr + SDHCI_VENDOR); +} + +static void sdhci_brcmstb_set_clock(struct sdhci_host *host, unsigned int clock) +{ + u16 clk; + + host->mmc->actual_clock = 0; + + clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock); + sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); + + if (clock == 0) + return; + + sdhci_enable_clk(host, clk); +} + +static void sdhci_brcmstb_set_uhs_signaling(struct sdhci_host *host, + unsigned int timing) +{ + u16 ctrl_2; + + dev_dbg(mmc_dev(host->mmc), "%s: Setting UHS signaling for %d timing\n", + __func__, timing); + ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); + /* Select Bus Speed Mode for host */ + ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; + if ((timing == MMC_TIMING_MMC_HS200) || + (timing == MMC_TIMING_UHS_SDR104)) + ctrl_2 |= SDHCI_CTRL_UHS_SDR104; + else if (timing == MMC_TIMING_UHS_SDR12) + ctrl_2 |= SDHCI_CTRL_UHS_SDR12; + else if (timing == MMC_TIMING_SD_HS || + timing == MMC_TIMING_MMC_HS || + timing == MMC_TIMING_UHS_SDR25) + ctrl_2 |= SDHCI_CTRL_UHS_SDR25; + else if (timing == MMC_TIMING_UHS_SDR50) + ctrl_2 |= SDHCI_CTRL_UHS_SDR50; + else if ((timing == MMC_TIMING_UHS_DDR50) || + (timing == MMC_TIMING_MMC_DDR52)) + ctrl_2 |= SDHCI_CTRL_UHS_DDR50; + else if (timing == MMC_TIMING_MMC_HS400) + ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */ + sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); +} + +static void sdhci_brcmstb_dumpregs(struct mmc_host *mmc) +{ + 
sdhci_dumpregs(mmc_priv(mmc)); +} + +static void sdhci_brcmstb_cqe_enable(struct mmc_host *mmc) +{ + struct sdhci_host *host = mmc_priv(mmc); + u32 reg; + + reg = sdhci_readl(host, SDHCI_PRESENT_STATE); + while (reg & SDHCI_DATA_AVAILABLE) { + sdhci_readl(host, SDHCI_BUFFER); + reg = sdhci_readl(host, SDHCI_PRESENT_STATE); + } + + sdhci_cqe_enable(mmc); +} + +static const struct cqhci_host_ops sdhci_brcmstb_cqhci_ops = { + .enable = sdhci_brcmstb_cqe_enable, + .disable = sdhci_cqe_disable, + .dumpregs = sdhci_brcmstb_dumpregs, +}; + +static struct sdhci_ops sdhci_brcmstb_ops = { .set_clock = sdhci_set_clock, .set_bus_width = sdhci_set_bus_width, .reset = sdhci_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, }; -static const struct sdhci_pltfm_data sdhci_brcmstb_pdata = { +static struct sdhci_ops sdhci_brcmstb_ops_7216 = { + .set_clock = sdhci_brcmstb_set_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_brcmstb_set_uhs_signaling, +}; + +static struct brcmstb_match_priv match_priv_7425 = { + .flags = BRCMSTB_PRIV_FLAGS_NO_64BIT | + BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT, .ops = &sdhci_brcmstb_ops, }; +static struct brcmstb_match_priv match_priv_7445 = { + .flags = BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT, + .ops = &sdhci_brcmstb_ops, +}; + +static const struct brcmstb_match_priv match_priv_7216 = { + .hs400es = sdhci_brcmstb_hs400es, + .ops = &sdhci_brcmstb_ops_7216, +}; + +static const struct of_device_id sdhci_brcm_of_match[] = { + { .compatible = "brcm,bcm7425-sdhci", .data = &match_priv_7425 }, + { .compatible = "brcm,bcm7445-sdhci", .data = &match_priv_7445 }, + { .compatible = "brcm,bcm7216-sdhci", .data = &match_priv_7216 }, + {}, +}; + +static u32 sdhci_brcmstb_cqhci_irq(struct sdhci_host *host, u32 intmask) +{ + int cmd_error = 0; + int data_error = 0; + + if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error)) + return intmask; + + cqhci_irq(host->mmc, intmask, cmd_error, data_error); + + return 0; +} + +static int sdhci_brcmstb_add_host(struct sdhci_host *host, + struct sdhci_brcmstb_priv *priv) +{ + struct cqhci_host *cq_host; + bool dma64; + int ret; + + if (!priv->has_cqe) + return sdhci_add_host(host); + + dev_dbg(mmc_dev(host->mmc), "CQE is enabled\n"); + host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD; + ret = sdhci_setup_host(host); + if (ret) + return ret; + + cq_host = devm_kzalloc(mmc_dev(host->mmc), + sizeof(*cq_host), GFP_KERNEL); + if (!cq_host) { + ret = -ENOMEM; + goto cleanup; + } + + cq_host->mmio = host->ioaddr + SDHCI_ARASAN_CQE_BASE_ADDR; + cq_host->ops = &sdhci_brcmstb_cqhci_ops; + + dma64 = host->flags & SDHCI_USE_64_BIT_DMA; + if (dma64) { + dev_dbg(mmc_dev(host->mmc), "Using 64 bit DMA\n"); + cq_host->caps |= CQHCI_TASK_DESC_SZ_128; + cq_host->quirks |= CQHCI_QUIRK_SHORT_TXFR_DESC_SZ; + } + + ret = cqhci_init(cq_host, host->mmc, dma64); + if (ret) + goto cleanup; + + ret = __sdhci_add_host(host); + if (ret) + goto cleanup; + + return 0; + +cleanup: + sdhci_cleanup_host(host); + return ret; +} + static int sdhci_brcmstb_probe(struct platform_device *pdev) { - struct sdhci_host *host; + const struct brcmstb_match_priv *match_priv; + struct sdhci_pltfm_data brcmstb_pdata; struct sdhci_pltfm_host *pltfm_host; + const struct of_device_id *match; + struct sdhci_brcmstb_priv *priv; + struct sdhci_host *host; + struct resource *iomem; + bool has_cqe = false; struct clk *clk; int res; + match = of_match_node(sdhci_brcm_of_match, pdev->dev.of_node); + match_priv = match->data; + + dev_dbg(&pdev->dev, "Probe found 
match for %s\n", match->compatible); + clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(clk)) { + if (PTR_ERR(clk) == -EPROBE_DEFER) + return -EPROBE_DEFER; dev_err(&pdev->dev, "Clock not found in Device Tree\n"); clk = NULL; } @@ -39,36 +246,64 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev) if (res) return res; - host = sdhci_pltfm_init(pdev, &sdhci_brcmstb_pdata, 0); + memset(&brcmstb_pdata, 0, sizeof(brcmstb_pdata)); + if (device_property_read_bool(&pdev->dev, "supports-cqe")) { + has_cqe = true; + match_priv->ops->irq = sdhci_brcmstb_cqhci_irq; + } + brcmstb_pdata.ops = match_priv->ops; + host = sdhci_pltfm_init(pdev, &brcmstb_pdata, + sizeof(struct sdhci_brcmstb_priv)); if (IS_ERR(host)) { res = PTR_ERR(host); goto err_clk; } + pltfm_host = sdhci_priv(host); + priv = sdhci_pltfm_priv(pltfm_host); + priv->has_cqe = has_cqe; + + /* Map in the non-standard CFG registers */ + iomem = platform_get_resource(pdev, IORESOURCE_MEM, 1); + priv->cfg_regs = devm_ioremap_resource(&pdev->dev, iomem); + if (IS_ERR(priv->cfg_regs)) { + res = PTR_ERR(priv->cfg_regs); + goto err; + } + sdhci_get_of_property(pdev); res = mmc_of_parse(host->mmc); if (res) goto err; /* + * If the chip has enhanced strobe and it's enabled, add + * callback + */ + if (match_priv->hs400es && + (host->mmc->caps2 & MMC_CAP2_HS400_ES)) + host->mmc_host_ops.hs400_enhanced_strobe = match_priv->hs400es; + + /* * Supply the existing CAPS, but clear the UHS modes. This * will allow these modes to be specified by device tree * properties through mmc_of_parse(). */ host->caps = sdhci_readl(host, SDHCI_CAPABILITIES); - if (of_device_is_compatible(pdev->dev.of_node, "brcm,bcm7425-sdhci")) + if (match_priv->flags & BRCMSTB_PRIV_FLAGS_NO_64BIT) host->caps &= ~SDHCI_CAN_64BIT; host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1); host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 | - SDHCI_SUPPORT_DDR50); - host->quirks |= SDHCI_QUIRK_MISSING_CAPS | - SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; + SDHCI_SUPPORT_DDR50); + host->quirks |= SDHCI_QUIRK_MISSING_CAPS; + + if (match_priv->flags & BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT) + host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; - res = sdhci_add_host(host); + res = sdhci_brcmstb_add_host(host, priv); if (res) goto err; - pltfm_host = sdhci_priv(host); pltfm_host->clk = clk; return res; @@ -79,11 +314,15 @@ err_clk: return res; } -static const struct of_device_id sdhci_brcm_of_match[] = { - { .compatible = "brcm,bcm7425-sdhci" }, - { .compatible = "brcm,bcm7445-sdhci" }, - {}, -}; +static void sdhci_brcmstb_shutdown(struct platform_device *pdev) +{ + int ret; + + ret = sdhci_pltfm_unregister(pdev); + if (ret) + dev_err(&pdev->dev, "failed to shutdown\n"); +} + MODULE_DEVICE_TABLE(of, sdhci_brcm_of_match); static struct platform_driver sdhci_brcmstb_driver = { @@ -94,6 +333,7 @@ static struct platform_driver sdhci_brcmstb_driver = { }, .probe = sdhci_brcmstb_probe, .remove = sdhci_pltfm_unregister, + .shutdown = sdhci_brcmstb_shutdown, }; module_platform_driver(sdhci_brcmstb_driver); diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c index ae0ec27dd7cc..5827d3751b81 100644 --- a/drivers/mmc/host/sdhci-cadence.c +++ b/drivers/mmc/host/sdhci-cadence.c @@ -158,7 +158,7 @@ static int sdhci_cdns_phy_init(struct sdhci_cdns_priv *priv) return 0; } -static inline void *sdhci_cdns_priv(struct sdhci_host *host) +static void *sdhci_cdns_priv(struct sdhci_host *host) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); diff --git 
a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 1c988d6a2433..382f25b2fa45 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -224,7 +224,6 @@ static struct esdhc_soc_data usdhc_imx8qxp_data = { struct pltfm_imx_data { u32 scratchpad; struct pinctrl *pinctrl; - struct pinctrl_state *pins_default; struct pinctrl_state *pins_100mhz; struct pinctrl_state *pins_200mhz; const struct esdhc_soc_data *socdata; @@ -951,7 +950,6 @@ static int esdhc_change_pinstate(struct sdhci_host *host, dev_dbg(mmc_dev(host->mmc), "change pinctrl state for uhs %d\n", uhs); if (IS_ERR(imx_data->pinctrl) || - IS_ERR(imx_data->pins_default) || IS_ERR(imx_data->pins_100mhz) || IS_ERR(imx_data->pins_200mhz)) return -EINVAL; @@ -968,7 +966,7 @@ static int esdhc_change_pinstate(struct sdhci_host *host, break; default: /* back to default state for other legacy timing */ - pinctrl = imx_data->pins_default; + return pinctrl_select_default_state(mmc_dev(host->mmc)); } return pinctrl_select_state(imx_data->pinctrl, pinctrl); @@ -1338,7 +1336,7 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, mmc_of_parse_voltage(np, &host->ocr_mask); - if (esdhc_is_usdhc(imx_data) && !IS_ERR(imx_data->pins_default)) { + if (esdhc_is_usdhc(imx_data)) { imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl, ESDHC_PINCTRL_STATE_100MHZ); imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl, @@ -1381,19 +1379,20 @@ static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev, host->mmc->parent->platform_data); /* write_protect */ if (boarddata->wp_type == ESDHC_WP_GPIO) { - err = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0, NULL); + host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; + + err = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0); if (err) { dev_err(mmc_dev(host->mmc), "failed to request write-protect gpio!\n"); return err; } - host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; } /* card_detect */ switch (boarddata->cd_type) { case ESDHC_CD_GPIO: - err = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0, NULL); + err = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0); if (err) { dev_err(mmc_dev(host->mmc), "failed to request card-detect gpio!\n"); @@ -1492,11 +1491,6 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) goto disable_ahb_clk; } - imx_data->pins_default = pinctrl_lookup_state(imx_data->pinctrl, - PINCTRL_STATE_DEFAULT); - if (IS_ERR(imx_data->pins_default)) - dev_warn(mmc_dev(host->mmc), "could not get default state\n"); - if (esdhc_is_usdhc(imx_data)) { host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN; host->mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR; diff --git a/drivers/mmc/host/sdhci-milbeaut.c b/drivers/mmc/host/sdhci-milbeaut.c index a1aa21b9ae1c..92f30a1db435 100644 --- a/drivers/mmc/host/sdhci-milbeaut.c +++ b/drivers/mmc/host/sdhci-milbeaut.c @@ -242,15 +242,12 @@ static int sdhci_milbeaut_probe(struct platform_device *pdev) { struct sdhci_host *host; struct device *dev = &pdev->dev; - struct resource *res; int irq, ret = 0; struct f_sdhost_priv *priv; irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "%s: no irq specified\n", __func__); + if (irq < 0) return irq; - } host = sdhci_alloc_host(dev, sizeof(struct f_sdhost_priv)); if (IS_ERR(host)) @@ -280,8 +277,7 @@ static int sdhci_milbeaut_probe(struct platform_device *pdev) host->ops = &sdhci_milbeaut_ops; host->irq = irq; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - host->ioaddr = devm_ioremap_resource(&pdev->dev, res); + 
host->ioaddr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(host->ioaddr)) { ret = PTR_ERR(host->ioaddr); goto err; diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 3d0bb5e2e09b..c3a160c18047 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -15,6 +15,7 @@ #include <linux/regulator/consumer.h> #include "sdhci-pltfm.h" +#include "cqhci.h" #define CORE_MCI_VERSION 0x50 #define CORE_VERSION_MAJOR_SHIFT 28 @@ -122,6 +123,10 @@ #define msm_host_writel(msm_host, val, host, offset) \ msm_host->var_ops->msm_writel_relaxed(val, host, offset) +/* CQHCI vendor specific registers */ +#define CQHCI_VENDOR_CFG1 0xA00 +#define CQHCI_VENDOR_DIS_RST_ON_CQ_EN (0x3 << 13) + struct sdhci_msm_offset { u32 core_hc_mode; u32 core_mci_data_cnt; @@ -1567,6 +1572,127 @@ out: __sdhci_msm_set_clock(host, clock); } +/*****************************************************************************\ + * * + * MSM Command Queue Engine (CQE) * + * * +\*****************************************************************************/ + +static u32 sdhci_msm_cqe_irq(struct sdhci_host *host, u32 intmask) +{ + int cmd_error = 0; + int data_error = 0; + + if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error)) + return intmask; + + cqhci_irq(host->mmc, intmask, cmd_error, data_error); + return 0; +} + +void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery) +{ + struct sdhci_host *host = mmc_priv(mmc); + unsigned long flags; + u32 ctrl; + + /* + * When CQE is halted, the legacy SDHCI path operates only + * on 16-byte descriptors in 64bit mode. + */ + if (host->flags & SDHCI_USE_64_BIT_DMA) + host->desc_sz = 16; + + spin_lock_irqsave(&host->lock, flags); + + /* + * During CQE command transfers, command complete bit gets latched. + * So s/w should clear command complete interrupt status when CQE is + * either halted or disabled. Otherwise unexpected SDCHI legacy + * interrupt gets triggered when CQE is halted/disabled. + */ + ctrl = sdhci_readl(host, SDHCI_INT_ENABLE); + ctrl |= SDHCI_INT_RESPONSE; + sdhci_writel(host, ctrl, SDHCI_INT_ENABLE); + sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS); + + spin_unlock_irqrestore(&host->lock, flags); + + sdhci_cqe_disable(mmc, recovery); +} + +static const struct cqhci_host_ops sdhci_msm_cqhci_ops = { + .enable = sdhci_cqe_enable, + .disable = sdhci_msm_cqe_disable, +}; + +static int sdhci_msm_cqe_add_host(struct sdhci_host *host, + struct platform_device *pdev) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + struct cqhci_host *cq_host; + bool dma64; + u32 cqcfg; + int ret; + + /* + * When CQE is halted, SDHC operates only on 16byte ADMA descriptors. + * So ensure ADMA table is allocated for 16byte descriptors. 
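
The comments above describe a descriptor-size constraint peculiar to this controller: 12-byte ADMA descriptors until CQE is enabled, 16-byte descriptors whenever CQE is halted, with the allocation sized for the larger case up front. Condensed into one illustrative helper built on the sdhci_host fields the hunks touch:

#include "sdhci.h"      /* driver-local header, as in the hunks above */

static void msm_adjust_desc_sz(struct sdhci_host *host, bool cqe_halted)
{
        if (!(host->flags & SDHCI_USE_64_BIT_DMA))
                return;

        /* alloc_desc_sz stays 16, so either size fits the same table */
        host->desc_sz = cqe_halted ? 16 : 12;
}
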
+ */ + if (host->caps & SDHCI_CAN_64BIT) + host->alloc_desc_sz = 16; + + ret = sdhci_setup_host(host); + if (ret) + return ret; + + cq_host = cqhci_pltfm_init(pdev); + if (IS_ERR(cq_host)) { + ret = PTR_ERR(cq_host); + dev_err(&pdev->dev, "cqhci-pltfm init: failed: %d\n", ret); + goto cleanup; + } + + msm_host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD; + cq_host->ops = &sdhci_msm_cqhci_ops; + + dma64 = host->flags & SDHCI_USE_64_BIT_DMA; + + ret = cqhci_init(cq_host, host->mmc, dma64); + if (ret) { + dev_err(&pdev->dev, "%s: CQE init: failed (%d)\n", + mmc_hostname(host->mmc), ret); + goto cleanup; + } + + /* Disable cqe reset due to cqe enable signal */ + cqcfg = cqhci_readl(cq_host, CQHCI_VENDOR_CFG1); + cqcfg |= CQHCI_VENDOR_DIS_RST_ON_CQ_EN; + cqhci_writel(cq_host, cqcfg, CQHCI_VENDOR_CFG1); + + /* + * SDHC expects 12byte ADMA descriptors till CQE is enabled. + * So limit desc_sz to 12 so that the data commands that are sent + * during card initialization (before CQE gets enabled) would + * get executed without any issues. + */ + if (host->flags & SDHCI_USE_64_BIT_DMA) + host->desc_sz = 12; + + ret = __sdhci_add_host(host); + if (ret) + goto cleanup; + + dev_info(&pdev->dev, "%s: CQE init: success\n", + mmc_hostname(host->mmc)); + return ret; + +cleanup: + sdhci_cleanup_host(host); + return ret; +} + /* * Platform specific register write functions. This is so that, if any * register write needs to be followed up by platform specific actions, @@ -1731,6 +1857,7 @@ static const struct sdhci_ops sdhci_msm_ops = { .set_uhs_signaling = sdhci_msm_set_uhs_signaling, .write_w = sdhci_msm_writew, .write_b = sdhci_msm_writeb, + .irq = sdhci_msm_cqe_irq, }; static const struct sdhci_pltfm_data sdhci_msm_pdata = { @@ -1746,7 +1873,6 @@ static int sdhci_msm_probe(struct platform_device *pdev) struct sdhci_host *host; struct sdhci_pltfm_host *pltfm_host; struct sdhci_msm_host *msm_host; - struct resource *core_memres; struct clk *clk; int ret; u16 host_version, core_minor; @@ -1754,6 +1880,7 @@ static int sdhci_msm_probe(struct platform_device *pdev) u8 core_major; const struct sdhci_msm_offset *msm_offset; const struct sdhci_msm_variant_info *var_info; + struct device_node *node = pdev->dev.of_node; host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host)); if (IS_ERR(host)) @@ -1847,10 +1974,7 @@ static int sdhci_msm_probe(struct platform_device *pdev) } if (!msm_host->mci_removed) { - core_memres = platform_get_resource(pdev, IORESOURCE_MEM, 1); - msm_host->core_mem = devm_ioremap_resource(&pdev->dev, - core_memres); - + msm_host->core_mem = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(msm_host->core_mem)) { ret = PTR_ERR(msm_host->core_mem); goto clk_disable; @@ -1952,7 +2076,10 @@ static int sdhci_msm_probe(struct platform_device *pdev) pm_runtime_use_autosuspend(&pdev->dev); host->mmc_host_ops.execute_tuning = sdhci_msm_execute_tuning; - ret = sdhci_add_host(host); + if (of_property_read_bool(node, "supports-cqe")) + ret = sdhci_msm_cqe_add_host(host, pdev); + else + ret = sdhci_add_host(host); if (ret) goto pm_runtime_disable; sdhci_msm_set_regulator_caps(msm_host); diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c index 5959e394b416..ab2bd314a390 100644 --- a/drivers/mmc/host/sdhci-of-at91.c +++ b/drivers/mmc/host/sdhci-of-at91.c @@ -33,7 +33,14 @@ #define SDHCI_AT91_PRESET_COMMON_CONF 0x400 /* drv type B, programmable clock mode */ +struct sdhci_at91_soc_data { + const struct sdhci_pltfm_data *pdata; + bool 
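
Both CQE-enabled hosts in this series, sdhci-brcmstb earlier and sdhci-msm here, install the same interrupt dispatch: the SDHCI core classifies the interrupt first, and only CQE interrupts are forwarded to the cqhci layer. The shared shape:

#include "sdhci.h"      /* driver-local headers, as used above */
#include "cqhci.h"

static u32 foo_cqhci_irq(struct sdhci_host *host, u32 intmask)
{
        int cmd_error = 0;
        int data_error = 0;

        if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
                return intmask;         /* not CQE-related: legacy path */

        cqhci_irq(host->mmc, intmask, cmd_error, data_error);
        return 0;                       /* fully handled */
}
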
baseclk_is_generated_internally; + unsigned int divider_for_baseclk; +}; + struct sdhci_at91_priv { + const struct sdhci_at91_soc_data *soc_data; struct clk *hclock; struct clk *gck; struct clk *mainck; @@ -141,12 +148,24 @@ static const struct sdhci_ops sdhci_at91_sama5d2_ops = { .set_power = sdhci_at91_set_power, }; -static const struct sdhci_pltfm_data soc_data_sama5d2 = { +static const struct sdhci_pltfm_data sdhci_sama5d2_pdata = { .ops = &sdhci_at91_sama5d2_ops, }; +static const struct sdhci_at91_soc_data soc_data_sama5d2 = { + .pdata = &sdhci_sama5d2_pdata, + .baseclk_is_generated_internally = false, +}; + +static const struct sdhci_at91_soc_data soc_data_sam9x60 = { + .pdata = &sdhci_sama5d2_pdata, + .baseclk_is_generated_internally = true, + .divider_for_baseclk = 2, +}; + static const struct of_device_id sdhci_at91_dt_match[] = { { .compatible = "atmel,sama5d2-sdhci", .data = &soc_data_sama5d2 }, + { .compatible = "microchip,sam9x60-sdhci", .data = &soc_data_sam9x60 }, {} }; MODULE_DEVICE_TABLE(of, sdhci_at91_dt_match); @@ -156,50 +175,37 @@ static int sdhci_at91_set_clks_presets(struct device *dev) struct sdhci_host *host = dev_get_drvdata(dev); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host); - int ret; unsigned int caps0, caps1; unsigned int clk_base, clk_mul; - unsigned int gck_rate, real_gck_rate; + unsigned int gck_rate, clk_base_rate; unsigned int preset_div; - /* - * The mult clock is provided by as a generated clock by the PMC - * controller. In order to set the rate of gck, we have to get the - * base clock rate and the clock mult from capabilities. - */ clk_prepare_enable(priv->hclock); caps0 = readl(host->ioaddr + SDHCI_CAPABILITIES); caps1 = readl(host->ioaddr + SDHCI_CAPABILITIES_1); - clk_base = (caps0 & SDHCI_CLOCK_V3_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT; - clk_mul = (caps1 & SDHCI_CLOCK_MUL_MASK) >> SDHCI_CLOCK_MUL_SHIFT; - gck_rate = clk_base * 1000000 * (clk_mul + 1); - ret = clk_set_rate(priv->gck, gck_rate); - if (ret < 0) { - dev_err(dev, "failed to set gck"); - clk_disable_unprepare(priv->hclock); - return ret; - } - /* - * We need to check if we have the requested rate for gck because in - * some cases this rate could be not supported. If it happens, the rate - * is the closest one gck can provide. We have to update the value - * of clk mul. - */ - real_gck_rate = clk_get_rate(priv->gck); - if (real_gck_rate != gck_rate) { - clk_mul = real_gck_rate / (clk_base * 1000000) - 1; - caps1 &= (~SDHCI_CLOCK_MUL_MASK); - caps1 |= ((clk_mul << SDHCI_CLOCK_MUL_SHIFT) & - SDHCI_CLOCK_MUL_MASK); - /* Set capabilities in r/w mode. */ - writel(SDMMC_CACR_KEY | SDMMC_CACR_CAPWREN, - host->ioaddr + SDMMC_CACR); - writel(caps1, host->ioaddr + SDHCI_CAPABILITIES_1); - /* Set capabilities in ro mode. */ - writel(0, host->ioaddr + SDMMC_CACR); - dev_info(dev, "update clk mul to %u as gck rate is %u Hz\n", - clk_mul, real_gck_rate); - } + + gck_rate = clk_get_rate(priv->gck); + if (priv->soc_data->baseclk_is_generated_internally) + clk_base_rate = gck_rate / priv->soc_data->divider_for_baseclk; + else + clk_base_rate = clk_get_rate(priv->mainck); + + clk_base = clk_base_rate / 1000000; + clk_mul = gck_rate / clk_base_rate - 1; + + caps0 &= ~SDHCI_CLOCK_V3_BASE_MASK; + caps0 |= (clk_base << SDHCI_CLOCK_BASE_SHIFT) & SDHCI_CLOCK_V3_BASE_MASK; + caps1 &= ~SDHCI_CLOCK_MUL_MASK; + caps1 |= (clk_mul << SDHCI_CLOCK_MUL_SHIFT) & SDHCI_CLOCK_MUL_MASK; + /* Set capabilities in r/w mode. 
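[Annotation] With the base clock now either measured from "baseclk" (sama5d2) or derived from gck (sam9x60), the capability fields follow directly from the two rates. A worked example with hypothetical rates, not taken from any datasheet:

/* Hypothetical numbers, for illustration only */
unsigned int gck_rate      = 240000000;			/* 240 MHz generated clock */
unsigned int clk_base_rate = gck_rate / 2;		/* sam9x60: internal /2 divider */
unsigned int clk_base      = clk_base_rate / 1000000;	/* = 120; capability field is in MHz */
unsigned int clk_mul       = gck_rate / clk_base_rate - 1;	/* = 1 */

Both derived values are then patched into the normally read-only capability registers through the SDMMC_CACR write-enable sequence, so the SDHCI core's clock code sees a base clock and multiplier consistent with the rates actually programmed.
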
*/ + writel(SDMMC_CACR_KEY | SDMMC_CACR_CAPWREN, host->ioaddr + SDMMC_CACR); + writel(caps0, host->ioaddr + SDHCI_CAPABILITIES); + writel(caps1, host->ioaddr + SDHCI_CAPABILITIES_1); + /* Set capabilities in ro mode. */ + writel(0, host->ioaddr + SDMMC_CACR); + + dev_info(dev, "update clk mul to %u as gck rate is %u Hz and clk base is %u Hz\n", + clk_mul, gck_rate, clk_base_rate); /* * We have to set preset values because it depends on the clk_mul @@ -207,19 +213,19 @@ static int sdhci_at91_set_clks_presets(struct device *dev) * maximum sd clock value is 120 MHz instead of 208 MHz. For that * reason, we need to use presets to support SDR104. */ - preset_div = DIV_ROUND_UP(real_gck_rate, 24000000) - 1; + preset_div = DIV_ROUND_UP(gck_rate, 24000000) - 1; writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, host->ioaddr + SDHCI_PRESET_FOR_SDR12); - preset_div = DIV_ROUND_UP(real_gck_rate, 50000000) - 1; + preset_div = DIV_ROUND_UP(gck_rate, 50000000) - 1; writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, host->ioaddr + SDHCI_PRESET_FOR_SDR25); - preset_div = DIV_ROUND_UP(real_gck_rate, 100000000) - 1; + preset_div = DIV_ROUND_UP(gck_rate, 100000000) - 1; writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, host->ioaddr + SDHCI_PRESET_FOR_SDR50); - preset_div = DIV_ROUND_UP(real_gck_rate, 120000000) - 1; + preset_div = DIV_ROUND_UP(gck_rate, 120000000) - 1; writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, host->ioaddr + SDHCI_PRESET_FOR_SDR104); - preset_div = DIV_ROUND_UP(real_gck_rate, 50000000) - 1; + preset_div = DIV_ROUND_UP(gck_rate, 50000000) - 1; writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, host->ioaddr + SDHCI_PRESET_FOR_DDR50); @@ -314,7 +320,7 @@ static const struct dev_pm_ops sdhci_at91_dev_pm_ops = { static int sdhci_at91_probe(struct platform_device *pdev) { const struct of_device_id *match; - const struct sdhci_pltfm_data *soc_data; + const struct sdhci_at91_soc_data *soc_data; struct sdhci_host *host; struct sdhci_pltfm_host *pltfm_host; struct sdhci_at91_priv *priv; @@ -325,29 +331,37 @@ static int sdhci_at91_probe(struct platform_device *pdev) return -EINVAL; soc_data = match->data; - host = sdhci_pltfm_init(pdev, soc_data, sizeof(*priv)); + host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*priv)); if (IS_ERR(host)) return PTR_ERR(host); pltfm_host = sdhci_priv(host); priv = sdhci_pltfm_priv(pltfm_host); + priv->soc_data = soc_data; priv->mainck = devm_clk_get(&pdev->dev, "baseclk"); if (IS_ERR(priv->mainck)) { - dev_err(&pdev->dev, "failed to get baseclk\n"); - return PTR_ERR(priv->mainck); + if (soc_data->baseclk_is_generated_internally) { + priv->mainck = NULL; + } else { + dev_err(&pdev->dev, "failed to get baseclk\n"); + ret = PTR_ERR(priv->mainck); + goto sdhci_pltfm_free; + } } priv->hclock = devm_clk_get(&pdev->dev, "hclock"); if (IS_ERR(priv->hclock)) { dev_err(&pdev->dev, "failed to get hclock\n"); - return PTR_ERR(priv->hclock); + ret = PTR_ERR(priv->hclock); + goto sdhci_pltfm_free; } priv->gck = devm_clk_get(&pdev->dev, "multclk"); if (IS_ERR(priv->gck)) { dev_err(&pdev->dev, "failed to get multclk\n"); - return PTR_ERR(priv->gck); + ret = PTR_ERR(priv->gck); + goto sdhci_pltfm_free; } ret = sdhci_at91_set_clks_presets(&pdev->dev); diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index 500f70a6ee42..5d8dd870bd44 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -173,6 +173,9 @@ static u16 esdhc_readw_fixup(struct sdhci_host *host, u16 ret; int shift = (spec_reg & 0x2) * 8; + if 
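[Annotation] The preset registers encode a divisor against that same gck. Sticking with the hypothetical 240 MHz rate from the note above, the writes in this hunk would produce:

/* preset_div = DIV_ROUND_UP(gck_rate, target_rate) - 1 */
SDR12  (target  24 MHz): DIV_ROUND_UP(240, 24)  - 1 = 9
SDR25  (target  50 MHz): DIV_ROUND_UP(240, 50)  - 1 = 4
SDR50  (target 100 MHz): DIV_ROUND_UP(240, 100) - 1 = 2
SDR104 (target 120 MHz): DIV_ROUND_UP(240, 120) - 1 = 1

(DDR50 reuses the 50 MHz value.) The 120 MHz target for SDR104 reflects the driver's own comment: this controller caps the SD clock below the nominal 208 MHz, which is exactly why presets are required to support SDR104 at all.
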
(spec_reg == SDHCI_TRANSFER_MODE) + return pltfm_host->xfer_mode_shadow; + if (spec_reg == SDHCI_HOST_VERSION) ret = value & 0xffff; else @@ -562,32 +565,46 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host) static void esdhc_clock_enable(struct sdhci_host *host, bool enable) { - u32 val; + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); ktime_t timeout; + u32 val, clk_en; + + clk_en = ESDHC_CLOCK_SDCLKEN; + + /* + * IPGEN/HCKEN/PEREN bits exist on eSDHC whose vendor version + * is 2.2 or lower. + */ + if (esdhc->vendor_ver <= VENDOR_V_22) + clk_en |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | + ESDHC_CLOCK_PEREN); val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); if (enable) - val |= ESDHC_CLOCK_SDCLKEN; + val |= clk_en; else - val &= ~ESDHC_CLOCK_SDCLKEN; + val &= ~clk_en; sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL); - /* Wait max 20 ms */ + /* + * Wait max 20 ms. If vendor version is 2.2 or lower, do not + * wait clock stable bit which does not exist. + */ timeout = ktime_add_ms(ktime_get(), 20); - val = ESDHC_CLOCK_STABLE; - while (1) { + while (esdhc->vendor_ver > VENDOR_V_22) { bool timedout = ktime_after(ktime_get(), timeout); - if (sdhci_readl(host, ESDHC_PRSSTAT) & val) + if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE) break; if (timedout) { pr_err("%s: Internal clock never stabilised.\n", mmc_hostname(host->mmc)); break; } - udelay(10); + usleep_range(10, 20); } } @@ -621,77 +638,97 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); - int pre_div = 1; - int div = 1; - int division; + unsigned int pre_div = 1, div = 1; + unsigned int clock_fixup = 0; ktime_t timeout; - long fixup = 0; u32 temp; - host->mmc->actual_clock = 0; - if (clock == 0) { + host->mmc->actual_clock = 0; esdhc_clock_enable(host, false); return; } - /* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */ + /* Start pre_div at 2 for vendor version < 2.3. */ if (esdhc->vendor_ver < VENDOR_V_23) pre_div = 2; + /* Fix clock value. */ if (host->mmc->card && mmc_card_sd(host->mmc->card) && - esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY) - fixup = esdhc->clk_fixup->sd_dflt_max_clk; + esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY) + clock_fixup = esdhc->clk_fixup->sd_dflt_max_clk; else if (esdhc->clk_fixup) - fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing]; - - if (fixup && clock > fixup) - clock = fixup; + clock_fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing]; - temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); - temp &= ~(ESDHC_CLOCK_SDCLKEN | ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | - ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK); - sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); + if (clock_fixup == 0 || clock < clock_fixup) + clock_fixup = clock; - while (host->max_clk / pre_div / 16 > clock && pre_div < 256) + /* Calculate pre_div and div. */ + while (host->max_clk / pre_div / 16 > clock_fixup && pre_div < 256) pre_div *= 2; - while (host->max_clk / pre_div / div > clock && div < 16) + while (host->max_clk / pre_div / div > clock_fixup && div < 16) div++; + esdhc->div_ratio = pre_div * div; + + /* Limit clock division for HS400 200MHz clock for quirk. 
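[Annotation] The esdhc_clock_enable() rework above keeps the driver's stock polling idiom but now skips it entirely on pre-2.3 silicon, whose PRSSTAT register has no clock-stable bit, and relaxes the busy-wait from udelay(10) to usleep_range(10, 20). The skeleton, with condition_met() and needs_to_poll as stand-ins for the real status read and the vendor-version check:

timeout = ktime_add_ms(ktime_get(), 20);
while (needs_to_poll) {			/* false on vendor version <= 2.2 */
	if (condition_met())		/* e.g. PRSSTAT & ESDHC_CLOCK_STABLE */
		break;
	if (ktime_after(ktime_get(), timeout)) {
		pr_err("%s: internal clock never stabilised\n", name);
		break;
	}
	usleep_range(10, 20);		/* this path may sleep */
}
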
*/ if (esdhc->quirk_limited_clk_division && clock == MMC_HS200_MAX_DTR && (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 || host->flags & SDHCI_HS400_TUNING)) { - division = pre_div * div; - if (division <= 4) { + if (esdhc->div_ratio <= 4) { pre_div = 4; div = 1; - } else if (division <= 8) { + } else if (esdhc->div_ratio <= 8) { pre_div = 4; div = 2; - } else if (division <= 12) { + } else if (esdhc->div_ratio <= 12) { pre_div = 4; div = 3; } else { pr_warn("%s: using unsupported clock division.\n", mmc_hostname(host->mmc)); } + esdhc->div_ratio = pre_div * div; } + host->mmc->actual_clock = host->max_clk / esdhc->div_ratio; + dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n", - clock, host->max_clk / pre_div / div); - host->mmc->actual_clock = host->max_clk / pre_div / div; - esdhc->div_ratio = pre_div * div; + clock, host->mmc->actual_clock); + + /* Set clock division into register. */ pre_div >>= 1; div--; + esdhc_clock_enable(host, false); + temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); - temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN - | (div << ESDHC_DIVIDER_SHIFT) - | (pre_div << ESDHC_PREDIV_SHIFT)); + temp &= ~ESDHC_CLOCK_MASK; + temp |= ((div << ESDHC_DIVIDER_SHIFT) | + (pre_div << ESDHC_PREDIV_SHIFT)); sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); + /* + * Wait max 20 ms. If vendor version is 2.2 or lower, do not + * wait clock stable bit which does not exist. + */ + timeout = ktime_add_ms(ktime_get(), 20); + while (esdhc->vendor_ver > VENDOR_V_22) { + bool timedout = ktime_after(ktime_get(), timeout); + + if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE) + break; + if (timedout) { + pr_err("%s: Internal clock never stabilised.\n", + mmc_hostname(host->mmc)); + break; + } + usleep_range(10, 20); + } + + /* Additional setting for HS400. */ if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 && clock == MMC_HS200_MAX_DTR) { temp = sdhci_readl(host, ESDHC_TBCTL); @@ -711,25 +748,7 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) esdhc_clock_enable(host, false); esdhc_flush_async_fifo(host); } - - /* Wait max 20 ms */ - timeout = ktime_add_ms(ktime_get(), 20); - while (1) { - bool timedout = ktime_after(ktime_get(), timeout); - - if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE) - break; - if (timedout) { - pr_err("%s: Internal clock never stabilised.\n", - mmc_hostname(host->mmc)); - return; - } - udelay(10); - } - - temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); - temp |= ESDHC_CLOCK_SDCLKEN; - sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); + esdhc_clock_enable(host, true); } static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width) @@ -758,23 +777,58 @@ static void esdhc_reset(struct sdhci_host *host, u8 mask) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); - u32 val; + u32 val, bus_width = 0; + /* + * Add delay to make sure all the DMA transfers are finished + * for quirk. + */ if (esdhc->quirk_delay_before_data_reset && (mask & SDHCI_RESET_DATA) && (host->flags & SDHCI_REQ_USE_DMA)) mdelay(5); + /* + * Save bus-width for eSDHC whose vendor version is 2.2 + * or lower for data reset. 
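[Annotation] The divider search in esdhc_of_set_clock() walks a power-of-two prescaler (up to 256) and then a linear divisor (up to 16), and the clock is now gated via esdhc_clock_enable(host, false) before the divider write instead of open-coding the SDCLKEN bit. A worked example with a hypothetical 400 MHz host->max_clk and a 50 MHz target:

/*
 * while (400 MHz / pre_div / 16 > 50 MHz && pre_div < 256) pre_div *= 2;
 *	-> 400/1/16 = 25 MHz is already <= 50 MHz, so pre_div stays 1
 * while (400 MHz / pre_div / div > 50 MHz && div < 16) div++;
 *	-> stops at div = 8 (400/8 = 50 MHz exactly)
 *
 * div_ratio = pre_div * div = 8, actual_clock = 50 MHz;
 * register fields: pre_div >> 1 = 0, div - 1 = 7
 */
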
+ */ + if ((mask & SDHCI_RESET_DATA) && + (esdhc->vendor_ver <= VENDOR_V_22)) { + val = sdhci_readl(host, ESDHC_PROCTL); + bus_width = val & ESDHC_CTRL_BUSWIDTH_MASK; + } + sdhci_reset(host, mask); - sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); - sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); + /* + * Restore bus-width setting and interrupt registers for eSDHC + * whose vendor version is 2.2 or lower for data reset. + */ + if ((mask & SDHCI_RESET_DATA) && + (esdhc->vendor_ver <= VENDOR_V_22)) { + val = sdhci_readl(host, ESDHC_PROCTL); + val &= ~ESDHC_CTRL_BUSWIDTH_MASK; + val |= bus_width; + sdhci_writel(host, val, ESDHC_PROCTL); - if (mask & SDHCI_RESET_ALL) { + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); + } + + /* + * Some bits have to be cleaned manually for eSDHC whose spec + * version is higher than 3.0 for all reset. + */ + if ((mask & SDHCI_RESET_ALL) && + (esdhc->spec_ver >= SDHCI_SPEC_300)) { val = sdhci_readl(host, ESDHC_TBCTL); val &= ~ESDHC_TB_EN; sdhci_writel(host, val, ESDHC_TBCTL); + /* + * Initialize eSDHC_DLLCFG1[DLL_PD_PULSE_STRETCH_SEL] to + * 0 for quirk. + */ if (esdhc->quirk_unreliable_pulse_detection) { val = sdhci_readl(host, ESDHC_DLLCFG1); val &= ~ESDHC_DLL_PD_PULSE_STRETCH_SEL; @@ -854,20 +908,20 @@ static int esdhc_signal_voltage_switch(struct mmc_host *mmc, } static struct soc_device_attribute soc_tuning_erratum_type1[] = { - { .family = "QorIQ T1023", .revision = "1.0", }, - { .family = "QorIQ T1040", .revision = "1.0", }, - { .family = "QorIQ T2080", .revision = "1.0", }, - { .family = "QorIQ LS1021A", .revision = "1.0", }, + { .family = "QorIQ T1023", }, + { .family = "QorIQ T1040", }, + { .family = "QorIQ T2080", }, + { .family = "QorIQ LS1021A", }, { }, }; static struct soc_device_attribute soc_tuning_erratum_type2[] = { - { .family = "QorIQ LS1012A", .revision = "1.0", }, - { .family = "QorIQ LS1043A", .revision = "1.*", }, - { .family = "QorIQ LS1046A", .revision = "1.0", }, - { .family = "QorIQ LS1080A", .revision = "1.0", }, - { .family = "QorIQ LS2080A", .revision = "1.0", }, - { .family = "QorIQ LA1575A", .revision = "1.0", }, + { .family = "QorIQ LS1012A", }, + { .family = "QorIQ LS1043A", }, + { .family = "QorIQ LS1046A", }, + { .family = "QorIQ LS1080A", }, + { .family = "QorIQ LS2080A", }, + { .family = "QorIQ LA1575A", }, { }, }; @@ -888,20 +942,11 @@ static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable) esdhc_clock_enable(host, true); } -static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start, +static void esdhc_tuning_window_ptr(struct sdhci_host *host, u8 *window_start, u8 *window_end) { - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); - u8 tbstat_15_8, tbstat_7_0; u32 val; - if (esdhc->quirk_tuning_erratum_type1) { - *window_start = 5 * esdhc->div_ratio; - *window_end = 3 * esdhc->div_ratio; - return; - } - /* Write TBCTL[11:8]=4'h8 */ val = sdhci_readl(host, ESDHC_TBCTL); val &= ~(0xf << 8); @@ -920,20 +965,37 @@ static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start, val = sdhci_readl(host, ESDHC_TBSTAT); val = sdhci_readl(host, ESDHC_TBSTAT); + *window_end = val & 0xff; + *window_start = (val >> 8) & 0xff; +} + +static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start, + u8 *window_end) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); + u8 
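[Annotation] The reset path now special-cases old and new silicon separately: on vendor versions <= 2.2 a data reset also clobbers the PROCTL bus-width field and the interrupt enables, while on SDHCI 3.0+ parts a full reset leaves tuning-block state that must be cleared by hand. The save/restore bracket for the first case reduces to:

u32 proctl    = sdhci_readl(host, ESDHC_PROCTL);
u32 bus_width = proctl & ESDHC_CTRL_BUSWIDTH_MASK;	/* save before reset */

sdhci_reset(host, mask);

proctl = sdhci_readl(host, ESDHC_PROCTL);
proctl = (proctl & ~ESDHC_CTRL_BUSWIDTH_MASK) | bus_width;
sdhci_writel(host, proctl, ESDHC_PROCTL);		/* restore after reset */

The interrupt enable registers are rewritten from host->ier inside the same bracket, since the reset clears those as well; note they were previously rewritten unconditionally.
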
start_ptr, end_ptr; + + if (esdhc->quirk_tuning_erratum_type1) { + *window_start = 5 * esdhc->div_ratio; + *window_end = 3 * esdhc->div_ratio; + return; + } + + esdhc_tuning_window_ptr(host, &start_ptr, &end_ptr); + /* Reset data lines by setting ESDHCCTL[RSTD] */ sdhci_reset(host, SDHCI_RESET_DATA); /* Write 32'hFFFF_FFFF to IRQSTAT register */ sdhci_writel(host, 0xFFFFFFFF, SDHCI_INT_STATUS); - /* If TBSTAT[15:8]-TBSTAT[7:0] > 4 * div_ratio - * or TBSTAT[7:0]-TBSTAT[15:8] > 4 * div_ratio, + /* If TBSTAT[15:8]-TBSTAT[7:0] > (4 * div_ratio) + 2 + * or TBSTAT[7:0]-TBSTAT[15:8] > (4 * div_ratio) + 2, * then program TBPTR[TB_WNDW_END_PTR] = 4 * div_ratio * and program TBPTR[TB_WNDW_START_PTR] = 8 * div_ratio. */ - tbstat_7_0 = val & 0xff; - tbstat_15_8 = (val >> 8) & 0xff; - if (abs(tbstat_15_8 - tbstat_7_0) > (4 * esdhc->div_ratio)) { + if (abs(start_ptr - end_ptr) > (4 * esdhc->div_ratio + 2)) { *window_start = 8 * esdhc->div_ratio; *window_end = 4 * esdhc->div_ratio; } else { @@ -1006,6 +1068,19 @@ static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode) if (ret) break; + /* For type2 affected platforms of the tuning erratum, + * tuning may succeed although eSDHC might not have + * tuned properly. Need to check tuning window. + */ + if (esdhc->quirk_tuning_erratum_type2 && + !host->tuning_err) { + esdhc_tuning_window_ptr(host, &window_start, + &window_end); + if (abs(window_start - window_end) > + (4 * esdhc->div_ratio + 2)) + host->tuning_err = -EAGAIN; + } + /* If HW tuning fails and triggers erratum, * try workaround. */ @@ -1238,7 +1313,8 @@ static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host) * 1/2 peripheral clock. */ if (of_device_is_compatible(np, "fsl,ls1046a-esdhc") || - of_device_is_compatible(np, "fsl,ls1028a-esdhc")) + of_device_is_compatible(np, "fsl,ls1028a-esdhc") || + of_device_is_compatible(np, "fsl,ls1088a-esdhc")) esdhc->peripheral_clock = clk_get_rate(clk) / 2; else esdhc->peripheral_clock = clk_get_rate(clk); diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c index 083e7e053c95..882053151a47 100644 --- a/drivers/mmc/host/sdhci-omap.c +++ b/drivers/mmc/host/sdhci-omap.c @@ -7,6 +7,7 @@ */ #include <linux/delay.h> +#include <linux/mmc/mmc.h> #include <linux/mmc/slot-gpio.h> #include <linux/module.h> #include <linux/of.h> @@ -85,6 +86,7 @@ /* sdhci-omap controller flags */ #define SDHCI_OMAP_REQUIRE_IODELAY BIT(0) +#define SDHCI_OMAP_SPECIAL_RESET BIT(1) struct sdhci_omap_data { u32 offset; @@ -685,7 +687,11 @@ static int sdhci_omap_enable_dma(struct sdhci_host *host) struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host); reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON); - reg |= CON_DMA_MASTER; + reg &= ~CON_DMA_MASTER; + /* Switch to DMA slave mode when using external DMA */ + if (!host->use_external_dma) + reg |= CON_DMA_MASTER; + sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg); return 0; @@ -774,15 +780,35 @@ static void sdhci_omap_set_uhs_signaling(struct sdhci_host *host, sdhci_omap_start_clock(omap_host); } +#define MMC_TIMEOUT_US 20000 /* 20000 micro Sec */ static void sdhci_omap_reset(struct sdhci_host *host, u8 mask) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host); + unsigned long limit = MMC_TIMEOUT_US; + unsigned long i = 0; /* Don't reset data lines during tuning operation */ if (omap_host->is_tuning) mask &= ~SDHCI_RESET_DATA; + if (omap_host->flags & SDHCI_OMAP_SPECIAL_RESET) { + sdhci_writeb(host, mask, 
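[Annotation] Both esdhc_prepare_sw_tuning() and the new type-2 erratum recheck in esdhc_execute_tuning() apply the same sanity test to the tuning window read back from TBSTAT, with the threshold widened from 4 * div_ratio to (4 * div_ratio) + 2. Worked numbers:

/* threshold = 4 * div_ratio + 2; e.g. div_ratio = 4 -> 18 taps */
if (abs(start_ptr - end_ptr) > 4 * esdhc->div_ratio + 2) {
	/* window looks bogus: fall back to a fixed one */
	window_start = 8 * esdhc->div_ratio;	/* 32 with div_ratio = 4 */
	window_end   = 4 * esdhc->div_ratio;	/* 16 */
}

In the tuning path the same comparison now sets host->tuning_err = -EAGAIN, so a pass that reported success but left eSDHC mistuned gets retried instead of silently accepted.
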
SDHCI_SOFTWARE_RESET); + while ((!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask)) && + (i++ < limit)) + udelay(1); + i = 0; + while ((sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) && + (i++ < limit)) + udelay(1); + + if (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) + dev_err(mmc_dev(host->mmc), + "Timeout waiting on controller reset in %s\n", + __func__); + return; + } + sdhci_reset(host, mask); } @@ -823,6 +849,15 @@ static u32 sdhci_omap_irq(struct sdhci_host *host, u32 intmask) return intmask; } +static void sdhci_omap_set_timeout(struct sdhci_host *host, + struct mmc_command *cmd) +{ + if (cmd->opcode == MMC_ERASE) + sdhci_set_data_timeout_irq(host, false); + + __sdhci_set_timeout(host, cmd); +} + static struct sdhci_ops sdhci_omap_ops = { .set_clock = sdhci_omap_set_clock, .set_power = sdhci_omap_set_power, @@ -834,6 +869,7 @@ static struct sdhci_ops sdhci_omap_ops = { .reset = sdhci_omap_reset, .set_uhs_signaling = sdhci_omap_set_uhs_signaling, .irq = sdhci_omap_irq, + .set_timeout = sdhci_omap_set_timeout, }; static int sdhci_omap_set_capabilities(struct sdhci_omap_host *omap_host) @@ -883,6 +919,16 @@ static const struct sdhci_omap_data k2g_data = { .offset = 0x200, }; +static const struct sdhci_omap_data am335_data = { + .offset = 0x200, + .flags = SDHCI_OMAP_SPECIAL_RESET, +}; + +static const struct sdhci_omap_data am437_data = { + .offset = 0x200, + .flags = SDHCI_OMAP_SPECIAL_RESET, +}; + static const struct sdhci_omap_data dra7_data = { .offset = 0x200, .flags = SDHCI_OMAP_REQUIRE_IODELAY, @@ -891,6 +937,8 @@ static const struct sdhci_omap_data dra7_data = { static const struct of_device_id omap_sdhci_match[] = { { .compatible = "ti,dra7-sdhci", .data = &dra7_data }, { .compatible = "ti,k2g-sdhci", .data = &k2g_data }, + { .compatible = "ti,am335-sdhci", .data = &am335_data }, + { .compatible = "ti,am437-sdhci", .data = &am437_data }, {}, }; MODULE_DEVICE_TABLE(of, omap_sdhci_match); @@ -1037,6 +1085,7 @@ static int sdhci_omap_probe(struct platform_device *pdev) const struct of_device_id *match; struct sdhci_omap_data *data; const struct soc_device_attribute *soc; + struct resource *regs; match = of_match_device(omap_sdhci_match, dev); if (!match) @@ -1049,6 +1098,10 @@ static int sdhci_omap_probe(struct platform_device *pdev) } offset = data->offset; + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!regs) + return -ENXIO; + host = sdhci_pltfm_init(pdev, &sdhci_omap_pdata, sizeof(*omap_host)); if (IS_ERR(host)) { @@ -1065,6 +1118,7 @@ static int sdhci_omap_probe(struct platform_device *pdev) omap_host->timing = MMC_TIMING_LEGACY; omap_host->flags = data->flags; host->ioaddr += offset; + host->mapbase = regs->start + offset; mmc = host->mmc; sdhci_get_of_property(pdev); @@ -1134,6 +1188,10 @@ static int sdhci_omap_probe(struct platform_device *pdev) host->mmc_host_ops.execute_tuning = sdhci_omap_execute_tuning; host->mmc_host_ops.enable_sdio_irq = sdhci_omap_enable_sdio_irq; + /* Switch to external DMA only if there is the "dmas" property */ + if (of_find_property(dev->of_node, "dmas", NULL)) + sdhci_switch_external_dma(host, true); + ret = sdhci_setup_host(host); if (ret) goto err_put_sync; diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 5091e2c1c0e5..525de2454a4d 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -1991,12 +1991,12 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot( if (slot->cd_idx >= 0) { ret = mmc_gpiod_request_cd(host->mmc, "cd", slot->cd_idx, - 
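[Annotation] The mmc_gpiod_request_cd() calls in this hunk and in several later ones (sdhci-sirf, sdhci-spear, tmio) lose their trailing argument; as far as this series shows, the dropped parameter is the old GPIO-inversion out-parameter, which no remaining caller used once polarity started coming from the GPIO descriptor itself. The conversion is mechanical:

/* before */
ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0, NULL);
/* after: polarity is taken from the gpiod descriptor */
ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0);
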
slot->cd_override_level, 0, NULL); + slot->cd_override_level, 0); if (ret && ret != -EPROBE_DEFER) ret = mmc_gpiod_request_cd(host->mmc, NULL, slot->cd_idx, slot->cd_override_level, - 0, NULL); + 0); if (ret == -EPROBE_DEFER) goto remove; diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c index 51e096f27388..64200c78e90d 100644 --- a/drivers/mmc/host/sdhci-s3c.c +++ b/drivers/mmc/host/sdhci-s3c.c @@ -117,7 +117,6 @@ struct sdhci_s3c { struct s3c_sdhci_platdata *pdata; int cur_clk; int ext_cd_irq; - int ext_cd_gpio; struct clk *clk_io; struct clk *clk_bus[MAX_BUS_CLK]; @@ -481,7 +480,6 @@ static int sdhci_s3c_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct sdhci_host *host; struct sdhci_s3c *sc; - struct resource *res; int ret, irq, ptr, clks; if (!pdev->dev.platform_data && !pdev->dev.of_node) { @@ -512,7 +510,6 @@ static int sdhci_s3c_probe(struct platform_device *pdev) goto err_pdata_io_clk; } else { memcpy(pdata, pdev->dev.platform_data, sizeof(*pdata)); - sc->ext_cd_gpio = -1; /* invalid gpio number */ } drv_data = sdhci_s3c_get_driver_data(pdev); @@ -555,8 +552,7 @@ static int sdhci_s3c_probe(struct platform_device *pdev) goto err_no_busclks; } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - host->ioaddr = devm_ioremap_resource(&pdev->dev, res); + host->ioaddr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(host->ioaddr)) { ret = PTR_ERR(host->ioaddr); goto err_req_regs; diff --git a/drivers/mmc/host/sdhci-sirf.c b/drivers/mmc/host/sdhci-sirf.c index e43143223320..f4b05dd6c20a 100644 --- a/drivers/mmc/host/sdhci-sirf.c +++ b/drivers/mmc/host/sdhci-sirf.c @@ -194,7 +194,7 @@ static int sdhci_sirf_probe(struct platform_device *pdev) * We must request the IRQ after sdhci_add_host(), as the tasklet only * gets setup in sdhci_add_host() and we oops. */ - ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0, NULL); + ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0); if (ret == -EPROBE_DEFER) goto err_request_cd; if (!ret) diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c index 916b5b09c3d1..b4b63089a4e2 100644 --- a/drivers/mmc/host/sdhci-spear.c +++ b/drivers/mmc/host/sdhci-spear.c @@ -43,7 +43,6 @@ static const struct sdhci_ops sdhci_pltfm_ops = { static int sdhci_probe(struct platform_device *pdev) { struct sdhci_host *host; - struct resource *iomem; struct spear_sdhci *sdhci; struct device *dev; int ret; @@ -56,8 +55,7 @@ static int sdhci_probe(struct platform_device *pdev) goto err; } - iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - host->ioaddr = devm_ioremap_resource(&pdev->dev, iomem); + host->ioaddr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(host->ioaddr)) { ret = PTR_ERR(host->ioaddr); dev_dbg(&pdev->dev, "unable to map iomem: %d\n", ret); @@ -98,7 +96,7 @@ static int sdhci_probe(struct platform_device *pdev) * It is optional to use GPIOs for sdhci card detection. If we * find a descriptor using slot GPIO, we use it. 
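[Annotation] Exporting sdhci_set_data_timeout_irq() and __sdhci_set_timeout() lets a vendor driver wrap policy around the generic timeout calculation instead of duplicating it; the OMAP MMC_ERASE tweak earlier in this section is the first user. The shape of such an override (my_set_timeout is an illustrative name):

static void my_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	/* erase can exceed the HW data timeout; rely on the SW timer instead */
	if (cmd->opcode == MMC_ERASE)
		sdhci_set_data_timeout_irq(host, false);

	__sdhci_set_timeout(host, cmd);		/* generic calculation */
}

wired up through .set_timeout in the vendor's sdhci_ops, which sdhci_set_timeout() now dispatches to.
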
*/ - ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0, NULL); + ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0); if (ret == -EPROBE_DEFER) goto disable_clk; diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 659a9459ace3..63db84481dff 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -10,6 +10,7 @@ */ #include <linux/delay.h> +#include <linux/dmaengine.h> #include <linux/ktime.h> #include <linux/highmem.h> #include <linux/io.h> @@ -992,7 +993,7 @@ static void sdhci_set_transfer_irqs(struct sdhci_host *host) sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); } -static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable) +void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable) { if (enable) host->ier |= SDHCI_INT_DATA_TIMEOUT; @@ -1001,42 +1002,36 @@ static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable) sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); } +EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq); -static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) +void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) { - u8 count; - - if (host->ops->set_timeout) { - host->ops->set_timeout(host, cmd); - } else { - bool too_big = false; - - count = sdhci_calc_timeout(host, cmd, &too_big); - - if (too_big && - host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) { - sdhci_calc_sw_timeout(host, cmd); - sdhci_set_data_timeout_irq(host, false); - } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) { - sdhci_set_data_timeout_irq(host, true); - } - - sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL); + bool too_big = false; + u8 count = sdhci_calc_timeout(host, cmd, &too_big); + + if (too_big && + host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) { + sdhci_calc_sw_timeout(host, cmd); + sdhci_set_data_timeout_irq(host, false); + } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) { + sdhci_set_data_timeout_irq(host, true); } + + sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL); } +EXPORT_SYMBOL_GPL(__sdhci_set_timeout); -static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) +static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) { - struct mmc_data *data = cmd->data; - - host->data_timeout = 0; - - if (sdhci_data_line_cmd(cmd)) - sdhci_set_timeout(host, cmd); - - if (!data) - return; + if (host->ops->set_timeout) + host->ops->set_timeout(host, cmd); + else + __sdhci_set_timeout(host, cmd); +} +static void sdhci_initialize_data(struct sdhci_host *host, + struct mmc_data *data) +{ WARN_ON(host->data); /* Sanity checks */ @@ -1047,6 +1042,34 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) host->data = data; host->data_early = 0; host->data->bytes_xfered = 0; +} + +static inline void sdhci_set_block_info(struct sdhci_host *host, + struct mmc_data *data) +{ + /* Set the DMA boundary value and block size */ + sdhci_writew(host, + SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz), + SDHCI_BLOCK_SIZE); + /* + * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count + * can be supported, in that case 16-bit block count register must be 0. 
+ */ + if (host->version >= SDHCI_SPEC_410 && host->v4_mode && + (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) { + if (sdhci_readw(host, SDHCI_BLOCK_COUNT)) + sdhci_writew(host, 0, SDHCI_BLOCK_COUNT); + sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT); + } else { + sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT); + } +} + +static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) +{ + struct mmc_data *data = cmd->data; + + sdhci_initialize_data(host, data); if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { struct scatterlist *sg; @@ -1133,24 +1156,192 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) sdhci_set_transfer_irqs(host); - /* Set the DMA boundary value and block size */ - sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz), - SDHCI_BLOCK_SIZE); + sdhci_set_block_info(host, data); +} - /* - * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count - * can be supported, in that case 16-bit block count register must be 0. - */ - if (host->version >= SDHCI_SPEC_410 && host->v4_mode && - (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) { - if (sdhci_readw(host, SDHCI_BLOCK_COUNT)) - sdhci_writew(host, 0, SDHCI_BLOCK_COUNT); - sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT); +#if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA) + +static int sdhci_external_dma_init(struct sdhci_host *host) +{ + int ret = 0; + struct mmc_host *mmc = host->mmc; + + host->tx_chan = dma_request_chan(mmc->parent, "tx"); + if (IS_ERR(host->tx_chan)) { + ret = PTR_ERR(host->tx_chan); + if (ret != -EPROBE_DEFER) + pr_warn("Failed to request TX DMA channel.\n"); + host->tx_chan = NULL; + return ret; + } + + host->rx_chan = dma_request_chan(mmc->parent, "rx"); + if (IS_ERR(host->rx_chan)) { + if (host->tx_chan) { + dma_release_channel(host->tx_chan); + host->tx_chan = NULL; + } + + ret = PTR_ERR(host->rx_chan); + if (ret != -EPROBE_DEFER) + pr_warn("Failed to request RX DMA channel.\n"); + host->rx_chan = NULL; + } + + return ret; +} + +static struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host, + struct mmc_data *data) +{ + return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan; +} + +static int sdhci_external_dma_setup(struct sdhci_host *host, + struct mmc_command *cmd) +{ + int ret, i; + enum dma_transfer_direction dir; + struct dma_async_tx_descriptor *desc; + struct mmc_data *data = cmd->data; + struct dma_chan *chan; + struct dma_slave_config cfg; + dma_cookie_t cookie; + int sg_cnt; + + if (!host->mapbase) + return -EINVAL; + + cfg.src_addr = host->mapbase + SDHCI_BUFFER; + cfg.dst_addr = host->mapbase + SDHCI_BUFFER; + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.src_maxburst = data->blksz / 4; + cfg.dst_maxburst = data->blksz / 4; + + /* Sanity check: all the SG entries must be aligned by block size. */ + for (i = 0; i < data->sg_len; i++) { + if ((data->sg + i)->length % data->blksz) + return -EINVAL; + } + + chan = sdhci_external_dma_channel(host, data); + + ret = dmaengine_slave_config(chan, &cfg); + if (ret) + return ret; + + sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED); + if (sg_cnt <= 0) + return -EINVAL; + + dir = data->flags & MMC_DATA_WRITE ? 
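[Annotation] sdhci_external_dma_init() uses dma_request_chan(), which reports failure as an ERR_PTR() value rather than the NULL returned by the older dma_request_slave_channel(); the same conversion is applied to sh_mmcif and usdhi6rol0 later in this section. The resulting error pattern:

chan = dma_request_chan(dev, "tx");
if (IS_ERR(chan)) {
	ret = PTR_ERR(chan);
	if (ret == -EPROBE_DEFER)
		return ret;	/* DMA provider not bound yet: retry probe later */
	chan = NULL;		/* otherwise fall back to PIO/internal DMA */
}

Propagating -EPROBE_DEFER matters here: a missing DMA provider at probe time is normal during boot and must not be treated as a hard failure.
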
DMA_MEM_TO_DEV : DMA_DEV_TO_MEM; + desc = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, dir, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) + return -EINVAL; + + desc->callback = NULL; + desc->callback_param = NULL; + + cookie = dmaengine_submit(desc); + if (dma_submit_error(cookie)) + ret = cookie; + + return ret; +} + +static void sdhci_external_dma_release(struct sdhci_host *host) +{ + if (host->tx_chan) { + dma_release_channel(host->tx_chan); + host->tx_chan = NULL; + } + + if (host->rx_chan) { + dma_release_channel(host->rx_chan); + host->rx_chan = NULL; + } + + sdhci_switch_external_dma(host, false); +} + +static void __sdhci_external_dma_prepare_data(struct sdhci_host *host, + struct mmc_command *cmd) +{ + struct mmc_data *data = cmd->data; + + sdhci_initialize_data(host, data); + + host->flags |= SDHCI_REQ_USE_DMA; + sdhci_set_transfer_irqs(host); + + sdhci_set_block_info(host, data); +} + +static void sdhci_external_dma_prepare_data(struct sdhci_host *host, + struct mmc_command *cmd) +{ + if (!sdhci_external_dma_setup(host, cmd)) { + __sdhci_external_dma_prepare_data(host, cmd); } else { - sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT); + sdhci_external_dma_release(host); + pr_err("%s: Cannot use external DMA, switch to the DMA/PIO which standard SDHCI provides.\n", + mmc_hostname(host->mmc)); + sdhci_prepare_data(host, cmd); } } +static void sdhci_external_dma_pre_transfer(struct sdhci_host *host, + struct mmc_command *cmd) +{ + struct dma_chan *chan; + + if (!cmd->data) + return; + + chan = sdhci_external_dma_channel(host, cmd->data); + if (chan) + dma_async_issue_pending(chan); +} + +#else + +static inline int sdhci_external_dma_init(struct sdhci_host *host) +{ + return -EOPNOTSUPP; +} + +static inline void sdhci_external_dma_release(struct sdhci_host *host) +{ +} + +static inline void sdhci_external_dma_prepare_data(struct sdhci_host *host, + struct mmc_command *cmd) +{ + /* This should never happen */ + WARN_ON_ONCE(1); +} + +static inline void sdhci_external_dma_pre_transfer(struct sdhci_host *host, + struct mmc_command *cmd) +{ +} + +static inline struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host, + struct mmc_data *data) +{ + return NULL; +} + +#endif + +void sdhci_switch_external_dma(struct sdhci_host *host, bool en) +{ + host->use_external_dma = en; +} +EXPORT_SYMBOL_GPL(sdhci_switch_external_dma); + static inline bool sdhci_auto_cmd12(struct sdhci_host *host, struct mmc_request *mrq) { @@ -1245,22 +1436,10 @@ static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq) (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))); } -static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq) +static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq) { int i; - if (host->cmd && host->cmd->mrq == mrq) - host->cmd = NULL; - - if (host->data_cmd && host->data_cmd->mrq == mrq) - host->data_cmd = NULL; - - if (host->data && host->data->mrq == mrq) - host->data = NULL; - - if (sdhci_needs_reset(host, mrq)) - host->pending_reset = true; - for (i = 0; i < SDHCI_MAX_MRQS; i++) { if (host->mrqs_done[i] == mrq) { WARN_ON(1); @@ -1276,6 +1455,23 @@ static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq) } WARN_ON(i >= SDHCI_MAX_MRQS); +} + +static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq) +{ + if (host->cmd && host->cmd->mrq == mrq) + host->cmd = NULL; + + if (host->data_cmd && host->data_cmd->mrq == mrq) + host->data_cmd = NULL; + + if 
(host->data && host->data->mrq == mrq) + host->data = NULL; + + if (sdhci_needs_reset(host, mrq)) + host->pending_reset = true; + + sdhci_set_mrq_done(host, mrq); sdhci_del_timer(host, mrq); @@ -1326,12 +1522,12 @@ static void sdhci_finish_data(struct sdhci_host *host) /* * Need to send CMD12 if - - * a) open-ended multiblock transfer (no CMD23) + * a) open-ended multiblock transfer not using auto CMD12 (no CMD23) * b) error in multiblock transfer */ if (data->stop && - (data->error || - !data->mrq->sbc)) { + ((!data->mrq->sbc && !sdhci_auto_cmd12(host, data->mrq)) || + data->error)) { /* * 'cap_cmd_during_tfr' request must not use the command line * after mmc_command_done() has been called. It is upper layer's @@ -1390,12 +1586,19 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) } host->cmd = cmd; + host->data_timeout = 0; if (sdhci_data_line_cmd(cmd)) { WARN_ON(host->data_cmd); host->data_cmd = cmd; + sdhci_set_timeout(host, cmd); } - sdhci_prepare_data(host, cmd); + if (cmd->data) { + if (host->use_external_dma) + sdhci_external_dma_prepare_data(host, cmd); + else + sdhci_prepare_data(host, cmd); + } sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT); @@ -1437,6 +1640,9 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) timeout += 10 * HZ; sdhci_mod_timer(host, cmd->mrq, timeout); + if (host->use_external_dma) + sdhci_external_dma_pre_transfer(host, cmd); + sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND); } EXPORT_SYMBOL_GPL(sdhci_send_command); @@ -1825,17 +2031,6 @@ void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) sdhci_led_activate(host); - /* - * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED - * requests if Auto-CMD12 is enabled. - */ - if (sdhci_auto_cmd12(host, mrq)) { - if (mrq->stop) { - mrq->data->stop = NULL; - mrq->stop = NULL; - } - } - if (!present || host->flags & SDHCI_DEVICE_DEAD) { mrq->cmd->error = -ENOMEDIUM; sdhci_finish_mrq(host, mrq); @@ -2661,6 +2856,17 @@ static bool sdhci_request_done(struct sdhci_host *host) if (host->flags & SDHCI_REQ_USE_DMA) { struct mmc_data *data = mrq->data; + if (host->use_external_dma && data && + (mrq->cmd->error || data->error)) { + struct dma_chan *chan = sdhci_external_dma_channel(host, data); + + host->mrqs_done[i] = NULL; + spin_unlock_irqrestore(&host->lock, flags); + dmaengine_terminate_sync(chan); + spin_lock_irqsave(&host->lock, flags); + sdhci_set_mrq_done(host, mrq); + } + if (data && data->host_cookie == COOKIE_MAPPED) { if (host->bounce_buffer) { /* @@ -3796,6 +4002,21 @@ int sdhci_setup_host(struct sdhci_host *host) if (sdhci_can_64bit_dma(host)) host->flags |= SDHCI_USE_64_BIT_DMA; + if (host->use_external_dma) { + ret = sdhci_external_dma_init(host); + if (ret == -EPROBE_DEFER) + goto unreg; + /* + * Fall back to use the DMA/PIO integrated in standard SDHCI + * instead of external DMA devices. 
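[Annotation] With the request-path hack removed (sdhci_request() used to NULL out mrq->stop whenever auto-CMD12 applied), sdhci_finish_data() now owns the stop decision in one place:

/* send CMD12 only for open-ended transfers the controller won't stop itself */
if (data->stop &&
    ((!data->mrq->sbc && !sdhci_auto_cmd12(host, data->mrq)) ||
     data->error))
	...

That is: a stop command goes out when there was neither a CMD23 (sbc) nor auto-CMD12, or unconditionally after a transfer error, where CMD12 doubles as the recovery abort.
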
+ */ + else if (ret) + sdhci_switch_external_dma(host, false); + /* Disable internal DMA sources */ + else + host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA); + } + if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { if (host->ops->set_dma_mask) ret = host->ops->set_dma_mask(host); @@ -3822,15 +4043,13 @@ int sdhci_setup_host(struct sdhci_host *host) dma_addr_t dma; void *buf; - if (host->flags & SDHCI_USE_64_BIT_DMA) { - host->adma_table_sz = host->adma_table_cnt * - SDHCI_ADMA2_64_DESC_SZ(host); - host->desc_sz = SDHCI_ADMA2_64_DESC_SZ(host); - } else { - host->adma_table_sz = host->adma_table_cnt * - SDHCI_ADMA2_32_DESC_SZ; - host->desc_sz = SDHCI_ADMA2_32_DESC_SZ; - } + if (!(host->flags & SDHCI_USE_64_BIT_DMA)) + host->alloc_desc_sz = SDHCI_ADMA2_32_DESC_SZ; + else if (!host->alloc_desc_sz) + host->alloc_desc_sz = SDHCI_ADMA2_64_DESC_SZ(host); + + host->desc_sz = host->alloc_desc_sz; + host->adma_table_sz = host->adma_table_cnt * host->desc_sz; host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN; /* @@ -4278,6 +4497,10 @@ void sdhci_cleanup_host(struct sdhci_host *host) dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + host->adma_table_sz, host->align_buffer, host->align_addr); + + if (host->use_external_dma) + sdhci_external_dma_release(host); + host->adma_table = NULL; host->align_buffer = NULL; } @@ -4323,6 +4546,7 @@ int __sdhci_add_host(struct sdhci_host *host) pr_info("%s: SDHCI controller on %s [%s] using %s\n", mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)), + host->use_external_dma ? "External DMA" : (host->flags & SDHCI_USE_ADMA) ? (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" : (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO"); @@ -4411,6 +4635,9 @@ void sdhci_remove_host(struct sdhci_host *host, int dead) host->adma_table_sz, host->align_buffer, host->align_addr); + if (host->use_external_dma) + sdhci_external_dma_release(host); + host->adma_table = NULL; host->align_buffer = NULL; } diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index fe83ece6965b..a6a3ddcf97e7 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -487,6 +487,7 @@ struct sdhci_host { int irq; /* Device IRQ */ void __iomem *ioaddr; /* Mapped address */ + phys_addr_t mapbase; /* physical address base */ char *bounce_buffer; /* For packing SDMA reads/writes */ dma_addr_t bounce_addr; unsigned int bounce_buffer_size; @@ -535,6 +536,7 @@ struct sdhci_host { bool pending_reset; /* Cmd/data reset is pending */ bool irq_wake_enabled; /* IRQ wakeup is enabled */ bool v4_mode; /* Host Version 4 Enable */ + bool use_external_dma; /* Host selects to use external DMA */ struct mmc_request *mrqs_done[SDHCI_MAX_MRQS]; /* Requests done */ struct mmc_command *cmd; /* Current command */ @@ -556,7 +558,8 @@ struct sdhci_host { dma_addr_t adma_addr; /* Mapped ADMA descr. table */ dma_addr_t align_addr; /* Mapped bounce buffer */ - unsigned int desc_sz; /* ADMA descriptor size */ + unsigned int desc_sz; /* ADMA current descriptor size */ + unsigned int alloc_desc_sz; /* ADMA descr. 
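[Annotation] The new alloc_desc_sz/desc_sz split in sdhci_setup_host() formalises what the sdhci-msm CQE code at the top of this section needs: the ADMA table is sized once from the largest descriptor the host may ever use, while desc_sz tracks the stride currently in effect and may be lowered temporarily (the 12-byte pre-CQE window). In outline:

/* alloc_desc_sz sizes the dma_alloc_coherent() buffer; a vendor driver may
 * have set it larger before calling sdhci_setup_host() */
host->desc_sz = host->alloc_desc_sz;		/* current stride */
host->adma_table_sz = host->adma_table_cnt * host->desc_sz;
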
max size host supports */ struct workqueue_struct *complete_wq; /* Request completion wq */ struct work_struct complete_work; /* Request completion work */ @@ -564,6 +567,11 @@ struct sdhci_host { struct timer_list timer; /* Timer for timeouts */ struct timer_list data_timer; /* Timer for data timeouts */ +#if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA) + struct dma_chan *rx_chan; + struct dma_chan *tx_chan; +#endif + u32 caps; /* CAPABILITY_0 */ u32 caps1; /* CAPABILITY_1 */ bool read_caps; /* Capability flags have been read */ @@ -795,5 +803,8 @@ void sdhci_end_tuning(struct sdhci_host *host); void sdhci_reset_tuning(struct sdhci_host *host); void sdhci_send_tuning(struct sdhci_host *host, u32 opcode); void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode); +void sdhci_switch_external_dma(struct sdhci_host *host, bool en); +void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable); +void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd); #endif /* __SDHCI_HW_H */ diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c index b8fe94fd9525..3afea580fbea 100644 --- a/drivers/mmc/host/sdhci_am654.c +++ b/drivers/mmc/host/sdhci_am654.c @@ -505,7 +505,6 @@ static int sdhci_am654_probe(struct platform_device *pdev) struct sdhci_am654_data *sdhci_am654; const struct of_device_id *match; struct sdhci_host *host; - struct resource *res; struct clk *clk_xin; struct device *dev = &pdev->dev; void __iomem *base; @@ -538,8 +537,7 @@ static int sdhci_am654_probe(struct platform_device *pdev) goto pm_runtime_disable; } - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - base = devm_ioremap_resource(dev, res); + base = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(base)) { ret = PTR_ERR(base); goto pm_runtime_put; diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c index fa0dfc657c22..4625cc071b61 100644 --- a/drivers/mmc/host/sdhci_f_sdh30.c +++ b/drivers/mmc/host/sdhci_f_sdh30.c @@ -89,7 +89,6 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev) { struct sdhci_host *host; struct device *dev = &pdev->dev; - struct resource *res; int irq, ctrl = 0, ret = 0; struct f_sdhost_priv *priv; u32 reg = 0; @@ -123,8 +122,7 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev) host->ops = &sdhci_f_sdh30_ops; host->irq = irq; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - host->ioaddr = devm_ioremap_resource(&pdev->dev, res); + host->ioaddr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(host->ioaddr)) { ret = PTR_ERR(host->ioaddr); goto err; diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index 98c575de43c7..7e1fd557109c 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c @@ -432,8 +432,12 @@ static void sh_mmcif_request_dma(struct sh_mmcif_host *host) host->chan_rx = sh_mmcif_request_dma_pdata(host, pdata->slave_id_rx); } else { - host->chan_tx = dma_request_slave_channel(dev, "tx"); - host->chan_rx = dma_request_slave_channel(dev, "rx"); + host->chan_tx = dma_request_chan(dev, "tx"); + if (IS_ERR(host->chan_tx)) + host->chan_tx = NULL; + host->chan_rx = dma_request_chan(dev, "rx"); + if (IS_ERR(host->chan_rx)) + host->chan_rx = NULL; } dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx, host->chan_rx); @@ -1388,7 +1392,6 @@ static int sh_mmcif_probe(struct platform_device *pdev) struct sh_mmcif_host *host; struct device *dev = &pdev->dev; struct sh_mmcif_plat_data *pd = dev->platform_data; - struct resource *res; 
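[Annotation] The repeated devm_platform_ioremap_resource() conversions in this hunk and the following ones (sdhci_am654, sdhci_f_sdh30, sh_mmcif, sunxi, tmio, s3c, spear) are mechanical; the helper is shorthand for the two-call sequence it replaces:

/* before */
res  = platform_get_resource(pdev, IORESOURCE_MEM, n);
base = devm_ioremap_resource(&pdev->dev, res);

/* after */
base = devm_platform_ioremap_resource(pdev, n);

Both forms return an ERR_PTR() on failure, so the surrounding IS_ERR()/PTR_ERR() handling is unchanged and the local struct resource * goes away.
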
void __iomem *reg; const char *name; @@ -1397,8 +1400,7 @@ static int sh_mmcif_probe(struct platform_device *pdev) if (irq[0] < 0) return -ENXIO; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - reg = devm_ioremap_resource(dev, res); + reg = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(reg)) return PTR_ERR(reg); diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index d577a6b0ceae..f87d7967457f 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c @@ -1273,8 +1273,7 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, if (ret) return ret; - host->reg_base = devm_ioremap_resource(&pdev->dev, - platform_get_resource(pdev, IORESOURCE_MEM, 0)); + host->reg_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(host->reg_base)) return PTR_ERR(host->reg_base); diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index c4a1d49fbea4..1e424bcdbd5f 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -1109,12 +1109,10 @@ struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev, { struct tmio_mmc_host *host; struct mmc_host *mmc; - struct resource *res; void __iomem *ctl; int ret; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ctl = devm_ioremap_resource(&pdev->dev, res); + ctl = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ctl)) return ERR_CAST(ctl); @@ -1181,7 +1179,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host) * Look for a card detect GPIO, if it fails with anything * else than a probe deferral, just live without it. */ - ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL); + ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0); if (ret == -EPROBE_DEFER) return ret; diff --git a/drivers/mmc/host/uniphier-sd.c b/drivers/mmc/host/uniphier-sd.c index 0c72ec5546c3..a1683c49cb90 100644 --- a/drivers/mmc/host/uniphier-sd.c +++ b/drivers/mmc/host/uniphier-sd.c @@ -59,7 +59,6 @@ struct uniphier_sd_priv { struct tmio_mmc_data tmio_data; struct pinctrl *pinctrl; - struct pinctrl_state *pinstate_default; struct pinctrl_state *pinstate_uhs; struct clk *clk; struct reset_control *rst; @@ -500,13 +499,12 @@ static int uniphier_sd_start_signal_voltage_switch(struct mmc_host *mmc, { struct tmio_mmc_host *host = mmc_priv(mmc); struct uniphier_sd_priv *priv = uniphier_sd_priv(host); - struct pinctrl_state *pinstate; + struct pinctrl_state *pinstate = NULL; u32 val, tmp; switch (ios->signal_voltage) { case MMC_SIGNAL_VOLTAGE_330: val = UNIPHIER_SD_VOLT_330; - pinstate = priv->pinstate_default; break; case MMC_SIGNAL_VOLTAGE_180: val = UNIPHIER_SD_VOLT_180; @@ -521,7 +519,10 @@ static int uniphier_sd_start_signal_voltage_switch(struct mmc_host *mmc, tmp |= FIELD_PREP(UNIPHIER_SD_VOLT_MASK, val); writel(tmp, host->ctl + UNIPHIER_SD_VOLT); - pinctrl_select_state(priv->pinctrl, pinstate); + if (pinstate) + pinctrl_select_state(priv->pinctrl, pinstate); + else + pinctrl_select_default_state(mmc_dev(mmc)); return 0; } @@ -533,11 +534,6 @@ static int uniphier_sd_uhs_init(struct tmio_mmc_host *host, if (IS_ERR(priv->pinctrl)) return PTR_ERR(priv->pinctrl); - priv->pinstate_default = pinctrl_lookup_state(priv->pinctrl, - PINCTRL_STATE_DEFAULT); - if (IS_ERR(priv->pinstate_default)) - return PTR_ERR(priv->pinstate_default); - priv->pinstate_uhs = pinctrl_lookup_state(priv->pinctrl, "uhs"); if (IS_ERR(priv->pinstate_uhs)) return PTR_ERR(priv->pinstate_uhs); diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c index 
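[Annotation] The uniphier-sd change above (and the usdhi6rol0 one just below) drops the driver's cached copy of the default pin state in favour of pinctrl_select_default_state(), which resolves it from the device at call time:

/* before: look up PINCTRL_STATE_DEFAULT at init, select it later */
state = pinctrl_lookup_state(priv->pinctrl, PINCTRL_STATE_DEFAULT);
pinctrl_select_state(priv->pinctrl, state);

/* after: no cached state needed */
pinctrl_select_default_state(mmc_dev(mmc));

Besides the bookkeeping, this also stops treating a missing default state as a probe failure, which both drivers previously did.
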
b11ac2314328..9a0b1e4e405d 100644 --- a/drivers/mmc/host/usdhi6rol0.c +++ b/drivers/mmc/host/usdhi6rol0.c @@ -199,7 +199,6 @@ struct usdhi6_host { /* Pin control */ struct pinctrl *pinctrl; - struct pinctrl_state *pins_default; struct pinctrl_state *pins_uhs; }; @@ -677,12 +676,14 @@ static void usdhi6_dma_request(struct usdhi6_host *host, phys_addr_t start) }; int ret; - host->chan_tx = dma_request_slave_channel(mmc_dev(host->mmc), "tx"); + host->chan_tx = dma_request_chan(mmc_dev(host->mmc), "tx"); dev_dbg(mmc_dev(host->mmc), "%s: TX: got channel %p\n", __func__, host->chan_tx); - if (!host->chan_tx) + if (IS_ERR(host->chan_tx)) { + host->chan_tx = NULL; return; + } cfg.direction = DMA_MEM_TO_DEV; cfg.dst_addr = start + USDHI6_SD_BUF0; @@ -692,12 +693,14 @@ static void usdhi6_dma_request(struct usdhi6_host *host, phys_addr_t start) if (ret < 0) goto e_release_tx; - host->chan_rx = dma_request_slave_channel(mmc_dev(host->mmc), "rx"); + host->chan_rx = dma_request_chan(mmc_dev(host->mmc), "rx"); dev_dbg(mmc_dev(host->mmc), "%s: RX: got channel %p\n", __func__, host->chan_rx); - if (!host->chan_rx) + if (IS_ERR(host->chan_rx)) { + host->chan_rx = NULL; goto e_release_tx; + } cfg.direction = DMA_DEV_TO_MEM; cfg.src_addr = cfg.dst_addr; @@ -1162,8 +1165,7 @@ static int usdhi6_set_pinstates(struct usdhi6_host *host, int voltage) host->pins_uhs); default: - return pinctrl_select_state(host->pinctrl, - host->pins_default); + return pinctrl_select_default_state(mmc_dev(host->mmc)); } } @@ -1770,17 +1772,6 @@ static int usdhi6_probe(struct platform_device *pdev) } host->pins_uhs = pinctrl_lookup_state(host->pinctrl, "state_uhs"); - if (!IS_ERR(host->pins_uhs)) { - host->pins_default = pinctrl_lookup_state(host->pinctrl, - PINCTRL_STATE_DEFAULT); - - if (IS_ERR(host->pins_default)) { - dev_err(dev, - "UHS pinctrl requires a default pin state.\n"); - ret = PTR_ERR(host->pins_default); - goto e_free_mmc; - } - } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); host->base = devm_ioremap_resource(dev, res); diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c index f4ac064ff471..e48bddd95ce6 100644 --- a/drivers/mmc/host/via-sdmmc.c +++ b/drivers/mmc/host/via-sdmmc.c @@ -1106,7 +1106,7 @@ static int via_sd_probe(struct pci_dev *pcidev, len = pci_resource_len(pcidev, 0); base = pci_resource_start(pcidev, 0); - sdhost->mmiobase = ioremap_nocache(base, len); + sdhost->mmiobase = ioremap(base, len); if (!sdhost->mmiobase) { ret = -ENOMEM; goto free_mmc_host; diff --git a/drivers/mtd/devices/bcm47xxsflash.c b/drivers/mtd/devices/bcm47xxsflash.c index eccf2e5d905e..3af50db8b21b 100644 --- a/drivers/mtd/devices/bcm47xxsflash.c +++ b/drivers/mtd/devices/bcm47xxsflash.c @@ -320,7 +320,7 @@ static int bcm47xxsflash_bcma_probe(struct platform_device *pdev) * ChipCommon revision. 
*/ if (b47s->bcma_cc->core->id.rev == 54) - b47s->window = ioremap_nocache(res->start, resource_size(res)); + b47s->window = ioremap(res->start, resource_size(res)); else b47s->window = ioremap_cache(res->start, resource_size(res)); if (!b47s->window) { diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c index 462fadb56bdb..42a95ba40f2c 100644 --- a/drivers/mtd/maps/amd76xrom.c +++ b/drivers/mtd/maps/amd76xrom.c @@ -163,7 +163,7 @@ static int amd76xrom_init_one(struct pci_dev *pdev, /* FIXME handle registers 0x80 - 0x8C the bios region locks */ /* For write accesses caches are useless */ - window->virt = ioremap_nocache(window->phys, window->size); + window->virt = ioremap(window->phys, window->size); if (!window->virt) { printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n", window->phys, window->size); diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c index c9b7b4d5a923..460494212f6a 100644 --- a/drivers/mtd/maps/ck804xrom.c +++ b/drivers/mtd/maps/ck804xrom.c @@ -191,7 +191,7 @@ static int __init ck804xrom_init_one(struct pci_dev *pdev, /* FIXME handle registers 0x80 - 0x8C the bios region locks */ /* For write accesses caches are useless */ - window->virt = ioremap_nocache(window->phys, window->size); + window->virt = ioremap(window->phys, window->size); if (!window->virt) { printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n", window->phys, window->size); diff --git a/drivers/mtd/maps/esb2rom.c b/drivers/mtd/maps/esb2rom.c index 5c27c6994896..85e14150a073 100644 --- a/drivers/mtd/maps/esb2rom.c +++ b/drivers/mtd/maps/esb2rom.c @@ -249,7 +249,7 @@ static int __init esb2rom_init_one(struct pci_dev *pdev, } /* Map the firmware hub into my address space. */ - window->virt = ioremap_nocache(window->phys, window->size); + window->virt = ioremap(window->phys, window->size); if (!window->virt) { printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n", window->phys, window->size); diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c index 6b989f391baa..fda72c5fd8f9 100644 --- a/drivers/mtd/maps/ichxrom.c +++ b/drivers/mtd/maps/ichxrom.c @@ -184,7 +184,7 @@ static int __init ichxrom_init_one(struct pci_dev *pdev, } /* Map the firmware hub into my address space. */ - window->virt = ioremap_nocache(window->phys, window->size); + window->virt = ioremap(window->phys, window->size); if (!window->virt) { printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n", window->phys, window->size); diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c index 69503aef981e..d67b845b0e89 100644 --- a/drivers/mtd/maps/intel_vr_nor.c +++ b/drivers/mtd/maps/intel_vr_nor.c @@ -133,7 +133,7 @@ static int vr_nor_init_maps(struct vr_nor_mtd *p) if (win_len < (CS0_START + CS0_SIZE)) return -ENXIO; - p->csr_base = ioremap_nocache(csr_phys, csr_len); + p->csr_base = ioremap(csr_phys, csr_len); if (!p->csr_base) return -ENOMEM; @@ -152,7 +152,7 @@ static int vr_nor_init_maps(struct vr_nor_mtd *p) p->map.bankwidth = (exp_timing_cs0 & TIMING_BYTE_EN) ? 
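[Annotation] The long run of ioremap_nocache() -> ioremap() changes here and in the remaining hunks of this section (mtd maps, raw NAND, CAN, ethernet) is part of a tree-wide rename, not a behavioural change: on the architectures these drivers build for, ioremap() already returns an uncached mapping, so ioremap_nocache() was an alias on its way out.

/* identical semantics on these platforms */
base = ioremap(res->start, resource_size(res));	/* was ioremap_nocache(...) */
if (!base)
	return -ENOMEM;		/* ioremap() reports failure as plain NULL */

Note the contrast with bcm47xxsflash above, which keeps a deliberately cached ioremap_cache() window on everything except ChipCommon rev 54.
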
1 : 2; p->map.phys = win_phys + CS0_START; p->map.size = CS0_SIZE; - p->map.virt = ioremap_nocache(p->map.phys, p->map.size); + p->map.virt = ioremap(p->map.phys, p->map.size); if (!p->map.virt) { err = -ENOMEM; goto release; diff --git a/drivers/mtd/maps/l440gx.c b/drivers/mtd/maps/l440gx.c index 0eeadfeb620d..832b880d1aaf 100644 --- a/drivers/mtd/maps/l440gx.c +++ b/drivers/mtd/maps/l440gx.c @@ -78,7 +78,7 @@ static int __init init_l440gx(void) return -ENODEV; } - l440gx_map.virt = ioremap_nocache(WINDOW_ADDR, WINDOW_SIZE); + l440gx_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE); if (!l440gx_map.virt) { printk(KERN_WARNING "Failed to ioremap L440GX flash region\n"); diff --git a/drivers/mtd/maps/netsc520.c b/drivers/mtd/maps/netsc520.c index abc52b70bb00..0bb651624f05 100644 --- a/drivers/mtd/maps/netsc520.c +++ b/drivers/mtd/maps/netsc520.c @@ -82,10 +82,10 @@ static int __init init_netsc520(void) printk(KERN_NOTICE "NetSc520 flash device: 0x%Lx at 0x%Lx\n", (unsigned long long)netsc520_map.size, (unsigned long long)netsc520_map.phys); - netsc520_map.virt = ioremap_nocache(netsc520_map.phys, netsc520_map.size); + netsc520_map.virt = ioremap(netsc520_map.phys, netsc520_map.size); if (!netsc520_map.virt) { - printk("Failed to ioremap_nocache\n"); + printk("Failed to ioremap\n"); return -EIO; } diff --git a/drivers/mtd/maps/nettel.c b/drivers/mtd/maps/nettel.c index 50046d497398..7d349874ffeb 100644 --- a/drivers/mtd/maps/nettel.c +++ b/drivers/mtd/maps/nettel.c @@ -176,7 +176,7 @@ static int __init nettel_init(void) #endif int rc = 0; - nettel_mmcrp = (void *) ioremap_nocache(0xfffef000, 4096); + nettel_mmcrp = (void *) ioremap(0xfffef000, 4096); if (nettel_mmcrp == NULL) { printk("SNAPGEAR: failed to disable MMCR cache??\n"); return(-EIO); @@ -217,7 +217,7 @@ static int __init nettel_init(void) __asm__ ("wbinvd"); nettel_amd_map.phys = amdaddr; - nettel_amd_map.virt = ioremap_nocache(amdaddr, maxsize); + nettel_amd_map.virt = ioremap(amdaddr, maxsize); if (!nettel_amd_map.virt) { printk("SNAPGEAR: failed to ioremap() BOOTCS\n"); iounmap(nettel_mmcrp); @@ -303,7 +303,7 @@ static int __init nettel_init(void) /* Probe for the size of the first Intel flash */ nettel_intel_map.size = maxsize; nettel_intel_map.phys = intel0addr; - nettel_intel_map.virt = ioremap_nocache(intel0addr, maxsize); + nettel_intel_map.virt = ioremap(intel0addr, maxsize); if (!nettel_intel_map.virt) { printk("SNAPGEAR: failed to ioremap() ROMCS1\n"); rc = -EIO; @@ -337,7 +337,7 @@ static int __init nettel_init(void) iounmap(nettel_intel_map.virt); nettel_intel_map.size = maxsize; - nettel_intel_map.virt = ioremap_nocache(intel0addr, maxsize); + nettel_intel_map.virt = ioremap(intel0addr, maxsize); if (!nettel_intel_map.virt) { printk("SNAPGEAR: failed to ioremap() ROMCS1/2\n"); rc = -EIO; diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c index 9a49f8a06fb8..377ef0fc4e3e 100644 --- a/drivers/mtd/maps/pci.c +++ b/drivers/mtd/maps/pci.c @@ -94,7 +94,7 @@ intel_iq80310_init(struct pci_dev *dev, struct map_pci_info *map) map->map.write = mtd_pci_write8, map->map.size = 0x00800000; - map->base = ioremap_nocache(pci_resource_start(dev, 0), + map->base = ioremap(pci_resource_start(dev, 0), pci_resource_len(dev, 0)); if (!map->base) @@ -188,7 +188,7 @@ intel_dc21285_init(struct pci_dev *dev, struct map_pci_info *map) map->map.read = mtd_pci_read32, map->map.write = mtd_pci_write32, map->map.size = len; - map->base = ioremap_nocache(base, len); + map->base = ioremap(base, len); if (!map->base) return -ENOMEM; diff --git 
a/drivers/mtd/maps/sc520cdp.c b/drivers/mtd/maps/sc520cdp.c index 03af2df90d47..9902b37e18b4 100644 --- a/drivers/mtd/maps/sc520cdp.c +++ b/drivers/mtd/maps/sc520cdp.c @@ -174,8 +174,8 @@ static void sc520cdp_setup_par(void) int i, j; /* map in SC520's MMCR area */ - mmcr = ioremap_nocache(SC520_MMCR_BASE, SC520_MMCR_EXTENT); - if(!mmcr) { /* ioremap_nocache failed: skip the PAR reprogramming */ + mmcr = ioremap(SC520_MMCR_BASE, SC520_MMCR_EXTENT); + if(!mmcr) { /* ioremap failed: skip the PAR reprogramming */ /* force physical address fields to BIOS defaults: */ for(i = 0; i < NUM_FLASH_BANKS; i++) sc520cdp_map[i].phys = par_table[i].default_address; @@ -225,10 +225,10 @@ static int __init init_sc520cdp(void) (unsigned long long)sc520cdp_map[i].size, (unsigned long long)sc520cdp_map[i].phys); - sc520cdp_map[i].virt = ioremap_nocache(sc520cdp_map[i].phys, sc520cdp_map[i].size); + sc520cdp_map[i].virt = ioremap(sc520cdp_map[i].phys, sc520cdp_map[i].size); if (!sc520cdp_map[i].virt) { - printk("Failed to ioremap_nocache\n"); + printk("Failed to ioremap\n"); for (j = 0; j < i; j++) { if (mymtd[j]) { map_destroy(mymtd[j]); diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c index 2afb253bf456..57303f904bc1 100644 --- a/drivers/mtd/maps/scb2_flash.c +++ b/drivers/mtd/maps/scb2_flash.c @@ -152,7 +152,7 @@ static int scb2_flash_probe(struct pci_dev *dev, } /* remap the IO window (w/o caching) */ - scb2_ioaddr = ioremap_nocache(SCB2_ADDR, SCB2_WINDOW); + scb2_ioaddr = ioremap(SCB2_ADDR, SCB2_WINDOW); if (!scb2_ioaddr) { printk(KERN_ERR MODNAME ": Failed to ioremap window!\n"); if (!region_fail) diff --git a/drivers/mtd/maps/ts5500_flash.c b/drivers/mtd/maps/ts5500_flash.c index 6cfc8783c0e5..70d6e865f555 100644 --- a/drivers/mtd/maps/ts5500_flash.c +++ b/drivers/mtd/maps/ts5500_flash.c @@ -56,10 +56,10 @@ static int __init init_ts5500_map(void) { int rc = 0; - ts5500_map.virt = ioremap_nocache(ts5500_map.phys, ts5500_map.size); + ts5500_map.virt = ioremap(ts5500_map.phys, ts5500_map.size); if (!ts5500_map.virt) { - printk(KERN_ERR "Failed to ioremap_nocache\n"); + printk(KERN_ERR "Failed to ioremap\n"); rc = -EIO; goto err2; } diff --git a/drivers/mtd/nand/raw/au1550nd.c b/drivers/mtd/nand/raw/au1550nd.c index e10b76089048..75eb3e97fae3 100644 --- a/drivers/mtd/nand/raw/au1550nd.c +++ b/drivers/mtd/nand/raw/au1550nd.c @@ -404,7 +404,7 @@ static int au1550nd_probe(struct platform_device *pdev) goto out1; } - ctx->base = ioremap_nocache(r->start, 0x1000); + ctx->base = ioremap(r->start, 0x1000); if (!ctx->base) { dev_err(&pdev->dev, "cannot remap NAND memory area\n"); ret = -ENODEV; diff --git a/drivers/mtd/nand/raw/denali_pci.c b/drivers/mtd/nand/raw/denali_pci.c index d62aa5271753..2f77ee55e1bf 100644 --- a/drivers/mtd/nand/raw/denali_pci.c +++ b/drivers/mtd/nand/raw/denali_pci.c @@ -74,15 +74,15 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) return ret; } - denali->reg = ioremap_nocache(csr_base, csr_len); + denali->reg = ioremap(csr_base, csr_len); if (!denali->reg) { dev_err(&dev->dev, "Spectra: Unable to remap memory region\n"); return -ENOMEM; } - denali->host = ioremap_nocache(mem_base, mem_len); + denali->host = ioremap(mem_base, mem_len); if (!denali->host) { - dev_err(&dev->dev, "Spectra: ioremap_nocache failed!"); + dev_err(&dev->dev, "Spectra: ioremap failed!"); ret = -ENOMEM; goto out_unmap_reg; } diff --git a/drivers/mtd/nand/raw/fsl_upm.c b/drivers/mtd/nand/raw/fsl_upm.c index 1054cc070747..f31fae3a4c68 100644 --- 
a/drivers/mtd/nand/raw/fsl_upm.c +++ b/drivers/mtd/nand/raw/fsl_upm.c @@ -285,7 +285,7 @@ static int fun_probe(struct platform_device *ofdev) fun->wait_flags = FSL_UPM_WAIT_RUN_PATTERN | FSL_UPM_WAIT_WRITE_BYTE; - fun->io_base = devm_ioremap_nocache(&ofdev->dev, io_res.start, + fun->io_base = devm_ioremap(&ofdev->dev, io_res.start, resource_size(&io_res)); if (!fun->io_base) { ret = -ENOMEM; diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index c8e1a04ba384..9df2007b5e56 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -1302,7 +1302,7 @@ static int at91_can_probe(struct platform_device *pdev) goto exit_put; } - addr = ioremap_nocache(res->start, resource_size(res)); + addr = ioremap(res->start, resource_size(res)); if (!addr) { err = -ENOMEM; goto exit_release; diff --git a/drivers/net/can/cc770/cc770_isa.c b/drivers/net/can/cc770/cc770_isa.c index b9047d8110d5..194c86e0f340 100644 --- a/drivers/net/can/cc770/cc770_isa.c +++ b/drivers/net/can/cc770/cc770_isa.c @@ -175,7 +175,7 @@ static int cc770_isa_probe(struct platform_device *pdev) err = -EBUSY; goto exit; } - base = ioremap_nocache(mem[idx], iosize); + base = ioremap(mem[idx], iosize); if (!base) { err = -ENOMEM; goto exit_release; diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c index 1c4d32d1a542..d513fac50718 100644 --- a/drivers/net/can/sja1000/sja1000_isa.c +++ b/drivers/net/can/sja1000/sja1000_isa.c @@ -130,7 +130,7 @@ static int sja1000_isa_probe(struct platform_device *pdev) err = -EBUSY; goto exit; } - base = ioremap_nocache(mem[idx], iosize); + base = ioremap(mem[idx], iosize); if (!base) { err = -ENOMEM; goto exit_release; diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c index ff5a96f34085..d7222ba46622 100644 --- a/drivers/net/can/sja1000/sja1000_platform.c +++ b/drivers/net/can/sja1000/sja1000_platform.c @@ -229,7 +229,7 @@ static int sp_probe(struct platform_device *pdev) resource_size(res_mem), DRV_NAME)) return -EBUSY; - addr = devm_ioremap_nocache(&pdev->dev, res_mem->start, + addr = devm_ioremap(&pdev->dev, res_mem->start, resource_size(res_mem)); if (!addr) return -ENOMEM; diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c index 8242fb287cbb..d1ddf763b188 100644 --- a/drivers/net/can/softing/softing_main.c +++ b/drivers/net/can/softing/softing_main.c @@ -777,7 +777,7 @@ static int softing_pdev_probe(struct platform_device *pdev) goto platform_resource_failed; card->dpram_phys = pres->start; card->dpram_size = resource_size(pres); - card->dpram = ioremap_nocache(card->dpram_phys, card->dpram_size); + card->dpram = ioremap(card->dpram_phys, card->dpram_size); if (!card->dpram) { dev_alert(&card->pdev->dev, "dpram ioremap failed\n"); goto ioremap_failed; diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c index 80ef3e15bd22..9daef4c8feef 100644 --- a/drivers/net/ethernet/alacritech/slicoss.c +++ b/drivers/net/ethernet/alacritech/slicoss.c @@ -1791,7 +1791,7 @@ static int slic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) sdev->is_fiber = slic_is_fiber(pdev->subsystem_device); sdev->pdev = pdev; sdev->netdev = dev; - sdev->regs = ioremap_nocache(pci_resource_start(pdev, 0), + sdev->regs = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); if (!sdev->regs) { dev_err(&pdev->dev, "failed to map registers\n"); diff --git a/drivers/net/ethernet/altera/altera_tse_main.c 
b/drivers/net/ethernet/altera/altera_tse_main.c index 4cd53fc338b5..1671c1f36691 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -1332,10 +1332,10 @@ static int request_and_map(struct platform_device *pdev, const char *name, return -EBUSY; } - *ptr = devm_ioremap_nocache(device, region->start, + *ptr = devm_ioremap(device, region->start, resource_size(region)); if (*ptr == NULL) { - dev_err(device, "ioremap_nocache of %s failed!", name); + dev_err(device, "ioremap of %s failed!", name); return -ENOMEM; } diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index 255847e7fa5b..089a4fbc61a0 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -1150,7 +1150,7 @@ static int au1000_probe(struct platform_device *pdev) /* aup->mac is the base address of the MAC's registers */ aup->mac = (struct mac_reg *) - ioremap_nocache(base->start, resource_size(base)); + ioremap(base->start, resource_size(base)); if (!aup->mac) { dev_err(&pdev->dev, "failed to ioremap MAC registers\n"); err = -ENXIO; @@ -1158,7 +1158,7 @@ static int au1000_probe(struct platform_device *pdev) } /* Setup some variables for quick register address access */ - aup->enable = (u32 *)ioremap_nocache(macen->start, + aup->enable = (u32 *)ioremap(macen->start, resource_size(macen)); if (!aup->enable) { dev_err(&pdev->dev, "failed to ioremap MAC enable register\n"); @@ -1167,7 +1167,7 @@ static int au1000_probe(struct platform_device *pdev) } aup->mac_id = pdev->id; - aup->macdma = ioremap_nocache(macdma->start, resource_size(macdma)); + aup->macdma = ioremap(macdma->start, resource_size(macdma)); if (!aup->macdma) { dev_err(&pdev->dev, "failed to ioremap MACDMA registers\n"); err = -ENXIO; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index 2bb329606794..6b27af0db499 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c @@ -253,7 +253,7 @@ static int aq_pci_probe(struct pci_dev *pdev, goto err_free_aq_hw; } - self->aq_hw->mmio = ioremap_nocache(mmio_pa, reg_sz); + self->aq_hw->mmio = ioremap(mmio_pa, reg_sz); if (!self->aq_hw->mmio) { err = -EIO; goto err_free_aq_hw; diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c index 5ce2df482d8c..e95687a780fb 100644 --- a/drivers/net/ethernet/atheros/ag71xx.c +++ b/drivers/net/ethernet/atheros/ag71xx.c @@ -1679,8 +1679,7 @@ static int ag71xx_probe(struct platform_device *pdev) goto err_free; } - ag->mac_base = devm_ioremap_nocache(&pdev->dev, res->start, - resource_size(res)); + ag->mac_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!ag->mac_base) { err = -ENOMEM; goto err_free; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 741d865e4afc..1c26fa962233 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -14053,7 +14053,7 @@ static int bnx2x_init_one(struct pci_dev *pdev, rc = -ENOMEM; goto init_one_freemem; } - bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2), + bp->doorbells = ioremap(pci_resource_start(pdev, 2), doorbell_size); } if (!bp->doorbells) { diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index 80ff52527233..5b4568c2ad1c 100644 --- 
a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -2537,7 +2537,7 @@ static int sbmac_probe(struct platform_device *pldev) res = platform_get_resource(pldev, IORESOURCE_MEM, 0); BUG_ON(!res); - sbm_base = ioremap_nocache(res->start, resource_size(res)); + sbm_base = ioremap(res->start, resource_size(res)); if (!sbm_base) { printk(KERN_ERR "%s: unable to map device registers\n", dev_name(&pldev->dev)); diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index e338272931d1..01a50a4b2113 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -3477,7 +3477,7 @@ bnad_init(struct bnad *bnad, bnad->pcidev = pdev; bnad->mmio_start = pci_resource_start(pdev, 0); bnad->mmio_len = pci_resource_len(pdev, 0); - bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len); + bnad->bar0 = ioremap(bnad->mmio_start, bnad->mmio_len); if (!bnad->bar0) { dev_err(&pdev->dev, "ioremap for bar0 failed\n"); return -ENOMEM; diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 97ff8608f0ab..883cfa9c4b6d 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -3267,7 +3267,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_free_adapter; } - adapter->regs = ioremap_nocache(mmio_start, mmio_len); + adapter->regs = ioremap(mmio_start, mmio_len); if (!adapter->regs) { dev_err(&pdev->dev, "cannot map device registers\n"); err = -ENOMEM; diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c index fd3c2abf74b5..d305d1b24b0a 100644 --- a/drivers/net/ethernet/dec/tulip/de2104x.c +++ b/drivers/net/ethernet/dec/tulip/de2104x.c @@ -2039,7 +2039,7 @@ static int de_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } /* remap CSR registers */ - regs = ioremap_nocache(pciaddr, DE_REGS_SIZE); + regs = ioremap(pciaddr, DE_REGS_SIZE); if (!regs) { rc = -EIO; pr_err("Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n", diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index 66406da16b60..a817ca661c1f 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -1087,7 +1087,7 @@ static int ethoc_probe(struct platform_device *pdev) priv = netdev_priv(netdev); priv->netdev = netdev; - priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr, + priv->iobase = devm_ioremap(&pdev->dev, netdev->base_addr, resource_size(mmio)); if (!priv->iobase) { dev_err(&pdev->dev, "cannot remap I/O memory space\n"); @@ -1096,7 +1096,7 @@ static int ethoc_probe(struct platform_device *pdev) } if (netdev->mem_end) { - priv->membase = devm_ioremap_nocache(&pdev->dev, + priv->membase = devm_ioremap(&pdev->dev, netdev->mem_start, resource_size(mem)); if (!priv->membase) { dev_err(&pdev->dev, "cannot remap memory space\n"); diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c index 6436a98c5953..22f5887578b2 100644 --- a/drivers/net/ethernet/i825xx/sni_82596.c +++ b/drivers/net/ethernet/i825xx/sni_82596.c @@ -91,10 +91,10 @@ static int sni_82596_probe(struct platform_device *dev) idprom = platform_get_resource(dev, IORESOURCE_MEM, 2); if (!res || !ca || !options || !idprom) return -ENODEV; - mpu_addr = ioremap_nocache(res->start, 4); + mpu_addr = ioremap(res->start, 4); if (!mpu_addr) return -ENOMEM; - ca_addr = 
ioremap_nocache(ca->start, 4); + ca_addr = ioremap(ca->start, 4); if (!ca_addr) goto probe_failed_free_mpu; @@ -110,7 +110,7 @@ static int sni_82596_probe(struct platform_device *dev) netdevice->base_addr = res->start; netdevice->irq = platform_get_irq(dev, 0); - eth_addr = ioremap_nocache(idprom->start, 0x10); + eth_addr = ioremap(idprom->start, 0x10); if (!eth_addr) goto probe_failed; diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index f98d9d627c71..f1d84921e42b 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -1043,7 +1043,7 @@ static int korina_probe(struct platform_device *pdev) r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_regs"); dev->base_addr = r->start; - lp->eth_regs = ioremap_nocache(r->start, resource_size(r)); + lp->eth_regs = ioremap(r->start, resource_size(r)); if (!lp->eth_regs) { printk(KERN_ERR DRV_NAME ": cannot remap registers\n"); rc = -ENXIO; @@ -1051,7 +1051,7 @@ static int korina_probe(struct platform_device *pdev) } r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_rx"); - lp->rx_dma_regs = ioremap_nocache(r->start, resource_size(r)); + lp->rx_dma_regs = ioremap(r->start, resource_size(r)); if (!lp->rx_dma_regs) { printk(KERN_ERR DRV_NAME ": cannot remap Rx DMA registers\n"); rc = -ENXIO; @@ -1059,7 +1059,7 @@ static int korina_probe(struct platform_device *pdev) } r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_tx"); - lp->tx_dma_regs = ioremap_nocache(r->start, resource_size(r)); + lp->tx_dma_regs = ioremap(r->start, resource_size(r)); if (!lp->tx_dma_regs) { printk(KERN_ERR DRV_NAME ": cannot remap Tx DMA registers\n"); rc = -ENXIO; diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 578c31697cc0..2d0c52f7106b 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -642,7 +642,7 @@ ltq_etop_probe(struct platform_device *pdev) goto err_out; } - ltq_etop_membase = devm_ioremap_nocache(&pdev->dev, + ltq_etop_membase = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!ltq_etop_membase) { dev_err(&pdev->dev, "failed to remap etop engine %d\n", diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 8ca15958e752..97f270d30cce 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -3932,7 +3932,7 @@ static int skge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) spin_lock_init(&hw->phy_lock); tasklet_init(&hw->phy_task, skge_extirq, (unsigned long) hw); - hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); + hw->regs = ioremap(pci_resource_start(pdev, 0), 0x4000); if (!hw->regs) { dev_err(&pdev->dev, "cannot map device registers\n"); goto err_out_free_hw; diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index acd1cba987fb..ebfd0ceac884 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -5022,7 +5022,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->pdev = pdev; sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev)); - hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); + hw->regs = ioremap(pci_resource_start(pdev, 0), 0x4000); if (!hw->regs) { dev_err(&pdev->dev, "cannot map device registers\n"); goto err_out_free_hw; diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c index 
be5f62f06785..8e24c7acf79b 100644 --- a/drivers/net/ethernet/natsemi/ns83820.c +++ b/drivers/net/ethernet/natsemi/ns83820.c @@ -1937,7 +1937,7 @@ static int ns83820_init_one(struct pci_dev *pci_dev, pci_set_master(pci_dev); addr = pci_resource_start(pci_dev, 1); - dev->base = ioremap_nocache(addr, PAGE_SIZE); + dev->base = ioremap(addr, PAGE_SIZE); dev->tx_descs = pci_alloc_consistent(pci_dev, 4 * DESC_SIZE * NR_TX_DESC, &dev->tx_phy_descs); dev->rx_info.descs = pci_alloc_consistent(pci_dev, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c index e4977cdf7678..c0e2f4394aef 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c @@ -106,7 +106,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, * first NFP_NET_CFG_BAR_SZ of the BAR. This keeps the code * the identical for PF and VF drivers. */ - ctrl_bar = ioremap_nocache(pci_resource_start(pdev, NFP_NET_CTRL_BAR), + ctrl_bar = ioremap(pci_resource_start(pdev, NFP_NET_CTRL_BAR), NFP_NET_CFG_BAR_SZ); if (!ctrl_bar) { dev_err(&pdev->dev, @@ -200,7 +200,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, bar_sz = (rx_bar_off + rx_bar_sz) - bar_off; map_addr = pci_resource_start(pdev, tx_bar_no) + bar_off; - vf->q_bar = ioremap_nocache(map_addr, bar_sz); + vf->q_bar = ioremap(map_addr, bar_sz); if (!vf->q_bar) { nn_err(nn, "Failed to map resource %d\n", tx_bar_no); err = -EIO; @@ -216,7 +216,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, /* TX queues */ map_addr = pci_resource_start(pdev, tx_bar_no) + tx_bar_off; - nn->tx_bar = ioremap_nocache(map_addr, tx_bar_sz); + nn->tx_bar = ioremap(map_addr, tx_bar_sz); if (!nn->tx_bar) { nn_err(nn, "Failed to map resource %d\n", tx_bar_no); err = -EIO; @@ -225,7 +225,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, /* RX queues */ map_addr = pci_resource_start(pdev, rx_bar_no) + rx_bar_off; - nn->rx_bar = ioremap_nocache(map_addr, rx_bar_sz); + nn->rx_bar = ioremap(map_addr, rx_bar_sz); if (!nn->rx_bar) { nn_err(nn, "Failed to map resource %d\n", rx_bar_no); err = -EIO; diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c index 85d46f206b3c..b454db283aef 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c @@ -611,7 +611,7 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) /* Configure, and lock, BAR0.0 for General Target use (MSI-X SRAM) */ bar = &nfp->bar[0]; if (nfp_bar_resource_len(bar) >= NFP_PCI_MIN_MAP_SIZE) - bar->iomem = ioremap_nocache(nfp_bar_resource_start(bar), + bar->iomem = ioremap(nfp_bar_resource_start(bar), nfp_bar_resource_len(bar)); if (bar->iomem) { int pf; @@ -677,7 +677,7 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) } bar = &nfp->bar[4 + i]; - bar->iomem = ioremap_nocache(nfp_bar_resource_start(bar), + bar->iomem = ioremap(nfp_bar_resource_start(bar), nfp_bar_resource_len(bar)); if (bar->iomem) { msg += snprintf(msg, end - msg, @@ -858,7 +858,7 @@ static int nfp6000_area_acquire(struct nfp_cpp_area *area) priv->iomem = priv->bar->iomem + priv->bar_offset; else /* Must have been too big. Sub-allocate. 
*/ - priv->iomem = ioremap_nocache(priv->phys, priv->size); + priv->iomem = ioremap(priv->phys, priv->size); if (IS_ERR_OR_NULL(priv->iomem)) { dev_err(nfp->dev, "Can't ioremap() a %d byte region of BAR %d\n", diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 397f3bc7da3b..52113b7529d6 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -1229,7 +1229,7 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx) } /* Shrink the original UC mapping of the memory BAR */ - membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size); + membase = ioremap(efx->membase_phys, uc_mem_map_size); if (!membase) { netif_err(efx, probe, efx->net_dev, "could not shrink memory BAR to %x\n", diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c index ab0ce62f81c1..b0d76bc19673 100644 --- a/drivers/net/ethernet/sfc/efx_common.c +++ b/drivers/net/ethernet/sfc/efx_common.c @@ -1018,7 +1018,7 @@ int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask, goto fail3; } - efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size); + efx->membase = ioremap(efx->membase_phys, mem_map_size); if (!efx->membase) { netif_err(efx, probe, efx->net_dev, "could not map memory BAR at %llx+%x\n", diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c index bee4cd9d7135..42bcd34fc508 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.c +++ b/drivers/net/ethernet/sfc/falcon/efx.c @@ -1265,7 +1265,7 @@ static int ef4_init_io(struct ef4_nic *efx) rc = -EIO; goto fail3; } - efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size); + efx->membase = ioremap(efx->membase_phys, mem_map_size); if (!efx->membase) { netif_err(efx, probe, efx->net_dev, "could not map memory BAR at %llx+%x\n", diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 7dc2154ccdad..49a6a9167af4 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -2445,7 +2445,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev) pdata = netdev_priv(dev); dev->irq = irq; - pdata->ioaddr = ioremap_nocache(res->start, res_size); + pdata->ioaddr = ioremap(res->start, res_size); if (!pdata->ioaddr) { retval = -ENOMEM; goto out_ioremap_fail; diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 432645e86495..d7a144b4a09f 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -2019,7 +2019,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device, goto quit; } - efuse = devm_ioremap_nocache(dev, res.start, size); + efuse = devm_ioremap(dev, res.start, size); if (!efuse) { dev_err(dev, "could not map resource\n"); devm_release_mem_region(dev, res.start, size); diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index f5bbb19d62d0..6f11f52c9a9e 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -1191,7 +1191,7 @@ static int temac_probe(struct platform_device *pdev) /* map device registers */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - lp->regs = devm_ioremap_nocache(&pdev->dev, res->start, + lp->regs = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (IS_ERR(lp->regs)) { dev_err(&pdev->dev, "could not map TEMAC registers\n"); @@ -1285,7 +1285,7 @@ static int temac_probe(struct platform_device *pdev) } 
else if (pdata) { /* 2nd memory resource specifies DMA registers */ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - lp->sdma_regs = devm_ioremap_nocache(&pdev->dev, res->start, + lp->sdma_regs = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (IS_ERR(lp->sdma_regs)) { dev_err(&pdev->dev, diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c index 56b7791911bf..077c68498f04 100644 --- a/drivers/net/fddi/defxx.c +++ b/drivers/net/fddi/defxx.c @@ -614,7 +614,7 @@ static int dfx_register(struct device *bdev) /* Set up I/O base address. */ if (dfx_use_mmio) { - bp->base.mem = ioremap_nocache(bar_start[0], bar_len[0]); + bp->base.mem = ioremap(bar_start[0], bar_len[0]); if (!bp->base.mem) { printk(KERN_ERR "%s: Cannot map MMIO\n", print_name); err = -ENOMEM; diff --git a/drivers/net/fddi/defza.c b/drivers/net/fddi/defza.c index 060712c666bf..eaf85db53a5e 100644 --- a/drivers/net/fddi/defza.c +++ b/drivers/net/fddi/defza.c @@ -1318,7 +1318,7 @@ static int fza_probe(struct device *bdev) } /* MMIO mapping setup. */ - mmio = ioremap_nocache(start, len); + mmio = ioremap(start, len); if (!mmio) { pr_err("%s: cannot map MMIO\n", fp->name); ret = -ENOMEM; diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c index 8a4fbfacad7e..065bb0a40b1d 100644 --- a/drivers/net/fjes/fjes_hw.c +++ b/drivers/net/fjes/fjes_hw.c @@ -40,7 +40,7 @@ static u8 *fjes_hw_iomap(struct fjes_hw *hw) return NULL; } - base = (u8 *)ioremap_nocache(hw->hw_res.start, hw->hw_res.size); + base = (u8 *)ioremap(hw->hw_res.start, hw->hw_res.size); return base; } diff --git a/drivers/net/fjes/fjes_trace.h b/drivers/net/fjes/fjes_trace.h index c611b6a80b20..9237b69d8e21 100644 --- a/drivers/net/fjes/fjes_trace.h +++ b/drivers/net/fjes/fjes_trace.h @@ -28,7 +28,7 @@ TRACE_EVENT(fjes_hw_issue_request_command, __field(u8, cs_busy) __field(u8, cs_complete) __field(int, timeout) - __field(int, ret); + __field(int, ret) ), TP_fast_assign( __entry->cr_req = cr->bits.req_code; diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c index 34e94ee806d6..23f93f1c815d 100644 --- a/drivers/net/wan/wanxl.c +++ b/drivers/net/wan/wanxl.c @@ -635,7 +635,7 @@ static int wanxl_pci_init_one(struct pci_dev *pdev, /* set up PLX mapping */ plx_phy = pci_resource_start(pdev, 0); - card->plx = ioremap_nocache(plx_phy, 0x70); + card->plx = ioremap(plx_phy, 0x70); if (!card->plx) { pr_err("ioremap() failed\n"); wanxl_pci_remove_one(pdev); @@ -704,7 +704,7 @@ static int wanxl_pci_init_one(struct pci_dev *pdev, PCI_DMA_FROMDEVICE); } - mem = ioremap_nocache(mem_phy, PDM_OFFSET + sizeof(firmware)); + mem = ioremap(mem_phy, PDM_OFFSET + sizeof(firmware)); if (!mem) { pr_err("ioremap() failed\n"); wanxl_pci_remove_one(pdev); diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c index f80854180e21..ed87bc00f2aa 100644 --- a/drivers/net/wireless/ath/ath10k/ahb.c +++ b/drivers/net/wireless/ath/ath10k/ahb.c @@ -458,7 +458,7 @@ static int ath10k_ahb_resource_init(struct ath10k *ar) ar_ahb->mem_len = resource_size(res); - ar_ahb->gcc_mem = ioremap_nocache(ATH10K_GCC_REG_BASE, + ar_ahb->gcc_mem = ioremap(ATH10K_GCC_REG_BASE, ATH10K_GCC_REG_SIZE); if (!ar_ahb->gcc_mem) { ath10k_err(ar, "gcc mem ioremap error\n"); @@ -466,7 +466,7 @@ static int ath10k_ahb_resource_init(struct ath10k *ar) goto err_mem_unmap; } - ar_ahb->tcsr_mem = ioremap_nocache(ATH10K_TCSR_REG_BASE, + ar_ahb->tcsr_mem = ioremap(ATH10K_TCSR_REG_BASE, ATH10K_TCSR_REG_SIZE); if (!ar_ahb->tcsr_mem) { ath10k_err(ar, "tcsr mem 
ioremap error\n"); diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h index ab916459d237..842e42ec814f 100644 --- a/drivers/net/wireless/ath/ath10k/trace.h +++ b/drivers/net/wireless/ath/ath10k/trace.h @@ -239,7 +239,7 @@ TRACE_EVENT(ath10k_wmi_dbglog, TP_STRUCT__entry( __string(device, dev_name(ar->dev)) __string(driver, dev_driver_string(ar->dev)) - __field(u8, hw_type); + __field(u8, hw_type) __field(size_t, buf_len) __dynamic_array(u8, buf, buf_len) ), @@ -269,7 +269,7 @@ TRACE_EVENT(ath10k_htt_pktlog, TP_STRUCT__entry( __string(device, dev_name(ar->dev)) __string(driver, dev_driver_string(ar->dev)) - __field(u8, hw_type); + __field(u8, hw_type) __field(u16, buf_len) __dynamic_array(u8, pktlog, buf_len) ), @@ -435,7 +435,7 @@ TRACE_EVENT(ath10k_htt_rx_desc, TP_STRUCT__entry( __string(device, dev_name(ar->dev)) __string(driver, dev_driver_string(ar->dev)) - __field(u8, hw_type); + __field(u8, hw_type) __field(u16, len) __dynamic_array(u8, rxdesc, len) ), diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c index c0794f5988b3..2c9cec8b53d9 100644 --- a/drivers/net/wireless/ath/ath5k/ahb.c +++ b/drivers/net/wireless/ath/ath5k/ahb.c @@ -106,7 +106,7 @@ static int ath_ahb_probe(struct platform_device *pdev) goto err_out; } - mem = ioremap_nocache(res->start, resource_size(res)); + mem = ioremap(res->start, resource_size(res)); if (mem == NULL) { dev_err(&pdev->dev, "ioremap failed\n"); ret = -ENOMEM; diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c index 63019c3de034..cdefb8e2daf1 100644 --- a/drivers/net/wireless/ath/ath9k/ahb.c +++ b/drivers/net/wireless/ath/ath9k/ahb.c @@ -92,7 +92,7 @@ static int ath_ahb_probe(struct platform_device *pdev) return -ENXIO; } - mem = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res)); + mem = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (mem == NULL) { dev_err(&pdev->dev, "ioremap failed\n"); return -ENOMEM; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 7ac72804e285..5105f62767fb 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -1643,8 +1643,8 @@ static int brcmf_pcie_get_resource(struct brcmf_pciedev_info *devinfo) return -EINVAL; } - devinfo->regs = ioremap_nocache(bar0_addr, BRCMF_PCIE_REG_MAP_SIZE); - devinfo->tcm = ioremap_nocache(bar1_addr, bar1_size); + devinfo->regs = ioremap(bar0_addr, BRCMF_PCIE_REG_MAP_SIZE); + devinfo->tcm = ioremap(bar1_addr, bar1_size); if (!devinfo->regs || !devinfo->tcm) { brcmf_err(bus, "ioremap() failed (%p,%p)\n", devinfo->regs, diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c index 3c505636b7cc..536cd729c086 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c @@ -6168,7 +6168,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev, ioaddr = pci_iomap(pci_dev, 0, 0); if (!ioaddr) { printk(KERN_WARNING DRV_NAME - "Error calling ioremap_nocache.\n"); + "Error calling ioremap.\n"); err = -EIO; goto fail; } diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c index 53b5a4b2dcc5..59c187898132 100644 --- a/drivers/net/wireless/mediatek/mt76/agg-rx.c +++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c @@ -281,8 +281,8 @@ void 
mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno) { struct mt76_rx_tid *tid = NULL; - rcu_swap_protected(wcid->aggr[tidno], tid, - lockdep_is_held(&dev->mutex)); + tid = rcu_replace_pointer(wcid->aggr[tidno], tid, + lockdep_is_held(&dev->mutex)); if (tid) { mt76_rx_aggr_shutdown(dev, tid); kfree_rcu(tid, rcu_head); diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig index c6439638a419..b9358db83e96 100644 --- a/drivers/nvme/host/Kconfig +++ b/drivers/nvme/host/Kconfig @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only config NVME_CORE tristate + select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY config BLK_DEV_NVME tristate "NVM Express block device" diff --git a/drivers/opp/core.c b/drivers/opp/core.c index be7a7d332332..ba43e6a3dc0a 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -988,7 +988,6 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index) BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head); INIT_LIST_HEAD(&opp_table->opp_list); kref_init(&opp_table->kref); - kref_init(&opp_table->list_kref); /* Secure the device table modification */ list_add(&opp_table->node, &opp_tables); @@ -1072,33 +1071,6 @@ static void _opp_table_kref_release(struct kref *kref) mutex_unlock(&opp_table_lock); } -void _opp_remove_all_static(struct opp_table *opp_table) -{ - struct dev_pm_opp *opp, *tmp; - - list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) { - if (!opp->dynamic) - dev_pm_opp_put(opp); - } - - opp_table->parsed_static_opps = false; -} - -static void _opp_table_list_kref_release(struct kref *kref) -{ - struct opp_table *opp_table = container_of(kref, struct opp_table, - list_kref); - - _opp_remove_all_static(opp_table); - mutex_unlock(&opp_table_lock); -} - -void _put_opp_list_kref(struct opp_table *opp_table) -{ - kref_put_mutex(&opp_table->list_kref, _opp_table_list_kref_release, - &opp_table_lock); -} - void dev_pm_opp_put_opp_table(struct opp_table *opp_table) { kref_put_mutex(&opp_table->kref, _opp_table_kref_release, @@ -1202,6 +1174,24 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq) } EXPORT_SYMBOL_GPL(dev_pm_opp_remove); +void _opp_remove_all_static(struct opp_table *opp_table) +{ + struct dev_pm_opp *opp, *tmp; + + mutex_lock(&opp_table->lock); + + if (!opp_table->parsed_static_opps || --opp_table->parsed_static_opps) + goto unlock; + + list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) { + if (!opp->dynamic) + dev_pm_opp_put_unlocked(opp); + } + +unlock: + mutex_unlock(&opp_table->lock); +} + /** * dev_pm_opp_remove_all_dynamic() - Remove all dynamically created OPPs * @dev: device for which we do this operation @@ -2276,7 +2266,7 @@ void _dev_pm_opp_find_and_remove_table(struct device *dev) return; } - _put_opp_list_kref(opp_table); + _opp_remove_all_static(opp_table); /* Drop reference taken by _find_opp_table() */ dev_pm_opp_put_opp_table(opp_table); diff --git a/drivers/opp/of.c b/drivers/opp/of.c index 1cbb58240b80..9cd8f0adacae 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -658,17 +658,15 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table) struct dev_pm_opp *opp; /* OPP table is already initialized for the device */ + mutex_lock(&opp_table->lock); if (opp_table->parsed_static_opps) { - kref_get(&opp_table->list_kref); + opp_table->parsed_static_opps++; + mutex_unlock(&opp_table->lock); return 0; } - /* - * Re-initialize list_kref every time we add static OPPs to the OPP - * table as the reference count may be 0 after the 
last tie static OPPs - * were removed. - */ - kref_init(&opp_table->list_kref); + opp_table->parsed_static_opps = 1; + mutex_unlock(&opp_table->lock); /* We have opp-table node now, iterate over it and add OPPs */ for_each_available_child_of_node(opp_table->np, np) { @@ -678,15 +676,17 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table) dev_err(dev, "%s: Failed to add OPP, %d\n", __func__, ret); of_node_put(np); - return ret; + goto remove_static_opp; } else if (opp) { count++; } } /* There should be one of more OPP defined */ - if (WARN_ON(!count)) - return -ENOENT; + if (WARN_ON(!count)) { + ret = -ENOENT; + goto remove_static_opp; + } list_for_each_entry(opp, &opp_table->opp_list, node) pstate_count += !!opp->pstate; @@ -695,15 +695,19 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table) if (pstate_count && pstate_count != count) { dev_err(dev, "Not all nodes have performance state set (%d: %d)\n", count, pstate_count); - return -ENOENT; + ret = -ENOENT; + goto remove_static_opp; } if (pstate_count) opp_table->genpd_performance_state = true; - opp_table->parsed_static_opps = true; - return 0; + +remove_static_opp: + _opp_remove_all_static(opp_table); + + return ret; } /* Initializes OPP tables based on old-deprecated bindings */ @@ -738,6 +742,7 @@ static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table) if (ret) { dev_err(dev, "%s: Failed to add OPP %ld (%d)\n", __func__, freq, ret); + _opp_remove_all_static(opp_table); return ret; } nr -= 2; diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h index 01a500e2c40a..d14e27102730 100644 --- a/drivers/opp/opp.h +++ b/drivers/opp/opp.h @@ -127,11 +127,10 @@ enum opp_table_access { * @dev_list: list of devices that share these OPPs * @opp_list: table of opps * @kref: for reference count of the table. - * @list_kref: for reference count of the OPP list. * @lock: mutex protecting the opp_list and dev_list. * @np: struct device_node pointer for opp's DT node. * @clock_latency_ns_max: Max clock latency in nanoseconds. - * @parsed_static_opps: True if OPPs are initialized from DT. + * @parsed_static_opps: Count of devices for which OPPs are initialized from DT. * @shared_opp: OPP is shared between multiple devices. * @suspend_opp: Pointer to OPP to be used during device suspend. * @genpd_virt_dev_lock: Mutex protecting the genpd virtual device pointers. 
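The OPP rework in the hunks above drops the separate list_kref and instead turns parsed_static_opps into a counter guarded by the table mutex: each user of the static OPPs bumps the count, and the entries are freed only when the last user drops it. Below is a minimal user-space sketch of that counting pattern; struct table, parse_static_entries() and free_static_entries() are hypothetical stand-ins for illustration, not the kernel API.

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-in for the OPP table, reduced to the count. */
struct table {
	pthread_mutex_t lock;
	unsigned int parsed_static;	/* users of the static entries */
};

static int parse_static_entries(struct table *t)
{
	printf("parsing static entries\n");
	return 0;
}

static void free_static_entries(struct table *t)
{
	printf("freeing static entries\n");
}

/* First caller parses the entries; later callers only bump the count. */
static int table_add_static(struct table *t)
{
	pthread_mutex_lock(&t->lock);
	if (t->parsed_static++) {	/* already parsed: just count us */
		pthread_mutex_unlock(&t->lock);
		return 0;
	}
	pthread_mutex_unlock(&t->lock);
	return parse_static_entries(t);
}

/* Entries go away only when the last user drops its count. */
static void table_remove_static(struct table *t)
{
	pthread_mutex_lock(&t->lock);
	if (t->parsed_static && --t->parsed_static == 0)
		free_static_entries(t);
	pthread_mutex_unlock(&t->lock);
}

int main(void)
{
	struct table t = { PTHREAD_MUTEX_INITIALIZER, 0 };

	table_add_static(&t);		/* parses, count = 1 */
	table_add_static(&t);		/* count = 2 */
	table_remove_static(&t);	/* count = 1, nothing freed */
	table_remove_static(&t);	/* count = 0, entries freed */
	return 0;
}

As in the kernel change, the count is taken before parsing, so a failed parse has to drop it again; the of.c hunks above do exactly that by routing their error paths through _opp_remove_all_static().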
@@ -167,7 +166,6 @@ struct opp_table { struct list_head dev_list; struct list_head opp_list; struct kref kref; - struct kref list_kref; struct mutex lock; struct device_node *np; @@ -176,7 +174,7 @@ struct opp_table { /* For backward compatibility with v1 bindings */ unsigned int voltage_tolerance_v1; - bool parsed_static_opps; + unsigned int parsed_static_opps; enum opp_table_access shared_opp; struct dev_pm_opp *suspend_opp; diff --git a/drivers/opp/ti-opp-supply.c b/drivers/opp/ti-opp-supply.c index 1c69c404df11..e3357e91decb 100644 --- a/drivers/opp/ti-opp-supply.c +++ b/drivers/opp/ti-opp-supply.c @@ -90,7 +90,7 @@ static int _store_optimized_voltages(struct device *dev, goto out_map; } - base = ioremap_nocache(res->start, resource_size(res)); + base = ioremap(res->start, resource_size(res)); if (!base) { dev_err(dev, "Unable to map Efuse registers\n"); ret = -ENOMEM; diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index ad290f79983b..a5507f75b524 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c @@ -1534,7 +1534,7 @@ static int __init ccio_probe(struct parisc_device *dev) *ioc_p = ioc; ioc->hw_path = dev->hw_path; - ioc->ioc_regs = ioremap_nocache(dev->hpa.start, 4096); + ioc->ioc_regs = ioremap(dev->hpa.start, 4096); if (!ioc->ioc_regs) { kfree(ioc); return -ENOMEM; diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c index 2f1cac89ddf5..889d7ce282eb 100644 --- a/drivers/parisc/dino.c +++ b/drivers/parisc/dino.c @@ -974,7 +974,7 @@ static int __init dino_probe(struct parisc_device *dev) } dino_dev->hba.dev = dev; - dino_dev->hba.base_addr = ioremap_nocache(hpa, 4096); + dino_dev->hba.base_addr = ioremap(hpa, 4096); dino_dev->hba.lmmio_space_offset = PCI_F_EXTEND; spin_lock_init(&dino_dev->dinosaur_pen); dino_dev->hba.iommu = ccio_get_iommu(dev); diff --git a/drivers/parisc/eisa.c b/drivers/parisc/eisa.c index 37a2c5db761d..9d00a24277aa 100644 --- a/drivers/parisc/eisa.c +++ b/drivers/parisc/eisa.c @@ -354,10 +354,10 @@ static int __init eisa_probe(struct parisc_device *dev) eisa_dev.eeprom_addr = MIRAGE_EEPROM_BASE_ADDR; } } - eisa_eeprom_addr = ioremap_nocache(eisa_dev.eeprom_addr, HPEE_MAX_LENGTH); + eisa_eeprom_addr = ioremap(eisa_dev.eeprom_addr, HPEE_MAX_LENGTH); if (!eisa_eeprom_addr) { result = -ENOMEM; - printk(KERN_ERR "EISA: ioremap_nocache failed!\n"); + printk(KERN_ERR "EISA: ioremap failed!\n"); goto error_free_irq; } result = eisa_enumerator(eisa_dev.eeprom_addr, &eisa_dev.hba.io_space, diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c index 32f506f00c89..8a3b0c3a1e92 100644 --- a/drivers/parisc/iosapic.c +++ b/drivers/parisc/iosapic.c @@ -927,7 +927,7 @@ void *iosapic_register(unsigned long hpa) return NULL; } - isi->addr = ioremap_nocache(hpa, 4096); + isi->addr = ioremap(hpa, 4096); isi->isi_hpa = hpa; isi->isi_version = iosapic_rd_version(isi); isi->isi_num_vectors = IOSAPIC_IRDT_MAX_ENTRY(isi->isi_version) + 1; diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c index a99e385c68bd..732b516c7bf8 100644 --- a/drivers/parisc/lba_pci.c +++ b/drivers/parisc/lba_pci.c @@ -1134,7 +1134,7 @@ lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev) ** Postable I/O port space is per PCI host adapter. 
** base of 64MB PIOP region */ - lba_dev->iop_base = ioremap_nocache(p->start, 64 * 1024 * 1024); + lba_dev->iop_base = ioremap(p->start, 64 * 1024 * 1024); sprintf(lba_dev->hba.io_name, "PCI%02x Ports", (int)lba_dev->hba.bus_num.start); @@ -1476,7 +1476,7 @@ lba_driver_probe(struct parisc_device *dev) u32 func_class; void *tmp_obj; char *version; - void __iomem *addr = ioremap_nocache(dev->hpa.start, 4096); + void __iomem *addr = ioremap(dev->hpa.start, 4096); int max; /* Read HW Rev First */ @@ -1575,7 +1575,7 @@ lba_driver_probe(struct parisc_device *dev) } else { if (!astro_iop_base) { /* Sprockets PDC uses NPIOP region */ - astro_iop_base = ioremap_nocache(LBA_PORT_BASE, 64 * 1024); + astro_iop_base = ioremap(LBA_PORT_BASE, 64 * 1024); pci_port = &lba_astro_port_ops; } @@ -1693,7 +1693,7 @@ void __init lba_init(void) */ void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask) { - void __iomem * base_addr = ioremap_nocache(lba->hpa.start, 4096); + void __iomem * base_addr = ioremap(lba->hpa.start, 4096); imask <<= 2; /* adjust for hints - 2 more bits */ diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index de8e4e347249..7e112829d250 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c @@ -1513,7 +1513,7 @@ sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num) static void __iomem *ioc_remap(struct sba_device *sba_dev, unsigned int offset) { - return ioremap_nocache(sba_dev->dev->hpa.start + offset, SBA_FUNC_SIZE); + return ioremap(sba_dev->dev->hpa.start + offset, SBA_FUNC_SIZE); } static void sba_hw_init(struct sba_device *sba_dev) @@ -1883,7 +1883,7 @@ static int __init sba_driver_callback(struct parisc_device *dev) u32 func_class; int i; char *version; - void __iomem *sba_addr = ioremap_nocache(dev->hpa.start, SBA_FUNC_SIZE); + void __iomem *sba_addr = ioremap(dev->hpa.start, SBA_FUNC_SIZE); #ifdef CONFIG_PROC_FS struct proc_dir_entry *root; #endif diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c index b20651cea09f..9bf7fa99b103 100644 --- a/drivers/pci/controller/dwc/pci-dra7xx.c +++ b/drivers/pci/controller/dwc/pci-dra7xx.c @@ -719,7 +719,7 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf"); - base = devm_ioremap_nocache(dev, res->start, resource_size(res)); + base = devm_ioremap(dev, res->start, resource_size(res)); if (!base) return -ENOMEM; diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index 3dd2e2697294..cfeccd7e9fff 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -434,7 +434,7 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no, tbl_addr += (tbl_offset + ((interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE)); tbl_addr &= PCI_BASE_ADDRESS_MEM_MASK; - msix_tbl = ioremap_nocache(ep->phys_base + tbl_addr, + msix_tbl = ioremap(ep->phys_base + tbl_addr, PCI_MSIX_ENTRY_SIZE); if (!msix_tbl) return -EINVAL; diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index c7709e49f0e4..6b43a5455c7a 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -688,7 +688,7 @@ static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries) table_offset &= PCI_MSIX_TABLE_OFFSET; phys_addr = pci_resource_start(dev, bir) + table_offset; - return ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE); + return ioremap(phys_addr, nr_entries * 
PCI_MSIX_ENTRY_SIZE); } static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index e87196cc1a7f..df21e3227b57 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -184,7 +184,7 @@ void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar) pci_warn(pdev, "can't ioremap BAR %d: %pR\n", bar, res); return NULL; } - return ioremap_nocache(res->start, resource_size(res)); + return ioremap(res->start, resource_size(res)); } EXPORT_SYMBOL_GPL(pci_ioremap_bar); diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index fbeb9f73ef28..a3a1a0ea64f4 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -1571,7 +1571,7 @@ static void asus_hides_smbus_lpc_ich6_suspend(struct pci_dev *dev) pci_read_config_dword(dev, 0xF0, &rcba); /* use bits 31:14, 16 kB aligned */ - asus_rcba_base = ioremap_nocache(rcba & 0xFFFFC000, 0x4000); + asus_rcba_base = ioremap(rcba & 0xFFFFC000, 0x4000); if (asus_rcba_base == NULL) return; } @@ -4784,7 +4784,7 @@ static int pci_quirk_enable_intel_lpc_acs(struct pci_dev *dev) if (!(rcba & INTEL_LPC_RCBA_ENABLE)) return -EINVAL; - rcba_mem = ioremap_nocache(rcba & INTEL_LPC_RCBA_MASK, + rcba_mem = ioremap(rcba & INTEL_LPC_RCBA_MASK, PAGE_ALIGN(INTEL_UPDCR_REG)); if (!rcba_mem) return -ENOMEM; diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c index 55083c67b2bb..95dca2cb5265 100644 --- a/drivers/perf/fsl_imx8_ddr_perf.c +++ b/drivers/perf/fsl_imx8_ddr_perf.c @@ -633,13 +633,17 @@ static int ddr_perf_probe(struct platform_device *pdev) if (ret < 0) { dev_err(&pdev->dev, "cpuhp_setup_state_multi failed\n"); - goto ddr_perf_err; + goto cpuhp_state_err; } pmu->cpuhp_state = ret; /* Register the pmu instance for cpu hotplug */ - cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node); + ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node); + if (ret) { + dev_err(&pdev->dev, "Error %d registering hotplug\n", ret); + goto cpuhp_instance_err; + } /* Request irq */ irq = of_irq_get(np, 0); @@ -673,9 +677,10 @@ static int ddr_perf_probe(struct platform_device *pdev) return 0; ddr_perf_err: - if (pmu->cpuhp_state) - cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node); - + cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node); +cpuhp_instance_err: + cpuhp_remove_multi_state(pmu->cpuhp_state); +cpuhp_state_err: ida_simple_remove(&ddr_ida, pmu->id); dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret); return ret; @@ -686,6 +691,7 @@ static int ddr_perf_remove(struct platform_device *pdev) struct ddr_pmu *pmu = platform_get_drvdata(pdev); cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node); + cpuhp_remove_multi_state(pmu->cpuhp_state); irq_set_affinity_hint(pmu->irq, NULL); perf_pmu_unregister(&pmu->pmu); diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c index 96183e31b96a..584de8f807cc 100644 --- a/drivers/perf/hisilicon/hisi_uncore_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c @@ -337,38 +337,44 @@ void hisi_uncore_pmu_disable(struct pmu *pmu) hisi_pmu->ops->stop_counters(hisi_pmu); } + /* - * Read Super CPU cluster and CPU cluster ID from MPIDR_EL1. 
- * If multi-threading is supported, On Huawei Kunpeng 920 SoC whose cpu - * core is tsv110, CCL_ID is the low 3-bits in MPIDR[Aff2] and SCCL_ID - * is the upper 5-bits of Aff2 field; while for other cpu types, SCCL_ID - * is in MPIDR[Aff3] and CCL_ID is in MPIDR[Aff2], if not, SCCL_ID - * is in MPIDR[Aff2] and CCL_ID is in MPIDR[Aff1]. + * The Super CPU Cluster (SCCL) and CPU Cluster (CCL) IDs can be + * determined from the MPIDR_EL1, but the encoding varies by CPU: + * + * - For MT variants of TSV110: + * SCCL is Aff2[7:3], CCL is Aff2[2:0] + * + * - For other MT parts: + * SCCL is Aff3[7:0], CCL is Aff2[7:0] + * + * - For non-MT parts: + * SCCL is Aff2[7:0], CCL is Aff1[7:0] */ -static void hisi_read_sccl_and_ccl_id(int *sccl_id, int *ccl_id) +static void hisi_read_sccl_and_ccl_id(int *scclp, int *cclp) { u64 mpidr = read_cpuid_mpidr(); - - if (mpidr & MPIDR_MT_BITMASK) { - if (read_cpuid_part_number() == HISI_CPU_PART_TSV110) { - int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2); - - if (sccl_id) - *sccl_id = aff2 >> 3; - if (ccl_id) - *ccl_id = aff2 & 0x7; - } else { - if (sccl_id) - *sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 3); - if (ccl_id) - *ccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2); - } + int aff3 = MPIDR_AFFINITY_LEVEL(mpidr, 3); + int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2); + int aff1 = MPIDR_AFFINITY_LEVEL(mpidr, 1); + bool mt = mpidr & MPIDR_MT_BITMASK; + int sccl, ccl; + + if (mt && read_cpuid_part_number() == HISI_CPU_PART_TSV110) { + sccl = aff2 >> 3; + ccl = aff2 & 0x7; + } else if (mt) { + sccl = aff3; + ccl = aff2; } else { - if (sccl_id) - *sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2); - if (ccl_id) - *ccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 1); + sccl = aff2; + ccl = aff1; } + + if (scclp) + *scclp = sccl; + if (cclp) + *cclp = ccl; } /* diff --git a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c index 32f268f173d1..57044ab376d3 100644 --- a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c +++ b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c @@ -1049,7 +1049,7 @@ static int ns2_pinmux_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!res) return -EINVAL; - pinctrl->base1 = devm_ioremap_nocache(&pdev->dev, res->start, + pinctrl->base1 = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!pinctrl->base1) { dev_err(&pdev->dev, "unable to map I/O space\n"); diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c index 3756fc9d5826..f1d60a708815 100644 --- a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c +++ b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c @@ -578,7 +578,7 @@ static int nsp_pinmux_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!res) return -EINVAL; - pinctrl->base1 = devm_ioremap_nocache(&pdev->dev, res->start, + pinctrl->base1 = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!pinctrl->base1) { dev_err(&pdev->dev, "unable to map I/O space\n"); diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index 2bbd8ee93507..46600d9380ea 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -1535,15 +1535,8 @@ int pinctrl_init_done(struct device *dev) return ret; } -#ifdef CONFIG_PM - -/** - * pinctrl_pm_select_state() - select pinctrl state for PM - * @dev: device to select default state for - * @state: state to set - */ -static int pinctrl_pm_select_state(struct device *dev, - struct pinctrl_state *state) +static int pinctrl_select_bound_state(struct device *dev, + struct pinctrl_state *state) { struct dev_pin_info 
*pins = dev->pins; int ret; @@ -1558,15 +1551,27 @@ static int pinctrl_pm_select_state(struct device *dev, } /** - * pinctrl_pm_select_default_state() - select default pinctrl state for PM + * pinctrl_select_default_state() - select default pinctrl state * @dev: device to select default state for */ -int pinctrl_pm_select_default_state(struct device *dev) +int pinctrl_select_default_state(struct device *dev) { if (!dev->pins) return 0; - return pinctrl_pm_select_state(dev, dev->pins->default_state); + return pinctrl_select_bound_state(dev, dev->pins->default_state); +} +EXPORT_SYMBOL_GPL(pinctrl_select_default_state); + +#ifdef CONFIG_PM + +/** + * pinctrl_pm_select_default_state() - select default pinctrl state for PM + * @dev: device to select default state for + */ +int pinctrl_pm_select_default_state(struct device *dev) +{ + return pinctrl_select_default_state(dev); } EXPORT_SYMBOL_GPL(pinctrl_pm_select_default_state); @@ -1579,7 +1584,7 @@ int pinctrl_pm_select_sleep_state(struct device *dev) if (!dev->pins) return 0; - return pinctrl_pm_select_state(dev, dev->pins->sleep_state); + return pinctrl_select_bound_state(dev, dev->pins->sleep_state); } EXPORT_SYMBOL_GPL(pinctrl_pm_select_sleep_state); @@ -1592,7 +1597,7 @@ int pinctrl_pm_select_idle_state(struct device *dev) if (!dev->pins) return 0; - return pinctrl_pm_select_state(dev, dev->pins->idle_state); + return pinctrl_select_bound_state(dev, dev->pins->idle_state); } EXPORT_SYMBOL_GPL(pinctrl_pm_select_idle_state); #endif diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c index 7e29e3fecdb2..c00d0022d311 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c +++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c @@ -611,7 +611,7 @@ int imx1_pinctrl_core_probe(struct platform_device *pdev, if (!res) return -ENOENT; - ipctl->base = devm_ioremap_nocache(&pdev->dev, res->start, + ipctl->base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!ipctl->base) return -ENOMEM; diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index eab078244a4c..73aff6591de2 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c @@ -866,7 +866,7 @@ static int amd_gpio_probe(struct platform_device *pdev) return -EINVAL; } - gpio_dev->base = devm_ioremap_nocache(&pdev->dev, res->start, + gpio_dev->base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!gpio_dev->base) return -ENOMEM; diff --git a/drivers/platform/mellanox/mlxreg-hotplug.c b/drivers/platform/mellanox/mlxreg-hotplug.c index 706207d192ae..77be37a1fbcf 100644 --- a/drivers/platform/mellanox/mlxreg-hotplug.c +++ b/drivers/platform/mellanox/mlxreg-hotplug.c @@ -504,6 +504,20 @@ static int mlxreg_hotplug_set_irq(struct mlxreg_hotplug_priv_data *priv) item = pdata->items; for (i = 0; i < pdata->counter; i++, item++) { + if (item->capability) { + /* + * Read group capability register to get actual number + * of interrupt capable components and set group mask + * accordingly. + */ + ret = regmap_read(priv->regmap, item->capability, + ®val); + if (ret) + goto out; + + item->mask = GENMASK((regval & item->mask) - 1, 0); + } + /* Clear group presense event. 
*/ ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_EVENT_OFF, 0); diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 27d5b40fb717..587403c44598 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -997,7 +997,6 @@ config INTEL_SCU_IPC config INTEL_SCU_IPC_UTIL tristate "Intel SCU IPC utility driver" depends on INTEL_SCU_IPC - default y ---help--- The IPC Util driver provides an interface with the SCU enabling low level access for debug work and updating the firmware. Say @@ -1299,9 +1298,9 @@ config INTEL_ATOMISP2_PM depends on PCI && IOSF_MBI && PM help Power-management driver for Intel's Image Signal Processor found on - Bay and Cherry Trail devices. This dummy driver's sole purpose is to - turn the ISP off (put it in D3) to save power and to allow entering - of S0ix modes. + Bay Trail and Cherry Trail devices. This dummy driver's sole purpose + is to turn the ISP off (put it in D3) to save power and to allow + entering of S0ix modes. To compile this driver as a module, choose M here: the module will be called intel_atomisp2_pm. @@ -1337,6 +1336,17 @@ config PCENGINES_APU2 To compile this driver as a module, choose M here: the module will be called pcengines-apuv2. +config INTEL_UNCORE_FREQ_CONTROL + tristate "Intel Uncore frequency control driver" + depends on X86_64 + help + This driver allows control of uncore frequency limits on + supported server platforms. + Uncore frequency controls RING/LLC (last-level cache) clocks. + + To compile this driver as a module, choose M here: the module + will be called intel-uncore-frequency. + source "drivers/platform/x86/intel_speed_select_if/Kconfig" config SYSTEM76_ACPI diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index 42d85a00be4e..3747b1f07cf1 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile @@ -105,3 +105,4 @@ obj-$(CONFIG_INTEL_ATOMISP2_PM) += intel_atomisp2_pm.o obj-$(CONFIG_PCENGINES_APU2) += pcengines-apuv2.o obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += intel_speed_select_if/ obj-$(CONFIG_SYSTEM76_ACPI) += system76_acpi.o +obj-$(CONFIG_INTEL_UNCORE_FREQ_CONTROL) += intel-uncore-frequency.o diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index b361c73636a4..6f12747a359a 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -471,6 +471,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = { { KE_KEY, 0x67, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV */ { KE_KEY, 0x6B, { KEY_TOUCHPAD_TOGGLE } }, { KE_IGNORE, 0x6E, }, /* Low Battery notification */ + { KE_KEY, 0x71, { KEY_F13 } }, /* General-purpose button */ { KE_KEY, 0x7a, { KEY_ALS_TOGGLE } }, /* Ambient Light Sensor Toggle */ { KE_KEY, 0x7c, { KEY_MICMUTE } }, { KE_KEY, 0x7D, { KEY_BLUETOOTH } }, /* Bluetooth Enable */ diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index 982f0cc8270c..43bb15e05529 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -61,6 +61,7 @@ MODULE_LICENSE("GPL"); #define NOTIFY_KBD_BRTDWN 0xc5 #define NOTIFY_KBD_BRTTOGGLE 0xc7 #define NOTIFY_KBD_FBM 0x99 +#define NOTIFY_KBD_TTP 0xae #define ASUS_WMI_FNLOCK_BIOS_DISABLED BIT(0) @@ -81,6 +82,10 @@ MODULE_LICENSE("GPL"); #define ASUS_FAN_BOOST_MODE_SILENT_MASK 0x02 #define ASUS_FAN_BOOST_MODES_MASK 0x03 +#define ASUS_THROTTLE_THERMAL_POLICY_DEFAULT 0 +#define ASUS_THROTTLE_THERMAL_POLICY_OVERBOOST 1 +#define ASUS_THROTTLE_THERMAL_POLICY_SILENT 2 
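Stepping back to the mlxreg-hotplug hunk above, the new capability handling derives the group mask with GENMASK((regval & item->mask) - 1, 0), i.e. it converts a count of interrupt-capable components read from hardware into a contiguous low-bit mask. The sketch below is a stand-alone illustration of that arithmetic; the GENMASK definition is copied out of the kernel for this purpose, and, like the driver code, it assumes the count is nonzero.

#include <stdio.h>

#define BITS_PER_LONG	(sizeof(unsigned long) * 8)
/* Stand-alone copy of the kernel's GENMASK() for unsigned long. */
#define GENMASK(h, l) \
	((~0UL << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

int main(void)
{
	unsigned long count = 3;	/* e.g. 3 interrupt-capable components */
	unsigned long mask = GENMASK(count - 1, 0);

	/* 3 components -> GENMASK(2, 0) -> 0x7 (bits 0..2 set) */
	printf("mask for %lu components: 0x%lx\n", count, mask);
	return 0;
}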
+ #define USB_INTEL_XUSB2PR 0xD0 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31 @@ -198,6 +203,9 @@ struct asus_wmi { u8 fan_boost_mode_mask; u8 fan_boost_mode; + bool throttle_thermal_policy_available; + u8 throttle_thermal_policy_mode; + // The RSOC controls the maximum charging percentage. bool battery_rsoc_available; @@ -1718,6 +1726,107 @@ static ssize_t fan_boost_mode_store(struct device *dev, // Fan boost mode: 0 - normal, 1 - overboost, 2 - silent static DEVICE_ATTR_RW(fan_boost_mode); +/* Throttle thermal policy ****************************************************/ + +static int throttle_thermal_policy_check_present(struct asus_wmi *asus) +{ + u32 result; + int err; + + asus->throttle_thermal_policy_available = false; + + err = asus_wmi_get_devstate(asus, + ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY, + &result); + if (err) { + if (err == -ENODEV) + return 0; + return err; + } + + if (result & ASUS_WMI_DSTS_PRESENCE_BIT) + asus->throttle_thermal_policy_available = true; + + return 0; +} + +static int throttle_thermal_policy_write(struct asus_wmi *asus) +{ + int err; + u8 value; + u32 retval; + + value = asus->throttle_thermal_policy_mode; + + err = asus_wmi_set_devstate(ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY, + value, &retval); + if (err) { + pr_warn("Failed to set throttle thermal policy: %d\n", err); + return err; + } + + if (retval != 1) { + pr_warn("Failed to set throttle thermal policy (retval): 0x%x\n", + retval); + return -EIO; + } + + return 0; +} + +static int throttle_thermal_policy_set_default(struct asus_wmi *asus) +{ + if (!asus->throttle_thermal_policy_available) + return 0; + + asus->throttle_thermal_policy_mode = ASUS_THROTTLE_THERMAL_POLICY_DEFAULT; + return throttle_thermal_policy_write(asus); +} + +static int throttle_thermal_policy_switch_next(struct asus_wmi *asus) +{ + u8 new_mode = asus->throttle_thermal_policy_mode + 1; + + if (new_mode > ASUS_THROTTLE_THERMAL_POLICY_SILENT) + new_mode = ASUS_THROTTLE_THERMAL_POLICY_DEFAULT; + + asus->throttle_thermal_policy_mode = new_mode; + return throttle_thermal_policy_write(asus); +} + +static ssize_t throttle_thermal_policy_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct asus_wmi *asus = dev_get_drvdata(dev); + u8 mode = asus->throttle_thermal_policy_mode; + + return scnprintf(buf, PAGE_SIZE, "%d\n", mode); +} + +static ssize_t throttle_thermal_policy_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int result; + u8 new_mode; + struct asus_wmi *asus = dev_get_drvdata(dev); + + result = kstrtou8(buf, 10, &new_mode); + if (result < 0) + return result; + + if (new_mode > ASUS_THROTTLE_THERMAL_POLICY_SILENT) + return -EINVAL; + + asus->throttle_thermal_policy_mode = new_mode; + throttle_thermal_policy_write(asus); + + return count; +} + +// Throttle thermal policy: 0 - default, 1 - overboost, 2 - silent +static DEVICE_ATTR_RW(throttle_thermal_policy); + /* Backlight ******************************************************************/ static int read_backlight_power(struct asus_wmi *asus) @@ -1999,6 +2108,11 @@ static void asus_wmi_handle_event_code(int code, struct asus_wmi *asus) return; } + if (asus->throttle_thermal_policy_available && code == NOTIFY_KBD_TTP) { + throttle_thermal_policy_switch_next(asus); + return; + } + if (is_display_toggle(code) && asus->driver->quirks->no_display_toggle) return; @@ -2149,6 +2263,7 @@ static struct attribute *platform_attributes[] = { &dev_attr_lid_resume.attr, &dev_attr_als_enable.attr, 
&dev_attr_fan_boost_mode.attr, + &dev_attr_throttle_thermal_policy.attr, NULL }; @@ -2172,6 +2287,8 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj, devid = ASUS_WMI_DEVID_ALS_ENABLE; else if (attr == &dev_attr_fan_boost_mode.attr) ok = asus->fan_boost_mode_available; + else if (attr == &dev_attr_throttle_thermal_policy.attr) + ok = asus->throttle_thermal_policy_available; if (devid != -1) ok = !(asus_wmi_get_devstate_simple(asus, devid) < 0); @@ -2431,6 +2548,12 @@ static int asus_wmi_add(struct platform_device *pdev) if (err) goto fail_fan_boost_mode; + err = throttle_thermal_policy_check_present(asus); + if (err) + goto fail_throttle_thermal_policy; + else + throttle_thermal_policy_set_default(asus); + err = asus_wmi_sysfs_init(asus->platform_device); if (err) goto fail_sysfs; @@ -2515,6 +2638,7 @@ fail_hwmon: fail_input: asus_wmi_sysfs_exit(asus->platform_device); fail_sysfs: +fail_throttle_thermal_policy: fail_fan_boost_mode: fail_platform: kfree(asus); diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c index ef6d4bd77b1a..43d590250228 100644 --- a/drivers/platform/x86/intel-hid.c +++ b/drivers/platform/x86/intel-hid.c @@ -19,6 +19,7 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Alex Hung"); static const struct acpi_device_id intel_hid_ids[] = { + {"INT1051", 0}, {"INT33D5", 0}, {"", 0}, }; diff --git a/drivers/platform/x86/intel-uncore-frequency.c b/drivers/platform/x86/intel-uncore-frequency.c new file mode 100644 index 000000000000..2b1a0734c3f8 --- /dev/null +++ b/drivers/platform/x86/intel-uncore-frequency.c @@ -0,0 +1,437 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Intel Uncore Frequency Setting + * Copyright (c) 2019, Intel Corporation. + * All rights reserved. + * + * Provide an interface to set MSR 0x620 at per-die granularity. On CPU online, + * one control CPU is identified per die to read/write the limits. The control + * CPU is reassigned if it goes offline. When the last CPU in a die goes + * offline, the sysfs object for that die is removed. + * The majority of the code is related to creating the sysfs attributes and + * their read/write handlers. + * + * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com> + */ + +#include <linux/cpu.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/suspend.h> +#include <asm/cpu_device_id.h> +#include <asm/intel-family.h> + +#define MSR_UNCORE_RATIO_LIMIT 0x620 +#define UNCORE_FREQ_KHZ_MULTIPLIER 100000 + +/** + * struct uncore_data - Encapsulate all uncore data + * @stored_uncore_data: Last user-changed MSR 0x620 value, which will be restored + * on system resume. + * @initial_min_freq_khz: Sampled minimum uncore frequency at driver init + * @initial_max_freq_khz: Sampled maximum uncore frequency at driver init + * @control_cpu: Designated CPU for a die to read/write + * @valid: Mark the data valid/invalid + * + * This structure is used to encapsulate all data related to uncore sysfs + * settings for a die/package.
+ */ +struct uncore_data { + struct kobject kobj; + u64 stored_uncore_data; + u32 initial_min_freq_khz; + u32 initial_max_freq_khz; + int control_cpu; + bool valid; +}; + +#define to_uncore_data(a) container_of(a, struct uncore_data, kobj) + +/* Max instances for uncore data, one for each die */ +static int uncore_max_entries __read_mostly; +/* Storage for uncore data for all instances */ +static struct uncore_data *uncore_instances; +/* Root of all the uncore sysfs kobjs */ +struct kobject uncore_root_kobj; +/* Stores the CPU mask of the target CPUs to use during uncore read/write */ +static cpumask_t uncore_cpu_mask; +/* CPU online callback register instance */ +static enum cpuhp_state uncore_hp_state __read_mostly; +/* Mutex to control all mutual exclusions */ +static DEFINE_MUTEX(uncore_lock); + +struct uncore_attr { + struct attribute attr; + ssize_t (*show)(struct kobject *kobj, + struct attribute *attr, char *buf); + ssize_t (*store)(struct kobject *kobj, + struct attribute *attr, const char *c, ssize_t count); +}; + +#define define_one_uncore_ro(_name) \ +static struct uncore_attr _name = \ +__ATTR(_name, 0444, show_##_name, NULL) + +#define define_one_uncore_rw(_name) \ +static struct uncore_attr _name = \ +__ATTR(_name, 0644, show_##_name, store_##_name) + +#define show_uncore_data(member_name) \ + static ssize_t show_##member_name(struct kobject *kobj, \ + struct attribute *attr, \ + char *buf) \ + { \ + struct uncore_data *data = to_uncore_data(kobj); \ + return scnprintf(buf, PAGE_SIZE, "%u\n", \ + data->member_name); \ + } \ + define_one_uncore_ro(member_name) + +show_uncore_data(initial_min_freq_khz); +show_uncore_data(initial_max_freq_khz); + +/* Common function to read MSR 0x620 and extract min/max */ +static int uncore_read_ratio(struct uncore_data *data, unsigned int *min, + unsigned int *max) +{ + u64 cap; + int ret; + + ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap); + if (ret) + return ret; + + *max = (cap & 0x7F) * UNCORE_FREQ_KHZ_MULTIPLIER; + *min = ((cap & GENMASK(14, 8)) >> 8) * UNCORE_FREQ_KHZ_MULTIPLIER; + + return 0; +} + +/* Common function to set min/max ratios to be used by sysfs callbacks */ +static int uncore_write_ratio(struct uncore_data *data, unsigned int input, + int set_max) +{ + int ret; + u64 cap; + + mutex_lock(&uncore_lock); + + input /= UNCORE_FREQ_KHZ_MULTIPLIER; + if (!input || input > 0x7F) { + ret = -EINVAL; + goto finish_write; + } + + ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap); + if (ret) + goto finish_write; + + if (set_max) { + cap &= ~0x7F; + cap |= input; + } else { + cap &= ~GENMASK(14, 8); + cap |= (input << 8); + } + + ret = wrmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, cap); + if (ret) + goto finish_write; + + data->stored_uncore_data = cap; + +finish_write: + mutex_unlock(&uncore_lock); + + return ret; +} + +static ssize_t store_min_max_freq_khz(struct kobject *kobj, + struct attribute *attr, + const char *buf, ssize_t count, + int min_max) +{ + struct uncore_data *data = to_uncore_data(kobj); + unsigned int input; + + if (kstrtouint(buf, 10, &input)) + return -EINVAL; + + uncore_write_ratio(data, input, min_max); + + return count; +} + +static ssize_t show_min_max_freq_khz(struct kobject *kobj, + struct attribute *attr, + char *buf, int min_max) +{ + struct uncore_data *data = to_uncore_data(kobj); + unsigned int min, max; + int ret; + + mutex_lock(&uncore_lock); + ret = uncore_read_ratio(data, &min, &max); + mutex_unlock(&uncore_lock); + if (ret) + return ret; + + if
(min_max) + return sprintf(buf, "%u\n", max); + + return sprintf(buf, "%u\n", min); +} + +#define store_uncore_min_max(name, min_max) \ + static ssize_t store_##name(struct kobject *kobj, \ + struct attribute *attr, \ + const char *buf, ssize_t count) \ + { \ + \ + return store_min_max_freq_khz(kobj, attr, buf, count, \ + min_max); \ + } + +#define show_uncore_min_max(name, min_max) \ + static ssize_t show_##name(struct kobject *kobj, \ + struct attribute *attr, char *buf) \ + { \ + \ + return show_min_max_freq_khz(kobj, attr, buf, min_max); \ + } + +store_uncore_min_max(min_freq_khz, 0); +store_uncore_min_max(max_freq_khz, 1); + +show_uncore_min_max(min_freq_khz, 0); +show_uncore_min_max(max_freq_khz, 1); + +define_one_uncore_rw(min_freq_khz); +define_one_uncore_rw(max_freq_khz); + +static struct attribute *uncore_attrs[] = { + &initial_min_freq_khz.attr, + &initial_max_freq_khz.attr, + &max_freq_khz.attr, + &min_freq_khz.attr, + NULL +}; + +static struct kobj_type uncore_ktype = { + .sysfs_ops = &kobj_sysfs_ops, + .default_attrs = uncore_attrs, +}; + +static struct kobj_type uncore_root_ktype = { + .sysfs_ops = &kobj_sysfs_ops, +}; + +/* Caller provides protection */ +static struct uncore_data *uncore_get_instance(unsigned int cpu) +{ + int id = topology_logical_die_id(cpu); + + if (id >= 0 && id < uncore_max_entries) + return &uncore_instances[id]; + + return NULL; +} + +static void uncore_add_die_entry(int cpu) +{ + struct uncore_data *data; + + mutex_lock(&uncore_lock); + data = uncore_get_instance(cpu); + if (!data) { + mutex_unlock(&uncore_lock); + return; + } + + if (data->valid) { + /* control cpu changed */ + data->control_cpu = cpu; + } else { + char str[64]; + int ret; + + memset(data, 0, sizeof(*data)); + sprintf(str, "package_%02d_die_%02d", + topology_physical_package_id(cpu), + topology_die_id(cpu)); + + uncore_read_ratio(data, &data->initial_min_freq_khz, + &data->initial_max_freq_khz); + + ret = kobject_init_and_add(&data->kobj, &uncore_ktype, + &uncore_root_kobj, str); + if (!ret) { + data->control_cpu = cpu; + data->valid = true; + } + } + mutex_unlock(&uncore_lock); +} + +/* Last CPU in this die is offline, so remove sysfs entries */ +static void uncore_remove_die_entry(int cpu) +{ + struct uncore_data *data; + + mutex_lock(&uncore_lock); + data = uncore_get_instance(cpu); + if (data) { + kobject_put(&data->kobj); + data->control_cpu = -1; + data->valid = false; + } + mutex_unlock(&uncore_lock); +} + +static int uncore_event_cpu_online(unsigned int cpu) +{ + int target; + + /* Check if there is an online cpu in the package for uncore MSR */ + target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu)); + if (target < nr_cpu_ids) + return 0; + + /* Use this CPU on this die as a control CPU */ + cpumask_set_cpu(cpu, &uncore_cpu_mask); + uncore_add_die_entry(cpu); + + return 0; +} + +static int uncore_event_cpu_offline(unsigned int cpu) +{ + int target; + + /* Check if existing cpu is used for uncore MSRs */ + if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask)) + return 0; + + /* Find a new cpu to set uncore MSR */ + target = cpumask_any_but(topology_die_cpumask(cpu), cpu); + + if (target < nr_cpu_ids) { + cpumask_set_cpu(target, &uncore_cpu_mask); + uncore_add_die_entry(target); + } else { + uncore_remove_die_entry(cpu); + } + + return 0; +} + +static int uncore_pm_notify(struct notifier_block *nb, unsigned long mode, + void *_unused) +{ + int cpu; + + switch (mode) { + case PM_POST_HIBERNATION: + case PM_POST_RESTORE: + case PM_POST_SUSPEND: + 
for_each_cpu(cpu, &uncore_cpu_mask) { + struct uncore_data *data; + int ret; + + data = uncore_get_instance(cpu); + if (!data || !data->valid || !data->stored_uncore_data) + continue; + + ret = wrmsrl_on_cpu(cpu, MSR_UNCORE_RATIO_LIMIT, + data->stored_uncore_data); + if (ret) + return ret; + } + break; + default: + break; + } + return 0; +} + +static struct notifier_block uncore_pm_nb = { + .notifier_call = uncore_pm_notify, +}; + +#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, } + +static const struct x86_cpu_id intel_uncore_cpu_ids[] = { + ICPU(INTEL_FAM6_BROADWELL_G), + ICPU(INTEL_FAM6_BROADWELL_X), + ICPU(INTEL_FAM6_BROADWELL_D), + ICPU(INTEL_FAM6_SKYLAKE_X), + ICPU(INTEL_FAM6_ICELAKE_X), + ICPU(INTEL_FAM6_ICELAKE_D), + {} +}; + +static int __init intel_uncore_init(void) +{ + const struct x86_cpu_id *id; + int ret; + + id = x86_match_cpu(intel_uncore_cpu_ids); + if (!id) + return -ENODEV; + + uncore_max_entries = topology_max_packages() * + topology_max_die_per_package(); + uncore_instances = kcalloc(uncore_max_entries, + sizeof(*uncore_instances), GFP_KERNEL); + if (!uncore_instances) + return -ENOMEM; + + ret = kobject_init_and_add(&uncore_root_kobj, &uncore_root_ktype, + &cpu_subsys.dev_root->kobj, + "intel_uncore_frequency"); + if (ret) + goto err_free; + + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, + "platform/x86/uncore-freq:online", + uncore_event_cpu_online, + uncore_event_cpu_offline); + if (ret < 0) + goto err_rem_kobj; + + uncore_hp_state = ret; + + ret = register_pm_notifier(&uncore_pm_nb); + if (ret) + goto err_rem_state; + + return 0; + +err_rem_state: + cpuhp_remove_state(uncore_hp_state); +err_rem_kobj: + kobject_put(&uncore_root_kobj); +err_free: + kfree(uncore_instances); + + return ret; +} +module_init(intel_uncore_init) + +static void __exit intel_uncore_exit(void) +{ + int i; + + unregister_pm_notifier(&uncore_pm_nb); + cpuhp_remove_state(uncore_hp_state); + for (i = 0; i < uncore_max_entries; ++i) { + if (uncore_instances[i].valid) + kobject_put(&uncore_instances[i].kobj); + } + kobject_put(&uncore_root_kobj); + kfree(uncore_instances); +} +module_exit(intel_uncore_exit) + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Intel Uncore Frequency Limits Driver"); diff --git a/drivers/platform/x86/intel_atomisp2_pm.c b/drivers/platform/x86/intel_atomisp2_pm.c index b0f421fea2a5..805fc0d8515c 100644 --- a/drivers/platform/x86/intel_atomisp2_pm.c +++ b/drivers/platform/x86/intel_atomisp2_pm.c @@ -1,8 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Dummy driver for Intel's Image Signal Processor found on Bay and Cherry - * Trail devices. The sole purpose of this driver is to allow the ISP to - * be put in D3. + * Dummy driver for Intel's Image Signal Processor found on Bay Trail + * and Cherry Trail devices. The sole purpose of this driver is to allow + * the ISP to be put in D3. * * Copyright (C) 2018 Hans de Goede <hdegoede@redhat.com> * @@ -36,8 +36,7 @@ static int isp_set_power(struct pci_dev *dev, bool enable) { unsigned long timeout; - u32 val = enable ? ISPSSPM0_IUNIT_POWER_ON : - ISPSSPM0_IUNIT_POWER_OFF; + u32 val = enable ? ISPSSPM0_IUNIT_POWER_ON : ISPSSPM0_IUNIT_POWER_OFF; /* Write to ISPSSPM0 bit[1:0] to power on/off the IUNIT */ iosf_mbi_modify(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0, @@ -45,29 +44,25 @@ static int isp_set_power(struct pci_dev *dev, bool enable) /* * There should be no IUNIT access while power-down is - * in progress HW sighting: 4567865 + * in progress. HW sighting: 4567865. 
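Stepping back to the intel-uncore-frequency driver above: the limits live in MSR 0x620, with the max ratio in bits 6:0 and the min ratio in bits 14:8, one ratio step being 100 MHz. A standalone sketch of the same decode that uncore_read_ratio() performs, going through the msr character devices rather than rdmsrl_on_cpu(); it assumes the msr module is loaded and root privileges, and is for illustration only.

#include <fcntl.h>
#include <inttypes.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_UNCORE_RATIO_LIMIT 0x620

int main(void)
{
	uint64_t cap;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0 || pread(fd, &cap, sizeof(cap), MSR_UNCORE_RATIO_LIMIT) != sizeof(cap)) {
		perror("msr read");
		return 1;
	}
	close(fd);

	/* Same fields uncore_read_ratio() extracts: max in bits 6:0, min in 14:8. */
	printf("max %" PRIu64 " kHz\n", (cap & 0x7f) * 100000);
	printf("min %" PRIu64 " kHz\n", ((cap >> 8) & 0x7f) * 100000);
	return 0;
}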
* Wait up to 50 ms for the IUNIT to shut down. * And we do the same for power on. */ timeout = jiffies + msecs_to_jiffies(50); - while (1) { + do { u32 tmp; /* Wait until ISPSSPM0 bit[25:24] shows the right value */ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0, &tmp); tmp = (tmp & ISPSSPM0_ISPSSS_MASK) >> ISPSSPM0_ISPSSS_OFFSET; if (tmp == val) - break; + return 0; - if (time_after(jiffies, timeout)) { - dev_err(&dev->dev, "IUNIT power-%s timeout.\n", - enable ? "on" : "off"); - return -EBUSY; - } usleep_range(1000, 2000); - } + } while (time_before(jiffies, timeout)); - return 0; + dev_err(&dev->dev, "IUNIT power-%s timeout.\n", enable ? "on" : "off"); + return -EBUSY; } static int isp_probe(struct pci_dev *dev, const struct pci_device_id *id) diff --git a/drivers/platform/x86/intel_cht_int33fe_typec.c b/drivers/platform/x86/intel_cht_int33fe_typec.c index 2d097fc2dd46..04138215956b 100644 --- a/drivers/platform/x86/intel_cht_int33fe_typec.c +++ b/drivers/platform/x86/intel_cht_int33fe_typec.c @@ -36,30 +36,6 @@ enum { INT33FE_NODE_MAX, }; -static const struct software_node nodes[]; - -static const struct software_node_ref_args pi3usb30532_ref = { - &nodes[INT33FE_NODE_PI3USB30532] -}; - -static const struct software_node_ref_args dp_ref = { - &nodes[INT33FE_NODE_DISPLAYPORT] -}; - -static struct software_node_ref_args mux_ref; - -static const struct software_node_reference usb_connector_refs[] = { - { "orientation-switch", 1, &pi3usb30532_ref}, - { "mode-switch", 1, &pi3usb30532_ref}, - { "displayport", 1, &dp_ref}, - { } -}; - -static const struct software_node_reference fusb302_refs[] = { - { "usb-role-switch", 1, &mux_ref}, - { } -}; - /* * Grrr I severely dislike buggy BIOS-es. At least one BIOS enumerates * the max17047 both through the INT33FE ACPI device (it is right there @@ -95,8 +71,18 @@ static const struct property_entry max17047_props[] = { { } }; +/* + * We are not using an inline property here because those are constant, + * and we need to adjust this one at runtime to point to the real + * software node.
+ */ +static struct software_node_ref_args fusb302_mux_refs[] = { + { .node = NULL }, +}; + static const struct property_entry fusb302_props[] = { PROPERTY_ENTRY_STRING("linux,extcon-name", "cht_wcove_pwrsrc"), + PROPERTY_ENTRY_REF_ARRAY("usb-role-switch", fusb302_mux_refs), { } }; @@ -112,6 +98,8 @@ static const u32 snk_pdo[] = { PDO_VAR(5000, 12000, 3000), }; +static const struct software_node nodes[]; + static const struct property_entry usb_connector_props[] = { PROPERTY_ENTRY_STRING("data-role", "dual"), PROPERTY_ENTRY_STRING("power-role", "dual"), @@ -119,15 +107,21 @@ static const struct property_entry usb_connector_props[] = { PROPERTY_ENTRY_U32_ARRAY("source-pdos", src_pdo), PROPERTY_ENTRY_U32_ARRAY("sink-pdos", snk_pdo), PROPERTY_ENTRY_U32("op-sink-microwatt", 2500000), + PROPERTY_ENTRY_REF("orientation-switch", + &nodes[INT33FE_NODE_PI3USB30532]), + PROPERTY_ENTRY_REF("mode-switch", + &nodes[INT33FE_NODE_PI3USB30532]), + PROPERTY_ENTRY_REF("displayport", + &nodes[INT33FE_NODE_DISPLAYPORT]), { } }; static const struct software_node nodes[] = { - { "fusb302", NULL, fusb302_props, fusb302_refs }, + { "fusb302", NULL, fusb302_props }, { "max17047", NULL, max17047_props }, { "pi3usb30532" }, { "displayport" }, - { "connector", &nodes[0], usb_connector_props, usb_connector_refs }, + { "connector", &nodes[0], usb_connector_props }, { } }; @@ -163,9 +157,10 @@ static void cht_int33fe_remove_nodes(struct cht_int33fe_data *data) { software_node_unregister_nodes(nodes); - if (mux_ref.node) { - fwnode_handle_put(software_node_fwnode(mux_ref.node)); - mux_ref.node = NULL; + if (fusb302_mux_refs[0].node) { + fwnode_handle_put( + software_node_fwnode(fusb302_mux_refs[0].node)); + fusb302_mux_refs[0].node = NULL; } if (data->dp) { @@ -177,25 +172,31 @@ static void cht_int33fe_remove_nodes(struct cht_int33fe_data *data) static int cht_int33fe_add_nodes(struct cht_int33fe_data *data) { + const struct software_node *mux_ref_node; int ret; - ret = software_node_register_nodes(nodes); - if (ret) - return ret; - - /* The devices that are not created in this driver need extra steps. */ - /* * There is no ACPI device node for the USB role mux, so we need to wait * until the mux driver has created software node for the mux device. * It means we depend on the mux driver. This function will return * -EPROBE_DEFER until the mux device is registered. */ - mux_ref.node = software_node_find_by_name(NULL, "intel-xhci-usb-sw"); - if (!mux_ref.node) { - ret = -EPROBE_DEFER; - goto err_remove_nodes; - } + mux_ref_node = software_node_find_by_name(NULL, "intel-xhci-usb-sw"); + if (!mux_ref_node) + return -EPROBE_DEFER; + + /* + * Update node used in "usb-role-switch" property. Note that we + * rely on software_node_register_nodes() to use the original + * instance of properties instead of copying them. + */ + fusb302_mux_refs[0].node = mux_ref_node; + + ret = software_node_register_nodes(nodes); + if (ret) + return ret; + + /* The devices that are not created in this driver need extra steps. */ /* * The DP connector does have ACPI device node. 
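On the software-node rework just above: PROPERTY_ENTRY_REF() embeds a reference to another node directly in a node's property list, which is what lets this hunk delete the separate software_node_reference tables. A compressed kernel-context sketch of the static form; the node and property names here are invented for illustration, and the referenced node must be registered before the one referring to it.

#include <linux/property.h>

static const struct software_node example_target = {
	.name = "example-target",
};

/* Reference property resolved against example_target once both are registered. */
static const struct property_entry example_source_props[] = {
	PROPERTY_ENTRY_REF("example-switch", &example_target),
	{ }
};

static const struct software_node example_source = {
	.name = "example-source",
	.properties = example_source_props,
};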
In this case we can just diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c index 292bace83f1e..6f436836fe50 100644 --- a/drivers/platform/x86/intel_mid_powerbtn.c +++ b/drivers/platform/x86/intel_mid_powerbtn.c @@ -146,9 +146,10 @@ static int mid_pb_probe(struct platform_device *pdev) input_set_capability(input, EV_KEY, KEY_POWER); - ddata = (struct mid_pb_ddata *)id->driver_data; + ddata = devm_kmemdup(&pdev->dev, (void *)id->driver_data, + sizeof(*ddata), GFP_KERNEL); if (!ddata) - return -ENODATA; + return -ENOMEM; ddata->dev = &pdev->dev; ddata->irq = irq; diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c index 571b4754477c..144faa8bad3d 100644 --- a/drivers/platform/x86/intel_pmc_core.c +++ b/drivers/platform/x86/intel_pmc_core.c @@ -49,7 +49,7 @@ static const struct pmc_bit_map spt_pll_map[] = { {"GEN2 USB2PCIE2 PLL", SPT_PMC_BIT_MPHY_CMN_LANE1}, {"DMIPCIE3 PLL", SPT_PMC_BIT_MPHY_CMN_LANE2}, {"SATA PLL", SPT_PMC_BIT_MPHY_CMN_LANE3}, - {}, + {} }; static const struct pmc_bit_map spt_mphy_map[] = { @@ -69,7 +69,7 @@ static const struct pmc_bit_map spt_mphy_map[] = { {"MPHY CORE LANE 13", SPT_PMC_BIT_MPHY_LANE13}, {"MPHY CORE LANE 14", SPT_PMC_BIT_MPHY_LANE14}, {"MPHY CORE LANE 15", SPT_PMC_BIT_MPHY_LANE15}, - {}, + {} }; static const struct pmc_bit_map spt_pfear_map[] = { @@ -113,7 +113,12 @@ static const struct pmc_bit_map spt_pfear_map[] = { {"CSME_SMS1", SPT_PMC_BIT_CSME_SMS1}, {"CSME_RTC", SPT_PMC_BIT_CSME_RTC}, {"CSME_PSF", SPT_PMC_BIT_CSME_PSF}, - {}, + {} +}; + +static const struct pmc_bit_map *ext_spt_pfear_map[] = { + spt_pfear_map, + NULL }; static const struct pmc_bit_map spt_ltr_show_map[] = { @@ -142,7 +147,7 @@ static const struct pmc_bit_map spt_ltr_show_map[] = { }; static const struct pmc_reg_map spt_reg_map = { - .pfear_sts = spt_pfear_map, + .pfear_sts = ext_spt_pfear_map, .mphy_sts = spt_mphy_map, .pll_sts = spt_pll_map, .ltr_show_sts = spt_ltr_show_map, @@ -186,7 +191,10 @@ static const struct pmc_bit_map cnp_pfear_map[] = { {"SDX", BIT(4)}, {"SPE", BIT(5)}, {"Fuse", BIT(6)}, - /* Reserved for Cannon Lake but valid for Ice Lake and Comet Lake */ + /* + * Reserved for Cannon Lake but valid for Ice Lake, Comet Lake, + * Tiger Lake and Elkhart Lake. + */ {"SBR8", BIT(7)}, {"CSME_FSC", BIT(0)}, @@ -230,11 +238,22 @@ static const struct pmc_bit_map cnp_pfear_map[] = { {"HDA_PGD4", BIT(2)}, {"HDA_PGD5", BIT(3)}, {"HDA_PGD6", BIT(4)}, - /* Reserved for Cannon Lake but valid for Ice Lake and Comet Lake */ + /* + * Reserved for Cannon Lake but valid for Ice Lake, Comet Lake, + * Tiger Lake and Elkhart Lake.
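The ext_*_pfear_map arrays being introduced here change .pfear_sts from one flat table into a NULL-terminated list of tables, so a newer generation can stack its extra bits on top of the common Cannon Lake map; the reworked display loop further down walks both levels. The idea in a self-contained form, with all names invented for the sketch:

#include <stdint.h>
#include <stdio.h>

struct bit_map {
	const char *name;
	uint32_t bit_mask;
};

static const struct bit_map base_map[] = {
	{ "PMC",  1u << 0 },
	{ "SATA", 1u << 1 },
	{ }				/* name == NULL terminates one table */
};

static const struct bit_map extra_map[] = {
	{ "PSF9", 1u << 0 },
	{ }
};

/* Generation-specific view: the base table plus an extension, NULL terminated. */
static const struct bit_map *ext_map[] = { base_map, extra_map, NULL };

int main(void)
{
	uint32_t status = 0x2;		/* pretend power-gating register value */

	for (int idx = 0; ext_map[idx]; idx++)
		for (int i = 0; ext_map[idx][i].name; i++)
			printf("%-4s %s\n", ext_map[idx][i].name,
			       ext_map[idx][i].bit_mask & status ? "Off" : "On");
	return 0;
}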
+ */ {"PSF6", BIT(5)}, {"PSF7", BIT(6)}, {"PSF8", BIT(7)}, + {} +}; + +static const struct pmc_bit_map *ext_cnp_pfear_map[] = { + cnp_pfear_map, + NULL +}; +static const struct pmc_bit_map icl_pfear_map[] = { /* Ice Lake generation onwards only */ {"RES_65", BIT(0)}, {"RES_66", BIT(1)}, @@ -247,6 +266,30 @@ static const struct pmc_bit_map cnp_pfear_map[] = { {} }; +static const struct pmc_bit_map *ext_icl_pfear_map[] = { + cnp_pfear_map, + icl_pfear_map, + NULL +}; + +static const struct pmc_bit_map tgl_pfear_map[] = { + /* Tiger Lake and Elkhart Lake generation onwards only */ + {"PSF9", BIT(0)}, + {"RES_66", BIT(1)}, + {"RES_67", BIT(2)}, + {"RES_68", BIT(3)}, + {"RES_69", BIT(4)}, + {"RES_70", BIT(5)}, + {"TBTLSX", BIT(6)}, + {} +}; + +static const struct pmc_bit_map *ext_tgl_pfear_map[] = { + cnp_pfear_map, + tgl_pfear_map, + NULL +}; + static const struct pmc_bit_map cnp_slps0_dbg0_map[] = { {"AUDIO_D3", BIT(0)}, {"OTG_D3", BIT(1)}, @@ -300,7 +343,7 @@ static const struct pmc_bit_map *cnp_slps0_dbg_maps[] = { cnp_slps0_dbg0_map, cnp_slps0_dbg1_map, cnp_slps0_dbg2_map, - NULL, + NULL }; static const struct pmc_bit_map cnp_ltr_show_map[] = { @@ -334,7 +377,7 @@ static const struct pmc_bit_map cnp_ltr_show_map[] = { }; static const struct pmc_reg_map cnp_reg_map = { - .pfear_sts = cnp_pfear_map, + .pfear_sts = ext_cnp_pfear_map, .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET, .slps0_dbg_maps = cnp_slps0_dbg_maps, .ltr_show_sts = cnp_ltr_show_map, @@ -350,7 +393,7 @@ static const struct pmc_reg_map cnp_reg_map = { }; static const struct pmc_reg_map icl_reg_map = { - .pfear_sts = cnp_pfear_map, + .pfear_sts = ext_icl_pfear_map, .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET, .slps0_dbg_maps = cnp_slps0_dbg_maps, .ltr_show_sts = cnp_ltr_show_map, @@ -365,18 +408,29 @@ static const struct pmc_reg_map icl_reg_map = { .ltr_ignore_max = ICL_NUM_IP_IGN_ALLOWED, }; -static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset) -{ - return readb(pmcdev->regbase + offset); -} +static const struct pmc_reg_map tgl_reg_map = { + .pfear_sts = ext_tgl_pfear_map, + .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET, + .slps0_dbg_maps = cnp_slps0_dbg_maps, + .ltr_show_sts = cnp_ltr_show_map, + .msr_sts = msr_map, + .slps0_dbg_offset = CNP_PMC_SLPS0_DBG_OFFSET, + .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET, + .regmap_length = CNP_PMC_MMIO_REG_LEN, + .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A, + .ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES, + .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET, + .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT, + .ltr_ignore_max = TGL_NUM_IP_IGN_ALLOWED, +}; static inline u32 pmc_core_reg_read(struct pmc_dev *pmcdev, int reg_offset) { return readl(pmcdev->regbase + reg_offset); } -static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int - reg_offset, u32 val) +static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int reg_offset, + u32 val) { writel(val, pmcdev->regbase + reg_offset); } @@ -412,20 +466,25 @@ static int pmc_core_check_read_lock_bit(void) #if IS_ENABLED(CONFIG_DEBUG_FS) static bool slps0_dbg_latch; -static void pmc_core_display_map(struct seq_file *s, int index, - u8 pf_reg, const struct pmc_bit_map *pf_map) +static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset) +{ + return readb(pmcdev->regbase + offset); +} + +static void pmc_core_display_map(struct seq_file *s, int index, int idx, int ip, + u8 pf_reg, const struct pmc_bit_map **pf_map) { seq_printf(s, "PCH IP: %-2d - %-32s\tState: %s\n", - index, 
pf_map[index].name, - pf_map[index].bit_mask & pf_reg ? "Off" : "On"); + ip, pf_map[idx][index].name, + pf_map[idx][index].bit_mask & pf_reg ? "Off" : "On"); } static int pmc_core_ppfear_show(struct seq_file *s, void *unused) { struct pmc_dev *pmcdev = s->private; - const struct pmc_bit_map *map = pmcdev->map->pfear_sts; + const struct pmc_bit_map **maps = pmcdev->map->pfear_sts; u8 pf_regs[PPFEAR_MAX_NUM_ENTRIES]; - int index, iter; + int index, iter, idx, ip = 0; iter = pmcdev->map->ppfear0_offset; @@ -433,9 +492,12 @@ static int pmc_core_ppfear_show(struct seq_file *s, void *unused) index < PPFEAR_MAX_NUM_ENTRIES; index++, iter++) pf_regs[index] = pmc_core_reg_read_byte(pmcdev, iter); - for (index = 0; map[index].name && - index < pmcdev->map->ppfear_buckets * 8; index++) - pmc_core_display_map(s, index, pf_regs[index / 8], map); + for (idx = 0; maps[idx]; idx++) { + for (index = 0; maps[idx][index].name && + index < pmcdev->map->ppfear_buckets * 8; ip++, index++) + pmc_core_display_map(s, index, idx, ip, + pf_regs[index / 8], maps); + } return 0; } @@ -561,21 +623,22 @@ out_unlock: } DEFINE_SHOW_ATTRIBUTE(pmc_core_pll); -static ssize_t pmc_core_ltr_ignore_write(struct file *file, const char __user -*userbuf, size_t count, loff_t *ppos) +static ssize_t pmc_core_ltr_ignore_write(struct file *file, + const char __user *userbuf, + size_t count, loff_t *ppos) { struct pmc_dev *pmcdev = &pmc; const struct pmc_reg_map *map = pmcdev->map; u32 val, buf_size, fd; - int err = 0; + int err; buf_size = count < 64 ? count : 64; - mutex_lock(&pmcdev->lock); - if (kstrtou32_from_user(userbuf, buf_size, 10, &val)) { - err = -EFAULT; - goto out_unlock; - } + err = kstrtou32_from_user(userbuf, buf_size, 10, &val); + if (err) + return err; + + mutex_lock(&pmcdev->lock); if (val > map->ltr_ignore_max) { err = -EINVAL; @@ -767,8 +830,9 @@ static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev) debugfs_create_file("slp_s0_residency_usec", 0444, dir, pmcdev, &pmc_core_dev_state); - debugfs_create_file("pch_ip_power_gating_status", 0444, dir, pmcdev, - &pmc_core_ppfear_fops); + if (pmcdev->map->pfear_sts) + debugfs_create_file("pch_ip_power_gating_status", 0444, dir, + pmcdev, &pmc_core_ppfear_fops); debugfs_create_file("ltr_ignore", 0644, dir, pmcdev, &pmc_core_ltr_ignore_ops); @@ -816,19 +880,22 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = { INTEL_CPU_FAM6(ICELAKE_NNPI, icl_reg_map), INTEL_CPU_FAM6(COMETLAKE, cnp_reg_map), INTEL_CPU_FAM6(COMETLAKE_L, cnp_reg_map), + INTEL_CPU_FAM6(TIGERLAKE_L, tgl_reg_map), + INTEL_CPU_FAM6(TIGERLAKE, tgl_reg_map), + INTEL_CPU_FAM6(ATOM_TREMONT, tgl_reg_map), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_ids); static const struct pci_device_id pmc_pci_ids[] = { - { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID), 0}, - { 0, }, + { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID) }, + { } }; /* * This quirk can be used on those platforms where - * the platform BIOS enforces 24Mhx Crystal to shutdown + * the platform BIOS enforces 24Mhz crystal to shutdown * before PMC can assert SLP_S0#. 
*/ static int quirk_xtal_ignore(const struct dmi_system_id *id) diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h index 8203ae38dc46..f1a0792b3f91 100644 --- a/drivers/platform/x86/intel_pmc_core.h +++ b/drivers/platform/x86/intel_pmc_core.h @@ -186,6 +186,8 @@ enum ppfear_regs { #define ICL_NUM_IP_IGN_ALLOWED 20 #define ICL_PMC_LTR_WIGIG 0x1BFC +#define TGL_NUM_IP_IGN_ALLOWED 22 + struct pmc_bit_map { const char *name; u32 bit_mask; @@ -213,7 +215,7 @@ struct pmc_bit_map { * captures them to have a common implementation. */ struct pmc_reg_map { - const struct pmc_bit_map *pfear_sts; + const struct pmc_bit_map **pfear_sts; const struct pmc_bit_map *mphy_sts; const struct pmc_bit_map *pll_sts; const struct pmc_bit_map **slps0_dbg_maps; diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c index 5c1da2bb1435..2433bf73f1ed 100644 --- a/drivers/platform/x86/intel_pmc_ipc.c +++ b/drivers/platform/x86/intel_pmc_ipc.c @@ -12,23 +12,13 @@ */ #include <linux/acpi.h> -#include <linux/atomic.h> -#include <linux/bitops.h> #include <linux/delay.h> -#include <linux/device.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/io-64-nonatomic-lo-hi.h> -#include <linux/kernel.h> #include <linux/module.h> -#include <linux/notifier.h> #include <linux/pci.h> #include <linux/platform_device.h> -#include <linux/pm.h> -#include <linux/pm_qos.h> -#include <linux/sched.h> -#include <linux/spinlock.h> -#include <linux/suspend.h> #include <asm/intel_pmc_ipc.h> @@ -184,11 +174,6 @@ static inline void ipc_data_writel(u32 data, u32 offset) writel(data, ipcdev.ipc_base + IPC_WRITE_BUFFER + offset); } -static inline u8 __maybe_unused ipc_data_readb(u32 offset) -{ - return readb(ipcdev.ipc_base + IPC_READ_BUFFER + offset); -} - static inline u32 ipc_data_readl(u32 offset) { return readl(ipcdev.ipc_base + IPC_READ_BUFFER + offset); @@ -211,35 +196,6 @@ static inline int is_gcr_valid(u32 offset) } /** - * intel_pmc_gcr_read() - Read a 32-bit PMC GCR register - * @offset: offset of GCR register from GCR address base - * @data: data pointer for storing the register output - * - * Reads the 32-bit PMC GCR register at given offset. - * - * Return: negative value on error or 0 on success. - */ -int intel_pmc_gcr_read(u32 offset, u32 *data) -{ - int ret; - - spin_lock(&ipcdev.gcr_lock); - - ret = is_gcr_valid(offset); - if (ret < 0) { - spin_unlock(&ipcdev.gcr_lock); - return ret; - } - - *data = readl(ipcdev.gcr_mem_base + offset); - - spin_unlock(&ipcdev.gcr_lock); - - return 0; -} -EXPORT_SYMBOL_GPL(intel_pmc_gcr_read); - -/** * intel_pmc_gcr_read64() - Read a 64-bit PMC GCR register * @offset: offset of GCR register from GCR address base * @data: data pointer for storing the register output @@ -269,36 +225,6 @@ int intel_pmc_gcr_read64(u32 offset, u64 *data) EXPORT_SYMBOL_GPL(intel_pmc_gcr_read64); /** - * intel_pmc_gcr_write() - Write PMC GCR register - * @offset: offset of GCR register from GCR address base - * @data: register update value - * - * Writes the PMC GCR register of given offset with given - * value. - * - * Return: negative value on error or 0 on success. 
- */ -int intel_pmc_gcr_write(u32 offset, u32 data) -{ - int ret; - - spin_lock(&ipcdev.gcr_lock); - - ret = is_gcr_valid(offset); - if (ret < 0) { - spin_unlock(&ipcdev.gcr_lock); - return ret; - } - - writel(data, ipcdev.gcr_mem_base + offset); - - spin_unlock(&ipcdev.gcr_lock); - - return 0; -} -EXPORT_SYMBOL_GPL(intel_pmc_gcr_write); - -/** * intel_pmc_gcr_update() - Update PMC GCR register bits * @offset: offset of GCR register from GCR address base * @mask: bit mask for update operation @@ -309,7 +235,7 @@ EXPORT_SYMBOL_GPL(intel_pmc_gcr_write); * * Return: negative value on error or 0 on success. */ -int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val) +static int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val) { u32 new_val; int ret = 0; @@ -339,7 +265,6 @@ gcr_ipc_unlock: spin_unlock(&ipcdev.gcr_lock); return ret; } -EXPORT_SYMBOL_GPL(intel_pmc_gcr_update); static int update_no_reboot_bit(void *priv, bool set) { @@ -405,7 +330,7 @@ static int intel_pmc_ipc_check_status(void) * * Return: an IPC error code or 0 on success. */ -int intel_pmc_ipc_simple_command(int cmd, int sub) +static int intel_pmc_ipc_simple_command(int cmd, int sub) { int ret; @@ -420,7 +345,6 @@ int intel_pmc_ipc_simple_command(int cmd, int sub) return ret; } -EXPORT_SYMBOL_GPL(intel_pmc_ipc_simple_command); /** * intel_pmc_ipc_raw_cmd() - IPC command with data and pointers @@ -437,8 +361,8 @@ EXPORT_SYMBOL_GPL(intel_pmc_ipc_simple_command); * * Return: an IPC error code or 0 on success. */ -int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out, - u32 outlen, u32 dptr, u32 sptr) +static int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out, + u32 outlen, u32 dptr, u32 sptr) { u32 wbuf[4] = { 0 }; int ret; @@ -470,7 +394,6 @@ int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out, return ret; } -EXPORT_SYMBOL_GPL(intel_pmc_ipc_raw_cmd); /** * intel_pmc_ipc_command() - IPC command with input/output data @@ -579,6 +502,7 @@ static ssize_t intel_pmc_ipc_simple_cmd_store(struct device *dev, } return (ssize_t)count; } +static DEVICE_ATTR(simplecmd, 0200, NULL, intel_pmc_ipc_simple_cmd_store); static ssize_t intel_pmc_ipc_northpeak_store(struct device *dev, struct device_attribute *attr, @@ -588,8 +512,9 @@ static ssize_t intel_pmc_ipc_northpeak_store(struct device *dev, int subcmd; int ret; - if (kstrtoul(buf, 0, &val)) - return -EINVAL; + ret = kstrtoul(buf, 0, &val); + if (ret) + return ret; if (val) subcmd = 1; @@ -602,11 +527,7 @@ static ssize_t intel_pmc_ipc_northpeak_store(struct device *dev, } return (ssize_t)count; } - -static DEVICE_ATTR(simplecmd, S_IWUSR, - NULL, intel_pmc_ipc_simple_cmd_store); -static DEVICE_ATTR(northpeak, S_IWUSR, - NULL, intel_pmc_ipc_northpeak_store); +static DEVICE_ATTR(northpeak, 0200, NULL, intel_pmc_ipc_northpeak_store); static struct attribute *intel_ipc_attrs[] = { &dev_attr_northpeak.attr, @@ -618,6 +539,11 @@ static const struct attribute_group intel_ipc_group = { .attrs = intel_ipc_attrs, }; +static const struct attribute_group *intel_ipc_groups[] = { + &intel_ipc_group, + NULL +}; + static struct resource punit_res_array[] = { /* Punit BIOS */ { @@ -958,18 +884,10 @@ static int ipc_plat_probe(struct platform_device *pdev) goto err_irq; } - ret = sysfs_create_group(&pdev->dev.kobj, &intel_ipc_group); - if (ret) { - dev_err(&pdev->dev, "Failed to create sysfs group %d\n", - ret); - goto err_sys; - } - ipcdev.has_gcr_regs = true; return 0; -err_sys: - devm_free_irq(&pdev->dev, ipcdev.irq, &ipcdev); + err_irq: 
platform_device_unregister(ipcdev.tco_dev); platform_device_unregister(ipcdev.punit_dev); @@ -980,7 +898,6 @@ err_irq: static int ipc_plat_remove(struct platform_device *pdev) { - sysfs_remove_group(&pdev->dev.kobj, &intel_ipc_group); devm_free_irq(&pdev->dev, ipcdev.irq, &ipcdev); platform_device_unregister(ipcdev.tco_dev); platform_device_unregister(ipcdev.punit_dev); @@ -995,6 +912,7 @@ static struct platform_driver ipc_plat_driver = { .driver = { .name = "pmc-ipc-plat", .acpi_match_table = ACPI_PTR(ipc_acpi_ids), + .dev_groups = intel_ipc_groups, }, }; diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c index cdab916fbf92..3d7da5266136 100644 --- a/drivers/platform/x86/intel_scu_ipc.c +++ b/drivers/platform/x86/intel_scu_ipc.c @@ -26,11 +26,7 @@ #include <asm/intel_scu_ipc.h> /* IPC defines the following message types */ -#define IPCMSG_WATCHDOG_TIMER 0xF8 /* Set Kernel Watchdog Threshold */ -#define IPCMSG_BATTERY 0xEF /* Coulomb Counter Accumulator */ -#define IPCMSG_FW_UPDATE 0xFE /* Firmware update */ -#define IPCMSG_PCNTRL 0xFF /* Power controller unit read/write */ -#define IPCMSG_FW_REVISION 0xF4 /* Get firmware revision */ +#define IPCMSG_PCNTRL 0xff /* Power controller unit read/write */ /* Command id associated with message IPCMSG_PCNTRL */ #define IPC_CMD_PCNTRL_W 0 /* Register write */ @@ -58,56 +54,29 @@ #define IPC_RWBUF_SIZE 20 /* IPC Read buffer Size */ #define IPC_IOC 0x100 /* IPC command register IOC bit */ -#define PCI_DEVICE_ID_LINCROFT 0x082a -#define PCI_DEVICE_ID_PENWELL 0x080e -#define PCI_DEVICE_ID_CLOVERVIEW 0x08ea -#define PCI_DEVICE_ID_TANGIER 0x11a0 - -/* intel scu ipc driver data */ -struct intel_scu_ipc_pdata_t { - u32 i2c_base; - u32 i2c_len; - u8 irq_mode; -}; - -static const struct intel_scu_ipc_pdata_t intel_scu_ipc_lincroft_pdata = { - .i2c_base = 0xff12b000, - .i2c_len = 0x10, - .irq_mode = 0, -}; - -/* Penwell and Cloverview */ -static const struct intel_scu_ipc_pdata_t intel_scu_ipc_penwell_pdata = { - .i2c_base = 0xff12b000, - .i2c_len = 0x10, - .irq_mode = 1, -}; - -static const struct intel_scu_ipc_pdata_t intel_scu_ipc_tangier_pdata = { - .i2c_base = 0xff00d000, - .i2c_len = 0x10, - .irq_mode = 0, -}; - struct intel_scu_ipc_dev { struct device *dev; void __iomem *ipc_base; - void __iomem *i2c_base; struct completion cmd_complete; u8 irq_mode; }; static struct intel_scu_ipc_dev ipcdev; /* Only one for now */ +#define IPC_STATUS 0x04 +#define IPC_STATUS_IRQ BIT(2) +#define IPC_STATUS_ERR BIT(1) +#define IPC_STATUS_BUSY BIT(0) + /* - * IPC Read Buffer (Read Only): - * 16 byte buffer for receiving data from SCU, if IPC command - * processing results in response data + * IPC Write/Read Buffers: + * 16 byte buffer for sending and receiving data to and from SCU. 
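The intel_pmc_ipc conversion above drops the manual sysfs_create_group()/sysfs_remove_group() calls in favor of .dev_groups, so the driver core creates and removes the attribute files around probe and remove and the driver loses a whole error-unwind path. A minimal platform-driver sketch of the same idiom; every name here is illustrative, not from the patch.

#include <linux/device.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static ssize_t example_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	return count;	/* accept and discard the input in this sketch */
}
static DEVICE_ATTR_WO(example);

static struct attribute *example_attrs[] = {
	&dev_attr_example.attr,
	NULL
};
ATTRIBUTE_GROUPS(example);

static int example_probe(struct platform_device *pdev)
{
	return 0;	/* no sysfs calls needed anywhere in the driver */
}

static struct platform_driver example_driver = {
	.probe = example_probe,
	.driver = {
		.name = "example-ipc",
		.dev_groups = example_groups,	/* core adds/removes the files */
	},
};
module_platform_driver(example_driver);
MODULE_LICENSE("GPL");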
*/ +#define IPC_WRITE_BUFFER 0x80 #define IPC_READ_BUFFER 0x90 -#define IPC_I2C_CNTRL_ADDR 0 -#define I2C_DATA_ADDR 0x04 +/* Timeout in jiffies */ +#define IPC_TIMEOUT (3 * HZ) static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */ @@ -120,11 +89,8 @@ static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */ */ static inline void ipc_command(struct intel_scu_ipc_dev *scu, u32 cmd) { - if (scu->irq_mode) { - reinit_completion(&scu->cmd_complete); - writel(cmd | IPC_IOC, scu->ipc_base); - } - writel(cmd, scu->ipc_base); + reinit_completion(&scu->cmd_complete); + writel(cmd | IPC_IOC, scu->ipc_base); } /* @@ -135,7 +101,7 @@ static inline void ipc_command(struct intel_scu_ipc_dev *scu, u32 cmd) */ static inline void ipc_data_writel(struct intel_scu_ipc_dev *scu, u32 data, u32 offset) { - writel(data, scu->ipc_base + 0x80 + offset); + writel(data, scu->ipc_base + IPC_WRITE_BUFFER + offset); } /* @@ -147,7 +113,7 @@ static inline void ipc_data_writel(struct intel_scu_ipc_dev *scu, u32 data, u32 */ static inline u8 ipc_read_status(struct intel_scu_ipc_dev *scu) { - return __raw_readl(scu->ipc_base + 0x04); + return __raw_readl(scu->ipc_base + IPC_STATUS); } /* Read ipc byte data */ @@ -165,24 +131,20 @@ static inline u32 ipc_data_readl(struct intel_scu_ipc_dev *scu, u32 offset) /* Wait till scu status is busy */ static inline int busy_loop(struct intel_scu_ipc_dev *scu) { - u32 status = ipc_read_status(scu); - u32 loop_count = 100000; + unsigned long end = jiffies + msecs_to_jiffies(IPC_TIMEOUT); - /* break if scu doesn't reset busy bit after huge retry */ - while ((status & BIT(0)) && --loop_count) { - udelay(1); /* scu processing time is in few u secods */ - status = ipc_read_status(scu); - } + do { + u32 status; - if (status & BIT(0)) { - dev_err(scu->dev, "IPC timed out"); - return -ETIMEDOUT; - } + status = ipc_read_status(scu); + if (!(status & IPC_STATUS_BUSY)) + return (status & IPC_STATUS_ERR) ? -EIO : 0; - if (status & BIT(1)) - return -EIO; + usleep_range(50, 100); + } while (time_before(jiffies, end)); - return 0; + dev_err(scu->dev, "IPC timed out"); + return -ETIMEDOUT; } /* Wait till ipc ioc interrupt is received or timeout in 3 HZ */ @@ -190,13 +152,13 @@ static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu) { int status; - if (!wait_for_completion_timeout(&scu->cmd_complete, 3 * HZ)) { + if (!wait_for_completion_timeout(&scu->cmd_complete, IPC_TIMEOUT)) { dev_err(scu->dev, "IPC timed out\n"); return -ETIMEDOUT; } status = ipc_read_status(scu); - if (status & BIT(1)) + if (status & IPC_STATUS_ERR) return -EIO; return 0; @@ -260,14 +222,14 @@ static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id) } /** - * intel_scu_ipc_ioread8 - read a word via the SCU - * @addr: register on SCU - * @data: return pointer for read byte + * intel_scu_ipc_ioread8 - read a word via the SCU + * @addr: Register on SCU + * @data: Return pointer for read byte * - * Read a single register. Returns 0 on success or an error code. All - * locking between SCU accesses is handled for the caller. + * Read a single register. Returns %0 on success or an error code. All + * locking between SCU accesses is handled for the caller. * - * This function may sleep. + * This function may sleep. 
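Both the isp_set_power() loop earlier and the busy_loop() rewrite here share the same bounded-poll shape: check the condition, sleep briefly, re-check until a deadline derived from jiffies expires, and only then report the timeout. A self-contained userspace rendering of that pattern, with CLOCK_MONOTONIC standing in for jiffies (a sketch of the shape, not the driver code):

#include <stdbool.h>
#include <time.h>
#include <unistd.h>

static long elapsed_ms(const struct timespec *start)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return (now.tv_sec - start->tv_sec) * 1000 +
	       (now.tv_nsec - start->tv_nsec) / 1000000;
}

/* Poll cond() roughly once per millisecond until it holds or timeout_ms passes. */
static bool poll_until(bool (*cond)(void), long timeout_ms)
{
	struct timespec start;

	clock_gettime(CLOCK_MONOTONIC, &start);
	do {
		if (cond())
			return true;	/* early success, like the "return 0" above */
		usleep(1000);
	} while (elapsed_ms(&start) < timeout_ms);

	return false;	/* caller logs the timeout, as isp_set_power() and busy_loop() do */
}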
*/ int intel_scu_ipc_ioread8(u16 addr, u8 *data) { @@ -276,48 +238,14 @@ int intel_scu_ipc_ioread8(u16 addr, u8 *data) EXPORT_SYMBOL(intel_scu_ipc_ioread8); /** - * intel_scu_ipc_ioread16 - read a word via the SCU - * @addr: register on SCU - * @data: return pointer for read word - * - * Read a register pair. Returns 0 on success or an error code. All - * locking between SCU accesses is handled for the caller. - * - * This function may sleep. - */ -int intel_scu_ipc_ioread16(u16 addr, u16 *data) -{ - u16 x[2] = {addr, addr + 1}; - return pwr_reg_rdwr(x, (u8 *)data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R); -} -EXPORT_SYMBOL(intel_scu_ipc_ioread16); - -/** - * intel_scu_ipc_ioread32 - read a dword via the SCU - * @addr: register on SCU - * @data: return pointer for read dword - * - * Read four registers. Returns 0 on success or an error code. All - * locking between SCU accesses is handled for the caller. - * - * This function may sleep. - */ -int intel_scu_ipc_ioread32(u16 addr, u32 *data) -{ - u16 x[4] = {addr, addr + 1, addr + 2, addr + 3}; - return pwr_reg_rdwr(x, (u8 *)data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R); -} -EXPORT_SYMBOL(intel_scu_ipc_ioread32); - -/** - * intel_scu_ipc_iowrite8 - write a byte via the SCU - * @addr: register on SCU - * @data: byte to write + * intel_scu_ipc_iowrite8 - write a byte via the SCU + * @addr: Register on SCU + * @data: Byte to write * - * Write a single register. Returns 0 on success or an error code. All - * locking between SCU accesses is handled for the caller. + * Write a single register. Returns %0 on success or an error code. All + * locking between SCU accesses is handled for the caller. * - * This function may sleep. + * This function may sleep. */ int intel_scu_ipc_iowrite8(u16 addr, u8 data) { @@ -326,51 +254,17 @@ int intel_scu_ipc_iowrite8(u16 addr, u8 data) EXPORT_SYMBOL(intel_scu_ipc_iowrite8); /** - * intel_scu_ipc_iowrite16 - write a word via the SCU - * @addr: register on SCU - * @data: word to write - * - * Write two registers. Returns 0 on success or an error code. All - * locking between SCU accesses is handled for the caller. + * intel_scu_ipc_readvv - read a set of registers + * @addr: Register list + * @data: Bytes to return + * @len: Length of array * - * This function may sleep. - */ -int intel_scu_ipc_iowrite16(u16 addr, u16 data) -{ - u16 x[2] = {addr, addr + 1}; - return pwr_reg_rdwr(x, (u8 *)&data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W); -} -EXPORT_SYMBOL(intel_scu_ipc_iowrite16); - -/** - * intel_scu_ipc_iowrite32 - write a dword via the SCU - * @addr: register on SCU - * @data: dword to write + * Read registers. Returns %0 on success or an error code. All locking + * between SCU accesses is handled for the caller. * - * Write four registers. Returns 0 on success or an error code. All - * locking between SCU accesses is handled for the caller. + * The largest array length permitted by the hardware is 5 items. * - * This function may sleep. - */ -int intel_scu_ipc_iowrite32(u16 addr, u32 data) -{ - u16 x[4] = {addr, addr + 1, addr + 2, addr + 3}; - return pwr_reg_rdwr(x, (u8 *)&data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W); -} -EXPORT_SYMBOL(intel_scu_ipc_iowrite32); - -/** - * intel_scu_ipc_readvv - read a set of registers - * @addr: register list - * @data: bytes to return - * @len: length of array - * - * Read registers. Returns 0 on success or an error code. All - * locking between SCU accesses is handled for the caller. - * - * The largest array length permitted by the hardware is 5 items. - * - * This function may sleep. 
+ * This function may sleep. */ int intel_scu_ipc_readv(u16 *addr, u8 *data, int len) { @@ -379,18 +273,17 @@ int intel_scu_ipc_readv(u16 *addr, u8 *data, int len) EXPORT_SYMBOL(intel_scu_ipc_readv); /** - * intel_scu_ipc_writev - write a set of registers - * @addr: register list - * @data: bytes to write - * @len: length of array - * - * Write registers. Returns 0 on success or an error code. All - * locking between SCU accesses is handled for the caller. + * intel_scu_ipc_writev - write a set of registers + * @addr: Register list + * @data: Bytes to write + * @len: Length of array * - * The largest array length permitted by the hardware is 5 items. + * Write registers. Returns %0 on success or an error code. All locking + * between SCU accesses is handled for the caller. * - * This function may sleep. + * The largest array length permitted by the hardware is 5 items. * + * This function may sleep. */ int intel_scu_ipc_writev(u16 *addr, u8 *data, int len) { @@ -399,19 +292,18 @@ int intel_scu_ipc_writev(u16 *addr, u8 *data, int len) EXPORT_SYMBOL(intel_scu_ipc_writev); /** - * intel_scu_ipc_update_register - r/m/w a register - * @addr: register address - * @bits: bits to update - * @mask: mask of bits to update - * - * Read-modify-write power control unit register. The first data argument - * must be register value and second is mask value - * mask is a bitmap that indicates which bits to update. - * 0 = masked. Don't modify this bit, 1 = modify this bit. - * returns 0 on success or an error code. - * - * This function may sleep. Locking between SCU accesses is handled - * for the caller. + * intel_scu_ipc_update_register - r/m/w a register + * @addr: Register address + * @bits: Bits to update + * @mask: Mask of bits to update + * + * Read-modify-write power control unit register. The first data argument + * must be register value and second is mask value mask is a bitmap that + * indicates which bits to update. %0 = masked. Don't modify this bit, %1 = + * modify this bit. returns %0 on success or an error code. + * + * This function may sleep. Locking between SCU accesses is handled + * for the caller. */ int intel_scu_ipc_update_register(u16 addr, u8 bits, u8 mask) { @@ -421,16 +313,16 @@ int intel_scu_ipc_update_register(u16 addr, u8 bits, u8 mask) EXPORT_SYMBOL(intel_scu_ipc_update_register); /** - * intel_scu_ipc_simple_command - send a simple command - * @cmd: command - * @sub: sub type + * intel_scu_ipc_simple_command - send a simple command + * @cmd: Command + * @sub: Sub type * - * Issue a simple command to the SCU. Do not use this interface if - * you must then access data as any data values may be overwritten - * by another SCU access by the time this function returns. + * Issue a simple command to the SCU. Do not use this interface if you must + * then access data as any data values may be overwritten by another SCU + * access by the time this function returns. * - * This function may sleep. Locking for SCU accesses is handled for - * the caller. + * This function may sleep. Locking for SCU accesses is handled for the + * caller. */ int intel_scu_ipc_simple_command(int cmd, int sub) { @@ -450,16 +342,16 @@ int intel_scu_ipc_simple_command(int cmd, int sub) EXPORT_SYMBOL(intel_scu_ipc_simple_command); /** - * intel_scu_ipc_command - command with data - * @cmd: command - * @sub: sub type - * @in: input data - * @inlen: input length in dwords - * @out: output data - * @outlein: output length in dwords - * - * Issue a command to the SCU which involves data transfers. 
Do the - * data copies under the lock but leave it for the caller to interpret + * intel_scu_ipc_command - command with data + * @cmd: Command + * @sub: Sub type + * @in: Input data + * @inlen: Input length in dwords + * @out: Output data + * @outlen: Output length in dwords + * + * Issue a command to the SCU which involves data transfers. Do the + * data copies under the lock but leave it for the caller to interpret. */ int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen, u32 *out, int outlen) @@ -489,117 +381,6 @@ int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen, } EXPORT_SYMBOL(intel_scu_ipc_command); -#define IPC_SPTR 0x08 -#define IPC_DPTR 0x0C - -/** - * intel_scu_ipc_raw_command() - IPC command with data and pointers - * @cmd: IPC command code. - * @sub: IPC command sub type. - * @in: input data of this IPC command. - * @inlen: input data length in dwords. - * @out: output data of this IPC command. - * @outlen: output data length in dwords. - * @sptr: data writing to SPTR register. - * @dptr: data writing to DPTR register. - * - * Send an IPC command to SCU with input/output data and source/dest pointers. - * - * Return: an IPC error code or 0 on success. - */ -int intel_scu_ipc_raw_command(int cmd, int sub, u8 *in, int inlen, - u32 *out, int outlen, u32 dptr, u32 sptr) -{ - struct intel_scu_ipc_dev *scu = &ipcdev; - int inbuflen = DIV_ROUND_UP(inlen, 4); - u32 inbuf[4]; - int i, err; - - /* Up to 16 bytes */ - if (inbuflen > 4) - return -EINVAL; - - mutex_lock(&ipclock); - if (scu->dev == NULL) { - mutex_unlock(&ipclock); - return -ENODEV; - } - - writel(dptr, scu->ipc_base + IPC_DPTR); - writel(sptr, scu->ipc_base + IPC_SPTR); - - /* - * SRAM controller doesn't support 8-bit writes, it only - * supports 32-bit writes, so we have to copy input data into - * the temporary buffer, and SCU FW will use the inlen to - * determine the actual input data length in the temporary - * buffer. - */ - memcpy(inbuf, in, inlen); - - for (i = 0; i < inbuflen; i++) - ipc_data_writel(scu, inbuf[i], 4 * i); - - ipc_command(scu, (inlen << 16) | (sub << 12) | cmd); - err = intel_scu_ipc_check_status(scu); - if (!err) { - for (i = 0; i < outlen; i++) - *out++ = ipc_data_readl(scu, 4 * i); - } - - mutex_unlock(&ipclock); - return err; -} -EXPORT_SYMBOL_GPL(intel_scu_ipc_raw_command); - -/* I2C commands */ -#define IPC_I2C_WRITE 1 /* I2C Write command */ -#define IPC_I2C_READ 2 /* I2C Read command */ - -/** - * intel_scu_ipc_i2c_cntrl - I2C read/write operations - * @addr: I2C address + command bits - * @data: data to read/write - * - * Perform an an I2C read/write operation via the SCU. All locking is - * handled for the caller. This function may sleep. - * - * Returns an error code or 0 on success. - * - * This has to be in the IPC driver for the locking. 
- */ -int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data) -{ - struct intel_scu_ipc_dev *scu = &ipcdev; - u32 cmd = 0; - - mutex_lock(&ipclock); - if (scu->dev == NULL) { - mutex_unlock(&ipclock); - return -ENODEV; - } - cmd = (addr >> 24) & 0xFF; - if (cmd == IPC_I2C_READ) { - writel(addr, scu->i2c_base + IPC_I2C_CNTRL_ADDR); - /* Write not getting updated without delay */ - usleep_range(1000, 2000); - *data = readl(scu->i2c_base + I2C_DATA_ADDR); - } else if (cmd == IPC_I2C_WRITE) { - writel(*data, scu->i2c_base + I2C_DATA_ADDR); - usleep_range(1000, 2000); - writel(addr, scu->i2c_base + IPC_I2C_CNTRL_ADDR); - } else { - dev_err(scu->dev, - "intel_scu_ipc: I2C INVALID_CMD = 0x%x\n", cmd); - - mutex_unlock(&ipclock); - return -EIO; - } - mutex_unlock(&ipclock); - return 0; -} -EXPORT_SYMBOL(intel_scu_ipc_i2c_cntrl); - /* * Interrupt handler gets called when ioc bit of IPC_COMMAND_REG set to 1 * When ioc bit is set to 1, caller api must wait for interrupt handler called @@ -610,9 +391,10 @@ EXPORT_SYMBOL(intel_scu_ipc_i2c_cntrl); static irqreturn_t ioc(int irq, void *dev_id) { struct intel_scu_ipc_dev *scu = dev_id; + int status = ipc_read_status(scu); - if (scu->irq_mode) - complete(&scu->cmd_complete); + writel(status | IPC_STATUS_IRQ, scu->ipc_base + IPC_STATUS); + complete(&scu->cmd_complete); return IRQ_HANDLED; } @@ -629,17 +411,10 @@ static int ipc_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int err; struct intel_scu_ipc_dev *scu = &ipcdev; - struct intel_scu_ipc_pdata_t *pdata; if (scu->dev) /* We support only one SCU */ return -EBUSY; - pdata = (struct intel_scu_ipc_pdata_t *)id->driver_data; - if (!pdata) - return -ENODEV; - - scu->irq_mode = pdata->irq_mode; - err = pcim_enable_device(pdev); if (err) return err; @@ -652,10 +427,6 @@ static int ipc_probe(struct pci_dev *pdev, const struct pci_device_id *id) scu->ipc_base = pcim_iomap_table(pdev)[0]; - scu->i2c_base = ioremap_nocache(pdata->i2c_base, pdata->i2c_len); - if (!scu->i2c_base) - return -ENOMEM; - err = devm_request_irq(&pdev->dev, pdev->irq, ioc, 0, "intel_scu_ipc", scu); if (err) @@ -670,13 +441,10 @@ static int ipc_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; } -#define SCU_DEVICE(id, pdata) {PCI_VDEVICE(INTEL, id), (kernel_ulong_t)&pdata} - static const struct pci_device_id pci_ids[] = { - SCU_DEVICE(PCI_DEVICE_ID_LINCROFT, intel_scu_ipc_lincroft_pdata), - SCU_DEVICE(PCI_DEVICE_ID_PENWELL, intel_scu_ipc_penwell_pdata), - SCU_DEVICE(PCI_DEVICE_ID_CLOVERVIEW, intel_scu_ipc_penwell_pdata), - SCU_DEVICE(PCI_DEVICE_ID_TANGIER, intel_scu_ipc_tangier_pdata), + { PCI_VDEVICE(INTEL, 0x080e) }, + { PCI_VDEVICE(INTEL, 0x08ea) }, + { PCI_VDEVICE(INTEL, 0x11a0) }, {} }; diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_common.c b/drivers/platform/x86/intel_speed_select_if/isst_if_common.c index 3de5a3c66529..0c2aa22c7a12 100644 --- a/drivers/platform/x86/intel_speed_select_if/isst_if_common.c +++ b/drivers/platform/x86/intel_speed_select_if/isst_if_common.c @@ -50,6 +50,8 @@ static const struct isst_valid_cmd_ranges isst_valid_cmds[] = { {0x7F, 0x00, 0x0B}, {0x7F, 0x10, 0x12}, {0x7F, 0x20, 0x23}, + {0x94, 0x03, 0x03}, + {0x95, 0x03, 0x03}, }; static const struct isst_cmd_set_req_type isst_cmd_set_reqs[] = { @@ -59,6 +61,7 @@ static const struct isst_cmd_set_req_type isst_cmd_set_reqs[] = { {0xD0, 0x03, 0x08}, {0x7F, 0x02, 0x00}, {0x7F, 0x08, 0x00}, + {0x95, 0x03, 0x03}, }; struct isst_cmd { diff --git a/drivers/platform/x86/intel_telemetry_debugfs.c 
b/drivers/platform/x86/intel_telemetry_debugfs.c index e84d3e983e0c..8e3fb55ac1ae 100644 --- a/drivers/platform/x86/intel_telemetry_debugfs.c +++ b/drivers/platform/x86/intel_telemetry_debugfs.c @@ -686,13 +686,14 @@ static ssize_t telem_pss_trc_verb_write(struct file *file, u32 verbosity; int err; - if (kstrtou32_from_user(userbuf, count, 0, &verbosity)) - return -EFAULT; + err = kstrtou32_from_user(userbuf, count, 0, &verbosity); + if (err) + return err; err = telemetry_set_trace_verbosity(TELEM_PSS, verbosity); if (err) { pr_err("Changing PSS Trace Verbosity Failed. Error %d\n", err); - count = err; + return err; } return count; @@ -733,13 +734,14 @@ static ssize_t telem_ioss_trc_verb_write(struct file *file, u32 verbosity; int err; - if (kstrtou32_from_user(userbuf, count, 0, &verbosity)) - return -EFAULT; + err = kstrtou32_from_user(userbuf, count, 0, &verbosity); + if (err) + return err; err = telemetry_set_trace_verbosity(TELEM_IOSS, verbosity); if (err) { pr_err("Changing IOSS Trace Verbosity Failed. Error %d\n", err); - count = err; + return err; } return count; diff --git a/drivers/platform/x86/intel_telemetry_pltdrv.c b/drivers/platform/x86/intel_telemetry_pltdrv.c index df8565bad595..c4c742bb23cf 100644 --- a/drivers/platform/x86/intel_telemetry_pltdrv.c +++ b/drivers/platform/x86/intel_telemetry_pltdrv.c @@ -1117,9 +1117,9 @@ static const struct telemetry_core_ops telm_pltops = { static int telemetry_pltdrv_probe(struct platform_device *pdev) { - struct resource *res0 = NULL, *res1 = NULL; const struct x86_cpu_id *id; - int size, ret = -ENOMEM; + void __iomem *mem; + int ret; id = x86_match_cpu(telemetry_cpu_ids); if (!id) @@ -1127,50 +1127,17 @@ static int telemetry_pltdrv_probe(struct platform_device *pdev) telm_conf = (struct telemetry_plt_config *)id->driver_data; - res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res0) { - ret = -EINVAL; - goto out; - } - size = resource_size(res0); - if (!devm_request_mem_region(&pdev->dev, res0->start, size, - pdev->name)) { - ret = -EBUSY; - goto out; - } - telm_conf->pss_config.ssram_base_addr = res0->start; - telm_conf->pss_config.ssram_size = size; + mem = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(mem)) + return PTR_ERR(mem); - res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (!res1) { - ret = -EINVAL; - goto out; - } - size = resource_size(res1); - if (!devm_request_mem_region(&pdev->dev, res1->start, size, - pdev->name)) { - ret = -EBUSY; - goto out; - } + telm_conf->pss_config.regmap = mem; - telm_conf->ioss_config.ssram_base_addr = res1->start; - telm_conf->ioss_config.ssram_size = size; + mem = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(mem)) + return PTR_ERR(mem); - telm_conf->pss_config.regmap = ioremap_nocache( - telm_conf->pss_config.ssram_base_addr, - telm_conf->pss_config.ssram_size); - if (!telm_conf->pss_config.regmap) { - ret = -ENOMEM; - goto out; - } - - telm_conf->ioss_config.regmap = ioremap_nocache( - telm_conf->ioss_config.ssram_base_addr, - telm_conf->ioss_config.ssram_size); - if (!telm_conf->ioss_config.regmap) { - ret = -ENOMEM; - goto out; - } + telm_conf->ioss_config.regmap = mem; mutex_init(&telm_conf->telem_lock); mutex_init(&telm_conf->telem_trace_lock); @@ -1188,14 +1155,6 @@ static int telemetry_pltdrv_probe(struct platform_device *pdev) return 0; out: - if (res0) - release_mem_region(res0->start, resource_size(res0)); - if (res1) - release_mem_region(res1->start, resource_size(res1)); - if (telm_conf->pss_config.regmap) - 
iounmap(telm_conf->pss_config.regmap); - if (telm_conf->ioss_config.regmap) - iounmap(telm_conf->ioss_config.regmap); dev_err(&pdev->dev, "TELEMETRY Setup Failed.\n"); return ret; @@ -1204,9 +1163,6 @@ out: static int telemetry_pltdrv_remove(struct platform_device *pdev) { telemetry_clear_pltdata(); - iounmap(telm_conf->pss_config.regmap); - iounmap(telm_conf->ioss_config.regmap); - return 0; } diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c index 8fe51e43f1bc..c27548fd386a 100644 --- a/drivers/platform/x86/mlx-platform.c +++ b/drivers/platform/x86/mlx-platform.c @@ -35,6 +35,8 @@ #define MLXPLAT_CPLD_LPC_REG_LED4_OFFSET 0x23 #define MLXPLAT_CPLD_LPC_REG_LED5_OFFSET 0x24 #define MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION 0x2a +#define MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET 0x2b +#define MLXPLAT_CPLD_LPC_REG_GP0_OFFSET 0x2e #define MLXPLAT_CPLD_LPC_REG_GP1_OFFSET 0x30 #define MLXPLAT_CPLD_LPC_REG_WP1_OFFSET 0x31 #define MLXPLAT_CPLD_LPC_REG_GP2_OFFSET 0x32 @@ -46,6 +48,8 @@ #define MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET 0x41 #define MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET 0x42 #define MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET 0x43 +#define MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET 0x44 +#define MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET 0x45 #define MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET 0x50 #define MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET 0x51 #define MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET 0x52 @@ -68,6 +72,7 @@ #define MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET 0xd1 #define MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET 0xd2 #define MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET 0xd3 +#define MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET 0xe2 #define MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET 0xe3 #define MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET 0xe4 #define MLXPLAT_CPLD_LPC_REG_TACHO2_OFFSET 0xe5 @@ -85,9 +90,13 @@ #define MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET 0xf6 #define MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET 0xf7 #define MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET 0xf8 +#define MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET 0xf9 +#define MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET 0xfb +#define MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET 0xfc #define MLXPLAT_CPLD_LPC_IO_RANGE 0x100 #define MLXPLAT_CPLD_LPC_I2C_CH1_OFF 0xdb #define MLXPLAT_CPLD_LPC_I2C_CH2_OFF 0xda +#define MLXPLAT_CPLD_LPC_I2C_CH3_OFF 0xdc #define MLXPLAT_CPLD_LPC_PIO_OFFSET 0x10000UL #define MLXPLAT_CPLD_LPC_REG1 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \ @@ -96,6 +105,9 @@ #define MLXPLAT_CPLD_LPC_REG2 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \ MLXPLAT_CPLD_LPC_I2C_CH2_OFF) | \ MLXPLAT_CPLD_LPC_PIO_OFFSET) +#define MLXPLAT_CPLD_LPC_REG3 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \ + MLXPLAT_CPLD_LPC_I2C_CH3_OFF) | \ + MLXPLAT_CPLD_LPC_PIO_OFFSET) /* Masks for aggregation, psu, pwr and fan event in CPLD related registers. 
*/ #define MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF 0x04 @@ -112,17 +124,29 @@ #define MLXPLAT_CPLD_LOW_AGGR_MASK_I2C BIT(6) #define MLXPLAT_CPLD_PSU_MASK GENMASK(1, 0) #define MLXPLAT_CPLD_PWR_MASK GENMASK(1, 0) +#define MLXPLAT_CPLD_PSU_EXT_MASK GENMASK(3, 0) +#define MLXPLAT_CPLD_PWR_EXT_MASK GENMASK(3, 0) #define MLXPLAT_CPLD_FAN_MASK GENMASK(3, 0) #define MLXPLAT_CPLD_ASIC_MASK GENMASK(1, 0) #define MLXPLAT_CPLD_FAN_NG_MASK GENMASK(5, 0) #define MLXPLAT_CPLD_LED_LO_NIBBLE_MASK GENMASK(7, 4) #define MLXPLAT_CPLD_LED_HI_NIBBLE_MASK GENMASK(3, 0) +#define MLXPLAT_CPLD_VOLTREG_UPD_MASK GENMASK(5, 4) +#define MLXPLAT_CPLD_I2C_CAP_BIT 0x04 +#define MLXPLAT_CPLD_I2C_CAP_MASK GENMASK(5, MLXPLAT_CPLD_I2C_CAP_BIT) + +/* Masks for aggregation for comex carriers */ +#define MLXPLAT_CPLD_AGGR_MASK_CARRIER BIT(1) +#define MLXPLAT_CPLD_AGGR_MASK_CARR_DEF (MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF | \ + MLXPLAT_CPLD_AGGR_MASK_CARRIER) +#define MLXPLAT_CPLD_LOW_AGGRCX_MASK 0xc1 /* Default I2C parent bus number */ #define MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR 1 /* Maximum number of possible physical buses equipped on system */ #define MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM 16 +#define MLXPLAT_CPLD_MAX_PHYS_EXT_ADAPTER_NUM 24 /* Number of channels in group */ #define MLXPLAT_CPLD_GRP_CHNL_NUM 8 @@ -130,14 +154,16 @@ /* Start channel numbers */ #define MLXPLAT_CPLD_CH1 2 #define MLXPLAT_CPLD_CH2 10 +#define MLXPLAT_CPLD_CH3 18 /* Number of LPC attached MUX platform devices */ -#define MLXPLAT_CPLD_LPC_MUX_DEVS 2 +#define MLXPLAT_CPLD_LPC_MUX_DEVS 3 /* Hotplug devices adapter numbers */ #define MLXPLAT_CPLD_NR_NONE -1 #define MLXPLAT_CPLD_PSU_DEFAULT_NR 10 #define MLXPLAT_CPLD_PSU_MSNXXXX_NR 4 +#define MLXPLAT_CPLD_PSU_MSNXXXX_NR2 3 #define MLXPLAT_CPLD_FAN1_DEFAULT_NR 11 #define MLXPLAT_CPLD_FAN2_DEFAULT_NR 12 #define MLXPLAT_CPLD_FAN3_DEFAULT_NR 13 @@ -187,8 +213,24 @@ static const struct resource mlxplat_lpc_resources[] = { IORESOURCE_IO), }; +/* Platform i2c next generation systems data */ +static struct mlxreg_core_data mlxplat_mlxcpld_i2c_ng_items_data[] = { + { + .reg = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET, + .mask = MLXPLAT_CPLD_I2C_CAP_MASK, + .bit = MLXPLAT_CPLD_I2C_CAP_BIT, + }, +}; + +static struct mlxreg_core_item mlxplat_mlxcpld_i2c_ng_items[] = { + { + .data = mlxplat_mlxcpld_i2c_ng_items_data, + }, +}; + /* Platform next generation systems i2c data */ static struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_i2c_ng_data = { + .items = mlxplat_mlxcpld_i2c_ng_items, .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET, .mask = MLXPLAT_CPLD_AGGR_MASK_COMEX, .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET, @@ -213,7 +255,7 @@ static const int mlxplat_default_channels[][MLXPLAT_CPLD_GRP_CHNL_NUM] = { static const int mlxplat_msn21xx_channels[] = { 1, 2, 3, 4, 5, 6, 7, 8 }; /* Platform mux data */ -static struct i2c_mux_reg_platform_data mlxplat_mux_data[] = { +static struct i2c_mux_reg_platform_data mlxplat_default_mux_data[] = { { .parent = 1, .base_nr = MLXPLAT_CPLD_CH1, @@ -233,6 +275,40 @@ static struct i2c_mux_reg_platform_data mlxplat_mux_data[] = { }; +/* Platform mux configuration variables */ +static int mlxplat_max_adap_num; +static int mlxplat_mux_num; +static struct i2c_mux_reg_platform_data *mlxplat_mux_data; + +/* Platform extended mux data */ +static struct i2c_mux_reg_platform_data mlxplat_extended_mux_data[] = { + { + .parent = 1, + .base_nr = MLXPLAT_CPLD_CH1, + .write_only = 1, + .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG1, + .reg_size = 1, + .idle_in_use = 1, + }, + { + .parent = 1, + .base_nr = 
MLXPLAT_CPLD_CH2, + .write_only = 1, + .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG3, + .reg_size = 1, + .idle_in_use = 1, + }, + { + .parent = 1, + .base_nr = MLXPLAT_CPLD_CH3, + .write_only = 1, + .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG2, + .reg_size = 1, + .idle_in_use = 1, + }, + +}; + /* Platform hotplug devices */ static struct i2c_board_info mlxplat_mlxcpld_psu[] = { { @@ -276,6 +352,22 @@ static struct i2c_board_info mlxplat_mlxcpld_fan[] = { }, }; +/* Platform hotplug comex carrier system family data */ +static struct mlxreg_core_data mlxplat_mlxcpld_comex_psu_items_data[] = { + { + .label = "psu1", + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = BIT(0), + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, + { + .label = "psu2", + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = BIT(1), + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, +}; + /* Platform hotplug default data */ static struct mlxreg_core_data mlxplat_mlxcpld_default_psu_items_data[] = { { @@ -390,6 +482,45 @@ static struct mlxreg_core_item mlxplat_mlxcpld_default_items[] = { }, }; +static struct mlxreg_core_item mlxplat_mlxcpld_comex_items[] = { + { + .data = mlxplat_mlxcpld_comex_psu_items_data, + .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER, + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = MLXPLAT_CPLD_PSU_MASK, + .count = ARRAY_SIZE(mlxplat_mlxcpld_psu), + .inversed = 1, + .health = false, + }, + { + .data = mlxplat_mlxcpld_default_pwr_items_data, + .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER, + .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET, + .mask = MLXPLAT_CPLD_PWR_MASK, + .count = ARRAY_SIZE(mlxplat_mlxcpld_pwr), + .inversed = 0, + .health = false, + }, + { + .data = mlxplat_mlxcpld_default_fan_items_data, + .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER, + .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET, + .mask = MLXPLAT_CPLD_FAN_MASK, + .count = ARRAY_SIZE(mlxplat_mlxcpld_fan), + .inversed = 1, + .health = false, + }, + { + .data = mlxplat_mlxcpld_default_asic_items_data, + .aggr_mask = MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF, + .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET, + .mask = MLXPLAT_CPLD_ASIC_MASK, + .count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data), + .inversed = 0, + .health = true, + }, +}; + static struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_data = { .items = mlxplat_mlxcpld_default_items, @@ -400,6 +531,16 @@ struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_data = { .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW, }; +static +struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_comex_data = { + .items = mlxplat_mlxcpld_comex_items, + .counter = ARRAY_SIZE(mlxplat_mlxcpld_comex_items), + .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET, + .mask = MLXPLAT_CPLD_AGGR_MASK_CARR_DEF, + .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET, + .mask_low = MLXPLAT_CPLD_LOW_AGGRCX_MASK, +}; + static struct mlxreg_core_data mlxplat_mlxcpld_msn21xx_pwr_items_data[] = { { .label = "pwr1", @@ -723,6 +864,116 @@ struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_ng_data = { .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW, }; +/* Platform hotplug extended system family data */ +static struct mlxreg_core_data mlxplat_mlxcpld_ext_psu_items_data[] = { + { + .label = "psu1", + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = BIT(0), + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, + { + .label = "psu2", + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = BIT(1), + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, + { + .label = "psu3", + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = BIT(2), + .hpdev.nr = 
MLXPLAT_CPLD_NR_NONE, + }, + { + .label = "psu4", + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = BIT(3), + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, +}; + +static struct mlxreg_core_data mlxplat_mlxcpld_ext_pwr_items_data[] = { + { + .label = "pwr1", + .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET, + .mask = BIT(0), + .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[0], + .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR, + }, + { + .label = "pwr2", + .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET, + .mask = BIT(1), + .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[1], + .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR, + }, + { + .label = "pwr3", + .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET, + .mask = BIT(2), + .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[0], + .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR2, + }, + { + .label = "pwr4", + .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET, + .mask = BIT(3), + .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[1], + .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR2, + }, +}; + +static struct mlxreg_core_item mlxplat_mlxcpld_ext_items[] = { + { + .data = mlxplat_mlxcpld_ext_psu_items_data, + .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF, + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = MLXPLAT_CPLD_PSU_EXT_MASK, + .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET, + .count = ARRAY_SIZE(mlxplat_mlxcpld_ext_psu_items_data), + .inversed = 1, + .health = false, + }, + { + .data = mlxplat_mlxcpld_ext_pwr_items_data, + .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF, + .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET, + .mask = MLXPLAT_CPLD_PWR_EXT_MASK, + .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET, + .count = ARRAY_SIZE(mlxplat_mlxcpld_ext_pwr_items_data), + .inversed = 0, + .health = false, + }, + { + .data = mlxplat_mlxcpld_default_ng_fan_items_data, + .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF, + .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET, + .mask = MLXPLAT_CPLD_FAN_NG_MASK, + .count = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_fan_items_data), + .inversed = 1, + .health = false, + }, + { + .data = mlxplat_mlxcpld_default_asic_items_data, + .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF, + .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET, + .mask = MLXPLAT_CPLD_ASIC_MASK, + .count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data), + .inversed = 0, + .health = true, + }, +}; + +static +struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_ext_data = { + .items = mlxplat_mlxcpld_ext_items, + .counter = ARRAY_SIZE(mlxplat_mlxcpld_ext_items), + .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET, + .mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF | MLXPLAT_CPLD_AGGR_MASK_COMEX, + .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET, + .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW, +}; + /* Platform led default data */ static struct mlxreg_core_data mlxplat_mlxcpld_default_led_data[] = { { @@ -964,6 +1215,80 @@ static struct mlxreg_core_platform_data mlxplat_default_ng_led_data = { .counter = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_led_data), }; +/* Platform led for Comex based 100GbE systems */ +static struct mlxreg_core_data mlxplat_mlxcpld_comex_100G_led_data[] = { + { + .label = "status:green", + .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET, + .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK, + }, + { + .label = "status:red", + .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET, + .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK + }, + { + .label = "psu:green", + .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET, + .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK, + }, + { + .label = "psu:red", + .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET, + .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK, + }, + { + .label = "fan1:green", + .reg = 
MLXPLAT_CPLD_LPC_REG_LED2_OFFSET, + .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK, + }, + { + .label = "fan1:red", + .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET, + .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK, + }, + { + .label = "fan2:green", + .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET, + .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK, + }, + { + .label = "fan2:red", + .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET, + .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK, + }, + { + .label = "fan3:green", + .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET, + .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK, + }, + { + .label = "fan3:red", + .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET, + .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK, + }, + { + .label = "fan4:green", + .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET, + .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK, + }, + { + .label = "fan4:red", + .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET, + .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK, + }, + { + .label = "uid:blue", + .reg = MLXPLAT_CPLD_LPC_REG_LED5_OFFSET, + .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK, + }, +}; + +static struct mlxreg_core_platform_data mlxplat_comex_100G_led_data = { + .data = mlxplat_mlxcpld_comex_100G_led_data, + .counter = ARRAY_SIZE(mlxplat_mlxcpld_comex_100G_led_data), +}; + /* Platform register access default */ static struct mlxreg_core_data mlxplat_mlxcpld_default_regs_io_data[] = { { @@ -1157,6 +1482,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_msn21xx_regs_io_data[] = { .mode = 0200, }, { + .label = "select_iio", + .reg = MLXPLAT_CPLD_LPC_REG_GP2_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(6), + .mode = 0644, + }, + { .label = "asic_health", .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET, .mask = MLXPLAT_CPLD_ASIC_MASK, @@ -1245,6 +1576,18 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = { .mode = 0444, }, { + .label = "reset_platform", + .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(4), + .mode = 0444, + }, + { + .label = "reset_soc", + .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(5), + .mode = 0444, + }, + { .label = "reset_comex_wd", .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET, .mask = GENMASK(7, 0) & ~BIT(6), @@ -1263,6 +1606,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = { .mode = 0444, }, { + .label = "reset_sw_pwr_off", + .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(2), + .mode = 0444, + }, + { .label = "reset_comex_thermal", .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET, .mask = GENMASK(7, 0) & ~BIT(3), @@ -1275,6 +1624,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = { .mode = 0444, }, { + .label = "reset_ac_pwr_fail", + .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(6), + .mode = 0444, + }, + { .label = "psu1_on", .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET, .mask = GENMASK(7, 0) & ~BIT(0), @@ -1317,6 +1672,43 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = { .bit = GENMASK(7, 0), .mode = 0444, }, + { + .label = "voltreg_update_status", + .reg = MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET, + .mask = MLXPLAT_CPLD_VOLTREG_UPD_MASK, + .bit = 5, + .mode = 0444, + }, + { + .label = "vpd_wp", + .reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(3), + .mode = 0644, + }, + { + .label = "pcie_asic_reset_dis", + .reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(4), + .mode = 0644, + }, + { + .label = "config1", + .reg = MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET, + .bit = 
GENMASK(7, 0), + .mode = 0444, + }, + { + .label = "config2", + .reg = MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET, + .bit = GENMASK(7, 0), + .mode = 0444, + }, + { + .label = "ufm_version", + .reg = MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET, + .bit = GENMASK(7, 0), + .mode = 0444, + }, }; static struct mlxreg_core_platform_data mlxplat_default_ng_regs_io_data = { @@ -1575,6 +1967,7 @@ static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg) case MLXPLAT_CPLD_LPC_REG_LED3_OFFSET: case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET: case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET: + case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET: case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET: case MLXPLAT_CPLD_LPC_REG_WP1_OFFSET: case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET: @@ -1582,6 +1975,7 @@ static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg) case MLXPLAT_CPLD_LPC_REG_AGGR_MASK_OFFSET: case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET: case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET: + case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET: case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET: case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET: case MLXPLAT_CPLD_LPC_REG_PSU_EVENT_OFFSET: @@ -1621,6 +2015,8 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg) case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET: case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET: case MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION: + case MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET: + case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET: case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET: case MLXPLAT_CPLD_LPC_REG_WP1_OFFSET: case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET: @@ -1631,6 +2027,8 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg) case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET: case MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET: case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET: + case MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET: + case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET: case MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET: case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET: case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET: @@ -1671,6 +2069,10 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg) case MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET: case MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET: case MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET: + case MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET: + case MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET: + case MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET: + case MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET: return true; } return false; @@ -1692,6 +2094,8 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg) case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET: case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET: case MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION: + case MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET: + case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET: case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET: case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET: case MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET: @@ -1700,6 +2104,8 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg) case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET: case MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET: case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET: + case MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET: + case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET: case MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET: case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET: case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET: @@ -1734,6 +2140,10 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg) case MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET: case MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET: case 
MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET: + case MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET: + case MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET: + case MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET: + case MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET: return true; } return false; @@ -1751,6 +2161,19 @@ static const struct reg_default mlxplat_mlxcpld_regmap_ng[] = { { MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET, 0x00 }, }; +static const struct reg_default mlxplat_mlxcpld_regmap_comex_default[] = { + { MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET, + MLXPLAT_CPLD_LOW_AGGRCX_MASK }, + { MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET, 0x00 }, +}; + +static const struct reg_default mlxplat_mlxcpld_regmap_ng400[] = { + { MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET, 0x00 }, + { MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET, 0x00 }, + { MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET, 0x00 }, + { MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET, 0x00 }, +}; + struct mlxplat_mlxcpld_regmap_context { void __iomem *base; }; @@ -1803,6 +2226,34 @@ static const struct regmap_config mlxplat_mlxcpld_regmap_config_ng = { .reg_write = mlxplat_mlxcpld_reg_write, }; +static const struct regmap_config mlxplat_mlxcpld_regmap_config_comex = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 255, + .cache_type = REGCACHE_FLAT, + .writeable_reg = mlxplat_mlxcpld_writeable_reg, + .readable_reg = mlxplat_mlxcpld_readable_reg, + .volatile_reg = mlxplat_mlxcpld_volatile_reg, + .reg_defaults = mlxplat_mlxcpld_regmap_comex_default, + .num_reg_defaults = ARRAY_SIZE(mlxplat_mlxcpld_regmap_comex_default), + .reg_read = mlxplat_mlxcpld_reg_read, + .reg_write = mlxplat_mlxcpld_reg_write, +}; + +static const struct regmap_config mlxplat_mlxcpld_regmap_config_ng400 = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 255, + .cache_type = REGCACHE_FLAT, + .writeable_reg = mlxplat_mlxcpld_writeable_reg, + .readable_reg = mlxplat_mlxcpld_readable_reg, + .volatile_reg = mlxplat_mlxcpld_volatile_reg, + .reg_defaults = mlxplat_mlxcpld_regmap_ng400, + .num_reg_defaults = ARRAY_SIZE(mlxplat_mlxcpld_regmap_ng400), + .reg_read = mlxplat_mlxcpld_reg_read, + .reg_write = mlxplat_mlxcpld_reg_write, +}; + static struct resource mlxplat_mlxcpld_resources[] = { [0] = DEFINE_RES_IRQ_NAMED(17, "mlxreg-hotplug"), }; @@ -1821,7 +2272,10 @@ static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi) { int i; - for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) { + mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM; + mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data); + mlxplat_mux_data = mlxplat_default_mux_data; + for (i = 0; i < mlxplat_mux_num; i++) { mlxplat_mux_data[i].values = mlxplat_default_channels[i]; mlxplat_mux_data[i].n_values = ARRAY_SIZE(mlxplat_default_channels[i]); @@ -1834,13 +2288,16 @@ static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi) mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0]; return 1; -}; +} static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi) { int i; - for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) { + mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM; + mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data); + mlxplat_mux_data = mlxplat_default_mux_data; + for (i = 0; i < mlxplat_mux_num; i++) { mlxplat_mux_data[i].values = mlxplat_msn21xx_channels; mlxplat_mux_data[i].n_values = ARRAY_SIZE(mlxplat_msn21xx_channels); @@ -1853,13 +2310,16 @@ static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi) mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0]; return 1; -}; +} 
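Each DMI callback in this hunk now does the same three things: point mlxplat_mux_data at a mux table, record that table's length in mlxplat_mux_num, and set mlxplat_max_adap_num. Those variables are consumed later in the patch by mlxplat_mlxcpld_verify_bus_topology() and the registration loop in mlxplat_init(). A minimal sketch of that consumer side follows; the helper name is hypothetical, and the real loop (shown further down in this diff) additionally stores each device in priv->pdev_mux[] and unwinds on error.

/*
 * Sketch only: how the per-platform mux configuration selected by a
 * DMI callback is consumed at init time. mlxplat_register_muxes_sketch
 * is a hypothetical name; the real code lives in mlxplat_init().
 */
static int mlxplat_register_muxes_sketch(struct platform_device *pdev_i2c)
{
	int i;

	for (i = 0; i < mlxplat_mux_num; i++) {
		struct platform_device *mux;

		/* One "i2c-mux-reg" device per entry of the selected table
		 * (mlxplat_default_mux_data or mlxplat_extended_mux_data).
		 */
		mux = platform_device_register_resndata(&pdev_i2c->dev,
					"i2c-mux-reg", i, NULL, 0,
					&mlxplat_mux_data[i],
					sizeof(mlxplat_mux_data[i]));
		if (IS_ERR(mux))
			return PTR_ERR(mux);
	}

	return 0;
}

mlxplat_exit() mirrors this by unregistering mlxplat_mux_num devices in reverse order, which is why the ARRAY_SIZE(mlxplat_mux_data) uses throughout this file become mlxplat_mux_num.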
static int __init mlxplat_dmi_msn274x_matched(const struct dmi_system_id *dmi) { int i; - for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) { + mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM; + mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data); + mlxplat_mux_data = mlxplat_default_mux_data; + for (i = 0; i < mlxplat_mux_num; i++) { mlxplat_mux_data[i].values = mlxplat_msn21xx_channels; mlxplat_mux_data[i].n_values = ARRAY_SIZE(mlxplat_msn21xx_channels); @@ -1872,13 +2332,16 @@ static int __init mlxplat_dmi_msn274x_matched(const struct dmi_system_id *dmi) mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0]; return 1; -}; +} static int __init mlxplat_dmi_msn201x_matched(const struct dmi_system_id *dmi) { int i; - for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) { + mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM; + mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data); + mlxplat_mux_data = mlxplat_default_mux_data; + for (i = 0; i < mlxplat_mux_num; i++) { mlxplat_mux_data[i].values = mlxplat_msn21xx_channels; mlxplat_mux_data[i].n_values = ARRAY_SIZE(mlxplat_msn21xx_channels); @@ -1891,13 +2354,16 @@ static int __init mlxplat_dmi_msn201x_matched(const struct dmi_system_id *dmi) mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0]; return 1; -}; +} static int __init mlxplat_dmi_qmb7xx_matched(const struct dmi_system_id *dmi) { int i; - for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) { + mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM; + mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data); + mlxplat_mux_data = mlxplat_default_mux_data; + for (i = 0; i < mlxplat_mux_num; i++) { mlxplat_mux_data[i].values = mlxplat_msn21xx_channels; mlxplat_mux_data[i].n_values = ARRAY_SIZE(mlxplat_msn21xx_channels); @@ -1914,7 +2380,57 @@ static int __init mlxplat_dmi_qmb7xx_matched(const struct dmi_system_id *dmi) mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng; return 1; -}; +} + +static int __init mlxplat_dmi_comex_matched(const struct dmi_system_id *dmi) +{ + int i; + + mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_EXT_ADAPTER_NUM; + mlxplat_mux_num = ARRAY_SIZE(mlxplat_extended_mux_data); + mlxplat_mux_data = mlxplat_extended_mux_data; + for (i = 0; i < mlxplat_mux_num; i++) { + mlxplat_mux_data[i].values = mlxplat_msn21xx_channels; + mlxplat_mux_data[i].n_values = + ARRAY_SIZE(mlxplat_msn21xx_channels); + } + mlxplat_hotplug = &mlxplat_mlxcpld_comex_data; + mlxplat_hotplug->deferred_nr = MLXPLAT_CPLD_MAX_PHYS_EXT_ADAPTER_NUM; + mlxplat_led = &mlxplat_comex_100G_led_data; + mlxplat_regs_io = &mlxplat_default_ng_regs_io_data; + mlxplat_fan = &mlxplat_default_fan_data; + for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++) + mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i]; + mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_comex; + + return 1; +} + +static int __init mlxplat_dmi_ng400_matched(const struct dmi_system_id *dmi) +{ + int i; + + mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM; + mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data); + mlxplat_mux_data = mlxplat_default_mux_data; + for (i = 0; i < mlxplat_mux_num; i++) { + mlxplat_mux_data[i].values = mlxplat_msn21xx_channels; + mlxplat_mux_data[i].n_values = + ARRAY_SIZE(mlxplat_msn21xx_channels); + } + mlxplat_hotplug = &mlxplat_mlxcpld_ext_data; + mlxplat_hotplug->deferred_nr = + mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1]; + mlxplat_led = &mlxplat_default_ng_led_data; + mlxplat_regs_io = &mlxplat_default_ng_regs_io_data; + mlxplat_fan = 
&mlxplat_default_fan_data; + for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++) + mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i]; + mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data; + mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng400; + + return 1; +} static const struct dmi_system_id mlxplat_dmi_table[] __initconst = { { @@ -1954,6 +2470,18 @@ static const struct dmi_system_id mlxplat_dmi_table[] __initconst = { }, }, { + .callback = mlxplat_dmi_comex_matched, + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "VMOD0009"), + }, + }, + { + .callback = mlxplat_dmi_ng400_matched, + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "VMOD0010"), + }, + }, + { .callback = mlxplat_dmi_msn274x_matched, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"), @@ -2043,7 +2571,7 @@ static int mlxplat_mlxcpld_verify_bus_topology(int *nr) /* Scan adapters from expected id to verify it is free. */ *nr = MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR; for (i = MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR; i < - MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM; i++) { + mlxplat_max_adap_num; i++) { search_adap = i2c_get_adapter(i); if (search_adap) { i2c_put_adapter(search_adap); @@ -2057,12 +2585,12 @@ static int mlxplat_mlxcpld_verify_bus_topology(int *nr) } /* Return with error if free id for adapter is not found. */ - if (i == MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM) + if (i == mlxplat_max_adap_num) return -ENODEV; /* Shift adapter ids, since expected parent adapter is not free. */ *nr = i; - for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) { + for (i = 0; i < mlxplat_mux_num; i++) { shift = *nr - mlxplat_mux_data[i].parent; mlxplat_mux_data[i].parent = *nr; mlxplat_mux_data[i].base_nr += shift; @@ -2118,7 +2646,7 @@ static int __init mlxplat_init(void) if (nr < 0) goto fail_alloc; - nr = (nr == MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM) ? -1 : nr; + nr = (nr == mlxplat_max_adap_num) ? 
-1 : nr; if (mlxplat_i2c) mlxplat_i2c->regmap = priv->regmap; priv->pdev_i2c = platform_device_register_resndata( @@ -2131,7 +2659,7 @@ static int __init mlxplat_init(void) goto fail_alloc; } - for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) { + for (i = 0; i < mlxplat_mux_num; i++) { priv->pdev_mux[i] = platform_device_register_resndata( &priv->pdev_i2c->dev, "i2c-mux-reg", i, NULL, @@ -2265,7 +2793,7 @@ static void __exit mlxplat_exit(void) platform_device_unregister(priv->pdev_led); platform_device_unregister(priv->pdev_hotplug); - for (i = ARRAY_SIZE(mlxplat_mux_data) - 1; i >= 0 ; i--) + for (i = mlxplat_mux_num - 1; i >= 0 ; i--) platform_device_unregister(priv->pdev_mux[i]); platform_device_unregister(priv->pdev_i2c); diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c index 52ef1419b671..3e3c66dfec2e 100644 --- a/drivers/platform/x86/pmc_atom.c +++ b/drivers/platform/x86/pmc_atom.c @@ -489,7 +489,7 @@ static int pmc_setup_dev(struct pci_dev *pdev, const struct pci_device_id *ent) pci_read_config_dword(pdev, PMC_BASE_ADDR_OFFSET, &pmc->base_addr); pmc->base_addr &= PMC_BASE_ADDR_MASK; - pmc->regmap = ioremap_nocache(pmc->base_addr, PMC_MMIO_REG_LEN); + pmc->regmap = ioremap(pmc->base_addr, PMC_MMIO_REG_LEN); if (!pmc->regmap) { dev_err(&pdev->dev, "error: ioremap failed\n"); return -ENOMEM; diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c index 9b6a93ff41ff..23e40aa2176e 100644 --- a/drivers/platform/x86/samsung-laptop.c +++ b/drivers/platform/x86/samsung-laptop.c @@ -1394,7 +1394,7 @@ static int __init samsung_sabi_init(struct samsung_laptop *samsung) int ret = 0; int i; - samsung->f0000_segment = ioremap_nocache(0xf0000, 0xffff); + samsung->f0000_segment = ioremap(0xf0000, 0xffff); if (!samsung->f0000_segment) { if (debug || force) pr_err("Can't map the segment at 0xf0000\n"); @@ -1434,7 +1434,7 @@ static int __init samsung_sabi_init(struct samsung_laptop *samsung) if (debug) samsung_sabi_infos(samsung, loca, ifaceP); - samsung->sabi_iface = ioremap_nocache(ifaceP, 16); + samsung->sabi_iface = ioremap(ifaceP, 16); if (!samsung->sabi_iface) { pr_err("Can't remap %x\n", ifaceP); ret = -EINVAL; diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c index 72205771d03d..93177e6e5ecd 100644 --- a/drivers/platform/x86/touchscreen_dmi.c +++ b/drivers/platform/x86/touchscreen_dmi.c @@ -219,8 +219,7 @@ static const struct property_entry digma_citi_e200_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1980), PROPERTY_ENTRY_U32("touchscreen-size-y", 1500), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl1686-digma_citi_e200.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-digma_citi_e200.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -236,8 +235,7 @@ static const struct property_entry gp_electronic_t701_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 640), PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl1680-gp-electronic-t701.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-gp-electronic-t701.fw"), { } }; @@ -382,8 +380,7 @@ static const struct property_entry onda_v80_plus_v3_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1698), PROPERTY_ENTRY_U32("touchscreen-size-y", 1140), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), - PROPERTY_ENTRY_STRING("firmware-name", - 
"gsl3676-onda-v80-plus-v3.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-v80-plus-v3.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -398,8 +395,7 @@ static const struct property_entry onda_v820w_32g_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1665), PROPERTY_ENTRY_U32("touchscreen-size-y", 1140), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl1680-onda-v820w-32g.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-onda-v820w-32g.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -415,8 +411,7 @@ static const struct property_entry onda_v891w_v1_props[] = { PROPERTY_ENTRY_U32("touchscreen-min-y", 8), PROPERTY_ENTRY_U32("touchscreen-size-x", 1676), PROPERTY_ENTRY_U32("touchscreen-size-y", 1130), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl3680-onda-v891w-v1.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-onda-v891w-v1.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -433,8 +428,7 @@ static const struct property_entry onda_v891w_v3_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1625), PROPERTY_ENTRY_U32("touchscreen-size-y", 1135), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl3676-onda-v891w-v3.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-v891w-v3.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -450,8 +444,7 @@ static const struct property_entry pipo_w2s_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 880), PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl1680-pipo-w2s.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-pipo-w2s.fw"), { } }; @@ -460,14 +453,29 @@ static const struct ts_dmi_data pipo_w2s_data = { .properties = pipo_w2s_props, }; +static const struct property_entry pipo_w11_props[] = { + PROPERTY_ENTRY_U32("touchscreen-min-x", 1), + PROPERTY_ENTRY_U32("touchscreen-min-y", 15), + PROPERTY_ENTRY_U32("touchscreen-size-x", 1984), + PROPERTY_ENTRY_U32("touchscreen-size-y", 1532), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-pipo-w11.fw"), + PROPERTY_ENTRY_U32("silead,max-fingers", 10), + PROPERTY_ENTRY_BOOL("silead,home-button"), + { } +}; + +static const struct ts_dmi_data pipo_w11_data = { + .acpi_name = "MSSL1680:00", + .properties = pipo_w11_props, +}; + static const struct property_entry pov_mobii_wintab_p800w_v20_props[] = { PROPERTY_ENTRY_U32("touchscreen-min-x", 32), PROPERTY_ENTRY_U32("touchscreen-min-y", 16), PROPERTY_ENTRY_U32("touchscreen-size-x", 1692), PROPERTY_ENTRY_U32("touchscreen-size-y", 1146), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl3680-pov-mobii-wintab-p800w-v20.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-pov-mobii-wintab-p800w-v20.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -484,8 +492,7 @@ static const struct property_entry pov_mobii_wintab_p800w_v21_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1794), PROPERTY_ENTRY_U32("touchscreen-size-y", 1148), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl3692-pov-mobii-wintab-p800w.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-pov-mobii-wintab-p800w.fw"), 
PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -502,8 +509,7 @@ static const struct property_entry pov_mobii_wintab_p1006w_v10_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1984), PROPERTY_ENTRY_U32("touchscreen-size-y", 1520), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl3692-pov-mobii-wintab-p1006w-v10.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-pov-mobii-wintab-p1006w-v10.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -520,8 +526,7 @@ static const struct property_entry schneider_sct101ctm_props[] = { PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl1680-schneider-sct101ctm.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-schneider-sct101ctm.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -551,8 +556,7 @@ static const struct property_entry teclast_x98plus2_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1280), PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl1686-teclast_x98plus2.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-teclast_x98plus2.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), { } }; @@ -566,8 +570,7 @@ static const struct property_entry trekstor_primebook_c11_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1970), PROPERTY_ENTRY_U32("touchscreen-size-y", 1530), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl1680-trekstor-primebook-c11.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primebook-c11.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -581,8 +584,7 @@ static const struct ts_dmi_data trekstor_primebook_c11_data = { static const struct property_entry trekstor_primebook_c13_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 2624), PROPERTY_ENTRY_U32("touchscreen-size-y", 1920), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl1680-trekstor-primebook-c13.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primebook-c13.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -596,8 +598,7 @@ static const struct ts_dmi_data trekstor_primebook_c13_data = { static const struct property_entry trekstor_primetab_t13b_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 2500), PROPERTY_ENTRY_U32("touchscreen-size-y", 1900), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl1680-trekstor-primetab-t13b.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primetab-t13b.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), @@ -613,8 +614,7 @@ static const struct property_entry trekstor_surftab_twin_10_1_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1900), PROPERTY_ENTRY_U32("touchscreen-size-y", 1280), PROPERTY_ENTRY_U32("touchscreen-inverted-y", 1), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl3670-surftab-twin-10-1-st10432-8.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-surftab-twin-10-1-st10432-8.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), { } }; @@ -629,8 +629,7 @@ static const struct property_entry 
trekstor_surftab_wintron70_props[] = { PROPERTY_ENTRY_U32("touchscreen-min-y", 8), PROPERTY_ENTRY_U32("touchscreen-size-x", 884), PROPERTY_ENTRY_U32("touchscreen-size-y", 632), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl1686-surftab-wintron70-st70416-6.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-surftab-wintron70-st70416-6.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -910,6 +909,16 @@ static const struct dmi_system_id touchscreen_dmi_table[] = { }, }, { + /* Pipo W11 */ + .driver_data = (void *)&pipo_w11_data, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "PIPO"), + DMI_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."), + /* Above matches are too generic, add bios-ver match */ + DMI_MATCH(DMI_BIOS_VERSION, "JS-BI-10.6-SF133GR300-GA55B-024-F"), + }, + }, + { /* Ployer Momo7w (same hardware as the Trekstor ST70416-6) */ .driver_data = (void *)&trekstor_surftab_wintron70_data, .matches = { @@ -1032,8 +1041,7 @@ static const struct dmi_system_id touchscreen_dmi_table[] = { .driver_data = (void *)&trekstor_surftab_wintron70_data, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "TrekStor"), - DMI_MATCH(DMI_PRODUCT_NAME, - "SurfTab wintron 7.0 ST70416-6"), + DMI_MATCH(DMI_PRODUCT_NAME, "SurfTab wintron 7.0 ST70416-6"), /* Exact match, different versions need different fw */ DMI_MATCH(DMI_BIOS_VERSION, "TREK.G.WI71C.JGBMRBA05"), }, @@ -1065,7 +1073,7 @@ static void ts_dmi_add_props(struct i2c_client *client) } static int ts_dmi_notifier_call(struct notifier_block *nb, - unsigned long action, void *data) + unsigned long action, void *data) { struct device *dev = data; struct i2c_client *client; diff --git a/drivers/pnp/isapnp/core.c b/drivers/pnp/isapnp/core.c index 179b737280e1..c43d8ad02529 100644 --- a/drivers/pnp/isapnp/core.c +++ b/drivers/pnp/isapnp/core.c @@ -747,34 +747,12 @@ __skip: } /* - * Compute ISA PnP checksum for first eight bytes. - */ -static unsigned char __init isapnp_checksum(unsigned char *data) -{ - int i, j; - unsigned char checksum = 0x6a, bit, b; - - for (i = 0; i < 8; i++) { - b = data[i]; - for (j = 0; j < 8; j++) { - bit = 0; - if (b & (1 << j)) - bit = 1; - checksum = - ((((checksum ^ (checksum >> 1)) & 0x01) ^ bit) << 7) - | (checksum >> 1); - } - } - return checksum; -} - -/* * Build device list for all present ISA PnP devices. */ static int __init isapnp_build_device_list(void) { int csn; - unsigned char header[9], checksum; + unsigned char header[9]; struct pnp_card *card; u32 eisa_id; char id[8]; @@ -784,7 +762,6 @@ static int __init isapnp_build_device_list(void) for (csn = 1; csn <= isapnp_csn_count; csn++) { isapnp_wake(csn); isapnp_peek(header, 9); - checksum = isapnp_checksum(header); eisa_id = header[0] | header[1] << 8 | header[2] << 16 | header[3] << 24; pnp_eisa_id_to_string(eisa_id, id); diff --git a/drivers/power/avs/Kconfig b/drivers/power/avs/Kconfig index 089b6244b716..b8fe166cd0d9 100644 --- a/drivers/power/avs/Kconfig +++ b/drivers/power/avs/Kconfig @@ -12,6 +12,22 @@ menuconfig POWER_AVS Say Y here to enable Adaptive Voltage Scaling class support. +config QCOM_CPR + tristate "QCOM Core Power Reduction (CPR) support" + depends on POWER_AVS + select PM_OPP + select REGMAP + help + Say Y here to enable support for the CPR hardware found on Qualcomm + SoCs like QCS404. + + This driver populates CPU OPP tables and makes adjustments to the + tables based on feedback from the CPR hardware. If you want to do + CPU frequency scaling, say Y here.
+ + To compile this driver as a module, choose M here: the module will + be called qcom-cpr + config ROCKCHIP_IODOMAIN tristate "Rockchip IO domain support" depends on POWER_AVS && ARCH_ROCKCHIP && OF diff --git a/drivers/power/avs/Makefile b/drivers/power/avs/Makefile index a1b8cd453f19..9007d05853e2 100644 --- a/drivers/power/avs/Makefile +++ b/drivers/power/avs/Makefile @@ -1,3 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_POWER_AVS_OMAP) += smartreflex.o +obj-$(CONFIG_QCOM_CPR) += qcom-cpr.o obj-$(CONFIG_ROCKCHIP_IODOMAIN) += rockchip-io-domain.o diff --git a/drivers/power/avs/qcom-cpr.c b/drivers/power/avs/qcom-cpr.c new file mode 100644 index 000000000000..9192fb747653 --- /dev/null +++ b/drivers/power/avs/qcom-cpr.c @@ -0,0 +1,1793 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. + * Copyright (c) 2019, Linaro Limited + */ + +#include <linux/module.h> +#include <linux/err.h> +#include <linux/debugfs.h> +#include <linux/string.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/bitops.h> +#include <linux/slab.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pm_domain.h> +#include <linux/pm_opp.h> +#include <linux/interrupt.h> +#include <linux/regmap.h> +#include <linux/mfd/syscon.h> +#include <linux/regulator/consumer.h> +#include <linux/clk.h> +#include <linux/nvmem-consumer.h> + +/* Register Offsets for RB-CPR and Bit Definitions */ + +/* RBCPR Version Register */ +#define REG_RBCPR_VERSION 0 +#define RBCPR_VER_2 0x02 +#define FLAGS_IGNORE_1ST_IRQ_STATUS BIT(0) + +/* RBCPR Gate Count and Target Registers */ +#define REG_RBCPR_GCNT_TARGET(n) (0x60 + 4 * (n)) + +#define RBCPR_GCNT_TARGET_TARGET_SHIFT 0 +#define RBCPR_GCNT_TARGET_TARGET_MASK GENMASK(11, 0) +#define RBCPR_GCNT_TARGET_GCNT_SHIFT 12 +#define RBCPR_GCNT_TARGET_GCNT_MASK GENMASK(9, 0) + +/* RBCPR Timer Control */ +#define REG_RBCPR_TIMER_INTERVAL 0x44 +#define REG_RBIF_TIMER_ADJUST 0x4c + +#define RBIF_TIMER_ADJ_CONS_UP_MASK GENMASK(3, 0) +#define RBIF_TIMER_ADJ_CONS_UP_SHIFT 0 +#define RBIF_TIMER_ADJ_CONS_DOWN_MASK GENMASK(3, 0) +#define RBIF_TIMER_ADJ_CONS_DOWN_SHIFT 4 +#define RBIF_TIMER_ADJ_CLAMP_INT_MASK GENMASK(7, 0) +#define RBIF_TIMER_ADJ_CLAMP_INT_SHIFT 8 + +/* RBCPR Config Register */ +#define REG_RBIF_LIMIT 0x48 +#define RBIF_LIMIT_CEILING_MASK GENMASK(5, 0) +#define RBIF_LIMIT_CEILING_SHIFT 6 +#define RBIF_LIMIT_FLOOR_BITS 6 +#define RBIF_LIMIT_FLOOR_MASK GENMASK(5, 0) + +#define RBIF_LIMIT_CEILING_DEFAULT RBIF_LIMIT_CEILING_MASK +#define RBIF_LIMIT_FLOOR_DEFAULT 0 + +#define REG_RBIF_SW_VLEVEL 0x94 +#define RBIF_SW_VLEVEL_DEFAULT 0x20 + +#define REG_RBCPR_STEP_QUOT 0x80 +#define RBCPR_STEP_QUOT_STEPQUOT_MASK GENMASK(7, 0) +#define RBCPR_STEP_QUOT_IDLE_CLK_MASK GENMASK(3, 0) +#define RBCPR_STEP_QUOT_IDLE_CLK_SHIFT 8 + +/* RBCPR Control Register */ +#define REG_RBCPR_CTL 0x90 + +#define RBCPR_CTL_LOOP_EN BIT(0) +#define RBCPR_CTL_TIMER_EN BIT(3) +#define RBCPR_CTL_SW_AUTO_CONT_ACK_EN BIT(5) +#define RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN BIT(6) +#define RBCPR_CTL_COUNT_MODE BIT(10) +#define RBCPR_CTL_UP_THRESHOLD_MASK GENMASK(3, 0) +#define RBCPR_CTL_UP_THRESHOLD_SHIFT 24 +#define RBCPR_CTL_DN_THRESHOLD_MASK GENMASK(3, 0) +#define RBCPR_CTL_DN_THRESHOLD_SHIFT 28 + +/* RBCPR Ack/Nack Response */ +#define REG_RBIF_CONT_ACK_CMD 0x98 +#define REG_RBIF_CONT_NACK_CMD 0x9c + +/* RBCPR Result status Register */ 
+#define REG_RBCPR_RESULT_0 0xa0 + +#define RBCPR_RESULT0_BUSY_SHIFT 19 +#define RBCPR_RESULT0_BUSY_MASK BIT(RBCPR_RESULT0_BUSY_SHIFT) +#define RBCPR_RESULT0_ERROR_LT0_SHIFT 18 +#define RBCPR_RESULT0_ERROR_SHIFT 6 +#define RBCPR_RESULT0_ERROR_MASK GENMASK(11, 0) +#define RBCPR_RESULT0_ERROR_STEPS_SHIFT 2 +#define RBCPR_RESULT0_ERROR_STEPS_MASK GENMASK(3, 0) +#define RBCPR_RESULT0_STEP_UP_SHIFT 1 + +/* RBCPR Interrupt Control Register */ +#define REG_RBIF_IRQ_EN(n) (0x100 + 4 * (n)) +#define REG_RBIF_IRQ_CLEAR 0x110 +#define REG_RBIF_IRQ_STATUS 0x114 + +#define CPR_INT_DONE BIT(0) +#define CPR_INT_MIN BIT(1) +#define CPR_INT_DOWN BIT(2) +#define CPR_INT_MID BIT(3) +#define CPR_INT_UP BIT(4) +#define CPR_INT_MAX BIT(5) +#define CPR_INT_CLAMP BIT(6) +#define CPR_INT_ALL (CPR_INT_DONE | CPR_INT_MIN | CPR_INT_DOWN | \ + CPR_INT_MID | CPR_INT_UP | CPR_INT_MAX | CPR_INT_CLAMP) +#define CPR_INT_DEFAULT (CPR_INT_UP | CPR_INT_DOWN) + +#define CPR_NUM_RING_OSC 8 + +/* CPR eFuse parameters */ +#define CPR_FUSE_TARGET_QUOT_BITS_MASK GENMASK(11, 0) + +#define CPR_FUSE_MIN_QUOT_DIFF 50 + +#define FUSE_REVISION_UNKNOWN (-1) + +enum voltage_change_dir { + NO_CHANGE, + DOWN, + UP, +}; + +struct cpr_fuse { + char *ring_osc; + char *init_voltage; + char *quotient; + char *quotient_offset; +}; + +struct fuse_corner_data { + int ref_uV; + int max_uV; + int min_uV; + int max_volt_scale; + int max_quot_scale; + /* fuse quot */ + int quot_offset; + int quot_scale; + int quot_adjust; + /* fuse quot_offset */ + int quot_offset_scale; + int quot_offset_adjust; +}; + +struct cpr_fuses { + int init_voltage_step; + int init_voltage_width; + struct fuse_corner_data *fuse_corner_data; +}; + +struct corner_data { + unsigned int fuse_corner; + unsigned long freq; +}; + +struct cpr_desc { + unsigned int num_fuse_corners; + int min_diff_quot; + int *step_quot; + + unsigned int timer_delay_us; + unsigned int timer_cons_up; + unsigned int timer_cons_down; + unsigned int up_threshold; + unsigned int down_threshold; + unsigned int idle_clocks; + unsigned int gcnt_us; + unsigned int vdd_apc_step_up_limit; + unsigned int vdd_apc_step_down_limit; + unsigned int clamp_timer_interval; + + struct cpr_fuses cpr_fuses; + bool reduce_to_fuse_uV; + bool reduce_to_corner_uV; +}; + +struct acc_desc { + unsigned int enable_reg; + u32 enable_mask; + + struct reg_sequence *config; + struct reg_sequence *settings; + int num_regs_per_fuse; +}; + +struct cpr_acc_desc { + const struct cpr_desc *cpr_desc; + const struct acc_desc *acc_desc; +}; + +struct fuse_corner { + int min_uV; + int max_uV; + int uV; + int quot; + int step_quot; + const struct reg_sequence *accs; + int num_accs; + unsigned long max_freq; + u8 ring_osc_idx; +}; + +struct corner { + int min_uV; + int max_uV; + int uV; + int last_uV; + int quot_adjust; + u32 save_ctl; + u32 save_irq; + unsigned long freq; + struct fuse_corner *fuse_corner; +}; + +struct cpr_drv { + unsigned int num_corners; + unsigned int ref_clk_khz; + + struct generic_pm_domain pd; + struct device *dev; + struct device *attached_cpu_dev; + struct mutex lock; + void __iomem *base; + struct corner *corner; + struct regulator *vdd_apc; + struct clk *cpu_clk; + struct regmap *tcsr; + bool loop_disabled; + u32 gcnt; + unsigned long flags; + + struct fuse_corner *fuse_corners; + struct corner *corners; + + const struct cpr_desc *desc; + const struct acc_desc *acc_desc; + const struct cpr_fuse *cpr_fuses; + + struct dentry *debugfs; +}; + +static bool cpr_is_allowed(struct cpr_drv *drv) +{ + return !drv->loop_disabled; 
+} + +static void cpr_write(struct cpr_drv *drv, u32 offset, u32 value) +{ + writel_relaxed(value, drv->base + offset); +} + +static u32 cpr_read(struct cpr_drv *drv, u32 offset) +{ + return readl_relaxed(drv->base + offset); +} + +static void +cpr_masked_write(struct cpr_drv *drv, u32 offset, u32 mask, u32 value) +{ + u32 val; + + val = readl_relaxed(drv->base + offset); + val &= ~mask; + val |= value & mask; + writel_relaxed(val, drv->base + offset); +} + +static void cpr_irq_clr(struct cpr_drv *drv) +{ + cpr_write(drv, REG_RBIF_IRQ_CLEAR, CPR_INT_ALL); +} + +static void cpr_irq_clr_nack(struct cpr_drv *drv) +{ + cpr_irq_clr(drv); + cpr_write(drv, REG_RBIF_CONT_NACK_CMD, 1); +} + +static void cpr_irq_clr_ack(struct cpr_drv *drv) +{ + cpr_irq_clr(drv); + cpr_write(drv, REG_RBIF_CONT_ACK_CMD, 1); +} + +static void cpr_irq_set(struct cpr_drv *drv, u32 int_bits) +{ + cpr_write(drv, REG_RBIF_IRQ_EN(0), int_bits); +} + +static void cpr_ctl_modify(struct cpr_drv *drv, u32 mask, u32 value) +{ + cpr_masked_write(drv, REG_RBCPR_CTL, mask, value); +} + +static void cpr_ctl_enable(struct cpr_drv *drv, struct corner *corner) +{ + u32 val, mask; + const struct cpr_desc *desc = drv->desc; + + /* Program Consecutive Up & Down */ + val = desc->timer_cons_down << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT; + val |= desc->timer_cons_up << RBIF_TIMER_ADJ_CONS_UP_SHIFT; + mask = RBIF_TIMER_ADJ_CONS_UP_MASK | RBIF_TIMER_ADJ_CONS_DOWN_MASK; + cpr_masked_write(drv, REG_RBIF_TIMER_ADJUST, mask, val); + cpr_masked_write(drv, REG_RBCPR_CTL, + RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN | + RBCPR_CTL_SW_AUTO_CONT_ACK_EN, + corner->save_ctl); + cpr_irq_set(drv, corner->save_irq); + + if (cpr_is_allowed(drv) && corner->max_uV > corner->min_uV) + val = RBCPR_CTL_LOOP_EN; + else + val = 0; + cpr_ctl_modify(drv, RBCPR_CTL_LOOP_EN, val); +} + +static void cpr_ctl_disable(struct cpr_drv *drv) +{ + cpr_irq_set(drv, 0); + cpr_ctl_modify(drv, RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN | + RBCPR_CTL_SW_AUTO_CONT_ACK_EN, 0); + cpr_masked_write(drv, REG_RBIF_TIMER_ADJUST, + RBIF_TIMER_ADJ_CONS_UP_MASK | + RBIF_TIMER_ADJ_CONS_DOWN_MASK, 0); + cpr_irq_clr(drv); + cpr_write(drv, REG_RBIF_CONT_ACK_CMD, 1); + cpr_write(drv, REG_RBIF_CONT_NACK_CMD, 1); + cpr_ctl_modify(drv, RBCPR_CTL_LOOP_EN, 0); +} + +static bool cpr_ctl_is_enabled(struct cpr_drv *drv) +{ + u32 reg_val; + + reg_val = cpr_read(drv, REG_RBCPR_CTL); + return reg_val & RBCPR_CTL_LOOP_EN; +} + +static bool cpr_ctl_is_busy(struct cpr_drv *drv) +{ + u32 reg_val; + + reg_val = cpr_read(drv, REG_RBCPR_RESULT_0); + return reg_val & RBCPR_RESULT0_BUSY_MASK; +} + +static void cpr_corner_save(struct cpr_drv *drv, struct corner *corner) +{ + corner->save_ctl = cpr_read(drv, REG_RBCPR_CTL); + corner->save_irq = cpr_read(drv, REG_RBIF_IRQ_EN(0)); +} + +static void cpr_corner_restore(struct cpr_drv *drv, struct corner *corner) +{ + u32 gcnt, ctl, irq, ro_sel, step_quot; + struct fuse_corner *fuse = corner->fuse_corner; + const struct cpr_desc *desc = drv->desc; + int i; + + ro_sel = fuse->ring_osc_idx; + gcnt = drv->gcnt; + gcnt |= fuse->quot - corner->quot_adjust; + + /* Program the step quotient and idle clocks */ + step_quot = desc->idle_clocks << RBCPR_STEP_QUOT_IDLE_CLK_SHIFT; + step_quot |= fuse->step_quot & RBCPR_STEP_QUOT_STEPQUOT_MASK; + cpr_write(drv, REG_RBCPR_STEP_QUOT, step_quot); + + /* Clear the target quotient value and gate count of all ROs */ + for (i = 0; i < CPR_NUM_RING_OSC; i++) + cpr_write(drv, REG_RBCPR_GCNT_TARGET(i), 0); + + cpr_write(drv, REG_RBCPR_GCNT_TARGET(ro_sel), gcnt); + ctl = 
corner->save_ctl; + cpr_write(drv, REG_RBCPR_CTL, ctl); + irq = corner->save_irq; + cpr_irq_set(drv, irq); + dev_dbg(drv->dev, "gcnt = %#08x, ctl = %#08x, irq = %#08x\n", gcnt, + ctl, irq); +} + +static void cpr_set_acc(struct regmap *tcsr, struct fuse_corner *f, + struct fuse_corner *end) +{ + if (f == end) + return; + + if (f < end) { + for (f += 1; f <= end; f++) + regmap_multi_reg_write(tcsr, f->accs, f->num_accs); + } else { + for (f -= 1; f >= end; f--) + regmap_multi_reg_write(tcsr, f->accs, f->num_accs); + } +} + +static int cpr_pre_voltage(struct cpr_drv *drv, + struct fuse_corner *fuse_corner, + enum voltage_change_dir dir) +{ + struct fuse_corner *prev_fuse_corner = drv->corner->fuse_corner; + + if (drv->tcsr && dir == DOWN) + cpr_set_acc(drv->tcsr, prev_fuse_corner, fuse_corner); + + return 0; +} + +static int cpr_post_voltage(struct cpr_drv *drv, + struct fuse_corner *fuse_corner, + enum voltage_change_dir dir) +{ + struct fuse_corner *prev_fuse_corner = drv->corner->fuse_corner; + + if (drv->tcsr && dir == UP) + cpr_set_acc(drv->tcsr, prev_fuse_corner, fuse_corner); + + return 0; +} + +static int cpr_scale_voltage(struct cpr_drv *drv, struct corner *corner, + int new_uV, enum voltage_change_dir dir) +{ + int ret; + struct fuse_corner *fuse_corner = corner->fuse_corner; + + ret = cpr_pre_voltage(drv, fuse_corner, dir); + if (ret) + return ret; + + ret = regulator_set_voltage(drv->vdd_apc, new_uV, new_uV); + if (ret) { + dev_err_ratelimited(drv->dev, "failed to set apc voltage %d\n", + new_uV); + return ret; + } + + ret = cpr_post_voltage(drv, fuse_corner, dir); + if (ret) + return ret; + + return 0; +} + +static unsigned int cpr_get_cur_perf_state(struct cpr_drv *drv) +{ + return drv->corner ? drv->corner - drv->corners + 1 : 0; +} + +static int cpr_scale(struct cpr_drv *drv, enum voltage_change_dir dir) +{ + u32 val, error_steps, reg_mask; + int last_uV, new_uV, step_uV, ret; + struct corner *corner; + const struct cpr_desc *desc = drv->desc; + + if (dir != UP && dir != DOWN) + return 0; + + step_uV = regulator_get_linear_step(drv->vdd_apc); + if (!step_uV) + return -EINVAL; + + corner = drv->corner; + + val = cpr_read(drv, REG_RBCPR_RESULT_0); + + error_steps = val >> RBCPR_RESULT0_ERROR_STEPS_SHIFT; + error_steps &= RBCPR_RESULT0_ERROR_STEPS_MASK; + last_uV = corner->last_uV; + + if (dir == UP) { + if (desc->clamp_timer_interval && + error_steps < desc->up_threshold) { + /* + * Handle the case where another measurement started + * after the interrupt was triggered due to a core + * exiting from power collapse. 
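+ * In that case the error_steps value just read from RBCPR_RESULT_0
+ * may belong to the new, still-running measurement, so replace it
+ * with max(up_threshold, vdd_apc_step_up_limit). As an illustration
+ * (using the qcs404 parameters defined later in this driver,
+ * up_threshold = 1 and vdd_apc_step_up_limit = 1), this amounts to a
+ * single regulator step.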
+ */ + error_steps = max(desc->up_threshold, + desc->vdd_apc_step_up_limit); + } + + if (last_uV >= corner->max_uV) { + cpr_irq_clr_nack(drv); + + /* Maximize the UP threshold */ + reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK; + reg_mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT; + val = reg_mask; + cpr_ctl_modify(drv, reg_mask, val); + + /* Disable UP interrupt */ + cpr_irq_set(drv, CPR_INT_DEFAULT & ~CPR_INT_UP); + + return 0; + } + + if (error_steps > desc->vdd_apc_step_up_limit) + error_steps = desc->vdd_apc_step_up_limit; + + /* Calculate new voltage */ + new_uV = last_uV + error_steps * step_uV; + new_uV = min(new_uV, corner->max_uV); + + dev_dbg(drv->dev, + "UP: -> new_uV: %d last_uV: %d perf state: %u\n", + new_uV, last_uV, cpr_get_cur_perf_state(drv)); + } else if (dir == DOWN) { + if (desc->clamp_timer_interval && + error_steps < desc->down_threshold) { + /* + * Handle the case where another measurement started + * after the interrupt was triggered due to a core + * exiting from power collapse. + */ + error_steps = max(desc->down_threshold, + desc->vdd_apc_step_down_limit); + } + + if (last_uV <= corner->min_uV) { + cpr_irq_clr_nack(drv); + + /* Enable auto nack down */ + reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN; + val = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN; + + cpr_ctl_modify(drv, reg_mask, val); + + /* Disable DOWN interrupt */ + cpr_irq_set(drv, CPR_INT_DEFAULT & ~CPR_INT_DOWN); + + return 0; + } + + if (error_steps > desc->vdd_apc_step_down_limit) + error_steps = desc->vdd_apc_step_down_limit; + + /* Calculate new voltage */ + new_uV = last_uV - error_steps * step_uV; + new_uV = max(new_uV, corner->min_uV); + + dev_dbg(drv->dev, + "DOWN: -> new_uV: %d last_uV: %d perf state: %u\n", + new_uV, last_uV, cpr_get_cur_perf_state(drv)); + } + + ret = cpr_scale_voltage(drv, corner, new_uV, dir); + if (ret) { + cpr_irq_clr_nack(drv); + return ret; + } + drv->corner->last_uV = new_uV; + + if (dir == UP) { + /* Disable auto nack down */ + reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN; + val = 0; + } else if (dir == DOWN) { + /* Restore default threshold for UP */ + reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK; + reg_mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT; + val = desc->up_threshold; + val <<= RBCPR_CTL_UP_THRESHOLD_SHIFT; + } + + cpr_ctl_modify(drv, reg_mask, val); + + /* Re-enable default interrupts */ + cpr_irq_set(drv, CPR_INT_DEFAULT); + + /* Ack */ + cpr_irq_clr_ack(drv); + + return 0; +} + +static irqreturn_t cpr_irq_handler(int irq, void *dev) +{ + struct cpr_drv *drv = dev; + const struct cpr_desc *desc = drv->desc; + irqreturn_t ret = IRQ_HANDLED; + u32 val; + + mutex_lock(&drv->lock); + + val = cpr_read(drv, REG_RBIF_IRQ_STATUS); + if (drv->flags & FLAGS_IGNORE_1ST_IRQ_STATUS) + val = cpr_read(drv, REG_RBIF_IRQ_STATUS); + + dev_dbg(drv->dev, "IRQ_STATUS = %#02x\n", val); + + if (!cpr_ctl_is_enabled(drv)) { + dev_dbg(drv->dev, "CPR is disabled\n"); + ret = IRQ_NONE; + } else if (cpr_ctl_is_busy(drv) && !desc->clamp_timer_interval) { + dev_dbg(drv->dev, "CPR measurement is not ready\n"); + } else if (!cpr_is_allowed(drv)) { + val = cpr_read(drv, REG_RBCPR_CTL); + dev_err_ratelimited(drv->dev, + "Interrupt broken? 
RBCPR_CTL = %#02x\n", + val); + ret = IRQ_NONE; + } else { + /* + * Following sequence of handling is as per each IRQ's + * priority + */ + if (val & CPR_INT_UP) { + cpr_scale(drv, UP); + } else if (val & CPR_INT_DOWN) { + cpr_scale(drv, DOWN); + } else if (val & CPR_INT_MIN) { + cpr_irq_clr_nack(drv); + } else if (val & CPR_INT_MAX) { + cpr_irq_clr_nack(drv); + } else if (val & CPR_INT_MID) { + /* RBCPR_CTL_SW_AUTO_CONT_ACK_EN is enabled */ + dev_dbg(drv->dev, "IRQ occurred for Mid Flag\n"); + } else { + dev_dbg(drv->dev, + "IRQ occurred for unknown flag (%#08x)\n", val); + } + + /* Save register values for the corner */ + cpr_corner_save(drv, drv->corner); + } + + mutex_unlock(&drv->lock); + + return ret; +} + +static int cpr_enable(struct cpr_drv *drv) +{ + int ret; + + ret = regulator_enable(drv->vdd_apc); + if (ret) + return ret; + + mutex_lock(&drv->lock); + + if (cpr_is_allowed(drv) && drv->corner) { + cpr_irq_clr(drv); + cpr_corner_restore(drv, drv->corner); + cpr_ctl_enable(drv, drv->corner); + } + + mutex_unlock(&drv->lock); + + return 0; +} + +static int cpr_disable(struct cpr_drv *drv) +{ + int ret; + + mutex_lock(&drv->lock); + + if (cpr_is_allowed(drv)) { + cpr_ctl_disable(drv); + cpr_irq_clr(drv); + } + + mutex_unlock(&drv->lock); + + ret = regulator_disable(drv->vdd_apc); + if (ret) + return ret; + + return 0; +} + +static int cpr_config(struct cpr_drv *drv) +{ + int i; + u32 val, gcnt; + struct corner *corner; + const struct cpr_desc *desc = drv->desc; + + /* Disable interrupt and CPR */ + cpr_write(drv, REG_RBIF_IRQ_EN(0), 0); + cpr_write(drv, REG_RBCPR_CTL, 0); + + /* Program the default HW ceiling, floor and vlevel */ + val = (RBIF_LIMIT_CEILING_DEFAULT & RBIF_LIMIT_CEILING_MASK) + << RBIF_LIMIT_CEILING_SHIFT; + val |= RBIF_LIMIT_FLOOR_DEFAULT & RBIF_LIMIT_FLOOR_MASK; + cpr_write(drv, REG_RBIF_LIMIT, val); + cpr_write(drv, REG_RBIF_SW_VLEVEL, RBIF_SW_VLEVEL_DEFAULT); + + /* + * Clear the target quotient value and gate count of all + * ring oscillators + */ + for (i = 0; i < CPR_NUM_RING_OSC; i++) + cpr_write(drv, REG_RBCPR_GCNT_TARGET(i), 0); + + /* Init and save gcnt */ + gcnt = (drv->ref_clk_khz * desc->gcnt_us) / 1000; + gcnt = gcnt & RBCPR_GCNT_TARGET_GCNT_MASK; + gcnt <<= RBCPR_GCNT_TARGET_GCNT_SHIFT; + drv->gcnt = gcnt; + + /* Program the delay count for the timer */ + val = (drv->ref_clk_khz * desc->timer_delay_us) / 1000; + cpr_write(drv, REG_RBCPR_TIMER_INTERVAL, val); + dev_dbg(drv->dev, "Timer count: %#0x (for %d us)\n", val, + desc->timer_delay_us); + + /* Program Consecutive Up & Down */ + val = desc->timer_cons_down << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT; + val |= desc->timer_cons_up << RBIF_TIMER_ADJ_CONS_UP_SHIFT; + val |= desc->clamp_timer_interval << RBIF_TIMER_ADJ_CLAMP_INT_SHIFT; + cpr_write(drv, REG_RBIF_TIMER_ADJUST, val); + + /* Program the control register */ + val = desc->up_threshold << RBCPR_CTL_UP_THRESHOLD_SHIFT; + val |= desc->down_threshold << RBCPR_CTL_DN_THRESHOLD_SHIFT; + val |= RBCPR_CTL_TIMER_EN | RBCPR_CTL_COUNT_MODE; + val |= RBCPR_CTL_SW_AUTO_CONT_ACK_EN; + cpr_write(drv, REG_RBCPR_CTL, val); + + for (i = 0; i < drv->num_corners; i++) { + corner = &drv->corners[i]; + corner->save_ctl = val; + corner->save_irq = CPR_INT_DEFAULT; + } + + cpr_irq_set(drv, CPR_INT_DEFAULT); + + val = cpr_read(drv, REG_RBCPR_VERSION); + if (val <= RBCPR_VER_2) + drv->flags |= FLAGS_IGNORE_1ST_IRQ_STATUS; + + return 0; +} + +static int cpr_set_performance_state(struct generic_pm_domain *domain, + unsigned int state) +{ + struct cpr_drv *drv = 
container_of(domain, struct cpr_drv, pd); + struct corner *corner, *end; + enum voltage_change_dir dir; + int ret = 0, new_uV; + + mutex_lock(&drv->lock); + + dev_dbg(drv->dev, "%s: setting perf state: %u (prev state: %u)\n", + __func__, state, cpr_get_cur_perf_state(drv)); + + /* + * Determine new corner we're going to. + * Remove one since lowest performance state is 1. + */ + corner = drv->corners + state - 1; + end = &drv->corners[drv->num_corners - 1]; + if (corner > end || corner < drv->corners) { + ret = -EINVAL; + goto unlock; + } + + /* Determine direction */ + if (drv->corner > corner) + dir = DOWN; + else if (drv->corner < corner) + dir = UP; + else + dir = NO_CHANGE; + + if (cpr_is_allowed(drv)) + new_uV = corner->last_uV; + else + new_uV = corner->uV; + + if (cpr_is_allowed(drv)) + cpr_ctl_disable(drv); + + ret = cpr_scale_voltage(drv, corner, new_uV, dir); + if (ret) + goto unlock; + + if (cpr_is_allowed(drv)) { + cpr_irq_clr(drv); + if (drv->corner != corner) + cpr_corner_restore(drv, corner); + cpr_ctl_enable(drv, corner); + } + + drv->corner = corner; + +unlock: + mutex_unlock(&drv->lock); + + return ret; +} + +static int cpr_read_efuse(struct device *dev, const char *cname, u32 *data) +{ + struct nvmem_cell *cell; + ssize_t len; + char *ret; + int i; + + *data = 0; + + cell = nvmem_cell_get(dev, cname); + if (IS_ERR(cell)) { + if (PTR_ERR(cell) != -EPROBE_DEFER) + dev_err(dev, "undefined cell %s\n", cname); + return PTR_ERR(cell); + } + + ret = nvmem_cell_read(cell, &len); + nvmem_cell_put(cell); + if (IS_ERR(ret)) { + dev_err(dev, "can't read cell %s\n", cname); + return PTR_ERR(ret); + } + + for (i = 0; i < len; i++) + *data |= ret[i] << (8 * i); + + kfree(ret); + dev_dbg(dev, "efuse read(%s) = %x, bytes %zd\n", cname, *data, len); + + return 0; +} + +static int +cpr_populate_ring_osc_idx(struct cpr_drv *drv) +{ + struct fuse_corner *fuse = drv->fuse_corners; + struct fuse_corner *end = fuse + drv->desc->num_fuse_corners; + const struct cpr_fuse *fuses = drv->cpr_fuses; + u32 data; + int ret; + + for (; fuse < end; fuse++, fuses++) { + ret = cpr_read_efuse(drv->dev, fuses->ring_osc, + &data); + if (ret) + return ret; + fuse->ring_osc_idx = data; + } + + return 0; +} + +static int cpr_read_fuse_uV(const struct cpr_desc *desc, + const struct fuse_corner_data *fdata, + const char *init_v_efuse, + int step_volt, + struct cpr_drv *drv) +{ + int step_size_uV, steps, uV; + u32 bits = 0; + int ret; + + ret = cpr_read_efuse(drv->dev, init_v_efuse, &bits); + if (ret) + return ret; + + steps = bits & ~BIT(desc->cpr_fuses.init_voltage_width - 1); + /* Not two's complement.. 
instead the highest bit is a sign bit */
+ if (bits & BIT(desc->cpr_fuses.init_voltage_width - 1))
+ steps = -steps;
+
+ step_size_uV = desc->cpr_fuses.init_voltage_step;
+
+ uV = fdata->ref_uV + steps * step_size_uV;
+ return DIV_ROUND_UP(uV, step_volt) * step_volt;
+}
+
+static int cpr_fuse_corner_init(struct cpr_drv *drv)
+{
+ const struct cpr_desc *desc = drv->desc;
+ const struct cpr_fuse *fuses = drv->cpr_fuses;
+ const struct acc_desc *acc_desc = drv->acc_desc;
+ int i;
+ unsigned int step_volt;
+ struct fuse_corner_data *fdata;
+ struct fuse_corner *fuse, *end;
+ int uV;
+ const struct reg_sequence *accs;
+ int ret;
+
+ accs = acc_desc->settings;
+
+ step_volt = regulator_get_linear_step(drv->vdd_apc);
+ if (!step_volt)
+ return -EINVAL;
+
+ /* Populate fuse_corner members */
+ fuse = drv->fuse_corners;
+ end = &fuse[desc->num_fuse_corners - 1];
+ fdata = desc->cpr_fuses.fuse_corner_data;
+
+ for (i = 0; fuse <= end; fuse++, fuses++, i++, fdata++) {
+ /*
+ * Update SoC voltages: platforms might use a different
+ * regulator than the one used to characterize the algorithm
+ * (i.e., init_voltage_step).
+ */
+ fdata->min_uV = roundup(fdata->min_uV, step_volt);
+ fdata->max_uV = roundup(fdata->max_uV, step_volt);
+
+ /* Populate uV */
+ uV = cpr_read_fuse_uV(desc, fdata, fuses->init_voltage,
+ step_volt, drv);
+ if (uV < 0)
+ return uV;
+
+ fuse->min_uV = fdata->min_uV;
+ fuse->max_uV = fdata->max_uV;
+ fuse->uV = clamp(uV, fuse->min_uV, fuse->max_uV);
+
+ if (fuse == end) {
+ /*
+ * Allow the highest fuse corner's PVS voltage to
+ * define the ceiling voltage for that corner in order
+ * to support SoCs in which variable ceiling values
+ * are required.
+ */
+ end->max_uV = max(end->max_uV, end->uV);
+ }
+
+ /* Populate target quotient by scaling */
+ ret = cpr_read_efuse(drv->dev, fuses->quotient, &fuse->quot);
+ if (ret)
+ return ret;
+
+ fuse->quot *= fdata->quot_scale;
+ fuse->quot += fdata->quot_offset;
+ fuse->quot += fdata->quot_adjust;
+ fuse->step_quot = desc->step_quot[fuse->ring_osc_idx];
+
+ /* Populate acc settings */
+ fuse->accs = accs;
+ fuse->num_accs = acc_desc->num_regs_per_fuse;
+ accs += acc_desc->num_regs_per_fuse;
+ }
+
+ /*
+ * Restrict all fuse corner PVS voltages based upon per corner
+ * ceiling and floor voltages.
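+ * For example (illustrative numbers only): with min_uV = 1048000,
+ * max_uV = 1224000 and a fused value of 1240000 uV, the loop below
+ * pulls uV down to 1224000 before both bounds are checked against
+ * the regulator's capabilities.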
+ */
+ for (fuse = drv->fuse_corners, i = 0; fuse <= end; fuse++, i++) {
+ if (fuse->uV > fuse->max_uV)
+ fuse->uV = fuse->max_uV;
+ else if (fuse->uV < fuse->min_uV)
+ fuse->uV = fuse->min_uV;
+
+ ret = regulator_is_supported_voltage(drv->vdd_apc,
+ fuse->min_uV,
+ fuse->min_uV);
+ if (!ret) {
+ dev_err(drv->dev,
+ "min uV: %d (fuse corner: %d) not supported by regulator\n",
+ fuse->min_uV, i);
+ return -EINVAL;
+ }
+
+ ret = regulator_is_supported_voltage(drv->vdd_apc,
+ fuse->max_uV,
+ fuse->max_uV);
+ if (!ret) {
+ dev_err(drv->dev,
+ "max uV: %d (fuse corner: %d) not supported by regulator\n",
+ fuse->max_uV, i);
+ return -EINVAL;
+ }
+
+ dev_dbg(drv->dev,
+ "fuse corner %d: [%d %d %d] RO%hhu quot %d squot %d\n",
+ i, fuse->min_uV, fuse->uV, fuse->max_uV,
+ fuse->ring_osc_idx, fuse->quot, fuse->step_quot);
+ }
+
+ return 0;
+}
+
+static int cpr_calculate_scaling(const char *quot_offset,
+ struct cpr_drv *drv,
+ const struct fuse_corner_data *fdata,
+ const struct corner *corner)
+{
+ u32 quot_diff = 0;
+ unsigned long freq_diff;
+ int scaling;
+ const struct fuse_corner *fuse, *prev_fuse;
+ int ret;
+
+ fuse = corner->fuse_corner;
+ prev_fuse = fuse - 1;
+
+ if (quot_offset) {
+ ret = cpr_read_efuse(drv->dev, quot_offset, &quot_diff);
+ if (ret)
+ return ret;
+
+ quot_diff *= fdata->quot_offset_scale;
+ quot_diff += fdata->quot_offset_adjust;
+ } else {
+ quot_diff = fuse->quot - prev_fuse->quot;
+ }
+
+ freq_diff = fuse->max_freq - prev_fuse->max_freq;
+ freq_diff /= 1000000; /* Convert to MHz */
+ scaling = 1000 * quot_diff / freq_diff;
+ return min(scaling, fdata->max_quot_scale);
+}
+
+static int cpr_interpolate(const struct corner *corner, int step_volt,
+ const struct fuse_corner_data *fdata)
+{
+ unsigned long f_high, f_low, f_diff;
+ int uV_high, uV_low, uV;
+ u64 temp, temp_limit;
+ const struct fuse_corner *fuse, *prev_fuse;
+
+ fuse = corner->fuse_corner;
+ prev_fuse = fuse - 1;
+
+ f_high = fuse->max_freq;
+ f_low = prev_fuse->max_freq;
+ uV_high = fuse->uV;
+ uV_low = prev_fuse->uV;
+ f_diff = fuse->max_freq - corner->freq;
+
+ /*
+ * Don't interpolate in the wrong direction. This could happen
+ * if the adjusted fuse voltage overlaps with the previous fuse's
+ * adjusted voltage.
+ */
+ if (f_high <= f_low || uV_high <= uV_low || f_high <= corner->freq)
+ return corner->uV;
+
+ temp = f_diff * (uV_high - uV_low);
+ do_div(temp, f_high - f_low);
+
+ /*
+ * max_volt_scale has units of uV/MHz while freq values
+ * have units of Hz, so divide the product by 1000000 to convert
+ * f_diff to MHz.
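+ *
+ * Worked example (illustrative values only): f_low = 1.0 GHz at
+ * uV_low = 1100000 and f_high = 1.4 GHz at uV_high = 1200000, with
+ * corner->freq = 1.2 GHz, give f_diff = 200 MHz, so
+ * temp = 200000000 * 100000 / 400000000 = 50000 uV below uV_high,
+ * i.e. 1150000 uV before the temp_limit cap and rounding below.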
+ */ + temp_limit = f_diff * fdata->max_volt_scale; + do_div(temp_limit, 1000000); + + uV = uV_high - min(temp, temp_limit); + return roundup(uV, step_volt); +} + +static unsigned int cpr_get_fuse_corner(struct dev_pm_opp *opp) +{ + struct device_node *np; + unsigned int fuse_corner = 0; + + np = dev_pm_opp_get_of_node(opp); + if (of_property_read_u32(np, "qcom,opp-fuse-level", &fuse_corner)) + pr_err("%s: missing 'qcom,opp-fuse-level' property\n", + __func__); + + of_node_put(np); + + return fuse_corner; +} + +static unsigned long cpr_get_opp_hz_for_req(struct dev_pm_opp *ref, + struct device *cpu_dev) +{ + u64 rate = 0; + struct device_node *ref_np; + struct device_node *desc_np; + struct device_node *child_np = NULL; + struct device_node *child_req_np = NULL; + + desc_np = dev_pm_opp_of_get_opp_desc_node(cpu_dev); + if (!desc_np) + return 0; + + ref_np = dev_pm_opp_get_of_node(ref); + if (!ref_np) + goto out_ref; + + do { + of_node_put(child_req_np); + child_np = of_get_next_available_child(desc_np, child_np); + child_req_np = of_parse_phandle(child_np, "required-opps", 0); + } while (child_np && child_req_np != ref_np); + + if (child_np && child_req_np == ref_np) + of_property_read_u64(child_np, "opp-hz", &rate); + + of_node_put(child_req_np); + of_node_put(child_np); + of_node_put(ref_np); +out_ref: + of_node_put(desc_np); + + return (unsigned long) rate; +} + +static int cpr_corner_init(struct cpr_drv *drv) +{ + const struct cpr_desc *desc = drv->desc; + const struct cpr_fuse *fuses = drv->cpr_fuses; + int i, level, scaling = 0; + unsigned int fnum, fc; + const char *quot_offset; + struct fuse_corner *fuse, *prev_fuse; + struct corner *corner, *end; + struct corner_data *cdata; + const struct fuse_corner_data *fdata; + bool apply_scaling; + unsigned long freq_diff, freq_diff_mhz; + unsigned long freq; + int step_volt = regulator_get_linear_step(drv->vdd_apc); + struct dev_pm_opp *opp; + + if (!step_volt) + return -EINVAL; + + corner = drv->corners; + end = &corner[drv->num_corners - 1]; + + cdata = devm_kcalloc(drv->dev, drv->num_corners, + sizeof(struct corner_data), + GFP_KERNEL); + if (!cdata) + return -ENOMEM; + + /* + * Store maximum frequency for each fuse corner based on the frequency + * plan + */ + for (level = 1; level <= drv->num_corners; level++) { + opp = dev_pm_opp_find_level_exact(&drv->pd.dev, level); + if (IS_ERR(opp)) + return -EINVAL; + fc = cpr_get_fuse_corner(opp); + if (!fc) { + dev_pm_opp_put(opp); + return -EINVAL; + } + fnum = fc - 1; + freq = cpr_get_opp_hz_for_req(opp, drv->attached_cpu_dev); + if (!freq) { + dev_pm_opp_put(opp); + return -EINVAL; + } + cdata[level - 1].fuse_corner = fnum; + cdata[level - 1].freq = freq; + + fuse = &drv->fuse_corners[fnum]; + dev_dbg(drv->dev, "freq: %lu level: %u fuse level: %u\n", + freq, dev_pm_opp_get_level(opp) - 1, fnum); + if (freq > fuse->max_freq) + fuse->max_freq = freq; + dev_pm_opp_put(opp); + } + + /* + * Get the quotient adjustment scaling factor, according to: + * + * scaling = min(1000 * (QUOT(corner_N) - QUOT(corner_N-1)) + * / (freq(corner_N) - freq(corner_N-1)), max_factor) + * + * QUOT(corner_N): quotient read from fuse for fuse corner N + * QUOT(corner_N-1): quotient read from fuse for fuse corner (N - 1) + * freq(corner_N): max frequency in MHz supported by fuse corner N + * freq(corner_N-1): max frequency in MHz supported by fuse corner + * (N - 1) + * + * Then walk through the corners mapped to each fuse corner + * and calculate the quotient adjustment for each one using the + * following formula: + 
* + * quot_adjust = (freq_max - freq_corner) * scaling / 1000 + * + * freq_max: max frequency in MHz supported by the fuse corner + * freq_corner: frequency in MHz corresponding to the corner + * scaling: calculated from above equation + * + * + * + + + * | v | + * q | f c o | f c + * u | c l | c + * o | f t | f + * t | c a | c + * | c f g | c f + * | e | + * +--------------- +---------------- + * 0 1 2 3 4 5 6 0 1 2 3 4 5 6 + * corner corner + * + * c = corner + * f = fuse corner + * + */ + for (apply_scaling = false, i = 0; corner <= end; corner++, i++) { + fnum = cdata[i].fuse_corner; + fdata = &desc->cpr_fuses.fuse_corner_data[fnum]; + quot_offset = fuses[fnum].quotient_offset; + fuse = &drv->fuse_corners[fnum]; + if (fnum) + prev_fuse = &drv->fuse_corners[fnum - 1]; + else + prev_fuse = NULL; + + corner->fuse_corner = fuse; + corner->freq = cdata[i].freq; + corner->uV = fuse->uV; + + if (prev_fuse && cdata[i - 1].freq == prev_fuse->max_freq) { + scaling = cpr_calculate_scaling(quot_offset, drv, + fdata, corner); + if (scaling < 0) + return scaling; + + apply_scaling = true; + } else if (corner->freq == fuse->max_freq) { + /* This is a fuse corner; don't scale anything */ + apply_scaling = false; + } + + if (apply_scaling) { + freq_diff = fuse->max_freq - corner->freq; + freq_diff_mhz = freq_diff / 1000000; + corner->quot_adjust = scaling * freq_diff_mhz / 1000; + + corner->uV = cpr_interpolate(corner, step_volt, fdata); + } + + corner->max_uV = fuse->max_uV; + corner->min_uV = fuse->min_uV; + corner->uV = clamp(corner->uV, corner->min_uV, corner->max_uV); + corner->last_uV = corner->uV; + + /* Reduce the ceiling voltage if needed */ + if (desc->reduce_to_corner_uV && corner->uV < corner->max_uV) + corner->max_uV = corner->uV; + else if (desc->reduce_to_fuse_uV && fuse->uV < corner->max_uV) + corner->max_uV = max(corner->min_uV, fuse->uV); + + dev_dbg(drv->dev, "corner %d: [%d %d %d] quot %d\n", i, + corner->min_uV, corner->uV, corner->max_uV, + fuse->quot - corner->quot_adjust); + } + + return 0; +} + +static const struct cpr_fuse *cpr_get_fuses(struct cpr_drv *drv) +{ + const struct cpr_desc *desc = drv->desc; + struct cpr_fuse *fuses; + int i; + + fuses = devm_kcalloc(drv->dev, desc->num_fuse_corners, + sizeof(struct cpr_fuse), + GFP_KERNEL); + if (!fuses) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < desc->num_fuse_corners; i++) { + char tbuf[32]; + + snprintf(tbuf, 32, "cpr_ring_osc%d", i + 1); + fuses[i].ring_osc = devm_kstrdup(drv->dev, tbuf, GFP_KERNEL); + if (!fuses[i].ring_osc) + return ERR_PTR(-ENOMEM); + + snprintf(tbuf, 32, "cpr_init_voltage%d", i + 1); + fuses[i].init_voltage = devm_kstrdup(drv->dev, tbuf, + GFP_KERNEL); + if (!fuses[i].init_voltage) + return ERR_PTR(-ENOMEM); + + snprintf(tbuf, 32, "cpr_quotient%d", i + 1); + fuses[i].quotient = devm_kstrdup(drv->dev, tbuf, GFP_KERNEL); + if (!fuses[i].quotient) + return ERR_PTR(-ENOMEM); + + snprintf(tbuf, 32, "cpr_quotient_offset%d", i + 1); + fuses[i].quotient_offset = devm_kstrdup(drv->dev, tbuf, + GFP_KERNEL); + if (!fuses[i].quotient_offset) + return ERR_PTR(-ENOMEM); + } + + return fuses; +} + +static void cpr_set_loop_allowed(struct cpr_drv *drv) +{ + drv->loop_disabled = false; +} + +static int cpr_init_parameters(struct cpr_drv *drv) +{ + const struct cpr_desc *desc = drv->desc; + struct clk *clk; + + clk = clk_get(drv->dev, "ref"); + if (IS_ERR(clk)) + return PTR_ERR(clk); + + drv->ref_clk_khz = clk_get_rate(clk) / 1000; + clk_put(clk); + + if (desc->timer_cons_up > RBIF_TIMER_ADJ_CONS_UP_MASK || + 
desc->timer_cons_down > RBIF_TIMER_ADJ_CONS_DOWN_MASK || + desc->up_threshold > RBCPR_CTL_UP_THRESHOLD_MASK || + desc->down_threshold > RBCPR_CTL_DN_THRESHOLD_MASK || + desc->idle_clocks > RBCPR_STEP_QUOT_IDLE_CLK_MASK || + desc->clamp_timer_interval > RBIF_TIMER_ADJ_CLAMP_INT_MASK) + return -EINVAL; + + dev_dbg(drv->dev, "up threshold = %u, down threshold = %u\n", + desc->up_threshold, desc->down_threshold); + + return 0; +} + +static int cpr_find_initial_corner(struct cpr_drv *drv) +{ + unsigned long rate; + const struct corner *end; + struct corner *iter; + unsigned int i = 0; + + if (!drv->cpu_clk) { + dev_err(drv->dev, "cannot get rate from NULL clk\n"); + return -EINVAL; + } + + end = &drv->corners[drv->num_corners - 1]; + rate = clk_get_rate(drv->cpu_clk); + + /* + * Some bootloaders set a CPU clock frequency that is not defined + * in the OPP table. When running at an unlisted frequency, + * cpufreq_online() will change to the OPP which has the lowest + * frequency, at or above the unlisted frequency. + * Since cpufreq_online() always "rounds up" in the case of an + * unlisted frequency, this function always "rounds down" in case + * of an unlisted frequency. That way, when cpufreq_online() + * triggers the first ever call to cpr_set_performance_state(), + * it will correctly determine the direction as UP. + */ + for (iter = drv->corners; iter <= end; iter++) { + if (iter->freq > rate) + break; + i++; + if (iter->freq == rate) { + drv->corner = iter; + break; + } + if (iter->freq < rate) + drv->corner = iter; + } + + if (!drv->corner) { + dev_err(drv->dev, "boot up corner not found\n"); + return -EINVAL; + } + + dev_dbg(drv->dev, "boot up perf state: %u\n", i); + + return 0; +} + +static const struct cpr_desc qcs404_cpr_desc = { + .num_fuse_corners = 3, + .min_diff_quot = CPR_FUSE_MIN_QUOT_DIFF, + .step_quot = (int []){ 25, 25, 25, }, + .timer_delay_us = 5000, + .timer_cons_up = 0, + .timer_cons_down = 2, + .up_threshold = 1, + .down_threshold = 3, + .idle_clocks = 15, + .gcnt_us = 1, + .vdd_apc_step_up_limit = 1, + .vdd_apc_step_down_limit = 1, + .cpr_fuses = { + .init_voltage_step = 8000, + .init_voltage_width = 6, + .fuse_corner_data = (struct fuse_corner_data[]){ + /* fuse corner 0 */ + { + .ref_uV = 1224000, + .max_uV = 1224000, + .min_uV = 1048000, + .max_volt_scale = 0, + .max_quot_scale = 0, + .quot_offset = 0, + .quot_scale = 1, + .quot_adjust = 0, + .quot_offset_scale = 5, + .quot_offset_adjust = 0, + }, + /* fuse corner 1 */ + { + .ref_uV = 1288000, + .max_uV = 1288000, + .min_uV = 1048000, + .max_volt_scale = 2000, + .max_quot_scale = 1400, + .quot_offset = 0, + .quot_scale = 1, + .quot_adjust = -20, + .quot_offset_scale = 5, + .quot_offset_adjust = 0, + }, + /* fuse corner 2 */ + { + .ref_uV = 1352000, + .max_uV = 1384000, + .min_uV = 1088000, + .max_volt_scale = 2000, + .max_quot_scale = 1400, + .quot_offset = 0, + .quot_scale = 1, + .quot_adjust = 0, + .quot_offset_scale = 5, + .quot_offset_adjust = 0, + }, + }, + }, +}; + +static const struct acc_desc qcs404_acc_desc = { + .settings = (struct reg_sequence[]){ + { 0xb120, 0x1041040 }, + { 0xb124, 0x41 }, + { 0xb120, 0x0 }, + { 0xb124, 0x0 }, + { 0xb120, 0x0 }, + { 0xb124, 0x0 }, + }, + .config = (struct reg_sequence[]){ + { 0xb138, 0xff }, + { 0xb130, 0x5555 }, + }, + .num_regs_per_fuse = 2, +}; + +static const struct cpr_acc_desc qcs404_cpr_acc_desc = { + .cpr_desc = &qcs404_cpr_desc, + .acc_desc = &qcs404_acc_desc, +}; + +static unsigned int cpr_get_performance_state(struct generic_pm_domain *genpd, + struct 
dev_pm_opp *opp)
+{
+ return dev_pm_opp_get_level(opp);
+}
+
+static int cpr_power_off(struct generic_pm_domain *domain)
+{
+ struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
+
+ return cpr_disable(drv);
+}
+
+static int cpr_power_on(struct generic_pm_domain *domain)
+{
+ struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
+
+ return cpr_enable(drv);
+}
+
+static int cpr_pd_attach_dev(struct generic_pm_domain *domain,
+ struct device *dev)
+{
+ struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
+ const struct acc_desc *acc_desc = drv->acc_desc;
+ int ret = 0;
+
+ mutex_lock(&drv->lock);
+
+ dev_dbg(drv->dev, "attach callback for: %s\n", dev_name(dev));
+
+ /*
+ * This driver only supports scaling voltage for a CPU cluster
+ * where all CPUs in the cluster share a single regulator.
+ * Therefore, save the struct device pointer only for the first
+ * CPU device that gets attached. There is no need to do any
+ * additional initialization when further CPUs get attached.
+ */
+ if (drv->attached_cpu_dev)
+ goto unlock;
+
+ /*
+ * cpr_scale_voltage() requires the direction of the change
+ * (whether we are moving to a higher or lower OPP). The first time
+ * cpr_set_performance_state() is called, there is no previous
+ * performance state defined. Therefore, we call
+ * cpr_find_initial_corner(), which reads the CPU clock frequency
+ * set by the bootloader, so that we can determine the direction
+ * the first time cpr_set_performance_state() is called.
+ */
+ drv->cpu_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(drv->cpu_clk)) {
+ ret = PTR_ERR(drv->cpu_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(drv->dev, "could not get cpu clk: %d\n", ret);
+ goto unlock;
+ }
+ drv->attached_cpu_dev = dev;
+
+ dev_dbg(drv->dev, "using cpu clk from: %s\n",
+ dev_name(drv->attached_cpu_dev));
+
+ /*
+ * Everything related to (virtual) corners has to be initialized
+ * here, when attaching to the power domain, since we need to know
+ * the maximum frequency for each fuse corner, and this is only
+ * available after the cpufreq driver has attached to us.
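+ * In other words, cpr_corner_init() below can only map each OPP
+ * (virtual corner) to its fuse corner and frequency once the CPU's
+ * OPP table, with its "required-opps" links back to this domain,
+ * is available.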
+ */ + ret = dev_pm_opp_get_opp_count(&drv->pd.dev); + if (ret < 0) { + dev_err(drv->dev, "could not get OPP count\n"); + goto unlock; + } + drv->num_corners = ret; + + if (drv->num_corners < 2) { + dev_err(drv->dev, "need at least 2 OPPs to use CPR\n"); + ret = -EINVAL; + goto unlock; + } + + dev_dbg(drv->dev, "number of OPPs: %d\n", drv->num_corners); + + drv->corners = devm_kcalloc(drv->dev, drv->num_corners, + sizeof(*drv->corners), + GFP_KERNEL); + if (!drv->corners) { + ret = -ENOMEM; + goto unlock; + } + + ret = cpr_corner_init(drv); + if (ret) + goto unlock; + + cpr_set_loop_allowed(drv); + + ret = cpr_init_parameters(drv); + if (ret) + goto unlock; + + /* Configure CPR HW but keep it disabled */ + ret = cpr_config(drv); + if (ret) + goto unlock; + + ret = cpr_find_initial_corner(drv); + if (ret) + goto unlock; + + if (acc_desc->config) + regmap_multi_reg_write(drv->tcsr, acc_desc->config, + acc_desc->num_regs_per_fuse); + + /* Enable ACC if required */ + if (acc_desc->enable_mask) + regmap_update_bits(drv->tcsr, acc_desc->enable_reg, + acc_desc->enable_mask, + acc_desc->enable_mask); + +unlock: + mutex_unlock(&drv->lock); + + return ret; +} + +static int cpr_debug_info_show(struct seq_file *s, void *unused) +{ + u32 gcnt, ro_sel, ctl, irq_status, reg, error_steps; + u32 step_dn, step_up, error, error_lt0, busy; + struct cpr_drv *drv = s->private; + struct fuse_corner *fuse_corner; + struct corner *corner; + + corner = drv->corner; + fuse_corner = corner->fuse_corner; + + seq_printf(s, "corner, current_volt = %d uV\n", + corner->last_uV); + + ro_sel = fuse_corner->ring_osc_idx; + gcnt = cpr_read(drv, REG_RBCPR_GCNT_TARGET(ro_sel)); + seq_printf(s, "rbcpr_gcnt_target (%u) = %#02X\n", ro_sel, gcnt); + + ctl = cpr_read(drv, REG_RBCPR_CTL); + seq_printf(s, "rbcpr_ctl = %#02X\n", ctl); + + irq_status = cpr_read(drv, REG_RBIF_IRQ_STATUS); + seq_printf(s, "rbcpr_irq_status = %#02X\n", irq_status); + + reg = cpr_read(drv, REG_RBCPR_RESULT_0); + seq_printf(s, "rbcpr_result_0 = %#02X\n", reg); + + step_dn = reg & 0x01; + step_up = (reg >> RBCPR_RESULT0_STEP_UP_SHIFT) & 0x01; + seq_printf(s, " [step_dn = %u", step_dn); + + seq_printf(s, ", step_up = %u", step_up); + + error_steps = (reg >> RBCPR_RESULT0_ERROR_STEPS_SHIFT) + & RBCPR_RESULT0_ERROR_STEPS_MASK; + seq_printf(s, ", error_steps = %u", error_steps); + + error = (reg >> RBCPR_RESULT0_ERROR_SHIFT) & RBCPR_RESULT0_ERROR_MASK; + seq_printf(s, ", error = %u", error); + + error_lt0 = (reg >> RBCPR_RESULT0_ERROR_LT0_SHIFT) & 0x01; + seq_printf(s, ", error_lt_0 = %u", error_lt0); + + busy = (reg >> RBCPR_RESULT0_BUSY_SHIFT) & 0x01; + seq_printf(s, ", busy = %u]\n", busy); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(cpr_debug_info); + +static void cpr_debugfs_init(struct cpr_drv *drv) +{ + drv->debugfs = debugfs_create_dir("qcom_cpr", NULL); + + debugfs_create_file("debug_info", 0444, drv->debugfs, + drv, &cpr_debug_info_fops); +} + +static int cpr_probe(struct platform_device *pdev) +{ + struct resource *res; + struct device *dev = &pdev->dev; + struct cpr_drv *drv; + int irq, ret; + const struct cpr_acc_desc *data; + struct device_node *np; + u32 cpr_rev = FUSE_REVISION_UNKNOWN; + + data = of_device_get_match_data(dev); + if (!data || !data->cpr_desc || !data->acc_desc) + return -EINVAL; + + drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL); + if (!drv) + return -ENOMEM; + drv->dev = dev; + drv->desc = data->cpr_desc; + drv->acc_desc = data->acc_desc; + + drv->fuse_corners = devm_kcalloc(dev, drv->desc->num_fuse_corners, + 
sizeof(*drv->fuse_corners), + GFP_KERNEL); + if (!drv->fuse_corners) + return -ENOMEM; + + np = of_parse_phandle(dev->of_node, "acc-syscon", 0); + if (!np) + return -ENODEV; + + drv->tcsr = syscon_node_to_regmap(np); + of_node_put(np); + if (IS_ERR(drv->tcsr)) + return PTR_ERR(drv->tcsr); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + drv->base = devm_ioremap_resource(dev, res); + if (IS_ERR(drv->base)) + return PTR_ERR(drv->base); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return -EINVAL; + + drv->vdd_apc = devm_regulator_get(dev, "vdd-apc"); + if (IS_ERR(drv->vdd_apc)) + return PTR_ERR(drv->vdd_apc); + + /* + * Initialize fuse corners, since it simply depends + * on data in efuses. + * Everything related to (virtual) corners has to be + * initialized after attaching to the power domain, + * since it depends on the CPU's OPP table. + */ + ret = cpr_read_efuse(dev, "cpr_fuse_revision", &cpr_rev); + if (ret) + return ret; + + drv->cpr_fuses = cpr_get_fuses(drv); + if (IS_ERR(drv->cpr_fuses)) + return PTR_ERR(drv->cpr_fuses); + + ret = cpr_populate_ring_osc_idx(drv); + if (ret) + return ret; + + ret = cpr_fuse_corner_init(drv); + if (ret) + return ret; + + mutex_init(&drv->lock); + + ret = devm_request_threaded_irq(dev, irq, NULL, + cpr_irq_handler, + IRQF_ONESHOT | IRQF_TRIGGER_RISING, + "cpr", drv); + if (ret) + return ret; + + drv->pd.name = devm_kstrdup_const(dev, dev->of_node->full_name, + GFP_KERNEL); + if (!drv->pd.name) + return -EINVAL; + + drv->pd.power_off = cpr_power_off; + drv->pd.power_on = cpr_power_on; + drv->pd.set_performance_state = cpr_set_performance_state; + drv->pd.opp_to_performance_state = cpr_get_performance_state; + drv->pd.attach_dev = cpr_pd_attach_dev; + + ret = pm_genpd_init(&drv->pd, NULL, true); + if (ret) + return ret; + + ret = of_genpd_add_provider_simple(dev->of_node, &drv->pd); + if (ret) + return ret; + + platform_set_drvdata(pdev, drv); + cpr_debugfs_init(drv); + + return 0; +} + +static int cpr_remove(struct platform_device *pdev) +{ + struct cpr_drv *drv = platform_get_drvdata(pdev); + + if (cpr_is_allowed(drv)) { + cpr_ctl_disable(drv); + cpr_irq_set(drv, 0); + } + + of_genpd_del_provider(pdev->dev.of_node); + pm_genpd_remove(&drv->pd); + + debugfs_remove_recursive(drv->debugfs); + + return 0; +} + +static const struct of_device_id cpr_match_table[] = { + { .compatible = "qcom,qcs404-cpr", .data = &qcs404_cpr_acc_desc }, + { } +}; +MODULE_DEVICE_TABLE(of, cpr_match_table); + +static struct platform_driver cpr_driver = { + .probe = cpr_probe, + .remove = cpr_remove, + .driver = { + .name = "qcom-cpr", + .of_match_table = cpr_match_table, + }, +}; +module_platform_driver(cpr_driver); + +MODULE_DESCRIPTION("Core Power Reduction (CPR) driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c index 2e5b6a6834da..73257cf107d9 100644 --- a/drivers/powercap/intel_rapl_common.c +++ b/drivers/powercap/intel_rapl_common.c @@ -980,6 +980,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = { INTEL_CPU_FAM6(ICELAKE_D, rapl_defaults_hsw_server), INTEL_CPU_FAM6(COMETLAKE_L, rapl_defaults_core), INTEL_CPU_FAM6(COMETLAKE, rapl_defaults_core), + INTEL_CPU_FAM6(TIGERLAKE_L, rapl_defaults_core), INTEL_CPU_FAM6(ATOM_SILVERMONT, rapl_defaults_byt), INTEL_CPU_FAM6(ATOM_AIRMONT, rapl_defaults_cht), @@ -989,6 +990,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = { INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS, rapl_defaults_core), INTEL_CPU_FAM6(ATOM_GOLDMONT_D, 
rapl_defaults_core),
INTEL_CPU_FAM6(ATOM_TREMONT_D, rapl_defaults_core),
+INTEL_CPU_FAM6(ATOM_TREMONT_L, rapl_defaults_core),

INTEL_CPU_FAM6(XEON_PHI_KNL, rapl_defaults_hsw_server),
INTEL_CPU_FAM6(XEON_PHI_KNM, rapl_defaults_hsw_server),
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 74eb5af7295f..97bfdd47954f 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -194,6 +194,18 @@ config REGULATOR_BD70528
This driver can also be built as a module. If so, the module
will be called bd70528-regulator.

+config REGULATOR_BD71828
+ tristate "ROHM BD71828 Power Regulator"
+ depends on MFD_ROHM_BD71828
+ select REGULATOR_ROHM
+ help
+ This driver supports voltage regulators on the ROHM BD71828 PMIC.
+ This will enable support for the software controllable buck
+ and LDO regulators.
+
+ This driver can also be built as a module. If so, the module
+ will be called bd71828-regulator.
+
config REGULATOR_BD718XX
tristate "ROHM BD71837 Power Regulator"
depends on MFD_ROHM_BD718XX
@@ -600,6 +612,27 @@ config REGULATOR_MCP16502
through the regulator interface. In addition it enables
suspend-to-ram/standby transition.

+config REGULATOR_MP8859
+ tristate "MPS MP8859 regulator driver"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say y here to support the MP8859 voltage regulator. This driver
+ supports basic operations (get/set voltage) through the regulator
+ interface.
+ Say M here if you want to include support for the regulator as a
+ module. The module will be named "mp8859".
+
+config REGULATOR_MPQ7920
+ tristate "Monolithic MPQ7920 PMIC"
+ depends on I2C && OF
+ select REGMAP_I2C
+ help
+ Say y here to support the MPQ7920 PMIC. This will enable support
+ for the four software controllable buck regulators and the five
+ LDO regulators.
+ This driver supports control of the device's different power rails
+ through the regulator interface.
+
config REGULATOR_MT6311
tristate "MediaTek MT6311 PMIC"
depends on I2C
@@ -1077,6 +1110,13 @@ config REGULATOR_VEXPRESS
This driver provides support for voltage regulators available
on the ARM Ltd's Versatile Express platform.

+config REGULATOR_VQMMC_IPQ4019
+ tristate "IPQ4019 VQMMC SD LDO regulator support"
+ depends on ARCH_QCOM
+ help
+ This driver provides support for the VQMMC LDO I/O
+ voltage regulator of the IPQ4019 SD/EMMC controller.
+ config REGULATOR_WM831X tristate "Wolfson Microelectronics WM831x PMIC regulators" depends on MFD_WM831X diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile index 2210ba56f9bd..07bc977c52b0 100644 --- a/drivers/regulator/Makefile +++ b/drivers/regulator/Makefile @@ -28,6 +28,7 @@ obj-$(CONFIG_REGULATOR_AS3722) += as3722-regulator.o obj-$(CONFIG_REGULATOR_AXP20X) += axp20x-regulator.o obj-$(CONFIG_REGULATOR_BCM590XX) += bcm590xx-regulator.o obj-$(CONFIG_REGULATOR_BD70528) += bd70528-regulator.o +obj-$(CONFIG_REGULATOR_BD71828) += bd71828-regulator.o obj-$(CONFIG_REGULATOR_BD718XX) += bd718x7-regulator.o obj-$(CONFIG_REGULATOR_BD9571MWV) += bd9571mwv-regulator.o obj-$(CONFIG_REGULATOR_DA903X) += da903x.o @@ -77,6 +78,8 @@ obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o obj-$(CONFIG_REGULATOR_MCP16502) += mcp16502.o +obj-$(CONFIG_REGULATOR_MP8859) += mp8859.o +obj-$(CONFIG_REGULATOR_MPQ7920) += mpq7920.o obj-$(CONFIG_REGULATOR_MT6311) += mt6311-regulator.o obj-$(CONFIG_REGULATOR_MT6323) += mt6323-regulator.o obj-$(CONFIG_REGULATOR_MT6358) += mt6358-regulator.o @@ -132,6 +135,7 @@ obj-$(CONFIG_REGULATOR_TWL4030) += twl-regulator.o twl6030-regulator.o obj-$(CONFIG_REGULATOR_UNIPHIER) += uniphier-regulator.o obj-$(CONFIG_REGULATOR_VCTRL) += vctrl-regulator.o obj-$(CONFIG_REGULATOR_VEXPRESS) += vexpress-regulator.o +obj-$(CONFIG_REGULATOR_VQMMC_IPQ4019) += vqmmc-ipq4019-regulator.o obj-$(CONFIG_REGULATOR_WM831X) += wm831x-dcdc.o obj-$(CONFIG_REGULATOR_WM831X) += wm831x-isink.o obj-$(CONFIG_REGULATOR_WM831X) += wm831x-ldo.o diff --git a/drivers/regulator/bd71828-regulator.c b/drivers/regulator/bd71828-regulator.c new file mode 100644 index 000000000000..b2fa17be4988 --- /dev/null +++ b/drivers/regulator/bd71828-regulator.c @@ -0,0 +1,807 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright (C) 2019 ROHM Semiconductors +// bd71828-regulator.c ROHM BD71828GW-DS1 regulator driver +// + +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/gpio.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/mfd/rohm-bd71828.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/regulator/driver.h> +#include <linux/regulator/machine.h> +#include <linux/regulator/of_regulator.h> + +struct reg_init { + unsigned int reg; + unsigned int mask; + unsigned int val; +}; +struct bd71828_regulator_data { + struct regulator_desc desc; + const struct rohm_dvs_config dvs; + const struct reg_init *reg_inits; + int reg_init_amnt; +}; + +static const struct reg_init buck1_inits[] = { + /* + * DVS Buck voltages can be changed by register values or via GPIO. + * Use register accesses by default. 
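+ *
+ * Each reg_init entry below writes BD71828_DVS_BUCK*_CTRL_I2C into
+ * the corresponding BD71828_MASK_DVS_BUCK*_CTRL field of
+ * BD71828_REG_PS_CTRL_1, selecting I2C register control.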
+ */ + { + .reg = BD71828_REG_PS_CTRL_1, + .mask = BD71828_MASK_DVS_BUCK1_CTRL, + .val = BD71828_DVS_BUCK1_CTRL_I2C, + }, +}; + +static const struct reg_init buck2_inits[] = { + { + .reg = BD71828_REG_PS_CTRL_1, + .mask = BD71828_MASK_DVS_BUCK2_CTRL, + .val = BD71828_DVS_BUCK2_CTRL_I2C, + }, +}; + +static const struct reg_init buck6_inits[] = { + { + .reg = BD71828_REG_PS_CTRL_1, + .mask = BD71828_MASK_DVS_BUCK6_CTRL, + .val = BD71828_DVS_BUCK6_CTRL_I2C, + }, +}; + +static const struct reg_init buck7_inits[] = { + { + .reg = BD71828_REG_PS_CTRL_1, + .mask = BD71828_MASK_DVS_BUCK7_CTRL, + .val = BD71828_DVS_BUCK7_CTRL_I2C, + }, +}; + +static const struct regulator_linear_range bd71828_buck1267_volts[] = { + REGULATOR_LINEAR_RANGE(500000, 0x00, 0xef, 6250), + REGULATOR_LINEAR_RANGE(2000000, 0xf0, 0xff, 0), +}; + +static const struct regulator_linear_range bd71828_buck3_volts[] = { + REGULATOR_LINEAR_RANGE(1200000, 0x00, 0x0f, 50000), + REGULATOR_LINEAR_RANGE(2000000, 0x10, 0x1f, 0), +}; + +static const struct regulator_linear_range bd71828_buck4_volts[] = { + REGULATOR_LINEAR_RANGE(1000000, 0x00, 0x1f, 25000), + REGULATOR_LINEAR_RANGE(1800000, 0x20, 0x3f, 0), +}; + +static const struct regulator_linear_range bd71828_buck5_volts[] = { + REGULATOR_LINEAR_RANGE(2500000, 0x00, 0x0f, 50000), + REGULATOR_LINEAR_RANGE(3300000, 0x10, 0x1f, 0), +}; + +static const struct regulator_linear_range bd71828_ldo_volts[] = { + REGULATOR_LINEAR_RANGE(800000, 0x00, 0x31, 50000), + REGULATOR_LINEAR_RANGE(3300000, 0x32, 0x3f, 0), +}; + +static int bd71828_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay) +{ + unsigned int val; + + switch (ramp_delay) { + case 1 ... 2500: + val = 0; + break; + case 2501 ... 5000: + val = 1; + break; + case 5001 ... 10000: + val = 2; + break; + case 10001 ... 
20000:
+ val = 3;
+ break;
+ default:
+ val = 3;
+ dev_err(&rdev->dev,
+ "ramp_delay: %d not supported, setting 20mV/us\n",
+ ramp_delay);
+ }
+
+ /*
+ * On BD71828 the ramp delay level control reg is at offset +2
+ * from the enable reg.
+ */
+ return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg + 2,
+ BD71828_MASK_RAMP_DELAY,
+ val << (ffs(BD71828_MASK_RAMP_DELAY) - 1));
+}
+
+static int buck_set_hw_dvs_levels(struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regulator_config *cfg)
+{
+ struct bd71828_regulator_data *data;
+
+ data = container_of(desc, struct bd71828_regulator_data, desc);
+
+ return rohm_regulator_set_dvs_levels(&data->dvs, np, desc, cfg->regmap);
+}
+
+static int ldo6_parse_dt(struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regulator_config *cfg)
+{
+ int ret, i;
+ uint32_t uv = 0;
+ unsigned int en;
+ struct regmap *regmap = cfg->regmap;
+ static const char * const props[] = { "rohm,dvs-run-voltage",
+ "rohm,dvs-idle-voltage",
+ "rohm,dvs-suspend-voltage",
+ "rohm,dvs-lpsr-voltage" };
+ unsigned int mask[] = { BD71828_MASK_RUN_EN, BD71828_MASK_IDLE_EN,
+ BD71828_MASK_SUSP_EN, BD71828_MASK_LPSR_EN };
+
+ for (i = 0; i < ARRAY_SIZE(props); i++) {
+ ret = of_property_read_u32(np, props[i], &uv);
+ if (ret) {
+ if (ret != -EINVAL)
+ return ret;
+ continue;
+ }
+ if (uv)
+ en = 0xffffffff;
+ else
+ en = 0;
+
+ ret = regmap_update_bits(regmap, desc->enable_reg, mask[i], en);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static const struct regulator_ops bd71828_buck_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+};
+
+static const struct regulator_ops bd71828_dvs_buck_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_ramp_delay = bd71828_set_ramp_delay,
+};
+
+static const struct regulator_ops bd71828_ldo_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+};
+
+static const struct regulator_ops bd71828_ldo6_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+};
+
+static const struct bd71828_regulator_data bd71828_rdata[] = {
+ {
+ .desc = {
+ .name = "buck1",
+ .of_match = of_match_ptr("BUCK1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_BUCK1,
+ .ops = &bd71828_dvs_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_buck1267_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_buck1267_volts),
+ .n_voltages = BD71828_BUCK1267_VOLTS,
+ .enable_reg = BD71828_REG_BUCK1_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_BUCK1_VOLT,
+ .vsel_mask = BD71828_MASK_BUCK1267_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+
ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_BUCK1_VOLT, + .run_mask = BD71828_MASK_BUCK1267_VOLT, + .idle_reg = BD71828_REG_BUCK1_IDLE_VOLT, + .idle_mask = BD71828_MASK_BUCK1267_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_reg = BD71828_REG_BUCK1_SUSP_VOLT, + .suspend_mask = BD71828_MASK_BUCK1267_VOLT, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + /* + * LPSR voltage is same as SUSPEND voltage. Allow + * setting it so that regulator can be set enabled at + * LPSR state + */ + .lpsr_reg = BD71828_REG_BUCK1_SUSP_VOLT, + .lpsr_mask = BD71828_MASK_BUCK1267_VOLT, + }, + .reg_inits = buck1_inits, + .reg_init_amnt = ARRAY_SIZE(buck1_inits), + }, + { + .desc = { + .name = "buck2", + .of_match = of_match_ptr("BUCK2"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_BUCK2, + .ops = &bd71828_dvs_buck_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_buck1267_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_buck1267_volts), + .n_voltages = BD71828_BUCK1267_VOLTS, + .enable_reg = BD71828_REG_BUCK2_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_BUCK2_VOLT, + .vsel_mask = BD71828_MASK_BUCK1267_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = buck_set_hw_dvs_levels, + }, + .dvs = { + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_BUCK2_VOLT, + .run_mask = BD71828_MASK_BUCK1267_VOLT, + .idle_reg = BD71828_REG_BUCK2_IDLE_VOLT, + .idle_mask = BD71828_MASK_BUCK1267_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_reg = BD71828_REG_BUCK2_SUSP_VOLT, + .suspend_mask = BD71828_MASK_BUCK1267_VOLT, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + .lpsr_reg = BD71828_REG_BUCK2_SUSP_VOLT, + .lpsr_mask = BD71828_MASK_BUCK1267_VOLT, + }, + .reg_inits = buck2_inits, + .reg_init_amnt = ARRAY_SIZE(buck2_inits), + }, + { + .desc = { + .name = "buck3", + .of_match = of_match_ptr("BUCK3"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_BUCK3, + .ops = &bd71828_buck_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_buck3_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_buck3_volts), + .n_voltages = BD71828_BUCK3_VOLTS, + .enable_reg = BD71828_REG_BUCK3_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_BUCK3_VOLT, + .vsel_mask = BD71828_MASK_BUCK3_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = buck_set_hw_dvs_levels, + }, + .dvs = { + /* + * BUCK3 only supports single voltage for all states. + * voltage can be individually enabled for each state + * though => allow setting all states to support + * enabling power rail on different states. 
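+ * (For example, providing a non-zero rohm,dvs-suspend-voltage but
+ * no rohm,dvs-lpsr-voltage in the device tree leaves the rail
+ * enabled for SUSPEND and disabled for LPSR; the DVS helper parses
+ * the same rohm,dvs-*-voltage properties as ldo6_parse_dt() above.)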
+ */ + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_BUCK3_VOLT, + .idle_reg = BD71828_REG_BUCK3_VOLT, + .suspend_reg = BD71828_REG_BUCK3_VOLT, + .lpsr_reg = BD71828_REG_BUCK3_VOLT, + .run_mask = BD71828_MASK_BUCK3_VOLT, + .idle_mask = BD71828_MASK_BUCK3_VOLT, + .suspend_mask = BD71828_MASK_BUCK3_VOLT, + .lpsr_mask = BD71828_MASK_BUCK3_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + }, + }, + { + .desc = { + .name = "buck4", + .of_match = of_match_ptr("BUCK4"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_BUCK4, + .ops = &bd71828_buck_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_buck4_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_buck4_volts), + .n_voltages = BD71828_BUCK4_VOLTS, + .enable_reg = BD71828_REG_BUCK4_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_BUCK4_VOLT, + .vsel_mask = BD71828_MASK_BUCK4_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = buck_set_hw_dvs_levels, + }, + .dvs = { + /* + * BUCK4 only supports single voltage for all states. + * voltage can be individually enabled for each state + * though => allow setting all states to support + * enabling power rail on different states. + */ + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_BUCK4_VOLT, + .idle_reg = BD71828_REG_BUCK4_VOLT, + .suspend_reg = BD71828_REG_BUCK4_VOLT, + .lpsr_reg = BD71828_REG_BUCK4_VOLT, + .run_mask = BD71828_MASK_BUCK4_VOLT, + .idle_mask = BD71828_MASK_BUCK4_VOLT, + .suspend_mask = BD71828_MASK_BUCK4_VOLT, + .lpsr_mask = BD71828_MASK_BUCK4_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + }, + }, + { + .desc = { + .name = "buck5", + .of_match = of_match_ptr("BUCK5"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_BUCK5, + .ops = &bd71828_buck_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_buck5_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_buck5_volts), + .n_voltages = BD71828_BUCK5_VOLTS, + .enable_reg = BD71828_REG_BUCK5_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_BUCK5_VOLT, + .vsel_mask = BD71828_MASK_BUCK5_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = buck_set_hw_dvs_levels, + }, + .dvs = { + /* + * BUCK5 only supports single voltage for all states. + * voltage can be individually enabled for each state + * though => allow setting all states to support + * enabling power rail on different states. 
+ */ + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_BUCK5_VOLT, + .idle_reg = BD71828_REG_BUCK5_VOLT, + .suspend_reg = BD71828_REG_BUCK5_VOLT, + .lpsr_reg = BD71828_REG_BUCK5_VOLT, + .run_mask = BD71828_MASK_BUCK5_VOLT, + .idle_mask = BD71828_MASK_BUCK5_VOLT, + .suspend_mask = BD71828_MASK_BUCK5_VOLT, + .lpsr_mask = BD71828_MASK_BUCK5_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + }, + }, + { + .desc = { + .name = "buck6", + .of_match = of_match_ptr("BUCK6"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_BUCK6, + .ops = &bd71828_dvs_buck_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_buck1267_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_buck1267_volts), + .n_voltages = BD71828_BUCK1267_VOLTS, + .enable_reg = BD71828_REG_BUCK6_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_BUCK6_VOLT, + .vsel_mask = BD71828_MASK_BUCK1267_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = buck_set_hw_dvs_levels, + }, + .dvs = { + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_BUCK6_VOLT, + .run_mask = BD71828_MASK_BUCK1267_VOLT, + .idle_reg = BD71828_REG_BUCK6_IDLE_VOLT, + .idle_mask = BD71828_MASK_BUCK1267_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_reg = BD71828_REG_BUCK6_SUSP_VOLT, + .suspend_mask = BD71828_MASK_BUCK1267_VOLT, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + .lpsr_reg = BD71828_REG_BUCK6_SUSP_VOLT, + .lpsr_mask = BD71828_MASK_BUCK1267_VOLT, + }, + .reg_inits = buck6_inits, + .reg_init_amnt = ARRAY_SIZE(buck6_inits), + }, + { + .desc = { + .name = "buck7", + .of_match = of_match_ptr("BUCK7"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_BUCK7, + .ops = &bd71828_dvs_buck_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_buck1267_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_buck1267_volts), + .n_voltages = BD71828_BUCK1267_VOLTS, + .enable_reg = BD71828_REG_BUCK7_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_BUCK7_VOLT, + .vsel_mask = BD71828_MASK_BUCK1267_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = buck_set_hw_dvs_levels, + }, + .dvs = { + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_BUCK7_VOLT, + .run_mask = BD71828_MASK_BUCK1267_VOLT, + .idle_reg = BD71828_REG_BUCK7_IDLE_VOLT, + .idle_mask = BD71828_MASK_BUCK1267_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_reg = BD71828_REG_BUCK7_SUSP_VOLT, + .suspend_mask = BD71828_MASK_BUCK1267_VOLT, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + .lpsr_reg = BD71828_REG_BUCK7_SUSP_VOLT, + .lpsr_mask = BD71828_MASK_BUCK1267_VOLT, + }, + .reg_inits = buck7_inits, + .reg_init_amnt = ARRAY_SIZE(buck7_inits), + }, + { + .desc = { + .name = "ldo1", + .of_match = of_match_ptr("LDO1"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_LDO1, + .ops = &bd71828_ldo_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_ldo_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts), + .n_voltages = BD71828_LDO_VOLTS, + .enable_reg = BD71828_REG_LDO1_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_LDO1_VOLT, + .vsel_mask = BD71828_MASK_LDO_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = 
buck_set_hw_dvs_levels, + }, + .dvs = { + /* + * LDO1 only supports single voltage for all states. + * voltage can be individually enabled for each state + * though => allow setting all states to support + * enabling power rail on different states. + */ + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_LDO1_VOLT, + .idle_reg = BD71828_REG_LDO1_VOLT, + .suspend_reg = BD71828_REG_LDO1_VOLT, + .lpsr_reg = BD71828_REG_LDO1_VOLT, + .run_mask = BD71828_MASK_LDO_VOLT, + .idle_mask = BD71828_MASK_LDO_VOLT, + .suspend_mask = BD71828_MASK_LDO_VOLT, + .lpsr_mask = BD71828_MASK_LDO_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + }, + }, { + .desc = { + .name = "ldo2", + .of_match = of_match_ptr("LDO2"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_LDO2, + .ops = &bd71828_ldo_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_ldo_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts), + .n_voltages = BD71828_LDO_VOLTS, + .enable_reg = BD71828_REG_LDO2_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_LDO2_VOLT, + .vsel_mask = BD71828_MASK_LDO_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = buck_set_hw_dvs_levels, + }, + .dvs = { + /* + * LDO2 only supports single voltage for all states. + * voltage can be individually enabled for each state + * though => allow setting all states to support + * enabling power rail on different states. + */ + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_LDO2_VOLT, + .idle_reg = BD71828_REG_LDO2_VOLT, + .suspend_reg = BD71828_REG_LDO2_VOLT, + .lpsr_reg = BD71828_REG_LDO2_VOLT, + .run_mask = BD71828_MASK_LDO_VOLT, + .idle_mask = BD71828_MASK_LDO_VOLT, + .suspend_mask = BD71828_MASK_LDO_VOLT, + .lpsr_mask = BD71828_MASK_LDO_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + }, + }, { + .desc = { + .name = "ldo3", + .of_match = of_match_ptr("LDO3"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_LDO3, + .ops = &bd71828_ldo_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_ldo_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts), + .n_voltages = BD71828_LDO_VOLTS, + .enable_reg = BD71828_REG_LDO3_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_LDO3_VOLT, + .vsel_mask = BD71828_MASK_LDO_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = buck_set_hw_dvs_levels, + }, + .dvs = { + /* + * LDO3 only supports single voltage for all states. + * voltage can be individually enabled for each state + * though => allow setting all states to support + * enabling power rail on different states. 
+ */ + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_LDO3_VOLT, + .idle_reg = BD71828_REG_LDO3_VOLT, + .suspend_reg = BD71828_REG_LDO3_VOLT, + .lpsr_reg = BD71828_REG_LDO3_VOLT, + .run_mask = BD71828_MASK_LDO_VOLT, + .idle_mask = BD71828_MASK_LDO_VOLT, + .suspend_mask = BD71828_MASK_LDO_VOLT, + .lpsr_mask = BD71828_MASK_LDO_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + }, + + }, { + .desc = { + .name = "ldo4", + .of_match = of_match_ptr("LDO4"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_LDO4, + .ops = &bd71828_ldo_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_ldo_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts), + .n_voltages = BD71828_LDO_VOLTS, + .enable_reg = BD71828_REG_LDO4_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_LDO4_VOLT, + .vsel_mask = BD71828_MASK_LDO_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = buck_set_hw_dvs_levels, + }, + .dvs = { + /* + * LDO4 only supports single voltage for all states. + * voltage can be individually enabled for each state + * though => allow setting all states to support + * enabling power rail on different states. + */ + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_LDO4_VOLT, + .idle_reg = BD71828_REG_LDO4_VOLT, + .suspend_reg = BD71828_REG_LDO4_VOLT, + .lpsr_reg = BD71828_REG_LDO4_VOLT, + .run_mask = BD71828_MASK_LDO_VOLT, + .idle_mask = BD71828_MASK_LDO_VOLT, + .suspend_mask = BD71828_MASK_LDO_VOLT, + .lpsr_mask = BD71828_MASK_LDO_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + }, + }, { + .desc = { + .name = "ldo5", + .of_match = of_match_ptr("LDO5"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_LDO5, + .ops = &bd71828_ldo_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_ldo_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts), + .n_voltages = BD71828_LDO_VOLTS, + .enable_reg = BD71828_REG_LDO5_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_LDO5_VOLT, + .vsel_mask = BD71828_MASK_LDO_VOLT, + .of_parse_cb = buck_set_hw_dvs_levels, + .owner = THIS_MODULE, + }, + /* + * LDO5 is special. It can choose vsel settings to be configured + * from 2 different registers (by GPIO). + * + * This driver supports only configuration where + * BD71828_REG_LDO5_VOLT_L is used.
+ */ + .dvs = { + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_LDO5_VOLT, + .idle_reg = BD71828_REG_LDO5_VOLT, + .suspend_reg = BD71828_REG_LDO5_VOLT, + .lpsr_reg = BD71828_REG_LDO5_VOLT, + .run_mask = BD71828_MASK_LDO_VOLT, + .idle_mask = BD71828_MASK_LDO_VOLT, + .suspend_mask = BD71828_MASK_LDO_VOLT, + .lpsr_mask = BD71828_MASK_LDO_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + }, + + }, { + .desc = { + .name = "ldo6", + .of_match = of_match_ptr("LDO6"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_LDO6, + .ops = &bd71828_ldo6_ops, + .type = REGULATOR_VOLTAGE, + .fixed_uV = BD71828_LDO_6_VOLTAGE, + .n_voltages = 1, + .enable_reg = BD71828_REG_LDO6_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .owner = THIS_MODULE, + /* + * LDO6 only supports enable/disable for all states. + * Voltage for LDO6 is fixed. + */ + .of_parse_cb = ldo6_parse_dt, + }, + }, { + .desc = { + /* SNVS LDO in data-sheet */ + .name = "ldo7", + .of_match = of_match_ptr("LDO7"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_LDO_SNVS, + .ops = &bd71828_ldo_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_ldo_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts), + .n_voltages = BD71828_LDO_VOLTS, + .enable_reg = BD71828_REG_LDO7_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_LDO7_VOLT, + .vsel_mask = BD71828_MASK_LDO_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = buck_set_hw_dvs_levels, + }, + .dvs = { + /* + * LDO7 only supports single voltage for all states. + * voltage can be individually enabled for each state + * though => allow setting all states to support + * enabling power rail on different states. 
+ */ + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_LDO7_VOLT, + .idle_reg = BD71828_REG_LDO7_VOLT, + .suspend_reg = BD71828_REG_LDO7_VOLT, + .lpsr_reg = BD71828_REG_LDO7_VOLT, + .run_mask = BD71828_MASK_LDO_VOLT, + .idle_mask = BD71828_MASK_LDO_VOLT, + .suspend_mask = BD71828_MASK_LDO_VOLT, + .lpsr_mask = BD71828_MASK_LDO_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + }, + + }, +}; + +static int bd71828_probe(struct platform_device *pdev) +{ + struct rohm_regmap_dev *bd71828; + int i, j, ret; + struct regulator_config config = { + .dev = pdev->dev.parent, + }; + + bd71828 = dev_get_drvdata(pdev->dev.parent); + if (!bd71828) { + dev_err(&pdev->dev, "No MFD driver data\n"); + return -EINVAL; + } + + config.regmap = bd71828->regmap; + + for (i = 0; i < ARRAY_SIZE(bd71828_rdata); i++) { + struct regulator_dev *rdev; + const struct bd71828_regulator_data *rd; + + rd = &bd71828_rdata[i]; + rdev = devm_regulator_register(&pdev->dev, + &rd->desc, &config); + if (IS_ERR(rdev)) { + dev_err(&pdev->dev, + "failed to register %s regulator\n", + rd->desc.name); + return PTR_ERR(rdev); + } + for (j = 0; j < rd->reg_init_amnt; j++) { + ret = regmap_update_bits(bd71828->regmap, + rd->reg_inits[j].reg, + rd->reg_inits[j].mask, + rd->reg_inits[j].val); + if (ret) { + dev_err(&pdev->dev, + "regulator %s init failed\n", + rd->desc.name); + return ret; + } + } + } + return 0; +} + +static struct platform_driver bd71828_regulator = { + .driver = { + .name = "bd71828-pmic" + }, + .probe = bd71828_probe, +}; + +module_platform_driver(bd71828_regulator); + +MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>"); +MODULE_DESCRIPTION("BD71828 voltage regulator driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:bd71828-pmic"); diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c index 13a43eee2e46..8f9b2d8eaf10 100644 --- a/drivers/regulator/bd718x7-regulator.c +++ b/drivers/regulator/bd718x7-regulator.c @@ -1142,28 +1142,14 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = { }, }; -struct bd718xx_pmic_inits { - const struct bd718xx_regulator_data *r_datas; - unsigned int r_amount; -}; - static int bd718xx_probe(struct platform_device *pdev) { struct bd718xx *mfd; struct regulator_config config = { 0 }; - struct bd718xx_pmic_inits pmic_regulators[ROHM_CHIP_TYPE_AMOUNT] = { - [ROHM_CHIP_TYPE_BD71837] = { - .r_datas = bd71837_regulators, - .r_amount = ARRAY_SIZE(bd71837_regulators), - }, - [ROHM_CHIP_TYPE_BD71847] = { - .r_datas = bd71847_regulators, - .r_amount = ARRAY_SIZE(bd71847_regulators), - }, - }; - int i, j, err; bool use_snvs; + const struct bd718xx_regulator_data *reg_data; + unsigned int num_reg_data; mfd = dev_get_drvdata(pdev->dev.parent); if (!mfd) { @@ -1172,8 +1158,16 @@ static int bd718xx_probe(struct platform_device *pdev) goto err; } - if (mfd->chip.chip_type >= ROHM_CHIP_TYPE_AMOUNT || - !pmic_regulators[mfd->chip.chip_type].r_datas) { + switch (mfd->chip.chip_type) { + case ROHM_CHIP_TYPE_BD71837: + reg_data = bd71837_regulators; + num_reg_data = ARRAY_SIZE(bd71837_regulators); + break; + case ROHM_CHIP_TYPE_BD71847: + reg_data = bd71847_regulators; + num_reg_data = ARRAY_SIZE(bd71847_regulators); + break; + default: dev_err(&pdev->dev, "Unsupported chip type\n"); err = -EINVAL; goto err; @@ -1215,13 +1209,13 @@ static int bd718xx_probe(struct 
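The bd71828_probe() routine above is deliberately generic: it registers every descriptor in bd71828_rdata[] and then replays the per-regulator reg_inits[] as masked register writes. The DVS tables it registers encode ROHM's hardware run-level scheme, where each of RUN/IDLE/SUSPEND/LPSR gets its own register/mask pair and single-voltage rails simply point every level at the same register. A minimal sketch of what programming one level amounts to (struct and register names are taken from the tables above, the helper itself is hypothetical, and the selector is assumed already aligned to the mask):

static int bd71828_set_idle_sel(struct regmap *regmap,
                                const struct rohm_dvs_config *dvs,
                                unsigned int sel)
{
        /* One masked write per run-level; here, the IDLE-state selector. */
        return regmap_update_bits(regmap, dvs->idle_reg, dvs->idle_mask, sel);
}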
platform_device *pdev) } } - for (i = 0; i < pmic_regulators[mfd->chip.chip_type].r_amount; i++) { const struct regulator_desc *desc; struct regulator_dev *rdev; const struct bd718xx_regulator_data *r; - r = &pmic_regulators[mfd->chip.chip_type].r_datas[i]; + r = &reg_data[i]; desc = &r->desc; config.dev = pdev->dev.parent; diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 03d79fee2987..d015d99cb59d 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -3470,6 +3470,7 @@ int regulator_set_voltage_rdev(struct regulator_dev *rdev, int min_uV, out: return ret; } +EXPORT_SYMBOL_GPL(regulator_set_voltage_rdev); static int regulator_limit_voltage_step(struct regulator_dev *rdev, int *current_uV, int *min_uV) @@ -4034,6 +4035,7 @@ int regulator_get_voltage_rdev(struct regulator_dev *rdev) return ret; return ret - rdev->constraints->uV_offset; } +EXPORT_SYMBOL_GPL(regulator_get_voltage_rdev); /** * regulator_get_voltage - get regulator output voltage diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c index f9448ed50e05..0cdeb6186529 100644 --- a/drivers/regulator/da9210-regulator.c +++ b/drivers/regulator/da9210-regulator.c @@ -131,8 +131,7 @@ static const struct of_device_id da9210_dt_ids[] = { }; MODULE_DEVICE_TABLE(of, da9210_dt_ids); -static int da9210_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int da9210_i2c_probe(struct i2c_client *i2c) { struct da9210 *chip; struct device *dev = &i2c->dev; @@ -228,7 +227,7 @@ static struct i2c_driver da9210_regulator_driver = { .name = "da9210", .of_match_table = of_match_ptr(da9210_dt_ids), }, - .probe = da9210_i2c_probe, + .probe_new = da9210_i2c_probe, .id_table = da9210_i2c_id, }; diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c index 523dc1b95826..2ea4362ffa5c 100644 --- a/drivers/regulator/da9211-regulator.c +++ b/drivers/regulator/da9211-regulator.c @@ -416,8 +416,7 @@ static int da9211_regulator_init(struct da9211 *chip) /* * I2C driver interface functions */ -static int da9211_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int da9211_i2c_probe(struct i2c_client *i2c) { struct da9211 *chip; int error, ret; @@ -526,7 +525,7 @@ static struct i2c_driver da9211_regulator_driver = { .name = "da9211", .of_match_table = of_match_ptr(da9211_dt_ids), }, - .probe = da9211_i2c_probe, + .probe_new = da9211_i2c_probe, .id_table = da9211_i2c_id, }; diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c index ca3dc3f3bb29..bb16c465426e 100644 --- a/drivers/regulator/helpers.c +++ b/drivers/regulator/helpers.c @@ -13,6 +13,8 @@ #include <linux/regulator/driver.h> #include <linux/module.h> +#include "internal.h" + /** * regulator_is_enabled_regmap - standard is_enabled() for regmap users * @@ -881,3 +883,15 @@ void regulator_bulk_set_supply_names(struct regulator_bulk_data *consumers, consumers[i].supply = supply_names[i]; } EXPORT_SYMBOL_GPL(regulator_bulk_set_supply_names); + +/** + * regulator_is_equal - test whether two regulators are the same + * + * @reg1: first regulator to operate on + * @reg2: second regulator to operate on + */ +bool regulator_is_equal(struct regulator *reg1, struct regulator *reg2) +{ + return reg1->rdev == reg2->rdev; +} +EXPORT_SYMBOL_GPL(regulator_is_equal); diff --git a/drivers/regulator/isl9305.c b/drivers/regulator/isl9305.c index 978f5e903cae..cfb765986d0d 100644 --- a/drivers/regulator/isl9305.c +++ 
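The regulator_is_equal() helper added to helpers.c above simply compares the underlying rdev, which lets a consumer detect that two separately requested supplies resolve to the same physical regulator and act on the rail only once. A consumer-side sketch (device handle and supply names are illustrative):

struct regulator *dvdd = devm_regulator_get(dev, "DVDD");
struct regulator *avdd = devm_regulator_get(dev, "AVDD");
bool shared_rail = false;

/* Both handles may point at one physical rail; special-case that once. */
if (!IS_ERR(dvdd) && !IS_ERR(avdd) && regulator_is_equal(dvdd, avdd))
        shared_rail = true;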
b/drivers/regulator/isl9305.c @@ -137,8 +137,7 @@ static const struct regmap_config isl9305_regmap = { .cache_type = REGCACHE_RBTREE, }; -static int isl9305_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int isl9305_i2c_probe(struct i2c_client *i2c) { struct regulator_config config = { }; struct isl9305_pdata *pdata = i2c->dev.platform_data; @@ -198,7 +197,7 @@ static struct i2c_driver isl9305_regulator_driver = { .name = "isl9305", .of_match_table = of_match_ptr(isl9305_dt_ids), }, - .probe = isl9305_i2c_probe, + .probe_new = isl9305_i2c_probe, .id_table = isl9305_i2c_id, }; diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c index bc96e65ef7c0..8be252f81b09 100644 --- a/drivers/regulator/lp3971.c +++ b/drivers/regulator/lp3971.c @@ -400,8 +400,7 @@ static int setup_regulators(struct lp3971 *lp3971, return 0; } -static int lp3971_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int lp3971_i2c_probe(struct i2c_client *i2c) { struct lp3971 *lp3971; struct lp3971_platform_data *pdata = dev_get_platdata(&i2c->dev); @@ -449,7 +448,7 @@ static struct i2c_driver lp3971_i2c_driver = { .driver = { .name = "LP3971", }, - .probe = lp3971_i2c_probe, + .probe_new = lp3971_i2c_probe, .id_table = lp3971_i2c_id, }; diff --git a/drivers/regulator/ltc3676.c b/drivers/regulator/ltc3676.c index d934540eb8c4..e12e52c69e52 100644 --- a/drivers/regulator/ltc3676.c +++ b/drivers/regulator/ltc3676.c @@ -301,8 +301,7 @@ static irqreturn_t ltc3676_isr(int irq, void *dev_id) return IRQ_HANDLED; } -static int ltc3676_regulator_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int ltc3676_regulator_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct regulator_init_data *init_data = dev_get_platdata(dev); @@ -380,7 +379,7 @@ static struct i2c_driver ltc3676_driver = { .name = DRIVER_NAME, .of_match_table = of_match_ptr(ltc3676_of_match), }, - .probe = ltc3676_regulator_probe, + .probe_new = ltc3676_regulator_probe, .id_table = ltc3676_i2c_id, }; module_i2c_driver(ltc3676_driver); diff --git a/drivers/regulator/mp8859.c b/drivers/regulator/mp8859.c new file mode 100644 index 000000000000..1d26b506ee5b --- /dev/null +++ b/drivers/regulator/mp8859.c @@ -0,0 +1,156 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (c) 2019 five technologies GmbH +// Author: Markus Reichl <m.reichl@fivetechno.de> + +#include <linux/module.h> +#include <linux/i2c.h> +#include <linux/of.h> +#include <linux/regulator/driver.h> +#include <linux/regmap.h> + + +#define VOL_MIN_IDX 0x00 +#define VOL_MAX_IDX 0x7ff + +/* Register definitions */ +#define MP8859_VOUT_L_REG 0 //3 lo Bits +#define MP8859_VOUT_H_REG 1 //8 hi Bits +#define MP8859_VOUT_GO_REG 2 +#define MP8859_IOUT_LIM_REG 3 +#define MP8859_CTL1_REG 4 +#define MP8859_CTL2_REG 5 +#define MP8859_RESERVED1_REG 6 +#define MP8859_RESERVED2_REG 7 +#define MP8859_RESERVED3_REG 8 +#define MP8859_STATUS_REG 9 +#define MP8859_INTERRUPT_REG 0x0A +#define MP8859_MASK_REG 0x0B +#define MP8859_ID1_REG 0x0C +#define MP8859_MFR_ID_REG 0x27 +#define MP8859_DEV_ID_REG 0x28 +#define MP8859_IC_REV_REG 0x29 + +#define MP8859_MAX_REG 0x29 + +#define MP8859_GO_BIT 0x01 + + +static int mp8859_set_voltage_sel(struct regulator_dev *rdev, unsigned int sel) +{ + int ret; + + ret = regmap_write(rdev->regmap, MP8859_VOUT_L_REG, sel & 0x7); + + if (ret) + return ret; + ret = regmap_write(rdev->regmap, MP8859_VOUT_H_REG, sel >> 3); + + if (ret) + return ret; + ret = 
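The many .probe -> .probe_new conversions in this series (da9210, da9211, isl9305, lp3971, ltc3676, mt6311, pv88060, pv88090, slg51000, sy8106a, sy8824x, tps65132) all follow one pattern: probe_new drops the i2c_device_id argument, which none of these drivers used. A driver that does still need the matched entry can recover it inside probe; a hedged sketch (the foo_* names are placeholders, i2c_match_id() is the usual in-tree helper for this):

static int foo_i2c_probe(struct i2c_client *i2c)
{
        /* probe_new passes no id; look it up only if actually needed. */
        const struct i2c_device_id *id = i2c_match_id(foo_i2c_id, i2c);

        return foo_common_probe(i2c, id ? id->driver_data : 0);
}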
regmap_update_bits(rdev->regmap, MP8859_VOUT_GO_REG, + MP8859_GO_BIT, 1); + return ret; +} + +static int mp8859_get_voltage_sel(struct regulator_dev *rdev) +{ + unsigned int val_tmp; + unsigned int val; + int ret; + + ret = regmap_read(rdev->regmap, MP8859_VOUT_H_REG, &val_tmp); + + if (ret) + return ret; + val = val_tmp << 3; + + ret = regmap_read(rdev->regmap, MP8859_VOUT_L_REG, &val_tmp); + + if (ret) + return ret; + val |= val_tmp & 0x07; + return val; +} + +static const struct regulator_linear_range mp8859_dcdc_ranges[] = { + REGULATOR_LINEAR_RANGE(0, VOL_MIN_IDX, VOL_MAX_IDX, 10000), +}; + +static const struct regmap_config mp8859_regmap = { + .reg_bits = 8, + .val_bits = 8, + .max_register = MP8859_MAX_REG, + .cache_type = REGCACHE_RBTREE, +}; + +static const struct regulator_ops mp8859_ops = { + .set_voltage_sel = mp8859_set_voltage_sel, + .get_voltage_sel = mp8859_get_voltage_sel, + .list_voltage = regulator_list_voltage_linear_range, +}; + +static const struct regulator_desc mp8859_regulators[] = { + { + .id = 0, + .type = REGULATOR_VOLTAGE, + .name = "mp8859_dcdc", + .of_match = of_match_ptr("mp8859_dcdc"), + .n_voltages = VOL_MAX_IDX + 1, + .linear_ranges = mp8859_dcdc_ranges, + .n_linear_ranges = 1, + .ops = &mp8859_ops, + .owner = THIS_MODULE, + }, +}; + +static int mp8859_i2c_probe(struct i2c_client *i2c) +{ + int ret; + struct regulator_config config = {.dev = &i2c->dev}; + struct regmap *regmap = devm_regmap_init_i2c(i2c, &mp8859_regmap); + struct regulator_dev *rdev; + + if (IS_ERR(regmap)) { + ret = PTR_ERR(regmap); + dev_err(&i2c->dev, "regmap init failed: %d\n", ret); + return ret; + } + rdev = devm_regulator_register(&i2c->dev, &mp8859_regulators[0], + &config); + + if (IS_ERR(rdev)) { + ret = PTR_ERR(rdev); + dev_err(&i2c->dev, "failed to register %s: %d\n", + mp8859_regulators[0].name, ret); + return ret; + } + return 0; +} + +static const struct of_device_id mp8859_dt_id[] = { + {.compatible = "mps,mp8859"}, + {}, +}; +MODULE_DEVICE_TABLE(of, mp8859_dt_id); + +static const struct i2c_device_id mp8859_i2c_id[] = { + { "mp8859", }, + { }, +}; +MODULE_DEVICE_TABLE(i2c, mp8859_i2c_id); + +static struct i2c_driver mp8859_regulator_driver = { + .driver = { + .name = "mp8859", + .of_match_table = of_match_ptr(mp8859_dt_id), + }, + .probe_new = mp8859_i2c_probe, + .id_table = mp8859_i2c_id, +}; + +module_i2c_driver(mp8859_regulator_driver); + +MODULE_DESCRIPTION("Monolithic Power Systems MP8859 voltage regulator driver"); +MODULE_AUTHOR("Markus Reichl <m.reichl@fivetechno.de>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/regulator/mpq7920.c b/drivers/regulator/mpq7920.c new file mode 100644 index 000000000000..54c862edf571 --- /dev/null +++ b/drivers/regulator/mpq7920.c @@ -0,0 +1,330 @@ +// SPDX-License-Identifier: GPL-2.0+ +// +// mpq7920.c - regulator driver for mps mpq7920 +// +// Copyright 2019 Monolithic Power Systems, Inc +// +// Author: Saravanan Sekar <sravanhome@gmail.com> + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/err.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/regulator/driver.h> +#include <linux/regulator/of_regulator.h> +#include <linux/i2c.h> +#include <linux/regmap.h> +#include "mpq7920.h" + +#define MPQ7920_BUCK_VOLT_RANGE \ + ((MPQ7920_VOLT_MAX - MPQ7920_BUCK_VOLT_MIN)/MPQ7920_VOLT_STEP + 1) +#define MPQ7920_LDO_VOLT_RANGE \ + ((MPQ7920_VOLT_MAX - MPQ7920_LDO_VOLT_MIN)/MPQ7920_VOLT_STEP + 1) + +#define MPQ7920BUCK(_name, _id, _ilim) 
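In the new mp8859 driver above, the 11-bit voltage selector is split across two registers, VOUT_L carrying bits [2:0] and VOUT_H bits [10:3], and a write only takes effect once the GO bit is set. A standalone round-trip of the encoding used by mp8859_set_voltage_sel()/mp8859_get_voltage_sel() (plain C, values illustrative):

#include <assert.h>

int main(void)
{
        unsigned int sel = 1500000 / 10000;   /* 1.5 V at the 10 mV step -> 150 */
        unsigned char vout_l = sel & 0x7;     /* 3 low bits  -> MP8859_VOUT_L_REG */
        unsigned char vout_h = sel >> 3;      /* 8 high bits -> MP8859_VOUT_H_REG */

        /* Reading back mirrors mp8859_get_voltage_sel(). */
        assert((((unsigned int)vout_h << 3) | (vout_l & 0x07)) == sel);
        return 0;
}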
\ + [MPQ7920_BUCK ## _id] = { \ + .id = MPQ7920_BUCK ## _id, \ + .name = _name, \ + .of_match = _name, \ + .regulators_node = "regulators", \ + .of_parse_cb = mpq7920_parse_cb, \ + .ops = &mpq7920_buck_ops, \ + .min_uV = MPQ7920_BUCK_VOLT_MIN, \ + .uV_step = MPQ7920_VOLT_STEP, \ + .n_voltages = MPQ7920_BUCK_VOLT_RANGE, \ + .curr_table = _ilim, \ + .n_current_limits = ARRAY_SIZE(_ilim), \ + .csel_reg = MPQ7920_BUCK ##_id## _REG_C, \ + .csel_mask = MPQ7920_MASK_BUCK_ILIM, \ + .enable_reg = MPQ7920_REG_REGULATOR_EN, \ + .enable_mask = BIT(MPQ7920_REGULATOR_EN_OFFSET - \ + MPQ7920_BUCK ## _id), \ + .vsel_reg = MPQ7920_BUCK ##_id## _REG_A, \ + .vsel_mask = MPQ7920_MASK_VREF, \ + .active_discharge_on = MPQ7920_DISCHARGE_ON, \ + .active_discharge_reg = MPQ7920_BUCK ##_id## _REG_B, \ + .active_discharge_mask = MPQ7920_MASK_DISCHARGE, \ + .soft_start_reg = MPQ7920_BUCK ##_id## _REG_C, \ + .soft_start_mask = MPQ7920_MASK_SOFTSTART, \ + .owner = THIS_MODULE, \ + } + +#define MPQ7920LDO(_name, _id, _ops, _ilim, _ilim_sz, _creg, _cmask) \ + [MPQ7920_LDO ## _id] = { \ + .id = MPQ7920_LDO ## _id, \ + .name = _name, \ + .of_match = _name, \ + .regulators_node = "regulators", \ + .ops = _ops, \ + .min_uV = MPQ7920_LDO_VOLT_MIN, \ + .uV_step = MPQ7920_VOLT_STEP, \ + .n_voltages = MPQ7920_LDO_VOLT_RANGE, \ + .vsel_reg = MPQ7920_LDO ##_id## _REG_A, \ + .vsel_mask = MPQ7920_MASK_VREF, \ + .curr_table = _ilim, \ + .n_current_limits = _ilim_sz, \ + .csel_reg = _creg, \ + .csel_mask = _cmask, \ + .enable_reg = (_id == 1) ? 0 : MPQ7920_REG_REGULATOR_EN,\ + .enable_mask = BIT(MPQ7920_REGULATOR_EN_OFFSET - \ + MPQ7920_LDO ##_id + 1), \ + .active_discharge_on = MPQ7920_DISCHARGE_ON, \ + .active_discharge_mask = MPQ7920_MASK_DISCHARGE, \ + .active_discharge_reg = MPQ7920_LDO ##_id## _REG_B, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + } + +enum mpq7920_regulators { + MPQ7920_BUCK1, + MPQ7920_BUCK2, + MPQ7920_BUCK3, + MPQ7920_BUCK4, + MPQ7920_LDO1, /* LDORTC */ + MPQ7920_LDO2, + MPQ7920_LDO3, + MPQ7920_LDO4, + MPQ7920_LDO5, + MPQ7920_MAX_REGULATORS, +}; + +struct mpq7920_regulator_info { + struct regmap *regmap; + struct regulator_desc *rdesc; +}; + +static const struct regmap_config mpq7920_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 0x25, +}; + +/* Current limits array (in uA) + * ILIM1 & ILIM3 + */ +static const unsigned int mpq7920_I_limits1[] = { + 4600000, 6600000, 7600000, 9300000 +}; + +/* ILIM2 & ILIM4 */ +static const unsigned int mpq7920_I_limits2[] = { + 2700000, 3900000, 5100000, 6100000 +}; + +/* LDO4 & LDO5 */ +static const unsigned int mpq7920_I_limits3[] = { + 300000, 700000 +}; + +static int mpq7920_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay); +static int mpq7920_parse_cb(struct device_node *np, + const struct regulator_desc *rdesc, + struct regulator_config *config); + +/* RTCLDO not controllable, always ON */ +static const struct regulator_ops mpq7920_ldortc_ops = { + .list_voltage = regulator_list_voltage_linear, + .map_voltage = regulator_map_voltage_linear, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, +}; + +static const struct regulator_ops mpq7920_ldo_wo_current_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .list_voltage = regulator_list_voltage_linear, + .map_voltage = regulator_map_voltage_linear, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = 
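The MPQ7920BUCK/MPQ7920LDO macros above derive each rail's enable bit from its position in the enum (BUCK1..BUCK4 = 0..3, LDO1..LDO5 = 4..8): REG_REGULATOR_EN packs buck1..buck4 into bits 7..4 and ldo2..ldo5 into bits 3..0, while LDORTC (LDO1) is always on and therefore gets enable_reg = 0. A compile-time restatement of that arithmetic (standalone C11):

enum { EN_OFFSET = 7, BUCK1 = 0, BUCK4 = 3, LDO2 = 5, LDO5 = 8 };

_Static_assert((1u << (EN_OFFSET - BUCK1)) == 0x80, "buck1 enable bit");
_Static_assert((1u << (EN_OFFSET - BUCK4)) == 0x10, "buck4 enable bit");
_Static_assert((1u << (EN_OFFSET - LDO2 + 1)) == 0x08, "ldo2 enable bit");
_Static_assert((1u << (EN_OFFSET - LDO5 + 1)) == 0x01, "ldo5 enable bit");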
regulator_set_voltage_sel_regmap, + .set_active_discharge = regulator_set_active_discharge_regmap, +}; + +static const struct regulator_ops mpq7920_ldo_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .list_voltage = regulator_list_voltage_linear, + .map_voltage = regulator_map_voltage_linear, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .set_active_discharge = regulator_set_active_discharge_regmap, + .get_current_limit = regulator_get_current_limit_regmap, + .set_current_limit = regulator_set_current_limit_regmap, +}; + +static const struct regulator_ops mpq7920_buck_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .list_voltage = regulator_list_voltage_linear, + .map_voltage = regulator_map_voltage_linear, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .set_active_discharge = regulator_set_active_discharge_regmap, + .set_soft_start = regulator_set_soft_start_regmap, + .set_ramp_delay = mpq7920_set_ramp_delay, +}; + +static struct regulator_desc mpq7920_regulators_desc[MPQ7920_MAX_REGULATORS] = { + MPQ7920BUCK("buck1", 1, mpq7920_I_limits1), + MPQ7920BUCK("buck2", 2, mpq7920_I_limits2), + MPQ7920BUCK("buck3", 3, mpq7920_I_limits1), + MPQ7920BUCK("buck4", 4, mpq7920_I_limits2), + MPQ7920LDO("ldortc", 1, &mpq7920_ldortc_ops, NULL, 0, 0, 0), + MPQ7920LDO("ldo2", 2, &mpq7920_ldo_wo_current_ops, NULL, 0, 0, 0), + MPQ7920LDO("ldo3", 3, &mpq7920_ldo_wo_current_ops, NULL, 0, 0, 0), + MPQ7920LDO("ldo4", 4, &mpq7920_ldo_ops, mpq7920_I_limits3, + ARRAY_SIZE(mpq7920_I_limits3), MPQ7920_LDO4_REG_B, + MPQ7920_MASK_LDO_ILIM), + MPQ7920LDO("ldo5", 5, &mpq7920_ldo_ops, mpq7920_I_limits3, + ARRAY_SIZE(mpq7920_I_limits3), MPQ7920_LDO5_REG_B, + MPQ7920_MASK_LDO_ILIM), +}; + +/* + * DVS ramp rate BUCK1 to BUCK4 + * 00-01: Reserved + * 10: 8mV/us + * 11: 4mV/us + */ +static int mpq7920_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay) +{ + unsigned int ramp_val; + + if (ramp_delay > 8000 || ramp_delay < 0) + return -EINVAL; + + if (ramp_delay <= 4000) + ramp_val = 3; + else + ramp_val = 2; + + return regmap_update_bits(rdev->regmap, MPQ7920_REG_CTL0, + MPQ7920_MASK_DVS_SLEWRATE, ramp_val << 6); +} + +static int mpq7920_parse_cb(struct device_node *np, + const struct regulator_desc *desc, + struct regulator_config *config) +{ + uint8_t val; + int ret; + struct mpq7920_regulator_info *info = config->driver_data; + struct regulator_desc *rdesc = &info->rdesc[desc->id]; + + if (of_property_read_bool(np, "mps,buck-ovp-disable")) { + regmap_update_bits(config->regmap, + MPQ7920_BUCK1_REG_B + (rdesc->id * 4), + MPQ7920_MASK_OVP, MPQ7920_OVP_DISABLE); + } + + ret = of_property_read_u8(np, "mps,buck-phase-delay", &val); + if (!ret) { + regmap_update_bits(config->regmap, + MPQ7920_BUCK1_REG_C + (rdesc->id * 4), + MPQ7920_MASK_BUCK_PHASE_DEALY, + (val & 3) << 4); + } + + ret = of_property_read_u8(np, "mps,buck-softstart", &val); + if (!ret) + rdesc->soft_start_val_on = (val & 3) << 2; + + return 0; +} + +static void mpq7920_parse_dt(struct device *dev, + struct mpq7920_regulator_info *info) +{ + int ret; + struct device_node *np = dev->of_node; + uint8_t freq; + + np = of_get_child_by_name(np, "regulators"); + if (!np) { + dev_err(dev, "missing 'regulators' subnode in DT\n"); + return; + } + + ret = 
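mpq7920_set_ramp_delay() above quantizes the requested slew rate to the only two valid DVS_SLEWRATE encodings (binary 11 = 4 mV/us, 10 = 8 mV/us, written to bits [7:6] of CTL0). A standalone restatement of the mapping:

/* ramp_delay in uV/us: 0..4000 -> field 3 (4 mV/us), 4001..8000 -> field 2 (8 mV/us). */
static int mpq7920_ramp_field(int ramp_delay)
{
        if (ramp_delay > 8000 || ramp_delay < 0)
                return -1;                      /* -EINVAL in the driver */
        return (ramp_delay <= 4000) ? 3 : 2;    /* programmed as field << 6 */
}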
of_property_read_u8(np, "mps,switch-freq", &freq); + if (!ret) { + regmap_update_bits(info->regmap, MPQ7920_REG_CTL0, + MPQ7920_MASK_SWITCH_FREQ, + (freq & 3) << 4); + } + + of_node_put(np); +} + +static int mpq7920_i2c_probe(struct i2c_client *client) +{ + struct device *dev = &client->dev; + struct mpq7920_regulator_info *info; + struct regulator_config config = { NULL, }; + struct regulator_dev *rdev; + struct regmap *regmap; + int i; + + info = devm_kzalloc(dev, sizeof(struct mpq7920_regulator_info), + GFP_KERNEL); + if (!info) + return -ENOMEM; + + info->rdesc = mpq7920_regulators_desc; + regmap = devm_regmap_init_i2c(client, &mpq7920_regmap_config); + if (IS_ERR(regmap)) { + dev_err(dev, "Failed to allocate regmap!\n"); + return PTR_ERR(regmap); + } + + i2c_set_clientdata(client, info); + info->regmap = regmap; + if (client->dev.of_node) + mpq7920_parse_dt(&client->dev, info); + + config.dev = dev; + config.regmap = regmap; + config.driver_data = info; + + for (i = 0; i < MPQ7920_MAX_REGULATORS; i++) { + rdev = devm_regulator_register(dev, + &mpq7920_regulators_desc[i], + &config); + if (IS_ERR(rdev)) { + dev_err(dev, "Failed to register regulator!\n"); + return PTR_ERR(rdev); + } + } + + return 0; +} + +static const struct of_device_id mpq7920_of_match[] = { + { .compatible = "mps,mpq7920"}, + {}, +}; +MODULE_DEVICE_TABLE(of, mpq7920_of_match); + +static const struct i2c_device_id mpq7920_id[] = { + { "mpq7920", }, + { }, +}; +MODULE_DEVICE_TABLE(i2c, mpq7920_id); + +static struct i2c_driver mpq7920_regulator_driver = { + .driver = { + .name = "mpq7920", + .of_match_table = of_match_ptr(mpq7920_of_match), + }, + .probe_new = mpq7920_i2c_probe, + .id_table = mpq7920_id, +}; +module_i2c_driver(mpq7920_regulator_driver); + +MODULE_AUTHOR("Saravanan Sekar <sravanhome@gmail.com>"); +MODULE_DESCRIPTION("MPQ7920 PMIC regulator driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/regulator/mpq7920.h b/drivers/regulator/mpq7920.h new file mode 100644 index 000000000000..489924655a96 --- /dev/null +++ b/drivers/regulator/mpq7920.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * mpq7920.h - Regulator definitions for mpq7920 + * + * Copyright 2019 Monolithic Power Systems, Inc + * + */ + +#ifndef __MPQ7920_H__ +#define __MPQ7920_H__ + +#define MPQ7920_REG_CTL0 0x00 +#define MPQ7920_REG_CTL1 0x01 +#define MPQ7920_REG_CTL2 0x02 +#define MPQ7920_BUCK1_REG_A 0x03 +#define MPQ7920_BUCK1_REG_B 0x04 +#define MPQ7920_BUCK1_REG_C 0x05 +#define MPQ7920_BUCK1_REG_D 0x06 +#define MPQ7920_BUCK2_REG_A 0x07 +#define MPQ7920_BUCK2_REG_B 0x08 +#define MPQ7920_BUCK2_REG_C 0x09 +#define MPQ7920_BUCK2_REG_D 0x0a +#define MPQ7920_BUCK3_REG_A 0x0b +#define MPQ7920_BUCK3_REG_B 0x0c +#define MPQ7920_BUCK3_REG_C 0x0d +#define MPQ7920_BUCK3_REG_D 0x0e +#define MPQ7920_BUCK4_REG_A 0x0f +#define MPQ7920_BUCK4_REG_B 0x10 +#define MPQ7920_BUCK4_REG_C 0x11 +#define MPQ7920_BUCK4_REG_D 0x12 +#define MPQ7920_LDO1_REG_A 0x13 +#define MPQ7920_LDO1_REG_B 0x0 +#define MPQ7920_LDO2_REG_A 0x14 +#define MPQ7920_LDO2_REG_B 0x15 +#define MPQ7920_LDO2_REG_C 0x16 +#define MPQ7920_LDO3_REG_A 0x17 +#define MPQ7920_LDO3_REG_B 0x18 +#define MPQ7920_LDO3_REG_C 0x19 +#define MPQ7920_LDO4_REG_A 0x1a +#define MPQ7920_LDO4_REG_B 0x1b +#define MPQ7920_LDO4_REG_C 0x1c +#define MPQ7920_LDO5_REG_A 0x1d +#define MPQ7920_LDO5_REG_B 0x1e +#define MPQ7920_LDO5_REG_C 0x1f +#define MPQ7920_REG_MODE 0x20 +#define MPQ7920_REG_REGULATOR_EN 0x22 + +#define MPQ7920_MASK_VREF 0x7f +#define MPQ7920_MASK_BUCK_ILIM 0xc0 +#define 
MPQ7920_MASK_LDO_ILIM BIT(6) +#define MPQ7920_MASK_DISCHARGE BIT(5) +#define MPQ7920_MASK_MODE 0xc0 +#define MPQ7920_MASK_SOFTSTART 0x0c +#define MPQ7920_MASK_SWITCH_FREQ 0x30 +#define MPQ7920_MASK_BUCK_PHASE_DEALY 0x30 +#define MPQ7920_MASK_DVS_SLEWRATE 0xc0 +#define MPQ7920_MASK_OVP 0x40 +#define MPQ7920_OVP_DISABLE ~(0x40) +#define MPQ7920_DISCHARGE_ON BIT(5) + +#define MPQ7920_REGULATOR_EN_OFFSET 7 + +/* values in uV */ +#define MPQ7920_BUCK_VOLT_MIN 400000 +#define MPQ7920_LDO_VOLT_MIN 650000 +#define MPQ7920_VOLT_MAX 3587500 +#define MPQ7920_VOLT_STEP 12500 + +#endif /* __MPQ7920_H__ */ diff --git a/drivers/regulator/mt6311-regulator.c b/drivers/regulator/mt6311-regulator.c index af95449d3590..69e6af3cd505 100644 --- a/drivers/regulator/mt6311-regulator.c +++ b/drivers/regulator/mt6311-regulator.c @@ -85,8 +85,7 @@ static const struct regulator_desc mt6311_regulators[] = { /* * I2C driver interface functions */ -static int mt6311_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int mt6311_i2c_probe(struct i2c_client *i2c) { struct regulator_config config = { }; struct regulator_dev *rdev; @@ -154,7 +153,7 @@ static struct i2c_driver mt6311_regulator_driver = { .name = "mt6311", .of_match_table = of_match_ptr(mt6311_dt_ids), }, - .probe = mt6311_i2c_probe, + .probe_new = mt6311_i2c_probe, .id_table = mt6311_i2c_id, }; diff --git a/drivers/regulator/pv88060-regulator.c b/drivers/regulator/pv88060-regulator.c index 3d3415839ba2..787ced918372 100644 --- a/drivers/regulator/pv88060-regulator.c +++ b/drivers/regulator/pv88060-regulator.c @@ -279,8 +279,7 @@ error_i2c: /* * I2C driver interface functions */ -static int pv88060_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int pv88060_i2c_probe(struct i2c_client *i2c) { struct regulator_init_data *init_data = dev_get_platdata(&i2c->dev); struct pv88060 *chip; @@ -385,7 +384,7 @@ static struct i2c_driver pv88060_regulator_driver = { .name = "pv88060", .of_match_table = of_match_ptr(pv88060_dt_ids), }, - .probe = pv88060_i2c_probe, + .probe_new = pv88060_i2c_probe, .id_table = pv88060_i2c_id, }; diff --git a/drivers/regulator/pv88090-regulator.c b/drivers/regulator/pv88090-regulator.c index b1d0d97ae935..784729ec2182 100644 --- a/drivers/regulator/pv88090-regulator.c +++ b/drivers/regulator/pv88090-regulator.c @@ -272,8 +272,7 @@ error_i2c: /* * I2C driver interface functions */ -static int pv88090_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int pv88090_i2c_probe(struct i2c_client *i2c) { struct regulator_init_data *init_data = dev_get_platdata(&i2c->dev); struct pv88090 *chip; @@ -406,7 +405,7 @@ static struct i2c_driver pv88090_regulator_driver = { .name = "pv88090", .of_match_table = of_match_ptr(pv88090_dt_ids), }, - .probe = pv88090_i2c_probe, + .probe_new = pv88090_i2c_probe, .id_table = pv88090_i2c_id, }; diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c index 5b4003226484..31f79fda3238 100644 --- a/drivers/regulator/rk808-regulator.c +++ b/drivers/regulator/rk808-regulator.c @@ -1282,7 +1282,7 @@ static int rk808_regulator_dt_parse_pdata(struct device *dev, } if (!pdata->dvs_gpio[i]) { - dev_warn(dev, "there is no dvs%d gpio\n", i); + dev_info(dev, "there is no dvs%d gpio\n", i); continue; } diff --git a/drivers/regulator/s2mpa01.c b/drivers/regulator/s2mpa01.c index 51f7e8b74d8c..115f59530852 100644 --- a/drivers/regulator/s2mpa01.c +++ b/drivers/regulator/s2mpa01.c @@ -390,5 +390,5 @@
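Two bits of arithmetic tie the mpq7920.h header above to mpq7920.c: the per-buck register blocks (REG_A..REG_D) sit at a fixed stride of 4, which is what mpq7920_parse_cb() exploits with MPQ7920_BUCK1_REG_B + (rdesc->id * 4), and the *_VOLT_RANGE macros expand to each regulator's n_voltages. A standalone check with the values copied from the defines above:

#include <assert.h>

int main(void)
{
        /* REG_B of bucks 1..4: 0x04, 0x08, 0x0c, 0x10 (stride 4). */
        assert(0x04 + 1 * 4 == 0x08 && 0x04 + 3 * 4 == 0x10);

        /* n_voltages = (max - min) / step + 1, all values in uV. */
        assert((3587500 - 400000) / 12500 + 1 == 256);  /* bucks */
        assert((3587500 - 650000) / 12500 + 1 == 236);  /* LDOs  */
        return 0;
}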
module_platform_driver(s2mpa01_pmic_driver); /* Module information */ MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>"); MODULE_AUTHOR("Sachin Kamat <sachin.kamat@samsung.com>"); -MODULE_DESCRIPTION("SAMSUNG S2MPA01 Regulator Driver"); +MODULE_DESCRIPTION("Samsung S2MPA01 Regulator Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c index 4f2dc5ebffdc..23d288278957 100644 --- a/drivers/regulator/s2mps11.c +++ b/drivers/regulator/s2mps11.c @@ -1265,5 +1265,5 @@ module_platform_driver(s2mps11_pmic_driver); /* Module information */ MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>"); -MODULE_DESCRIPTION("SAMSUNG S2MPS11/S2MPS14/S2MPS15/S2MPU02 Regulator Driver"); +MODULE_DESCRIPTION("Samsung S2MPS11/S2MPS14/S2MPS15/S2MPU02 Regulator Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c index 12d6b8d2e97e..4abd3ed31f60 100644 --- a/drivers/regulator/s5m8767.c +++ b/drivers/regulator/s5m8767.c @@ -1015,5 +1015,5 @@ module_exit(s5m8767_pmic_exit); /* Module information */ MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>"); -MODULE_DESCRIPTION("SAMSUNG S5M8767 Regulator Driver"); +MODULE_DESCRIPTION("Samsung S5M8767 Regulator Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/regulator/slg51000-regulator.c b/drivers/regulator/slg51000-regulator.c index bf1a3508ebc4..44e4cecbf6de 100644 --- a/drivers/regulator/slg51000-regulator.c +++ b/drivers/regulator/slg51000-regulator.c @@ -439,8 +439,7 @@ static void slg51000_clear_fault_log(struct slg51000 *chip) dev_dbg(chip->dev, "Fault log: FLT_POR\n"); } -static int slg51000_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int slg51000_i2c_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct slg51000 *chip; @@ -509,7 +508,7 @@ static struct i2c_driver slg51000_regulator_driver = { .driver = { .name = "slg51000-regulator", }, - .probe = slg51000_i2c_probe, + .probe_new = slg51000_i2c_probe, .id_table = slg51000_i2c_id, }; diff --git a/drivers/regulator/sy8106a-regulator.c b/drivers/regulator/sy8106a-regulator.c index 42e03b2c10a0..2222e739e62b 100644 --- a/drivers/regulator/sy8106a-regulator.c +++ b/drivers/regulator/sy8106a-regulator.c @@ -61,8 +61,7 @@ static const struct regulator_desc sy8106a_reg = { /* * I2C driver interface functions */ -static int sy8106a_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int sy8106a_i2c_probe(struct i2c_client *i2c) { struct device *dev = &i2c->dev; struct regulator_dev *rdev; @@ -141,7 +140,7 @@ static struct i2c_driver sy8106a_regulator_driver = { .name = "sy8106a", .of_match_table = of_match_ptr(sy8106a_i2c_of_match), }, - .probe = sy8106a_i2c_probe, + .probe_new = sy8106a_i2c_probe, .id_table = sy8106a_i2c_id, }; diff --git a/drivers/regulator/sy8824x.c b/drivers/regulator/sy8824x.c index 92adb4f3ee19..62d243f3b904 100644 --- a/drivers/regulator/sy8824x.c +++ b/drivers/regulator/sy8824x.c @@ -112,8 +112,7 @@ static const struct regmap_config sy8824_regmap_config = { .val_bits = 8, }; -static int sy8824_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int sy8824_i2c_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct device_node *np = dev->of_node; @@ -222,7 +221,7 @@ static struct i2c_driver sy8824_regulator_driver = { .name = "sy8824-regulator", .of_match_table = of_match_ptr(sy8824_dt_ids), }, - .probe = sy8824_i2c_probe, + .probe_new = sy8824_i2c_probe, .id_table = 
sy8824_id, }; module_i2c_driver(sy8824_regulator_driver); diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c index 89b9314d64c9..af9abcd9c166 100644 --- a/drivers/regulator/ti-abb-regulator.c +++ b/drivers/regulator/ti-abb-regulator.c @@ -748,7 +748,7 @@ static int ti_abb_probe(struct platform_device *pdev) * We may have shared interrupt register offsets which are * write-1-to-clear between domains ensuring exclusivity. */ - abb->int_base = devm_ioremap_nocache(dev, res->start, + abb->int_base = devm_ioremap(dev, res->start, resource_size(res)); if (!abb->int_base) { dev_err(dev, "Unable to map '%s'\n", pname); @@ -768,7 +768,7 @@ static int ti_abb_probe(struct platform_device *pdev) * We may have shared efuse register offsets which are read-only * between domains */ - abb->efuse_base = devm_ioremap_nocache(dev, res->start, + abb->efuse_base = devm_ioremap(dev, res->start, resource_size(res)); if (!abb->efuse_base) { dev_err(dev, "Unable to map '%s'\n", pname); diff --git a/drivers/regulator/tps65132-regulator.c b/drivers/regulator/tps65132-regulator.c index 7b0e38f8d627..0edc83089ba2 100644 --- a/drivers/regulator/tps65132-regulator.c +++ b/drivers/regulator/tps65132-regulator.c @@ -220,8 +220,7 @@ static const struct regmap_config tps65132_regmap_config = { .wr_table = &tps65132_no_reg_table, }; -static int tps65132_probe(struct i2c_client *client, - const struct i2c_device_id *client_id) +static int tps65132_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct tps65132_regulator *tps; @@ -272,7 +271,7 @@ static struct i2c_driver tps65132_i2c_driver = { .driver = { .name = "tps65132", }, - .probe = tps65132_probe, + .probe_new = tps65132_probe, .id_table = tps65132_id, }; diff --git a/drivers/regulator/vctrl-regulator.c b/drivers/regulator/vctrl-regulator.c index 9a9ee8188109..cbadb1c99679 100644 --- a/drivers/regulator/vctrl-regulator.c +++ b/drivers/regulator/vctrl-regulator.c @@ -11,10 +11,13 @@ #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> +#include <linux/regulator/coupler.h> #include <linux/regulator/driver.h> #include <linux/regulator/of_regulator.h> #include <linux/sort.h> +#include "internal.h" + struct vctrl_voltage_range { int min_uV; int max_uV; @@ -79,7 +82,7 @@ static int vctrl_calc_output_voltage(struct vctrl_data *vctrl, int ctrl_uV) static int vctrl_get_voltage(struct regulator_dev *rdev) { struct vctrl_data *vctrl = rdev_get_drvdata(rdev); - int ctrl_uV = regulator_get_voltage(vctrl->ctrl_reg); + int ctrl_uV = regulator_get_voltage_rdev(vctrl->ctrl_reg->rdev); return vctrl_calc_output_voltage(vctrl, ctrl_uV); } @@ -90,16 +93,16 @@ static int vctrl_set_voltage(struct regulator_dev *rdev, { struct vctrl_data *vctrl = rdev_get_drvdata(rdev); struct regulator *ctrl_reg = vctrl->ctrl_reg; - int orig_ctrl_uV = regulator_get_voltage(ctrl_reg); + int orig_ctrl_uV = regulator_get_voltage_rdev(ctrl_reg->rdev); int uV = vctrl_calc_output_voltage(vctrl, orig_ctrl_uV); int ret; if (req_min_uV >= uV || !vctrl->ovp_threshold) /* voltage rising or no OVP */ - return regulator_set_voltage( - ctrl_reg, + return regulator_set_voltage_rdev(ctrl_reg->rdev, vctrl_calc_ctrl_voltage(vctrl, req_min_uV), - vctrl_calc_ctrl_voltage(vctrl, req_max_uV)); + vctrl_calc_ctrl_voltage(vctrl, req_max_uV), + PM_SUSPEND_ON); while (uV > req_min_uV) { int max_drop_uV = (uV * vctrl->ovp_threshold) / 100; @@ -114,9 +117,10 @@ static int vctrl_set_voltage(struct regulator_dev *rdev, next_uV = max_t(int, req_min_uV, uV - 
max_drop_uV); next_ctrl_uV = vctrl_calc_ctrl_voltage(vctrl, next_uV); - ret = regulator_set_voltage(ctrl_reg, + ret = regulator_set_voltage_rdev(ctrl_reg->rdev, + next_ctrl_uV, next_ctrl_uV, - next_ctrl_uV); + PM_SUSPEND_ON); if (ret) goto err; @@ -130,7 +134,8 @@ static int vctrl_set_voltage(struct regulator_dev *rdev, err: /* Try to go back to original voltage */ - regulator_set_voltage(ctrl_reg, orig_ctrl_uV, orig_ctrl_uV); + regulator_set_voltage_rdev(ctrl_reg->rdev, orig_ctrl_uV, orig_ctrl_uV, + PM_SUSPEND_ON); return ret; } @@ -155,9 +160,10 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev, if (selector >= vctrl->sel || !vctrl->ovp_threshold) { /* voltage rising or no OVP */ - ret = regulator_set_voltage(ctrl_reg, + ret = regulator_set_voltage_rdev(ctrl_reg->rdev, + vctrl->vtable[selector].ctrl, vctrl->vtable[selector].ctrl, - vctrl->vtable[selector].ctrl); + PM_SUSPEND_ON); if (!ret) vctrl->sel = selector; @@ -173,9 +179,10 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev, else next_sel = vctrl->vtable[vctrl->sel].ovp_min_sel; - ret = regulator_set_voltage(ctrl_reg, + ret = regulator_set_voltage_rdev(ctrl_reg->rdev, vctrl->vtable[next_sel].ctrl, - vctrl->vtable[next_sel].ctrl); + vctrl->vtable[next_sel].ctrl, + PM_SUSPEND_ON); if (ret) { dev_err(&rdev->dev, "failed to set control voltage to %duV\n", @@ -195,9 +202,10 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev, err: if (vctrl->sel != orig_sel) { /* Try to go back to original voltage */ - if (!regulator_set_voltage(ctrl_reg, + if (!regulator_set_voltage_rdev(ctrl_reg->rdev, + vctrl->vtable[orig_sel].ctrl, vctrl->vtable[orig_sel].ctrl, - vctrl->vtable[orig_sel].ctrl)) + PM_SUSPEND_ON)) vctrl->sel = orig_sel; else dev_warn(&rdev->dev, @@ -482,7 +490,7 @@ static int vctrl_probe(struct platform_device *pdev) if (ret) return ret; - ctrl_uV = regulator_get_voltage(vctrl->ctrl_reg); + ctrl_uV = regulator_get_voltage_rdev(vctrl->ctrl_reg->rdev); if (ctrl_uV < 0) { dev_err(&pdev->dev, "failed to get control voltage\n"); return ctrl_uV; diff --git a/drivers/regulator/vqmmc-ipq4019-regulator.c b/drivers/regulator/vqmmc-ipq4019-regulator.c new file mode 100644 index 000000000000..6d5ae25d08d1 --- /dev/null +++ b/drivers/regulator/vqmmc-ipq4019-regulator.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-2.0+ +// +// Copyright (c) 2019 Mantas Pucka <mantas@8devices.com> +// Copyright (c) 2019 Robert Marko <robert.marko@sartura.hr> +// +// Driver for IPQ4019 SD/MMC controller's I/O LDO voltage regulator + +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/regulator/driver.h> +#include <linux/regulator/machine.h> +#include <linux/regulator/of_regulator.h> + +static const unsigned int ipq4019_vmmc_voltages[] = { + 1500000, 1800000, 2500000, 3000000, +}; + +static const struct regulator_ops ipq4019_regulator_voltage_ops = { + .list_voltage = regulator_list_voltage_table, + .map_voltage = regulator_map_voltage_ascend, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, +}; + +static const struct regulator_desc vmmc_regulator = { + .name = "vmmcq", + .ops = &ipq4019_regulator_voltage_ops, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .volt_table = ipq4019_vmmc_voltages, + .n_voltages = ARRAY_SIZE(ipq4019_vmmc_voltages), + .vsel_reg = 0, + .vsel_mask = 0x3, +}; + +static const struct regmap_config ipq4019_vmmcq_regmap_config = { + 
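The vctrl hunks above move the control-supply calls to the newly exported regulator_{get,set}_voltage_rdev() (hence the extra PM_SUSPEND_ON argument) while preserving the over-voltage-protection logic: when stepping down, each step may drop at most ovp_threshold percent of the present voltage. A standalone model of one such step (illustrative values):

/* One OVP-limited step toward req_min_uV; mirrors the loop in vctrl_set_voltage(). */
static int vctrl_next_uV(int uV, int req_min_uV, int ovp_threshold_pct)
{
        int max_drop_uV = (uV * ovp_threshold_pct) / 100;
        int next_uV = uV - max_drop_uV;

        return next_uV > req_min_uV ? next_uV : req_min_uV;
}
/* With a 10% threshold: 1200000 -> 1080000 -> 972000 -> ... -> req_min_uV */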
.reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, +}; + +static int ipq4019_regulator_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct regulator_init_data *init_data; + struct regulator_config cfg = {}; + struct regulator_dev *rdev; + struct resource *res; + struct regmap *rmap; + void __iomem *base; + + init_data = of_get_regulator_init_data(dev, dev->of_node, + &vmmc_regulator); + if (!init_data) + return -EINVAL; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + rmap = devm_regmap_init_mmio(dev, base, &ipq4019_vmmcq_regmap_config); + if (IS_ERR(rmap)) + return PTR_ERR(rmap); + + cfg.dev = dev; + cfg.init_data = init_data; + cfg.of_node = dev->of_node; + cfg.regmap = rmap; + + rdev = devm_regulator_register(dev, &vmmc_regulator, &cfg); + if (IS_ERR(rdev)) { + dev_err(dev, "Failed to register regulator: %ld\n", + PTR_ERR(rdev)); + return PTR_ERR(rdev); + } + platform_set_drvdata(pdev, rdev); + + return 0; +} + +static const struct of_device_id regulator_ipq4019_of_match[] = { + { .compatible = "qcom,vqmmc-ipq4019-regulator", }, + {}, +}; + +static struct platform_driver ipq4019_regulator_driver = { + .probe = ipq4019_regulator_probe, + .driver = { + .name = "vqmmc-ipq4019-regulator", + .of_match_table = of_match_ptr(regulator_ipq4019_of_match), + }, +}; +module_platform_driver(ipq4019_regulator_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Mantas Pucka <mantas@8devices.com>"); +MODULE_DESCRIPTION("IPQ4019 VQMMC voltage regulator"); diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c index 579b3ff5c644..feb1f8e52c00 100644 --- a/drivers/rtc/rtc-sh.c +++ b/drivers/rtc/rtc-sh.c @@ -504,7 +504,7 @@ static int __init sh_rtc_probe(struct platform_device *pdev) if (unlikely(!rtc->res)) return -EBUSY; - rtc->regbase = devm_ioremap_nocache(&pdev->dev, rtc->res->start, + rtc->regbase = devm_ioremap(&pdev->dev, rtc->res->start, rtc->regsize); if (unlikely(!rtc->regbase)) return -EINVAL; diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 90cf4691b8c3..a7881f8eb05e 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -68,6 +68,7 @@ comment "SCSI support type (disk, tape, CD-ROM)" config BLK_DEV_SD tristate "SCSI disk support" depends on SCSI + select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY ---help--- If you want to use SCSI hard disks, Fibre Channel disks, Serial ATA (SATA) or Parallel ATA (PATA) hard disks, diff --git a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c index 8466aa784ec1..8b891a05d9e7 100644 --- a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c +++ b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c @@ -293,7 +293,7 @@ ahd_linux_pci_reserve_mem_region(struct ahd_softc *ahd, if (!request_mem_region(start, 0x1000, "aic79xx")) error = ENOMEM; if (!error) { - *maddr = ioremap_nocache(base_page, base_offset + 512); + *maddr = ioremap(base_page, base_offset + 512); if (*maddr == NULL) { error = ENOMEM; release_mem_region(start, 0x1000); diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c index 717d8d1082ce..9b293b1f0b71 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c +++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c @@ -372,7 +372,7 @@ ahc_linux_pci_reserve_mem_region(struct ahc_softc *ahc, if (!request_mem_region(start, 0x1000, "aic7xxx")) error = ENOMEM; if (error == 0) { - *maddr = ioremap_nocache(start, 256); + *maddr = ioremap(start, 256); if (*maddr == NULL) { error 
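The new IPQ4019 VQMMC driver above is fully table-driven: a vsel_mask of 0x3 means bits [1:0] of its single MMIO register index ipq4019_vmmc_voltages[], and the generic regmap helpers do the rest. A sketch of the mapping (local copy of the table, for illustration only):

static const unsigned int vmmc_uV[] = { 1500000, 1800000, 2500000, 3000000 };

/* regulator_set_voltage_sel_regmap() writes the chosen index into bits [1:0];
 * e.g. selector 1 requests 1.8 V signalling for the SD/MMC I/O rail. */
unsigned int io_1v8_sel = 1;    /* vmmc_uV[1] == 1800000 */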
= ENOMEM; release_mem_region(start, 0x1000); diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index db687ef8a99e..40dc8eac0e3a 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -270,7 +270,7 @@ static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb) break; } case ACB_ADAPTER_TYPE_C:{ - acb->pmuC = ioremap_nocache(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1)); + acb->pmuC = ioremap(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1)); if (!acb->pmuC) { printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no); return false; diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 0760d0bd8a10..9b81cfbbc5c5 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -453,14 +453,14 @@ static int beiscsi_map_pci_bars(struct beiscsi_hba *phba, u8 __iomem *addr; int pcicfg_reg; - addr = ioremap_nocache(pci_resource_start(pcidev, 2), + addr = ioremap(pci_resource_start(pcidev, 2), pci_resource_len(pcidev, 2)); if (addr == NULL) return -ENOMEM; phba->ctrl.csr = addr; phba->csr_va = addr; - addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024); + addr = ioremap(pci_resource_start(pcidev, 4), 128 * 1024); if (addr == NULL) goto pci_map_err; phba->ctrl.db = addr; @@ -471,7 +471,7 @@ static int beiscsi_map_pci_bars(struct beiscsi_hba *phba, else pcicfg_reg = 0; - addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg), + addr = ioremap(pci_resource_start(pcidev, pcicfg_reg), pci_resource_len(pcidev, pcicfg_reg)); if (addr == NULL) diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c index f069e09beb10..6f8335ddb1f2 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c +++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c @@ -1414,7 +1414,7 @@ int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt) reg_base = pci_resource_start(hba->pcidev, BNX2X_DOORBELL_PCI_BAR); reg_off = (1 << BNX2X_DB_SHIFT) * (context_id & 0x1FFFF); - tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4); + tgt->ctx_base = ioremap(reg_base + reg_off, 4); if (!tgt->ctx_base) return -ENOMEM; return 0; diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c index 12666313b937..e53ebc5eff85 100644 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c @@ -2715,7 +2715,7 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep) reg_base = pci_resource_start(ep->hba->pcidev, BNX2X_DOORBELL_PCI_BAR); reg_off = (1 << BNX2X_DB_SHIFT) * (cid_num & 0x1FFFF); - ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4); + ep->qp.ctx_base = ioremap(reg_base + reg_off, 4); if (!ep->qp.ctx_base) return -ENOMEM; goto arm_cq; @@ -2736,7 +2736,7 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep) /* 5709 device in normal node and 5706/5708 devices */ reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num); - ep->qp.ctx_base = ioremap_nocache(ep->hba->reg_base + reg_off, + ep->qp.ctx_base = ioremap(ep->hba->reg_base + reg_off, MB_KERNEL_CTX_SIZE); if (!ep->qp.ctx_base) return -ENOMEM; diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c index 2e8a3ac575cb..8dea7d53788a 100644 --- a/drivers/scsi/csiostor/csio_init.c +++ b/drivers/scsi/csiostor/csio_init.c @@ -529,7 +529,7 @@ static struct csio_hw *csio_hw_alloc(struct pci_dev *pdev) goto err_free_hw; /* Get the start address of registers from BAR 0 */ - hw->regstart = ioremap_nocache(pci_resource_start(pdev, 0), + hw->regstart = 
ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); if (!hw->regstart) { csio_err(hw, "Could not map BAR 0, regstart = %p\n", diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index 8ef150dfb6f7..b60795893994 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -439,6 +439,9 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_ if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) return SCSI_MLQUEUE_HOST_BUSY; + if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET))) + return SCSI_MLQUEUE_HOST_BUSY; + rport = starget_to_rport(scsi_target(sc->device)); if (!rport) { FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 216e557f703e..1a4ddfacb458 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -6876,7 +6876,7 @@ static void __iomem *remap_pci_mem(ulong base, ulong size) { ulong page_base = ((ulong) base) & PAGE_MASK; ulong page_offs = ((ulong) base) - page_base; - void __iomem *page_remapped = ioremap_nocache(page_base, + void __iomem *page_remapped = ioremap(page_base, page_offs + size); return page_remapped ? (page_remapped + page_offs) : NULL; diff --git a/drivers/scsi/lasi700.c b/drivers/scsi/lasi700.c index abac2f350aee..c48a73a0f517 100644 --- a/drivers/scsi/lasi700.c +++ b/drivers/scsi/lasi700.c @@ -98,7 +98,7 @@ lasi700_probe(struct parisc_device *dev) hostdata->dev = &dev->dev; dma_set_mask(&dev->dev, DMA_BIT_MASK(32)); - hostdata->base = ioremap_nocache(base, 0x100); + hostdata->base = ioremap(base, 0x100); hostdata->differential = 0; if (dev->id.sversion == LASI_700_SVERSION) { diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c index f6ac819e6e96..8443f2f35be2 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.c +++ b/drivers/scsi/megaraid/megaraid_mbox.c @@ -731,7 +731,7 @@ megaraid_init_mbox(adapter_t *adapter) goto out_free_raid_dev; } - raid_dev->baseaddr = ioremap_nocache(raid_dev->baseport, 128); + raid_dev->baseaddr = ioremap(raid_dev->baseport, 128); if (!raid_dev->baseaddr) { diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index a4bc81479284..c60cd9fc4240 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -5875,7 +5875,7 @@ static int megasas_init_fw(struct megasas_instance *instance) } base_addr = pci_resource_start(instance->pdev, instance->bar); - instance->reg_set = ioremap_nocache(base_addr, 8192); + instance->reg_set = ioremap(base_addr, 8192); if (!instance->reg_set) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n"); diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c index 539ac8ce4fcd..d4bd31a75b9d 100644 --- a/drivers/scsi/myrb.c +++ b/drivers/scsi/myrb.c @@ -3531,7 +3531,7 @@ static struct myrb_hba *myrb_detect(struct pci_dev *pdev, spin_lock_init(&cb->queue_lock); if (mmio_size < PAGE_SIZE) mmio_size = PAGE_SIZE; - cb->mmio_base = ioremap_nocache(cb->pci_addr & PAGE_MASK, mmio_size); + cb->mmio_base = ioremap(cb->pci_addr & PAGE_MASK, mmio_size); if (cb->mmio_base == NULL) { dev_err(&pdev->dev, "Unable to map Controller Register Window\n"); diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c index eb0dd566330a..5c5666491c2e 100644 --- a/drivers/scsi/myrs.c +++ b/drivers/scsi/myrs.c @@ -2311,7 +2311,7 @@ static struct myrs_hba *myrs_detect(struct pci_dev *pdev, /* Map the Controller Register Window. 
*/ if (mmio_size < PAGE_SIZE) mmio_size = PAGE_SIZE; - cs->mmio_base = ioremap_nocache(cs->pci_addr & PAGE_MASK, mmio_size); + cs->mmio_base = ioremap(cs->pci_addr & PAGE_MASK, mmio_size); if (cs->mmio_base == NULL) { dev_err(&pdev->dev, "Unable to map Controller Register Window\n"); diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c index 93616f9fd6d7..d79ce97a04bd 100644 --- a/drivers/scsi/pcmcia/nsp_cs.c +++ b/drivers/scsi/pcmcia/nsp_cs.c @@ -1560,7 +1560,7 @@ static int nsp_cs_config_check(struct pcmcia_device *p_dev, void *priv_data) goto next_entry; data->MmioAddress = (unsigned long) - ioremap_nocache(p_dev->resource[2]->start, + ioremap(p_dev->resource[2]->start, resource_size(p_dev->resource[2])); data->MmioLength = resource_size(p_dev->resource[2]); } diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c index 605b59c76c90..a3a44d4ace1e 100644 --- a/drivers/scsi/qla2xxx/qla_mr.c +++ b/drivers/scsi/qla2xxx/qla_mr.c @@ -789,7 +789,7 @@ qlafx00_iospace_config(struct qla_hw_data *ha) } ha->cregbase = - ioremap_nocache(pci_resource_start(ha->pdev, 0), BAR0_LEN_FX00); + ioremap(pci_resource_start(ha->pdev, 0), BAR0_LEN_FX00); if (!ha->cregbase) { ql_log_pci(ql_log_fatal, ha->pdev, 0x0128, "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev)); @@ -810,7 +810,7 @@ qlafx00_iospace_config(struct qla_hw_data *ha) } ha->iobase = - ioremap_nocache(pci_resource_start(ha->pdev, 2), BAR2_LEN_FX00); + ioremap(pci_resource_start(ha->pdev, 2), BAR2_LEN_FX00); if (!ha->iobase) { ql_log_pci(ql_log_fatal, ha->pdev, 0x012b, "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev)); diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 65ce10c7989c..902b649fc8ef 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2958,15 +2958,16 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp) q->limits.zoned = BLK_ZONED_HM; } else { sdkp->zoned = (buffer[8] >> 4) & 3; - if (sdkp->zoned == 1) + if (sdkp->zoned == 1 && !disk_has_partitions(sdkp->disk)) { /* Host-aware */ q->limits.zoned = BLK_ZONED_HA; - else + } else { /* - * Treat drive-managed devices as - * regular block devices. + * Treat drive-managed devices and host-aware devices + * with partitions as regular block devices. 
*/ q->limits.zoned = BLK_ZONED_NONE; + } } if (blk_queue_is_zoned(q) && sdkp->first_scan) sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n", diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 412ac56ecd60..b7492568e02f 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -7457,7 +7457,7 @@ static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info) goto disable_device; } - ctrl_info->iomem_base = ioremap_nocache(pci_resource_start( + ctrl_info->iomem_base = ioremap(pci_resource_start( ctrl_info->pci_dev, 0), sizeof(struct pqi_ctrl_registers)); if (!ctrl_info->iomem_base) { diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c index a85d52b5dc32..f8397978f8ab 100644 --- a/drivers/scsi/sni_53c710.c +++ b/drivers/scsi/sni_53c710.c @@ -71,7 +71,7 @@ static int snirm710_probe(struct platform_device *dev) hostdata->dev = &dev->dev; dma_set_mask(&dev->dev, DMA_BIT_MASK(32)); - hostdata->base = ioremap_nocache(base, 0x100); + hostdata->base = ioremap(base, 0x100); hostdata->differential = 0; hostdata->clock = SNIRM710_CLOCK; diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c index 440a73eae647..f37df79e37e1 100644 --- a/drivers/scsi/sun3x_esp.c +++ b/drivers/scsi/sun3x_esp.c @@ -190,7 +190,7 @@ static int esp_sun3x_probe(struct platform_device *dev) if (!res || !res->start) goto fail_unlink; - esp->regs = ioremap_nocache(res->start, 0x20); + esp->regs = ioremap(res->start, 0x20); if (!esp->regs) goto fail_unmap_regs; @@ -198,7 +198,7 @@ static int esp_sun3x_probe(struct platform_device *dev) if (!res || !res->start) goto fail_unmap_regs; - esp->dma_regs = ioremap_nocache(res->start, 0x10); + esp->dma_regs = ioremap(res->start, 0x10); esp->command_block = dma_alloc_coherent(esp->dev, 16, &esp->command_block_dma, diff --git a/drivers/scsi/zalon.c b/drivers/scsi/zalon.c index 77bce208210e..7eac76cccc4c 100644 --- a/drivers/scsi/zalon.c +++ b/drivers/scsi/zalon.c @@ -89,7 +89,7 @@ zalon_probe(struct parisc_device *dev) struct gsc_irq gsc_irq; u32 zalon_vers; int error = -ENODEV; - void __iomem *zalon = ioremap_nocache(dev->hpa.start, 4096); + void __iomem *zalon = ioremap(dev->hpa.start, 4096); void __iomem *io_port = zalon + GSC_SCSI_ZALON_OFFSET; static int unit = 0; struct Scsi_Host *host; diff --git a/drivers/scsi/zorro_esp.c b/drivers/scsi/zorro_esp.c index a23a8e5794f5..bdd82e497d5f 100644 --- a/drivers/scsi/zorro_esp.c +++ b/drivers/scsi/zorro_esp.c @@ -801,7 +801,7 @@ static int zorro_esp_probe(struct zorro_dev *z, /* additional setup required for Fastlane */ if (zep->zorro3 && ent->driver_data == ZORRO_BLZ1230II) { /* map full address space up to ESP base for DMA */ - zep->board_base = ioremap_nocache(board, + zep->board_base = ioremap(board, FASTLANE_ESP_ADDR-1); if (!zep->board_base) { pr_err("Cannot allocate board address space\n"); @@ -816,7 +816,7 @@ static int zorro_esp_probe(struct zorro_dev *z, esp->ops = zdd->esp_ops; if (ioaddr > 0xffffff) - esp->regs = ioremap_nocache(ioaddr, 0x20); + esp->regs = ioremap(ioaddr, 0x20); else /* ZorroII address space remapped nocache by early startup */ esp->regs = ZTWO_VADDR(ioaddr); @@ -842,7 +842,7 @@ static int zorro_esp_probe(struct zorro_dev *z, * Only Fastlane Z3 for now - add switch for correct struct * dma_registers size if adding any more */ - esp->dma_regs = ioremap_nocache(dmaaddr, + esp->dma_regs = ioremap(dmaaddr, sizeof(struct fastlane_dma_registers)); } else /* ZorroII address space remapped nocache by early 
startup */ diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c index 9475353f49d6..d996782a7106 100644 --- a/drivers/sh/clk/core.c +++ b/drivers/sh/clk/core.c @@ -368,7 +368,7 @@ static int clk_establish_mapping(struct clk *clk) if (!mapping->base && mapping->phys) { kref_init(&mapping->ref); - mapping->base = ioremap_nocache(mapping->phys, mapping->len); + mapping->base = ioremap(mapping->phys, mapping->len); if (unlikely(!mapping->base)) return -ENXIO; } else if (mapping->base) { diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c index 8485e812d9b2..f8e070d67fa3 100644 --- a/drivers/sh/intc/core.c +++ b/drivers/sh/intc/core.c @@ -213,7 +213,7 @@ int __init register_intc_controller(struct intc_desc *desc) WARN_ON(resource_type(res) != IORESOURCE_MEM); d->window[k].phys = res->start; d->window[k].size = resource_size(res); - d->window[k].virt = ioremap_nocache(res->start, + d->window[k].virt = ioremap(res->start, resource_size(res)); if (!d->window[k].virt) goto err2; diff --git a/drivers/sh/intc/userimask.c b/drivers/sh/intc/userimask.c index 87d69e7471f9..f9f043a3d90a 100644 --- a/drivers/sh/intc/userimask.c +++ b/drivers/sh/intc/userimask.c @@ -73,7 +73,7 @@ int register_intc_userimask(unsigned long addr) if (unlikely(uimask)) return -EBUSY; - uimask = ioremap_nocache(addr, SZ_4K); + uimask = ioremap(addr, SZ_4K); if (unlikely(!uimask)) return -ENOMEM; diff --git a/drivers/soc/tegra/flowctrl.c b/drivers/soc/tegra/flowctrl.c index eb96a3086d6d..5db919d96aba 100644 --- a/drivers/soc/tegra/flowctrl.c +++ b/drivers/soc/tegra/flowctrl.c @@ -219,7 +219,7 @@ static int __init tegra_flowctrl_init(void) return 0; } - tegra_flowctrl_base = ioremap_nocache(res.start, resource_size(&res)); + tegra_flowctrl_base = ioremap(res.start, resource_size(&res)); if (!tegra_flowctrl_base) return -ENXIO; diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c index 4d719d4b8d5a..606abbe55bba 100644 --- a/drivers/soc/tegra/fuse/fuse-tegra.c +++ b/drivers/soc/tegra/fuse/fuse-tegra.c @@ -408,7 +408,7 @@ static int __init tegra_init_fuse(void) } } - fuse->base = ioremap_nocache(regs.start, resource_size(&regs)); + fuse->base = ioremap(regs.start, resource_size(&regs)); if (!fuse->base) { pr_err("failed to map FUSE registers\n"); return -ENXIO; } diff --git a/drivers/soc/tegra/fuse/tegra-apbmisc.c b/drivers/soc/tegra/fuse/tegra-apbmisc.c index df76778af601..a2fd6ccd48f9 100644 --- a/drivers/soc/tegra/fuse/tegra-apbmisc.c +++ b/drivers/soc/tegra/fuse/tegra-apbmisc.c @@ -159,11 +159,11 @@ void __init tegra_init_apbmisc(void) } } - apbmisc_base = ioremap_nocache(apbmisc.start, resource_size(&apbmisc)); + apbmisc_base = ioremap(apbmisc.start, resource_size(&apbmisc)); if (!apbmisc_base) pr_err("failed to map APBMISC registers\n"); - strapping_base = ioremap_nocache(straps.start, resource_size(&straps)); + strapping_base = ioremap(straps.start, resource_size(&straps)); if (!strapping_base) pr_err("failed to map strapping options registers\n"); diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c index ea0e11a09c12..1699dda6b393 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -2826,7 +2826,7 @@ static void tegra186_pmc_setup_irq_polarity(struct tegra_pmc *pmc, of_address_to_resource(np, index, &regs); - wake = ioremap_nocache(regs.start, resource_size(&regs)); + wake = ioremap(regs.start, resource_size(&regs)); if (!wake) { dev_err(pmc->dev, "failed to map PMC wake registers\n"); return; @@ -3097,7 +3097,7 @@ static int __init tegra_pmc_early_init(void) } }
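[Editorial aside, not part of any hunk: the ioremap_nocache() to ioremap() conversions running through this whole section are mechanical, since ioremap() already returns an uncached MMIO mapping on every architecture touched here. A minimal sketch of the before/after pattern, assuming a caller that owns a valid struct resource; example_map_regs() is an illustrative name.]

	#include <linux/io.h>
	#include <linux/ioport.h>

	static void __iomem *example_map_regs(struct resource *res)
	{
		/* before: ioremap_nocache(res->start, resource_size(res)) */
		void __iomem *base = ioremap(res->start, resource_size(res));

		if (!base)
			return NULL;	/* caller decides how to fail */

		return base;	/* release later with iounmap(base) */
	}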
- pmc->base = ioremap_nocache(regs.start, resource_size(&regs)); + pmc->base = ioremap(regs.start, resource_size(&regs)); if (!pmc->base) { pr_err("failed to map PMC registers\n"); of_node_put(np); diff --git a/drivers/soc/ti/Kconfig b/drivers/soc/ti/Kconfig index cf545f428d03..4486e055794c 100644 --- a/drivers/soc/ti/Kconfig +++ b/drivers/soc/ti/Kconfig @@ -80,6 +80,17 @@ config TI_SCI_PM_DOMAINS called ti_sci_pm_domains. Note this is needed early in boot before rootfs may be available. +config TI_K3_RINGACC + bool "K3 Ring accelerator Sub System" + depends on ARCH_K3 || COMPILE_TEST + depends on TI_SCI_INTA_IRQCHIP + help + Say y here to support the K3 Ring accelerator module. + The Ring Accelerator (RINGACC or RA) provides hardware acceleration + to enable straightforward passing of work between a producer + and a consumer. There is one RINGACC module per NAVSS on TI AM65x SoCs. + If unsure, say N. + endif # SOC_TI config TI_SCI_INTA_MSI_DOMAIN diff --git a/drivers/soc/ti/Makefile b/drivers/soc/ti/Makefile index 788b5cd1e180..bec827937a5f 100644 --- a/drivers/soc/ti/Makefile +++ b/drivers/soc/ti/Makefile @@ -10,3 +10,4 @@ obj-$(CONFIG_ARCH_OMAP2PLUS) += omap_prm.o obj-$(CONFIG_WKUP_M3_IPC) += wkup_m3_ipc.o obj-$(CONFIG_TI_SCI_PM_DOMAINS) += ti_sci_pm_domains.o obj-$(CONFIG_TI_SCI_INTA_MSI_DOMAIN) += ti_sci_inta_msi.o +obj-$(CONFIG_TI_K3_RINGACC) += k3-ringacc.o diff --git a/drivers/soc/ti/k3-ringacc.c b/drivers/soc/ti/k3-ringacc.c new file mode 100644 index 000000000000..5fb2ee2ac978 --- /dev/null +++ b/drivers/soc/ti/k3-ringacc.c @@ -0,0 +1,1157 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * TI K3 NAVSS Ring Accelerator subsystem driver + * + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + */ + +#include <linux/dma-mapping.h> +#include <linux/io.h> +#include <linux/init.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/soc/ti/k3-ringacc.h> +#include <linux/soc/ti/ti_sci_protocol.h> +#include <linux/soc/ti/ti_sci_inta_msi.h> +#include <linux/of_irq.h> +#include <linux/irqdomain.h> + +static LIST_HEAD(k3_ringacc_list); +static DEFINE_MUTEX(k3_ringacc_list_lock); + +#define K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK GENMASK(19, 0) + +/** + * struct k3_ring_rt_regs - The RA realtime Control/Status Registers region + * + * @resv_16: Reserved + * @db: Ring Doorbell Register + * @resv_4: Reserved + * @occ: Ring Occupancy Register + * @indx: Ring Current Index Register + * @hwocc: Ring Hardware Occupancy Register + * @hwindx: Ring Hardware Current Index Register + */ +struct k3_ring_rt_regs { + u32 resv_16[4]; + u32 db; + u32 resv_4[1]; + u32 occ; + u32 indx; + u32 hwocc; + u32 hwindx; +}; + +#define K3_RINGACC_RT_REGS_STEP 0x1000 + +/** + * struct k3_ring_fifo_regs - The Ring Accelerator Queues Registers region + * + * @head_data: Ring Head Entry Data Registers + * @tail_data: Ring Tail Entry Data Registers + * @peek_head_data: Ring Peek Head Entry Data Regs + * @peek_tail_data: Ring Peek Tail Entry Data Regs + */ +struct k3_ring_fifo_regs { + u32 head_data[128]; + u32 tail_data[128]; + u32 peek_head_data[128]; + u32 peek_tail_data[128]; +}; + +/** + * struct k3_ringacc_proxy_gcfg_regs - RA Proxy Global Config MMIO Region + * + * @revision: Revision Register + * @config: Config Register + */ +struct k3_ringacc_proxy_gcfg_regs { + u32 revision; + u32 config; +}; + +#define K3_RINGACC_PROXY_CFG_THREADS_MASK GENMASK(15, 0) + +/** + * struct k3_ringacc_proxy_target_regs - Proxy Datapath MMIO Region + * + * @control: Proxy Control Register + * @status: 
Proxy Status Register + * @resv_512: Reserved + * @data: Proxy Data Register + */ +struct k3_ringacc_proxy_target_regs { + u32 control; + u32 status; + u8 resv_512[504]; + u32 data[128]; +}; + +#define K3_RINGACC_PROXY_TARGET_STEP 0x1000 +#define K3_RINGACC_PROXY_NOT_USED (-1) + +enum k3_ringacc_proxy_access_mode { + PROXY_ACCESS_MODE_HEAD = 0, + PROXY_ACCESS_MODE_TAIL = 1, + PROXY_ACCESS_MODE_PEEK_HEAD = 2, + PROXY_ACCESS_MODE_PEEK_TAIL = 3, +}; + +#define K3_RINGACC_FIFO_WINDOW_SIZE_BYTES (512U) +#define K3_RINGACC_FIFO_REGS_STEP 0x1000 +#define K3_RINGACC_MAX_DB_RING_CNT (127U) + +struct k3_ring_ops { + int (*push_tail)(struct k3_ring *ring, void *elm); + int (*push_head)(struct k3_ring *ring, void *elm); + int (*pop_tail)(struct k3_ring *ring, void *elm); + int (*pop_head)(struct k3_ring *ring, void *elm); +}; + +/** + * struct k3_ring - RA Ring descriptor + * + * @rt: Ring control/status registers + * @fifos: Ring queues registers + * @proxy: Ring Proxy Datapath registers + * @ring_mem_dma: Ring buffer dma address + * @ring_mem_virt: Ring buffer virt address + * @ops: Ring operations + * @size: Ring size in elements + * @elm_size: Size of the ring element + * @mode: Ring mode + * @flags: flags + * @free: Number of free elements + * @occ: Ring occupancy + * @windex: Write index (only for @K3_RINGACC_RING_MODE_RING) + * @rindex: Read index (only for @K3_RINGACC_RING_MODE_RING) + * @ring_id: Ring Id + * @parent: Pointer to struct @k3_ringacc + * @use_count: Use count for shared rings + * @proxy_id: RA Ring Proxy Id (only if @K3_RINGACC_RING_USE_PROXY) + */ +struct k3_ring { + struct k3_ring_rt_regs __iomem *rt; + struct k3_ring_fifo_regs __iomem *fifos; + struct k3_ringacc_proxy_target_regs __iomem *proxy; + dma_addr_t ring_mem_dma; + void *ring_mem_virt; + struct k3_ring_ops *ops; + u32 size; + enum k3_ring_size elm_size; + enum k3_ring_mode mode; + u32 flags; +#define K3_RING_FLAG_BUSY BIT(1) +#define K3_RING_FLAG_SHARED BIT(2) + u32 free; + u32 occ; + u32 windex; + u32 rindex; + u32 ring_id; + struct k3_ringacc *parent; + u32 use_count; + int proxy_id; +}; + +/** + * struct k3_ringacc - Rings accelerator descriptor + * + * @dev: pointer to RA device + * @proxy_gcfg: RA proxy global config registers + * @proxy_target_base: RA proxy datapath region + * @num_rings: number of rings in RA + * @rings_inuse: bitfield for ring usage tracking + * @rm_gp_range: general purpose rings range from tisci + * @dma_ring_reset_quirk: DMA reset w/a enable + * @num_proxies: number of RA proxies + * @proxy_inuse: bitfield for proxy usage tracking + * @rings: array of ring descriptors (struct @k3_ring) + * @list: list of RAs in the system + * @req_lock: protect rings allocation + * @tisci: pointer to ti-sci handle + * @tisci_ring_ops: ti-sci rings ops + * @tisci_dev_id: ti-sci device id + */ +struct k3_ringacc { + struct device *dev; + struct k3_ringacc_proxy_gcfg_regs __iomem *proxy_gcfg; + void __iomem *proxy_target_base; + u32 num_rings; /* number of rings in Ringacc module */ + unsigned long *rings_inuse; + struct ti_sci_resource *rm_gp_range; + + bool dma_ring_reset_quirk; + u32 num_proxies; + unsigned long *proxy_inuse; + + struct k3_ring *rings; + struct list_head list; + struct mutex req_lock; /* protect rings allocation */ + + const struct ti_sci_handle *tisci; + const struct ti_sci_rm_ringacc_ops *tisci_ring_ops; + u32 tisci_dev_id; +}; + +static long k3_ringacc_ring_get_fifo_pos(struct k3_ring *ring) +{ + return K3_RINGACC_FIFO_WINDOW_SIZE_BYTES - + (4 << ring->elm_size); +} + +static void 
*k3_ringacc_get_elm_addr(struct k3_ring *ring, u32 idx) +{ + return (ring->ring_mem_virt + idx * (4 << ring->elm_size)); +} + +static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem); +static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem); + +static struct k3_ring_ops k3_ring_mode_ring_ops = { + .push_tail = k3_ringacc_ring_push_mem, + .pop_head = k3_ringacc_ring_pop_mem, +}; + +static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem); +static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem); +static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem); +static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem); + +static struct k3_ring_ops k3_ring_mode_msg_ops = { + .push_tail = k3_ringacc_ring_push_io, + .push_head = k3_ringacc_ring_push_head_io, + .pop_tail = k3_ringacc_ring_pop_tail_io, + .pop_head = k3_ringacc_ring_pop_io, +}; + +static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem); +static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem); +static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem); +static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem); + +static struct k3_ring_ops k3_ring_mode_proxy_ops = { + .push_tail = k3_ringacc_ring_push_tail_proxy, + .push_head = k3_ringacc_ring_push_head_proxy, + .pop_tail = k3_ringacc_ring_pop_tail_proxy, + .pop_head = k3_ringacc_ring_pop_head_proxy, +}; + +static void k3_ringacc_ring_dump(struct k3_ring *ring) +{ + struct device *dev = ring->parent->dev; + + dev_dbg(dev, "dump ring: %d\n", ring->ring_id); + dev_dbg(dev, "dump mem virt %p, dma %pad\n", ring->ring_mem_virt, + &ring->ring_mem_dma); + dev_dbg(dev, "dump elmsize %d, size %d, mode %d, proxy_id %d\n", + ring->elm_size, ring->size, ring->mode, ring->proxy_id); + + dev_dbg(dev, "dump ring_rt_regs: db%08x\n", readl(&ring->rt->db)); + dev_dbg(dev, "dump occ%08x\n", readl(&ring->rt->occ)); + dev_dbg(dev, "dump indx%08x\n", readl(&ring->rt->indx)); + dev_dbg(dev, "dump hwocc%08x\n", readl(&ring->rt->hwocc)); + dev_dbg(dev, "dump hwindx%08x\n", readl(&ring->rt->hwindx)); + + if (ring->ring_mem_virt) + print_hex_dump_debug("dump ring_mem_virt ", DUMP_PREFIX_NONE, + 16, 1, ring->ring_mem_virt, 16 * 8, false); +} + +struct k3_ring *k3_ringacc_request_ring(struct k3_ringacc *ringacc, + int id, u32 flags) +{ + int proxy_id = K3_RINGACC_PROXY_NOT_USED; + + mutex_lock(&ringacc->req_lock); + + if (id == K3_RINGACC_RING_ID_ANY) { + /* Request for any general purpose ring */ + struct ti_sci_resource_desc *gp_rings = + &ringacc->rm_gp_range->desc[0]; + unsigned long size; + + size = gp_rings->start + gp_rings->num; + id = find_next_zero_bit(ringacc->rings_inuse, size, + gp_rings->start); + if (id == size) + goto error; + } else if (id < 0) { + goto error; + } + + if (test_bit(id, ringacc->rings_inuse) && + !(ringacc->rings[id].flags & K3_RING_FLAG_SHARED)) + goto error; + else if (ringacc->rings[id].flags & K3_RING_FLAG_SHARED) + goto out; + + if (flags & K3_RINGACC_RING_USE_PROXY) { + proxy_id = find_next_zero_bit(ringacc->proxy_inuse, + ringacc->num_proxies, 0); + if (proxy_id == ringacc->num_proxies) + goto error; + } + + if (proxy_id != K3_RINGACC_PROXY_NOT_USED) { + set_bit(proxy_id, ringacc->proxy_inuse); + ringacc->rings[id].proxy_id = proxy_id; + dev_dbg(ringacc->dev, "Giving ring#%d proxy#%d\n", id, + proxy_id); + } else { + dev_dbg(ringacc->dev, "Giving ring#%d\n", id); + } + + set_bit(id, ringacc->rings_inuse); +out: + 
ringacc->rings[id].use_count++; + mutex_unlock(&ringacc->req_lock); + return &ringacc->rings[id]; + +error: + mutex_unlock(&ringacc->req_lock); + return NULL; +} +EXPORT_SYMBOL_GPL(k3_ringacc_request_ring); + +static void k3_ringacc_ring_reset_sci(struct k3_ring *ring) +{ + struct k3_ringacc *ringacc = ring->parent; + int ret; + + ret = ringacc->tisci_ring_ops->config( + ringacc->tisci, + TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID, + ringacc->tisci_dev_id, + ring->ring_id, + 0, + 0, + ring->size, + 0, + 0, + 0); + if (ret) + dev_err(ringacc->dev, "TISCI reset ring fail (%d) ring_idx %d\n", + ret, ring->ring_id); +} + +void k3_ringacc_ring_reset(struct k3_ring *ring) +{ + if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) + return; + + ring->occ = 0; + ring->free = 0; + ring->rindex = 0; + ring->windex = 0; + + k3_ringacc_ring_reset_sci(ring); +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset); + +static void k3_ringacc_ring_reconfig_qmode_sci(struct k3_ring *ring, + enum k3_ring_mode mode) +{ + struct k3_ringacc *ringacc = ring->parent; + int ret; + + ret = ringacc->tisci_ring_ops->config( + ringacc->tisci, + TI_SCI_MSG_VALUE_RM_RING_MODE_VALID, + ringacc->tisci_dev_id, + ring->ring_id, + 0, + 0, + 0, + mode, + 0, + 0); + if (ret) + dev_err(ringacc->dev, "TISCI reconf qmode fail (%d) ring_idx %d\n", + ret, ring->ring_id); +} + +void k3_ringacc_ring_reset_dma(struct k3_ring *ring, u32 occ) +{ + if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) + return; + + if (!ring->parent->dma_ring_reset_quirk) + goto reset; + + if (!occ) + occ = readl(&ring->rt->occ); + + if (occ) { + u32 db_ring_cnt, db_ring_cnt_cur; + + dev_dbg(ring->parent->dev, "%s %u occ: %u\n", __func__, + ring->ring_id, occ); + /* TI-SCI ring reset */ + k3_ringacc_ring_reset_sci(ring); + + /* + * Setup the ring in ring/doorbell mode (if not already in this + * mode) + */ + if (ring->mode != K3_RINGACC_RING_MODE_RING) + k3_ringacc_ring_reconfig_qmode_sci( + ring, K3_RINGACC_RING_MODE_RING); + /* + * Ring the doorbell 2**22 - ringOcc times. + * This will wrap the internal UDMAP ring state occupancy + * counter (which is 21-bits wide) to 0. 
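+ * For example (editorial, not in the original comment): with occ = 3
+ * the loop below must ring the doorbell 2**22 - 3 = 4194301 times,
+ * batched as 33025 writes of K3_RINGACC_MAX_DB_RING_CNT (127) plus
+ * one final write of 126.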
+ */ + db_ring_cnt = (1U << 22) - occ; + + while (db_ring_cnt != 0) { + /* + * Ring the doorbell with the maximum count each + * iteration if possible to minimize the total + * number of writes + */ + if (db_ring_cnt > K3_RINGACC_MAX_DB_RING_CNT) + db_ring_cnt_cur = K3_RINGACC_MAX_DB_RING_CNT; + else + db_ring_cnt_cur = db_ring_cnt; + + writel(db_ring_cnt_cur, &ring->rt->db); + db_ring_cnt -= db_ring_cnt_cur; + } + + /* Restore the original ring mode (if not ring mode) */ + if (ring->mode != K3_RINGACC_RING_MODE_RING) + k3_ringacc_ring_reconfig_qmode_sci(ring, ring->mode); + } + +reset: + /* Reset the ring */ + k3_ringacc_ring_reset(ring); +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset_dma); + +static void k3_ringacc_ring_free_sci(struct k3_ring *ring) +{ + struct k3_ringacc *ringacc = ring->parent; + int ret; + + ret = ringacc->tisci_ring_ops->config( + ringacc->tisci, + TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER, + ringacc->tisci_dev_id, + ring->ring_id, + 0, + 0, + 0, + 0, + 0, + 0); + if (ret) + dev_err(ringacc->dev, "TISCI ring free fail (%d) ring_idx %d\n", + ret, ring->ring_id); +} + +int k3_ringacc_ring_free(struct k3_ring *ring) +{ + struct k3_ringacc *ringacc; + + if (!ring) + return -EINVAL; + + ringacc = ring->parent; + + dev_dbg(ring->parent->dev, "flags: 0x%08x\n", ring->flags); + + if (!test_bit(ring->ring_id, ringacc->rings_inuse)) + return -EINVAL; + + mutex_lock(&ringacc->req_lock); + + if (--ring->use_count) + goto out; + + if (!(ring->flags & K3_RING_FLAG_BUSY)) + goto no_init; + + k3_ringacc_ring_free_sci(ring); + + dma_free_coherent(ringacc->dev, + ring->size * (4 << ring->elm_size), + ring->ring_mem_virt, ring->ring_mem_dma); + ring->flags = 0; + ring->ops = NULL; + if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED) { + clear_bit(ring->proxy_id, ringacc->proxy_inuse); + ring->proxy = NULL; + ring->proxy_id = K3_RINGACC_PROXY_NOT_USED; + } + +no_init: + clear_bit(ring->ring_id, ringacc->rings_inuse); + +out: + mutex_unlock(&ringacc->req_lock); + return 0; +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_free); + +u32 k3_ringacc_get_ring_id(struct k3_ring *ring) +{ + if (!ring) + return -EINVAL; + + return ring->ring_id; +} +EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_id); + +u32 k3_ringacc_get_tisci_dev_id(struct k3_ring *ring) +{ + if (!ring) + return -EINVAL; + + return ring->parent->tisci_dev_id; +} +EXPORT_SYMBOL_GPL(k3_ringacc_get_tisci_dev_id); + +int k3_ringacc_get_ring_irq_num(struct k3_ring *ring) +{ + int irq_num; + + if (!ring) + return -EINVAL; + + irq_num = ti_sci_inta_msi_get_virq(ring->parent->dev, ring->ring_id); + if (irq_num <= 0) + irq_num = -EINVAL; + return irq_num; +} +EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_irq_num); + +static int k3_ringacc_ring_cfg_sci(struct k3_ring *ring) +{ + struct k3_ringacc *ringacc = ring->parent; + u32 ring_idx; + int ret; + + if (!ringacc->tisci) + return -EINVAL; + + ring_idx = ring->ring_id; + ret = ringacc->tisci_ring_ops->config( + ringacc->tisci, + TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER, + ringacc->tisci_dev_id, + ring_idx, + lower_32_bits(ring->ring_mem_dma), + upper_32_bits(ring->ring_mem_dma), + ring->size, + ring->mode, + ring->elm_size, + 0); + if (ret) + dev_err(ringacc->dev, "TISCI config ring fail (%d) ring_idx %d\n", + ret, ring_idx); + + return ret; +} + +int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg) +{ + struct k3_ringacc *ringacc = ring->parent; + int ret = 0; + + if (!ring || !cfg) + return -EINVAL; + if (cfg->elm_size > K3_RINGACC_RING_ELSIZE_256 || + cfg->mode >= K3_RINGACC_RING_MODE_INVALID || + cfg->size & 
~K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK || + !test_bit(ring->ring_id, ringacc->rings_inuse)) + return -EINVAL; + + if (cfg->mode == K3_RINGACC_RING_MODE_MESSAGE && + ring->proxy_id == K3_RINGACC_PROXY_NOT_USED && + cfg->elm_size > K3_RINGACC_RING_ELSIZE_8) { + dev_err(ringacc->dev, + "Message mode must use proxy for %u element size\n", + 4 << ring->elm_size); + return -EINVAL; + } + + /* + * In case of shared ring only the first user (master user) can + * configure the ring. The sequence to be followed by the client is: + * ring = k3_ringacc_request_ring(ringacc, ring_id, 0); # master user + * k3_ringacc_ring_cfg(ring, cfg); # master configuration + * k3_ringacc_request_ring(ringacc, ring_id, K3_RING_FLAG_SHARED); + * k3_ringacc_request_ring(ringacc, ring_id, K3_RING_FLAG_SHARED); + */ + if (ring->use_count != 1) + return 0; + + ring->size = cfg->size; + ring->elm_size = cfg->elm_size; + ring->mode = cfg->mode; + ring->occ = 0; + ring->free = 0; + ring->rindex = 0; + ring->windex = 0; + + if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED) + ring->proxy = ringacc->proxy_target_base + + ring->proxy_id * K3_RINGACC_PROXY_TARGET_STEP; + + switch (ring->mode) { + case K3_RINGACC_RING_MODE_RING: + ring->ops = &k3_ring_mode_ring_ops; + break; + case K3_RINGACC_RING_MODE_MESSAGE: + if (ring->proxy) + ring->ops = &k3_ring_mode_proxy_ops; + else + ring->ops = &k3_ring_mode_msg_ops; + break; + default: + ring->ops = NULL; + ret = -EINVAL; + goto err_free_proxy; + } + + ring->ring_mem_virt = dma_alloc_coherent(ringacc->dev, + ring->size * (4 << ring->elm_size), + &ring->ring_mem_dma, GFP_KERNEL); + if (!ring->ring_mem_virt) { + dev_err(ringacc->dev, "Failed to alloc ring mem\n"); + ret = -ENOMEM; + goto err_free_ops; + } + + ret = k3_ringacc_ring_cfg_sci(ring); + + if (ret) + goto err_free_mem; + + ring->flags |= K3_RING_FLAG_BUSY; + ring->flags |= (cfg->flags & K3_RINGACC_RING_SHARED) ? 
+ K3_RING_FLAG_SHARED : 0; + + k3_ringacc_ring_dump(ring); + + return 0; + +err_free_mem: + dma_free_coherent(ringacc->dev, + ring->size * (4 << ring->elm_size), + ring->ring_mem_virt, + ring->ring_mem_dma); +err_free_ops: + ring->ops = NULL; +err_free_proxy: + ring->proxy = NULL; + return ret; +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_cfg); + +u32 k3_ringacc_ring_get_size(struct k3_ring *ring) +{ + if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) + return -EINVAL; + + return ring->size; +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_size); + +u32 k3_ringacc_ring_get_free(struct k3_ring *ring) +{ + if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) + return -EINVAL; + + if (!ring->free) + ring->free = ring->size - readl(&ring->rt->occ); + + return ring->free; +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_free); + +u32 k3_ringacc_ring_get_occ(struct k3_ring *ring) +{ + if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) + return -EINVAL; + + return readl(&ring->rt->occ); +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_occ); + +u32 k3_ringacc_ring_is_full(struct k3_ring *ring) +{ + return !k3_ringacc_ring_get_free(ring); +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_is_full); + +enum k3_ringacc_access_mode { + K3_RINGACC_ACCESS_MODE_PUSH_HEAD, + K3_RINGACC_ACCESS_MODE_POP_HEAD, + K3_RINGACC_ACCESS_MODE_PUSH_TAIL, + K3_RINGACC_ACCESS_MODE_POP_TAIL, + K3_RINGACC_ACCESS_MODE_PEEK_HEAD, + K3_RINGACC_ACCESS_MODE_PEEK_TAIL, +}; + +#define K3_RINGACC_PROXY_MODE(x) (((x) & 0x3) << 16) +#define K3_RINGACC_PROXY_ELSIZE(x) (((x) & 0x7) << 24) +static int k3_ringacc_ring_cfg_proxy(struct k3_ring *ring, + enum k3_ringacc_proxy_access_mode mode) +{ + u32 val; + + val = ring->ring_id; + val |= K3_RINGACC_PROXY_MODE(mode); + val |= K3_RINGACC_PROXY_ELSIZE(ring->elm_size); + writel(val, &ring->proxy->control); + return 0; +} + +static int k3_ringacc_ring_access_proxy(struct k3_ring *ring, void *elem, + enum k3_ringacc_access_mode access_mode) +{ + void __iomem *ptr; + + ptr = (void __iomem *)&ring->proxy->data; + + switch (access_mode) { + case K3_RINGACC_ACCESS_MODE_PUSH_HEAD: + case K3_RINGACC_ACCESS_MODE_POP_HEAD: + k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_HEAD); + break; + case K3_RINGACC_ACCESS_MODE_PUSH_TAIL: + case K3_RINGACC_ACCESS_MODE_POP_TAIL: + k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_TAIL); + break; + default: + return -EINVAL; + } + + ptr += k3_ringacc_ring_get_fifo_pos(ring); + + switch (access_mode) { + case K3_RINGACC_ACCESS_MODE_POP_HEAD: + case K3_RINGACC_ACCESS_MODE_POP_TAIL: + dev_dbg(ring->parent->dev, + "proxy:memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr, + access_mode); + memcpy_fromio(elem, ptr, (4 << ring->elm_size)); + ring->occ--; + break; + case K3_RINGACC_ACCESS_MODE_PUSH_TAIL: + case K3_RINGACC_ACCESS_MODE_PUSH_HEAD: + dev_dbg(ring->parent->dev, + "proxy:memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr, + access_mode); + memcpy_toio(ptr, elem, (4 << ring->elm_size)); + ring->free--; + break; + default: + return -EINVAL; + } + + dev_dbg(ring->parent->dev, "proxy: free%d occ%d\n", ring->free, + ring->occ); + return 0; +} + +static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem) +{ + return k3_ringacc_ring_access_proxy(ring, elem, + K3_RINGACC_ACCESS_MODE_PUSH_HEAD); +} + +static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem) +{ + return k3_ringacc_ring_access_proxy(ring, elem, + K3_RINGACC_ACCESS_MODE_PUSH_TAIL); +} + +static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem) +{ + return k3_ringacc_ring_access_proxy(ring, elem, + 
K3_RINGACC_ACCESS_MODE_POP_HEAD); +} + +static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem) +{ + return k3_ringacc_ring_access_proxy(ring, elem, + K3_RINGACC_ACCESS_MODE_POP_HEAD); +} + +static int k3_ringacc_ring_access_io(struct k3_ring *ring, void *elem, + enum k3_ringacc_access_mode access_mode) +{ + void __iomem *ptr; + + switch (access_mode) { + case K3_RINGACC_ACCESS_MODE_PUSH_HEAD: + case K3_RINGACC_ACCESS_MODE_POP_HEAD: + ptr = (void __iomem *)&ring->fifos->head_data; + break; + case K3_RINGACC_ACCESS_MODE_PUSH_TAIL: + case K3_RINGACC_ACCESS_MODE_POP_TAIL: + ptr = (void __iomem *)&ring->fifos->tail_data; + break; + default: + return -EINVAL; + } + + ptr += k3_ringacc_ring_get_fifo_pos(ring); + + switch (access_mode) { + case K3_RINGACC_ACCESS_MODE_POP_HEAD: + case K3_RINGACC_ACCESS_MODE_POP_TAIL: + dev_dbg(ring->parent->dev, + "memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr, + access_mode); + memcpy_fromio(elem, ptr, (4 << ring->elm_size)); + ring->occ--; + break; + case K3_RINGACC_ACCESS_MODE_PUSH_TAIL: + case K3_RINGACC_ACCESS_MODE_PUSH_HEAD: + dev_dbg(ring->parent->dev, + "memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr, + access_mode); + memcpy_toio(ptr, elem, (4 << ring->elm_size)); + ring->free--; + break; + default: + return -EINVAL; + } + + dev_dbg(ring->parent->dev, "free%d index%d occ%d index%d\n", ring->free, + ring->windex, ring->occ, ring->rindex); + return 0; +} + +static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem) +{ + return k3_ringacc_ring_access_io(ring, elem, + K3_RINGACC_ACCESS_MODE_PUSH_HEAD); +} + +static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem) +{ + return k3_ringacc_ring_access_io(ring, elem, + K3_RINGACC_ACCESS_MODE_PUSH_TAIL); +} + +static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem) +{ + return k3_ringacc_ring_access_io(ring, elem, + K3_RINGACC_ACCESS_MODE_POP_HEAD); +} + +static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem) +{ + return k3_ringacc_ring_access_io(ring, elem, + K3_RINGACC_ACCESS_MODE_POP_HEAD); +} + +static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem) +{ + void *elem_ptr; + + elem_ptr = k3_ringacc_get_elm_addr(ring, ring->windex); + + memcpy(elem_ptr, elem, (4 << ring->elm_size)); + + ring->windex = (ring->windex + 1) % ring->size; + ring->free--; + writel(1, &ring->rt->db); + + dev_dbg(ring->parent->dev, "ring_push_mem: free%d index%d\n", + ring->free, ring->windex); + + return 0; +} + +static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem) +{ + void *elem_ptr; + + elem_ptr = k3_ringacc_get_elm_addr(ring, ring->rindex); + + memcpy(elem, elem_ptr, (4 << ring->elm_size)); + + ring->rindex = (ring->rindex + 1) % ring->size; + ring->occ--; + writel(-1, &ring->rt->db); + + dev_dbg(ring->parent->dev, "ring_pop_mem: occ%d index%d pos_ptr%p\n", + ring->occ, ring->rindex, elem_ptr); + return 0; +} + +int k3_ringacc_ring_push(struct k3_ring *ring, void *elem) +{ + int ret = -EOPNOTSUPP; + + if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) + return -EINVAL; + + dev_dbg(ring->parent->dev, "ring_push: free%d index%d\n", ring->free, + ring->windex); + + if (k3_ringacc_ring_is_full(ring)) + return -ENOMEM; + + if (ring->ops && ring->ops->push_tail) + ret = ring->ops->push_tail(ring, elem); + + return ret; +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_push); + +int k3_ringacc_ring_push_head(struct k3_ring *ring, void *elem) +{ + int ret = -EOPNOTSUPP; + + if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) + return -EINVAL; + + 
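[Editorial aside, not part of the patch: a minimal client sketch of the API exported above, following the master-user sequence described in the k3_ringacc_ring_cfg() comment. The ring size, element value and function name are illustrative; the declarations are assumed to come from linux/soc/ti/k3-ringacc.h.]

	static int example_ring_client(struct k3_ringacc *ringacc)
	{
		struct k3_ring_cfg cfg = {
			.size = 128,				/* elements */
			.elm_size = K3_RINGACC_RING_ELSIZE_8,	/* 8 bytes each */
			.mode = K3_RINGACC_RING_MODE_RING,
		};
		struct k3_ring *ring;
		u64 elem = 0xdeadbeef;
		int ret;

		/* first (master) user requests and configures the ring */
		ring = k3_ringacc_request_ring(ringacc, K3_RINGACC_RING_ID_ANY, 0);
		if (!ring)
			return -ENODEV;

		ret = k3_ringacc_ring_cfg(ring, &cfg);
		if (ret)
			goto err_free;

		ret = k3_ringacc_ring_push(ring, &elem);	/* tail push */
		if (!ret)
			ret = k3_ringacc_ring_pop(ring, &elem);	/* head pop */

	err_free:
		k3_ringacc_ring_free(ring);
		return ret;
	}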
dev_dbg(ring->parent->dev, "ring_push_head: free%d index%d\n", + ring->free, ring->windex); + + if (k3_ringacc_ring_is_full(ring)) + return -ENOMEM; + + if (ring->ops && ring->ops->push_head) + ret = ring->ops->push_head(ring, elem); + + return ret; +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_push_head); + +int k3_ringacc_ring_pop(struct k3_ring *ring, void *elem) +{ + int ret = -EOPNOTSUPP; + + if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) + return -EINVAL; + + if (!ring->occ) + ring->occ = k3_ringacc_ring_get_occ(ring); + + dev_dbg(ring->parent->dev, "ring_pop: occ%d index%d\n", ring->occ, + ring->rindex); + + if (!ring->occ) + return -ENODATA; + + if (ring->ops && ring->ops->pop_head) + ret = ring->ops->pop_head(ring, elem); + + return ret; +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_pop); + +int k3_ringacc_ring_pop_tail(struct k3_ring *ring, void *elem) +{ + int ret = -EOPNOTSUPP; + + if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) + return -EINVAL; + + if (!ring->occ) + ring->occ = k3_ringacc_ring_get_occ(ring); + + dev_dbg(ring->parent->dev, "ring_pop_tail: occ%d index%d\n", ring->occ, + ring->rindex); + + if (!ring->occ) + return -ENODATA; + + if (ring->ops && ring->ops->pop_tail) + ret = ring->ops->pop_tail(ring, elem); + + return ret; +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_pop_tail); + +struct k3_ringacc *of_k3_ringacc_get_by_phandle(struct device_node *np, + const char *property) +{ + struct device_node *ringacc_np; + struct k3_ringacc *ringacc = ERR_PTR(-EPROBE_DEFER); + struct k3_ringacc *entry; + + ringacc_np = of_parse_phandle(np, property, 0); + if (!ringacc_np) + return ERR_PTR(-ENODEV); + + mutex_lock(&k3_ringacc_list_lock); + list_for_each_entry(entry, &k3_ringacc_list, list) + if (entry->dev->of_node == ringacc_np) { + ringacc = entry; + break; + } + mutex_unlock(&k3_ringacc_list_lock); + of_node_put(ringacc_np); + + return ringacc; +} +EXPORT_SYMBOL_GPL(of_k3_ringacc_get_by_phandle); + +static int k3_ringacc_probe_dt(struct k3_ringacc *ringacc) +{ + struct device_node *node = ringacc->dev->of_node; + struct device *dev = ringacc->dev; + struct platform_device *pdev = to_platform_device(dev); + int ret; + + if (!node) { + dev_err(dev, "device tree info unavailable\n"); + return -ENODEV; + } + + ret = of_property_read_u32(node, "ti,num-rings", &ringacc->num_rings); + if (ret) { + dev_err(dev, "ti,num-rings read failure %d\n", ret); + return ret; + } + + ringacc->dma_ring_reset_quirk = + of_property_read_bool(node, "ti,dma-ring-reset-quirk"); + + ringacc->tisci = ti_sci_get_by_phandle(node, "ti,sci"); + if (IS_ERR(ringacc->tisci)) { + ret = PTR_ERR(ringacc->tisci); + if (ret != -EPROBE_DEFER) + dev_err(dev, "ti,sci read fail %d\n", ret); + ringacc->tisci = NULL; + return ret; + } + + ret = of_property_read_u32(node, "ti,sci-dev-id", + &ringacc->tisci_dev_id); + if (ret) { + dev_err(dev, "ti,sci-dev-id read fail %d\n", ret); + return ret; + } + + pdev->id = ringacc->tisci_dev_id; + + ringacc->rm_gp_range = devm_ti_sci_get_of_resource(ringacc->tisci, dev, + ringacc->tisci_dev_id, + "ti,sci-rm-range-gp-rings"); + if (IS_ERR(ringacc->rm_gp_range)) { + dev_err(dev, "Failed to allocate MSI interrupts\n"); + return PTR_ERR(ringacc->rm_gp_range); + } + + return ti_sci_inta_msi_domain_alloc_irqs(ringacc->dev, + ringacc->rm_gp_range); +} + +static int k3_ringacc_probe(struct platform_device *pdev) +{ + struct k3_ringacc *ringacc; + void __iomem *base_fifo, *base_rt; + struct device *dev = &pdev->dev; + struct resource *res; + int ret, i; + + ringacc = devm_kzalloc(dev, sizeof(*ringacc), 
GFP_KERNEL); + if (!ringacc) + return -ENOMEM; + + ringacc->dev = dev; + mutex_init(&ringacc->req_lock); + + dev->msi_domain = of_msi_get_domain(dev, dev->of_node, + DOMAIN_BUS_TI_SCI_INTA_MSI); + if (!dev->msi_domain) { + dev_err(dev, "Failed to get MSI domain\n"); + return -EPROBE_DEFER; + } + + ret = k3_ringacc_probe_dt(ringacc); + if (ret) + return ret; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rt"); + base_rt = devm_ioremap_resource(dev, res); + if (IS_ERR(base_rt)) + return PTR_ERR(base_rt); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fifos"); + base_fifo = devm_ioremap_resource(dev, res); + if (IS_ERR(base_fifo)) + return PTR_ERR(base_fifo); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "proxy_gcfg"); + ringacc->proxy_gcfg = devm_ioremap_resource(dev, res); + if (IS_ERR(ringacc->proxy_gcfg)) + return PTR_ERR(ringacc->proxy_gcfg); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "proxy_target"); + ringacc->proxy_target_base = devm_ioremap_resource(dev, res); + if (IS_ERR(ringacc->proxy_target_base)) + return PTR_ERR(ringacc->proxy_target_base); + + ringacc->num_proxies = readl(&ringacc->proxy_gcfg->config) & + K3_RINGACC_PROXY_CFG_THREADS_MASK; + + ringacc->rings = devm_kzalloc(dev, + sizeof(*ringacc->rings) * + ringacc->num_rings, + GFP_KERNEL); + ringacc->rings_inuse = devm_kcalloc(dev, + BITS_TO_LONGS(ringacc->num_rings), + sizeof(unsigned long), GFP_KERNEL); + ringacc->proxy_inuse = devm_kcalloc(dev, + BITS_TO_LONGS(ringacc->num_proxies), + sizeof(unsigned long), GFP_KERNEL); + + if (!ringacc->rings || !ringacc->rings_inuse || !ringacc->proxy_inuse) + return -ENOMEM; + + for (i = 0; i < ringacc->num_rings; i++) { + ringacc->rings[i].rt = base_rt + + K3_RINGACC_RT_REGS_STEP * i; + ringacc->rings[i].fifos = base_fifo + + K3_RINGACC_FIFO_REGS_STEP * i; + ringacc->rings[i].parent = ringacc; + ringacc->rings[i].ring_id = i; + ringacc->rings[i].proxy_id = K3_RINGACC_PROXY_NOT_USED; + } + dev_set_drvdata(dev, ringacc); + + ringacc->tisci_ring_ops = &ringacc->tisci->ops.rm_ring_ops; + + mutex_lock(&k3_ringacc_list_lock); + list_add_tail(&ringacc->list, &k3_ringacc_list); + mutex_unlock(&k3_ringacc_list_lock); + + dev_info(dev, "Ring Accelerator probed rings:%u, gp-rings[%u,%u] sci-dev-id:%u\n", + ringacc->num_rings, + ringacc->rm_gp_range->desc[0].start, + ringacc->rm_gp_range->desc[0].num, + ringacc->tisci_dev_id); + dev_info(dev, "dma-ring-reset-quirk: %s\n", + ringacc->dma_ring_reset_quirk ? "enabled" : "disabled"); + dev_info(dev, "RA Proxy rev. 
%08x, num_proxies:%u\n", + readl(&ringacc->proxy_gcfg->revision), ringacc->num_proxies); + return 0; +} + +/* Match table for of_platform binding */ +static const struct of_device_id k3_ringacc_of_match[] = { + { .compatible = "ti,am654-navss-ringacc", }, + {}, +}; + +static struct platform_driver k3_ringacc_driver = { + .probe = k3_ringacc_probe, + .driver = { + .name = "k3-ringacc", + .of_match_table = k3_ringacc_of_match, + .suppress_bind_attrs = true, + }, +}; +builtin_platform_driver(k3_ringacc_driver); diff --git a/drivers/soc/xilinx/xlnx_vcu.c b/drivers/soc/xilinx/xlnx_vcu.c index a840c0272135..a3aa40996f13 100644 --- a/drivers/soc/xilinx/xlnx_vcu.c +++ b/drivers/soc/xilinx/xlnx_vcu.c @@ -511,7 +511,7 @@ static int xvcu_probe(struct platform_device *pdev) return -ENODEV; } - xvcu->vcu_slcr_ba = devm_ioremap_nocache(&pdev->dev, res->start, + xvcu->vcu_slcr_ba = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!xvcu->vcu_slcr_ba) { dev_err(&pdev->dev, "vcu_slcr register mapping failed.\n"); @@ -524,7 +524,7 @@ static int xvcu_probe(struct platform_device *pdev) return -ENODEV; } - xvcu->logicore_reg_ba = devm_ioremap_nocache(&pdev->dev, res->start, + xvcu->logicore_reg_ba = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!xvcu->logicore_reg_ba) { dev_err(&pdev->dev, "logicore register mapping failed.\n"); diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 870f7797b56b..d6ed0c355954 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -281,6 +281,15 @@ config SPI_FSL_QUADSPI This controller does not support generic SPI messages. It only supports the high-level SPI memory interface. +config SPI_HISI_SFC_V3XX + tristate "HiSilicon SPI-NOR Flash Controller for Hi16XX chipsets" + depends on (ARM64 && ACPI) || COMPILE_TEST + depends on HAS_IOMEM + select CONFIG_MTD_SPI_NOR + help + This enables support for HiSilicon v3xx SPI-NOR flash controller + found in hi16xx chipsets. + config SPI_NXP_FLEXSPI tristate "NXP Flex SPI controller" depends on ARCH_LAYERSCAPE || HAS_IOMEM diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index bb49c9e6d0a0..9b65ec5afc5e 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -48,6 +48,7 @@ obj-$(CONFIG_SPI_FSL_LPSPI) += spi-fsl-lpspi.o obj-$(CONFIG_SPI_FSL_QUADSPI) += spi-fsl-qspi.o obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o obj-$(CONFIG_SPI_GPIO) += spi-gpio.o +obj-$(CONFIG_SPI_HISI_SFC_V3XX) += spi-hisi-sfc-v3xx.o obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o obj-$(CONFIG_SPI_IMX) += spi-imx.o obj-$(CONFIG_SPI_LANTIQ_SSC) += spi-lantiq-ssc.o diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index 56f0ca361deb..013458cabe3c 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c @@ -514,26 +514,19 @@ static int atmel_spi_configure_dma(struct spi_master *master, master->dma_tx = dma_request_chan(dev, "tx"); if (IS_ERR(master->dma_tx)) { err = PTR_ERR(master->dma_tx); - if (err == -EPROBE_DEFER) { - dev_warn(dev, "no DMA channel available at the moment\n"); - goto error_clear; - } - dev_err(dev, - "DMA TX channel not available, SPI unable to use DMA\n"); - err = -EBUSY; + if (err != -EPROBE_DEFER) + dev_err(dev, "No TX DMA channel, DMA is disabled\n"); goto error_clear; } - /* - * No reason to check EPROBE_DEFER here since we have already requested - * tx channel. If it fails here, it's for another reason. 
- */ - master->dma_rx = dma_request_slave_channel(dev, "rx"); - - if (!master->dma_rx) { - dev_err(dev, - "DMA RX channel not available, SPI unable to use DMA\n"); - err = -EBUSY; + master->dma_rx = dma_request_chan(dev, "rx"); + if (IS_ERR(master->dma_rx)) { + err = PTR_ERR(master->dma_rx); + /* + * No reason to check EPROBE_DEFER here since we have already + * requested tx channel. + */ + dev_err(dev, "No RX DMA channel, DMA is disabled\n"); goto error; } @@ -548,7 +541,7 @@ static int atmel_spi_configure_dma(struct spi_master *master, return 0; error: - if (master->dma_rx) + if (!IS_ERR(master->dma_rx)) dma_release_channel(master->dma_rx); if (!IS_ERR(master->dma_tx)) dma_release_channel(master->dma_tx); diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c index 85bad70f59e3..23d295f36c80 100644 --- a/drivers/spi/spi-bcm-qspi.c +++ b/drivers/spi/spi-bcm-qspi.c @@ -1293,7 +1293,7 @@ int bcm_qspi_probe(struct platform_device *pdev, name = qspi_irq_tab[val].irq_name; if (qspi_irq_tab[val].irq_source == SINGLE_L2) { /* get the l2 interrupts */ - irq = platform_get_irq_byname(pdev, name); + irq = platform_get_irq_byname_optional(pdev, name); } else if (!num_ints && soc_intc) { /* all mspi, bspi intrs muxed to one L1 intr */ irq = platform_get_irq(pdev, 0); diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c index fb61a620effc..11c235879bb7 100644 --- a/drivers/spi/spi-bcm2835.c +++ b/drivers/spi/spi-bcm2835.c @@ -68,7 +68,7 @@ #define BCM2835_SPI_FIFO_SIZE 64 #define BCM2835_SPI_FIFO_SIZE_3_4 48 #define BCM2835_SPI_DMA_MIN_LENGTH 96 -#define BCM2835_SPI_NUM_CS 3 /* raise as necessary */ +#define BCM2835_SPI_NUM_CS 4 /* raise as necessary */ #define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \ | SPI_NO_CS | SPI_3WIRE) @@ -888,8 +888,8 @@ static void bcm2835_dma_release(struct spi_controller *ctlr, } } -static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev, - struct bcm2835_spi *bs) +static int bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev, + struct bcm2835_spi *bs) { struct dma_slave_config slave_config; const __be32 *addr; @@ -900,19 +900,24 @@ static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev, addr = of_get_address(ctlr->dev.of_node, 0, NULL, NULL); if (!addr) { dev_err(dev, "could not get DMA-register address - not using dma mode\n"); - goto err; + /* Fall back to interrupt mode */ + return 0; } dma_reg_base = be32_to_cpup(addr); /* get tx/rx dma */ - ctlr->dma_tx = dma_request_slave_channel(dev, "tx"); - if (!ctlr->dma_tx) { + ctlr->dma_tx = dma_request_chan(dev, "tx"); + if (IS_ERR(ctlr->dma_tx)) { dev_err(dev, "no tx-dma configuration found - not using dma mode\n"); + ret = PTR_ERR(ctlr->dma_tx); + ctlr->dma_tx = NULL; goto err; } - ctlr->dma_rx = dma_request_slave_channel(dev, "rx"); - if (!ctlr->dma_rx) { + ctlr->dma_rx = dma_request_chan(dev, "rx"); + if (IS_ERR(ctlr->dma_rx)) { dev_err(dev, "no rx-dma configuration found - not using dma mode\n"); + ret = PTR_ERR(ctlr->dma_rx); + ctlr->dma_rx = NULL; goto err_release; } @@ -997,7 +1002,7 @@ static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev, /* all went well, so set can_dma */ ctlr->can_dma = bcm2835_spi_can_dma; - return; + return 0; err_config: dev_err(dev, "issue configuring dma: %d - not using DMA mode\n", @@ -1005,7 +1010,14 @@ err_config: err_release: bcm2835_dma_release(ctlr, bs); err: - return; + /* + * Only report error for deferred probing, otherwise fall back to + * interrupt mode + */ 
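[Editorial aside, not part of the patch: the dma_request_slave_channel() to dma_request_chan() conversion above recurs in the fsl-dspi, mxs and img-spfi hunks later in this section. A sketch of the shared pattern, with example_request_tx_dma() as an illustrative name; the point is that dma_request_chan() returns an ERR_PTR, so -EPROBE_DEFER can be propagated instead of being swallowed the way a NULL return from the old API forced.]

	#include <linux/dmaengine.h>
	#include <linux/err.h>

	static int example_request_tx_dma(struct device *dev,
					  struct dma_chan **chan)
	{
		*chan = dma_request_chan(dev, "tx");
		if (IS_ERR(*chan)) {
			int err = PTR_ERR(*chan);

			*chan = NULL;
			if (err == -EPROBE_DEFER)
				return err;	/* retry once the DMA driver is up */

			/* any other error: fall back to PIO/interrupt mode */
			dev_warn(dev, "no TX DMA (%d), using PIO\n", err);
		}
		return 0;
	}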
+ if (ret != -EPROBE_DEFER) + ret = 0; + + return ret; } static int bcm2835_spi_transfer_one_poll(struct spi_controller *ctlr, @@ -1305,7 +1317,10 @@ static int bcm2835_spi_probe(struct platform_device *pdev) bs->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(bs->clk)) { err = PTR_ERR(bs->clk); - dev_err(&pdev->dev, "could not get clk: %d\n", err); + if (err == -EPROBE_DEFER) + dev_dbg(&pdev->dev, "could not get clk: %d\n", err); + else + dev_err(&pdev->dev, "could not get clk: %d\n", err); goto out_controller_put; } @@ -1317,7 +1332,9 @@ static int bcm2835_spi_probe(struct platform_device *pdev) clk_prepare_enable(bs->clk); - bcm2835_dma_init(ctlr, &pdev->dev, bs); + err = bcm2835_dma_init(ctlr, &pdev->dev, bs); + if (err) + goto out_clk_disable; /* initialise the hardware with the default polarities */ bcm2835_wr(bs, BCM2835_SPI_CS, @@ -1327,20 +1344,22 @@ static int bcm2835_spi_probe(struct platform_device *pdev) dev_name(&pdev->dev), ctlr); if (err) { dev_err(&pdev->dev, "could not request IRQ: %d\n", err); - goto out_clk_disable; + goto out_dma_release; } err = devm_spi_register_controller(&pdev->dev, ctlr); if (err) { dev_err(&pdev->dev, "could not register SPI controller: %d\n", err); - goto out_clk_disable; + goto out_dma_release; } bcm2835_debugfs_create(bs, dev_name(&pdev->dev)); return 0; +out_dma_release: + bcm2835_dma_release(ctlr, bs); out_clk_disable: clk_disable_unprepare(bs->clk); out_controller_put: diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c index d84e22dd6f9f..68491a8bf7b5 100644 --- a/drivers/spi/spi-bitbang.c +++ b/drivers/spi/spi-bitbang.c @@ -329,8 +329,20 @@ static void spi_bitbang_set_cs(struct spi_device *spi, bool enable) int spi_bitbang_init(struct spi_bitbang *bitbang) { struct spi_master *master = bitbang->master; + bool custom_cs; - if (!master || !bitbang->chipselect) + if (!master) + return -EINVAL; + /* + * We only need the chipselect callback if we are actually using it. + * If we just use GPIO descriptors, it is surplus. If the + * SPI_MASTER_GPIO_SS flag is set, we always need to call the + * driver-specific chipselect routine. + */ + custom_cs = (!master->use_gpio_descriptors || + (master->flags & SPI_MASTER_GPIO_SS)); + + if (custom_cs && !bitbang->chipselect) return -EINVAL; mutex_init(&bitbang->lock); @@ -344,7 +356,12 @@ int spi_bitbang_init(struct spi_bitbang *bitbang) master->prepare_transfer_hardware = spi_bitbang_prepare_hardware; master->unprepare_transfer_hardware = spi_bitbang_unprepare_hardware; master->transfer_one = spi_bitbang_transfer_one; - master->set_cs = spi_bitbang_set_cs; + /* + * When using GPIO descriptors, the ->set_cs() callback doesn't even + * get called unless SPI_MASTER_GPIO_SS is set. 
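+ * Editorial note, not in the original comment: combined with the
+ * custom_cs test above, ->set_cs() is left unset exactly when the SPI
+ * core drives the chipselect GPIO descriptor itself, i.e. when
+ * use_gpio_descriptors is set and SPI_MASTER_GPIO_SS is clear.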
+ */ + if (custom_cs) + master->set_cs = spi_bitbang_set_cs; if (!bitbang->txrx_bufs) { bitbang->use_dma = 0; diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c index 2663bb12d9ce..0d86c37e0aeb 100644 --- a/drivers/spi/spi-dw-mid.c +++ b/drivers/spi/spi-dw-mid.c @@ -301,7 +301,7 @@ int dw_spi_mid_init(struct dw_spi *dws) void __iomem *clk_reg; u32 clk_cdiv; - clk_reg = ioremap_nocache(MRST_CLK_SPI_REG, 16); + clk_reg = ioremap(MRST_CLK_SPI_REG, 16); if (!clk_reg) return -ENOMEM; diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c index 5a25da377119..31e3f866d11a 100644 --- a/drivers/spi/spi-dw.c +++ b/drivers/spi/spi-dw.c @@ -297,6 +297,9 @@ static int dw_spi_transfer_one(struct spi_controller *master, dws->len = transfer->len; spin_unlock_irqrestore(&dws->buf_lock, flags); + /* Ensure dw->rx and dw->rx_end are visible */ + smp_mb(); + spi_enable_chip(dws, 0); /* Handle per transfer options for bpw and speed */ @@ -469,7 +472,8 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) struct spi_controller *master; int ret; - BUG_ON(dws == NULL); + if (!dws) + return -EINVAL; master = spi_alloc_master(dev, 0); if (!master) diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 8428b69c858b..6ec2dcb8c57a 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -396,17 +396,17 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr) if (!dma) return -ENOMEM; - dma->chan_rx = dma_request_slave_channel(dev, "rx"); - if (!dma->chan_rx) { + dma->chan_rx = dma_request_chan(dev, "rx"); + if (IS_ERR(dma->chan_rx)) { dev_err(dev, "rx dma channel not available\n"); - ret = -ENODEV; + ret = PTR_ERR(dma->chan_rx); return ret; } - dma->chan_tx = dma_request_slave_channel(dev, "tx"); - if (!dma->chan_tx) { + dma->chan_tx = dma_request_chan(dev, "tx"); + if (IS_ERR(dma->chan_tx)) { dev_err(dev, "tx dma channel not available\n"); - ret = -ENODEV; + ret = PTR_ERR(dma->chan_tx); goto err_tx_channel; } diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c index 2cc0ddb4a988..d0b8cc741a24 100644 --- a/drivers/spi/spi-fsl-lpspi.c +++ b/drivers/spi/spi-fsl-lpspi.c @@ -469,9 +469,9 @@ static int fsl_lpspi_setup_transfer(struct spi_controller *controller, fsl_lpspi->watermark = fsl_lpspi->txfifosize; if (fsl_lpspi_can_dma(controller, spi, t)) - fsl_lpspi->usedma = 1; + fsl_lpspi->usedma = true; else - fsl_lpspi->usedma = 0; + fsl_lpspi->usedma = false; return fsl_lpspi_config(fsl_lpspi); } @@ -862,6 +862,22 @@ static int fsl_lpspi_probe(struct platform_device *pdev) fsl_lpspi->dev = &pdev->dev; fsl_lpspi->is_slave = is_slave; + controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32); + controller->transfer_one = fsl_lpspi_transfer_one; + controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware; + controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware; + controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX; + controller->dev.of_node = pdev->dev.of_node; + controller->bus_num = pdev->id; + controller->slave_abort = fsl_lpspi_slave_abort; + + ret = devm_spi_register_controller(&pdev->dev, controller); + if (ret < 0) { + dev_err(&pdev->dev, "spi_register_controller error.\n"); + goto out_controller_put; + } + if (!fsl_lpspi->is_slave) { for (i = 0; i < controller->num_chipselect; i++) { int cs_gpio = of_get_named_gpio(np, "cs-gpios", i); @@ -885,16 +901,6 @@ static int fsl_lpspi_probe(struct platform_device *pdev) 
controller->prepare_message = fsl_lpspi_prepare_message; } - controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32); - controller->transfer_one = fsl_lpspi_transfer_one; - controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware; - controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware; - controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; - controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX; - controller->dev.of_node = pdev->dev.of_node; - controller->bus_num = pdev->id; - controller->slave_abort = fsl_lpspi_slave_abort; - init_completion(&fsl_lpspi->xfer_done); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -952,12 +958,6 @@ static int fsl_lpspi_probe(struct platform_device *pdev) if (ret < 0) dev_err(&pdev->dev, "dma setup error %d, use pio\n", ret); - ret = devm_spi_register_controller(&pdev->dev, controller); - if (ret < 0) { - dev_err(&pdev->dev, "spi_register_controller error.\n"); - goto out_controller_put; - } - return 0; out_controller_put: diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c index 79b1558b74b8..e8a499cd1f13 100644 --- a/drivers/spi/spi-fsl-qspi.c +++ b/drivers/spi/spi-fsl-qspi.c @@ -410,7 +410,7 @@ static bool fsl_qspi_supports_op(struct spi_mem *mem, op->data.nbytes > q->devtype_data->txfifo) return false; - return true; + return spi_mem_default_supports_op(mem, op); } static void fsl_qspi_prepare_lut(struct fsl_qspi *q, diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c index fb4159ad6bf6..3b81772fea0d 100644 --- a/drivers/spi/spi-fsl-spi.c +++ b/drivers/spi/spi-fsl-spi.c @@ -706,8 +706,8 @@ static int of_fsl_spi_probe(struct platform_device *ofdev) struct device_node *np = ofdev->dev.of_node; struct spi_master *master; struct resource mem; - int irq = 0, type; - int ret = -ENOMEM; + int irq, type; + int ret; ret = of_mpc8xxx_spi_probe(ofdev); if (ret) @@ -722,10 +722,8 @@ static int of_fsl_spi_probe(struct platform_device *ofdev) if (spisel_boot) { pinfo->immr_spi_cs = ioremap(get_immrbase() + IMMR_SPI_CS_OFFSET, 4); - if (!pinfo->immr_spi_cs) { - ret = -ENOMEM; - goto err; - } + if (!pinfo->immr_spi_cs) + return -ENOMEM; } #endif /* @@ -744,24 +742,15 @@ static int of_fsl_spi_probe(struct platform_device *ofdev) ret = of_address_to_resource(np, 0, &mem); if (ret) - goto err; + return ret; irq = platform_get_irq(ofdev, 0); - if (irq < 0) { - ret = irq; - goto err; - } + if (irq < 0) + return irq; master = fsl_spi_probe(dev, &mem, irq); - if (IS_ERR(master)) { - ret = PTR_ERR(master); - goto err; - } - - return 0; -err: - return ret; + return PTR_ERR_OR_ZERO(master); } static int of_fsl_spi_remove(struct platform_device *ofdev) diff --git a/drivers/spi/spi-hisi-sfc-v3xx.c b/drivers/spi/spi-hisi-sfc-v3xx.c new file mode 100644 index 000000000000..4cf8fc80a7b7 --- /dev/null +++ b/drivers/spi/spi-hisi-sfc-v3xx.c @@ -0,0 +1,284 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// HiSilicon SPI NOR V3XX Flash Controller Driver for hi16xx chipsets +// +// Copyright (c) 2019 HiSilicon Technologies Co., Ltd. 
+// Author: John Garry <john.garry@huawei.com> + +#include <linux/acpi.h> +#include <linux/bitops.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/spi/spi.h> +#include <linux/spi/spi-mem.h> + +#define HISI_SFC_V3XX_VERSION (0x1f8) + +#define HISI_SFC_V3XX_CMD_CFG (0x300) +#define HISI_SFC_V3XX_CMD_CFG_DATA_CNT_OFF 9 +#define HISI_SFC_V3XX_CMD_CFG_RW_MSK BIT(8) +#define HISI_SFC_V3XX_CMD_CFG_DATA_EN_MSK BIT(7) +#define HISI_SFC_V3XX_CMD_CFG_DUMMY_CNT_OFF 4 +#define HISI_SFC_V3XX_CMD_CFG_ADDR_EN_MSK BIT(3) +#define HISI_SFC_V3XX_CMD_CFG_CS_SEL_OFF 1 +#define HISI_SFC_V3XX_CMD_CFG_START_MSK BIT(0) +#define HISI_SFC_V3XX_CMD_INS (0x308) +#define HISI_SFC_V3XX_CMD_ADDR (0x30c) +#define HISI_SFC_V3XX_CMD_DATABUF0 (0x400) + +struct hisi_sfc_v3xx_host { + struct device *dev; + void __iomem *regbase; + int max_cmd_dword; +}; + +#define HISI_SFC_V3XX_WAIT_TIMEOUT_US 1000000 +#define HISI_SFC_V3XX_WAIT_POLL_INTERVAL_US 10 + +static int hisi_sfc_v3xx_wait_cmd_idle(struct hisi_sfc_v3xx_host *host) +{ + u32 reg; + + return readl_poll_timeout(host->regbase + HISI_SFC_V3XX_CMD_CFG, reg, + !(reg & HISI_SFC_V3XX_CMD_CFG_START_MSK), + HISI_SFC_V3XX_WAIT_POLL_INTERVAL_US, + HISI_SFC_V3XX_WAIT_TIMEOUT_US); +} + +static int hisi_sfc_v3xx_adjust_op_size(struct spi_mem *mem, + struct spi_mem_op *op) +{ + struct spi_device *spi = mem->spi; + struct hisi_sfc_v3xx_host *host; + uintptr_t addr = (uintptr_t)op->data.buf.in; + int max_byte_count; + + host = spi_controller_get_devdata(spi->master); + + max_byte_count = host->max_cmd_dword * 4; + + if (!IS_ALIGNED(addr, 4) && op->data.nbytes >= 4) + op->data.nbytes = 4 - (addr % 4); + else if (op->data.nbytes > max_byte_count) + op->data.nbytes = max_byte_count; + + return 0; +} + +/* + * memcpy_{to,from}io doesn't guarantee 32b accesses - which we require for the + * DATABUF registers - so use __io{read,write}32_copy when possible. For + * trailing bytes, copy them byte-by-byte from the DATABUF register, as we + * can't clobber outside the source/dest buffer. + * + * For efficient data read/write, we try to put any start 32b unaligned data + * into a separate transaction in hisi_sfc_v3xx_adjust_op_size(). 
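+ * Editorial worked example, not in the original comment: a 10-byte
+ * read whose buffer starts at addr % 4 == 2 is clipped to 4 - 2 = 2
+ * bytes by hisi_sfc_v3xx_adjust_op_size(), so the remaining 8 bytes
+ * begin 32-bit aligned and can go through __ioread32_copy().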
+ */ +static void hisi_sfc_v3xx_read_databuf(struct hisi_sfc_v3xx_host *host, + u8 *to, unsigned int len) +{ + void __iomem *from; + int i; + + from = host->regbase + HISI_SFC_V3XX_CMD_DATABUF0; + + if (IS_ALIGNED((uintptr_t)to, 4)) { + int words = len / 4; + + __ioread32_copy(to, from, words); + + len -= words * 4; + if (len) { + u32 val; + + to += words * 4; + from += words * 4; + + val = __raw_readl(from); + + for (i = 0; i < len; i++, val >>= 8, to++) + *to = (u8)val; + } + } else { + for (i = 0; i < DIV_ROUND_UP(len, 4); i++, from += 4) { + u32 val = __raw_readl(from); + int j; + + for (j = 0; j < 4 && (j + (i * 4) < len); + to++, val >>= 8, j++) + *to = (u8)val; + } + } +} + +static void hisi_sfc_v3xx_write_databuf(struct hisi_sfc_v3xx_host *host, + const u8 *from, unsigned int len) +{ + void __iomem *to; + int i; + + to = host->regbase + HISI_SFC_V3XX_CMD_DATABUF0; + + if (IS_ALIGNED((uintptr_t)from, 4)) { + int words = len / 4; + + __iowrite32_copy(to, from, words); + + len -= words * 4; + if (len) { + u32 val = 0; + + to += words * 4; + from += words * 4; + + for (i = 0; i < len; i++, from++) + val |= *from << i * 8; + __raw_writel(val, to); + } + + } else { + for (i = 0; i < DIV_ROUND_UP(len, 4); i++, to += 4) { + u32 val = 0; + int j; + + for (j = 0; j < 4 && (j + (i * 4) < len); + from++, j++) + val |= *from << j * 8; + __raw_writel(val, to); + } + } +} + +static int hisi_sfc_v3xx_generic_exec_op(struct hisi_sfc_v3xx_host *host, + const struct spi_mem_op *op, + u8 chip_select) +{ + int ret, len = op->data.nbytes; + u32 config = 0; + + if (op->addr.nbytes) + config |= HISI_SFC_V3XX_CMD_CFG_ADDR_EN_MSK; + + if (op->data.dir != SPI_MEM_NO_DATA) { + config |= (len - 1) << HISI_SFC_V3XX_CMD_CFG_DATA_CNT_OFF; + config |= HISI_SFC_V3XX_CMD_CFG_DATA_EN_MSK; + } + + if (op->data.dir == SPI_MEM_DATA_OUT) + hisi_sfc_v3xx_write_databuf(host, op->data.buf.out, len); + else if (op->data.dir == SPI_MEM_DATA_IN) + config |= HISI_SFC_V3XX_CMD_CFG_RW_MSK; + + config |= op->dummy.nbytes << HISI_SFC_V3XX_CMD_CFG_DUMMY_CNT_OFF | + chip_select << HISI_SFC_V3XX_CMD_CFG_CS_SEL_OFF | + HISI_SFC_V3XX_CMD_CFG_START_MSK; + + writel(op->addr.val, host->regbase + HISI_SFC_V3XX_CMD_ADDR); + writel(op->cmd.opcode, host->regbase + HISI_SFC_V3XX_CMD_INS); + + writel(config, host->regbase + HISI_SFC_V3XX_CMD_CFG); + + ret = hisi_sfc_v3xx_wait_cmd_idle(host); + if (ret) + return ret; + + if (op->data.dir == SPI_MEM_DATA_IN) + hisi_sfc_v3xx_read_databuf(host, op->data.buf.in, len); + + return 0; +} + +static int hisi_sfc_v3xx_exec_op(struct spi_mem *mem, + const struct spi_mem_op *op) +{ + struct hisi_sfc_v3xx_host *host; + struct spi_device *spi = mem->spi; + u8 chip_select = spi->chip_select; + + host = spi_controller_get_devdata(spi->master); + + return hisi_sfc_v3xx_generic_exec_op(host, op, chip_select); +} + +static const struct spi_controller_mem_ops hisi_sfc_v3xx_mem_ops = { + .adjust_op_size = hisi_sfc_v3xx_adjust_op_size, + .exec_op = hisi_sfc_v3xx_exec_op, +}; + +static int hisi_sfc_v3xx_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct hisi_sfc_v3xx_host *host; + struct spi_controller *ctlr; + u32 version; + int ret; + + ctlr = spi_alloc_master(&pdev->dev, sizeof(*host)); + if (!ctlr) + return -ENOMEM; + + ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | + SPI_TX_DUAL | SPI_TX_QUAD; + + host = spi_controller_get_devdata(ctlr); + host->dev = dev; + + platform_set_drvdata(pdev, host); + + host->regbase = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(host->regbase)) { 
+		ret = PTR_ERR(host->regbase);
+		goto err_put_master;
+	}
+
+	ctlr->bus_num = -1;
+	ctlr->num_chipselect = 1;
+	ctlr->mem_ops = &hisi_sfc_v3xx_mem_ops;
+
+	version = readl(host->regbase + HISI_SFC_V3XX_VERSION);
+
+	switch (version) {
+	case 0x351:
+		host->max_cmd_dword = 64;
+		break;
+	default:
+		host->max_cmd_dword = 16;
+		break;
+	}
+
+	ret = devm_spi_register_controller(dev, ctlr);
+	if (ret)
+		goto err_put_master;
+
+	dev_info(&pdev->dev, "hw version 0x%x\n", version);
+
+	return 0;
+
+err_put_master:
+	spi_master_put(ctlr);
+	return ret;
+}
+
+#if IS_ENABLED(CONFIG_ACPI)
+static const struct acpi_device_id hisi_sfc_v3xx_acpi_ids[] = {
+	{"HISI0341", 0},
+	{}
+};
+MODULE_DEVICE_TABLE(acpi, hisi_sfc_v3xx_acpi_ids);
+#endif
+
+static struct platform_driver hisi_sfc_v3xx_spi_driver = {
+	.driver = {
+		.name = "hisi-sfc-v3xx",
+		.acpi_match_table = ACPI_PTR(hisi_sfc_v3xx_acpi_ids),
+	},
+	.probe = hisi_sfc_v3xx_probe,
+};
+
+module_platform_driver(hisi_sfc_v3xx_spi_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
+MODULE_DESCRIPTION("HiSilicon SPI NOR V3XX Flash Controller Driver for hi16xx chipsets");
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index f4a8f470aecc..8543f5ed1099 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -666,8 +666,22 @@ static int img_spfi_probe(struct platform_device *pdev)
 	master->unprepare_message = img_spfi_unprepare;
 	master->handle_err = img_spfi_handle_err;
 
-	spfi->tx_ch = dma_request_slave_channel(spfi->dev, "tx");
-	spfi->rx_ch = dma_request_slave_channel(spfi->dev, "rx");
+	spfi->tx_ch = dma_request_chan(spfi->dev, "tx");
+	if (IS_ERR(spfi->tx_ch)) {
+		ret = PTR_ERR(spfi->tx_ch);
+		spfi->tx_ch = NULL;
+		if (ret == -EPROBE_DEFER)
+			goto disable_pm;
+	}
+
+	spfi->rx_ch = dma_request_chan(spfi->dev, "rx");
+	if (IS_ERR(spfi->rx_ch)) {
+		ret = PTR_ERR(spfi->rx_ch);
+		spfi->rx_ch = NULL;
+		if (ret == -EPROBE_DEFER)
+			goto disable_pm;
+	}
+
 	if (!spfi->tx_ch || !spfi->rx_ch) {
 		if (spfi->tx_ch)
 			dma_release_channel(spfi->tx_ch);
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 49f0099db0cb..f4f28a400a96 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -1230,9 +1230,9 @@ static int spi_imx_setupxfer(struct spi_device *spi,
 	}
 
 	if (spi_imx_can_dma(spi_imx->bitbang.master, spi, t))
-		spi_imx->usedma = 1;
+		spi_imx->usedma = true;
 	else
-		spi_imx->usedma = 0;
+		spi_imx->usedma = false;
 
 	if (is_imx53_ecspi(spi_imx) && spi_imx->slave_mode) {
 		spi_imx->rx = mx53_ecspi_rx_slave;
diff --git a/drivers/spi/spi-jcore.c b/drivers/spi/spi-jcore.c
index cc49fa41fbab..bba10f030e33 100644
--- a/drivers/spi/spi-jcore.c
+++ b/drivers/spi/spi-jcore.c
@@ -170,7 +170,7 @@ static int jcore_spi_probe(struct platform_device *pdev)
 	if (!devm_request_mem_region(&pdev->dev, res->start,
 				     resource_size(res), pdev->name))
 		goto exit_busy;
-	hw->base = devm_ioremap_nocache(&pdev->dev, res->start,
+	hw->base = devm_ioremap(&pdev->dev, res->start,
 				resource_size(res));
 	if (!hw->base)
 		goto exit_busy;
diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
index f3f10443f9e2..7f5680fe2568 100644
--- a/drivers/spi/spi-meson-spicc.c
+++ b/drivers/spi/spi-meson-spicc.c
@@ -19,7 +19,6 @@
 #include <linux/types.h>
 #include <linux/interrupt.h>
 #include <linux/reset.h>
-#include <linux/gpio.h>
 
 /*
  * The Meson SPICC controller could support DMA based transfers, but is not
@@ -467,35 +466,14 @@ static int
 meson_spicc_setup(struct spi_device *spi)
 {
-	int ret = 0;
-
 	if (!spi->controller_state)
 		spi->controller_state = spi_master_get_devdata(spi->master);
-	else if (gpio_is_valid(spi->cs_gpio))
-		goto out_gpio;
-	else if (spi->cs_gpio == -ENOENT)
-		return 0;
-
-	if (gpio_is_valid(spi->cs_gpio)) {
-		ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev));
-		if (ret) {
-			dev_err(&spi->dev, "failed to request cs gpio\n");
-			return ret;
-		}
-	}
-
-out_gpio:
-	ret = gpio_direction_output(spi->cs_gpio,
-				    !(spi->mode & SPI_CS_HIGH));
 
-	return ret;
+	return 0;
 }
 
 static void meson_spicc_cleanup(struct spi_device *spi)
 {
-	if (gpio_is_valid(spi->cs_gpio))
-		gpio_free(spi->cs_gpio);
-
 	spi->controller_state = NULL;
 }
 
@@ -564,6 +542,7 @@ static int meson_spicc_probe(struct platform_device *pdev)
 	master->prepare_message = meson_spicc_prepare_message;
 	master->unprepare_transfer_hardware = meson_spicc_unprepare_transfer;
 	master->transfer_one = meson_spicc_transfer_one;
+	master->use_gpio_descriptors = true;
 
 	/* Setup max rate according to the Meson GX datasheet */
 	if ((rate >> 2) > SPICC_MAX_FREQ)
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index 996c1c8a9c71..dce85ee07cd0 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -590,10 +590,10 @@ static int mxs_spi_probe(struct platform_device *pdev)
 	if (ret)
 		goto out_master_free;
 
-	ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx");
-	if (!ssp->dmach) {
+	ssp->dmach = dma_request_chan(&pdev->dev, "rx-tx");
+	if (IS_ERR(ssp->dmach)) {
 		dev_err(ssp->dev, "Failed to request DMA\n");
-		ret = -ENODEV;
+		ret = PTR_ERR(ssp->dmach);
 		goto out_master_free;
 	}
 
diff --git a/drivers/spi/spi-npcm-fiu.c b/drivers/spi/spi-npcm-fiu.c
index cb52fd8008d0..d25ee32862e0 100644
--- a/drivers/spi/spi-npcm-fiu.c
+++ b/drivers/spi/spi-npcm-fiu.c
@@ -603,7 +603,7 @@ static int npcm_fiu_dirmap_create(struct spi_mem_dirmap_desc *desc)
 
 	if (!chip->flash_region_mapped_ptr) {
 		chip->flash_region_mapped_ptr =
-			devm_ioremap_nocache(fiu->dev, (fiu->res_mem->start +
+			devm_ioremap(fiu->dev, (fiu->res_mem->start +
 							(fiu->info->max_map_size *
 						    desc->mem->spi->chip_select)),
 					     (u32)desc->info.length);
diff --git a/drivers/spi/spi-npcm-pspi.c b/drivers/spi/spi-npcm-pspi.c
index fe624731c74c..87cd0233c60b 100644
--- a/drivers/spi/spi-npcm-pspi.c
+++ b/drivers/spi/spi-npcm-pspi.c
@@ -12,6 +12,7 @@
 #include <linux/spi/spi.h>
 #include <linux/gpio.h>
 #include <linux/of_gpio.h>
+#include <linux/reset.h>
 
 #include <asm/unaligned.h>
 
@@ -20,7 +21,7 @@
 
 struct npcm_pspi {
 	struct completion xfer_done;
-	struct regmap *rst_regmap;
+	struct reset_control *reset;
 	struct spi_master *master;
 	unsigned int tx_bytes;
 	unsigned int rx_bytes;
@@ -59,12 +60,6 @@ struct npcm_pspi {
 #define NPCM_PSPI_MIN_CLK_DIVIDER 4
 #define NPCM_PSPI_DEFAULT_CLK     25000000
 
-/* reset register */
-#define NPCM7XX_IPSRST2_OFFSET 0x24
-
-#define NPCM7XX_PSPI1_RESET BIT(22)
-#define NPCM7XX_PSPI2_RESET BIT(23)
-
 static inline unsigned int bytes_per_word(unsigned int bits)
 {
 	return bits <= 8 ? 1 : 2;
@@ -178,6 +173,13 @@ static void npcm_pspi_setup_transfer(struct spi_device *spi,
 		priv->mode = spi->mode;
 	}
 
+	/*
+	 * If the transfer length is even and the word size is 8 bits,
+	 * handle the data as a 16-bits-per-word transfer.
+	 */
+	if (priv->bits_per_word == 8 && !(t->len & 0x1))
+		t->bits_per_word = 16;
+
 	if (!priv->is_save_param || priv->bits_per_word != t->bits_per_word) {
 		npcm_pspi_set_transfer_size(priv, t->bits_per_word);
 		priv->bits_per_word = t->bits_per_word;
@@ -195,6 +197,7 @@ static void npcm_pspi_setup_transfer(struct spi_device *spi,
 static void npcm_pspi_send(struct npcm_pspi *priv)
 {
 	int wsize;
+	u16 val;
 
 	wsize = min(bytes_per_word(priv->bits_per_word), priv->tx_bytes);
 	priv->tx_bytes -= wsize;
@@ -204,17 +207,18 @@ static void npcm_pspi_send(struct npcm_pspi *priv)
 
 	switch (wsize) {
 	case 1:
-		iowrite8(*priv->tx_buf, NPCM_PSPI_DATA + priv->base);
+		val = *priv->tx_buf++;
+		iowrite8(val, NPCM_PSPI_DATA + priv->base);
 		break;
 	case 2:
-		iowrite16(*priv->tx_buf, NPCM_PSPI_DATA + priv->base);
+		val = *priv->tx_buf++;
+		val = *priv->tx_buf++ | (val << 8);
+		iowrite16(val, NPCM_PSPI_DATA + priv->base);
 		break;
 	default:
 		WARN_ON_ONCE(1);
 		return;
 	}
-
-	priv->tx_buf += wsize;
 }
 
 static void npcm_pspi_recv(struct npcm_pspi *priv)
@@ -230,18 +234,17 @@ static void npcm_pspi_recv(struct npcm_pspi *priv)
 
 	switch (rsize) {
 	case 1:
-		val = ioread8(priv->base + NPCM_PSPI_DATA);
+		*priv->rx_buf++ = ioread8(priv->base + NPCM_PSPI_DATA);
 		break;
 	case 2:
 		val = ioread16(priv->base + NPCM_PSPI_DATA);
+		*priv->rx_buf++ = (val >> 8);
+		*priv->rx_buf++ = val & 0xff;
 		break;
 	default:
 		WARN_ON_ONCE(1);
 		return;
 	}
-
-	*priv->rx_buf = val;
-	priv->rx_buf += rsize;
 }
 
 static int npcm_pspi_transfer_one(struct spi_master *master,
@@ -285,9 +288,9 @@ static int npcm_pspi_unprepare_transfer_hardware(struct spi_master *master)
 
 static void npcm_pspi_reset_hw(struct npcm_pspi *priv)
 {
-	regmap_write(priv->rst_regmap, NPCM7XX_IPSRST2_OFFSET,
-		     NPCM7XX_PSPI1_RESET << priv->id);
-	regmap_write(priv->rst_regmap, NPCM7XX_IPSRST2_OFFSET, 0x0);
+	reset_control_assert(priv->reset);
+	udelay(5);
+	reset_control_deassert(priv->reset);
 }
 
 static irqreturn_t npcm_pspi_handler(int irq, void *dev_id)
@@ -351,10 +354,6 @@ static int npcm_pspi_probe(struct platform_device *pdev)
 	if (num_cs < 0)
 		return num_cs;
 
-	pdev->id = of_alias_get_id(np, "spi");
-	if (pdev->id < 0)
-		pdev->id = 0;
-
 	master = spi_alloc_master(&pdev->dev, sizeof(*priv));
 	if (!master)
 		return -ENOMEM;
@@ -364,7 +363,6 @@ static int npcm_pspi_probe(struct platform_device *pdev)
 	priv = spi_master_get_devdata(master);
 	priv->master = master;
 	priv->is_save_param = false;
-	priv->id = pdev->id;
 
 	priv->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(priv->base)) {
@@ -389,11 +387,10 @@ static int npcm_pspi_probe(struct platform_device *pdev)
 		goto out_disable_clk;
 	}
 
-	priv->rst_regmap =
-		syscon_regmap_lookup_by_compatible("nuvoton,npcm750-rst");
-	if (IS_ERR(priv->rst_regmap)) {
-		dev_err(&pdev->dev, "failed to find nuvoton,npcm750-rst\n");
-		return PTR_ERR(priv->rst_regmap);
+	priv->reset = devm_reset_control_get(&pdev->dev, NULL);
+	if (IS_ERR(priv->reset)) {
+		ret = PTR_ERR(priv->reset);
+		goto out_disable_clk;
 	}
 
 	/* reset SPI-HW block */
@@ -414,7 +411,7 @@ static int npcm_pspi_probe(struct platform_device *pdev)
 	master->min_speed_hz = DIV_ROUND_UP(clk_hz, NPCM_PSPI_MAX_CLK_DIVIDER);
 	master->mode_bits = SPI_CPHA | SPI_CPOL;
 	master->dev.of_node = pdev->dev.of_node;
-	master->bus_num = pdev->id;
+	master->bus_num = -1;
 	master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
 	master->transfer_one = npcm_pspi_transfer_one;
 	master->prepare_transfer_hardware =
@@ -447,7 +444,7 @@ static int npcm_pspi_probe(struct platform_device *pdev)
 	if (ret)
 		goto out_disable_clk;
 
-	pr_info("NPCM
Peripheral SPI %d probed\n", pdev->id); + pr_info("NPCM Peripheral SPI %d probed\n", master->bus_num); return 0; diff --git a/drivers/spi/spi-oc-tiny.c b/drivers/spi/spi-oc-tiny.c index e2331eb7b47a..9df7c5979c29 100644 --- a/drivers/spi/spi-oc-tiny.c +++ b/drivers/spi/spi-oc-tiny.c @@ -20,7 +20,6 @@ #include <linux/spi/spi_bitbang.h> #include <linux/spi/spi_oc_tiny.h> #include <linux/io.h> -#include <linux/gpio.h> #include <linux/of.h> #define DRV_NAME "spi_oc_tiny" @@ -50,8 +49,6 @@ struct tiny_spi { unsigned int txc, rxc; const u8 *txp; u8 *rxp; - int gpio_cs_count; - int *gpio_cs; }; static inline struct tiny_spi *tiny_spi_to_hw(struct spi_device *sdev) @@ -66,16 +63,6 @@ static unsigned int tiny_spi_baud(struct spi_device *spi, unsigned int hz) return min(DIV_ROUND_UP(hw->freq, hz * 2), (1U << hw->baudwidth)) - 1; } -static void tiny_spi_chipselect(struct spi_device *spi, int is_active) -{ - struct tiny_spi *hw = tiny_spi_to_hw(spi); - - if (hw->gpio_cs_count > 0) { - gpio_set_value(hw->gpio_cs[spi->chip_select], - (spi->mode & SPI_CS_HIGH) ? is_active : !is_active); - } -} - static int tiny_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) { @@ -203,24 +190,10 @@ static int tiny_spi_of_probe(struct platform_device *pdev) { struct tiny_spi *hw = platform_get_drvdata(pdev); struct device_node *np = pdev->dev.of_node; - unsigned int i; u32 val; if (!np) return 0; - hw->gpio_cs_count = of_gpio_count(np); - if (hw->gpio_cs_count > 0) { - hw->gpio_cs = devm_kcalloc(&pdev->dev, - hw->gpio_cs_count, sizeof(unsigned int), - GFP_KERNEL); - if (!hw->gpio_cs) - return -ENOMEM; - } - for (i = 0; i < hw->gpio_cs_count; i++) { - hw->gpio_cs[i] = of_get_gpio_flags(np, i, NULL); - if (hw->gpio_cs[i] < 0) - return -ENODEV; - } hw->bitbang.master->dev.of_node = pdev->dev.of_node; if (!of_property_read_u32(np, "clock-frequency", &val)) hw->freq = val; @@ -240,7 +213,6 @@ static int tiny_spi_probe(struct platform_device *pdev) struct tiny_spi_platform_data *platp = dev_get_platdata(&pdev->dev); struct tiny_spi *hw; struct spi_master *master; - unsigned int i; int err = -ENODEV; master = spi_alloc_master(&pdev->dev, sizeof(struct tiny_spi)); @@ -249,9 +221,9 @@ static int tiny_spi_probe(struct platform_device *pdev) /* setup the master state. 
*/ master->bus_num = pdev->id; - master->num_chipselect = 255; master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; master->setup = tiny_spi_setup; + master->use_gpio_descriptors = true; hw = spi_master_get_devdata(master); platform_set_drvdata(pdev, hw); @@ -259,7 +231,6 @@ static int tiny_spi_probe(struct platform_device *pdev) /* setup the state for the bitbang driver */ hw->bitbang.master = master; hw->bitbang.setup_transfer = tiny_spi_setup_transfer; - hw->bitbang.chipselect = tiny_spi_chipselect; hw->bitbang.txrx_bufs = tiny_spi_txrx_bufs; /* find and map our resources */ @@ -279,12 +250,6 @@ static int tiny_spi_probe(struct platform_device *pdev) } /* find platform data */ if (platp) { - hw->gpio_cs_count = platp->gpio_cs_count; - hw->gpio_cs = platp->gpio_cs; - if (platp->gpio_cs_count && !platp->gpio_cs) { - err = -EBUSY; - goto exit; - } hw->freq = platp->freq; hw->baudwidth = platp->baudwidth; } else { @@ -292,13 +257,6 @@ static int tiny_spi_probe(struct platform_device *pdev) if (err) goto exit; } - for (i = 0; i < hw->gpio_cs_count; i++) { - err = gpio_request(hw->gpio_cs[i], dev_name(&pdev->dev)); - if (err) - goto exit_gpio; - gpio_direction_output(hw->gpio_cs[i], 1); - } - hw->bitbang.master->num_chipselect = max(1, hw->gpio_cs_count); /* register our spi controller */ err = spi_bitbang_start(&hw->bitbang); @@ -308,9 +266,6 @@ static int tiny_spi_probe(struct platform_device *pdev) return 0; -exit_gpio: - while (i-- > 0) - gpio_free(hw->gpio_cs[i]); exit: spi_master_put(master); return err; @@ -320,11 +275,8 @@ static int tiny_spi_remove(struct platform_device *pdev) { struct tiny_spi *hw = platform_get_drvdata(pdev); struct spi_master *master = hw->bitbang.master; - unsigned int i; spi_bitbang_stop(&hw->bitbang); - for (i = 0; i < hw->gpio_cs_count; i++) - gpio_free(hw->gpio_cs[i]); spi_master_put(master); return 0; } diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 9071333ebdd8..4c7a71f0fb3e 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -461,6 +461,16 @@ int pxa2xx_spi_flush(struct driver_data *drv_data) return limit; } +static void pxa2xx_spi_off(struct driver_data *drv_data) +{ + /* On MMP, disabling SSE seems to corrupt the rx fifo */ + if (drv_data->ssp_type == MMP2_SSP) + return; + + pxa2xx_spi_write(drv_data, SSCR0, + pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE); +} + static int null_writer(struct driver_data *drv_data) { u8 n_bytes = drv_data->n_bytes; @@ -587,8 +597,7 @@ static void int_error_stop(struct driver_data *drv_data, const char* msg) if (!pxa25x_ssp_comp(drv_data)) pxa2xx_spi_write(drv_data, SSTO, 0); pxa2xx_spi_flush(drv_data); - pxa2xx_spi_write(drv_data, SSCR0, - pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE); + pxa2xx_spi_off(drv_data); dev_err(&drv_data->pdev->dev, "%s\n", msg); @@ -686,8 +695,7 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data) static void handle_bad_msg(struct driver_data *drv_data) { - pxa2xx_spi_write(drv_data, SSCR0, - pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE); + pxa2xx_spi_off(drv_data); pxa2xx_spi_write(drv_data, SSCR1, pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1); if (!pxa25x_ssp_comp(drv_data)) @@ -1062,7 +1070,8 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller, || (pxa2xx_spi_read(drv_data, SSCR1) & change_mask) != (cr1 & change_mask)) { /* stop the SSP, and update the other bits */ - pxa2xx_spi_write(drv_data, SSCR0, cr0 & ~SSCR0_SSE); + if (drv_data->ssp_type != MMP2_SSP) + pxa2xx_spi_write(drv_data, SSCR0, cr0 
& ~SSCR0_SSE); if (!pxa25x_ssp_comp(drv_data)) pxa2xx_spi_write(drv_data, SSTO, chip->timeout); /* first set CR1 without interrupt and service enables */ @@ -1118,8 +1127,7 @@ static int pxa2xx_spi_slave_abort(struct spi_controller *controller) if (!pxa25x_ssp_comp(drv_data)) pxa2xx_spi_write(drv_data, SSTO, 0); pxa2xx_spi_flush(drv_data); - pxa2xx_spi_write(drv_data, SSCR0, - pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE); + pxa2xx_spi_off(drv_data); dev_dbg(&drv_data->pdev->dev, "transfer aborted\n"); @@ -1135,8 +1143,7 @@ static void pxa2xx_spi_handle_err(struct spi_controller *controller, struct driver_data *drv_data = spi_controller_get_devdata(controller); /* Disable the SSP */ - pxa2xx_spi_write(drv_data, SSCR0, - pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE); + pxa2xx_spi_off(drv_data); /* Clear and disable interrupts and service requests */ write_SSSR_CS(drv_data, drv_data->clear_sr); pxa2xx_spi_write(drv_data, SSCR1, @@ -1161,8 +1168,7 @@ static int pxa2xx_spi_unprepare_transfer(struct spi_controller *controller) struct driver_data *drv_data = spi_controller_get_devdata(controller); /* Disable the SSP now */ - pxa2xx_spi_write(drv_data, SSCR0, - pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE); + pxa2xx_spi_off(drv_data); return 0; } @@ -1423,6 +1429,9 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = { /* KBL-H */ { PCI_VDEVICE(INTEL, 0xa2a9), LPSS_SPT_SSP }, { PCI_VDEVICE(INTEL, 0xa2aa), LPSS_SPT_SSP }, + /* CML-V */ + { PCI_VDEVICE(INTEL, 0xa3a9), LPSS_SPT_SSP }, + { PCI_VDEVICE(INTEL, 0xa3aa), LPSS_SPT_SSP }, /* BXT A-Step */ { PCI_VDEVICE(INTEL, 0x0ac2), LPSS_BXT_SSP }, { PCI_VDEVICE(INTEL, 0x0ac4), LPSS_BXT_SSP }, diff --git a/drivers/spi/spi-qcom-qspi.c b/drivers/spi/spi-qcom-qspi.c index 250fd60e1678..3c4f83bf7084 100644 --- a/drivers/spi/spi-qcom-qspi.c +++ b/drivers/spi/spi-qcom-qspi.c @@ -137,7 +137,7 @@ enum qspi_clocks { struct qcom_qspi { void __iomem *base; struct device *dev; - struct clk_bulk_data clks[QSPI_NUM_CLKS]; + struct clk_bulk_data *clks; struct qspi_xfer xfer; /* Lock to protect xfer and IRQ accessed registers */ spinlock_t lock; @@ -445,6 +445,13 @@ static int qcom_qspi_probe(struct platform_device *pdev) goto exit_probe_master_put; } + ctrl->clks = devm_kcalloc(dev, QSPI_NUM_CLKS, + sizeof(*ctrl->clks), GFP_KERNEL); + if (!ctrl->clks) { + ret = -ENOMEM; + goto exit_probe_master_put; + } + ctrl->clks[QSPI_CLK_CORE].id = "core"; ctrl->clks[QSPI_CLK_IFACE].id = "iface"; ret = devm_clk_bulk_get(dev, QSPI_NUM_CLKS, ctrl->clks); diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c index 7222c7689c3c..85575d45901c 100644 --- a/drivers/spi/spi-rspi.c +++ b/drivers/spi/spi-rspi.c @@ -159,7 +159,7 @@ #define SPCMD_SPIMOD_DUAL SPCMD_SPIMOD0 #define SPCMD_SPIMOD_QUAD SPCMD_SPIMOD1 #define SPCMD_SPRW 0x0010 /* SPI Read/Write Access (Dual/Quad) */ -#define SPCMD_SSLA_MASK 0x0030 /* SSL Assert Signal Setting (RSPI) */ +#define SPCMD_SSLA(i) ((i) << 4) /* SSL Assert Signal Setting */ #define SPCMD_BRDV_MASK 0x000c /* Bit Rate Division Setting */ #define SPCMD_CPOL 0x0002 /* Clock Polarity Setting */ #define SPCMD_CPHA 0x0001 /* Clock Phase Setting */ @@ -242,6 +242,7 @@ struct spi_ops { u16 mode_bits; u16 flags; u16 fifo_size; + u8 num_hw_ss; }; /* @@ -426,8 +427,6 @@ static int qspi_set_receive_trigger(struct rspi_data *rspi, unsigned int len) return n; } -#define set_config_register(spi, n) spi->ops->set_config_register(spi, n) - static void rspi_enable_irq(const struct rspi_data *rspi, u8 enable) { rspi_write8(rspi, rspi_read8(rspi, 
RSPI_SPCR) | enable, RSPI_SPCR); @@ -620,9 +619,8 @@ no_dma_tx: dmaengine_terminate_all(rspi->ctlr->dma_rx); no_dma_rx: if (ret == -EAGAIN) { - pr_warn_once("%s %s: DMA not available, falling back to PIO\n", - dev_driver_string(&rspi->ctlr->dev), - dev_name(&rspi->ctlr->dev)); + dev_warn_once(&rspi->ctlr->dev, + "DMA not available, falling back to PIO\n"); } return ret; } @@ -936,12 +934,16 @@ static int rspi_prepare_message(struct spi_controller *ctlr, if (spi->mode & SPI_CPHA) rspi->spcmd |= SPCMD_CPHA; + /* Configure slave signal to assert */ + rspi->spcmd |= SPCMD_SSLA(spi->cs_gpiod ? rspi->ctlr->unused_native_cs + : spi->chip_select); + /* CMOS output mode and MOSI signal from previous transfer */ rspi->sppcr = 0; if (spi->mode & SPI_LOOP) rspi->sppcr |= SPPCR_SPLP; - set_config_register(rspi, 8); + rspi->ops->set_config_register(rspi, 8); if (msg->spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)) { @@ -1123,6 +1125,7 @@ static const struct spi_ops rspi_ops = { .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP, .flags = SPI_CONTROLLER_MUST_TX, .fifo_size = 8, + .num_hw_ss = 2, }; static const struct spi_ops rspi_rz_ops = { @@ -1131,6 +1134,7 @@ static const struct spi_ops rspi_rz_ops = { .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP, .flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX, .fifo_size = 8, /* 8 for TX, 32 for RX */ + .num_hw_ss = 1, }; static const struct spi_ops qspi_ops = { @@ -1141,6 +1145,7 @@ static const struct spi_ops qspi_ops = { SPI_RX_DUAL | SPI_RX_QUAD, .flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX, .fifo_size = 32, + .num_hw_ss = 1, }; #ifdef CONFIG_OF @@ -1256,6 +1261,8 @@ static int rspi_probe(struct platform_device *pdev) ctlr->mode_bits = ops->mode_bits; ctlr->flags = ops->flags; ctlr->dev.of_node = pdev->dev.of_node; + ctlr->use_gpio_descriptors = true; + ctlr->max_native_cs = rspi->ops->num_hw_ss; ret = platform_get_irq_byname_optional(pdev, "rx"); if (ret < 0) { @@ -1314,8 +1321,6 @@ error1: static const struct platform_device_id spi_driver_ids[] = { { "rspi", (kernel_ulong_t)&rspi_ops }, - { "rspi-rz", (kernel_ulong_t)&rspi_rz_ops }, - { "qspi", (kernel_ulong_t)&qspi_ops }, {}, }; diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index 8f134735291f..1c11a00a2c36 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c @@ -14,8 +14,6 @@ #include <linux/dma-mapping.h> #include <linux/dmaengine.h> #include <linux/err.h> -#include <linux/gpio.h> -#include <linux/gpio/consumer.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iopoll.h> @@ -55,7 +53,6 @@ struct sh_msiof_spi_priv { void *rx_dma_page; dma_addr_t tx_dma_addr; dma_addr_t rx_dma_addr; - unsigned short unused_ss; bool native_cs_inited; bool native_cs_high; bool slave_aborted; @@ -63,140 +60,140 @@ struct sh_msiof_spi_priv { #define MAX_SS 3 /* Maximum number of native chip selects */ -#define TMDR1 0x00 /* Transmit Mode Register 1 */ -#define TMDR2 0x04 /* Transmit Mode Register 2 */ -#define TMDR3 0x08 /* Transmit Mode Register 3 */ -#define RMDR1 0x10 /* Receive Mode Register 1 */ -#define RMDR2 0x14 /* Receive Mode Register 2 */ -#define RMDR3 0x18 /* Receive Mode Register 3 */ -#define TSCR 0x20 /* Transmit Clock Select Register */ -#define RSCR 0x22 /* Receive Clock Select Register (SH, A1, APE6) */ -#define CTR 0x28 /* Control Register */ -#define FCTR 0x30 /* FIFO Control Register */ -#define STR 0x40 /* Status Register */ -#define IER 0x44 /* Interrupt Enable Register */ -#define TDR1 0x48 /* Transmit Control 
Data Register 1 (SH, A1) */ -#define TDR2 0x4c /* Transmit Control Data Register 2 (SH, A1) */ -#define TFDR 0x50 /* Transmit FIFO Data Register */ -#define RDR1 0x58 /* Receive Control Data Register 1 (SH, A1) */ -#define RDR2 0x5c /* Receive Control Data Register 2 (SH, A1) */ -#define RFDR 0x60 /* Receive FIFO Data Register */ - -/* TMDR1 and RMDR1 */ -#define MDR1_TRMD BIT(31) /* Transfer Mode (1 = Master mode) */ -#define MDR1_SYNCMD_MASK GENMASK(29, 28) /* SYNC Mode */ -#define MDR1_SYNCMD_SPI (2 << 28)/* Level mode/SPI */ -#define MDR1_SYNCMD_LR (3 << 28)/* L/R mode */ -#define MDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */ -#define MDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */ -#define MDR1_DTDL_SHIFT 20 /* Data Pin Bit Delay for MSIOF_SYNC */ -#define MDR1_SYNCDL_SHIFT 16 /* Frame Sync Signal Timing Delay */ -#define MDR1_FLD_MASK GENMASK(3, 2) /* Frame Sync Signal Interval (0-3) */ -#define MDR1_FLD_SHIFT 2 -#define MDR1_XXSTP BIT(0) /* Transmission/Reception Stop on FIFO */ -/* TMDR1 */ -#define TMDR1_PCON BIT(30) /* Transfer Signal Connection */ -#define TMDR1_SYNCCH_MASK GENMASK(27, 26) /* Sync Signal Channel Select */ -#define TMDR1_SYNCCH_SHIFT 26 /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */ - -/* TMDR2 and RMDR2 */ -#define MDR2_BITLEN1(i) (((i) - 1) << 24) /* Data Size (8-32 bits) */ -#define MDR2_WDLEN1(i) (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1))) */ -#define MDR2_GRPMASK1 BIT(0) /* Group Output Mask 1 (SH, A1) */ - -/* TSCR and RSCR */ -#define SCR_BRPS_MASK GENMASK(12, 8) /* Prescaler Setting (1-32) */ -#define SCR_BRPS(i) (((i) - 1) << 8) -#define SCR_BRDV_MASK GENMASK(2, 0) /* Baud Rate Generator's Division Ratio */ -#define SCR_BRDV_DIV_2 0 -#define SCR_BRDV_DIV_4 1 -#define SCR_BRDV_DIV_8 2 -#define SCR_BRDV_DIV_16 3 -#define SCR_BRDV_DIV_32 4 -#define SCR_BRDV_DIV_1 7 - -/* CTR */ -#define CTR_TSCKIZ_MASK GENMASK(31, 30) /* Transmit Clock I/O Polarity Select */ -#define CTR_TSCKIZ_SCK BIT(31) /* Disable SCK when TX disabled */ -#define CTR_TSCKIZ_POL_SHIFT 30 /* Transmit Clock Polarity */ -#define CTR_RSCKIZ_MASK GENMASK(29, 28) /* Receive Clock Polarity Select */ -#define CTR_RSCKIZ_SCK BIT(29) /* Must match CTR_TSCKIZ_SCK */ -#define CTR_RSCKIZ_POL_SHIFT 28 /* Receive Clock Polarity */ -#define CTR_TEDG_SHIFT 27 /* Transmit Timing (1 = falling edge) */ -#define CTR_REDG_SHIFT 26 /* Receive Timing (1 = falling edge) */ -#define CTR_TXDIZ_MASK GENMASK(23, 22) /* Pin Output When TX is Disabled */ -#define CTR_TXDIZ_LOW (0 << 22) /* 0 */ -#define CTR_TXDIZ_HIGH (1 << 22) /* 1 */ -#define CTR_TXDIZ_HIZ (2 << 22) /* High-impedance */ -#define CTR_TSCKE BIT(15) /* Transmit Serial Clock Output Enable */ -#define CTR_TFSE BIT(14) /* Transmit Frame Sync Signal Output Enable */ -#define CTR_TXE BIT(9) /* Transmit Enable */ -#define CTR_RXE BIT(8) /* Receive Enable */ -#define CTR_TXRST BIT(1) /* Transmit Reset */ -#define CTR_RXRST BIT(0) /* Receive Reset */ - -/* FCTR */ -#define FCTR_TFWM_MASK GENMASK(31, 29) /* Transmit FIFO Watermark */ -#define FCTR_TFWM_64 (0 << 29) /* Transfer Request when 64 empty stages */ -#define FCTR_TFWM_32 (1 << 29) /* Transfer Request when 32 empty stages */ -#define FCTR_TFWM_24 (2 << 29) /* Transfer Request when 24 empty stages */ -#define FCTR_TFWM_16 (3 << 29) /* Transfer Request when 16 empty stages */ -#define FCTR_TFWM_12 (4 << 29) /* Transfer Request when 12 empty stages */ -#define FCTR_TFWM_8 (5 << 29) /* Transfer Request when 8 empty stages */ -#define FCTR_TFWM_4 (6 << 29) /* Transfer Request when 
4 empty stages */ -#define FCTR_TFWM_1 (7 << 29) /* Transfer Request when 1 empty stage */ -#define FCTR_TFUA_MASK GENMASK(26, 20) /* Transmit FIFO Usable Area */ -#define FCTR_TFUA_SHIFT 20 -#define FCTR_TFUA(i) ((i) << FCTR_TFUA_SHIFT) -#define FCTR_RFWM_MASK GENMASK(15, 13) /* Receive FIFO Watermark */ -#define FCTR_RFWM_1 (0 << 13) /* Transfer Request when 1 valid stages */ -#define FCTR_RFWM_4 (1 << 13) /* Transfer Request when 4 valid stages */ -#define FCTR_RFWM_8 (2 << 13) /* Transfer Request when 8 valid stages */ -#define FCTR_RFWM_16 (3 << 13) /* Transfer Request when 16 valid stages */ -#define FCTR_RFWM_32 (4 << 13) /* Transfer Request when 32 valid stages */ -#define FCTR_RFWM_64 (5 << 13) /* Transfer Request when 64 valid stages */ -#define FCTR_RFWM_128 (6 << 13) /* Transfer Request when 128 valid stages */ -#define FCTR_RFWM_256 (7 << 13) /* Transfer Request when 256 valid stages */ -#define FCTR_RFUA_MASK GENMASK(12, 4) /* Receive FIFO Usable Area (0x40 = full) */ -#define FCTR_RFUA_SHIFT 4 -#define FCTR_RFUA(i) ((i) << FCTR_RFUA_SHIFT) - -/* STR */ -#define STR_TFEMP BIT(29) /* Transmit FIFO Empty */ -#define STR_TDREQ BIT(28) /* Transmit Data Transfer Request */ -#define STR_TEOF BIT(23) /* Frame Transmission End */ -#define STR_TFSERR BIT(21) /* Transmit Frame Synchronization Error */ -#define STR_TFOVF BIT(20) /* Transmit FIFO Overflow */ -#define STR_TFUDF BIT(19) /* Transmit FIFO Underflow */ -#define STR_RFFUL BIT(13) /* Receive FIFO Full */ -#define STR_RDREQ BIT(12) /* Receive Data Transfer Request */ -#define STR_REOF BIT(7) /* Frame Reception End */ -#define STR_RFSERR BIT(5) /* Receive Frame Synchronization Error */ -#define STR_RFUDF BIT(4) /* Receive FIFO Underflow */ -#define STR_RFOVF BIT(3) /* Receive FIFO Overflow */ - -/* IER */ -#define IER_TDMAE BIT(31) /* Transmit Data DMA Transfer Req. Enable */ -#define IER_TFEMPE BIT(29) /* Transmit FIFO Empty Enable */ -#define IER_TDREQE BIT(28) /* Transmit Data Transfer Request Enable */ -#define IER_TEOFE BIT(23) /* Frame Transmission End Enable */ -#define IER_TFSERRE BIT(21) /* Transmit Frame Sync Error Enable */ -#define IER_TFOVFE BIT(20) /* Transmit FIFO Overflow Enable */ -#define IER_TFUDFE BIT(19) /* Transmit FIFO Underflow Enable */ -#define IER_RDMAE BIT(15) /* Receive Data DMA Transfer Req. 
Enable */ -#define IER_RFFULE BIT(13) /* Receive FIFO Full Enable */ -#define IER_RDREQE BIT(12) /* Receive Data Transfer Request Enable */ -#define IER_REOFE BIT(7) /* Frame Reception End Enable */ -#define IER_RFSERRE BIT(5) /* Receive Frame Sync Error Enable */ -#define IER_RFUDFE BIT(4) /* Receive FIFO Underflow Enable */ -#define IER_RFOVFE BIT(3) /* Receive FIFO Overflow Enable */ +#define SITMDR1 0x00 /* Transmit Mode Register 1 */ +#define SITMDR2 0x04 /* Transmit Mode Register 2 */ +#define SITMDR3 0x08 /* Transmit Mode Register 3 */ +#define SIRMDR1 0x10 /* Receive Mode Register 1 */ +#define SIRMDR2 0x14 /* Receive Mode Register 2 */ +#define SIRMDR3 0x18 /* Receive Mode Register 3 */ +#define SITSCR 0x20 /* Transmit Clock Select Register */ +#define SIRSCR 0x22 /* Receive Clock Select Register (SH, A1, APE6) */ +#define SICTR 0x28 /* Control Register */ +#define SIFCTR 0x30 /* FIFO Control Register */ +#define SISTR 0x40 /* Status Register */ +#define SIIER 0x44 /* Interrupt Enable Register */ +#define SITDR1 0x48 /* Transmit Control Data Register 1 (SH, A1) */ +#define SITDR2 0x4c /* Transmit Control Data Register 2 (SH, A1) */ +#define SITFDR 0x50 /* Transmit FIFO Data Register */ +#define SIRDR1 0x58 /* Receive Control Data Register 1 (SH, A1) */ +#define SIRDR2 0x5c /* Receive Control Data Register 2 (SH, A1) */ +#define SIRFDR 0x60 /* Receive FIFO Data Register */ + +/* SITMDR1 and SIRMDR1 */ +#define SIMDR1_TRMD BIT(31) /* Transfer Mode (1 = Master mode) */ +#define SIMDR1_SYNCMD_MASK GENMASK(29, 28) /* SYNC Mode */ +#define SIMDR1_SYNCMD_SPI (2 << 28) /* Level mode/SPI */ +#define SIMDR1_SYNCMD_LR (3 << 28) /* L/R mode */ +#define SIMDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */ +#define SIMDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */ +#define SIMDR1_DTDL_SHIFT 20 /* Data Pin Bit Delay for MSIOF_SYNC */ +#define SIMDR1_SYNCDL_SHIFT 16 /* Frame Sync Signal Timing Delay */ +#define SIMDR1_FLD_MASK GENMASK(3, 2) /* Frame Sync Signal Interval (0-3) */ +#define SIMDR1_FLD_SHIFT 2 +#define SIMDR1_XXSTP BIT(0) /* Transmission/Reception Stop on FIFO */ +/* SITMDR1 */ +#define SITMDR1_PCON BIT(30) /* Transfer Signal Connection */ +#define SITMDR1_SYNCCH_MASK GENMASK(27, 26) /* Sync Signal Channel Select */ +#define SITMDR1_SYNCCH_SHIFT 26 /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */ + +/* SITMDR2 and SIRMDR2 */ +#define SIMDR2_BITLEN1(i) (((i) - 1) << 24) /* Data Size (8-32 bits) */ +#define SIMDR2_WDLEN1(i) (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1))) */ +#define SIMDR2_GRPMASK1 BIT(0) /* Group Output Mask 1 (SH, A1) */ + +/* SITSCR and SIRSCR */ +#define SISCR_BRPS_MASK GENMASK(12, 8) /* Prescaler Setting (1-32) */ +#define SISCR_BRPS(i) (((i) - 1) << 8) +#define SISCR_BRDV_MASK GENMASK(2, 0) /* Baud Rate Generator's Division Ratio */ +#define SISCR_BRDV_DIV_2 0 +#define SISCR_BRDV_DIV_4 1 +#define SISCR_BRDV_DIV_8 2 +#define SISCR_BRDV_DIV_16 3 +#define SISCR_BRDV_DIV_32 4 +#define SISCR_BRDV_DIV_1 7 + +/* SICTR */ +#define SICTR_TSCKIZ_MASK GENMASK(31, 30) /* Transmit Clock I/O Polarity Select */ +#define SICTR_TSCKIZ_SCK BIT(31) /* Disable SCK when TX disabled */ +#define SICTR_TSCKIZ_POL_SHIFT 30 /* Transmit Clock Polarity */ +#define SICTR_RSCKIZ_MASK GENMASK(29, 28) /* Receive Clock Polarity Select */ +#define SICTR_RSCKIZ_SCK BIT(29) /* Must match CTR_TSCKIZ_SCK */ +#define SICTR_RSCKIZ_POL_SHIFT 28 /* Receive Clock Polarity */ +#define SICTR_TEDG_SHIFT 27 /* Transmit Timing (1 = falling edge) */ +#define SICTR_REDG_SHIFT 26 /* Receive 
Timing (1 = falling edge) */ +#define SICTR_TXDIZ_MASK GENMASK(23, 22) /* Pin Output When TX is Disabled */ +#define SICTR_TXDIZ_LOW (0 << 22) /* 0 */ +#define SICTR_TXDIZ_HIGH (1 << 22) /* 1 */ +#define SICTR_TXDIZ_HIZ (2 << 22) /* High-impedance */ +#define SICTR_TSCKE BIT(15) /* Transmit Serial Clock Output Enable */ +#define SICTR_TFSE BIT(14) /* Transmit Frame Sync Signal Output Enable */ +#define SICTR_TXE BIT(9) /* Transmit Enable */ +#define SICTR_RXE BIT(8) /* Receive Enable */ +#define SICTR_TXRST BIT(1) /* Transmit Reset */ +#define SICTR_RXRST BIT(0) /* Receive Reset */ + +/* SIFCTR */ +#define SIFCTR_TFWM_MASK GENMASK(31, 29) /* Transmit FIFO Watermark */ +#define SIFCTR_TFWM_64 (0 << 29) /* Transfer Request when 64 empty stages */ +#define SIFCTR_TFWM_32 (1 << 29) /* Transfer Request when 32 empty stages */ +#define SIFCTR_TFWM_24 (2 << 29) /* Transfer Request when 24 empty stages */ +#define SIFCTR_TFWM_16 (3 << 29) /* Transfer Request when 16 empty stages */ +#define SIFCTR_TFWM_12 (4 << 29) /* Transfer Request when 12 empty stages */ +#define SIFCTR_TFWM_8 (5 << 29) /* Transfer Request when 8 empty stages */ +#define SIFCTR_TFWM_4 (6 << 29) /* Transfer Request when 4 empty stages */ +#define SIFCTR_TFWM_1 (7 << 29) /* Transfer Request when 1 empty stage */ +#define SIFCTR_TFUA_MASK GENMASK(26, 20) /* Transmit FIFO Usable Area */ +#define SIFCTR_TFUA_SHIFT 20 +#define SIFCTR_TFUA(i) ((i) << SIFCTR_TFUA_SHIFT) +#define SIFCTR_RFWM_MASK GENMASK(15, 13) /* Receive FIFO Watermark */ +#define SIFCTR_RFWM_1 (0 << 13) /* Transfer Request when 1 valid stages */ +#define SIFCTR_RFWM_4 (1 << 13) /* Transfer Request when 4 valid stages */ +#define SIFCTR_RFWM_8 (2 << 13) /* Transfer Request when 8 valid stages */ +#define SIFCTR_RFWM_16 (3 << 13) /* Transfer Request when 16 valid stages */ +#define SIFCTR_RFWM_32 (4 << 13) /* Transfer Request when 32 valid stages */ +#define SIFCTR_RFWM_64 (5 << 13) /* Transfer Request when 64 valid stages */ +#define SIFCTR_RFWM_128 (6 << 13) /* Transfer Request when 128 valid stages */ +#define SIFCTR_RFWM_256 (7 << 13) /* Transfer Request when 256 valid stages */ +#define SIFCTR_RFUA_MASK GENMASK(12, 4) /* Receive FIFO Usable Area (0x40 = full) */ +#define SIFCTR_RFUA_SHIFT 4 +#define SIFCTR_RFUA(i) ((i) << SIFCTR_RFUA_SHIFT) + +/* SISTR */ +#define SISTR_TFEMP BIT(29) /* Transmit FIFO Empty */ +#define SISTR_TDREQ BIT(28) /* Transmit Data Transfer Request */ +#define SISTR_TEOF BIT(23) /* Frame Transmission End */ +#define SISTR_TFSERR BIT(21) /* Transmit Frame Synchronization Error */ +#define SISTR_TFOVF BIT(20) /* Transmit FIFO Overflow */ +#define SISTR_TFUDF BIT(19) /* Transmit FIFO Underflow */ +#define SISTR_RFFUL BIT(13) /* Receive FIFO Full */ +#define SISTR_RDREQ BIT(12) /* Receive Data Transfer Request */ +#define SISTR_REOF BIT(7) /* Frame Reception End */ +#define SISTR_RFSERR BIT(5) /* Receive Frame Synchronization Error */ +#define SISTR_RFUDF BIT(4) /* Receive FIFO Underflow */ +#define SISTR_RFOVF BIT(3) /* Receive FIFO Overflow */ + +/* SIIER */ +#define SIIER_TDMAE BIT(31) /* Transmit Data DMA Transfer Req. 
Enable */ +#define SIIER_TFEMPE BIT(29) /* Transmit FIFO Empty Enable */ +#define SIIER_TDREQE BIT(28) /* Transmit Data Transfer Request Enable */ +#define SIIER_TEOFE BIT(23) /* Frame Transmission End Enable */ +#define SIIER_TFSERRE BIT(21) /* Transmit Frame Sync Error Enable */ +#define SIIER_TFOVFE BIT(20) /* Transmit FIFO Overflow Enable */ +#define SIIER_TFUDFE BIT(19) /* Transmit FIFO Underflow Enable */ +#define SIIER_RDMAE BIT(15) /* Receive Data DMA Transfer Req. Enable */ +#define SIIER_RFFULE BIT(13) /* Receive FIFO Full Enable */ +#define SIIER_RDREQE BIT(12) /* Receive Data Transfer Request Enable */ +#define SIIER_REOFE BIT(7) /* Frame Reception End Enable */ +#define SIIER_RFSERRE BIT(5) /* Receive Frame Sync Error Enable */ +#define SIIER_RFUDFE BIT(4) /* Receive FIFO Underflow Enable */ +#define SIIER_RFOVFE BIT(3) /* Receive FIFO Overflow Enable */ static u32 sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs) { switch (reg_offs) { - case TSCR: - case RSCR: + case SITSCR: + case SIRSCR: return ioread16(p->mapbase + reg_offs); default: return ioread32(p->mapbase + reg_offs); @@ -207,8 +204,8 @@ static void sh_msiof_write(struct sh_msiof_spi_priv *p, int reg_offs, u32 value) { switch (reg_offs) { - case TSCR: - case RSCR: + case SITSCR: + case SIRSCR: iowrite16(value, p->mapbase + reg_offs); break; default: @@ -223,12 +220,12 @@ static int sh_msiof_modify_ctr_wait(struct sh_msiof_spi_priv *p, u32 mask = clr | set; u32 data; - data = sh_msiof_read(p, CTR); + data = sh_msiof_read(p, SICTR); data &= ~clr; data |= set; - sh_msiof_write(p, CTR, data); + sh_msiof_write(p, SICTR, data); - return readl_poll_timeout_atomic(p->mapbase + CTR, data, + return readl_poll_timeout_atomic(p->mapbase + SICTR, data, (data & mask) == set, 1, 100); } @@ -237,7 +234,7 @@ static irqreturn_t sh_msiof_spi_irq(int irq, void *data) struct sh_msiof_spi_priv *p = data; /* just disable the interrupt and wake up */ - sh_msiof_write(p, IER, 0); + sh_msiof_write(p, SIIER, 0); complete(&p->done); return IRQ_HANDLED; @@ -245,20 +242,20 @@ static irqreturn_t sh_msiof_spi_irq(int irq, void *data) static void sh_msiof_spi_reset_regs(struct sh_msiof_spi_priv *p) { - u32 mask = CTR_TXRST | CTR_RXRST; + u32 mask = SICTR_TXRST | SICTR_RXRST; u32 data; - data = sh_msiof_read(p, CTR); + data = sh_msiof_read(p, SICTR); data |= mask; - sh_msiof_write(p, CTR, data); + sh_msiof_write(p, SICTR, data); - readl_poll_timeout_atomic(p->mapbase + CTR, data, !(data & mask), 1, + readl_poll_timeout_atomic(p->mapbase + SICTR, data, !(data & mask), 1, 100); } static const u32 sh_msiof_spi_div_array[] = { - SCR_BRDV_DIV_1, SCR_BRDV_DIV_2, SCR_BRDV_DIV_4, - SCR_BRDV_DIV_8, SCR_BRDV_DIV_16, SCR_BRDV_DIV_32, + SISCR_BRDV_DIV_1, SISCR_BRDV_DIV_2, SISCR_BRDV_DIV_4, + SISCR_BRDV_DIV_8, SISCR_BRDV_DIV_16, SISCR_BRDV_DIV_32, }; static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p, @@ -276,7 +273,7 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p, div = DIV_ROUND_UP(parent_rate, spi_hz); if (div <= 1024) { - /* SCR_BRDV_DIV_1 is valid only if BRPS is x 1/1 or x 1/2 */ + /* SISCR_BRDV_DIV_1 is valid only if BRPS is x 1/1 or x 1/2 */ if (!div_pow && div <= 32 && div > 2) div_pow = 1; @@ -295,10 +292,10 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p, brps = 32; } - scr = sh_msiof_spi_div_array[div_pow] | SCR_BRPS(brps); - sh_msiof_write(p, TSCR, scr); + scr = sh_msiof_spi_div_array[div_pow] | SISCR_BRPS(brps); + sh_msiof_write(p, SITSCR, scr); if (!(p->ctlr->flags & 
SPI_CONTROLLER_MUST_TX)) - sh_msiof_write(p, RSCR, scr); + sh_msiof_write(p, SIRSCR, scr); } static u32 sh_msiof_get_delay_bit(u32 dtdl_or_syncdl) @@ -337,8 +334,8 @@ static u32 sh_msiof_spi_get_dtdl_and_syncdl(struct sh_msiof_spi_priv *p) return 0; } - val = sh_msiof_get_delay_bit(p->info->dtdl) << MDR1_DTDL_SHIFT; - val |= sh_msiof_get_delay_bit(p->info->syncdl) << MDR1_SYNCDL_SHIFT; + val = sh_msiof_get_delay_bit(p->info->dtdl) << SIMDR1_DTDL_SHIFT; + val |= sh_msiof_get_delay_bit(p->info->syncdl) << SIMDR1_SYNCDL_SHIFT; return val; } @@ -357,54 +354,54 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss, * 1 0 11 11 0 0 * 1 1 11 11 1 1 */ - tmp = MDR1_SYNCMD_SPI | 1 << MDR1_FLD_SHIFT | MDR1_XXSTP; - tmp |= !cs_high << MDR1_SYNCAC_SHIFT; - tmp |= lsb_first << MDR1_BITLSB_SHIFT; + tmp = SIMDR1_SYNCMD_SPI | 1 << SIMDR1_FLD_SHIFT | SIMDR1_XXSTP; + tmp |= !cs_high << SIMDR1_SYNCAC_SHIFT; + tmp |= lsb_first << SIMDR1_BITLSB_SHIFT; tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p); if (spi_controller_is_slave(p->ctlr)) { - sh_msiof_write(p, TMDR1, tmp | TMDR1_PCON); + sh_msiof_write(p, SITMDR1, tmp | SITMDR1_PCON); } else { - sh_msiof_write(p, TMDR1, - tmp | MDR1_TRMD | TMDR1_PCON | - (ss < MAX_SS ? ss : 0) << TMDR1_SYNCCH_SHIFT); + sh_msiof_write(p, SITMDR1, + tmp | SIMDR1_TRMD | SITMDR1_PCON | + (ss < MAX_SS ? ss : 0) << SITMDR1_SYNCCH_SHIFT); } if (p->ctlr->flags & SPI_CONTROLLER_MUST_TX) { /* These bits are reserved if RX needs TX */ tmp &= ~0x0000ffff; } - sh_msiof_write(p, RMDR1, tmp); + sh_msiof_write(p, SIRMDR1, tmp); tmp = 0; - tmp |= CTR_TSCKIZ_SCK | cpol << CTR_TSCKIZ_POL_SHIFT; - tmp |= CTR_RSCKIZ_SCK | cpol << CTR_RSCKIZ_POL_SHIFT; + tmp |= SICTR_TSCKIZ_SCK | cpol << SICTR_TSCKIZ_POL_SHIFT; + tmp |= SICTR_RSCKIZ_SCK | cpol << SICTR_RSCKIZ_POL_SHIFT; edge = cpol ^ !cpha; - tmp |= edge << CTR_TEDG_SHIFT; - tmp |= edge << CTR_REDG_SHIFT; - tmp |= tx_hi_z ? CTR_TXDIZ_HIZ : CTR_TXDIZ_LOW; - sh_msiof_write(p, CTR, tmp); + tmp |= edge << SICTR_TEDG_SHIFT; + tmp |= edge << SICTR_REDG_SHIFT; + tmp |= tx_hi_z ? 
SICTR_TXDIZ_HIZ : SICTR_TXDIZ_LOW; + sh_msiof_write(p, SICTR, tmp); } static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p, const void *tx_buf, void *rx_buf, u32 bits, u32 words) { - u32 dr2 = MDR2_BITLEN1(bits) | MDR2_WDLEN1(words); + u32 dr2 = SIMDR2_BITLEN1(bits) | SIMDR2_WDLEN1(words); if (tx_buf || (p->ctlr->flags & SPI_CONTROLLER_MUST_TX)) - sh_msiof_write(p, TMDR2, dr2); + sh_msiof_write(p, SITMDR2, dr2); else - sh_msiof_write(p, TMDR2, dr2 | MDR2_GRPMASK1); + sh_msiof_write(p, SITMDR2, dr2 | SIMDR2_GRPMASK1); if (rx_buf) - sh_msiof_write(p, RMDR2, dr2); + sh_msiof_write(p, SIRMDR2, dr2); } static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p) { - sh_msiof_write(p, STR, - sh_msiof_read(p, STR) & ~(STR_TDREQ | STR_RDREQ)); + sh_msiof_write(p, SISTR, + sh_msiof_read(p, SISTR) & ~(SISTR_TDREQ | SISTR_RDREQ)); } static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p, @@ -414,7 +411,7 @@ static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - sh_msiof_write(p, TFDR, buf_8[k] << fs); + sh_msiof_write(p, SITFDR, buf_8[k] << fs); } static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p, @@ -424,7 +421,7 @@ static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - sh_msiof_write(p, TFDR, buf_16[k] << fs); + sh_msiof_write(p, SITFDR, buf_16[k] << fs); } static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p, @@ -434,7 +431,7 @@ static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - sh_msiof_write(p, TFDR, get_unaligned(&buf_16[k]) << fs); + sh_msiof_write(p, SITFDR, get_unaligned(&buf_16[k]) << fs); } static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p, @@ -444,7 +441,7 @@ static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - sh_msiof_write(p, TFDR, buf_32[k] << fs); + sh_msiof_write(p, SITFDR, buf_32[k] << fs); } static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p, @@ -454,7 +451,7 @@ static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - sh_msiof_write(p, TFDR, get_unaligned(&buf_32[k]) << fs); + sh_msiof_write(p, SITFDR, get_unaligned(&buf_32[k]) << fs); } static void sh_msiof_spi_write_fifo_s32(struct sh_msiof_spi_priv *p, @@ -464,7 +461,7 @@ static void sh_msiof_spi_write_fifo_s32(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - sh_msiof_write(p, TFDR, swab32(buf_32[k] << fs)); + sh_msiof_write(p, SITFDR, swab32(buf_32[k] << fs)); } static void sh_msiof_spi_write_fifo_s32u(struct sh_msiof_spi_priv *p, @@ -474,7 +471,7 @@ static void sh_msiof_spi_write_fifo_s32u(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - sh_msiof_write(p, TFDR, swab32(get_unaligned(&buf_32[k]) << fs)); + sh_msiof_write(p, SITFDR, swab32(get_unaligned(&buf_32[k]) << fs)); } static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p, @@ -484,7 +481,7 @@ static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - buf_8[k] = sh_msiof_read(p, RFDR) >> fs; + buf_8[k] = sh_msiof_read(p, SIRFDR) >> fs; } static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p, @@ -494,7 +491,7 @@ static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - buf_16[k] = sh_msiof_read(p, RFDR) >> fs; + buf_16[k] = sh_msiof_read(p, SIRFDR) >> fs; } static void 
sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p, @@ -504,7 +501,7 @@ static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_16[k]); + put_unaligned(sh_msiof_read(p, SIRFDR) >> fs, &buf_16[k]); } static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p, @@ -514,7 +511,7 @@ static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - buf_32[k] = sh_msiof_read(p, RFDR) >> fs; + buf_32[k] = sh_msiof_read(p, SIRFDR) >> fs; } static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p, @@ -524,7 +521,7 @@ static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_32[k]); + put_unaligned(sh_msiof_read(p, SIRFDR) >> fs, &buf_32[k]); } static void sh_msiof_spi_read_fifo_s32(struct sh_msiof_spi_priv *p, @@ -534,7 +531,7 @@ static void sh_msiof_spi_read_fifo_s32(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - buf_32[k] = swab32(sh_msiof_read(p, RFDR) >> fs); + buf_32[k] = swab32(sh_msiof_read(p, SIRFDR) >> fs); } static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p, @@ -544,7 +541,7 @@ static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - put_unaligned(swab32(sh_msiof_read(p, RFDR) >> fs), &buf_32[k]); + put_unaligned(swab32(sh_msiof_read(p, SIRFDR) >> fs), &buf_32[k]); } static int sh_msiof_spi_setup(struct spi_device *spi) @@ -561,17 +558,17 @@ static int sh_msiof_spi_setup(struct spi_device *spi) return 0; /* Configure native chip select mode/polarity early */ - clr = MDR1_SYNCMD_MASK; - set = MDR1_SYNCMD_SPI; + clr = SIMDR1_SYNCMD_MASK; + set = SIMDR1_SYNCMD_SPI; if (spi->mode & SPI_CS_HIGH) - clr |= BIT(MDR1_SYNCAC_SHIFT); + clr |= BIT(SIMDR1_SYNCAC_SHIFT); else - set |= BIT(MDR1_SYNCAC_SHIFT); + set |= BIT(SIMDR1_SYNCAC_SHIFT); pm_runtime_get_sync(&p->pdev->dev); - tmp = sh_msiof_read(p, TMDR1) & ~clr; - sh_msiof_write(p, TMDR1, tmp | set | MDR1_TRMD | TMDR1_PCON); - tmp = sh_msiof_read(p, RMDR1) & ~clr; - sh_msiof_write(p, RMDR1, tmp | set); + tmp = sh_msiof_read(p, SITMDR1) & ~clr; + sh_msiof_write(p, SITMDR1, tmp | set | SIMDR1_TRMD | SITMDR1_PCON); + tmp = sh_msiof_read(p, SIRMDR1) & ~clr; + sh_msiof_write(p, SIRMDR1, tmp | set); pm_runtime_put(&p->pdev->dev); p->native_cs_high = spi->mode & SPI_CS_HIGH; p->native_cs_inited = true; @@ -587,7 +584,7 @@ static int sh_msiof_prepare_message(struct spi_controller *ctlr, /* Configure pins before asserting CS */ if (spi->cs_gpiod) { - ss = p->unused_ss; + ss = ctlr->unused_native_cs; cs_high = p->native_cs_high; } else { ss = spi->chip_select; @@ -607,15 +604,15 @@ static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf) /* setup clock and rx/tx signals */ if (!slave) - ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TSCKE); + ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TSCKE); if (rx_buf && !ret) - ret = sh_msiof_modify_ctr_wait(p, 0, CTR_RXE); + ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_RXE); if (!ret) - ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TXE); + ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TXE); /* start by setting frame bit */ if (!ret && !slave) - ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TFSE); + ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TFSE); return ret; } @@ -627,13 +624,13 @@ static int sh_msiof_spi_stop(struct sh_msiof_spi_priv *p, void *rx_buf) /* shut down frame, rx/tx and clock 
signals */ if (!slave) - ret = sh_msiof_modify_ctr_wait(p, CTR_TFSE, 0); + ret = sh_msiof_modify_ctr_wait(p, SICTR_TFSE, 0); if (!ret) - ret = sh_msiof_modify_ctr_wait(p, CTR_TXE, 0); + ret = sh_msiof_modify_ctr_wait(p, SICTR_TXE, 0); if (rx_buf && !ret) - ret = sh_msiof_modify_ctr_wait(p, CTR_RXE, 0); + ret = sh_msiof_modify_ctr_wait(p, SICTR_RXE, 0); if (!ret && !slave) - ret = sh_msiof_modify_ctr_wait(p, CTR_TSCKE, 0); + ret = sh_msiof_modify_ctr_wait(p, SICTR_TSCKE, 0); return ret; } @@ -688,11 +685,11 @@ static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p, fifo_shift = 32 - bits; /* default FIFO watermarks for PIO */ - sh_msiof_write(p, FCTR, 0); + sh_msiof_write(p, SIFCTR, 0); /* setup msiof transfer mode registers */ sh_msiof_spi_set_mode_regs(p, tx_buf, rx_buf, bits, words); - sh_msiof_write(p, IER, IER_TEOFE | IER_REOFE); + sh_msiof_write(p, SIIER, SIIER_TEOFE | SIIER_REOFE); /* write tx fifo */ if (tx_buf) @@ -731,7 +728,7 @@ stop_reset: sh_msiof_reset_str(p); sh_msiof_spi_stop(p, rx_buf); stop_ier: - sh_msiof_write(p, IER, 0); + sh_msiof_write(p, SIIER, 0); return ret; } @@ -750,7 +747,7 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx, /* First prepare and submit the DMA request(s), as this may fail */ if (rx) { - ier_bits |= IER_RDREQE | IER_RDMAE; + ier_bits |= SIIER_RDREQE | SIIER_RDMAE; desc_rx = dmaengine_prep_slave_single(p->ctlr->dma_rx, p->rx_dma_addr, len, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); @@ -765,7 +762,7 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx, } if (tx) { - ier_bits |= IER_TDREQE | IER_TDMAE; + ier_bits |= SIIER_TDREQE | SIIER_TDMAE; dma_sync_single_for_device(p->ctlr->dma_tx->device->dev, p->tx_dma_addr, len, DMA_TO_DEVICE); desc_tx = dmaengine_prep_slave_single(p->ctlr->dma_tx, @@ -786,12 +783,12 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx, } /* 1 stage FIFO watermarks for DMA */ - sh_msiof_write(p, FCTR, FCTR_TFWM_1 | FCTR_RFWM_1); + sh_msiof_write(p, SIFCTR, SIFCTR_TFWM_1 | SIFCTR_RFWM_1); /* setup msiof transfer mode registers (32-bit words) */ sh_msiof_spi_set_mode_regs(p, tx, rx, 32, len / 4); - sh_msiof_write(p, IER, ier_bits); + sh_msiof_write(p, SIIER, ier_bits); reinit_completion(&p->done); if (tx) @@ -823,10 +820,10 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx, if (ret) goto stop_reset; - sh_msiof_write(p, IER, 0); + sh_msiof_write(p, SIIER, 0); } else { /* wait for tx fifo to be emptied */ - sh_msiof_write(p, IER, IER_TEOFE); + sh_msiof_write(p, SIIER, SIIER_TEOFE); ret = sh_msiof_wait_for_completion(p, &p->done); if (ret) goto stop_reset; @@ -856,7 +853,7 @@ stop_dma: no_dma_tx: if (rx) dmaengine_terminate_all(p->ctlr->dma_rx); - sh_msiof_write(p, IER, 0); + sh_msiof_write(p, SIIER, 0); return ret; } @@ -1124,46 +1121,6 @@ static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev) } #endif -static int sh_msiof_get_cs_gpios(struct sh_msiof_spi_priv *p) -{ - struct device *dev = &p->pdev->dev; - unsigned int used_ss_mask = 0; - unsigned int cs_gpios = 0; - unsigned int num_cs, i; - int ret; - - ret = gpiod_count(dev, "cs"); - if (ret <= 0) - return 0; - - num_cs = max_t(unsigned int, ret, p->ctlr->num_chipselect); - for (i = 0; i < num_cs; i++) { - struct gpio_desc *gpiod; - - gpiod = devm_gpiod_get_index(dev, "cs", i, GPIOD_ASIS); - if (!IS_ERR(gpiod)) { - devm_gpiod_put(dev, gpiod); - cs_gpios++; - continue; - } - - if (PTR_ERR(gpiod) != -ENOENT) - return PTR_ERR(gpiod); - - if (i >= 
MAX_SS) { - dev_err(dev, "Invalid native chip select %d\n", i); - return -EINVAL; - } - used_ss_mask |= BIT(i); - } - p->unused_ss = ffz(used_ss_mask); - if (cs_gpios && p->unused_ss >= MAX_SS) { - dev_err(dev, "No unused native chip select available\n"); - return -EINVAL; - } - return 0; -} - static struct dma_chan *sh_msiof_request_dma_chan(struct device *dev, enum dma_transfer_direction dir, unsigned int id, dma_addr_t port_addr) { @@ -1232,12 +1189,12 @@ static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p) ctlr = p->ctlr; ctlr->dma_tx = sh_msiof_request_dma_chan(dev, DMA_MEM_TO_DEV, - dma_tx_id, res->start + TFDR); + dma_tx_id, res->start + SITFDR); if (!ctlr->dma_tx) return -ENODEV; ctlr->dma_rx = sh_msiof_request_dma_chan(dev, DMA_DEV_TO_MEM, - dma_rx_id, res->start + RFDR); + dma_rx_id, res->start + SIRFDR); if (!ctlr->dma_rx) goto free_tx_chan; @@ -1373,17 +1330,12 @@ static int sh_msiof_spi_probe(struct platform_device *pdev) if (p->info->rx_fifo_override) p->rx_fifo_size = p->info->rx_fifo_override; - /* Setup GPIO chip selects */ - ctlr->num_chipselect = p->info->num_chipselect; - ret = sh_msiof_get_cs_gpios(p); - if (ret) - goto err1; - /* init controller code */ ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; ctlr->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE; ctlr->flags = chipdata->ctlr_flags; ctlr->bus_num = pdev->id; + ctlr->num_chipselect = p->info->num_chipselect; ctlr->dev.of_node = pdev->dev.of_node; ctlr->setup = sh_msiof_spi_setup; ctlr->prepare_message = sh_msiof_prepare_message; @@ -1392,6 +1344,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev) ctlr->auto_runtime_pm = true; ctlr->transfer_one = sh_msiof_transfer_one; ctlr->use_gpio_descriptors = true; + ctlr->max_native_cs = MAX_SS; ret = sh_msiof_request_dma(p); if (ret < 0) diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c index e1e639191557..8419e6722e17 100644 --- a/drivers/spi/spi-sirf.c +++ b/drivers/spi/spi-sirf.c @@ -1126,16 +1126,16 @@ static int spi_sirfsoc_probe(struct platform_device *pdev) sspi->bitbang.master->dev.of_node = pdev->dev.of_node; /* request DMA channels */ - sspi->rx_chan = dma_request_slave_channel(&pdev->dev, "rx"); - if (!sspi->rx_chan) { + sspi->rx_chan = dma_request_chan(&pdev->dev, "rx"); + if (IS_ERR(sspi->rx_chan)) { dev_err(&pdev->dev, "can not allocate rx dma channel\n"); - ret = -ENODEV; + ret = PTR_ERR(sspi->rx_chan); goto free_master; } - sspi->tx_chan = dma_request_slave_channel(&pdev->dev, "tx"); - if (!sspi->tx_chan) { + sspi->tx_chan = dma_request_chan(&pdev->dev, "tx"); + if (IS_ERR(sspi->tx_chan)) { dev_err(&pdev->dev, "can not allocate tx dma channel\n"); - ret = -ENODEV; + ret = PTR_ERR(sspi->tx_chan); goto free_rx_dma; } diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c index 4e726929bb4f..4ef569b47aa6 100644 --- a/drivers/spi/spi-stm32-qspi.c +++ b/drivers/spi/spi-stm32-qspi.c @@ -470,10 +470,11 @@ static int stm32_qspi_setup(struct spi_device *spi) return 0; } -static void stm32_qspi_dma_setup(struct stm32_qspi *qspi) +static int stm32_qspi_dma_setup(struct stm32_qspi *qspi) { struct dma_slave_config dma_cfg; struct device *dev = qspi->dev; + int ret = 0; memset(&dma_cfg, 0, sizeof(dma_cfg)); @@ -484,8 +485,13 @@ static void stm32_qspi_dma_setup(struct stm32_qspi *qspi) dma_cfg.src_maxburst = 4; dma_cfg.dst_maxburst = 4; - qspi->dma_chrx = dma_request_slave_channel(dev, "rx"); - if (qspi->dma_chrx) { + qspi->dma_chrx = dma_request_chan(dev, "rx"); + if (IS_ERR(qspi->dma_chrx)) { + ret = 
PTR_ERR(qspi->dma_chrx); + qspi->dma_chrx = NULL; + if (ret == -EPROBE_DEFER) + goto out; + } else { if (dmaengine_slave_config(qspi->dma_chrx, &dma_cfg)) { dev_err(dev, "dma rx config failed\n"); dma_release_channel(qspi->dma_chrx); @@ -493,8 +499,11 @@ static void stm32_qspi_dma_setup(struct stm32_qspi *qspi) } } - qspi->dma_chtx = dma_request_slave_channel(dev, "tx"); - if (qspi->dma_chtx) { + qspi->dma_chtx = dma_request_chan(dev, "tx"); + if (IS_ERR(qspi->dma_chtx)) { + ret = PTR_ERR(qspi->dma_chtx); + qspi->dma_chtx = NULL; + } else { if (dmaengine_slave_config(qspi->dma_chtx, &dma_cfg)) { dev_err(dev, "dma tx config failed\n"); dma_release_channel(qspi->dma_chtx); @@ -502,7 +511,13 @@ static void stm32_qspi_dma_setup(struct stm32_qspi *qspi) } } +out: init_completion(&qspi->dma_completion); + + if (ret != -EPROBE_DEFER) + ret = 0; + + return ret; } static void stm32_qspi_dma_free(struct stm32_qspi *qspi) @@ -608,7 +623,10 @@ static int stm32_qspi_probe(struct platform_device *pdev) qspi->dev = dev; platform_set_drvdata(pdev, qspi); - stm32_qspi_dma_setup(qspi); + ret = stm32_qspi_dma_setup(qspi); + if (ret) + goto err; + mutex_init(&qspi->lock); ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c index b222ce8d083e..e041f9c4ec47 100644 --- a/drivers/spi/spi-stm32.c +++ b/drivers/spi/spi-stm32.c @@ -9,7 +9,6 @@ #include <linux/clk.h> #include <linux/delay.h> #include <linux/dmaengine.h> -#include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/iopoll.h> #include <linux/module.h> @@ -974,29 +973,6 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id) } /** - * stm32_spi_setup - setup device chip select - */ -static int stm32_spi_setup(struct spi_device *spi_dev) -{ - int ret = 0; - - if (!gpio_is_valid(spi_dev->cs_gpio)) { - dev_err(&spi_dev->dev, "%d is not a valid gpio\n", - spi_dev->cs_gpio); - return -EINVAL; - } - - dev_dbg(&spi_dev->dev, "%s: set gpio%d output %s\n", __func__, - spi_dev->cs_gpio, - (spi_dev->mode & SPI_CS_HIGH) ? 
"low" : "high"); - - ret = gpio_direction_output(spi_dev->cs_gpio, - !(spi_dev->mode & SPI_CS_HIGH)); - - return ret; -} - -/** * stm32_spi_prepare_msg - set up the controller to transfer a single message */ static int stm32_spi_prepare_msg(struct spi_master *master, @@ -1810,7 +1786,7 @@ static int stm32_spi_probe(struct platform_device *pdev) struct spi_master *master; struct stm32_spi *spi; struct resource *res; - int i, ret; + int ret; master = spi_alloc_master(&pdev->dev, sizeof(struct stm32_spi)); if (!master) { @@ -1898,22 +1874,34 @@ static int stm32_spi_probe(struct platform_device *pdev) master->bits_per_word_mask = spi->cfg->get_bpw_mask(spi); master->max_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_min; master->min_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_max; - master->setup = stm32_spi_setup; + master->use_gpio_descriptors = true; master->prepare_message = stm32_spi_prepare_msg; master->transfer_one = stm32_spi_transfer_one; master->unprepare_message = stm32_spi_unprepare_msg; - spi->dma_tx = dma_request_slave_channel(spi->dev, "tx"); - if (!spi->dma_tx) + spi->dma_tx = dma_request_chan(spi->dev, "tx"); + if (IS_ERR(spi->dma_tx)) { + ret = PTR_ERR(spi->dma_tx); + spi->dma_tx = NULL; + if (ret == -EPROBE_DEFER) + goto err_clk_disable; + dev_warn(&pdev->dev, "failed to request tx dma channel\n"); - else + } else { master->dma_tx = spi->dma_tx; + } + + spi->dma_rx = dma_request_chan(spi->dev, "rx"); + if (IS_ERR(spi->dma_rx)) { + ret = PTR_ERR(spi->dma_rx); + spi->dma_rx = NULL; + if (ret == -EPROBE_DEFER) + goto err_dma_release; - spi->dma_rx = dma_request_slave_channel(spi->dev, "rx"); - if (!spi->dma_rx) dev_warn(&pdev->dev, "failed to request rx dma channel\n"); - else + } else { master->dma_rx = spi->dma_rx; + } if (spi->dma_tx || spi->dma_rx) master->can_dma = stm32_spi_can_dma; @@ -1925,43 +1913,26 @@ static int stm32_spi_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "spi master registration failed: %d\n", ret); - goto err_dma_release; + goto err_pm_disable; } - if (!master->cs_gpios) { + if (!master->cs_gpiods) { dev_err(&pdev->dev, "no CS gpios available\n"); ret = -EINVAL; - goto err_dma_release; - } - - for (i = 0; i < master->num_chipselect; i++) { - if (!gpio_is_valid(master->cs_gpios[i])) { - dev_err(&pdev->dev, "%i is not a valid gpio\n", - master->cs_gpios[i]); - ret = -EINVAL; - goto err_dma_release; - } - - ret = devm_gpio_request(&pdev->dev, master->cs_gpios[i], - DRIVER_NAME); - if (ret) { - dev_err(&pdev->dev, "can't get CS gpio %i\n", - master->cs_gpios[i]); - goto err_dma_release; - } + goto err_pm_disable; } dev_info(&pdev->dev, "driver initialized\n"); return 0; +err_pm_disable: + pm_runtime_disable(&pdev->dev); err_dma_release: if (spi->dma_tx) dma_release_channel(spi->dma_tx); if (spi->dma_rx) dma_release_channel(spi->dma_rx); - - pm_runtime_disable(&pdev->dev); err_clk_disable: clk_disable_unprepare(spi->clk); err_master_put: diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c index fc40ab146c86..83edabdb41ad 100644 --- a/drivers/spi/spi-tegra114.c +++ b/drivers/spi/spi-tegra114.c @@ -269,10 +269,10 @@ static unsigned tegra_spi_calculate_curr_xfer_param( if ((bits_per_word == 8 || bits_per_word == 16 || bits_per_word == 32) && t->len > 3) { - tspi->is_packed = 1; + tspi->is_packed = true; tspi->words_per_32bit = 32/bits_per_word; } else { - tspi->is_packed = 0; + tspi->is_packed = false; tspi->words_per_32bit = 1; } diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c index 
66dcb6128539..366a3e5cca6b 100644 --- a/drivers/spi/spi-ti-qspi.c +++ b/drivers/spi/spi-ti-qspi.c @@ -80,8 +80,6 @@ struct ti_qspi { #define QSPI_COMPLETION_TIMEOUT msecs_to_jiffies(2000) -#define QSPI_FCLK 192000000 - /* Clock Control */ #define QSPI_CLK_EN (1 << 31) #define QSPI_CLK_DIV_MAX 0xffff @@ -316,6 +314,8 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t, { int wlen; unsigned int cmd; + u32 rx; + u8 rxlen, rx_wlen; u8 *rxbuf; rxbuf = t->rx_buf; @@ -332,20 +332,67 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t, break; } wlen = t->bits_per_word >> 3; /* in bytes */ + rx_wlen = wlen; while (count) { dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc); if (qspi_is_busy(qspi)) return -EBUSY; + switch (wlen) { + case 1: + /* + * Optimize the 8-bit word transfers, as used by + * SPI flash devices. + */ + if (count >= QSPI_WLEN_MAX_BYTES) { + rxlen = QSPI_WLEN_MAX_BYTES; + } else { + rxlen = min(count, 4); + } + rx_wlen = rxlen << 3; + cmd &= ~QSPI_WLEN_MASK; + cmd |= QSPI_WLEN(rx_wlen); + break; + default: + rxlen = wlen; + break; + } + ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG); if (ti_qspi_poll_wc(qspi)) { dev_err(qspi->dev, "read timed out\n"); return -ETIMEDOUT; } + switch (wlen) { case 1: - *rxbuf = readb(qspi->base + QSPI_SPI_DATA_REG); + /* + * Optimize the 8-bit word transfers, as used by + * SPI flash devices. + */ + if (count >= QSPI_WLEN_MAX_BYTES) { + u32 *rxp = (u32 *) rxbuf; + rx = readl(qspi->base + QSPI_SPI_DATA_REG_3); + *rxp++ = be32_to_cpu(rx); + rx = readl(qspi->base + QSPI_SPI_DATA_REG_2); + *rxp++ = be32_to_cpu(rx); + rx = readl(qspi->base + QSPI_SPI_DATA_REG_1); + *rxp++ = be32_to_cpu(rx); + rx = readl(qspi->base + QSPI_SPI_DATA_REG); + *rxp++ = be32_to_cpu(rx); + } else { + u8 *rxp = rxbuf; + rx = readl(qspi->base + QSPI_SPI_DATA_REG); + if (rx_wlen >= 8) + *rxp++ = rx >> (rx_wlen - 8); + if (rx_wlen >= 16) + *rxp++ = rx >> (rx_wlen - 16); + if (rx_wlen >= 24) + *rxp++ = rx >> (rx_wlen - 24); + if (rx_wlen >= 32) + *rxp++ = rx; + } break; case 2: *((u16 *)rxbuf) = readw(qspi->base + QSPI_SPI_DATA_REG); @@ -354,8 +401,8 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t, *((u32 *)rxbuf) = readl(qspi->base + QSPI_SPI_DATA_REG); break; } - rxbuf += rxlen; - count -= rxlen; + rxbuf += rxlen; + count -= rxlen; } return 0; @@ -527,6 +574,35 @@ static void ti_qspi_setup_mmap_read(struct spi_device *spi, u8 opcode, QSPI_SPI_SETUP_REG(spi->chip_select)); } +static int ti_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) +{ + struct ti_qspi *qspi = spi_controller_get_devdata(mem->spi->master); + size_t max_len; + + if (op->data.dir == SPI_MEM_DATA_IN) { + if (op->addr.val < qspi->mmap_size) { + /* Limit MMIO to the mmapped region */ + if (op->addr.val + op->data.nbytes > qspi->mmap_size) { + max_len = qspi->mmap_size - op->addr.val; + op->data.nbytes = min((size_t) op->data.nbytes, + max_len); + } + } else { + /* + * Use fallback mode (SW generated transfers) above the + * mmapped region. + * Adjust size to comply with the QSPI max frame length.
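+ * For example (illustrative numbers): a read op with a 3-byte + * address and no dummy bytes leaves room for QSPI_FRAME - 4 + * data bytes, since one byte goes to the opcode.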
+ */ + max_len = QSPI_FRAME; + max_len -= 1 + op->addr.nbytes + op->dummy.nbytes; + op->data.nbytes = min((size_t) op->data.nbytes, + max_len); + } + } + + return 0; +} + static int ti_qspi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op) { @@ -577,6 +653,7 @@ static int ti_qspi_exec_mem_op(struct spi_mem *mem, static const struct spi_controller_mem_ops ti_qspi_mem_ops = { .exec_op = ti_qspi_exec_mem_op, + .adjust_op_size = ti_qspi_adjust_op_size, }; static int ti_qspi_start_transfer_one(struct spi_master *master, diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c index 223353fa2d8a..d7ea6af74743 100644 --- a/drivers/spi/spi-topcliff-pch.c +++ b/drivers/spi/spi-topcliff-pch.c @@ -863,7 +863,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw) /* Set Tx DMA */ param = &dma->param_tx; param->dma_dev = &dma_dev->dev; - param->chan_id = data->ch * 2; /* Tx = 0, 2 */; + param->chan_id = data->ch * 2; /* Tx = 0, 2 */ param->tx_reg = data->io_base_addr + PCH_SPDWR; param->width = width; chan = dma_request_channel(mask, pch_spi_filter, param); @@ -878,7 +878,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw) /* Set Rx DMA */ param = &dma->param_rx; param->dma_dev = &dma_dev->dev; - param->chan_id = data->ch * 2 + 1; /* Rx = Tx + 1 */; + param->chan_id = data->ch * 2 + 1; /* Rx = Tx + 1 */ param->rx_reg = data->io_base_addr + PCH_SPDRR; param->width = width; chan = dma_request_channel(mask, pch_spi_filter, param); diff --git a/drivers/spi/spi-uniphier.c b/drivers/spi/spi-uniphier.c index ce9b30112e26..0fa50979644d 100644 --- a/drivers/spi/spi-uniphier.c +++ b/drivers/spi/spi-uniphier.c @@ -8,6 +8,7 @@ #include <linux/bitops.h> #include <linux/clk.h> #include <linux/delay.h> +#include <linux/dmaengine.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> @@ -23,6 +24,7 @@ struct uniphier_spi_priv { void __iomem *base; + dma_addr_t base_dma_addr; struct clk *clk; struct spi_master *master; struct completion xfer_done; @@ -32,6 +34,7 @@ struct uniphier_spi_priv { unsigned int rx_bytes; const u8 *tx_buf; u8 *rx_buf; + atomic_t dma_busy; bool is_save_param; u8 bits_per_word; @@ -61,11 +64,16 @@ struct uniphier_spi_priv { #define SSI_FPS_FSTRT BIT(14) #define SSI_SR 0x14 +#define SSI_SR_BUSY BIT(7) #define SSI_SR_RNE BIT(0) #define SSI_IE 0x18 +#define SSI_IE_TCIE BIT(4) #define SSI_IE_RCIE BIT(3) +#define SSI_IE_TXRE BIT(2) +#define SSI_IE_RXRE BIT(1) #define SSI_IE_RORIE BIT(0) +#define SSI_IE_ALL_MASK GENMASK(4, 0) #define SSI_IS 0x1c #define SSI_IS_RXRS BIT(9) @@ -87,15 +95,19 @@ struct uniphier_spi_priv { #define SSI_RXDR 0x24 #define SSI_FIFO_DEPTH 8U +#define SSI_FIFO_BURST_NUM 1 + +#define SSI_DMA_RX_BUSY BIT(1) +#define SSI_DMA_TX_BUSY BIT(0) static inline unsigned int bytes_per_word(unsigned int bits) { return bits <= 8 ? 1 : (bits <= 16 ? 
2 : 4); } -static inline void uniphier_spi_irq_enable(struct spi_device *spi, u32 mask) +static inline void uniphier_spi_irq_enable(struct uniphier_spi_priv *priv, + u32 mask) { - struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master); u32 val; val = readl(priv->base + SSI_IE); @@ -103,9 +115,9 @@ static inline void uniphier_spi_irq_enable(struct spi_device *spi, u32 mask) writel(val, priv->base + SSI_IE); } -static inline void uniphier_spi_irq_disable(struct spi_device *spi, u32 mask) +static inline void uniphier_spi_irq_disable(struct uniphier_spi_priv *priv, + u32 mask) { - struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master); u32 val; val = readl(priv->base + SSI_IE); @@ -334,6 +346,128 @@ static void uniphier_spi_set_cs(struct spi_device *spi, bool enable) writel(val, priv->base + SSI_FPS); } +static bool uniphier_spi_can_dma(struct spi_master *master, + struct spi_device *spi, + struct spi_transfer *t) +{ + struct uniphier_spi_priv *priv = spi_master_get_devdata(master); + unsigned int bpw = bytes_per_word(priv->bits_per_word); + + if ((!master->dma_tx && !master->dma_rx) + || (!master->dma_tx && t->tx_buf) + || (!master->dma_rx && t->rx_buf)) + return false; + + return DIV_ROUND_UP(t->len, bpw) > SSI_FIFO_DEPTH; +} + +static void uniphier_spi_dma_rxcb(void *data) +{ + struct spi_master *master = data; + struct uniphier_spi_priv *priv = spi_master_get_devdata(master); + int state = atomic_fetch_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy); + + uniphier_spi_irq_disable(priv, SSI_IE_RXRE); + + if (!(state & SSI_DMA_TX_BUSY)) + spi_finalize_current_transfer(master); +} + +static void uniphier_spi_dma_txcb(void *data) +{ + struct spi_master *master = data; + struct uniphier_spi_priv *priv = spi_master_get_devdata(master); + int state = atomic_fetch_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy); + + uniphier_spi_irq_disable(priv, SSI_IE_TXRE); + + if (!(state & SSI_DMA_RX_BUSY)) + spi_finalize_current_transfer(master); +} + +static int uniphier_spi_transfer_one_dma(struct spi_master *master, + struct spi_device *spi, + struct spi_transfer *t) +{ + struct uniphier_spi_priv *priv = spi_master_get_devdata(master); + struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL; + int buswidth; + + atomic_set(&priv->dma_busy, 0); + + uniphier_spi_set_fifo_threshold(priv, SSI_FIFO_BURST_NUM); + + if (priv->bits_per_word <= 8) + buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE; + else if (priv->bits_per_word <= 16) + buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES; + else + buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES; + + if (priv->rx_buf) { + struct dma_slave_config rxconf = { + .direction = DMA_DEV_TO_MEM, + .src_addr = priv->base_dma_addr + SSI_RXDR, + .src_addr_width = buswidth, + .src_maxburst = SSI_FIFO_BURST_NUM, + }; + + dmaengine_slave_config(master->dma_rx, &rxconf); + + rxdesc = dmaengine_prep_slave_sg( + master->dma_rx, + t->rx_sg.sgl, t->rx_sg.nents, + DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!rxdesc) + goto out_err_prep; + + rxdesc->callback = uniphier_spi_dma_rxcb; + rxdesc->callback_param = master; + + uniphier_spi_irq_enable(priv, SSI_IE_RXRE); + atomic_or(SSI_DMA_RX_BUSY, &priv->dma_busy); + + dmaengine_submit(rxdesc); + dma_async_issue_pending(master->dma_rx); + } + + if (priv->tx_buf) { + struct dma_slave_config txconf = { + .direction = DMA_MEM_TO_DEV, + .dst_addr = priv->base_dma_addr + SSI_TXDR, + .dst_addr_width = buswidth, + .dst_maxburst = SSI_FIFO_BURST_NUM, + }; + + dmaengine_slave_config(master->dma_tx, &txconf); + + txdesc = dmaengine_prep_slave_sg( + 
master->dma_tx, + t->tx_sg.sgl, t->tx_sg.nents, + DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!txdesc) + goto out_err_prep; + + txdesc->callback = uniphier_spi_dma_txcb; + txdesc->callback_param = master; + + uniphier_spi_irq_enable(priv, SSI_IE_TXRE); + atomic_or(SSI_DMA_TX_BUSY, &priv->dma_busy); + + dmaengine_submit(txdesc); + dma_async_issue_pending(master->dma_tx); + } + + /* signal that we need to wait for completion */ + return (priv->tx_buf || priv->rx_buf); + +out_err_prep: + if (rxdesc) + dmaengine_terminate_sync(master->dma_rx); + + return -EINVAL; +} + static int uniphier_spi_transfer_one_irq(struct spi_master *master, struct spi_device *spi, struct spi_transfer *t) @@ -346,12 +480,12 @@ static int uniphier_spi_transfer_one_irq(struct spi_master *master, uniphier_spi_fill_tx_fifo(priv); - uniphier_spi_irq_enable(spi, SSI_IE_RCIE | SSI_IE_RORIE); + uniphier_spi_irq_enable(priv, SSI_IE_RCIE | SSI_IE_RORIE); time_left = wait_for_completion_timeout(&priv->xfer_done, msecs_to_jiffies(SSI_TIMEOUT_MS)); - uniphier_spi_irq_disable(spi, SSI_IE_RCIE | SSI_IE_RORIE); + uniphier_spi_irq_disable(priv, SSI_IE_RCIE | SSI_IE_RORIE); if (!time_left) { dev_err(dev, "transfer timeout.\n"); @@ -395,6 +529,7 @@ static int uniphier_spi_transfer_one(struct spi_master *master, { struct uniphier_spi_priv *priv = spi_master_get_devdata(master); unsigned long threshold; + bool use_dma; /* Terminate and return success for 0 byte length transfer */ if (!t->len) @@ -402,6 +537,10 @@ static int uniphier_spi_transfer_one(struct spi_master *master, uniphier_spi_setup_transfer(spi, t); + use_dma = master->can_dma ? master->can_dma(master, spi, t) : false; + if (use_dma) + return uniphier_spi_transfer_one_dma(master, spi, t); + /* * If the transfer operation will take longer than * SSI_POLL_TIMEOUT_US, it should use irq. 
@@ -432,6 +571,32 @@ static int uniphier_spi_unprepare_transfer_hardware(struct spi_master *master) return 0; } +static void uniphier_spi_handle_err(struct spi_master *master, + struct spi_message *msg) +{ + struct uniphier_spi_priv *priv = spi_master_get_devdata(master); + u32 val; + + /* stop running spi transfer */ + writel(0, priv->base + SSI_CTL); + + /* reset FIFOs */ + val = SSI_FC_TXFFL | SSI_FC_RXFFL; + writel(val, priv->base + SSI_FC); + + uniphier_spi_irq_disable(priv, SSI_IE_ALL_MASK); + + if (atomic_read(&priv->dma_busy) & SSI_DMA_TX_BUSY) { + dmaengine_terminate_async(master->dma_tx); + atomic_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy); + } + + if (atomic_read(&priv->dma_busy) & SSI_DMA_RX_BUSY) { + dmaengine_terminate_async(master->dma_rx); + atomic_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy); + } +} + static irqreturn_t uniphier_spi_handler(int irq, void *dev_id) { struct uniphier_spi_priv *priv = dev_id; @@ -477,6 +642,9 @@ static int uniphier_spi_probe(struct platform_device *pdev) { struct uniphier_spi_priv *priv; struct spi_master *master; + struct resource *res; + struct dma_slave_caps caps; + u32 dma_tx_burst = 0, dma_rx_burst = 0; unsigned long clk_rate; int irq; int ret; @@ -491,11 +659,13 @@ static int uniphier_spi_probe(struct platform_device *pdev) priv->master = master; priv->is_save_param = false; - priv->base = devm_platform_ioremap_resource(pdev, 0); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(priv->base)) { ret = PTR_ERR(priv->base); goto out_master_put; } + priv->base_dma_addr = res->start; priv->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(priv->clk)) { @@ -538,7 +708,45 @@ static int uniphier_spi_probe(struct platform_device *pdev) = uniphier_spi_prepare_transfer_hardware; master->unprepare_transfer_hardware = uniphier_spi_unprepare_transfer_hardware; + master->handle_err = uniphier_spi_handle_err; + master->can_dma = uniphier_spi_can_dma; + master->num_chipselect = 1; + master->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX; + + master->dma_tx = dma_request_chan(&pdev->dev, "tx"); + if (IS_ERR_OR_NULL(master->dma_tx)) { + if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER) + goto out_disable_clk; + master->dma_tx = NULL; + dma_tx_burst = INT_MAX; + } else { + ret = dma_get_slave_caps(master->dma_tx, &caps); + if (ret) { + dev_err(&pdev->dev, "failed to get TX DMA capacities: %d\n", + ret); + goto out_disable_clk; + } + dma_tx_burst = caps.max_burst; + } + + master->dma_rx = dma_request_chan(&pdev->dev, "rx"); + if (IS_ERR_OR_NULL(master->dma_rx)) { + if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER) + goto out_disable_clk; + master->dma_rx = NULL; + dma_rx_burst = INT_MAX; + } else { + ret = dma_get_slave_caps(master->dma_rx, &caps); + if (ret) { + dev_err(&pdev->dev, "failed to get RX DMA capacities: %d\n", + ret); + goto out_disable_clk; + } + dma_rx_burst = caps.max_burst; + } + + master->max_dma_len = min(dma_tx_burst, dma_rx_burst); ret = devm_spi_register_master(&pdev->dev, master); if (ret) @@ -558,6 +766,11 @@ static int uniphier_spi_remove(struct platform_device *pdev) { struct uniphier_spi_priv *priv = platform_get_drvdata(pdev); + if (priv->master->dma_tx) + dma_release_channel(priv->master->dma_tx); + if (priv->master->dma_rx) + dma_release_channel(priv->master->dma_rx); + clk_disable_unprepare(priv->clk); return 0; diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 8994545367a2..38b4c78df506 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -1674,6 
+1674,13 @@ void spi_finalize_current_message(struct spi_controller *ctlr) } } + if (unlikely(ctlr->ptp_sts_supported)) { + list_for_each_entry(xfer, &mesg->transfers, transfer_list) { + WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped_pre); + WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped_post); + } + } + spi_unmap_msg(ctlr, mesg); if (ctlr->cur_msg_prepared && ctlr->unprepare_message) { @@ -2451,6 +2458,8 @@ static int spi_get_gpio_descs(struct spi_controller *ctlr) int nb, i; struct gpio_desc **cs; struct device *dev = &ctlr->dev; + unsigned long native_cs_mask = 0; + unsigned int num_cs_gpios = 0; nb = gpiod_count(dev, "cs"); ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect); @@ -2492,7 +2501,22 @@ static int spi_get_gpio_descs(struct spi_controller *ctlr) if (!gpioname) return -ENOMEM; gpiod_set_consumer_name(cs[i], gpioname); + num_cs_gpios++; + continue; } + + if (ctlr->max_native_cs && i >= ctlr->max_native_cs) { + dev_err(dev, "Invalid native chip select %d\n", i); + return -EINVAL; + } + native_cs_mask |= BIT(i); + } + + ctlr->unused_native_cs = ffz(native_cs_mask); + if (num_cs_gpios && ctlr->max_native_cs && + ctlr->unused_native_cs >= ctlr->max_native_cs) { + dev_err(dev, "No unused native chip select available\n"); + return -EINVAL; } return 0; diff --git a/drivers/ssb/driver_extif.c b/drivers/ssb/driver_extif.c index 06b68dd6e022..bc275968fcc6 100644 --- a/drivers/ssb/driver_extif.c +++ b/drivers/ssb/driver_extif.c @@ -63,7 +63,7 @@ int ssb_extif_serial_init(struct ssb_extif *extif, struct ssb_serial_port *ports for (i = 0; i < 2; i++) { void __iomem *uart_regs; - uart_regs = ioremap_nocache(SSB_EUART, 16); + uart_regs = ioremap(SSB_EUART, 16); if (uart_regs) { uart_regs += (i * 8); diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c index 6a5622e0ded5..c1186415896b 100644 --- a/drivers/ssb/driver_pcicore.c +++ b/drivers/ssb/driver_pcicore.c @@ -122,7 +122,7 @@ static int ssb_extpci_read_config(struct ssb_pcicore *pc, if (unlikely(!addr)) goto out; err = -ENOMEM; - mmio = ioremap_nocache(addr, len); + mmio = ioremap(addr, len); if (!mmio) goto out; @@ -168,7 +168,7 @@ static int ssb_extpci_write_config(struct ssb_pcicore *pc, if (unlikely(!addr)) goto out; err = -ENOMEM; - mmio = ioremap_nocache(addr, len); + mmio = ioremap(addr, len); if (!mmio) goto out; @@ -382,7 +382,7 @@ static void ssb_pcicore_init_hostmode(struct ssb_pcicore *pc) /* Ok, ready to run, register it to the system. * The following needs change, if we want to port hostmode * to non-MIPS platform. */ - ssb_pcicore_controller.io_map_base = (unsigned long)ioremap_nocache(SSB_PCI_MEM, 0x04000000); + ssb_pcicore_controller.io_map_base = (unsigned long)ioremap(SSB_PCI_MEM, 0x04000000); set_io_port_base(ssb_pcicore_controller.io_map_base); /* Give some time to the PCI controller to configure itself with the new * values. Not waiting at this point causes crashes of the machine. 
*/ diff --git a/drivers/staging/gasket/gasket_core.c b/drivers/staging/gasket/gasket_core.c index cd8be80d2076..be6b50f454b4 100644 --- a/drivers/staging/gasket/gasket_core.c +++ b/drivers/staging/gasket/gasket_core.c @@ -303,7 +303,7 @@ static int gasket_map_pci_bar(struct gasket_dev *gasket_dev, int bar_num) } gasket_dev->bar_data[bar_num].virt_base = - ioremap_nocache(gasket_dev->bar_data[bar_num].phys_base, + ioremap(gasket_dev->bar_data[bar_num].phys_base, gasket_dev->bar_data[bar_num].length_bytes); if (!gasket_dev->bar_data[bar_num].virt_base) { dev_err(gasket_dev->dev, diff --git a/drivers/staging/kpc2000/kpc2000/core.c b/drivers/staging/kpc2000/kpc2000/core.c index 0a23727d0dc3..93cf28febdf6 100644 --- a/drivers/staging/kpc2000/kpc2000/core.c +++ b/drivers/staging/kpc2000/kpc2000/core.c @@ -338,7 +338,7 @@ static int kp2000_pcie_probe(struct pci_dev *pdev, reg_bar_phys_addr = pci_resource_start(pcard->pdev, REG_BAR); reg_bar_phys_len = pci_resource_len(pcard->pdev, REG_BAR); - pcard->regs_bar_base = ioremap_nocache(reg_bar_phys_addr, PAGE_SIZE); + pcard->regs_bar_base = ioremap(reg_bar_phys_addr, PAGE_SIZE); if (!pcard->regs_bar_base) { dev_err(&pcard->pdev->dev, "probe: REG_BAR could not remap memory to virtual space\n"); @@ -367,7 +367,7 @@ static int kp2000_pcie_probe(struct pci_dev *pdev, dma_bar_phys_addr = pci_resource_start(pcard->pdev, DMA_BAR); dma_bar_phys_len = pci_resource_len(pcard->pdev, DMA_BAR); - pcard->dma_bar_base = ioremap_nocache(dma_bar_phys_addr, + pcard->dma_bar_base = ioremap(dma_bar_phys_addr, dma_bar_phys_len); if (!pcard->dma_bar_base) { dev_err(&pcard->pdev->dev, diff --git a/drivers/staging/kpc2000/kpc2000_i2c.c b/drivers/staging/kpc2000/kpc2000_i2c.c index 5460bf973c9c..592099a1fca5 100644 --- a/drivers/staging/kpc2000/kpc2000_i2c.c +++ b/drivers/staging/kpc2000/kpc2000_i2c.c @@ -659,7 +659,7 @@ static int pi2c_probe(struct platform_device *pldev) if (!res) return -ENXIO; - priv->smba = (unsigned long)devm_ioremap_nocache(&pldev->dev, + priv->smba = (unsigned long)devm_ioremap(&pldev->dev, res->start, resource_size(res)); if (!priv->smba) diff --git a/drivers/staging/kpc2000/kpc2000_spi.c b/drivers/staging/kpc2000/kpc2000_spi.c index 8becf972af9c..1c360daa703d 100644 --- a/drivers/staging/kpc2000/kpc2000_spi.c +++ b/drivers/staging/kpc2000/kpc2000_spi.c @@ -464,7 +464,7 @@ kp_spi_probe(struct platform_device *pldev) goto free_master; } - kpspi->base = devm_ioremap_nocache(&pldev->dev, r->start, + kpspi->base = devm_ioremap(&pldev->dev, r->start, resource_size(r)); status = spi_register_master(master); diff --git a/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c b/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c index a05ae6d40db9..ec79a8500caf 100644 --- a/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c +++ b/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c @@ -122,7 +122,7 @@ int kpc_dma_probe(struct platform_device *pldev) rv = -ENXIO; goto err_kfree; } - ldev->eng_regs = ioremap_nocache(r->start, resource_size(r)); + ldev->eng_regs = ioremap(r->start, resource_size(r)); if (!ldev->eng_regs) { dev_err(&ldev->pldev->dev, "%s: failed to ioremap engine regs!\n", __func__); rv = -ENXIO; diff --git a/drivers/staging/media/allegro-dvt/allegro-core.c b/drivers/staging/media/allegro-dvt/allegro-core.c index 6f0cd0784786..3be41698df4c 100644 --- a/drivers/staging/media/allegro-dvt/allegro-core.c +++ b/drivers/staging/media/allegro-dvt/allegro-core.c @@ -2914,7 +2914,7 @@ static int allegro_probe(struct platform_device *pdev) "regs resource missing from 
device tree\n"); return -EINVAL; } - regs = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res)); + regs = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (IS_ERR(regs)) { dev_err(&pdev->dev, "failed to map registers\n"); return PTR_ERR(regs); @@ -2932,7 +2932,7 @@ static int allegro_probe(struct platform_device *pdev) "sram resource missing from device tree\n"); return -EINVAL; } - sram_regs = devm_ioremap_nocache(&pdev->dev, + sram_regs = devm_ioremap(&pdev->dev, sram_res->start, resource_size(sram_res)); if (IS_ERR(sram_regs)) { diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c index 24d20f000435..ed5440d51332 100644 --- a/drivers/staging/qlge/qlge_main.c +++ b/drivers/staging/qlge/qlge_main.c @@ -4455,7 +4455,7 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev, pdev->needs_freset = 1; pci_save_state(pdev); qdev->reg_base = - ioremap_nocache(pci_resource_start(pdev, 1), + ioremap(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1)); if (!qdev->reg_base) { dev_err(&pdev->dev, "Register mapping failed.\n"); @@ -4465,7 +4465,7 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev, qdev->doorbell_area_size = pci_resource_len(pdev, 3); qdev->doorbell_area = - ioremap_nocache(pci_resource_start(pdev, 3), + ioremap(pci_resource_start(pdev, 3), pci_resource_len(pdev, 3)); if (!qdev->doorbell_area) { dev_err(&pdev->dev, "Doorbell register mapping failed.\n"); diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c index a51d627284d1..60c652793f93 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c @@ -2463,7 +2463,7 @@ static int _rtl92e_pci_probe(struct pci_dev *pdev, } - ioaddr = (unsigned long)ioremap_nocache(pmem_start, pmem_len); + ioaddr = (unsigned long)ioremap(pmem_start, pmem_len); if (ioaddr == (unsigned long)NULL) { netdev_err(dev, "ioremap failed!"); goto err_rel_mem; diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c index cb95ad6fa4f9..fbb42e5258fd 100644 --- a/drivers/staging/rts5208/rtsx.c +++ b/drivers/staging/rts5208/rtsx.c @@ -858,7 +858,7 @@ static int rtsx_probe(struct pci_dev *pci, dev_info(&pci->dev, "Resource length: 0x%x\n", (unsigned int)pci_resource_len(pci, 0)); dev->addr = pci_resource_start(pci, 0); - dev->remap_addr = ioremap_nocache(dev->addr, pci_resource_len(pci, 0)); + dev->remap_addr = ioremap(dev->addr, pci_resource_len(pci, 0)); if (!dev->remap_addr) { dev_err(&pci->dev, "ioremap error\n"); err = -ENXIO; diff --git a/drivers/staging/sm750fb/sm750_hw.c b/drivers/staging/sm750fb/sm750_hw.c index ea1d3d4efbc2..b8d60701f898 100644 --- a/drivers/staging/sm750fb/sm750_hw.c +++ b/drivers/staging/sm750fb/sm750_hw.c @@ -50,7 +50,7 @@ int hw_sm750_map(struct sm750_dev *sm750_dev, struct pci_dev *pdev) } /* now map mmio and vidmem */ - sm750_dev->pvReg = ioremap_nocache(sm750_dev->vidreg_start, + sm750_dev->pvReg = ioremap(sm750_dev->vidreg_start, sm750_dev->vidreg_size); if (!sm750_dev->pvReg) { pr_err("mmio failed\n"); diff --git a/drivers/staging/uwb/whc-rc.c b/drivers/staging/uwb/whc-rc.c index 34020ed351ab..a5ab255d7d36 100644 --- a/drivers/staging/uwb/whc-rc.c +++ b/drivers/staging/uwb/whc-rc.c @@ -216,11 +216,11 @@ int whcrc_setup_rc_umc(struct whcrc *whcrc) goto error_request_region; } - whcrc->rc_base = ioremap_nocache(whcrc->area, whcrc->rc_len); + whcrc->rc_base = ioremap(whcrc->area, whcrc->rc_len); if (whcrc->rc_base == 
NULL) { dev_err(dev, "can't ioremap registers (%zu bytes @ 0x%lx): %d\n", whcrc->rc_len, whcrc->area, result); - goto error_ioremap_nocache; + goto error_ioremap; } result = request_irq(umc_dev->irq, whcrc_irq_cb, IRQF_SHARED, @@ -254,7 +254,7 @@ error_cmd_buffer: free_irq(umc_dev->irq, whcrc); error_request_irq: iounmap(whcrc->rc_base); -error_ioremap_nocache: +error_ioremap: release_mem_region(whcrc->area, whcrc->rc_len); error_request_region: return result; diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 7251a87bb576..b94ed4e30770 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -4149,9 +4149,6 @@ int iscsit_close_connection( iscsit_stop_nopin_response_timer(conn); iscsit_stop_nopin_timer(conn); - if (conn->conn_transport->iscsit_wait_conn) - conn->conn_transport->iscsit_wait_conn(conn); - /* * During Connection recovery drop unacknowledged out of order * commands for this connection, and prepare the other commands @@ -4237,6 +4234,9 @@ int iscsit_close_connection( target_sess_cmd_list_set_waiting(sess->se_sess); target_wait_for_sess_cmds(sess->se_sess); + if (conn->conn_transport->iscsit_wait_conn) + conn->conn_transport->iscsit_wait_conn(conn); + ahash_request_free(conn->conn_tx_hash); if (conn->conn_rx_hash) { struct crypto_ahash *tfm; diff --git a/drivers/tc/tc.c b/drivers/tc/tc.c index cf3fad2cb871..c5b17dd8f587 100644 --- a/drivers/tc/tc.c +++ b/drivers/tc/tc.c @@ -47,7 +47,7 @@ static void __init tc_bus_add_devices(struct tc_bus *tbus) for (slot = 0; slot < tbus->num_tcslots; slot++) { slotaddr = tbus->slot_base + slot * slotsize; extslotaddr = tbus->ext_slot_base + slot * extslotsize; - module = ioremap_nocache(slotaddr, slotsize); + module = ioremap(slotaddr, slotsize); BUG_ON(!module); offset = TC_OLDCARD; diff --git a/drivers/tee/Kconfig b/drivers/tee/Kconfig index 676ffcb64985..8da63f38e6bd 100644 --- a/drivers/tee/Kconfig +++ b/drivers/tee/Kconfig @@ -2,7 +2,7 @@ # Generic Trusted Execution Environment Configuration config TEE tristate "Trusted Execution Environment support" - depends on HAVE_ARM_SMCCC || COMPILE_TEST + depends on HAVE_ARM_SMCCC || COMPILE_TEST || CPU_SUP_AMD select DMA_SHARED_BUFFER select GENERIC_ALLOCATOR help @@ -14,7 +14,7 @@ if TEE menu "TEE drivers" source "drivers/tee/optee/Kconfig" - +source "drivers/tee/amdtee/Kconfig" endmenu endif diff --git a/drivers/tee/Makefile b/drivers/tee/Makefile index 21f51fd88b07..68da044afbfa 100644 --- a/drivers/tee/Makefile +++ b/drivers/tee/Makefile @@ -4,3 +4,4 @@ tee-objs += tee_core.o tee-objs += tee_shm.o tee-objs += tee_shm_pool.o obj-$(CONFIG_OPTEE) += optee/ +obj-$(CONFIG_AMDTEE) += amdtee/ diff --git a/drivers/tee/amdtee/Kconfig b/drivers/tee/amdtee/Kconfig new file mode 100644 index 000000000000..4e32b6413b41 --- /dev/null +++ b/drivers/tee/amdtee/Kconfig @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: MIT +# AMD-TEE Trusted Execution Environment Configuration +config AMDTEE + tristate "AMD-TEE" + default m + depends on CRYPTO_DEV_SP_PSP + help + This implements AMD's Trusted Execution Environment (TEE) driver. 
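The Kconfig entry above ties AMD-TEE to the PSP interface (CRYPTO_DEV_SP_PSP). As copy_ta_binary() in drivers/tee/amdtee/core.c further down in this patch shows, the driver fetches Trusted Applications through request_firmware(), composing the file name from TA_LOAD_PATH ("/amdtee") and the TA UUID. A minimal user-space sketch of that naming scheme; the UUID values are invented for the example, and /lib/firmware is only the usual default firmware search path:

/* Illustrative only: mirrors the snprintf() format string used by
 * copy_ta_binary() in drivers/tee/amdtee/core.c. */
#include <stdio.h>

int main(void)
{
	unsigned int lo = 0x12345678, mid = 0x9abc, hi_ver = 0xdef0;
	unsigned char seq[8] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 };
	char fw_name[60];	/* TA_PATH_MAX */

	snprintf(fw_name, sizeof(fw_name),
		 "%s/%08x-%04x-%04x-%02x%02x%02x%02x%02x%02x%02x%02x.bin",
		 "/amdtee", lo, mid, hi_ver,
		 seq[0], seq[1], seq[2], seq[3],
		 seq[4], seq[5], seq[6], seq[7]);
	/* Prints /amdtee/12345678-9abc-def0-0102030405060708.bin; with the
	 * default search path the TA binary would live under
	 * /lib/firmware/amdtee/. */
	printf("%s\n", fw_name);
	return 0;
}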
diff --git a/drivers/tee/amdtee/Makefile b/drivers/tee/amdtee/Makefile new file mode 100644 index 000000000000..ff1485266117 --- /dev/null +++ b/drivers/tee/amdtee/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: MIT +obj-$(CONFIG_AMDTEE) += amdtee.o +amdtee-objs += core.o +amdtee-objs += call.o +amdtee-objs += shm_pool.o diff --git a/drivers/tee/amdtee/amdtee_if.h b/drivers/tee/amdtee/amdtee_if.h new file mode 100644 index 000000000000..ff48c3e47375 --- /dev/null +++ b/drivers/tee/amdtee/amdtee_if.h @@ -0,0 +1,183 @@ +/* SPDX-License-Identifier: MIT */ + +/* + * Copyright 2019 Advanced Micro Devices, Inc. + */ + +/* + * This file has definitions related to Host and AMD-TEE Trusted OS interface. + * These definitions must match the definitions on the TEE side. + */ + +#ifndef AMDTEE_IF_H +#define AMDTEE_IF_H + +#include <linux/types.h> + +/***************************************************************************** + ** TEE Param + ******************************************************************************/ +#define TEE_MAX_PARAMS 4 + +/** + * struct memref - memory reference structure + * @buf_id: buffer ID of the buffer mapped by TEE_CMD_ID_MAP_SHARED_MEM + * @offset: offset in bytes from beginning of the buffer + * @size: data size in bytes + */ +struct memref { + u32 buf_id; + u32 offset; + u32 size; +}; + +struct value { + u32 a; + u32 b; +}; + +/* + * Parameters passed to open_session or invoke_command + */ +union tee_op_param { + struct memref mref; + struct value val; +}; + +struct tee_operation { + u32 param_types; + union tee_op_param params[TEE_MAX_PARAMS]; +}; + +/* Must be same as in GP TEE specification */ +#define TEE_OP_PARAM_TYPE_NONE 0 +#define TEE_OP_PARAM_TYPE_VALUE_INPUT 1 +#define TEE_OP_PARAM_TYPE_VALUE_OUTPUT 2 +#define TEE_OP_PARAM_TYPE_VALUE_INOUT 3 +#define TEE_OP_PARAM_TYPE_INVALID 4 +#define TEE_OP_PARAM_TYPE_MEMREF_INPUT 5 +#define TEE_OP_PARAM_TYPE_MEMREF_OUTPUT 6 +#define TEE_OP_PARAM_TYPE_MEMREF_INOUT 7 + +#define TEE_PARAM_TYPE_GET(t, i) (((t) >> ((i) * 4)) & 0xF) +#define TEE_PARAM_TYPES(t0, t1, t2, t3) \ + ((t0) | ((t1) << 4) | ((t2) << 8) | ((t3) << 12)) + +/***************************************************************************** + ** TEE Commands + *****************************************************************************/ + +/* + * The shared memory between rich world and secure world may be physically + * non-contiguous. Below structures are meant to describe a shared memory region + * via scatter/gather (sg) list + */ + +/** + * struct tee_sg_desc - sg descriptor for a physically contiguous buffer + * @low_addr: [in] bits[31:0] of buffer's physical address. Must be 4KB aligned + * @hi_addr: [in] bits[63:32] of the buffer's physical address + * @size: [in] size in bytes (must be multiple of 4KB) + */ +struct tee_sg_desc { + u32 low_addr; + u32 hi_addr; + u32 size; +}; + +/** + * struct tee_sg_list - structure describing a scatter/gather list + * @count: [in] number of sg descriptors + * @size: [in] total size of all buffers in the list. 
Must be multiple of 4KB + * @buf: [in] list of sg buffer descriptors + */ +#define TEE_MAX_SG_DESC 64 +struct tee_sg_list { + u32 count; + u32 size; + struct tee_sg_desc buf[TEE_MAX_SG_DESC]; +}; + +/** + * struct tee_cmd_map_shared_mem - command to map shared memory + * @buf_id: [out] return buffer ID value + * @sg_list: [in] list describing memory to be mapped + */ +struct tee_cmd_map_shared_mem { + u32 buf_id; + struct tee_sg_list sg_list; +}; + +/** + * struct tee_cmd_unmap_shared_mem - command to unmap shared memory + * @buf_id: [in] buffer ID of memory to be unmapped + */ +struct tee_cmd_unmap_shared_mem { + u32 buf_id; +}; + +/** + * struct tee_cmd_load_ta - load Trusted Application (TA) binary into TEE + * @low_addr: [in] bits [31:0] of the physical address of the TA binary + * @hi_addr: [in] bits [63:32] of the physical address of the TA binary + * @size: [in] size of TA binary in bytes + * @ta_handle: [out] return handle of the loaded TA + */ +struct tee_cmd_load_ta { + u32 low_addr; + u32 hi_addr; + u32 size; + u32 ta_handle; +}; + +/** + * struct tee_cmd_unload_ta - command to unload TA binary from TEE environment + * @ta_handle: [in] handle of the loaded TA to be unloaded + */ +struct tee_cmd_unload_ta { + u32 ta_handle; +}; + +/** + * struct tee_cmd_open_session - command to call TA_OpenSessionEntryPoint in TA + * @ta_handle: [in] handle of the loaded TA + * @session_info: [out] pointer to TA allocated session data + * @op: [in/out] operation parameters + * @return_origin: [out] origin of return code after TEE processing + */ +struct tee_cmd_open_session { + u32 ta_handle; + u32 session_info; + struct tee_operation op; + u32 return_origin; +}; + +/** + * struct tee_cmd_close_session - command to call TA_CloseSessionEntryPoint() + * in TA + * @ta_handle: [in] handle of the loaded TA + * @session_info: [in] pointer to TA allocated session data + */ +struct tee_cmd_close_session { + u32 ta_handle; + u32 session_info; +}; + +/** + * struct tee_cmd_invoke_cmd - command to call TA_InvokeCommandEntryPoint() in + * TA + * @ta_handle: [in] handle of the loaded TA + * @cmd_id: [in] TA command ID + * @session_info: [in] pointer to TA allocated session data + * @op: [in/out] operation parameters + * @return_origin: [out] origin of return code after TEE processing + */ +struct tee_cmd_invoke_cmd { + u32 ta_handle; + u32 cmd_id; + u32 session_info; + struct tee_operation op; + u32 return_origin; +}; + +#endif /*AMDTEE_IF_H*/ diff --git a/drivers/tee/amdtee/amdtee_private.h b/drivers/tee/amdtee/amdtee_private.h new file mode 100644 index 000000000000..d7f798c3394b --- /dev/null +++ b/drivers/tee/amdtee/amdtee_private.h @@ -0,0 +1,159 @@ +/* SPDX-License-Identifier: MIT */ + +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ */ + +#ifndef AMDTEE_PRIVATE_H +#define AMDTEE_PRIVATE_H + +#include <linux/mutex.h> +#include <linux/spinlock.h> +#include <linux/tee_drv.h> +#include <linux/kref.h> +#include <linux/types.h> +#include "amdtee_if.h" + +#define DRIVER_NAME "amdtee" +#define DRIVER_AUTHOR "AMD-TEE Linux driver team" + +/* Some GlobalPlatform error codes used in this driver */ +#define TEEC_SUCCESS 0x00000000 +#define TEEC_ERROR_GENERIC 0xFFFF0000 +#define TEEC_ERROR_BAD_PARAMETERS 0xFFFF0006 +#define TEEC_ERROR_COMMUNICATION 0xFFFF000E + +#define TEEC_ORIGIN_COMMS 0x00000002 + +/* Maximum number of sessions which can be opened with a Trusted Application */ +#define TEE_NUM_SESSIONS 32 + +#define TA_LOAD_PATH "/amdtee" +#define TA_PATH_MAX 60 + +/** + * struct amdtee - main service struct + * @teedev: client device + * @pool: shared memory pool + */ +struct amdtee { + struct tee_device *teedev; + struct tee_shm_pool *pool; +}; + +/** + * struct amdtee_session - Trusted Application (TA) session related information. + * @ta_handle: handle to Trusted Application (TA) loaded in TEE environment + * @refcount: counter to keep track of sessions opened for the TA instance + * @session_info: an array pointing to TA allocated session data. + * @sess_mask: session usage bit-mask. If a particular bit is set, then the + * corresponding @session_info entry is in use or valid. + * + * Session structure is updated on open_session and this information is used for + * subsequent operations with the Trusted Application. + */ +struct amdtee_session { + struct list_head list_node; + u32 ta_handle; + struct kref refcount; + u32 session_info[TEE_NUM_SESSIONS]; + DECLARE_BITMAP(sess_mask, TEE_NUM_SESSIONS); + spinlock_t lock; /* synchronizes access to @sess_mask */ +}; + +/** + * struct amdtee_context_data - AMD-TEE driver context data + * @sess_list: Keeps track of sessions opened in current TEE context + */ +struct amdtee_context_data { + struct list_head sess_list; +}; + +struct amdtee_driver_data { + struct amdtee *amdtee; +}; + +struct shmem_desc { + void *kaddr; + u64 size; +}; + +/** + * struct amdtee_shm_data - Shared memory data + * @kaddr: Kernel virtual address of shared memory + * @buf_id: Buffer id of memory mapped by TEE_CMD_ID_MAP_SHARED_MEM + */ +struct amdtee_shm_data { + struct list_head shm_node; + void *kaddr; + u32 buf_id; +}; + +struct amdtee_shm_context { + struct list_head shmdata_list; +}; + +#define LOWER_TWO_BYTE_MASK 0x0000FFFF + +/** + * set_session_id() - Sets the session identifier. + * @ta_handle: [in] handle of the loaded Trusted Application (TA) + * @session_index: [in] Session index. Range: 0 to (TEE_NUM_SESSIONS - 1). + * @session: [out] Pointer to session id + * + * Lower two bytes of the session identifier represents the TA handle and the + * upper two bytes is session index. 
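+ * + * For example (illustrative values): ta_handle 0x1234 and session_index 2 + * give *session == 0x00021234; get_ta_handle() and get_session_index() + * below recover the two fields.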
+ */ +static inline void set_session_id(u32 ta_handle, u32 session_index, + u32 *session) +{ + *session = (session_index << 16) | (LOWER_TWO_BYTE_MASK & ta_handle); +} + +static inline u32 get_ta_handle(u32 session) +{ + return session & LOWER_TWO_BYTE_MASK; +} + +static inline u32 get_session_index(u32 session) +{ + return (session >> 16) & LOWER_TWO_BYTE_MASK; +} + +int amdtee_open_session(struct tee_context *ctx, + struct tee_ioctl_open_session_arg *arg, + struct tee_param *param); + +int amdtee_close_session(struct tee_context *ctx, u32 session); + +int amdtee_invoke_func(struct tee_context *ctx, + struct tee_ioctl_invoke_arg *arg, + struct tee_param *param); + +int amdtee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session); + +int amdtee_map_shmem(struct tee_shm *shm); + +void amdtee_unmap_shmem(struct tee_shm *shm); + +int handle_load_ta(void *data, u32 size, + struct tee_ioctl_open_session_arg *arg); + +int handle_unload_ta(u32 ta_handle); + +int handle_open_session(struct tee_ioctl_open_session_arg *arg, u32 *info, + struct tee_param *p); + +int handle_close_session(u32 ta_handle, u32 info); + +int handle_map_shmem(u32 count, struct shmem_desc *start, u32 *buf_id); + +void handle_unmap_shmem(u32 buf_id); + +int handle_invoke_cmd(struct tee_ioctl_invoke_arg *arg, u32 sinfo, + struct tee_param *p); + +struct tee_shm_pool *amdtee_config_shm(void); + +u32 get_buffer_id(struct tee_shm *shm); +#endif /*AMDTEE_PRIVATE_H*/ diff --git a/drivers/tee/amdtee/call.c b/drivers/tee/amdtee/call.c new file mode 100644 index 000000000000..096dd4d92d39 --- /dev/null +++ b/drivers/tee/amdtee/call.c @@ -0,0 +1,373 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2019 Advanced Micro Devices, Inc. + */ + +#include <linux/device.h> +#include <linux/tee.h> +#include <linux/tee_drv.h> +#include <linux/psp-tee.h> +#include <linux/slab.h> +#include <linux/psp-sev.h> +#include "amdtee_if.h" +#include "amdtee_private.h" + +static int tee_params_to_amd_params(struct tee_param *tee, u32 count, + struct tee_operation *amd) +{ + int i, ret = 0; + u32 type; + + if (!count) + return 0; + + if (!tee || !amd || count > TEE_MAX_PARAMS) + return -EINVAL; + + amd->param_types = 0; + for (i = 0; i < count; i++) { + /* AMD TEE does not support meta parameter */ + if (tee[i].attr > TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT) + return -EINVAL; + + amd->param_types |= ((tee[i].attr & 0xF) << i * 4); + } + + for (i = 0; i < count; i++) { + type = TEE_PARAM_TYPE_GET(amd->param_types, i); + pr_debug("%s: type[%d] = 0x%x\n", __func__, i, type); + + if (type == TEE_OP_PARAM_TYPE_INVALID) + return -EINVAL; + + if (type == TEE_OP_PARAM_TYPE_NONE) + continue; + + /* It is assumed that all values are within 2^32-1 */ + if (type > TEE_OP_PARAM_TYPE_VALUE_INOUT) { + u32 buf_id = get_buffer_id(tee[i].u.memref.shm); + + amd->params[i].mref.buf_id = buf_id; + amd->params[i].mref.offset = tee[i].u.memref.shm_offs; + amd->params[i].mref.size = tee[i].u.memref.size; + pr_debug("%s: bufid[%d] = 0x%x, offset[%d] = 0x%x, size[%d] = 0x%x\n", + __func__, + i, amd->params[i].mref.buf_id, + i, amd->params[i].mref.offset, + i, amd->params[i].mref.size); + } else { + if (tee[i].u.value.c) + pr_warn("%s: Discarding value c", __func__); + + amd->params[i].val.a = tee[i].u.value.a; + amd->params[i].val.b = tee[i].u.value.b; + pr_debug("%s: a[%d] = 0x%x, b[%d] = 0x%x\n", __func__, + i, amd->params[i].val.a, + i, amd->params[i].val.b); + } + } + return ret; +} + +static int amd_params_to_tee_params(struct tee_param *tee, u32 count, + struct 
tee_operation *amd) +{ + int i, ret = 0; + u32 type; + + if (!count) + return 0; + + if (!tee || !amd || count > TEE_MAX_PARAMS) + return -EINVAL; + + /* Assumes amd->param_types is valid */ + for (i = 0; i < count; i++) { + type = TEE_PARAM_TYPE_GET(amd->param_types, i); + pr_debug("%s: type[%d] = 0x%x\n", __func__, i, type); + + if (type == TEE_OP_PARAM_TYPE_INVALID || + type > TEE_OP_PARAM_TYPE_MEMREF_INOUT) + return -EINVAL; + + if (type == TEE_OP_PARAM_TYPE_NONE || + type == TEE_OP_PARAM_TYPE_VALUE_INPUT || + type == TEE_OP_PARAM_TYPE_MEMREF_INPUT) + continue; + + /* + * It is assumed that buf_id remains unchanged for + * both open_session and invoke_cmd call + */ + if (type > TEE_OP_PARAM_TYPE_MEMREF_INPUT) { + tee[i].u.memref.shm_offs = amd->params[i].mref.offset; + tee[i].u.memref.size = amd->params[i].mref.size; + pr_debug("%s: bufid[%d] = 0x%x, offset[%d] = 0x%x, size[%d] = 0x%x\n", + __func__, + i, amd->params[i].mref.buf_id, + i, amd->params[i].mref.offset, + i, amd->params[i].mref.size); + } else { + /* field 'c' not supported by AMD TEE */ + tee[i].u.value.a = amd->params[i].val.a; + tee[i].u.value.b = amd->params[i].val.b; + tee[i].u.value.c = 0; + pr_debug("%s: a[%d] = 0x%x, b[%d] = 0x%x\n", + __func__, + i, amd->params[i].val.a, + i, amd->params[i].val.b); + } + } + return ret; +} + +int handle_unload_ta(u32 ta_handle) +{ + struct tee_cmd_unload_ta cmd = {0}; + u32 status; + int ret; + + if (!ta_handle) + return -EINVAL; + + cmd.ta_handle = ta_handle; + + ret = psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA, (void *)&cmd, + sizeof(cmd), &status); + if (!ret && status != 0) { + pr_err("unload ta: status = 0x%x\n", status); + ret = -EBUSY; + } + + return ret; +} + +int handle_close_session(u32 ta_handle, u32 info) +{ + struct tee_cmd_close_session cmd = {0}; + u32 status; + int ret; + + if (ta_handle == 0) + return -EINVAL; + + cmd.ta_handle = ta_handle; + cmd.session_info = info; + + ret = psp_tee_process_cmd(TEE_CMD_ID_CLOSE_SESSION, (void *)&cmd, + sizeof(cmd), &status); + if (!ret && status != 0) { + pr_err("close session: status = 0x%x\n", status); + ret = -EBUSY; + } + + return ret; +} + +void handle_unmap_shmem(u32 buf_id) +{ + struct tee_cmd_unmap_shared_mem cmd = {0}; + u32 status; + int ret; + + cmd.buf_id = buf_id; + + ret = psp_tee_process_cmd(TEE_CMD_ID_UNMAP_SHARED_MEM, (void *)&cmd, + sizeof(cmd), &status); + if (!ret) + pr_debug("unmap shared memory: buf_id %u status = 0x%x\n", + buf_id, status); +} + +int handle_invoke_cmd(struct tee_ioctl_invoke_arg *arg, u32 sinfo, + struct tee_param *p) +{ + struct tee_cmd_invoke_cmd cmd = {0}; + int ret; + + if (!arg || (!p && arg->num_params)) + return -EINVAL; + + arg->ret_origin = TEEC_ORIGIN_COMMS; + + if (arg->session == 0) { + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + return -EINVAL; + } + + ret = tee_params_to_amd_params(p, arg->num_params, &cmd.op); + if (ret) { + pr_err("invalid Params. 
Abort invoke command\n"); + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + return ret; + } + + cmd.ta_handle = get_ta_handle(arg->session); + cmd.cmd_id = arg->func; + cmd.session_info = sinfo; + + ret = psp_tee_process_cmd(TEE_CMD_ID_INVOKE_CMD, (void *)&cmd, + sizeof(cmd), &arg->ret); + if (ret) { + arg->ret = TEEC_ERROR_COMMUNICATION; + } else { + ret = amd_params_to_tee_params(p, arg->num_params, &cmd.op); + if (unlikely(ret)) { + pr_err("invoke command: failed to copy output\n"); + arg->ret = TEEC_ERROR_GENERIC; + return ret; + } + arg->ret_origin = cmd.return_origin; + pr_debug("invoke command: RO = 0x%x ret = 0x%x\n", + arg->ret_origin, arg->ret); + } + + return ret; +} + +int handle_map_shmem(u32 count, struct shmem_desc *start, u32 *buf_id) +{ + struct tee_cmd_map_shared_mem *cmd; + phys_addr_t paddr; + int ret, i; + u32 status; + + if (!count || !start || !buf_id) + return -EINVAL; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + /* Size must be page aligned */ + for (i = 0; i < count ; i++) { + if (!start[i].kaddr || (start[i].size & (PAGE_SIZE - 1))) { + ret = -EINVAL; + goto free_cmd; + } + + if ((u64)start[i].kaddr & (PAGE_SIZE - 1)) { + pr_err("map shared memory: page unaligned. addr 0x%llx", + (u64)start[i].kaddr); + ret = -EINVAL; + goto free_cmd; + } + } + + cmd->sg_list.count = count; + + /* Create buffer list */ + for (i = 0; i < count ; i++) { + paddr = __psp_pa(start[i].kaddr); + cmd->sg_list.buf[i].hi_addr = upper_32_bits(paddr); + cmd->sg_list.buf[i].low_addr = lower_32_bits(paddr); + cmd->sg_list.buf[i].size = start[i].size; + cmd->sg_list.size += cmd->sg_list.buf[i].size; + + pr_debug("buf[%d]:hi addr = 0x%x\n", i, + cmd->sg_list.buf[i].hi_addr); + pr_debug("buf[%d]:low addr = 0x%x\n", i, + cmd->sg_list.buf[i].low_addr); + pr_debug("buf[%d]:size = 0x%x\n", i, cmd->sg_list.buf[i].size); + pr_debug("list size = 0x%x\n", cmd->sg_list.size); + } + + *buf_id = 0; + + ret = psp_tee_process_cmd(TEE_CMD_ID_MAP_SHARED_MEM, (void *)cmd, + sizeof(*cmd), &status); + if (!ret && !status) { + *buf_id = cmd->buf_id; + pr_debug("mapped buffer ID = 0x%x\n", *buf_id); + } else { + pr_err("map shared memory: status = 0x%x\n", status); + ret = -ENOMEM; + } + +free_cmd: + kfree(cmd); + + return ret; +} + +int handle_open_session(struct tee_ioctl_open_session_arg *arg, u32 *info, + struct tee_param *p) +{ + struct tee_cmd_open_session cmd = {0}; + int ret; + + if (!arg || !info || (!p && arg->num_params)) + return -EINVAL; + + arg->ret_origin = TEEC_ORIGIN_COMMS; + + if (arg->session == 0) { + arg->ret = TEEC_ERROR_GENERIC; + return -EINVAL; + } + + ret = tee_params_to_amd_params(p, arg->num_params, &cmd.op); + if (ret) { + pr_err("invalid Params. 
Abort open session\n"); + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + return ret; + } + + cmd.ta_handle = get_ta_handle(arg->session); + *info = 0; + + ret = psp_tee_process_cmd(TEE_CMD_ID_OPEN_SESSION, (void *)&cmd, + sizeof(cmd), &arg->ret); + if (ret) { + arg->ret = TEEC_ERROR_COMMUNICATION; + } else { + ret = amd_params_to_tee_params(p, arg->num_params, &cmd.op); + if (unlikely(ret)) { + pr_err("open session: failed to copy output\n"); + arg->ret = TEEC_ERROR_GENERIC; + return ret; + } + arg->ret_origin = cmd.return_origin; + *info = cmd.session_info; + pr_debug("open session: session info = 0x%x\n", *info); + } + + pr_debug("open session: ret = 0x%x RO = 0x%x\n", arg->ret, + arg->ret_origin); + + return ret; +} + +int handle_load_ta(void *data, u32 size, struct tee_ioctl_open_session_arg *arg) +{ + struct tee_cmd_load_ta cmd = {0}; + phys_addr_t blob; + int ret; + + if (size == 0 || !data || !arg) + return -EINVAL; + + blob = __psp_pa(data); + if (blob & (PAGE_SIZE - 1)) { + pr_err("load TA: page unaligned. blob 0x%llx", blob); + return -EINVAL; + } + + cmd.hi_addr = upper_32_bits(blob); + cmd.low_addr = lower_32_bits(blob); + cmd.size = size; + + ret = psp_tee_process_cmd(TEE_CMD_ID_LOAD_TA, (void *)&cmd, + sizeof(cmd), &arg->ret); + if (ret) { + arg->ret_origin = TEEC_ORIGIN_COMMS; + arg->ret = TEEC_ERROR_COMMUNICATION; + } else { + set_session_id(cmd.ta_handle, 0, &arg->session); + } + + pr_debug("load TA: TA handle = 0x%x, RO = 0x%x, ret = 0x%x\n", + cmd.ta_handle, arg->ret_origin, arg->ret); + + return 0; +} diff --git a/drivers/tee/amdtee/core.c b/drivers/tee/amdtee/core.c new file mode 100644 index 000000000000..6370bb55f512 --- /dev/null +++ b/drivers/tee/amdtee/core.c @@ -0,0 +1,518 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
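+ * + * This file implements the TEE context, session and shared-memory + * bookkeeping for the driver, on top of the handle_*() helpers in call.c.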
+ */ + +#include <linux/errno.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/device.h> +#include <linux/tee_drv.h> +#include <linux/types.h> +#include <linux/mm.h> +#include <linux/uaccess.h> +#include <linux/firmware.h> +#include "amdtee_private.h" +#include "../tee_private.h" +#include <linux/psp-tee.h> + +static struct amdtee_driver_data *drv_data; +static DEFINE_MUTEX(session_list_mutex); +static struct amdtee_shm_context shmctx; + +static void amdtee_get_version(struct tee_device *teedev, + struct tee_ioctl_version_data *vers) +{ + struct tee_ioctl_version_data v = { + .impl_id = TEE_IMPL_ID_AMDTEE, + .impl_caps = 0, + .gen_caps = TEE_GEN_CAP_GP, + }; + *vers = v; +} + +static int amdtee_open(struct tee_context *ctx) +{ + struct amdtee_context_data *ctxdata; + + ctxdata = kzalloc(sizeof(*ctxdata), GFP_KERNEL); + if (!ctxdata) + return -ENOMEM; + + INIT_LIST_HEAD(&ctxdata->sess_list); + INIT_LIST_HEAD(&shmctx.shmdata_list); + + ctx->data = ctxdata; + return 0; +} + +static void release_session(struct amdtee_session *sess) +{ + int i; + + /* Close any open session */ + for (i = 0; i < TEE_NUM_SESSIONS; ++i) { + /* Check if session entry 'i' is valid */ + if (!test_bit(i, sess->sess_mask)) + continue; + + handle_close_session(sess->ta_handle, sess->session_info[i]); + } + + /* Unload Trusted Application once all sessions are closed */ + handle_unload_ta(sess->ta_handle); + kfree(sess); +} + +static void amdtee_release(struct tee_context *ctx) +{ + struct amdtee_context_data *ctxdata = ctx->data; + + if (!ctxdata) + return; + + while (true) { + struct amdtee_session *sess; + + sess = list_first_entry_or_null(&ctxdata->sess_list, + struct amdtee_session, + list_node); + + if (!sess) + break; + + list_del(&sess->list_node); + release_session(sess); + } + kfree(ctxdata); + + ctx->data = NULL; +} + +/** + * alloc_session() - Allocate a session structure + * @ctxdata: TEE Context data structure + * @session: Session ID for which 'struct amdtee_session' structure is to be + * allocated. + * + * Scans the TEE context's session list to check if TA is already loaded in to + * TEE. If yes, returns the 'session' structure for that TA. Else allocates, + * initializes a new 'session' structure and adds it to context's session list. + * + * The caller must hold a mutex. + * + * Returns: + * 'struct amdtee_session *' on success and NULL on failure. 
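+ * + * Typical call site (amdtee_open_session() below): + * mutex_lock(&session_list_mutex); + * sess = alloc_session(ctxdata, arg->session); + * mutex_unlock(&session_list_mutex);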
+ */ +static struct amdtee_session *alloc_session(struct amdtee_context_data *ctxdata, + u32 session) +{ + struct amdtee_session *sess; + u32 ta_handle = get_ta_handle(session); + + /* Scan session list to check if TA is already loaded in to TEE */ + list_for_each_entry(sess, &ctxdata->sess_list, list_node) + if (sess->ta_handle == ta_handle) { + kref_get(&sess->refcount); + return sess; + } + + /* Allocate a new session and add to list */ + sess = kzalloc(sizeof(*sess), GFP_KERNEL); + if (sess) { + sess->ta_handle = ta_handle; + kref_init(&sess->refcount); + spin_lock_init(&sess->lock); + list_add(&sess->list_node, &ctxdata->sess_list); + } + + return sess; +} + +/* Requires mutex to be held */ +static struct amdtee_session *find_session(struct amdtee_context_data *ctxdata, + u32 session) +{ + u32 ta_handle = get_ta_handle(session); + u32 index = get_session_index(session); + struct amdtee_session *sess; + + list_for_each_entry(sess, &ctxdata->sess_list, list_node) + if (ta_handle == sess->ta_handle && + test_bit(index, sess->sess_mask)) + return sess; + + return NULL; +} + +u32 get_buffer_id(struct tee_shm *shm) +{ + u32 buf_id = 0; + struct amdtee_shm_data *shmdata; + + list_for_each_entry(shmdata, &shmctx.shmdata_list, shm_node) + if (shmdata->kaddr == shm->kaddr) { + buf_id = shmdata->buf_id; + break; + } + + return buf_id; +} + +static DEFINE_MUTEX(drv_mutex); +static int copy_ta_binary(struct tee_context *ctx, void *ptr, void **ta, + size_t *ta_size) +{ + const struct firmware *fw; + char fw_name[TA_PATH_MAX]; + struct { + u32 lo; + u16 mid; + u16 hi_ver; + u8 seq_n[8]; + } *uuid = ptr; + int n, rc = 0; + + n = snprintf(fw_name, TA_PATH_MAX, + "%s/%08x-%04x-%04x-%02x%02x%02x%02x%02x%02x%02x%02x.bin", + TA_LOAD_PATH, uuid->lo, uuid->mid, uuid->hi_ver, + uuid->seq_n[0], uuid->seq_n[1], + uuid->seq_n[2], uuid->seq_n[3], + uuid->seq_n[4], uuid->seq_n[5], + uuid->seq_n[6], uuid->seq_n[7]); + if (n < 0 || n >= TA_PATH_MAX) { + pr_err("failed to get firmware name\n"); + return -EINVAL; + } + + mutex_lock(&drv_mutex); + n = request_firmware(&fw, fw_name, &ctx->teedev->dev); + if (n) { + pr_err("failed to load firmware %s\n", fw_name); + rc = -ENOMEM; + goto unlock; + } + + *ta_size = roundup(fw->size, PAGE_SIZE); + *ta = (void *)__get_free_pages(GFP_KERNEL, get_order(*ta_size)); + if (IS_ERR(*ta)) { + pr_err("%s: get_free_pages failed 0x%llx\n", __func__, + (u64)*ta); + rc = -ENOMEM; + goto rel_fw; + } + + memcpy(*ta, fw->data, fw->size); +rel_fw: + release_firmware(fw); +unlock: + mutex_unlock(&drv_mutex); + return rc; +} + +int amdtee_open_session(struct tee_context *ctx, + struct tee_ioctl_open_session_arg *arg, + struct tee_param *param) +{ + struct amdtee_context_data *ctxdata = ctx->data; + struct amdtee_session *sess = NULL; + u32 session_info; + size_t ta_size; + int rc, i; + void *ta; + + if (arg->clnt_login != TEE_IOCTL_LOGIN_PUBLIC) { + pr_err("unsupported client login method\n"); + return -EINVAL; + } + + rc = copy_ta_binary(ctx, &arg->uuid[0], &ta, &ta_size); + if (rc) { + pr_err("failed to copy TA binary\n"); + return rc; + } + + /* Load the TA binary into TEE environment */ + handle_load_ta(ta, ta_size, arg); + if (arg->ret == TEEC_SUCCESS) { + mutex_lock(&session_list_mutex); + sess = alloc_session(ctxdata, arg->session); + mutex_unlock(&session_list_mutex); + } + + if (arg->ret != TEEC_SUCCESS) + goto out; + + if (!sess) { + rc = -ENOMEM; + goto out; + } + + /* Find an empty session index for the given TA */ + spin_lock(&sess->lock); + i = 
find_first_zero_bit(sess->sess_mask, TEE_NUM_SESSIONS); + if (i < TEE_NUM_SESSIONS) + set_bit(i, sess->sess_mask); + spin_unlock(&sess->lock); + + if (i >= TEE_NUM_SESSIONS) { + pr_err("reached maximum session count %d\n", TEE_NUM_SESSIONS); + rc = -ENOMEM; + goto out; + } + + /* Open session with loaded TA */ + handle_open_session(arg, &session_info, param); + + if (arg->ret == TEEC_SUCCESS) { + sess->session_info[i] = session_info; + set_session_id(sess->ta_handle, i, &arg->session); + } else { + pr_err("open_session failed %d\n", arg->ret); + spin_lock(&sess->lock); + clear_bit(i, sess->sess_mask); + spin_unlock(&sess->lock); + } +out: + free_pages((u64)ta, get_order(ta_size)); + return rc; +} + +static void destroy_session(struct kref *ref) +{ + struct amdtee_session *sess = container_of(ref, struct amdtee_session, + refcount); + + /* Unload the TA from TEE */ + handle_unload_ta(sess->ta_handle); + mutex_lock(&session_list_mutex); + list_del(&sess->list_node); + mutex_unlock(&session_list_mutex); + kfree(sess); +} + +int amdtee_close_session(struct tee_context *ctx, u32 session) +{ + struct amdtee_context_data *ctxdata = ctx->data; + u32 i, ta_handle, session_info; + struct amdtee_session *sess; + + pr_debug("%s: sid = 0x%x\n", __func__, session); + + /* + * Check that the session is valid and clear the session + * usage bit + */ + mutex_lock(&session_list_mutex); + sess = find_session(ctxdata, session); + if (sess) { + ta_handle = get_ta_handle(session); + i = get_session_index(session); + session_info = sess->session_info[i]; + spin_lock(&sess->lock); + clear_bit(i, sess->sess_mask); + spin_unlock(&sess->lock); + } + mutex_unlock(&session_list_mutex); + + if (!sess) + return -EINVAL; + + /* Close the session */ + handle_close_session(ta_handle, session_info); + + kref_put(&sess->refcount, destroy_session); + + return 0; +} + +int amdtee_map_shmem(struct tee_shm *shm) +{ + struct shmem_desc shmem; + struct amdtee_shm_data *shmnode; + int rc, count; + u32 buf_id; + + if (!shm) + return -EINVAL; + + shmnode = kmalloc(sizeof(*shmnode), GFP_KERNEL); + if (!shmnode) + return -ENOMEM; + + count = 1; + shmem.kaddr = shm->kaddr; + shmem.size = shm->size; + + /* + * Send a MAP command to TEE and get the corresponding + * buffer Id + */ + rc = handle_map_shmem(count, &shmem, &buf_id); + if (rc) { + pr_err("map_shmem failed: ret = %d\n", rc); + kfree(shmnode); + return rc; + } + + shmnode->kaddr = shm->kaddr; + shmnode->buf_id = buf_id; + list_add(&shmnode->shm_node, &shmctx.shmdata_list); + + pr_debug("buf_id :[%x] kaddr[%p]\n", shmnode->buf_id, shmnode->kaddr); + + return 0; +} + +void amdtee_unmap_shmem(struct tee_shm *shm) +{ + struct amdtee_shm_data *shmnode; + u32 buf_id; + + if (!shm) + return; + + buf_id = get_buffer_id(shm); + /* Unmap the shared memory from TEE */ + handle_unmap_shmem(buf_id); + + list_for_each_entry(shmnode, &shmctx.shmdata_list, shm_node) + if (buf_id == shmnode->buf_id) { + list_del(&shmnode->shm_node); + kfree(shmnode); + break; + } +} + +int amdtee_invoke_func(struct tee_context *ctx, + struct tee_ioctl_invoke_arg *arg, + struct tee_param *param) +{ + struct amdtee_context_data *ctxdata = ctx->data; + struct amdtee_session *sess; + u32 i, session_info; + + /* Check that the session is valid */ + mutex_lock(&session_list_mutex); + sess = find_session(ctxdata, arg->session); + if (sess) { + i = get_session_index(arg->session); + session_info = sess->session_info[i]; + } + mutex_unlock(&session_list_mutex); + + if (!sess) + return -EINVAL; + + handle_invoke_cmd(arg, 
session_info, param); + + return 0; +} + +int amdtee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session) +{ + return -EINVAL; +} + +static const struct tee_driver_ops amdtee_ops = { + .get_version = amdtee_get_version, + .open = amdtee_open, + .release = amdtee_release, + .open_session = amdtee_open_session, + .close_session = amdtee_close_session, + .invoke_func = amdtee_invoke_func, + .cancel_req = amdtee_cancel_req, +}; + +static const struct tee_desc amdtee_desc = { + .name = DRIVER_NAME "-clnt", + .ops = &amdtee_ops, + .owner = THIS_MODULE, +}; + +static int __init amdtee_driver_init(void) +{ + struct tee_device *teedev; + struct tee_shm_pool *pool; + struct amdtee *amdtee; + int rc; + + rc = psp_check_tee_status(); + if (rc) { + pr_err("amd-tee driver: tee not present\n"); + return rc; + } + + drv_data = kzalloc(sizeof(*drv_data), GFP_KERNEL); + if (!drv_data) + return -ENOMEM; + + amdtee = kzalloc(sizeof(*amdtee), GFP_KERNEL); + if (!amdtee) { + rc = -ENOMEM; + goto err_kfree_drv_data; + } + + pool = amdtee_config_shm(); + if (IS_ERR(pool)) { + pr_err("shared pool configuration error\n"); + rc = PTR_ERR(pool); + goto err_kfree_amdtee; + } + + teedev = tee_device_alloc(&amdtee_desc, NULL, pool, amdtee); + if (IS_ERR(teedev)) { + rc = PTR_ERR(teedev); + goto err_free_pool; + } + amdtee->teedev = teedev; + + rc = tee_device_register(amdtee->teedev); + if (rc) + goto err_device_unregister; + + amdtee->pool = pool; + + drv_data->amdtee = amdtee; + + pr_info("amd-tee driver initialization successful\n"); + return 0; + +err_device_unregister: + tee_device_unregister(amdtee->teedev); + +err_free_pool: + tee_shm_pool_free(pool); + +err_kfree_amdtee: + kfree(amdtee); + +err_kfree_drv_data: + kfree(drv_data); + drv_data = NULL; + + pr_err("amd-tee driver initialization failed\n"); + return rc; +} +module_init(amdtee_driver_init); + +static void __exit amdtee_driver_exit(void) +{ + struct amdtee *amdtee; + + if (!drv_data || !drv_data->amdtee) + return; + + amdtee = drv_data->amdtee; + + tee_device_unregister(amdtee->teedev); + tee_shm_pool_free(amdtee->pool); +} +module_exit(amdtee_driver_exit); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION("AMD-TEE driver"); +MODULE_VERSION("1.0"); +MODULE_LICENSE("Dual MIT/GPL"); diff --git a/drivers/tee/amdtee/shm_pool.c b/drivers/tee/amdtee/shm_pool.c new file mode 100644 index 000000000000..065854e2db18 --- /dev/null +++ b/drivers/tee/amdtee/shm_pool.c @@ -0,0 +1,93 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ */ + +#include <linux/slab.h> +#include <linux/tee_drv.h> +#include <linux/psp-sev.h> +#include "amdtee_private.h" + +static int pool_op_alloc(struct tee_shm_pool_mgr *poolm, struct tee_shm *shm, + size_t size) +{ + unsigned int order = get_order(size); + unsigned long va; + int rc; + + va = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); + if (!va) + return -ENOMEM; + + shm->kaddr = (void *)va; + shm->paddr = __psp_pa((void *)va); + shm->size = PAGE_SIZE << order; + + /* Map the allocated memory in to TEE */ + rc = amdtee_map_shmem(shm); + if (rc) { + free_pages(va, order); + shm->kaddr = NULL; + return rc; + } + + return 0; +} + +static void pool_op_free(struct tee_shm_pool_mgr *poolm, struct tee_shm *shm) +{ + /* Unmap the shared memory from TEE */ + amdtee_unmap_shmem(shm); + free_pages((unsigned long)shm->kaddr, get_order(shm->size)); + shm->kaddr = NULL; +} + +static void pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm) +{ + kfree(poolm); +} + +static const struct tee_shm_pool_mgr_ops pool_ops = { + .alloc = pool_op_alloc, + .free = pool_op_free, + .destroy_poolmgr = pool_op_destroy_poolmgr, +}; + +static struct tee_shm_pool_mgr *pool_mem_mgr_alloc(void) +{ + struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); + + if (!mgr) + return ERR_PTR(-ENOMEM); + + mgr->ops = &pool_ops; + + return mgr; +} + +struct tee_shm_pool *amdtee_config_shm(void) +{ + struct tee_shm_pool_mgr *priv_mgr; + struct tee_shm_pool_mgr *dmabuf_mgr; + void *rc; + + rc = pool_mem_mgr_alloc(); + if (IS_ERR(rc)) + return rc; + priv_mgr = rc; + + rc = pool_mem_mgr_alloc(); + if (IS_ERR(rc)) { + tee_shm_pool_mgr_destroy(priv_mgr); + return rc; + } + dmabuf_mgr = rc; + + rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr); + if (IS_ERR(rc)) { + tee_shm_pool_mgr_destroy(priv_mgr); + tee_shm_pool_mgr_destroy(dmabuf_mgr); + } + + return rc; +} diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c index 3517883b5cdb..efae0c02d898 100644 --- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c @@ -369,6 +369,7 @@ static int int3400_thermal_remove(struct platform_device *pdev) } static const struct acpi_device_id int3400_thermal_match[] = { + {"INT1040", 0}, {"INT3400", 0}, {} }; diff --git a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c index a7bbd8584ae2..aeece1e136a5 100644 --- a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c @@ -282,6 +282,7 @@ static int int3403_remove(struct platform_device *pdev) } static const struct acpi_device_id int3403_device_ids[] = { + {"INT1043", 0}, {"INT3403", 0}, {"", 0}, }; diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c index 4562c8060d09..a6aabfd6e2da 100644 --- a/drivers/tty/cyclades.c +++ b/drivers/tty/cyclades.c @@ -3256,7 +3256,7 @@ static int __init cy_detect_isa(void) return nboard; /* probe for CD1400... 
*/ - cy_isa_address = ioremap_nocache(isa_address, CyISA_Ywin); + cy_isa_address = ioremap(isa_address, CyISA_Ywin); if (cy_isa_address == NULL) { printk(KERN_ERR "Cyclom-Y/ISA: can't remap base " "address\n"); @@ -3690,13 +3690,13 @@ static int cy_pci_probe(struct pci_dev *pdev, device_id == PCI_DEVICE_ID_CYCLOM_Y_Hi) { card_name = "Cyclom-Y"; - addr0 = ioremap_nocache(pci_resource_start(pdev, 0), + addr0 = ioremap(pci_resource_start(pdev, 0), CyPCI_Yctl); if (addr0 == NULL) { dev_err(&pdev->dev, "can't remap ctl region\n"); goto err_reg; } - addr2 = ioremap_nocache(pci_resource_start(pdev, 2), + addr2 = ioremap(pci_resource_start(pdev, 2), CyPCI_Ywin); if (addr2 == NULL) { dev_err(&pdev->dev, "can't remap base region\n"); @@ -3712,7 +3712,7 @@ static int cy_pci_probe(struct pci_dev *pdev, } else if (device_id == PCI_DEVICE_ID_CYCLOM_Z_Hi) { struct RUNTIME_9060 __iomem *ctl_addr; - ctl_addr = addr0 = ioremap_nocache(pci_resource_start(pdev, 0), + ctl_addr = addr0 = ioremap(pci_resource_start(pdev, 0), CyPCI_Zctl); if (addr0 == NULL) { dev_err(&pdev->dev, "can't remap ctl region\n"); @@ -3727,7 +3727,7 @@ static int cy_pci_probe(struct pci_dev *pdev, mailbox = readl(&ctl_addr->mail_box_0); - addr2 = ioremap_nocache(pci_resource_start(pdev, 2), + addr2 = ioremap(pci_resource_start(pdev, 2), mailbox == ZE_V1 ? CyPCI_Ze_win : CyPCI_Zwin); if (addr2 == NULL) { dev_err(&pdev->dev, "can't remap base region\n"); diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c index 4c1cd49ae95b..620d8488b83e 100644 --- a/drivers/tty/mips_ejtag_fdc.c +++ b/drivers/tty/mips_ejtag_fdc.c @@ -898,7 +898,7 @@ static int mips_ejtag_fdc_tty_probe(struct mips_cdmm_device *dev) atomic_set(&priv->xmit_total, 0); raw_spin_lock_init(&priv->lock); - priv->reg = devm_ioremap_nocache(priv->dev, dev->res.start, + priv->reg = devm_ioremap(priv->dev, dev->res.start, resource_size(&dev->res)); if (!priv->reg) { dev_err(priv->dev, "ioremap failed for resource %pR\n", diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c index 3a1a5e0ee93f..9f13f7d49dd7 100644 --- a/drivers/tty/moxa.c +++ b/drivers/tty/moxa.c @@ -961,7 +961,7 @@ static int moxa_pci_probe(struct pci_dev *pdev, goto err; } - board->basemem = ioremap_nocache(pci_resource_start(pdev, 2), 0x4000); + board->basemem = ioremap(pci_resource_start(pdev, 2), 0x4000); if (board->basemem == NULL) { dev_err(&pdev->dev, "can't remap io space 2\n"); retval = -ENOMEM; @@ -1071,7 +1071,7 @@ static int __init moxa_init(void) brd->numPorts = type[i] == MOXA_BOARD_C218_ISA ? 8 : numports[i]; brd->busType = MOXA_BUS_TYPE_ISA; - brd->basemem = ioremap_nocache(baseaddr[i], 0x4000); + brd->basemem = ioremap(baseaddr[i], 0x4000); if (!brd->basemem) { printk(KERN_ERR "MOXA: can't remap %lx\n", baseaddr[i]); diff --git a/drivers/tty/serial/8250/8250_gsc.c b/drivers/tty/serial/8250/8250_gsc.c index 0809ae2aa9b1..673cda3d011d 100644 --- a/drivers/tty/serial/8250/8250_gsc.c +++ b/drivers/tty/serial/8250/8250_gsc.c @@ -55,7 +55,7 @@ static int __init serial_init_chip(struct parisc_device *dev) uart.port.uartclk = (dev->id.sversion != 0xad) ? 
7272727 : 1843200; uart.port.mapbase = address; - uart.port.membase = ioremap_nocache(address, 16); + uart.port.membase = ioremap(address, 16); if (!uart.port.membase) { dev_warn(&dev->dev, "Failed to map memory\n"); return -ENOMEM; diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c index 836e736ae188..e603c66d6cc4 100644 --- a/drivers/tty/serial/8250/8250_omap.c +++ b/drivers/tty/serial/8250/8250_omap.c @@ -1147,7 +1147,7 @@ static int omap8250_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; - membase = devm_ioremap_nocache(&pdev->dev, regs->start, + membase = devm_ioremap(&pdev->dev, regs->start, resource_size(regs)); if (!membase) return -ENODEV; diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 022924d5ad54..939685fed396 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -275,7 +275,7 @@ static int pci_plx9050_init(struct pci_dev *dev) /* * enable/disable interrupts */ - p = ioremap_nocache(pci_resource_start(dev, 0), 0x80); + p = ioremap(pci_resource_start(dev, 0), 0x80); if (p == NULL) return -ENOMEM; writel(irq_config, p + 0x4c); @@ -299,7 +299,7 @@ static void pci_plx9050_exit(struct pci_dev *dev) /* * disable interrupts */ - p = ioremap_nocache(pci_resource_start(dev, 0), 0x80); + p = ioremap(pci_resource_start(dev, 0), 0x80); if (p != NULL) { writel(0, p + 0x4c); @@ -475,7 +475,7 @@ static int pci_siig10x_init(struct pci_dev *dev) break; } - p = ioremap_nocache(pci_resource_start(dev, 0), 0x80); + p = ioremap(pci_resource_start(dev, 0), 0x80); if (p == NULL) return -ENOMEM; diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 90655910b0c7..9ff5dfad590a 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -2766,7 +2766,7 @@ static int serial8250_request_std_resource(struct uart_8250_port *up) } if (port->flags & UPF_IOREMAP) { - port->membase = ioremap_nocache(port->mapbase, size); + port->membase = ioremap(port->mapbase, size); if (!port->membase) { release_mem_region(port->mapbase, size); ret = -ENOMEM; diff --git a/drivers/tty/serial/dz.c b/drivers/tty/serial/dz.c index 7b57e840e255..730da413d8ed 100644 --- a/drivers/tty/serial/dz.c +++ b/drivers/tty/serial/dz.c @@ -677,7 +677,7 @@ static void dz_release_port(struct uart_port *uport) static int dz_map_port(struct uart_port *uport) { if (!uport->membase) - uport->membase = ioremap_nocache(uport->mapbase, + uport->membase = ioremap(uport->mapbase, dec_kn_slot_size); if (!uport->membase) { printk(KERN_ERR "dz: Cannot map MMIO\n"); diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c index fcbea43dc334..f67226df30d4 100644 --- a/drivers/tty/serial/lantiq.c +++ b/drivers/tty/serial/lantiq.c @@ -549,7 +549,7 @@ lqasc_request_port(struct uart_port *port) } if (port->flags & UPF_IOREMAP) { - port->membase = devm_ioremap_nocache(&pdev->dev, + port->membase = devm_ioremap(&pdev->dev, port->mapbase, size); if (port->membase == NULL) return -ENOMEM; diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c index fbc5bc022a39..164b18372c02 100644 --- a/drivers/tty/serial/meson_uart.c +++ b/drivers/tty/serial/meson_uart.c @@ -411,7 +411,7 @@ static int meson_uart_request_port(struct uart_port *port) return -EBUSY; } - port->membase = devm_ioremap_nocache(port->dev, port->mapbase, + port->membase = devm_ioremap(port->dev, port->mapbase, port->mapsize); if (!port->membase) return -ENOMEM; diff 
--git a/drivers/tty/serial/mux.c b/drivers/tty/serial/mux.c index 00ce31e8d19a..fc58a004bef4 100644 --- a/drivers/tty/serial/mux.c +++ b/drivers/tty/serial/mux.c @@ -474,7 +474,7 @@ static int __init mux_probe(struct parisc_device *dev) port->iobase = 0; port->mapbase = dev->hpa.start + MUX_OFFSET + (i * MUX_LINE_OFFSET); - port->membase = ioremap_nocache(port->mapbase, MUX_LINE_OFFSET); + port->membase = ioremap(port->mapbase, MUX_LINE_OFFSET); port->iotype = UPIO_MEM; port->type = PORT_MUX; port->irq = 0; diff --git a/drivers/tty/serial/owl-uart.c b/drivers/tty/serial/owl-uart.c index d2d8b3494685..42c8cc93b603 100644 --- a/drivers/tty/serial/owl-uart.c +++ b/drivers/tty/serial/owl-uart.c @@ -427,7 +427,7 @@ static int owl_uart_request_port(struct uart_port *port) return -EBUSY; if (port->flags & UPF_IOREMAP) { - port->membase = devm_ioremap_nocache(port->dev, port->mapbase, + port->membase = devm_ioremap(port->dev, port->mapbase, resource_size(res)); if (!port->membase) return -EBUSY; diff --git a/drivers/tty/serial/pic32_uart.c b/drivers/tty/serial/pic32_uart.c index 0bdf1687983f..484b7e8d5381 100644 --- a/drivers/tty/serial/pic32_uart.c +++ b/drivers/tty/serial/pic32_uart.c @@ -618,7 +618,7 @@ static int pic32_uart_request_port(struct uart_port *port) "pic32_uart_mem")) return -EBUSY; - port->membase = devm_ioremap_nocache(port->dev, port->mapbase, + port->membase = devm_ioremap(port->dev, port->mapbase, resource_size(res_mem)); if (!port->membase) { dev_err(port->dev, "Unable to map registers\n"); diff --git a/drivers/tty/serial/rda-uart.c b/drivers/tty/serial/rda-uart.c index ff9a27d48bca..b5ef86ae2746 100644 --- a/drivers/tty/serial/rda-uart.c +++ b/drivers/tty/serial/rda-uart.c @@ -498,7 +498,7 @@ static int rda_uart_request_port(struct uart_port *port) return -EBUSY; if (port->flags & UPF_IOREMAP) { - port->membase = devm_ioremap_nocache(port->dev, port->mapbase, + port->membase = devm_ioremap(port->dev, port->mapbase, resource_size(res)); if (!port->membase) return -EBUSY; diff --git a/drivers/tty/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c index 329aced26bd8..7c99340a3d66 100644 --- a/drivers/tty/serial/sb1250-duart.c +++ b/drivers/tty/serial/sb1250-duart.c @@ -668,7 +668,7 @@ static int sbd_map_port(struct uart_port *uport) struct sbd_duart *duart = sport->duart; if (!uport->membase) - uport->membase = ioremap_nocache(uport->mapbase, + uport->membase = ioremap(uport->mapbase, DUART_CHANREG_SPACING); if (!uport->membase) { printk(err); @@ -676,7 +676,7 @@ static int sbd_map_port(struct uart_port *uport) } if (!sport->memctrl) - sport->memctrl = ioremap_nocache(duart->mapctrl, + sport->memctrl = ioremap(duart->mapctrl, DUART_CHANREG_SPACING); if (!sport->memctrl) { printk(err); diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 58bf9d496ba5..87ca6294de0e 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -2680,7 +2680,7 @@ static int sci_remap_port(struct uart_port *port) return 0; if (port->dev->of_node || (port->flags & UPF_IOREMAP)) { - port->membase = ioremap_nocache(port->mapbase, sport->reg_size); + port->membase = ioremap(port->mapbase, sport->reg_size); if (unlikely(!port->membase)) { dev_err(port->dev, "can't remap port#%d\n", port->line); return -ENXIO; diff --git a/drivers/tty/serial/zs.c b/drivers/tty/serial/zs.c index b03d3e458ea2..89154ac4c577 100644 --- a/drivers/tty/serial/zs.c +++ b/drivers/tty/serial/zs.c @@ -992,7 +992,7 @@ static void zs_release_port(struct uart_port *uport) static int 
zs_map_port(struct uart_port *uport) { if (!uport->membase) - uport->membase = ioremap_nocache(uport->mapbase, + uport->membase = ioremap(uport->mapbase, ZS_CHAN_IO_SIZE); if (!uport->membase) { printk(KERN_ERR "zs: Cannot map MMIO\n"); diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c index 61dc6b4a43d0..08d2976593d5 100644 --- a/drivers/tty/synclink.c +++ b/drivers/tty/synclink.c @@ -4054,7 +4054,7 @@ static int mgsl_claim_resources(struct mgsl_struct *info) } info->lcr_mem_requested = true; - info->memory_base = ioremap_nocache(info->phys_memory_base, + info->memory_base = ioremap(info->phys_memory_base, 0x40000); if (!info->memory_base) { printk( "%s(%d):Can't map shared memory on device %s MemAddr=%08X\n", @@ -4068,7 +4068,7 @@ static int mgsl_claim_resources(struct mgsl_struct *info) goto errout; } - info->lcr_base = ioremap_nocache(info->phys_lcr_base, + info->lcr_base = ioremap(info->phys_lcr_base, PAGE_SIZE); if (!info->lcr_base) { printk( "%s(%d):Can't map LCR memory on device %s MemAddr=%08X\n", diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c index 5d59e2369c8a..5759b6c5b3e2 100644 --- a/drivers/tty/synclink_gt.c +++ b/drivers/tty/synclink_gt.c @@ -3450,7 +3450,7 @@ static int claim_resources(struct slgt_info *info) else info->reg_addr_requested = true; - info->reg_addr = ioremap_nocache(info->phys_reg_addr, SLGT_REG_SIZE); + info->reg_addr = ioremap(info->phys_reg_addr, SLGT_REG_SIZE); if (!info->reg_addr) { DBGERR(("%s can't map device registers, addr=%08X\n", info->device_name, info->phys_reg_addr)); diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c index 33181fa6eb18..7446b03fef02 100644 --- a/drivers/tty/synclinkmp.c +++ b/drivers/tty/synclinkmp.c @@ -3559,7 +3559,7 @@ static int claim_resources(SLMP_INFO *info) else info->sca_statctrl_requested = true; - info->memory_base = ioremap_nocache(info->phys_memory_base, + info->memory_base = ioremap(info->phys_memory_base, SCA_MEM_SIZE); if (!info->memory_base) { printk( "%s(%d):%s Can't map shared memory, MemAddr=%08X\n", @@ -3568,7 +3568,7 @@ static int claim_resources(SLMP_INFO *info) goto errout; } - info->lcr_base = ioremap_nocache(info->phys_lcr_base, PAGE_SIZE); + info->lcr_base = ioremap(info->phys_lcr_base, PAGE_SIZE); if (!info->lcr_base) { printk( "%s(%d):%s Can't map LCR memory, MemAddr=%08X\n", __FILE__,__LINE__,info->device_name, info->phys_lcr_base ); @@ -3577,7 +3577,7 @@ static int claim_resources(SLMP_INFO *info) } info->lcr_base += info->lcr_offset; - info->sca_base = ioremap_nocache(info->phys_sca_base, PAGE_SIZE); + info->sca_base = ioremap(info->phys_sca_base, PAGE_SIZE); if (!info->sca_base) { printk( "%s(%d):%s Can't map SCA memory, MemAddr=%08X\n", __FILE__,__LINE__,info->device_name, info->phys_sca_base ); @@ -3586,7 +3586,7 @@ static int claim_resources(SLMP_INFO *info) } info->sca_base += info->sca_offset; - info->statctrl_base = ioremap_nocache(info->phys_statctrl_base, + info->statctrl_base = ioremap(info->phys_statctrl_base, PAGE_SIZE); if (!info->statctrl_base) { printk( "%s(%d):%s Can't map SCA Status/Control memory, MemAddr=%08X\n", diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c index 9ae2a7a93df2..f0a259937da8 100644 --- a/drivers/usb/core/hcd-pci.c +++ b/drivers/usb/core/hcd-pci.c @@ -222,7 +222,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) retval = -EBUSY; goto put_hcd; } - hcd->regs = devm_ioremap_nocache(&dev->dev, hcd->rsrc_start, + hcd->regs = devm_ioremap(&dev->dev, hcd->rsrc_start, 
hcd->rsrc_len); if (hcd->regs == NULL) { dev_dbg(&dev->dev, "error mapping memory\n"); diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c index 5567ed2cddbe..fa252870c926 100644 --- a/drivers/usb/dwc3/host.c +++ b/drivers/usb/dwc3/host.c @@ -88,10 +88,10 @@ int dwc3_host_init(struct dwc3 *dwc) memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props)); if (dwc->usb3_lpm_capable) - props[prop_idx++].name = "usb3-lpm-capable"; + props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb3-lpm-capable"); if (dwc->usb2_lpm_disable) - props[prop_idx++].name = "usb2-lpm-disable"; + props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb2-lpm-disable"); /** * WORKAROUND: dwc3 revisions <=3.00a have a limitation @@ -103,7 +103,7 @@ int dwc3_host_init(struct dwc3 *dwc) * This following flag tells XHCI to do just that. */ if (dwc->revision <= DWC3_REVISION_300A) - props[prop_idx++].name = "quirk-broken-port-ped"; + props[prop_idx++] = PROPERTY_ENTRY_BOOL("quirk-broken-port-ped"); if (prop_idx) { ret = platform_device_add_properties(xhci, props); diff --git a/drivers/usb/early/xhci-dbc.c b/drivers/usb/early/xhci-dbc.c index cac991173ac0..971c6b92484a 100644 --- a/drivers/usb/early/xhci-dbc.c +++ b/drivers/usb/early/xhci-dbc.c @@ -971,7 +971,7 @@ static int __init xdbc_init(void) goto free_and_quit; } - base = ioremap_nocache(xdbc.xhci_start, xdbc.xhci_length); + base = ioremap(xdbc.xhci_start, xdbc.xhci_length); if (!base) { xdbc_trace("failed to remap the io address\n"); ret = -ENOMEM; diff --git a/drivers/usb/gadget/udc/amd5536udc_pci.c b/drivers/usb/gadget/udc/amd5536udc_pci.c index 57b6f66331cf..bfd1c9e80a1f 100644 --- a/drivers/usb/gadget/udc/amd5536udc_pci.c +++ b/drivers/usb/gadget/udc/amd5536udc_pci.c @@ -116,7 +116,7 @@ static int udc_pci_probe( goto err_memreg; } - dev->virt_addr = ioremap_nocache(resource, len); + dev->virt_addr = ioremap(resource, len); if (!dev->virt_addr) { dev_dbg(&pdev->dev, "start address cannot be mapped\n"); retval = -EFAULT; diff --git a/drivers/usb/gadget/udc/goku_udc.c b/drivers/usb/gadget/udc/goku_udc.c index c3721225b61e..4a46f661d0e4 100644 --- a/drivers/usb/gadget/udc/goku_udc.c +++ b/drivers/usb/gadget/udc/goku_udc.c @@ -1782,7 +1782,7 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id) } dev->got_region = 1; - base = ioremap_nocache(resource, len); + base = ioremap(resource, len); if (base == NULL) { DBG(dev, "can't map memory\n"); retval = -EFAULT; diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c index 247de0faaeb7..a8273b589456 100644 --- a/drivers/usb/gadget/udc/net2272.c +++ b/drivers/usb/gadget/udc/net2272.c @@ -2323,7 +2323,7 @@ net2272_rdk1_probe(struct pci_dev *pdev, struct net2272 *dev) goto err; } - mem_mapped_addr[i] = ioremap_nocache(resource, len); + mem_mapped_addr[i] = ioremap(resource, len); if (mem_mapped_addr[i] == NULL) { release_mem_region(resource, len); dev_dbg(dev->dev, "can't map memory\n"); @@ -2401,7 +2401,7 @@ net2272_rdk2_probe(struct pci_dev *pdev, struct net2272 *dev) goto err; } - mem_mapped_addr[i] = ioremap_nocache(resource, len); + mem_mapped_addr[i] = ioremap(resource, len); if (mem_mapped_addr[i] == NULL) { release_mem_region(resource, len); dev_dbg(dev->dev, "can't map memory\n"); @@ -2625,7 +2625,7 @@ net2272_plat_probe(struct platform_device *pdev) ret = -EBUSY; goto err; } - dev->base_addr = ioremap_nocache(base, len); + dev->base_addr = ioremap(base, len); if (!dev->base_addr) { dev_dbg(dev->dev, "can't map memory\n"); ret = -EFAULT; diff --git 
a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c index 51efee21915f..1fd1b9186e46 100644 --- a/drivers/usb/gadget/udc/net2280.c +++ b/drivers/usb/gadget/udc/net2280.c @@ -3659,7 +3659,7 @@ static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id) * 8051 code into the chip, e.g. to turn on PCI PM. */ - base = ioremap_nocache(resource, len); + base = ioremap(resource, len); if (base == NULL) { ep_dbg(dev, "can't map memory\n"); retval = -EFAULT; diff --git a/drivers/usb/host/ehci-pmcmsp.c b/drivers/usb/host/ehci-pmcmsp.c index a2b610dbedfc..2d462fbbe0a6 100644 --- a/drivers/usb/host/ehci-pmcmsp.c +++ b/drivers/usb/host/ehci-pmcmsp.c @@ -107,7 +107,7 @@ static int usb_hcd_msp_map_regs(struct mspusb_device *dev) if (!request_mem_region(res->start, res_len, "mab regs")) return -EBUSY; - dev->mab_regs = ioremap_nocache(res->start, res_len); + dev->mab_regs = ioremap(res->start, res_len); if (dev->mab_regs == NULL) { retval = -ENOMEM; goto err1; @@ -124,7 +124,7 @@ static int usb_hcd_msp_map_regs(struct mspusb_device *dev) retval = -EBUSY; goto err2; } - dev->usbid_regs = ioremap_nocache(res->start, res_len); + dev->usbid_regs = ioremap(res->start, res_len); if (dev->usbid_regs == NULL) { retval = -ENOMEM; goto err3; @@ -178,7 +178,7 @@ int usb_hcd_msp_probe(const struct hc_driver *driver, retval = -EBUSY; goto err1; } - hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len); + hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); if (!hcd->regs) { pr_debug("ioremap failed"); retval = -ENOMEM; diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index 6c7f0a876b96..beb2efa71341 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -1150,7 +1150,7 @@ static void quirk_usb_handoff_xhci(struct pci_dev *pdev) if (!mmio_resource_enabled(pdev, 0)) return; - base = ioremap_nocache(pci_resource_start(pdev, 0), len); + base = ioremap(pci_resource_start(pdev, 0), len); if (base == NULL) return; diff --git a/drivers/usb/isp1760/isp1760-if.c b/drivers/usb/isp1760/isp1760-if.c index 07cc82ff327c..ccd30f835888 100644 --- a/drivers/usb/isp1760/isp1760-if.c +++ b/drivers/usb/isp1760/isp1760-if.c @@ -50,7 +50,7 @@ static int isp1761_pci_init(struct pci_dev *dev) } /* map available memory */ - iobase = ioremap_nocache(mem_start, mem_length); + iobase = ioremap(mem_start, mem_length); if (!iobase) { printk(KERN_ERR "Error ioremap failed\n"); release_mem_region(mem_start, mem_length); @@ -101,7 +101,7 @@ static int isp1761_pci_init(struct pci_dev *dev) return -EBUSY; } - iobase = ioremap_nocache(mem_start, mem_length); + iobase = ioremap(mem_start, mem_length); if (!iobase) { printk(KERN_ERR "ioremap #1\n"); release_mem_region(mem_start, mem_length); diff --git a/drivers/usb/roles/intel-xhci-usb-role-switch.c b/drivers/usb/roles/intel-xhci-usb-role-switch.c index 409851306e99..80d6559bbcb2 100644 --- a/drivers/usb/roles/intel-xhci-usb-role-switch.c +++ b/drivers/usb/roles/intel-xhci-usb-role-switch.c @@ -161,7 +161,7 @@ static int intel_xhci_usb_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -EINVAL; - data->base = devm_ioremap_nocache(dev, res->start, resource_size(res)); + data->base = devm_ioremap(dev, res->start, resource_size(res)); if (!data->base) return -ENOMEM; diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c index 3f1786170098..9fc4f338e870 100644 --- a/drivers/usb/typec/ucsi/ucsi_acpi.c +++ 
b/drivers/usb/typec/ucsi/ucsi_acpi.c @@ -127,7 +127,7 @@ static int ucsi_acpi_probe(struct platform_device *pdev) return -ENODEV; } - /* This will make sure we can use ioremap_nocache() */ + /* This will make sure we can use ioremap() */ status = acpi_release_memory(ACPI_HANDLE(&pdev->dev), res, 1); if (ACPI_FAILURE(status)) return -ENOMEM; diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c index 0120d8324a40..a87992892a9f 100644 --- a/drivers/vfio/pci/vfio_pci_rdwr.c +++ b/drivers/vfio/pci/vfio_pci_rdwr.c @@ -230,7 +230,7 @@ ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf, switch ((u32)pos) { case 0xa0000 ... 0xbffff: count = min(count, (size_t)(0xc0000 - pos)); - iomem = ioremap_nocache(0xa0000, 0xbffff - 0xa0000 + 1); + iomem = ioremap(0xa0000, 0xbffff - 0xa0000 + 1); off = pos - 0xa0000; rsrc = VGA_RSRC_LEGACY_MEM; is_ioport = false; diff --git a/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c b/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c index 2d2babe21b2f..40d4fb9276ba 100644 --- a/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c +++ b/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c @@ -54,13 +54,13 @@ static int vfio_platform_amdxgbe_reset(struct vfio_platform_device *vdev) if (!xgmac_regs->ioaddr) { xgmac_regs->ioaddr = - ioremap_nocache(xgmac_regs->addr, xgmac_regs->size); + ioremap(xgmac_regs->addr, xgmac_regs->size); if (!xgmac_regs->ioaddr) return -ENOMEM; } if (!xpcs_regs->ioaddr) { xpcs_regs->ioaddr = - ioremap_nocache(xpcs_regs->addr, xpcs_regs->size); + ioremap(xpcs_regs->addr, xpcs_regs->size); if (!xpcs_regs->ioaddr) return -ENOMEM; } diff --git a/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c b/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c index 16165a62b86d..96064ef8f629 100644 --- a/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c +++ b/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c @@ -82,7 +82,7 @@ static int vfio_platform_bcmflexrm_reset(struct vfio_platform_device *vdev) /* Map FlexRM ring registers if not mapped */ if (!reg->ioaddr) { - reg->ioaddr = ioremap_nocache(reg->addr, reg->size); + reg->ioaddr = ioremap(reg->addr, reg->size); if (!reg->ioaddr) return -ENOMEM; } diff --git a/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c b/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c index f67bab547501..09a9453b75c5 100644 --- a/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c +++ b/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c @@ -52,7 +52,7 @@ static int vfio_platform_calxedaxgmac_reset(struct vfio_platform_device *vdev) if (!reg->ioaddr) { reg->ioaddr = - ioremap_nocache(reg->addr, reg->size); + ioremap(reg->addr, reg->size); if (!reg->ioaddr) return -ENOMEM; } diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c index e8f2bdbe0542..c0771a9567fb 100644 --- a/drivers/vfio/platform/vfio_platform_common.c +++ b/drivers/vfio/platform/vfio_platform_common.c @@ -409,7 +409,7 @@ static ssize_t vfio_platform_read_mmio(struct vfio_platform_region *reg, if (!reg->ioaddr) { reg->ioaddr = - ioremap_nocache(reg->addr, reg->size); + ioremap(reg->addr, reg->size); if (!reg->ioaddr) return -ENOMEM; @@ -486,7 +486,7 @@ static ssize_t vfio_platform_write_mmio(struct vfio_platform_region *reg, if (!reg->ioaddr) { reg->ioaddr = - ioremap_nocache(reg->addr, reg->size); + ioremap(reg->addr, reg->size); if (!reg->ioaddr) return -ENOMEM; diff --git a/drivers/video/fbdev/carminefb.c 
b/drivers/video/fbdev/carminefb.c index 9f3be0258623..27ba2ed4138a 100644 --- a/drivers/video/fbdev/carminefb.c +++ b/drivers/video/fbdev/carminefb.c @@ -633,7 +633,7 @@ static int carminefb_probe(struct pci_dev *dev, const struct pci_device_id *ent) ret = -EBUSY; goto err_free_hw; } - hw->v_regs = ioremap_nocache(carminefb_fix.mmio_start, + hw->v_regs = ioremap(carminefb_fix.mmio_start, carminefb_fix.mmio_len); if (!hw->v_regs) { printk(KERN_ERR "carminefb: Can't remap %s register.\n", @@ -664,7 +664,7 @@ static int carminefb_probe(struct pci_dev *dev, const struct pci_device_id *ent) goto err_unmap_vregs; } - hw->screen_mem = ioremap_nocache(carminefb_fix.smem_start, + hw->screen_mem = ioremap(carminefb_fix.smem_start, carminefb_fix.smem_len); if (!hw->screen_mem) { printk(KERN_ERR "carmine: Can't ioremap smem area.\n"); diff --git a/drivers/video/fbdev/i810/i810_main.c b/drivers/video/fbdev/i810/i810_main.c index d18f7b31932c..aa7583d963ac 100644 --- a/drivers/video/fbdev/i810/i810_main.c +++ b/drivers/video/fbdev/i810/i810_main.c @@ -1883,7 +1883,7 @@ static int i810_allocate_pci_resource(struct i810fb_par *par, } par->res_flags |= MMIO_REQ; - par->mmio_start_virtual = ioremap_nocache(par->mmio_start_phys, + par->mmio_start_virtual = ioremap(par->mmio_start_phys, MMIO_SIZE); if (!par->mmio_start_virtual) { printk("i810fb_init: cannot remap mmio region\n"); diff --git a/drivers/video/fbdev/intelfb/intelfbdrv.c b/drivers/video/fbdev/intelfb/intelfbdrv.c index a76c61512c60..a09fc2eaa40d 100644 --- a/drivers/video/fbdev/intelfb/intelfbdrv.c +++ b/drivers/video/fbdev/intelfb/intelfbdrv.c @@ -654,7 +654,7 @@ static int intelfb_pci_register(struct pci_dev *pdev, } dinfo->mmio_base = - (u8 __iomem *)ioremap_nocache(dinfo->mmio_base_phys, + (u8 __iomem *)ioremap(dinfo->mmio_base_phys, INTEL_REG_SIZE); if (!dinfo->mmio_base) { ERR_MSG("Cannot remap MMIO region.\n"); diff --git a/drivers/video/fbdev/kyro/fbdev.c b/drivers/video/fbdev/kyro/fbdev.c index a7bd9f25911b..a8660926924b 100644 --- a/drivers/video/fbdev/kyro/fbdev.c +++ b/drivers/video/fbdev/kyro/fbdev.c @@ -683,7 +683,7 @@ static int kyrofb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) kyro_fix.mmio_len = pci_resource_len(pdev, 1); currentpar->regbase = deviceInfo.pSTGReg = - ioremap_nocache(kyro_fix.mmio_start, kyro_fix.mmio_len); + ioremap(kyro_fix.mmio_start, kyro_fix.mmio_len); if (!currentpar->regbase) goto out_free_fb; diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c index 1a555f70923a..36cc718b96ae 100644 --- a/drivers/video/fbdev/matrox/matroxfb_base.c +++ b/drivers/video/fbdev/matrox/matroxfb_base.c @@ -1710,7 +1710,7 @@ static int initMatrox2(struct matrox_fb_info *minfo, struct board *b) memsize = mem; err = -ENOMEM; - minfo->mmio.vbase.vaddr = ioremap_nocache(ctrlptr_phys, 16384); + minfo->mmio.vbase.vaddr = ioremap(ctrlptr_phys, 16384); if (!minfo->mmio.vbase.vaddr) { printk(KERN_ERR "matroxfb: cannot ioremap(%lX, 16384), matroxfb disabled\n", ctrlptr_phys); goto failVideoMR; diff --git a/drivers/video/fbdev/mbx/mbxfb.c b/drivers/video/fbdev/mbx/mbxfb.c index 50935252b50b..3de4b3ed990a 100644 --- a/drivers/video/fbdev/mbx/mbxfb.c +++ b/drivers/video/fbdev/mbx/mbxfb.c @@ -938,7 +938,7 @@ static int mbxfb_probe(struct platform_device *dev) } mfbi->reg_phys_addr = mfbi->reg_res->start; - mfbi->reg_virt_addr = devm_ioremap_nocache(&dev->dev, + mfbi->reg_virt_addr = devm_ioremap(&dev->dev, mfbi->reg_phys_addr, res_size(mfbi->reg_req)); if (!mfbi->reg_virt_addr) { 
@@ -948,7 +948,7 @@ static int mbxfb_probe(struct platform_device *dev) } virt_base_2700 = mfbi->reg_virt_addr; - mfbi->fb_virt_addr = devm_ioremap_nocache(&dev->dev, mfbi->fb_phys_addr, + mfbi->fb_virt_addr = devm_ioremap(&dev->dev, mfbi->fb_phys_addr, res_size(mfbi->fb_req)); if (!mfbi->fb_virt_addr) { dev_err(&dev->dev, "failed to ioremap frame buffer\n"); diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c index 17174cd7a5bb..974e4c28b08b 100644 --- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c +++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c @@ -485,7 +485,7 @@ static int mmphw_probe(struct platform_device *pdev) goto failed; } - ctrl->reg_base = devm_ioremap_nocache(ctrl->dev, + ctrl->reg_base = devm_ioremap(ctrl->dev, res->start, resource_size(res)); if (ctrl->reg_base == NULL) { dev_err(ctrl->dev, "%s: res %pR map failed\n", __func__, res); diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c index 1dcf02e12af4..7cc1216b1389 100644 --- a/drivers/video/fbdev/pm2fb.c +++ b/drivers/video/fbdev/pm2fb.c @@ -1563,7 +1563,7 @@ static int pm2fb_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_exit_neither; } default_par->v_regs = - ioremap_nocache(pm2fb_fix.mmio_start, pm2fb_fix.mmio_len); + ioremap(pm2fb_fix.mmio_start, pm2fb_fix.mmio_len); if (!default_par->v_regs) { printk(KERN_WARNING "pm2fb: Can't remap %s register area.\n", pm2fb_fix.id); diff --git a/drivers/video/fbdev/pm3fb.c b/drivers/video/fbdev/pm3fb.c index 6130aa56a1e9..2fa46607e0fc 100644 --- a/drivers/video/fbdev/pm3fb.c +++ b/drivers/video/fbdev/pm3fb.c @@ -1236,7 +1236,7 @@ static unsigned long pm3fb_size_memory(struct pm3_par *par) return 0; } screen_mem = - ioremap_nocache(pm3fb_fix.smem_start, pm3fb_fix.smem_len); + ioremap(pm3fb_fix.smem_start, pm3fb_fix.smem_len); if (!screen_mem) { printk(KERN_WARNING "pm3fb: Can't ioremap smem area.\n"); release_mem_region(pm3fb_fix.smem_start, pm3fb_fix.smem_len); @@ -1347,7 +1347,7 @@ static int pm3fb_probe(struct pci_dev *dev, const struct pci_device_id *ent) goto err_exit_neither; } par->v_regs = - ioremap_nocache(pm3fb_fix.mmio_start, pm3fb_fix.mmio_len); + ioremap(pm3fb_fix.mmio_start, pm3fb_fix.mmio_len); if (!par->v_regs) { printk(KERN_WARNING "pm3fb: Can't remap %s register area.\n", pm3fb_fix.id); diff --git a/drivers/video/fbdev/pmag-aa-fb.c b/drivers/video/fbdev/pmag-aa-fb.c index d1e78ce3a9c2..d5bf185fc376 100644 --- a/drivers/video/fbdev/pmag-aa-fb.c +++ b/drivers/video/fbdev/pmag-aa-fb.c @@ -188,7 +188,7 @@ static int pmagaafb_probe(struct device *dev) /* MMIO mapping setup. */ info->fix.mmio_start = start + PMAG_AA_BT455_OFFSET; - par->mmio = ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len); + par->mmio = ioremap(info->fix.mmio_start, info->fix.mmio_len); if (!par->mmio) { printk(KERN_ERR "%s: Cannot map MMIO\n", dev_name(dev)); err = -ENOMEM; @@ -199,7 +199,7 @@ static int pmagaafb_probe(struct device *dev) /* Frame buffer mapping setup. 
*/ info->fix.smem_start = start + PMAG_AA_ONBOARD_FBMEM_OFFSET; - info->screen_base = ioremap_nocache(info->fix.smem_start, + info->screen_base = ioremap(info->fix.smem_start, info->fix.smem_len); if (!info->screen_base) { printk(KERN_ERR "%s: Cannot map FB\n", dev_name(dev)); diff --git a/drivers/video/fbdev/pmag-ba-fb.c b/drivers/video/fbdev/pmag-ba-fb.c index 56b912bb28de..2ddcdf7919a2 100644 --- a/drivers/video/fbdev/pmag-ba-fb.c +++ b/drivers/video/fbdev/pmag-ba-fb.c @@ -180,7 +180,7 @@ static int pmagbafb_probe(struct device *dev) /* MMIO mapping setup. */ info->fix.mmio_start = start; - par->mmio = ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len); + par->mmio = ioremap(info->fix.mmio_start, info->fix.mmio_len); if (!par->mmio) { printk(KERN_ERR "%s: Cannot map MMIO\n", dev_name(dev)); err = -ENOMEM; @@ -190,7 +190,7 @@ static int pmagbafb_probe(struct device *dev) /* Frame buffer mapping setup. */ info->fix.smem_start = start + PMAG_BA_FBMEM; - info->screen_base = ioremap_nocache(info->fix.smem_start, + info->screen_base = ioremap(info->fix.smem_start, info->fix.smem_len); if (!info->screen_base) { printk(KERN_ERR "%s: Cannot map FB\n", dev_name(dev)); diff --git a/drivers/video/fbdev/pmagb-b-fb.c b/drivers/video/fbdev/pmagb-b-fb.c index 2822b2225924..90d2b04feb42 100644 --- a/drivers/video/fbdev/pmagb-b-fb.c +++ b/drivers/video/fbdev/pmagb-b-fb.c @@ -287,7 +287,7 @@ static int pmagbbfb_probe(struct device *dev) /* MMIO mapping setup. */ info->fix.mmio_start = start; - par->mmio = ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len); + par->mmio = ioremap(info->fix.mmio_start, info->fix.mmio_len); if (!par->mmio) { printk(KERN_ERR "%s: Cannot map MMIO\n", dev_name(dev)); err = -ENOMEM; @@ -298,7 +298,7 @@ static int pmagbbfb_probe(struct device *dev) /* Frame buffer mapping setup. */ info->fix.smem_start = start + PMAGB_B_FBMEM; - par->smem = ioremap_nocache(info->fix.smem_start, info->fix.smem_len); + par->smem = ioremap(info->fix.smem_start, info->fix.smem_len); if (!par->smem) { printk(KERN_ERR "%s: Cannot map FB\n", dev_name(dev)); err = -ENOMEM; diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c index 0a3b2b7c7891..c680b3e651cb 100644 --- a/drivers/video/fbdev/pvr2fb.c +++ b/drivers/video/fbdev/pvr2fb.c @@ -770,7 +770,7 @@ static int __maybe_unused pvr2fb_common_init(void) struct pvr2fb_par *par = currentpar; unsigned long modememused, rev; - fb_info->screen_base = ioremap_nocache(pvr2_fix.smem_start, + fb_info->screen_base = ioremap(pvr2_fix.smem_start, pvr2_fix.smem_len); if (!fb_info->screen_base) { @@ -778,7 +778,7 @@ static int __maybe_unused pvr2fb_common_init(void) goto out_err; } - par->mmio_base = ioremap_nocache(pvr2_fix.mmio_start, + par->mmio_base = ioremap(pvr2_fix.mmio_start, pvr2_fix.mmio_len); if (!par->mmio_base) { printk(KERN_ERR "pvr2fb: Failed to remap mmio space\n"); diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c index 1410f476e135..5615054a0cad 100644 --- a/drivers/video/fbdev/pxa168fb.c +++ b/drivers/video/fbdev/pxa168fb.c @@ -665,7 +665,7 @@ static int pxa168fb_probe(struct platform_device *pdev) /* * Map LCD controller registers. 
*/ - fbi->reg_base = devm_ioremap_nocache(&pdev->dev, res->start, + fbi->reg_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (fbi->reg_base == NULL) { ret = -ENOMEM; diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c index e04efb567b5c..8048499e398d 100644 --- a/drivers/video/fbdev/s1d13xxxfb.c +++ b/drivers/video/fbdev/s1d13xxxfb.c @@ -809,7 +809,7 @@ static int s1d13xxxfb_probe(struct platform_device *pdev) platform_set_drvdata(pdev, info); default_par = info->par; - default_par->regs = ioremap_nocache(pdev->resource[1].start, + default_par->regs = ioremap(pdev->resource[1].start, pdev->resource[1].end - pdev->resource[1].start +1); if (!default_par->regs) { printk(KERN_ERR PFX "unable to map registers\n"); @@ -818,7 +818,7 @@ static int s1d13xxxfb_probe(struct platform_device *pdev) } info->pseudo_palette = default_par->pseudo_palette; - info->screen_base = ioremap_nocache(pdev->resource[0].start, + info->screen_base = ioremap(pdev->resource[0].start, pdev->resource[0].end - pdev->resource[0].start +1); if (!info->screen_base) { diff --git a/drivers/video/fbdev/sh7760fb.c b/drivers/video/fbdev/sh7760fb.c index ab8fe838c776..f72b03594719 100644 --- a/drivers/video/fbdev/sh7760fb.c +++ b/drivers/video/fbdev/sh7760fb.c @@ -463,7 +463,7 @@ static int sh7760fb_probe(struct platform_device *pdev) goto out_fb; } - par->base = ioremap_nocache(res->start, resource_size(res)); + par->base = ioremap(res->start, resource_size(res)); if (!par->base) { dev_err(&pdev->dev, "cannot remap\n"); ret = -ENODEV; diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c index c249763dbf0b..54ee7e02a244 100644 --- a/drivers/video/fbdev/sh_mobile_lcdcfb.c +++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c @@ -2588,7 +2588,7 @@ static int sh_mobile_lcdc_probe(struct platform_device *pdev) if (num_channels == 2) priv->forced_fourcc = pdata->ch[0].fourcc; - priv->base = ioremap_nocache(res->start, resource_size(res)); + priv->base = ioremap(res->start, resource_size(res)); if (!priv->base) { error = -ENOMEM; goto err1; diff --git a/drivers/video/fbdev/sstfb.c b/drivers/video/fbdev/sstfb.c index 4e22ae383c87..1f171a527174 100644 --- a/drivers/video/fbdev/sstfb.c +++ b/drivers/video/fbdev/sstfb.c @@ -1363,14 +1363,14 @@ static int sstfb_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto fail_fb_mem; } - par->mmio_vbase = ioremap_nocache(fix->mmio_start, + par->mmio_vbase = ioremap(fix->mmio_start, fix->mmio_len); if (!par->mmio_vbase) { printk(KERN_ERR "sstfb: cannot remap register area %#lx\n", fix->mmio_start); goto fail_mmio_remap; } - info->screen_base = ioremap_nocache(fix->smem_start, 0x400000); + info->screen_base = ioremap(fix->smem_start, 0x400000); if (!info->screen_base) { printk(KERN_ERR "sstfb: cannot remap framebuffer %#lx\n", fix->smem_start); diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c index 9e88e3f594c2..46709443a82f 100644 --- a/drivers/video/fbdev/stifb.c +++ b/drivers/video/fbdev/stifb.c @@ -1198,7 +1198,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref) case S9000_ID_TOMCAT: /* Dual CRX, behaves else like a CRX */ /* FIXME: TomCat supports two heads: * fb.iobase = REGION_BASE(fb_info,3); - * fb.screen_base = ioremap_nocache(REGION_BASE(fb_info,2),xxx); + * fb.screen_base = ioremap(REGION_BASE(fb_info,2),xxx); * for now we only support the left one ! 
*/ xres = fb->ngle_rom.x_size_visible; yres = fb->ngle_rom.y_size_visible; @@ -1291,7 +1291,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref) strcpy(fix->id, "stifb"); info->fbops = &stifb_ops; - info->screen_base = ioremap_nocache(REGION_BASE(fb,1), fix->smem_len); + info->screen_base = ioremap(REGION_BASE(fb,1), fix->smem_len); if (!info->screen_base) { printk(KERN_ERR "stifb: failed to map memory\n"); goto out_err0; diff --git a/drivers/video/fbdev/tdfxfb.c b/drivers/video/fbdev/tdfxfb.c index fdbb1ea66e6c..0337d1a1a70b 100644 --- a/drivers/video/fbdev/tdfxfb.c +++ b/drivers/video/fbdev/tdfxfb.c @@ -1417,7 +1417,7 @@ static int tdfxfb_probe(struct pci_dev *pdev, const struct pci_device_id *id) } default_par->regbase_virt = - ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len); + ioremap(info->fix.mmio_start, info->fix.mmio_len); if (!default_par->regbase_virt) { printk(KERN_ERR "fb: Can't remap %s register area.\n", info->fix.id); diff --git a/drivers/video/fbdev/tgafb.c b/drivers/video/fbdev/tgafb.c index 286b2371c7dd..1966f1d70899 100644 --- a/drivers/video/fbdev/tgafb.c +++ b/drivers/video/fbdev/tgafb.c @@ -1438,7 +1438,7 @@ static int tgafb_register(struct device *dev) } /* Map the framebuffer. */ - mem_base = ioremap_nocache(bar0_start, bar0_len); + mem_base = ioremap(bar0_start, bar0_len); if (!mem_base) { printk(KERN_ERR "tgafb: Cannot map MMIO\n"); goto err1; diff --git a/drivers/video/fbdev/tridentfb.c b/drivers/video/fbdev/tridentfb.c index da74bf6c5996..91b2f6ca2607 100644 --- a/drivers/video/fbdev/tridentfb.c +++ b/drivers/video/fbdev/tridentfb.c @@ -1556,7 +1556,7 @@ static int trident_pci_probe(struct pci_dev *dev, return -1; } - default_par->io_virt = ioremap_nocache(tridentfb_fix.mmio_start, + default_par->io_virt = ioremap(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len); if (!default_par->io_virt) { @@ -1579,7 +1579,7 @@ static int trident_pci_probe(struct pci_dev *dev, goto out_unmap1; } - info->screen_base = ioremap_nocache(tridentfb_fix.smem_start, + info->screen_base = ioremap(tridentfb_fix.smem_start, tridentfb_fix.smem_len); if (!info->screen_base) { diff --git a/drivers/video/fbdev/valkyriefb.c b/drivers/video/fbdev/valkyriefb.c index e04fde9c1fcd..97a59b5a4570 100644 --- a/drivers/video/fbdev/valkyriefb.c +++ b/drivers/video/fbdev/valkyriefb.c @@ -356,7 +356,7 @@ int __init valkyriefb_init(void) p->total_vram = 0x100000; p->frame_buffer_phys = frame_buffer_phys; #ifdef CONFIG_MAC - p->frame_buffer = ioremap_nocache(frame_buffer_phys, p->total_vram); + p->frame_buffer = ioremap(frame_buffer_phys, p->total_vram); #else p->frame_buffer = ioremap_wt(frame_buffer_phys, p->total_vram); #endif diff --git a/drivers/video/fbdev/vermilion/cr_pll.c b/drivers/video/fbdev/vermilion/cr_pll.c index c1e3738e6789..79d42b23d850 100644 --- a/drivers/video/fbdev/vermilion/cr_pll.c +++ b/drivers/video/fbdev/vermilion/cr_pll.c @@ -159,7 +159,7 @@ static int __init cr_pll_init(void) pci_read_config_dword(mch_dev, CRVML_REG_MCHBAR, &mch_bar); mch_regs_base = - ioremap_nocache(mch_bar, CRVML_MCHMAP_SIZE); + ioremap(mch_bar, CRVML_MCHMAP_SIZE); if (!mch_regs_base) { printk(KERN_ERR "Carillo Ranch MCH device was not enabled.\n"); diff --git a/drivers/video/fbdev/vermilion/vermilion.c b/drivers/video/fbdev/vermilion/vermilion.c index 498038a964ee..ff61605b8764 100644 --- a/drivers/video/fbdev/vermilion/vermilion.c +++ b/drivers/video/fbdev/vermilion/vermilion.c @@ -317,7 +317,7 @@ static int vmlfb_enable_mmio(struct vml_par *par) ": Could not claim display 
controller MMIO.\n"); return -EBUSY; } - par->vdc_mem = ioremap_nocache(par->vdc_mem_base, par->vdc_mem_size); + par->vdc_mem = ioremap(par->vdc_mem_base, par->vdc_mem_size); if (par->vdc_mem == NULL) { printk(KERN_ERR MODULE_NAME ": Could not map display controller MMIO.\n"); @@ -332,7 +332,7 @@ static int vmlfb_enable_mmio(struct vml_par *par) err = -EBUSY; goto out_err_1; } - par->gpu_mem = ioremap_nocache(par->gpu_mem_base, par->gpu_mem_size); + par->gpu_mem = ioremap(par->gpu_mem_base, par->gpu_mem_size); if (par->gpu_mem == NULL) { printk(KERN_ERR MODULE_NAME ": Could not map GPU MMIO.\n"); err = -ENOMEM; diff --git a/drivers/video/fbdev/via/via-core.c b/drivers/video/fbdev/via/via-core.c index ffa2ca2d3f5e..703ddee9a244 100644 --- a/drivers/video/fbdev/via/via-core.c +++ b/drivers/video/fbdev/via/via-core.c @@ -442,7 +442,7 @@ static int via_pci_setup_mmio(struct viafb_dev *vdev) */ vdev->engine_start = pci_resource_start(vdev->pdev, 1); vdev->engine_len = pci_resource_len(vdev->pdev, 1); - vdev->engine_mmio = ioremap_nocache(vdev->engine_start, + vdev->engine_mmio = ioremap(vdev->engine_start, vdev->engine_len); if (vdev->engine_mmio == NULL) dev_err(&vdev->pdev->dev, diff --git a/drivers/video/fbdev/w100fb.c b/drivers/video/fbdev/w100fb.c index 3be07807edcd..0796b1d90981 100644 --- a/drivers/video/fbdev/w100fb.c +++ b/drivers/video/fbdev/w100fb.c @@ -648,12 +648,12 @@ int w100fb_probe(struct platform_device *pdev) return -EINVAL; /* Remap the chip base address */ - remapped_base = ioremap_nocache(mem->start+W100_CFG_BASE, W100_CFG_LEN); + remapped_base = ioremap(mem->start+W100_CFG_BASE, W100_CFG_LEN); if (remapped_base == NULL) goto out; /* Map the register space */ - remapped_regs = ioremap_nocache(mem->start+W100_REG_BASE, W100_REG_LEN); + remapped_regs = ioremap(mem->start+W100_REG_BASE, W100_REG_LEN); if (remapped_regs == NULL) goto out; @@ -672,7 +672,7 @@ int w100fb_probe(struct platform_device *pdev) printk(" at 0x%08lx.\n", (unsigned long) mem->start+W100_CFG_BASE); /* Remap the framebuffer */ - remapped_fbuf = ioremap_nocache(mem->start+MEM_WINDOW_BASE, MEM_WINDOW_SIZE); + remapped_fbuf = ioremap(mem->start+MEM_WINDOW_BASE, MEM_WINDOW_SIZE); if (remapped_fbuf == NULL) goto out; diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c index 2307b0329aec..d823d558c0c4 100644 --- a/drivers/virt/vboxguest/vboxguest_core.c +++ b/drivers/virt/vboxguest/vboxguest_core.c @@ -6,6 +6,7 @@ */ #include <linux/device.h> +#include <linux/io.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/sizes.h> diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c index 43c391626a00..50920b6fc319 100644 --- a/drivers/virt/vboxguest/vboxguest_utils.c +++ b/drivers/virt/vboxguest/vboxguest_utils.c @@ -7,6 +7,7 @@ */ #include <linux/errno.h> +#include <linux/io.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/module.h> diff --git a/drivers/vme/boards/vme_vmivme7805.c b/drivers/vme/boards/vme_vmivme7805.c index 1b6e42e5e8fd..51e056bae943 100644 --- a/drivers/vme/boards/vme_vmivme7805.c +++ b/drivers/vme/boards/vme_vmivme7805.c @@ -55,7 +55,7 @@ static int vmic_probe(struct pci_dev *pdev, const struct pci_device_id *id) } /* Map registers in BAR 0 */ - vmic_base = ioremap_nocache(pci_resource_start(pdev, 0), 16); + vmic_base = ioremap(pci_resource_start(pdev, 0), 16); if (!vmic_base) { dev_err(&pdev->dev, "Unable to remap CRG region\n"); retval = -EIO; diff --git 
a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c index 1edb8a5de873..ea938dc29c5e 100644 --- a/drivers/vme/bridges/vme_ca91cx42.c +++ b/drivers/vme/bridges/vme_ca91cx42.c @@ -554,7 +554,7 @@ static int ca91cx42_alloc_resource(struct vme_master_resource *image, goto err_resource; } - image->kern_base = ioremap_nocache( + image->kern_base = ioremap( image->bus_resource.start, size); if (!image->kern_base) { dev_err(ca91cx42_bridge->parent, "Failed to remap resource\n"); @@ -1638,7 +1638,7 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id) } /* map registers in BAR 0 */ - ca91cx42_device->base = ioremap_nocache(pci_resource_start(pdev, 0), + ca91cx42_device->base = ioremap(pci_resource_start(pdev, 0), 4096); if (!ca91cx42_device->base) { dev_err(&pdev->dev, "Unable to remap CRG region\n"); diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/vme/bridges/vme_tsi148.c index 7e079d39bd76..50ae26977a02 100644 --- a/drivers/vme/bridges/vme_tsi148.c +++ b/drivers/vme/bridges/vme_tsi148.c @@ -770,7 +770,7 @@ static int tsi148_alloc_resource(struct vme_master_resource *image, goto err_resource; } - image->kern_base = ioremap_nocache( + image->kern_base = ioremap( image->bus_resource.start, size); if (!image->kern_base) { dev_err(tsi148_bridge->parent, "Failed to remap resource\n"); @@ -2317,7 +2317,7 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id) } /* map registers in BAR 0 */ - tsi148_device->base = ioremap_nocache(pci_resource_start(pdev, 0), + tsi148_device->base = ioremap(pci_resource_start(pdev, 0), 4096); if (!tsi148_device->base) { dev_err(&pdev->dev, "Unable to remap CRG region\n"); diff --git a/drivers/w1/masters/matrox_w1.c b/drivers/w1/masters/matrox_w1.c index 3110791a2f1c..ee716c715710 100644 --- a/drivers/w1/masters/matrox_w1.c +++ b/drivers/w1/masters/matrox_w1.c @@ -139,7 +139,7 @@ static int matrox_w1_probe(struct pci_dev *pdev, const struct pci_device_id *ent dev->phys_addr = pci_resource_start(pdev, 1); - dev->virt_addr = ioremap_nocache(dev->phys_addr, 16384); + dev->virt_addr = ioremap(dev->phys_addr, 16384); if (!dev->virt_addr) { dev_err(&pdev->dev, "%s: failed to ioremap(0x%lx, %d).\n", __func__, dev->phys_addr, 16384); diff --git a/drivers/watchdog/bcm63xx_wdt.c b/drivers/watchdog/bcm63xx_wdt.c index 8a043b52aa2f..7cdb25363ea0 100644 --- a/drivers/watchdog/bcm63xx_wdt.c +++ b/drivers/watchdog/bcm63xx_wdt.c @@ -246,7 +246,7 @@ static int bcm63xx_wdt_probe(struct platform_device *pdev) return -ENODEV; } - bcm63xx_wdt_device.regs = devm_ioremap_nocache(&pdev->dev, r->start, + bcm63xx_wdt_device.regs = devm_ioremap(&pdev->dev, r->start, resource_size(r)); if (!bcm63xx_wdt_device.regs) { dev_err(&pdev->dev, "failed to remap I/O resources\n"); diff --git a/drivers/watchdog/intel_scu_watchdog.c b/drivers/watchdog/intel_scu_watchdog.c index 6ad5bf3451ec..804e35940983 100644 --- a/drivers/watchdog/intel_scu_watchdog.c +++ b/drivers/watchdog/intel_scu_watchdog.c @@ -463,7 +463,7 @@ static int __init intel_scu_watchdog_init(void) return -ENODEV; } - tmp_addr = ioremap_nocache(watchdog_device.timer_tbl_ptr->phys_addr, + tmp_addr = ioremap(watchdog_device.timer_tbl_ptr->phys_addr, 20); if (tmp_addr == NULL) { diff --git a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c index 1dfede0abf18..aee3c2efd565 100644 --- a/drivers/watchdog/rc32434_wdt.c +++ b/drivers/watchdog/rc32434_wdt.c @@ -26,7 +26,7 @@ #include <linux/platform_device.h> /* For platform_driver framework */ #include 
<linux/spinlock.h> /* For spin_lock/spin_unlock/... */ #include <linux/uaccess.h> /* For copy_to_user/put_user/... */ -#include <linux/io.h> /* For devm_ioremap_nocache */ +#include <linux/io.h> /* For devm_ioremap */ #include <asm/mach-rc32434/integ.h> /* For the Watchdog registers */ @@ -267,7 +267,7 @@ static int rc32434_wdt_probe(struct platform_device *pdev) return -ENODEV; } - wdt_reg = devm_ioremap_nocache(&pdev->dev, r->start, resource_size(r)); + wdt_reg = devm_ioremap(&pdev->dev, r->start, resource_size(r)); if (!wdt_reg) { pr_err("failed to remap I/O resources\n"); return -ENXIO; } diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c index 8b9919c26095..70650b248de5 100644 --- a/drivers/xen/preempt.c +++ b/drivers/xen/preempt.c @@ -8,7 +8,7 @@ #include <linux/sched.h> #include <xen/xen-ops.h> -#ifndef CONFIG_PREEMPT +#ifndef CONFIG_PREEMPTION /* * Some hypercalls issued by the toolstack can take many 10s of @@ -37,4 +37,4 @@ asmlinkage __visible void xen_maybe_preempt_hcall(void) __this_cpu_write(xen_in_preemptible_hcall, true); } } -#endif /* CONFIG_PREEMPT */ +#endif /* CONFIG_PREEMPTION */
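
A few notes on recurring patterns in this section, each with a small hedged sketch in C. First, the session-slot bookkeeping in the new amdtee/core.c: amdtee_open_session() claims a per-TA slot from a fixed-size bitmap under a spinlock via find_first_zero_bit()/set_bit(), and the failure path releases the slot again with clear_bit() under the same lock. A minimal self-contained sketch of that idiom; the demo_* names and the DEMO_NUM_SESSIONS bound are invented for illustration, not the driver's exact context:

#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

#define DEMO_NUM_SESSIONS 32	/* illustrative bound only */

struct demo_table {
	spinlock_t lock;	/* protects mask; set up with spin_lock_init() */
	DECLARE_BITMAP(mask, DEMO_NUM_SESSIONS);
};

/* Claim the lowest free slot; return its index, or -ENOMEM when full. */
static int demo_claim_slot(struct demo_table *t)
{
	unsigned int i;

	spin_lock(&t->lock);
	i = find_first_zero_bit(t->mask, DEMO_NUM_SESSIONS);
	if (i < DEMO_NUM_SESSIONS)
		set_bit(i, t->mask);
	spin_unlock(&t->lock);

	return i < DEMO_NUM_SESSIONS ? (int)i : -ENOMEM;
}

The mask stays consistent with the driver's session_info[] table precisely because both the claim and the clear_bit() after a failed handle_open_session() happen under the same lock.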
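
Session lifetime in the same file rests on a kref: alloc_session() takes kref_get() on an existing entry or kref_init() on a fresh one, and amdtee_close_session() drops its reference with kref_put(), whose callback unlinks and frees the session. A sketch of that release pattern, assuming list locking is handled where the driver takes session_list_mutex (demo_* names are hypothetical):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>

struct demo_session {
	struct kref refcount;
	struct list_head node;
};

/* Runs only when the last reference is dropped. */
static void demo_destroy(struct kref *ref)
{
	struct demo_session *s = container_of(ref, struct demo_session,
					      refcount);

	list_del(&s->node);	/* the driver holds session_list_mutex here */
	kfree(s);
}

static void demo_put(struct demo_session *s)
{
	kref_put(&s->refcount, demo_destroy);	/* may invoke demo_destroy() */
}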
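
The new shm_pool.c uses the standard page-order allocation idiom: get_order() rounds the requested size up to a power-of-two number of pages, and the pool records the PAGE_SIZE << order it actually obtained, so the whole region can be mapped into the TEE and later freed with the same order. An isolated sketch of just that arithmetic (demo_* is an invented name):

#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Allocate zeroed, page-aligned memory. *actual is rounded up to a
 * power-of-two number of pages and may exceed the requested size.
 */
static void *demo_alloc_pages(size_t size, size_t *actual)
{
	unsigned int order = get_order(size);
	unsigned long va = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);

	if (!va)	/* __get_free_pages() returns 0 on failure, not ERR_PTR */
		return NULL;

	*actual = PAGE_SIZE << order;
	return (void *)va;
}

Free with free_pages((unsigned long)p, get_order(*actual)), mirroring pool_op_free() above.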
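
Finally, the long run of tty, usb, vfio, video, vme, and watchdog hunks is a single mechanical substitution: by this point ioremap_nocache() was a plain alias for ioremap(), which already returns an uncached MMIO mapping on the architectures these drivers build for, so the rename changes no behaviour. The shape of every conversion, shown on a hypothetical probe helper:

#include <linux/io.h>
#include <linux/ioport.h>

/*
 * Map a device register window; identical semantics to the removed
 * ioremap_nocache() spelling, including returning NULL on failure.
 */
static void __iomem *demo_map_regs(struct resource *res)
{
	return ioremap(res->start, resource_size(res));
}

Callers keep their existing NULL checks and iounmap() pairing; the devm_ioremap() variants seen above are device-managed and are unmapped automatically on driver detach.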