Diffstat (limited to 'drivers')
1507 files changed, 38673 insertions, 10439 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 002838d23b86..cc57bab146b5 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -241,6 +241,7 @@ config ACPI_CPU_FREQ_PSS
 
 config ACPI_PROCESSOR_CSTATE
 	def_bool y
+	depends on ACPI_PROCESSOR
 	depends on IA64 || X86
 
 config ACPI_PROCESSOR_IDLE
diff --git a/drivers/acpi/acpi_lpit.c b/drivers/acpi/acpi_lpit.c
index 433376e819bb..953437a216f6 100644
--- a/drivers/acpi/acpi_lpit.c
+++ b/drivers/acpi/acpi_lpit.c
@@ -104,7 +104,7 @@ static void lpit_update_residency(struct lpit_residency_info *info,
 
 	info->gaddr = lpit_native->residency_counter;
 	if (info->gaddr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
-		info->iomem_addr = ioremap_nocache(info->gaddr.address,
+		info->iomem_addr = ioremap(info->gaddr.address,
 						   info->gaddr.bit_width / 8);
 		if (!info->iomem_addr)
 			return;
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 2c4dda0787e8..5379bc3f275d 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -705,3 +705,185 @@ void __init acpi_processor_init(void)
 	acpi_scan_add_handler_with_hotplug(&processor_handler, "processor");
 	acpi_scan_add_handler(&processor_container_handler);
 }
+
+#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
+/**
+ * acpi_processor_claim_cst_control - Request _CST control from the platform.
+ */
+bool acpi_processor_claim_cst_control(void)
+{
+	static bool cst_control_claimed;
+	acpi_status status;
+
+	if (!acpi_gbl_FADT.cst_control || cst_control_claimed)
+		return true;
+
+	status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
+				    acpi_gbl_FADT.cst_control, 8);
+	if (ACPI_FAILURE(status)) {
+		pr_warn("ACPI: Failed to claim processor _CST control\n");
+		return false;
+	}
+
+	cst_control_claimed = true;
+	return true;
+}
+EXPORT_SYMBOL_GPL(acpi_processor_claim_cst_control);
+
+/**
+ * acpi_processor_evaluate_cst - Evaluate the processor _CST control method.
+ * @handle: ACPI handle of the processor object containing the _CST.
+ * @cpu: The numeric ID of the target CPU.
+ * @info: Object to write the C-states information into.
+ *
+ * Extract the C-state information for the given CPU from the output of the
+ * _CST control method under the corresponding ACPI processor object (or
+ * processor device object) and populate @info with it.
+ *
+ * If any ACPI_ADR_SPACE_FIXED_HARDWARE C-states are found, invoke
+ * acpi_processor_ffh_cstate_probe() to verify them and update the
+ * cpu_cstate_entry data for @cpu.
+ */
+int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu,
+				struct acpi_processor_power *info)
+{
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+	union acpi_object *cst;
+	acpi_status status;
+	u64 count;
+	int last_index = 0;
+	int i, ret = 0;
+
+	status = acpi_evaluate_object(handle, "_CST", NULL, &buffer);
+	if (ACPI_FAILURE(status)) {
+		acpi_handle_debug(handle, "No _CST\n");
+		return -ENODEV;
+	}
+
+	cst = buffer.pointer;
+
+	/* There must be at least 2 elements. */
+	if (!cst || cst->type != ACPI_TYPE_PACKAGE || cst->package.count < 2) {
+		acpi_handle_warn(handle, "Invalid _CST output\n");
+		ret = -EFAULT;
+		goto end;
+	}
+
+	count = cst->package.elements[0].integer.value;
+
+	/* Validate the number of C-states. */
+	if (count < 1 || count != cst->package.count - 1) {
+		acpi_handle_warn(handle, "Inconsistent _CST data\n");
+		ret = -EFAULT;
+		goto end;
+	}
+
+	for (i = 1; i <= count; i++) {
+		union acpi_object *element;
+		union acpi_object *obj;
+		struct acpi_power_register *reg;
+		struct acpi_processor_cx cx;
+
+		/*
+		 * If there is not enough space for all C-states, skip the
+		 * excess ones and log a warning.
+		 */
+		if (last_index >= ACPI_PROCESSOR_MAX_POWER - 1) {
+			acpi_handle_warn(handle,
+					 "No room for more idle states (limit: %d)\n",
+					 ACPI_PROCESSOR_MAX_POWER - 1);
+			break;
+		}
+
+		memset(&cx, 0, sizeof(cx));
+
+		element = &cst->package.elements[i];
+		if (element->type != ACPI_TYPE_PACKAGE)
+			continue;
+
+		if (element->package.count != 4)
+			continue;
+
+		obj = &element->package.elements[0];
+
+		if (obj->type != ACPI_TYPE_BUFFER)
+			continue;
+
+		reg = (struct acpi_power_register *)obj->buffer.pointer;
+
+		obj = &element->package.elements[1];
+		if (obj->type != ACPI_TYPE_INTEGER)
+			continue;
+
+		cx.type = obj->integer.value;
+		/*
+		 * There are known cases in which the _CST output does not
+		 * contain C1, so if the type of the first state found is not
+		 * C1, leave an empty slot for C1 to be filled in later.
+		 */
+		if (i == 1 && cx.type != ACPI_STATE_C1)
+			last_index = 1;
+
+		cx.address = reg->address;
+		cx.index = last_index + 1;
+
+		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
+			if (!acpi_processor_ffh_cstate_probe(cpu, &cx, reg)) {
+				/*
+				 * In the majority of cases _CST describes C1 as
+				 * a FIXED_HARDWARE C-state, but if the command
+				 * line forbids using MWAIT, use CSTATE_HALT for
+				 * C1 regardless.
+				 */
+				if (cx.type == ACPI_STATE_C1 &&
+				    boot_option_idle_override == IDLE_NOMWAIT) {
+					cx.entry_method = ACPI_CSTATE_HALT;
+					snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
+				} else {
+					cx.entry_method = ACPI_CSTATE_FFH;
+				}
+			} else if (cx.type == ACPI_STATE_C1) {
+				/*
+				 * In the special case of C1, FIXED_HARDWARE can
+				 * be handled by executing the HLT instruction.
+				 */
+				cx.entry_method = ACPI_CSTATE_HALT;
+				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
+			} else {
+				continue;
+			}
+		} else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+			cx.entry_method = ACPI_CSTATE_SYSTEMIO;
+			snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
+				 cx.address);
+		} else {
+			continue;
+		}
+
+		if (cx.type == ACPI_STATE_C1)
+			cx.valid = 1;
+
+		obj = &element->package.elements[2];
+		if (obj->type != ACPI_TYPE_INTEGER)
+			continue;
+
+		cx.latency = obj->integer.value;
+
+		obj = &element->package.elements[3];
+		if (obj->type != ACPI_TYPE_INTEGER)
+			continue;
+
+		memcpy(&info->states[++last_index], &cx, sizeof(cx));
+	}
+
+	acpi_handle_info(handle, "Found %d idle states\n", last_index);
+
+	info->count = last_index;
+
+end:
+	kfree(buffer.pointer);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(acpi_processor_evaluate_cst);
+#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 2f380e7381d6..15c5b272e698 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -2187,7 +2187,7 @@ int acpi_video_register(void)
 	if (register_count) {
 		/*
 		 * if the function of acpi_video_register is already called,
-		 * don't register the acpi_vide_bus again and return no error.
+		 * don't register the acpi_video_bus again and return no error.
*/ goto leave; } diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h index 863ade9add6d..173447d50acf 100644 --- a/drivers/acpi/acpica/acapps.h +++ b/drivers/acpi/acpica/acapps.h @@ -3,7 +3,7 @@ * * Module Name: acapps - common include for ACPI applications/tools * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ @@ -17,7 +17,7 @@ /* Common info for tool signons */ #define ACPICA_NAME "Intel ACPI Component Architecture" -#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2019 Intel Corporation" +#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2020 Intel Corporation" #if ACPI_MACHINE_WIDTH == 64 #define ACPI_WIDTH " (64-bit version)" diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h index 54f81eac7ec9..89101e53324b 100644 --- a/drivers/acpi/acpica/accommon.h +++ b/drivers/acpi/acpica/accommon.h @@ -3,7 +3,7 @@ * * Name: accommon.h - Common include files for generation of ACPICA source * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acconvert.h b/drivers/acpi/acpica/acconvert.h index d5478cd4a857..ede4b9cc9e85 100644 --- a/drivers/acpi/acpica/acconvert.h +++ b/drivers/acpi/acpica/acconvert.h @@ -3,7 +3,7 @@ * * Module Name: acapps - common include for ACPI applications/tools * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h index 694cf206fa9a..a676daaa2da5 100644 --- a/drivers/acpi/acpica/acdebug.h +++ b/drivers/acpi/acpica/acdebug.h @@ -3,7 +3,7 @@ * * Name: acdebug.h - ACPI/AML debugger * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h index 82f81501566b..7ba6e308f146 100644 --- a/drivers/acpi/acpica/acdispat.h +++ b/drivers/acpi/acpica/acdispat.h @@ -3,7 +3,7 @@ * * Name: acdispat.h - dispatcher (parser to interpreter interface) * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h index c8652f91054e..79f292687bd6 100644 --- a/drivers/acpi/acpica/acevents.h +++ b/drivers/acpi/acpica/acevents.h @@ -3,7 +3,7 @@ * * Name: acevents.h - Event subcomponent prototypes and defines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h index fd3beea93421..38ffa2c0a496 100644 --- a/drivers/acpi/acpica/acglobal.h +++ b/drivers/acpi/acpica/acglobal.h @@ -3,7 +3,7 @@ * * Name: acglobal.h - Declarations for global variables * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h index bcf8f7501db7..67f282e9e0af 100644 --- a/drivers/acpi/acpica/achware.h +++ b/drivers/acpi/acpica/achware.h @@ -3,7 +3,7 @@ * * Name: achware.h -- hardware specific interfaces * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h index 20706adbc148..a6d896cda2a5 100644 --- a/drivers/acpi/acpica/acinterp.h +++ b/drivers/acpi/acpica/acinterp.h @@ -3,7 +3,7 @@ * * Name: acinterp.h - Interpreter subcomponent prototypes and defines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index 1ea52576f0a2..af58cd2dc9d3 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h @@ -3,7 +3,7 @@ * * Name: aclocal.h - Internal data types used across the ACPI subsystem * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h index 283614e82a20..2269e10bc21b 100644 --- a/drivers/acpi/acpica/acmacros.h +++ b/drivers/acpi/acpica/acmacros.h @@ -3,7 +3,7 @@ * * Name: acmacros.h - C macros for the entire subsystem. * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h index 7da1864798a0..e618ddfab2fd 100644 --- a/drivers/acpi/acpica/acnamesp.h +++ b/drivers/acpi/acpica/acnamesp.h @@ -3,7 +3,7 @@ * * Name: acnamesp.h - Namespace subcomponent prototypes and defines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h index 8def0e3d690f..9f0219a8cb98 100644 --- a/drivers/acpi/acpica/acobject.h +++ b/drivers/acpi/acpica/acobject.h @@ -3,7 +3,7 @@ * * Name: acobject.h - Definition of union acpi_operand_object (Internal object only) * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
* *****************************************************************************/ @@ -260,7 +260,8 @@ struct acpi_object_index_field { /* The buffer_field is different in that it is part of a Buffer, not an op_region */ struct acpi_object_buffer_field { - ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *buffer_obj; /* Containing Buffer object */ + ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO u8 is_create_field; /* Special case for objects created by create_field() */ + union acpi_operand_object *buffer_obj; /* Containing Buffer object */ }; /****************************************************************************** diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h index 9d78134428e3..8825394be9ab 100644 --- a/drivers/acpi/acpica/acopcode.h +++ b/drivers/acpi/acpica/acopcode.h @@ -3,7 +3,7 @@ * * Name: acopcode.h - AML opcode information for the AML parser and interpreter * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h index 6e32c97cba6c..bc00b85c0a8f 100644 --- a/drivers/acpi/acpica/acparser.h +++ b/drivers/acpi/acpica/acparser.h @@ -3,7 +3,7 @@ * * Module Name: acparser.h - AML Parser subcomponent prototypes and defines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h index 387163b962a7..cd0f5df0ea23 100644 --- a/drivers/acpi/acpica/acpredef.h +++ b/drivers/acpi/acpica/acpredef.h @@ -3,7 +3,7 @@ * * Name: acpredef - Information table for ACPI predefined methods and objects * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h index 422cd8f2b92e..6de8a1650d3d 100644 --- a/drivers/acpi/acpica/acresrc.h +++ b/drivers/acpi/acpica/acresrc.h @@ -3,7 +3,7 @@ * * Name: acresrc.h - Resource Manager function prototypes * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h index 2043dff370b1..4c900c108f3f 100644 --- a/drivers/acpi/acpica/acstruct.h +++ b/drivers/acpi/acpica/acstruct.h @@ -3,7 +3,7 @@ * * Name: acstruct.h - Internal structs * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h index dfbf1dbd4033..734624facda3 100644 --- a/drivers/acpi/acpica/actables.h +++ b/drivers/acpi/acpica/actables.h @@ -3,7 +3,7 @@ * * Name: actables.h - ACPI table management * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h index 5fb50634e08e..7c89b470ec81 100644 --- a/drivers/acpi/acpica/acutils.h +++ b/drivers/acpi/acpica/acutils.h @@ -3,7 +3,7 @@ * * Name: acutils.h -- prototypes for the common (subsystem-wide) procedures * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h index 49e412edd7c6..1d541bbac4a3 100644 --- a/drivers/acpi/acpica/amlcode.h +++ b/drivers/acpi/acpica/amlcode.h @@ -5,7 +5,7 @@ * Declarations and definitions contained herein are derived * directly from the ACPI specification. * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h index 7c3bd4ab60fc..e5234e001acf 100644 --- a/drivers/acpi/acpica/amlresrc.h +++ b/drivers/acpi/acpica/amlresrc.h @@ -3,7 +3,7 @@ * * Module Name: amlresrc.h - AML resource descriptors * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/dbhistry.c b/drivers/acpi/acpica/dbhistry.c index 47d2e5059849..bb9600b867ee 100644 --- a/drivers/acpi/acpica/dbhistry.c +++ b/drivers/acpi/acpica/dbhistry.c @@ -3,7 +3,7 @@ * * Module Name: dbhistry - debugger HISTORY command * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/dbinput.c b/drivers/acpi/acpica/dbinput.c index e1632b340182..aa71f65395d2 100644 --- a/drivers/acpi/acpica/dbinput.c +++ b/drivers/acpi/acpica/dbinput.c @@ -816,7 +816,7 @@ acpi_db_command_dispatch(char *input_buffer, if (ACPI_FAILURE(status) || temp64 >= ACPI_NUM_PREDEFINED_REGIONS) { acpi_os_printf - ("Invalid adress space ID: must be between 0 and %u inclusive\n", + ("Invalid address space ID: must be between 0 and %u inclusive\n", ACPI_NUM_PREDEFINED_REGIONS - 1); return (AE_OK); } diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c index 85b34d02233e..ad17f62e51d9 100644 --- a/drivers/acpi/acpica/dsargs.c +++ b/drivers/acpi/acpica/dsargs.c @@ -4,7 +4,7 @@ * Module Name: dsargs - Support for execution of dynamic arguments for static * objects (regions, fields, buffer fields, etc.) * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c index 5034fab9cf69..4b5b6e859f62 100644 --- a/drivers/acpi/acpica/dscontrol.c +++ b/drivers/acpi/acpica/dscontrol.c @@ -4,7 +4,7 @@ * Module Name: dscontrol - Support for execution control opcodes - * if/else/while/return * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/dsdebug.c b/drivers/acpi/acpica/dsdebug.c index 0d3e1ced1f57..63bc5f19fb82 100644 --- a/drivers/acpi/acpica/dsdebug.c +++ b/drivers/acpi/acpica/dsdebug.c @@ -3,7 +3,7 @@ * * Module Name: dsdebug - Parser/Interpreter interface - debugging * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c index faa38a22263a..c901f5aec739 100644 --- a/drivers/acpi/acpica/dsfield.c +++ b/drivers/acpi/acpica/dsfield.c @@ -3,7 +3,7 @@ * * Module Name: dsfield - Dispatcher field routines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ @@ -243,7 +243,7 @@ cleanup: * FUNCTION: acpi_ds_get_field_names * * PARAMETERS: info - create_field info structure - * ` walk_state - Current method state + * walk_state - Current method state * arg - First parser arg for the field name list * * RETURN: Status diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c index a1ffed29903b..9be2a309424c 100644 --- a/drivers/acpi/acpica/dsinit.c +++ b/drivers/acpi/acpica/dsinit.c @@ -3,7 +3,7 @@ * * Module Name: dsinit - Object initialization namespace walk * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c index f59b4d944f7f..cf67caff878a 100644 --- a/drivers/acpi/acpica/dsmethod.c +++ b/drivers/acpi/acpica/dsmethod.c @@ -3,7 +3,7 @@ * * Module Name: dsmethod - Parser/Interpreter interface - control method parsing * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c index 179129a2deb1..c0a14a6a2c20 100644 --- a/drivers/acpi/acpica/dsobject.c +++ b/drivers/acpi/acpica/dsobject.c @@ -3,7 +3,7 @@ * * Module Name: dsobject - Dispatcher object management routines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c index 10f32b62608e..d9c26e720cb7 100644 --- a/drivers/acpi/acpica/dsopcode.c +++ b/drivers/acpi/acpica/dsopcode.c @@ -3,7 +3,7 @@ * * Module Name: dsopcode - Dispatcher support for regions and fields * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
  *
  *****************************************************************************/
@@ -217,6 +217,8 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
 	}
 
 	obj_desc->buffer_field.buffer_obj = buffer_desc;
+	obj_desc->buffer_field.is_create_field =
+	    aml_opcode == AML_CREATE_FIELD_OP;
 
 	/* Reference count for buffer_desc inherits obj_desc count */
 
diff --git a/drivers/acpi/acpica/dspkginit.c b/drivers/acpi/acpica/dspkginit.c
index 997faa10f615..d869568d55c2 100644
--- a/drivers/acpi/acpica/dspkginit.c
+++ b/drivers/acpi/acpica/dspkginit.c
@@ -3,7 +3,7 @@
  *
  * Module Name: dspkginit - Completion of deferred package initialization
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index d75aae304595..5e81a1ae44cf 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -4,7 +4,7 @@
  * Module Name: dswexec - Dispatcher method execution callbacks;
  *              dispatch to interpreter.
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index c88fd31208a5..697974e37edf 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -3,7 +3,7 @@
  *
  * Module Name: dswload - Dispatcher first pass namespace load callbacks
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
@@ -410,6 +410,27 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
 
 	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op, walk_state));
 
+	/*
+	 * Disassembler: handle create field operators here.
+	 *
+	 * create_buffer_field is a deferred op that is typically processed in load
+	 * pass 2. However, disassembly of control method contents walks the parse
+	 * tree with ACPI_PARSE_LOAD_PASS1, and AML_CREATE operators are processed
+	 * in a later walk. This is a problem when there is a control method that
+	 * has the same name as the AML_CREATE object. In this case, any use of the
+	 * name segment will be detected as a method call rather than a reference
+	 * to a buffer field.
+	 *
+	 * This earlier creation during disassembly solves this issue by inserting
+	 * the named object in the ACPI namespace so that references to this name
+	 * would be a name string rather than a method call.
+	 */
+	if ((walk_state->parse_flags & ACPI_PARSE_DISASSEMBLE) &&
+	    (walk_state->op_info->flags & AML_CREATE)) {
+		status = acpi_ds_create_buffer_field(op, walk_state);
+		return_ACPI_STATUS(status);
+	}
+
 	/* We are only interested in opcodes that have an associated name */
 
 	if (!(walk_state->op_info->flags & (AML_NAMED | AML_FIELD))) {
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 935a8e2623e4..b31457ca926c 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -3,7 +3,7 @@
  *
  * Module Name: dswload2 - Dispatcher second pass namespace load callbacks
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c index 39acf7b286da..9c397642fed7 100644 --- a/drivers/acpi/acpica/dswscope.c +++ b/drivers/acpi/acpica/dswscope.c @@ -3,7 +3,7 @@ * * Module Name: dswscope - Scope stack manipulation * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c index de79f835a373..809a0c0536b5 100644 --- a/drivers/acpi/acpica/dswstate.c +++ b/drivers/acpi/acpica/dswstate.c @@ -3,7 +3,7 @@ * * Module Name: dswstate - Dispatcher parse tree walk management routines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c index 9e2f5a05c066..8c83d8c620dc 100644 --- a/drivers/acpi/acpica/evevent.c +++ b/drivers/acpi/acpica/evevent.c @@ -3,7 +3,7 @@ * * Module Name: evevent - Fixed Event handling and dispatch * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c index 5c77bee5d31f..0ced84ae13e4 100644 --- a/drivers/acpi/acpica/evglock.c +++ b/drivers/acpi/acpica/evglock.c @@ -3,7 +3,7 @@ * * Module Name: evglock - Global Lock support * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c index 344feba29063..3e39907fedd9 100644 --- a/drivers/acpi/acpica/evgpe.c +++ b/drivers/acpi/acpica/evgpe.c @@ -3,7 +3,7 @@ * * Module Name: evgpe - General Purpose Event handling and dispatch * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c index 9c7adaa7b582..132adff1e131 100644 --- a/drivers/acpi/acpica/evgpeblk.c +++ b/drivers/acpi/acpica/evgpeblk.c @@ -3,7 +3,7 @@ * * Module Name: evgpeblk - GPE block creation and initialization. * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c index 70d21d5ec5f3..6effd8076dcc 100644 --- a/drivers/acpi/acpica/evgpeinit.c +++ b/drivers/acpi/acpica/evgpeinit.c @@ -3,7 +3,7 @@ * * Module Name: evgpeinit - System GPE initialization and update * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c index 917892227e09..738873e876ca 100644 --- a/drivers/acpi/acpica/evgpeutil.c +++ b/drivers/acpi/acpica/evgpeutil.c @@ -3,7 +3,7 @@ * * Module Name: evgpeutil - GPE utilities * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c index 3ef4e27995f0..5884eba047f7 100644 --- a/drivers/acpi/acpica/evhandler.c +++ b/drivers/acpi/acpica/evhandler.c @@ -3,7 +3,7 @@ * * Module Name: evhandler - Support for Address Space handlers * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c index aa98fe07cd1b..ce1eda6beb84 100644 --- a/drivers/acpi/acpica/evmisc.c +++ b/drivers/acpi/acpica/evmisc.c @@ -3,7 +3,7 @@ * * Module Name: evmisc - Miscellaneous event manager support functions * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c index 1ff126460007..738d4b231f34 100644 --- a/drivers/acpi/acpica/evregion.c +++ b/drivers/acpi/acpica/evregion.c @@ -3,7 +3,7 @@ * * Module Name: evregion - Operation Region support * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c index aee09640d710..aefc0145e583 100644 --- a/drivers/acpi/acpica/evrgnini.c +++ b/drivers/acpi/acpica/evrgnini.c @@ -3,7 +3,7 @@ * * Module Name: evrgnini- ACPI address_space (op_region) init * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c index 279ef0557aa3..e4e012297eee 100644 --- a/drivers/acpi/acpica/evxface.c +++ b/drivers/acpi/acpica/evxface.c @@ -3,7 +3,7 @@ * * Module Name: evxface - External interfaces for ACPI events * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c index e528fe56b755..1a15b0087379 100644 --- a/drivers/acpi/acpica/evxfevnt.c +++ b/drivers/acpi/acpica/evxfevnt.c @@ -3,7 +3,7 @@ * * Module Name: evxfevnt - External Interfaces, ACPI event disable/enable * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c index 04a40d563dd6..2c39ff2a7406 100644 --- a/drivers/acpi/acpica/evxfgpe.c +++ b/drivers/acpi/acpica/evxfgpe.c @@ -3,7 +3,7 @@ * * Module Name: evxfgpe - External Interfaces for General Purpose Events (GPEs) * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c index 47265b073e6f..da97fd0c6b51 100644 --- a/drivers/acpi/acpica/evxfregn.c +++ b/drivers/acpi/acpica/evxfregn.c @@ -4,7 +4,7 @@ * Module Name: evxfregn - External Interfaces, ACPI Operation Regions and * Address Spaces. * - * Copyright (C) 2000 - 2019, Intel Corp. 
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
diff --git a/drivers/acpi/acpica/exconcat.c b/drivers/acpi/acpica/exconcat.c
index c7af07566b7b..43711412722f 100644
--- a/drivers/acpi/acpica/exconcat.c
+++ b/drivers/acpi/acpica/exconcat.c
@@ -3,7 +3,7 @@
  *
  * Module Name: exconcat - Concatenate-type AML operators
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 46a8baf28bd0..68efd704e2dc 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -3,7 +3,7 @@
  *
  * Module Name: exconfig - Namespace reconfiguration (Load/Unload opcodes)
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index ca2966bacb50..50c7aad2e86d 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -3,7 +3,7 @@
  *
  * Module Name: exconvrt - Object conversion routines
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index f376fc00064e..a17482428b46 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -3,7 +3,7 @@
  *
  * Module Name: excreate - Named object creation
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index b1aeec8cac55..a5223dcaee70 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -3,7 +3,7 @@
  *
  * Module Name: exdebug - Support for stores to the AML Debug Object
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index a9bc938a3b55..47a4d9a40d6b 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -3,7 +3,7 @@
  *
  * Module Name: exdump - Interpreter debug output routines
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index d3d2dbfba680..e85eb31e5075 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -3,7 +3,7 @@
  *
  * Module Name: exfield - AML execution - field_unit read/write
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
@@ -96,7 +96,8 @@ acpi_ex_get_protocol_buffer_length(u32 protocol_id, u32 *return_length)
  *
  * RETURN:      Status
  *
  * DESCRIPTION: Read from a named field. Returns either an Integer or a
- *              Buffer, depending on the size of the field.
+ *              Buffer, depending on the size of the field and whether the
+ *              field was created by the create_field() operator.
  *
  ******************************************************************************/
@@ -154,12 +155,17 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
 	 * the use of arithmetic operators on the returned value if the
 	 * field size is equal or smaller than an Integer.
 	 *
+	 * However, all buffer fields created by the create_field operator need
+	 * to remain as buffers to match other AML interpreter implementations.
+	 *
 	 * Note: Field.length is in bits.
 	 */
 
 	buffer_length =
 	    (acpi_size)ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->field.bit_length);
 
-	if (buffer_length > acpi_gbl_integer_byte_width) {
+	if (buffer_length > acpi_gbl_integer_byte_width ||
+	    (obj_desc->common.type == ACPI_TYPE_BUFFER_FIELD &&
+	     obj_desc->buffer_field.is_create_field)) {
 
 		/* Field is too large for an Integer, create a Buffer instead */
 
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index 95a0dcb4f7b9..ade35ff1c7ba 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -3,7 +3,7 @@
  *
  * Module Name: exfldio - Aml Field I/O
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index 60e854965af9..717e3998fd77 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -3,7 +3,7 @@
  *
  * Module Name: exmisc - ACPI AML (p-code) execution - specific opcodes
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index 775cd62af5b3..9ff247cba571 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -3,7 +3,7 @@
  *
  * Module Name: exmutex - ASL Mutex Acquire/Release functions
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index 6b76be5212a4..74f8b0d0452b 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -3,7 +3,7 @@
  *
  * Module Name: exnames - interpreter/scanner name load/execute
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index 06e35ea09823..a46d685a3ffc 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -3,7 +3,7 @@
  *
  * Module Name: exoparg1 - AML execution - opcodes with 1 argument
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
  *
  *****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index 5e4a31a11df4..03241d18ac1d 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -3,7 +3,7 @@
  *
  * Module Name: exoparg2 - AML execution - opcodes with 2 arguments
  *
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c index a4ebce417930..c8d0d75fc450 100644 --- a/drivers/acpi/acpica/exoparg3.c +++ b/drivers/acpi/acpica/exoparg3.c @@ -3,7 +3,7 @@ * * Module Name: exoparg3 - AML execution - opcodes with 3 arguments * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c index 31385a0b2dab..55d0fa056fe7 100644 --- a/drivers/acpi/acpica/exoparg6.c +++ b/drivers/acpi/acpica/exoparg6.c @@ -3,7 +3,7 @@ * * Module Name: exoparg6 - AML execution - opcodes with 6 arguments * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c index 728d752f7adc..a4e306690a21 100644 --- a/drivers/acpi/acpica/exprep.c +++ b/drivers/acpi/acpica/exprep.c @@ -3,7 +3,7 @@ * * Module Name: exprep - ACPI AML field prep utilities * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c index c08521194b29..d15a66de26c0 100644 --- a/drivers/acpi/acpica/exregion.c +++ b/drivers/acpi/acpica/exregion.c @@ -3,7 +3,7 @@ * * Module Name: exregion - ACPI default op_region (address space) handlers * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c index b223d01e6bf8..3e4018678c09 100644 --- a/drivers/acpi/acpica/exresnte.c +++ b/drivers/acpi/acpica/exresnte.c @@ -3,7 +3,7 @@ * * Module Name: exresnte - AML Interpreter object resolution * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c index 36da5c0ef69c..912a078c60a4 100644 --- a/drivers/acpi/acpica/exresolv.c +++ b/drivers/acpi/acpica/exresolv.c @@ -3,7 +3,7 @@ * * Module Name: exresolv - AML Interpreter object resolution * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c index bdfe4d33b483..4d1b22971d58 100644 --- a/drivers/acpi/acpica/exresop.c +++ b/drivers/acpi/acpica/exresop.c @@ -3,7 +3,7 @@ * * Module Name: exresop - AML Interpreter operand/object resolution * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exserial.c b/drivers/acpi/acpica/exserial.c index c5aa4b0deb70..760bc7cef55a 100644 --- a/drivers/acpi/acpica/exserial.c +++ b/drivers/acpi/acpica/exserial.c @@ -3,7 +3,7 @@ * * Module Name: exserial - field_unit support for serial address spaces * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c index 7f3c3571c292..3adc0a29d890 100644 --- a/drivers/acpi/acpica/exstore.c +++ b/drivers/acpi/acpica/exstore.c @@ -3,7 +3,7 @@ * * Module Name: exstore - AML Interpreter object store support * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c index 4e43c8277f07..8c34f4e2ab8f 100644 --- a/drivers/acpi/acpica/exstoren.c +++ b/drivers/acpi/acpica/exstoren.c @@ -4,7 +4,7 @@ * Module Name: exstoren - AML Interpreter object store support, * Store to Node (namespace object) * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c index dc9e2b1c1ad9..dc66696080a5 100644 --- a/drivers/acpi/acpica/exstorob.c +++ b/drivers/acpi/acpica/exstorob.c @@ -3,7 +3,7 @@ * * Module Name: exstorob - AML object store support, store to object * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c index a538f7799b78..f329b01672bb 100644 --- a/drivers/acpi/acpica/exsystem.c +++ b/drivers/acpi/acpica/exsystem.c @@ -3,7 +3,7 @@ * * Module Name: exsystem - Interface to OS services * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/extrace.c b/drivers/acpi/acpica/extrace.c index db7f93ca539f..832a47885b99 100644 --- a/drivers/acpi/acpica/extrace.c +++ b/drivers/acpi/acpica/extrace.c @@ -3,7 +3,7 @@ * * Module Name: extrace - Support for interpreter execution tracing * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c index 75380be1c2ef..8fefa6feac2f 100644 --- a/drivers/acpi/acpica/exutils.c +++ b/drivers/acpi/acpica/exutils.c @@ -3,7 +3,7 @@ * * Module Name: exutils - interpreter/scanner utilities * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c index 926f7e080f22..9b9aac27ff7e 100644 --- a/drivers/acpi/acpica/hwacpi.c +++ b/drivers/acpi/acpica/hwacpi.c @@ -3,7 +3,7 @@ * * Module Name: hwacpi - ACPI Hardware Initialization/Mode Interface * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c index dee3affaca49..d9be5d0545d4 100644 --- a/drivers/acpi/acpica/hwesleep.c +++ b/drivers/acpi/acpica/hwesleep.c @@ -4,7 +4,7 @@ * Name: hwesleep.c - ACPI Hardware Sleep/Wake Support functions for the * extended FADT-V5 sleep registers. * - * Copyright (C) 2000 - 2019, Intel Corp. 
+ * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c index 565bd3f29f31..1b4252bdcd0b 100644 --- a/drivers/acpi/acpica/hwgpe.c +++ b/drivers/acpi/acpica/hwgpe.c @@ -3,7 +3,7 @@ * * Module Name: hwgpe - Low level GPE enable/disable/clear functions * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c index b62db8ec446f..243a25add28f 100644 --- a/drivers/acpi/acpica/hwsleep.c +++ b/drivers/acpi/acpica/hwsleep.c @@ -4,7 +4,7 @@ * Name: hwsleep.c - ACPI Hardware Sleep/Wake Support functions for the * original/legacy sleep/PM registers. * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c index 2fb9f75d71c5..07473ddfa9a9 100644 --- a/drivers/acpi/acpica/hwtimer.c +++ b/drivers/acpi/acpica/hwtimer.c @@ -3,7 +3,7 @@ * * Name: hwtimer.c - ACPI Power Management Timer Interface * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c index cd576153257c..4d94861e6093 100644 --- a/drivers/acpi/acpica/hwvalid.c +++ b/drivers/acpi/acpica/hwvalid.c @@ -3,7 +3,7 @@ * * Module Name: hwvalid - I/O request validation * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c index c4fd97104024..134dbfadcd15 100644 --- a/drivers/acpi/acpica/hwxface.c +++ b/drivers/acpi/acpica/hwxface.c @@ -3,7 +3,7 @@ * * Module Name: hwxface - Public ACPICA hardware interfaces * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c index 2919746c9041..a4b66f4b2714 100644 --- a/drivers/acpi/acpica/hwxfsleep.c +++ b/drivers/acpi/acpica/hwxfsleep.c @@ -3,7 +3,7 @@ * * Name: hwxfsleep.c - ACPI Hardware Sleep/Wake External Interfaces * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsarguments.c b/drivers/acpi/acpica/nsarguments.c index 0e97ed38973f..d5e8405e9d8f 100644 --- a/drivers/acpi/acpica/nsarguments.c +++ b/drivers/acpi/acpica/nsarguments.c @@ -3,7 +3,7 @@ * * Module Name: nsarguments - Validation of args for ACPI predefined methods * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c index c86d0770ed6e..c86c82939ebb 100644 --- a/drivers/acpi/acpica/nsconvert.c +++ b/drivers/acpi/acpica/nsconvert.c @@ -4,7 +4,7 @@ * Module Name: nsconvert - Object conversions for objects returned by * predefined methods * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c index 9ad340f644a1..994f0b556c60 100644 --- a/drivers/acpi/acpica/nsdump.c +++ b/drivers/acpi/acpica/nsdump.c @@ -3,7 +3,7 @@ * * Module Name: nsdump - table dumping routines for debug * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c index 73e5c83c8c9f..b691fe20e384 100644 --- a/drivers/acpi/acpica/nsdumpdv.c +++ b/drivers/acpi/acpica/nsdumpdv.c @@ -3,7 +3,7 @@ * * Module Name: nsdump - table dumping routines for debug * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c index 61e9dfc9fe8c..e16f6a0c2c3f 100644 --- a/drivers/acpi/acpica/nsinit.c +++ b/drivers/acpi/acpica/nsinit.c @@ -3,7 +3,7 @@ * * Module Name: nsinit - namespace initialization * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c index d7c4d6e8e21e..9ba17891edb6 100644 --- a/drivers/acpi/acpica/nsload.c +++ b/drivers/acpi/acpica/nsload.c @@ -3,7 +3,7 @@ * * Module Name: nsload - namespace loading/expanding/contracting procedures * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c index f16cf5e4742c..7e74a765e785 100644 --- a/drivers/acpi/acpica/nsparse.c +++ b/drivers/acpi/acpica/nsparse.c @@ -3,7 +3,7 @@ * * Module Name: nsparse - namespace interface to AML parser * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c index 2f9d93122d0c..0cea9c363ace 100644 --- a/drivers/acpi/acpica/nspredef.c +++ b/drivers/acpi/acpica/nspredef.c @@ -3,7 +3,7 @@ * * Module Name: nspredef - Validation of ACPI predefined methods and objects * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c index 9a80e3b23496..237b3ddeb075 100644 --- a/drivers/acpi/acpica/nsprepkg.c +++ b/drivers/acpi/acpica/nsprepkg.c @@ -3,7 +3,7 @@ * * Module Name: nsprepkg - Validation of package objects for predefined names * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c index be86fea8e4d4..90db2d85e7f5 100644 --- a/drivers/acpi/acpica/nsrepair.c +++ b/drivers/acpi/acpica/nsrepair.c @@ -3,7 +3,7 @@ * * Module Name: nsrepair - Repair for objects returned by predefined methods * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c index 663d85e0adba..125143c41bb8 100644 --- a/drivers/acpi/acpica/nsrepair2.c +++ b/drivers/acpi/acpica/nsrepair2.c @@ -4,7 +4,7 @@ * Module Name: nsrepair2 - Repair for objects returned by specific * predefined methods * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c index b8d007c84d32..e66abdab8f31 100644 --- a/drivers/acpi/acpica/nsutils.c +++ b/drivers/acpi/acpica/nsutils.c @@ -4,7 +4,7 @@ * Module Name: nsutils - Utilities for accessing ACPI namespace, accessing * parents and siblings and Scope manipulation * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c index ceea6af79d12..b7f3e8603ad8 100644 --- a/drivers/acpi/acpica/nswalk.c +++ b/drivers/acpi/acpica/nswalk.c @@ -3,7 +3,7 @@ * * Module Name: nswalk - Functions for walking the ACPI namespace * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c index 161e60ddfb69..984129dcaa0c 100644 --- a/drivers/acpi/acpica/nsxfname.c +++ b/drivers/acpi/acpica/nsxfname.c @@ -4,7 +4,7 @@ * Module Name: nsxfname - Public interfaces to the ACPI subsystem * ACPI Namespace oriented interfaces * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c index e62c7897fdf1..3b40db4ad9f3 100644 --- a/drivers/acpi/acpica/psargs.c +++ b/drivers/acpi/acpica/psargs.c @@ -3,7 +3,7 @@ * * Module Name: psargs - Parse AML opcode arguments * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c index 207805047bc4..3cf0687b9915 100644 --- a/drivers/acpi/acpica/psloop.c +++ b/drivers/acpi/acpica/psloop.c @@ -3,7 +3,7 @@ * * Module Name: psloop - Main AML parse loop * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c index ded2779fc8ea..2480c26c5171 100644 --- a/drivers/acpi/acpica/psobject.c +++ b/drivers/acpi/acpica/psobject.c @@ -3,7 +3,7 @@ * * Module Name: psobject - Support for parse objects * - * Copyright (C) 2000 - 2019, Intel Corp. 
+ * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c index 43775c5ce17c..28af49263ebf 100644 --- a/drivers/acpi/acpica/psopcode.c +++ b/drivers/acpi/acpica/psopcode.c @@ -3,7 +3,7 @@ * * Module Name: psopcode - Parser/Interpreter opcode information table * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c index 15e7563829f1..ab9327f6a63c 100644 --- a/drivers/acpi/acpica/psopinfo.c +++ b/drivers/acpi/acpica/psopinfo.c @@ -3,7 +3,7 @@ * * Module Name: psopinfo - AML opcode information functions and dispatch tables * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c index 9b386530ffbe..c780046bf294 100644 --- a/drivers/acpi/acpica/psparse.c +++ b/drivers/acpi/acpica/psparse.c @@ -3,7 +3,7 @@ * * Module Name: psparse - Parser top level AML parse routines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c index f153ca804740..fceb311995e9 100644 --- a/drivers/acpi/acpica/psscope.c +++ b/drivers/acpi/acpica/psscope.c @@ -3,7 +3,7 @@ * * Module Name: psscope - Parser scope stack management routines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c index 22d8a2becdd0..c8aef0694864 100644 --- a/drivers/acpi/acpica/pstree.c +++ b/drivers/acpi/acpica/pstree.c @@ -3,7 +3,7 @@ * * Module Name: pstree - Parser op tree manipulation/traversal/search * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c index 2512f584fa3c..00efae2f95ba 100644 --- a/drivers/acpi/acpica/psutils.c +++ b/drivers/acpi/acpica/psutils.c @@ -3,7 +3,7 @@ * * Module Name: psutils - Parser miscellaneous utilities (Parser only) * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c index cf91841297c2..0fe3adf6b0e5 100644 --- a/drivers/acpi/acpica/pswalk.c +++ b/drivers/acpi/acpica/pswalk.c @@ -3,7 +3,7 @@ * * Module Name: pswalk - Parser routines to walk parsed op tree(s) * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c index ee2ee2c858f2..1bbfc8def388 100644 --- a/drivers/acpi/acpica/psxface.c +++ b/drivers/acpi/acpica/psxface.c @@ -3,7 +3,7 @@ * * Module Name: psxface - Parser external interfaces * - * Copyright (C) 2000 - 2019, Intel Corp. 
+ * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c index 2cf36451e46f..523b1e9b98d4 100644 --- a/drivers/acpi/acpica/tbdata.c +++ b/drivers/acpi/acpica/tbdata.c @@ -3,7 +3,7 @@ * * Module Name: tbdata - Table manager data structure functions * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c index 0041bfba9abc..907edc5edba7 100644 --- a/drivers/acpi/acpica/tbfadt.c +++ b/drivers/acpi/acpica/tbfadt.c @@ -3,7 +3,7 @@ * * Module Name: tbfadt - FADT table utilities * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c index b2abb40023a6..56d81e490a5c 100644 --- a/drivers/acpi/acpica/tbfind.c +++ b/drivers/acpi/acpica/tbfind.c @@ -3,7 +3,7 @@ * * Module Name: tbfind - find table * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c index ef1ffd36ab3f..0bb15add2245 100644 --- a/drivers/acpi/acpica/tbinstal.c +++ b/drivers/acpi/acpica/tbinstal.c @@ -3,7 +3,7 @@ * * Module Name: tbinstal - ACPI table installation and removal * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c index 4764f849cb78..0b3494ad9a70 100644 --- a/drivers/acpi/acpica/tbprint.c +++ b/drivers/acpi/acpica/tbprint.c @@ -3,7 +3,7 @@ * * Module Name: tbprint - Table output utilities * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c index c5f0b8ec70cc..dfe1ac3ae34a 100644 --- a/drivers/acpi/acpica/tbutils.c +++ b/drivers/acpi/acpica/tbutils.c @@ -3,7 +3,7 @@ * * Module Name: tbutils - ACPI Table utilities * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c index 1640685bf4ae..f8403d480318 100644 --- a/drivers/acpi/acpica/tbxface.c +++ b/drivers/acpi/acpica/tbxface.c @@ -3,7 +3,7 @@ * * Module Name: tbxface - ACPI table-oriented external interfaces * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c index 0782acf85722..bcba993d4dac 100644 --- a/drivers/acpi/acpica/tbxfload.c +++ b/drivers/acpi/acpica/tbxfload.c @@ -3,7 +3,7 @@ * * Module Name: tbxfload - Table load/unload external interfaces * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c index e2859d09ca2e..0edc6ef5d46d 100644 --- a/drivers/acpi/acpica/tbxfroot.c +++ b/drivers/acpi/acpica/tbxfroot.c @@ -3,7 +3,7 @@ * * Module Name: tbxfroot - Find the root ACPI table (RSDT) * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c index bb260376bd59..99fa48722cf6 100644 --- a/drivers/acpi/acpica/utaddress.c +++ b/drivers/acpi/acpica/utaddress.c @@ -3,7 +3,7 @@ * * Module Name: utaddress - op_region address range check * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c index d64da4d9e8d0..303ab51b4fcf 100644 --- a/drivers/acpi/acpica/utalloc.c +++ b/drivers/acpi/acpica/utalloc.c @@ -3,7 +3,7 @@ * * Module Name: utalloc - local memory allocation routines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utascii.c b/drivers/acpi/acpica/utascii.c index f6cd7d4f698b..d78656d960e8 100644 --- a/drivers/acpi/acpica/utascii.c +++ b/drivers/acpi/acpica/utascii.c @@ -3,7 +3,7 @@ * * Module Name: utascii - Utility ascii functions * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utbuffer.c b/drivers/acpi/acpica/utbuffer.c index db897af1de05..f2ec427f4e29 100644 --- a/drivers/acpi/acpica/utbuffer.c +++ b/drivers/acpi/acpica/utbuffer.c @@ -3,7 +3,7 @@ * * Module Name: utbuffer - Buffer dump routines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c index 8533fce7fa93..1b03a2747401 100644 --- a/drivers/acpi/acpica/utcache.c +++ b/drivers/acpi/acpica/utcache.c @@ -3,7 +3,7 @@ * * Module Name: utcache - local cache allocation routines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c index 1fb8327f3c3b..41bdd0278dd8 100644 --- a/drivers/acpi/acpica/utcopy.c +++ b/drivers/acpi/acpica/utcopy.c @@ -3,7 +3,7 @@ * * Module Name: utcopy - Internal to external object translation utilities * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c index 5b169b5f0f1a..0c8cb0612414 100644 --- a/drivers/acpi/acpica/utdebug.c +++ b/drivers/acpi/acpica/utdebug.c @@ -3,7 +3,7 @@ * * Module Name: utdebug - Debug print/trace routines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c index 65beaa237669..befdd13b403b 100644 --- a/drivers/acpi/acpica/utdecode.c +++ b/drivers/acpi/acpica/utdecode.c @@ -3,7 +3,7 @@ * * Module Name: utdecode - Utility decoding routines (value-to-string) * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c index 558a9f3b0678..8180d1a458f5 100644 --- a/drivers/acpi/acpica/uteval.c +++ b/drivers/acpi/acpica/uteval.c @@ -3,7 +3,7 @@ * * Module Name: uteval - Object evaluation * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c index b0622ec4bb85..e6dcbdc3fc6e 100644 --- a/drivers/acpi/acpica/utglobal.c +++ b/drivers/acpi/acpica/utglobal.c @@ -3,7 +3,7 @@ * * Module Name: utglobal - Global variables for the ACPI subsystem * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/uthex.c b/drivers/acpi/acpica/uthex.c index b6da135d5f41..0e02f12513dc 100644 --- a/drivers/acpi/acpica/uthex.c +++ b/drivers/acpi/acpica/uthex.c @@ -3,7 +3,7 @@ * * Module Name: uthex -- Hex/ASCII support functions * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c index 30198c828ab6..3bb06935a2ad 100644 --- a/drivers/acpi/acpica/utids.c +++ b/drivers/acpi/acpica/utids.c @@ -3,7 +3,7 @@ * * Module Name: utids - support for device Ids - HID, UID, CID, SUB, CLS * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c index 6f33e7c72327..fdbc397c038d 100644 --- a/drivers/acpi/acpica/utinit.c +++ b/drivers/acpi/acpica/utinit.c @@ -3,7 +3,7 @@ * * Module Name: utinit - Common ACPI subsystem initialization * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c index 8b4ff11d617a..46be549539e7 100644 --- a/drivers/acpi/acpica/utlock.c +++ b/drivers/acpi/acpica/utlock.c @@ -3,7 +3,7 @@ * * Module Name: utlock - Reader/Writer lock interfaces * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c index eee97a902696..3e60bdac2200 100644 --- a/drivers/acpi/acpica/utobject.c +++ b/drivers/acpi/acpica/utobject.c @@ -3,7 +3,7 @@ * * Module Name: utobject - ACPI object create/delete/size/cache routines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c index ad2b218039d0..0a01c08dad8a 100644 --- a/drivers/acpi/acpica/utosi.c +++ b/drivers/acpi/acpica/utosi.c @@ -3,7 +3,7 @@ * * Module Name: utosi - Support for the _OSI predefined control method * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utpredef.c b/drivers/acpi/acpica/utpredef.c index 1b0f68f5ed8c..05fe3470fb93 100644 --- a/drivers/acpi/acpica/utpredef.c +++ b/drivers/acpi/acpica/utpredef.c @@ -3,7 +3,7 @@ * * Module Name: utpredef - support functions for predefined names * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c index 5839f2fa7400..a874dac7db5c 100644 --- a/drivers/acpi/acpica/utprint.c +++ b/drivers/acpi/acpica/utprint.c @@ -3,7 +3,7 @@ * * Module Name: utprint - Formatted printing routines * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c index 14de4d15e618..d366be431a84 100644 --- a/drivers/acpi/acpica/uttrack.c +++ b/drivers/acpi/acpica/uttrack.c @@ -3,7 +3,7 @@ * * Module Name: uttrack - Memory allocation tracking routines (debug only) * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utuuid.c b/drivers/acpi/acpica/utuuid.c index 0a7cf8007643..b8039954b0d1 100644 --- a/drivers/acpi/acpica/utuuid.c +++ b/drivers/acpi/acpica/utuuid.c @@ -3,7 +3,7 @@ * * Module Name: utuuid -- UUID support functions * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c index f497c4b30e65..ca7c9f0144ef 100644 --- a/drivers/acpi/acpica/utxface.c +++ b/drivers/acpi/acpica/utxface.c @@ -3,7 +3,7 @@ * * Module Name: utxface - External interfaces, miscellaneous utility functions * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c index cf769e94fe0f..653e3bb20036 100644 --- a/drivers/acpi/acpica/utxfinit.c +++ b/drivers/acpi/acpica/utxfinit.c @@ -3,7 +3,7 @@ * * Module Name: utxfinit - External interfaces for ACPICA initialization * - * Copyright (C) 2000 - 2019, Intel Corp. + * Copyright (C) 2000 - 2020, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 8906c80175e6..103acbbfcf9a 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -1180,7 +1180,7 @@ static int ghes_probe(struct platform_device *ghes_dev) switch (generic->notify.type) { case ACPI_HEST_NOTIFY_POLLED: - timer_setup(&ghes->timer, ghes_poll_func, TIMER_DEFERRABLE); + timer_setup(&ghes->timer, ghes_poll_func, 0); ghes_add_timer(ghes); break; case ACPI_HEST_NOTIFY_EXTERNAL: diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 33f71983e001..6078064684c6 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -298,6 +298,59 @@ out: return status; } +struct iort_workaround_oem_info { + char oem_id[ACPI_OEM_ID_SIZE + 1]; + char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1]; + u32 oem_revision; +}; + +static bool apply_id_count_workaround; + +static struct iort_workaround_oem_info wa_info[] __initdata = { + { + .oem_id = "HISI ", + .oem_table_id = "HIP07 ", + .oem_revision = 0, + }, { + .oem_id = "HISI ", + .oem_table_id = "HIP08 ", + .oem_revision = 0, + } +}; + +static void __init +iort_check_id_count_workaround(struct acpi_table_header *tbl) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(wa_info); i++) { + if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) && + !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) && + wa_info[i].oem_revision == tbl->oem_revision) { + apply_id_count_workaround = true; + pr_warn(FW_BUG "ID count for ID mapping entry is wrong, applying workaround\n"); + break; + } + } +} + +static inline u32 iort_get_map_max(struct acpi_iort_id_mapping *map) +{ + u32 map_max = map->input_base + map->id_count; + + /* + * The IORT specification revision D (Section 3, table 4, page 9) says + * Number of IDs = The number of IDs in the range minus one, but the + * IORT code ignored the "minus one" and some firmware did too, so + * apply a workaround here to stay compatible with both spec-compliant + * and non-compliant firmware.
+ */ + if (apply_id_count_workaround) + map_max--; + + return map_max; +} + static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in, u32 *rid_out) { @@ -314,8 +367,7 @@ static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in, return -ENXIO; } - if (rid_in < map->input_base || - (rid_in >= map->input_base + map->id_count)) + if (rid_in < map->input_base || rid_in > iort_get_map_max(map)) return -ENXIO; *rid_out = map->output_base + (rid_in - map->input_base); @@ -1631,5 +1683,6 @@ void __init acpi_iort_init(void) return; } + iort_check_id_count_workaround(iort_table); iort_init_platform_devices(); }
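The hunks above compensate for an off-by-one in how firmware encodes id_count: per the IORT specification the field holds the number of IDs in the range minus one, while the HiSilicon tables matched by wa_info store the raw count. A standalone sketch with made-up values (compiled outside the kernel, not part of the patch) of what iort_get_map_max() computes in each case:

#include <stdio.h>

int main(void)
{
        unsigned int input_base = 0x1000, ids_in_range = 8; /* hypothetical mapping entry */

        unsigned int spec_id_count = ids_in_range - 1;  /* spec-compliant encoding: 7 */
        unsigned int buggy_id_count = ids_in_range;     /* HIP07/HIP08 encoding: 8 */

        /* The upper bound as iort_get_map_max() derives it: */
        printf("spec:  map_max = %#x\n", input_base + spec_id_count);   /* 0x1007 */
        printf("buggy: map_max = %#x, after workaround %#x\n",
               input_base + buggy_id_count,                             /* 0x1008 */
               input_base + buggy_id_count - 1);                        /* 0x1007 */
        return 0;
}

With apply_id_count_workaround set, the rid_in > iort_get_map_max(map) test in iort_id_map() therefore rejects 0x1008 on the quirky tables exactly as it would on compliant ones.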
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 8f0e0c8d8c3d..15cc7d5a6185 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -38,6 +38,8 @@ #define PREFIX "ACPI: " #define ACPI_BATTERY_VALUE_UNKNOWN 0xFFFFFFFF +#define ACPI_BATTERY_CAPACITY_VALID(capacity) \ + ((capacity) != 0 && (capacity) != ACPI_BATTERY_VALUE_UNKNOWN) #define ACPI_BATTERY_DEVICE_NAME "Battery" @@ -192,7 +194,8 @@ static int acpi_battery_is_charged(struct acpi_battery *battery) static bool acpi_battery_is_degraded(struct acpi_battery *battery) { - return battery->full_charge_capacity && battery->design_capacity && + return ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity) && + ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity) && battery->full_charge_capacity < battery->design_capacity; } @@ -214,7 +217,7 @@ static int acpi_battery_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { - int ret = 0; + int full_capacity = ACPI_BATTERY_VALUE_UNKNOWN, ret = 0; struct acpi_battery *battery = to_acpi_battery(psy); if (acpi_battery_present(battery)) { @@ -263,14 +266,14 @@ static int acpi_battery_get_property(struct power_supply *psy, break; case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN: - if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN) + if (!ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity)) ret = -ENODEV; else val->intval = battery->design_capacity * 1000; break; case POWER_SUPPLY_PROP_CHARGE_FULL: case POWER_SUPPLY_PROP_ENERGY_FULL: - if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN) + if (!ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity)) ret = -ENODEV; else val->intval = battery->full_charge_capacity * 1000; @@ -283,11 +286,17 @@ static int acpi_battery_get_property(struct power_supply *psy, val->intval = battery->capacity_now * 1000; break; case POWER_SUPPLY_PROP_CAPACITY: - if (battery->capacity_now && battery->full_charge_capacity) - val->intval = battery->capacity_now * 100/ - battery->full_charge_capacity; + if (ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity)) + full_capacity = battery->full_charge_capacity; + else if (ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity)) + full_capacity = battery->design_capacity; + + if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN || + full_capacity == ACPI_BATTERY_VALUE_UNKNOWN) + ret = -ENODEV; else - val->intval = 0; + val->intval = battery->capacity_now * 100/ + full_capacity; break; case POWER_SUPPLY_PROP_CAPACITY_LEVEL: if (battery->state & ACPI_BATTERY_STATE_CRITICAL) @@ -333,6 +342,20 @@ static enum power_supply_property charge_battery_props[] = { POWER_SUPPLY_PROP_SERIAL_NUMBER, }; +static enum power_supply_property charge_battery_full_cap_broken_props[] = { + POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_PRESENT, + POWER_SUPPLY_PROP_TECHNOLOGY, + POWER_SUPPLY_PROP_CYCLE_COUNT, + POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_CURRENT_NOW, + POWER_SUPPLY_PROP_CHARGE_NOW, + POWER_SUPPLY_PROP_MODEL_NAME, + POWER_SUPPLY_PROP_MANUFACTURER, + POWER_SUPPLY_PROP_SERIAL_NUMBER, +}; + static enum power_supply_property energy_battery_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_PRESENT, @@ -794,20 +817,34 @@ static void __exit battery_hook_exit(void) static int sysfs_add_battery(struct acpi_battery *battery) { struct power_supply_config psy_cfg = { .drv_data = battery, }; + bool full_cap_broken = false; + + if (!ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity) && + !ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity)) + full_cap_broken = true; if (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) { - battery->bat_desc.properties = charge_battery_props; - battery->bat_desc.num_properties = - ARRAY_SIZE(charge_battery_props); - } else if (battery->full_charge_capacity == 0) { - battery->bat_desc.properties = - energy_battery_full_cap_broken_props; - battery->bat_desc.num_properties = - ARRAY_SIZE(energy_battery_full_cap_broken_props); + if (full_cap_broken) { + battery->bat_desc.properties = + charge_battery_full_cap_broken_props; + battery->bat_desc.num_properties = + ARRAY_SIZE(charge_battery_full_cap_broken_props); + } else { + battery->bat_desc.properties = charge_battery_props; + battery->bat_desc.num_properties = + ARRAY_SIZE(charge_battery_props); + } } else { + if (full_cap_broken) { + battery->bat_desc.properties = + energy_battery_full_cap_broken_props; + battery->bat_desc.num_properties = + ARRAY_SIZE(energy_battery_full_cap_broken_props); + } else { + battery->bat_desc.properties = energy_battery_props; + battery->bat_desc.num_properties = + ARRAY_SIZE(energy_battery_props); + } - battery->bat_desc.properties = energy_battery_props; - battery->bat_desc.num_properties = - ARRAY_SIZE(energy_battery_props); } battery->bat_desc.name = acpi_device_bid(battery->device);
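The POWER_SUPPLY_PROP_CAPACITY branch above now picks a reference capacity instead of reporting 0: full_charge_capacity when it is valid, else design_capacity, else -ENODEV. A minimal userspace sketch of that selection logic (hypothetical values; -1 stands in for -ENODEV, and the macro mirrors ACPI_BATTERY_CAPACITY_VALID):

#include <stdio.h>

#define VALUE_UNKNOWN 0xFFFFFFFFu
#define CAPACITY_VALID(c) ((c) != 0 && (c) != VALUE_UNKNOWN)

static int capacity_percent(unsigned int now, unsigned int full, unsigned int design)
{
        unsigned int full_capacity = VALUE_UNKNOWN;

        if (CAPACITY_VALID(full))
                full_capacity = full;           /* preferred reference */
        else if (CAPACITY_VALID(design))
                full_capacity = design;         /* fallback for broken firmware */

        if (now == VALUE_UNKNOWN || full_capacity == VALUE_UNKNOWN)
                return -1;                      /* the driver returns -ENODEV here */

        return now * 100 / full_capacity;
}

int main(void)
{
        /* Firmware reports full_charge_capacity as 0; fall back to design (56000). */
        printf("%d%%\n", capacity_percent(28000, 0, 56000));    /* prints 50% */
        return 0;
}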
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c index b758b45737f5..f6925f16c4a2 100644 --- a/drivers/acpi/button.c +++ b/drivers/acpi/button.c @@ -122,6 +122,17 @@ static const struct dmi_system_id dmi_lid_quirks[] = { }, .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN, }, + { + /* + * Razer Blade Stealth 13 late 2019: notification of the LID device + * only happens on close, not on open, and _LID always returns closed. + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Razer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Razer Blade Stealth 13 Late 2019"), + }, + .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN, + }, {} }; diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index 08bb9f2f2d23..b64c62bfcea5 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -1314,9 +1314,20 @@ static void acpi_dev_pm_detach(struct device *dev, bool power_off) */ int acpi_dev_pm_attach(struct device *dev, bool power_on) { + /* + * Skip devices whose ACPI companions match the device IDs below, + * because they require special power management handling incompatible + * with the generic ACPI PM domain. + */ + static const struct acpi_device_id special_pm_ids[] = { + {"PNP0C0B", }, /* Generic ACPI fan */ + {"INT1044", }, /* Fan for Tiger Lake generation */ + {"INT3404", }, /* Fan */ + {} + }; struct acpi_device *adev = ACPI_COMPANION(dev); - if (!adev) + if (!adev || !acpi_match_device_ids(adev, special_pm_ids)) return 0; /* diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c index eb58fc475a03..387f27ef3368 100644 --- a/drivers/acpi/dptf/dptf_power.c +++ b/drivers/acpi/dptf/dptf_power.c @@ -97,6 +97,7 @@ static int dptf_power_remove(struct platform_device *pdev) } static const struct acpi_device_id int3407_device_ids[] = { + {"INT1047", 0}, {"INT3407", 0}, {"", 0}, };
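A subtlety in the acpi_dev_pm_attach() hunk above, since the `!` reads backwards at first glance: acpi_match_device_ids() follows the kernel's 0-on-success convention, returning 0 when the companion matches one of the IDs and -ENOENT otherwise, so `!adev || !acpi_match_device_ids(adev, special_pm_ids)` returns early exactly when there is no ACPI companion or the companion is one of the listed fan devices. A standalone sketch of the convention (illustrative only, not the kernel implementation):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Mimics acpi_match_device_ids(): 0 on a match, -ENOENT otherwise. */
static int match_ids(const char *hid, const char *const ids[])
{
        for (int i = 0; ids[i]; i++)
                if (!strcmp(hid, ids[i]))
                        return 0;
        return -ENOENT;
}

int main(void)
{
        const char *const special_ids[] = { "PNP0C0B", "INT1044", "INT3404", NULL };

        /* !match_ids(...) is true on a match, so the fan is skipped. */
        printf("fan skipped: %d\n", !match_ids("PNP0C0B", special_ids));     /* 1 */
        printf("INT3407 skipped: %d\n", !match_ids("INT3407", special_ids)); /* 0 */
        return 0;
}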
diff --git a/drivers/acpi/dptf/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c index 5c7a90186e3c..1ec7b6900662 100644 --- a/drivers/acpi/dptf/int340x_thermal.c +++ b/drivers/acpi/dptf/int340x_thermal.c @@ -13,6 +13,10 @@ #define INT3401_DEVICE 0X01 static const struct acpi_device_id int340x_thermal_device_ids[] = { + {"INT1040"}, + {"INT1043"}, + {"INT1044"}, + {"INT1047"}, {"INT3400"}, {"INT3401", INT3401_DEVICE}, {"INT3402"}, diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index d05be13c1022..08bc9751fe66 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -1053,28 +1053,20 @@ void acpi_ec_unblock_transactions(void) Event Management -------------------------------------------------------------------------- */ static struct acpi_ec_query_handler * -acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler) -{ - if (handler) - kref_get(&handler->kref); - return handler; -} - -static struct acpi_ec_query_handler * acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value) { struct acpi_ec_query_handler *handler; - bool found = false; mutex_lock(&ec->mutex); list_for_each_entry(handler, &ec->list, node) { if (value == handler->query_bit) { - found = true; - break; + kref_get(&handler->kref); + mutex_unlock(&ec->mutex); + return handler; } } mutex_unlock(&ec->mutex); - return found ? acpi_ec_get_query_handler(handler) : NULL; + return NULL; } static void acpi_ec_query_handler_release(struct kref *kref) diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c index 816b0803f7fb..aaf4e8f348cf 100644 --- a/drivers/acpi/fan.c +++ b/drivers/acpi/fan.c @@ -25,6 +25,7 @@ static int acpi_fan_remove(struct platform_device *pdev); static const struct acpi_device_id fan_device_ids[] = { {"PNP0C0B", 0}, + {"INT1044", 0}, {"INT3404", 0}, {"", 0}, }; @@ -44,12 +45,16 @@ static const struct dev_pm_ops acpi_fan_pm = { #define FAN_PM_OPS_PTR NULL #endif +#define ACPI_FPS_NAME_LEN 20 + struct acpi_fan_fps { u64 control; u64 trip_point; u64 speed; u64 noise_level; u64 power; + char name[ACPI_FPS_NAME_LEN]; + struct device_attribute dev_attr; }; struct acpi_fan_fif { @@ -265,6 +270,39 @@ static int acpi_fan_speed_cmp(const void *a, const void *b) return fps1->speed - fps2->speed; } +static ssize_t show_state(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct acpi_fan_fps *fps = container_of(attr, struct acpi_fan_fps, dev_attr); + int count; + + if (fps->control == 0xFFFFFFFF || fps->control > 100) + count = snprintf(buf, PAGE_SIZE, "not-defined:"); + else + count = snprintf(buf, PAGE_SIZE, "%lld:", fps->control); + + if (fps->trip_point == 0xFFFFFFFF || fps->trip_point > 9) + count += snprintf(&buf[count], PAGE_SIZE, "not-defined:"); + else + count += snprintf(&buf[count], PAGE_SIZE, "%lld:", fps->trip_point); + + if (fps->speed == 0xFFFFFFFF) + count += snprintf(&buf[count], PAGE_SIZE, "not-defined:"); + else + count += snprintf(&buf[count], PAGE_SIZE, "%lld:", fps->speed); + + if (fps->noise_level == 0xFFFFFFFF) + count += snprintf(&buf[count], PAGE_SIZE, "not-defined:"); + else + count += snprintf(&buf[count], PAGE_SIZE, "%lld:", fps->noise_level * 100); + + if (fps->power == 0xFFFFFFFF) + count += snprintf(&buf[count], PAGE_SIZE, "not-defined\n"); + else + count += snprintf(&buf[count], PAGE_SIZE, "%lld\n", fps->power); + + return count; +} + static int acpi_fan_get_fps(struct acpi_device *device) { struct acpi_fan *fan = acpi_driver_data(device); @@ -295,12 +333,13 @@ static int acpi_fan_get_fps(struct acpi_device *device) } for (i = 0; i < fan->fps_count; i++) { struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" }; - struct acpi_buffer fps = { sizeof(fan->fps[i]), &fan->fps[i] }; + struct acpi_buffer fps = { offsetof(struct acpi_fan_fps, name), + &fan->fps[i] }; status = acpi_extract_package(&obj->package.elements[i + 1], &format, &fps); if (ACPI_FAILURE(status)) { dev_err(&device->dev, "Invalid _FPS element\n"); - break; + goto err; } } @@ -308,6 +347,24 @@ sort(fan->fps, fan->fps_count, sizeof(*fan->fps), acpi_fan_speed_cmp, NULL); + for (i = 0; i < fan->fps_count; ++i) { + struct acpi_fan_fps *fps = &fan->fps[i]; + + snprintf(fps->name, ACPI_FPS_NAME_LEN, "state%d", i); + fps->dev_attr.show = show_state; + fps->dev_attr.store = NULL; + fps->dev_attr.attr.name = fps->name; + fps->dev_attr.attr.mode = 0444; + status = sysfs_create_file(&device->dev.kobj, &fps->dev_attr.attr); + if (status) { + int j; + + for (j = 0; j < i; ++j) + sysfs_remove_file(&device->dev.kobj, &fan->fps[j].dev_attr.attr); + break; + } + } + err: kfree(obj); return status; @@ -330,14 +387,20 @@ static int acpi_fan_probe(struct platform_device *pdev) platform_set_drvdata(pdev, fan); if (acpi_fan_is_acpi4(device)) { - if (acpi_fan_get_fif(device) || acpi_fan_get_fps(device)) - goto end; + result = acpi_fan_get_fif(device); + if 
(result) + return result; + + result = acpi_fan_get_fps(device); + if (result) + return result; + fan->acpi4 = true; } else { result = acpi_device_update_power(device, NULL); if (result) { dev_err(&device->dev, "Failed to set initial power state\n"); - goto end; + goto err_end; } } @@ -350,7 +413,7 @@ static int acpi_fan_probe(struct platform_device *pdev) &fan_cooling_ops); if (IS_ERR(cdev)) { result = PTR_ERR(cdev); - goto end; + goto err_end; } dev_dbg(&pdev->dev, "registered as cooling_device%d\n", cdev->id); @@ -365,10 +428,21 @@ static int acpi_fan_probe(struct platform_device *pdev) result = sysfs_create_link(&cdev->device.kobj, &pdev->dev.kobj, "device"); - if (result) + if (result) { dev_err(&pdev->dev, "Failed to create sysfs link 'device'\n"); + goto err_end; + } + + return 0; + +err_end: + if (fan->acpi4) { + int i; + + for (i = 0; i < fan->fps_count; ++i) + sysfs_remove_file(&device->dev.kobj, &fan->fps[i].dev_attr.attr); + } -end: return result; } @@ -376,6 +450,13 @@ static int acpi_fan_remove(struct platform_device *pdev) { struct acpi_fan *fan = platform_get_drvdata(pdev); + if (fan->acpi4) { + struct acpi_device *device = ACPI_COMPANION(&pdev->dev); + int i; + + for (i = 0; i < fan->fps_count; ++i) + sysfs_remove_file(&device->dev.kobj, &fan->fps[i].dev_attr.attr); + } sysfs_remove_link(&pdev->dev.kobj, "thermal_cooling"); sysfs_remove_link(&fan->cdev->device.kobj, "device"); thermal_cooling_device_unregister(fan->cdev); diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c index f31544d3656e..4ae93350b70d 100644 --- a/drivers/acpi/pptt.c +++ b/drivers/acpi/pptt.c @@ -98,11 +98,11 @@ static inline bool acpi_pptt_match_type(int table_type, int type) * * Return: The cache structure and the level we terminated with. */ -static int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr, - int local_level, - struct acpi_subtable_header *res, - struct acpi_pptt_cache **found, - int level, int type) +static unsigned int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr, + unsigned int local_level, + struct acpi_subtable_header *res, + struct acpi_pptt_cache **found, + unsigned int level, int type) { struct acpi_pptt_cache *cache; @@ -119,7 +119,7 @@ static int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr, if (*found != NULL && cache != *found) pr_warn("Found duplicate cache level/type unable to determine uniqueness\n"); - pr_debug("Found cache @ level %d\n", level); + pr_debug("Found cache @ level %u\n", level); *found = cache; /* * continue looking at this node's resource list @@ -132,16 +132,17 @@ static int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr, return local_level; } -static struct acpi_pptt_cache *acpi_find_cache_level(struct acpi_table_header *table_hdr, - struct acpi_pptt_processor *cpu_node, - int *starting_level, int level, - int type) +static struct acpi_pptt_cache * +acpi_find_cache_level(struct acpi_table_header *table_hdr, + struct acpi_pptt_processor *cpu_node, + unsigned int *starting_level, unsigned int level, + int type) { struct acpi_subtable_header *res; - int number_of_levels = *starting_level; + unsigned int number_of_levels = *starting_level; int resource = 0; struct acpi_pptt_cache *ret = NULL; - int local_level; + unsigned int local_level; /* walk down from processor node */ while ((res = acpi_get_pptt_resource(table_hdr, cpu_node, resource))) { @@ -321,12 +322,12 @@ static struct acpi_pptt_cache *acpi_find_cache_node(struct acpi_table_header *ta unsigned int level, struct acpi_pptt_processor **node) { - int 
total_levels = 0; + unsigned int total_levels = 0; struct acpi_pptt_cache *found = NULL; struct acpi_pptt_processor *cpu_node; u8 acpi_type = acpi_cache_type(type); - pr_debug("Looking for CPU %d's level %d cache type %d\n", + pr_debug("Looking for CPU %d's level %u cache type %d\n", acpi_cpu_id, level, acpi_type); cpu_node = acpi_find_processor_node(table_hdr, acpi_cpu_id); diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 2ae95df2e74f..dcc289e30166 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -299,164 +299,24 @@ static int acpi_processor_get_power_info_default(struct acpi_processor *pr) static int acpi_processor_get_power_info_cst(struct acpi_processor *pr) { - acpi_status status; - u64 count; - int current_count; - int i, ret = 0; - struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; - union acpi_object *cst; + int ret; if (nocst) return -ENODEV; - current_count = 0; - - status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer); - if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n")); - return -ENODEV; - } - - cst = buffer.pointer; - - /* There must be at least 2 elements */ - if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) { - pr_err("not enough elements in _CST\n"); - ret = -EFAULT; - goto end; - } - - count = cst->package.elements[0].integer.value; + ret = acpi_processor_evaluate_cst(pr->handle, pr->id, &pr->power); + if (ret) + return ret; - /* Validate number of power states. */ - if (count < 1 || count != cst->package.count - 1) { - pr_err("count given by _CST is not valid\n"); - ret = -EFAULT; - goto end; - } + /* + * It is expected that there will be at least 2 states, C1 and + * something else (C2 or C3), so fail if that is not the case. + */ + if (pr->power.count < 2) + return -EFAULT; - /* Tell driver that at least _CST is supported. */ pr->flags.has_cst = 1; - - for (i = 1; i <= count; i++) { - union acpi_object *element; - union acpi_object *obj; - struct acpi_power_register *reg; - struct acpi_processor_cx cx; - - memset(&cx, 0, sizeof(cx)); - - element = &(cst->package.elements[i]); - if (element->type != ACPI_TYPE_PACKAGE) - continue; - - if (element->package.count != 4) - continue; - - obj = &(element->package.elements[0]); - - if (obj->type != ACPI_TYPE_BUFFER) - continue; - - reg = (struct acpi_power_register *)obj->buffer.pointer; - - if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO && - (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) - continue; - - /* There should be an easy way to extract an integer... */ - obj = &(element->package.elements[1]); - if (obj->type != ACPI_TYPE_INTEGER) - continue; - - cx.type = obj->integer.value; - /* - * Some buggy BIOSes won't list C1 in _CST - - * Let acpi_processor_get_power_info_default() handle them later - */ - if (i == 1 && cx.type != ACPI_STATE_C1) - current_count++; - - cx.address = reg->address; - cx.index = current_count + 1; - - cx.entry_method = ACPI_CSTATE_SYSTEMIO; - if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) { - if (acpi_processor_ffh_cstate_probe - (pr->id, &cx, reg) == 0) { - cx.entry_method = ACPI_CSTATE_FFH; - } else if (cx.type == ACPI_STATE_C1) { - /* - * C1 is a special case where FIXED_HARDWARE - * can be handled in non-MWAIT way as well. - * In that case, save this _CST entry info. - * Otherwise, ignore this info and continue. 
- */ - cx.entry_method = ACPI_CSTATE_HALT; - snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT"); - } else { - continue; - } - if (cx.type == ACPI_STATE_C1 && - (boot_option_idle_override == IDLE_NOMWAIT)) { - /* - * In most cases the C1 space_id obtained from - * _CST object is FIXED_HARDWARE access mode. - * But when the option of idle=halt is added, - * the entry_method type should be changed from - * CSTATE_FFH to CSTATE_HALT. - * When the option of idle=nomwait is added, - * the C1 entry_method type should be - * CSTATE_HALT. - */ - cx.entry_method = ACPI_CSTATE_HALT; - snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT"); - } - } else { - snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x", - cx.address); - } - - if (cx.type == ACPI_STATE_C1) { - cx.valid = 1; - } - - obj = &(element->package.elements[2]); - if (obj->type != ACPI_TYPE_INTEGER) - continue; - - cx.latency = obj->integer.value; - - obj = &(element->package.elements[3]); - if (obj->type != ACPI_TYPE_INTEGER) - continue; - - current_count++; - memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx)); - - /* - * We support total ACPI_PROCESSOR_MAX_POWER - 1 - * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1) - */ - if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) { - pr_warn("Limiting number of power states to max (%d)\n", - ACPI_PROCESSOR_MAX_POWER); - pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n"); - break; - } - } - - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n", - current_count)); - - /* Validate number of power states discovered */ - if (current_count < 2) - ret = -EFAULT; - - end: - kfree(buffer.pointer); - - return ret; + return 0; } static void acpi_processor_power_verify_c3(struct acpi_processor *pr, @@ -909,7 +769,6 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr) static inline void acpi_processor_cstate_first_run_checks(void) { - acpi_status status; static int first_run; if (first_run) @@ -921,13 +780,10 @@ static inline void acpi_processor_cstate_first_run_checks(void) max_cstate); first_run++; - if (acpi_gbl_FADT.cst_control && !nocst) { - status = acpi_os_write_port(acpi_gbl_FADT.smi_command, - acpi_gbl_FADT.cst_control, 8); - if (ACPI_FAILURE(status)) - ACPI_EXCEPTION((AE_INFO, status, - "Notifying BIOS of _CST ability failed")); - } + if (nocst) + return; + + acpi_processor_claim_cst_control(); } #else diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 6747a279621b..439880629839 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -61,8 +61,11 @@ static struct notifier_block tts_notifier = { static int acpi_sleep_prepare(u32 acpi_state) { #ifdef CONFIG_ACPI_SLEEP + unsigned long acpi_wakeup_address; + /* do we have a wakeup address for S2 and S3? 
*/ if (acpi_state == ACPI_STATE_S3) { + acpi_wakeup_address = acpi_get_wakeup_address(); if (!acpi_wakeup_address) return -EFAULT; acpi_set_waking_vector(acpi_wakeup_address); diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index 31014c7d3793..419f814d596a 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -303,6 +303,22 @@ static const struct dmi_system_id video_detect_dmi_table[] = { }, }, { + .callback = video_detect_force_native, + .ident = "Lenovo E41-25", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "81FS"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "Lenovo E41-45", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "82BK"), + }, + }, + { /* https://bugzilla.redhat.com/show_bug.cgi?id=1217249 */ .callback = video_detect_force_native, .ident = "Apple MacBook Pro 12,1", @@ -336,6 +352,11 @@ static const struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Precision 7510"), }, }, + + /* + * Desktops which falsely report a backlight and which our heuristics + * for this do not catch. + */ { .callback = video_detect_force_none, .ident = "Dell OptiPlex 9020M", @@ -344,6 +365,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 9020M"), }, }, + { + .callback = video_detect_force_none, + .ident = "MSI MS-7721", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MSI"), + DMI_MATCH(DMI_PRODUCT_NAME, "MS-7721"), + }, + }, { }, }; diff --git a/drivers/android/binder.c b/drivers/android/binder.c index e9bc9fcc7ea5..b2dad43dbf82 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -3310,7 +3310,7 @@ static void binder_transaction(struct binder_proc *proc, binder_size_t parent_offset; struct binder_fd_array_object *fda = to_binder_fd_array_object(hdr); - size_t num_valid = (buffer_offset - off_start_offset) * + size_t num_valid = (buffer_offset - off_start_offset) / sizeof(binder_size_t); struct binder_buffer_object *parent = binder_validate_ptr(target_proc, t->buffer, @@ -3384,7 +3384,7 @@ static void binder_transaction(struct binder_proc *proc, t->buffer->user_data + sg_buf_offset; sg_buf_offset += ALIGN(bp->length, sizeof(u64)); - num_valid = (buffer_offset - off_start_offset) * + num_valid = (buffer_offset - off_start_offset) / sizeof(binder_size_t); ret = binder_fixup_parent(t, thread, bp, off_start_offset, diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c index 46dc54d18f0b..2a04e8abd397 100644 --- a/drivers/ata/acard-ahci.c +++ b/drivers/ata/acard-ahci.c @@ -218,7 +218,6 @@ static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc) void *cmd_tbl; u32 opts; const u32 cmd_fis_len = 5; /* five dwords */ - unsigned int n_elem; /* * Fill in command table information. First, the header, @@ -232,9 +231,8 @@ static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc) memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len); } - n_elem = 0; if (qc->flags & ATA_QCFLAG_DMAMAP) - n_elem = acard_ahci_fill_sg(qc, cmd_tbl); + acard_ahci_fill_sg(qc, cmd_tbl); /* * Fill in command slot information. 
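The binder change above is a one-character operator fix worth spelling out: buffer_offset - off_start_offset is a byte count, and the offsets region holds binder_size_t entries, so the number of validated entries is obtained by division; the old multiplication inflated num_valid by a factor of sizeof(binder_size_t) squared. A standalone illustration (assumed 64-bit binder_size_t, not binder code):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t binder_size_t;

int main(void)
{
        /* Say three offset entries have been walked so far. */
        size_t off_start_offset = 0;
        size_t buffer_offset = 3 * sizeof(binder_size_t);   /* 24 bytes */

        size_t wrong = (buffer_offset - off_start_offset) * sizeof(binder_size_t);
        size_t right = (buffer_offset - off_start_offset) / sizeof(binder_size_t);

        printf("bytes=%zu wrong=%zu right=%zu\n",
               buffer_offset - off_start_offset, wrong, right);  /* 24, 192, 3 */
        return 0;
}

With the multiplication, binder_validate_ptr() was handed an entry count 64 times too large, defeating the bounds check the fix restores.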
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c index f41744b9b38a..6853dbb4131d 100644 --- a/drivers/ata/ahci_brcm.c +++ b/drivers/ata/ahci_brcm.c @@ -73,11 +73,11 @@ enum brcm_ahci_version { BRCM_SATA_BCM7425 = 1, BRCM_SATA_BCM7445, BRCM_SATA_NSP, + BRCM_SATA_BCM7216, }; enum brcm_ahci_quirks { - BRCM_AHCI_QUIRK_NO_NCQ = BIT(0), - BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE = BIT(1), + BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE = BIT(0), }; struct brcm_ahci_priv { @@ -213,19 +213,12 @@ static void brcm_sata_phys_disable(struct brcm_ahci_priv *priv) brcm_sata_phy_disable(priv, i); } -static u32 brcm_ahci_get_portmask(struct platform_device *pdev, +static u32 brcm_ahci_get_portmask(struct ahci_host_priv *hpriv, struct brcm_ahci_priv *priv) { - void __iomem *ahci; - struct resource *res; u32 impl; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ahci"); - ahci = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(ahci)) - return 0; - - impl = readl(ahci + HOST_PORTS_IMPL); + impl = readl(hpriv->mmio + HOST_PORTS_IMPL); if (fls(impl) > SATA_TOP_MAX_PHYS) dev_warn(priv->dev, "warning: more ports than PHYs (%#x)\n", @@ -233,9 +226,6 @@ static u32 brcm_ahci_get_portmask(struct platform_device *pdev, else if (!impl) dev_info(priv->dev, "no ports found\n"); - devm_iounmap(&pdev->dev, ahci); - devm_release_mem_region(&pdev->dev, res->start, resource_size(res)); - return impl; } @@ -285,6 +275,13 @@ static unsigned int brcm_ahci_read_id(struct ata_device *dev, /* Perform the SATA PHY reset sequence */ brcm_sata_phy_disable(priv, ap->port_no); + /* Reset the SATA clock */ + ahci_platform_disable_clks(hpriv); + msleep(10); + + ahci_platform_enable_clks(hpriv); + msleep(10); + /* Bring the PHY back on */ brcm_sata_phy_enable(priv, ap->port_no); @@ -341,7 +338,6 @@ static const struct ata_port_info ahci_brcm_port_info = { .port_ops = &ahci_brcm_platform_ops, }; -#ifdef CONFIG_PM_SLEEP static int brcm_ahci_suspend(struct device *dev) { struct ata_host *host = dev_get_drvdata(dev); @@ -349,23 +345,70 @@ static int brcm_ahci_suspend(struct device *dev) struct brcm_ahci_priv *priv = hpriv->plat_data; int ret; - ret = ahci_platform_suspend(dev); brcm_sata_phys_disable(priv); + + if (IS_ENABLED(CONFIG_PM_SLEEP)) + ret = ahci_platform_suspend(dev); + else + ret = 0; + + if (priv->version != BRCM_SATA_BCM7216) + reset_control_assert(priv->rcdev); + return ret; } -static int brcm_ahci_resume(struct device *dev) +static int __maybe_unused brcm_ahci_resume(struct device *dev) { struct ata_host *host = dev_get_drvdata(dev); struct ahci_host_priv *hpriv = host->private_data; struct brcm_ahci_priv *priv = hpriv->plat_data; + int ret = 0; + + if (priv->version == BRCM_SATA_BCM7216) + ret = reset_control_reset(priv->rcdev); + else + ret = reset_control_deassert(priv->rcdev); + if (ret) + return ret; + + /* Make sure clocks are turned on before re-configuration */ + ret = ahci_platform_enable_clks(hpriv); + if (ret) + return ret; brcm_sata_init(priv); brcm_sata_phys_enable(priv); brcm_sata_alpm_init(hpriv); - return ahci_platform_resume(dev); + + /* Since we had to enable clocks earlier on, we cannot use + * ahci_platform_resume() as-is since a second call to + * ahci_platform_enable_resources() would bump up the resources + * (regulators, clocks, PHYs) count artificially so we copy the part + * after ahci_platform_enable_resources(). 
+ */ + ret = ahci_platform_enable_phys(hpriv); + if (ret) + goto out_disable_phys; + + ret = ahci_platform_resume_host(dev); + if (ret) + goto out_disable_platform_phys; + + /* We resumed so update PM runtime state */ + pm_runtime_disable(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + + return 0; + +out_disable_platform_phys: + ahci_platform_disable_phys(hpriv); +out_disable_phys: + brcm_sata_phys_disable(priv); + ahci_platform_disable_clks(hpriv); + return ret; } -#endif static struct scsi_host_template ahci_platform_sht = { AHCI_SHT(DRV_NAME), @@ -376,6 +419,7 @@ static const struct of_device_id ahci_of_match[] = { {.compatible = "brcm,bcm7445-ahci", .data = (void *)BRCM_SATA_BCM7445}, {.compatible = "brcm,bcm63138-ahci", .data = (void *)BRCM_SATA_BCM7445}, {.compatible = "brcm,bcm-nsp-ahci", .data = (void *)BRCM_SATA_NSP}, + {.compatible = "brcm,bcm7216-ahci", .data = (void *)BRCM_SATA_BCM7216}, {}, }; MODULE_DEVICE_TABLE(of, ahci_of_match); @@ -384,6 +428,7 @@ static int brcm_ahci_probe(struct platform_device *pdev) { const struct of_device_id *of_id; struct device *dev = &pdev->dev; + const char *reset_name = NULL; struct brcm_ahci_priv *priv; struct ahci_host_priv *hpriv; struct resource *res; @@ -405,49 +450,86 @@ static int brcm_ahci_probe(struct platform_device *pdev) if (IS_ERR(priv->top_ctrl)) return PTR_ERR(priv->top_ctrl); - /* Reset is optional depending on platform */ - priv->rcdev = devm_reset_control_get(&pdev->dev, "ahci"); - if (!IS_ERR_OR_NULL(priv->rcdev)) - reset_control_deassert(priv->rcdev); + /* Reset is optional depending on platform and named differently */ + if (priv->version == BRCM_SATA_BCM7216) + reset_name = "rescal"; + else + reset_name = "ahci"; + + priv->rcdev = devm_reset_control_get_optional(&pdev->dev, reset_name); + if (IS_ERR(priv->rcdev)) + return PTR_ERR(priv->rcdev); + + hpriv = ahci_platform_get_resources(pdev, 0); + if (IS_ERR(hpriv)) + return PTR_ERR(hpriv); - if ((priv->version == BRCM_SATA_BCM7425) || - (priv->version == BRCM_SATA_NSP)) { - priv->quirks |= BRCM_AHCI_QUIRK_NO_NCQ; + hpriv->plat_data = priv; + hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP | AHCI_HFLAG_NO_WRITE_TO_RO; + + switch (priv->version) { + case BRCM_SATA_BCM7425: + hpriv->flags |= AHCI_HFLAG_DELAY_ENGINE; + /* fall through */ + case BRCM_SATA_NSP: + hpriv->flags |= AHCI_HFLAG_NO_NCQ; priv->quirks |= BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE; + break; + default: + break; } + if (priv->version == BRCM_SATA_BCM7216) + ret = reset_control_reset(priv->rcdev); + else + ret = reset_control_deassert(priv->rcdev); + if (ret) + return ret; + + ret = ahci_platform_enable_clks(hpriv); + if (ret) + goto out_reset; + + /* Must be first so as to configure endianness including that + * of the standard AHCI register space. 
+ */ brcm_sata_init(priv); - priv->port_mask = brcm_ahci_get_portmask(pdev, priv); - if (!priv->port_mask) - return -ENODEV; + /* Initializes priv->port_mask which is used below */ + priv->port_mask = brcm_ahci_get_portmask(hpriv, priv); + if (!priv->port_mask) { + ret = -ENODEV; + goto out_disable_clks; + } + /* Must be done before ahci_platform_enable_phys() */ brcm_sata_phys_enable(priv); - hpriv = ahci_platform_get_resources(pdev, 0); - if (IS_ERR(hpriv)) - return PTR_ERR(hpriv); - hpriv->plat_data = priv; - hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP; - brcm_sata_alpm_init(hpriv); - ret = ahci_platform_enable_resources(hpriv); + ret = ahci_platform_enable_phys(hpriv); if (ret) - return ret; - - if (priv->quirks & BRCM_AHCI_QUIRK_NO_NCQ) - hpriv->flags |= AHCI_HFLAG_NO_NCQ; - hpriv->flags |= AHCI_HFLAG_NO_WRITE_TO_RO; + goto out_disable_phys; ret = ahci_platform_init_host(pdev, hpriv, &ahci_brcm_port_info, &ahci_platform_sht); if (ret) - return ret; + goto out_disable_platform_phys; dev_info(dev, "Broadcom AHCI SATA3 registered\n"); return 0; + +out_disable_platform_phys: + ahci_platform_disable_phys(hpriv); +out_disable_phys: + brcm_sata_phys_disable(priv); +out_disable_clks: + ahci_platform_disable_clks(hpriv); +out_reset: + if (priv->version != BRCM_SATA_BCM7216) + reset_control_assert(priv->rcdev); + return ret; } static int brcm_ahci_remove(struct platform_device *pdev) @@ -457,20 +539,35 @@ struct brcm_ahci_priv *priv = hpriv->plat_data; int ret; + brcm_sata_phys_disable(priv); + ret = ata_platform_remove_one(pdev); if (ret) return ret; - brcm_sata_phys_disable(priv); - return 0; } +static void brcm_ahci_shutdown(struct platform_device *pdev) +{ + int ret; + + /* All resource releasing happens via devres, but our device, unlike in + * a proper remove, is not disappearing, therefore using + * brcm_ahci_suspend() here, which does explicit power management, is + * appropriate. + */ + ret = brcm_ahci_suspend(&pdev->dev); + if (ret) + dev_err(&pdev->dev, "failed to shutdown\n"); +} + static SIMPLE_DEV_PM_OPS(ahci_brcm_pm_ops, brcm_ahci_suspend, brcm_ahci_resume); static struct platform_driver brcm_ahci_driver = { .probe = brcm_ahci_probe, .remove = brcm_ahci_remove, + .shutdown = brcm_ahci_shutdown, .driver = { .name = DRV_NAME, .of_match_table = ahci_of_match, diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c index 8befce036af8..129556fcf6be 100644 --- a/drivers/ata/libahci_platform.c +++ b/drivers/ata/libahci_platform.c @@ -43,7 +43,7 @@ EXPORT_SYMBOL_GPL(ahci_platform_ops); * RETURNS: * 0 on success otherwise a negative error code */ -static int ahci_platform_enable_phys(struct ahci_host_priv *hpriv) +int ahci_platform_enable_phys(struct ahci_host_priv *hpriv) { int rc, i; @@ -74,6 +74,7 @@ disable_phys: } return rc; } +EXPORT_SYMBOL_GPL(ahci_platform_enable_phys); /** * ahci_platform_disable_phys - Disable PHYs * * This function disables all PHYs found in hpriv->phys. 
*/ -static void ahci_platform_disable_phys(struct ahci_host_priv *hpriv) +void ahci_platform_disable_phys(struct ahci_host_priv *hpriv) { int i; @@ -90,6 +91,7 @@ static void ahci_platform_disable_phys(struct ahci_host_priv *hpriv) phy_exit(hpriv->phys[i]); } } +EXPORT_SYMBOL_GPL(ahci_platform_disable_phys); /** * ahci_platform_enable_clks - Enable platform clocks diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index e9017c570bc5..6f4ab5c5b52d 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -5329,6 +5329,30 @@ void ata_qc_complete(struct ata_queued_cmd *qc) } /** + * ata_qc_get_active - get bitmask of active qcs + * @ap: port in question + * + * LOCKING: + * spin_lock_irqsave(host lock) + * + * RETURNS: + * Bitmask of active qcs + */ +u64 ata_qc_get_active(struct ata_port *ap) +{ + u64 qc_active = ap->qc_active; + + /* ATA_TAG_INTERNAL is sent to hw as tag 0 */ + if (qc_active & (1ULL << ATA_TAG_INTERNAL)) { + qc_active |= (1 << 0); + qc_active &= ~(1ULL << ATA_TAG_INTERNAL); + } + + return qc_active; +} +EXPORT_SYMBOL_GPL(ata_qc_get_active); + +/** * ata_qc_complete_multiple - Complete multiple qcs successfully * @ap: port in question * @qc_active: new qc_active mask
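ata_qc_get_active(), added above, exists because ATA_TAG_INTERNAL occupies bit 32 of qc_active while the hardware issues that internal command as tag 0, so any driver XOR-ing qc_active against a hardware done_mask must first fold bit 32 down to bit 0; the sata_fsl, sata_mv and sata_nv hunks below switch to exactly that. A standalone sketch of the remap (ATA_TAG_INTERNAL assumed to be 32 as in this kernel, not kernel code):

#include <stdio.h>
#include <stdint.h>

#define ATA_TAG_INTERNAL 32

static uint64_t qc_get_active(uint64_t qc_active)
{
        /* The internal tag is sent to the hardware as tag 0. */
        if (qc_active & (1ULL << ATA_TAG_INTERNAL)) {
                qc_active |= 1ULL << 0;
                qc_active &= ~(1ULL << ATA_TAG_INTERNAL);
        }
        return qc_active;
}

int main(void)
{
        uint64_t qc_active = 1ULL << ATA_TAG_INTERNAL; /* only the internal qc is active */
        uint64_t done_mask = 0x1;                      /* hardware reports tag 0 done */

        /* Raw qc_active ^ done_mask leaves stale bits; the remapped mask is clean. */
        printf("raw xor: %#llx, remapped xor: %#llx\n",
               (unsigned long long)(qc_active ^ done_mask),
               (unsigned long long)(qc_get_active(qc_active) ^ done_mask)); /* 0x100000001, 0 */
        return 0;
}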
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c index 135173c8d138..391dff0f25a2 100644 --- a/drivers/ata/pata_arasan_cf.c +++ b/drivers/ata/pata_arasan_cf.c @@ -824,7 +824,7 @@ static int arasan_cf_probe(struct platform_device *pdev) quirk |= CF_BROKEN_MWDMA | CF_BROKEN_UDMA; acdev->pbase = res->start; - acdev->vbase = devm_ioremap_nocache(&pdev->dev, res->start, + acdev->vbase = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!acdev->vbase) { dev_warn(&pdev->dev, "ioremap fail\n"); diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c index 1bfd0154dad5..e47a28271f5b 100644 --- a/drivers/ata/pata_macio.c +++ b/drivers/ata/pata_macio.c @@ -979,7 +979,7 @@ static void pata_macio_invariants(struct pata_macio_priv *priv) priv->aapl_bus_id = bidp ? *bidp : 0; /* Fixup missing Apple bus ID in case of media-bay */ - if (priv->mediabay && bidp == 0) + if (priv->mediabay && !bidp) priv->aapl_bus_id = 1; } diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c index d3d851b014a3..bd87476ab481 100644 --- a/drivers/ata/pata_octeon_cf.c +++ b/drivers/ata/pata_octeon_cf.c @@ -891,7 +891,7 @@ static int octeon_cf_probe(struct platform_device *pdev) of_node_put(dma_node); return -EINVAL; } - cf_port->dma_base = (u64)devm_ioremap_nocache(&pdev->dev, res_dma->start, + cf_port->dma_base = (u64)devm_ioremap(&pdev->dev, res_dma->start, resource_size(res_dma)); if (!cf_port->dma_base) { of_node_put(dma_node); @@ -909,7 +909,7 @@ static int octeon_cf_probe(struct platform_device *pdev) if (!res_cs1) return -EINVAL; - cs1 = devm_ioremap_nocache(&pdev->dev, res_cs1->start, + cs1 = devm_ioremap(&pdev->dev, res_cs1->start, resource_size(res_cs1)); if (!cs1) return rv; @@ -925,7 +925,7 @@ static int octeon_cf_probe(struct platform_device *pdev) if (!res_cs0) return -EINVAL; - cs0 = devm_ioremap_nocache(&pdev->dev, res_cs0->start, + cs0 = devm_ioremap(&pdev->dev, res_cs0->start, resource_size(res_cs0)); if (!cs0) return rv; diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c index deae466395de..479c4b29b856 100644 --- a/drivers/ata/pata_rb532_cf.c +++ b/drivers/ata/pata_rb532_cf.c @@ -140,7 +140,7 @@ static int rb532_pata_driver_probe(struct platform_device *pdev) info->gpio_line = gpiod; info->irq = irq; - info->iobase = devm_ioremap_nocache(&pdev->dev, res->start, + info->iobase = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!info->iobase) return -ENOMEM; diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index 9239615d8a04..d55ee244d693 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c @@ -1280,7 +1280,7 @@ static void sata_fsl_host_intr(struct ata_port *ap) i, ioread32(hcr_base + CC), ioread32(hcr_base + CA)); } - ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask); + ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask); return; } else if ((ap->qc_active & (1ULL << ATA_TAG_INTERNAL))) { diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index 277f11909fc1..d7228f8e9297 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -2829,7 +2829,7 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp } if (work_done) { - ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask); + ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask); /* Update the software queue position index in hardware */ writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index f3e62f5528bd..eb9dc14e5147 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c @@ -984,7 +984,7 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance) check_commands = 0; check_commands &= ~(1 << pos); } - ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask); + ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask); } } diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c index b23d1e4bad33..17d47ad03ab7 100644 --- a/drivers/atm/eni.c +++ b/drivers/atm/eni.c @@ -31,12 +31,6 @@ #include "suni.h" #include "eni.h" -#if !defined(__i386__) && !defined(__x86_64__) -#ifndef ioremap_nocache -#define ioremap_nocache(X,Y) ioremap(X,Y) -#endif -#endif - /* * TODO: * @@ -374,7 +368,7 @@ static int do_rx_dma(struct atm_vcc *vcc,struct sk_buff *skb, here = (eni_vcc->descr+skip) & 
(eni_vcc->words-1); dma[j++] = (here << MID_DMA_COUNT_SHIFT) | (vcc->vci << MID_DMA_VCI_SHIFT) | MID_DT_JK; - j++; + dma[j++] = 0; } here = (eni_vcc->descr+size+skip) & (eni_vcc->words-1); if (!eff) size += skip; @@ -447,7 +441,7 @@ static int do_rx_dma(struct atm_vcc *vcc,struct sk_buff *skb, if (size != eff) { dma[j++] = (here << MID_DMA_COUNT_SHIFT) | (vcc->vci << MID_DMA_VCI_SHIFT) | MID_DT_JK; - j++; + dma[j++] = 0; } if (!j || j > 2*RX_DMA_BUF) { printk(KERN_CRIT DEV_LABEL "!j or j too big!!!\n"); @@ -1725,7 +1719,7 @@ static int eni_do_init(struct atm_dev *dev) } printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d,base=0x%lx,irq=%d,", dev->number,pci_dev->revision,real_base,eni_dev->irq); - if (!(base = ioremap_nocache(real_base,MAP_MAX_SIZE))) { + if (!(base = ioremap(real_base,MAP_MAX_SIZE))) { printk("\n"); printk(KERN_ERR DEV_LABEL "(itf %d): can't set up page " "mapping\n",dev->number); diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c index aad00d2b28f5..cc87004d5e2d 100644 --- a/drivers/atm/firestream.c +++ b/drivers/atm/firestream.c @@ -912,6 +912,7 @@ static int fs_open(struct atm_vcc *atm_vcc) } if (!to) { printk ("No more free channels for FS50..\n"); + kfree(vcc); return -EBUSY; } vcc->channo = dev->channo; @@ -922,6 +923,7 @@ static int fs_open(struct atm_vcc *atm_vcc) if (((DO_DIRECTION(rxtp) && dev->atm_vccs[vcc->channo])) || ( DO_DIRECTION(txtp) && test_bit (vcc->channo, dev->tx_inuse))) { printk ("Channel is in use for FS155.\n"); + kfree(vcc); return -EBUSY; } } @@ -935,6 +937,7 @@ static int fs_open(struct atm_vcc *atm_vcc) tc, sizeof (struct fs_transmit_config)); if (!tc) { fs_dprintk (FS_DEBUG_OPEN, "fs: can't alloc transmit_config.\n"); + kfree(vcc); return -ENOMEM; } diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c index 30d0523014e0..6cdbf1531238 100644 --- a/drivers/base/devtmpfs.c +++ b/drivers/base/devtmpfs.c @@ -359,7 +359,7 @@ static int handle_remove(const char *nodename, struct device *dev) * If configured, or requested by the commandline, devtmpfs will be * auto-mounted after the kernel mounted the root filesystem. 
*/ -int devtmpfs_mount(const char *mntdir) +int devtmpfs_mount(void) { int err; @@ -369,7 +369,7 @@ int devtmpfs_mount(const char *mntdir) if (!thread) return 0; - err = ksys_mount("devtmpfs", mntdir, "devtmpfs", MS_SILENT, NULL); + err = do_mount("devtmpfs", "dev", "devtmpfs", MS_SILENT, NULL); if (err) printk(KERN_INFO "devtmpfs: error mounting %i\n", err); else @@ -394,7 +394,7 @@ static int devtmpfsd(void *p) *err = ksys_unshare(CLONE_NEWNS); if (*err) goto out; - *err = ksys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, NULL); + *err = do_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, NULL); if (*err) goto out; ksys_chdir("/.."); /* will traverse into overmounted root */ diff --git a/drivers/base/firmware_loader/builtin/Makefile b/drivers/base/firmware_loader/builtin/Makefile index 4a66888e7253..5fa7ce3745a0 100644 --- a/drivers/base/firmware_loader/builtin/Makefile +++ b/drivers/base/firmware_loader/builtin/Makefile @@ -17,7 +17,7 @@ PROGBITS = $(if $(CONFIG_ARM),%,@)progbits filechk_fwbin = \ echo "/* Generated by $(src)/Makefile */" ;\ echo " .section .rodata" ;\ - echo " .p2align $(ASM_ALIGN)" ;\ + echo " .p2align 4" ;\ echo "_fw_$(FWSTR)_bin:" ;\ echo " .incbin \"$(fwdir)/$(FWNAME)\"" ;\ echo "_fw_end:" ;\ diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 7c532548b0a6..cf6b6b722e5c 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -1325,10 +1325,14 @@ struct device *platform_find_device_by_driver(struct device *start, } EXPORT_SYMBOL_GPL(platform_find_device_by_driver); +void __weak __init early_platform_cleanup(void) { } + int __init platform_bus_init(void) { int error; + early_platform_cleanup(); + error = device_register(&platform_bus); if (error) { put_device(&platform_bus); diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 48616f358854..16134a69bf6f 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -1006,8 +1006,10 @@ int __pm_runtime_idle(struct device *dev, int rpmflags) int retval; if (rpmflags & RPM_GET_PUT) { - if (!atomic_dec_and_test(&dev->power.usage_count)) + if (!atomic_dec_and_test(&dev->power.usage_count)) { + trace_rpm_usage_rcuidle(dev, rpmflags); return 0; + } } might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); @@ -1038,8 +1040,10 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags) int retval; if (rpmflags & RPM_GET_PUT) { - if (!atomic_dec_and_test(&dev->power.usage_count)) + if (!atomic_dec_and_test(&dev->power.usage_count)) { + trace_rpm_usage_rcuidle(dev, rpmflags); return 0; + } } might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); @@ -1101,6 +1105,7 @@ int pm_runtime_get_if_in_use(struct device *dev) retval = dev->power.disable_depth > 0 ? 
-EINVAL : dev->power.runtime_status == RPM_ACTIVE && atomic_inc_not_zero(&dev->power.usage_count); + trace_rpm_usage_rcuidle(dev, 0); spin_unlock_irqrestore(&dev->power.lock, flags); return retval; } @@ -1434,6 +1439,8 @@ void pm_runtime_allow(struct device *dev) dev->power.runtime_auto = true; if (atomic_dec_and_test(&dev->power.usage_count)) rpm_idle(dev, RPM_AUTO | RPM_ASYNC); + else + trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC); out: spin_unlock_irq(&dev->power.lock); @@ -1501,6 +1508,8 @@ static void update_autosuspend(struct device *dev, int old_delay, int old_use) if (!old_use || old_delay >= 0) { atomic_inc(&dev->power.usage_count); rpm_resume(dev, 0); + } else { + trace_rpm_usage_rcuidle(dev, 0); } } diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 70a9edb5f525..27f3e60608e5 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -1125,6 +1125,9 @@ static void *wakeup_sources_stats_seq_next(struct seq_file *m, break; } + if (!next_ws) + print_wakeup_source_stats(m, &deleted_ws); + return next_ws; } diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c index ac9b31c57967..008f8da69d97 100644 --- a/drivers/base/regmap/regmap-i2c.c +++ b/drivers/base/regmap/regmap-i2c.c @@ -43,7 +43,7 @@ static int regmap_smbus_byte_reg_write(void *context, unsigned int reg, return i2c_smbus_write_byte_data(i2c, reg, val); } -static struct regmap_bus regmap_smbus_byte = { +static const struct regmap_bus regmap_smbus_byte = { .reg_write = regmap_smbus_byte_reg_write, .reg_read = regmap_smbus_byte_reg_read, }; @@ -79,7 +79,7 @@ static int regmap_smbus_word_reg_write(void *context, unsigned int reg, return i2c_smbus_write_word_data(i2c, reg, val); } -static struct regmap_bus regmap_smbus_word = { +static const struct regmap_bus regmap_smbus_word = { .reg_write = regmap_smbus_word_reg_write, .reg_read = regmap_smbus_word_reg_read, }; @@ -115,7 +115,7 @@ static int regmap_smbus_word_write_swapped(void *context, unsigned int reg, return i2c_smbus_write_word_swapped(i2c, reg, val); } -static struct regmap_bus regmap_smbus_word_swapped = { +static const struct regmap_bus regmap_smbus_word_swapped = { .reg_write = regmap_smbus_word_write_swapped, .reg_read = regmap_smbus_word_read_swapped, }; @@ -197,7 +197,7 @@ static int regmap_i2c_read(void *context, return -EIO; } -static struct regmap_bus regmap_i2c = { +static const struct regmap_bus regmap_i2c = { .write = regmap_i2c_write, .gather_write = regmap_i2c_gather_write, .read = regmap_i2c_read, @@ -239,7 +239,7 @@ static int regmap_i2c_smbus_i2c_read(void *context, const void *reg, return -EIO; } -static struct regmap_bus regmap_i2c_smbus_i2c_block = { +static const struct regmap_bus regmap_i2c_smbus_i2c_block = { .write = regmap_i2c_smbus_i2c_write, .read = regmap_i2c_smbus_i2c_read, .max_raw_read = I2C_SMBUS_BLOCK_MAX, diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 19f57ccfbe1d..59f911e57719 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -1488,11 +1488,18 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg, WARN_ON(!map->bus); - /* Check for unwritable registers before we start */ - for (i = 0; i < val_len / map->format.val_bytes; i++) - if (!regmap_writeable(map, - reg + regmap_get_offset(map, i))) - return -EINVAL; + /* Check for unwritable or noinc registers in range + * before we start + */ + if (!regmap_writeable_noinc(map, reg)) { + for (i = 0; i < val_len / map->format.val_bytes; 
i++) { + unsigned int element = + reg + regmap_get_offset(map, i); + if (!regmap_writeable(map, element) || + regmap_writeable_noinc(map, element)) + return -EINVAL; + } + } if (!map->cache_bypass && map->format.parse_val) { unsigned int ival; diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c index d8d0dc0ca5ac..0b081dee1e95 100644 --- a/drivers/base/swnode.c +++ b/drivers/base/swnode.c @@ -108,10 +108,7 @@ static const void *property_get_pointer(const struct property_entry *prop) if (!prop->length) return NULL; - if (prop->is_array) - return prop->pointer; - - return &prop->value; + return prop->is_inline ? &prop->value : prop->pointer; } static const void *property_entry_find(const struct property_entry *props, @@ -201,92 +198,91 @@ static int property_entry_read_string_array(const struct property_entry *props, static void property_entry_free_data(const struct property_entry *p) { - const void *pointer = property_get_pointer(p); const char * const *src_str; size_t i, nval; - if (p->is_array) { - if (p->type == DEV_PROP_STRING && p->pointer) { - src_str = p->pointer; - nval = p->length / sizeof(const char *); - for (i = 0; i < nval; i++) - kfree(src_str[i]); - } - kfree(pointer); - } else if (p->type == DEV_PROP_STRING) { - kfree(p->value.str); + if (p->type == DEV_PROP_STRING) { + src_str = property_get_pointer(p); + nval = p->length / sizeof(*src_str); + for (i = 0; i < nval; i++) + kfree(src_str[i]); } + + if (!p->is_inline) + kfree(p->pointer); + kfree(p->name); } -static const char * const * -property_copy_string_array(const struct property_entry *src) +static bool property_copy_string_array(const char **dst_ptr, + const char * const *src_ptr, + size_t nval) { - const char **d; - const char * const *src_str = src->pointer; - size_t nval = src->length / sizeof(*d); int i; - d = kcalloc(nval, sizeof(*d), GFP_KERNEL); - if (!d) - return NULL; - for (i = 0; i < nval; i++) { - d[i] = kstrdup(src_str[i], GFP_KERNEL); - if (!d[i] && src_str[i]) { + dst_ptr[i] = kstrdup(src_ptr[i], GFP_KERNEL); + if (!dst_ptr[i] && src_ptr[i]) { while (--i >= 0) - kfree(d[i]); - kfree(d); - return NULL; + kfree(dst_ptr[i]); + return false; } } - return d; + return true; } static int property_entry_copy_data(struct property_entry *dst, const struct property_entry *src) { const void *pointer = property_get_pointer(src); - const void *new; - - if (src->is_array) { - if (!src->length) - return -ENODATA; - - if (src->type == DEV_PROP_STRING) { - new = property_copy_string_array(src); - if (!new) - return -ENOMEM; - } else { - new = kmemdup(pointer, src->length, GFP_KERNEL); - if (!new) - return -ENOMEM; - } + void *dst_ptr; + size_t nval; + + /* + * Properties with no data should not be marked as stored + * out of line. + */ + if (!src->is_inline && !src->length) + return -ENODATA; + + /* + * Reference properties are never stored inline as + * they are too big. 
+ */ + if (src->type == DEV_PROP_REF && src->is_inline) + return -EINVAL; - dst->is_array = true; - dst->pointer = new; - } else if (src->type == DEV_PROP_STRING) { - new = kstrdup(src->value.str, GFP_KERNEL); - if (!new && src->value.str) + if (src->length <= sizeof(dst->value)) { + dst_ptr = &dst->value; + dst->is_inline = true; + } else { + dst_ptr = kmalloc(src->length, GFP_KERNEL); + if (!dst_ptr) return -ENOMEM; + dst->pointer = dst_ptr; + } - dst->value.str = new; + if (src->type == DEV_PROP_STRING) { + nval = src->length / sizeof(const char *); + if (!property_copy_string_array(dst_ptr, pointer, nval)) { + if (!dst->is_inline) + kfree(dst->pointer); + return -ENOMEM; + } } else { - dst->value = src->value; + memcpy(dst_ptr, pointer, src->length); } dst->length = src->length; dst->type = src->type; dst->name = kstrdup(src->name, GFP_KERNEL); - if (!dst->name) - goto out_free_data; + if (!dst->name) { + property_entry_free_data(dst); + return -ENOMEM; + } return 0; - -out_free_data: - property_entry_free_data(dst); - return -ENOMEM; } /** @@ -483,31 +479,49 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode, struct fwnode_reference_args *args) { struct swnode *swnode = to_swnode(fwnode); - const struct software_node_reference *ref; + const struct software_node_ref_args *ref_array; + const struct software_node_ref_args *ref; const struct property_entry *prop; struct fwnode_handle *refnode; + u32 nargs_prop_val; + int error; int i; - if (!swnode || !swnode->node->references) + if (!swnode) return -ENOENT; - for (ref = swnode->node->references; ref->name; ref++) - if (!strcmp(ref->name, propname)) - break; + prop = property_entry_get(swnode->node->properties, propname); + if (!prop) + return -ENOENT; + + if (prop->type != DEV_PROP_REF) + return -EINVAL; - if (!ref->name || index > (ref->nrefs - 1)) + /* + * We expect that references are never stored inline, even + * single ones, as they are too big. + */ + if (prop->is_inline) + return -EINVAL; + + if (index * sizeof(*ref) >= prop->length) return -ENOENT; - refnode = software_node_fwnode(ref->refs[index].node); + ref_array = prop->pointer; + ref = &ref_array[index]; + + refnode = software_node_fwnode(ref->node); if (!refnode) return -ENOENT; if (nargs_prop) { - prop = property_entry_get(swnode->node->properties, nargs_prop); - if (!prop) - return -EINVAL; + error = property_entry_read_int_array(swnode->node->properties, + nargs_prop, sizeof(u32), + &nargs_prop_val, 1); + if (error) + return error; - nargs = prop->value.u32_data; + nargs = nargs_prop_val; } if (nargs > NR_FWNODE_REFERENCE_ARGS) @@ -517,7 +531,7 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode, args->nargs = nargs; for (i = 0; i < nargs; i++) - args->args[i] = ref->refs[index].args[i]; + args->args[i] = ref->args[i]; return 0; } diff --git a/drivers/base/test/Kconfig b/drivers/base/test/Kconfig index 86e85daa80bf..305c7751184a 100644 --- a/drivers/base/test/Kconfig +++ b/drivers/base/test/Kconfig @@ -8,3 +8,6 @@ config TEST_ASYNC_DRIVER_PROBE The module name will be test_async_driver_probe.ko If unsure say N. 
+config KUNIT_DRIVER_PE_TEST + bool "KUnit Tests for property entry API" + depends on KUNIT=y diff --git a/drivers/base/test/Makefile b/drivers/base/test/Makefile index 0f1f7277a013..3ca56367c84b 100644 --- a/drivers/base/test/Makefile +++ b/drivers/base/test/Makefile @@ -1,2 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_TEST_ASYNC_DRIVER_PROBE) += test_async_driver_probe.o + +obj-$(CONFIG_KUNIT_DRIVER_PE_TEST) += property-entry-test.o diff --git a/drivers/base/test/property-entry-test.c b/drivers/base/test/property-entry-test.c new file mode 100644 index 000000000000..abe03315180f --- /dev/null +++ b/drivers/base/test/property-entry-test.c @@ -0,0 +1,475 @@ +// SPDX-License-Identifier: GPL-2.0 +// Unit tests for property entries API +// +// Copyright 2019 Google LLC. + +#include <kunit/test.h> +#include <linux/property.h> +#include <linux/types.h> + +static void pe_test_uints(struct kunit *test) +{ + static const struct property_entry entries[] = { + PROPERTY_ENTRY_U8("prop-u8", 8), + PROPERTY_ENTRY_U16("prop-u16", 16), + PROPERTY_ENTRY_U32("prop-u32", 32), + PROPERTY_ENTRY_U64("prop-u64", 64), + { } + }; + + struct fwnode_handle *node; + u8 val_u8, array_u8[2]; + u16 val_u16, array_u16[2]; + u32 val_u32, array_u32[2]; + u64 val_u64, array_u64[2]; + int error; + + node = fwnode_create_software_node(entries, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node); + + error = fwnode_property_read_u8(node, "prop-u8", &val_u8); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)val_u8, 8); + + error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 1); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u8[0], 8); + + error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 2); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u8(node, "no-prop-u8", &val_u8); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u8_array(node, "no-prop-u8", array_u8, 1); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u16(node, "prop-u16", &val_u16); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)val_u16, 16); + + error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 1); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u16[0], 16); + + error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 2); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u16(node, "no-prop-u16", &val_u16); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u16_array(node, "no-prop-u16", array_u16, 1); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u32(node, "prop-u32", &val_u32); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)val_u32, 32); + + error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 1); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u32[0], 32); + + error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 2); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u32(node, "no-prop-u32", &val_u32); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u32_array(node, "no-prop-u32", array_u32, 1); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u64(node, "prop-u64", &val_u64); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)val_u64, 64); + + error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 1); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, 
(int)array_u64[0], 64); + + error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 2); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u64(node, "no-prop-u64", &val_u64); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u64_array(node, "no-prop-u64", array_u64, 1); + KUNIT_EXPECT_NE(test, error, 0); + + fwnode_remove_software_node(node); +} + +static void pe_test_uint_arrays(struct kunit *test) +{ + static const u8 a_u8[16] = { 8, 9 }; + static const u16 a_u16[16] = { 16, 17 }; + static const u32 a_u32[16] = { 32, 33 }; + static const u64 a_u64[16] = { 64, 65 }; + static const struct property_entry entries[] = { + PROPERTY_ENTRY_U8_ARRAY("prop-u8", a_u8), + PROPERTY_ENTRY_U16_ARRAY("prop-u16", a_u16), + PROPERTY_ENTRY_U32_ARRAY("prop-u32", a_u32), + PROPERTY_ENTRY_U64_ARRAY("prop-u64", a_u64), + { } + }; + + struct fwnode_handle *node; + u8 val_u8, array_u8[32]; + u16 val_u16, array_u16[32]; + u32 val_u32, array_u32[32]; + u64 val_u64, array_u64[32]; + int error; + + node = fwnode_create_software_node(entries, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node); + + error = fwnode_property_read_u8(node, "prop-u8", &val_u8); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)val_u8, 8); + + error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 1); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u8[0], 8); + + error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 2); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u8[0], 8); + KUNIT_EXPECT_EQ(test, (int)array_u8[1], 9); + + error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 17); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u8(node, "no-prop-u8", &val_u8); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u8_array(node, "no-prop-u8", array_u8, 1); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u16(node, "prop-u16", &val_u16); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)val_u16, 16); + + error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 1); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u16[0], 16); + + error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 2); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u16[0], 16); + KUNIT_EXPECT_EQ(test, (int)array_u16[1], 17); + + error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 17); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u16(node, "no-prop-u16", &val_u16); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u16_array(node, "no-prop-u16", array_u16, 1); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u32(node, "prop-u32", &val_u32); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)val_u32, 32); + + error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 1); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u32[0], 32); + + error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 2); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u32[0], 32); + KUNIT_EXPECT_EQ(test, (int)array_u32[1], 33); + + error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 17); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u32(node, "no-prop-u32", &val_u32); + KUNIT_EXPECT_NE(test, error, 0); + + error = 
fwnode_property_read_u32_array(node, "no-prop-u32", array_u32, 1); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u64(node, "prop-u64", &val_u64); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)val_u64, 64); + + error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 1); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u64[0], 64); + + error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 2); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u64[0], 64); + KUNIT_EXPECT_EQ(test, (int)array_u64[1], 65); + + error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 17); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u64(node, "no-prop-u64", &val_u64); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u64_array(node, "no-prop-u64", array_u64, 1); + KUNIT_EXPECT_NE(test, error, 0); + + fwnode_remove_software_node(node); +} + +static void pe_test_strings(struct kunit *test) +{ + static const char *strings[] = { + "string-a", + "string-b", + }; + + static const struct property_entry entries[] = { + PROPERTY_ENTRY_STRING("str", "single"), + PROPERTY_ENTRY_STRING("empty", ""), + PROPERTY_ENTRY_STRING_ARRAY("strs", strings), + { } + }; + + struct fwnode_handle *node; + const char *str; + const char *strs[10]; + int error; + + node = fwnode_create_software_node(entries, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node); + + error = fwnode_property_read_string(node, "str", &str); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_STREQ(test, str, "single"); + + error = fwnode_property_read_string_array(node, "str", strs, 1); + KUNIT_EXPECT_EQ(test, error, 1); + KUNIT_EXPECT_STREQ(test, strs[0], "single"); + + /* asking for more data returns what we have */ + error = fwnode_property_read_string_array(node, "str", strs, 2); + KUNIT_EXPECT_EQ(test, error, 1); + KUNIT_EXPECT_STREQ(test, strs[0], "single"); + + error = fwnode_property_read_string(node, "no-str", &str); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_string_array(node, "no-str", strs, 1); + KUNIT_EXPECT_LT(test, error, 0); + + error = fwnode_property_read_string(node, "empty", &str); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_STREQ(test, str, ""); + + error = fwnode_property_read_string_array(node, "strs", strs, 3); + KUNIT_EXPECT_EQ(test, error, 2); + KUNIT_EXPECT_STREQ(test, strs[0], "string-a"); + KUNIT_EXPECT_STREQ(test, strs[1], "string-b"); + + error = fwnode_property_read_string_array(node, "strs", strs, 1); + KUNIT_EXPECT_EQ(test, error, 1); + KUNIT_EXPECT_STREQ(test, strs[0], "string-a"); + + /* NULL argument -> returns size */ + error = fwnode_property_read_string_array(node, "strs", NULL, 0); + KUNIT_EXPECT_EQ(test, error, 2); + + /* accessing array as single value */ + error = fwnode_property_read_string(node, "strs", &str); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_STREQ(test, str, "string-a"); + + fwnode_remove_software_node(node); +} + +static void pe_test_bool(struct kunit *test) +{ + static const struct property_entry entries[] = { + PROPERTY_ENTRY_BOOL("prop"), + { } + }; + + struct fwnode_handle *node; + + node = fwnode_create_software_node(entries, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node); + + KUNIT_EXPECT_TRUE(test, fwnode_property_read_bool(node, "prop")); + KUNIT_EXPECT_FALSE(test, fwnode_property_read_bool(node, "not-prop")); + + fwnode_remove_software_node(node); +} + +/* Verifies that small U8 array is stored inline when 
property is copied */ +static void pe_test_move_inline_u8(struct kunit *test) +{ + static const u8 u8_array_small[8] = { 1, 2, 3, 4 }; + static const u8 u8_array_big[128] = { 5, 6, 7, 8 }; + static const struct property_entry entries[] = { + PROPERTY_ENTRY_U8_ARRAY("small", u8_array_small), + PROPERTY_ENTRY_U8_ARRAY("big", u8_array_big), + { } + }; + + struct property_entry *copy; + const u8 *data_ptr; + + copy = property_entries_dup(entries); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, copy); + + KUNIT_EXPECT_TRUE(test, copy[0].is_inline); + data_ptr = (u8 *)&copy[0].value; + KUNIT_EXPECT_EQ(test, (int)data_ptr[0], 1); + KUNIT_EXPECT_EQ(test, (int)data_ptr[1], 2); + + KUNIT_EXPECT_FALSE(test, copy[1].is_inline); + data_ptr = copy[1].pointer; + KUNIT_EXPECT_EQ(test, (int)data_ptr[0], 5); + KUNIT_EXPECT_EQ(test, (int)data_ptr[1], 6); + + property_entries_free(copy); +} + +/* Verifies that single string array is stored inline when property is copied */ +static void pe_test_move_inline_str(struct kunit *test) +{ + static char *str_array_small[] = { "a" }; + static char *str_array_big[] = { "b", "c", "d", "e" }; + static char *str_array_small_empty[] = { "" }; + static struct property_entry entries[] = { + PROPERTY_ENTRY_STRING_ARRAY("small", str_array_small), + PROPERTY_ENTRY_STRING_ARRAY("big", str_array_big), + PROPERTY_ENTRY_STRING_ARRAY("small-empty", str_array_small_empty), + { } + }; + + struct property_entry *copy; + const char * const *data_ptr; + + copy = property_entries_dup(entries); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, copy); + + KUNIT_EXPECT_TRUE(test, copy[0].is_inline); + KUNIT_EXPECT_STREQ(test, copy[0].value.str[0], "a"); + + KUNIT_EXPECT_FALSE(test, copy[1].is_inline); + data_ptr = copy[1].pointer; + KUNIT_EXPECT_STREQ(test, data_ptr[0], "b"); + KUNIT_EXPECT_STREQ(test, data_ptr[1], "c"); + + KUNIT_EXPECT_TRUE(test, copy[2].is_inline); + KUNIT_EXPECT_STREQ(test, copy[2].value.str[0], ""); + + property_entries_free(copy); +} + +/* Handling of reference properties */ +static void pe_test_reference(struct kunit *test) +{ + static const struct software_node nodes[] = { + { .name = "1", }, + { .name = "2", }, + { } + }; + + static const struct software_node_ref_args refs[] = { + { + .node = &nodes[0], + .nargs = 0, + }, + { + .node = &nodes[1], + .nargs = 2, + .args = { 3, 4 }, + }, + }; + + const struct property_entry entries[] = { + PROPERTY_ENTRY_REF("ref-1", &nodes[0]), + PROPERTY_ENTRY_REF("ref-2", &nodes[1], 1, 2), + PROPERTY_ENTRY_REF_ARRAY("ref-3", refs), + { } + }; + + struct fwnode_handle *node; + struct fwnode_reference_args ref; + int error; + + error = software_node_register_nodes(nodes); + KUNIT_ASSERT_EQ(test, error, 0); + + node = fwnode_create_software_node(entries, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node); + + error = fwnode_property_get_reference_args(node, "ref-1", NULL, + 0, 0, &ref); + KUNIT_ASSERT_EQ(test, error, 0); + KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[0]); + KUNIT_EXPECT_EQ(test, ref.nargs, 0U); + + /* wrong index */ + error = fwnode_property_get_reference_args(node, "ref-1", NULL, + 0, 1, &ref); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_get_reference_args(node, "ref-2", NULL, + 1, 0, &ref); + KUNIT_ASSERT_EQ(test, error, 0); + KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[1]); + KUNIT_EXPECT_EQ(test, ref.nargs, 1U); + KUNIT_EXPECT_EQ(test, ref.args[0], 1LLU); + + /* asking for more args, padded with zero data */ + error = fwnode_property_get_reference_args(node, "ref-2", NULL, + 3, 0, &ref);
+ KUNIT_ASSERT_EQ(test, error, 0); + KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[1]); + KUNIT_EXPECT_EQ(test, ref.nargs, 3U); + KUNIT_EXPECT_EQ(test, ref.args[0], 1LLU); + KUNIT_EXPECT_EQ(test, ref.args[1], 2LLU); + KUNIT_EXPECT_EQ(test, ref.args[2], 0LLU); + + /* wrong index */ + error = fwnode_property_get_reference_args(node, "ref-2", NULL, + 2, 1, &ref); + KUNIT_EXPECT_NE(test, error, 0); + + /* array of references */ + error = fwnode_property_get_reference_args(node, "ref-3", NULL, + 0, 0, &ref); + KUNIT_ASSERT_EQ(test, error, 0); + KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[0]); + KUNIT_EXPECT_EQ(test, ref.nargs, 0U); + + /* second reference in the array */ + error = fwnode_property_get_reference_args(node, "ref-3", NULL, + 2, 1, &ref); + KUNIT_ASSERT_EQ(test, error, 0); + KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[1]); + KUNIT_EXPECT_EQ(test, ref.nargs, 2U); + KUNIT_EXPECT_EQ(test, ref.args[0], 3LLU); + KUNIT_EXPECT_EQ(test, ref.args[1], 4LLU); + + /* wrong index */ + error = fwnode_property_get_reference_args(node, "ref-1", NULL, + 0, 2, &ref); + KUNIT_EXPECT_NE(test, error, 0); + + fwnode_remove_software_node(node); + software_node_unregister_nodes(nodes); +} + +static struct kunit_case property_entry_test_cases[] = { + KUNIT_CASE(pe_test_uints), + KUNIT_CASE(pe_test_uint_arrays), + KUNIT_CASE(pe_test_strings), + KUNIT_CASE(pe_test_bool), + KUNIT_CASE(pe_test_move_inline_u8), + KUNIT_CASE(pe_test_move_inline_str), + KUNIT_CASE(pe_test_reference), + { } +}; + +static struct kunit_suite property_entry_test_suite = { + .name = "property-entry", + .test_cases = property_entry_test_cases, +}; + +kunit_test_suite(property_entry_test_suite); diff --git a/drivers/bcma/driver_chipcommon_b.c b/drivers/bcma/driver_chipcommon_b.c index 57f10b58b47c..c153c96a6145 100644 --- a/drivers/bcma/driver_chipcommon_b.c +++ b/drivers/bcma/driver_chipcommon_b.c @@ -48,7 +48,7 @@ int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb) return 0; ccb->setup_done = 1; - ccb->mii = ioremap_nocache(ccb->core->addr_s[1], BCMA_CORE_SIZE); + ccb->mii = ioremap(ccb->core->addr_s[1], BCMA_CORE_SIZE); if (!ccb->mii) return -ENOMEM; diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c index c42cec7c7ecc..88a93c266c19 100644 --- a/drivers/bcma/driver_pci_host.c +++ b/drivers/bcma/driver_pci_host.c @@ -115,7 +115,7 @@ static int bcma_extpci_read_config(struct bcma_drv_pci *pc, unsigned int dev, if (unlikely(!addr)) goto out; err = -ENOMEM; - mmio = ioremap_nocache(addr, sizeof(val)); + mmio = ioremap(addr, sizeof(val)); if (!mmio) goto out; @@ -180,7 +180,7 @@ static int bcma_extpci_write_config(struct bcma_drv_pci *pc, unsigned int dev, if (unlikely(!addr)) goto out; err = -ENOMEM; - mmio = ioremap_nocache(addr, sizeof(val)); + mmio = ioremap(addr, sizeof(val)); if (!mmio) goto out; @@ -515,7 +515,7 @@ void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc) /* Ok, ready to run, register it to the system. * The following needs change, if we want to port hostmode * to non-MIPS platform. 
*/ - io_map_base = (unsigned long)ioremap_nocache(pc_host->mem_resource.start, + io_map_base = (unsigned long)ioremap(pc_host->mem_resource.start, resource_size(&pc_host->mem_resource)); pc_host->pci_controller.io_map_base = io_map_base; set_io_port_base(pc_host->pci_controller.io_map_base); diff --git a/drivers/bcma/host_soc.c b/drivers/bcma/host_soc.c index c8073b509a2b..90d5bdc12e03 100644 --- a/drivers/bcma/host_soc.c +++ b/drivers/bcma/host_soc.c @@ -172,7 +172,7 @@ int __init bcma_host_soc_register(struct bcma_soc *soc) /* iomap only first core. We have to read some register on this core * to scan the bus. */ - bus->mmio = ioremap_nocache(BCMA_ADDR_BASE, BCMA_CORE_SIZE * 1); + bus->mmio = ioremap(BCMA_ADDR_BASE, BCMA_CORE_SIZE * 1); if (!bus->mmio) return -ENOMEM; diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c index 4a2d1b235fb5..fd546c51b076 100644 --- a/drivers/bcma/scan.c +++ b/drivers/bcma/scan.c @@ -425,11 +425,11 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr, } } if (bus->hosttype == BCMA_HOSTTYPE_SOC) { - core->io_addr = ioremap_nocache(core->addr, BCMA_CORE_SIZE); + core->io_addr = ioremap(core->addr, BCMA_CORE_SIZE); if (!core->io_addr) return -ENOMEM; if (core->wrap) { - core->io_wrap = ioremap_nocache(core->wrap, + core->io_wrap = ioremap(core->wrap, BCMA_CORE_SIZE); if (!core->io_wrap) { iounmap(core->io_addr); @@ -472,7 +472,7 @@ int bcma_bus_scan(struct bcma_bus *bus) erombase = bcma_scan_read32(bus, 0, BCMA_CC_EROM); if (bus->hosttype == BCMA_HOSTTYPE_SOC) { - eromptr = ioremap_nocache(erombase, BCMA_CORE_SIZE); + eromptr = ioremap(erombase, BCMA_CORE_SIZE); if (!eromptr) return -ENOMEM; } else { diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 57532465fb83..b4607dd96185 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -1296,10 +1296,10 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b mutex_unlock(&nbd->config_lock); ret = wait_event_interruptible(config->recv_wq, atomic_read(&config->recv_threads) == 0); - if (ret) { + if (ret) sock_shutdown(nbd); - flush_workqueue(nbd->recv_workq); - } + flush_workqueue(nbd->recv_workq); + mutex_lock(&nbd->config_lock); nbd_bdev_reset(bdev); /* user requested, ignore socket errors */ diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c index d4d88b581822..ed34785dd64b 100644 --- a/drivers/block/null_blk_zoned.c +++ b/drivers/block/null_blk_zoned.c @@ -129,11 +129,13 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector, return BLK_STS_IOERR; case BLK_ZONE_COND_EMPTY: case BLK_ZONE_COND_IMP_OPEN: + case BLK_ZONE_COND_EXP_OPEN: + case BLK_ZONE_COND_CLOSED: /* Writes must be at the write pointer position */ if (sector != zone->wp) return BLK_STS_IOERR; - if (zone->cond == BLK_ZONE_COND_EMPTY) + if (zone->cond != BLK_ZONE_COND_EXP_OPEN) zone->cond = BLK_ZONE_COND_IMP_OPEN; zone->wp += nr_sectors; @@ -186,7 +188,10 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op, if (zone->cond == BLK_ZONE_COND_FULL) return BLK_STS_IOERR; - zone->cond = BLK_ZONE_COND_CLOSED; + if (zone->wp == zone->start) + zone->cond = BLK_ZONE_COND_EMPTY; + else + zone->cond = BLK_ZONE_COND_CLOSED; break; case REQ_OP_ZONE_FINISH: if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index ee67bf929fac..861fc65a1b75 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -2707,7 +2707,7 @@ static const struct 
block_device_operations pktcdvd_ops = { .release = pkt_close, .ioctl = pkt_ioctl, #ifdef CONFIG_COMPAT - .ioctl = pkt_compat_ioctl, + .compat_ioctl = pkt_compat_ioctl, #endif .check_events = pkt_check_events, }; diff --git a/drivers/block/umem.c b/drivers/block/umem.c index 1f3f9e0f02a8..4eaf97d7a170 100644 --- a/drivers/block/umem.c +++ b/drivers/block/umem.c @@ -827,7 +827,7 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) goto failed_req_csr; } - card->csr_remap = ioremap_nocache(csr_base, csr_len); + card->csr_remap = ioremap(csr_base, csr_len); if (!card->csr_remap) { dev_printk(KERN_ERR, &card->dev->dev, "Unable to remap memory region\n"); diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index e8c5c54e1d26..4c5d99f87813 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -171,6 +171,15 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid) blkif->domid = domid; atomic_set(&blkif->refcnt, 1); init_completion(&blkif->drain_complete); + + /* + * Because freeing back to the cache may be deferred, it is not + * safe to unload the module (and hence destroy the cache) until + * this has completed. To prevent premature unloading, take an + * extra module reference here and release only when the object + * has been freed back to the cache. + */ + __module_get(THIS_MODULE); INIT_WORK(&blkif->free_work, xen_blkif_deferred_free); return blkif; @@ -181,6 +190,9 @@ static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref, { int err; struct xen_blkif *blkif = ring->blkif; + const struct blkif_common_sring *sring_common; + RING_IDX rsp_prod, req_prod; + unsigned int size; /* Already connected through? */ if (ring->irq) @@ -191,46 +203,62 @@ static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref, if (err < 0) return err; + sring_common = (struct blkif_common_sring *)ring->blk_ring; + rsp_prod = READ_ONCE(sring_common->rsp_prod); + req_prod = READ_ONCE(sring_common->req_prod); + switch (blkif->blk_protocol) { case BLKIF_PROTOCOL_NATIVE: { - struct blkif_sring *sring; - sring = (struct blkif_sring *)ring->blk_ring; - BACK_RING_INIT(&ring->blk_rings.native, sring, - XEN_PAGE_SIZE * nr_grefs); + struct blkif_sring *sring_native = + (struct blkif_sring *)ring->blk_ring; + + BACK_RING_ATTACH(&ring->blk_rings.native, sring_native, + rsp_prod, XEN_PAGE_SIZE * nr_grefs); + size = __RING_SIZE(sring_native, XEN_PAGE_SIZE * nr_grefs); break; } case BLKIF_PROTOCOL_X86_32: { - struct blkif_x86_32_sring *sring_x86_32; - sring_x86_32 = (struct blkif_x86_32_sring *)ring->blk_ring; - BACK_RING_INIT(&ring->blk_rings.x86_32, sring_x86_32, - XEN_PAGE_SIZE * nr_grefs); + struct blkif_x86_32_sring *sring_x86_32 = + (struct blkif_x86_32_sring *)ring->blk_ring; + + BACK_RING_ATTACH(&ring->blk_rings.x86_32, sring_x86_32, + rsp_prod, XEN_PAGE_SIZE * nr_grefs); + size = __RING_SIZE(sring_x86_32, XEN_PAGE_SIZE * nr_grefs); break; } case BLKIF_PROTOCOL_X86_64: { - struct blkif_x86_64_sring *sring_x86_64; - sring_x86_64 = (struct blkif_x86_64_sring *)ring->blk_ring; - BACK_RING_INIT(&ring->blk_rings.x86_64, sring_x86_64, - XEN_PAGE_SIZE * nr_grefs); + struct blkif_x86_64_sring *sring_x86_64 = + (struct blkif_x86_64_sring *)ring->blk_ring; + + BACK_RING_ATTACH(&ring->blk_rings.x86_64, sring_x86_64, + rsp_prod, XEN_PAGE_SIZE * nr_grefs); + size = __RING_SIZE(sring_x86_64, XEN_PAGE_SIZE * nr_grefs); break; } default: BUG(); } + err = -EIO; + if (req_prod - rsp_prod > size) + goto fail; + err = 
bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn, xen_blkif_be_int, 0, "blkif-backend", ring); - if (err < 0) { - xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring); - ring->blk_rings.common.sring = NULL; - return err; - } + if (err < 0) + goto fail; ring->irq = err; return 0; + +fail: + xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring); + ring->blk_rings.common.sring = NULL; + return err; } static int xen_blkif_disconnect(struct xen_blkif *blkif) @@ -320,6 +348,7 @@ static void xen_blkif_free(struct xen_blkif *blkif) /* Make sure everything is drained before shutting down */ kmem_cache_free(xen_blkif_cachep, blkif); + module_put(THIS_MODULE); } int __init xen_blkif_interface_init(void) @@ -1121,7 +1150,8 @@ static struct xenbus_driver xen_blkbk_driver = { .ids = xen_blkbk_ids, .probe = xen_blkbk_probe, .remove = xen_blkbk_remove, - .otherend_changed = frontend_changed + .otherend_changed = frontend_changed, + .allow_rebind = true, }; int xen_blkif_xenbus_init(void) diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index a74d03913822..c02be06c5299 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -1113,8 +1113,8 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity, if (!VDEV_IS_EXTENDED(info->vdevice)) { err = xen_translate_vdev(info->vdevice, &minor, &offset); if (err) - return err; - nr_parts = PARTS_PER_DISK; + return err; + nr_parts = PARTS_PER_DISK; } else { minor = BLKIF_MINOR_EXT(info->vdevice); nr_parts = PARTS_PER_EXT_DISK; diff --git a/drivers/bus/fsl-mc/mc-io.c b/drivers/bus/fsl-mc/mc-io.c index d9629fc13a15..6ae48ad80409 100644 --- a/drivers/bus/fsl-mc/mc-io.c +++ b/drivers/bus/fsl-mc/mc-io.c @@ -97,12 +97,12 @@ int __must_check fsl_create_mc_io(struct device *dev, return -EBUSY; } - mc_portal_virt_addr = devm_ioremap_nocache(dev, + mc_portal_virt_addr = devm_ioremap(dev, mc_portal_phys_addr, mc_portal_size); if (!mc_portal_virt_addr) { dev_err(dev, - "devm_ioremap_nocache failed for MC portal %pa\n", + "devm_ioremap failed for MC portal %pa\n", &mc_portal_phys_addr); return -ENXIO; } diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 56887c6877a7..ccb44fe790a7 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -343,6 +343,12 @@ static int sysc_get_clocks(struct sysc *ddata) return -EINVAL; } + /* Always add a slot for main clocks fck and ick even if unused */ + if (!nr_fck) + ddata->nr_clocks++; + if (!nr_ick) + ddata->nr_clocks++; + ddata->clocks = devm_kcalloc(ddata->dev, ddata->nr_clocks, sizeof(*ddata->clocks), GFP_KERNEL); @@ -421,7 +427,7 @@ static int sysc_enable_opt_clocks(struct sysc *ddata) struct clk *clock; int i, error; - if (!ddata->clocks) + if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1) return 0; for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) { @@ -455,7 +461,7 @@ static void sysc_disable_opt_clocks(struct sysc *ddata) struct clk *clock; int i; - if (!ddata->clocks) + if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1) return; for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) { @@ -981,7 +987,8 @@ static int sysc_disable_module(struct device *dev) return ret; } - if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_MSTANDBY) + if (ddata->cfg.quirks & (SYSC_QUIRK_SWSUP_MSTANDBY) || + ddata->cfg.quirks & (SYSC_QUIRK_FORCE_MSTANDBY)) best_mode = SYSC_IDLE_FORCE; reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift); @@ -1583,6 +1590,10 @@ static int sysc_reset(struct sysc *ddata) sysc_val |= sysc_mask; sysc_write(ddata, sysc_offset, sysc_val); + if 
(ddata->cfg.srst_udelay) + usleep_range(ddata->cfg.srst_udelay, + ddata->cfg.srst_udelay * 2); + if (ddata->clk_enable_quirk) ddata->clk_enable_quirk(ddata); diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c index ab154a75acf0..9e84239f88d4 100644 --- a/drivers/char/agp/generic.c +++ b/drivers/char/agp/generic.c @@ -941,7 +941,7 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge) bridge->gatt_table = (u32 __iomem *)table; #else - bridge->gatt_table = ioremap_nocache(virt_to_phys(table), + bridge->gatt_table = ioremap(virt_to_phys(table), (PAGE_SIZE * (1 << page_order))); bridge->driver->cache_flush(); #endif diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index c6271ce250b3..66a62d17a3f5 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -1087,7 +1087,7 @@ static void intel_i9xx_setup_flush(void) } if (intel_private.ifp_resource.start) - intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE); + intel_private.i9xx_flush_page = ioremap(intel_private.ifp_resource.start, PAGE_SIZE); if (!intel_private.i9xx_flush_page) dev_err(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing\n"); diff --git a/drivers/char/agp/isoch.c b/drivers/char/agp/isoch.c index 31c374b1b91b..7ecf20a6d19c 100644 --- a/drivers/char/agp/isoch.c +++ b/drivers/char/agp/isoch.c @@ -84,7 +84,6 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge, unsigned int cdev = 0; u32 mnistat, tnistat, tstatus, mcmd; u16 tnicmd, mnicmd; - u8 mcapndx; u32 tot_bw = 0, tot_n = 0, tot_rq = 0, y_max, rq_isoch, rq_async; u32 step, rem, rem_isoch, rem_async; int ret = 0; @@ -138,8 +137,6 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge, cur = list_entry(pos, struct agp_3_5_dev, list); dev = cur->dev; - mcapndx = cur->capndx; - pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &mnistat); master[cdev].maxbw = (mnistat >> 16) & 0xff; @@ -251,8 +248,6 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge, cur = master[cdev].dev; dev = cur->dev; - mcapndx = cur->capndx; - master[cdev].rq += (cdev == ndevs - 1) ? (rem_async + rem_isoch) : step; @@ -319,7 +314,7 @@ int agp_3_5_enable(struct agp_bridge_data *bridge) { struct pci_dev *td = bridge->dev, *dev = NULL; u8 mcapndx; - u32 isoch, arqsz; + u32 isoch; u32 tstatus, mstatus, ncapid; u32 mmajor; u16 mpstat; @@ -334,8 +329,6 @@ int agp_3_5_enable(struct agp_bridge_data *bridge) if (isoch == 0) /* isoch xfers not available, bail out. */ return -ENODEV; - arqsz = (tstatus >> 13) & 0x7; - /* * Allocate a head for our AGP 3.5 device list * (multiple AGP v3 devices are allowed behind a single bridge). 
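[Editor's note] The recurring change in the hunks above and below (pata_octeon_cf, pata_rb532_cf, eni, bcma, agp, umem, applicom, intel-rng, octeon-rng, fsl-mc, clk-rz) is the tree-wide retirement of ioremap_nocache() and devm_ioremap_nocache(): plain ioremap()/devm_ioremap() already returns an uncached mapping on every architecture, so each hunk is a pure rename with no behavioural change. A minimal sketch of the post-conversion idiom follows; it is illustrative only, not part of the patch, and the foo_* names are hypothetical.

/* Illustrative sketch (not from this series): mapping MMIO registers in a
 * platform-device probe after the ioremap_nocache() -> ioremap() rename.
 */
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static void __iomem *foo_map_regs(struct platform_device *pdev)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return NULL;

	/*
	 * Formerly devm_ioremap_nocache(&pdev->dev, ...): devm_ioremap()
	 * returns the same uncached mapping and is unmapped automatically
	 * when the device is unbound, so no iounmap() is needed here.
	 */
	return devm_ioremap(&pdev->dev, res->start, resource_size(res));
}

The bare ioremap() conversions in eni, agp and applicom differ only in keeping their explicit iounmap() on the error and teardown paths.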
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c index eb108b3c619a..51121a4b82c7 100644 --- a/drivers/char/applicom.c +++ b/drivers/char/applicom.c @@ -204,7 +204,7 @@ static int __init applicom_init(void) if (pci_enable_device(dev)) return -EIO; - RamIO = ioremap_nocache(pci_resource_start(dev, 0), LEN_RAM_IO); + RamIO = ioremap(pci_resource_start(dev, 0), LEN_RAM_IO); if (!RamIO) { printk(KERN_INFO "ac.o: Failed to ioremap PCI memory " @@ -259,7 +259,7 @@ static int __init applicom_init(void) /* Now try the specified ISA cards */ for (i = 0; i < MAX_ISA_BOARD; i++) { - RamIO = ioremap_nocache(mem + (LEN_RAM_IO * i), LEN_RAM_IO); + RamIO = ioremap(mem + (LEN_RAM_IO * i), LEN_RAM_IO); if (!RamIO) { printk(KERN_INFO "ac.o: Failed to ioremap the ISA card's memory space (slot #%d)\n", i + 1); diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c index 290c880266bf..9f205bd1acc0 100644 --- a/drivers/char/hw_random/intel-rng.c +++ b/drivers/char/hw_random/intel-rng.c @@ -317,7 +317,7 @@ PFX "RNG, try using the 'no_fwh_detect' option.\n"; return -EBUSY; } - intel_rng_hw->mem = ioremap_nocache(INTEL_FWH_ADDR, INTEL_FWH_ADDR_LEN); + intel_rng_hw->mem = ioremap(INTEL_FWH_ADDR, INTEL_FWH_ADDR_LEN); if (intel_rng_hw->mem == NULL) return -EBUSY; diff --git a/drivers/char/hw_random/octeon-rng.c b/drivers/char/hw_random/octeon-rng.c index 8c78aa090492..7be8067ac4e8 100644 --- a/drivers/char/hw_random/octeon-rng.c +++ b/drivers/char/hw_random/octeon-rng.c @@ -81,13 +81,13 @@ static int octeon_rng_probe(struct platform_device *pdev) return -ENOENT; - rng->control_status = devm_ioremap_nocache(&pdev->dev, + rng->control_status = devm_ioremap(&pdev->dev, res_ports->start, sizeof(u64)); if (!rng->control_status) return -ENOENT; - rng->result = devm_ioremap_nocache(&pdev->dev, + rng->result = devm_ioremap(&pdev->dev, res_result->start, sizeof(u64)); if (!rng->result) diff --git a/drivers/char/random.c b/drivers/char/random.c index 909e0c3d82ea..cda12933a17d 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -2175,6 +2175,7 @@ const struct file_operations urandom_fops = { .read = urandom_read, .write = random_write, .unlocked_ioctl = random_ioctl, + .compat_ioctl = compat_ptr_ioctl, .fasync = random_fasync, .llseek = noop_llseek, }; diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c index 2ec47a69a2a6..87f449340202 100644 --- a/drivers/char/tpm/tpm-dev-common.c +++ b/drivers/char/tpm/tpm-dev-common.c @@ -61,6 +61,12 @@ static void tpm_dev_async_work(struct work_struct *work) mutex_lock(&priv->buffer_mutex); priv->command_enqueued = false; + ret = tpm_try_get_ops(priv->chip); + if (ret) { + priv->response_length = ret; + goto out; + } + ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer, sizeof(priv->data_buffer)); tpm_put_ops(priv->chip); @@ -68,6 +74,7 @@ static void tpm_dev_async_work(struct work_struct *work) priv->response_length = ret; mod_timer(&priv->user_read_timer, jiffies + (120 * HZ)); } +out: mutex_unlock(&priv->buffer_mutex); wake_up_interruptible(&priv->async_wait); } @@ -123,7 +130,7 @@ ssize_t tpm_common_read(struct file *file, char __user *buf, priv->response_read = true; ret_size = min_t(ssize_t, size, priv->response_length); - if (!ret_size) { + if (ret_size <= 0) { priv->response_length = 0; goto out; } @@ -204,6 +211,7 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf, if (file->f_flags & O_NONBLOCK) { priv->command_enqueued = true; queue_work(tpm_dev_wq, 
&priv->async_work); + tpm_put_ops(priv->chip); mutex_unlock(&priv->buffer_mutex); return size; } diff --git a/drivers/char/tpm/tpm-dev.h b/drivers/char/tpm/tpm-dev.h index 1089fc0bb290..f3742bcc73e3 100644 --- a/drivers/char/tpm/tpm-dev.h +++ b/drivers/char/tpm/tpm-dev.h @@ -14,7 +14,7 @@ struct file_priv { struct work_struct timeout_work; struct work_struct async_work; wait_queue_head_t async_wait; - size_t response_length; + ssize_t response_length; bool response_read; bool command_enqueued; diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c index 3b53b3e5ec3e..d52bf4df0bca 100644 --- a/drivers/char/tpm/tpm-sysfs.c +++ b/drivers/char/tpm/tpm-sysfs.c @@ -310,7 +310,17 @@ static ssize_t timeouts_show(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR_RO(timeouts); -static struct attribute *tpm_dev_attrs[] = { +static ssize_t tpm_version_major_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct tpm_chip *chip = to_tpm_chip(dev); + + return sprintf(buf, "%s\n", chip->flags & TPM_CHIP_FLAG_TPM2 + ? "2" : "1"); +} +static DEVICE_ATTR_RO(tpm_version_major); + +static struct attribute *tpm1_dev_attrs[] = { &dev_attr_pubek.attr, &dev_attr_pcrs.attr, &dev_attr_enabled.attr, @@ -321,18 +331,28 @@ static struct attribute *tpm_dev_attrs[] = { &dev_attr_cancel.attr, &dev_attr_durations.attr, &dev_attr_timeouts.attr, + &dev_attr_tpm_version_major.attr, NULL, }; -static const struct attribute_group tpm_dev_group = { - .attrs = tpm_dev_attrs, +static struct attribute *tpm2_dev_attrs[] = { + &dev_attr_tpm_version_major.attr, + NULL +}; + +static const struct attribute_group tpm1_dev_group = { + .attrs = tpm1_dev_attrs, +}; + +static const struct attribute_group tpm2_dev_group = { + .attrs = tpm2_dev_attrs, }; void tpm_sysfs_add_device(struct tpm_chip *chip) { - if (chip->flags & TPM_CHIP_FLAG_TPM2) - return; - WARN_ON(chip->groups_cnt != 0); - chip->groups[chip->groups_cnt++] = &tpm_dev_group; + if (chip->flags & TPM_CHIP_FLAG_TPM2) + chip->groups[chip->groups_cnt++] = &tpm2_dev_group; + else + chip->groups[chip->groups_cnt++] = &tpm1_dev_group; } diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index b9e1547be6b5..5620747da0cf 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -218,7 +218,6 @@ int tpm2_pcr_read(struct tpm_chip *chip, u32 pcr_idx, int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, struct tpm_digest *digests); int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max); -void tpm2_flush_context(struct tpm_chip *chip, u32 handle); ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, u32 *value, const char *desc); diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c index fdb457704aa7..13696deceae8 100644 --- a/drivers/char/tpm/tpm2-cmd.c +++ b/drivers/char/tpm/tpm2-cmd.c @@ -362,6 +362,7 @@ void tpm2_flush_context(struct tpm_chip *chip, u32 handle) tpm_transmit_cmd(chip, &buf, 0, "flushing context"); tpm_buf_destroy(&buf); } +EXPORT_SYMBOL_GPL(tpm2_flush_context); struct tpm2_get_cap_out { u8 more_data; diff --git a/drivers/char/tpm/tpm_ftpm_tee.c b/drivers/char/tpm/tpm_ftpm_tee.c index 6640a14dbe48..22bf553ccf9d 100644 --- a/drivers/char/tpm/tpm_ftpm_tee.c +++ b/drivers/char/tpm/tpm_ftpm_tee.c @@ -32,7 +32,7 @@ static const uuid_t ftpm_ta_uuid = 0x82, 0xCB, 0x34, 0x3F, 0xB7, 0xF3, 0x78, 0x96); /** - * ftpm_tee_tpm_op_recv - retrieve fTPM response. + * ftpm_tee_tpm_op_recv() - retrieve fTPM response. 
* @chip: the tpm_chip description as specified in driver/char/tpm/tpm.h. * @buf: the buffer to store data. * @count: the number of bytes to read. @@ -61,7 +61,7 @@ static int ftpm_tee_tpm_op_recv(struct tpm_chip *chip, u8 *buf, size_t count) } /** - * ftpm_tee_tpm_op_send - send TPM commands through the TEE shared memory. + * ftpm_tee_tpm_op_send() - send TPM commands through the TEE shared memory. * @chip: the tpm_chip description as specified in driver/char/tpm/tpm.h * @buf: the buffer to send. * @len: the number of bytes to send. @@ -208,7 +208,7 @@ static int ftpm_tee_match(struct tee_ioctl_version_data *ver, const void *data) } /** - * ftpm_tee_probe - initialize the fTPM + * ftpm_tee_probe() - initialize the fTPM * @pdev: the platform_device description. * * Return: @@ -298,7 +298,7 @@ out_tee_session: } /** - * ftpm_tee_remove - remove the TPM device + * ftpm_tee_remove() - remove the TPM device * @pdev: the platform_device description. * * Return: @@ -328,6 +328,19 @@ static int ftpm_tee_remove(struct platform_device *pdev) return 0; } +/** + * ftpm_tee_shutdown() - shutdown the TPM device + * @pdev: the platform_device description. + */ +static void ftpm_tee_shutdown(struct platform_device *pdev) +{ + struct ftpm_tee_private *pvt_data = dev_get_drvdata(&pdev->dev); + + tee_shm_free(pvt_data->shm); + tee_client_close_session(pvt_data->ctx, pvt_data->session); + tee_client_close_context(pvt_data->ctx); +} + static const struct of_device_id of_ftpm_tee_ids[] = { { .compatible = "microsoft,ftpm" }, { } @@ -341,6 +354,7 @@ static struct platform_driver ftpm_tee_driver = { }, .probe = ftpm_tee_probe, .remove = ftpm_tee_remove, + .shutdown = ftpm_tee_shutdown, }; module_platform_driver(ftpm_tee_driver); diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index 8af2cee1a762..27c6ca031e23 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -1059,8 +1059,6 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, goto out_err; } - tpm_chip_start(chip); - chip->flags |= TPM_CHIP_FLAG_IRQ; if (irq) { tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED, irq); @@ -1070,7 +1068,6 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, } else { tpm_tis_probe_irq(chip, intmask); } - tpm_chip_stop(chip); } rc = tpm_chip_register(chip); diff --git a/drivers/clk/at91/at91sam9260.c b/drivers/clk/at91/at91sam9260.c index 0aabe49aed09..a9d4234758d7 100644 --- a/drivers/clk/at91/at91sam9260.c +++ b/drivers/clk/at91/at91sam9260.c @@ -348,7 +348,7 @@ static void __init at91sam926x_pmc_setup(struct device_node *np, return; mainxtal_name = of_clk_get_parent_name(np, i); - regmap = syscon_node_to_regmap(np); + regmap = device_node_to_regmap(np); if (IS_ERR(regmap)) return; diff --git a/drivers/clk/at91/at91sam9rl.c b/drivers/clk/at91/at91sam9rl.c index 0ac34cdaa106..77fe83a73bf4 100644 --- a/drivers/clk/at91/at91sam9rl.c +++ b/drivers/clk/at91/at91sam9rl.c @@ -83,7 +83,7 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np) return; mainxtal_name = of_clk_get_parent_name(np, i); - regmap = syscon_node_to_regmap(np); + regmap = device_node_to_regmap(np); if (IS_ERR(regmap)) return; diff --git a/drivers/clk/at91/at91sam9x5.c b/drivers/clk/at91/at91sam9x5.c index 0855f3a80cc7..086cf0b4955c 100644 --- a/drivers/clk/at91/at91sam9x5.c +++ b/drivers/clk/at91/at91sam9x5.c @@ -146,7 +146,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np, return; mainxtal_name = 
of_clk_get_parent_name(np, i); - regmap = syscon_node_to_regmap(np); + regmap = device_node_to_regmap(np); if (IS_ERR(regmap)) return; diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c index 0b03cfae3a9d..b71515acdec1 100644 --- a/drivers/clk/at91/pmc.c +++ b/drivers/clk/at91/pmc.c @@ -275,7 +275,7 @@ static int __init pmc_register_ops(void) np = of_find_matching_node(NULL, sama5d2_pmc_dt_ids); - pmcreg = syscon_node_to_regmap(np); + pmcreg = device_node_to_regmap(np); if (IS_ERR(pmcreg)) return PTR_ERR(pmcreg); diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c index 0de1108737db..ff7e3f727082 100644 --- a/drivers/clk/at91/sama5d2.c +++ b/drivers/clk/at91/sama5d2.c @@ -162,7 +162,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np) return; mainxtal_name = of_clk_get_parent_name(np, i); - regmap = syscon_node_to_regmap(np); + regmap = device_node_to_regmap(np); if (IS_ERR(regmap)) return; diff --git a/drivers/clk/at91/sama5d4.c b/drivers/clk/at91/sama5d4.c index 25b156d4e645..a6dee4a3b6e4 100644 --- a/drivers/clk/at91/sama5d4.c +++ b/drivers/clk/at91/sama5d4.c @@ -136,7 +136,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np) return; mainxtal_name = of_clk_get_parent_name(np, i); - regmap = syscon_node_to_regmap(np); + regmap = device_node_to_regmap(np); if (IS_ERR(regmap)) return; diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index b68e200829f2..772258de2d1f 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -3249,6 +3249,34 @@ static inline void clk_debug_unregister(struct clk_core *core) } #endif +static void clk_core_reparent_orphans_nolock(void) +{ + struct clk_core *orphan; + struct hlist_node *tmp2; + + /* + * walk the list of orphan clocks and reparent any that newly finds a + * parent. + */ + hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) { + struct clk_core *parent = __clk_init_parent(orphan); + + /* + * We need to use __clk_set_parent_before() and _after() + * to properly migrate any prepare/enable count of the orphan + * clock. This is important for CLK_IS_CRITICAL clocks, which + * are enabled during init but might not have a parent yet. + */ + if (parent) { + /* update the clk tree topology */ + __clk_set_parent_before(orphan, parent); + __clk_set_parent_after(orphan, parent, NULL); + __clk_recalc_accuracies(orphan); + __clk_recalc_rates(orphan, 0); + } + } +} + /** * __clk_core_init - initialize the data structures in a struct clk_core * @core: clk_core being initialized @@ -3259,8 +3287,6 @@ static inline void clk_debug_unregister(struct clk_core *core) static int __clk_core_init(struct clk_core *core) { int ret; - struct clk_core *orphan; - struct hlist_node *tmp2; unsigned long rate; if (!core) @@ -3400,34 +3426,21 @@ static int __clk_core_init(struct clk_core *core) if (core->flags & CLK_IS_CRITICAL) { unsigned long flags; - clk_core_prepare(core); + ret = clk_core_prepare(core); + if (ret) + goto out; flags = clk_enable_lock(); - clk_core_enable(core); + ret = clk_core_enable(core); clk_enable_unlock(flags); + if (ret) { + clk_core_unprepare(core); + goto out; + } } - /* - * walk the list of orphan clocks and reparent any that newly finds a - * parent. - */ - hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) { - struct clk_core *parent = __clk_init_parent(orphan); + clk_core_reparent_orphans_nolock(); - /* - * We need to use __clk_set_parent_before() and _after() to - * to properly migrate any prepare/enable count of the orphan - * clock.
This is important for CLK_IS_CRITICAL clocks, which - * are enabled during init but might not have a parent yet. - */ - if (parent) { - /* update the clk tree topology */ - __clk_set_parent_before(orphan, parent); - __clk_set_parent_after(orphan, parent, NULL); - __clk_recalc_accuracies(orphan); - __clk_recalc_rates(orphan, 0); - } - } kref_init(&core->ref); out: @@ -4179,6 +4192,13 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb) EXPORT_SYMBOL_GPL(clk_notifier_unregister); #ifdef CONFIG_OF +static void clk_core_reparent_orphans(void) +{ + clk_prepare_lock(); + clk_core_reparent_orphans_nolock(); + clk_prepare_unlock(); +} + /** * struct of_clk_provider - Clock provider registration structure * @link: Entry in global list of clock providers @@ -4274,6 +4294,8 @@ int of_clk_add_provider(struct device_node *np, mutex_unlock(&of_clk_mutex); pr_debug("Added clock from %pOF\n", np); + clk_core_reparent_orphans(); + ret = of_clk_set_defaults(np, true); if (ret < 0) of_clk_del_provider(np); @@ -4309,6 +4331,8 @@ int of_clk_add_hw_provider(struct device_node *np, mutex_unlock(&of_clk_mutex); pr_debug("Added clk_hw provider from %pOF\n", np); + clk_core_reparent_orphans(); + ret = of_clk_set_defaults(np, true); if (ret < 0) of_clk_del_provider(np); diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c index 388bdb94f841..d3486ee79ab5 100644 --- a/drivers/clk/imx/clk-composite-8m.c +++ b/drivers/clk/imx/clk-composite-8m.c @@ -142,6 +142,7 @@ struct clk *imx8m_clk_composite_flags(const char *name, mux->reg = reg; mux->shift = PCG_PCS_SHIFT; mux->mask = PCG_PCS_MASK; + mux->lock = &imx_ccm_lock; div = kzalloc(sizeof(*div), GFP_KERNEL); if (!div) @@ -161,6 +162,7 @@ struct clk *imx8m_clk_composite_flags(const char *name, gate_hw = &gate->hw; gate->reg = reg; gate->bit_idx = PCG_CGC_SHIFT; + gate->lock = &imx_ccm_lock; hw = clk_hw_register_composite(NULL, name, parent_names, num_parents, mux_hw, &clk_mux_ops, div_hw, diff --git a/drivers/clk/imx/clk-imx7ulp.c b/drivers/clk/imx/clk-imx7ulp.c index 3fdf3d494f0a..281191b55b3a 100644 --- a/drivers/clk/imx/clk-imx7ulp.c +++ b/drivers/clk/imx/clk-imx7ulp.c @@ -40,6 +40,7 @@ static const struct clk_div_table ulp_div_table[] = { { .val = 5, .div = 16, }, { .val = 6, .div = 32, }, { .val = 7, .div = 64, }, + { /* sentinel */ }, }; static const int pcc2_uart_clk_ids[] __initconst = { diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c index 5c458199060a..3636c8035c7d 100644 --- a/drivers/clk/imx/clk-pll14xx.c +++ b/drivers/clk/imx/clk-pll14xx.c @@ -159,7 +159,7 @@ static int clk_pll14xx_wait_lock(struct clk_pll14xx *pll) { u32 val; - return readl_poll_timeout(pll->base, val, val & LOCK_TIMEOUT_US, 0, + return readl_poll_timeout(pll->base, val, val & LOCK_STATUS, 0, LOCK_TIMEOUT_US); } diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c index a60a1be937ad..b4a95cbbda98 100644 --- a/drivers/clk/mmp/clk-of-mmp2.c +++ b/drivers/clk/mmp/clk-of-mmp2.c @@ -134,7 +134,7 @@ static DEFINE_SPINLOCK(ssp3_lock); static const char *ssp_parent_names[] = {"vctcxo_4", "vctcxo_2", "vctcxo", "pll1_16"}; static DEFINE_SPINLOCK(timer_lock); -static const char *timer_parent_names[] = {"clk32", "vctcxo_2", "vctcxo_4", "vctcxo"}; +static const char *timer_parent_names[] = {"clk32", "vctcxo_4", "vctcxo_2", "vctcxo"}; static DEFINE_SPINLOCK(reset_lock); diff --git a/drivers/clk/qcom/gcc-sc7180.c b/drivers/clk/qcom/gcc-sc7180.c index 38424e63bcae..7f59fb8da033 100644 --- 
a/drivers/clk/qcom/gcc-sc7180.c +++ b/drivers/clk/qcom/gcc-sc7180.c @@ -2186,7 +2186,8 @@ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = { .pd = { .name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc", }, - .pwrsts = PWRSTS_OFF_ON | VOTABLE, + .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = { @@ -2194,7 +2195,8 @@ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = { .pd = { .name = "hlos1_vote_mmnoc_mmu_tbu_sf_gdsc", }, - .pwrsts = PWRSTS_OFF_ON | VOTABLE, + .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct gdsc *gcc_sc7180_gdscs[] = { diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c index f7b370f3acef..f6ce888098be 100644 --- a/drivers/clk/qcom/gcc-sdm845.c +++ b/drivers/clk/qcom/gcc-sdm845.c @@ -3255,6 +3255,7 @@ static struct gdsc hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc = { .name = "hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct gdsc hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc = { @@ -3263,6 +3264,7 @@ static struct gdsc hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc = { .name = "hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct gdsc hlos1_vote_aggre_noc_mmu_tbu1_gdsc = { @@ -3271,6 +3273,7 @@ static struct gdsc hlos1_vote_aggre_noc_mmu_tbu1_gdsc = { .name = "hlos1_vote_aggre_noc_mmu_tbu1_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct gdsc hlos1_vote_aggre_noc_mmu_tbu2_gdsc = { @@ -3279,6 +3282,7 @@ static struct gdsc hlos1_vote_aggre_noc_mmu_tbu2_gdsc = { .name = "hlos1_vote_aggre_noc_mmu_tbu2_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = { @@ -3287,6 +3291,7 @@ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = { .name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc = { @@ -3295,6 +3300,7 @@ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc = { .name = "hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = { @@ -3303,6 +3309,7 @@ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = { .name = "hlos1_vote_mmnoc_mmu_tbu_sf_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct clk_regmap *gcc_sdm845_clocks[] = { diff --git a/drivers/clk/qcom/gpucc-msm8998.c b/drivers/clk/qcom/gpucc-msm8998.c index e5e2492b20c5..9b3923af02a1 100644 --- a/drivers/clk/qcom/gpucc-msm8998.c +++ b/drivers/clk/qcom/gpucc-msm8998.c @@ -242,10 +242,12 @@ static struct clk_branch gfx3d_isense_clk = { static struct gdsc gpu_cx_gdsc = { .gdscr = 0x1004, + .gds_hw_ctrl = 0x1008, .pd = { .name = "gpu_cx", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, }; static struct gdsc gpu_gx_gdsc = { diff --git a/drivers/clk/renesas/clk-rz.c b/drivers/clk/renesas/clk-rz.c index fbc34beafc78..7b703f14e20b 100644 --- a/drivers/clk/renesas/clk-rz.c +++ b/drivers/clk/renesas/clk-rz.c @@ -37,8 +37,8 @@ static u16 __init rz_cpg_read_mode_pins(void) void __iomem *ppr0, *pibc0; u16 modes; - ppr0 = ioremap_nocache(PPR0, 2); - pibc0 = ioremap_nocache(PIBC0, 2); + ppr0 = ioremap(PPR0, 2); + pibc0 = ioremap(PIBC0, 2); BUG_ON(!ppr0 || !pibc0); iowrite16(4, pibc0); /* enable input buffer */ modes = ioread16(ppr0); diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c index 3a991ca1ee36..c9e5a1fb6653 100644 --- 
a/drivers/clk/samsung/clk-exynos5420.c +++ b/drivers/clk/samsung/clk-exynos5420.c @@ -12,6 +12,7 @@ #include <linux/clk-provider.h> #include <linux/of.h> #include <linux/of_address.h> +#include <linux/clk.h> #include "clk.h" #include "clk-cpu.h" @@ -1646,6 +1647,13 @@ static void __init exynos5x_clk_init(struct device_node *np, exynos5x_subcmus); } + /* + * Keep top part of G3D clock path enabled permanently to ensure + * that the internal busses get their clock regardless of the + * main G3D clock enablement status. + */ + clk_prepare_enable(__clk_lookup("mout_sw_aclk_g3d")); + samsung_clk_of_add_provider(np, ctx); } diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c index 45a1ed3fe674..50f8d1bc7046 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c @@ -23,9 +23,9 @@ */ static const char * const ar100_r_apb2_parents[] = { "osc24M", "osc32k", - "pll-periph0", "iosc" }; + "iosc", "pll-periph0" }; static const struct ccu_mux_var_prediv ar100_r_apb2_predivs[] = { - { .index = 2, .shift = 0, .width = 5 }, + { .index = 3, .shift = 0, .width = 5 }, }; static struct ccu_div ar100_clk = { @@ -51,17 +51,7 @@ static struct ccu_div ar100_clk = { static CLK_FIXED_FACTOR_HW(r_ahb_clk, "r-ahb", &ar100_clk.common.hw, 1, 1, 0); -static struct ccu_div r_apb1_clk = { - .div = _SUNXI_CCU_DIV(0, 2), - - .common = { - .reg = 0x00c, - .hw.init = CLK_HW_INIT("r-apb1", - "r-ahb", - &ccu_div_ops, - 0), - }, -}; +static SUNXI_CCU_M(r_apb1_clk, "r-apb1", "r-ahb", 0x00c, 0, 2, 0); static struct ccu_div r_apb2_clk = { .div = _SUNXI_CCU_DIV_FLAGS(8, 2, CLK_DIVIDER_POWER_OF_TWO), diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r.c b/drivers/clk/sunxi-ng/ccu-sun8i-r.c index 4646fdc61053..4c8c491b87c2 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-r.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-r.c @@ -51,19 +51,7 @@ static struct ccu_div ar100_clk = { static CLK_FIXED_FACTOR_HW(ahb0_clk, "ahb0", &ar100_clk.common.hw, 1, 1, 0); -static struct ccu_div apb0_clk = { - .div = _SUNXI_CCU_DIV_FLAGS(0, 2, CLK_DIVIDER_POWER_OF_TWO), - - .common = { - .reg = 0x0c, - .hw.init = CLK_HW_INIT_HW("apb0", - &ahb0_clk.hw, - &ccu_div_ops, - 0), - }, -}; - -static SUNXI_CCU_M(a83t_apb0_clk, "apb0", "ahb0", 0x0c, 0, 2, 0); +static SUNXI_CCU_M(apb0_clk, "apb0", "ahb0", 0x0c, 0, 2, 0); /* * Define the parent as an array that can be reused to save space @@ -127,7 +115,7 @@ static struct ccu_mp a83t_ir_clk = { static struct ccu_common *sun8i_a83t_r_ccu_clks[] = { &ar100_clk.common, - &a83t_apb0_clk.common, + &apb0_clk.common, &apb0_pio_clk.common, &apb0_ir_clk.common, &apb0_timer_clk.common, @@ -167,7 +155,7 @@ static struct clk_hw_onecell_data sun8i_a83t_r_hw_clks = { .hws = { [CLK_AR100] = &ar100_clk.common.hw, [CLK_AHB0] = &ahb0_clk.hw, - [CLK_APB0] = &a83t_apb0_clk.common.hw, + [CLK_APB0] = &apb0_clk.common.hw, [CLK_APB0_PIO] = &apb0_pio_clk.common.hw, [CLK_APB0_IR] = &apb0_ir_clk.common.hw, [CLK_APB0_TIMER] = &apb0_timer_clk.common.hw, @@ -282,9 +270,6 @@ static void __init sunxi_r_ccu_init(struct device_node *node, static void __init sun8i_a83t_r_ccu_setup(struct device_node *node) { - /* Fix apb0 bus gate parents here */ - apb0_gate_parent[0] = &a83t_apb0_clk.common.hw; - sunxi_r_ccu_init(node, &sun8i_a83t_r_ccu_desc); } CLK_OF_DECLARE(sun8i_a83t_r_ccu, "allwinner,sun8i-a83t-r-ccu", diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c index 897490800102..23bfe1d12f21 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c +++ 
b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c @@ -761,7 +761,8 @@ static struct ccu_mp outa_clk = { .reg = 0x1f0, .features = CCU_FEATURE_FIXED_PREDIV, .hw.init = CLK_HW_INIT_PARENTS("outa", out_parents, - &ccu_mp_ops, 0), + &ccu_mp_ops, + CLK_SET_RATE_PARENT), } }; @@ -779,7 +780,8 @@ static struct ccu_mp outb_clk = { .reg = 0x1f4, .features = CCU_FEATURE_FIXED_PREDIV, .hw.init = CLK_HW_INIT_PARENTS("outb", out_parents, - &ccu_mp_ops, 0), + &ccu_mp_ops, + CLK_SET_RATE_PARENT), } }; diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c index 5c779eec454b..0e36ca3bf3d5 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c @@ -618,7 +618,7 @@ static struct clk_hw_onecell_data sun8i_v3s_hw_clks = { [CLK_MBUS] = &mbus_clk.common.hw, [CLK_MIPI_CSI] = &mipi_csi_clk.common.hw, }, - .num = CLK_NUMBER, + .num = CLK_PLL_DDR1 + 1, }; static struct clk_hw_onecell_data sun8i_v3_hw_clks = { @@ -700,7 +700,7 @@ static struct clk_hw_onecell_data sun8i_v3_hw_clks = { [CLK_MBUS] = &mbus_clk.common.hw, [CLK_MIPI_CSI] = &mipi_csi_clk.common.hw, }, - .num = CLK_NUMBER, + .num = CLK_I2S0 + 1, }; static struct ccu_reset_map sun8i_v3s_ccu_resets[] = { diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h index b0160d305a67..108eeeedcbf7 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h +++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h @@ -51,6 +51,4 @@ #define CLK_PLL_DDR1 74 -#define CLK_NUMBER (CLK_I2S0 + 1) - #endif /* _CCU_SUN8I_H3_H_ */ diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c index e6bd6d1ea012..f6cdce441cf7 100644 --- a/drivers/clk/tegra/clk.c +++ b/drivers/clk/tegra/clk.c @@ -231,8 +231,10 @@ struct clk ** __init tegra_clk_init(void __iomem *regs, int num, int banks) periph_banks = banks; clks = kcalloc(num, sizeof(struct clk *), GFP_KERNEL); - if (!clks) + if (!clks) { kfree(periph_clk_enb_refcnt); + return NULL; + } clk_num = num; diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c index f65e16c4f3c4..8d4c08b034bd 100644 --- a/drivers/clk/ti/clk-dra7-atl.c +++ b/drivers/clk/ti/clk-dra7-atl.c @@ -233,7 +233,6 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev) cinfo->iobase = of_iomap(node, 0); cinfo->dev = &pdev->dev; pm_runtime_enable(cinfo->dev); - pm_runtime_irq_safe(cinfo->dev); pm_runtime_get_sync(cinfo->dev); atl_write(cinfo, DRA7_ATL_PCLKMUX_REG(0), DRA7_ATL_PCLKMUX); diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 5fdd76cb1768..cc909e465823 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -88,7 +88,7 @@ config ROCKCHIP_TIMER select TIMER_OF select CLKSRC_MMIO help - Enables the support for the rockchip timer driver. + Enables the support for the Rockchip timer driver. config ARMADA_370_XP_TIMER bool "Armada 370 and XP timer driver" if COMPILE_TEST @@ -162,13 +162,13 @@ config NPCM7XX_TIMER select CLKSRC_MMIO help Enable 24-bit TIMER0 and TIMER1 counters in the NPCM7xx architecture, - While TIMER0 serves as clockevent and TIMER1 serves as clocksource. + where TIMER0 serves as clockevent and TIMER1 serves as clocksource. config CADENCE_TTC_TIMER bool "Cadence TTC timer driver" if COMPILE_TEST depends on COMMON_CLK help - Enables support for the cadence ttc driver. + Enables support for the Cadence TTC driver. 
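The tegra_clk_init() hunk above fixes a classic two-stage-allocation bug: when the second allocation fails, the first one must be unwound, and the function must actually return instead of falling through with a NULL pointer. A minimal sketch of the corrected pattern, assuming illustrative names (example_init, counts, table) rather than the driver's own:

#include <linux/slab.h>

static int *counts;

/* Allocate two arrays; if the second fails, unwind the first. */
static void **example_init(int num)
{
	void **table;

	counts = kcalloc(num, sizeof(*counts), GFP_KERNEL);
	if (!counts)
		return NULL;

	table = kcalloc(num, sizeof(*table), GFP_KERNEL);
	if (!table) {
		kfree(counts);		/* undo the earlier allocation */
		counts = NULL;
		return NULL;		/* the missing return was the bug */
	}

	return table;
}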
config ASM9260_TIMER bool "ASM9260 timer driver" if COMPILE_TEST @@ -190,10 +190,10 @@ config CLKSRC_DBX500_PRCMU bool "Clocksource PRCMU Timer" if COMPILE_TEST depends on HAS_IOMEM help - Use the always on PRCMU Timer as clocksource + Use the always on PRCMU Timer as clocksource. config CLPS711X_TIMER - bool "Cirrus logic timer driver" if COMPILE_TEST + bool "Cirrus Logic timer driver" if COMPILE_TEST select CLKSRC_MMIO help Enables support for the Cirrus Logic PS711 timer. @@ -205,11 +205,11 @@ config ATLAS7_TIMER Enables support for the Atlas7 timer. config MXS_TIMER - bool "Mxs timer driver" if COMPILE_TEST + bool "MXS timer driver" if COMPILE_TEST select CLKSRC_MMIO select STMP_DEVICE help - Enables support for the Mxs timer. + Enables support for the MXS timer. config PRIMA2_TIMER bool "Prima2 timer driver" if COMPILE_TEST @@ -238,10 +238,10 @@ config KEYSTONE_TIMER Enables support for the Keystone timer. config INTEGRATOR_AP_TIMER - bool "Integrator-ap timer driver" if COMPILE_TEST + bool "Integrator-AP timer driver" if COMPILE_TEST select CLKSRC_MMIO help - Enables support for the Integrator-ap timer. + Enables support for the Integrator-AP timer. config CLKSRC_EFM32 bool "Clocksource for Energy Micro's EFM32 SoCs" if !ARCH_EFM32 @@ -283,8 +283,8 @@ config CLKSRC_NPS select TIMER_OF if OF help NPS400 clocksource support. - Got 64 bit counter with update rate up to 1000MHz. - This counter is accessed via couple of 32 bit memory mapped registers. + It has a 64-bit counter with update rate up to 1000MHz. + This counter is accessed via couple of 32-bit memory-mapped registers. config CLKSRC_STM32 bool "Clocksource for STM32 SoCs" if !ARCH_STM32 @@ -305,14 +305,14 @@ config ARC_TIMERS help These are legacy 32-bit TIMER0 and TIMER1 counters found on all ARC cores (ARC700 as well as ARC HS38). - TIMER0 serves as clockevent while TIMER1 provides clocksource + TIMER0 serves as clockevent while TIMER1 provides clocksource. config ARC_TIMERS_64BIT bool "Support for 64-bit counters in ARC HS38 cores" if COMPILE_TEST depends on ARC_TIMERS select TIMER_OF help - This enables 2 different 64-bit timers: RTC (for UP) and GFRC (for SMP) + This enables 2 different 64-bit timers: RTC (for UP) and GFRC (for SMP). RTC is implemented inside the core, while GFRC sits outside the core in ARConnect IP block. Driver automatically picks one of them for clocksource as appropriate. @@ -390,7 +390,7 @@ config ARM_GLOBAL_TIMER select TIMER_OF if OF depends on ARM help - This options enables support for the ARM global timer unit + This option enables support for the ARM global timer unit. config ARM_TIMER_SP804 bool "Support for Dual Timer SP804 module" if COMPILE_TEST @@ -403,14 +403,14 @@ config CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK depends on ARM_GLOBAL_TIMER default y help - Use ARM global timer clock source as sched_clock + Use ARM global timer clock source as sched_clock. config ARMV7M_SYSTICK bool "Support for the ARMv7M system time" if COMPILE_TEST select TIMER_OF if OF select CLKSRC_MMIO help - This options enables support for the ARMv7M system timer unit + This option enables support for the ARMv7M system timer unit. config ATMEL_PIT bool "Atmel PIT support" if COMPILE_TEST @@ -460,7 +460,7 @@ config VF_PIT_TIMER bool select CLKSRC_MMIO help - Support for Period Interrupt Timer on Freescale Vybrid Family SoCs. + Support for Periodic Interrupt Timer on Freescale Vybrid Family SoCs. 
config OXNAS_RPS_TIMER bool "Oxford Semiconductor OXNAS RPS Timers driver" if COMPILE_TEST @@ -470,7 +470,7 @@ config OXNAS_RPS_TIMER This enables support for the Oxford Semiconductor OXNAS RPS timers. config SYS_SUPPORTS_SH_CMT - bool + bool config MTK_TIMER bool "Mediatek timer driver" if COMPILE_TEST @@ -490,13 +490,13 @@ config SPRD_TIMER Enables support for the Spreadtrum timer driver. config SYS_SUPPORTS_SH_MTU2 - bool + bool config SYS_SUPPORTS_SH_TMU - bool + bool config SYS_SUPPORTS_EM_STI - bool + bool config CLKSRC_JCORE_PIT bool "J-Core PIT timer driver" if COMPILE_TEST @@ -523,7 +523,7 @@ config SH_TIMER_MTU2 help This enables build of a clockevent driver for the Multi-Function Timer Pulse Unit 2 (MTU2) hardware available on SoCs from Renesas. - This hardware comes with 16 bit-timer registers. + This hardware comes with 16-bit timer registers. config RENESAS_OSTM bool "Renesas OSTM timer driver" if COMPILE_TEST @@ -580,7 +580,7 @@ config CLKSRC_TANGO_XTAL select TIMER_OF select CLKSRC_MMIO help - This enables the clocksource for Tango SoC + This enables the clocksource for Tango SoC. config CLKSRC_PXA bool "Clocksource for PXA or SA-11x0 platform" if COMPILE_TEST @@ -591,24 +591,24 @@ config CLKSRC_PXA platforms. config H8300_TMR8 - bool "Clockevent timer for the H8300 platform" if COMPILE_TEST - depends on HAS_IOMEM + bool "Clockevent timer for the H8300 platform" if COMPILE_TEST + depends on HAS_IOMEM help This enables the 8 bits timer for the H8300 platform. config H8300_TMR16 - bool "Clockevent timer for the H83069 platform" if COMPILE_TEST - depends on HAS_IOMEM + bool "Clockevent timer for the H83069 platform" if COMPILE_TEST + depends on HAS_IOMEM help This enables the 16 bits timer for the H8300 platform with the - H83069 cpu. + H83069 CPU. config H8300_TPU - bool "Clocksource for the H8300 platform" if COMPILE_TEST - depends on HAS_IOMEM + bool "Clocksource for the H8300 platform" if COMPILE_TEST + depends on HAS_IOMEM help This enables the clocksource for the H8300 platform with the - H8S2678 cpu. + H8S2678 CPU. config CLKSRC_IMX_GPT bool "Clocksource using i.MX GPT" if COMPILE_TEST @@ -666,8 +666,8 @@ config CSKY_MP_TIMER help Say yes here to enable C-SKY SMP timer driver used for C-SKY SMP system. - csky,mptimer is not only used in SMP system, it also could be used - single core system. It's not a mmio reg and it use mtcr/mfcr instruction. + csky,mptimer is not only used in SMP system, it also could be used in + single core system. It's not a mmio reg and it uses mtcr/mfcr instruction. config GX6605S_TIMER bool "Gx6605s SOC system timer driver" if COMPILE_TEST @@ -697,4 +697,14 @@ config INGENIC_TIMER help Support for the timer/counter unit of the Ingenic JZ SoCs. +config MICROCHIP_PIT64B + bool "Microchip PIT64B support" + depends on OF || COMPILE_TEST + select CLKSRC_MMIO + help + This option enables Microchip PIT64B timer for Atmel + based system. It supports the oneshot, the periodic + modes and high resolution. It is used as a clocksource + and a clockevent. 
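The new MICROCHIP_PIT64B entry describes a block that is registered both as a clocksource (a free-running counter for timekeeping) and as a clockevent (a programmable interrupt source); the driver added later in this series does exactly that. As a reminder of what the two registrations look like, here is a hedged sketch against the generic APIs — every example_* name and the 5 MHz rate are illustrative, not taken from the driver:

#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>

static u64 example_cs_read(struct clocksource *cs)
{
	return 0;	/* would read the free-running counter here */
}

static struct clocksource example_cs = {
	.name	= "example",
	.rating	= 200,
	.read	= example_cs_read,
	.mask	= CLOCKSOURCE_MASK(64),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int example_ce_next(unsigned long delta, struct clock_event_device *ced)
{
	return 0;	/* would program the hardware comparator here */
}

static int example_ce_shutdown(struct clock_event_device *ced)
{
	return 0;	/* would stop the timer here */
}

static struct clock_event_device example_ce = {
	.name			= "example",
	.features		= CLOCK_EVT_FEAT_ONESHOT,
	.rating			= 150,
	.set_next_event		= example_ce_next,
	.set_state_shutdown	= example_ce_shutdown,
};

static int __init example_timer_init(void)
{
	int ret;

	/* One registration for timekeeping... */
	ret = clocksource_register_hz(&example_cs, 5000000);
	if (ret)
		return ret;

	/* ...and one for delivering timer interrupts. */
	example_ce.cpumask = cpumask_of(0);
	clockevents_config_and_register(&example_ce, 5000000, 1, ULONG_MAX);

	return 0;
}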
+ endmenu diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index 4dfe4225ece7..713686faa549 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -88,3 +88,4 @@ obj-$(CONFIG_RISCV_TIMER) += timer-riscv.o obj-$(CONFIG_CSKY_MP_TIMER) += timer-mp-csky.o obj-$(CONFIG_GX6605S_TIMER) += timer-gx6605s.o obj-$(CONFIG_HYPERV_TIMER) += hyperv_timer.o +obj-$(CONFIG_MICROCHIP_PIT64B) += timer-microchip-pit64b.o diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c index 2b196cbfadb6..b235f446ee50 100644 --- a/drivers/clocksource/bcm2835_timer.c +++ b/drivers/clocksource/bcm2835_timer.c @@ -121,7 +121,7 @@ static int __init bcm2835_timer_init(struct device_node *node) ret = setup_irq(irq, &timer->act); if (ret) { pr_err("Can't set up timer IRQ\n"); - goto err_iounmap; + goto err_timer_free; } clockevents_config_and_register(&timer->evt, freq, 0xf, 0xffffffff); @@ -130,6 +130,9 @@ static int __init bcm2835_timer_init(struct device_node *node) return 0; +err_timer_free: + kfree(timer); + err_iounmap: iounmap(base); return ret; diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c index 9039df4f90e2..ab190dffb1ed 100644 --- a/drivers/clocksource/em_sti.c +++ b/drivers/clocksource/em_sti.c @@ -279,9 +279,7 @@ static void em_sti_register_clockevent(struct em_sti_priv *p) static int em_sti_probe(struct platform_device *pdev) { struct em_sti_priv *p; - struct resource *res; - int irq; - int ret; + int irq, ret; p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL); if (p == NULL) @@ -295,8 +293,7 @@ static int em_sti_probe(struct platform_device *pdev) return irq; /* map memory, let base point to the STI instance */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - p->base = devm_ioremap_resource(&pdev->dev, res); + p->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(p->base)) return PTR_ERR(p->base); diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c index 74cb299f5089..a267fe31ef13 100644 --- a/drivers/clocksource/exynos_mct.c +++ b/drivers/clocksource/exynos_mct.c @@ -4,7 +4,7 @@ * Copyright (c) 2011 Samsung Electronics Co., Ltd. * http://www.samsung.com * - * EXYNOS4 MCT(Multi-Core Timer) support + * Exynos4 MCT(Multi-Core Timer) support */ #include <linux/interrupt.h> diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c index 287d8d58c21a..9d808d595ca8 100644 --- a/drivers/clocksource/hyperv_timer.c +++ b/drivers/clocksource/hyperv_timer.c @@ -66,7 +66,7 @@ static int hv_ce_set_next_event(unsigned long delta, { u64 current_tick; - current_tick = hyperv_cs->read(NULL); + current_tick = hv_read_reference_counter(); current_tick += delta; hv_init_timer(0, current_tick); return 0; @@ -302,22 +302,33 @@ EXPORT_SYMBOL_GPL(hv_stimer_global_cleanup); * the other that uses the TSC reference page feature as defined in the * TLFS. The MSR version is for compatibility with old versions of * Hyper-V and 32-bit x86. The TSC reference page version is preferred. + * + * The Hyper-V clocksource ratings of 250 are chosen to be below the + * TSC clocksource rating of 300. In configurations where Hyper-V offers + * an InvariantTSC, the TSC is not marked "unstable", so the TSC clocksource + * is available and preferred. With the higher rating, it will be the + * default. On older hardware and Hyper-V versions, the TSC is marked + * "unstable", so no TSC clocksource is created and the selected Hyper-V + * clocksource will be the default. 
*/ -struct clocksource *hyperv_cs; -EXPORT_SYMBOL_GPL(hyperv_cs); +u64 (*hv_read_reference_counter)(void); +EXPORT_SYMBOL_GPL(hv_read_reference_counter); -static struct ms_hyperv_tsc_page tsc_pg __aligned(PAGE_SIZE); +static union { + struct ms_hyperv_tsc_page page; + u8 reserved[PAGE_SIZE]; +} tsc_pg __aligned(PAGE_SIZE); struct ms_hyperv_tsc_page *hv_get_tsc_page(void) { - return &tsc_pg; + return &tsc_pg.page; } EXPORT_SYMBOL_GPL(hv_get_tsc_page); -static u64 notrace read_hv_clock_tsc(struct clocksource *arg) +static u64 notrace read_hv_clock_tsc(void) { - u64 current_tick = hv_read_tsc_page(&tsc_pg); + u64 current_tick = hv_read_tsc_page(hv_get_tsc_page()); if (current_tick == U64_MAX) hv_get_time_ref_count(current_tick); @@ -325,20 +336,50 @@ static u64 notrace read_hv_clock_tsc(struct clocksource *arg) return current_tick; } +static u64 notrace read_hv_clock_tsc_cs(struct clocksource *arg) +{ + return read_hv_clock_tsc(); +} + static u64 read_hv_sched_clock_tsc(void) { - return read_hv_clock_tsc(NULL) - hv_sched_clock_offset; + return read_hv_clock_tsc() - hv_sched_clock_offset; +} + +static void suspend_hv_clock_tsc(struct clocksource *arg) +{ + u64 tsc_msr; + + /* Disable the TSC page */ + hv_get_reference_tsc(tsc_msr); + tsc_msr &= ~BIT_ULL(0); + hv_set_reference_tsc(tsc_msr); +} + + +static void resume_hv_clock_tsc(struct clocksource *arg) +{ + phys_addr_t phys_addr = virt_to_phys(&tsc_pg); + u64 tsc_msr; + + /* Re-enable the TSC page */ + hv_get_reference_tsc(tsc_msr); + tsc_msr &= GENMASK_ULL(11, 0); + tsc_msr |= BIT_ULL(0) | (u64)phys_addr; + hv_set_reference_tsc(tsc_msr); } static struct clocksource hyperv_cs_tsc = { .name = "hyperv_clocksource_tsc_page", - .rating = 400, - .read = read_hv_clock_tsc, + .rating = 250, + .read = read_hv_clock_tsc_cs, .mask = CLOCKSOURCE_MASK(64), .flags = CLOCK_SOURCE_IS_CONTINUOUS, + .suspend= suspend_hv_clock_tsc, + .resume = resume_hv_clock_tsc, }; -static u64 notrace read_hv_clock_msr(struct clocksource *arg) +static u64 notrace read_hv_clock_msr(void) { u64 current_tick; /* @@ -350,15 +391,20 @@ static u64 notrace read_hv_clock_msr(struct clocksource *arg) return current_tick; } +static u64 notrace read_hv_clock_msr_cs(struct clocksource *arg) +{ + return read_hv_clock_msr(); +} + static u64 read_hv_sched_clock_msr(void) { - return read_hv_clock_msr(NULL) - hv_sched_clock_offset; + return read_hv_clock_msr() - hv_sched_clock_offset; } static struct clocksource hyperv_cs_msr = { .name = "hyperv_clocksource_msr", - .rating = 400, - .read = read_hv_clock_msr, + .rating = 250, + .read = read_hv_clock_msr_cs, .mask = CLOCKSOURCE_MASK(64), .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; @@ -371,8 +417,8 @@ static bool __init hv_init_tsc_clocksource(void) if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE)) return false; - hyperv_cs = &hyperv_cs_tsc; - phys_addr = virt_to_phys(&tsc_pg); + hv_read_reference_counter = read_hv_clock_tsc; + phys_addr = virt_to_phys(hv_get_tsc_page()); /* * The Hyper-V TLFS specifies to preserve the value of reserved @@ -389,7 +435,7 @@ static bool __init hv_init_tsc_clocksource(void) hv_set_clocksource_vdso(hyperv_cs_tsc); clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100); - hv_sched_clock_offset = hyperv_cs->read(hyperv_cs); + hv_sched_clock_offset = hv_read_reference_counter(); hv_setup_sched_clock(read_hv_sched_clock_tsc); return true; @@ -411,10 +457,10 @@ void __init hv_init_clocksource(void) if (!(ms_hyperv.features & HV_MSR_TIME_REF_COUNT_AVAILABLE)) return; - hyperv_cs = &hyperv_cs_msr; + 
hv_read_reference_counter = read_hv_clock_msr; clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100); - hv_sched_clock_offset = hyperv_cs->read(hyperv_cs); + hv_sched_clock_offset = hv_read_reference_counter(); hv_setup_sched_clock(read_hv_sched_clock_msr); } EXPORT_SYMBOL_GPL(hv_init_clocksource); diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index 9cde50cb3220..12ac75f7571f 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c @@ -905,7 +905,7 @@ static int sh_cmt_map_memory(struct sh_cmt_device *cmt) return -ENXIO; } - cmt->mapbase = ioremap_nocache(mem->start, resource_size(mem)); + cmt->mapbase = ioremap(mem->start, resource_size(mem)); if (cmt->mapbase == NULL) { dev_err(&cmt->pdev->dev, "failed to remap I/O memory\n"); return -ENXIO; diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c index 64526e50d471..bfccb31e94ad 100644 --- a/drivers/clocksource/sh_mtu2.c +++ b/drivers/clocksource/sh_mtu2.c @@ -377,7 +377,7 @@ static int sh_mtu2_map_memory(struct sh_mtu2_device *mtu) return -ENXIO; } - mtu->mapbase = ioremap_nocache(res->start, resource_size(res)); + mtu->mapbase = ioremap(res->start, resource_size(res)); if (mtu->mapbase == NULL) return -ENXIO; diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c index d49690d15536..d41df9ba3725 100644 --- a/drivers/clocksource/sh_tmu.c +++ b/drivers/clocksource/sh_tmu.c @@ -486,7 +486,7 @@ static int sh_tmu_map_memory(struct sh_tmu_device *tmu) return -ENXIO; } - tmu->mapbase = ioremap_nocache(res->start, resource_size(res)); + tmu->mapbase = ioremap(res->start, resource_size(res)); if (tmu->mapbase == NULL) return -ENXIO; diff --git a/drivers/clocksource/timer-cadence-ttc.c b/drivers/clocksource/timer-cadence-ttc.c index 88fe2e9ba9a3..38858e141731 100644 --- a/drivers/clocksource/timer-cadence-ttc.c +++ b/drivers/clocksource/timer-cadence-ttc.c @@ -15,6 +15,8 @@ #include <linux/of_irq.h> #include <linux/slab.h> #include <linux/sched_clock.h> +#include <linux/module.h> +#include <linux/of_platform.h> /* * This driver configures the 2 16/32-bit count-up timers as follows: @@ -464,13 +466,7 @@ static int __init ttc_setup_clockevent(struct clk *clk, return 0; } -/** - * ttc_timer_init - Initialize the timer - * - * Initializes the timer hardware and register the clock source and clock event - * timers with Linux kernal timer framework - */ -static int __init ttc_timer_init(struct device_node *timer) +static int __init ttc_timer_probe(struct platform_device *pdev) { unsigned int irq; void __iomem *timer_baseaddr; @@ -478,6 +474,7 @@ static int __init ttc_timer_init(struct device_node *timer) static int initialized; int clksel, ret; u32 timer_width = 16; + struct device_node *timer = pdev->dev.of_node; if (initialized) return 0; @@ -532,4 +529,17 @@ static int __init ttc_timer_init(struct device_node *timer) return 0; } -TIMER_OF_DECLARE(ttc, "cdns,ttc", ttc_timer_init); +static const struct of_device_id ttc_timer_of_match[] = { + {.compatible = "cdns,ttc"}, + {}, +}; + +MODULE_DEVICE_TABLE(of, ttc_timer_of_match); + +static struct platform_driver ttc_timer_driver = { + .driver = { + .name = "cdns_ttc_timer", + .of_match_table = ttc_timer_of_match, + }, +}; +builtin_platform_driver_probe(ttc_timer_driver, ttc_timer_probe); diff --git a/drivers/clocksource/timer-microchip-pit64b.c b/drivers/clocksource/timer-microchip-pit64b.c new file mode 100644 index 000000000000..bd63d3484838 --- /dev/null +++ b/drivers/clocksource/timer-microchip-pit64b.c @@ -0,0 
+1,451 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * 64-bit Periodic Interval Timer driver + * + * Copyright (C) 2019 Microchip Technology Inc. and its subsidiaries + * + * Author: Claudiu Beznea <claudiu.beznea@microchip.com> + */ + +#include <linux/clk.h> +#include <linux/clockchips.h> +#include <linux/interrupt.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/sched_clock.h> +#include <linux/slab.h> + +#define MCHP_PIT64B_CR 0x00 /* Control Register */ +#define MCHP_PIT64B_CR_START BIT(0) +#define MCHP_PIT64B_CR_SWRST BIT(8) + +#define MCHP_PIT64B_MR 0x04 /* Mode Register */ +#define MCHP_PIT64B_MR_CONT BIT(0) +#define MCHP_PIT64B_MR_ONE_SHOT (0) +#define MCHP_PIT64B_MR_SGCLK BIT(3) +#define MCHP_PIT64B_MR_PRES GENMASK(11, 8) + +#define MCHP_PIT64B_LSB_PR 0x08 /* LSB Period Register */ + +#define MCHP_PIT64B_MSB_PR 0x0C /* MSB Period Register */ + +#define MCHP_PIT64B_IER 0x10 /* Interrupt Enable Register */ +#define MCHP_PIT64B_IER_PERIOD BIT(0) + +#define MCHP_PIT64B_ISR 0x1C /* Interrupt Status Register */ + +#define MCHP_PIT64B_TLSBR 0x20 /* Timer LSB Register */ + +#define MCHP_PIT64B_TMSBR 0x24 /* Timer MSB Register */ + +#define MCHP_PIT64B_PRES_MAX 0x10 +#define MCHP_PIT64B_LSBMASK GENMASK_ULL(31, 0) +#define MCHP_PIT64B_PRES_TO_MODE(p) (MCHP_PIT64B_MR_PRES & ((p) << 8)) +#define MCHP_PIT64B_MODE_TO_PRES(m) ((MCHP_PIT64B_MR_PRES & (m)) >> 8) +#define MCHP_PIT64B_DEF_CS_FREQ 5000000UL /* 5 MHz */ +#define MCHP_PIT64B_DEF_CE_FREQ 32768 /* 32 KHz */ + +#define MCHP_PIT64B_NAME "pit64b" + +/** + * struct mchp_pit64b_timer - PIT64B timer data structure + * @base: base address of PIT64B hardware block + * @pclk: PIT64B's peripheral clock + * @gclk: PIT64B's generic clock + * @mode: precomputed value for mode register + */ +struct mchp_pit64b_timer { + void __iomem *base; + struct clk *pclk; + struct clk *gclk; + u32 mode; +}; + +/** + * mchp_pit64b_clkevt - PIT64B clockevent data structure + * @timer: PIT64B timer + * @clkevt: clockevent + */ +struct mchp_pit64b_clkevt { + struct mchp_pit64b_timer timer; + struct clock_event_device clkevt; +}; + +#define to_mchp_pit64b_timer(x) \ + ((struct mchp_pit64b_timer *)container_of(x,\ + struct mchp_pit64b_clkevt, clkevt)) + +/* Base address for clocksource timer. */ +static void __iomem *mchp_pit64b_cs_base; +/* Default cycles for clockevent timer. */ +static u64 mchp_pit64b_ce_cycles; + +static inline u64 mchp_pit64b_cnt_read(void __iomem *base) +{ + unsigned long flags; + u32 low, high; + + raw_local_irq_save(flags); + + /* + * When using a 64 bit period TLSB must be read first, followed by the + * read of TMSB. This sequence generates an atomic read of the 64 bit + * timer value whatever the lapse of time between the accesses. 
+ */ + low = readl_relaxed(base + MCHP_PIT64B_TLSBR); + high = readl_relaxed(base + MCHP_PIT64B_TMSBR); + + raw_local_irq_restore(flags); + + return (((u64)high << 32) | low); +} + +static inline void mchp_pit64b_reset(struct mchp_pit64b_timer *timer, + u64 cycles, u32 mode, u32 irqs) +{ + u32 low, high; + + low = cycles & MCHP_PIT64B_LSBMASK; + high = cycles >> 32; + + writel_relaxed(MCHP_PIT64B_CR_SWRST, timer->base + MCHP_PIT64B_CR); + writel_relaxed(mode | timer->mode, timer->base + MCHP_PIT64B_MR); + writel_relaxed(high, timer->base + MCHP_PIT64B_MSB_PR); + writel_relaxed(low, timer->base + MCHP_PIT64B_LSB_PR); + writel_relaxed(irqs, timer->base + MCHP_PIT64B_IER); + writel_relaxed(MCHP_PIT64B_CR_START, timer->base + MCHP_PIT64B_CR); +} + +static u64 mchp_pit64b_clksrc_read(struct clocksource *cs) +{ + return mchp_pit64b_cnt_read(mchp_pit64b_cs_base); +} + +static u64 mchp_pit64b_sched_read_clk(void) +{ + return mchp_pit64b_cnt_read(mchp_pit64b_cs_base); +} + +static int mchp_pit64b_clkevt_shutdown(struct clock_event_device *cedev) +{ + struct mchp_pit64b_timer *timer = to_mchp_pit64b_timer(cedev); + + writel_relaxed(MCHP_PIT64B_CR_SWRST, timer->base + MCHP_PIT64B_CR); + + return 0; +} + +static int mchp_pit64b_clkevt_set_periodic(struct clock_event_device *cedev) +{ + struct mchp_pit64b_timer *timer = to_mchp_pit64b_timer(cedev); + + mchp_pit64b_reset(timer, mchp_pit64b_ce_cycles, MCHP_PIT64B_MR_CONT, + MCHP_PIT64B_IER_PERIOD); + + return 0; +} + +static int mchp_pit64b_clkevt_set_next_event(unsigned long evt, + struct clock_event_device *cedev) +{ + struct mchp_pit64b_timer *timer = to_mchp_pit64b_timer(cedev); + + mchp_pit64b_reset(timer, evt, MCHP_PIT64B_MR_ONE_SHOT, + MCHP_PIT64B_IER_PERIOD); + + return 0; +} + +static void mchp_pit64b_clkevt_suspend(struct clock_event_device *cedev) +{ + struct mchp_pit64b_timer *timer = to_mchp_pit64b_timer(cedev); + + writel_relaxed(MCHP_PIT64B_CR_SWRST, timer->base + MCHP_PIT64B_CR); + if (timer->mode & MCHP_PIT64B_MR_SGCLK) + clk_disable_unprepare(timer->gclk); + clk_disable_unprepare(timer->pclk); +} + +static void mchp_pit64b_clkevt_resume(struct clock_event_device *cedev) +{ + struct mchp_pit64b_timer *timer = to_mchp_pit64b_timer(cedev); + + clk_prepare_enable(timer->pclk); + if (timer->mode & MCHP_PIT64B_MR_SGCLK) + clk_prepare_enable(timer->gclk); +} + +static irqreturn_t mchp_pit64b_interrupt(int irq, void *dev_id) +{ + struct mchp_pit64b_clkevt *irq_data = dev_id; + + /* Need to clear the interrupt. */ + readl_relaxed(irq_data->timer.base + MCHP_PIT64B_ISR); + + irq_data->clkevt.event_handler(&irq_data->clkevt); + + return IRQ_HANDLED; +} + +static void __init mchp_pit64b_pres_compute(u32 *pres, u32 clk_rate, + u32 max_rate) +{ + u32 tmp; + + for (*pres = 0; *pres < MCHP_PIT64B_PRES_MAX; (*pres)++) { + tmp = clk_rate / (*pres + 1); + if (tmp <= max_rate) + break; + } + + /* Use the bigest prescaler if we didn't match one. */ + if (*pres == MCHP_PIT64B_PRES_MAX) + *pres = MCHP_PIT64B_PRES_MAX - 1; +} + +/** + * mchp_pit64b_init_mode - prepare PIT64B mode register value to be used at + * runtime; this includes prescaler and SGCLK bit + * + * PIT64B timer may be fed by gclk or pclk. When gclk is used its rate has to + * be at least 3 times lower that pclk's rate. pclk rate is fixed, gclk rate + * could be changed via clock APIs. The chosen clock (pclk or gclk) could be + * divided by the internal PIT64B's divider. 
+ * + * This function, first tries to use GCLK by requesting the desired rate from + * PMC and then using the internal PIT64B prescaler, if any, to reach the + * requested rate. If PCLK/GCLK < 3 (condition requested by PIT64B hardware) + * then the function falls back on using PCLK as clock source for PIT64B timer + * choosing the highest prescaler in case it doesn't locate one to match the + * requested frequency. + * + * Below is presented the PIT64B block in relation with PMC: + * + * PIT64B + * PMC +------------------------------------+ + * +----+ | +-----+ | + * | |-->gclk -->|-->| | +---------+ +-----+ | + * | | | | MUX |--->| Divider |->|timer| | + * | |-->pclk -->|-->| | +---------+ +-----+ | + * +----+ | +-----+ | + * | ^ | + * | sel | + * +------------------------------------+ + * + * Where: + * - gclk rate <= pclk rate/3 + * - gclk rate could be requested from PMC + * - pclk rate is fixed (cannot be requested from PMC) + */ +static int __init mchp_pit64b_init_mode(struct mchp_pit64b_timer *timer, + unsigned long max_rate) +{ + unsigned long pclk_rate, diff = 0, best_diff = ULONG_MAX; + long gclk_round = 0; + u32 pres, best_pres = 0; + + pclk_rate = clk_get_rate(timer->pclk); + if (!pclk_rate) + return -EINVAL; + + timer->mode = 0; + + /* Try using GCLK. */ + gclk_round = clk_round_rate(timer->gclk, max_rate); + if (gclk_round < 0) + goto pclk; + + if (pclk_rate / gclk_round < 3) + goto pclk; + + mchp_pit64b_pres_compute(&pres, gclk_round, max_rate); + best_diff = abs(gclk_round / (pres + 1) - max_rate); + best_pres = pres; + + if (!best_diff) { + timer->mode |= MCHP_PIT64B_MR_SGCLK; + goto done; + } + +pclk: + /* Check if requested rate could be obtained using PCLK. */ + mchp_pit64b_pres_compute(&pres, pclk_rate, max_rate); + diff = abs(pclk_rate / (pres + 1) - max_rate); + + if (best_diff > diff) { + /* Use PCLK. */ + best_pres = pres; + } else { + /* Use GCLK. */ + timer->mode |= MCHP_PIT64B_MR_SGCLK; + clk_set_rate(timer->gclk, gclk_round); + } + +done: + timer->mode |= MCHP_PIT64B_PRES_TO_MODE(best_pres); + + pr_info("PIT64B: using clk=%s with prescaler %u, freq=%lu [Hz]\n", + timer->mode & MCHP_PIT64B_MR_SGCLK ? "gclk" : "pclk", best_pres, + timer->mode & MCHP_PIT64B_MR_SGCLK ? + gclk_round / (best_pres + 1) : pclk_rate / (best_pres + 1)); + + return 0; +} + +static int __init mchp_pit64b_init_clksrc(struct mchp_pit64b_timer *timer, + u32 clk_rate) +{ + int ret; + + mchp_pit64b_reset(timer, ULLONG_MAX, MCHP_PIT64B_MR_CONT, 0); + + mchp_pit64b_cs_base = timer->base; + + ret = clocksource_mmio_init(timer->base, MCHP_PIT64B_NAME, clk_rate, + 210, 64, mchp_pit64b_clksrc_read); + if (ret) { + pr_debug("clksrc: Failed to register PIT64B clocksource!\n"); + + /* Stop timer. 
*/ + writel_relaxed(MCHP_PIT64B_CR_SWRST, + timer->base + MCHP_PIT64B_CR); + + return ret; + } + + sched_clock_register(mchp_pit64b_sched_read_clk, 64, clk_rate); + + return 0; +} + +static int __init mchp_pit64b_init_clkevt(struct mchp_pit64b_timer *timer, + u32 clk_rate, u32 irq) +{ + struct mchp_pit64b_clkevt *ce; + int ret; + + ce = kzalloc(sizeof(*ce), GFP_KERNEL); + if (!ce) + return -ENOMEM; + + mchp_pit64b_ce_cycles = DIV_ROUND_CLOSEST(clk_rate, HZ); + + ce->timer.base = timer->base; + ce->timer.pclk = timer->pclk; + ce->timer.gclk = timer->gclk; + ce->timer.mode = timer->mode; + ce->clkevt.name = MCHP_PIT64B_NAME; + ce->clkevt.features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC; + ce->clkevt.rating = 150; + ce->clkevt.set_state_shutdown = mchp_pit64b_clkevt_shutdown; + ce->clkevt.set_state_periodic = mchp_pit64b_clkevt_set_periodic; + ce->clkevt.set_next_event = mchp_pit64b_clkevt_set_next_event; + ce->clkevt.suspend = mchp_pit64b_clkevt_suspend; + ce->clkevt.resume = mchp_pit64b_clkevt_resume; + ce->clkevt.cpumask = cpumask_of(0); + ce->clkevt.irq = irq; + + ret = request_irq(irq, mchp_pit64b_interrupt, IRQF_TIMER, + "pit64b_tick", ce); + if (ret) { + pr_debug("clkevt: Failed to setup PIT64B IRQ\n"); + kfree(ce); + return ret; + } + + clockevents_config_and_register(&ce->clkevt, clk_rate, 1, ULONG_MAX); + + return 0; +} + +static int __init mchp_pit64b_dt_init_timer(struct device_node *node, + bool clkevt) +{ + u32 freq = clkevt ? MCHP_PIT64B_DEF_CE_FREQ : MCHP_PIT64B_DEF_CS_FREQ; + struct mchp_pit64b_timer timer; + unsigned long clk_rate; + u32 irq = 0; + int ret; + + /* Parse DT node. */ + timer.pclk = of_clk_get_by_name(node, "pclk"); + if (IS_ERR(timer.pclk)) + return PTR_ERR(timer.pclk); + + timer.gclk = of_clk_get_by_name(node, "gclk"); + if (IS_ERR(timer.gclk)) + return PTR_ERR(timer.gclk); + + timer.base = of_iomap(node, 0); + if (!timer.base) + return -ENXIO; + + if (clkevt) { + irq = irq_of_parse_and_map(node, 0); + if (!irq) { + ret = -ENODEV; + goto io_unmap; + } + } + + /* Initialize mode (prescaler + SGCK bit). To be used at runtime. */ + ret = mchp_pit64b_init_mode(&timer, freq); + if (ret) + goto irq_unmap; + + ret = clk_prepare_enable(timer.pclk); + if (ret) + goto irq_unmap; + + if (timer.mode & MCHP_PIT64B_MR_SGCLK) { + ret = clk_prepare_enable(timer.gclk); + if (ret) + goto pclk_unprepare; + + clk_rate = clk_get_rate(timer.gclk); + } else { + clk_rate = clk_get_rate(timer.pclk); + } + clk_rate = clk_rate / (MCHP_PIT64B_MODE_TO_PRES(timer.mode) + 1); + + if (clkevt) + ret = mchp_pit64b_init_clkevt(&timer, clk_rate, irq); + else + ret = mchp_pit64b_init_clksrc(&timer, clk_rate); + + if (ret) + goto gclk_unprepare; + + return 0; + +gclk_unprepare: + if (timer.mode & MCHP_PIT64B_MR_SGCLK) + clk_disable_unprepare(timer.gclk); +pclk_unprepare: + clk_disable_unprepare(timer.pclk); +irq_unmap: + irq_dispose_mapping(irq); +io_unmap: + iounmap(timer.base); + + return ret; +} + +static int __init mchp_pit64b_dt_init(struct device_node *node) +{ + static int inits; + + switch (inits++) { + case 0: + /* 1st request, register clockevent. */ + return mchp_pit64b_dt_init_timer(node, true); + case 1: + /* 2nd request, register clocksource. */ + return mchp_pit64b_dt_init_timer(node, false); + } + + /* The rest, don't care. 
*/ + return -EINVAL; +} + +TIMER_OF_DECLARE(mchp_pit64b, "microchip,sam9x60-pit64b", mchp_pit64b_dt_init); diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c index 4e54856ce2a5..c4f15c4068c0 100644 --- a/drivers/clocksource/timer-riscv.c +++ b/drivers/clocksource/timer-riscv.c @@ -56,7 +56,7 @@ static unsigned long long riscv_clocksource_rdtime(struct clocksource *cs) return get_cycles64(); } -static u64 riscv_sched_clock(void) +static u64 notrace riscv_sched_clock(void) { return get_cycles64(); } diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c index 5394d9dbdfbc..269a994d6a99 100644 --- a/drivers/clocksource/timer-ti-dm.c +++ b/drivers/clocksource/timer-ti-dm.c @@ -780,7 +780,6 @@ static int omap_dm_timer_probe(struct platform_device *pdev) { unsigned long flags; struct omap_dm_timer *timer; - struct resource *mem, *irq; struct device *dev = &pdev->dev; const struct dmtimer_platform_data *pdata; int ret; @@ -796,24 +795,16 @@ static int omap_dm_timer_probe(struct platform_device *pdev) return -ENODEV; } - irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (unlikely(!irq)) { - dev_err(dev, "%s: no IRQ resource.\n", __func__); - return -ENODEV; - } - - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (unlikely(!mem)) { - dev_err(dev, "%s: no memory resource.\n", __func__); - return -ENODEV; - } - timer = devm_kzalloc(dev, sizeof(*timer), GFP_KERNEL); if (!timer) return -ENOMEM; + timer->irq = platform_get_irq(pdev, 0); + if (timer->irq < 0) + return timer->irq; + timer->fclk = ERR_PTR(-ENODEV); - timer->io_base = devm_ioremap_resource(dev, mem); + timer->io_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(timer->io_base)) return PTR_ERR(timer->io_base); @@ -836,7 +827,6 @@ static int omap_dm_timer_probe(struct platform_device *pdev) if (pdata) timer->errata = pdata->timer_errata; - timer->irq = irq->start; timer->pdev = pdev; pm_runtime_enable(dev); diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c index 77b0e5d0fb13..4f86ce2db34f 100644 --- a/drivers/cpufreq/brcmstb-avs-cpufreq.c +++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c @@ -455,6 +455,8 @@ static unsigned int brcm_avs_cpufreq_get(unsigned int cpu) struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); struct private_data *priv = policy->driver_data; + cpufreq_cpu_put(policy); + return brcm_avs_get_frequency(priv->base); } diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index 8d8da763adc5..a06777c35fc0 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -39,7 +39,7 @@ static struct cppc_cpudata **all_cpu_data; struct cppc_workaround_oem_info { - char oem_id[ACPI_OEM_ID_SIZE +1]; + char oem_id[ACPI_OEM_ID_SIZE + 1]; char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1]; u32 oem_revision; }; @@ -93,9 +93,13 @@ static void cppc_check_hisi_workaround(void) for (i = 0; i < ARRAY_SIZE(wa_info); i++) { if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) && !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) && - wa_info[i].oem_revision == tbl->oem_revision) + wa_info[i].oem_revision == tbl->oem_revision) { apply_hisi_workaround = true; + break; + } } + + acpi_put_table(tbl); } /* Callback function used to retrieve the max frequency from DMI */ diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index f1d170dcf4d3..f2ae9cd455c1 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ 
b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -109,6 +109,7 @@ static const struct of_device_id blacklist[] __initconst = { { .compatible = "fsl,imx8mq", }, { .compatible = "fsl,imx8mm", }, { .compatible = "fsl,imx8mn", }, + { .compatible = "fsl,imx8mp", }, { .compatible = "marvell,armadaxp", }, @@ -121,6 +122,8 @@ static const struct of_device_id blacklist[] __initconst = { { .compatible = "mediatek,mt8176", }, { .compatible = "mediatek,mt8183", }, + { .compatible = "nvidia,tegra20", }, + { .compatible = "nvidia,tegra30", }, { .compatible = "nvidia,tegra124", }, { .compatible = "nvidia,tegra210", }, diff --git a/drivers/cpufreq/imx-cpufreq-dt.c b/drivers/cpufreq/imx-cpufreq-dt.c index 85a6efd6b68f..6cb8193421ea 100644 --- a/drivers/cpufreq/imx-cpufreq-dt.c +++ b/drivers/cpufreq/imx-cpufreq-dt.c @@ -35,7 +35,8 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev) if (ret) return ret; - if (of_machine_is_compatible("fsl,imx8mn")) + if (of_machine_is_compatible("fsl,imx8mn") || + of_machine_is_compatible("fsl,imx8mp")) speed_grade = (cell_value & IMX8MN_OCOTP_CFG3_SPEED_GRADE_MASK) >> OCOTP_CFG3_SPEED_GRADE_SHIFT; else @@ -54,7 +55,8 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev) if (of_machine_is_compatible("fsl,imx8mm") || of_machine_is_compatible("fsl,imx8mq")) speed_grade = 1; - if (of_machine_is_compatible("fsl,imx8mn")) + if (of_machine_is_compatible("fsl,imx8mn") || + of_machine_is_compatible("fsl,imx8mp")) speed_grade = 0xb; } diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index d2fa3e9ccd97..ad6a17cf0011 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -172,7 +172,7 @@ struct vid_data { /** * struct global_params - Global parameters, mostly tunable via sysfs. * @no_turbo: Whether or not to use turbo P-states. - * @turbo_disabled: Whethet or not turbo P-states are available at all, + * @turbo_disabled: Whether or not turbo P-states are available at all, * based on the MSR_IA32_MISC_ENABLE value and whether or * not the maximum reported turbo P-state is different from * the maximum reported non-turbo one. 
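Several cpufreq fixes in this range — brcmstb-avs above, s3c2416 and s5pv210 below — share one theme: cpufreq_cpu_get() returns a reference-counted policy that may be NULL, so callers must check the result and drop the reference with cpufreq_cpu_put() once done. A sketch of the corrected pattern; the function name and its error handling are illustrative:

#include <linux/cpufreq.h>
#include <linux/errno.h>

/* Look up CPU0's policy, request a target frequency, drop the ref. */
static int example_set_sleep_freq(unsigned int target_khz)
{
	struct cpufreq_policy *policy;
	int ret;

	policy = cpufreq_cpu_get(0);
	if (!policy)		/* no policy registered for CPU0 */
		return -ENODEV;

	ret = cpufreq_driver_target(policy, target_khz, 0);

	/* Balance the reference taken by cpufreq_cpu_get(). */
	cpufreq_cpu_put(policy);

	return ret;
}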
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c index cb74bdc5baaa..70ad8fe1d78b 100644 --- a/drivers/cpufreq/kirkwood-cpufreq.c +++ b/drivers/cpufreq/kirkwood-cpufreq.c @@ -102,13 +102,11 @@ static struct cpufreq_driver kirkwood_cpufreq_driver = { static int kirkwood_cpufreq_probe(struct platform_device *pdev) { struct device_node *np; - struct resource *res; int err; priv.dev = &pdev->dev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv.base = devm_ioremap_resource(&pdev->dev, res); + priv.base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv.base)) return PTR_ERR(priv.base); diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c index e9caa9586982..909f40fbcde2 100644 --- a/drivers/cpufreq/loongson2_cpufreq.c +++ b/drivers/cpufreq/loongson2_cpufreq.c @@ -144,9 +144,11 @@ static void loongson2_cpu_wait(void) u32 cpu_freq; spin_lock_irqsave(&loongson2_wait_lock, flags); - cpu_freq = LOONGSON_CHIPCFG(0); - LOONGSON_CHIPCFG(0) &= ~0x7; /* Put CPU into wait mode */ - LOONGSON_CHIPCFG(0) = cpu_freq; /* Restore CPU state */ + cpu_freq = readl(LOONGSON_CHIPCFG); + /* Put CPU into wait mode */ + writel(readl(LOONGSON_CHIPCFG) & ~0x7, LOONGSON_CHIPCFG); + /* Restore CPU state */ + writel(cpu_freq, LOONGSON_CHIPCFG); spin_unlock_irqrestore(&loongson2_wait_lock, flags); local_irq_enable(); } diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c index fdc767fdbe6a..89d4fa8b65e9 100644 --- a/drivers/cpufreq/pcc-cpufreq.c +++ b/drivers/cpufreq/pcc-cpufreq.c @@ -445,7 +445,7 @@ static int __init pcc_cpufreq_probe(void) goto out_free; } - pcch_virt_addr = ioremap_nocache(mem_resource->minimum, + pcch_virt_addr = ioremap(mem_resource->minimum, mem_resource->address_length); if (pcch_virt_addr == NULL) { pr_debug("probe: could not map shared mem region\n"); diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c index 106910351c41..5c221bc90210 100644 --- a/drivers/cpufreq/s3c2416-cpufreq.c +++ b/drivers/cpufreq/s3c2416-cpufreq.c @@ -304,6 +304,7 @@ static int s3c2416_cpufreq_reboot_notifier_evt(struct notifier_block *this, { struct s3c2416_data *s3c_freq = &s3c2416_cpufreq; int ret; + struct cpufreq_policy *policy; mutex_lock(&cpufreq_lock); @@ -318,7 +319,16 @@ static int s3c2416_cpufreq_reboot_notifier_evt(struct notifier_block *this, */ if (s3c_freq->is_dvs) { pr_debug("cpufreq: leave dvs on reboot\n"); - ret = cpufreq_driver_target(cpufreq_cpu_get(0), FREQ_SLEEP, 0); + + policy = cpufreq_cpu_get(0); + if (!policy) { + pr_debug("cpufreq: get no policy for cpu0\n"); + return NOTIFY_BAD; + } + + ret = cpufreq_driver_target(policy, FREQ_SLEEP, 0); + cpufreq_cpu_put(policy); + if (ret < 0) return NOTIFY_BAD; } diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c index 5d10030f2560..e84281e2561d 100644 --- a/drivers/cpufreq/s5pv210-cpufreq.c +++ b/drivers/cpufreq/s5pv210-cpufreq.c @@ -555,8 +555,17 @@ static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this, unsigned long event, void *ptr) { int ret; + struct cpufreq_policy *policy; + + policy = cpufreq_cpu_get(0); + if (!policy) { + pr_debug("cpufreq: get no policy for cpu0\n"); + return NOTIFY_BAD; + } + + ret = cpufreq_driver_target(policy, SLEEP_FREQ, 0); + cpufreq_cpu_put(policy); - ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0); if (ret < 0) return NOTIFY_BAD; diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c 
index bcecb068b51b..2e233ad72758 100644 --- a/drivers/cpufreq/tegra186-cpufreq.c +++ b/drivers/cpufreq/tegra186-cpufreq.c @@ -187,7 +187,6 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev) { struct tegra186_cpufreq_data *data; struct tegra_bpmp *bpmp; - struct resource *res; unsigned int i = 0, err; data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); @@ -205,8 +204,7 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev) if (IS_ERR(bpmp)) return PTR_ERR(bpmp); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - data->regs = devm_ioremap_resource(&pdev->dev, res); + data->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(data->regs)) { err = PTR_ERR(data->regs); goto put_bpmp; diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c index 506e3f2bf53a..83c85d3d67e3 100644 --- a/drivers/cpufreq/vexpress-spc-cpufreq.c +++ b/drivers/cpufreq/vexpress-spc-cpufreq.c @@ -434,7 +434,7 @@ static int ve_spc_cpufreq_init(struct cpufreq_policy *policy) if (cur_cluster < MAX_CLUSTERS) { int cpu; - cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu)); + dev_pm_opp_get_sharing_cpus(cpu_dev, policy->cpus); for_each_cpu(cpu, policy->cpus) per_cpu(physical_cluster, cpu) = cur_cluster; diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm index a224d33dda7f..62272ecfa771 100644 --- a/drivers/cpuidle/Kconfig.arm +++ b/drivers/cpuidle/Kconfig.arm @@ -25,7 +25,7 @@ config ARM_PSCI_CPUIDLE config ARM_BIG_LITTLE_CPUIDLE bool "Support for ARM big.LITTLE processors" - depends on ARCH_VEXPRESS_TC2_PM || ARCH_EXYNOS + depends on ARCH_VEXPRESS_TC2_PM || ARCH_EXYNOS || COMPILE_TEST depends on MCPM && !ARM64 select ARM_CPU_SUSPEND select CPU_IDLE_MULTIPLE_DRIVERS @@ -51,13 +51,13 @@ config ARM_HIGHBANK_CPUIDLE config ARM_KIRKWOOD_CPUIDLE bool "CPU Idle Driver for Marvell Kirkwood SoCs" - depends on MACH_KIRKWOOD && !ARM64 + depends on (MACH_KIRKWOOD || COMPILE_TEST) && !ARM64 help This adds the CPU Idle driver for Marvell Kirkwood SoCs. config ARM_ZYNQ_CPUIDLE bool "CPU Idle Driver for Xilinx Zynq processors" - depends on ARCH_ZYNQ && !ARM64 + depends on (ARCH_ZYNQ || COMPILE_TEST) && !ARM64 help Select this to enable cpuidle on Xilinx Zynq processors. @@ -70,19 +70,19 @@ config ARM_U8500_CPUIDLE config ARM_AT91_CPUIDLE bool "Cpu Idle Driver for the AT91 processors" default y - depends on ARCH_AT91 && !ARM64 + depends on (ARCH_AT91 || COMPILE_TEST) && !ARM64 help Select this to enable cpuidle for AT91 processors. config ARM_EXYNOS_CPUIDLE bool "Cpu Idle Driver for the Exynos processors" - depends on ARCH_EXYNOS && !ARM64 + depends on (ARCH_EXYNOS || COMPILE_TEST) && !ARM64 select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP help Select this to enable cpuidle for Exynos processors. config ARM_MVEBU_V7_CPUIDLE bool "CPU Idle Driver for mvebu v7 family processors" - depends on ARCH_MVEBU && !ARM64 + depends on (ARCH_MVEBU || COMPILE_TEST) && !ARM64 help Select this to enable cpuidle on Armada 370, 38x and XP processors. 
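The vexpress-spc change above stops deriving policy->cpus from the package topology and instead asks the OPP layer which CPUs actually share the clock/voltage rails, the authoritative source once the OPP table carries sharing information. A hedged sketch of an init callback built on that API; example_cpufreq_init is illustrative:

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/pm_opp.h>

static int example_cpufreq_init(struct cpufreq_policy *policy)
{
	struct device *cpu_dev = get_cpu_device(policy->cpu);

	if (!cpu_dev)
		return -ENODEV;

	/*
	 * Fill policy->cpus with every CPU sharing this CPU's OPP table
	 * instead of assuming the core topology matches the clock domain.
	 */
	return dev_pm_opp_get_sharing_cpus(cpu_dev, policy->cpus);
}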
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c index b607278df25b..04003b90dc49 100644 --- a/drivers/cpuidle/coupled.c +++ b/drivers/cpuidle/coupled.c @@ -89,6 +89,7 @@ * @coupled_cpus: mask of cpus that are part of the coupled set * @requested_state: array of requested states for cpus in the coupled set * @ready_waiting_counts: combined count of cpus in ready or waiting loops + * @abort_barrier: synchronisation point for abort cases * @online_count: count of cpus that are online * @refcnt: reference count of cpuidle devices that are using this struct * @prevent: flag to prevent coupled idle while a cpu is hotplugging @@ -338,7 +339,7 @@ static void cpuidle_coupled_poke(int cpu) /** * cpuidle_coupled_poke_others - wake up all other cpus that may be waiting - * @dev: struct cpuidle_device for this cpu + * @this_cpu: target cpu * @coupled: the struct coupled that contains the current cpu * * Calls cpuidle_coupled_poke on all other online cpus. @@ -355,7 +356,7 @@ static void cpuidle_coupled_poke_others(int this_cpu, /** * cpuidle_coupled_set_waiting - mark this cpu as in the wait loop - * @dev: struct cpuidle_device for this cpu + * @cpu: target cpu * @coupled: the struct coupled that contains the current cpu * @next_state: the index in drv->states of the requested state for this cpu * @@ -376,7 +377,7 @@ static int cpuidle_coupled_set_waiting(int cpu, /** * cpuidle_coupled_set_not_waiting - mark this cpu as leaving the wait loop - * @dev: struct cpuidle_device for this cpu + * @cpu: target cpu * @coupled: the struct coupled that contains the current cpu * * Removes the requested idle state for the specified cpuidle device. @@ -412,7 +413,7 @@ static void cpuidle_coupled_set_done(int cpu, struct cpuidle_coupled *coupled) /** * cpuidle_coupled_clear_pokes - spin until the poke interrupt is processed - * @cpu - this cpu + * @cpu: this cpu * * Turns on interrupts and spins until any outstanding poke interrupts have * been processed and the poke bit has been cleared. 
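The clps711x and kirkwood cpuidle probes below, like the em_sti, kirkwood-cpufreq, tegra186 and timer-ti-dm hunks earlier, collapse the platform_get_resource() + devm_ioremap_resource() pair into devm_platform_ioremap_resource(), which performs the same lookup and mapping internally. A before/after sketch with an illustrative example_probe():

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *base;

static int example_probe(struct platform_device *pdev)
{
	/*
	 * Old two-step form:
	 *   struct resource *res;
	 *
	 *   res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	 *   base = devm_ioremap_resource(&pdev->dev, res);
	 *
	 * The helper below does the lookup and the mapping in one call:
	 */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}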
diff --git a/drivers/cpuidle/cpuidle-clps711x.c b/drivers/cpuidle/cpuidle-clps711x.c index 6e36740f5719..fc22c59b6c73 100644 --- a/drivers/cpuidle/cpuidle-clps711x.c +++ b/drivers/cpuidle/cpuidle-clps711x.c @@ -37,10 +37,7 @@ static struct cpuidle_driver clps711x_idle_driver = { static int __init clps711x_cpuidle_probe(struct platform_device *pdev) { - struct resource *res; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - clps711x_halt = devm_ioremap_resource(&pdev->dev, res); + clps711x_halt = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(clps711x_halt)) return PTR_ERR(clps711x_halt); diff --git a/drivers/cpuidle/cpuidle-kirkwood.c b/drivers/cpuidle/cpuidle-kirkwood.c index d23d8f468c12..511c4f46027a 100644 --- a/drivers/cpuidle/cpuidle-kirkwood.c +++ b/drivers/cpuidle/cpuidle-kirkwood.c @@ -55,10 +55,7 @@ static struct cpuidle_driver kirkwood_idle_driver = { /* Initialize CPU idle by registering the idle states */ static int kirkwood_cpuidle_probe(struct platform_device *pdev) { - struct resource *res; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ddr_operation_base = devm_ioremap_resource(&pdev->dev, res); + ddr_operation_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ddr_operation_base)) return PTR_ERR(ddr_operation_base); diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 0005be5ea2b4..de81298051b3 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -121,6 +121,9 @@ void cpuidle_use_deepest_state(u64 latency_limit_ns) * cpuidle_find_deepest_state - Find the deepest available idle state. * @drv: cpuidle driver for the given CPU. * @dev: cpuidle device for the given CPU. + * @latency_limit_ns: Idle state exit latency limit + * + * Return: the index of the deepest available idle state. 
*/ int cpuidle_find_deepest_state(struct cpuidle_driver *drv, struct cpuidle_device *dev, @@ -381,7 +384,8 @@ u64 cpuidle_poll_time(struct cpuidle_driver *drv, if (dev->states_usage[i].disable) continue; - limit_ns = (u64)drv->states[i].target_residency_ns; + limit_ns = drv->states[i].target_residency_ns; + break; } dev->poll_limit_ns = limit_ns; @@ -571,10 +575,14 @@ static int __cpuidle_register_device(struct cpuidle_device *dev) if (!try_module_get(drv->owner)) return -EINVAL; - for (i = 0; i < drv->state_count; i++) + for (i = 0; i < drv->state_count; i++) { if (drv->states[i].flags & CPUIDLE_FLAG_UNUSABLE) dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_DRIVER; + if (drv->states[i].flags & CPUIDLE_FLAG_OFF) + dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_USER; + } + per_cpu(cpuidle_devices, dev->cpu) = dev; list_add(&dev->device_list, &cpuidle_detected_devices); diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c index c76423aaef4d..4070e573bf43 100644 --- a/drivers/cpuidle/driver.c +++ b/drivers/cpuidle/driver.c @@ -155,8 +155,6 @@ static void __cpuidle_driver_init(struct cpuidle_driver *drv) { int i; - drv->refcnt = 0; - /* * Use all possible CPUs as the default, because if the kernel boots * with some CPUs offline and then we online one of them, the CPU @@ -240,9 +238,6 @@ static int __cpuidle_register_driver(struct cpuidle_driver *drv) */ static void __cpuidle_unregister_driver(struct cpuidle_driver *drv) { - if (WARN_ON(drv->refcnt > 0)) - return; - if (drv->bctimer) { drv->bctimer = 0; on_each_cpu_mask(drv->cpumask, cpuidle_setup_broadcast_timer, @@ -350,47 +345,6 @@ struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev) EXPORT_SYMBOL_GPL(cpuidle_get_cpu_driver); /** - * cpuidle_driver_ref - get a reference to the driver. - * - * Increment the reference counter of the cpuidle driver associated with - * the current CPU. - * - * Returns a pointer to the driver, or NULL if the current CPU has no driver. - */ -struct cpuidle_driver *cpuidle_driver_ref(void) -{ - struct cpuidle_driver *drv; - - spin_lock(&cpuidle_driver_lock); - - drv = cpuidle_get_driver(); - if (drv) - drv->refcnt++; - - spin_unlock(&cpuidle_driver_lock); - return drv; -} - -/** - * cpuidle_driver_unref - puts down the refcount for the driver - * - * Decrement the reference counter of the cpuidle driver associated with - * the current CPU. 
- */ -void cpuidle_driver_unref(void) -{ - struct cpuidle_driver *drv; - - spin_lock(&cpuidle_driver_lock); - - drv = cpuidle_get_driver(); - if (drv && !WARN_ON(drv->refcnt <= 0)) - drv->refcnt--; - - spin_unlock(&cpuidle_driver_lock); -} - -/** * cpuidle_driver_state_disabled - Disable or enable an idle state * @drv: cpuidle driver owning the state * @idx: State index @@ -403,6 +357,13 @@ void cpuidle_driver_state_disabled(struct cpuidle_driver *drv, int idx, mutex_lock(&cpuidle_lock); + spin_lock(&cpuidle_driver_lock); + + if (!drv->cpumask) { + drv->states[idx].flags |= CPUIDLE_FLAG_UNUSABLE; + goto unlock; + } + for_each_cpu(cpu, drv->cpumask) { struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu); @@ -415,5 +376,8 @@ void cpuidle_driver_state_disabled(struct cpuidle_driver *drv, int idx, dev->states_usage[idx].disable &= ~CPUIDLE_STATE_DISABLED_BY_DRIVER; } +unlock: + spin_unlock(&cpuidle_driver_lock); + mutex_unlock(&cpuidle_lock); } diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c index de7e706efd46..6deaaf5f05b5 100644 --- a/drivers/cpuidle/governors/teo.c +++ b/drivers/cpuidle/governors/teo.c @@ -198,7 +198,7 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev) * pattern detection. */ cpu_data->intervals[cpu_data->interval_idx++] = measured_ns; - if (cpu_data->interval_idx > INTERVALS) + if (cpu_data->interval_idx >= INTERVALS) cpu_data->interval_idx = 0; } diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c index 38ef770be90d..cdeedbf02646 100644 --- a/drivers/cpuidle/sysfs.c +++ b/drivers/cpuidle/sysfs.c @@ -142,6 +142,7 @@ static struct attribute_group cpuidle_attr_group = { /** * cpuidle_add_interface - add CPU global sysfs attributes + * @dev: the target device */ int cpuidle_add_interface(struct device *dev) { @@ -153,6 +154,7 @@ int cpuidle_add_interface(struct device *dev) /** * cpuidle_remove_interface - remove CPU global sysfs attributes + * @dev: the target device */ void cpuidle_remove_interface(struct device *dev) { @@ -327,6 +329,14 @@ static ssize_t store_state_disable(struct cpuidle_state *state, return size; } +static ssize_t show_state_default_status(struct cpuidle_state *state, + struct cpuidle_state_usage *state_usage, + char *buf) +{ + return sprintf(buf, "%s\n", + state->flags & CPUIDLE_FLAG_OFF ? 
"disabled" : "enabled"); +} + define_one_state_ro(name, show_state_name); define_one_state_ro(desc, show_state_desc); define_one_state_ro(latency, show_state_exit_latency); @@ -337,6 +347,7 @@ define_one_state_ro(time, show_state_time); define_one_state_rw(disable, show_state_disable, store_state_disable); define_one_state_ro(above, show_state_above); define_one_state_ro(below, show_state_below); +define_one_state_ro(default_status, show_state_default_status); static struct attribute *cpuidle_state_default_attrs[] = { &attr_name.attr, @@ -349,6 +360,7 @@ static struct attribute *cpuidle_state_default_attrs[] = { &attr_disable.attr, &attr_above.attr, &attr_below.attr, + &attr_default_status.attr, NULL }; @@ -615,7 +627,7 @@ static struct kobj_type ktype_driver_cpuidle = { /** * cpuidle_add_driver_sysfs - adds the driver name sysfs attribute - * @device: the target device + * @dev: the target device */ static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev) { @@ -646,7 +658,7 @@ static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev) /** * cpuidle_remove_driver_sysfs - removes the driver name sysfs attribute - * @device: the target device + * @dev: the target device */ static void cpuidle_remove_driver_sysfs(struct cpuidle_device *dev) { diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c index 4e7323884ae3..354836468c5d 100644 --- a/drivers/crypto/hifn_795x.c +++ b/drivers/crypto/hifn_795x.c @@ -2507,7 +2507,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) addr = pci_resource_start(pdev, i); size = pci_resource_len(pdev, i); - dev->bar[i] = ioremap_nocache(addr, size); + dev->bar[i] = ioremap(addr, size); if (!dev->bar[i]) { err = -ENOMEM; goto err_out_unmap_bars; diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig index defe1d438710..0b1df12e0f21 100644 --- a/drivers/devfreq/Kconfig +++ b/drivers/devfreq/Kconfig @@ -77,13 +77,12 @@ config DEVFREQ_GOV_PASSIVE comment "DEVFREQ Drivers" config ARM_EXYNOS_BUS_DEVFREQ - tristate "ARM EXYNOS Generic Memory Bus DEVFREQ Driver" + tristate "ARM Exynos Generic Memory Bus DEVFREQ Driver" depends on ARCH_EXYNOS || COMPILE_TEST select DEVFREQ_GOV_SIMPLE_ONDEMAND select DEVFREQ_GOV_PASSIVE select DEVFREQ_EVENT_EXYNOS_PPMU select PM_DEVFREQ_EVENT - select PM_OPP help This adds the common DEVFREQ driver for Exynos Memory bus. Exynos Memory bus has one more group of memory bus (e.g, MIF and INT block). @@ -92,13 +91,23 @@ config ARM_EXYNOS_BUS_DEVFREQ and adjusts the operating frequencies and voltages with OPP support. This does not yet operate with optimal voltages. +config ARM_IMX8M_DDRC_DEVFREQ + tristate "i.MX8M DDRC DEVFREQ Driver" + depends on (ARCH_MXC && HAVE_ARM_SMCCC) || \ + (COMPILE_TEST && HAVE_ARM_SMCCC) + select DEVFREQ_GOV_SIMPLE_ONDEMAND + select DEVFREQ_GOV_USERSPACE + help + This adds the DEVFREQ driver for the i.MX8M DDR Controller. It allows + adjusting DRAM frequency. + config ARM_TEGRA_DEVFREQ tristate "NVIDIA Tegra30/114/124/210 DEVFREQ Driver" depends on ARCH_TEGRA_3x_SOC || ARCH_TEGRA_114_SOC || \ ARCH_TEGRA_132_SOC || ARCH_TEGRA_124_SOC || \ ARCH_TEGRA_210_SOC || \ COMPILE_TEST - select PM_OPP + depends on COMMON_CLK help This adds the DEVFREQ driver for the Tegra family of SoCs. 
It reads ACTMON counters of memory controllers and adjusts the @@ -109,7 +118,6 @@ config ARM_TEGRA20_DEVFREQ depends on (TEGRA_MC && TEGRA20_EMC) || COMPILE_TEST depends on COMMON_CLK select DEVFREQ_GOV_SIMPLE_ONDEMAND - select PM_OPP help This adds the DEVFREQ driver for the Tegra20 family of SoCs. It reads Memory Controller counters and adjusts the operating @@ -117,15 +125,15 @@ config ARM_TEGRA20_DEVFREQ config ARM_RK3399_DMC_DEVFREQ tristate "ARM RK3399 DMC DEVFREQ Driver" - depends on ARCH_ROCKCHIP + depends on (ARCH_ROCKCHIP && HAVE_ARM_SMCCC) || \ + (COMPILE_TEST && HAVE_ARM_SMCCC) select DEVFREQ_EVENT_ROCKCHIP_DFI select DEVFREQ_GOV_SIMPLE_ONDEMAND select PM_DEVFREQ_EVENT - select PM_OPP help - This adds the DEVFREQ driver for the RK3399 DMC(Dynamic Memory Controller). - It sets the frequency for the memory controller and reads the usage counts - from hardware. + This adds the DEVFREQ driver for the RK3399 DMC(Dynamic Memory Controller). + It sets the frequency for the memory controller and reads the usage counts + from hardware. source "drivers/devfreq/event/Kconfig" diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile index 338ae8440db6..3eb4d5e6635c 100644 --- a/drivers/devfreq/Makefile +++ b/drivers/devfreq/Makefile @@ -9,6 +9,7 @@ obj-$(CONFIG_DEVFREQ_GOV_PASSIVE) += governor_passive.o # DEVFREQ Drivers obj-$(CONFIG_ARM_EXYNOS_BUS_DEVFREQ) += exynos-bus.o +obj-$(CONFIG_ARM_IMX8M_DDRC_DEVFREQ) += imx8m-ddrc.o obj-$(CONFIG_ARM_RK3399_DMC_DEVFREQ) += rk3399_dmc.o obj-$(CONFIG_ARM_TEGRA_DEVFREQ) += tegra30-devfreq.o obj-$(CONFIG_ARM_TEGRA20_DEVFREQ) += tegra20-devfreq.o diff --git a/drivers/devfreq/devfreq-event.c b/drivers/devfreq/devfreq-event.c index 3dc5fd6065a3..8c31b0f2e28f 100644 --- a/drivers/devfreq/devfreq-event.c +++ b/drivers/devfreq/devfreq-event.c @@ -346,9 +346,9 @@ EXPORT_SYMBOL_GPL(devfreq_event_add_edev); /** * devfreq_event_remove_edev() - Remove the devfreq-event device registered. - * @dev : the devfreq-event device + * @edev : the devfreq-event device * - * Note that this function remove the registered devfreq-event device. + * Note that this function removes the registered devfreq-event device. */ int devfreq_event_remove_edev(struct devfreq_event_dev *edev) { diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index 425149e8bab0..cceee8bc3c2f 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -10,6 +10,7 @@ #include <linux/kernel.h> #include <linux/kmod.h> #include <linux/sched.h> +#include <linux/debugfs.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/init.h> @@ -24,12 +25,16 @@ #include <linux/printk.h> #include <linux/hrtimer.h> #include <linux/of.h> +#include <linux/pm_qos.h> #include "governor.h" #define CREATE_TRACE_POINTS #include <trace/events/devfreq.h> +#define HZ_PER_KHZ 1000 + static struct class *devfreq_class; +static struct dentry *devfreq_debugfs; /* * devfreq core provides delayed work based load monitoring helper @@ -99,6 +104,54 @@ static unsigned long find_available_max_freq(struct devfreq *devfreq) } /** + * get_freq_range() - Get the current freq range + * @devfreq: the devfreq instance + * @min_freq: the min frequency + * @max_freq: the max frequency + * + * This takes into consideration all constraints. 
+ */ +static void get_freq_range(struct devfreq *devfreq, + unsigned long *min_freq, + unsigned long *max_freq) +{ + unsigned long *freq_table = devfreq->profile->freq_table; + s32 qos_min_freq, qos_max_freq; + + lockdep_assert_held(&devfreq->lock); + + /* + * Initialize minimum/maximum frequency from freq table. + * The devfreq drivers can initialize this in either ascending or + * descending order and devfreq core supports both. + */ + if (freq_table[0] < freq_table[devfreq->profile->max_state - 1]) { + *min_freq = freq_table[0]; + *max_freq = freq_table[devfreq->profile->max_state - 1]; + } else { + *min_freq = freq_table[devfreq->profile->max_state - 1]; + *max_freq = freq_table[0]; + } + + /* Apply constraints from PM QoS */ + qos_min_freq = dev_pm_qos_read_value(devfreq->dev.parent, + DEV_PM_QOS_MIN_FREQUENCY); + qos_max_freq = dev_pm_qos_read_value(devfreq->dev.parent, + DEV_PM_QOS_MAX_FREQUENCY); + *min_freq = max(*min_freq, (unsigned long)HZ_PER_KHZ * qos_min_freq); + if (qos_max_freq != PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE) + *max_freq = min(*max_freq, + (unsigned long)HZ_PER_KHZ * qos_max_freq); + + /* Apply constraints from OPP interface */ + *min_freq = max(*min_freq, devfreq->scaling_min_freq); + *max_freq = min(*max_freq, devfreq->scaling_max_freq); + + if (*min_freq > *max_freq) + *min_freq = *max_freq; +} + +/** * devfreq_get_freq_level() - Lookup freq_table for the frequency * @devfreq: the devfreq instance * @freq: the target frequency @@ -158,10 +211,10 @@ static int set_freq_table(struct devfreq *devfreq) int devfreq_update_status(struct devfreq *devfreq, unsigned long freq) { int lev, prev_lev, ret = 0; - unsigned long cur_time; + u64 cur_time; lockdep_assert_held(&devfreq->lock); - cur_time = jiffies; + cur_time = get_jiffies_64(); /* Immediately exit if previous_freq is not initialized yet. */ if (!devfreq->previous_freq) @@ -173,8 +226,8 @@ int devfreq_update_status(struct devfreq *devfreq, unsigned long freq) goto out; } - devfreq->time_in_state[prev_lev] += - cur_time - devfreq->last_stat_updated; + devfreq->stats.time_in_state[prev_lev] += + cur_time - devfreq->stats.last_update; lev = devfreq_get_freq_level(devfreq, freq); if (lev < 0) { @@ -183,13 +236,13 @@ int devfreq_update_status(struct devfreq *devfreq, unsigned long freq) } if (lev != prev_lev) { - devfreq->trans_table[(prev_lev * - devfreq->profile->max_state) + lev]++; - devfreq->total_trans++; + devfreq->stats.trans_table[ + (prev_lev * devfreq->profile->max_state) + lev]++; + devfreq->stats.total_trans++; } out: - devfreq->last_stat_updated = cur_time; + devfreq->stats.last_update = cur_time; return ret; } EXPORT_SYMBOL(devfreq_update_status); @@ -351,16 +404,7 @@ int update_devfreq(struct devfreq *devfreq) err = devfreq->governor->get_target_freq(devfreq, &freq); if (err) return err; - - /* - * Adjust the frequency with user freq, QoS and available freq. 
- * - * List from the highest priority - * max_freq - * min_freq - */ - max_freq = min(devfreq->scaling_max_freq, devfreq->max_freq); - min_freq = max(devfreq->scaling_min_freq, devfreq->min_freq); + get_freq_range(devfreq, &min_freq, &max_freq); if (freq < min_freq) { freq = min_freq; @@ -493,7 +537,7 @@ void devfreq_monitor_resume(struct devfreq *devfreq) msecs_to_jiffies(devfreq->profile->polling_ms)); out_update: - devfreq->last_stat_updated = jiffies; + devfreq->stats.last_update = get_jiffies_64(); devfreq->stop_polling = false; if (devfreq->profile->get_cur_freq && @@ -568,26 +612,69 @@ static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type, void *devp) { struct devfreq *devfreq = container_of(nb, struct devfreq, nb); - int ret; + int err = -EINVAL; mutex_lock(&devfreq->lock); devfreq->scaling_min_freq = find_available_min_freq(devfreq); - if (!devfreq->scaling_min_freq) { - mutex_unlock(&devfreq->lock); - return -EINVAL; - } + if (!devfreq->scaling_min_freq) + goto out; devfreq->scaling_max_freq = find_available_max_freq(devfreq); if (!devfreq->scaling_max_freq) { - mutex_unlock(&devfreq->lock); - return -EINVAL; + devfreq->scaling_max_freq = ULONG_MAX; + goto out; } - ret = update_devfreq(devfreq); + err = update_devfreq(devfreq); + +out: mutex_unlock(&devfreq->lock); + if (err) + dev_err(devfreq->dev.parent, + "failed to update frequency from OPP notifier (%d)\n", + err); - return ret; + return NOTIFY_OK; +} + +/** + * qos_notifier_call() - Common handler for QoS constraints. + * @devfreq: the devfreq instance. + */ +static int qos_notifier_call(struct devfreq *devfreq) +{ + int err; + + mutex_lock(&devfreq->lock); + err = update_devfreq(devfreq); + mutex_unlock(&devfreq->lock); + if (err) + dev_err(devfreq->dev.parent, + "failed to update frequency from PM QoS (%d)\n", + err); + + return NOTIFY_OK; +} + +/** + * qos_min_notifier_call() - Callback for QoS min_freq changes. + * @nb: Should be devfreq->nb_min + */ +static int qos_min_notifier_call(struct notifier_block *nb, + unsigned long val, void *ptr) +{ + return qos_notifier_call(container_of(nb, struct devfreq, nb_min)); +} + +/** + * qos_max_notifier_call() - Callback for QoS max_freq changes. 
+ * @nb: Should be devfreq->nb_max + */ +static int qos_max_notifier_call(struct notifier_block *nb, + unsigned long val, void *ptr) +{ + return qos_notifier_call(container_of(nb, struct devfreq, nb_max)); } /** @@ -599,16 +686,36 @@ static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type, static void devfreq_dev_release(struct device *dev) { struct devfreq *devfreq = to_devfreq(dev); + int err; mutex_lock(&devfreq_list_lock); - if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) { - mutex_unlock(&devfreq_list_lock); - dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n"); - return; - } list_del(&devfreq->node); mutex_unlock(&devfreq_list_lock); + err = dev_pm_qos_remove_notifier(devfreq->dev.parent, &devfreq->nb_max, + DEV_PM_QOS_MAX_FREQUENCY); + if (err && err != -ENOENT) + dev_warn(dev->parent, + "Failed to remove max_freq notifier: %d\n", err); + err = dev_pm_qos_remove_notifier(devfreq->dev.parent, &devfreq->nb_min, + DEV_PM_QOS_MIN_FREQUENCY); + if (err && err != -ENOENT) + dev_warn(dev->parent, + "Failed to remove min_freq notifier: %d\n", err); + + if (dev_pm_qos_request_active(&devfreq->user_max_freq_req)) { + err = dev_pm_qos_remove_request(&devfreq->user_max_freq_req); + if (err) + dev_warn(dev->parent, + "Failed to remove max_freq request: %d\n", err); + } + if (dev_pm_qos_request_active(&devfreq->user_min_freq_req)) { + err = dev_pm_qos_remove_request(&devfreq->user_min_freq_req); + if (err) + dev_warn(dev->parent, + "Failed to remove min_freq request: %d\n", err); + } + if (devfreq->profile->exit) devfreq->profile->exit(devfreq->dev.parent); @@ -660,6 +767,7 @@ struct devfreq *devfreq_add_device(struct device *dev, devfreq->dev.parent = dev; devfreq->dev.class = devfreq_class; devfreq->dev.release = devfreq_dev_release; + INIT_LIST_HEAD(&devfreq->node); devfreq->profile = profile; strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN); devfreq->previous_freq = profile->initial_freq; @@ -681,7 +789,6 @@ struct devfreq *devfreq_add_device(struct device *dev, err = -EINVAL; goto err_dev; } - devfreq->min_freq = devfreq->scaling_min_freq; devfreq->scaling_max_freq = find_available_max_freq(devfreq); if (!devfreq->scaling_max_freq) { @@ -689,7 +796,6 @@ struct devfreq *devfreq_add_device(struct device *dev, err = -EINVAL; goto err_dev; } - devfreq->max_freq = devfreq->scaling_max_freq; devfreq->suspend_freq = dev_pm_opp_get_suspend_opp_freq(dev); atomic_set(&devfreq->suspend_count, 0); @@ -703,33 +809,56 @@ struct devfreq *devfreq_add_device(struct device *dev, goto err_out; } - devfreq->trans_table = devm_kzalloc(&devfreq->dev, + devfreq->stats.trans_table = devm_kzalloc(&devfreq->dev, array3_size(sizeof(unsigned int), devfreq->profile->max_state, devfreq->profile->max_state), GFP_KERNEL); - if (!devfreq->trans_table) { + if (!devfreq->stats.trans_table) { mutex_unlock(&devfreq->lock); err = -ENOMEM; goto err_devfreq; } - devfreq->time_in_state = devm_kcalloc(&devfreq->dev, + devfreq->stats.time_in_state = devm_kcalloc(&devfreq->dev, devfreq->profile->max_state, - sizeof(unsigned long), + sizeof(*devfreq->stats.time_in_state), GFP_KERNEL); - if (!devfreq->time_in_state) { + if (!devfreq->stats.time_in_state) { mutex_unlock(&devfreq->lock); err = -ENOMEM; goto err_devfreq; } - devfreq->last_stat_updated = jiffies; + devfreq->stats.total_trans = 0; + devfreq->stats.last_update = get_jiffies_64(); srcu_init_notifier_head(&devfreq->transition_notifier_list); mutex_unlock(&devfreq->lock); + err = dev_pm_qos_add_request(dev, 
&devfreq->user_min_freq_req, + DEV_PM_QOS_MIN_FREQUENCY, 0); + if (err < 0) + goto err_devfreq; + err = dev_pm_qos_add_request(dev, &devfreq->user_max_freq_req, + DEV_PM_QOS_MAX_FREQUENCY, + PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE); + if (err < 0) + goto err_devfreq; + + devfreq->nb_min.notifier_call = qos_min_notifier_call; + err = dev_pm_qos_add_notifier(devfreq->dev.parent, &devfreq->nb_min, + DEV_PM_QOS_MIN_FREQUENCY); + if (err) + goto err_devfreq; + + devfreq->nb_max.notifier_call = qos_max_notifier_call; + err = dev_pm_qos_add_notifier(devfreq->dev.parent, &devfreq->nb_max, + DEV_PM_QOS_MAX_FREQUENCY); + if (err) + goto err_devfreq; + mutex_lock(&devfreq_list_lock); governor = try_then_request_governor(devfreq->governor_name); @@ -1133,6 +1262,14 @@ err_out: } EXPORT_SYMBOL(devfreq_remove_governor); +static ssize_t name_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct devfreq *devfreq = to_devfreq(dev); + return sprintf(buf, "%s\n", dev_name(devfreq->dev.parent)); +} +static DEVICE_ATTR_RO(name); + static ssize_t governor_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1303,42 +1440,39 @@ static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr, unsigned long value; int ret; + /* + * Protect against theoretical sysfs writes between + * device_add and dev_pm_qos_add_request + */ + if (!dev_pm_qos_request_active(&df->user_min_freq_req)) + return -EAGAIN; + ret = sscanf(buf, "%lu", &value); if (ret != 1) return -EINVAL; - mutex_lock(&df->lock); - - if (value) { - if (value > df->max_freq) { - ret = -EINVAL; - goto unlock; - } - } else { - unsigned long *freq_table = df->profile->freq_table; - - /* Get minimum frequency according to sorting order */ - if (freq_table[0] < freq_table[df->profile->max_state - 1]) - value = freq_table[0]; - else - value = freq_table[df->profile->max_state - 1]; - } + /* Round down to kHz for PM QoS */ + ret = dev_pm_qos_update_request(&df->user_min_freq_req, + value / HZ_PER_KHZ); + if (ret < 0) + return ret; - df->min_freq = value; - update_devfreq(df); - ret = count; -unlock: - mutex_unlock(&df->lock); - return ret; + return count; } static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr, char *buf) { struct devfreq *df = to_devfreq(dev); + unsigned long min_freq, max_freq; + + mutex_lock(&df->lock); + get_freq_range(df, &min_freq, &max_freq); + mutex_unlock(&df->lock); - return sprintf(buf, "%lu\n", max(df->scaling_min_freq, df->min_freq)); + return sprintf(buf, "%lu\n", min_freq); } +static DEVICE_ATTR_RW(min_freq); static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -1347,42 +1481,50 @@ static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr, unsigned long value; int ret; + /* + * Protect against theoretical sysfs writes between + * device_add and dev_pm_qos_add_request + */ + if (!dev_pm_qos_request_active(&df->user_max_freq_req)) + return -EINVAL; + ret = sscanf(buf, "%lu", &value); if (ret != 1) return -EINVAL; - mutex_lock(&df->lock); - - if (value) { - if (value < df->min_freq) { - ret = -EINVAL; - goto unlock; - } - } else { - unsigned long *freq_table = df->profile->freq_table; + /* + * PM QoS frequencies are in kHz so we need to convert. Convert by + * rounding upwards so that the acceptable interval never shrinks. 
+ * + * For example if the user writes "666666666" to sysfs this value will + * be converted to 666667 kHz and back to 666667000 Hz before an OPP + * lookup, this ensures that an OPP of 666666666Hz is still accepted. + * + * A value of zero means "no limit". + */ + if (value) + value = DIV_ROUND_UP(value, HZ_PER_KHZ); + else + value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE; - /* Get maximum frequency according to sorting order */ - if (freq_table[0] < freq_table[df->profile->max_state - 1]) - value = freq_table[df->profile->max_state - 1]; - else - value = freq_table[0]; - } + ret = dev_pm_qos_update_request(&df->user_max_freq_req, value); + if (ret < 0) + return ret; - df->max_freq = value; - update_devfreq(df); - ret = count; -unlock: - mutex_unlock(&df->lock); - return ret; + return count; } -static DEVICE_ATTR_RW(min_freq); static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr, char *buf) { struct devfreq *df = to_devfreq(dev); + unsigned long min_freq, max_freq; + + mutex_lock(&df->lock); + get_freq_range(df, &min_freq, &max_freq); + mutex_unlock(&df->lock); - return sprintf(buf, "%lu\n", min(df->scaling_max_freq, df->max_freq)); + return sprintf(buf, "%lu\n", max_freq); } static DEVICE_ATTR_RW(max_freq); @@ -1449,18 +1591,47 @@ static ssize_t trans_stat_show(struct device *dev, devfreq->profile->freq_table[i]); for (j = 0; j < max_state; j++) len += sprintf(buf + len, "%10u", - devfreq->trans_table[(i * max_state) + j]); - len += sprintf(buf + len, "%10u\n", - jiffies_to_msecs(devfreq->time_in_state[i])); + devfreq->stats.trans_table[(i * max_state) + j]); + + len += sprintf(buf + len, "%10llu\n", (u64) + jiffies64_to_msecs(devfreq->stats.time_in_state[i])); } len += sprintf(buf + len, "Total transition : %u\n", - devfreq->total_trans); + devfreq->stats.total_trans); return len; } -static DEVICE_ATTR_RO(trans_stat); + +static ssize_t trans_stat_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct devfreq *df = to_devfreq(dev); + int err, value; + + if (df->profile->max_state == 0) + return count; + + err = kstrtoint(buf, 10, &value); + if (err || value != 0) + return -EINVAL; + + mutex_lock(&df->lock); + memset(df->stats.time_in_state, 0, (df->profile->max_state * + sizeof(*df->stats.time_in_state))); + memset(df->stats.trans_table, 0, array3_size(sizeof(unsigned int), + df->profile->max_state, + df->profile->max_state)); + df->stats.total_trans = 0; + df->stats.last_update = get_jiffies_64(); + mutex_unlock(&df->lock); + + return count; +} +static DEVICE_ATTR_RW(trans_stat); static struct attribute *devfreq_attrs[] = { + &dev_attr_name.attr, &dev_attr_governor.attr, &dev_attr_available_governors.attr, &dev_attr_cur_freq.attr, @@ -1474,6 +1645,81 @@ static struct attribute *devfreq_attrs[] = { }; ATTRIBUTE_GROUPS(devfreq); +/** + * devfreq_summary_show() - Show the summary of the devfreq devices + * @s: seq_file instance to show the summary of devfreq devices + * @data: not used + * + * Show the summary of the devfreq devices via 'devfreq_summary' debugfs file. + * It helps that user can know the detailed information of the devfreq devices. + * + * Return 0 always because it shows the information without any data change. 
+ */ +static int devfreq_summary_show(struct seq_file *s, void *data) +{ + struct devfreq *devfreq; + struct devfreq *p_devfreq = NULL; + unsigned long cur_freq, min_freq, max_freq; + unsigned int polling_ms; + + seq_printf(s, "%-30s %-10s %-10s %-15s %10s %12s %12s %12s\n", + "dev_name", + "dev", + "parent_dev", + "governor", + "polling_ms", + "cur_freq_Hz", + "min_freq_Hz", + "max_freq_Hz"); + seq_printf(s, "%30s %10s %10s %15s %10s %12s %12s %12s\n", + "------------------------------", + "----------", + "----------", + "---------------", + "----------", + "------------", + "------------", + "------------"); + + mutex_lock(&devfreq_list_lock); + + list_for_each_entry_reverse(devfreq, &devfreq_list, node) { +#if IS_ENABLED(CONFIG_DEVFREQ_GOV_PASSIVE) + if (!strncmp(devfreq->governor_name, DEVFREQ_GOV_PASSIVE, + DEVFREQ_NAME_LEN)) { + struct devfreq_passive_data *data = devfreq->data; + + if (data) + p_devfreq = data->parent; + } else { + p_devfreq = NULL; + } +#endif + + mutex_lock(&devfreq->lock); + cur_freq = devfreq->previous_freq, + get_freq_range(devfreq, &min_freq, &max_freq); + polling_ms = devfreq->profile->polling_ms, + mutex_unlock(&devfreq->lock); + + seq_printf(s, + "%-30s %-10s %-10s %-15s %10d %12ld %12ld %12ld\n", + dev_name(devfreq->dev.parent), + dev_name(&devfreq->dev), + p_devfreq ? dev_name(&p_devfreq->dev) : "null", + devfreq->governor_name, + polling_ms, + cur_freq, + min_freq, + max_freq); + } + + mutex_unlock(&devfreq_list_lock); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(devfreq_summary); + static int __init devfreq_init(void) { devfreq_class = class_create(THIS_MODULE, "devfreq"); @@ -1490,6 +1736,11 @@ static int __init devfreq_init(void) } devfreq_class->dev_groups = devfreq_groups; + devfreq_debugfs = debugfs_create_dir("devfreq", NULL); + debugfs_create_file("devfreq_summary", 0444, + devfreq_debugfs, NULL, + &devfreq_summary_fops); + return 0; } subsys_initcall(devfreq_init); @@ -1683,7 +1934,7 @@ static void devm_devfreq_notifier_release(struct device *dev, void *res) /** * devm_devfreq_register_notifier() - - Resource-managed devfreq_register_notifier() + * - Resource-managed devfreq_register_notifier() * @dev: The devfreq user device. (parent of devfreq) * @devfreq: The devfreq object. * @nb: The notifier block to be unregistered. @@ -1719,7 +1970,7 @@ EXPORT_SYMBOL(devm_devfreq_register_notifier); /** * devm_devfreq_unregister_notifier() - - Resource-managed devfreq_unregister_notifier() + * - Resource-managed devfreq_unregister_notifier() * @dev: The devfreq user device. (parent of devfreq) * @devfreq: The devfreq object. * @nb: The notifier block to be unregistered. diff --git a/drivers/devfreq/event/Kconfig b/drivers/devfreq/event/Kconfig index cef2cf5347ca..878825372f6f 100644 --- a/drivers/devfreq/event/Kconfig +++ b/drivers/devfreq/event/Kconfig @@ -15,7 +15,7 @@ menuconfig PM_DEVFREQ_EVENT if PM_DEVFREQ_EVENT config DEVFREQ_EVENT_EXYNOS_NOCP - tristate "EXYNOS NoC (Network On Chip) Probe DEVFREQ event Driver" + tristate "Exynos NoC (Network On Chip) Probe DEVFREQ event Driver" depends on ARCH_EXYNOS || COMPILE_TEST select PM_OPP select REGMAP_MMIO @@ -24,7 +24,7 @@ config DEVFREQ_EVENT_EXYNOS_NOCP (Network on Chip) Probe counters to measure the bandwidth of AXI bus. 
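
Aside: a unit detail worth keeping in mind for the devfreq.c rework above: devfreq frequencies are in Hz while DEV_PM_QOS_MIN_FREQUENCY/DEV_PM_QOS_MAX_FREQUENCY requests are in kHz, so the sysfs stores round in the direction that can only widen the acceptable interval (min_freq rounds down, max_freq rounds up), exactly as the 666666666 Hz example in the max_freq_store() comment describes. The arithmetic in isolation, as a sketch rather than driver code:

    #include <linux/kernel.h>       /* DIV_ROUND_UP() */

    #define HZ_PER_KHZ      1000

    static void qos_unit_sketch(unsigned long hz)
    {
            /* e.g. hz = 666666666, written by the user via sysfs */
            unsigned long min_khz = hz / HZ_PER_KHZ;               /* 666666 kHz */
            unsigned long max_khz = DIV_ROUND_UP(hz, HZ_PER_KHZ);  /* 666667 kHz */

            /*
             * get_freq_range() later multiplies by HZ_PER_KHZ again,
             * yielding 666666000..666667000 Hz, so an OPP at exactly
             * 666666666 Hz still falls inside the requested interval.
             */
    }
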
config DEVFREQ_EVENT_EXYNOS_PPMU - tristate "EXYNOS PPMU (Platform Performance Monitoring Unit) DEVFREQ event Driver" + tristate "Exynos PPMU (Platform Performance Monitoring Unit) DEVFREQ event Driver" depends on ARCH_EXYNOS || COMPILE_TEST select PM_OPP help @@ -34,7 +34,7 @@ config DEVFREQ_EVENT_EXYNOS_PPMU config DEVFREQ_EVENT_ROCKCHIP_DFI tristate "ROCKCHIP DFI DEVFREQ event Driver" - depends on ARCH_ROCKCHIP + depends on ARCH_ROCKCHIP || COMPILE_TEST help This add the devfreq-event driver for Rockchip SoC. It provides DFI (DDR Monitor Module) driver to count ddr load. diff --git a/drivers/devfreq/event/exynos-nocp.c b/drivers/devfreq/event/exynos-nocp.c index 1c565926db9f..ccc531ee6938 100644 --- a/drivers/devfreq/event/exynos-nocp.c +++ b/drivers/devfreq/event/exynos-nocp.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * exynos-nocp.c - EXYNOS NoC (Network On Chip) Probe support + * exynos-nocp.c - Exynos NoC (Network On Chip) Probe support * * Copyright (c) 2016 Samsung Electronics Co., Ltd. * Author : Chanwoo Choi <cw00.choi@samsung.com> diff --git a/drivers/devfreq/event/exynos-nocp.h b/drivers/devfreq/event/exynos-nocp.h index 55cc96284a36..2d6f08cfd0c5 100644 --- a/drivers/devfreq/event/exynos-nocp.h +++ b/drivers/devfreq/event/exynos-nocp.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * exynos-nocp.h - EXYNOS NoC (Network on Chip) Probe header file + * exynos-nocp.h - Exynos NoC (Network on Chip) Probe header file * * Copyright (c) 2016 Samsung Electronics Co., Ltd. * Author : Chanwoo Choi <cw00.choi@samsung.com> diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c index 85c7a77bf3f0..17ed980d9099 100644 --- a/drivers/devfreq/event/exynos-ppmu.c +++ b/drivers/devfreq/event/exynos-ppmu.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * exynos_ppmu.c - EXYNOS PPMU (Platform Performance Monitoring Unit) support + * exynos_ppmu.c - Exynos PPMU (Platform Performance Monitoring Unit) support * * Copyright (c) 2014-2015 Samsung Electronics Co., Ltd. * Author : Chanwoo Choi <cw00.choi@samsung.com> @@ -101,17 +101,22 @@ static struct __exynos_ppmu_events { PPMU_EVENT(dmc1_1), }; -static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev) +static int __exynos_ppmu_find_ppmu_id(const char *edev_name) { int i; for (i = 0; i < ARRAY_SIZE(ppmu_events); i++) - if (!strcmp(edev->desc->name, ppmu_events[i].name)) + if (!strcmp(edev_name, ppmu_events[i].name)) return ppmu_events[i].id; return -EINVAL; } +static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev) +{ + return __exynos_ppmu_find_ppmu_id(edev->desc->name); +} + /* * The devfreq-event ops structure for PPMU v1.1 */ @@ -556,13 +561,11 @@ static int of_get_devfreq_events(struct device_node *np, * use default if not. */ if (info->ppmu_type == EXYNOS_TYPE_PPMU_V2) { - struct devfreq_event_dev edev; int id; /* Not all registers take the same value for * read+write data count. */ - edev.desc = &desc[j]; - id = exynos_ppmu_find_ppmu_id(&edev); + id = __exynos_ppmu_find_ppmu_id(desc[j].name); switch (id) { case PPMU_PMNCNT0: diff --git a/drivers/devfreq/event/exynos-ppmu.h b/drivers/devfreq/event/exynos-ppmu.h index 284420047455..97f667d0cbdd 100644 --- a/drivers/devfreq/event/exynos-ppmu.h +++ b/drivers/devfreq/event/exynos-ppmu.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * exynos_ppmu.h - EXYNOS PPMU header file + * exynos_ppmu.h - Exynos PPMU header file * * Copyright (c) 2015 Samsung Electronics Co., Ltd. 
* Author : Chanwoo Choi <cw00.choi@samsung.com> diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c index 5d1042188727..9a88faaf8b27 100644 --- a/drivers/devfreq/event/rockchip-dfi.c +++ b/drivers/devfreq/event/rockchip-dfi.c @@ -177,7 +177,6 @@ static int rockchip_dfi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct rockchip_dfi *data; - struct resource *res; struct devfreq_event_desc *desc; struct device_node *np = pdev->dev.of_node, *node; @@ -185,8 +184,7 @@ static int rockchip_dfi_probe(struct platform_device *pdev) if (!data) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - data->regs = devm_ioremap_resource(&pdev->dev, res); + data->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(data->regs)) return PTR_ERR(data->regs); @@ -200,6 +198,7 @@ static int rockchip_dfi_probe(struct platform_device *pdev) node = of_parse_phandle(np, "rockchip,pmu", 0); if (node) { data->regmap_pmu = syscon_node_to_regmap(node); + of_node_put(node); if (IS_ERR(data->regmap_pmu)) return PTR_ERR(data->regmap_pmu); } diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c index c832673273a2..8fa8eb541373 100644 --- a/drivers/devfreq/exynos-bus.c +++ b/drivers/devfreq/exynos-bus.c @@ -15,11 +15,10 @@ #include <linux/device.h> #include <linux/export.h> #include <linux/module.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/pm_opp.h> #include <linux/platform_device.h> #include <linux/regulator/consumer.h> -#include <linux/slab.h> #define DEFAULT_SATURATION_RATIO 40 @@ -127,6 +126,7 @@ static int exynos_bus_get_dev_status(struct device *dev, ret = exynos_bus_get_event(bus, &edata); if (ret < 0) { + dev_err(dev, "failed to get event from devfreq-event devices\n"); stat->total_time = stat->busy_time = 0; goto err; } @@ -287,52 +287,12 @@ err_clk: return ret; } -static int exynos_bus_probe(struct platform_device *pdev) +static int exynos_bus_profile_init(struct exynos_bus *bus, + struct devfreq_dev_profile *profile) { - struct device *dev = &pdev->dev; - struct device_node *np = dev->of_node, *node; - struct devfreq_dev_profile *profile; + struct device *dev = bus->dev; struct devfreq_simple_ondemand_data *ondemand_data; - struct devfreq_passive_data *passive_data; - struct devfreq *parent_devfreq; - struct exynos_bus *bus; - int ret, max_state; - unsigned long min_freq, max_freq; - bool passive = false; - - if (!np) { - dev_err(dev, "failed to find devicetree node\n"); - return -EINVAL; - } - - bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL); - if (!bus) - return -ENOMEM; - mutex_init(&bus->lock); - bus->dev = &pdev->dev; - platform_set_drvdata(pdev, bus); - - profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL); - if (!profile) - return -ENOMEM; - - node = of_parse_phandle(dev->of_node, "devfreq", 0); - if (node) { - of_node_put(node); - passive = true; - } else { - ret = exynos_bus_parent_parse_of(np, bus); - if (ret < 0) - return ret; - } - - /* Parse the device-tree to get the resource information */ - ret = exynos_bus_parse_of(np, bus); - if (ret < 0) - goto err_reg; - - if (passive) - goto passive; + int ret; /* Initialize the struct profile and governor data for parent device */ profile->polling_ms = 50; @@ -341,10 +301,9 @@ static int exynos_bus_probe(struct platform_device *pdev) profile->exit = exynos_bus_exit; ondemand_data = devm_kzalloc(dev, sizeof(*ondemand_data), GFP_KERNEL); - if (!ondemand_data) { - ret = -ENOMEM; - goto err; - } + if 
(!ondemand_data) + return -ENOMEM; + ondemand_data->upthreshold = 40; ondemand_data->downdifferential = 5; @@ -354,15 +313,14 @@ static int exynos_bus_probe(struct platform_device *pdev) ondemand_data); if (IS_ERR(bus->devfreq)) { dev_err(dev, "failed to add devfreq device\n"); - ret = PTR_ERR(bus->devfreq); - goto err; + return PTR_ERR(bus->devfreq); } /* Register opp_notifier to catch the change of OPP */ ret = devm_devfreq_register_opp_notifier(dev, bus->devfreq); if (ret < 0) { dev_err(dev, "failed to register opp notifier\n"); - goto err; + return ret; } /* @@ -372,33 +330,44 @@ static int exynos_bus_probe(struct platform_device *pdev) ret = exynos_bus_enable_edev(bus); if (ret < 0) { dev_err(dev, "failed to enable devfreq-event devices\n"); - goto err; + return ret; } ret = exynos_bus_set_event(bus); if (ret < 0) { dev_err(dev, "failed to set event to devfreq-event devices\n"); - goto err; + goto err_edev; } - goto out; -passive: + return 0; + +err_edev: + if (exynos_bus_disable_edev(bus)) + dev_warn(dev, "failed to disable the devfreq-event devices\n"); + + return ret; +} + +static int exynos_bus_profile_init_passive(struct exynos_bus *bus, + struct devfreq_dev_profile *profile) +{ + struct device *dev = bus->dev; + struct devfreq_passive_data *passive_data; + struct devfreq *parent_devfreq; + /* Initialize the struct profile and governor data for passive device */ profile->target = exynos_bus_target; profile->exit = exynos_bus_passive_exit; /* Get the instance of parent devfreq device */ parent_devfreq = devfreq_get_devfreq_by_phandle(dev, 0); - if (IS_ERR(parent_devfreq)) { - ret = -EPROBE_DEFER; - goto err; - } + if (IS_ERR(parent_devfreq)) + return -EPROBE_DEFER; passive_data = devm_kzalloc(dev, sizeof(*passive_data), GFP_KERNEL); - if (!passive_data) { - ret = -ENOMEM; - goto err; - } + if (!passive_data) + return -ENOMEM; + passive_data->parent = parent_devfreq; /* Add devfreq device for exynos bus with passive governor */ @@ -407,11 +376,61 @@ passive: if (IS_ERR(bus->devfreq)) { dev_err(dev, "failed to add devfreq dev with passive governor\n"); - ret = PTR_ERR(bus->devfreq); - goto err; + return PTR_ERR(bus->devfreq); + } + + return 0; +} + +static int exynos_bus_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node, *node; + struct devfreq_dev_profile *profile; + struct exynos_bus *bus; + int ret, max_state; + unsigned long min_freq, max_freq; + bool passive = false; + + if (!np) { + dev_err(dev, "failed to find devicetree node\n"); + return -EINVAL; } -out: + bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL); + if (!bus) + return -ENOMEM; + mutex_init(&bus->lock); + bus->dev = &pdev->dev; + platform_set_drvdata(pdev, bus); + + profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL); + if (!profile) + return -ENOMEM; + + node = of_parse_phandle(dev->of_node, "devfreq", 0); + if (node) { + of_node_put(node); + passive = true; + } else { + ret = exynos_bus_parent_parse_of(np, bus); + if (ret < 0) + return ret; + } + + /* Parse the device-tree to get the resource information */ + ret = exynos_bus_parse_of(np, bus); + if (ret < 0) + goto err_reg; + + if (passive) + ret = exynos_bus_profile_init_passive(bus, profile); + else + ret = exynos_bus_profile_init(bus, profile); + + if (ret < 0) + goto err; + max_state = bus->devfreq->profile->max_state; min_freq = (bus->devfreq->profile->freq_table[0] / 1000); max_freq = (bus->devfreq->profile->freq_table[max_state - 1] / 1000); diff --git 
a/drivers/devfreq/imx8m-ddrc.c b/drivers/devfreq/imx8m-ddrc.c new file mode 100644 index 000000000000..bc82d3653bff --- /dev/null +++ b/drivers/devfreq/imx8m-ddrc.c @@ -0,0 +1,471 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2019 NXP + */ + +#include <linux/module.h> +#include <linux/device.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/devfreq.h> +#include <linux/pm_opp.h> +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/arm-smccc.h> + +#define IMX_SIP_DDR_DVFS 0xc2000004 + +/* Query available frequencies. */ +#define IMX_SIP_DDR_DVFS_GET_FREQ_COUNT 0x10 +#define IMX_SIP_DDR_DVFS_GET_FREQ_INFO 0x11 + +/* + * This should be in a 1:1 mapping with devicetree OPPs but + * firmware provides additional info. + */ +struct imx8m_ddrc_freq { + unsigned long rate; + unsigned long smcarg; + int dram_core_parent_index; + int dram_alt_parent_index; + int dram_apb_parent_index; +}; + +/* Hardware limitation */ +#define IMX8M_DDRC_MAX_FREQ_COUNT 4 + +/* + * i.MX8M DRAM Controller clocks have the following structure (abridged): + * + * +----------+ |\ +------+ + * | dram_pll |-------|M| dram_core | | + * +----------+ |U|---------->| D | + * /--|X| | D | + * dram_alt_root | |/ | R | + * | | C | + * +---------+ | | + * |FIX DIV/4| | | + * +---------+ | | + * composite: | | | + * +----------+ | | | + * | dram_alt |----/ | | + * +----------+ | | + * | dram_apb |-------------------->| | + * +----------+ +------+ + * + * The dram_pll is used for higher rates and dram_alt is used for lower rates. + * + * Frequency switching is implemented in TF-A (via SMC call) and can change the + * configuration of the clocks, including mux parents. The dram_alt and + * dram_apb clocks are "imx composite" and their parent can change too. + * + * We need to prepare/enable the new mux parents head of switching and update + * their information afterwards. + */ +struct imx8m_ddrc { + struct devfreq_dev_profile profile; + struct devfreq *devfreq; + + /* For frequency switching: */ + struct clk *dram_core; + struct clk *dram_pll; + struct clk *dram_alt; + struct clk *dram_apb; + + int freq_count; + struct imx8m_ddrc_freq freq_table[IMX8M_DDRC_MAX_FREQ_COUNT]; +}; + +static struct imx8m_ddrc_freq *imx8m_ddrc_find_freq(struct imx8m_ddrc *priv, + unsigned long rate) +{ + struct imx8m_ddrc_freq *freq; + int i; + + /* + * Firmware reports values in MT/s, so we round-down from Hz + * Rounding is extra generous to ensure a match. + */ + rate = DIV_ROUND_CLOSEST(rate, 250000); + for (i = 0; i < priv->freq_count; ++i) { + freq = &priv->freq_table[i]; + if (freq->rate == rate || + freq->rate + 1 == rate || + freq->rate - 1 == rate) + return freq; + } + + return NULL; +} + +static void imx8m_ddrc_smc_set_freq(int target_freq) +{ + struct arm_smccc_res res; + u32 online_cpus = 0; + int cpu; + + local_irq_disable(); + + for_each_online_cpu(cpu) + online_cpus |= (1 << (cpu * 8)); + + /* change the ddr freqency */ + arm_smccc_smc(IMX_SIP_DDR_DVFS, target_freq, online_cpus, + 0, 0, 0, 0, 0, &res); + + local_irq_enable(); +} + +static struct clk *clk_get_parent_by_index(struct clk *clk, int index) +{ + struct clk_hw *hw; + + hw = clk_hw_get_parent_by_index(__clk_get_hw(clk), index); + + return hw ? 
hw->clk : NULL; +} + +static int imx8m_ddrc_set_freq(struct device *dev, struct imx8m_ddrc_freq *freq) +{ + struct imx8m_ddrc *priv = dev_get_drvdata(dev); + struct clk *new_dram_core_parent; + struct clk *new_dram_alt_parent; + struct clk *new_dram_apb_parent; + int ret; + + /* + * Fetch new parents + * + * new_dram_alt_parent and new_dram_apb_parent are optional but + * new_dram_core_parent is not. + */ + new_dram_core_parent = clk_get_parent_by_index( + priv->dram_core, freq->dram_core_parent_index - 1); + if (!new_dram_core_parent) { + dev_err(dev, "failed to fetch new dram_core parent\n"); + return -EINVAL; + } + if (freq->dram_alt_parent_index) { + new_dram_alt_parent = clk_get_parent_by_index( + priv->dram_alt, + freq->dram_alt_parent_index - 1); + if (!new_dram_alt_parent) { + dev_err(dev, "failed to fetch new dram_alt parent\n"); + return -EINVAL; + } + } else + new_dram_alt_parent = NULL; + + if (freq->dram_apb_parent_index) { + new_dram_apb_parent = clk_get_parent_by_index( + priv->dram_apb, + freq->dram_apb_parent_index - 1); + if (!new_dram_apb_parent) { + dev_err(dev, "failed to fetch new dram_apb parent\n"); + return -EINVAL; + } + } else + new_dram_apb_parent = NULL; + + /* increase reference counts and ensure clks are ON before switch */ + ret = clk_prepare_enable(new_dram_core_parent); + if (ret) { + dev_err(dev, "failed to enable new dram_core parent: %d\n", + ret); + goto out; + } + ret = clk_prepare_enable(new_dram_alt_parent); + if (ret) { + dev_err(dev, "failed to enable new dram_alt parent: %d\n", + ret); + goto out_disable_core_parent; + } + ret = clk_prepare_enable(new_dram_apb_parent); + if (ret) { + dev_err(dev, "failed to enable new dram_apb parent: %d\n", + ret); + goto out_disable_alt_parent; + } + + imx8m_ddrc_smc_set_freq(freq->smcarg); + + /* update parents in clk tree after switch. */ + ret = clk_set_parent(priv->dram_core, new_dram_core_parent); + if (ret) + dev_warn(dev, "failed to set dram_core parent: %d\n", ret); + if (new_dram_alt_parent) { + ret = clk_set_parent(priv->dram_alt, new_dram_alt_parent); + if (ret) + dev_warn(dev, "failed to set dram_alt parent: %d\n", + ret); + } + if (new_dram_apb_parent) { + ret = clk_set_parent(priv->dram_apb, new_dram_apb_parent); + if (ret) + dev_warn(dev, "failed to set dram_apb parent: %d\n", + ret); + } + + /* + * Explicitly refresh dram PLL rate. + * + * Even if it's marked with CLK_GET_RATE_NOCACHE the rate will not be + * automatically refreshed when clk_get_rate is called on children. + */ + clk_get_rate(priv->dram_pll); + + /* + * clk_set_parent transfer the reference count from old parent. 
+ * now we drop extra reference counts used during the switch + */ + clk_disable_unprepare(new_dram_apb_parent); +out_disable_alt_parent: + clk_disable_unprepare(new_dram_alt_parent); +out_disable_core_parent: + clk_disable_unprepare(new_dram_core_parent); +out: + return ret; +} + +static int imx8m_ddrc_target(struct device *dev, unsigned long *freq, u32 flags) +{ + struct imx8m_ddrc *priv = dev_get_drvdata(dev); + struct imx8m_ddrc_freq *freq_info; + struct dev_pm_opp *new_opp; + unsigned long old_freq, new_freq; + int ret; + + new_opp = devfreq_recommended_opp(dev, freq, flags); + if (IS_ERR(new_opp)) { + ret = PTR_ERR(new_opp); + dev_err(dev, "failed to get recommended opp: %d\n", ret); + return ret; + } + dev_pm_opp_put(new_opp); + + old_freq = clk_get_rate(priv->dram_core); + if (*freq == old_freq) + return 0; + + freq_info = imx8m_ddrc_find_freq(priv, *freq); + if (!freq_info) + return -EINVAL; + + /* + * Read back the clk rate to verify switch was correct and so that + * we can report it on all error paths. + */ + ret = imx8m_ddrc_set_freq(dev, freq_info); + + new_freq = clk_get_rate(priv->dram_core); + if (ret) + dev_err(dev, "ddrc failed freq switch to %lu from %lu: error %d. now at %lu\n", + *freq, old_freq, ret, new_freq); + else if (*freq != new_freq) + dev_err(dev, "ddrc failed freq update to %lu from %lu, now at %lu\n", + *freq, old_freq, new_freq); + else + dev_dbg(dev, "ddrc freq set to %lu (was %lu)\n", + *freq, old_freq); + + return ret; +} + +static int imx8m_ddrc_get_cur_freq(struct device *dev, unsigned long *freq) +{ + struct imx8m_ddrc *priv = dev_get_drvdata(dev); + + *freq = clk_get_rate(priv->dram_core); + + return 0; +} + +static int imx8m_ddrc_get_dev_status(struct device *dev, + struct devfreq_dev_status *stat) +{ + struct imx8m_ddrc *priv = dev_get_drvdata(dev); + + stat->busy_time = 0; + stat->total_time = 0; + stat->current_frequency = clk_get_rate(priv->dram_core); + + return 0; +} + +static int imx8m_ddrc_init_freq_info(struct device *dev) +{ + struct imx8m_ddrc *priv = dev_get_drvdata(dev); + struct arm_smccc_res res; + int index; + + /* An error here means DDR DVFS API not supported by firmware */ + arm_smccc_smc(IMX_SIP_DDR_DVFS, IMX_SIP_DDR_DVFS_GET_FREQ_COUNT, + 0, 0, 0, 0, 0, 0, &res); + priv->freq_count = res.a0; + if (priv->freq_count <= 0 || + priv->freq_count > IMX8M_DDRC_MAX_FREQ_COUNT) + return -ENODEV; + + for (index = 0; index < priv->freq_count; ++index) { + struct imx8m_ddrc_freq *freq = &priv->freq_table[index]; + + arm_smccc_smc(IMX_SIP_DDR_DVFS, IMX_SIP_DDR_DVFS_GET_FREQ_INFO, + index, 0, 0, 0, 0, 0, &res); + /* Result should be strictly positive */ + if ((long)res.a0 <= 0) + return -ENODEV; + + freq->rate = res.a0; + freq->smcarg = index; + freq->dram_core_parent_index = res.a1; + freq->dram_alt_parent_index = res.a2; + freq->dram_apb_parent_index = res.a3; + + /* dram_core has 2 options: dram_pll or dram_alt_root */ + if (freq->dram_core_parent_index != 1 && + freq->dram_core_parent_index != 2) + return -ENODEV; + /* dram_apb and dram_alt have exactly 8 possible parents */ + if (freq->dram_alt_parent_index > 8 || + freq->dram_apb_parent_index > 8) + return -ENODEV; + /* dram_core from alt requires explicit dram_alt parent */ + if (freq->dram_core_parent_index == 2 && + freq->dram_alt_parent_index == 0) + return -ENODEV; + } + + return 0; +} + +static int imx8m_ddrc_check_opps(struct device *dev) +{ + struct imx8m_ddrc *priv = dev_get_drvdata(dev); + struct imx8m_ddrc_freq *freq_info; + struct dev_pm_opp *opp; + unsigned long freq; + 
int i, opp_count; + + /* Enumerate DT OPPs and disable those not supported by firmware */ + opp_count = dev_pm_opp_get_opp_count(dev); + if (opp_count < 0) + return opp_count; + for (i = 0, freq = 0; i < opp_count; ++i, ++freq) { + opp = dev_pm_opp_find_freq_ceil(dev, &freq); + if (IS_ERR(opp)) { + dev_err(dev, "Failed enumerating OPPs: %ld\n", + PTR_ERR(opp)); + return PTR_ERR(opp); + } + dev_pm_opp_put(opp); + + freq_info = imx8m_ddrc_find_freq(priv, freq); + if (!freq_info) { + dev_info(dev, "Disable unsupported OPP %luHz %luMT/s\n", + freq, DIV_ROUND_CLOSEST(freq, 250000)); + dev_pm_opp_disable(dev, freq); + } + } + + return 0; +} + +static void imx8m_ddrc_exit(struct device *dev) +{ + dev_pm_opp_of_remove_table(dev); +} + +static int imx8m_ddrc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct imx8m_ddrc *priv; + const char *gov = DEVFREQ_GOV_USERSPACE; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + platform_set_drvdata(pdev, priv); + + ret = imx8m_ddrc_init_freq_info(dev); + if (ret) { + dev_err(dev, "failed to init firmware freq info: %d\n", ret); + return ret; + } + + priv->dram_core = devm_clk_get(dev, "core"); + if (IS_ERR(priv->dram_core)) { + ret = PTR_ERR(priv->dram_core); + dev_err(dev, "failed to fetch core clock: %d\n", ret); + return ret; + } + priv->dram_pll = devm_clk_get(dev, "pll"); + if (IS_ERR(priv->dram_pll)) { + ret = PTR_ERR(priv->dram_pll); + dev_err(dev, "failed to fetch pll clock: %d\n", ret); + return ret; + } + priv->dram_alt = devm_clk_get(dev, "alt"); + if (IS_ERR(priv->dram_alt)) { + ret = PTR_ERR(priv->dram_alt); + dev_err(dev, "failed to fetch alt clock: %d\n", ret); + return ret; + } + priv->dram_apb = devm_clk_get(dev, "apb"); + if (IS_ERR(priv->dram_apb)) { + ret = PTR_ERR(priv->dram_apb); + dev_err(dev, "failed to fetch apb clock: %d\n", ret); + return ret; + } + + ret = dev_pm_opp_of_add_table(dev); + if (ret < 0) { + dev_err(dev, "failed to get OPP table\n"); + return ret; + } + + ret = imx8m_ddrc_check_opps(dev); + if (ret < 0) + goto err; + + priv->profile.polling_ms = 1000; + priv->profile.target = imx8m_ddrc_target; + priv->profile.get_dev_status = imx8m_ddrc_get_dev_status; + priv->profile.exit = imx8m_ddrc_exit; + priv->profile.get_cur_freq = imx8m_ddrc_get_cur_freq; + priv->profile.initial_freq = clk_get_rate(priv->dram_core); + + priv->devfreq = devm_devfreq_add_device(dev, &priv->profile, + gov, NULL); + if (IS_ERR(priv->devfreq)) { + ret = PTR_ERR(priv->devfreq); + dev_err(dev, "failed to add devfreq device: %d\n", ret); + goto err; + } + + return 0; + +err: + dev_pm_opp_of_remove_table(dev); + return ret; +} + +static const struct of_device_id imx8m_ddrc_of_match[] = { + { .compatible = "fsl,imx8m-ddrc", }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, imx8m_ddrc_of_match); + +static struct platform_driver imx8m_ddrc_platdrv = { + .probe = imx8m_ddrc_probe, + .driver = { + .name = "imx8m-ddrc-devfreq", + .of_match_table = of_match_ptr(imx8m_ddrc_of_match), + }, +}; +module_platform_driver(imx8m_ddrc_platdrv); + +MODULE_DESCRIPTION("i.MX8M DDR Controller frequency driver"); +MODULE_AUTHOR("Leonard Crestez <leonard.crestez@nxp.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c index 2e65d7279d79..24f04f78285b 100644 --- a/drivers/devfreq/rk3399_dmc.c +++ b/drivers/devfreq/rk3399_dmc.c @@ -364,7 +364,8 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev) if (res.a0) { dev_err(dev, 
"Failed to set dram param: %ld\n", res.a0); - return -EINVAL; + ret = -EINVAL; + goto err_edev; } } } @@ -372,8 +373,11 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev) node = of_parse_phandle(np, "rockchip,pmu", 0); if (node) { data->regmap_pmu = syscon_node_to_regmap(node); - if (IS_ERR(data->regmap_pmu)) - return PTR_ERR(data->regmap_pmu); + of_node_put(node); + if (IS_ERR(data->regmap_pmu)) { + ret = PTR_ERR(data->regmap_pmu); + goto err_edev; + } } regmap_read(data->regmap_pmu, RK3399_PMUGRF_OS_REG2, &val); @@ -391,7 +395,8 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev) data->odt_dis_freq = data->timing.lpddr4_odt_dis_freq; break; default: - return -EINVAL; + ret = -EINVAL; + goto err_edev; }; arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, 0, 0, @@ -425,7 +430,8 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev) */ if (dev_pm_opp_of_add_table(dev)) { dev_err(dev, "Invalid operating-points in device tree.\n"); - return -EINVAL; + ret = -EINVAL; + goto err_edev; } of_property_read_u32(np, "upthreshold", @@ -465,6 +471,9 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev) err_free_opp: dev_pm_opp_of_remove_table(&pdev->dev); +err_edev: + devfreq_event_disable_edev(data->edev); + return ret; } diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index 76fb072c22dc..5a5a1da01a00 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c @@ -221,7 +221,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a, a_fences = get_fences(a, &a_num_fences); b_fences = get_fences(b, &b_num_fences); if (a_num_fences > INT_MAX - b_num_fences) - return NULL; + goto err; num_fences = a_num_fences + b_num_fences; diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 6fa1eba9d477..5142da401db3 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -239,6 +239,14 @@ config FSL_RAID the capability to offload memcpy, xor and pq computation for raid5/6. +config HISI_DMA + tristate "HiSilicon DMA Engine support" + depends on ARM64 || (COMPILE_TEST && PCI_MSI) + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + help + Support HiSilicon Kunpeng DMA engine. + config IMG_MDC_DMA tristate "IMG MDC support" depends on MIPS || COMPILE_TEST @@ -273,6 +281,19 @@ config INTEL_IDMA64 Enable DMA support for Intel Low Power Subsystem such as found on Intel Skylake PCH. +config INTEL_IDXD + tristate "Intel Data Accelerators support" + depends on PCI && X86_64 + select DMA_ENGINE + select SBITMAP + help + Enable support for the Intel(R) data accelerators present + in Intel Xeon CPU. + + Say Y if you have such a platform. + + If unsure, say N. + config INTEL_IOATDMA tristate "Intel I/OAT DMA support" depends on PCI && X86_64 @@ -497,6 +518,15 @@ config PXA_DMA 16 to 32 channels for peripheral to memory or memory to memory transfers. +config PLX_DMA + tristate "PLX ExpressLane PEX Switch DMA Engine Support" + depends on PCI + select DMA_ENGINE + help + Some PLX ExpressLane PCI Switches support additional DMA engines. + These are exposed via extra functions on the switch's + upstream port. Each function exposes one DMA channel. 
+ config SIRF_DMA tristate "CSR SiRFprimaII/SiRFmarco DMA support" depends on ARCH_SIRF diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 42d7e2fc64fa..1d908394fbea 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -35,12 +35,14 @@ obj-$(CONFIG_FSL_EDMA) += fsl-edma.o fsl-edma-common.o obj-$(CONFIG_MCF_EDMA) += mcf-edma.o fsl-edma-common.o obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o obj-$(CONFIG_FSL_RAID) += fsl_raid.o +obj-$(CONFIG_HISI_DMA) += hisi_dma.o obj-$(CONFIG_HSU_DMA) += hsu/ obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o obj-$(CONFIG_IMX_DMA) += imx-dma.o obj-$(CONFIG_IMX_SDMA) += imx-sdma.o obj-$(CONFIG_INTEL_IDMA64) += idma64.o obj-$(CONFIG_INTEL_IOATDMA) += ioat/ +obj-$(CONFIG_INTEL_IDXD) += idxd/ obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o obj-$(CONFIG_K3_DMA) += k3dma.o @@ -59,6 +61,7 @@ obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o obj-$(CONFIG_OWL_DMA) += owl-dma.o obj-$(CONFIG_PCH_DMA) += pch_dma.o obj-$(CONFIG_PL330_DMA) += pl330.o +obj-$(CONFIG_PLX_DMA) += plx_dma.o obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/ obj-$(CONFIG_PXA_DMA) += pxa_dma.o obj-$(CONFIG_RENESAS_DMA) += sh/ diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c index 832aefbe7af9..539e785039ca 100644 --- a/drivers/dma/altera-msgdma.c +++ b/drivers/dma/altera-msgdma.c @@ -772,10 +772,10 @@ static int request_and_map(struct platform_device *pdev, const char *name, return -EBUSY; } - *ptr = devm_ioremap_nocache(device, region->start, + *ptr = devm_ioremap(device, region->start, resource_size(region)); if (*ptr == NULL) { - dev_err(device, "ioremap_nocache of %s failed!", name); + dev_err(device, "ioremap of %s failed!", name); return -ENOMEM; } diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index e4c593f48575..4768ef26013b 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c @@ -797,10 +797,7 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan) /* stop DMA activity */ if (c->desc) { - if (c->desc->vd.tx.flags & DMA_PREP_INTERRUPT) - vchan_terminate_vdesc(&c->desc->vd); - else - vchan_vdesc_fini(&c->desc->vd); + vchan_terminate_vdesc(&c->desc->vd); c->desc = NULL; bcm2835_dma_abort(c); } diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c index a0ee404b736e..f1d149e32839 100644 --- a/drivers/dma/dma-axi-dmac.c +++ b/drivers/dma/dma-axi-dmac.c @@ -830,6 +830,7 @@ static int axi_dmac_probe(struct platform_device *pdev) struct dma_device *dma_dev; struct axi_dmac *dmac; struct resource *res; + struct regmap *regmap; int ret; dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL); @@ -921,10 +922,17 @@ static int axi_dmac_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dmac); - devm_regmap_init_mmio(&pdev->dev, dmac->base, &axi_dmac_regmap_config); + regmap = devm_regmap_init_mmio(&pdev->dev, dmac->base, + &axi_dmac_regmap_config); + if (IS_ERR(regmap)) { + ret = PTR_ERR(regmap); + goto err_free_irq; + } return 0; +err_free_irq: + free_irq(dmac->irq, dmac); err_unregister_of: of_dma_controller_free(pdev->dev.of_node); err_unregister_device: diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c index fa626acdc9b9..448f663da89c 100644 --- a/drivers/dma/dma-jz4780.c +++ b/drivers/dma/dma-jz4780.c @@ -999,7 +999,8 @@ static const struct jz4780_dma_soc_data jz4740_dma_soc_data = { static const struct jz4780_dma_soc_data jz4725b_dma_soc_data = { .nb_channels = 6, .transfer_ord_max = 5, - .flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC, + 
.flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC | + JZ_SOC_DATA_BREAK_LINKS, }; static const struct jz4780_dma_soc_data jz4770_dma_soc_data = { @@ -1020,12 +1021,19 @@ static const struct jz4780_dma_soc_data x1000_dma_soc_data = { .flags = JZ_SOC_DATA_PROGRAMMABLE_DMA, }; +static const struct jz4780_dma_soc_data x1830_dma_soc_data = { + .nb_channels = 32, + .transfer_ord_max = 7, + .flags = JZ_SOC_DATA_PROGRAMMABLE_DMA, +}; + static const struct of_device_id jz4780_dma_dt_match[] = { { .compatible = "ingenic,jz4740-dma", .data = &jz4740_dma_soc_data }, { .compatible = "ingenic,jz4725b-dma", .data = &jz4725b_dma_soc_data }, { .compatible = "ingenic,jz4770-dma", .data = &jz4770_dma_soc_data }, { .compatible = "ingenic,jz4780-dma", .data = &jz4780_dma_soc_data }, { .compatible = "ingenic,x1000-dma", .data = &x1000_dma_soc_data }, + { .compatible = "ingenic,x1830-dma", .data = &x1830_dma_soc_data }, {}, }; MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match); diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 03ac4b96117c..f3ef4edd4de1 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -60,6 +60,8 @@ static long dmaengine_ref_count; /* --- sysfs implementation --- */ +#define DMA_SLAVE_NAME "slave" + /** * dev_to_dma_chan - convert a device pointer to its sysfs container object * @dev - device node @@ -164,11 +166,152 @@ static struct class dma_devclass = { /* --- client and device registration --- */ -#define dma_device_satisfies_mask(device, mask) \ - __dma_device_satisfies_mask((device), &(mask)) -static int -__dma_device_satisfies_mask(struct dma_device *device, - const dma_cap_mask_t *want) +/** + * dma_cap_mask_all - enable iteration over all operation types + */ +static dma_cap_mask_t dma_cap_mask_all; + +/** + * dma_chan_tbl_ent - tracks channel allocations per core/operation + * @chan - associated channel for this entry + */ +struct dma_chan_tbl_ent { + struct dma_chan *chan; +}; + +/** + * channel_table - percpu lookup table for memory-to-memory offload providers + */ +static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END]; + +static int __init dma_channel_table_init(void) +{ + enum dma_transaction_type cap; + int err = 0; + + bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END); + + /* 'interrupt', 'private', and 'slave' are channel capabilities, + * but are not associated with an operation so they do not need + * an entry in the channel_table + */ + clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits); + clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits); + clear_bit(DMA_SLAVE, dma_cap_mask_all.bits); + + for_each_dma_cap_mask(cap, dma_cap_mask_all) { + channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent); + if (!channel_table[cap]) { + err = -ENOMEM; + break; + } + } + + if (err) { + pr_err("dmaengine dma_channel_table_init failure: %d\n", err); + for_each_dma_cap_mask(cap, dma_cap_mask_all) + free_percpu(channel_table[cap]); + } + + return err; +} +arch_initcall(dma_channel_table_init); + +/** + * dma_chan_is_local - returns true if the channel is in the same numa-node as + * the cpu + */ +static bool dma_chan_is_local(struct dma_chan *chan, int cpu) +{ + int node = dev_to_node(chan->device->dev); + return node == NUMA_NO_NODE || + cpumask_test_cpu(cpu, cpumask_of_node(node)); +} + +/** + * min_chan - returns the channel with min count and in the same numa-node as + * the cpu + * @cap: capability to match + * @cpu: cpu index which the channel should be close to + * + * If some channels are close to the given cpu, the one with 
the lowest + * reference count is returned. Otherwise, cpu is ignored and only the + * reference count is taken into account. + * Must be called under dma_list_mutex. + */ +static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu) +{ + struct dma_device *device; + struct dma_chan *chan; + struct dma_chan *min = NULL; + struct dma_chan *localmin = NULL; + + list_for_each_entry(device, &dma_device_list, global_node) { + if (!dma_has_cap(cap, device->cap_mask) || + dma_has_cap(DMA_PRIVATE, device->cap_mask)) + continue; + list_for_each_entry(chan, &device->channels, device_node) { + if (!chan->client_count) + continue; + if (!min || chan->table_count < min->table_count) + min = chan; + + if (dma_chan_is_local(chan, cpu)) + if (!localmin || + chan->table_count < localmin->table_count) + localmin = chan; + } + } + + chan = localmin ? localmin : min; + + if (chan) + chan->table_count++; + + return chan; +} + +/** + * dma_channel_rebalance - redistribute the available channels + * + * Optimize for cpu isolation (each cpu gets a dedicated channel for an + * operation type) in the SMP case, and operation isolation (avoid + * multi-tasking channels) in the non-SMP case. Must be called under + * dma_list_mutex. + */ +static void dma_channel_rebalance(void) +{ + struct dma_chan *chan; + struct dma_device *device; + int cpu; + int cap; + + /* undo the last distribution */ + for_each_dma_cap_mask(cap, dma_cap_mask_all) + for_each_possible_cpu(cpu) + per_cpu_ptr(channel_table[cap], cpu)->chan = NULL; + + list_for_each_entry(device, &dma_device_list, global_node) { + if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) + continue; + list_for_each_entry(chan, &device->channels, device_node) + chan->table_count = 0; + } + + /* don't populate the channel_table if no clients are available */ + if (!dmaengine_ref_count) + return; + + /* redistribute available channels */ + for_each_dma_cap_mask(cap, dma_cap_mask_all) + for_each_online_cpu(cpu) { + chan = min_chan(cap, cpu); + per_cpu_ptr(channel_table[cap], cpu)->chan = chan; + } +} + +static int dma_device_satisfies_mask(struct dma_device *device, + const dma_cap_mask_t *want) { dma_cap_mask_t has; @@ -179,7 +322,7 @@ __dma_device_satisfies_mask(struct dma_device *device, static struct module *dma_chan_to_owner(struct dma_chan *chan) { - return chan->device->dev->driver->owner; + return chan->device->owner; } /** @@ -198,6 +341,23 @@ static void balance_ref_count(struct dma_chan *chan) } } +static void dma_device_release(struct kref *ref) +{ + struct dma_device *device = container_of(ref, struct dma_device, ref); + + list_del_rcu(&device->global_node); + dma_channel_rebalance(); + + if (device->device_release) + device->device_release(device); +} + +static void dma_device_put(struct dma_device *device) +{ + lockdep_assert_held(&dma_list_mutex); + kref_put(&device->ref, dma_device_release); +} + /** * dma_chan_get - try to grab a dma channel's parent driver module * @chan - channel to grab @@ -218,6 +378,12 @@ static int dma_chan_get(struct dma_chan *chan) if (!try_module_get(owner)) return -ENODEV; + ret = kref_get_unless_zero(&chan->device->ref); + if (!ret) { + ret = -ENODEV; + goto module_put_out; + } + /* allocate upon first client reference */ if (chan->device->device_alloc_chan_resources) { ret = chan->device->device_alloc_chan_resources(chan); @@ -233,6 +399,8 @@ out: return 0; err_out: + dma_device_put(chan->device); +module_put_out: module_put(owner); return ret; } @@ -250,7 +418,6 @@ static void dma_chan_put(struct dma_chan *chan) return; 
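With the kref introduced here, a dma_device now stays alive until the last channel client drops its reference, and a driver that wants to support unbinding while channels are in use supplies a device_release() callback that frees the containing structure. A provider-side sketch under that new lifetime rule; the foo_* names are hypothetical:

/* Hypothetical provider freeing its state from ->device_release(). */
#include <linux/dmaengine.h>
#include <linux/slab.h>

struct foo_dma_dev {
	struct dma_device ddev;
	/* ... driver-private state ... */
};

static void foo_dma_release(struct dma_device *ddev)
{
	struct foo_dma_dev *foo = container_of(ddev, struct foo_dma_dev, ddev);

	kfree(foo); /* last reference is gone, safe to free */
}

static int foo_register(struct foo_dma_dev *foo)
{
	foo->ddev.device_release = foo_dma_release;
	/* capabilities and channels are set up elsewhere */
	return dma_async_device_register(&foo->ddev);
}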
chan->client_count--; - module_put(dma_chan_to_owner(chan)); /* This channel is not in use anymore, free it */ if (!chan->client_count && chan->device->device_free_chan_resources) { @@ -265,6 +432,9 @@ static void dma_chan_put(struct dma_chan *chan) chan->router = NULL; chan->route_data = NULL; } + + dma_device_put(chan->device); + module_put(dma_chan_to_owner(chan)); } enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) @@ -289,57 +459,6 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) EXPORT_SYMBOL(dma_sync_wait); /** - * dma_cap_mask_all - enable iteration over all operation types - */ -static dma_cap_mask_t dma_cap_mask_all; - -/** - * dma_chan_tbl_ent - tracks channel allocations per core/operation - * @chan - associated channel for this entry - */ -struct dma_chan_tbl_ent { - struct dma_chan *chan; -}; - -/** - * channel_table - percpu lookup table for memory-to-memory offload providers - */ -static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END]; - -static int __init dma_channel_table_init(void) -{ - enum dma_transaction_type cap; - int err = 0; - - bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END); - - /* 'interrupt', 'private', and 'slave' are channel capabilities, - * but are not associated with an operation so they do not need - * an entry in the channel_table - */ - clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits); - clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits); - clear_bit(DMA_SLAVE, dma_cap_mask_all.bits); - - for_each_dma_cap_mask(cap, dma_cap_mask_all) { - channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent); - if (!channel_table[cap]) { - err = -ENOMEM; - break; - } - } - - if (err) { - pr_err("initialization failure\n"); - for_each_dma_cap_mask(cap, dma_cap_mask_all) - free_percpu(channel_table[cap]); - } - - return err; -} -arch_initcall(dma_channel_table_init); - -/** * dma_find_channel - find a channel to carry out the operation * @tx_type: transaction type */ @@ -369,97 +488,6 @@ void dma_issue_pending_all(void) } EXPORT_SYMBOL(dma_issue_pending_all); -/** - * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu - */ -static bool dma_chan_is_local(struct dma_chan *chan, int cpu) -{ - int node = dev_to_node(chan->device->dev); - return node == NUMA_NO_NODE || - cpumask_test_cpu(cpu, cpumask_of_node(node)); -} - -/** - * min_chan - returns the channel with min count and in the same numa-node as the cpu - * @cap: capability to match - * @cpu: cpu index which the channel should be close to - * - * If some channels are close to the given cpu, the one with the lowest - * reference count is returned. Otherwise, cpu is ignored and only the - * reference count is taken into account. - * Must be called under dma_list_mutex. - */ -static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu) -{ - struct dma_device *device; - struct dma_chan *chan; - struct dma_chan *min = NULL; - struct dma_chan *localmin = NULL; - - list_for_each_entry(device, &dma_device_list, global_node) { - if (!dma_has_cap(cap, device->cap_mask) || - dma_has_cap(DMA_PRIVATE, device->cap_mask)) - continue; - list_for_each_entry(chan, &device->channels, device_node) { - if (!chan->client_count) - continue; - if (!min || chan->table_count < min->table_count) - min = chan; - - if (dma_chan_is_local(chan, cpu)) - if (!localmin || - chan->table_count < localmin->table_count) - localmin = chan; - } - } - - chan = localmin ? 
localmin : min; - - if (chan) - chan->table_count++; - - return chan; -} - -/** - * dma_channel_rebalance - redistribute the available channels - * - * Optimize for cpu isolation (each cpu gets a dedicated channel for an - * operation type) in the SMP case, and operation isolation (avoid - * multi-tasking channels) in the non-SMP case. Must be called under - * dma_list_mutex. - */ -static void dma_channel_rebalance(void) -{ - struct dma_chan *chan; - struct dma_device *device; - int cpu; - int cap; - - /* undo the last distribution */ - for_each_dma_cap_mask(cap, dma_cap_mask_all) - for_each_possible_cpu(cpu) - per_cpu_ptr(channel_table[cap], cpu)->chan = NULL; - - list_for_each_entry(device, &dma_device_list, global_node) { - if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) - continue; - list_for_each_entry(chan, &device->channels, device_node) - chan->table_count = 0; - } - - /* don't populate the channel_table if no clients are available */ - if (!dmaengine_ref_count) - return; - - /* redistribute available channels */ - for_each_dma_cap_mask(cap, dma_cap_mask_all) - for_each_online_cpu(cpu) { - chan = min_chan(cap, cpu); - per_cpu_ptr(channel_table[cap], cpu)->chan = chan; - } -} - int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps) { struct dma_device *device; @@ -502,7 +530,7 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask, { struct dma_chan *chan; - if (mask && !__dma_device_satisfies_mask(dev, mask)) { + if (mask && !dma_device_satisfies_mask(dev, mask)) { dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__); return NULL; } @@ -704,11 +732,11 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name) if (has_acpi_companion(dev) && !chan) chan = acpi_dma_request_slave_chan_by_name(dev, name); - if (chan) { - /* Valid channel found or requester needs to be deferred */ - if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER) - return chan; - } + if (PTR_ERR(chan) == -EPROBE_DEFER) + return chan; + + if (!IS_ERR_OR_NULL(chan)) + goto found; /* Try to find the channel via the DMA filter map(s) */ mutex_lock(&dma_list_mutex); @@ -728,7 +756,23 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name) } mutex_unlock(&dma_list_mutex); - return chan ? 
chan : ERR_PTR(-EPROBE_DEFER); + if (!IS_ERR_OR_NULL(chan)) + goto found; + + return ERR_PTR(-EPROBE_DEFER); + +found: + chan->slave = dev; + chan->name = kasprintf(GFP_KERNEL, "dma:%s", name); + if (!chan->name) + return ERR_PTR(-ENOMEM); + + if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj, + DMA_SLAVE_NAME)) + dev_err(dev, "Cannot create DMA %s symlink\n", DMA_SLAVE_NAME); + if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name)) + dev_err(dev, "Cannot create DMA %s symlink\n", chan->name); + return chan; } EXPORT_SYMBOL_GPL(dma_request_chan); @@ -786,6 +830,13 @@ void dma_release_channel(struct dma_chan *chan) /* drop PRIVATE cap enabled by __dma_request_channel() */ if (--chan->device->privatecnt == 0) dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask); + if (chan->slave) { + sysfs_remove_link(&chan->slave->kobj, chan->name); + kfree(chan->name); + chan->name = NULL; + chan->slave = NULL; + } + sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME); mutex_unlock(&dma_list_mutex); } EXPORT_SYMBOL_GPL(dma_release_channel); @@ -834,14 +885,14 @@ EXPORT_SYMBOL(dmaengine_get); */ void dmaengine_put(void) { - struct dma_device *device; + struct dma_device *device, *_d; struct dma_chan *chan; mutex_lock(&dma_list_mutex); dmaengine_ref_count--; BUG_ON(dmaengine_ref_count < 0); /* drop channel references */ - list_for_each_entry(device, &dma_device_list, global_node) { + list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) continue; list_for_each_entry(chan, &device->channels, device_node) @@ -900,15 +951,115 @@ static int get_dma_id(struct dma_device *device) return 0; } +static int __dma_async_device_channel_register(struct dma_device *device, + struct dma_chan *chan, + int chan_id) +{ + int rc = 0; + int chancnt = device->chancnt; + atomic_t *idr_ref; + struct dma_chan *tchan; + + tchan = list_first_entry_or_null(&device->channels, + struct dma_chan, device_node); + if (tchan->dev) { + idr_ref = tchan->dev->idr_ref; + } else { + idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL); + if (!idr_ref) + return -ENOMEM; + atomic_set(idr_ref, 0); + } + + chan->local = alloc_percpu(typeof(*chan->local)); + if (!chan->local) + goto err_out; + chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL); + if (!chan->dev) { + free_percpu(chan->local); + chan->local = NULL; + goto err_out; + } + + /* + * When the chan_id is a negative value, we are dynamically adding + * the channel. Otherwise we are static enumerating. + */ + chan->chan_id = chan_id < 0 ? 
chancnt : chan_id; + chan->dev->device.class = &dma_devclass; + chan->dev->device.parent = device->dev; + chan->dev->chan = chan; + chan->dev->idr_ref = idr_ref; + chan->dev->dev_id = device->dev_id; + atomic_inc(idr_ref); + dev_set_name(&chan->dev->device, "dma%dchan%d", + device->dev_id, chan->chan_id); + + rc = device_register(&chan->dev->device); + if (rc) + goto err_out; + chan->client_count = 0; + device->chancnt = chan->chan_id + 1; + + return 0; + + err_out: + free_percpu(chan->local); + kfree(chan->dev); + if (atomic_dec_return(idr_ref) == 0) + kfree(idr_ref); + return rc; +} + +int dma_async_device_channel_register(struct dma_device *device, + struct dma_chan *chan) +{ + int rc; + + rc = __dma_async_device_channel_register(device, chan, -1); + if (rc < 0) + return rc; + + dma_channel_rebalance(); + return 0; +} +EXPORT_SYMBOL_GPL(dma_async_device_channel_register); + +static void __dma_async_device_channel_unregister(struct dma_device *device, + struct dma_chan *chan) +{ + WARN_ONCE(!device->device_release && chan->client_count, + "%s called while %d clients hold a reference\n", + __func__, chan->client_count); + mutex_lock(&dma_list_mutex); + list_del(&chan->device_node); + device->chancnt--; + chan->dev->chan = NULL; + mutex_unlock(&dma_list_mutex); + device_unregister(&chan->dev->device); + free_percpu(chan->local); +} + +void dma_async_device_channel_unregister(struct dma_device *device, + struct dma_chan *chan) +{ + __dma_async_device_channel_unregister(device, chan); + dma_channel_rebalance(); +} +EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister); + /** * dma_async_device_register - registers DMA devices found * @device: &dma_device + * + * After calling this routine the structure should not be freed except in the + * device_release() callback which will be called after + * dma_async_device_unregister() is called and no further references are taken. */ int dma_async_device_register(struct dma_device *device) { - int chancnt = 0, rc; + int rc, i = 0; struct dma_chan* chan; - atomic_t *idr_ref; if (!device) return -ENODEV; @@ -919,6 +1070,8 @@ int dma_async_device_register(struct dma_device *device) return -EIO; } + device->owner = device->dev->driver->owner; + if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) { dev_err(device->dev, "Device claims capability %s, but op is not defined\n", @@ -994,65 +1147,29 @@ int dma_async_device_register(struct dma_device *device) return -EIO; } + if (!device->device_release) + dev_warn(device->dev, + "WARN: Device release is not defined so it is not safe to unbind this driver while in use\n"); + + kref_init(&device->ref); + /* note: this only matters in the * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case */ if (device_has_all_tx_types(device)) dma_cap_set(DMA_ASYNC_TX, device->cap_mask); - idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL); - if (!idr_ref) - return -ENOMEM; rc = get_dma_id(device); - if (rc != 0) { - kfree(idr_ref); + if (rc != 0) return rc; - } - - atomic_set(idr_ref, 0); /* represent channels in sysfs. 
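The exported dma_async_device_channel_register()/dma_async_device_channel_unregister() pair lets a driver grow or shrink its channel set after the dma_device itself has been registered. A hedged sketch of the expected call shape; that the channel is linked into device->channels before registration is an assumption inferred from the list handling above:

/* Hypothetical hot-add of one channel to an already-registered device. */
#include <linux/dmaengine.h>
#include <linux/list.h>

static int foo_hot_add_chan(struct dma_device *ddev, struct dma_chan *chan)
{
	chan->device = ddev;
	list_add_tail(&chan->device_node, &ddev->channels); /* assumption */
	return dma_async_device_channel_register(ddev, chan);
}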
Probably want devs too */ list_for_each_entry(chan, &device->channels, device_node) { - rc = -ENOMEM; - chan->local = alloc_percpu(typeof(*chan->local)); - if (chan->local == NULL) - goto err_out; - chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL); - if (chan->dev == NULL) { - free_percpu(chan->local); - chan->local = NULL; + rc = __dma_async_device_channel_register(device, chan, i++); + if (rc < 0) goto err_out; - } - - chan->chan_id = chancnt++; - chan->dev->device.class = &dma_devclass; - chan->dev->device.parent = device->dev; - chan->dev->chan = chan; - chan->dev->idr_ref = idr_ref; - chan->dev->dev_id = device->dev_id; - atomic_inc(idr_ref); - dev_set_name(&chan->dev->device, "dma%dchan%d", - device->dev_id, chan->chan_id); - - rc = device_register(&chan->dev->device); - if (rc) { - free_percpu(chan->local); - chan->local = NULL; - kfree(chan->dev); - atomic_dec(idr_ref); - goto err_out; - } - chan->client_count = 0; - } - - if (!chancnt) { - dev_err(device->dev, "%s: device has no channels!\n", __func__); - rc = -ENODEV; - goto err_out; } - device->chancnt = chancnt; - mutex_lock(&dma_list_mutex); /* take references on public channels */ if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask)) @@ -1080,9 +1197,8 @@ int dma_async_device_register(struct dma_device *device) err_out: /* if we never registered a channel just release the idr */ - if (atomic_read(idr_ref) == 0) { + if (!device->chancnt) { ida_free(&dma_ida, device->dev_id); - kfree(idr_ref); return rc; } @@ -1108,23 +1224,20 @@ EXPORT_SYMBOL(dma_async_device_register); */ void dma_async_device_unregister(struct dma_device *device) { - struct dma_chan *chan; + struct dma_chan *chan, *n; + + list_for_each_entry_safe(chan, n, &device->channels, device_node) + __dma_async_device_channel_unregister(device, chan); mutex_lock(&dma_list_mutex); - list_del_rcu(&device->global_node); + /* + * setting DMA_PRIVATE ensures the device being torn down will not + * be used in the channel_table + */ + dma_cap_set(DMA_PRIVATE, device->cap_mask); dma_channel_rebalance(); + dma_device_put(device); mutex_unlock(&dma_list_mutex); - - list_for_each_entry(chan, &device->channels, device_node) { - WARN_ONCE(chan->client_count, - "%s called while %d clients hold a reference\n", - __func__, chan->client_count); - mutex_lock(&dma_list_mutex); - chan->dev->chan = NULL; - mutex_unlock(&dma_list_mutex); - device_unregister(&chan->dev->device); - free_percpu(chan->local); - } } EXPORT_SYMBOL(dma_async_device_unregister); @@ -1302,6 +1415,79 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, } EXPORT_SYMBOL(dma_async_tx_descriptor_init); +static inline int desc_check_and_set_metadata_mode( + struct dma_async_tx_descriptor *desc, enum dma_desc_metadata_mode mode) +{ + /* Make sure that the metadata mode is not mixed */ + if (!desc->desc_metadata_mode) { + if (dmaengine_is_metadata_mode_supported(desc->chan, mode)) + desc->desc_metadata_mode = mode; + else + return -ENOTSUPP; + } else if (desc->desc_metadata_mode != mode) { + return -EINVAL; + } + + return 0; +} + +int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc, + void *data, size_t len) +{ + int ret; + + if (!desc) + return -EINVAL; + + ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_CLIENT); + if (ret) + return ret; + + if (!desc->metadata_ops || !desc->metadata_ops->attach) + return -ENOTSUPP; + + return desc->metadata_ops->attach(desc, data, len); +} +EXPORT_SYMBOL_GPL(dmaengine_desc_attach_metadata); + +void 
*dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc, + size_t *payload_len, size_t *max_len) +{ + int ret; + + if (!desc) + return ERR_PTR(-EINVAL); + + ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE); + if (ret) + return ERR_PTR(ret); + + if (!desc->metadata_ops || !desc->metadata_ops->get_ptr) + return ERR_PTR(-ENOTSUPP); + + return desc->metadata_ops->get_ptr(desc, payload_len, max_len); +} +EXPORT_SYMBOL_GPL(dmaengine_desc_get_metadata_ptr); + +int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc, + size_t payload_len) +{ + int ret; + + if (!desc) + return -EINVAL; + + ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE); + if (ret) + return ret; + + if (!desc->metadata_ops || !desc->metadata_ops->set_len) + return -ENOTSUPP; + + return desc->metadata_ops->set_len(desc, payload_len); +} +EXPORT_SYMBOL_GPL(dmaengine_desc_set_metadata_len); + /* dma_wait_for_async_tx - spin wait for a transaction to complete * @tx: in-flight transaction to wait on */ @@ -1373,5 +1559,3 @@ static int __init dma_bus_init(void) return class_register(&dma_devclass); } arch_initcall(dma_bus_init); - - diff --git a/drivers/dma/dmaengine.h b/drivers/dma/dmaengine.h index 501c0b063f85..e8a320c9e57c 100644 --- a/drivers/dma/dmaengine.h +++ b/drivers/dma/dmaengine.h @@ -77,6 +77,7 @@ static inline enum dma_status dma_cookie_status(struct dma_chan *chan, state->last = complete; state->used = used; state->residue = 0; + state->in_flight_bytes = 0; } return dma_async_is_complete(cookie, complete, used); } @@ -87,6 +88,13 @@ static inline void dma_set_residue(struct dma_tx_state *state, u32 residue) state->residue = residue; } +static inline void dma_set_in_flight_bytes(struct dma_tx_state *state, + u32 in_flight_bytes) +{ + if (state) + state->in_flight_bytes = in_flight_bytes; +} + struct dmaengine_desc_callback { dma_async_tx_callback callback; dma_async_tx_callback_result callback_result; @@ -171,4 +179,7 @@ dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb) return (cb->callback) ? true : false; } +struct dma_chan *dma_get_slave_channel(struct dma_chan *chan); +struct dma_chan *dma_get_any_slave_channel(struct dma_device *device); + #endif diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c index a1ce307c502f..14c1ac26f866 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c @@ -636,14 +636,10 @@ static int dma_chan_terminate_all(struct dma_chan *dchan) vchan_get_all_descriptors(&chan->vc, &head); - /* - * As vchan_dma_desc_free_list can access to desc_allocated list - * we need to call it in vc.lock context. 
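The dw-axi-dmac change above moves descriptor freeing out of the vc.lock critical section: descriptors are harvested under the lock and released after it is dropped, and the hisi_dma driver added later in this series follows the same shape. A generic virt-dma sketch of that pattern, illustrative only:

/* Common virt-dma terminate_all shape after this change. */
#include "virt-dma.h"

static int foo_terminate_all(struct dma_chan *dchan)
{
	struct virt_dma_chan *vc = to_virt_chan(dchan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&vc->lock, flags);
	vchan_get_all_descriptors(vc, &head);
	spin_unlock_irqrestore(&vc->lock, flags);

	/* freeing may take other locks, so run it without vc.lock held */
	vchan_dma_desc_free_list(vc, &head);
	return 0;
}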
- */ - vchan_dma_desc_free_list(&chan->vc, &head); - spin_unlock_irqrestore(&chan->vc.lock, flags); + vchan_dma_desc_free_list(&chan->vc, &head); + dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan)); return 0; diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c index b1a7ca91701a..5697c3622699 100644 --- a/drivers/dma/fsl-edma-common.c +++ b/drivers/dma/fsl-edma-common.c @@ -109,10 +109,15 @@ void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan, u32 ch = fsl_chan->vchan.chan.chan_id; void __iomem *muxaddr; unsigned int chans_per_mux, ch_off; + int endian_diff[4] = {3, 1, -1, -3}; u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs; chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr; ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux; + + if (fsl_chan->edma->drvdata->mux_swap) + ch_off += endian_diff[ch_off % 4]; + muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux]; slot = EDMAMUX_CHCFG_SOURCE(slot); diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h index 5eaa2902ed39..67e422590c9a 100644 --- a/drivers/dma/fsl-edma-common.h +++ b/drivers/dma/fsl-edma-common.h @@ -147,6 +147,7 @@ struct fsl_edma_drvdata { enum edma_version version; u32 dmamuxs; bool has_dmaclk; + bool mux_swap; int (*setup_irq)(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma); }; diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c index b626c06ac2e0..eff7ebd8cf35 100644 --- a/drivers/dma/fsl-edma.c +++ b/drivers/dma/fsl-edma.c @@ -233,6 +233,13 @@ static struct fsl_edma_drvdata vf610_data = { .setup_irq = fsl_edma_irq_init, }; +static struct fsl_edma_drvdata ls1028a_data = { + .version = v1, + .dmamuxs = DMAMUX_NR, + .mux_swap = true, + .setup_irq = fsl_edma_irq_init, +}; + static struct fsl_edma_drvdata imx7ulp_data = { .version = v3, .dmamuxs = 1, @@ -242,6 +249,7 @@ static struct fsl_edma_drvdata imx7ulp_data = { static const struct of_device_id fsl_edma_dt_ids[] = { { .compatible = "fsl,vf610-edma", .data = &vf610_data}, + { .compatible = "fsl,ls1028a-edma", .data = &ls1028a_data}, { .compatible = "fsl,imx7ulp-edma", .data = &imx7ulp_data}, { /* sentinel */ } }; diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c index 89792083d62c..95cc0256b387 100644 --- a/drivers/dma/fsl-qdma.c +++ b/drivers/dma/fsl-qdma.c @@ -304,7 +304,7 @@ static void fsl_qdma_free_chan_resources(struct dma_chan *chan) vchan_dma_desc_free_list(&fsl_chan->vchan, &head); - if (!fsl_queue->comp_pool && !fsl_queue->comp_pool) + if (!fsl_queue->comp_pool && !fsl_queue->desc_pool) return; list_for_each_entry_safe(comp_temp, _comp_temp, diff --git a/drivers/dma/hisi_dma.c b/drivers/dma/hisi_dma.c new file mode 100644 index 000000000000..ed3619266a48 --- /dev/null +++ b/drivers/dma/hisi_dma.c @@ -0,0 +1,611 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2019 HiSilicon Limited. 
*/ +#include <linux/bitfield.h> +#include <linux/dmaengine.h> +#include <linux/init.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/spinlock.h> +#include "virt-dma.h" + +#define HISI_DMA_SQ_BASE_L 0x0 +#define HISI_DMA_SQ_BASE_H 0x4 +#define HISI_DMA_SQ_DEPTH 0x8 +#define HISI_DMA_SQ_TAIL_PTR 0xc +#define HISI_DMA_CQ_BASE_L 0x10 +#define HISI_DMA_CQ_BASE_H 0x14 +#define HISI_DMA_CQ_DEPTH 0x18 +#define HISI_DMA_CQ_HEAD_PTR 0x1c +#define HISI_DMA_CTRL0 0x20 +#define HISI_DMA_CTRL0_QUEUE_EN_S 0 +#define HISI_DMA_CTRL0_QUEUE_PAUSE_S 4 +#define HISI_DMA_CTRL1 0x24 +#define HISI_DMA_CTRL1_QUEUE_RESET_S 0 +#define HISI_DMA_Q_FSM_STS 0x30 +#define HISI_DMA_FSM_STS_MASK GENMASK(3, 0) +#define HISI_DMA_INT_STS 0x40 +#define HISI_DMA_INT_STS_MASK GENMASK(12, 0) +#define HISI_DMA_INT_MSK 0x44 +#define HISI_DMA_MODE 0x217c +#define HISI_DMA_OFFSET 0x100 + +#define HISI_DMA_MSI_NUM 30 +#define HISI_DMA_CHAN_NUM 30 +#define HISI_DMA_Q_DEPTH_VAL 1024 + +#define PCI_BAR_2 2 + +enum hisi_dma_mode { + EP = 0, + RC, +}; + +enum hisi_dma_chan_status { + DISABLE = -1, + IDLE = 0, + RUN, + CPL, + PAUSE, + HALT, + ABORT, + WAIT, + BUFFCLR, +}; + +struct hisi_dma_sqe { + __le32 dw0; +#define OPCODE_MASK GENMASK(3, 0) +#define OPCODE_SMALL_PACKAGE 0x1 +#define OPCODE_M2M 0x4 +#define LOCAL_IRQ_EN BIT(8) +#define ATTR_SRC_MASK GENMASK(14, 12) + __le32 dw1; + __le32 dw2; +#define ATTR_DST_MASK GENMASK(26, 24) + __le32 length; + __le64 src_addr; + __le64 dst_addr; +}; + +struct hisi_dma_cqe { + __le32 rsv0; + __le32 rsv1; + __le16 sq_head; + __le16 rsv2; + __le16 rsv3; + __le16 w0; +#define STATUS_MASK GENMASK(15, 1) +#define STATUS_SUCC 0x0 +#define VALID_BIT BIT(0) +}; + +struct hisi_dma_desc { + struct virt_dma_desc vd; + struct hisi_dma_sqe sqe; +}; + +struct hisi_dma_chan { + struct virt_dma_chan vc; + struct hisi_dma_dev *hdma_dev; + struct hisi_dma_sqe *sq; + struct hisi_dma_cqe *cq; + dma_addr_t sq_dma; + dma_addr_t cq_dma; + u32 sq_tail; + u32 cq_head; + u32 qp_num; + enum hisi_dma_chan_status status; + struct hisi_dma_desc *desc; +}; + +struct hisi_dma_dev { + struct pci_dev *pdev; + void __iomem *base; + struct dma_device dma_dev; + u32 chan_num; + u32 chan_depth; + struct hisi_dma_chan chan[]; +}; + +static inline struct hisi_dma_chan *to_hisi_dma_chan(struct dma_chan *c) +{ + return container_of(c, struct hisi_dma_chan, vc.chan); +} + +static inline struct hisi_dma_desc *to_hisi_dma_desc(struct virt_dma_desc *vd) +{ + return container_of(vd, struct hisi_dma_desc, vd); +} + +static inline void hisi_dma_chan_write(void __iomem *base, u32 reg, u32 index, + u32 val) +{ + writel_relaxed(val, base + reg + index * HISI_DMA_OFFSET); +} + +static inline void hisi_dma_update_bit(void __iomem *addr, u32 pos, bool val) +{ + u32 tmp; + + tmp = readl_relaxed(addr); + tmp = val ? 
tmp | BIT(pos) : tmp & ~BIT(pos); + writel_relaxed(tmp, addr); +} + +static void hisi_dma_free_irq_vectors(void *data) +{ + pci_free_irq_vectors(data); +} + +static void hisi_dma_pause_dma(struct hisi_dma_dev *hdma_dev, u32 index, + bool pause) +{ + void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL0 + index * + HISI_DMA_OFFSET; + + hisi_dma_update_bit(addr, HISI_DMA_CTRL0_QUEUE_PAUSE_S, pause); +} + +static void hisi_dma_enable_dma(struct hisi_dma_dev *hdma_dev, u32 index, + bool enable) +{ + void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL0 + index * + HISI_DMA_OFFSET; + + hisi_dma_update_bit(addr, HISI_DMA_CTRL0_QUEUE_EN_S, enable); +} + +static void hisi_dma_mask_irq(struct hisi_dma_dev *hdma_dev, u32 qp_index) +{ + hisi_dma_chan_write(hdma_dev->base, HISI_DMA_INT_MSK, qp_index, + HISI_DMA_INT_STS_MASK); +} + +static void hisi_dma_unmask_irq(struct hisi_dma_dev *hdma_dev, u32 qp_index) +{ + void __iomem *base = hdma_dev->base; + + hisi_dma_chan_write(base, HISI_DMA_INT_STS, qp_index, + HISI_DMA_INT_STS_MASK); + hisi_dma_chan_write(base, HISI_DMA_INT_MSK, qp_index, 0); +} + +static void hisi_dma_do_reset(struct hisi_dma_dev *hdma_dev, u32 index) +{ + void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL1 + index * + HISI_DMA_OFFSET; + + hisi_dma_update_bit(addr, HISI_DMA_CTRL1_QUEUE_RESET_S, 1); +} + +static void hisi_dma_reset_qp_point(struct hisi_dma_dev *hdma_dev, u32 index) +{ + hisi_dma_chan_write(hdma_dev->base, HISI_DMA_SQ_TAIL_PTR, index, 0); + hisi_dma_chan_write(hdma_dev->base, HISI_DMA_CQ_HEAD_PTR, index, 0); +} + +static void hisi_dma_reset_hw_chan(struct hisi_dma_chan *chan) +{ + struct hisi_dma_dev *hdma_dev = chan->hdma_dev; + u32 index = chan->qp_num, tmp; + int ret; + + hisi_dma_pause_dma(hdma_dev, index, true); + hisi_dma_enable_dma(hdma_dev, index, false); + hisi_dma_mask_irq(hdma_dev, index); + + ret = readl_relaxed_poll_timeout(hdma_dev->base + + HISI_DMA_Q_FSM_STS + index * HISI_DMA_OFFSET, tmp, + FIELD_GET(HISI_DMA_FSM_STS_MASK, tmp) != RUN, 10, 1000); + if (ret) { + dev_err(&hdma_dev->pdev->dev, "disable channel timeout!\n"); + WARN_ON(1); + } + + hisi_dma_do_reset(hdma_dev, index); + hisi_dma_reset_qp_point(hdma_dev, index); + hisi_dma_pause_dma(hdma_dev, index, false); + hisi_dma_enable_dma(hdma_dev, index, true); + hisi_dma_unmask_irq(hdma_dev, index); + + ret = readl_relaxed_poll_timeout(hdma_dev->base + + HISI_DMA_Q_FSM_STS + index * HISI_DMA_OFFSET, tmp, + FIELD_GET(HISI_DMA_FSM_STS_MASK, tmp) == IDLE, 10, 1000); + if (ret) { + dev_err(&hdma_dev->pdev->dev, "reset channel timeout!\n"); + WARN_ON(1); + } +} + +static void hisi_dma_free_chan_resources(struct dma_chan *c) +{ + struct hisi_dma_chan *chan = to_hisi_dma_chan(c); + struct hisi_dma_dev *hdma_dev = chan->hdma_dev; + + hisi_dma_reset_hw_chan(chan); + vchan_free_chan_resources(&chan->vc); + + memset(chan->sq, 0, sizeof(struct hisi_dma_sqe) * hdma_dev->chan_depth); + memset(chan->cq, 0, sizeof(struct hisi_dma_cqe) * hdma_dev->chan_depth); + chan->sq_tail = 0; + chan->cq_head = 0; + chan->status = DISABLE; +} + +static void hisi_dma_desc_free(struct virt_dma_desc *vd) +{ + kfree(to_hisi_dma_desc(vd)); +} + +static struct dma_async_tx_descriptor * +hisi_dma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dst, dma_addr_t src, + size_t len, unsigned long flags) +{ + struct hisi_dma_chan *chan = to_hisi_dma_chan(c); + struct hisi_dma_desc *desc; + + desc = kzalloc(sizeof(*desc), GFP_NOWAIT); + if (!desc) + return NULL; + + desc->sqe.length = cpu_to_le32(len); + desc->sqe.src_addr = cpu_to_le64(src); + 
desc->sqe.dst_addr = cpu_to_le64(dst); + + return vchan_tx_prep(&chan->vc, &desc->vd, flags); +} + +static enum dma_status +hisi_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + return dma_cookie_status(c, cookie, txstate); +} + +static void hisi_dma_start_transfer(struct hisi_dma_chan *chan) +{ + struct hisi_dma_sqe *sqe = chan->sq + chan->sq_tail; + struct hisi_dma_dev *hdma_dev = chan->hdma_dev; + struct hisi_dma_desc *desc; + struct virt_dma_desc *vd; + + vd = vchan_next_desc(&chan->vc); + if (!vd) { + dev_err(&hdma_dev->pdev->dev, "no issued task!\n"); + chan->desc = NULL; + return; + } + list_del(&vd->node); + desc = to_hisi_dma_desc(vd); + chan->desc = desc; + + memcpy(sqe, &desc->sqe, sizeof(struct hisi_dma_sqe)); + + /* update other field in sqe */ + sqe->dw0 = cpu_to_le32(FIELD_PREP(OPCODE_MASK, OPCODE_M2M)); + sqe->dw0 |= cpu_to_le32(LOCAL_IRQ_EN); + + /* make sure data has been updated in sqe */ + wmb(); + + /* update sq tail, point to new sqe position */ + chan->sq_tail = (chan->sq_tail + 1) % hdma_dev->chan_depth; + + /* update sq_tail to trigger a new task */ + hisi_dma_chan_write(hdma_dev->base, HISI_DMA_SQ_TAIL_PTR, chan->qp_num, + chan->sq_tail); +} + +static void hisi_dma_issue_pending(struct dma_chan *c) +{ + struct hisi_dma_chan *chan = to_hisi_dma_chan(c); + unsigned long flags; + + spin_lock_irqsave(&chan->vc.lock, flags); + + if (vchan_issue_pending(&chan->vc)) + hisi_dma_start_transfer(chan); + + spin_unlock_irqrestore(&chan->vc.lock, flags); +} + +static int hisi_dma_terminate_all(struct dma_chan *c) +{ + struct hisi_dma_chan *chan = to_hisi_dma_chan(c); + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&chan->vc.lock, flags); + + hisi_dma_pause_dma(chan->hdma_dev, chan->qp_num, true); + if (chan->desc) { + vchan_terminate_vdesc(&chan->desc->vd); + chan->desc = NULL; + } + + vchan_get_all_descriptors(&chan->vc, &head); + + spin_unlock_irqrestore(&chan->vc.lock, flags); + + vchan_dma_desc_free_list(&chan->vc, &head); + hisi_dma_pause_dma(chan->hdma_dev, chan->qp_num, false); + + return 0; +} + +static void hisi_dma_synchronize(struct dma_chan *c) +{ + struct hisi_dma_chan *chan = to_hisi_dma_chan(c); + + vchan_synchronize(&chan->vc); +} + +static int hisi_dma_alloc_qps_mem(struct hisi_dma_dev *hdma_dev) +{ + size_t sq_size = sizeof(struct hisi_dma_sqe) * hdma_dev->chan_depth; + size_t cq_size = sizeof(struct hisi_dma_cqe) * hdma_dev->chan_depth; + struct device *dev = &hdma_dev->pdev->dev; + struct hisi_dma_chan *chan; + int i; + + for (i = 0; i < hdma_dev->chan_num; i++) { + chan = &hdma_dev->chan[i]; + chan->sq = dmam_alloc_coherent(dev, sq_size, &chan->sq_dma, + GFP_KERNEL); + if (!chan->sq) + return -ENOMEM; + + chan->cq = dmam_alloc_coherent(dev, cq_size, &chan->cq_dma, + GFP_KERNEL); + if (!chan->cq) + return -ENOMEM; + } + + return 0; +} + +static void hisi_dma_init_hw_qp(struct hisi_dma_dev *hdma_dev, u32 index) +{ + struct hisi_dma_chan *chan = &hdma_dev->chan[index]; + u32 hw_depth = hdma_dev->chan_depth - 1; + void __iomem *base = hdma_dev->base; + + /* set sq, cq base */ + hisi_dma_chan_write(base, HISI_DMA_SQ_BASE_L, index, + lower_32_bits(chan->sq_dma)); + hisi_dma_chan_write(base, HISI_DMA_SQ_BASE_H, index, + upper_32_bits(chan->sq_dma)); + hisi_dma_chan_write(base, HISI_DMA_CQ_BASE_L, index, + lower_32_bits(chan->cq_dma)); + hisi_dma_chan_write(base, HISI_DMA_CQ_BASE_H, index, + upper_32_bits(chan->cq_dma)); + + /* set sq, cq depth */ + hisi_dma_chan_write(base, HISI_DMA_SQ_DEPTH, index, 
hw_depth); + hisi_dma_chan_write(base, HISI_DMA_CQ_DEPTH, index, hw_depth); + + /* init sq tail and cq head */ + hisi_dma_chan_write(base, HISI_DMA_SQ_TAIL_PTR, index, 0); + hisi_dma_chan_write(base, HISI_DMA_CQ_HEAD_PTR, index, 0); +} + +static void hisi_dma_enable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index) +{ + hisi_dma_init_hw_qp(hdma_dev, qp_index); + hisi_dma_unmask_irq(hdma_dev, qp_index); + hisi_dma_enable_dma(hdma_dev, qp_index, true); +} + +static void hisi_dma_disable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index) +{ + hisi_dma_reset_hw_chan(&hdma_dev->chan[qp_index]); +} + +static void hisi_dma_enable_qps(struct hisi_dma_dev *hdma_dev) +{ + int i; + + for (i = 0; i < hdma_dev->chan_num; i++) { + hdma_dev->chan[i].qp_num = i; + hdma_dev->chan[i].hdma_dev = hdma_dev; + hdma_dev->chan[i].vc.desc_free = hisi_dma_desc_free; + vchan_init(&hdma_dev->chan[i].vc, &hdma_dev->dma_dev); + hisi_dma_enable_qp(hdma_dev, i); + } +} + +static void hisi_dma_disable_qps(struct hisi_dma_dev *hdma_dev) +{ + int i; + + for (i = 0; i < hdma_dev->chan_num; i++) { + hisi_dma_disable_qp(hdma_dev, i); + tasklet_kill(&hdma_dev->chan[i].vc.task); + } +} + +static irqreturn_t hisi_dma_irq(int irq, void *data) +{ + struct hisi_dma_chan *chan = data; + struct hisi_dma_dev *hdma_dev = chan->hdma_dev; + struct hisi_dma_desc *desc; + struct hisi_dma_cqe *cqe; + unsigned long flags; + + spin_lock_irqsave(&chan->vc.lock, flags); + + desc = chan->desc; + cqe = chan->cq + chan->cq_head; + if (desc) { + if (FIELD_GET(STATUS_MASK, cqe->w0) == STATUS_SUCC) { + chan->cq_head = (chan->cq_head + 1) % + hdma_dev->chan_depth; + hisi_dma_chan_write(hdma_dev->base, + HISI_DMA_CQ_HEAD_PTR, chan->qp_num, + chan->cq_head); + vchan_cookie_complete(&desc->vd); + } else { + dev_err(&hdma_dev->pdev->dev, "task error!\n"); + } + + chan->desc = NULL; + } + + spin_unlock_irqrestore(&chan->vc.lock, flags); + + return IRQ_HANDLED; +} + +static int hisi_dma_request_qps_irq(struct hisi_dma_dev *hdma_dev) +{ + struct pci_dev *pdev = hdma_dev->pdev; + int i, ret; + + for (i = 0; i < hdma_dev->chan_num; i++) { + ret = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i), + hisi_dma_irq, IRQF_SHARED, "hisi_dma", + &hdma_dev->chan[i]); + if (ret) + return ret; + } + + return 0; +} + +/* This function enables all hw channels in a device */ +static int hisi_dma_enable_hw_channels(struct hisi_dma_dev *hdma_dev) +{ + int ret; + + ret = hisi_dma_alloc_qps_mem(hdma_dev); + if (ret) { + dev_err(&hdma_dev->pdev->dev, "fail to allocate qp memory!\n"); + return ret; + } + + ret = hisi_dma_request_qps_irq(hdma_dev); + if (ret) { + dev_err(&hdma_dev->pdev->dev, "fail to request qp irq!\n"); + return ret; + } + + hisi_dma_enable_qps(hdma_dev); + + return 0; +} + +static void hisi_dma_disable_hw_channels(void *data) +{ + hisi_dma_disable_qps(data); +} + +static void hisi_dma_set_mode(struct hisi_dma_dev *hdma_dev, + enum hisi_dma_mode mode) +{ + writel_relaxed(mode == RC ? 
1 : 0, hdma_dev->base + HISI_DMA_MODE); +} + +static int hisi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct device *dev = &pdev->dev; + struct hisi_dma_dev *hdma_dev; + struct dma_device *dma_dev; + size_t dev_size; + int ret; + + ret = pcim_enable_device(pdev); + if (ret) { + dev_err(dev, "failed to enable device mem!\n"); + return ret; + } + + ret = pcim_iomap_regions(pdev, 1 << PCI_BAR_2, pci_name(pdev)); + if (ret) { + dev_err(dev, "failed to remap I/O region!\n"); + return ret; + } + + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (ret) + return ret; + + ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (ret) + return ret; + + dev_size = sizeof(struct hisi_dma_chan) * HISI_DMA_CHAN_NUM + + sizeof(*hdma_dev); + hdma_dev = devm_kzalloc(dev, dev_size, GFP_KERNEL); + if (!hdma_dev) + return -EINVAL; + + hdma_dev->base = pcim_iomap_table(pdev)[PCI_BAR_2]; + hdma_dev->pdev = pdev; + hdma_dev->chan_num = HISI_DMA_CHAN_NUM; + hdma_dev->chan_depth = HISI_DMA_Q_DEPTH_VAL; + + pci_set_drvdata(pdev, hdma_dev); + pci_set_master(pdev); + + ret = pci_alloc_irq_vectors(pdev, HISI_DMA_MSI_NUM, HISI_DMA_MSI_NUM, + PCI_IRQ_MSI); + if (ret < 0) { + dev_err(dev, "Failed to allocate MSI vectors!\n"); + return ret; + } + + ret = devm_add_action_or_reset(dev, hisi_dma_free_irq_vectors, pdev); + if (ret) + return ret; + + dma_dev = &hdma_dev->dma_dev; + dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); + dma_dev->device_free_chan_resources = hisi_dma_free_chan_resources; + dma_dev->device_prep_dma_memcpy = hisi_dma_prep_dma_memcpy; + dma_dev->device_tx_status = hisi_dma_tx_status; + dma_dev->device_issue_pending = hisi_dma_issue_pending; + dma_dev->device_terminate_all = hisi_dma_terminate_all; + dma_dev->device_synchronize = hisi_dma_synchronize; + dma_dev->directions = BIT(DMA_MEM_TO_MEM); + dma_dev->dev = dev; + INIT_LIST_HEAD(&dma_dev->channels); + + hisi_dma_set_mode(hdma_dev, RC); + + ret = hisi_dma_enable_hw_channels(hdma_dev); + if (ret < 0) { + dev_err(dev, "failed to enable hw channel!\n"); + return ret; + } + + ret = devm_add_action_or_reset(dev, hisi_dma_disable_hw_channels, + hdma_dev); + if (ret) + return ret; + + ret = dmaenginem_async_device_register(dma_dev); + if (ret < 0) + dev_err(dev, "failed to register device!\n"); + + return ret; +} + +static const struct pci_device_id hisi_dma_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa122) }, + { 0, } +}; + +static struct pci_driver hisi_dma_pci_driver = { + .name = "hisi_dma", + .id_table = hisi_dma_pci_tbl, + .probe = hisi_dma_probe, +}; + +module_pci_driver(hisi_dma_pci_driver); + +MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>"); +MODULE_AUTHOR("Zhenfa Qiu <qiuzhenfa@hisilicon.com>"); +MODULE_DESCRIPTION("HiSilicon Kunpeng DMA controller driver"); +MODULE_LICENSE("GPL v2"); +MODULE_DEVICE_TABLE(pci, hisi_dma_pci_tbl); diff --git a/drivers/dma/idxd/Makefile b/drivers/dma/idxd/Makefile new file mode 100644 index 000000000000..8978b898d777 --- /dev/null +++ b/drivers/dma/idxd/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_INTEL_IDXD) += idxd.o +idxd-y := init.o irq.o device.o sysfs.o submit.o dma.o cdev.o diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c new file mode 100644 index 000000000000..1d7347825b95 --- /dev/null +++ b/drivers/dma/idxd/cdev.c @@ -0,0 +1,302 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2019 Intel Corporation. All rights rsvd. 
*/ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/device.h> +#include <linux/sched/task.h> +#include <linux/intel-svm.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/cdev.h> +#include <linux/fs.h> +#include <linux/poll.h> +#include <uapi/linux/idxd.h> +#include "registers.h" +#include "idxd.h" + +struct idxd_cdev_context { + const char *name; + dev_t devt; + struct ida minor_ida; +}; + +/* + * ictx is an array based off of accelerator types. enum idxd_type + * is used as index + */ +static struct idxd_cdev_context ictx[IDXD_TYPE_MAX] = { + { .name = "dsa" }, +}; + +struct idxd_user_context { + struct idxd_wq *wq; + struct task_struct *task; + unsigned int flags; +}; + +enum idxd_cdev_cleanup { + CDEV_NORMAL = 0, + CDEV_FAILED, +}; + +static void idxd_cdev_dev_release(struct device *dev) +{ + dev_dbg(dev, "releasing cdev device\n"); + kfree(dev); +} + +static struct device_type idxd_cdev_device_type = { + .name = "idxd_cdev", + .release = idxd_cdev_dev_release, +}; + +static inline struct idxd_cdev *inode_idxd_cdev(struct inode *inode) +{ + struct cdev *cdev = inode->i_cdev; + + return container_of(cdev, struct idxd_cdev, cdev); +} + +static inline struct idxd_wq *idxd_cdev_wq(struct idxd_cdev *idxd_cdev) +{ + return container_of(idxd_cdev, struct idxd_wq, idxd_cdev); +} + +static inline struct idxd_wq *inode_wq(struct inode *inode) +{ + return idxd_cdev_wq(inode_idxd_cdev(inode)); +} + +static int idxd_cdev_open(struct inode *inode, struct file *filp) +{ + struct idxd_user_context *ctx; + struct idxd_device *idxd; + struct idxd_wq *wq; + struct device *dev; + struct idxd_cdev *idxd_cdev; + + wq = inode_wq(inode); + idxd = wq->idxd; + dev = &idxd->pdev->dev; + idxd_cdev = &wq->idxd_cdev; + + dev_dbg(dev, "%s called\n", __func__); + + if (idxd_wq_refcount(wq) > 1 && wq_dedicated(wq)) + return -EBUSY; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->wq = wq; + filp->private_data = ctx; + idxd_wq_get(wq); + return 0; +} + +static int idxd_cdev_release(struct inode *node, struct file *filep) +{ + struct idxd_user_context *ctx = filep->private_data; + struct idxd_wq *wq = ctx->wq; + struct idxd_device *idxd = wq->idxd; + struct device *dev = &idxd->pdev->dev; + + dev_dbg(dev, "%s called\n", __func__); + filep->private_data = NULL; + + kfree(ctx); + idxd_wq_put(wq); + return 0; +} + +static int check_vma(struct idxd_wq *wq, struct vm_area_struct *vma, + const char *func) +{ + struct device *dev = &wq->idxd->pdev->dev; + + if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) { + dev_info_ratelimited(dev, + "%s: %s: mapping too large: %lu\n", + current->comm, func, + vma->vm_end - vma->vm_start); + return -EINVAL; + } + + return 0; +} + +static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct idxd_user_context *ctx = filp->private_data; + struct idxd_wq *wq = ctx->wq; + struct idxd_device *idxd = wq->idxd; + struct pci_dev *pdev = idxd->pdev; + phys_addr_t base = pci_resource_start(pdev, IDXD_WQ_BAR); + unsigned long pfn; + int rc; + + dev_dbg(&pdev->dev, "%s called\n", __func__); + rc = check_vma(wq, vma, __func__); + + vma->vm_flags |= VM_DONTCOPY; + pfn = (base + idxd_get_wq_portal_full_offset(wq->id, + IDXD_PORTAL_LIMITED)) >> PAGE_SHIFT; + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + vma->vm_private_data = ctx; + + return io_remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE, + vma->vm_page_prot); +} + +static __poll_t 
idxd_cdev_poll(struct file *filp, + struct poll_table_struct *wait) +{ + struct idxd_user_context *ctx = filp->private_data; + struct idxd_wq *wq = ctx->wq; + struct idxd_device *idxd = wq->idxd; + struct idxd_cdev *idxd_cdev = &wq->idxd_cdev; + unsigned long flags; + __poll_t out = 0; + + poll_wait(filp, &idxd_cdev->err_queue, wait); + spin_lock_irqsave(&idxd->dev_lock, flags); + if (idxd->sw_err.valid) + out = EPOLLIN | EPOLLRDNORM; + spin_unlock_irqrestore(&idxd->dev_lock, flags); + + return out; +} + +static const struct file_operations idxd_cdev_fops = { + .owner = THIS_MODULE, + .open = idxd_cdev_open, + .release = idxd_cdev_release, + .mmap = idxd_cdev_mmap, + .poll = idxd_cdev_poll, +}; + +int idxd_cdev_get_major(struct idxd_device *idxd) +{ + return MAJOR(ictx[idxd->type].devt); +} + +static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq) +{ + struct idxd_device *idxd = wq->idxd; + struct idxd_cdev *idxd_cdev = &wq->idxd_cdev; + struct idxd_cdev_context *cdev_ctx; + struct device *dev; + int minor, rc; + + idxd_cdev->dev = kzalloc(sizeof(*idxd_cdev->dev), GFP_KERNEL); + if (!idxd_cdev->dev) + return -ENOMEM; + + dev = idxd_cdev->dev; + dev->parent = &idxd->pdev->dev; + dev_set_name(dev, "%s/wq%u.%u", idxd_get_dev_name(idxd), + idxd->id, wq->id); + dev->bus = idxd_get_bus_type(idxd); + + cdev_ctx = &ictx[wq->idxd->type]; + minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL); + if (minor < 0) { + rc = minor; + goto ida_err; + } + + dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor); + dev->type = &idxd_cdev_device_type; + rc = device_register(dev); + if (rc < 0) { + dev_err(&idxd->pdev->dev, "device register failed\n"); + put_device(dev); + goto dev_reg_err; + } + idxd_cdev->minor = minor; + + return 0; + + dev_reg_err: + ida_simple_remove(&cdev_ctx->minor_ida, MINOR(dev->devt)); + ida_err: + kfree(dev); + idxd_cdev->dev = NULL; + return rc; +} + +static void idxd_wq_cdev_cleanup(struct idxd_wq *wq, + enum idxd_cdev_cleanup cdev_state) +{ + struct idxd_cdev *idxd_cdev = &wq->idxd_cdev; + struct idxd_cdev_context *cdev_ctx; + + cdev_ctx = &ictx[wq->idxd->type]; + if (cdev_state == CDEV_NORMAL) + cdev_del(&idxd_cdev->cdev); + device_unregister(idxd_cdev->dev); + /* + * The device_type->release() will be called on the device and free + * the allocated struct device. We can just forget it. 
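The cdev glue above exposes each work queue as a character device named after the accelerator type, so with the "dsa" context name a node such as /dev/dsa/wq0.0 appears. A hypothetical user-space sketch of opening a queue and mapping its limited portal; the device path and page size are assumptions, not a documented ABI:

/* Hypothetical user-space client of the idxd cdev (not from the patch). */
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	void *portal;
	int fd;

	fd = open("/dev/dsa/wq0.0", O_RDWR); /* assumed device node */
	if (fd < 0)
		return 1;

	/* idxd_cdev_mmap() rejects mappings larger than one page */
	portal = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED, fd, 0);
	if (portal == MAP_FAILED) {
		close(fd);
		return 1;
	}

	/* work descriptors would be written through the portal here */
	munmap(portal, 4096);
	close(fd);
	return 0;
}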
+ */ + ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor); + idxd_cdev->dev = NULL; + idxd_cdev->minor = -1; +} + +int idxd_wq_add_cdev(struct idxd_wq *wq) +{ + struct idxd_cdev *idxd_cdev = &wq->idxd_cdev; + struct cdev *cdev = &idxd_cdev->cdev; + struct device *dev; + int rc; + + rc = idxd_wq_cdev_dev_setup(wq); + if (rc < 0) + return rc; + + dev = idxd_cdev->dev; + cdev_init(cdev, &idxd_cdev_fops); + cdev_set_parent(cdev, &dev->kobj); + rc = cdev_add(cdev, dev->devt, 1); + if (rc) { + dev_dbg(&wq->idxd->pdev->dev, "cdev_add failed: %d\n", rc); + idxd_wq_cdev_cleanup(wq, CDEV_FAILED); + return rc; + } + + init_waitqueue_head(&idxd_cdev->err_queue); + return 0; +} + +void idxd_wq_del_cdev(struct idxd_wq *wq) +{ + idxd_wq_cdev_cleanup(wq, CDEV_NORMAL); +} + +int idxd_cdev_register(void) +{ + int rc, i; + + for (i = 0; i < IDXD_TYPE_MAX; i++) { + ida_init(&ictx[i].minor_ida); + rc = alloc_chrdev_region(&ictx[i].devt, 0, MINORMASK, + ictx[i].name); + if (rc) + return rc; + } + + return 0; +} + +void idxd_cdev_remove(void) +{ + int i; + + for (i = 0; i < IDXD_TYPE_MAX; i++) { + unregister_chrdev_region(ictx[i].devt, MINORMASK); + ida_destroy(&ictx[i].minor_ida); + } +} diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c new file mode 100644 index 000000000000..ada69e722f84 --- /dev/null +++ b/drivers/dma/idxd/device.c @@ -0,0 +1,693 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/dmaengine.h> +#include <uapi/linux/idxd.h> +#include "../dmaengine.h" +#include "idxd.h" +#include "registers.h" + +static int idxd_cmd_wait(struct idxd_device *idxd, u32 *status, int timeout); +static int idxd_cmd_send(struct idxd_device *idxd, int cmd_code, u32 operand); + +/* Interrupt control bits */ +int idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id) +{ + struct pci_dev *pdev = idxd->pdev; + int msixcnt = pci_msix_vec_count(pdev); + union msix_perm perm; + u32 offset; + + if (vec_id < 0 || vec_id >= msixcnt) + return -EINVAL; + + offset = idxd->msix_perm_offset + vec_id * 8; + perm.bits = ioread32(idxd->reg_base + offset); + perm.ignore = 1; + iowrite32(perm.bits, idxd->reg_base + offset); + + return 0; +} + +void idxd_mask_msix_vectors(struct idxd_device *idxd) +{ + struct pci_dev *pdev = idxd->pdev; + int msixcnt = pci_msix_vec_count(pdev); + int i, rc; + + for (i = 0; i < msixcnt; i++) { + rc = idxd_mask_msix_vector(idxd, i); + if (rc < 0) + dev_warn(&pdev->dev, + "Failed disabling msix vec %d\n", i); + } +} + +int idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id) +{ + struct pci_dev *pdev = idxd->pdev; + int msixcnt = pci_msix_vec_count(pdev); + union msix_perm perm; + u32 offset; + + if (vec_id < 0 || vec_id >= msixcnt) + return -EINVAL; + + offset = idxd->msix_perm_offset + vec_id * 8; + perm.bits = ioread32(idxd->reg_base + offset); + perm.ignore = 0; + iowrite32(perm.bits, idxd->reg_base + offset); + + return 0; +} + +void idxd_unmask_error_interrupts(struct idxd_device *idxd) +{ + union genctrl_reg genctrl; + + genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET); + genctrl.softerr_int_en = 1; + iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET); +} + +void idxd_mask_error_interrupts(struct idxd_device *idxd) +{ + union genctrl_reg genctrl; + + genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET); + 
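The interrupt-control helpers above all use one idiom: read the register into a union of a raw u32 and named bitfields, flip one field, and write the raw value back. A condensed sketch of that read-modify-write pattern; the register layout shown is illustrative, not the real idxd register map:

/* Illustrative MMIO read-modify-write through a bitfield union. */
#include <linux/io.h>
#include <linux/types.h>

union demo_reg {
	u32 bits;
	struct {
		u32 enable:1;
		u32 rsvd:31;
	};
};

static void demo_set_enable(void __iomem *addr, bool on)
{
	union demo_reg reg;

	reg.bits = ioread32(addr);
	reg.enable = on;
	iowrite32(reg.bits, addr);
}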
genctrl.softerr_int_en = 0; + iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET); +} + +static void free_hw_descs(struct idxd_wq *wq) +{ + int i; + + for (i = 0; i < wq->num_descs; i++) + kfree(wq->hw_descs[i]); + + kfree(wq->hw_descs); +} + +static int alloc_hw_descs(struct idxd_wq *wq, int num) +{ + struct device *dev = &wq->idxd->pdev->dev; + int i; + int node = dev_to_node(dev); + + wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *), + GFP_KERNEL, node); + if (!wq->hw_descs) + return -ENOMEM; + + for (i = 0; i < num; i++) { + wq->hw_descs[i] = kzalloc_node(sizeof(*wq->hw_descs[i]), + GFP_KERNEL, node); + if (!wq->hw_descs[i]) { + free_hw_descs(wq); + return -ENOMEM; + } + } + + return 0; +} + +static void free_descs(struct idxd_wq *wq) +{ + int i; + + for (i = 0; i < wq->num_descs; i++) + kfree(wq->descs[i]); + + kfree(wq->descs); +} + +static int alloc_descs(struct idxd_wq *wq, int num) +{ + struct device *dev = &wq->idxd->pdev->dev; + int i; + int node = dev_to_node(dev); + + wq->descs = kcalloc_node(num, sizeof(struct idxd_desc *), + GFP_KERNEL, node); + if (!wq->descs) + return -ENOMEM; + + for (i = 0; i < num; i++) { + wq->descs[i] = kzalloc_node(sizeof(*wq->descs[i]), + GFP_KERNEL, node); + if (!wq->descs[i]) { + free_descs(wq); + return -ENOMEM; + } + } + + return 0; +} + +/* WQ control bits */ +int idxd_wq_alloc_resources(struct idxd_wq *wq) +{ + struct idxd_device *idxd = wq->idxd; + struct idxd_group *group = wq->group; + struct device *dev = &idxd->pdev->dev; + int rc, num_descs, i; + + if (wq->type != IDXD_WQT_KERNEL) + return 0; + + num_descs = wq->size + + idxd->hw.gen_cap.max_descs_per_engine * group->num_engines; + wq->num_descs = num_descs; + + rc = alloc_hw_descs(wq, num_descs); + if (rc < 0) + return rc; + + wq->compls_size = num_descs * sizeof(struct dsa_completion_record); + wq->compls = dma_alloc_coherent(dev, wq->compls_size, + &wq->compls_addr, GFP_KERNEL); + if (!wq->compls) { + rc = -ENOMEM; + goto fail_alloc_compls; + } + + rc = alloc_descs(wq, num_descs); + if (rc < 0) + goto fail_alloc_descs; + + rc = sbitmap_init_node(&wq->sbmap, num_descs, -1, GFP_KERNEL, + dev_to_node(dev)); + if (rc < 0) + goto fail_sbitmap_init; + + for (i = 0; i < num_descs; i++) { + struct idxd_desc *desc = wq->descs[i]; + + desc->hw = wq->hw_descs[i]; + desc->completion = &wq->compls[i]; + desc->compl_dma = wq->compls_addr + + sizeof(struct dsa_completion_record) * i; + desc->id = i; + desc->wq = wq; + + dma_async_tx_descriptor_init(&desc->txd, &wq->dma_chan); + desc->txd.tx_submit = idxd_dma_tx_submit; + } + + return 0; + + fail_sbitmap_init: + free_descs(wq); + fail_alloc_descs: + dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr); + fail_alloc_compls: + free_hw_descs(wq); + return rc; +} + +void idxd_wq_free_resources(struct idxd_wq *wq) +{ + struct device *dev = &wq->idxd->pdev->dev; + + if (wq->type != IDXD_WQT_KERNEL) + return; + + free_hw_descs(wq); + free_descs(wq); + dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr); + sbitmap_free(&wq->sbmap); +} + +int idxd_wq_enable(struct idxd_wq *wq) +{ + struct idxd_device *idxd = wq->idxd; + struct device *dev = &idxd->pdev->dev; + u32 status; + int rc; + + lockdep_assert_held(&idxd->dev_lock); + + if (wq->state == IDXD_WQ_ENABLED) { + dev_dbg(dev, "WQ %d already enabled\n", wq->id); + return -ENXIO; + } + + rc = idxd_cmd_send(idxd, IDXD_CMD_ENABLE_WQ, wq->id); + if (rc < 0) + return rc; + rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT); + if (rc < 0) + return rc; + + if 
(status != IDXD_CMDSTS_SUCCESS && + status != IDXD_CMDSTS_ERR_WQ_ENABLED) { + dev_dbg(dev, "WQ enable failed: %#x\n", status); + return -ENXIO; + } + + wq->state = IDXD_WQ_ENABLED; + dev_dbg(dev, "WQ %d enabled\n", wq->id); + return 0; +} + +int idxd_wq_disable(struct idxd_wq *wq) +{ + struct idxd_device *idxd = wq->idxd; + struct device *dev = &idxd->pdev->dev; + u32 status, operand; + int rc; + + lockdep_assert_held(&idxd->dev_lock); + dev_dbg(dev, "Disabling WQ %d\n", wq->id); + + if (wq->state != IDXD_WQ_ENABLED) { + dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state); + return 0; + } + + operand = BIT(wq->id % 16) | ((wq->id / 16) << 16); + rc = idxd_cmd_send(idxd, IDXD_CMD_DISABLE_WQ, operand); + if (rc < 0) + return rc; + rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT); + if (rc < 0) + return rc; + + if (status != IDXD_CMDSTS_SUCCESS) { + dev_dbg(dev, "WQ disable failed: %#x\n", status); + return -ENXIO; + } + + wq->state = IDXD_WQ_DISABLED; + dev_dbg(dev, "WQ %d disabled\n", wq->id); + return 0; +} + +int idxd_wq_map_portal(struct idxd_wq *wq) +{ + struct idxd_device *idxd = wq->idxd; + struct pci_dev *pdev = idxd->pdev; + struct device *dev = &pdev->dev; + resource_size_t start; + + start = pci_resource_start(pdev, IDXD_WQ_BAR); + start = start + wq->id * IDXD_PORTAL_SIZE; + + wq->dportal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE); + if (!wq->dportal) + return -ENOMEM; + dev_dbg(dev, "wq %d portal mapped at %p\n", wq->id, wq->dportal); + + return 0; +} + +void idxd_wq_unmap_portal(struct idxd_wq *wq) +{ + struct device *dev = &wq->idxd->pdev->dev; + + devm_iounmap(dev, wq->dportal); +} + +/* Device control bits */ +static inline bool idxd_is_enabled(struct idxd_device *idxd) +{ + union gensts_reg gensts; + + gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET); + + if (gensts.state == IDXD_DEVICE_STATE_ENABLED) + return true; + return false; +} + +static int idxd_cmd_wait(struct idxd_device *idxd, u32 *status, int timeout) +{ + u32 sts, to = timeout; + + lockdep_assert_held(&idxd->dev_lock); + sts = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET); + while (sts & IDXD_CMDSTS_ACTIVE && --to) { + cpu_relax(); + sts = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET); + } + + if (to == 0 && sts & IDXD_CMDSTS_ACTIVE) { + dev_warn(&idxd->pdev->dev, "%s timed out!\n", __func__); + *status = 0; + return -EBUSY; + } + + *status = sts; + return 0; +} + +static int idxd_cmd_send(struct idxd_device *idxd, int cmd_code, u32 operand) +{ + union idxd_command_reg cmd; + int rc; + u32 status; + + lockdep_assert_held(&idxd->dev_lock); + rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT); + if (rc < 0) + return rc; + + memset(&cmd, 0, sizeof(cmd)); + cmd.cmd = cmd_code; + cmd.operand = operand; + dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n", + __func__, cmd_code, operand); + iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET); + + return 0; +} + +int idxd_device_enable(struct idxd_device *idxd) +{ + struct device *dev = &idxd->pdev->dev; + int rc; + u32 status; + + lockdep_assert_held(&idxd->dev_lock); + if (idxd_is_enabled(idxd)) { + dev_dbg(dev, "Device already enabled\n"); + return -ENXIO; + } + + rc = idxd_cmd_send(idxd, IDXD_CMD_ENABLE_DEVICE, 0); + if (rc < 0) + return rc; + rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT); + if (rc < 0) + return rc; + + /* If the command is successful or if the device was enabled */ + if (status != IDXD_CMDSTS_SUCCESS && + status != IDXD_CMDSTS_ERR_DEV_ENABLED) { + dev_dbg(dev, "%s: err_code: %#x\n", __func__, status); + 
return -ENXIO; + } + + idxd->state = IDXD_DEV_ENABLED; + return 0; +} + +int idxd_device_disable(struct idxd_device *idxd) +{ + struct device *dev = &idxd->pdev->dev; + int rc; + u32 status; + + lockdep_assert_held(&idxd->dev_lock); + if (!idxd_is_enabled(idxd)) { + dev_dbg(dev, "Device is not enabled\n"); + return 0; + } + + rc = idxd_cmd_send(idxd, IDXD_CMD_DISABLE_DEVICE, 0); + if (rc < 0) + return rc; + rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT); + if (rc < 0) + return rc; + + /* If the command is successful or if the device was disabled */ + if (status != IDXD_CMDSTS_SUCCESS && + !(status & IDXD_CMDSTS_ERR_DIS_DEV_EN)) { + dev_dbg(dev, "%s: err_code: %#x\n", __func__, status); + rc = -ENXIO; + return rc; + } + + idxd->state = IDXD_DEV_CONF_READY; + return 0; +} + +int __idxd_device_reset(struct idxd_device *idxd) +{ + u32 status; + int rc; + + rc = idxd_cmd_send(idxd, IDXD_CMD_RESET_DEVICE, 0); + if (rc < 0) + return rc; + rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT); + if (rc < 0) + return rc; + + return 0; +} + +int idxd_device_reset(struct idxd_device *idxd) +{ + unsigned long flags; + int rc; + + spin_lock_irqsave(&idxd->dev_lock, flags); + rc = __idxd_device_reset(idxd); + spin_unlock_irqrestore(&idxd->dev_lock, flags); + return rc; +} + +/* Device configuration bits */ +static void idxd_group_config_write(struct idxd_group *group) +{ + struct idxd_device *idxd = group->idxd; + struct device *dev = &idxd->pdev->dev; + int i; + u32 grpcfg_offset; + + dev_dbg(dev, "Writing group %d cfg registers\n", group->id); + + /* setup GRPWQCFG */ + for (i = 0; i < 4; i++) { + grpcfg_offset = idxd->grpcfg_offset + + group->id * 64 + i * sizeof(u64); + iowrite64(group->grpcfg.wqs[i], + idxd->reg_base + grpcfg_offset); + dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n", + group->id, i, grpcfg_offset, + ioread64(idxd->reg_base + grpcfg_offset)); + } + + /* setup GRPENGCFG */ + grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 32; + iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset); + dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id, + grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset)); + + /* setup GRPFLAGS */ + grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 40; + iowrite32(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset); + dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n", + group->id, grpcfg_offset, + ioread32(idxd->reg_base + grpcfg_offset)); +} + +static int idxd_groups_config_write(struct idxd_device *idxd) + +{ + union gencfg_reg reg; + int i; + struct device *dev = &idxd->pdev->dev; + + /* Setup bandwidth token limit */ + if (idxd->token_limit) { + reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET); + reg.token_limit = idxd->token_limit; + iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET); + } + + dev_dbg(dev, "GENCFG(%#x): %#x\n", IDXD_GENCFG_OFFSET, + ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET)); + + for (i = 0; i < idxd->max_groups; i++) { + struct idxd_group *group = &idxd->groups[i]; + + idxd_group_config_write(group); + } + + return 0; +} + +static int idxd_wq_config_write(struct idxd_wq *wq) +{ + struct idxd_device *idxd = wq->idxd; + struct device *dev = &idxd->pdev->dev; + u32 wq_offset; + int i; + + if (!wq->group) + return 0; + + memset(&wq->wqcfg, 0, sizeof(union wqcfg)); + + /* byte 0-3 */ + wq->wqcfg.wq_size = wq->size; + + if (wq->size == 0) { + dev_warn(dev, "Incorrect work queue size: 0\n"); + return -EINVAL; + } + + /* bytes 4-7 */ + wq->wqcfg.wq_thresh = wq->threshold; + + /* byte 8-11 */ + 
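/* priv is set only for kernel-owned WQs; mode = 1 selects dedicated mode, the only mode this patch supports (idxd_wqs_setup() rejects shared WQs). */ +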
wq->wqcfg.priv = !!(wq->type == IDXD_WQT_KERNEL); + wq->wqcfg.mode = 1; + + wq->wqcfg.priority = wq->priority; + + /* bytes 12-15 */ + wq->wqcfg.max_xfer_shift = idxd->hw.gen_cap.max_xfer_shift; + wq->wqcfg.max_batch_shift = idxd->hw.gen_cap.max_batch_shift; + + dev_dbg(dev, "WQ %d CFGs\n", wq->id); + for (i = 0; i < 8; i++) { + wq_offset = idxd->wqcfg_offset + wq->id * 32 + i * sizeof(u32); + iowrite32(wq->wqcfg.bits[i], idxd->reg_base + wq_offset); + dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n", + wq->id, i, wq_offset, + ioread32(idxd->reg_base + wq_offset)); + } + + return 0; +} + +static int idxd_wqs_config_write(struct idxd_device *idxd) +{ + int i, rc; + + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + + rc = idxd_wq_config_write(wq); + if (rc < 0) + return rc; + } + + return 0; +} + +static void idxd_group_flags_setup(struct idxd_device *idxd) +{ + int i; + + /* TC-A 0 and TC-B 1 should be defaults */ + for (i = 0; i < idxd->max_groups; i++) { + struct idxd_group *group = &idxd->groups[i]; + + if (group->tc_a == -1) + group->grpcfg.flags.tc_a = 0; + else + group->grpcfg.flags.tc_a = group->tc_a; + if (group->tc_b == -1) + group->grpcfg.flags.tc_b = 1; + else + group->grpcfg.flags.tc_b = group->tc_b; + group->grpcfg.flags.use_token_limit = group->use_token_limit; + group->grpcfg.flags.tokens_reserved = group->tokens_reserved; + if (group->tokens_allowed) + group->grpcfg.flags.tokens_allowed = + group->tokens_allowed; + else + group->grpcfg.flags.tokens_allowed = idxd->max_tokens; + } +} + +static int idxd_engines_setup(struct idxd_device *idxd) +{ + int i, engines = 0; + struct idxd_engine *eng; + struct idxd_group *group; + + for (i = 0; i < idxd->max_groups; i++) { + group = &idxd->groups[i]; + group->grpcfg.engines = 0; + } + + for (i = 0; i < idxd->max_engines; i++) { + eng = &idxd->engines[i]; + group = eng->group; + + if (!group) + continue; + + group->grpcfg.engines |= BIT(eng->id); + engines++; + } + + if (!engines) + return -EINVAL; + + return 0; +} + +static int idxd_wqs_setup(struct idxd_device *idxd) +{ + struct idxd_wq *wq; + struct idxd_group *group; + int i, j, configured = 0; + struct device *dev = &idxd->pdev->dev; + + for (i = 0; i < idxd->max_groups; i++) { + group = &idxd->groups[i]; + for (j = 0; j < 4; j++) + group->grpcfg.wqs[j] = 0; + } + + for (i = 0; i < idxd->max_wqs; i++) { + wq = &idxd->wqs[i]; + group = wq->group; + + if (!wq->group) + continue; + if (!wq->size) + continue; + + if (!wq_dedicated(wq)) { + dev_warn(dev, "No shared workqueue support.\n"); + return -EINVAL; + } + + group->grpcfg.wqs[wq->id / 64] |= BIT(wq->id % 64); + configured++; + } + + if (configured == 0) + return -EINVAL; + + return 0; +} + +int idxd_device_config(struct idxd_device *idxd) +{ + int rc; + + lockdep_assert_held(&idxd->dev_lock); + rc = idxd_wqs_setup(idxd); + if (rc < 0) + return rc; + + rc = idxd_engines_setup(idxd); + if (rc < 0) + return rc; + + idxd_group_flags_setup(idxd); + + rc = idxd_wqs_config_write(idxd); + if (rc < 0) + return rc; + + rc = idxd_groups_config_write(idxd); + if (rc < 0) + return rc; + + return 0; +} diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c new file mode 100644 index 000000000000..c64c1429d160 --- /dev/null +++ b/drivers/dma/idxd/dma.c @@ -0,0 +1,217 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2019 Intel Corporation. All rights rsvd. 
*/ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/device.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/dmaengine.h> +#include <uapi/linux/idxd.h> +#include "../dmaengine.h" +#include "registers.h" +#include "idxd.h" + +static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c) +{ + return container_of(c, struct idxd_wq, dma_chan); +} + +void idxd_dma_complete_txd(struct idxd_desc *desc, + enum idxd_complete_type comp_type) +{ + struct dma_async_tx_descriptor *tx; + struct dmaengine_result res; + int complete = 1; + + if (desc->completion->status == DSA_COMP_SUCCESS) + res.result = DMA_TRANS_NOERROR; + else if (desc->completion->status) + res.result = DMA_TRANS_WRITE_FAILED; + else if (comp_type == IDXD_COMPLETE_ABORT) + res.result = DMA_TRANS_ABORTED; + else + complete = 0; + + tx = &desc->txd; + if (complete && tx->cookie) { + dma_cookie_complete(tx); + dma_descriptor_unmap(tx); + dmaengine_desc_get_callback_invoke(tx, &res); + tx->callback = NULL; + tx->callback_result = NULL; + } +} + +static void op_flag_setup(unsigned long flags, u32 *desc_flags) +{ + *desc_flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR; + if (flags & DMA_PREP_INTERRUPT) + *desc_flags |= IDXD_OP_FLAG_RCI; +} + +static inline void set_completion_address(struct idxd_desc *desc, + u64 *compl_addr) +{ + *compl_addr = desc->compl_dma; +} + +static inline void idxd_prep_desc_common(struct idxd_wq *wq, + struct dsa_hw_desc *hw, char opcode, + u64 addr_f1, u64 addr_f2, u64 len, + u64 compl, u32 flags) +{ + struct idxd_device *idxd = wq->idxd; + + hw->flags = flags; + hw->opcode = opcode; + hw->src_addr = addr_f1; + hw->dst_addr = addr_f2; + hw->xfer_size = len; + hw->priv = !!(wq->type == IDXD_WQT_KERNEL); + hw->completion_addr = compl; + + /* + * Descriptor completion vectors are 1-8 for MSIX. We will round + * robin through the 8 vectors. + */ + wq->vec_ptr = (wq->vec_ptr % idxd->num_wq_irqs) + 1; + hw->int_handle = wq->vec_ptr; +} + +static struct dma_async_tx_descriptor * +idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest, + dma_addr_t dma_src, size_t len, unsigned long flags) +{ + struct idxd_wq *wq = to_idxd_wq(c); + u32 desc_flags; + struct idxd_device *idxd = wq->idxd; + struct idxd_desc *desc; + + if (wq->state != IDXD_WQ_ENABLED) + return NULL; + + if (len > idxd->max_xfer_bytes) + return NULL; + + op_flag_setup(flags, &desc_flags); + desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK); + if (IS_ERR(desc)) + return NULL; + + idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_MEMMOVE, + dma_src, dma_dest, len, desc->compl_dma, + desc_flags); + + desc->txd.flags = flags; + + return &desc->txd; +} + +static int idxd_dma_alloc_chan_resources(struct dma_chan *chan) +{ + struct idxd_wq *wq = to_idxd_wq(chan); + struct device *dev = &wq->idxd->pdev->dev; + + idxd_wq_get(wq); + dev_dbg(dev, "%s: client_count: %d\n", __func__, + idxd_wq_refcount(wq)); + return 0; +} + +static void idxd_dma_free_chan_resources(struct dma_chan *chan) +{ + struct idxd_wq *wq = to_idxd_wq(chan); + struct device *dev = &wq->idxd->pdev->dev; + + idxd_wq_put(wq); + dev_dbg(dev, "%s: client_count: %d\n", __func__, + idxd_wq_refcount(wq)); +} + +static enum dma_status idxd_dma_tx_status(struct dma_chan *dma_chan, + dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + return dma_cookie_status(dma_chan, cookie, txstate); +} + +/* + * issue_pending() does not need to do anything since tx_submit() does the job + * already. 
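+ * Each descriptor is written to the device portal by idxd_submit_desc() at submit time, so there is no deferred work to kick off here.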
+ */ +static void idxd_dma_issue_pending(struct dma_chan *dma_chan) +{ +} + +dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct dma_chan *c = tx->chan; + struct idxd_wq *wq = to_idxd_wq(c); + dma_cookie_t cookie; + int rc; + struct idxd_desc *desc = container_of(tx, struct idxd_desc, txd); + + cookie = dma_cookie_assign(tx); + + rc = idxd_submit_desc(wq, desc); + if (rc < 0) { + idxd_free_desc(wq, desc); + return rc; + } + + return cookie; +} + +static void idxd_dma_release(struct dma_device *device) +{ +} + +int idxd_register_dma_device(struct idxd_device *idxd) +{ + struct dma_device *dma = &idxd->dma_dev; + + INIT_LIST_HEAD(&dma->channels); + dma->dev = &idxd->pdev->dev; + + dma->device_release = idxd_dma_release; + + if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) { + dma_cap_set(DMA_MEMCPY, dma->cap_mask); + dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy; + } + + dma->device_tx_status = idxd_dma_tx_status; + dma->device_issue_pending = idxd_dma_issue_pending; + dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources; + dma->device_free_chan_resources = idxd_dma_free_chan_resources; + + return dma_async_device_register(&idxd->dma_dev); +} + +void idxd_unregister_dma_device(struct idxd_device *idxd) +{ + dma_async_device_unregister(&idxd->dma_dev); +} + +int idxd_register_dma_channel(struct idxd_wq *wq) +{ + struct idxd_device *idxd = wq->idxd; + struct dma_device *dma = &idxd->dma_dev; + struct dma_chan *chan = &wq->dma_chan; + int rc; + + memset(&wq->dma_chan, 0, sizeof(struct dma_chan)); + chan->device = dma; + list_add_tail(&chan->device_node, &dma->channels); + rc = dma_async_device_channel_register(dma, chan); + if (rc < 0) + return rc; + + return 0; +} + +void idxd_unregister_dma_channel(struct idxd_wq *wq) +{ + dma_async_device_channel_unregister(&wq->idxd->dma_dev, &wq->dma_chan); +} diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h new file mode 100644 index 000000000000..b8f8a363b4a7 --- /dev/null +++ b/drivers/dma/idxd/idxd.h @@ -0,0 +1,316 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2019 Intel Corporation. All rights rsvd. 
*/ +#ifndef _IDXD_H_ +#define _IDXD_H_ + +#include <linux/sbitmap.h> +#include <linux/dmaengine.h> +#include <linux/percpu-rwsem.h> +#include <linux/wait.h> +#include <linux/cdev.h> +#include "registers.h" + +#define IDXD_DRIVER_VERSION "1.00" + +extern struct kmem_cache *idxd_desc_pool; + +#define IDXD_REG_TIMEOUT 50 +#define IDXD_DRAIN_TIMEOUT 5000 + +enum idxd_type { + IDXD_TYPE_UNKNOWN = -1, + IDXD_TYPE_DSA = 0, + IDXD_TYPE_MAX +}; + +#define IDXD_NAME_SIZE 128 + +struct idxd_device_driver { + struct device_driver drv; +}; + +struct idxd_irq_entry { + struct idxd_device *idxd; + int id; + struct llist_head pending_llist; + struct list_head work_list; +}; + +struct idxd_group { + struct device conf_dev; + struct idxd_device *idxd; + struct grpcfg grpcfg; + int id; + int num_engines; + int num_wqs; + bool use_token_limit; + u8 tokens_allowed; + u8 tokens_reserved; + int tc_a; + int tc_b; +}; + +#define IDXD_MAX_PRIORITY 0xf + +enum idxd_wq_state { + IDXD_WQ_DISABLED = 0, + IDXD_WQ_ENABLED, +}; + +enum idxd_wq_flag { + WQ_FLAG_DEDICATED = 0, +}; + +enum idxd_wq_type { + IDXD_WQT_NONE = 0, + IDXD_WQT_KERNEL, + IDXD_WQT_USER, +}; + +struct idxd_cdev { + struct cdev cdev; + struct device *dev; + int minor; + struct wait_queue_head err_queue; +}; + +#define IDXD_ALLOCATED_BATCH_SIZE 128U +#define WQ_NAME_SIZE 1024 +#define WQ_TYPE_SIZE 10 + +enum idxd_op_type { + IDXD_OP_BLOCK = 0, + IDXD_OP_NONBLOCK = 1, +}; + +enum idxd_complete_type { + IDXD_COMPLETE_NORMAL = 0, + IDXD_COMPLETE_ABORT, +}; + +struct idxd_wq { + void __iomem *dportal; + struct device conf_dev; + struct idxd_cdev idxd_cdev; + struct idxd_device *idxd; + int id; + enum idxd_wq_type type; + struct idxd_group *group; + int client_count; + struct mutex wq_lock; /* mutex for workqueue */ + u32 size; + u32 threshold; + u32 priority; + enum idxd_wq_state state; + unsigned long flags; + union wqcfg wqcfg; + atomic_t dq_count; /* dedicated queue flow control */ + u32 vec_ptr; /* interrupt steering */ + struct dsa_hw_desc **hw_descs; + int num_descs; + struct dsa_completion_record *compls; + dma_addr_t compls_addr; + int compls_size; + struct idxd_desc **descs; + struct sbitmap sbmap; + struct dma_chan dma_chan; + struct percpu_rw_semaphore submit_lock; + wait_queue_head_t submit_waitq; + char name[WQ_NAME_SIZE + 1]; +}; + +struct idxd_engine { + struct device conf_dev; + int id; + struct idxd_group *group; + struct idxd_device *idxd; +}; + +/* shadow registers */ +struct idxd_hw { + u32 version; + union gen_cap_reg gen_cap; + union wq_cap_reg wq_cap; + union group_cap_reg group_cap; + union engine_cap_reg engine_cap; + struct opcap opcap; +}; + +enum idxd_device_state { + IDXD_DEV_HALTED = -1, + IDXD_DEV_DISABLED = 0, + IDXD_DEV_CONF_READY, + IDXD_DEV_ENABLED, +}; + +enum idxd_device_flag { + IDXD_FLAG_CONFIGURABLE = 0, +}; + +struct idxd_device { + enum idxd_type type; + struct device conf_dev; + struct list_head list; + struct idxd_hw hw; + enum idxd_device_state state; + unsigned long flags; + int id; + int major; + + struct pci_dev *pdev; + void __iomem *reg_base; + + spinlock_t dev_lock; /* spinlock for device */ + struct idxd_group *groups; + struct idxd_wq *wqs; + struct idxd_engine *engines; + + int num_groups; + + u32 msix_perm_offset; + u32 wqcfg_offset; + u32 grpcfg_offset; + u32 perfmon_offset; + + u64 max_xfer_bytes; + u32 max_batch_size; + int max_groups; + int max_engines; + int max_tokens; + int max_wqs; + int max_wq_size; + int token_limit; + int nr_tokens; /* non-reserved tokens */ + + union sw_err_reg sw_err; + + 
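/* MSI-X bookkeeping: vector 0 services errors and other misc causes (idxd_misc_thread()), the remaining vectors service WQ completions (see idxd_setup_interrupts()). */ +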
struct msix_entry *msix_entries; + int num_wq_irqs; + struct idxd_irq_entry *irq_entries; + + struct dma_device dma_dev; +}; + +/* IDXD software descriptor */ +struct idxd_desc { + struct dsa_hw_desc *hw; + dma_addr_t desc_dma; + struct dsa_completion_record *completion; + dma_addr_t compl_dma; + struct dma_async_tx_descriptor txd; + struct llist_node llnode; + struct list_head list; + int id; + struct idxd_wq *wq; +}; + +#define confdev_to_idxd(dev) container_of(dev, struct idxd_device, conf_dev) +#define confdev_to_wq(dev) container_of(dev, struct idxd_wq, conf_dev) + +extern struct bus_type dsa_bus_type; + +static inline bool wq_dedicated(struct idxd_wq *wq) +{ + return test_bit(WQ_FLAG_DEDICATED, &wq->flags); +} + +enum idxd_portal_prot { + IDXD_PORTAL_UNLIMITED = 0, + IDXD_PORTAL_LIMITED, +}; + +static inline int idxd_get_wq_portal_offset(enum idxd_portal_prot prot) +{ + return prot * 0x1000; +} + +static inline int idxd_get_wq_portal_full_offset(int wq_id, + enum idxd_portal_prot prot) +{ + return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot); +} + +static inline void idxd_set_type(struct idxd_device *idxd) +{ + struct pci_dev *pdev = idxd->pdev; + + if (pdev->device == PCI_DEVICE_ID_INTEL_DSA_SPR0) + idxd->type = IDXD_TYPE_DSA; + else + idxd->type = IDXD_TYPE_UNKNOWN; +} + +static inline void idxd_wq_get(struct idxd_wq *wq) +{ + wq->client_count++; +} + +static inline void idxd_wq_put(struct idxd_wq *wq) +{ + wq->client_count--; +} + +static inline int idxd_wq_refcount(struct idxd_wq *wq) +{ + return wq->client_count; +}; + +const char *idxd_get_dev_name(struct idxd_device *idxd); +int idxd_register_bus_type(void); +void idxd_unregister_bus_type(void); +int idxd_setup_sysfs(struct idxd_device *idxd); +void idxd_cleanup_sysfs(struct idxd_device *idxd); +int idxd_register_driver(void); +void idxd_unregister_driver(void); +struct bus_type *idxd_get_bus_type(struct idxd_device *idxd); + +/* device interrupt control */ +irqreturn_t idxd_irq_handler(int vec, void *data); +irqreturn_t idxd_misc_thread(int vec, void *data); +irqreturn_t idxd_wq_thread(int irq, void *data); +void idxd_mask_error_interrupts(struct idxd_device *idxd); +void idxd_unmask_error_interrupts(struct idxd_device *idxd); +void idxd_mask_msix_vectors(struct idxd_device *idxd); +int idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id); +int idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id); + +/* device control */ +int idxd_device_enable(struct idxd_device *idxd); +int idxd_device_disable(struct idxd_device *idxd); +int idxd_device_reset(struct idxd_device *idxd); +int __idxd_device_reset(struct idxd_device *idxd); +void idxd_device_cleanup(struct idxd_device *idxd); +int idxd_device_config(struct idxd_device *idxd); +void idxd_device_wqs_clear_state(struct idxd_device *idxd); + +/* work queue control */ +int idxd_wq_alloc_resources(struct idxd_wq *wq); +void idxd_wq_free_resources(struct idxd_wq *wq); +int idxd_wq_enable(struct idxd_wq *wq); +int idxd_wq_disable(struct idxd_wq *wq); +int idxd_wq_map_portal(struct idxd_wq *wq); +void idxd_wq_unmap_portal(struct idxd_wq *wq); + +/* submission */ +int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc); +struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype); +void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc); + +/* dmaengine */ +int idxd_register_dma_device(struct idxd_device *idxd); +void idxd_unregister_dma_device(struct idxd_device *idxd); +int idxd_register_dma_channel(struct idxd_wq 
*wq); +void idxd_unregister_dma_channel(struct idxd_wq *wq); +void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res); +void idxd_dma_complete_txd(struct idxd_desc *desc, + enum idxd_complete_type comp_type); +dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx); + +/* cdev */ +int idxd_cdev_register(void); +void idxd_cdev_remove(void); +int idxd_cdev_get_major(struct idxd_device *idxd); +int idxd_wq_add_cdev(struct idxd_wq *wq); +void idxd_wq_del_cdev(struct idxd_wq *wq); + +#endif diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c new file mode 100644 index 000000000000..7778c05deb5d --- /dev/null +++ b/drivers/dma/idxd/init.c @@ -0,0 +1,533 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/workqueue.h> +#include <linux/aer.h> +#include <linux/fs.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/device.h> +#include <linux/idr.h> +#include <uapi/linux/idxd.h> +#include <linux/dmaengine.h> +#include "../dmaengine.h" +#include "registers.h" +#include "idxd.h" + +MODULE_VERSION(IDXD_DRIVER_VERSION); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Intel Corporation"); + +#define DRV_NAME "idxd" + +static struct idr idxd_idrs[IDXD_TYPE_MAX]; +static struct mutex idxd_idr_lock; + +static struct pci_device_id idxd_pci_tbl[] = { + /* DSA ver 1.0 platforms */ + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_DSA_SPR0) }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, idxd_pci_tbl); + +static char *idxd_name[] = { + "dsa", +}; + +const char *idxd_get_dev_name(struct idxd_device *idxd) +{ + return idxd_name[idxd->type]; +} + +static int idxd_setup_interrupts(struct idxd_device *idxd) +{ + struct pci_dev *pdev = idxd->pdev; + struct device *dev = &pdev->dev; + struct msix_entry *msix; + struct idxd_irq_entry *irq_entry; + int i, msixcnt; + int rc = 0; + + msixcnt = pci_msix_vec_count(pdev); + if (msixcnt < 0) { + dev_err(dev, "Not MSI-X interrupt capable.\n"); + goto err_no_irq; + } + + idxd->msix_entries = devm_kzalloc(dev, sizeof(struct msix_entry) * + msixcnt, GFP_KERNEL); + if (!idxd->msix_entries) { + rc = -ENOMEM; + goto err_no_irq; + } + + for (i = 0; i < msixcnt; i++) + idxd->msix_entries[i].entry = i; + + rc = pci_enable_msix_exact(pdev, idxd->msix_entries, msixcnt); + if (rc) { + dev_err(dev, "Failed enabling %d MSIX entries.\n", msixcnt); + goto err_no_irq; + } + dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt); + + /* + * We implement 1 completion list per MSI-X entry except for + * entry 0, which is for errors and others. 
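+ * Vector 0 therefore gets no completion lists; each remaining vector is given a pending_llist/work_list pair below, processed in idxd_wq_thread().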
+ */ + idxd->irq_entries = devm_kcalloc(dev, msixcnt, + sizeof(struct idxd_irq_entry), + GFP_KERNEL); + if (!idxd->irq_entries) { + rc = -ENOMEM; + goto err_no_irq; + } + + for (i = 0; i < msixcnt; i++) { + idxd->irq_entries[i].id = i; + idxd->irq_entries[i].idxd = idxd; + } + + msix = &idxd->msix_entries[0]; + irq_entry = &idxd->irq_entries[0]; + rc = devm_request_threaded_irq(dev, msix->vector, idxd_irq_handler, + idxd_misc_thread, 0, "idxd-misc", + irq_entry); + if (rc < 0) { + dev_err(dev, "Failed to allocate misc interrupt.\n"); + goto err_no_irq; + } + + dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n", + msix->vector); + + /* first MSI-X entry is not for wq interrupts */ + idxd->num_wq_irqs = msixcnt - 1; + + for (i = 1; i < msixcnt; i++) { + msix = &idxd->msix_entries[i]; + irq_entry = &idxd->irq_entries[i]; + + init_llist_head(&idxd->irq_entries[i].pending_llist); + INIT_LIST_HEAD(&idxd->irq_entries[i].work_list); + rc = devm_request_threaded_irq(dev, msix->vector, + idxd_irq_handler, + idxd_wq_thread, 0, + "idxd-portal", irq_entry); + if (rc < 0) { + dev_err(dev, "Failed to allocate irq %d.\n", + msix->vector); + goto err_no_irq; + } + dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n", + i, msix->vector); + } + + idxd_unmask_error_interrupts(idxd); + + return 0; + + err_no_irq: + /* Disable error interrupt generation */ + idxd_mask_error_interrupts(idxd); + pci_disable_msix(pdev); + dev_err(dev, "No usable interrupts\n"); + return rc; +} + +static void idxd_wqs_free_lock(struct idxd_device *idxd) +{ + int i; + + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + + percpu_free_rwsem(&wq->submit_lock); + } +} + +static int idxd_setup_internals(struct idxd_device *idxd) +{ + struct device *dev = &idxd->pdev->dev; + int i; + + idxd->groups = devm_kcalloc(dev, idxd->max_groups, + sizeof(struct idxd_group), GFP_KERNEL); + if (!idxd->groups) + return -ENOMEM; + + for (i = 0; i < idxd->max_groups; i++) { + idxd->groups[i].idxd = idxd; + idxd->groups[i].id = i; + idxd->groups[i].tc_a = -1; + idxd->groups[i].tc_b = -1; + } + + idxd->wqs = devm_kcalloc(dev, idxd->max_wqs, sizeof(struct idxd_wq), + GFP_KERNEL); + if (!idxd->wqs) + return -ENOMEM; + + idxd->engines = devm_kcalloc(dev, idxd->max_engines, + sizeof(struct idxd_engine), GFP_KERNEL); + if (!idxd->engines) + return -ENOMEM; + + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + int rc; + + wq->id = i; + wq->idxd = idxd; + mutex_init(&wq->wq_lock); + atomic_set(&wq->dq_count, 0); + init_waitqueue_head(&wq->submit_waitq); + wq->idxd_cdev.minor = -1; + rc = percpu_init_rwsem(&wq->submit_lock); + if (rc < 0) { + idxd_wqs_free_lock(idxd); + return rc; + } + } + + for (i = 0; i < idxd->max_engines; i++) { + idxd->engines[i].idxd = idxd; + idxd->engines[i].id = i; + } + + return 0; +} + +static void idxd_read_table_offsets(struct idxd_device *idxd) +{ + union offsets_reg offsets; + struct device *dev = &idxd->pdev->dev; + + offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET); + offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET + + sizeof(u64)); + idxd->grpcfg_offset = offsets.grpcfg * 0x100; + dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset); + idxd->wqcfg_offset = offsets.wqcfg * 0x100; + dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n", + idxd->wqcfg_offset); + idxd->msix_perm_offset = offsets.msix_perm * 0x100; + dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n", + idxd->msix_perm_offset); + idxd->perfmon_offset = 
offsets.perfmon * 0x100; + dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset); +} + +static void idxd_read_caps(struct idxd_device *idxd) +{ + struct device *dev = &idxd->pdev->dev; + int i; + + /* reading generic capabilities */ + idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET); + dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits); + idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift; + dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes); + idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift; + dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size); + if (idxd->hw.gen_cap.config_en) + set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags); + + /* reading group capabilities */ + idxd->hw.group_cap.bits = + ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET); + dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits); + idxd->max_groups = idxd->hw.group_cap.num_groups; + dev_dbg(dev, "max groups: %u\n", idxd->max_groups); + idxd->max_tokens = idxd->hw.group_cap.total_tokens; + dev_dbg(dev, "max tokens: %u\n", idxd->max_tokens); + idxd->nr_tokens = idxd->max_tokens; + + /* read engine capabilities */ + idxd->hw.engine_cap.bits = + ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET); + dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits); + idxd->max_engines = idxd->hw.engine_cap.num_engines; + dev_dbg(dev, "max engines: %u\n", idxd->max_engines); + + /* read workqueue capabilities */ + idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET); + dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits); + idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size; + dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size); + idxd->max_wqs = idxd->hw.wq_cap.num_wqs; + dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs); + + /* reading operation capabilities */ + for (i = 0; i < 4; i++) { + idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base + + IDXD_OPCAP_OFFSET + i * sizeof(u64)); + dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]); + } +} + +static struct idxd_device *idxd_alloc(struct pci_dev *pdev, + void __iomem * const *iomap) +{ + struct device *dev = &pdev->dev; + struct idxd_device *idxd; + + idxd = devm_kzalloc(dev, sizeof(struct idxd_device), GFP_KERNEL); + if (!idxd) + return NULL; + + idxd->pdev = pdev; + idxd->reg_base = iomap[IDXD_MMIO_BAR]; + spin_lock_init(&idxd->dev_lock); + + return idxd; +} + +static int idxd_probe(struct idxd_device *idxd) +{ + struct pci_dev *pdev = idxd->pdev; + struct device *dev = &pdev->dev; + int rc; + + dev_dbg(dev, "%s entered and resetting device\n", __func__); + rc = idxd_device_reset(idxd); + if (rc < 0) + return rc; + dev_dbg(dev, "IDXD reset complete\n"); + + idxd_read_caps(idxd); + idxd_read_table_offsets(idxd); + + rc = idxd_setup_internals(idxd); + if (rc) + goto err_setup; + + rc = idxd_setup_interrupts(idxd); + if (rc) + goto err_setup; + + dev_dbg(dev, "IDXD interrupt setup complete.\n"); + + mutex_lock(&idxd_idr_lock); + idxd->id = idr_alloc(&idxd_idrs[idxd->type], idxd, 0, 0, GFP_KERNEL); + mutex_unlock(&idxd_idr_lock); + if (idxd->id < 0) { + rc = -ENOMEM; + goto err_idr_fail; + } + + idxd->major = idxd_cdev_get_major(idxd); + + dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id); + return 0; + + err_idr_fail: + idxd_mask_error_interrupts(idxd); + idxd_mask_msix_vectors(idxd); + err_setup: + return rc; +} + +static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + void __iomem * const *iomap; + struct device *dev 
= &pdev->dev; + struct idxd_device *idxd; + int rc; + unsigned int mask; + + rc = pcim_enable_device(pdev); + if (rc) + return rc; + + dev_dbg(dev, "Mapping BARs\n"); + mask = (1 << IDXD_MMIO_BAR); + rc = pcim_iomap_regions(pdev, mask, DRV_NAME); + if (rc) + return rc; + + iomap = pcim_iomap_table(pdev); + if (!iomap) + return -ENOMEM; + + dev_dbg(dev, "Set DMA masks\n"); + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (rc) + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) + return rc; + + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (rc) + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) + return rc; + + dev_dbg(dev, "Alloc IDXD context\n"); + idxd = idxd_alloc(pdev, iomap); + if (!idxd) + return -ENOMEM; + + idxd_set_type(idxd); + + dev_dbg(dev, "Set PCI master\n"); + pci_set_master(pdev); + pci_set_drvdata(pdev, idxd); + + idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET); + rc = idxd_probe(idxd); + if (rc) { + dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n"); + return -ENODEV; + } + + rc = idxd_setup_sysfs(idxd); + if (rc) { + dev_err(dev, "IDXD sysfs setup failed\n"); + return -ENODEV; + } + + idxd->state = IDXD_DEV_CONF_READY; + + dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n", + idxd->hw.version); + + return 0; +} + +static void idxd_flush_pending_llist(struct idxd_irq_entry *ie) +{ + struct idxd_desc *desc, *itr; + struct llist_node *head; + + head = llist_del_all(&ie->pending_llist); + if (!head) + return; + + llist_for_each_entry_safe(desc, itr, head, llnode) { + idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT); + idxd_free_desc(desc->wq, desc); + } +} + +static void idxd_flush_work_list(struct idxd_irq_entry *ie) +{ + struct idxd_desc *desc, *iter; + + list_for_each_entry_safe(desc, iter, &ie->work_list, list) { + list_del(&desc->list); + idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT); + idxd_free_desc(desc->wq, desc); + } +} + +static void idxd_shutdown(struct pci_dev *pdev) +{ + struct idxd_device *idxd = pci_get_drvdata(pdev); + int rc, i; + struct idxd_irq_entry *irq_entry; + int msixcnt = pci_msix_vec_count(pdev); + unsigned long flags; + + spin_lock_irqsave(&idxd->dev_lock, flags); + rc = idxd_device_disable(idxd); + spin_unlock_irqrestore(&idxd->dev_lock, flags); + if (rc) + dev_err(&pdev->dev, "Disabling device failed\n"); + + dev_dbg(&pdev->dev, "%s called\n", __func__); + idxd_mask_msix_vectors(idxd); + idxd_mask_error_interrupts(idxd); + + for (i = 0; i < msixcnt; i++) { + irq_entry = &idxd->irq_entries[i]; + synchronize_irq(idxd->msix_entries[i].vector); + if (i == 0) + continue; + idxd_flush_pending_llist(irq_entry); + idxd_flush_work_list(irq_entry); + } +} + +static void idxd_remove(struct pci_dev *pdev) +{ + struct idxd_device *idxd = pci_get_drvdata(pdev); + + dev_dbg(&pdev->dev, "%s called\n", __func__); + idxd_cleanup_sysfs(idxd); + idxd_shutdown(pdev); + idxd_wqs_free_lock(idxd); + mutex_lock(&idxd_idr_lock); + idr_remove(&idxd_idrs[idxd->type], idxd->id); + mutex_unlock(&idxd_idr_lock); +} + +static struct pci_driver idxd_pci_driver = { + .name = DRV_NAME, + .id_table = idxd_pci_tbl, + .probe = idxd_pci_probe, + .remove = idxd_remove, + .shutdown = idxd_shutdown, +}; + +static int __init idxd_init_module(void) +{ + int err, i; + + /* + * If the CPU does not support write512, there's no point in + * enumerating the device. We can not utilize it. 
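+ * (MOVDIR64B provides the atomic 64-byte write that iosubmit_cmds512() uses to post descriptors to the device portal.)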
+ */ + if (!boot_cpu_has(X86_FEATURE_MOVDIR64B)) { + pr_warn("idxd driver failed to load without MOVDIR64B.\n"); + return -ENODEV; + } + + pr_info("%s: Intel(R) Accelerator Devices Driver %s\n", + DRV_NAME, IDXD_DRIVER_VERSION); + + mutex_init(&idxd_idr_lock); + for (i = 0; i < IDXD_TYPE_MAX; i++) + idr_init(&idxd_idrs[i]); + + err = idxd_register_bus_type(); + if (err < 0) + return err; + + err = idxd_register_driver(); + if (err < 0) + goto err_idxd_driver_register; + + err = idxd_cdev_register(); + if (err) + goto err_cdev_register; + + err = pci_register_driver(&idxd_pci_driver); + if (err) + goto err_pci_register; + + return 0; + +err_pci_register: + idxd_cdev_remove(); +err_cdev_register: + idxd_unregister_driver(); +err_idxd_driver_register: + idxd_unregister_bus_type(); + return err; +} +module_init(idxd_init_module); + +static void __exit idxd_exit_module(void) +{ + pci_unregister_driver(&idxd_pci_driver); + idxd_cdev_remove(); + idxd_unregister_bus_type(); +} +module_exit(idxd_exit_module); diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c new file mode 100644 index 000000000000..d6fcd2e60103 --- /dev/null +++ b/drivers/dma/idxd/irq.c @@ -0,0 +1,261 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/dmaengine.h> +#include <uapi/linux/idxd.h> +#include "../dmaengine.h" +#include "idxd.h" +#include "registers.h" + +void idxd_device_wqs_clear_state(struct idxd_device *idxd) +{ + int i; + + lockdep_assert_held(&idxd->dev_lock); + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + + wq->state = IDXD_WQ_DISABLED; + } +} + +static int idxd_restart(struct idxd_device *idxd) +{ + int i, rc; + + lockdep_assert_held(&idxd->dev_lock); + + rc = __idxd_device_reset(idxd); + if (rc < 0) + goto out; + + rc = idxd_device_config(idxd); + if (rc < 0) + goto out; + + rc = idxd_device_enable(idxd); + if (rc < 0) + goto out; + + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + + if (wq->state == IDXD_WQ_ENABLED) { + rc = idxd_wq_enable(wq); + if (rc < 0) { + dev_warn(&idxd->pdev->dev, + "Unable to re-enable wq %s\n", + dev_name(&wq->conf_dev)); + } + } + } + + return 0; + + out: + idxd_device_wqs_clear_state(idxd); + idxd->state = IDXD_DEV_HALTED; + return rc; +} + +irqreturn_t idxd_irq_handler(int vec, void *data) +{ + struct idxd_irq_entry *irq_entry = data; + struct idxd_device *idxd = irq_entry->idxd; + + idxd_mask_msix_vector(idxd, irq_entry->id); + return IRQ_WAKE_THREAD; +} + +irqreturn_t idxd_misc_thread(int vec, void *data) +{ + struct idxd_irq_entry *irq_entry = data; + struct idxd_device *idxd = irq_entry->idxd; + struct device *dev = &idxd->pdev->dev; + union gensts_reg gensts; + u32 cause, val = 0; + int i, rc; + bool err = false; + + cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET); + + if (cause & IDXD_INTC_ERR) { + spin_lock_bh(&idxd->dev_lock); + for (i = 0; i < 4; i++) + idxd->sw_err.bits[i] = ioread64(idxd->reg_base + + IDXD_SWERR_OFFSET + i * sizeof(u64)); + iowrite64(IDXD_SWERR_ACK, idxd->reg_base + IDXD_SWERR_OFFSET); + + if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) { + int id = idxd->sw_err.wq_idx; + struct idxd_wq *wq = &idxd->wqs[id]; + + if (wq->type == IDXD_WQT_USER) + wake_up_interruptible(&wq->idxd_cdev.err_queue); + } else { + int i; + + for (i = 0; i < idxd->max_wqs; i++) { + struct 
idxd_wq *wq = &idxd->wqs[i]; + + if (wq->type == IDXD_WQT_USER) + wake_up_interruptible(&wq->idxd_cdev.err_queue); + } + } + + spin_unlock_bh(&idxd->dev_lock); + val |= IDXD_INTC_ERR; + + for (i = 0; i < 4; i++) + dev_warn(dev, "err[%d]: %#16.16llx\n", + i, idxd->sw_err.bits[i]); + err = true; + } + + if (cause & IDXD_INTC_CMD) { + /* Driver does not use command interrupts */ + val |= IDXD_INTC_CMD; + } + + if (cause & IDXD_INTC_OCCUPY) { + /* Driver does not utilize occupancy interrupt */ + val |= IDXD_INTC_OCCUPY; + } + + if (cause & IDXD_INTC_PERFMON_OVFL) { + /* + * Driver does not utilize perfmon counter overflow interrupt + * yet. + */ + val |= IDXD_INTC_PERFMON_OVFL; + } + + val ^= cause; + if (val) + dev_warn_once(dev, "Unexpected interrupt cause bits set: %#x\n", + val); + + iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET); + if (!err) + return IRQ_HANDLED; + + gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET); + if (gensts.state == IDXD_DEVICE_STATE_HALT) { + spin_lock_bh(&idxd->dev_lock); + if (gensts.reset_type == IDXD_DEVICE_RESET_SOFTWARE) { + rc = idxd_restart(idxd); + if (rc < 0) + dev_err(&idxd->pdev->dev, + "idxd restart failed, device halted.\n"); + } else { + idxd_device_wqs_clear_state(idxd); + idxd->state = IDXD_DEV_HALTED; + dev_err(&idxd->pdev->dev, + "idxd halted, need %s.\n", + gensts.reset_type == IDXD_DEVICE_RESET_FLR ? + "FLR" : "system reset"); + } + spin_unlock_bh(&idxd->dev_lock); + } + + idxd_unmask_msix_vector(idxd, irq_entry->id); + return IRQ_HANDLED; +} + +static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry, + int *processed) +{ + struct idxd_desc *desc, *t; + struct llist_node *head; + int queued = 0; + + head = llist_del_all(&irq_entry->pending_llist); + if (!head) + return 0; + + llist_for_each_entry_safe(desc, t, head, llnode) { + if (desc->completion->status) { + idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL); + idxd_free_desc(desc->wq, desc); + (*processed)++; + } else { + list_add_tail(&desc->list, &irq_entry->work_list); + queued++; + } + } + + return queued; +} + +static int irq_process_work_list(struct idxd_irq_entry *irq_entry, + int *processed) +{ + struct list_head *node, *next; + int queued = 0; + + if (list_empty(&irq_entry->work_list)) + return 0; + + list_for_each_safe(node, next, &irq_entry->work_list) { + struct idxd_desc *desc = + container_of(node, struct idxd_desc, list); + + if (desc->completion->status) { + list_del(&desc->list); + /* process and callback */ + idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL); + idxd_free_desc(desc->wq, desc); + (*processed)++; + } else { + queued++; + } + } + + return queued; +} + +irqreturn_t idxd_wq_thread(int irq, void *data) +{ + struct idxd_irq_entry *irq_entry = data; + int rc, processed = 0, retry = 0; + + /* + * There are two lists we are processing. The pending_llist is where + * the submitter adds each submitted descriptor after sending it to + * the workqueue. It's a lockless singly linked list. The work_list + * is the common Linux doubly linked list. We are in a scenario of + * multiple producers and a single consumer. The producers are all + * the kernel submitters of descriptors, and the consumer is the + * kernel irq handler thread for the msix vector when using threaded + * irq. To work with the restrictions of llist to remain lockless, + * we are doing the following steps: + * 1. Iterate through the work_list and process any completed + * descriptor. Delete the completed entries during iteration. + * 2. llist_del_all() from the pending list. 
+ * 3. Iterate through the llist that was deleted from the pending list + * and process the completed entries. + * 4. If the entry is still waiting on hardware, list_add_tail() to + * the work_list. + * 5. Repeat until no more descriptors. + */ + do { + rc = irq_process_work_list(irq_entry, &processed); + if (rc != 0) { + retry++; + continue; + } + + rc = irq_process_pending_llist(irq_entry, &processed); + } while (rc != 0 && retry != 10); + + idxd_unmask_msix_vector(irq_entry->idxd, irq_entry->id); + + if (processed == 0) + return IRQ_NONE; + + return IRQ_HANDLED; +} diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h new file mode 100644 index 000000000000..a39e7ae6b3d9 --- /dev/null +++ b/drivers/dma/idxd/registers.h @@ -0,0 +1,336 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */ +#ifndef _IDXD_REGISTERS_H_ +#define _IDXD_REGISTERS_H_ + +/* PCI Config */ +#define PCI_DEVICE_ID_INTEL_DSA_SPR0 0x0b25 + +#define IDXD_MMIO_BAR 0 +#define IDXD_WQ_BAR 2 +#define IDXD_PORTAL_SIZE 0x4000 + +/* MMIO Device BAR0 Registers */ +#define IDXD_VER_OFFSET 0x00 +#define IDXD_VER_MAJOR_MASK 0xf0 +#define IDXD_VER_MINOR_MASK 0x0f +#define GET_IDXD_VER_MAJOR(x) (((x) & IDXD_VER_MAJOR_MASK) >> 4) +#define GET_IDXD_VER_MINOR(x) ((x) & IDXD_VER_MINOR_MASK) + +union gen_cap_reg { + struct { + u64 block_on_fault:1; + u64 overlap_copy:1; + u64 cache_control_mem:1; + u64 cache_control_cache:1; + u64 rsvd:3; + u64 int_handle_req:1; + u64 dest_readback:1; + u64 drain_readback:1; + u64 rsvd2:6; + u64 max_xfer_shift:5; + u64 max_batch_shift:4; + u64 max_ims_mult:6; + u64 config_en:1; + u64 max_descs_per_engine:8; + u64 rsvd3:24; + }; + u64 bits; +} __packed; +#define IDXD_GENCAP_OFFSET 0x10 + +union wq_cap_reg { + struct { + u64 total_wq_size:16; + u64 num_wqs:8; + u64 rsvd:24; + u64 shared_mode:1; + u64 dedicated_mode:1; + u64 rsvd2:1; + u64 priority:1; + u64 occupancy:1; + u64 occupancy_int:1; + u64 rsvd3:10; + }; + u64 bits; +} __packed; +#define IDXD_WQCAP_OFFSET 0x20 + +union group_cap_reg { + struct { + u64 num_groups:8; + u64 total_tokens:8; + u64 token_en:1; + u64 token_limit:1; + u64 rsvd:46; + }; + u64 bits; +} __packed; +#define IDXD_GRPCAP_OFFSET 0x30 + +union engine_cap_reg { + struct { + u64 num_engines:8; + u64 rsvd:56; + }; + u64 bits; +} __packed; + +#define IDXD_ENGCAP_OFFSET 0x38 + +#define IDXD_OPCAP_NOOP 0x0001 +#define IDXD_OPCAP_BATCH 0x0002 +#define IDXD_OPCAP_MEMMOVE 0x0008 +struct opcap { + u64 bits[4]; +}; + +#define IDXD_OPCAP_OFFSET 0x40 + +#define IDXD_TABLE_OFFSET 0x60 +union offsets_reg { + struct { + u64 grpcfg:16; + u64 wqcfg:16; + u64 msix_perm:16; + u64 ims:16; + u64 perfmon:16; + u64 rsvd:48; + }; + u64 bits[2]; +} __packed; + +#define IDXD_GENCFG_OFFSET 0x80 +union gencfg_reg { + struct { + u32 token_limit:8; + u32 rsvd:4; + u32 user_int_en:1; + u32 rsvd2:19; + }; + u32 bits; +} __packed; + +#define IDXD_GENCTRL_OFFSET 0x88 +union genctrl_reg { + struct { + u32 softerr_int_en:1; + u32 rsvd:31; + }; + u32 bits; +} __packed; + +#define IDXD_GENSTATS_OFFSET 0x90 +union gensts_reg { + struct { + u32 state:2; + u32 reset_type:2; + u32 rsvd:28; + }; + u32 bits; +} __packed; + +enum idxd_device_status_state { + IDXD_DEVICE_STATE_DISABLED = 0, + IDXD_DEVICE_STATE_ENABLED, + IDXD_DEVICE_STATE_DRAIN, + IDXD_DEVICE_STATE_HALT, +}; + +enum idxd_device_reset_type { + IDXD_DEVICE_RESET_SOFTWARE = 0, + IDXD_DEVICE_RESET_FLR, + IDXD_DEVICE_RESET_WARM, + IDXD_DEVICE_RESET_COLD, +}; + +#define IDXD_INTCAUSE_OFFSET 0x98 
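+/* Interrupt cause bits reported in INTCAUSE; idxd_misc_thread() writes the serviced value back to this register after handling them. */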
+#define IDXD_INTC_ERR 0x01 +#define IDXD_INTC_CMD 0x02 +#define IDXD_INTC_OCCUPY 0x04 +#define IDXD_INTC_PERFMON_OVFL 0x08 + +#define IDXD_CMD_OFFSET 0xa0 +union idxd_command_reg { + struct { + u32 operand:20; + u32 cmd:5; + u32 rsvd:6; + u32 int_req:1; + }; + u32 bits; +} __packed; + +enum idxd_cmd { + IDXD_CMD_ENABLE_DEVICE = 1, + IDXD_CMD_DISABLE_DEVICE, + IDXD_CMD_DRAIN_ALL, + IDXD_CMD_ABORT_ALL, + IDXD_CMD_RESET_DEVICE, + IDXD_CMD_ENABLE_WQ, + IDXD_CMD_DISABLE_WQ, + IDXD_CMD_DRAIN_WQ, + IDXD_CMD_ABORT_WQ, + IDXD_CMD_RESET_WQ, + IDXD_CMD_DRAIN_PASID, + IDXD_CMD_ABORT_PASID, + IDXD_CMD_REQUEST_INT_HANDLE, +}; + +#define IDXD_CMDSTS_OFFSET 0xa8 +union cmdsts_reg { + struct { + u8 err; + u16 result; + u8 rsvd:7; + u8 active:1; + }; + u32 bits; +} __packed; +#define IDXD_CMDSTS_ACTIVE 0x80000000 + +enum idxd_cmdsts_err { + IDXD_CMDSTS_SUCCESS = 0, + IDXD_CMDSTS_INVAL_CMD, + IDXD_CMDSTS_INVAL_WQIDX, + IDXD_CMDSTS_HW_ERR, + /* enable device errors */ + IDXD_CMDSTS_ERR_DEV_ENABLED = 0x10, + IDXD_CMDSTS_ERR_CONFIG, + IDXD_CMDSTS_ERR_BUSMASTER_EN, + IDXD_CMDSTS_ERR_PASID_INVAL, + IDXD_CMDSTS_ERR_WQ_SIZE_ERANGE, + IDXD_CMDSTS_ERR_GRP_CONFIG, + IDXD_CMDSTS_ERR_GRP_CONFIG2, + IDXD_CMDSTS_ERR_GRP_CONFIG3, + IDXD_CMDSTS_ERR_GRP_CONFIG4, + /* enable wq errors */ + IDXD_CMDSTS_ERR_DEV_NOTEN = 0x20, + IDXD_CMDSTS_ERR_WQ_ENABLED, + IDXD_CMDSTS_ERR_WQ_SIZE, + IDXD_CMDSTS_ERR_WQ_PRIOR, + IDXD_CMDSTS_ERR_WQ_MODE, + IDXD_CMDSTS_ERR_BOF_EN, + IDXD_CMDSTS_ERR_PASID_EN, + IDXD_CMDSTS_ERR_MAX_BATCH_SIZE, + IDXD_CMDSTS_ERR_MAX_XFER_SIZE, + /* disable device errors */ + IDXD_CMDSTS_ERR_DIS_DEV_EN = 0x31, + /* disable WQ, drain WQ, abort WQ, reset WQ */ + IDXD_CMDSTS_ERR_DEV_NOT_EN, + /* request interrupt handle */ + IDXD_CMDSTS_ERR_INVAL_INT_IDX = 0x41, + IDXD_CMDSTS_ERR_NO_HANDLE, +}; + +#define IDXD_SWERR_OFFSET 0xc0 +#define IDXD_SWERR_VALID 0x00000001 +#define IDXD_SWERR_OVERFLOW 0x00000002 +#define IDXD_SWERR_ACK (IDXD_SWERR_VALID | IDXD_SWERR_OVERFLOW) +union sw_err_reg { + struct { + u64 valid:1; + u64 overflow:1; + u64 desc_valid:1; + u64 wq_idx_valid:1; + u64 batch:1; + u64 fault_rw:1; + u64 priv:1; + u64 rsvd:1; + u64 error:8; + u64 wq_idx:8; + u64 rsvd2:8; + u64 operation:8; + u64 pasid:20; + u64 rsvd3:4; + + u64 batch_idx:16; + u64 rsvd4:16; + u64 invalid_flags:32; + + u64 fault_addr; + + u64 rsvd5; + }; + u64 bits[4]; +} __packed; + +union msix_perm { + struct { + u32 rsvd:2; + u32 ignore:1; + u32 pasid_en:1; + u32 rsvd2:8; + u32 pasid:20; + }; + u32 bits; +} __packed; + +union group_flags { + struct { + u32 tc_a:3; + u32 tc_b:3; + u32 rsvd:1; + u32 use_token_limit:1; + u32 tokens_reserved:8; + u32 rsvd2:4; + u32 tokens_allowed:8; + u32 rsvd3:4; + }; + u32 bits; +} __packed; + +struct grpcfg { + u64 wqs[4]; + u64 engines; + union group_flags flags; +} __packed; + +union wqcfg { + struct { + /* bytes 0-3 */ + u16 wq_size; + u16 rsvd; + + /* bytes 4-7 */ + u16 wq_thresh; + u16 rsvd1; + + /* bytes 8-11 */ + u32 mode:1; /* shared or dedicated */ + u32 bof:1; /* block on fault */ + u32 rsvd2:2; + u32 priority:4; + u32 pasid:20; + u32 pasid_en:1; + u32 priv:1; + u32 rsvd3:2; + + /* bytes 12-15 */ + u32 max_xfer_shift:5; + u32 max_batch_shift:4; + u32 rsvd4:23; + + /* bytes 16-19 */ + u16 occupancy_inth; + u16 occupancy_table_sel:1; + u16 rsvd5:15; + + /* bytes 20-23 */ + u16 occupancy_limit; + u16 occupancy_int_en:1; + u16 rsvd6:15; + + /* bytes 24-27 */ + u16 occupancy; + u16 occupancy_int:1; + u16 rsvd7:12; + u16 mode_support:1; + u16 wq_state:2; + + /* bytes 28-31 */ + u32 rsvd8; + }; + u32 bits[8]; +} 
__packed; +#endif diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c new file mode 100644 index 000000000000..45a0c5869a0a --- /dev/null +++ b/drivers/dma/idxd/submit.c @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <uapi/linux/idxd.h> +#include "idxd.h" +#include "registers.h" + +struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype) +{ + struct idxd_desc *desc; + int idx; + struct idxd_device *idxd = wq->idxd; + + if (idxd->state != IDXD_DEV_ENABLED) + return ERR_PTR(-EIO); + + if (optype == IDXD_OP_BLOCK) + percpu_down_read(&wq->submit_lock); + else if (!percpu_down_read_trylock(&wq->submit_lock)) + return ERR_PTR(-EBUSY); + + if (!atomic_add_unless(&wq->dq_count, 1, wq->size)) { + int rc; + + if (optype == IDXD_OP_NONBLOCK) { + percpu_up_read(&wq->submit_lock); + return ERR_PTR(-EAGAIN); + } + + percpu_up_read(&wq->submit_lock); + percpu_down_write(&wq->submit_lock); + rc = wait_event_interruptible(wq->submit_waitq, + atomic_add_unless(&wq->dq_count, + 1, wq->size) || + idxd->state != IDXD_DEV_ENABLED); + percpu_up_write(&wq->submit_lock); + if (rc < 0) + return ERR_PTR(-EINTR); + if (idxd->state != IDXD_DEV_ENABLED) + return ERR_PTR(-EIO); + } else { + percpu_up_read(&wq->submit_lock); + } + + idx = sbitmap_get(&wq->sbmap, 0, false); + if (idx < 0) { + atomic_dec(&wq->dq_count); + return ERR_PTR(-EAGAIN); + } + + desc = wq->descs[idx]; + memset(desc->hw, 0, sizeof(struct dsa_hw_desc)); + memset(desc->completion, 0, sizeof(struct dsa_completion_record)); + return desc; +} + +void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc) +{ + atomic_dec(&wq->dq_count); + + sbitmap_clear_bit(&wq->sbmap, desc->id); + wake_up(&wq->submit_waitq); +} + +int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc) +{ + struct idxd_device *idxd = wq->idxd; + int vec = desc->hw->int_handle; + void __iomem *portal; + + if (idxd->state != IDXD_DEV_ENABLED) + return -EIO; + + portal = wq->dportal + idxd_get_wq_portal_offset(IDXD_PORTAL_UNLIMITED); + /* + * The wmb() flushes writes to coherent DMA data before possibly + * triggering a DMA read. The wmb() is necessary even on UP because + * the recipient is a device. + */ + wmb(); + iosubmit_cmds512(portal, desc->hw, 1); + + /* + * Queue the descriptor on the lockless list of the irq_entry + * that the descriptor was assigned to. + */ + if (desc->hw->flags & IDXD_OP_FLAG_RCI) + llist_add(&desc->llnode, + &idxd->irq_entries[vec].pending_llist); + + return 0; +} diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c new file mode 100644 index 000000000000..849c50ab939a --- /dev/null +++ b/drivers/dma/idxd/sysfs.c @@ -0,0 +1,1528 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2019 Intel Corporation. All rights rsvd. 
*/ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/device.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <uapi/linux/idxd.h> +#include "registers.h" +#include "idxd.h" + +static char *idxd_wq_type_names[] = { + [IDXD_WQT_NONE] = "none", + [IDXD_WQT_KERNEL] = "kernel", + [IDXD_WQT_USER] = "user", +}; + +static void idxd_conf_device_release(struct device *dev) +{ + dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev)); +} + +static struct device_type idxd_group_device_type = { + .name = "group", + .release = idxd_conf_device_release, +}; + +static struct device_type idxd_wq_device_type = { + .name = "wq", + .release = idxd_conf_device_release, +}; + +static struct device_type idxd_engine_device_type = { + .name = "engine", + .release = idxd_conf_device_release, +}; + +static struct device_type dsa_device_type = { + .name = "dsa", + .release = idxd_conf_device_release, +}; + +static inline bool is_dsa_dev(struct device *dev) +{ + return dev ? dev->type == &dsa_device_type : false; +} + +static inline bool is_idxd_dev(struct device *dev) +{ + return is_dsa_dev(dev); +} + +static inline bool is_idxd_wq_dev(struct device *dev) +{ + return dev ? dev->type == &idxd_wq_device_type : false; +} + +static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq) +{ + if (wq->type == IDXD_WQT_KERNEL && + strcmp(wq->name, "dmaengine") == 0) + return true; + return false; +} + +static inline bool is_idxd_wq_cdev(struct idxd_wq *wq) +{ + return wq->type == IDXD_WQT_USER ? true : false; +} + +static int idxd_config_bus_match(struct device *dev, + struct device_driver *drv) +{ + int matched = 0; + + if (is_idxd_dev(dev)) { + struct idxd_device *idxd = confdev_to_idxd(dev); + + if (idxd->state != IDXD_DEV_CONF_READY) + return 0; + matched = 1; + } else if (is_idxd_wq_dev(dev)) { + struct idxd_wq *wq = confdev_to_wq(dev); + struct idxd_device *idxd = wq->idxd; + + if (idxd->state < IDXD_DEV_CONF_READY) + return 0; + + if (wq->state != IDXD_WQ_DISABLED) { + dev_dbg(dev, "%s not disabled\n", dev_name(dev)); + return 0; + } + matched = 1; + } + + if (matched) + dev_dbg(dev, "%s matched\n", dev_name(dev)); + + return matched; +} + +static int idxd_config_bus_probe(struct device *dev) +{ + int rc; + unsigned long flags; + + dev_dbg(dev, "%s called\n", __func__); + + if (is_idxd_dev(dev)) { + struct idxd_device *idxd = confdev_to_idxd(dev); + + if (idxd->state != IDXD_DEV_CONF_READY) { + dev_warn(dev, "Device not ready for config\n"); + return -EBUSY; + } + + if (!try_module_get(THIS_MODULE)) + return -ENXIO; + + spin_lock_irqsave(&idxd->dev_lock, flags); + + /* Perform IDXD configuration and enabling */ + rc = idxd_device_config(idxd); + if (rc < 0) { + spin_unlock_irqrestore(&idxd->dev_lock, flags); + dev_warn(dev, "Device config failed: %d\n", rc); + return rc; + } + + /* start device */ + rc = idxd_device_enable(idxd); + if (rc < 0) { + spin_unlock_irqrestore(&idxd->dev_lock, flags); + dev_warn(dev, "Device enable failed: %d\n", rc); + return rc; + } + + spin_unlock_irqrestore(&idxd->dev_lock, flags); + dev_info(dev, "Device %s enabled\n", dev_name(dev)); + + rc = idxd_register_dma_device(idxd); + if (rc < 0) { + dev_dbg(dev, "Failed to register dmaengine device\n"); + return rc; + } + return 0; + } else if (is_idxd_wq_dev(dev)) { + struct idxd_wq *wq = confdev_to_wq(dev); + struct idxd_device *idxd = wq->idxd; + + mutex_lock(&wq->wq_lock); + + if (idxd->state != IDXD_DEV_ENABLED) {
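/*
 * A wq can only be brought up after its parent device has been
 * configured and enabled, i.e. the expected sequence from user
 * space is along these lines (device and wq names illustrative):
 *
 *	echo dsa0  > /sys/bus/dsa/drivers/dsa/bind
 *	echo wq0.0 > /sys/bus/dsa/drivers/dsa/bind
 */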
+ mutex_unlock(&wq->wq_lock); + dev_warn(dev, "Enabling while device not enabled.\n"); + return -EPERM; + } + + if (wq->state != IDXD_WQ_DISABLED) { + mutex_unlock(&wq->wq_lock); + dev_warn(dev, "WQ %d already enabled.\n", wq->id); + return -EBUSY; + } + + if (!wq->group) { + mutex_unlock(&wq->wq_lock); + dev_warn(dev, "WQ not attached to group.\n"); + return -EINVAL; + } + + if (strlen(wq->name) == 0) { + mutex_unlock(&wq->wq_lock); + dev_warn(dev, "WQ name not set.\n"); + return -EINVAL; + } + + rc = idxd_wq_alloc_resources(wq); + if (rc < 0) { + mutex_unlock(&wq->wq_lock); + dev_warn(dev, "WQ resource alloc failed\n"); + return rc; + } + + spin_lock_irqsave(&idxd->dev_lock, flags); + rc = idxd_device_config(idxd); + if (rc < 0) { + spin_unlock_irqrestore(&idxd->dev_lock, flags); + mutex_unlock(&wq->wq_lock); + dev_warn(dev, "Writing WQ %d config failed: %d\n", + wq->id, rc); + return rc; + } + + rc = idxd_wq_enable(wq); + if (rc < 0) { + spin_unlock_irqrestore(&idxd->dev_lock, flags); + mutex_unlock(&wq->wq_lock); + dev_warn(dev, "WQ %d enabling failed: %d\n", + wq->id, rc); + return rc; + } + spin_unlock_irqrestore(&idxd->dev_lock, flags); + + rc = idxd_wq_map_portal(wq); + if (rc < 0) { + dev_warn(dev, "wq portal mapping failed: %d\n", rc); + spin_lock_irqsave(&idxd->dev_lock, flags); + rc = idxd_wq_disable(wq); + if (rc < 0) + dev_warn(dev, "IDXD wq disable failed\n"); + spin_unlock_irqrestore(&idxd->dev_lock, flags); + mutex_unlock(&wq->wq_lock); + return rc; + } + + wq->client_count = 0; + + dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev)); + + if (is_idxd_wq_dmaengine(wq)) { + rc = idxd_register_dma_channel(wq); + if (rc < 0) { + dev_dbg(dev, "DMA channel register failed\n"); + mutex_unlock(&wq->wq_lock); + return rc; + } + } else if (is_idxd_wq_cdev(wq)) { + rc = idxd_wq_add_cdev(wq); + if (rc < 0) { + dev_dbg(dev, "Cdev creation failed\n"); + mutex_unlock(&wq->wq_lock); + return rc; + } + } + + mutex_unlock(&wq->wq_lock); + return 0; + } + + return -ENODEV; +} + +static void disable_wq(struct idxd_wq *wq) +{ + struct idxd_device *idxd = wq->idxd; + struct device *dev = &idxd->pdev->dev; + unsigned long flags; + int rc; + + mutex_lock(&wq->wq_lock); + dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev)); + if (wq->state == IDXD_WQ_DISABLED) { + mutex_unlock(&wq->wq_lock); + return; + } + + if (is_idxd_wq_dmaengine(wq)) + idxd_unregister_dma_channel(wq); + else if (is_idxd_wq_cdev(wq)) + idxd_wq_del_cdev(wq); + + if (idxd_wq_refcount(wq)) + dev_warn(dev, "Clients have a claim on wq %d: %d\n", + wq->id, idxd_wq_refcount(wq)); + + idxd_wq_unmap_portal(wq); + + spin_lock_irqsave(&idxd->dev_lock, flags); + rc = idxd_wq_disable(wq); + spin_unlock_irqrestore(&idxd->dev_lock, flags); + + idxd_wq_free_resources(wq); + wq->client_count = 0; + mutex_unlock(&wq->wq_lock); + + if (rc < 0) + dev_warn(dev, "Failed to disable %s: %d\n", + dev_name(&wq->conf_dev), rc); + else + dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev)); +} + +static int idxd_config_bus_remove(struct device *dev) +{ + int rc; + unsigned long flags; + + dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev)); + + /* disable workqueue here */ + if (is_idxd_wq_dev(dev)) { + struct idxd_wq *wq = confdev_to_wq(dev); + + disable_wq(wq); + } else if (is_idxd_dev(dev)) { + struct idxd_device *idxd = confdev_to_idxd(dev); + int i; + + dev_dbg(dev, "%s removing dev %s\n", __func__, + dev_name(&idxd->conf_dev)); + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + + if (wq->state == IDXD_WQ_DISABLED) + continue; +
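/*
 * Force any wq that is still enabled through its own remove path:
 * device_release_driver() on the child conf_dev re-enters this
 * callback via is_idxd_wq_dev() and ends up in disable_wq().
 */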
dev_warn(dev, "Active wq %d on disable %s.\n", i, + dev_name(&idxd->conf_dev)); + device_release_driver(&wq->conf_dev); + } + + idxd_unregister_dma_device(idxd); + spin_lock_irqsave(&idxd->dev_lock, flags); + rc = idxd_device_disable(idxd); + spin_unlock_irqrestore(&idxd->dev_lock, flags); + module_put(THIS_MODULE); + if (rc < 0) + dev_warn(dev, "Device disable failed\n"); + else + dev_info(dev, "Device %s disabled\n", dev_name(dev)); + + } + + return 0; +} + +static void idxd_config_bus_shutdown(struct device *dev) +{ + dev_dbg(dev, "%s called\n", __func__); +} + +struct bus_type dsa_bus_type = { + .name = "dsa", + .match = idxd_config_bus_match, + .probe = idxd_config_bus_probe, + .remove = idxd_config_bus_remove, + .shutdown = idxd_config_bus_shutdown, +}; + +static struct bus_type *idxd_bus_types[] = { + &dsa_bus_type +}; + +static struct idxd_device_driver dsa_drv = { + .drv = { + .name = "dsa", + .bus = &dsa_bus_type, + .owner = THIS_MODULE, + .mod_name = KBUILD_MODNAME, + }, +}; + +static struct idxd_device_driver *idxd_drvs[] = { + &dsa_drv +}; + +struct bus_type *idxd_get_bus_type(struct idxd_device *idxd) +{ + return idxd_bus_types[idxd->type]; +} + +static struct device_type *idxd_get_device_type(struct idxd_device *idxd) +{ + if (idxd->type == IDXD_TYPE_DSA) + return &dsa_device_type; + else + return NULL; +} + +/* IDXD generic driver setup */ +int idxd_register_driver(void) +{ + int i, rc; + + for (i = 0; i < IDXD_TYPE_MAX; i++) { + rc = driver_register(&idxd_drvs[i]->drv); + if (rc < 0) + goto drv_fail; + } + + return 0; + +drv_fail: + for (; i > 0; i--) + driver_unregister(&idxd_drvs[i]->drv); + return rc; +} + +void idxd_unregister_driver(void) +{ + int i; + + for (i = 0; i < IDXD_TYPE_MAX; i++) + driver_unregister(&idxd_drvs[i]->drv); +} + +/* IDXD engine attributes */ +static ssize_t engine_group_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_engine *engine = + container_of(dev, struct idxd_engine, conf_dev); + + if (engine->group) + return sprintf(buf, "%d\n", engine->group->id); + else + return sprintf(buf, "%d\n", -1); +} + +static ssize_t engine_group_id_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct idxd_engine *engine = + container_of(dev, struct idxd_engine, conf_dev); + struct idxd_device *idxd = engine->idxd; + long id; + int rc; + struct idxd_group *prevg, *group; + + rc = kstrtol(buf, 10, &id); + if (rc < 0) + return -EINVAL; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (id > idxd->max_groups - 1 || id < -1) + return -EINVAL; + + if (id == -1) { + if (engine->group) { + engine->group->num_engines--; + engine->group = NULL; + } + return count; + } + + group = &idxd->groups[id]; + prevg = engine->group; + + if (prevg) + prevg->num_engines--; + engine->group = &idxd->groups[id]; + engine->group->num_engines++; + + return count; +} + +static struct device_attribute dev_attr_engine_group = + __ATTR(group_id, 0644, engine_group_id_show, + engine_group_id_store); + +static struct attribute *idxd_engine_attributes[] = { + &dev_attr_engine_group.attr, + NULL, +}; + +static const struct attribute_group idxd_engine_attribute_group = { + .attrs = idxd_engine_attributes, +}; + +static const struct attribute_group *idxd_engine_attribute_groups[] = { + &idxd_engine_attribute_group, + NULL, +}; + +/* Group attributes */ + +static void idxd_set_free_tokens(struct idxd_device *idxd) +{ + int i, tokens; + + for (i = 0, tokens = 0; i < 
idxd->max_groups; i++) { + struct idxd_group *g = &idxd->groups[i]; + + tokens += g->tokens_reserved; + } + + idxd->nr_tokens = idxd->max_tokens - tokens; +} + +static ssize_t group_tokens_reserved_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + + return sprintf(buf, "%u\n", group->tokens_reserved); +} + +static ssize_t group_tokens_reserved_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + struct idxd_device *idxd = group->idxd; + unsigned long val; + int rc; + + rc = kstrtoul(buf, 10, &val); + if (rc < 0) + return -EINVAL; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (idxd->state == IDXD_DEV_ENABLED) + return -EPERM; + + if (idxd->token_limit == 0) + return -EPERM; + + if (val > idxd->max_tokens) + return -EINVAL; + + if (val > idxd->nr_tokens) + return -EINVAL; + + group->tokens_reserved = val; + idxd_set_free_tokens(idxd); + return count; +} + +static struct device_attribute dev_attr_group_tokens_reserved = + __ATTR(tokens_reserved, 0644, group_tokens_reserved_show, + group_tokens_reserved_store); + +static ssize_t group_tokens_allowed_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + + return sprintf(buf, "%u\n", group->tokens_allowed); +} + +static ssize_t group_tokens_allowed_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + struct idxd_device *idxd = group->idxd; + unsigned long val; + int rc; + + rc = kstrtoul(buf, 10, &val); + if (rc < 0) + return -EINVAL; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (idxd->state == IDXD_DEV_ENABLED) + return -EPERM; + + if (idxd->token_limit == 0) + return -EPERM; + if (val < 4 * group->num_engines || + val > group->tokens_reserved + idxd->nr_tokens) + return -EINVAL; + + group->tokens_allowed = val; + return count; +} + +static struct device_attribute dev_attr_group_tokens_allowed = + __ATTR(tokens_allowed, 0644, group_tokens_allowed_show, + group_tokens_allowed_store); + +static ssize_t group_use_token_limit_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + + return sprintf(buf, "%u\n", group->use_token_limit); +} + +static ssize_t group_use_token_limit_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + struct idxd_device *idxd = group->idxd; + unsigned long val; + int rc; + + rc = kstrtoul(buf, 10, &val); + if (rc < 0) + return -EINVAL; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (idxd->state == IDXD_DEV_ENABLED) + return -EPERM; + + if (idxd->token_limit == 0) + return -EPERM; + + group->use_token_limit = !!val; + return count; +} + +static struct device_attribute dev_attr_group_use_token_limit = + __ATTR(use_token_limit, 0644, group_use_token_limit_show, + group_use_token_limit_store); + +static ssize_t group_engines_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_group *group = + container_of(dev, struct 
idxd_group, conf_dev); + int i, rc = 0; + char *tmp = buf; + struct idxd_device *idxd = group->idxd; + + for (i = 0; i < idxd->max_engines; i++) { + struct idxd_engine *engine = &idxd->engines[i]; + + if (!engine->group) + continue; + + if (engine->group->id == group->id) + rc += sprintf(tmp + rc, "engine%d.%d ", + idxd->id, engine->id); + } + + rc--; + rc += sprintf(tmp + rc, "\n"); + + return rc; +} + +static struct device_attribute dev_attr_group_engines = + __ATTR(engines, 0444, group_engines_show, NULL); + +static ssize_t group_work_queues_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + int i, rc = 0; + char *tmp = buf; + struct idxd_device *idxd = group->idxd; + + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + + if (!wq->group) + continue; + + if (wq->group->id == group->id) + rc += sprintf(tmp + rc, "wq%d.%d ", + idxd->id, wq->id); + } + + rc--; + rc += sprintf(tmp + rc, "\n"); + + return rc; +} + +static struct device_attribute dev_attr_group_work_queues = + __ATTR(work_queues, 0444, group_work_queues_show, NULL); + +static ssize_t group_traffic_class_a_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + + return sprintf(buf, "%d\n", group->tc_a); +} + +static ssize_t group_traffic_class_a_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + struct idxd_device *idxd = group->idxd; + long val; + int rc; + + rc = kstrtol(buf, 10, &val); + if (rc < 0) + return -EINVAL; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (idxd->state == IDXD_DEV_ENABLED) + return -EPERM; + + if (val < 0 || val > 7) + return -EINVAL; + + group->tc_a = val; + return count; +} + +static struct device_attribute dev_attr_group_traffic_class_a = + __ATTR(traffic_class_a, 0644, group_traffic_class_a_show, + group_traffic_class_a_store); + +static ssize_t group_traffic_class_b_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + + return sprintf(buf, "%d\n", group->tc_b); +} + +static ssize_t group_traffic_class_b_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + struct idxd_device *idxd = group->idxd; + long val; + int rc; + + rc = kstrtol(buf, 10, &val); + if (rc < 0) + return -EINVAL; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (idxd->state == IDXD_DEV_ENABLED) + return -EPERM; + + if (val < 0 || val > 7) + return -EINVAL; + + group->tc_b = val; + return count; +} + +static struct device_attribute dev_attr_group_traffic_class_b = + __ATTR(traffic_class_b, 0644, group_traffic_class_b_show, + group_traffic_class_b_store); + +static struct attribute *idxd_group_attributes[] = { + &dev_attr_group_work_queues.attr, + &dev_attr_group_engines.attr, + &dev_attr_group_use_token_limit.attr, + &dev_attr_group_tokens_allowed.attr, + &dev_attr_group_tokens_reserved.attr, + &dev_attr_group_traffic_class_a.attr, + &dev_attr_group_traffic_class_b.attr, + NULL, +}; + +static const struct attribute_group idxd_group_attribute_group = { + .attrs = idxd_group_attributes, +}; + +static 
const struct attribute_group *idxd_group_attribute_groups[] = { + &idxd_group_attribute_group, + NULL, +}; + +/* IDXD work queue attribs */ +static ssize_t wq_clients_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + + return sprintf(buf, "%d\n", wq->client_count); +} + +static struct device_attribute dev_attr_wq_clients = + __ATTR(clients, 0444, wq_clients_show, NULL); + +static ssize_t wq_state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + + switch (wq->state) { + case IDXD_WQ_DISABLED: + return sprintf(buf, "disabled\n"); + case IDXD_WQ_ENABLED: + return sprintf(buf, "enabled\n"); + } + + return sprintf(buf, "unknown\n"); +} + +static struct device_attribute dev_attr_wq_state = + __ATTR(state, 0444, wq_state_show, NULL); + +static ssize_t wq_group_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + + if (wq->group) + return sprintf(buf, "%u\n", wq->group->id); + else + return sprintf(buf, "-1\n"); +} + +static ssize_t wq_group_id_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + struct idxd_device *idxd = wq->idxd; + long id; + int rc; + struct idxd_group *prevg, *group; + + rc = kstrtol(buf, 10, &id); + if (rc < 0) + return -EINVAL; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (wq->state != IDXD_WQ_DISABLED) + return -EPERM; + + if (id > idxd->max_groups - 1 || id < -1) + return -EINVAL; + + if (id == -1) { + if (wq->group) { + wq->group->num_wqs--; + wq->group = NULL; + } + return count; + } + + group = &idxd->groups[id]; + prevg = wq->group; + + if (prevg) + prevg->num_wqs--; + wq->group = group; + group->num_wqs++; + return count; +} + +static struct device_attribute dev_attr_wq_group_id = + __ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store); + +static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + + return sprintf(buf, "%s\n", + wq_dedicated(wq) ? 
"dedicated" : "shared"); +} + +static ssize_t wq_mode_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + struct idxd_device *idxd = wq->idxd; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (wq->state != IDXD_WQ_DISABLED) + return -EPERM; + + if (sysfs_streq(buf, "dedicated")) { + set_bit(WQ_FLAG_DEDICATED, &wq->flags); + wq->threshold = 0; + } else { + return -EINVAL; + } + + return count; +} + +static struct device_attribute dev_attr_wq_mode = + __ATTR(mode, 0644, wq_mode_show, wq_mode_store); + +static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + + return sprintf(buf, "%u\n", wq->size); +} + +static ssize_t wq_size_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + unsigned long size; + struct idxd_device *idxd = wq->idxd; + int rc; + + rc = kstrtoul(buf, 10, &size); + if (rc < 0) + return -EINVAL; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (wq->state != IDXD_WQ_DISABLED) + return -EPERM; + + if (size > idxd->max_wq_size) + return -EINVAL; + + wq->size = size; + return count; +} + +static struct device_attribute dev_attr_wq_size = + __ATTR(size, 0644, wq_size_show, wq_size_store); + +static ssize_t wq_priority_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + + return sprintf(buf, "%u\n", wq->priority); +} + +static ssize_t wq_priority_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + unsigned long prio; + struct idxd_device *idxd = wq->idxd; + int rc; + + rc = kstrtoul(buf, 10, &prio); + if (rc < 0) + return -EINVAL; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (wq->state != IDXD_WQ_DISABLED) + return -EPERM; + + if (prio > IDXD_MAX_PRIORITY) + return -EINVAL; + + wq->priority = prio; + return count; +} + +static struct device_attribute dev_attr_wq_priority = + __ATTR(priority, 0644, wq_priority_show, wq_priority_store); + +static ssize_t wq_type_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + + switch (wq->type) { + case IDXD_WQT_KERNEL: + return sprintf(buf, "%s\n", + idxd_wq_type_names[IDXD_WQT_KERNEL]); + case IDXD_WQT_USER: + return sprintf(buf, "%s\n", + idxd_wq_type_names[IDXD_WQT_USER]); + case IDXD_WQT_NONE: + default: + return sprintf(buf, "%s\n", + idxd_wq_type_names[IDXD_WQT_NONE]); + } + + return -EINVAL; +} + +static ssize_t wq_type_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + enum idxd_wq_type old_type; + + if (wq->state != IDXD_WQ_DISABLED) + return -EPERM; + + old_type = wq->type; + if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL])) + wq->type = IDXD_WQT_KERNEL; + else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER])) + wq->type = IDXD_WQT_USER; + else + wq->type = IDXD_WQT_NONE; + + /* If we are changing queue type, clear the name */ + if (wq->type != old_type) + memset(wq->name, 0, WQ_NAME_SIZE + 1); 
+ + return count; +} + +static struct device_attribute dev_attr_wq_type = + __ATTR(type, 0644, wq_type_show, wq_type_store); + +static ssize_t wq_name_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + + return sprintf(buf, "%s\n", wq->name); +} + +static ssize_t wq_name_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + + if (wq->state != IDXD_WQ_DISABLED) + return -EPERM; + + if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0) + return -EINVAL; + + memset(wq->name, 0, WQ_NAME_SIZE + 1); + strncpy(wq->name, buf, WQ_NAME_SIZE); + strreplace(wq->name, '\n', '\0'); + return count; +} + +static struct device_attribute dev_attr_wq_name = + __ATTR(name, 0644, wq_name_show, wq_name_store); + +static ssize_t wq_cdev_minor_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + + return sprintf(buf, "%d\n", wq->idxd_cdev.minor); +} + +static struct device_attribute dev_attr_wq_cdev_minor = + __ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL); + +static struct attribute *idxd_wq_attributes[] = { + &dev_attr_wq_clients.attr, + &dev_attr_wq_state.attr, + &dev_attr_wq_group_id.attr, + &dev_attr_wq_mode.attr, + &dev_attr_wq_size.attr, + &dev_attr_wq_priority.attr, + &dev_attr_wq_type.attr, + &dev_attr_wq_name.attr, + &dev_attr_wq_cdev_minor.attr, + NULL, +}; + +static const struct attribute_group idxd_wq_attribute_group = { + .attrs = idxd_wq_attributes, +}; + +static const struct attribute_group *idxd_wq_attribute_groups[] = { + &idxd_wq_attribute_group, + NULL, +}; + +/* IDXD device attribs */ +static ssize_t max_work_queues_size_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%u\n", idxd->max_wq_size); +} +static DEVICE_ATTR_RO(max_work_queues_size); + +static ssize_t max_groups_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%u\n", idxd->max_groups); +} +static DEVICE_ATTR_RO(max_groups); + +static ssize_t max_work_queues_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%u\n", idxd->max_wqs); +} +static DEVICE_ATTR_RO(max_work_queues); + +static ssize_t max_engines_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%u\n", idxd->max_engines); +} +static DEVICE_ATTR_RO(max_engines); + +static ssize_t numa_node_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%d\n", dev_to_node(&idxd->pdev->dev)); +} +static DEVICE_ATTR_RO(numa_node); + +static ssize_t max_batch_size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%u\n", idxd->max_batch_size); +} +static DEVICE_ATTR_RO(max_batch_size); + +static ssize_t max_transfer_size_show(struct device *dev, + struct 
device_attribute *attr, + char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%llu\n", idxd->max_xfer_bytes); +} +static DEVICE_ATTR_RO(max_transfer_size); + +static ssize_t op_cap_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]); +} +static DEVICE_ATTR_RO(op_cap); + +static ssize_t configurable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%u\n", + test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)); +} +static DEVICE_ATTR_RO(configurable); + +static ssize_t clients_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + unsigned long flags; + int count = 0, i; + + spin_lock_irqsave(&idxd->dev_lock, flags); + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + + count += wq->client_count; + } + spin_unlock_irqrestore(&idxd->dev_lock, flags); + + return sprintf(buf, "%d\n", count); +} +static DEVICE_ATTR_RO(clients); + +static ssize_t state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + switch (idxd->state) { + case IDXD_DEV_DISABLED: + case IDXD_DEV_CONF_READY: + return sprintf(buf, "disabled\n"); + case IDXD_DEV_ENABLED: + return sprintf(buf, "enabled\n"); + case IDXD_DEV_HALTED: + return sprintf(buf, "halted\n"); + } + + return sprintf(buf, "unknown\n"); +} +static DEVICE_ATTR_RO(state); + +static ssize_t errors_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + int i, out = 0; + unsigned long flags; + + spin_lock_irqsave(&idxd->dev_lock, flags); + for (i = 0; i < 4; i++) + out += sprintf(buf + out, "%#018llx ", idxd->sw_err.bits[i]); + spin_unlock_irqrestore(&idxd->dev_lock, flags); + out--; + out += sprintf(buf + out, "\n"); + return out; +} +static DEVICE_ATTR_RO(errors); + +static ssize_t max_tokens_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%u\n", idxd->max_tokens); +} +static DEVICE_ATTR_RO(max_tokens); + +static ssize_t token_limit_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%u\n", idxd->token_limit); +} + +static ssize_t token_limit_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + unsigned long val; + int rc; + + rc = kstrtoul(buf, 10, &val); + if (rc < 0) + return -EINVAL; + + if (idxd->state == IDXD_DEV_ENABLED) + return -EPERM; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (!idxd->hw.group_cap.token_limit) + return -EPERM; + + if (val > idxd->hw.group_cap.total_tokens) + return -EINVAL; + + idxd->token_limit = val; + return count; +} +static DEVICE_ATTR_RW(token_limit); + +static ssize_t cdev_major_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + 
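/*
 * Paired with each wq's cdev_minor attribute: for a user wq the
 * character device number is MKDEV(major, minor), which udev
 * typically exposes as /dev/dsa/wq<device>.<wq> (the node path is
 * an assumption about the usual setup, not something this file
 * enforces).
 */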
struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%u\n", idxd->major); +} +static DEVICE_ATTR_RO(cdev_major); + +static struct attribute *idxd_device_attributes[] = { + &dev_attr_max_groups.attr, + &dev_attr_max_work_queues.attr, + &dev_attr_max_work_queues_size.attr, + &dev_attr_max_engines.attr, + &dev_attr_numa_node.attr, + &dev_attr_max_batch_size.attr, + &dev_attr_max_transfer_size.attr, + &dev_attr_op_cap.attr, + &dev_attr_configurable.attr, + &dev_attr_clients.attr, + &dev_attr_state.attr, + &dev_attr_errors.attr, + &dev_attr_max_tokens.attr, + &dev_attr_token_limit.attr, + &dev_attr_cdev_major.attr, + NULL, +}; + +static const struct attribute_group idxd_device_attribute_group = { + .attrs = idxd_device_attributes, +}; + +static const struct attribute_group *idxd_attribute_groups[] = { + &idxd_device_attribute_group, + NULL, +}; + +static int idxd_setup_engine_sysfs(struct idxd_device *idxd) +{ + struct device *dev = &idxd->pdev->dev; + int i, rc; + + for (i = 0; i < idxd->max_engines; i++) { + struct idxd_engine *engine = &idxd->engines[i]; + + engine->conf_dev.parent = &idxd->conf_dev; + dev_set_name(&engine->conf_dev, "engine%d.%d", + idxd->id, engine->id); + engine->conf_dev.bus = idxd_get_bus_type(idxd); + engine->conf_dev.groups = idxd_engine_attribute_groups; + engine->conf_dev.type = &idxd_engine_device_type; + dev_dbg(dev, "Engine device register: %s\n", + dev_name(&engine->conf_dev)); + rc = device_register(&engine->conf_dev); + if (rc < 0) { + put_device(&engine->conf_dev); + goto cleanup; + } + } + + return 0; + +cleanup: + while (i--) { + struct idxd_engine *engine = &idxd->engines[i]; + + device_unregister(&engine->conf_dev); + } + return rc; +} + +static int idxd_setup_group_sysfs(struct idxd_device *idxd) +{ + struct device *dev = &idxd->pdev->dev; + int i, rc; + + for (i = 0; i < idxd->max_groups; i++) { + struct idxd_group *group = &idxd->groups[i]; + + group->conf_dev.parent = &idxd->conf_dev; + dev_set_name(&group->conf_dev, "group%d.%d", + idxd->id, group->id); + group->conf_dev.bus = idxd_get_bus_type(idxd); + group->conf_dev.groups = idxd_group_attribute_groups; + group->conf_dev.type = &idxd_group_device_type; + dev_dbg(dev, "Group device register: %s\n", + dev_name(&group->conf_dev)); + rc = device_register(&group->conf_dev); + if (rc < 0) { + put_device(&group->conf_dev); + goto cleanup; + } + } + + return 0; + +cleanup: + while (i--) { + struct idxd_group *group = &idxd->groups[i]; + + device_unregister(&group->conf_dev); + } + return rc; +} + +static int idxd_setup_wq_sysfs(struct idxd_device *idxd) +{ + struct device *dev = &idxd->pdev->dev; + int i, rc; + + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + + wq->conf_dev.parent = &idxd->conf_dev; + dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id); + wq->conf_dev.bus = idxd_get_bus_type(idxd); + wq->conf_dev.groups = idxd_wq_attribute_groups; + wq->conf_dev.type = &idxd_wq_device_type; + dev_dbg(dev, "WQ device register: %s\n", + dev_name(&wq->conf_dev)); + rc = device_register(&wq->conf_dev); + if (rc < 0) { + put_device(&wq->conf_dev); + goto cleanup; + } + } + + return 0; + +cleanup: + while (i--) { + struct idxd_wq *wq = &idxd->wqs[i]; + + device_unregister(&wq->conf_dev); + } + return rc; +} + +static int idxd_setup_device_sysfs(struct idxd_device *idxd) +{ + struct device *dev = &idxd->pdev->dev; + int rc; + char devname[IDXD_NAME_SIZE]; + + sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id); + 
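/*
 * devname becomes e.g. "dsa0"; together with the wq/group/engine
 * registrations that follow, the resulting sysfs layout looks
 * roughly like this (illustrative):
 *
 *	/sys/bus/dsa/devices/dsa0
 *	/sys/bus/dsa/devices/dsa0/wq0.0
 *	/sys/bus/dsa/devices/dsa0/group0.0
 *	/sys/bus/dsa/devices/dsa0/engine0.0
 */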
idxd->conf_dev.parent = dev; + dev_set_name(&idxd->conf_dev, "%s", devname); + idxd->conf_dev.bus = idxd_get_bus_type(idxd); + idxd->conf_dev.groups = idxd_attribute_groups; + idxd->conf_dev.type = idxd_get_device_type(idxd); + + dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev)); + rc = device_register(&idxd->conf_dev); + if (rc < 0) { + put_device(&idxd->conf_dev); + return rc; + } + + return 0; +} + +int idxd_setup_sysfs(struct idxd_device *idxd) +{ + struct device *dev = &idxd->pdev->dev; + int rc; + + rc = idxd_setup_device_sysfs(idxd); + if (rc < 0) { + dev_dbg(dev, "Device sysfs registering failed: %d\n", rc); + return rc; + } + + rc = idxd_setup_wq_sysfs(idxd); + if (rc < 0) { + /* unregister conf dev */ + dev_dbg(dev, "Work Queue sysfs registering failed: %d\n", rc); + return rc; + } + + rc = idxd_setup_group_sysfs(idxd); + if (rc < 0) { + /* unregister conf dev */ + dev_dbg(dev, "Group sysfs registering failed: %d\n", rc); + return rc; + } + + rc = idxd_setup_engine_sysfs(idxd); + if (rc < 0) { + /* unregister conf dev */ + dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc); + return rc; + } + + return 0; +} + +void idxd_cleanup_sysfs(struct idxd_device *idxd) +{ + int i; + + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + + device_unregister(&wq->conf_dev); + } + + for (i = 0; i < idxd->max_engines; i++) { + struct idxd_engine *engine = &idxd->engines[i]; + + device_unregister(&engine->conf_dev); + } + + for (i = 0; i < idxd->max_groups; i++) { + struct idxd_group *group = &idxd->groups[i]; + + device_unregister(&group->conf_dev); + } + + device_unregister(&idxd->conf_dev); +} + +int idxd_register_bus_type(void) +{ + int i, rc; + + for (i = 0; i < IDXD_TYPE_MAX; i++) { + rc = bus_register(idxd_bus_types[i]); + if (rc < 0) + goto bus_err; + } + + return 0; + +bus_err: + for (; i > 0; i--) + bus_unregister(idxd_bus_types[i]); + return rc; +} + +void idxd_unregister_bus_type(void) +{ + int i; + + for (i = 0; i < IDXD_TYPE_MAX; i++) + bus_unregister(idxd_bus_types[i]); +} diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index c27e206a764c..066b21a32232 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -760,12 +760,8 @@ static void sdma_start_desc(struct sdma_channel *sdmac) return; } sdmac->desc = desc = to_sdma_desc(&vd->tx); - /* - * Do not delete the node in desc_issued list in cyclic mode, otherwise - * the desc allocated will never be freed in vchan_dma_desc_free_list - */ - if (!(sdmac->flags & IMX_DMA_SG_LOOP)) - list_del(&vd->node); + + list_del(&vd->node); sdma->channel_control[channel].base_bd_ptr = desc->bd_phys; sdma->channel_control[channel].current_bd_ptr = desc->bd_phys; @@ -1071,20 +1067,27 @@ static void sdma_channel_terminate_work(struct work_struct *work) spin_lock_irqsave(&sdmac->vc.lock, flags); vchan_get_all_descriptors(&sdmac->vc, &head); - sdmac->desc = NULL; spin_unlock_irqrestore(&sdmac->vc.lock, flags); vchan_dma_desc_free_list(&sdmac->vc, &head); sdmac->context_loaded = false; } -static int sdma_disable_channel_async(struct dma_chan *chan) +static int sdma_terminate_all(struct dma_chan *chan) { struct sdma_channel *sdmac = to_sdma_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&sdmac->vc.lock, flags); sdma_disable_channel(chan); - if (sdmac->desc) + if (sdmac->desc) { + vchan_terminate_vdesc(&sdmac->desc->vd); + sdmac->desc = NULL; schedule_work(&sdmac->terminate_worker); + } + + spin_unlock_irqrestore(&sdmac->vc.lock, flags); return 0; } @@ -1324,7 +1327,7 @@ 
static void sdma_free_chan_resources(struct dma_chan *chan) struct sdma_channel *sdmac = to_sdma_chan(chan); struct sdma_engine *sdma = sdmac->sdma; - sdma_disable_channel_async(chan); + sdma_terminate_all(chan); sdma_channel_synchronize(chan); @@ -1648,7 +1651,7 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan, struct dma_tx_state *txstate) { struct sdma_channel *sdmac = to_sdma_chan(chan); - struct sdma_desc *desc; + struct sdma_desc *desc = NULL; u32 residue; struct virt_dma_desc *vd; enum dma_status ret; @@ -1659,19 +1662,23 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan, return ret; spin_lock_irqsave(&sdmac->vc.lock, flags); + vd = vchan_find_desc(&sdmac->vc, cookie); - if (vd) { + if (vd) desc = to_sdma_desc(&vd->tx); + else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie) + desc = sdmac->desc; + + if (desc) { if (sdmac->flags & IMX_DMA_SG_LOOP) residue = (desc->num_bd - desc->buf_ptail) * desc->period_len - desc->chn_real_count; else residue = desc->chn_count - desc->chn_real_count; - } else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie) { - residue = sdmac->desc->chn_count - sdmac->desc->chn_real_count; } else { residue = 0; } + spin_unlock_irqrestore(&sdmac->vc.lock, flags); dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, @@ -2103,7 +2110,7 @@ static int sdma_probe(struct platform_device *pdev) sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg; sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic; sdma->dma_device.device_config = sdma_config; - sdma->dma_device.device_terminate_all = sdma_disable_channel_async; + sdma->dma_device.device_terminate_all = sdma_terminate_all; sdma->dma_device.device_synchronize = sdma_channel_synchronize; sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS; sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS; diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 1a422a8b43cf..18c011e57592 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -377,10 +377,11 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags) descs->virt = dma_alloc_coherent(to_dev(ioat_chan), SZ_2M, &descs->hw, flags); - if (!descs->virt && (i > 0)) { + if (!descs->virt) { int idx; for (idx = 0; idx < i; idx++) { + descs = &ioat_chan->descs[idx]; dma_free_coherent(to_dev(ioat_chan), SZ_2M, descs->virt, descs->hw); descs->virt = NULL; diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c index a6a6dc432db8..60e9afbb896c 100644 --- a/drivers/dma/ioat/init.c +++ b/drivers/dma/ioat/init.c @@ -556,10 +556,6 @@ static void ioat_dma_remove(struct ioatdma_device *ioat_dma) ioat_kobject_del(ioat_dma); dma_async_device_unregister(dma); - - dma_pool_destroy(ioat_dma->completion_pool); - - INIT_LIST_HEAD(&dma->channels); } /** @@ -589,7 +585,7 @@ static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma) dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log); for (i = 0; i < dma->chancnt; i++) { - ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL); + ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL); if (!ioat_chan) break; @@ -624,12 +620,16 @@ static void ioat_free_chan_resources(struct dma_chan *c) return; ioat_stop(ioat_chan); - ioat_reset_hw(ioat_chan); - /* Put LTR to idle */ - if (ioat_dma->version >= IOAT_VER_3_4) - writeb(IOAT_CHAN_LTR_SWSEL_IDLE, - ioat_chan->reg_base + IOAT_CHAN_LTR_SWSEL_OFFSET); + if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state)) { + ioat_reset_hw(ioat_chan); + + /* Put LTR to idle */ + if (ioat_dma->version 
>= IOAT_VER_3_4) + writeb(IOAT_CHAN_LTR_SWSEL_IDLE, + ioat_chan->reg_base + + IOAT_CHAN_LTR_SWSEL_OFFSET); + } spin_lock_bh(&ioat_chan->cleanup_lock); spin_lock_bh(&ioat_chan->prep_lock); @@ -1322,16 +1322,28 @@ static struct pci_driver ioat_pci_driver = { .err_handler = &ioat_err_handler, }; +static void release_ioatdma(struct dma_device *device) +{ + struct ioatdma_device *d = to_ioatdma_device(device); + int i; + + for (i = 0; i < IOAT_MAX_CHANS; i++) + kfree(d->idx[i]); + + dma_pool_destroy(d->completion_pool); + kfree(d); +} + static struct ioatdma_device * alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase) { - struct device *dev = &pdev->dev; - struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL); + struct ioatdma_device *d = kzalloc(sizeof(*d), GFP_KERNEL); if (!d) return NULL; d->pdev = pdev; d->reg_base = iobase; + d->dma_dev.device_release = release_ioatdma; return d; } @@ -1400,6 +1412,8 @@ static void ioat_remove(struct pci_dev *pdev) if (!device) return; + ioat_shutdown(pdev); + dev_err(&pdev->dev, "Removing dma and dca services\n"); if (device->dca) { unregister_dca_provider(device->dca, &pdev->dev); diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index adecea51814f..c5c1aa0dcaed 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c @@ -229,9 +229,11 @@ static irqreturn_t k3_dma_int_handler(int irq, void *dev_id) c = p->vchan; if (c && (tc1 & BIT(i))) { spin_lock_irqsave(&c->vc.lock, flags); - vchan_cookie_complete(&p->ds_run->vd); - p->ds_done = p->ds_run; - p->ds_run = NULL; + if (p->ds_run != NULL) { + vchan_cookie_complete(&p->ds_run->vd); + p->ds_done = p->ds_run; + p->ds_run = NULL; + } spin_unlock_irqrestore(&c->vc.lock, flags); } if (c && (tc2 & BIT(i))) { @@ -271,6 +273,10 @@ static int k3_dma_start_txd(struct k3_dma_chan *c) if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d)) return -EAGAIN; + /* Avoid losing track of ds_run if a transaction is in flight */ + if (c->phy->ds_run) + return -EAGAIN; + if (vd) { struct k3_dma_desc_sw *ds = container_of(vd, struct k3_dma_desc_sw, vd); diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c index c20e6bd4e298..29f1223b285a 100644 --- a/drivers/dma/mediatek/mtk-uart-apdma.c +++ b/drivers/dma/mediatek/mtk-uart-apdma.c @@ -430,9 +430,10 @@ static int mtk_uart_apdma_terminate_all(struct dma_chan *chan) spin_lock_irqsave(&c->vc.lock, flags); vchan_get_all_descriptors(&c->vc, &head); - vchan_dma_desc_free_list(&c->vc, &head); spin_unlock_irqrestore(&c->vc.lock, flags); + vchan_dma_desc_free_list(&c->vc, &head); + return 0; } diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c index c2d779daa4b5..b2c2b5e8093c 100644 --- a/drivers/dma/of-dma.c +++ b/drivers/dma/of-dma.c @@ -15,6 +15,8 @@ #include <linux/of.h> #include <linux/of_dma.h> +#include "dmaengine.h" + static LIST_HEAD(of_dma_list); static DEFINE_MUTEX(of_dma_lock); diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c index 023f951189a7..c683051257fd 100644 --- a/drivers/dma/owl-dma.c +++ b/drivers/dma/owl-dma.c @@ -674,10 +674,11 @@ static int owl_dma_terminate_all(struct dma_chan *chan) } vchan_get_all_descriptors(&vchan->vc, &head); - vchan_dma_desc_free_list(&vchan->vc, &head); spin_unlock_irqrestore(&vchan->vc.lock, flags); + vchan_dma_desc_free_list(&vchan->vc, &head); + return 0; } diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 6cce9ef61b29..88b884cbb7c1 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2961,12 +2961,7 @@ static int __maybe_unused 
pl330_suspend(struct device *dev) { struct amba_device *pcdev = to_amba_device(dev); - pm_runtime_disable(dev); - - if (!pm_runtime_status_suspended(dev)) { - /* amba did not disable the clock */ - amba_pclk_disable(pcdev); - } + pm_runtime_force_suspend(dev); amba_pclk_unprepare(pcdev); return 0; @@ -2981,15 +2976,14 @@ static int __maybe_unused pl330_resume(struct device *dev) if (ret) return ret; - if (!pm_runtime_status_suspended(dev)) - ret = amba_pclk_enable(pcdev); - - pm_runtime_enable(dev); + pm_runtime_force_resume(dev); return ret; } -static SIMPLE_DEV_PM_OPS(pl330_pm, pl330_suspend, pl330_resume); +static const struct dev_pm_ops pl330_pm = { + SET_LATE_SYSTEM_SLEEP_PM_OPS(pl330_suspend, pl330_resume) +}; static int pl330_probe(struct amba_device *adev, const struct amba_id *id) diff --git a/drivers/dma/plx_dma.c b/drivers/dma/plx_dma.c new file mode 100644 index 000000000000..db4c5fd453a9 --- /dev/null +++ b/drivers/dma/plx_dma.c @@ -0,0 +1,639 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Microsemi Switchtec(tm) PCIe Management Driver + * Copyright (c) 2019, Logan Gunthorpe <logang@deltatee.com> + * Copyright (c) 2019, GigaIO Networks, Inc + */ + +#include "dmaengine.h" + +#include <linux/circ_buf.h> +#include <linux/dmaengine.h> +#include <linux/kref.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/pci.h> + +MODULE_DESCRIPTION("PLX ExpressLane PEX PCI Switch DMA Engine"); +MODULE_VERSION("0.1"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Logan Gunthorpe"); + +#define PLX_REG_DESC_RING_ADDR 0x214 +#define PLX_REG_DESC_RING_ADDR_HI 0x218 +#define PLX_REG_DESC_RING_NEXT_ADDR 0x21C +#define PLX_REG_DESC_RING_COUNT 0x220 +#define PLX_REG_DESC_RING_LAST_ADDR 0x224 +#define PLX_REG_DESC_RING_LAST_SIZE 0x228 +#define PLX_REG_PREF_LIMIT 0x234 +#define PLX_REG_CTRL 0x238 +#define PLX_REG_CTRL2 0x23A +#define PLX_REG_INTR_CTRL 0x23C +#define PLX_REG_INTR_STATUS 0x23E + +#define PLX_REG_PREF_LIMIT_PREF_FOUR 8 + +#define PLX_REG_CTRL_GRACEFUL_PAUSE BIT(0) +#define PLX_REG_CTRL_ABORT BIT(1) +#define PLX_REG_CTRL_WRITE_BACK_EN BIT(2) +#define PLX_REG_CTRL_START BIT(3) +#define PLX_REG_CTRL_RING_STOP_MODE BIT(4) +#define PLX_REG_CTRL_DESC_MODE_BLOCK (0 << 5) +#define PLX_REG_CTRL_DESC_MODE_ON_CHIP (1 << 5) +#define PLX_REG_CTRL_DESC_MODE_OFF_CHIP (2 << 5) +#define PLX_REG_CTRL_DESC_INVALID BIT(8) +#define PLX_REG_CTRL_GRACEFUL_PAUSE_DONE BIT(9) +#define PLX_REG_CTRL_ABORT_DONE BIT(10) +#define PLX_REG_CTRL_IMM_PAUSE_DONE BIT(12) +#define PLX_REG_CTRL_IN_PROGRESS BIT(30) + +#define PLX_REG_CTRL_RESET_VAL (PLX_REG_CTRL_DESC_INVALID | \ + PLX_REG_CTRL_GRACEFUL_PAUSE_DONE | \ + PLX_REG_CTRL_ABORT_DONE | \ + PLX_REG_CTRL_IMM_PAUSE_DONE) + +#define PLX_REG_CTRL_START_VAL (PLX_REG_CTRL_WRITE_BACK_EN | \ + PLX_REG_CTRL_DESC_MODE_OFF_CHIP | \ + PLX_REG_CTRL_START | \ + PLX_REG_CTRL_RESET_VAL) + +#define PLX_REG_CTRL2_MAX_TXFR_SIZE_64B 0 +#define PLX_REG_CTRL2_MAX_TXFR_SIZE_128B 1 +#define PLX_REG_CTRL2_MAX_TXFR_SIZE_256B 2 +#define PLX_REG_CTRL2_MAX_TXFR_SIZE_512B 3 +#define PLX_REG_CTRL2_MAX_TXFR_SIZE_1KB 4 +#define PLX_REG_CTRL2_MAX_TXFR_SIZE_2KB 5 +#define PLX_REG_CTRL2_MAX_TXFR_SIZE_4B 7 + +#define PLX_REG_INTR_CRTL_ERROR_EN BIT(0) +#define PLX_REG_INTR_CRTL_INV_DESC_EN BIT(1) +#define PLX_REG_INTR_CRTL_ABORT_DONE_EN BIT(3) +#define PLX_REG_INTR_CRTL_PAUSE_DONE_EN BIT(4) +#define PLX_REG_INTR_CRTL_IMM_PAUSE_DONE_EN BIT(5) + +#define PLX_REG_INTR_STATUS_ERROR BIT(0) +#define PLX_REG_INTR_STATUS_INV_DESC BIT(1) +#define PLX_REG_INTR_STATUS_DESC_DONE BIT(2) +#define 
PLX_REG_INTR_CRTL_ABORT_DONE BIT(3) + +struct plx_dma_hw_std_desc { + __le32 flags_and_size; + __le16 dst_addr_hi; + __le16 src_addr_hi; + __le32 dst_addr_lo; + __le32 src_addr_lo; +}; + +#define PLX_DESC_SIZE_MASK 0x7ffffff +#define PLX_DESC_FLAG_VALID BIT(31) +#define PLX_DESC_FLAG_INT_WHEN_DONE BIT(30) + +#define PLX_DESC_WB_SUCCESS BIT(30) +#define PLX_DESC_WB_RD_FAIL BIT(29) +#define PLX_DESC_WB_WR_FAIL BIT(28) + +#define PLX_DMA_RING_COUNT 2048 + +struct plx_dma_desc { + struct dma_async_tx_descriptor txd; + struct plx_dma_hw_std_desc *hw; + u32 orig_size; +}; + +struct plx_dma_dev { + struct dma_device dma_dev; + struct dma_chan dma_chan; + struct pci_dev __rcu *pdev; + void __iomem *bar; + struct tasklet_struct desc_task; + + spinlock_t ring_lock; + bool ring_active; + int head; + int tail; + struct plx_dma_hw_std_desc *hw_ring; + dma_addr_t hw_ring_dma; + struct plx_dma_desc **desc_ring; +}; + +static struct plx_dma_dev *chan_to_plx_dma_dev(struct dma_chan *c) +{ + return container_of(c, struct plx_dma_dev, dma_chan); +} + +static struct plx_dma_desc *to_plx_desc(struct dma_async_tx_descriptor *txd) +{ + return container_of(txd, struct plx_dma_desc, txd); +} + +static struct plx_dma_desc *plx_dma_get_desc(struct plx_dma_dev *plxdev, int i) +{ + return plxdev->desc_ring[i & (PLX_DMA_RING_COUNT - 1)]; +} + +static void plx_dma_process_desc(struct plx_dma_dev *plxdev) +{ + struct dmaengine_result res; + struct plx_dma_desc *desc; + u32 flags; + + spin_lock_bh(&plxdev->ring_lock); + + while (plxdev->tail != plxdev->head) { + desc = plx_dma_get_desc(plxdev, plxdev->tail); + + flags = le32_to_cpu(READ_ONCE(desc->hw->flags_and_size)); + + if (flags & PLX_DESC_FLAG_VALID) + break; + + res.residue = desc->orig_size - (flags & PLX_DESC_SIZE_MASK); + + if (flags & PLX_DESC_WB_SUCCESS) + res.result = DMA_TRANS_NOERROR; + else if (flags & PLX_DESC_WB_WR_FAIL) + res.result = DMA_TRANS_WRITE_FAILED; + else + res.result = DMA_TRANS_READ_FAILED; + + dma_cookie_complete(&desc->txd); + dma_descriptor_unmap(&desc->txd); + dmaengine_desc_get_callback_invoke(&desc->txd, &res); + desc->txd.callback = NULL; + desc->txd.callback_result = NULL; + + plxdev->tail++; + } + + spin_unlock_bh(&plxdev->ring_lock); +} + +static void plx_dma_abort_desc(struct plx_dma_dev *plxdev) +{ + struct dmaengine_result res; + struct plx_dma_desc *desc; + + plx_dma_process_desc(plxdev); + + spin_lock_bh(&plxdev->ring_lock); + + while (plxdev->tail != plxdev->head) { + desc = plx_dma_get_desc(plxdev, plxdev->tail); + + res.residue = desc->orig_size; + res.result = DMA_TRANS_ABORTED; + + dma_cookie_complete(&desc->txd); + dma_descriptor_unmap(&desc->txd); + dmaengine_desc_get_callback_invoke(&desc->txd, &res); + desc->txd.callback = NULL; + desc->txd.callback_result = NULL; + + plxdev->tail++; + } + + spin_unlock_bh(&plxdev->ring_lock); +} + +static void __plx_dma_stop(struct plx_dma_dev *plxdev) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(1000); + u32 val; + + val = readl(plxdev->bar + PLX_REG_CTRL); + if (!(val & ~PLX_REG_CTRL_GRACEFUL_PAUSE)) + return; + + writel(PLX_REG_CTRL_RESET_VAL | PLX_REG_CTRL_GRACEFUL_PAUSE, + plxdev->bar + PLX_REG_CTRL); + + while (!time_after(jiffies, timeout)) { + val = readl(plxdev->bar + PLX_REG_CTRL); + if (val & PLX_REG_CTRL_GRACEFUL_PAUSE_DONE) + break; + + cpu_relax(); + } + + if (!(val & PLX_REG_CTRL_GRACEFUL_PAUSE_DONE)) + dev_err(plxdev->dma_dev.dev, + "Timeout waiting for graceful pause!\n"); + + writel(PLX_REG_CTRL_RESET_VAL | PLX_REG_CTRL_GRACEFUL_PAUSE, + plxdev->bar + 
PLX_REG_CTRL); + + writel(0, plxdev->bar + PLX_REG_DESC_RING_COUNT); + writel(0, plxdev->bar + PLX_REG_DESC_RING_ADDR); + writel(0, plxdev->bar + PLX_REG_DESC_RING_ADDR_HI); + writel(0, plxdev->bar + PLX_REG_DESC_RING_NEXT_ADDR); +} + +static void plx_dma_stop(struct plx_dma_dev *plxdev) +{ + rcu_read_lock(); + if (!rcu_dereference(plxdev->pdev)) { + rcu_read_unlock(); + return; + } + + __plx_dma_stop(plxdev); + + rcu_read_unlock(); +} + +static void plx_dma_desc_task(unsigned long data) +{ + struct plx_dma_dev *plxdev = (void *)data; + + plx_dma_process_desc(plxdev); +} + +static struct dma_async_tx_descriptor *plx_dma_prep_memcpy(struct dma_chan *c, + dma_addr_t dma_dst, dma_addr_t dma_src, size_t len, + unsigned long flags) + __acquires(plxdev->ring_lock) +{ + struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(c); + struct plx_dma_desc *plxdesc; + + spin_lock_bh(&plxdev->ring_lock); + if (!plxdev->ring_active) + goto err_unlock; + + if (!CIRC_SPACE(plxdev->head, plxdev->tail, PLX_DMA_RING_COUNT)) + goto err_unlock; + + if (len > PLX_DESC_SIZE_MASK) + goto err_unlock; + + plxdesc = plx_dma_get_desc(plxdev, plxdev->head); + plxdev->head++; + + plxdesc->hw->dst_addr_lo = cpu_to_le32(lower_32_bits(dma_dst)); + plxdesc->hw->dst_addr_hi = cpu_to_le16(upper_32_bits(dma_dst)); + plxdesc->hw->src_addr_lo = cpu_to_le32(lower_32_bits(dma_src)); + plxdesc->hw->src_addr_hi = cpu_to_le16(upper_32_bits(dma_src)); + + plxdesc->orig_size = len; + + if (flags & DMA_PREP_INTERRUPT) + len |= PLX_DESC_FLAG_INT_WHEN_DONE; + + plxdesc->hw->flags_and_size = cpu_to_le32(len); + plxdesc->txd.flags = flags; + + /* return with the lock held, it will be released in tx_submit */ + + return &plxdesc->txd; + +err_unlock: + /* + * Keep sparse happy by restoring an even lock count on + * this lock. + */ + __acquire(plxdev->ring_lock); + + spin_unlock_bh(&plxdev->ring_lock); + return NULL; +} + +static dma_cookie_t plx_dma_tx_submit(struct dma_async_tx_descriptor *desc) + __releases(plxdev->ring_lock) +{ + struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(desc->chan); + struct plx_dma_desc *plxdesc = to_plx_desc(desc); + dma_cookie_t cookie; + + cookie = dma_cookie_assign(desc); + + /* + * Ensure the descriptor updates are visible to the dma device + * before setting the valid bit. + */ + wmb(); + + plxdesc->hw->flags_and_size |= cpu_to_le32(PLX_DESC_FLAG_VALID); + + spin_unlock_bh(&plxdev->ring_lock); + + return cookie; +} + +static enum dma_status plx_dma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, struct dma_tx_state *txstate) +{ + struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan); + enum dma_status ret; + + ret = dma_cookie_status(chan, cookie, txstate); + if (ret == DMA_COMPLETE) + return ret; + + plx_dma_process_desc(plxdev); + + return dma_cookie_status(chan, cookie, txstate); +} + +static void plx_dma_issue_pending(struct dma_chan *chan) +{ + struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan); + + rcu_read_lock(); + if (!rcu_dereference(plxdev->pdev)) { + rcu_read_unlock(); + return; + } + + /* + * Ensure the valid bits are visible before starting the + * DMA engine. 
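 * The descriptors were marked valid in plx_dma_tx_submit(); this
 * barrier orders those stores ahead of the PLX_REG_CTRL start
 * write below, so the engine cannot fetch a stale descriptor.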
+ */ + wmb(); + + writew(PLX_REG_CTRL_START_VAL, plxdev->bar + PLX_REG_CTRL); + + rcu_read_unlock(); +} + +static irqreturn_t plx_dma_isr(int irq, void *devid) +{ + struct plx_dma_dev *plxdev = devid; + u32 status; + + status = readw(plxdev->bar + PLX_REG_INTR_STATUS); + + if (!status) + return IRQ_NONE; + + if (status & PLX_REG_INTR_STATUS_DESC_DONE && plxdev->ring_active) + tasklet_schedule(&plxdev->desc_task); + + writew(status, plxdev->bar + PLX_REG_INTR_STATUS); + + return IRQ_HANDLED; +} + +static int plx_dma_alloc_desc(struct plx_dma_dev *plxdev) +{ + struct plx_dma_desc *desc; + int i; + + plxdev->desc_ring = kcalloc(PLX_DMA_RING_COUNT, + sizeof(*plxdev->desc_ring), GFP_KERNEL); + if (!plxdev->desc_ring) + return -ENOMEM; + + for (i = 0; i < PLX_DMA_RING_COUNT; i++) { + desc = kzalloc(sizeof(*desc), GFP_KERNEL); + if (!desc) + goto free_and_exit; + + dma_async_tx_descriptor_init(&desc->txd, &plxdev->dma_chan); + desc->txd.tx_submit = plx_dma_tx_submit; + desc->hw = &plxdev->hw_ring[i]; + + plxdev->desc_ring[i] = desc; + } + + return 0; + +free_and_exit: + for (i = 0; i < PLX_DMA_RING_COUNT; i++) + kfree(plxdev->desc_ring[i]); + kfree(plxdev->desc_ring); + return -ENOMEM; +} + +static int plx_dma_alloc_chan_resources(struct dma_chan *chan) +{ + struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan); + size_t ring_sz = PLX_DMA_RING_COUNT * sizeof(*plxdev->hw_ring); + int rc; + + plxdev->head = plxdev->tail = 0; + plxdev->hw_ring = dma_alloc_coherent(plxdev->dma_dev.dev, ring_sz, + &plxdev->hw_ring_dma, GFP_KERNEL); + if (!plxdev->hw_ring) + return -ENOMEM; + + rc = plx_dma_alloc_desc(plxdev); + if (rc) + goto out_free_hw_ring; + + rcu_read_lock(); + if (!rcu_dereference(plxdev->pdev)) { + rcu_read_unlock(); + rc = -ENODEV; + goto out_free_hw_ring; + } + + writel(PLX_REG_CTRL_RESET_VAL, plxdev->bar + PLX_REG_CTRL); + writel(lower_32_bits(plxdev->hw_ring_dma), + plxdev->bar + PLX_REG_DESC_RING_ADDR); + writel(upper_32_bits(plxdev->hw_ring_dma), + plxdev->bar + PLX_REG_DESC_RING_ADDR_HI); + writel(lower_32_bits(plxdev->hw_ring_dma), + plxdev->bar + PLX_REG_DESC_RING_NEXT_ADDR); + writel(PLX_DMA_RING_COUNT, plxdev->bar + PLX_REG_DESC_RING_COUNT); + writel(PLX_REG_PREF_LIMIT_PREF_FOUR, plxdev->bar + PLX_REG_PREF_LIMIT); + + plxdev->ring_active = true; + + rcu_read_unlock(); + + return PLX_DMA_RING_COUNT; + +out_free_hw_ring: + dma_free_coherent(plxdev->dma_dev.dev, ring_sz, plxdev->hw_ring, + plxdev->hw_ring_dma); + return rc; +} + +static void plx_dma_free_chan_resources(struct dma_chan *chan) +{ + struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan); + size_t ring_sz = PLX_DMA_RING_COUNT * sizeof(*plxdev->hw_ring); + struct pci_dev *pdev; + int irq = -1; + int i; + + spin_lock_bh(&plxdev->ring_lock); + plxdev->ring_active = false; + spin_unlock_bh(&plxdev->ring_lock); + + plx_dma_stop(plxdev); + + rcu_read_lock(); + pdev = rcu_dereference(plxdev->pdev); + if (pdev) + irq = pci_irq_vector(pdev, 0); + rcu_read_unlock(); + + if (irq > 0) + synchronize_irq(irq); + + tasklet_kill(&plxdev->desc_task); + + plx_dma_abort_desc(plxdev); + + for (i = 0; i < PLX_DMA_RING_COUNT; i++) + kfree(plxdev->desc_ring[i]); + + kfree(plxdev->desc_ring); + dma_free_coherent(plxdev->dma_dev.dev, ring_sz, plxdev->hw_ring, + plxdev->hw_ring_dma); + +} + +static void plx_dma_release(struct dma_device *dma_dev) +{ + struct plx_dma_dev *plxdev = + container_of(dma_dev, struct plx_dma_dev, dma_dev); + + put_device(dma_dev->dev); + kfree(plxdev); +} + +static int plx_dma_create(struct pci_dev *pdev) +{ + struct 
plx_dma_dev *plxdev; + struct dma_device *dma; + struct dma_chan *chan; + int rc; + + plxdev = kzalloc(sizeof(*plxdev), GFP_KERNEL); + if (!plxdev) + return -ENOMEM; + + rc = request_irq(pci_irq_vector(pdev, 0), plx_dma_isr, 0, + KBUILD_MODNAME, plxdev); + if (rc) { + kfree(plxdev); + return rc; + } + + spin_lock_init(&plxdev->ring_lock); + tasklet_init(&plxdev->desc_task, plx_dma_desc_task, + (unsigned long)plxdev); + + RCU_INIT_POINTER(plxdev->pdev, pdev); + plxdev->bar = pcim_iomap_table(pdev)[0]; + + dma = &plxdev->dma_dev; + dma->chancnt = 1; + INIT_LIST_HEAD(&dma->channels); + dma_cap_set(DMA_MEMCPY, dma->cap_mask); + dma->copy_align = DMAENGINE_ALIGN_1_BYTE; + dma->dev = get_device(&pdev->dev); + + dma->device_alloc_chan_resources = plx_dma_alloc_chan_resources; + dma->device_free_chan_resources = plx_dma_free_chan_resources; + dma->device_prep_dma_memcpy = plx_dma_prep_memcpy; + dma->device_issue_pending = plx_dma_issue_pending; + dma->device_tx_status = plx_dma_tx_status; + dma->device_release = plx_dma_release; + + chan = &plxdev->dma_chan; + chan->device = dma; + dma_cookie_init(chan); + list_add_tail(&chan->device_node, &dma->channels); + + rc = dma_async_device_register(dma); + if (rc) { + pci_err(pdev, "Failed to register dma device: %d\n", rc); + free_irq(pci_irq_vector(pdev, 0), plxdev); + kfree(plxdev); + return rc; + } + + pci_set_drvdata(pdev, plxdev); + + return 0; +} + +static int plx_dma_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + int rc; + + rc = pcim_enable_device(pdev); + if (rc) + return rc; + + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(48)); + if (rc) + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) + return rc; + + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48)); + if (rc) + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) + return rc; + + rc = pcim_iomap_regions(pdev, 1, KBUILD_MODNAME); + if (rc) + return rc; + + rc = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); + if (rc <= 0) + return rc; + + pci_set_master(pdev); + + rc = plx_dma_create(pdev); + if (rc) + goto err_free_irq_vectors; + + pci_info(pdev, "PLX DMA Channel Registered\n"); + + return 0; + +err_free_irq_vectors: + pci_free_irq_vectors(pdev); + return rc; +} + +static void plx_dma_remove(struct pci_dev *pdev) +{ + struct plx_dma_dev *plxdev = pci_get_drvdata(pdev); + + free_irq(pci_irq_vector(pdev, 0), plxdev); + + rcu_assign_pointer(plxdev->pdev, NULL); + synchronize_rcu(); + + spin_lock_bh(&plxdev->ring_lock); + plxdev->ring_active = false; + spin_unlock_bh(&plxdev->ring_lock); + + __plx_dma_stop(plxdev); + plx_dma_abort_desc(plxdev); + + plxdev->bar = NULL; + dma_async_device_unregister(&plxdev->dma_dev); + + pci_free_irq_vectors(pdev); +} + +static const struct pci_device_id plx_dma_pci_tbl[] = { + { + .vendor = PCI_VENDOR_ID_PLX, + .device = 0x87D0, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = PCI_CLASS_SYSTEM_OTHER << 8, + .class_mask = 0xFFFFFFFF, + }, + {0} +}; +MODULE_DEVICE_TABLE(pci, plx_dma_pci_tbl); + +static struct pci_driver plx_dma_pci_driver = { + .name = KBUILD_MODNAME, + .id_table = plx_dma_pci_tbl, + .probe = plx_dma_probe, + .remove = plx_dma_remove, +}; +module_pci_driver(plx_dma_pci_driver); diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c index 43da8eeb18ef..8e14c72d03f0 100644 --- a/drivers/dma/s3c24xx-dma.c +++ b/drivers/dma/s3c24xx-dma.c @@ -519,15 +519,6 @@ static void s3c24xx_dma_start_next_txd(struct s3c24xx_dma_chan *s3cchan) s3c24xx_dma_start_next_sg(s3cchan, 
txd); } -static void s3c24xx_dma_free_txd_list(struct s3c24xx_dma_engine *s3cdma, - struct s3c24xx_dma_chan *s3cchan) -{ - LIST_HEAD(head); - - vchan_get_all_descriptors(&s3cchan->vc, &head); - vchan_dma_desc_free_list(&s3cchan->vc, &head); -} - /* * Try to allocate a physical channel. When successful, assign it to * this virtual channel, and initiate the next descriptor. The @@ -709,8 +700,9 @@ static int s3c24xx_dma_terminate_all(struct dma_chan *chan) { struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); struct s3c24xx_dma_engine *s3cdma = s3cchan->host; + LIST_HEAD(head); unsigned long flags; - int ret = 0; + int ret; spin_lock_irqsave(&s3cchan->vc.lock, flags); @@ -734,7 +726,15 @@ static int s3c24xx_dma_terminate_all(struct dma_chan *chan) } /* Dequeue jobs not yet fired as well */ - s3c24xx_dma_free_txd_list(s3cdma, s3cchan); + + vchan_get_all_descriptors(&s3cchan->vc, &head); + + spin_unlock_irqrestore(&s3cchan->vc.lock, flags); + + vchan_dma_desc_free_list(&s3cchan->vc, &head); + + return 0; + unlock: spin_unlock_irqrestore(&s3cchan->vc.lock, flags); @@ -1198,7 +1198,7 @@ static int s3c24xx_dma_probe(struct platform_device *pdev) /* Basic sanity check */ if (pdata->num_phy_channels > MAX_DMA_CHANNELS) { - dev_err(&pdev->dev, "to many dma channels %d, max %d\n", + dev_err(&pdev->dev, "too many dma channels %d, max %d\n", pdata->num_phy_channels, MAX_DMA_CHANNELS); return -EINVAL; } diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c index 465256fe8b1f..6d0bec947636 100644 --- a/drivers/dma/sf-pdma/sf-pdma.c +++ b/drivers/dma/sf-pdma/sf-pdma.c @@ -155,9 +155,9 @@ static void sf_pdma_free_chan_resources(struct dma_chan *dchan) kfree(chan->desc); chan->desc = NULL; vchan_get_all_descriptors(&chan->vchan, &head); - vchan_dma_desc_free_list(&chan->vchan, &head); sf_pdma_disclaim_chan(chan); spin_unlock_irqrestore(&chan->vchan.lock, flags); + vchan_dma_desc_free_list(&chan->vchan, &head); } static size_t sf_pdma_desc_residue(struct sf_pdma_chan *chan, @@ -220,8 +220,8 @@ static int sf_pdma_terminate_all(struct dma_chan *dchan) chan->desc = NULL; chan->xfer_err = false; vchan_get_all_descriptors(&chan->vchan, &head); - vchan_dma_desc_free_list(&chan->vchan, &head); spin_unlock_irqrestore(&chan->vchan.lock, flags); + vchan_dma_desc_free_list(&chan->vchan, &head); return 0; } diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c index e397a50058c8..bbc2bda3b902 100644 --- a/drivers/dma/sun4i-dma.c +++ b/drivers/dma/sun4i-dma.c @@ -669,43 +669,41 @@ sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len, dma_addr_t src, dest; u32 endpoints; int nr_periods, offset, plength, i; + u8 ram_type, io_mode, linear_mode; if (!is_slave_direction(dir)) { dev_err(chan2dev(chan), "Invalid DMA direction\n"); return NULL; } - if (vchan->is_dedicated) { - /* - * As we are using this just for audio data, we need to use - * normal DMA. There is nothing stopping us from supporting - * dedicated DMA here as well, so if a client comes up and - * requires it, it will be simple to implement it. 
- */ - dev_err(chan2dev(chan), - "Cyclic transfers are only supported on Normal DMA\n"); - return NULL; - } - contract = generate_dma_contract(); if (!contract) return NULL; contract->is_cyclic = 1; - /* Figure out the endpoints and the address we need */ + if (vchan->is_dedicated) { + io_mode = SUN4I_DDMA_ADDR_MODE_IO; + linear_mode = SUN4I_DDMA_ADDR_MODE_LINEAR; + ram_type = SUN4I_DDMA_DRQ_TYPE_SDRAM; + } else { + io_mode = SUN4I_NDMA_ADDR_MODE_IO; + linear_mode = SUN4I_NDMA_ADDR_MODE_LINEAR; + ram_type = SUN4I_NDMA_DRQ_TYPE_SDRAM; + } + if (dir == DMA_MEM_TO_DEV) { src = buf; dest = sconfig->dst_addr; - endpoints = SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM) | - SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) | - SUN4I_DMA_CFG_DST_ADDR_MODE(SUN4I_NDMA_ADDR_MODE_IO); + endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) | + SUN4I_DMA_CFG_DST_ADDR_MODE(io_mode) | + SUN4I_DMA_CFG_SRC_DRQ_TYPE(ram_type); } else { src = sconfig->src_addr; dest = buf; - endpoints = SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) | - SUN4I_DMA_CFG_SRC_ADDR_MODE(SUN4I_NDMA_ADDR_MODE_IO) | - SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM); + endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(ram_type) | + SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) | + SUN4I_DMA_CFG_SRC_ADDR_MODE(io_mode); } /* @@ -747,8 +745,13 @@ sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len, dest = buf + offset; /* Make the promise */ - promise = generate_ndma_promise(chan, src, dest, - plength, sconfig, dir); + if (vchan->is_dedicated) + promise = generate_ddma_promise(chan, src, dest, + plength, sconfig); + else + promise = generate_ndma_promise(chan, src, dest, + plength, sconfig, dir); + if (!promise) { /* TODO: should we free everything? */ return NULL; } @@ -885,12 +888,13 @@ static int sun4i_dma_terminate_all(struct dma_chan *chan) } spin_lock_irqsave(&vchan->vc.lock, flags); - vchan_dma_desc_free_list(&vchan->vc, &head); /* Clear these so the vchan is usable again */ vchan->processing = NULL; vchan->pchan = NULL; spin_unlock_irqrestore(&vchan->vc.lock, flags); + vchan_dma_desc_free_list(&vchan->vc, &head); + return 0; } diff --git a/drivers/dma/ti/Kconfig b/drivers/dma/ti/Kconfig index d507c24fbf31..f76e06651f80 100644 --- a/drivers/dma/ti/Kconfig +++ b/drivers/dma/ti/Kconfig @@ -34,5 +34,29 @@ config DMA_OMAP Enable support for the TI sDMA (System DMA or DMA4) controller. This DMA engine is found on OMAP and DRA7xx parts. +config TI_K3_UDMA + bool "Texas Instruments UDMA support" + depends on ARCH_K3 || COMPILE_TEST + depends on TI_SCI_PROTOCOL + depends on TI_SCI_INTA_IRQCHIP + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + select TI_K3_RINGACC + select TI_K3_PSIL + help + Enable support for the TI UDMA (Unified DMA) controller. This + DMA engine is used in the AM65x and j721e SoCs. + +config TI_K3_UDMA_GLUE_LAYER + bool "Texas Instruments UDMA Glue layer for non-DMAengine users" + depends on ARCH_K3 || COMPILE_TEST + depends on TI_K3_UDMA + help + Say y here to support the K3 NAVSS DMA glue interface. + If unsure, say N.
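For illustration only, a minimal sketch of how a non-DMAengine client might bring up a TX channel through this glue layer (the API itself is added in k3-udma-glue.c later in this diff). The "my-tx" dma-names entry and the ring sizes are hypothetical; only the functions and the swdata_size/tx_cfg/txcq_cfg fields are taken from this series:

/* Illustrative sketch, not part of the patch; assumes normal kernel context. */
static int my_client_setup_tx(struct device *dev)
{
	struct k3_udma_glue_tx_channel_cfg cfg = { };
	struct k3_udma_glue_tx_channel *tx_chn;

	cfg.swdata_size = 16;		/* hypothetical per-descriptor swdata size */
	cfg.tx_cfg.size = 128;		/* TX ring elements (assumed value) */
	cfg.txcq_cfg.size = 128;	/* TX completion ring elements (assumed value) */

	/* "my-tx" must match a dma-names entry in the client's DT node */
	tx_chn = k3_udma_glue_request_tx_chn(dev, "my-tx", &cfg);
	if (IS_ERR(tx_chn))
		return PTR_ERR(tx_chn);

	return k3_udma_glue_enable_tx_chn(tx_chn);
}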
+ +config TI_K3_PSIL + bool + config TI_DMA_CROSSBAR bool diff --git a/drivers/dma/ti/Makefile b/drivers/dma/ti/Makefile index 113e59ec9c32..9a29a107e374 100644 --- a/drivers/dma/ti/Makefile +++ b/drivers/dma/ti/Makefile @@ -2,4 +2,7 @@ obj-$(CONFIG_TI_CPPI41) += cppi41.o obj-$(CONFIG_TI_EDMA) += edma.o obj-$(CONFIG_DMA_OMAP) += omap-dma.o +obj-$(CONFIG_TI_K3_UDMA) += k3-udma.o +obj-$(CONFIG_TI_K3_UDMA_GLUE_LAYER) += k3-udma-glue.o +obj-$(CONFIG_TI_K3_PSIL) += k3-psil.o k3-psil-am654.o k3-psil-j721e.o obj-$(CONFIG_TI_DMA_CROSSBAR) += dma-crossbar.o diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c index 756a3c951dc7..03a7f647f7b2 100644 --- a/drivers/dma/ti/edma.c +++ b/drivers/dma/ti/edma.c @@ -2289,13 +2289,6 @@ static int edma_probe(struct platform_device *pdev) if (!info) return -ENODEV; - pm_runtime_enable(dev); - ret = pm_runtime_get_sync(dev); - if (ret < 0) { - dev_err(dev, "pm_runtime_get_sync() failed\n"); - return ret; - } - ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); if (ret) return ret; @@ -2326,27 +2319,33 @@ static int edma_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ecc); + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + dev_err(dev, "pm_runtime_get_sync() failed\n"); + pm_runtime_disable(dev); + return ret; + } + /* Get eDMA3 configuration from IP */ ret = edma_setup_from_hw(dev, info, ecc); if (ret) - return ret; + goto err_disable_pm; /* Allocate memory based on the information we got from the IP */ ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels, sizeof(*ecc->slave_chans), GFP_KERNEL); - if (!ecc->slave_chans) - return -ENOMEM; ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots), sizeof(unsigned long), GFP_KERNEL); - if (!ecc->slot_inuse) - return -ENOMEM; ecc->channels_mask = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_channels), sizeof(unsigned long), GFP_KERNEL); - if (!ecc->channels_mask) - return -ENOMEM; + if (!ecc->slave_chans || !ecc->slot_inuse || !ecc->channels_mask) { + ret = -ENOMEM; + goto err_disable_pm; + } /* Mark all channels available initially */ bitmap_fill(ecc->channels_mask, ecc->num_channels); @@ -2388,7 +2387,7 @@ static int edma_probe(struct platform_device *pdev) ecc); if (ret) { dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret); - return ret; + goto err_disable_pm; } ecc->ccint = irq; } @@ -2404,7 +2403,7 @@ static int edma_probe(struct platform_device *pdev) ecc); if (ret) { dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret); - return ret; + goto err_disable_pm; } ecc->ccerrint = irq; } @@ -2412,7 +2411,8 @@ static int edma_probe(struct platform_device *pdev) ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY); if (ecc->dummy_slot < 0) { dev_err(dev, "Can't allocate PaRAM dummy slot\n"); - return ecc->dummy_slot; + ret = ecc->dummy_slot; + goto err_disable_pm; } queue_priority_mapping = info->queue_priority_mapping; @@ -2512,6 +2512,9 @@ static int edma_probe(struct platform_device *pdev) err_reg1: edma_free_slot(ecc, ecc->dummy_slot); +err_disable_pm: + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); return ret; } @@ -2542,6 +2545,8 @@ static int edma_remove(struct platform_device *pdev) if (ecc->dma_memcpy) dma_async_device_unregister(ecc->dma_memcpy); edma_free_slot(ecc, ecc->dummy_slot); + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); return 0; } diff --git a/drivers/dma/ti/k3-psil-am654.c b/drivers/dma/ti/k3-psil-am654.c new file mode 100644 index 000000000000..a896a15908cf --- /dev/null +++ b/drivers/dma/ti/k3-psil-am654.c @@ 
-0,0 +1,175 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + * Author: Peter Ujfalusi <peter.ujfalusi@ti.com> + */ + +#include <linux/kernel.h> + +#include "k3-psil-priv.h" + +#define PSIL_PDMA_XY_TR(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_PDMA_XY, \ + }, \ + } + +#define PSIL_PDMA_XY_PKT(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_PDMA_XY, \ + .pkt_mode = 1, \ + }, \ + } + +#define PSIL_ETHERNET(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_NATIVE, \ + .pkt_mode = 1, \ + .needs_epib = 1, \ + .psd_size = 16, \ + }, \ + } + +#define PSIL_SA2UL(x, tx) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_NATIVE, \ + .pkt_mode = 1, \ + .needs_epib = 1, \ + .psd_size = 64, \ + .notdpkt = tx, \ + }, \ + } + +/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */ +static struct psil_ep am654_src_ep_map[] = { + /* SA2UL */ + PSIL_SA2UL(0x4000, 0), + PSIL_SA2UL(0x4001, 0), + PSIL_SA2UL(0x4002, 0), + PSIL_SA2UL(0x4003, 0), + /* PRU_ICSSG0 */ + PSIL_ETHERNET(0x4100), + PSIL_ETHERNET(0x4101), + PSIL_ETHERNET(0x4102), + PSIL_ETHERNET(0x4103), + /* PRU_ICSSG1 */ + PSIL_ETHERNET(0x4200), + PSIL_ETHERNET(0x4201), + PSIL_ETHERNET(0x4202), + PSIL_ETHERNET(0x4203), + /* PRU_ICSSG2 */ + PSIL_ETHERNET(0x4300), + PSIL_ETHERNET(0x4301), + PSIL_ETHERNET(0x4302), + PSIL_ETHERNET(0x4303), + /* PDMA0 - McASPs */ + PSIL_PDMA_XY_TR(0x4400), + PSIL_PDMA_XY_TR(0x4401), + PSIL_PDMA_XY_TR(0x4402), + /* PDMA1 - SPI0-4 */ + PSIL_PDMA_XY_PKT(0x4500), + PSIL_PDMA_XY_PKT(0x4501), + PSIL_PDMA_XY_PKT(0x4502), + PSIL_PDMA_XY_PKT(0x4503), + PSIL_PDMA_XY_PKT(0x4504), + PSIL_PDMA_XY_PKT(0x4505), + PSIL_PDMA_XY_PKT(0x4506), + PSIL_PDMA_XY_PKT(0x4507), + PSIL_PDMA_XY_PKT(0x4508), + PSIL_PDMA_XY_PKT(0x4509), + PSIL_PDMA_XY_PKT(0x450a), + PSIL_PDMA_XY_PKT(0x450b), + PSIL_PDMA_XY_PKT(0x450c), + PSIL_PDMA_XY_PKT(0x450d), + PSIL_PDMA_XY_PKT(0x450e), + PSIL_PDMA_XY_PKT(0x450f), + PSIL_PDMA_XY_PKT(0x4510), + PSIL_PDMA_XY_PKT(0x4511), + PSIL_PDMA_XY_PKT(0x4512), + PSIL_PDMA_XY_PKT(0x4513), + /* PDMA1 - USART0-2 */ + PSIL_PDMA_XY_PKT(0x4514), + PSIL_PDMA_XY_PKT(0x4515), + PSIL_PDMA_XY_PKT(0x4516), + /* CPSW0 */ + PSIL_ETHERNET(0x7000), + /* MCU_PDMA0 - ADCs */ + PSIL_PDMA_XY_TR(0x7100), + PSIL_PDMA_XY_TR(0x7101), + PSIL_PDMA_XY_TR(0x7102), + PSIL_PDMA_XY_TR(0x7103), + /* MCU_PDMA1 - MCU_SPI0-2 */ + PSIL_PDMA_XY_PKT(0x7200), + PSIL_PDMA_XY_PKT(0x7201), + PSIL_PDMA_XY_PKT(0x7202), + PSIL_PDMA_XY_PKT(0x7203), + PSIL_PDMA_XY_PKT(0x7204), + PSIL_PDMA_XY_PKT(0x7205), + PSIL_PDMA_XY_PKT(0x7206), + PSIL_PDMA_XY_PKT(0x7207), + PSIL_PDMA_XY_PKT(0x7208), + PSIL_PDMA_XY_PKT(0x7209), + PSIL_PDMA_XY_PKT(0x720a), + PSIL_PDMA_XY_PKT(0x720b), + /* MCU_PDMA1 - MCU_USART0 */ + PSIL_PDMA_XY_PKT(0x7212), +}; + +/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */ +static struct psil_ep am654_dst_ep_map[] = { + /* SA2UL */ + PSIL_SA2UL(0xc000, 1), + PSIL_SA2UL(0xc001, 1), + /* PRU_ICSSG0 */ + PSIL_ETHERNET(0xc100), + PSIL_ETHERNET(0xc101), + PSIL_ETHERNET(0xc102), + PSIL_ETHERNET(0xc103), + PSIL_ETHERNET(0xc104), + PSIL_ETHERNET(0xc105), + PSIL_ETHERNET(0xc106), + PSIL_ETHERNET(0xc107), + /* PRU_ICSSG1 */ + PSIL_ETHERNET(0xc200), + PSIL_ETHERNET(0xc201), + PSIL_ETHERNET(0xc202), + PSIL_ETHERNET(0xc203), + PSIL_ETHERNET(0xc204), + PSIL_ETHERNET(0xc205), + PSIL_ETHERNET(0xc206), + PSIL_ETHERNET(0xc207), + /* PRU_ICSSG2 */ + PSIL_ETHERNET(0xc300), + PSIL_ETHERNET(0xc301), + 
PSIL_ETHERNET(0xc302), + PSIL_ETHERNET(0xc303), + PSIL_ETHERNET(0xc304), + PSIL_ETHERNET(0xc305), + PSIL_ETHERNET(0xc306), + PSIL_ETHERNET(0xc307), + /* CPSW0 */ + PSIL_ETHERNET(0xf000), + PSIL_ETHERNET(0xf001), + PSIL_ETHERNET(0xf002), + PSIL_ETHERNET(0xf003), + PSIL_ETHERNET(0xf004), + PSIL_ETHERNET(0xf005), + PSIL_ETHERNET(0xf006), + PSIL_ETHERNET(0xf007), +}; + +struct psil_ep_map am654_ep_map = { + .name = "am654", + .src = am654_src_ep_map, + .src_count = ARRAY_SIZE(am654_src_ep_map), + .dst = am654_dst_ep_map, + .dst_count = ARRAY_SIZE(am654_dst_ep_map), +}; diff --git a/drivers/dma/ti/k3-psil-j721e.c b/drivers/dma/ti/k3-psil-j721e.c new file mode 100644 index 000000000000..e3cfd5f66842 --- /dev/null +++ b/drivers/dma/ti/k3-psil-j721e.c @@ -0,0 +1,222 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + * Author: Peter Ujfalusi <peter.ujfalusi@ti.com> + */ + +#include <linux/kernel.h> + +#include "k3-psil-priv.h" + +#define PSIL_PDMA_XY_TR(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_PDMA_XY, \ + }, \ + } + +#define PSIL_PDMA_XY_PKT(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_PDMA_XY, \ + .pkt_mode = 1, \ + }, \ + } + +#define PSIL_PDMA_MCASP(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_PDMA_XY, \ + .pdma_acc32 = 1, \ + .pdma_burst = 1, \ + }, \ + } + +#define PSIL_ETHERNET(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_NATIVE, \ + .pkt_mode = 1, \ + .needs_epib = 1, \ + .psd_size = 16, \ + }, \ + } + +#define PSIL_SA2UL(x, tx) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_NATIVE, \ + .pkt_mode = 1, \ + .needs_epib = 1, \ + .psd_size = 64, \ + .notdpkt = tx, \ + }, \ + } + +/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */ +static struct psil_ep j721e_src_ep_map[] = { + /* SA2UL */ + PSIL_SA2UL(0x4000, 0), + PSIL_SA2UL(0x4001, 0), + PSIL_SA2UL(0x4002, 0), + PSIL_SA2UL(0x4003, 0), + /* PRU_ICSSG0 */ + PSIL_ETHERNET(0x4100), + PSIL_ETHERNET(0x4101), + PSIL_ETHERNET(0x4102), + PSIL_ETHERNET(0x4103), + /* PRU_ICSSG1 */ + PSIL_ETHERNET(0x4200), + PSIL_ETHERNET(0x4201), + PSIL_ETHERNET(0x4202), + PSIL_ETHERNET(0x4203), + /* PDMA6 (PSIL_PDMA_MCASP_G0) - McASP0-2 */ + PSIL_PDMA_MCASP(0x4400), + PSIL_PDMA_MCASP(0x4401), + PSIL_PDMA_MCASP(0x4402), + /* PDMA7 (PSIL_PDMA_MCASP_G1) - McASP3-11 */ + PSIL_PDMA_MCASP(0x4500), + PSIL_PDMA_MCASP(0x4501), + PSIL_PDMA_MCASP(0x4502), + PSIL_PDMA_MCASP(0x4503), + PSIL_PDMA_MCASP(0x4504), + PSIL_PDMA_MCASP(0x4505), + PSIL_PDMA_MCASP(0x4506), + PSIL_PDMA_MCASP(0x4507), + PSIL_PDMA_MCASP(0x4508), + /* PDMA8 (PDMA_MISC_G0) - SPI0-1 */ + PSIL_PDMA_XY_PKT(0x4600), + PSIL_PDMA_XY_PKT(0x4601), + PSIL_PDMA_XY_PKT(0x4602), + PSIL_PDMA_XY_PKT(0x4603), + PSIL_PDMA_XY_PKT(0x4604), + PSIL_PDMA_XY_PKT(0x4605), + PSIL_PDMA_XY_PKT(0x4606), + PSIL_PDMA_XY_PKT(0x4607), + /* PDMA9 (PDMA_MISC_G1) - SPI2-3 */ + PSIL_PDMA_XY_PKT(0x460c), + PSIL_PDMA_XY_PKT(0x460d), + PSIL_PDMA_XY_PKT(0x460e), + PSIL_PDMA_XY_PKT(0x460f), + PSIL_PDMA_XY_PKT(0x4610), + PSIL_PDMA_XY_PKT(0x4611), + PSIL_PDMA_XY_PKT(0x4612), + PSIL_PDMA_XY_PKT(0x4613), + /* PDMA10 (PDMA_MISC_G2) - SPI4-5 */ + PSIL_PDMA_XY_PKT(0x4618), + PSIL_PDMA_XY_PKT(0x4619), + PSIL_PDMA_XY_PKT(0x461a), + PSIL_PDMA_XY_PKT(0x461b), + PSIL_PDMA_XY_PKT(0x461c), + PSIL_PDMA_XY_PKT(0x461d), + PSIL_PDMA_XY_PKT(0x461e), + PSIL_PDMA_XY_PKT(0x461f), + /* PDMA11 (PDMA_MISC_G3) */ + PSIL_PDMA_XY_PKT(0x4624), + PSIL_PDMA_XY_PKT(0x4625), + 
PSIL_PDMA_XY_PKT(0x4626), + PSIL_PDMA_XY_PKT(0x4627), + PSIL_PDMA_XY_PKT(0x4628), + PSIL_PDMA_XY_PKT(0x4629), + PSIL_PDMA_XY_PKT(0x4630), + PSIL_PDMA_XY_PKT(0x463a), + /* PDMA13 (PDMA_USART_G0) - UART0-1 */ + PSIL_PDMA_XY_PKT(0x4700), + PSIL_PDMA_XY_PKT(0x4701), + /* PDMA14 (PDMA_USART_G1) - UART2-3 */ + PSIL_PDMA_XY_PKT(0x4702), + PSIL_PDMA_XY_PKT(0x4703), + /* PDMA15 (PDMA_USART_G2) - UART4-9 */ + PSIL_PDMA_XY_PKT(0x4704), + PSIL_PDMA_XY_PKT(0x4705), + PSIL_PDMA_XY_PKT(0x4706), + PSIL_PDMA_XY_PKT(0x4707), + PSIL_PDMA_XY_PKT(0x4708), + PSIL_PDMA_XY_PKT(0x4709), + /* CPSW9 */ + PSIL_ETHERNET(0x4a00), + /* CPSW0 */ + PSIL_ETHERNET(0x7000), + /* MCU_PDMA0 (MCU_PDMA_MISC_G0) - SPI0 */ + PSIL_PDMA_XY_PKT(0x7100), + PSIL_PDMA_XY_PKT(0x7101), + PSIL_PDMA_XY_PKT(0x7102), + PSIL_PDMA_XY_PKT(0x7103), + /* MCU_PDMA1 (MCU_PDMA_MISC_G1) - SPI1-2 */ + PSIL_PDMA_XY_PKT(0x7200), + PSIL_PDMA_XY_PKT(0x7201), + PSIL_PDMA_XY_PKT(0x7202), + PSIL_PDMA_XY_PKT(0x7203), + PSIL_PDMA_XY_PKT(0x7204), + PSIL_PDMA_XY_PKT(0x7205), + PSIL_PDMA_XY_PKT(0x7206), + PSIL_PDMA_XY_PKT(0x7207), + /* MCU_PDMA2 (MCU_PDMA_MISC_G2) - UART0 */ + PSIL_PDMA_XY_PKT(0x7300), + /* MCU_PDMA_ADC - ADC0-1 */ + PSIL_PDMA_XY_TR(0x7400), + PSIL_PDMA_XY_TR(0x7401), + PSIL_PDMA_XY_TR(0x7402), + PSIL_PDMA_XY_TR(0x7403), + /* SA2UL */ + PSIL_SA2UL(0x7500, 0), + PSIL_SA2UL(0x7501, 0), +}; + +/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */ +static struct psil_ep j721e_dst_ep_map[] = { + /* SA2UL */ + PSIL_SA2UL(0xc000, 1), + PSIL_SA2UL(0xc001, 1), + /* PRU_ICSSG0 */ + PSIL_ETHERNET(0xc100), + PSIL_ETHERNET(0xc101), + PSIL_ETHERNET(0xc102), + PSIL_ETHERNET(0xc103), + PSIL_ETHERNET(0xc104), + PSIL_ETHERNET(0xc105), + PSIL_ETHERNET(0xc106), + PSIL_ETHERNET(0xc107), + /* PRU_ICSSG1 */ + PSIL_ETHERNET(0xc200), + PSIL_ETHERNET(0xc201), + PSIL_ETHERNET(0xc202), + PSIL_ETHERNET(0xc203), + PSIL_ETHERNET(0xc204), + PSIL_ETHERNET(0xc205), + PSIL_ETHERNET(0xc206), + PSIL_ETHERNET(0xc207), + /* CPSW9 */ + PSIL_ETHERNET(0xca00), + PSIL_ETHERNET(0xca01), + PSIL_ETHERNET(0xca02), + PSIL_ETHERNET(0xca03), + PSIL_ETHERNET(0xca04), + PSIL_ETHERNET(0xca05), + PSIL_ETHERNET(0xca06), + PSIL_ETHERNET(0xca07), + /* CPSW0 */ + PSIL_ETHERNET(0xf000), + PSIL_ETHERNET(0xf001), + PSIL_ETHERNET(0xf002), + PSIL_ETHERNET(0xf003), + PSIL_ETHERNET(0xf004), + PSIL_ETHERNET(0xf005), + PSIL_ETHERNET(0xf006), + PSIL_ETHERNET(0xf007), + /* SA2UL */ + PSIL_SA2UL(0xf500, 1), +}; + +struct psil_ep_map j721e_ep_map = { + .name = "j721e", + .src = j721e_src_ep_map, + .src_count = ARRAY_SIZE(j721e_src_ep_map), + .dst = j721e_dst_ep_map, + .dst_count = ARRAY_SIZE(j721e_dst_ep_map), +}; diff --git a/drivers/dma/ti/k3-psil-priv.h b/drivers/dma/ti/k3-psil-priv.h new file mode 100644 index 000000000000..a1f389ca371e --- /dev/null +++ b/drivers/dma/ti/k3-psil-priv.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + */ + +#ifndef K3_PSIL_PRIV_H_ +#define K3_PSIL_PRIV_H_ + +#include <linux/dma/k3-psil.h> + +struct psil_ep { + u32 thread_id; + struct psil_endpoint_config ep_config; +}; + +/** + * struct psil_ep_map - PSI-L thread ID configuration maps + * @name: Name of the map, set it to the name of the SoC + * @src: Array of source PSI-L thread configurations + * @src_count: Number of entries in the src array + * @dst: Array of destination PSI-L thread configurations + * @dst_count: Number of entries in the dst array + * + * In case of symmetric configuration for a matching src/dst thread (for 
example + 0x4400 and 0xc400) only the src configuration can be present. If no dst + configuration is found, the code will look for (dst_thread_id & ~0x8000) to find + the symmetric match. + */ +struct psil_ep_map { + char *name; + struct psil_ep *src; + int src_count; + struct psil_ep *dst; + int dst_count; +}; + +struct psil_endpoint_config *psil_get_ep_config(u32 thread_id); + +/* SoC PSI-L endpoint maps */ +extern struct psil_ep_map am654_ep_map; +extern struct psil_ep_map j721e_ep_map; + +#endif /* K3_PSIL_PRIV_H_ */ diff --git a/drivers/dma/ti/k3-psil.c b/drivers/dma/ti/k3-psil.c new file mode 100644 index 000000000000..d7b965049ccb --- /dev/null +++ b/drivers/dma/ti/k3-psil.c @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + * Author: Peter Ujfalusi <peter.ujfalusi@ti.com> + */ + +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/init.h> +#include <linux/mutex.h> +#include <linux/of.h> + +#include "k3-psil-priv.h" + +static DEFINE_MUTEX(ep_map_mutex); +static struct psil_ep_map *soc_ep_map; + +struct psil_endpoint_config *psil_get_ep_config(u32 thread_id) +{ + int i; + + mutex_lock(&ep_map_mutex); + if (!soc_ep_map) { + if (of_machine_is_compatible("ti,am654")) { + soc_ep_map = &am654_ep_map; + } else if (of_machine_is_compatible("ti,j721e")) { + soc_ep_map = &j721e_ep_map; + } else { + pr_err("PSIL: No compatible machine found for map\n"); + mutex_unlock(&ep_map_mutex); + return ERR_PTR(-ENOTSUPP); + } + pr_debug("%s: Using map for %s\n", __func__, soc_ep_map->name); + } + mutex_unlock(&ep_map_mutex); + + if (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET && soc_ep_map->dst) { + /* check in destination thread map */ + for (i = 0; i < soc_ep_map->dst_count; i++) { + if (soc_ep_map->dst[i].thread_id == thread_id) + return &soc_ep_map->dst[i].ep_config; + } + } + + thread_id &= ~K3_PSIL_DST_THREAD_ID_OFFSET; + if (soc_ep_map->src) { + for (i = 0; i < soc_ep_map->src_count; i++) { + if (soc_ep_map->src[i].thread_id == thread_id) + return &soc_ep_map->src[i].ep_config; + } + } + + return ERR_PTR(-ENOENT); +} +EXPORT_SYMBOL_GPL(psil_get_ep_config); + +int psil_set_new_ep_config(struct device *dev, const char *name, + struct psil_endpoint_config *ep_config) +{ + struct psil_endpoint_config *dst_ep_config; + struct of_phandle_args dma_spec; + u32 thread_id; + int index; + + if (!dev || !dev->of_node) + return -EINVAL; + + index = of_property_match_string(dev->of_node, "dma-names", name); + if (index < 0) + return index; + + if (of_parse_phandle_with_args(dev->of_node, "dmas", "#dma-cells", + index, &dma_spec)) + return -ENOENT; + + thread_id = dma_spec.args[0]; + + dst_ep_config = psil_get_ep_config(thread_id); + if (IS_ERR(dst_ep_config)) { + pr_err("PSIL: thread ID 0x%04x not defined in map\n", + thread_id); + of_node_put(dma_spec.np); + return PTR_ERR(dst_ep_config); + } + + memcpy(dst_ep_config, ep_config, sizeof(*dst_ep_config)); + + of_node_put(dma_spec.np); + return 0; +} +EXPORT_SYMBOL_GPL(psil_set_new_ep_config); diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c new file mode 100644 index 000000000000..c1511298ece2 --- /dev/null +++ b/drivers/dma/ti/k3-udma-glue.c @@ -0,0 +1,1198 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * K3 NAVSS DMA glue interface + * + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + * + */ + +#include <linux/atomic.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/io.h> +#include <linux/init.h> +#include
<linux/of.h> +#include <linux/platform_device.h> +#include <linux/soc/ti/k3-ringacc.h> +#include <linux/dma/ti-cppi5.h> +#include <linux/dma/k3-udma-glue.h> + +#include "k3-udma.h" +#include "k3-psil-priv.h" + +struct k3_udma_glue_common { + struct device *dev; + struct udma_dev *udmax; + const struct udma_tisci_rm *tisci_rm; + struct k3_ringacc *ringacc; + u32 src_thread; + u32 dst_thread; + + u32 hdesc_size; + bool epib; + u32 psdata_size; + u32 swdata_size; +}; + +struct k3_udma_glue_tx_channel { + struct k3_udma_glue_common common; + + struct udma_tchan *udma_tchanx; + int udma_tchan_id; + + struct k3_ring *ringtx; + struct k3_ring *ringtxcq; + + bool psil_paired; + + int virq; + + atomic_t free_pkts; + bool tx_pause_on_err; + bool tx_filt_einfo; + bool tx_filt_pswords; + bool tx_supr_tdpkt; +}; + +struct k3_udma_glue_rx_flow { + struct udma_rflow *udma_rflow; + int udma_rflow_id; + struct k3_ring *ringrx; + struct k3_ring *ringrxfdq; + + int virq; +}; + +struct k3_udma_glue_rx_channel { + struct k3_udma_glue_common common; + + struct udma_rchan *udma_rchanx; + int udma_rchan_id; + bool remote; + + bool psil_paired; + + u32 swdata_size; + int flow_id_base; + + struct k3_udma_glue_rx_flow *flows; + u32 flow_num; + u32 flows_ready; +}; + +#define K3_UDMAX_TDOWN_TIMEOUT_US 1000 + +static int of_k3_udma_glue_parse(struct device_node *udmax_np, + struct k3_udma_glue_common *common) +{ + common->ringacc = of_k3_ringacc_get_by_phandle(udmax_np, + "ti,ringacc"); + if (IS_ERR(common->ringacc)) + return PTR_ERR(common->ringacc); + + common->udmax = of_xudma_dev_get(udmax_np, NULL); + if (IS_ERR(common->udmax)) + return PTR_ERR(common->udmax); + + common->tisci_rm = xudma_dev_get_tisci_rm(common->udmax); + + return 0; +} + +static int of_k3_udma_glue_parse_chn(struct device_node *chn_np, + const char *name, struct k3_udma_glue_common *common, + bool tx_chn) +{ + struct psil_endpoint_config *ep_config; + struct of_phandle_args dma_spec; + u32 thread_id; + int ret = 0; + int index; + + if (unlikely(!name)) + return -EINVAL; + + index = of_property_match_string(chn_np, "dma-names", name); + if (index < 0) + return index; + + if (of_parse_phandle_with_args(chn_np, "dmas", "#dma-cells", index, + &dma_spec)) + return -ENOENT; + + thread_id = dma_spec.args[0]; + + if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) { + ret = -EINVAL; + goto out_put_spec; + } + + if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) { + ret = -EINVAL; + goto out_put_spec; + } + + /* get psil endpoint config */ + ep_config = psil_get_ep_config(thread_id); + if (IS_ERR(ep_config)) { + dev_err(common->dev, + "No configuration for psi-l thread 0x%04x\n", + thread_id); + ret = PTR_ERR(ep_config); + goto out_put_spec; + } + + common->epib = ep_config->needs_epib; + common->psdata_size = ep_config->psd_size; + + if (tx_chn) + common->dst_thread = thread_id; + else + common->src_thread = thread_id; + + ret = of_k3_udma_glue_parse(dma_spec.np, common); + +out_put_spec: + of_node_put(dma_spec.np); + return ret; +}; + +static void k3_udma_glue_dump_tx_chn(struct k3_udma_glue_tx_channel *tx_chn) +{ + struct device *dev = tx_chn->common.dev; + + dev_dbg(dev, "dump_tx_chn:\n" + "udma_tchan_id: %d\n" + "src_thread: %08x\n" + "dst_thread: %08x\n", + tx_chn->udma_tchan_id, + tx_chn->common.src_thread, + tx_chn->common.dst_thread); +} + +static void k3_udma_glue_dump_tx_rt_chn(struct k3_udma_glue_tx_channel *chn, + char *mark) +{ + struct device *dev = chn->common.dev; + + dev_dbg(dev, "=== dump ===> %s\n", mark); + 
dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_CTL_REG, + xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG)); + dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_PEER_RT_EN_REG, + xudma_tchanrt_read(chn->udma_tchanx, + UDMA_TCHAN_RT_PEER_RT_EN_REG)); + dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_PCNT_REG, + xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_PCNT_REG)); + dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_BCNT_REG, + xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_BCNT_REG)); + dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_SBCNT_REG, + xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_SBCNT_REG)); +} + +static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn) +{ + const struct udma_tisci_rm *tisci_rm = tx_chn->common.tisci_rm; + struct ti_sci_msg_rm_udmap_tx_ch_cfg req; + + memset(&req, 0, sizeof(req)); + + req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID; + req.nav_id = tisci_rm->tisci_dev_id; + req.index = tx_chn->udma_tchan_id; + if (tx_chn->tx_pause_on_err) + req.tx_pause_on_err = 1; + if (tx_chn->tx_filt_einfo) + req.tx_filt_einfo = 1; + if (tx_chn->tx_filt_pswords) + req.tx_filt_pswords = 1; + req.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR; + if (tx_chn->tx_supr_tdpkt) + req.tx_supr_tdpkt = 1; + req.tx_fetch_size = tx_chn->common.hdesc_size >> 2; + req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq); + + return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req); +} + +struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev, + const char *name, struct k3_udma_glue_tx_channel_cfg *cfg) +{ + struct k3_udma_glue_tx_channel *tx_chn; + int ret; + + tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL); + if (!tx_chn) + return ERR_PTR(-ENOMEM); + + tx_chn->common.dev = dev; + tx_chn->common.swdata_size = cfg->swdata_size; + tx_chn->tx_pause_on_err = cfg->tx_pause_on_err; + tx_chn->tx_filt_einfo = cfg->tx_filt_einfo; + tx_chn->tx_filt_pswords = cfg->tx_filt_pswords; + tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt; + + /* parse of udmap channel */ + ret = of_k3_udma_glue_parse_chn(dev->of_node, name, + &tx_chn->common, true); + if (ret) + goto err; + + tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib, + tx_chn->common.psdata_size, + tx_chn->common.swdata_size); + + /* request and cfg UDMAP TX channel */ + tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax, -1); + if (IS_ERR(tx_chn->udma_tchanx)) { + ret = PTR_ERR(tx_chn->udma_tchanx); + dev_err(dev, "UDMAX tchanx get err %d\n", ret); + goto err; + } + tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx); + + atomic_set(&tx_chn->free_pkts, cfg->txcq_cfg.size); + + /* request and cfg rings */ + tx_chn->ringtx = k3_ringacc_request_ring(tx_chn->common.ringacc, + tx_chn->udma_tchan_id, 0); + if (!tx_chn->ringtx) { + ret = -ENODEV; + dev_err(dev, "Failed to get TX ring %u\n", + tx_chn->udma_tchan_id); + goto err; + } + + tx_chn->ringtxcq = k3_ringacc_request_ring(tx_chn->common.ringacc, + -1, 0); + if (!tx_chn->ringtxcq) { + ret = -ENODEV; + dev_err(dev, "Failed to get TXCQ ring\n"); + goto err; + } + + ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg); + if (ret) { + dev_err(dev, "Failed to cfg ringtx 
%d\n", ret); + goto err; + } + + ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg); + if (ret) { + dev_err(dev, "Failed to cfg ringtxcq %d\n", ret); + goto err; + } + + /* request and cfg psi-l */ + tx_chn->common.src_thread = + xudma_dev_get_psil_base(tx_chn->common.udmax) + + tx_chn->udma_tchan_id; + + ret = k3_udma_glue_cfg_tx_chn(tx_chn); + if (ret) { + dev_err(dev, "Failed to cfg tchan %d\n", ret); + goto err; + } + + ret = xudma_navss_psil_pair(tx_chn->common.udmax, + tx_chn->common.src_thread, + tx_chn->common.dst_thread); + if (ret) { + dev_err(dev, "PSI-L request err %d\n", ret); + goto err; + } + + tx_chn->psil_paired = true; + + /* reset TX RT registers */ + k3_udma_glue_disable_tx_chn(tx_chn); + + k3_udma_glue_dump_tx_chn(tx_chn); + + return tx_chn; + +err: + k3_udma_glue_release_tx_chn(tx_chn); + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn); + +void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn) +{ + if (tx_chn->psil_paired) { + xudma_navss_psil_unpair(tx_chn->common.udmax, + tx_chn->common.src_thread, + tx_chn->common.dst_thread); + tx_chn->psil_paired = false; + } + + if (!IS_ERR_OR_NULL(tx_chn->udma_tchanx)) + xudma_tchan_put(tx_chn->common.udmax, + tx_chn->udma_tchanx); + + if (tx_chn->ringtxcq) + k3_ringacc_ring_free(tx_chn->ringtxcq); + + if (tx_chn->ringtx) + k3_ringacc_ring_free(tx_chn->ringtx); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_release_tx_chn); + +int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn, + struct cppi5_host_desc_t *desc_tx, + dma_addr_t desc_dma) +{ + u32 ringtxcq_id; + + if (!atomic_add_unless(&tx_chn->free_pkts, -1, 0)) + return -ENOMEM; + + ringtxcq_id = k3_ringacc_get_ring_id(tx_chn->ringtxcq); + cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, ringtxcq_id); + + return k3_ringacc_ring_push(tx_chn->ringtx, &desc_dma); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_push_tx_chn); + +int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn, + dma_addr_t *desc_dma) +{ + int ret; + + ret = k3_ringacc_ring_pop(tx_chn->ringtxcq, desc_dma); + if (!ret) + atomic_inc(&tx_chn->free_pkts); + + return ret; +} +EXPORT_SYMBOL_GPL(k3_udma_glue_pop_tx_chn); + +int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn) +{ + u32 txrt_ctl; + + txrt_ctl = UDMA_PEER_RT_EN_ENABLE; + xudma_tchanrt_write(tx_chn->udma_tchanx, + UDMA_TCHAN_RT_PEER_RT_EN_REG, + txrt_ctl); + + txrt_ctl = xudma_tchanrt_read(tx_chn->udma_tchanx, + UDMA_TCHAN_RT_CTL_REG); + txrt_ctl |= UDMA_CHAN_RT_CTL_EN; + xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG, + txrt_ctl); + + k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn en"); + return 0; +} +EXPORT_SYMBOL_GPL(k3_udma_glue_enable_tx_chn); + +void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn) +{ + k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis1"); + + xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG, 0); + + xudma_tchanrt_write(tx_chn->udma_tchanx, + UDMA_TCHAN_RT_PEER_RT_EN_REG, 0); + k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis2"); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_disable_tx_chn); + +void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn, + bool sync) +{ + int i = 0; + u32 val; + + k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown1"); + + xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_EN | UDMA_CHAN_RT_CTL_TDOWN); + + val = xudma_tchanrt_read(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG); + + while (sync && (val & UDMA_CHAN_RT_CTL_EN)) { + val =
xudma_tchanrt_read(tx_chn->udma_tchanx, + UDMA_TCHAN_RT_CTL_REG); + udelay(1); + if (i > K3_UDMAX_TDOWN_TIMEOUT_US) { + dev_err(tx_chn->common.dev, "TX tdown timeout\n"); + break; + } + i++; + } + + val = xudma_tchanrt_read(tx_chn->udma_tchanx, + UDMA_TCHAN_RT_PEER_RT_EN_REG); + if (sync && (val & UDMA_PEER_RT_EN_ENABLE)) + dev_err(tx_chn->common.dev, "TX tdown peer not stopped\n"); + k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown2"); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_tx_chn); + +void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn, + void *data, + void (*cleanup)(void *data, dma_addr_t desc_dma)) +{ + dma_addr_t desc_dma; + int occ_tx, i, ret; + + /* reset TXCQ as it is not an input for udma - expected to be empty */ + if (tx_chn->ringtxcq) + k3_ringacc_ring_reset(tx_chn->ringtxcq); + + /* + * TXQ reset needs to be done in a special way as it is an input for + * udma and its state is cached by udma, so: + * 1) save TXQ occ + * 2) clean up TXQ and call callback .cleanup() for each desc + * 3) reset TXQ in a special way + */ + occ_tx = k3_ringacc_ring_get_occ(tx_chn->ringtx); + dev_dbg(tx_chn->common.dev, "TX reset occ_tx %u\n", occ_tx); + + for (i = 0; i < occ_tx; i++) { + ret = k3_ringacc_ring_pop(tx_chn->ringtx, &desc_dma); + if (ret) { + dev_err(tx_chn->common.dev, "TX reset pop %d\n", ret); + break; + } + cleanup(data, desc_dma); + } + + k3_ringacc_ring_reset_dma(tx_chn->ringtx, occ_tx); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_reset_tx_chn); + +u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn) +{ + return tx_chn->common.hdesc_size; +} +EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_hdesc_size); + +u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn) +{ + return k3_ringacc_get_ring_id(tx_chn->ringtxcq); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_txcq_id); + +int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn) +{ + tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq); + + return tx_chn->virq; +} +EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_irq); + +static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn) +{ + const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm; + struct ti_sci_msg_rm_udmap_rx_ch_cfg req; + int ret; + + memset(&req, 0, sizeof(req)); + + req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID; + + req.nav_id = tisci_rm->tisci_dev_id; + req.index = rx_chn->udma_rchan_id; + req.rx_fetch_size = rx_chn->common.hdesc_size >> 2; + /* + * TODO: we can't support rxcq_qnum/RCHAN[a]_RCQ cfg with current sysfw + * and udmax impl, so just configure it to an invalid value.
+ * req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx); + */ + req.rxcq_qnum = 0xFFFF; + if (rx_chn->flow_num && rx_chn->flow_id_base != rx_chn->udma_rchan_id) { + /* Default flow + extra ones */ + req.flowid_start = rx_chn->flow_id_base; + req.flowid_cnt = rx_chn->flow_num; + } + req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR; + + ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req); + if (ret) + dev_err(rx_chn->common.dev, "rchan%d cfg failed %d\n", + rx_chn->udma_rchan_id, ret); + + return ret; +} + +static void k3_udma_glue_release_rx_flow(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_num) +{ + struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num]; + + if (IS_ERR_OR_NULL(flow->udma_rflow)) + return; + + if (flow->ringrxfdq) + k3_ringacc_ring_free(flow->ringrxfdq); + + if (flow->ringrx) + k3_ringacc_ring_free(flow->ringrx); + + xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow); + flow->udma_rflow = NULL; + rx_chn->flows_ready--; +} + +static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_idx, + struct k3_udma_glue_rx_flow_cfg *flow_cfg) +{ + struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx]; + const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm; + struct device *dev = rx_chn->common.dev; + struct ti_sci_msg_rm_udmap_flow_cfg req; + int rx_ring_id; + int rx_ringfdq_id; + int ret = 0; + + flow->udma_rflow = xudma_rflow_get(rx_chn->common.udmax, + flow->udma_rflow_id); + if (IS_ERR(flow->udma_rflow)) { + ret = PTR_ERR(flow->udma_rflow); + dev_err(dev, "UDMAX rflow get err %d\n", ret); + goto err; + } + + if (flow->udma_rflow_id != xudma_rflow_get_id(flow->udma_rflow)) { + xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow); + flow->udma_rflow = NULL; + return -ENODEV; + } + + /* request and cfg rings */ + flow->ringrx = k3_ringacc_request_ring(rx_chn->common.ringacc, + flow_cfg->ring_rxq_id, 0); + if (!flow->ringrx) { + ret = -ENODEV; + dev_err(dev, "Failed to get RX ring\n"); + goto err; + } + + flow->ringrxfdq = k3_ringacc_request_ring(rx_chn->common.ringacc, + flow_cfg->ring_rxfdq0_id, 0); + if (!flow->ringrxfdq) { + ret = -ENODEV; + dev_err(dev, "Failed to get RXFDQ ring\n"); + goto err; + } + + ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg); + if (ret) { + dev_err(dev, "Failed to cfg ringrx %d\n", ret); + goto err; + } + + ret = k3_ringacc_ring_cfg(flow->ringrxfdq, &flow_cfg->rxfdq_cfg); + if (ret) { + dev_err(dev, "Failed to cfg ringrxfdq %d\n", ret); + goto err; + } + + if (rx_chn->remote) { + rx_ring_id = TI_SCI_RESOURCE_NULL; + rx_ringfdq_id = TI_SCI_RESOURCE_NULL; + } else { + rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx); + rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq); + } + + memset(&req, 0, sizeof(req)); + + req.valid_params = + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID; + req.nav_id = tisci_rm->tisci_dev_id; +
req.flow_index = flow->udma_rflow_id; + if (rx_chn->common.epib) + req.rx_einfo_present = 1; + if (rx_chn->common.psdata_size) + req.rx_psinfo_present = 1; + if (flow_cfg->rx_error_handling) + req.rx_error_handling = 1; + req.rx_desc_type = 0; + req.rx_dest_qnum = rx_ring_id; + req.rx_src_tag_hi_sel = 0; + req.rx_src_tag_lo_sel = flow_cfg->src_tag_lo_sel; + req.rx_dest_tag_hi_sel = 0; + req.rx_dest_tag_lo_sel = 0; + req.rx_fdq0_sz0_qnum = rx_ringfdq_id; + req.rx_fdq1_qnum = rx_ringfdq_id; + req.rx_fdq2_qnum = rx_ringfdq_id; + req.rx_fdq3_qnum = rx_ringfdq_id; + + ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req); + if (ret) { + dev_err(dev, "flow%d config failed: %d\n", flow->udma_rflow_id, + ret); + goto err; + } + + rx_chn->flows_ready++; + dev_dbg(dev, "flow%d config done. ready:%d\n", + flow->udma_rflow_id, rx_chn->flows_ready); + + return 0; +err: + k3_udma_glue_release_rx_flow(rx_chn, flow_idx); + return ret; +} + +static void k3_udma_glue_dump_rx_chn(struct k3_udma_glue_rx_channel *chn) +{ + struct device *dev = chn->common.dev; + + dev_dbg(dev, "dump_rx_chn:\n" + "udma_rchan_id: %d\n" + "src_thread: %08x\n" + "dst_thread: %08x\n" + "epib: %d\n" + "hdesc_size: %u\n" + "psdata_size: %u\n" + "swdata_size: %u\n" + "flow_id_base: %d\n" + "flow_num: %d\n", + chn->udma_rchan_id, + chn->common.src_thread, + chn->common.dst_thread, + chn->common.epib, + chn->common.hdesc_size, + chn->common.psdata_size, + chn->common.swdata_size, + chn->flow_id_base, + chn->flow_num); +} + +static void k3_udma_glue_dump_rx_rt_chn(struct k3_udma_glue_rx_channel *chn, + char *mark) +{ + struct device *dev = chn->common.dev; + + dev_dbg(dev, "=== dump ===> %s\n", mark); + + dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_CTL_REG, + xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG)); + dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_PEER_RT_EN_REG, + xudma_rchanrt_read(chn->udma_rchanx, + UDMA_RCHAN_RT_PEER_RT_EN_REG)); + dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_PCNT_REG, + xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_PCNT_REG)); + dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_BCNT_REG, + xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_BCNT_REG)); + dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_SBCNT_REG, + xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_SBCNT_REG)); +} + +static int +k3_udma_glue_allocate_rx_flows(struct k3_udma_glue_rx_channel *rx_chn, + struct k3_udma_glue_rx_channel_cfg *cfg) +{ + int ret; + + /* default rflow */ + if (cfg->flow_id_use_rxchan_id) + return 0; + + /* not GP rflows */ + if (rx_chn->flow_id_base != -1 && + !xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base)) + return 0; + + /* Allocate range of GP rflows */ + ret = xudma_alloc_gp_rflow_range(rx_chn->common.udmax, + rx_chn->flow_id_base, + rx_chn->flow_num); + if (ret < 0) { + dev_err(rx_chn->common.dev, "UDMAX reserve_rflow %d cnt:%d err: %d\n", + rx_chn->flow_id_base, rx_chn->flow_num, ret); + return ret; + } + rx_chn->flow_id_base = ret; + + return 0; +} + +static struct k3_udma_glue_rx_channel * +k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name, + struct k3_udma_glue_rx_channel_cfg *cfg) +{ + struct k3_udma_glue_rx_channel *rx_chn; + int ret, i; + + if (cfg->flow_id_num <= 0) + return ERR_PTR(-EINVAL); + + if (cfg->flow_id_num != 1 && + (cfg->def_flow_cfg || cfg->flow_id_use_rxchan_id)) + return ERR_PTR(-EINVAL); + + rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL); + if (!rx_chn) + return ERR_PTR(-ENOMEM); + + rx_chn->common.dev = dev; +
rx_chn->common.swdata_size = cfg->swdata_size; + rx_chn->remote = false; + + /* parse of udmap channel */ + ret = of_k3_udma_glue_parse_chn(dev->of_node, name, + &rx_chn->common, false); + if (ret) + goto err; + + rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib, + rx_chn->common.psdata_size, + rx_chn->common.swdata_size); + + /* request and cfg UDMAP RX channel */ + rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax, -1); + if (IS_ERR(rx_chn->udma_rchanx)) { + ret = PTR_ERR(rx_chn->udma_rchanx); + dev_err(dev, "UDMAX rchanx get err %d\n", ret); + goto err; + } + rx_chn->udma_rchan_id = xudma_rchan_get_id(rx_chn->udma_rchanx); + + rx_chn->flow_num = cfg->flow_id_num; + rx_chn->flow_id_base = cfg->flow_id_base; + + /* Use RX channel id as flow id: target dev can't generate flow_id */ + if (cfg->flow_id_use_rxchan_id) + rx_chn->flow_id_base = rx_chn->udma_rchan_id; + + rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num, + sizeof(*rx_chn->flows), GFP_KERNEL); + if (!rx_chn->flows) { + ret = -ENOMEM; + goto err; + } + + ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg); + if (ret) + goto err; + + for (i = 0; i < rx_chn->flow_num; i++) + rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i; + + /* request and cfg psi-l */ + rx_chn->common.dst_thread = + xudma_dev_get_psil_base(rx_chn->common.udmax) + + rx_chn->udma_rchan_id; + + ret = k3_udma_glue_cfg_rx_chn(rx_chn); + if (ret) { + dev_err(dev, "Failed to cfg rchan %d\n", ret); + goto err; + } + + /* init default RX flow only if flow_num = 1 */ + if (cfg->def_flow_cfg) { + ret = k3_udma_glue_cfg_rx_flow(rx_chn, 0, cfg->def_flow_cfg); + if (ret) + goto err; + } + + ret = xudma_navss_psil_pair(rx_chn->common.udmax, + rx_chn->common.src_thread, + rx_chn->common.dst_thread); + if (ret) { + dev_err(dev, "PSI-L request err %d\n", ret); + goto err; + } + + rx_chn->psil_paired = true; + + /* reset RX RT registers */ + k3_udma_glue_disable_rx_chn(rx_chn); + + k3_udma_glue_dump_rx_chn(rx_chn); + + return rx_chn; + +err: + k3_udma_glue_release_rx_chn(rx_chn); + return ERR_PTR(ret); +} + +static struct k3_udma_glue_rx_channel * +k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name, + struct k3_udma_glue_rx_channel_cfg *cfg) +{ + struct k3_udma_glue_rx_channel *rx_chn; + int ret, i; + + if (cfg->flow_id_num <= 0 || + cfg->flow_id_use_rxchan_id || + cfg->def_flow_cfg || + cfg->flow_id_base < 0) + return ERR_PTR(-EINVAL); + + /* + * A remote RX channel is under the control of a remote CPU core, so + * Linux can only request it and manipulate it via its dedicated RX + * flows. + */ + + rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL); + if (!rx_chn) + return ERR_PTR(-ENOMEM); + + rx_chn->common.dev = dev; + rx_chn->common.swdata_size = cfg->swdata_size; + rx_chn->remote = true; + rx_chn->udma_rchan_id = -1; + rx_chn->flow_num = cfg->flow_id_num; + rx_chn->flow_id_base = cfg->flow_id_base; + rx_chn->psil_paired = false; + + /* parse of udmap channel */ + ret = of_k3_udma_glue_parse_chn(dev->of_node, name, + &rx_chn->common, false); + if (ret) + goto err; + + rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib, + rx_chn->common.psdata_size, + rx_chn->common.swdata_size); + + rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num, + sizeof(*rx_chn->flows), GFP_KERNEL); + if (!rx_chn->flows) { + ret = -ENOMEM; + goto err; + } + + ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg); + if (ret) + goto err; + + for (i = 0; i < rx_chn->flow_num; i++) + rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i; + +
k3_udma_glue_dump_rx_chn(rx_chn); + + return rx_chn; + +err: + k3_udma_glue_release_rx_chn(rx_chn); + return ERR_PTR(ret); +} + +struct k3_udma_glue_rx_channel * +k3_udma_glue_request_rx_chn(struct device *dev, const char *name, + struct k3_udma_glue_rx_channel_cfg *cfg) +{ + if (cfg->remote) + return k3_udma_glue_request_remote_rx_chn(dev, name, cfg); + else + return k3_udma_glue_request_rx_chn_priv(dev, name, cfg); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_request_rx_chn); + +void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn) +{ + int i; + + if (IS_ERR_OR_NULL(rx_chn->common.udmax)) + return; + + if (rx_chn->psil_paired) { + xudma_navss_psil_unpair(rx_chn->common.udmax, + rx_chn->common.src_thread, + rx_chn->common.dst_thread); + rx_chn->psil_paired = false; + } + + for (i = 0; i < rx_chn->flow_num; i++) + k3_udma_glue_release_rx_flow(rx_chn, i); + + if (xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base)) + xudma_free_gp_rflow_range(rx_chn->common.udmax, + rx_chn->flow_id_base, + rx_chn->flow_num); + + if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx)) + xudma_rchan_put(rx_chn->common.udmax, + rx_chn->udma_rchanx); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_release_rx_chn); + +int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_idx, + struct k3_udma_glue_rx_flow_cfg *flow_cfg) +{ + if (flow_idx >= rx_chn->flow_num) + return -EINVAL; + + return k3_udma_glue_cfg_rx_flow(rx_chn, flow_idx, flow_cfg); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_init); + +u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_idx) +{ + struct k3_udma_glue_rx_flow *flow; + + if (flow_idx >= rx_chn->flow_num) + return -EINVAL; + + flow = &rx_chn->flows[flow_idx]; + + return k3_ringacc_get_ring_id(flow->ringrxfdq); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_get_fdq_id); + +u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn) +{ + return rx_chn->flow_id_base; +} +EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_flow_id_base); + +int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_idx) +{ + struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx]; + const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm; + struct device *dev = rx_chn->common.dev; + struct ti_sci_msg_rm_udmap_flow_cfg req; + int rx_ring_id; + int rx_ringfdq_id; + int ret = 0; + + if (!rx_chn->remote) + return -EINVAL; + + rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx); + rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq); + + memset(&req, 0, sizeof(req)); + + req.valid_params = + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID; + req.nav_id = tisci_rm->tisci_dev_id; + req.flow_index = flow->udma_rflow_id; + req.rx_dest_qnum = rx_ring_id; + req.rx_fdq0_sz0_qnum = rx_ringfdq_id; + req.rx_fdq1_qnum = rx_ringfdq_id; + req.rx_fdq2_qnum = rx_ringfdq_id; + req.rx_fdq3_qnum = rx_ringfdq_id; + + ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req); + if (ret) { + dev_err(dev, "flow%d enable failed: %d\n", flow->udma_rflow_id, + ret); + } + + return ret; +} +EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_enable); + +int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_idx) +{ + struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx]; + const struct udma_tisci_rm *tisci_rm = 
rx_chn->common.tisci_rm; + struct device *dev = rx_chn->common.dev; + struct ti_sci_msg_rm_udmap_flow_cfg req; + int ret = 0; + + if (!rx_chn->remote) + return -EINVAL; + + memset(&req, 0, sizeof(req)); + req.valid_params = + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID; + req.nav_id = tisci_rm->tisci_dev_id; + req.flow_index = flow->udma_rflow_id; + req.rx_dest_qnum = TI_SCI_RESOURCE_NULL; + req.rx_fdq0_sz0_qnum = TI_SCI_RESOURCE_NULL; + req.rx_fdq1_qnum = TI_SCI_RESOURCE_NULL; + req.rx_fdq2_qnum = TI_SCI_RESOURCE_NULL; + req.rx_fdq3_qnum = TI_SCI_RESOURCE_NULL; + + ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req); + if (ret) { + dev_err(dev, "flow%d disable failed: %d\n", flow->udma_rflow_id, + ret); + } + + return ret; +} +EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_disable); + +int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn) +{ + u32 rxrt_ctl; + + if (rx_chn->remote) + return -EINVAL; + + if (rx_chn->flows_ready < rx_chn->flow_num) + return -EINVAL; + + rxrt_ctl = xudma_rchanrt_read(rx_chn->udma_rchanx, + UDMA_RCHAN_RT_CTL_REG); + rxrt_ctl |= UDMA_CHAN_RT_CTL_EN; + xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG, + rxrt_ctl); + + xudma_rchanrt_write(rx_chn->udma_rchanx, + UDMA_RCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_ENABLE); + + k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt en"); + return 0; +} +EXPORT_SYMBOL_GPL(k3_udma_glue_enable_rx_chn); + +void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn) +{ + k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis1"); + + xudma_rchanrt_write(rx_chn->udma_rchanx, + UDMA_RCHAN_RT_PEER_RT_EN_REG, + 0); + xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG, 0); + + k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis2"); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_disable_rx_chn); + +void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn, + bool sync) +{ + int i = 0; + u32 val; + + if (rx_chn->remote) + return; + + k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown1"); + + xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_RCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_ENABLE | UDMA_PEER_RT_EN_TEARDOWN); + + val = xudma_rchanrt_read(rx_chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG); + + while (sync && (val & UDMA_CHAN_RT_CTL_EN)) { + val = xudma_rchanrt_read(rx_chn->udma_rchanx, + UDMA_RCHAN_RT_CTL_REG); + udelay(1); + if (i > K3_UDMAX_TDOWN_TIMEOUT_US) { + dev_err(rx_chn->common.dev, "RX tdown timeout\n"); + break; + } + i++; + } + + val = xudma_rchanrt_read(rx_chn->udma_rchanx, + UDMA_RCHAN_RT_PEER_RT_EN_REG); + if (sync && (val & UDMA_PEER_RT_EN_ENABLE)) + dev_err(rx_chn->common.dev, "RX tdown peer not stopped\n"); + k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown2"); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_rx_chn); + +void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_num, void *data, + void (*cleanup)(void *data, dma_addr_t desc_dma), bool skip_fdq) +{ + struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num]; + struct device *dev = rx_chn->common.dev; + dma_addr_t desc_dma; + int occ_rx, i, ret; + + /* reset RXCQ as it is not an input for udma - expected to be empty */ + occ_rx = k3_ringacc_ring_get_occ(flow->ringrx); + dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx); + if (flow->ringrx) + k3_ringacc_ring_reset(flow->ringrx); + + /*
+void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+			       u32 flow_num, void *data,
+			       void (*cleanup)(void *data, dma_addr_t desc_dma),
+			       bool skip_fdq)
+{
+	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
+	struct device *dev = rx_chn->common.dev;
+	dma_addr_t desc_dma;
+	int occ_rx, i, ret;
+
+	/* reset RXCQ as it is not input for udma - expected to be empty */
+	occ_rx = k3_ringacc_ring_get_occ(flow->ringrx);
+	dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx);
+	if (flow->ringrx)
+		k3_ringacc_ring_reset(flow->ringrx);
+
+	/* Skip the RX FDQ in case one FDQ is used for the whole set of flows */
+	if (skip_fdq)
+		return;
+
+	/*
+	 * The RX FDQ reset needs to be done in a special way, as it is input
+	 * for the UDMA and its state is cached by the UDMA, so:
+	 * 1) save the RX FDQ occupancy
+	 * 2) clean up the RX FDQ and call the .cleanup() callback for each desc
+	 * 3) reset the RX FDQ in a special way
+	 */
+	occ_rx = k3_ringacc_ring_get_occ(flow->ringrxfdq);
+	dev_dbg(dev, "RX reset flow %u occ_rx_fdq %u\n", flow_num, occ_rx);
+
+	for (i = 0; i < occ_rx; i++) {
+		ret = k3_ringacc_ring_pop(flow->ringrxfdq, &desc_dma);
+		if (ret) {
+			dev_err(dev, "RX reset pop %d\n", ret);
+			break;
+		}
+		cleanup(data, desc_dma);
+	}
+
+	k3_ringacc_ring_reset_dma(flow->ringrxfdq, occ_rx);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_reset_rx_chn);
+
+int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+			     u32 flow_num, struct cppi5_host_desc_t *desc_rx,
+			     dma_addr_t desc_dma)
+{
+	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
+
+	return k3_ringacc_ring_push(flow->ringrxfdq, &desc_dma);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_push_rx_chn);
+
+int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+			    u32 flow_num, dma_addr_t *desc_dma)
+{
+	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
+
+	return k3_ringacc_ring_pop(flow->ringrx, desc_dma);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_pop_rx_chn);
+
+int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
+			    u32 flow_num)
+{
+	struct k3_udma_glue_rx_flow *flow;
+
+	flow = &rx_chn->flows[flow_num];
+
+	flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx);
+
+	return flow->virq;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_irq);
diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c
new file mode 100644
index 000000000000..0b8f3dd6b146
--- /dev/null
+++ b/drivers/dma/ti/k3-udma-private.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+int xudma_navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
+{
+	return navss_psil_pair(ud, src_thread, dst_thread);
+}
+EXPORT_SYMBOL(xudma_navss_psil_pair);
+
+int xudma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
+{
+	return navss_psil_unpair(ud, src_thread, dst_thread);
+}
+EXPORT_SYMBOL(xudma_navss_psil_unpair);
+
+struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property)
+{
+	struct device_node *udma_node = np;
+	struct platform_device *pdev;
+	struct udma_dev *ud;
+
+	if (property) {
+		udma_node = of_parse_phandle(np, property, 0);
+		if (!udma_node) {
+			pr_err("UDMA node is not found\n");
+			return ERR_PTR(-ENODEV);
+		}
+	}
+
+	pdev = of_find_device_by_node(udma_node);
+	if (!pdev) {
+		pr_debug("UDMA device not found\n");
+		return ERR_PTR(-EPROBE_DEFER);
+	}
+
+	if (np != udma_node)
+		of_node_put(udma_node);
+
+	ud = platform_get_drvdata(pdev);
+	if (!ud) {
+		pr_debug("UDMA has not been probed\n");
+		return ERR_PTR(-EPROBE_DEFER);
+	}
+
+	return ud;
+}
+EXPORT_SYMBOL(of_xudma_dev_get);
+
+u32 xudma_dev_get_psil_base(struct udma_dev *ud)
+{
+	return ud->psil_base;
+}
+EXPORT_SYMBOL(xudma_dev_get_psil_base);
+
+struct udma_tisci_rm *xudma_dev_get_tisci_rm(struct udma_dev *ud)
+{
+	return &ud->tisci_rm;
+}
+EXPORT_SYMBOL(xudma_dev_get_tisci_rm);
+
+int xudma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
+{
+	return __udma_alloc_gp_rflow_range(ud, from, cnt);
+}
+EXPORT_SYMBOL(xudma_alloc_gp_rflow_range);
+
+int 
xudma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt) +{ + return __udma_free_gp_rflow_range(ud, from, cnt); +} +EXPORT_SYMBOL(xudma_free_gp_rflow_range); + +bool xudma_rflow_is_gp(struct udma_dev *ud, int id) +{ + return !test_bit(id, ud->rflow_gp_map); +} +EXPORT_SYMBOL(xudma_rflow_is_gp); + +#define XUDMA_GET_PUT_RESOURCE(res) \ +struct udma_##res *xudma_##res##_get(struct udma_dev *ud, int id) \ +{ \ + return __udma_reserve_##res(ud, false, id); \ +} \ +EXPORT_SYMBOL(xudma_##res##_get); \ + \ +void xudma_##res##_put(struct udma_dev *ud, struct udma_##res *p) \ +{ \ + clear_bit(p->id, ud->res##_map); \ +} \ +EXPORT_SYMBOL(xudma_##res##_put) +XUDMA_GET_PUT_RESOURCE(tchan); +XUDMA_GET_PUT_RESOURCE(rchan); + +struct udma_rflow *xudma_rflow_get(struct udma_dev *ud, int id) +{ + return __udma_get_rflow(ud, id); +} +EXPORT_SYMBOL(xudma_rflow_get); + +void xudma_rflow_put(struct udma_dev *ud, struct udma_rflow *p) +{ + __udma_put_rflow(ud, p); +} +EXPORT_SYMBOL(xudma_rflow_put); + +#define XUDMA_GET_RESOURCE_ID(res) \ +int xudma_##res##_get_id(struct udma_##res *p) \ +{ \ + return p->id; \ +} \ +EXPORT_SYMBOL(xudma_##res##_get_id) +XUDMA_GET_RESOURCE_ID(tchan); +XUDMA_GET_RESOURCE_ID(rchan); +XUDMA_GET_RESOURCE_ID(rflow); + +/* Exported register access functions */ +#define XUDMA_RT_IO_FUNCTIONS(res) \ +u32 xudma_##res##rt_read(struct udma_##res *p, int reg) \ +{ \ + return udma_##res##rt_read(p, reg); \ +} \ +EXPORT_SYMBOL(xudma_##res##rt_read); \ + \ +void xudma_##res##rt_write(struct udma_##res *p, int reg, u32 val) \ +{ \ + udma_##res##rt_write(p, reg, val); \ +} \ +EXPORT_SYMBOL(xudma_##res##rt_write) +XUDMA_RT_IO_FUNCTIONS(tchan); +XUDMA_RT_IO_FUNCTIONS(rchan); diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c new file mode 100644 index 000000000000..ea79c2df28e0 --- /dev/null +++ b/drivers/dma/ti/k3-udma.c @@ -0,0 +1,3432 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + * Author: Peter Ujfalusi <peter.ujfalusi@ti.com> + */ + +#include <linux/kernel.h> +#include <linux/dmaengine.h> +#include <linux/dma-mapping.h> +#include <linux/dmapool.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/of.h> +#include <linux/of_dma.h> +#include <linux/of_device.h> +#include <linux/of_irq.h> +#include <linux/workqueue.h> +#include <linux/completion.h> +#include <linux/soc/ti/k3-ringacc.h> +#include <linux/soc/ti/ti_sci_protocol.h> +#include <linux/soc/ti/ti_sci_inta_msi.h> +#include <linux/dma/ti-cppi5.h> + +#include "../virt-dma.h" +#include "k3-udma.h" +#include "k3-psil-priv.h" + +struct udma_static_tr { + u8 elsize; /* RPSTR0 */ + u16 elcnt; /* RPSTR0 */ + u16 bstcnt; /* RPSTR1 */ +}; + +#define K3_UDMA_MAX_RFLOWS 1024 +#define K3_UDMA_DEFAULT_RING_SIZE 16 + +/* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */ +#define UDMA_RFLOW_SRCTAG_NONE 0 +#define UDMA_RFLOW_SRCTAG_CFG_TAG 1 +#define UDMA_RFLOW_SRCTAG_FLOW_ID 2 +#define UDMA_RFLOW_SRCTAG_SRC_TAG 4 + +#define UDMA_RFLOW_DSTTAG_NONE 0 +#define UDMA_RFLOW_DSTTAG_CFG_TAG 1 +#define UDMA_RFLOW_DSTTAG_FLOW_ID 2 +#define UDMA_RFLOW_DSTTAG_DST_TAG_LO 4 +#define UDMA_RFLOW_DSTTAG_DST_TAG_HI 5 + +struct udma_chan; + +enum udma_mmr { + MMR_GCFG = 0, + MMR_RCHANRT, + MMR_TCHANRT, + MMR_LAST, +}; + +static const char * const mmr_names[] = { "gcfg", "rchanrt", "tchanrt" }; + 
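The XUDMA_* wrappers are macro-generated so that every UDMA resource type gets an identical exported surface. Written out by hand, XUDMA_RT_IO_FUNCTIONS(tchan) above expands to exactly this (derived from the macro in this patch, nothing assumed):

	u32 xudma_tchanrt_read(struct udma_tchan *p, int reg)
	{
		return udma_tchanrt_read(p, reg);
	}
	EXPORT_SYMBOL(xudma_tchanrt_read);

	void xudma_tchanrt_write(struct udma_tchan *p, int reg, u32 val)
	{
		udma_tchanrt_write(p, reg, val);
	}
	EXPORT_SYMBOL(xudma_tchanrt_write);
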
+struct udma_tchan { + void __iomem *reg_rt; + + int id; + struct k3_ring *t_ring; /* Transmit ring */ + struct k3_ring *tc_ring; /* Transmit Completion ring */ +}; + +struct udma_rflow { + int id; + struct k3_ring *fd_ring; /* Free Descriptor ring */ + struct k3_ring *r_ring; /* Receive ring */ +}; + +struct udma_rchan { + void __iomem *reg_rt; + + int id; +}; + +#define UDMA_FLAG_PDMA_ACC32 BIT(0) +#define UDMA_FLAG_PDMA_BURST BIT(1) + +struct udma_match_data { + u32 psil_base; + bool enable_memcpy_support; + u32 flags; + u32 statictr_z_mask; + u32 rchan_oes_offset; + + u8 tpl_levels; + u32 level_start_idx[]; +}; + +struct udma_dev { + struct dma_device ddev; + struct device *dev; + void __iomem *mmrs[MMR_LAST]; + const struct udma_match_data *match_data; + + size_t desc_align; /* alignment to use for descriptors */ + + struct udma_tisci_rm tisci_rm; + + struct k3_ringacc *ringacc; + + struct work_struct purge_work; + struct list_head desc_to_purge; + spinlock_t lock; + + int tchan_cnt; + int echan_cnt; + int rchan_cnt; + int rflow_cnt; + unsigned long *tchan_map; + unsigned long *rchan_map; + unsigned long *rflow_gp_map; + unsigned long *rflow_gp_map_allocated; + unsigned long *rflow_in_use; + + struct udma_tchan *tchans; + struct udma_rchan *rchans; + struct udma_rflow *rflows; + + struct udma_chan *channels; + u32 psil_base; +}; + +struct udma_hwdesc { + size_t cppi5_desc_size; + void *cppi5_desc_vaddr; + dma_addr_t cppi5_desc_paddr; + + /* TR descriptor internal pointers */ + void *tr_req_base; + struct cppi5_tr_resp_t *tr_resp_base; +}; + +struct udma_desc { + struct virt_dma_desc vd; + + bool terminated; + + enum dma_transfer_direction dir; + + struct udma_static_tr static_tr; + u32 residue; + + unsigned int sglen; + unsigned int desc_idx; /* Only used for cyclic in packet mode */ + unsigned int tr_idx; + + u32 metadata_size; + void *metadata; /* pointer to provided metadata buffer (EPIP, PSdata) */ + + unsigned int hwdesc_count; + struct udma_hwdesc hwdesc[0]; +}; + +enum udma_chan_state { + UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */ + UDMA_CHAN_IS_ACTIVE, /* Normal operation */ + UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */ +}; + +struct udma_tx_drain { + struct delayed_work work; + unsigned long jiffie; + u32 residue; +}; + +struct udma_chan_config { + bool pkt_mode; /* TR or packet */ + bool needs_epib; /* EPIB is needed for the communication or not */ + u32 psd_size; /* size of Protocol Specific Data */ + u32 metadata_size; /* (needs_epib ? 
16:0) + psd_size */ + u32 hdesc_size; /* Size of a packet descriptor in packet mode */ + bool notdpkt; /* Suppress sending TDC packet */ + int remote_thread_id; + u32 src_thread; + u32 dst_thread; + enum psil_endpoint_type ep_type; + bool enable_acc32; + bool enable_burst; + enum udma_tp_level channel_tpl; /* Channel Throughput Level */ + + enum dma_transfer_direction dir; +}; + +struct udma_chan { + struct virt_dma_chan vc; + struct dma_slave_config cfg; + struct udma_dev *ud; + struct udma_desc *desc; + struct udma_desc *terminated_desc; + struct udma_static_tr static_tr; + char *name; + + struct udma_tchan *tchan; + struct udma_rchan *rchan; + struct udma_rflow *rflow; + + bool psil_paired; + + int irq_num_ring; + int irq_num_udma; + + bool cyclic; + bool paused; + + enum udma_chan_state state; + struct completion teardown_completed; + + struct udma_tx_drain tx_drain; + + u32 bcnt; /* number of bytes completed since the start of the channel */ + u32 in_ring_cnt; /* number of descriptors in flight */ + + /* Channel configuration parameters */ + struct udma_chan_config config; + + /* dmapool for packet mode descriptors */ + bool use_dma_pool; + struct dma_pool *hdesc_pool; + + u32 id; +}; + +static inline struct udma_dev *to_udma_dev(struct dma_device *d) +{ + return container_of(d, struct udma_dev, ddev); +} + +static inline struct udma_chan *to_udma_chan(struct dma_chan *c) +{ + return container_of(c, struct udma_chan, vc.chan); +} + +static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t) +{ + return container_of(t, struct udma_desc, vd.tx); +} + +/* Generic register access functions */ +static inline u32 udma_read(void __iomem *base, int reg) +{ + return readl(base + reg); +} + +static inline void udma_write(void __iomem *base, int reg, u32 val) +{ + writel(val, base + reg); +} + +static inline void udma_update_bits(void __iomem *base, int reg, + u32 mask, u32 val) +{ + u32 tmp, orig; + + orig = readl(base + reg); + tmp = orig & ~mask; + tmp |= (val & mask); + + if (tmp != orig) + writel(tmp, base + reg); +} + +/* TCHANRT */ +static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg) +{ + if (!tchan) + return 0; + return udma_read(tchan->reg_rt, reg); +} + +static inline void udma_tchanrt_write(struct udma_tchan *tchan, int reg, + u32 val) +{ + if (!tchan) + return; + udma_write(tchan->reg_rt, reg, val); +} + +static inline void udma_tchanrt_update_bits(struct udma_tchan *tchan, int reg, + u32 mask, u32 val) +{ + if (!tchan) + return; + udma_update_bits(tchan->reg_rt, reg, mask, val); +} + +/* RCHANRT */ +static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg) +{ + if (!rchan) + return 0; + return udma_read(rchan->reg_rt, reg); +} + +static inline void udma_rchanrt_write(struct udma_rchan *rchan, int reg, + u32 val) +{ + if (!rchan) + return; + udma_write(rchan->reg_rt, reg, val); +} + +static inline void udma_rchanrt_update_bits(struct udma_rchan *rchan, int reg, + u32 mask, u32 val) +{ + if (!rchan) + return; + udma_update_bits(rchan->reg_rt, reg, mask, val); +} + +static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread) +{ + struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; + + dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; + return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci, + tisci_rm->tisci_navss_dev_id, + src_thread, dst_thread); +} + +static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread, + u32 dst_thread) +{ + struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; + + dst_thread |= 
K3_PSIL_DST_THREAD_ID_OFFSET; + return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci, + tisci_rm->tisci_navss_dev_id, + src_thread, dst_thread); +} + +static void udma_reset_uchan(struct udma_chan *uc) +{ + memset(&uc->config, 0, sizeof(uc->config)); + uc->config.remote_thread_id = -1; + uc->state = UDMA_CHAN_IS_IDLE; +} + +static void udma_dump_chan_stdata(struct udma_chan *uc) +{ + struct device *dev = uc->ud->dev; + u32 offset; + int i; + + if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) { + dev_dbg(dev, "TCHAN State data:\n"); + for (i = 0; i < 32; i++) { + offset = UDMA_TCHAN_RT_STDATA_REG + i * 4; + dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i, + udma_tchanrt_read(uc->tchan, offset)); + } + } + + if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) { + dev_dbg(dev, "RCHAN State data:\n"); + for (i = 0; i < 32; i++) { + offset = UDMA_RCHAN_RT_STDATA_REG + i * 4; + dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i, + udma_rchanrt_read(uc->rchan, offset)); + } + } +} + +static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d, + int idx) +{ + return d->hwdesc[idx].cppi5_desc_paddr; +} + +static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx) +{ + return d->hwdesc[idx].cppi5_desc_vaddr; +} + +static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc, + dma_addr_t paddr) +{ + struct udma_desc *d = uc->terminated_desc; + + if (d) { + dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d, + d->desc_idx); + + if (desc_paddr != paddr) + d = NULL; + } + + if (!d) { + d = uc->desc; + if (d) { + dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d, + d->desc_idx); + + if (desc_paddr != paddr) + d = NULL; + } + } + + return d; +} + +static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d) +{ + if (uc->use_dma_pool) { + int i; + + for (i = 0; i < d->hwdesc_count; i++) { + if (!d->hwdesc[i].cppi5_desc_vaddr) + continue; + + dma_pool_free(uc->hdesc_pool, + d->hwdesc[i].cppi5_desc_vaddr, + d->hwdesc[i].cppi5_desc_paddr); + + d->hwdesc[i].cppi5_desc_vaddr = NULL; + } + } else if (d->hwdesc[0].cppi5_desc_vaddr) { + struct udma_dev *ud = uc->ud; + + dma_free_coherent(ud->dev, d->hwdesc[0].cppi5_desc_size, + d->hwdesc[0].cppi5_desc_vaddr, + d->hwdesc[0].cppi5_desc_paddr); + + d->hwdesc[0].cppi5_desc_vaddr = NULL; + } +} + +static void udma_purge_desc_work(struct work_struct *work) +{ + struct udma_dev *ud = container_of(work, typeof(*ud), purge_work); + struct virt_dma_desc *vd, *_vd; + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&ud->lock, flags); + list_splice_tail_init(&ud->desc_to_purge, &head); + spin_unlock_irqrestore(&ud->lock, flags); + + list_for_each_entry_safe(vd, _vd, &head, node) { + struct udma_chan *uc = to_udma_chan(vd->tx.chan); + struct udma_desc *d = to_udma_desc(&vd->tx); + + udma_free_hwdesc(uc, d); + list_del(&vd->node); + kfree(d); + } + + /* If more to purge, schedule the work again */ + if (!list_empty(&ud->desc_to_purge)) + schedule_work(&ud->purge_work); +} + +static void udma_desc_free(struct virt_dma_desc *vd) +{ + struct udma_dev *ud = to_udma_dev(vd->tx.chan->device); + struct udma_chan *uc = to_udma_chan(vd->tx.chan); + struct udma_desc *d = to_udma_desc(&vd->tx); + unsigned long flags; + + if (uc->terminated_desc == d) + uc->terminated_desc = NULL; + + if (uc->use_dma_pool) { + udma_free_hwdesc(uc, d); + kfree(d); + return; + } + + spin_lock_irqsave(&ud->lock, flags); + list_add_tail(&vd->node, &ud->desc_to_purge); + 
spin_unlock_irqrestore(&ud->lock, flags); + + schedule_work(&ud->purge_work); +} + +static bool udma_is_chan_running(struct udma_chan *uc) +{ + u32 trt_ctl = 0; + u32 rrt_ctl = 0; + + if (uc->tchan) + trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG); + if (uc->rchan) + rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG); + + if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN) + return true; + + return false; +} + +static bool udma_is_chan_paused(struct udma_chan *uc) +{ + u32 val, pause_mask; + + switch (uc->desc->dir) { + case DMA_DEV_TO_MEM: + val = udma_rchanrt_read(uc->rchan, + UDMA_RCHAN_RT_PEER_RT_EN_REG); + pause_mask = UDMA_PEER_RT_EN_PAUSE; + break; + case DMA_MEM_TO_DEV: + val = udma_tchanrt_read(uc->tchan, + UDMA_TCHAN_RT_PEER_RT_EN_REG); + pause_mask = UDMA_PEER_RT_EN_PAUSE; + break; + case DMA_MEM_TO_MEM: + val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG); + pause_mask = UDMA_CHAN_RT_CTL_PAUSE; + break; + default: + return false; + } + + if (val & pause_mask) + return true; + + return false; +} + +static void udma_sync_for_device(struct udma_chan *uc, int idx) +{ + struct udma_desc *d = uc->desc; + + if (uc->cyclic && uc->config.pkt_mode) { + dma_sync_single_for_device(uc->ud->dev, + d->hwdesc[idx].cppi5_desc_paddr, + d->hwdesc[idx].cppi5_desc_size, + DMA_TO_DEVICE); + } else { + int i; + + for (i = 0; i < d->hwdesc_count; i++) { + if (!d->hwdesc[i].cppi5_desc_vaddr) + continue; + + dma_sync_single_for_device(uc->ud->dev, + d->hwdesc[i].cppi5_desc_paddr, + d->hwdesc[i].cppi5_desc_size, + DMA_TO_DEVICE); + } + } +} + +static int udma_push_to_ring(struct udma_chan *uc, int idx) +{ + struct udma_desc *d = uc->desc; + + struct k3_ring *ring = NULL; + int ret = -EINVAL; + + switch (uc->config.dir) { + case DMA_DEV_TO_MEM: + ring = uc->rflow->fd_ring; + break; + case DMA_MEM_TO_DEV: + case DMA_MEM_TO_MEM: + ring = uc->tchan->t_ring; + break; + default: + break; + } + + if (ring) { + dma_addr_t desc_addr = udma_curr_cppi5_desc_paddr(d, idx); + + wmb(); /* Ensure that writes are not moved over this point */ + udma_sync_for_device(uc, idx); + ret = k3_ringacc_ring_push(ring, &desc_addr); + uc->in_ring_cnt++; + } + + return ret; +} + +static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr) +{ + struct k3_ring *ring = NULL; + int ret = -ENOENT; + + switch (uc->config.dir) { + case DMA_DEV_TO_MEM: + ring = uc->rflow->r_ring; + break; + case DMA_MEM_TO_DEV: + case DMA_MEM_TO_MEM: + ring = uc->tchan->tc_ring; + break; + default: + break; + } + + if (ring && k3_ringacc_ring_get_occ(ring)) { + struct udma_desc *d = NULL; + + ret = k3_ringacc_ring_pop(ring, addr); + if (ret) + return ret; + + /* Teardown completion */ + if (cppi5_desc_is_tdcm(*addr)) + return ret; + + d = udma_udma_desc_from_paddr(uc, *addr); + + if (d) + dma_sync_single_for_cpu(uc->ud->dev, *addr, + d->hwdesc[0].cppi5_desc_size, + DMA_FROM_DEVICE); + rmb(); /* Ensure that reads are not moved before this point */ + + if (!ret) + uc->in_ring_cnt--; + } + + return ret; +} + +static void udma_reset_rings(struct udma_chan *uc) +{ + struct k3_ring *ring1 = NULL; + struct k3_ring *ring2 = NULL; + + switch (uc->config.dir) { + case DMA_DEV_TO_MEM: + if (uc->rchan) { + ring1 = uc->rflow->fd_ring; + ring2 = uc->rflow->r_ring; + } + break; + case DMA_MEM_TO_DEV: + case DMA_MEM_TO_MEM: + if (uc->tchan) { + ring1 = uc->tchan->t_ring; + ring2 = uc->tchan->tc_ring; + } + break; + default: + break; + } + + if (ring1) + k3_ringacc_ring_reset_dma(ring1, + 
k3_ringacc_ring_get_occ(ring1)); + if (ring2) + k3_ringacc_ring_reset(ring2); + + /* make sure we are not leaking memory by stalled descriptor */ + if (uc->terminated_desc) { + udma_desc_free(&uc->terminated_desc->vd); + uc->terminated_desc = NULL; + } + + uc->in_ring_cnt = 0; +} + +static void udma_reset_counters(struct udma_chan *uc) +{ + u32 val; + + if (uc->tchan) { + val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val); + + val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val); + + val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val); + + val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val); + } + + if (uc->rchan) { + val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG); + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val); + + val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG); + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val); + + val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG); + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val); + + val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG); + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val); + } + + uc->bcnt = 0; +} + +static int udma_reset_chan(struct udma_chan *uc, bool hard) +{ + switch (uc->config.dir) { + case DMA_DEV_TO_MEM: + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0); + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0); + break; + case DMA_MEM_TO_DEV: + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0); + break; + case DMA_MEM_TO_MEM: + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0); + break; + default: + return -EINVAL; + } + + /* Reset all counters */ + udma_reset_counters(uc); + + /* Hard reset: re-initialize the channel to reset */ + if (hard) { + struct udma_chan_config ucc_backup; + int ret; + + memcpy(&ucc_backup, &uc->config, sizeof(uc->config)); + uc->ud->ddev.device_free_chan_resources(&uc->vc.chan); + + /* restore the channel configuration */ + memcpy(&uc->config, &ucc_backup, sizeof(uc->config)); + ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan); + if (ret) + return ret; + + /* + * Setting forced teardown after forced reset helps recovering + * the rchan. 
+ */ + if (uc->config.dir == DMA_DEV_TO_MEM) + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_EN | + UDMA_CHAN_RT_CTL_TDOWN | + UDMA_CHAN_RT_CTL_FTDOWN); + } + uc->state = UDMA_CHAN_IS_IDLE; + + return 0; +} + +static void udma_start_desc(struct udma_chan *uc) +{ + struct udma_chan_config *ucc = &uc->config; + + if (ucc->pkt_mode && (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) { + int i; + + /* Push all descriptors to ring for packet mode cyclic or RX */ + for (i = 0; i < uc->desc->sglen; i++) + udma_push_to_ring(uc, i); + } else { + udma_push_to_ring(uc, 0); + } +} + +static bool udma_chan_needs_reconfiguration(struct udma_chan *uc) +{ + /* Only PDMAs have staticTR */ + if (uc->config.ep_type == PSIL_EP_NATIVE) + return false; + + /* Check if the staticTR configuration has changed for TX */ + if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr))) + return true; + + return false; +} + +static int udma_start(struct udma_chan *uc) +{ + struct virt_dma_desc *vd = vchan_next_desc(&uc->vc); + + if (!vd) { + uc->desc = NULL; + return -ENOENT; + } + + list_del(&vd->node); + + uc->desc = to_udma_desc(&vd->tx); + + /* Channel is already running and does not need reconfiguration */ + if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) { + udma_start_desc(uc); + goto out; + } + + /* Make sure that we clear the teardown bit, if it is set */ + udma_reset_chan(uc, false); + + /* Push descriptors before we start the channel */ + udma_start_desc(uc); + + switch (uc->desc->dir) { + case DMA_DEV_TO_MEM: + /* Config remote TR */ + if (uc->config.ep_type == PSIL_EP_PDMA_XY) { + u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) | + PDMA_STATIC_TR_X(uc->desc->static_tr.elsize); + const struct udma_match_data *match_data = + uc->ud->match_data; + + if (uc->config.enable_acc32) + val |= PDMA_STATIC_TR_XY_ACC32; + if (uc->config.enable_burst) + val |= PDMA_STATIC_TR_XY_BURST; + + udma_rchanrt_write(uc->rchan, + UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG, val); + + udma_rchanrt_write(uc->rchan, + UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG, + PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt, + match_data->statictr_z_mask)); + + /* save the current staticTR configuration */ + memcpy(&uc->static_tr, &uc->desc->static_tr, + sizeof(uc->static_tr)); + } + + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_EN); + + /* Enable remote */ + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_ENABLE); + + break; + case DMA_MEM_TO_DEV: + /* Config remote TR */ + if (uc->config.ep_type == PSIL_EP_PDMA_XY) { + u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) | + PDMA_STATIC_TR_X(uc->desc->static_tr.elsize); + + if (uc->config.enable_acc32) + val |= PDMA_STATIC_TR_XY_ACC32; + if (uc->config.enable_burst) + val |= PDMA_STATIC_TR_XY_BURST; + + udma_tchanrt_write(uc->tchan, + UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG, val); + + /* save the current staticTR configuration */ + memcpy(&uc->static_tr, &uc->desc->static_tr, + sizeof(uc->static_tr)); + } + + /* Enable remote */ + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_ENABLE); + + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_EN); + + break; + case DMA_MEM_TO_MEM: + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_EN); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_EN); + + break; + default: + return -EINVAL; + } + + uc->state = UDMA_CHAN_IS_ACTIVE; +out: + + return 0; +} + +static int 
udma_stop(struct udma_chan *uc) +{ + enum udma_chan_state old_state = uc->state; + + uc->state = UDMA_CHAN_IS_TERMINATING; + reinit_completion(&uc->teardown_completed); + + switch (uc->config.dir) { + case DMA_DEV_TO_MEM: + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_ENABLE | + UDMA_PEER_RT_EN_TEARDOWN); + break; + case DMA_MEM_TO_DEV: + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_ENABLE | + UDMA_PEER_RT_EN_FLUSH); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_EN | + UDMA_CHAN_RT_CTL_TDOWN); + break; + case DMA_MEM_TO_MEM: + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_EN | + UDMA_CHAN_RT_CTL_TDOWN); + break; + default: + uc->state = old_state; + complete_all(&uc->teardown_completed); + return -EINVAL; + } + + return 0; +} + +static void udma_cyclic_packet_elapsed(struct udma_chan *uc) +{ + struct udma_desc *d = uc->desc; + struct cppi5_host_desc_t *h_desc; + + h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr; + cppi5_hdesc_reset_to_original(h_desc); + udma_push_to_ring(uc, d->desc_idx); + d->desc_idx = (d->desc_idx + 1) % d->sglen; +} + +static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d) +{ + struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr; + + memcpy(d->metadata, h_desc->epib, d->metadata_size); +} + +static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d) +{ + u32 peer_bcnt, bcnt; + + /* Only TX towards PDMA is affected */ + if (uc->config.ep_type == PSIL_EP_NATIVE || + uc->config.dir != DMA_MEM_TO_DEV) + return true; + + peer_bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG); + bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG); + + if (peer_bcnt < bcnt) { + uc->tx_drain.residue = bcnt - peer_bcnt; + uc->tx_drain.jiffie = jiffies; + return false; + } + + return true; +} + +static void udma_check_tx_completion(struct work_struct *work) +{ + struct udma_chan *uc = container_of(work, typeof(*uc), + tx_drain.work.work); + bool desc_done = true; + u32 residue_diff; + unsigned long jiffie_diff, delay; + + if (uc->desc) { + residue_diff = uc->tx_drain.residue; + jiffie_diff = uc->tx_drain.jiffie; + desc_done = udma_is_desc_really_done(uc, uc->desc); + } + + if (!desc_done) { + jiffie_diff = uc->tx_drain.jiffie - jiffie_diff; + residue_diff -= uc->tx_drain.residue; + if (residue_diff) { + /* Try to guess when we should check next time */ + residue_diff /= jiffie_diff; + delay = uc->tx_drain.residue / residue_diff / 3; + if (jiffies_to_msecs(delay) < 5) + delay = 0; + } else { + /* No progress, check again in 1 second */ + delay = HZ; + } + + schedule_delayed_work(&uc->tx_drain.work, delay); + } else if (uc->desc) { + struct udma_desc *d = uc->desc; + + uc->bcnt += d->residue; + udma_start(uc); + vchan_cookie_complete(&d->vd); + } +} + +static irqreturn_t udma_ring_irq_handler(int irq, void *data) +{ + struct udma_chan *uc = data; + struct udma_desc *d; + unsigned long flags; + dma_addr_t paddr = 0; + + if (udma_pop_from_ring(uc, &paddr) || !paddr) + return IRQ_HANDLED; + + spin_lock_irqsave(&uc->vc.lock, flags); + + /* Teardown completion message */ + if (cppi5_desc_is_tdcm(paddr)) { + /* Compensate our internal pop/push counter */ + uc->in_ring_cnt++; + + complete_all(&uc->teardown_completed); + + if (uc->terminated_desc) { + udma_desc_free(&uc->terminated_desc->vd); + uc->terminated_desc = NULL; + } + + if (!uc->desc) + udma_start(uc); + + goto out; + } + + d = 
udma_udma_desc_from_paddr(uc, paddr);
+
+	if (d) {
+		dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
+								   d->desc_idx);
+		if (desc_paddr != paddr) {
+			dev_err(uc->ud->dev, "not matching descriptors!\n");
+			goto out;
+		}
+
+		if (uc->cyclic) {
+			/* push the descriptor back to the ring */
+			if (d == uc->desc) {
+				udma_cyclic_packet_elapsed(uc);
+				vchan_cyclic_callback(&d->vd);
+			}
+		} else {
+			bool desc_done = false;
+
+			if (d == uc->desc) {
+				desc_done = udma_is_desc_really_done(uc, d);
+
+				if (desc_done) {
+					uc->bcnt += d->residue;
+					udma_start(uc);
+				} else {
+					schedule_delayed_work(&uc->tx_drain.work,
+							      0);
+				}
+			}
+
+			if (desc_done)
+				vchan_cookie_complete(&d->vd);
+		}
+	}
+out:
+	spin_unlock_irqrestore(&uc->vc.lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t udma_udma_irq_handler(int irq, void *data)
+{
+	struct udma_chan *uc = data;
+	struct udma_desc *d;
+	unsigned long flags;
+
+	spin_lock_irqsave(&uc->vc.lock, flags);
+	d = uc->desc;
+	if (d) {
+		d->tr_idx = (d->tr_idx + 1) % d->sglen;
+
+		if (uc->cyclic) {
+			vchan_cyclic_callback(&d->vd);
+		} else {
+			/* TODO: figure out the real amount of data */
+			uc->bcnt += d->residue;
+			udma_start(uc);
+			vchan_cookie_complete(&d->vd);
+		}
+	}
+
+	spin_unlock_irqrestore(&uc->vc.lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
+ * @ud: UDMA device
+ * @from: Start the search from this flow id number
+ * @cnt: Number of consecutive flow ids to allocate
+ *
+ * Allocate a range of RX flow ids for future use. These flows can then be
+ * requested only by explicit flow id number. If @from is set to -1, the
+ * first free range is used; if @from is a positive value, allocation is
+ * forced exactly at the specified range of flows.
+ *
+ * Returns -ENOMEM if no free range can be found,
+ * -EEXIST if the requested range is busy,
+ * -EINVAL if wrong input values are passed.
+ * Returns the first flow id of the range on success.
+ */
+static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
+{
+	int start, tmp_from;
+	DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);
+
+	tmp_from = from;
+	if (tmp_from < 0)
+		tmp_from = ud->rchan_cnt;
+	/* default flows can't be allocated and are accessible only by id */
+	if (tmp_from < ud->rchan_cnt)
+		return -EINVAL;
+
+	if (tmp_from + cnt > ud->rflow_cnt)
+		return -EINVAL;
+
+	bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
+		  ud->rflow_cnt);
+
+	start = bitmap_find_next_zero_area(tmp,
+					   ud->rflow_cnt,
+					   tmp_from, cnt, 0);
+	if (start >= ud->rflow_cnt)
+		return -ENOMEM;
+
+	if (from >= 0 && start != from)
+		return -EEXIST;
+
+	bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
+	return start;
+}
+
+static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
+{
+	if (from < ud->rchan_cnt)
+		return -EINVAL;
+	if (from + cnt > ud->rflow_cnt)
+		return -EINVAL;
+
+	bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
+	return 0;
+}
+
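The range allocator above is what the xudma_*_gp_rflow_range exports from k3-udma-private.c hand through to glue-layer users. An illustrative calling sequence (error handling trimmed; 'ud' is assumed to come from of_xudma_dev_get(), and the ids are made up):

	int base;

	/* Find the first free range of 8 GP flows anywhere... */
	base = xudma_alloc_gp_rflow_range(ud, -1, 8);

	/* ...or insist on 8 flows starting exactly at flow id 64;
	 * this fails with -EEXIST if any of them is already taken */
	base = xudma_alloc_gp_rflow_range(ud, 64, 8);

	/* Ranges are freed wholesale, mirroring the allocation */
	xudma_free_gp_rflow_range(ud, base, 8);
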
+static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
+{
+	/*
+	 * An attempt to request an rflow by ID can be made for any rflow that
+	 * is not in use, on the assumption that the caller knows what it is
+	 * doing. The TI-SCI firmware will perform an additional permission
+	 * check anyway, so it's safe.
+	 */
+
+	if (id < 0 || id >= ud->rflow_cnt)
+		return ERR_PTR(-ENOENT);
+
+	if (test_bit(id, ud->rflow_in_use))
+		return ERR_PTR(-ENOENT);
+
+	/* GP rflow has to be allocated first */
+	if (!test_bit(id, ud->rflow_gp_map) &&
+	    !test_bit(id, ud->rflow_gp_map_allocated))
+		return ERR_PTR(-EINVAL);
+
+	dev_dbg(ud->dev, "get rflow%d\n", id);
+	set_bit(id, ud->rflow_in_use);
+	return &ud->rflows[id];
+}
+
+static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow)
+{
+	if (!test_bit(rflow->id, ud->rflow_in_use)) {
+		dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id);
+		return;
+	}
+
+	dev_dbg(ud->dev, "put rflow%d\n", rflow->id);
+	clear_bit(rflow->id, ud->rflow_in_use);
+}
+
+#define UDMA_RESERVE_RESOURCE(res)					\
+static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud,	\
+					       enum udma_tp_level tpl,	\
+					       int id)			\
+{									\
+	if (id >= 0) {							\
+		if (test_bit(id, ud->res##_map)) {			\
+			dev_err(ud->dev, "%s%d is in use\n", #res, id);	\
+			return ERR_PTR(-ENOENT);			\
+		}							\
+	} else {							\
+		int start;						\
+									\
+		if (tpl >= ud->match_data->tpl_levels)			\
+			tpl = ud->match_data->tpl_levels - 1;		\
+									\
+		start = ud->match_data->level_start_idx[tpl];		\
+									\
+		id = find_next_zero_bit(ud->res##_map, ud->res##_cnt,	\
+					start);				\
+		if (id == ud->res##_cnt) {				\
+			return ERR_PTR(-ENOENT);			\
+		}							\
+	}								\
+									\
+	set_bit(id, ud->res##_map);					\
+	return &ud->res##s[id];						\
+}
+
+UDMA_RESERVE_RESOURCE(tchan);
+UDMA_RESERVE_RESOURCE(rchan);
+
+static int udma_get_tchan(struct udma_chan *uc)
+{
+	struct udma_dev *ud = uc->ud;
+
+	if (uc->tchan) {
+		dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
+			uc->id, uc->tchan->id);
+		return 0;
+	}
+
+	uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, -1);
+	if (IS_ERR(uc->tchan))
+		return PTR_ERR(uc->tchan);
+
+	return 0;
+}
+
+static int udma_get_rchan(struct udma_chan *uc)
+{
+	struct udma_dev *ud = uc->ud;
+
+	if (uc->rchan) {
+		dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
+			uc->id, uc->rchan->id);
+		return 0;
+	}
+
+	uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, -1);
+	if (IS_ERR(uc->rchan))
+		return PTR_ERR(uc->rchan);
+
+	return 0;
+}
+
+static int udma_get_chan_pair(struct udma_chan *uc)
+{
+	struct udma_dev *ud = uc->ud;
+	const struct udma_match_data *match_data = ud->match_data;
+	int chan_id, end;
+
+	if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
+		dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
+			 uc->id, uc->tchan->id);
+		return 0;
+	}
+
+	if (uc->tchan) {
+		dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
+			uc->id, uc->tchan->id);
+		return -EBUSY;
+	} else if (uc->rchan) {
+		dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
+			uc->id, uc->rchan->id);
+		return -EBUSY;
+	}
+
+	/* Can be optimized, but let's have it like this for now */
+	end = min(ud->tchan_cnt, ud->rchan_cnt);
+	/* Try to use the highest TPL channel pair for MEM_TO_MEM channels */
+	chan_id = match_data->level_start_idx[match_data->tpl_levels - 1];
+	for (; chan_id < end; chan_id++) {
+		if (!test_bit(chan_id, ud->tchan_map) &&
+		    !test_bit(chan_id, ud->rchan_map))
+			break;
+	}
+
+	if (chan_id == end)
+		return -ENOENT;
+
+	set_bit(chan_id, ud->tchan_map);
+	set_bit(chan_id, ud->rchan_map);
+	uc->tchan = &ud->tchans[chan_id];
+	uc->rchan = &ud->rchans[chan_id];
+
+	return 0;
+}
+
+static int udma_get_rflow(struct udma_chan *uc, int flow_id)
+{
+	struct udma_dev *ud = uc->ud;
+
+	if (!uc->rchan) {
+		
dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id); + return -EINVAL; + } + + if (uc->rflow) { + dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n", + uc->id, uc->rflow->id); + return 0; + } + + uc->rflow = __udma_get_rflow(ud, flow_id); + if (IS_ERR(uc->rflow)) + return PTR_ERR(uc->rflow); + + return 0; +} + +static void udma_put_rchan(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + + if (uc->rchan) { + dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id, + uc->rchan->id); + clear_bit(uc->rchan->id, ud->rchan_map); + uc->rchan = NULL; + } +} + +static void udma_put_tchan(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + + if (uc->tchan) { + dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id, + uc->tchan->id); + clear_bit(uc->tchan->id, ud->tchan_map); + uc->tchan = NULL; + } +} + +static void udma_put_rflow(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + + if (uc->rflow) { + dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id, + uc->rflow->id); + __udma_put_rflow(ud, uc->rflow); + uc->rflow = NULL; + } +} + +static void udma_free_tx_resources(struct udma_chan *uc) +{ + if (!uc->tchan) + return; + + k3_ringacc_ring_free(uc->tchan->t_ring); + k3_ringacc_ring_free(uc->tchan->tc_ring); + uc->tchan->t_ring = NULL; + uc->tchan->tc_ring = NULL; + + udma_put_tchan(uc); +} + +static int udma_alloc_tx_resources(struct udma_chan *uc) +{ + struct k3_ring_cfg ring_cfg; + struct udma_dev *ud = uc->ud; + int ret; + + ret = udma_get_tchan(uc); + if (ret) + return ret; + + uc->tchan->t_ring = k3_ringacc_request_ring(ud->ringacc, + uc->tchan->id, 0); + if (!uc->tchan->t_ring) { + ret = -EBUSY; + goto err_tx_ring; + } + + uc->tchan->tc_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0); + if (!uc->tchan->tc_ring) { + ret = -EBUSY; + goto err_txc_ring; + } + + memset(&ring_cfg, 0, sizeof(ring_cfg)); + ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; + ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8; + ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE; + + ret = k3_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg); + ret |= k3_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg); + + if (ret) + goto err_ringcfg; + + return 0; + +err_ringcfg: + k3_ringacc_ring_free(uc->tchan->tc_ring); + uc->tchan->tc_ring = NULL; +err_txc_ring: + k3_ringacc_ring_free(uc->tchan->t_ring); + uc->tchan->t_ring = NULL; +err_tx_ring: + udma_put_tchan(uc); + + return ret; +} + +static void udma_free_rx_resources(struct udma_chan *uc) +{ + if (!uc->rchan) + return; + + if (uc->rflow) { + struct udma_rflow *rflow = uc->rflow; + + k3_ringacc_ring_free(rflow->fd_ring); + k3_ringacc_ring_free(rflow->r_ring); + rflow->fd_ring = NULL; + rflow->r_ring = NULL; + + udma_put_rflow(uc); + } + + udma_put_rchan(uc); +} + +static int udma_alloc_rx_resources(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + struct k3_ring_cfg ring_cfg; + struct udma_rflow *rflow; + int fd_ring_id; + int ret; + + ret = udma_get_rchan(uc); + if (ret) + return ret; + + /* For MEM_TO_MEM we don't need rflow or rings */ + if (uc->config.dir == DMA_MEM_TO_MEM) + return 0; + + ret = udma_get_rflow(uc, uc->rchan->id); + if (ret) { + ret = -EBUSY; + goto err_rflow; + } + + rflow = uc->rflow; + fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id; + rflow->fd_ring = k3_ringacc_request_ring(ud->ringacc, fd_ring_id, 0); + if (!rflow->fd_ring) { + ret = -EBUSY; + goto err_rx_ring; + } + + rflow->r_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0); + if (!rflow->r_ring) { + ret = -EBUSY; + goto err_rxc_ring; + } + + memset(&ring_cfg, 0, 
sizeof(ring_cfg)); + + if (uc->config.pkt_mode) + ring_cfg.size = SG_MAX_SEGMENTS; + else + ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; + + ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8; + ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE; + + ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg); + ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; + ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg); + + if (ret) + goto err_ringcfg; + + return 0; + +err_ringcfg: + k3_ringacc_ring_free(rflow->r_ring); + rflow->r_ring = NULL; +err_rxc_ring: + k3_ringacc_ring_free(rflow->fd_ring); + rflow->fd_ring = NULL; +err_rx_ring: + udma_put_rflow(uc); +err_rflow: + udma_put_rchan(uc); + + return ret; +} + +#define TISCI_TCHAN_VALID_PARAMS ( \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID) + +#define TISCI_RCHAN_VALID_PARAMS ( \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID) + +static int udma_tisci_m2m_channel_config(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; + const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; + struct udma_tchan *tchan = uc->tchan; + struct udma_rchan *rchan = uc->rchan; + int ret = 0; + + /* Non synchronized - mem to mem type of transfer */ + int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring); + struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 }; + struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 }; + + req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS; + req_tx.nav_id = tisci_rm->tisci_dev_id; + req_tx.index = tchan->id; + req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR; + req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2; + req_tx.txcq_qnum = tc_ring; + + ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); + if (ret) { + dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret); + return ret; + } + + req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS; + req_rx.nav_id = tisci_rm->tisci_dev_id; + req_rx.index = rchan->id; + req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2; + req_rx.rxcq_qnum = tc_ring; + req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR; + + ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); + if (ret) + dev_err(ud->dev, "rchan%d alloc failed %d\n", rchan->id, ret); + + return ret; +} + +static int udma_tisci_tx_channel_config(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; + const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; + struct udma_tchan *tchan = uc->tchan; + int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring); + struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 }; + u32 mode, fetch_size; + int ret = 0; + + if (uc->config.pkt_mode) { + mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR; + fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib, + uc->config.psd_size, 
0);
+	} else {
+		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
+		fetch_size = sizeof(struct cppi5_desc_hdr_t);
+	}
+
+	req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
+	req_tx.nav_id = tisci_rm->tisci_dev_id;
+	req_tx.index = tchan->id;
+	req_tx.tx_chan_type = mode;
+	req_tx.tx_supr_tdpkt = uc->config.notdpkt;
+	req_tx.tx_fetch_size = fetch_size >> 2;
+	req_tx.txcq_qnum = tc_ring;
+
+	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
+	if (ret)
+		dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
+
+	return ret;
+}
+
+static int udma_tisci_rx_channel_config(struct udma_chan *uc)
+{
+	struct udma_dev *ud = uc->ud;
+	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
+	struct udma_rchan *rchan = uc->rchan;
+	int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring);
+	int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring);
+	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
+	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
+	u32 mode, fetch_size;
+	int ret = 0;
+
+	if (uc->config.pkt_mode) {
+		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
+		fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
+						   uc->config.psd_size, 0);
+	} else {
+		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
+		fetch_size = sizeof(struct cppi5_desc_hdr_t);
+	}
+
+	req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
+	req_rx.nav_id = tisci_rm->tisci_dev_id;
+	req_rx.index = rchan->id;
+	req_rx.rx_fetch_size = fetch_size >> 2;
+	req_rx.rxcq_qnum = rx_ring;
+	req_rx.rx_chan_type = mode;
+
+	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
+	if (ret) {
+		dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
+		return ret;
+	}
+
+	flow_req.valid_params =
+		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
+		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
+		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
+		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
+		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
+		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
+		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
+		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
+		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
+		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
+		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
+		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
+		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
+
+	flow_req.nav_id = tisci_rm->tisci_dev_id;
+	flow_req.flow_index = rchan->id;
+
+	if (uc->config.needs_epib)
+		flow_req.rx_einfo_present = 1;
+	else
+		flow_req.rx_einfo_present = 0;
+	if (uc->config.psd_size)
+		flow_req.rx_psinfo_present = 1;
+	else
+		flow_req.rx_psinfo_present = 0;
+	flow_req.rx_error_handling = 1;
+	flow_req.rx_dest_qnum = rx_ring;
+	flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE;
+	flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG;
+	flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI;
+	flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO;
+	flow_req.rx_fdq0_sz0_qnum = fd_ring;
+	flow_req.rx_fdq1_qnum = fd_ring;
+	flow_req.rx_fdq2_qnum = fd_ring;
+	flow_req.rx_fdq3_qnum = fd_ring;
+
+	ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
+
+	if (ret)
+		dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret);
+
+	return ret;
+}
+
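The allocation function below composes the PSI-L source and destination thread ids for each transfer direction. A worked example, assuming psil_base = 0x1000, a peripheral remote_thread_id of 0xc400, and the destination flag being bit 15 (0x8000) — the flag value is an assumption taken from the accompanying k3-psil header, not visible in this hunk:

	/*
	 * MEM_TO_DEV: src = 0x1000 + tchan->id       (UDMA output thread)
	 *             dst = 0xc400 | 0x8000          (peripheral input thread)
	 * DEV_TO_MEM: src = 0xc400                   (peripheral output thread)
	 *             dst = (0x1000 + rchan->id) | 0x8000 (UDMA input thread)
	 *
	 * Bit 15 marks the destination (input) end of a PSI-L thread;
	 * navss_psil_pair() ORs it in once more before talking to TI-SCI,
	 * which is harmless since the operation is idempotent.
	 */
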
+static int udma_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct udma_chan *uc = to_udma_chan(chan);
+	struct udma_dev *ud = to_udma_dev(chan->device);
+	const struct udma_match_data *match_data = ud->match_data;
+	struct k3_ring *irq_ring;
+	u32 irq_udma_idx;
+	int ret;
+
+	if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) {
+		uc->use_dma_pool = true;
+		/* in case of MEM_TO_MEM we have maximum of two TRs */
+		if (uc->config.dir == DMA_MEM_TO_MEM) {
+			uc->config.hdesc_size = cppi5_trdesc_calc_size(
+					sizeof(struct cppi5_tr_type15_t), 2);
+			uc->config.pkt_mode = false;
+		}
+	}
+
+	if (uc->use_dma_pool) {
+		uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
+						 uc->config.hdesc_size,
+						 ud->desc_align,
+						 0);
+		if (!uc->hdesc_pool) {
+			dev_err(ud->ddev.dev,
+				"Descriptor pool allocation failed\n");
+			uc->use_dma_pool = false;
+			return -ENOMEM;
+		}
+	}
+
+	/*
+	 * Make sure that the completion is in a known state:
+	 * No teardown, the channel is idle
+	 */
+	reinit_completion(&uc->teardown_completed);
+	complete_all(&uc->teardown_completed);
+	uc->state = UDMA_CHAN_IS_IDLE;
+
+	switch (uc->config.dir) {
+	case DMA_MEM_TO_MEM:
+		/* Non synchronized - mem to mem type of transfer */
+		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
+			uc->id);
+
+		ret = udma_get_chan_pair(uc);
+		if (ret)
+			return ret;
+
+		ret = udma_alloc_tx_resources(uc);
+		if (ret)
+			return ret;
+
+		ret = udma_alloc_rx_resources(uc);
+		if (ret) {
+			udma_free_tx_resources(uc);
+			return ret;
+		}
+
+		uc->config.src_thread = ud->psil_base + uc->tchan->id;
+		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
+					K3_PSIL_DST_THREAD_ID_OFFSET;
+
+		irq_ring = uc->tchan->tc_ring;
+		irq_udma_idx = uc->tchan->id;
+
+		ret = udma_tisci_m2m_channel_config(uc);
+		break;
+	case DMA_MEM_TO_DEV:
+		/* Slave transfer synchronized - mem to dev (TX) transfer */
+		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
+			uc->id);
+
+		ret = udma_alloc_tx_resources(uc);
+		if (ret) {
+			uc->config.remote_thread_id = -1;
+			return ret;
+		}
+
+		uc->config.src_thread = ud->psil_base + uc->tchan->id;
+		uc->config.dst_thread = uc->config.remote_thread_id;
+		uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
+
+		irq_ring = uc->tchan->tc_ring;
+		irq_udma_idx = uc->tchan->id;
+
+		ret = udma_tisci_tx_channel_config(uc);
+		break;
+	case DMA_DEV_TO_MEM:
+		/* Slave transfer synchronized - dev to mem (RX) transfer */
+		dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
+			uc->id);
+
+		ret = udma_alloc_rx_resources(uc);
+		if (ret) {
+			uc->config.remote_thread_id = -1;
+			return ret;
+		}
+
+		uc->config.src_thread = uc->config.remote_thread_id;
+		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
+					K3_PSIL_DST_THREAD_ID_OFFSET;
+
+		irq_ring = uc->rflow->r_ring;
+		irq_udma_idx = match_data->rchan_oes_offset + uc->rchan->id;
+
+		ret = udma_tisci_rx_channel_config(uc);
+		break;
+	default:
+		/* Can not happen */
+		dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
+			__func__, uc->id, uc->config.dir);
+		return -EINVAL;
+	}
+
+	/* check if the channel configuration was successful */
+	if (ret)
+		goto err_res_free;
+
+	if (udma_is_chan_running(uc)) {
+		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
+		udma_stop(uc);
+		if (udma_is_chan_running(uc)) {
+			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
+			goto err_res_free;
+		}
+	}
+
+	/* PSI-L pairing */
+	ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
+	if (ret) {
+		dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
+			uc->config.src_thread, uc->config.dst_thread);
+		goto err_res_free;
+	}
+
+	uc->psil_paired = true;
+
dev_err(ud->dev, "Failed to get ring irq (index: %u)\n", + k3_ringacc_get_ring_id(irq_ring)); + ret = -EINVAL; + goto err_psi_free; + } + + ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler, + IRQF_TRIGGER_HIGH, uc->name, uc); + if (ret) { + dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id); + goto err_irq_free; + } + + /* Event from UDMA (TR events) only needed for slave TR mode channels */ + if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) { + uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev, + irq_udma_idx); + if (uc->irq_num_udma <= 0) { + dev_err(ud->dev, "Failed to get udma irq (index: %u)\n", + irq_udma_idx); + free_irq(uc->irq_num_ring, uc); + ret = -EINVAL; + goto err_irq_free; + } + + ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0, + uc->name, uc); + if (ret) { + dev_err(ud->dev, "chan%d: UDMA irq request failed\n", + uc->id); + free_irq(uc->irq_num_ring, uc); + goto err_irq_free; + } + } else { + uc->irq_num_udma = 0; + } + + udma_reset_rings(uc); + + INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work, + udma_check_tx_completion); + return 0; + +err_irq_free: + uc->irq_num_ring = 0; + uc->irq_num_udma = 0; +err_psi_free: + navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread); + uc->psil_paired = false; +err_res_free: + udma_free_tx_resources(uc); + udma_free_rx_resources(uc); + + udma_reset_uchan(uc); + + if (uc->use_dma_pool) { + dma_pool_destroy(uc->hdesc_pool); + uc->use_dma_pool = false; + } + + return ret; +} + +static int udma_slave_config(struct dma_chan *chan, + struct dma_slave_config *cfg) +{ + struct udma_chan *uc = to_udma_chan(chan); + + memcpy(&uc->cfg, cfg, sizeof(uc->cfg)); + + return 0; +} + +static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc, + size_t tr_size, int tr_count, + enum dma_transfer_direction dir) +{ + struct udma_hwdesc *hwdesc; + struct cppi5_desc_hdr_t *tr_desc; + struct udma_desc *d; + u32 reload_count = 0; + u32 ring_id; + + switch (tr_size) { + case 16: + case 32: + case 64: + case 128: + break; + default: + dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size); + return NULL; + } + + /* We have only one descriptor containing multiple TRs */ + d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT); + if (!d) + return NULL; + + d->sglen = tr_count; + + d->hwdesc_count = 1; + hwdesc = &d->hwdesc[0]; + + /* Allocate memory for DMA ring descriptor */ + if (uc->use_dma_pool) { + hwdesc->cppi5_desc_size = uc->config.hdesc_size; + hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool, + GFP_NOWAIT, + &hwdesc->cppi5_desc_paddr); + } else { + hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, + tr_count); + hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size, + uc->ud->desc_align); + hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev, + hwdesc->cppi5_desc_size, + &hwdesc->cppi5_desc_paddr, + GFP_NOWAIT); + } + + if (!hwdesc->cppi5_desc_vaddr) { + kfree(d); + return NULL; + } + + /* Start of the TR req records */ + hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size; + /* Start address of the TR response array */ + hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count; + + tr_desc = hwdesc->cppi5_desc_vaddr; + + if (uc->cyclic) + reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE; + + if (dir == DMA_DEV_TO_MEM) + ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring); + else + ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring); + + cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count); + cppi5_desc_set_pktids(tr_desc, uc->id, 
+static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc,
+					    size_t tr_size, int tr_count,
+					    enum dma_transfer_direction dir)
+{
+	struct udma_hwdesc *hwdesc;
+	struct cppi5_desc_hdr_t *tr_desc;
+	struct udma_desc *d;
+	u32 reload_count = 0;
+	u32 ring_id;
+
+	switch (tr_size) {
+	case 16:
+	case 32:
+	case 64:
+	case 128:
+		break;
+	default:
+		dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size);
+		return NULL;
+	}
+
+	/* We have only one descriptor containing multiple TRs */
+	d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT);
+	if (!d)
+		return NULL;
+
+	d->sglen = tr_count;
+
+	d->hwdesc_count = 1;
+	hwdesc = &d->hwdesc[0];
+
+	/* Allocate memory for DMA ring descriptor */
+	if (uc->use_dma_pool) {
+		hwdesc->cppi5_desc_size = uc->config.hdesc_size;
+		hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
+						GFP_NOWAIT,
+						&hwdesc->cppi5_desc_paddr);
+	} else {
+		hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size,
+								 tr_count);
+		hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
+						uc->ud->desc_align);
+		hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev,
+						hwdesc->cppi5_desc_size,
+						&hwdesc->cppi5_desc_paddr,
+						GFP_NOWAIT);
+	}
+
+	if (!hwdesc->cppi5_desc_vaddr) {
+		kfree(d);
+		return NULL;
+	}
+
+	/* Start of the TR req records */
+	hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
+	/* Start address of the TR response array */
+	hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count;
+
+	tr_desc = hwdesc->cppi5_desc_vaddr;
+
+	if (uc->cyclic)
+		reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE;
+
+	if (dir == DMA_DEV_TO_MEM)
+		ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
+	else
+		ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
+
+	cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count);
+	cppi5_desc_set_pktids(tr_desc, uc->id,
+			      CPPI5_INFO1_DESC_FLOWID_DEFAULT);
+	cppi5_desc_set_retpolicy(tr_desc, 0, ring_id);
+
+	return d;
+}
+
+static struct udma_desc *
+udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
+		      unsigned int sglen, enum dma_transfer_direction dir,
+		      unsigned long tx_flags, void *context)
+{
+	enum dma_slave_buswidth dev_width;
+	struct scatterlist *sgent;
+	struct udma_desc *d;
+	size_t tr_size;
+	struct cppi5_tr_type1_t *tr_req = NULL;
+	unsigned int i;
+	u32 burst;
+
+	if (dir == DMA_DEV_TO_MEM) {
+		dev_width = uc->cfg.src_addr_width;
+		burst = uc->cfg.src_maxburst;
+	} else if (dir == DMA_MEM_TO_DEV) {
+		dev_width = uc->cfg.dst_addr_width;
+		burst = uc->cfg.dst_maxburst;
+	} else {
+		dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
+		return NULL;
+	}
+
+	if (!burst)
+		burst = 1;
+
+	/* Now allocate and setup the descriptor. */
+	tr_size = sizeof(struct cppi5_tr_type1_t);
+	d = udma_alloc_tr_desc(uc, tr_size, sglen, dir);
+	if (!d)
+		return NULL;
+
+	d->sglen = sglen;
+
+	tr_req = d->hwdesc[0].tr_req_base;
+	for_each_sg(sgl, sgent, sglen, i) {
+		d->residue += sg_dma_len(sgent);
+
+		cppi5_tr_init(&tr_req[i].flags, CPPI5_TR_TYPE1, false, false,
+			      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+		cppi5_tr_csf_set(&tr_req[i].flags, CPPI5_TR_CSF_SUPR_EVT);
+
+		tr_req[i].addr = sg_dma_address(sgent);
+		tr_req[i].icnt0 = burst * dev_width;
+		tr_req[i].dim1 = burst * dev_width;
+		tr_req[i].icnt1 = sg_dma_len(sgent) / tr_req[i].icnt0;
+	}
+
+	cppi5_tr_csf_set(&tr_req[i - 1].flags, CPPI5_TR_CSF_EOP);
+
+	return d;
+}
+
+static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
+				   enum dma_slave_buswidth dev_width,
+				   u16 elcnt)
+{
+	if (uc->config.ep_type != PSIL_EP_PDMA_XY)
+		return 0;
+
+	/* Bus width translates to the element size (ES) */
+	switch (dev_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		d->static_tr.elsize = 0;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		d->static_tr.elsize = 1;
+		break;
+	case DMA_SLAVE_BUSWIDTH_3_BYTES:
+		d->static_tr.elsize = 2;
+		break;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		d->static_tr.elsize = 3;
+		break;
+	case DMA_SLAVE_BUSWIDTH_8_BYTES:
+		d->static_tr.elsize = 4;
+		break;
+	default: /* not reached */
+		return -EINVAL;
+	}
+
+	d->static_tr.elcnt = elcnt;
+
+	/*
+	 * The PDMA must close the packet when the channel is in packet mode.
+	 * For TR mode, when the channel is not cyclic, we also need the PDMA
+	 * to close the packet, otherwise the transfer will stall because the
+	 * PDMA holds on to the data it has received from the peripheral.
+ */ + if (uc->config.pkt_mode || !uc->cyclic) { + unsigned int div = dev_width * elcnt; + + if (uc->cyclic) + d->static_tr.bstcnt = d->residue / d->sglen / div; + else + d->static_tr.bstcnt = d->residue / div; + + if (uc->config.dir == DMA_DEV_TO_MEM && + d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask) + return -EINVAL; + } else { + d->static_tr.bstcnt = 0; + } + + return 0; +} + +static struct udma_desc * +udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl, + unsigned int sglen, enum dma_transfer_direction dir, + unsigned long tx_flags, void *context) +{ + struct scatterlist *sgent; + struct cppi5_host_desc_t *h_desc = NULL; + struct udma_desc *d; + u32 ring_id; + unsigned int i; + + d = kzalloc(sizeof(*d) + sglen * sizeof(d->hwdesc[0]), GFP_NOWAIT); + if (!d) + return NULL; + + d->sglen = sglen; + d->hwdesc_count = sglen; + + if (dir == DMA_DEV_TO_MEM) + ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring); + else + ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring); + + for_each_sg(sgl, sgent, sglen, i) { + struct udma_hwdesc *hwdesc = &d->hwdesc[i]; + dma_addr_t sg_addr = sg_dma_address(sgent); + struct cppi5_host_desc_t *desc; + size_t sg_len = sg_dma_len(sgent); + + hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool, + GFP_NOWAIT, + &hwdesc->cppi5_desc_paddr); + if (!hwdesc->cppi5_desc_vaddr) { + dev_err(uc->ud->dev, + "descriptor%d allocation failed\n", i); + + udma_free_hwdesc(uc, d); + kfree(d); + return NULL; + } + + d->residue += sg_len; + hwdesc->cppi5_desc_size = uc->config.hdesc_size; + desc = hwdesc->cppi5_desc_vaddr; + + if (i == 0) { + cppi5_hdesc_init(desc, 0, 0); + /* Flow and Packed ID */ + cppi5_desc_set_pktids(&desc->hdr, uc->id, + CPPI5_INFO1_DESC_FLOWID_DEFAULT); + cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id); + } else { + cppi5_hdesc_reset_hbdesc(desc); + cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff); + } + + /* attach the sg buffer to the descriptor */ + cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len); + + /* Attach link as host buffer descriptor */ + if (h_desc) + cppi5_hdesc_link_hbdesc(h_desc, + hwdesc->cppi5_desc_paddr); + + if (dir == DMA_MEM_TO_DEV) + h_desc = desc; + } + + if (d->residue >= SZ_4M) { + dev_err(uc->ud->dev, + "%s: Transfer size %u is over the supported 4M range\n", + __func__, d->residue); + udma_free_hwdesc(uc, d); + kfree(d); + return NULL; + } + + h_desc = d->hwdesc[0].cppi5_desc_vaddr; + cppi5_hdesc_set_pktlen(h_desc, d->residue); + + return d; +} + +static int udma_attach_metadata(struct dma_async_tx_descriptor *desc, + void *data, size_t len) +{ + struct udma_desc *d = to_udma_desc(desc); + struct udma_chan *uc = to_udma_chan(desc->chan); + struct cppi5_host_desc_t *h_desc; + u32 psd_size = len; + u32 flags = 0; + + if (!uc->config.pkt_mode || !uc->config.metadata_size) + return -ENOTSUPP; + + if (!data || len > uc->config.metadata_size) + return -EINVAL; + + if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE) + return -EINVAL; + + h_desc = d->hwdesc[0].cppi5_desc_vaddr; + if (d->dir == DMA_MEM_TO_DEV) + memcpy(h_desc->epib, data, len); + + if (uc->config.needs_epib) + psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE; + + d->metadata = data; + d->metadata_size = len; + if (uc->config.needs_epib) + flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT; + + cppi5_hdesc_update_flags(h_desc, flags); + cppi5_hdesc_update_psdata_size(h_desc, psd_size); + + return 0; +} + +static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc, + size_t *payload_len, size_t *max_len) +{ + struct 
udma_desc *d = to_udma_desc(desc); + struct udma_chan *uc = to_udma_chan(desc->chan); + struct cppi5_host_desc_t *h_desc; + + if (!uc->config.pkt_mode || !uc->config.metadata_size) + return ERR_PTR(-ENOTSUPP); + + h_desc = d->hwdesc[0].cppi5_desc_vaddr; + + *max_len = uc->config.metadata_size; + + *payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ? + CPPI5_INFO0_HDESC_EPIB_SIZE : 0; + *payload_len += cppi5_hdesc_get_psdata_size(h_desc); + + return h_desc->epib; +} + +static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc, + size_t payload_len) +{ + struct udma_desc *d = to_udma_desc(desc); + struct udma_chan *uc = to_udma_chan(desc->chan); + struct cppi5_host_desc_t *h_desc; + u32 psd_size = payload_len; + u32 flags = 0; + + if (!uc->config.pkt_mode || !uc->config.metadata_size) + return -ENOTSUPP; + + if (payload_len > uc->config.metadata_size) + return -EINVAL; + + if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE) + return -EINVAL; + + h_desc = d->hwdesc[0].cppi5_desc_vaddr; + + if (uc->config.needs_epib) { + psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE; + flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT; + } + + cppi5_hdesc_update_flags(h_desc, flags); + cppi5_hdesc_update_psdata_size(h_desc, psd_size); + + return 0; +} + +static struct dma_descriptor_metadata_ops metadata_ops = { + .attach = udma_attach_metadata, + .get_ptr = udma_get_metadata_ptr, + .set_len = udma_set_metadata_len, +}; + +static struct dma_async_tx_descriptor * +udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sglen, enum dma_transfer_direction dir, + unsigned long tx_flags, void *context) +{ + struct udma_chan *uc = to_udma_chan(chan); + enum dma_slave_buswidth dev_width; + struct udma_desc *d; + u32 burst; + + if (dir != uc->config.dir) { + dev_err(chan->device->dev, + "%s: chan%d is for %s, not supporting %s\n", + __func__, uc->id, + dmaengine_get_direction_text(uc->config.dir), + dmaengine_get_direction_text(dir)); + return NULL; + } + + if (dir == DMA_DEV_TO_MEM) { + dev_width = uc->cfg.src_addr_width; + burst = uc->cfg.src_maxburst; + } else if (dir == DMA_MEM_TO_DEV) { + dev_width = uc->cfg.dst_addr_width; + burst = uc->cfg.dst_maxburst; + } else { + dev_err(chan->device->dev, "%s: bad direction?\n", __func__); + return NULL; + } + + if (!burst) + burst = 1; + + if (uc->config.pkt_mode) + d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags, + context); + else + d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags, + context); + + if (!d) + return NULL; + + d->dir = dir; + d->desc_idx = 0; + d->tr_idx = 0; + + /* static TR for remote PDMA */ + if (udma_configure_statictr(uc, d, dev_width, burst)) { + dev_err(uc->ud->dev, + "%s: StaticTR Z is limited to maximum 4095 (%u)\n", + __func__, d->static_tr.bstcnt); + + udma_free_hwdesc(uc, d); + kfree(d); + return NULL; + } + + if (uc->config.metadata_size) + d->vd.tx.metadata_ops = &metadata_ops; + + return vchan_tx_prep(&uc->vc, &d->vd, tx_flags); +} + +static struct udma_desc * +udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr, + size_t buf_len, size_t period_len, + enum dma_transfer_direction dir, unsigned long flags) +{ + enum dma_slave_buswidth dev_width; + struct udma_desc *d; + size_t tr_size; + struct cppi5_tr_type1_t *tr_req; + unsigned int i; + unsigned int periods = buf_len / period_len; + u32 burst; + + if (dir == DMA_DEV_TO_MEM) { + dev_width = uc->cfg.src_addr_width; + burst = uc->cfg.src_maxburst; + } else if (dir == DMA_MEM_TO_DEV) { + dev_width = 
uc->cfg.dst_addr_width; + burst = uc->cfg.dst_maxburst; + } else { + dev_err(uc->ud->dev, "%s: bad direction?\n", __func__); + return NULL; + } + + if (!burst) + burst = 1; + + /* Now allocate and setup the descriptor. */ + tr_size = sizeof(struct cppi5_tr_type1_t); + d = udma_alloc_tr_desc(uc, tr_size, periods, dir); + if (!d) + return NULL; + + tr_req = d->hwdesc[0].tr_req_base; + for (i = 0; i < periods; i++) { + cppi5_tr_init(&tr_req[i].flags, CPPI5_TR_TYPE1, false, false, + CPPI5_TR_EVENT_SIZE_COMPLETION, 0); + + tr_req[i].addr = buf_addr + period_len * i; + tr_req[i].icnt0 = dev_width; + tr_req[i].icnt1 = period_len / dev_width; + tr_req[i].dim1 = dev_width; + + if (!(flags & DMA_PREP_INTERRUPT)) + cppi5_tr_csf_set(&tr_req[i].flags, + CPPI5_TR_CSF_SUPR_EVT); + } + + return d; +} + +static struct udma_desc * +udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr, + size_t buf_len, size_t period_len, + enum dma_transfer_direction dir, unsigned long flags) +{ + struct udma_desc *d; + u32 ring_id; + int i; + int periods = buf_len / period_len; + + if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1)) + return NULL; + + if (period_len >= SZ_4M) + return NULL; + + d = kzalloc(sizeof(*d) + periods * sizeof(d->hwdesc[0]), GFP_NOWAIT); + if (!d) + return NULL; + + d->hwdesc_count = periods; + + /* TODO: re-check this... */ + if (dir == DMA_DEV_TO_MEM) + ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring); + else + ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring); + + for (i = 0; i < periods; i++) { + struct udma_hwdesc *hwdesc = &d->hwdesc[i]; + dma_addr_t period_addr = buf_addr + (period_len * i); + struct cppi5_host_desc_t *h_desc; + + hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool, + GFP_NOWAIT, + &hwdesc->cppi5_desc_paddr); + if (!hwdesc->cppi5_desc_vaddr) { + dev_err(uc->ud->dev, + "descriptor%d allocation failed\n", i); + + udma_free_hwdesc(uc, d); + kfree(d); + return NULL; + } + + hwdesc->cppi5_desc_size = uc->config.hdesc_size; + h_desc = hwdesc->cppi5_desc_vaddr; + + cppi5_hdesc_init(h_desc, 0, 0); + cppi5_hdesc_set_pktlen(h_desc, period_len); + + /* Flow and Packed ID */ + cppi5_desc_set_pktids(&h_desc->hdr, uc->id, + CPPI5_INFO1_DESC_FLOWID_DEFAULT); + cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id); + + /* attach each period to a new descriptor */ + cppi5_hdesc_attach_buf(h_desc, + period_addr, period_len, + period_addr, period_len); + } + + return d; +} + +static struct dma_async_tx_descriptor * +udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, + size_t period_len, enum dma_transfer_direction dir, + unsigned long flags) +{ + struct udma_chan *uc = to_udma_chan(chan); + enum dma_slave_buswidth dev_width; + struct udma_desc *d; + u32 burst; + + if (dir != uc->config.dir) { + dev_err(chan->device->dev, + "%s: chan%d is for %s, not supporting %s\n", + __func__, uc->id, + dmaengine_get_direction_text(uc->config.dir), + dmaengine_get_direction_text(dir)); + return NULL; + } + + uc->cyclic = true; + + if (dir == DMA_DEV_TO_MEM) { + dev_width = uc->cfg.src_addr_width; + burst = uc->cfg.src_maxburst; + } else if (dir == DMA_MEM_TO_DEV) { + dev_width = uc->cfg.dst_addr_width; + burst = uc->cfg.dst_maxburst; + } else { + dev_err(uc->ud->dev, "%s: bad direction?\n", __func__); + return NULL; + } + + if (!burst) + burst = 1; + + if (uc->config.pkt_mode) + d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len, + dir, flags); + else + d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len, + dir, flags); + + if (!d) + 
return NULL; + + d->sglen = buf_len / period_len; + + d->dir = dir; + d->residue = buf_len; + + /* static TR for remote PDMA */ + if (udma_configure_statictr(uc, d, dev_width, burst)) { + dev_err(uc->ud->dev, + "%s: StaticTR Z is limited to maximum 4095 (%u)\n", + __func__, d->static_tr.bstcnt); + + udma_free_hwdesc(uc, d); + kfree(d); + return NULL; + } + + if (uc->config.metadata_size) + d->vd.tx.metadata_ops = &metadata_ops; + + return vchan_tx_prep(&uc->vc, &d->vd, flags); +} + +static struct dma_async_tx_descriptor * +udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, + size_t len, unsigned long tx_flags) +{ + struct udma_chan *uc = to_udma_chan(chan); + struct udma_desc *d; + struct cppi5_tr_type15_t *tr_req; + int num_tr; + size_t tr_size = sizeof(struct cppi5_tr_type15_t); + u16 tr0_cnt0, tr0_cnt1, tr1_cnt0; + + if (uc->config.dir != DMA_MEM_TO_MEM) { + dev_err(chan->device->dev, + "%s: chan%d is for %s, not supporting %s\n", + __func__, uc->id, + dmaengine_get_direction_text(uc->config.dir), + dmaengine_get_direction_text(DMA_MEM_TO_MEM)); + return NULL; + } + + if (len < SZ_64K) { + num_tr = 1; + tr0_cnt0 = len; + tr0_cnt1 = 1; + } else { + unsigned long align_to = __ffs(src | dest); + + if (align_to > 3) + align_to = 3; + /* + * Keep simple: tr0: SZ_64K-alignment blocks, + * tr1: the remaining + */ + num_tr = 2; + tr0_cnt0 = (SZ_64K - BIT(align_to)); + if (len / tr0_cnt0 >= SZ_64K) { + dev_err(uc->ud->dev, "size %zu is not supported\n", + len); + return NULL; + } + + tr0_cnt1 = len / tr0_cnt0; + tr1_cnt0 = len % tr0_cnt0; + } + + d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM); + if (!d) + return NULL; + + d->dir = DMA_MEM_TO_MEM; + d->desc_idx = 0; + d->tr_idx = 0; + d->residue = len; + + tr_req = d->hwdesc[0].tr_req_base; + + cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true, + CPPI5_TR_EVENT_SIZE_COMPLETION, 0); + cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT); + + tr_req[0].addr = src; + tr_req[0].icnt0 = tr0_cnt0; + tr_req[0].icnt1 = tr0_cnt1; + tr_req[0].icnt2 = 1; + tr_req[0].icnt3 = 1; + tr_req[0].dim1 = tr0_cnt0; + + tr_req[0].daddr = dest; + tr_req[0].dicnt0 = tr0_cnt0; + tr_req[0].dicnt1 = tr0_cnt1; + tr_req[0].dicnt2 = 1; + tr_req[0].dicnt3 = 1; + tr_req[0].ddim1 = tr0_cnt0; + + if (num_tr == 2) { + cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true, + CPPI5_TR_EVENT_SIZE_COMPLETION, 0); + cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT); + + tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0; + tr_req[1].icnt0 = tr1_cnt0; + tr_req[1].icnt1 = 1; + tr_req[1].icnt2 = 1; + tr_req[1].icnt3 = 1; + + tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0; + tr_req[1].dicnt0 = tr1_cnt0; + tr_req[1].dicnt1 = 1; + tr_req[1].dicnt2 = 1; + tr_req[1].dicnt3 = 1; + } + + cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP); + + if (uc->config.metadata_size) + d->vd.tx.metadata_ops = &metadata_ops; + + return vchan_tx_prep(&uc->vc, &d->vd, tx_flags); +} + +static void udma_issue_pending(struct dma_chan *chan) +{ + struct udma_chan *uc = to_udma_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&uc->vc.lock, flags); + + /* If we have something pending and no active descriptor, then */ + if (vchan_issue_pending(&uc->vc) && !uc->desc) { + /* + * start a descriptor if the channel is NOT [marked as + * terminating _and_ it is still running (teardown has not + * completed yet)]. 
+ */ + if (!(uc->state == UDMA_CHAN_IS_TERMINATING && + udma_is_chan_running(uc))) + udma_start(uc); + } + + spin_unlock_irqrestore(&uc->vc.lock, flags); +} + +static enum dma_status udma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct udma_chan *uc = to_udma_chan(chan); + enum dma_status ret; + unsigned long flags; + + spin_lock_irqsave(&uc->vc.lock, flags); + + ret = dma_cookie_status(chan, cookie, txstate); + + if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc)) + ret = DMA_PAUSED; + + if (ret == DMA_COMPLETE || !txstate) + goto out; + + if (uc->desc && uc->desc->vd.tx.cookie == cookie) { + u32 peer_bcnt = 0; + u32 bcnt = 0; + u32 residue = uc->desc->residue; + u32 delay = 0; + + if (uc->desc->dir == DMA_MEM_TO_DEV) { + bcnt = udma_tchanrt_read(uc->tchan, + UDMA_TCHAN_RT_SBCNT_REG); + + if (uc->config.ep_type != PSIL_EP_NATIVE) { + peer_bcnt = udma_tchanrt_read(uc->tchan, + UDMA_TCHAN_RT_PEER_BCNT_REG); + + if (bcnt > peer_bcnt) + delay = bcnt - peer_bcnt; + } + } else if (uc->desc->dir == DMA_DEV_TO_MEM) { + bcnt = udma_rchanrt_read(uc->rchan, + UDMA_RCHAN_RT_BCNT_REG); + + if (uc->config.ep_type != PSIL_EP_NATIVE) { + peer_bcnt = udma_rchanrt_read(uc->rchan, + UDMA_RCHAN_RT_PEER_BCNT_REG); + + if (peer_bcnt > bcnt) + delay = peer_bcnt - bcnt; + } + } else { + bcnt = udma_tchanrt_read(uc->tchan, + UDMA_TCHAN_RT_BCNT_REG); + } + + bcnt -= uc->bcnt; + if (bcnt && !(bcnt % uc->desc->residue)) + residue = 0; + else + residue -= bcnt % uc->desc->residue; + + if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) { + ret = DMA_COMPLETE; + delay = 0; + } + + dma_set_residue(txstate, residue); + dma_set_in_flight_bytes(txstate, delay); + + } else { + ret = DMA_COMPLETE; + } + +out: + spin_unlock_irqrestore(&uc->vc.lock, flags); + return ret; +} + +static int udma_pause(struct dma_chan *chan) +{ + struct udma_chan *uc = to_udma_chan(chan); + + if (!uc->desc) + return -EINVAL; + + /* pause the channel */ + switch (uc->desc->dir) { + case DMA_DEV_TO_MEM: + udma_rchanrt_update_bits(uc->rchan, + UDMA_RCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_PAUSE, + UDMA_PEER_RT_EN_PAUSE); + break; + case DMA_MEM_TO_DEV: + udma_tchanrt_update_bits(uc->tchan, + UDMA_TCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_PAUSE, + UDMA_PEER_RT_EN_PAUSE); + break; + case DMA_MEM_TO_MEM: + udma_tchanrt_update_bits(uc->tchan, UDMA_TCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_PAUSE, + UDMA_CHAN_RT_CTL_PAUSE); + break; + default: + return -EINVAL; + } + + return 0; +} + +static int udma_resume(struct dma_chan *chan) +{ + struct udma_chan *uc = to_udma_chan(chan); + + if (!uc->desc) + return -EINVAL; + + /* resume the channel */ + switch (uc->desc->dir) { + case DMA_DEV_TO_MEM: + udma_rchanrt_update_bits(uc->rchan, + UDMA_RCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_PAUSE, 0); + + break; + case DMA_MEM_TO_DEV: + udma_tchanrt_update_bits(uc->tchan, + UDMA_TCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_PAUSE, 0); + break; + case DMA_MEM_TO_MEM: + udma_tchanrt_update_bits(uc->tchan, UDMA_TCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_PAUSE, 0); + break; + default: + return -EINVAL; + } + + return 0; +} + +static int udma_terminate_all(struct dma_chan *chan) +{ + struct udma_chan *uc = to_udma_chan(chan); + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&uc->vc.lock, flags); + + if (udma_is_chan_running(uc)) + udma_stop(uc); + + if (uc->desc) { + uc->terminated_desc = uc->desc; + uc->desc = NULL; + uc->terminated_desc->terminated = true; + cancel_delayed_work(&uc->tx_drain.work); + } + 
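The pause/resume callbacks above reduce to a read-modify-write of a single PAUSE bit, either in the peer (PSI-L) RT_EN register or in the channel's own RT control register. A minimal sketch of that pattern, assuming plain MMIO pointers: reg_read()/reg_write() are illustrative stand-ins for the driver's udma_*chanrt_read()/write() helpers, and the BIT(29) value mirrors the UDMA_PEER_RT_EN_PAUSE / UDMA_CHAN_RT_CTL_PAUSE definitions added in k3-udma.h later in this patch.

	#include <stdint.h>

	#define UDMA_PEER_RT_EN_PAUSE	(1u << 29)	/* matches BIT(29) in k3-udma.h */

	/* Hypothetical MMIO accessors standing in for udma_*chanrt_read/write(). */
	static uint32_t reg_read(volatile uint32_t *reg)  { return *reg; }
	static void reg_write(volatile uint32_t *reg, uint32_t v) { *reg = v; }

	/* Read-modify-write, as udma_*chanrt_update_bits() does in the driver. */
	static void update_bits(volatile uint32_t *reg, uint32_t mask, uint32_t val)
	{
		uint32_t v = reg_read(reg);

		v &= ~mask;
		v |= (val & mask);
		reg_write(reg, v);
	}

	/* Pause sets the bit, resume clears it; same mask in both directions. */
	static void chan_pause(volatile uint32_t *peer_rt_en)
	{
		update_bits(peer_rt_en, UDMA_PEER_RT_EN_PAUSE, UDMA_PEER_RT_EN_PAUSE);
	}

	static void chan_resume(volatile uint32_t *peer_rt_en)
	{
		update_bits(peer_rt_en, UDMA_PEER_RT_EN_PAUSE, 0);
	}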
+ uc->paused = false; + + vchan_get_all_descriptors(&uc->vc, &head); + spin_unlock_irqrestore(&uc->vc.lock, flags); + vchan_dma_desc_free_list(&uc->vc, &head); + + return 0; +} + +static void udma_synchronize(struct dma_chan *chan) +{ + struct udma_chan *uc = to_udma_chan(chan); + unsigned long timeout = msecs_to_jiffies(1000); + + vchan_synchronize(&uc->vc); + + if (uc->state == UDMA_CHAN_IS_TERMINATING) { + timeout = wait_for_completion_timeout(&uc->teardown_completed, + timeout); + if (!timeout) { + dev_warn(uc->ud->dev, "chan%d teardown timeout!\n", + uc->id); + udma_dump_chan_stdata(uc); + udma_reset_chan(uc, true); + } + } + + udma_reset_chan(uc, false); + if (udma_is_chan_running(uc)) + dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id); + + cancel_delayed_work_sync(&uc->tx_drain.work); + udma_reset_rings(uc); +} + +static void udma_desc_pre_callback(struct virt_dma_chan *vc, + struct virt_dma_desc *vd, + struct dmaengine_result *result) +{ + struct udma_chan *uc = to_udma_chan(&vc->chan); + struct udma_desc *d; + + if (!vd) + return; + + d = to_udma_desc(&vd->tx); + + if (d->metadata_size) + udma_fetch_epib(uc, d); + + /* Provide residue information for the client */ + if (result) { + void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx); + + if (cppi5_desc_get_type(desc_vaddr) == + CPPI5_INFO0_DESC_TYPE_VAL_HOST) { + result->residue = d->residue - + cppi5_hdesc_get_pktlen(desc_vaddr); + if (result->residue) + result->result = DMA_TRANS_ABORTED; + else + result->result = DMA_TRANS_NOERROR; + } else { + result->residue = 0; + result->result = DMA_TRANS_NOERROR; + } + } +} + +/* + * This tasklet handles the completion of a DMA descriptor by + * calling its callback and freeing it. + */ +static void udma_vchan_complete(unsigned long arg) +{ + struct virt_dma_chan *vc = (struct virt_dma_chan *)arg; + struct virt_dma_desc *vd, *_vd; + struct dmaengine_desc_callback cb; + LIST_HEAD(head); + + spin_lock_irq(&vc->lock); + list_splice_tail_init(&vc->desc_completed, &head); + vd = vc->cyclic; + if (vd) { + vc->cyclic = NULL; + dmaengine_desc_get_callback(&vd->tx, &cb); + } else { + memset(&cb, 0, sizeof(cb)); + } + spin_unlock_irq(&vc->lock); + + udma_desc_pre_callback(vc, vd, NULL); + dmaengine_desc_callback_invoke(&cb, NULL); + + list_for_each_entry_safe(vd, _vd, &head, node) { + struct dmaengine_result result; + + dmaengine_desc_get_callback(&vd->tx, &cb); + + list_del(&vd->node); + + udma_desc_pre_callback(vc, vd, &result); + dmaengine_desc_callback_invoke(&cb, &result); + + vchan_vdesc_fini(vd); + } +} + +static void udma_free_chan_resources(struct dma_chan *chan) +{ + struct udma_chan *uc = to_udma_chan(chan); + struct udma_dev *ud = to_udma_dev(chan->device); + + udma_terminate_all(chan); + if (uc->terminated_desc) { + udma_reset_chan(uc, false); + udma_reset_rings(uc); + } + + cancel_delayed_work_sync(&uc->tx_drain.work); + destroy_delayed_work_on_stack(&uc->tx_drain.work); + + if (uc->irq_num_ring > 0) { + free_irq(uc->irq_num_ring, uc); + + uc->irq_num_ring = 0; + } + if (uc->irq_num_udma > 0) { + free_irq(uc->irq_num_udma, uc); + + uc->irq_num_udma = 0; + } + + /* Release PSI-L pairing */ + if (uc->psil_paired) { + navss_psil_unpair(ud, uc->config.src_thread, + uc->config.dst_thread); + uc->psil_paired = false; + } + + vchan_free_chan_resources(&uc->vc); + tasklet_kill(&uc->vc.task); + + udma_free_tx_resources(uc); + udma_free_rx_resources(uc); + udma_reset_uchan(uc); + + if (uc->use_dma_pool) { + dma_pool_destroy(uc->hdesc_pool); + uc->use_dma_pool = false; + } +} 
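With the full set of device_* callbacks now in place, a hedged sketch of how a dmaengine client would exercise them may be useful. The "tx" channel name, FIFO address, burst settings and error handling are illustrative, not taken from this patch; the API calls themselves are the standard dmaengine consumer interface, and each step is annotated with the udma_* handler it lands in.

	#include <linux/device.h>
	#include <linux/dmaengine.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>

	static int example_start_tx(struct device *dev, struct scatterlist *sgl,
				    unsigned int sglen, dma_addr_t fifo_addr)
	{
		struct dma_slave_config cfg = {
			.direction	= DMA_MEM_TO_DEV,
			.dst_addr	= fifo_addr,
			.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
			.dst_maxburst	= 8,
		};
		struct dma_async_tx_descriptor *desc;
		struct dma_chan *chan;
		dma_cookie_t cookie;

		chan = dma_request_chan(dev, "tx");	/* resolved via udma_of_xlate() */
		if (IS_ERR(chan))
			return PTR_ERR(chan);

		if (dmaengine_slave_config(chan, &cfg))	/* udma_slave_config() */
			goto err;

		desc = dmaengine_prep_slave_sg(chan, sgl, sglen, DMA_MEM_TO_DEV,
					       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc)				/* udma_prep_slave_sg() */
			goto err;

		cookie = dmaengine_submit(desc);
		if (dma_submit_error(cookie))
			goto err;

		dma_async_issue_pending(chan);		/* udma_issue_pending() */
		return 0;

	err:
		dma_release_channel(chan);		/* udma_free_chan_resources() */
		return -EIO;
	}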
+ +static struct platform_driver udma_driver; + +static bool udma_dma_filter_fn(struct dma_chan *chan, void *param) +{ + struct udma_chan_config *ucc; + struct psil_endpoint_config *ep_config; + struct udma_chan *uc; + struct udma_dev *ud; + u32 *args; + + if (chan->device->dev->driver != &udma_driver.driver) + return false; + + uc = to_udma_chan(chan); + ucc = &uc->config; + ud = uc->ud; + args = param; + + ucc->remote_thread_id = args[0]; + + if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET) + ucc->dir = DMA_MEM_TO_DEV; + else + ucc->dir = DMA_DEV_TO_MEM; + + ep_config = psil_get_ep_config(ucc->remote_thread_id); + if (IS_ERR(ep_config)) { + dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n", + ucc->remote_thread_id); + ucc->dir = DMA_MEM_TO_MEM; + ucc->remote_thread_id = -1; + return false; + } + + ucc->pkt_mode = ep_config->pkt_mode; + ucc->channel_tpl = ep_config->channel_tpl; + ucc->notdpkt = ep_config->notdpkt; + ucc->ep_type = ep_config->ep_type; + + if (ucc->ep_type != PSIL_EP_NATIVE) { + const struct udma_match_data *match_data = ud->match_data; + + if (match_data->flags & UDMA_FLAG_PDMA_ACC32) + ucc->enable_acc32 = ep_config->pdma_acc32; + if (match_data->flags & UDMA_FLAG_PDMA_BURST) + ucc->enable_burst = ep_config->pdma_burst; + } + + ucc->needs_epib = ep_config->needs_epib; + ucc->psd_size = ep_config->psd_size; + ucc->metadata_size = + (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) + + ucc->psd_size; + + if (ucc->pkt_mode) + ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) + + ucc->metadata_size, ud->desc_align); + + dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id, + ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir)); + + return true; +} + +static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct udma_dev *ud = ofdma->of_dma_data; + dma_cap_mask_t mask = ud->ddev.cap_mask; + struct dma_chan *chan; + + if (dma_spec->args_count != 1) + return NULL; + + chan = __dma_request_channel(&mask, udma_dma_filter_fn, + &dma_spec->args[0], ofdma->of_node); + if (!chan) { + dev_err(ud->dev, "get channel fail in %s.\n", __func__); + return ERR_PTR(-EINVAL); + } + + return chan; +} + +static struct udma_match_data am654_main_data = { + .psil_base = 0x1000, + .enable_memcpy_support = true, + .statictr_z_mask = GENMASK(11, 0), + .rchan_oes_offset = 0x2000, + .tpl_levels = 2, + .level_start_idx = { + [0] = 8, /* Normal channels */ + [1] = 0, /* High Throughput channels */ + }, +}; + +static struct udma_match_data am654_mcu_data = { + .psil_base = 0x6000, + .enable_memcpy_support = true, /* TEST: DMA domains */ + .statictr_z_mask = GENMASK(11, 0), + .rchan_oes_offset = 0x2000, + .tpl_levels = 2, + .level_start_idx = { + [0] = 2, /* Normal channels */ + [1] = 0, /* High Throughput channels */ + }, +}; + +static struct udma_match_data j721e_main_data = { + .psil_base = 0x1000, + .enable_memcpy_support = true, + .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST, + .statictr_z_mask = GENMASK(23, 0), + .rchan_oes_offset = 0x400, + .tpl_levels = 3, + .level_start_idx = { + [0] = 16, /* Normal channels */ + [1] = 4, /* High Throughput channels */ + [2] = 0, /* Ultra High Throughput channels */ + }, +}; + +static struct udma_match_data j721e_mcu_data = { + .psil_base = 0x6000, + .enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */ + .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST, + .statictr_z_mask = GENMASK(23, 0), + .rchan_oes_offset = 0x400, + .tpl_levels = 2, + 
.level_start_idx = { + [0] = 2, /* Normal channels */ + [1] = 0, /* High Throughput channels */ + }, +}; + +static const struct of_device_id udma_of_match[] = { + { + .compatible = "ti,am654-navss-main-udmap", + .data = &am654_main_data, + }, + { + .compatible = "ti,am654-navss-mcu-udmap", + .data = &am654_mcu_data, + }, { + .compatible = "ti,j721e-navss-main-udmap", + .data = &j721e_main_data, + }, { + .compatible = "ti,j721e-navss-mcu-udmap", + .data = &j721e_mcu_data, + }, + { /* Sentinel */ }, +}; + +static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud) +{ + struct resource *res; + int i; + + for (i = 0; i < MMR_LAST; i++) { + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + mmr_names[i]); + ud->mmrs[i] = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(ud->mmrs[i])) + return PTR_ERR(ud->mmrs[i]); + } + + return 0; +} + +static int udma_setup_resources(struct udma_dev *ud) +{ + struct device *dev = ud->dev; + int ch_count, ret, i, j; + u32 cap2, cap3; + struct ti_sci_resource_desc *rm_desc; + struct ti_sci_resource *rm_res, irq_res; + struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; + static const char * const range_names[] = { "ti,sci-rm-range-tchan", + "ti,sci-rm-range-rchan", + "ti,sci-rm-range-rflow" }; + + cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28); + cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c); + + ud->rflow_cnt = cap3 & 0x3fff; + ud->tchan_cnt = cap2 & 0x1ff; + ud->echan_cnt = (cap2 >> 9) & 0x1ff; + ud->rchan_cnt = (cap2 >> 18) & 0x1ff; + ch_count = ud->tchan_cnt + ud->rchan_cnt; + + ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt), + sizeof(unsigned long), GFP_KERNEL); + ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans), + GFP_KERNEL); + ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt), + sizeof(unsigned long), GFP_KERNEL); + ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans), + GFP_KERNEL); + ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt), + sizeof(unsigned long), + GFP_KERNEL); + ud->rflow_gp_map_allocated = devm_kcalloc(dev, + BITS_TO_LONGS(ud->rflow_cnt), + sizeof(unsigned long), + GFP_KERNEL); + ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt), + sizeof(unsigned long), + GFP_KERNEL); + ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows), + GFP_KERNEL); + + if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map || + !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans || + !ud->rflows || !ud->rflow_in_use) + return -ENOMEM; + + /* + * RX flows with the same Ids as RX channels are reserved to be used + * as default flows if remote HW can't generate flow_ids. Those + * RX flows can be requested only explicitly by id. 
+ */ + bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt); + + /* by default no GP rflows are assigned to Linux */ + bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt); + + /* Get resource ranges from tisci */ + for (i = 0; i < RM_RANGE_LAST; i++) + tisci_rm->rm_ranges[i] = + devm_ti_sci_get_of_resource(tisci_rm->tisci, dev, + tisci_rm->tisci_dev_id, + (char *)range_names[i]); + + /* tchan ranges */ + rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; + if (IS_ERR(rm_res)) { + bitmap_zero(ud->tchan_map, ud->tchan_cnt); + } else { + bitmap_fill(ud->tchan_map, ud->tchan_cnt); + for (i = 0; i < rm_res->sets; i++) { + rm_desc = &rm_res->desc[i]; + bitmap_clear(ud->tchan_map, rm_desc->start, + rm_desc->num); + dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n", + rm_desc->start, rm_desc->num); + } + } + irq_res.sets = rm_res->sets; + + /* rchan and matching default flow ranges */ + rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; + if (IS_ERR(rm_res)) { + bitmap_zero(ud->rchan_map, ud->rchan_cnt); + } else { + bitmap_fill(ud->rchan_map, ud->rchan_cnt); + for (i = 0; i < rm_res->sets; i++) { + rm_desc = &rm_res->desc[i]; + bitmap_clear(ud->rchan_map, rm_desc->start, + rm_desc->num); + dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n", + rm_desc->start, rm_desc->num); + } + } + + irq_res.sets += rm_res->sets; + irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL); + rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; + for (i = 0; i < rm_res->sets; i++) { + irq_res.desc[i].start = rm_res->desc[i].start; + irq_res.desc[i].num = rm_res->desc[i].num; + } + rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; + for (j = 0; j < rm_res->sets; j++, i++) { + irq_res.desc[i].start = rm_res->desc[j].start + + ud->match_data->rchan_oes_offset; + irq_res.desc[i].num = rm_res->desc[j].num; + } + ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res); + kfree(irq_res.desc); + if (ret) { + dev_err(ud->dev, "Failed to allocate MSI interrupts\n"); + return ret; + } + + /* GP rflow ranges */ + rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW]; + if (IS_ERR(rm_res)) { + /* all gp flows are assigned exclusively to Linux */ + bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt, + ud->rflow_cnt - ud->rchan_cnt); + } else { + for (i = 0; i < rm_res->sets; i++) { + rm_desc = &rm_res->desc[i]; + bitmap_clear(ud->rflow_gp_map, rm_desc->start, + rm_desc->num); + dev_dbg(dev, "ti-sci-res: rflow: %d:%d\n", + rm_desc->start, rm_desc->num); + } + } + + ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt); + ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt); + if (!ch_count) + return -ENODEV; + + ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels), + GFP_KERNEL); + if (!ud->channels) + return -ENOMEM; + + dev_info(dev, "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n", + ch_count, + ud->tchan_cnt - bitmap_weight(ud->tchan_map, ud->tchan_cnt), + ud->rchan_cnt - bitmap_weight(ud->rchan_map, ud->rchan_cnt), + ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map, + ud->rflow_cnt)); + + return ch_count; +} + +#define TI_UDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)) + +static int udma_probe(struct platform_device *pdev) +{ + struct device_node *navss_node = pdev->dev.parent->of_node; + struct device *dev = &pdev->dev; + struct udma_dev *ud; + const struct of_device_id *match; + int i, ret; + int ch_count; + + ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48)); + if (ret) + 
dev_err(dev, "failed to set dma mask stuff\n"); + + ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL); + if (!ud) + return -ENOMEM; + + ret = udma_get_mmrs(pdev, ud); + if (ret) + return ret; + + ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci"); + if (IS_ERR(ud->tisci_rm.tisci)) + return PTR_ERR(ud->tisci_rm.tisci); + + ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id", + &ud->tisci_rm.tisci_dev_id); + if (ret) { + dev_err(dev, "ti,sci-dev-id read failure %d\n", ret); + return ret; + } + pdev->id = ud->tisci_rm.tisci_dev_id; + + ret = of_property_read_u32(navss_node, "ti,sci-dev-id", + &ud->tisci_rm.tisci_navss_dev_id); + if (ret) { + dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret); + return ret; + } + + ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops; + ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops; + + ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc"); + if (IS_ERR(ud->ringacc)) + return PTR_ERR(ud->ringacc); + + dev->msi_domain = of_msi_get_domain(dev, dev->of_node, + DOMAIN_BUS_TI_SCI_INTA_MSI); + if (!dev->msi_domain) { + dev_err(dev, "Failed to get MSI domain\n"); + return -EPROBE_DEFER; + } + + match = of_match_node(udma_of_match, dev->of_node); + if (!match) { + dev_err(dev, "No compatible match found\n"); + return -ENODEV; + } + ud->match_data = match->data; + + dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask); + dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask); + + ud->ddev.device_alloc_chan_resources = udma_alloc_chan_resources; + ud->ddev.device_config = udma_slave_config; + ud->ddev.device_prep_slave_sg = udma_prep_slave_sg; + ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic; + ud->ddev.device_issue_pending = udma_issue_pending; + ud->ddev.device_tx_status = udma_tx_status; + ud->ddev.device_pause = udma_pause; + ud->ddev.device_resume = udma_resume; + ud->ddev.device_terminate_all = udma_terminate_all; + ud->ddev.device_synchronize = udma_synchronize; + + ud->ddev.device_free_chan_resources = udma_free_chan_resources; + ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS; + ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS; + ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; + ud->ddev.copy_align = DMAENGINE_ALIGN_8_BYTES; + ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT | + DESC_METADATA_ENGINE; + if (ud->match_data->enable_memcpy_support) { + dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask); + ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy; + ud->ddev.directions |= BIT(DMA_MEM_TO_MEM); + } + + ud->ddev.dev = dev; + ud->dev = dev; + ud->psil_base = ud->match_data->psil_base; + + INIT_LIST_HEAD(&ud->ddev.channels); + INIT_LIST_HEAD(&ud->desc_to_purge); + + ch_count = udma_setup_resources(ud); + if (ch_count <= 0) + return ch_count; + + spin_lock_init(&ud->lock); + INIT_WORK(&ud->purge_work, udma_purge_desc_work); + + ud->desc_align = 64; + if (ud->desc_align < dma_get_cache_alignment()) + ud->desc_align = dma_get_cache_alignment(); + + for (i = 0; i < ud->tchan_cnt; i++) { + struct udma_tchan *tchan = &ud->tchans[i]; + + tchan->id = i; + tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000; + } + + for (i = 0; i < ud->rchan_cnt; i++) { + struct udma_rchan *rchan = &ud->rchans[i]; + + rchan->id = i; + rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000; + } + + for (i = 0; i < ud->rflow_cnt; i++) { + struct udma_rflow *rflow = &ud->rflows[i]; + + rflow->id = i; + } + + for (i = 0; i < ch_count; i++) { + struct 
udma_chan *uc = &ud->channels[i]; + + uc->ud = ud; + uc->vc.desc_free = udma_desc_free; + uc->id = i; + uc->tchan = NULL; + uc->rchan = NULL; + uc->config.remote_thread_id = -1; + uc->config.dir = DMA_MEM_TO_MEM; + uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d", + dev_name(dev), i); + + vchan_init(&uc->vc, &ud->ddev); + /* Use custom vchan completion handling */ + tasklet_init(&uc->vc.task, udma_vchan_complete, + (unsigned long)&uc->vc); + init_completion(&uc->teardown_completed); + } + + ret = dma_async_device_register(&ud->ddev); + if (ret) { + dev_err(dev, "failed to register slave DMA engine: %d\n", ret); + return ret; + } + + platform_set_drvdata(pdev, ud); + + ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud); + if (ret) { + dev_err(dev, "failed to register of_dma controller\n"); + dma_async_device_unregister(&ud->ddev); + } + + return ret; +} + +static struct platform_driver udma_driver = { + .driver = { + .name = "ti-udma", + .of_match_table = udma_of_match, + .suppress_bind_attrs = true, + }, + .probe = udma_probe, +}; +builtin_platform_driver(udma_driver); + +/* Private interfaces to UDMA */ +#include "k3-udma-private.c" diff --git a/drivers/dma/ti/k3-udma.h b/drivers/dma/ti/k3-udma.h new file mode 100644 index 000000000000..128d8744a435 --- /dev/null +++ b/drivers/dma/ti/k3-udma.h @@ -0,0 +1,151 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + */ + +#ifndef K3_UDMA_H_ +#define K3_UDMA_H_ + +#include <linux/soc/ti/ti_sci_protocol.h> + +/* Global registers */ +#define UDMA_REV_REG 0x0 +#define UDMA_PERF_CTL_REG 0x4 +#define UDMA_EMU_CTL_REG 0x8 +#define UDMA_PSIL_TO_REG 0x10 +#define UDMA_UTC_CTL_REG 0x1c +#define UDMA_CAP_REG(i) (0x20 + ((i) * 4)) +#define UDMA_RX_FLOW_ID_FW_OES_REG 0x80 +#define UDMA_RX_FLOW_ID_FW_STATUS_REG 0x88 + +/* TX chan RT regs */ +#define UDMA_TCHAN_RT_CTL_REG 0x0 +#define UDMA_TCHAN_RT_SWTRIG_REG 0x8 +#define UDMA_TCHAN_RT_STDATA_REG 0x80 + +#define UDMA_TCHAN_RT_PEER_REG(i) (0x200 + ((i) * 0x4)) +#define UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG \ + UDMA_TCHAN_RT_PEER_REG(0) /* PSI-L: 0x400 */ +#define UDMA_TCHAN_RT_PEER_STATIC_TR_Z_REG \ + UDMA_TCHAN_RT_PEER_REG(1) /* PSI-L: 0x401 */ +#define UDMA_TCHAN_RT_PEER_BCNT_REG \ + UDMA_TCHAN_RT_PEER_REG(4) /* PSI-L: 0x404 */ +#define UDMA_TCHAN_RT_PEER_RT_EN_REG \ + UDMA_TCHAN_RT_PEER_REG(8) /* PSI-L: 0x408 */ + +#define UDMA_TCHAN_RT_PCNT_REG 0x400 +#define UDMA_TCHAN_RT_BCNT_REG 0x408 +#define UDMA_TCHAN_RT_SBCNT_REG 0x410 + +/* RX chan RT regs */ +#define UDMA_RCHAN_RT_CTL_REG 0x0 +#define UDMA_RCHAN_RT_SWTRIG_REG 0x8 +#define UDMA_RCHAN_RT_STDATA_REG 0x80 + +#define UDMA_RCHAN_RT_PEER_REG(i) (0x200 + ((i) * 0x4)) +#define UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG \ + UDMA_RCHAN_RT_PEER_REG(0) /* PSI-L: 0x400 */ +#define UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG \ + UDMA_RCHAN_RT_PEER_REG(1) /* PSI-L: 0x401 */ +#define UDMA_RCHAN_RT_PEER_BCNT_REG \ + UDMA_RCHAN_RT_PEER_REG(4) /* PSI-L: 0x404 */ +#define UDMA_RCHAN_RT_PEER_RT_EN_REG \ + UDMA_RCHAN_RT_PEER_REG(8) /* PSI-L: 0x408 */ + +#define UDMA_RCHAN_RT_PCNT_REG 0x400 +#define UDMA_RCHAN_RT_BCNT_REG 0x408 +#define UDMA_RCHAN_RT_SBCNT_REG 0x410 + +/* UDMA_TCHAN_RT_CTL_REG/UDMA_RCHAN_RT_CTL_REG */ +#define UDMA_CHAN_RT_CTL_EN BIT(31) +#define UDMA_CHAN_RT_CTL_TDOWN BIT(30) +#define UDMA_CHAN_RT_CTL_PAUSE BIT(29) +#define UDMA_CHAN_RT_CTL_FTDOWN BIT(28) +#define UDMA_CHAN_RT_CTL_ERROR BIT(0) + +/* UDMA_TCHAN_RT_PEER_RT_EN_REG/UDMA_RCHAN_RT_PEER_RT_EN_REG (PSI-L: 0x408) */ 
+#define UDMA_PEER_RT_EN_ENABLE BIT(31) +#define UDMA_PEER_RT_EN_TEARDOWN BIT(30) +#define UDMA_PEER_RT_EN_PAUSE BIT(29) +#define UDMA_PEER_RT_EN_FLUSH BIT(28) +#define UDMA_PEER_RT_EN_IDLE BIT(1) + +/* + * UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG / + * UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG + */ +#define PDMA_STATIC_TR_X_MASK GENMASK(26, 24) +#define PDMA_STATIC_TR_X_SHIFT (24) +#define PDMA_STATIC_TR_Y_MASK GENMASK(11, 0) +#define PDMA_STATIC_TR_Y_SHIFT (0) + +#define PDMA_STATIC_TR_Y(x) \ + (((x) << PDMA_STATIC_TR_Y_SHIFT) & PDMA_STATIC_TR_Y_MASK) +#define PDMA_STATIC_TR_X(x) \ + (((x) << PDMA_STATIC_TR_X_SHIFT) & PDMA_STATIC_TR_X_MASK) + +#define PDMA_STATIC_TR_XY_ACC32 BIT(30) +#define PDMA_STATIC_TR_XY_BURST BIT(31) + +/* + * UDMA_TCHAN_RT_PEER_STATIC_TR_Z_REG / + * UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG + */ +#define PDMA_STATIC_TR_Z(x, mask) ((x) & (mask)) + +struct udma_dev; +struct udma_tchan; +struct udma_rchan; +struct udma_rflow; + +enum udma_rm_range { + RM_RANGE_TCHAN = 0, + RM_RANGE_RCHAN, + RM_RANGE_RFLOW, + RM_RANGE_LAST, +}; + +struct udma_tisci_rm { + const struct ti_sci_handle *tisci; + const struct ti_sci_rm_udmap_ops *tisci_udmap_ops; + u32 tisci_dev_id; + + /* tisci information for PSI-L thread pairing/unpairing */ + const struct ti_sci_rm_psil_ops *tisci_psil_ops; + u32 tisci_navss_dev_id; + + struct ti_sci_resource *rm_ranges[RM_RANGE_LAST]; +}; + +/* Direct access to UDMA low lever resources for the glue layer */ +int xudma_navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread); +int xudma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread, + u32 dst_thread); + +struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property); +void xudma_dev_put(struct udma_dev *ud); +u32 xudma_dev_get_psil_base(struct udma_dev *ud); +struct udma_tisci_rm *xudma_dev_get_tisci_rm(struct udma_dev *ud); + +int xudma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt); +int xudma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt); + +struct udma_tchan *xudma_tchan_get(struct udma_dev *ud, int id); +struct udma_rchan *xudma_rchan_get(struct udma_dev *ud, int id); +struct udma_rflow *xudma_rflow_get(struct udma_dev *ud, int id); + +void xudma_tchan_put(struct udma_dev *ud, struct udma_tchan *p); +void xudma_rchan_put(struct udma_dev *ud, struct udma_rchan *p); +void xudma_rflow_put(struct udma_dev *ud, struct udma_rflow *p); + +int xudma_tchan_get_id(struct udma_tchan *p); +int xudma_rchan_get_id(struct udma_rchan *p); +int xudma_rflow_get_id(struct udma_rflow *p); + +u32 xudma_tchanrt_read(struct udma_tchan *tchan, int reg); +void xudma_tchanrt_write(struct udma_tchan *tchan, int reg, u32 val); +u32 xudma_rchanrt_read(struct udma_rchan *rchan, int reg); +void xudma_rchanrt_write(struct udma_rchan *rchan, int reg, u32 val); +bool xudma_rflow_is_gp(struct udma_dev *ud, int id); + +#endif /* K3_UDMA_H_ */ diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c index ec4adf4260a0..23e33a85f033 100644 --- a/drivers/dma/virt-dma.c +++ b/drivers/dma/virt-dma.c @@ -104,9 +104,8 @@ static void vchan_complete(unsigned long arg) dmaengine_desc_get_callback(&vd->tx, &cb); list_del(&vd->node); - vchan_vdesc_fini(vd); - dmaengine_desc_callback_invoke(&cb, &vd->tx_result); + vchan_vdesc_fini(vd); } } @@ -115,13 +114,8 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head) struct virt_dma_desc *vd, *_vd; list_for_each_entry_safe(vd, _vd, head, node) { - if (dmaengine_desc_test_reuse(&vd->tx)) { - list_move_tail(&vd->node, 
&vc->desc_allocated); - } else { - dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd); - list_del(&vd->node); - vc->desc_free(vd); - } + list_del(&vd->node); + vchan_vdesc_fini(vd); } } EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list); @@ -135,6 +129,7 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev) INIT_LIST_HEAD(&vc->desc_submitted); INIT_LIST_HEAD(&vc->desc_issued); INIT_LIST_HEAD(&vc->desc_completed); + INIT_LIST_HEAD(&vc->desc_terminated); tasklet_init(&vc->task, vchan_complete, (unsigned long)vc); diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h index ab158bac03a7..e9f5250fbe4d 100644 --- a/drivers/dma/virt-dma.h +++ b/drivers/dma/virt-dma.h @@ -31,9 +31,9 @@ struct virt_dma_chan { struct list_head desc_submitted; struct list_head desc_issued; struct list_head desc_completed; + struct list_head desc_terminated; struct virt_dma_desc *cyclic; - struct virt_dma_desc *vd_terminated; }; static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan) @@ -113,10 +113,15 @@ static inline void vchan_vdesc_fini(struct virt_dma_desc *vd) { struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); - if (dmaengine_desc_test_reuse(&vd->tx)) + if (dmaengine_desc_test_reuse(&vd->tx)) { + unsigned long flags; + + spin_lock_irqsave(&vc->lock, flags); list_add(&vd->node, &vc->desc_allocated); - else + spin_unlock_irqrestore(&vc->lock, flags); + } else { vc->desc_free(vd); + } } /** @@ -141,11 +146,8 @@ static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd) { struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); - /* free up stuck descriptor */ - if (vc->vd_terminated) - vchan_vdesc_fini(vc->vd_terminated); + list_add_tail(&vd->node, &vc->desc_terminated); - vc->vd_terminated = vd; if (vc->cyclic == vd) vc->cyclic = NULL; } @@ -179,6 +181,7 @@ static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc, list_splice_tail_init(&vc->desc_submitted, head); list_splice_tail_init(&vc->desc_issued, head); list_splice_tail_init(&vc->desc_completed, head); + list_splice_tail_init(&vc->desc_terminated, head); } static inline void vchan_free_chan_resources(struct virt_dma_chan *vc) @@ -207,16 +210,18 @@ static inline void vchan_free_chan_resources(struct virt_dma_chan *vc) */ static inline void vchan_synchronize(struct virt_dma_chan *vc) { + LIST_HEAD(head); unsigned long flags; tasklet_kill(&vc->task); spin_lock_irqsave(&vc->lock, flags); - if (vc->vd_terminated) { - vchan_vdesc_fini(vc->vd_terminated); - vc->vd_terminated = NULL; - } + + list_splice_tail_init(&vc->desc_terminated, &head); + spin_unlock_irqrestore(&vc->lock, flags); + + vchan_dma_desc_free_list(vc, &head); } #endif diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c index 9c845c07b107..d47749a35863 100644 --- a/drivers/dma/xilinx/zynqmp_dma.c +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -123,10 +123,12 @@ /* Max transfer size per descriptor */ #define ZYNQMP_DMA_MAX_TRANS_LEN 0x40000000 +/* Max burst lengths */ +#define ZYNQMP_DMA_MAX_DST_BURST_LEN 32768U +#define ZYNQMP_DMA_MAX_SRC_BURST_LEN 32768U + /* Reset values for data attributes */ #define ZYNQMP_DMA_AXCACHE_VAL 0xF -#define ZYNQMP_DMA_ARLEN_RST_VAL 0xF -#define ZYNQMP_DMA_AWLEN_RST_VAL 0xF #define ZYNQMP_DMA_SRC_ISSUE_RST_VAL 0x1F @@ -534,17 +536,19 @@ static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status) static void zynqmp_dma_config(struct zynqmp_dma_chan *chan) { - u32 val; + u32 val, burst_val; val = readl(chan->regs + ZYNQMP_DMA_CTRL0); val |= ZYNQMP_DMA_POINT_TYPE_SG; 
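The virt-dma rework above replaces the single vd_terminated pointer with a desc_terminated list, so any number of terminated descriptors can be parked between device_terminate_all and device_synchronize instead of at most one. A reduced model of that park-then-batch-free pattern follows; the demo_* names are hypothetical, and kfree() stands in for the real vc->desc_free() callback.

	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct demo_vchan {
		spinlock_t lock;
		struct list_head desc_terminated;
	};

	struct demo_vdesc {
		struct list_head node;
	};

	/* Called under vc->lock, as vchan_terminate_vdesc() is. */
	static void demo_terminate_vdesc(struct demo_vchan *vc, struct demo_vdesc *vd)
	{
		list_add_tail(&vd->node, &vc->desc_terminated);
	}

	/* Later, outside atomic context, as vchan_synchronize() now does. */
	static void demo_synchronize(struct demo_vchan *vc)
	{
		struct demo_vdesc *vd, *_vd;
		unsigned long flags;
		LIST_HEAD(head);

		/* Detach the whole list under the lock, free it outside. */
		spin_lock_irqsave(&vc->lock, flags);
		list_splice_tail_init(&vc->desc_terminated, &head);
		spin_unlock_irqrestore(&vc->lock, flags);

		list_for_each_entry_safe(vd, _vd, &head, node) {
			list_del(&vd->node);
			kfree(vd);	/* stands in for vc->desc_free(vd) */
		}
	}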
writel(val, chan->regs + ZYNQMP_DMA_CTRL0); val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR); + burst_val = __ilog2_u32(chan->src_burst_len); val = (val & ~ZYNQMP_DMA_ARLEN) | - (chan->src_burst_len << ZYNQMP_DMA_ARLEN_OFST); + ((burst_val << ZYNQMP_DMA_ARLEN_OFST) & ZYNQMP_DMA_ARLEN); + burst_val = __ilog2_u32(chan->dst_burst_len); val = (val & ~ZYNQMP_DMA_AWLEN) | - (chan->dst_burst_len << ZYNQMP_DMA_AWLEN_OFST); + ((burst_val << ZYNQMP_DMA_AWLEN_OFST) & ZYNQMP_DMA_AWLEN); writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR); } @@ -560,8 +564,10 @@ static int zynqmp_dma_device_config(struct dma_chan *dchan, { struct zynqmp_dma_chan *chan = to_chan(dchan); - chan->src_burst_len = config->src_maxburst; - chan->dst_burst_len = config->dst_maxburst; + chan->src_burst_len = clamp(config->src_maxburst, 1U, + ZYNQMP_DMA_MAX_SRC_BURST_LEN); + chan->dst_burst_len = clamp(config->dst_maxburst, 1U, + ZYNQMP_DMA_MAX_DST_BURST_LEN); return 0; } @@ -887,8 +893,8 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev, return PTR_ERR(chan->regs); chan->bus_width = ZYNQMP_DMA_BUS_WIDTH_64; - chan->dst_burst_len = ZYNQMP_DMA_AWLEN_RST_VAL; - chan->src_burst_len = ZYNQMP_DMA_ARLEN_RST_VAL; + chan->dst_burst_len = ZYNQMP_DMA_MAX_DST_BURST_LEN; + chan->src_burst_len = ZYNQMP_DMA_MAX_SRC_BURST_LEN; err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width); if (err < 0) { dev_err(&pdev->dev, "missing xlnx,bus-width property\n"); diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index 417dad635526..b3c99bb5fe77 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -462,7 +462,7 @@ config EDAC_ALTERA_SDMMC config EDAC_SIFIVE bool "Sifive platform EDAC driver" - depends on EDAC=y && RISCV + depends on EDAC=y && SIFIVE_L2 help Support for error detection and correction on the SiFive SoCs. @@ -491,8 +491,7 @@ config EDAC_TI tristate "Texas Instruments DDR3 ECC Controller" depends on ARCH_KEYSTONE || SOC_DRA7XX help - Support for error detection and correction on the - TI SoCs. + Support for error detection and correction on the TI SoCs. 
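The zynqmp hunk above stops programming the raw maxburst value into the ARLEN/AWLEN fields: device_config now clamps the requested burst to the 32768-beat hardware maximum, and zynqmp_dma_config() writes log2 of the result, masked to the field width. A small sketch of that computation, assuming the demo_* names; ofst and mask stand in for the driver's ZYNQMP_DMA_ARLEN/AWLEN definitions.

	#include <stdint.h>

	#define DEMO_MAX_BURST	32768u	/* ZYNQMP_DMA_MAX_{SRC,DST}_BURST_LEN */

	/* Clamp as zynqmp_dma_device_config() now does. */
	static uint32_t demo_clamp_burst(uint32_t maxburst)
	{
		if (maxburst < 1)
			maxburst = 1;
		if (maxburst > DEMO_MAX_BURST)
			maxburst = DEMO_MAX_BURST;
		return maxburst;
	}

	/* floor(log2(v)), as __ilog2_u32() computes it in the driver. */
	static uint32_t demo_ilog2(uint32_t v)
	{
		uint32_t r = 0;

		while (v >>= 1)
			r++;
		return r;
	}

	/* Merge the encoded burst length into the register field. */
	static uint32_t demo_encode_burst(uint32_t reg, uint32_t maxburst,
					  uint32_t ofst, uint32_t mask)
	{
		uint32_t burst_val = demo_ilog2(demo_clamp_burst(maxburst));

		return (reg & ~mask) | ((burst_val << ofst) & mask);
	}

For example, maxburst = 16 encodes as 4, and any request above 32768 saturates at an encoded value of 15.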
config EDAC_QCOM tristate "QCOM EDAC Controller" diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 428ce98f6776..9fbad908a854 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -214,7 +214,7 @@ static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate) scrubval = scrubrates[i].scrubval; - if (pvt->fam == 0x17 || pvt->fam == 0x18) { + if (pvt->umc) { __f17h_set_scrubval(pvt, scrubval); } else if (pvt->fam == 0x15 && pvt->model == 0x60) { f15h_select_dct(pvt, 0); @@ -256,18 +256,7 @@ static int get_scrub_rate(struct mem_ctl_info *mci) int i, retval = -EINVAL; u32 scrubval = 0; - switch (pvt->fam) { - case 0x15: - /* Erratum #505 */ - if (pvt->model < 0x10) - f15h_select_dct(pvt, 0); - - if (pvt->model == 0x60) - amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval); - break; - - case 0x17: - case 0x18: + if (pvt->umc) { amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval); if (scrubval & BIT(0)) { amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval); @@ -276,11 +265,15 @@ static int get_scrub_rate(struct mem_ctl_info *mci) } else { scrubval = 0; } - break; + } else if (pvt->fam == 0x15) { + /* Erratum #505 */ + if (pvt->model < 0x10) + f15h_select_dct(pvt, 0); - default: + if (pvt->model == 0x60) + amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval); + } else { amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval); - break; } scrubval = scrubval & 0x001F; @@ -1055,6 +1048,16 @@ static void determine_memory_type(struct amd64_pvt *pvt) { u32 dram_ctrl, dcsm; + if (pvt->umc) { + if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5)) + pvt->dram_type = MEM_LRDDR4; + else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4)) + pvt->dram_type = MEM_RDDR4; + else + pvt->dram_type = MEM_DDR4; + return; + } + switch (pvt->fam) { case 0xf: if (pvt->ext_model >= K8_REV_F) @@ -1100,16 +1103,6 @@ static void determine_memory_type(struct amd64_pvt *pvt) case 0x16: goto ddr3; - case 0x17: - case 0x18: - if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5)) - pvt->dram_type = MEM_LRDDR4; - else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4)) - pvt->dram_type = MEM_RDDR4; - else - pvt->dram_type = MEM_DDR4; - return; - default: WARN(1, KERN_ERR "%s: Family??? 
0x%x\n", __func__, pvt->fam); pvt->dram_type = MEM_EMPTY; @@ -2336,6 +2329,16 @@ static struct amd64_family_type family_types[] = { .dbam_to_cs = f17_addr_mask_to_cs_size, } }, + [F19_CPUS] = { + .ctl_name = "F19h", + .f0_id = PCI_DEVICE_ID_AMD_19H_DF_F0, + .f6_id = PCI_DEVICE_ID_AMD_19H_DF_F6, + .max_mcs = 8, + .ops = { + .early_channel_count = f17_early_channel_count, + .dbam_to_cs = f17_addr_mask_to_cs_size, + } + }, }; /* @@ -3368,6 +3371,12 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt) family_types[F17_CPUS].ctl_name = "F18h"; break; + case 0x19: + fam_type = &family_types[F19_CPUS]; + pvt->ops = &family_types[F19_CPUS].ops; + family_types[F19_CPUS].ctl_name = "F19h"; + break; + default: amd64_err("Unsupported family!\n"); return NULL; @@ -3573,9 +3582,6 @@ static void remove_one_instance(unsigned int nid) struct mem_ctl_info *mci; struct amd64_pvt *pvt; - mci = find_mci_by_dev(&F3->dev); - WARN_ON(!mci); - /* Remove from EDAC CORE tracking list */ mci = edac_mc_del_mc(&F3->dev); if (!mci) @@ -3626,6 +3632,7 @@ static const struct x86_cpu_id amd64_cpuids[] = { { X86_VENDOR_AMD, 0x16, X86_MODEL_ANY, X86_FEATURE_ANY, 0 }, { X86_VENDOR_AMD, 0x17, X86_MODEL_ANY, X86_FEATURE_ANY, 0 }, { X86_VENDOR_HYGON, 0x18, X86_MODEL_ANY, X86_FEATURE_ANY, 0 }, + { X86_VENDOR_AMD, 0x19, X86_MODEL_ANY, X86_FEATURE_ANY, 0 }, { } }; MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids); diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h index 9be31688110b..abbf3c274d74 100644 --- a/drivers/edac/amd64_edac.h +++ b/drivers/edac/amd64_edac.h @@ -122,6 +122,8 @@ #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F6 0x1496 #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F0 0x1440 #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F6 0x1446 +#define PCI_DEVICE_ID_AMD_19H_DF_F0 0x1650 +#define PCI_DEVICE_ID_AMD_19H_DF_F6 0x1656 /* * Function 1 - Address Map @@ -292,6 +294,7 @@ enum amd_families { F17_M10H_CPUS, F17_M30H_CPUS, F17_M70H_CPUS, + F19_CPUS, NUM_FAMILIES, }; diff --git a/drivers/edac/aspeed_edac.c b/drivers/edac/aspeed_edac.c index 09a9e3de9595..b194658b8b5c 100644 --- a/drivers/edac/aspeed_edac.c +++ b/drivers/edac/aspeed_edac.c @@ -243,7 +243,7 @@ static int init_csrows(struct mem_ctl_info *mci) if (!np) { dev_err(mci->pdev, "dt: missing /memory node\n"); return -ENODEV; - }; + } rc = of_address_to_resource(np, 0, &r); @@ -252,7 +252,7 @@ static int init_csrows(struct mem_ctl_info *mci) if (rc) { dev_err(mci->pdev, "dt: failed requesting resource for /memory node\n"); return rc; - }; + } dev_dbg(mci->pdev, "dt: /memory node resources: first page r.start=0x%x, resource_size=0x%x, PAGE_SHIFT macro=0x%x\n", r.start, resource_size(&r), PAGE_SHIFT); diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c index f564a4a8a4ae..5c1eea96230c 100644 --- a/drivers/edac/i3000_edac.c +++ b/drivers/edac/i3000_edac.c @@ -324,7 +324,7 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx) pci_read_config_dword(pdev, I3000_MCHBAR, (u32 *) & mchbar); mchbar &= I3000_MCHBAR_MASK; - window = ioremap_nocache(mchbar, I3000_MMR_WINDOW_SIZE); + window = ioremap(mchbar, I3000_MMR_WINDOW_SIZE); if (!window) { printk(KERN_ERR "i3000: cannot map mmio space at 0x%lx\n", mchbar); diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c index 432b375a4075..a8988db6d423 100644 --- a/drivers/edac/i3200_edac.c +++ b/drivers/edac/i3200_edac.c @@ -280,7 +280,7 @@ static void __iomem *i3200_map_mchbar(struct pci_dev *pdev) return NULL; } - window = ioremap_nocache(u.mchbar, I3200_MMR_WINDOW_SIZE); + window = 
ioremap(u.mchbar, I3200_MMR_WINDOW_SIZE); if (!window) printk(KERN_ERR "i3200: cannot map mmio space at 0x%llx\n", (unsigned long long)u.mchbar); diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c index 0ddc41e47a96..191aa7c19ded 100644 --- a/drivers/edac/i5100_edac.c +++ b/drivers/edac/i5100_edac.c @@ -259,11 +259,6 @@ static inline u32 i5100_nrecmemb_ras(u32 a) return a & ((1 << 16) - 1); } -static inline u32 i5100_redmemb_ecc_locator(u32 a) -{ - return a & ((1 << 18) - 1); -} - static inline u32 i5100_recmema_merr(u32 a) { return i5100_nrecmema_merr(a); @@ -486,7 +481,6 @@ static void i5100_read_log(struct mem_ctl_info *mci, int chan, u32 dw; u32 dw2; unsigned syndrome = 0; - unsigned ecc_loc = 0; unsigned merr; unsigned bank; unsigned rank; @@ -499,7 +493,6 @@ static void i5100_read_log(struct mem_ctl_info *mci, int chan, pci_read_config_dword(pdev, I5100_REDMEMA, &dw2); syndrome = dw2; pci_read_config_dword(pdev, I5100_REDMEMB, &dw2); - ecc_loc = i5100_redmemb_ecc_locator(dw2); } if (i5100_validlog_recmemvalid(dw)) { diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c index 7c6a2d4d2360..6be99e0d850d 100644 --- a/drivers/edac/i82975x_edac.c +++ b/drivers/edac/i82975x_edac.c @@ -485,7 +485,7 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx) goto fail0; } mchbar &= 0xffffc000; /* bits 31:14 used for 16K window */ - mch_window = ioremap_nocache(mchbar, 0x1000); + mch_window = ioremap(mchbar, 0x1000); if (!mch_window) { edac_dbg(3, "error ioremapping MCHBAR!\n"); goto fail0; diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c index 4f65073f230b..d68346a8e141 100644 --- a/drivers/edac/ie31200_edac.c +++ b/drivers/edac/ie31200_edac.c @@ -357,7 +357,7 @@ static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev) return NULL; } - window = ioremap_nocache(u.mchbar, IE31200_MMR_WINDOW_SIZE); + window = ioremap(u.mchbar, IE31200_MMR_WINDOW_SIZE); if (!window) ie31200_printk(KERN_ERR, "Cannot map mmio space at 0x%llx\n", (unsigned long long)u.mchbar); diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c index ea622c6f3a39..ea980c556f2e 100644 --- a/drivers/edac/mce_amd.c +++ b/drivers/edac/mce_amd.c @@ -6,7 +6,7 @@ #include "mce_amd.h" -static struct amd_decoder_ops *fam_ops; +static struct amd_decoder_ops fam_ops; static u8 xec_mask = 0xf; @@ -175,6 +175,33 @@ static const char * const smca_ls_mce_desc[] = { "L2 Fill Data error", }; +static const char * const smca_ls2_mce_desc[] = { + "An ECC error was detected on a data cache read by a probe or victimization", + "An ECC error or L2 poison was detected on a data cache read by a load", + "An ECC error was detected on a data cache read-modify-write by a store", + "An ECC error or poison bit mismatch was detected on a tag read by a probe or victimization", + "An ECC error or poison bit mismatch was detected on a tag read by a load", + "An ECC error or poison bit mismatch was detected on a tag read by a store", + "An ECC error was detected on an EMEM read by a load", + "An ECC error was detected on an EMEM read-modify-write by a store", + "A parity error was detected in an L1 TLB entry by any access", + "A parity error was detected in an L2 TLB entry by any access", + "A parity error was detected in a PWC entry by any access", + "A parity error was detected in an STQ entry by any access", + "A parity error was detected in an LDQ entry by any access", + "A parity error was detected in a MAB entry by any access", + "A parity error was detected in an SCB entry state field by 
any access", + "A parity error was detected in an SCB entry address field by any access", + "A parity error was detected in an SCB entry data field by any access", + "A parity error was detected in a WCB entry by any access", + "A poisoned line was detected in an SCB entry by any access", + "A SystemReadDataError error was reported on read data returned from L2 for a load", + "A SystemReadDataError error was reported on read data returned from L2 for an SCB store", + "A SystemReadDataError error was reported on read data returned from L2 for a WCB store", + "A hardware assertion error was reported", + "A parity error was detected in an STLF, SCB EMEM entry or SRB store data by any access", +}; + static const char * const smca_if_mce_desc[] = { "Op Cache Microtag Probe Port Parity Error", "IC Microtag or Full Tag Multi-hit Error", @@ -378,6 +405,7 @@ struct smca_mce_desc { static struct smca_mce_desc smca_mce_descs[] = { [SMCA_LS] = { smca_ls_mce_desc, ARRAY_SIZE(smca_ls_mce_desc) }, + [SMCA_LS_V2] = { smca_ls2_mce_desc, ARRAY_SIZE(smca_ls2_mce_desc) }, [SMCA_IF] = { smca_if_mce_desc, ARRAY_SIZE(smca_if_mce_desc) }, [SMCA_L2_CACHE] = { smca_l2_mce_desc, ARRAY_SIZE(smca_l2_mce_desc) }, [SMCA_DE] = { smca_de_mce_desc, ARRAY_SIZE(smca_de_mce_desc) }, @@ -555,7 +583,7 @@ static void decode_mc0_mce(struct mce *m) : (xec ? "multimatch" : "parity"))); return; } - } else if (fam_ops->mc0_mce(ec, xec)) + } else if (fam_ops.mc0_mce(ec, xec)) ; else pr_emerg(HW_ERR "Corrupted MC0 MCE info?\n"); @@ -669,7 +697,7 @@ static void decode_mc1_mce(struct mce *m) pr_cont("Hardware Assert.\n"); else goto wrong_mc1_mce; - } else if (fam_ops->mc1_mce(ec, xec)) + } else if (fam_ops.mc1_mce(ec, xec)) ; else goto wrong_mc1_mce; @@ -803,7 +831,7 @@ static void decode_mc2_mce(struct mce *m) pr_emerg(HW_ERR "MC2 Error: "); - if (!fam_ops->mc2_mce(ec, xec)) + if (!fam_ops.mc2_mce(ec, xec)) pr_cont(HW_ERR "Corrupted MC2 MCE info?\n"); } @@ -1102,7 +1130,8 @@ amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data) if (m->tsc) pr_emerg(HW_ERR "TSC: %llu\n", m->tsc); - if (!fam_ops) + /* Doesn't matter which member to test. 
*/ + if (!fam_ops.mc0_mce) goto err_code; switch (m->bank) { @@ -1157,80 +1186,73 @@ static int __init mce_amd_init(void) c->x86_vendor != X86_VENDOR_HYGON) return -ENODEV; - fam_ops = kzalloc(sizeof(struct amd_decoder_ops), GFP_KERNEL); - if (!fam_ops) - return -ENOMEM; + if (boot_cpu_has(X86_FEATURE_SMCA)) { + xec_mask = 0x3f; + goto out; + } switch (c->x86) { case 0xf: - fam_ops->mc0_mce = k8_mc0_mce; - fam_ops->mc1_mce = k8_mc1_mce; - fam_ops->mc2_mce = k8_mc2_mce; + fam_ops.mc0_mce = k8_mc0_mce; + fam_ops.mc1_mce = k8_mc1_mce; + fam_ops.mc2_mce = k8_mc2_mce; break; case 0x10: - fam_ops->mc0_mce = f10h_mc0_mce; - fam_ops->mc1_mce = k8_mc1_mce; - fam_ops->mc2_mce = k8_mc2_mce; + fam_ops.mc0_mce = f10h_mc0_mce; + fam_ops.mc1_mce = k8_mc1_mce; + fam_ops.mc2_mce = k8_mc2_mce; break; case 0x11: - fam_ops->mc0_mce = k8_mc0_mce; - fam_ops->mc1_mce = k8_mc1_mce; - fam_ops->mc2_mce = k8_mc2_mce; + fam_ops.mc0_mce = k8_mc0_mce; + fam_ops.mc1_mce = k8_mc1_mce; + fam_ops.mc2_mce = k8_mc2_mce; break; case 0x12: - fam_ops->mc0_mce = f12h_mc0_mce; - fam_ops->mc1_mce = k8_mc1_mce; - fam_ops->mc2_mce = k8_mc2_mce; + fam_ops.mc0_mce = f12h_mc0_mce; + fam_ops.mc1_mce = k8_mc1_mce; + fam_ops.mc2_mce = k8_mc2_mce; break; case 0x14: - fam_ops->mc0_mce = cat_mc0_mce; - fam_ops->mc1_mce = cat_mc1_mce; - fam_ops->mc2_mce = k8_mc2_mce; + fam_ops.mc0_mce = cat_mc0_mce; + fam_ops.mc1_mce = cat_mc1_mce; + fam_ops.mc2_mce = k8_mc2_mce; break; case 0x15: xec_mask = c->x86_model == 0x60 ? 0x3f : 0x1f; - fam_ops->mc0_mce = f15h_mc0_mce; - fam_ops->mc1_mce = f15h_mc1_mce; - fam_ops->mc2_mce = f15h_mc2_mce; + fam_ops.mc0_mce = f15h_mc0_mce; + fam_ops.mc1_mce = f15h_mc1_mce; + fam_ops.mc2_mce = f15h_mc2_mce; break; case 0x16: xec_mask = 0x1f; - fam_ops->mc0_mce = cat_mc0_mce; - fam_ops->mc1_mce = cat_mc1_mce; - fam_ops->mc2_mce = f16h_mc2_mce; + fam_ops.mc0_mce = cat_mc0_mce; + fam_ops.mc1_mce = cat_mc1_mce; + fam_ops.mc2_mce = f16h_mc2_mce; break; case 0x17: case 0x18: - xec_mask = 0x3f; - if (!boot_cpu_has(X86_FEATURE_SMCA)) { - printk(KERN_WARNING "Decoding supported only on Scalable MCA processors.\n"); - goto err_out; - } - break; + pr_warn("Decoding supported only on Scalable MCA processors.\n"); + return -EINVAL; default: printk(KERN_WARNING "Huh? 
What family is it: 0x%x?!\n", c->x86); - goto err_out; + return -EINVAL; } +out: pr_info("MCE: In-kernel MCE decoding enabled.\n"); mce_register_decode_chain(&amd_mce_dec_nb); return 0; - -err_out: - kfree(fam_ops); - fam_ops = NULL; - return -EINVAL; } early_initcall(mce_amd_init); @@ -1238,7 +1260,6 @@ early_initcall(mce_amd_init); static void __exit mce_amd_exit(void) { mce_unregister_decode_chain(&amd_mce_dec_nb); - kfree(fam_ops); } MODULE_DESCRIPTION("AMD MCE decoder"); diff --git a/drivers/edac/sifive_edac.c b/drivers/edac/sifive_edac.c index 413cdb4a591d..3a3dcb14ed99 100644 --- a/drivers/edac/sifive_edac.c +++ b/drivers/edac/sifive_edac.c @@ -10,7 +10,7 @@ #include <linux/edac.h> #include <linux/platform_device.h> #include "edac_module.h" -#include <asm/sifive_l2_cache.h> +#include <soc/sifive/sifive_l2_cache.h> #define DRVNAME "sifive_edac" @@ -54,8 +54,8 @@ static int ecc_register(struct platform_device *pdev) p->dci = edac_device_alloc_ctl_info(0, "sifive_ecc", 1, "sifive_ecc", 1, 1, NULL, 0, edac_device_alloc_index()); - if (IS_ERR(p->dci)) - return PTR_ERR(p->dci); + if (!p->dci) + return -ENOMEM; p->dci->dev = &pdev->dev; p->dci->mod_name = "Sifive ECC Manager"; diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c index 95662a4ff4c4..99bbaf629b8d 100644 --- a/drivers/edac/skx_common.c +++ b/drivers/edac/skx_common.c @@ -256,7 +256,7 @@ int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm) pdev = pci_get_device(PCI_VENDOR_ID_INTEL, did, NULL); if (!pdev) { - skx_printk(KERN_ERR, "Can't get tolm/tohm\n"); + edac_dbg(2, "Can't get tolm/tohm\n"); return -ENODEV; } diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c index cc779f3f9e2d..a65e2f78a402 100644 --- a/drivers/edac/x38_edac.c +++ b/drivers/edac/x38_edac.c @@ -266,7 +266,7 @@ static void __iomem *x38_map_mchbar(struct pci_dev *pdev) return NULL; } - window = ioremap_nocache(u.mchbar, X38_MMR_WINDOW_SIZE); + window = ioremap(u.mchbar, X38_MMR_WINDOW_SIZE); if (!window) printk(KERN_ERR "x38: cannot map mmio space at 0x%llx\n", (unsigned long long)u.mchbar); diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c index 0cc746673677..6ca2f5ab6c57 100644 --- a/drivers/firewire/nosy.c +++ b/drivers/firewire/nosy.c @@ -551,7 +551,7 @@ add_card(struct pci_dev *dev, const struct pci_device_id *unused) INIT_LIST_HEAD(&lynx->client_list); kref_init(&lynx->kref); - lynx->registers = ioremap_nocache(pci_resource_start(dev, 0), + lynx->registers = ioremap(pci_resource_start(dev, 0), PCILYNX_MAX_REGISTER); if (lynx->registers == NULL) { dev_err(&dev->dev, "Failed to map registers\n"); diff --git a/drivers/firmware/broadcom/bcm47xx_nvram.c b/drivers/firmware/broadcom/bcm47xx_nvram.c index da04fdae62a1..835ece9c00f1 100644 --- a/drivers/firmware/broadcom/bcm47xx_nvram.c +++ b/drivers/firmware/broadcom/bcm47xx_nvram.c @@ -120,7 +120,7 @@ int bcm47xx_nvram_init_from_mem(u32 base, u32 lim) void __iomem *iobase; int err; - iobase = ioremap_nocache(base, lim); + iobase = ioremap(base, lim); if (!iobase) return -ENOMEM; diff --git a/drivers/firmware/broadcom/tee_bnxt_fw.c b/drivers/firmware/broadcom/tee_bnxt_fw.c index 5b7ef89eb701..ed10da5313e8 100644 --- a/drivers/firmware/broadcom/tee_bnxt_fw.c +++ b/drivers/firmware/broadcom/tee_bnxt_fw.c @@ -215,7 +215,6 @@ static int tee_bnxt_fw_probe(struct device *dev) fw_shm_pool = tee_shm_alloc(pvt_data.ctx, MAX_SHM_MEM_SZ, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF); if (IS_ERR(fw_shm_pool)) { - tee_client_close_context(pvt_data.ctx); dev_err(pvt_data.dev, 
"tee_shm_alloc failed\n"); err = PTR_ERR(fw_shm_pool); goto out_sess; diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index bcc378c19ebe..ecc83e2f032c 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig @@ -215,6 +215,28 @@ config EFI_RCI2_TABLE Say Y here for Dell EMC PowerEdge systems. +config EFI_DISABLE_PCI_DMA + bool "Clear Busmaster bit on PCI bridges during ExitBootServices()" + help + Disable the busmaster bit in the control register on all PCI bridges + while calling ExitBootServices() and passing control to the runtime + kernel. System firmware may configure the IOMMU to prevent malicious + PCI devices from being able to attack the OS via DMA. However, since + firmware can't guarantee that the OS is IOMMU-aware, it will tear + down IOMMU configuration when ExitBootServices() is called. This + leaves a window between where a hostile device could still cause + damage before Linux configures the IOMMU again. + + If you say Y here, the EFI stub will clear the busmaster bit on all + PCI bridges before ExitBootServices() is called. This will prevent + any malicious PCI devices from being able to perform DMA until the + kernel reenables busmastering after configuring the IOMMU. + + This option will cause failures with some poorly behaved hardware + and should not be enabled without testing. The kernel commandline + options "efi=disable_early_pci_dma" or "efi=no_disable_early_pci_dma" + may be used to override this option. + endmenu config UEFI_CPER diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c index 904fa09e6a6b..d99f5b0c8a09 100644 --- a/drivers/firmware/efi/arm-init.c +++ b/drivers/firmware/efi/arm-init.c @@ -10,10 +10,12 @@ #define pr_fmt(fmt) "efi: " fmt #include <linux/efi.h> +#include <linux/fwnode.h> #include <linux/init.h> #include <linux/memblock.h> #include <linux/mm_types.h> #include <linux/of.h> +#include <linux/of_address.h> #include <linux/of_fdt.h> #include <linux/platform_device.h> #include <linux/screen_info.h> @@ -276,15 +278,112 @@ void __init efi_init(void) efi_memmap_unmap(); } +static bool efifb_overlaps_pci_range(const struct of_pci_range *range) +{ + u64 fb_base = screen_info.lfb_base; + + if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE) + fb_base |= (u64)(unsigned long)screen_info.ext_lfb_base << 32; + + return fb_base >= range->cpu_addr && + fb_base < (range->cpu_addr + range->size); +} + +static struct device_node *find_pci_overlap_node(void) +{ + struct device_node *np; + + for_each_node_by_type(np, "pci") { + struct of_pci_range_parser parser; + struct of_pci_range range; + int err; + + err = of_pci_range_parser_init(&parser, np); + if (err) { + pr_warn("of_pci_range_parser_init() failed: %d\n", err); + continue; + } + + for_each_of_pci_range(&parser, &range) + if (efifb_overlaps_pci_range(&range)) + return np; + } + return NULL; +} + +/* + * If the efifb framebuffer is backed by a PCI graphics controller, we have + * to ensure that this relation is expressed using a device link when + * running in DT mode, or the probe order may be reversed, resulting in a + * resource reservation conflict on the memory window that the efifb + * framebuffer steals from the PCIe host bridge. + */ +static int efifb_add_links(const struct fwnode_handle *fwnode, + struct device *dev) +{ + struct device_node *sup_np; + struct device *sup_dev; + + sup_np = find_pci_overlap_node(); + + /* + * If there's no PCI graphics controller backing the efifb, we are + * done here. 
+ */ + if (!sup_np) + return 0; + + sup_dev = get_dev_from_fwnode(&sup_np->fwnode); + of_node_put(sup_np); + + /* + * Return -ENODEV if the PCI graphics controller device hasn't been + * registered yet. This ensures that efifb isn't allowed to probe + * and that this function is retried when new devices are + * registered. + */ + if (!sup_dev) + return -ENODEV; + + /* + * If this fails, retrying this function at a later point won't + * change anything. So, don't return an error after this. + */ + if (!device_link_add(dev, sup_dev, 0)) + dev_warn(dev, "device_link_add() failed\n"); + + put_device(sup_dev); + + return 0; +} + +static const struct fwnode_operations efifb_fwnode_ops = { + .add_links = efifb_add_links, +}; + +static struct fwnode_handle efifb_fwnode = { + .ops = &efifb_fwnode_ops, +}; + static int __init register_gop_device(void) { - void *pd; + struct platform_device *pd; + int err; if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI) return 0; - pd = platform_device_register_data(NULL, "efi-framebuffer", 0, - &screen_info, sizeof(screen_info)); - return PTR_ERR_OR_ZERO(pd); + pd = platform_device_alloc("efi-framebuffer", 0); + if (!pd) + return -ENOMEM; + + if (IS_ENABLED(CONFIG_PCI)) + pd->dev.fwnode = &efifb_fwnode; + + err = platform_device_add_data(pd, &screen_info, sizeof(screen_info)); + if (err) + return err; + + return platform_device_add(pd); } subsys_initcall(register_gop_device); diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c index b1395133389e..d3067cbd5114 100644 --- a/drivers/firmware/efi/capsule-loader.c +++ b/drivers/firmware/efi/capsule-loader.c @@ -11,6 +11,7 @@ #include <linux/module.h> #include <linux/miscdevice.h> #include <linux/highmem.h> +#include <linux/io.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/efi.h> diff --git a/drivers/firmware/efi/earlycon.c b/drivers/firmware/efi/earlycon.c index c9a0efca17b0..5d4f84781aa0 100644 --- a/drivers/firmware/efi/earlycon.c +++ b/drivers/firmware/efi/earlycon.c @@ -13,18 +13,58 @@ #include <asm/early_ioremap.h> +static const struct console *earlycon_console __initdata; static const struct font_desc *font; static u32 efi_x, efi_y; static u64 fb_base; -static pgprot_t fb_prot; +static bool fb_wb; +static void *efi_fb; + +/* + * EFI earlycon needs to use early_memremap() to map the framebuffer. + * But early_memremap() is not usable for 'earlycon=efifb keep_bootcon'; + * memremap() should be used instead. memremap() will be available after + * paging_init(), which runs earlier than initcall callbacks. Hence this + * early initcall, efi_earlycon_remap_fb(), maps the whole EFI framebuffer. + */ +static int __init efi_earlycon_remap_fb(void) +{ + /* bail if there is no bootconsole or it has been disabled already */ + if (!earlycon_console || !(earlycon_console->flags & CON_ENABLED)) + return 0; + + efi_fb = memremap(fb_base, screen_info.lfb_size, + fb_wb ? MEMREMAP_WB : MEMREMAP_WC); + + return efi_fb ? 0 : -ENOMEM; +} +early_initcall(efi_earlycon_remap_fb); + +static int __init efi_earlycon_unmap_fb(void) +{ + /* unmap the bootconsole fb unless keep_bootcon has left it enabled */ + if (efi_fb && !(earlycon_console->flags & CON_ENABLED)) + memunmap(efi_fb); + return 0; +} +late_initcall(efi_earlycon_unmap_fb); static __ref void *efi_earlycon_map(unsigned long start, unsigned long len) { + pgprot_t fb_prot; + + if (efi_fb) + return efi_fb + start; + + fb_prot = fb_wb ?
PAGE_KERNEL : pgprot_writecombine(PAGE_KERNEL); return early_memremap_prot(fb_base + start, len, pgprot_val(fb_prot)); } static __ref void efi_earlycon_unmap(void *addr, unsigned long len) { + if (efi_fb) + return; + early_memunmap(addr, len); } @@ -176,10 +216,7 @@ static int __init efi_earlycon_setup(struct earlycon_device *device, if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE) fb_base |= (u64)screen_info.ext_lfb_base << 32; - if (opt && !strcmp(opt, "ram")) - fb_prot = PAGE_KERNEL; - else - fb_prot = pgprot_writecombine(PAGE_KERNEL); + fb_wb = opt && !strcmp(opt, "ram"); si = &screen_info; xres = si->lfb_width; @@ -201,6 +238,7 @@ static int __init efi_earlycon_setup(struct earlycon_device *device, efi_earlycon_scroll_up(); device->con->write = efi_earlycon_write; + earlycon_console = device->con; return 0; } EARLYCON_DECLARE(efifb, efi_earlycon_setup); diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index d101f072c8f8..621220ab3d0e 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -681,7 +681,7 @@ device_initcall(efi_load_efivars); { name }, \ { prop }, \ offsetof(struct efi_fdt_params, field), \ - FIELD_SIZEOF(struct efi_fdt_params, field) \ + sizeof_field(struct efi_fdt_params, field) \ } struct params { @@ -908,7 +908,7 @@ u64 efi_mem_attributes(unsigned long phys_addr) * * Search in the EFI memory map for the region covering @phys_addr. * Returns the EFI memory type if the region was found in the memory - * map, EFI_RESERVED_TYPE (zero) otherwise. + * map, -EINVAL otherwise. */ int efi_mem_type(unsigned long phys_addr) { @@ -979,6 +979,24 @@ static int __init efi_memreserve_map_root(void) return 0; } +static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size) +{ + struct resource *res, *parent; + + res = kzalloc(sizeof(struct resource), GFP_ATOMIC); + if (!res) + return -ENOMEM; + + res->name = "reserved"; + res->flags = IORESOURCE_MEM; + res->start = addr; + res->end = addr + size - 1; + + /* we expect a conflict with a 'System RAM' region */ + parent = request_resource_conflict(&iomem_resource, res); + return parent ? 
request_resource(parent, res) : 0; +} + int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size) { struct linux_efi_memreserve *rsv; @@ -1003,7 +1021,7 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size) rsv->entry[index].size = size; memunmap(rsv); - return 0; + return efi_mem_reserve_iomem(addr, size); } memunmap(rsv); } @@ -1013,6 +1031,12 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size) if (!rsv) return -ENOMEM; + rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K); + if (rc) { + free_page((unsigned long)rsv); + return rc; + } + /* * The memremap() call above assumes that a linux_efi_memreserve entry * never crosses a page boundary, so let's ensure that this remains true @@ -1029,7 +1053,7 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size) efi_memreserve_root->next = __pa(rsv); spin_unlock(&efi_mem_reserve_persistent_lock); - return 0; + return efi_mem_reserve_iomem(addr, size); } static int __init efi_memreserve_root_init(void) diff --git a/drivers/firmware/efi/fake_mem.c b/drivers/firmware/efi/fake_mem.c index bb9fc70d0cfa..6e0f34a38171 100644 --- a/drivers/firmware/efi/fake_mem.c +++ b/drivers/firmware/efi/fake_mem.c @@ -34,46 +34,45 @@ static int __init cmp_fake_mem(const void *x1, const void *x2) return 0; } -void __init efi_fake_memmap(void) +static void __init efi_fake_range(struct efi_mem_range *efi_range) { + struct efi_memory_map_data data = { 0 }; int new_nr_map = efi.memmap.nr_map; efi_memory_desc_t *md; - phys_addr_t new_memmap_phy; void *new_memmap; - int i; - - if (!efi_enabled(EFI_MEMMAP) || !nr_fake_mem) - return; /* count up the number of EFI memory descriptors */ - for (i = 0; i < nr_fake_mem; i++) { - for_each_efi_memory_desc(md) { - struct range *r = &efi_fake_mems[i].range; - - new_nr_map += efi_memmap_split_count(md, r); - } - } + for_each_efi_memory_desc(md) + new_nr_map += efi_memmap_split_count(md, &efi_range->range); /* allocate memory for new EFI memmap */ - new_memmap_phy = efi_memmap_alloc(new_nr_map); - if (!new_memmap_phy) + if (efi_memmap_alloc(new_nr_map, &data) != 0) return; /* create new EFI memmap */ - new_memmap = early_memremap(new_memmap_phy, - efi.memmap.desc_size * new_nr_map); + new_memmap = early_memremap(data.phys_map, data.size); if (!new_memmap) { - memblock_free(new_memmap_phy, efi.memmap.desc_size * new_nr_map); + __efi_memmap_free(data.phys_map, data.size, data.flags); return; } - for (i = 0; i < nr_fake_mem; i++) - efi_memmap_insert(&efi.memmap, new_memmap, &efi_fake_mems[i]); + efi_memmap_insert(&efi.memmap, new_memmap, efi_range); /* swap into new EFI memmap */ - early_memunmap(new_memmap, efi.memmap.desc_size * new_nr_map); + early_memunmap(new_memmap, data.size); + + efi_memmap_install(&data); +} + +void __init efi_fake_memmap(void) +{ + int i; - efi_memmap_install(new_memmap_phy, new_nr_map); + if (!efi_enabled(EFI_MEMMAP) || !nr_fake_mem) + return; + + for (i = 0; i < nr_fake_mem; i++) + efi_fake_range(&efi_fake_mems[i]); /* print new EFI memmap */ efi_print_memmap(); diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index c35f893897e1..98a81576213d 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -39,7 +39,7 @@ OBJECT_FILES_NON_STANDARD := y KCOV_INSTRUMENT := n lib-y := efi-stub-helper.o gop.o secureboot.o tpm.o \ - random.o + random.o pci.o # include the stub's generic dependencies from lib/ when building for ARM/arm64 arm-deps-y := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c
fdt_sw.c diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c index 817237ce2420..7bbef4a67350 100644 --- a/drivers/firmware/efi/libstub/arm-stub.c +++ b/drivers/firmware/efi/libstub/arm-stub.c @@ -37,16 +37,14 @@ static u64 virtmap_base = EFI_RT_VIRTUAL_BASE; -void efi_char16_printk(efi_system_table_t *sys_table_arg, - efi_char16_t *str) -{ - struct efi_simple_text_output_protocol *out; +static efi_system_table_t *__efistub_global sys_table; - out = (struct efi_simple_text_output_protocol *)sys_table_arg->con_out; - out->output_string(out, str); +__pure efi_system_table_t *efi_system_table(void) +{ + return sys_table; } -static struct screen_info *setup_graphics(efi_system_table_t *sys_table_arg) +static struct screen_info *setup_graphics(void) { efi_guid_t gop_proto = EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID; efi_status_t status; @@ -55,27 +53,27 @@ static struct screen_info *setup_graphics(efi_system_table_t *sys_table_arg) struct screen_info *si = NULL; size = 0; - status = efi_call_early(locate_handle, EFI_LOCATE_BY_PROTOCOL, - &gop_proto, NULL, &size, gop_handle); + status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, + &gop_proto, NULL, &size, gop_handle); if (status == EFI_BUFFER_TOO_SMALL) { - si = alloc_screen_info(sys_table_arg); + si = alloc_screen_info(); if (!si) return NULL; - efi_setup_gop(sys_table_arg, si, &gop_proto, size); + efi_setup_gop(si, &gop_proto, size); } return si; } -void install_memreserve_table(efi_system_table_t *sys_table_arg) +void install_memreserve_table(void) { struct linux_efi_memreserve *rsv; efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID; efi_status_t status; - status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv), - (void **)&rsv); + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv), + (void **)&rsv); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table_arg, "Failed to allocate memreserve entry!\n"); + pr_efi_err("Failed to allocate memreserve entry!\n"); return; } @@ -83,11 +81,10 @@ void install_memreserve_table(efi_system_table_t *sys_table_arg) rsv->size = 0; atomic_set(&rsv->count, 0); - status = efi_call_early(install_configuration_table, - &memreserve_table_guid, - rsv); + status = efi_bs_call(install_configuration_table, + &memreserve_table_guid, rsv); if (status != EFI_SUCCESS) - pr_efi_err(sys_table_arg, "Failed to install memreserve config table!\n"); + pr_efi_err("Failed to install memreserve config table!\n"); } @@ -97,8 +94,7 @@ void install_memreserve_table(efi_system_table_t *sys_table_arg) * must be reserved. On failure it is required to free * all allocations it has made. */ -efi_status_t handle_kernel_image(efi_system_table_t *sys_table, - unsigned long *image_addr, +efi_status_t handle_kernel_image(unsigned long *image_addr, unsigned long *image_size, unsigned long *reserve_addr, unsigned long *reserve_size, @@ -110,7 +106,7 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table, * for both architectures, with the arch-specific code provided in the * handle_kernel_image() function.
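* (handle_kernel_image() is responsible for placing the kernel image in memory and for reporting the final load address and size back through its parameters.)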
*/ -unsigned long efi_entry(void *handle, efi_system_table_t *sys_table, +unsigned long efi_entry(void *handle, efi_system_table_t *sys_table_arg, unsigned long *image_addr) { efi_loaded_image_t *image; @@ -131,11 +127,13 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table, enum efi_secureboot_mode secure_boot; struct screen_info *si; + sys_table = sys_table_arg; + /* Check if we were booted by the EFI firmware */ if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) goto fail; - status = check_platform_features(sys_table); + status = check_platform_features(); if (status != EFI_SUCCESS) goto fail; @@ -147,13 +145,13 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table, status = sys_table->boottime->handle_protocol(handle, &loaded_image_proto, (void *)&image); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table, "Failed to get loaded image protocol\n"); + pr_efi_err("Failed to get loaded image protocol\n"); goto fail; } - dram_base = get_dram_base(sys_table); + dram_base = get_dram_base(); if (dram_base == EFI_ERROR) { - pr_efi_err(sys_table, "Failed to find DRAM base\n"); + pr_efi_err("Failed to find DRAM base\n"); goto fail; } @@ -162,9 +160,9 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table, * protocol. We are going to copy the command line into the * device tree, so this can be allocated anywhere. */ - cmdline_ptr = efi_convert_cmdline(sys_table, image, &cmdline_size); + cmdline_ptr = efi_convert_cmdline(image, &cmdline_size); if (!cmdline_ptr) { - pr_efi_err(sys_table, "getting command line via LOADED_IMAGE_PROTOCOL\n"); + pr_efi_err("getting command line via LOADED_IMAGE_PROTOCOL\n"); goto fail; } @@ -176,25 +174,25 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table, if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && cmdline_size > 0) efi_parse_options(cmdline_ptr); - pr_efi(sys_table, "Booting Linux Kernel...\n"); + pr_efi("Booting Linux Kernel...\n"); - si = setup_graphics(sys_table); + si = setup_graphics(); - status = handle_kernel_image(sys_table, image_addr, &image_size, + status = handle_kernel_image(image_addr, &image_size, &reserve_addr, &reserve_size, dram_base, image); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table, "Failed to relocate kernel\n"); + pr_efi_err("Failed to relocate kernel\n"); goto fail_free_cmdline; } - efi_retrieve_tpm2_eventlog(sys_table); + efi_retrieve_tpm2_eventlog(); /* Ask the firmware to clear memory on unclean shutdown */ - efi_enable_reset_attack_mitigation(sys_table); + efi_enable_reset_attack_mitigation(); - secure_boot = efi_get_secureboot(sys_table); + secure_boot = efi_get_secureboot(); /* * Unauthenticated device tree data is a security hazard, so ignore @@ -204,39 +202,38 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table, if (!IS_ENABLED(CONFIG_EFI_ARMSTUB_DTB_LOADER) || secure_boot != efi_secureboot_mode_disabled) { if (strstr(cmdline_ptr, "dtb=")) - pr_efi(sys_table, "Ignoring DTB from command line.\n"); + pr_efi("Ignoring DTB from command line.\n"); } else { - status = handle_cmdline_files(sys_table, image, cmdline_ptr, - "dtb=", + status = handle_cmdline_files(image, cmdline_ptr, "dtb=", ~0UL, &fdt_addr, &fdt_size); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table, "Failed to load device tree!\n"); + pr_efi_err("Failed to load device tree!\n"); goto fail_free_image; } } if (fdt_addr) { - pr_efi(sys_table, "Using DTB from command line\n"); + pr_efi("Using DTB from command line\n"); } else { /* Look for a device tree configuration table 
entry. */ - fdt_addr = (uintptr_t)get_fdt(sys_table, &fdt_size); + fdt_addr = (uintptr_t)get_fdt(&fdt_size); if (fdt_addr) - pr_efi(sys_table, "Using DTB from configuration table\n"); + pr_efi("Using DTB from configuration table\n"); } if (!fdt_addr) - pr_efi(sys_table, "Generating empty DTB\n"); + pr_efi("Generating empty DTB\n"); - status = handle_cmdline_files(sys_table, image, cmdline_ptr, "initrd=", + status = handle_cmdline_files(image, cmdline_ptr, "initrd=", efi_get_max_initrd_addr(dram_base, *image_addr), (unsigned long *)&initrd_addr, (unsigned long *)&initrd_size); if (status != EFI_SUCCESS) - pr_efi_err(sys_table, "Failed initrd from command line!\n"); + pr_efi_err("Failed initrd from command line!\n"); - efi_random_get_seed(sys_table); + efi_random_get_seed(); /* hibernation expects the runtime regions to stay in the same place */ if (!IS_ENABLED(CONFIG_HIBERNATION) && !nokaslr()) { @@ -251,18 +248,17 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table, EFI_RT_VIRTUAL_SIZE; u32 rnd; - status = efi_get_random_bytes(sys_table, sizeof(rnd), - (u8 *)&rnd); + status = efi_get_random_bytes(sizeof(rnd), (u8 *)&rnd); if (status == EFI_SUCCESS) { virtmap_base = EFI_RT_VIRTUAL_BASE + (((headroom >> 21) * rnd) >> (32 - 21)); } } - install_memreserve_table(sys_table); + install_memreserve_table(); new_fdt_addr = fdt_addr; - status = allocate_new_fdt_and_exit_boot(sys_table, handle, + status = allocate_new_fdt_and_exit_boot(handle, &new_fdt_addr, efi_get_max_fdt_addr(dram_base), initrd_addr, initrd_size, cmdline_ptr, fdt_addr, fdt_size); @@ -275,17 +271,17 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table, if (status == EFI_SUCCESS) return new_fdt_addr; - pr_efi_err(sys_table, "Failed to update FDT and exit boot services\n"); + pr_efi_err("Failed to update FDT and exit boot services\n"); - efi_free(sys_table, initrd_size, initrd_addr); - efi_free(sys_table, fdt_size, fdt_addr); + efi_free(initrd_size, initrd_addr); + efi_free(fdt_size, fdt_addr); fail_free_image: - efi_free(sys_table, image_size, *image_addr); - efi_free(sys_table, reserve_size, reserve_addr); + efi_free(image_size, *image_addr); + efi_free(reserve_size, reserve_addr); fail_free_cmdline: - free_screen_info(sys_table, si); - efi_free(sys_table, cmdline_size, (unsigned long)cmdline_ptr); + free_screen_info(si); + efi_free(cmdline_size, (unsigned long)cmdline_ptr); fail: return EFI_ERROR; } diff --git a/drivers/firmware/efi/libstub/arm32-stub.c b/drivers/firmware/efi/libstub/arm32-stub.c index 4566640de650..7b2a6382b647 100644 --- a/drivers/firmware/efi/libstub/arm32-stub.c +++ b/drivers/firmware/efi/libstub/arm32-stub.c @@ -7,7 +7,7 @@ #include "efistub.h" -efi_status_t check_platform_features(efi_system_table_t *sys_table_arg) +efi_status_t check_platform_features(void) { int block; @@ -18,7 +18,7 @@ efi_status_t check_platform_features(efi_system_table_t *sys_table_arg) /* LPAE kernels need compatible hardware */ block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0); if (block < 5) { - pr_efi_err(sys_table_arg, "This LPAE kernel is not supported by your CPU\n"); + pr_efi_err("This LPAE kernel is not supported by your CPU\n"); return EFI_UNSUPPORTED; } return EFI_SUCCESS; @@ -26,7 +26,7 @@ efi_status_t check_platform_features(efi_system_table_t *sys_table_arg) static efi_guid_t screen_info_guid = LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID; -struct screen_info *alloc_screen_info(efi_system_table_t *sys_table_arg) +struct screen_info *alloc_screen_info(void) { struct screen_info *si; efi_status_t 
status; @@ -37,32 +37,31 @@ struct screen_info *alloc_screen_info(efi_system_table_t *sys_table_arg) * its contents while we hand over to the kernel proper from the * decompressor. */ - status = efi_call_early(allocate_pool, EFI_RUNTIME_SERVICES_DATA, - sizeof(*si), (void **)&si); + status = efi_bs_call(allocate_pool, EFI_RUNTIME_SERVICES_DATA, + sizeof(*si), (void **)&si); if (status != EFI_SUCCESS) return NULL; - status = efi_call_early(install_configuration_table, - &screen_info_guid, si); + status = efi_bs_call(install_configuration_table, + &screen_info_guid, si); if (status == EFI_SUCCESS) return si; - efi_call_early(free_pool, si); + efi_bs_call(free_pool, si); return NULL; } -void free_screen_info(efi_system_table_t *sys_table_arg, struct screen_info *si) +void free_screen_info(struct screen_info *si) { if (!si) return; - efi_call_early(install_configuration_table, &screen_info_guid, NULL); - efi_call_early(free_pool, si); + efi_bs_call(install_configuration_table, &screen_info_guid, NULL); + efi_bs_call(free_pool, si); } -static efi_status_t reserve_kernel_base(efi_system_table_t *sys_table_arg, - unsigned long dram_base, +static efi_status_t reserve_kernel_base(unsigned long dram_base, unsigned long *reserve_addr, unsigned long *reserve_size) { @@ -92,8 +91,8 @@ static efi_status_t reserve_kernel_base(efi_system_table_t *sys_table_arg, */ alloc_addr = dram_base + MAX_UNCOMP_KERNEL_SIZE; nr_pages = MAX_UNCOMP_KERNEL_SIZE / EFI_PAGE_SIZE; - status = efi_call_early(allocate_pages, EFI_ALLOCATE_MAX_ADDRESS, - EFI_BOOT_SERVICES_DATA, nr_pages, &alloc_addr); + status = efi_bs_call(allocate_pages, EFI_ALLOCATE_MAX_ADDRESS, + EFI_BOOT_SERVICES_DATA, nr_pages, &alloc_addr); if (status == EFI_SUCCESS) { if (alloc_addr == dram_base) { *reserve_addr = alloc_addr; @@ -119,10 +118,9 @@ static efi_status_t reserve_kernel_base(efi_system_table_t *sys_table_arg, * released to the OS after ExitBootServices(), the decompressor can * safely overwrite them. 
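* (Per the UEFI spec, EFI_BOOT_SERVICES_CODE and EFI_BOOT_SERVICES_DATA regions are handed back to the OS once ExitBootServices() has been called.)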
*/ - status = efi_get_memory_map(sys_table_arg, &map); + status = efi_get_memory_map(&map); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table_arg, - "reserve_kernel_base(): Unable to retrieve memory map.\n"); + pr_efi_err("reserve_kernel_base(): Unable to retrieve memory map.\n"); return status; } @@ -158,14 +156,13 @@ static efi_status_t reserve_kernel_base(efi_system_table_t *sys_table_arg, start = max(start, (u64)dram_base); end = min(end, (u64)dram_base + MAX_UNCOMP_KERNEL_SIZE); - status = efi_call_early(allocate_pages, - EFI_ALLOCATE_ADDRESS, - EFI_LOADER_DATA, - (end - start) / EFI_PAGE_SIZE, - &start); + status = efi_bs_call(allocate_pages, + EFI_ALLOCATE_ADDRESS, + EFI_LOADER_DATA, + (end - start) / EFI_PAGE_SIZE, + &start); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table_arg, - "reserve_kernel_base(): alloc failed.\n"); + pr_efi_err("reserve_kernel_base(): alloc failed.\n"); goto out; } break; @@ -188,12 +185,11 @@ static efi_status_t reserve_kernel_base(efi_system_table_t *sys_table_arg, status = EFI_SUCCESS; out: - efi_call_early(free_pool, memory_map); + efi_bs_call(free_pool, memory_map); return status; } -efi_status_t handle_kernel_image(efi_system_table_t *sys_table, - unsigned long *image_addr, +efi_status_t handle_kernel_image(unsigned long *image_addr, unsigned long *image_size, unsigned long *reserve_addr, unsigned long *reserve_size, @@ -221,10 +217,9 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table, */ kernel_base += TEXT_OFFSET - 5 * PAGE_SIZE; - status = reserve_kernel_base(sys_table, kernel_base, reserve_addr, - reserve_size); + status = reserve_kernel_base(kernel_base, reserve_addr, reserve_size); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table, "Unable to allocate memory for uncompressed kernel.\n"); + pr_efi_err("Unable to allocate memory for uncompressed kernel.\n"); return status; } @@ -233,12 +228,11 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table, * memory window. */ *image_size = image->image_size; - status = efi_relocate_kernel(sys_table, image_addr, *image_size, - *image_size, + status = efi_relocate_kernel(image_addr, *image_size, *image_size, kernel_base + MAX_UNCOMP_KERNEL_SIZE, 0, 0); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table, "Failed to relocate kernel.\n"); - efi_free(sys_table, *reserve_size, *reserve_addr); + pr_efi_err("Failed to relocate kernel.\n"); + efi_free(*reserve_size, *reserve_addr); *reserve_size = 0; return status; } @@ -249,10 +243,10 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table, * address at which the zImage is loaded. 
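* (ZIMAGE_OFFSET_LIMIT caps how far above the start of DRAM the zImage may land; the check below treats anything beyond it as having run out of usable low memory.)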
*/ if (*image_addr + *image_size > dram_base + ZIMAGE_OFFSET_LIMIT) { - pr_efi_err(sys_table, "Failed to relocate kernel, no low memory available.\n"); - efi_free(sys_table, *reserve_size, *reserve_addr); + pr_efi_err("Failed to relocate kernel, no low memory available.\n"); + efi_free(*reserve_size, *reserve_addr); *reserve_size = 0; - efi_free(sys_table, *image_size, *image_addr); + efi_free(*image_size, *image_addr); *image_size = 0; return EFI_LOAD_ERROR; } diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c index 1550d244e996..2915b44132e6 100644 --- a/drivers/firmware/efi/libstub/arm64-stub.c +++ b/drivers/firmware/efi/libstub/arm64-stub.c @@ -21,7 +21,7 @@ #include "efistub.h" -efi_status_t check_platform_features(efi_system_table_t *sys_table_arg) +efi_status_t check_platform_features(void) { u64 tg; @@ -32,16 +32,15 @@ efi_status_t check_platform_features(efi_system_table_t *sys_table_arg) tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_TGRAN_SHIFT) & 0xf; if (tg != ID_AA64MMFR0_TGRAN_SUPPORTED) { if (IS_ENABLED(CONFIG_ARM64_64K_PAGES)) - pr_efi_err(sys_table_arg, "This 64 KB granular kernel is not supported by your CPU\n"); + pr_efi_err("This 64 KB granular kernel is not supported by your CPU\n"); else - pr_efi_err(sys_table_arg, "This 16 KB granular kernel is not supported by your CPU\n"); + pr_efi_err("This 16 KB granular kernel is not supported by your CPU\n"); return EFI_UNSUPPORTED; } return EFI_SUCCESS; } -efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg, - unsigned long *image_addr, +efi_status_t handle_kernel_image(unsigned long *image_addr, unsigned long *image_size, unsigned long *reserve_addr, unsigned long *reserve_size, @@ -56,17 +55,16 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg, if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { if (!nokaslr()) { - status = efi_get_random_bytes(sys_table_arg, - sizeof(phys_seed), + status = efi_get_random_bytes(sizeof(phys_seed), (u8 *)&phys_seed); if (status == EFI_NOT_FOUND) { - pr_efi(sys_table_arg, "EFI_RNG_PROTOCOL unavailable, no randomness supplied\n"); + pr_efi("EFI_RNG_PROTOCOL unavailable, no randomness supplied\n"); } else if (status != EFI_SUCCESS) { - pr_efi_err(sys_table_arg, "efi_get_random_bytes() failed\n"); + pr_efi_err("efi_get_random_bytes() failed\n"); return status; } } else { - pr_efi(sys_table_arg, "KASLR disabled on kernel command line\n"); + pr_efi("KASLR disabled on kernel command line\n"); } } @@ -108,7 +106,7 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg, * locate the kernel at a randomized offset in physical memory. 
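* (The placement below is seeded with the EFI_RNG_PROTOCOL output obtained as phys_seed above; without that randomness, physical KASLR is effectively skipped.)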
*/ *reserve_size = kernel_memsize + offset; - status = efi_random_alloc(sys_table_arg, *reserve_size, + status = efi_random_alloc(*reserve_size, MIN_KIMG_ALIGN, reserve_addr, (u32)phys_seed); @@ -131,19 +129,19 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg, *image_addr = *reserve_addr = preferred_offset; *reserve_size = round_up(kernel_memsize, EFI_ALLOC_ALIGN); - status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS, - EFI_LOADER_DATA, - *reserve_size / EFI_PAGE_SIZE, - (efi_physical_addr_t *)reserve_addr); + status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS, + EFI_LOADER_DATA, + *reserve_size / EFI_PAGE_SIZE, + (efi_physical_addr_t *)reserve_addr); } if (status != EFI_SUCCESS) { *reserve_size = kernel_memsize + TEXT_OFFSET; - status = efi_low_alloc(sys_table_arg, *reserve_size, + status = efi_low_alloc(*reserve_size, MIN_KIMG_ALIGN, reserve_addr); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table_arg, "Failed to relocate kernel\n"); + pr_efi_err("Failed to relocate kernel\n"); *reserve_size = 0; return status; } diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c index e02579907f2e..74ddfb496140 100644 --- a/drivers/firmware/efi/libstub/efi-stub-helper.c +++ b/drivers/firmware/efi/libstub/efi-stub-helper.c @@ -27,24 +27,26 @@ */ #define EFI_READ_CHUNK_SIZE (1024 * 1024) -static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE; +static unsigned long efi_chunk_size = EFI_READ_CHUNK_SIZE; -static int __section(.data) __nokaslr; -static int __section(.data) __quiet; -static int __section(.data) __novamap; -static bool __section(.data) efi_nosoftreserve; +static bool __efistub_global efi_nokaslr; +static bool __efistub_global efi_quiet; +static bool __efistub_global efi_novamap; +static bool __efistub_global efi_nosoftreserve; +static bool __efistub_global efi_disable_pci_dma = + IS_ENABLED(CONFIG_EFI_DISABLE_PCI_DMA); -int __pure nokaslr(void) +bool __pure nokaslr(void) { - return __nokaslr; + return efi_nokaslr; } -int __pure is_quiet(void) +bool __pure is_quiet(void) { - return __quiet; + return efi_quiet; } -int __pure novamap(void) +bool __pure novamap(void) { - return __novamap; + return efi_novamap; } bool __pure __efi_soft_reserve_enabled(void) { @@ -58,7 +60,7 @@ struct file_info { u64 size; }; -void efi_printk(efi_system_table_t *sys_table_arg, char *str) +void efi_printk(char *str) { char *s8; @@ -68,10 +70,10 @@ void efi_printk(efi_system_table_t *sys_table_arg, char *str) ch[0] = *s8; if (*s8 == '\n') { efi_char16_t nl[2] = { '\r', 0 }; - efi_char16_printk(sys_table_arg, nl); + efi_char16_printk(nl); } - efi_char16_printk(sys_table_arg, ch); + efi_char16_printk(ch); } } @@ -84,8 +86,7 @@ static inline bool mmap_has_headroom(unsigned long buff_size, return slack / desc_size >= EFI_MMAP_NR_SLACK_SLOTS; } -efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg, - struct efi_boot_memmap *map) +efi_status_t efi_get_memory_map(struct efi_boot_memmap *map) { efi_memory_desc_t *m = NULL; efi_status_t status; @@ -96,19 +97,19 @@ efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg, *map->map_size = *map->desc_size * 32; *map->buff_size = *map->map_size; again: - status = efi_call_early(allocate_pool, EFI_LOADER_DATA, - *map->map_size, (void **)&m); + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, + *map->map_size, (void **)&m); if (status != EFI_SUCCESS) goto fail; *map->desc_size = 0; key = 0; - status = efi_call_early(get_memory_map, map->map_size, m, - &key, 
map->desc_size, &desc_version); + status = efi_bs_call(get_memory_map, map->map_size, m, + &key, map->desc_size, &desc_version); if (status == EFI_BUFFER_TOO_SMALL || !mmap_has_headroom(*map->buff_size, *map->map_size, *map->desc_size)) { - efi_call_early(free_pool, m); + efi_bs_call(free_pool, m); /* * Make sure there are some entries of headroom so that the * buffer can be reused for a new map after allocations are @@ -122,7 +123,7 @@ again: } if (status != EFI_SUCCESS) - efi_call_early(free_pool, m); + efi_bs_call(free_pool, m); if (map->key_ptr && status == EFI_SUCCESS) *map->key_ptr = key; @@ -135,7 +136,7 @@ fail: } -unsigned long get_dram_base(efi_system_table_t *sys_table_arg) +unsigned long get_dram_base(void) { efi_status_t status; unsigned long map_size, buff_size; @@ -151,7 +152,7 @@ unsigned long get_dram_base(efi_system_table_t *sys_table_arg) boot_map.key_ptr = NULL; boot_map.buff_size = &buff_size; - status = efi_get_memory_map(sys_table_arg, &boot_map); + status = efi_get_memory_map(&boot_map); if (status != EFI_SUCCESS) return membase; @@ -164,7 +165,7 @@ unsigned long get_dram_base(efi_system_table_t *sys_table_arg) } } - efi_call_early(free_pool, map.map); + efi_bs_call(free_pool, map.map); return membase; } @@ -172,8 +173,7 @@ unsigned long get_dram_base(efi_system_table_t *sys_table_arg) /* * Allocate at the highest possible address that is not above 'max'. */ -efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg, - unsigned long size, unsigned long align, +efi_status_t efi_high_alloc(unsigned long size, unsigned long align, unsigned long *addr, unsigned long max) { unsigned long map_size, desc_size, buff_size; @@ -191,7 +191,7 @@ efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg, boot_map.key_ptr = NULL; boot_map.buff_size = &buff_size; - status = efi_get_memory_map(sys_table_arg, &boot_map); + status = efi_get_memory_map(&boot_map); if (status != EFI_SUCCESS) goto fail; @@ -251,9 +251,8 @@ again: if (!max_addr) status = EFI_NOT_FOUND; else { - status = efi_call_early(allocate_pages, - EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA, - nr_pages, &max_addr); + status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS, + EFI_LOADER_DATA, nr_pages, &max_addr); if (status != EFI_SUCCESS) { max = max_addr; max_addr = 0; @@ -263,7 +262,7 @@ again: *addr = max_addr; } - efi_call_early(free_pool, map); + efi_bs_call(free_pool, map); fail: return status; } @@ -271,8 +270,7 @@ fail: /* * Allocate at the lowest possible address that is not below 'min'.
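* (Used e.g. by efi_relocate_kernel() below when allocation at the preferred address fails; efi_low_alloc(), seen in arm64-stub.c above, is presumably the min == 0 convenience wrapper around this helper.)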
*/ -efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg, - unsigned long size, unsigned long align, +efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align, unsigned long *addr, unsigned long min) { unsigned long map_size, desc_size, buff_size; @@ -289,7 +287,7 @@ efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg, boot_map.key_ptr = NULL; boot_map.buff_size = &buff_size; - status = efi_get_memory_map(sys_table_arg, &boot_map); + status = efi_get_memory_map(&boot_map); if (status != EFI_SUCCESS) goto fail; @@ -331,9 +329,8 @@ efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg, if ((start + size) > end) continue; - status = efi_call_early(allocate_pages, - EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA, - nr_pages, &start); + status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS, + EFI_LOADER_DATA, nr_pages, &start); if (status == EFI_SUCCESS) { *addr = start; break; @@ -343,13 +340,12 @@ efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg, if (i == map_size / desc_size) status = EFI_NOT_FOUND; - efi_call_early(free_pool, map); + efi_bs_call(free_pool, map); fail: return status; } -void efi_free(efi_system_table_t *sys_table_arg, unsigned long size, - unsigned long addr) +void efi_free(unsigned long size, unsigned long addr) { unsigned long nr_pages; @@ -357,12 +353,11 @@ void efi_free(efi_system_table_t *sys_table_arg, unsigned long size, return; nr_pages = round_up(size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE; - efi_call_early(free_pages, addr, nr_pages); + efi_bs_call(free_pages, addr, nr_pages); } -static efi_status_t efi_file_size(efi_system_table_t *sys_table_arg, void *__fh, - efi_char16_t *filename_16, void **handle, - u64 *file_sz) +static efi_status_t efi_file_size(void *__fh, efi_char16_t *filename_16, + void **handle, u64 *file_sz) { efi_file_handle_t *h, *fh = __fh; efi_file_info_t *info; @@ -370,81 +365,75 @@ static efi_status_t efi_file_size(efi_system_table_t *sys_table_arg, void *__fh, efi_guid_t info_guid = EFI_FILE_INFO_ID; unsigned long info_sz; - status = efi_call_proto(efi_file_handle, open, fh, &h, filename_16, - EFI_FILE_MODE_READ, (u64)0); + status = fh->open(fh, &h, filename_16, EFI_FILE_MODE_READ, 0); if (status != EFI_SUCCESS) { - efi_printk(sys_table_arg, "Failed to open file: "); - efi_char16_printk(sys_table_arg, filename_16); - efi_printk(sys_table_arg, "\n"); + efi_printk("Failed to open file: "); + efi_char16_printk(filename_16); + efi_printk("\n"); return status; } *handle = h; info_sz = 0; - status = efi_call_proto(efi_file_handle, get_info, h, &info_guid, - &info_sz, NULL); + status = h->get_info(h, &info_guid, &info_sz, NULL); if (status != EFI_BUFFER_TOO_SMALL) { - efi_printk(sys_table_arg, "Failed to get file info size\n"); + efi_printk("Failed to get file info size\n"); return status; } grow: - status = efi_call_early(allocate_pool, EFI_LOADER_DATA, - info_sz, (void **)&info); + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, info_sz, + (void **)&info); if (status != EFI_SUCCESS) { - efi_printk(sys_table_arg, "Failed to alloc mem for file info\n"); + efi_printk("Failed to alloc mem for file info\n"); return status; } - status = efi_call_proto(efi_file_handle, get_info, h, &info_guid, - &info_sz, info); + status = h->get_info(h, &info_guid, &info_sz, info); if (status == EFI_BUFFER_TOO_SMALL) { - efi_call_early(free_pool, info); + efi_bs_call(free_pool, info); goto grow; } *file_sz = info->file_size; - efi_call_early(free_pool, info); + efi_bs_call(free_pool, info); if (status 
!= EFI_SUCCESS) - efi_printk(sys_table_arg, "Failed to get initrd info\n"); + efi_printk("Failed to get initrd info\n"); return status; } -static efi_status_t efi_file_read(void *handle, unsigned long *size, void *addr) +static efi_status_t efi_file_read(efi_file_handle_t *handle, + unsigned long *size, void *addr) { - return efi_call_proto(efi_file_handle, read, handle, size, addr); + return handle->read(handle, size, addr); } -static efi_status_t efi_file_close(void *handle) +static efi_status_t efi_file_close(efi_file_handle_t *handle) { - return efi_call_proto(efi_file_handle, close, handle); + return handle->close(handle); } -static efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, - efi_loaded_image_t *image, +static efi_status_t efi_open_volume(efi_loaded_image_t *image, efi_file_handle_t **__fh) { efi_file_io_interface_t *io; efi_file_handle_t *fh; efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID; efi_status_t status; - void *handle = (void *)(unsigned long)efi_table_attr(efi_loaded_image, - device_handle, - image); + efi_handle_t handle = image->device_handle; - status = efi_call_early(handle_protocol, handle, - &fs_proto, (void **)&io); + status = efi_bs_call(handle_protocol, handle, &fs_proto, (void **)&io); if (status != EFI_SUCCESS) { - efi_printk(sys_table_arg, "Failed to handle fs_proto\n"); + efi_printk("Failed to handle fs_proto\n"); return status; } - status = efi_call_proto(efi_file_io_interface, open_volume, io, &fh); + status = io->open_volume(io, &fh); if (status != EFI_SUCCESS) - efi_printk(sys_table_arg, "Failed to open volume\n"); + efi_printk("Failed to open volume\n"); else *__fh = fh; @@ -465,11 +454,11 @@ efi_status_t efi_parse_options(char const *cmdline) str = strstr(cmdline, "nokaslr"); if (str == cmdline || (str && str > cmdline && *(str - 1) == ' ')) - __nokaslr = 1; + efi_nokaslr = true; str = strstr(cmdline, "quiet"); if (str == cmdline || (str && str > cmdline && *(str - 1) == ' ')) - __quiet = 1; + efi_quiet = true; /* * If no EFI parameters were specified on the cmdline we've got @@ -489,18 +478,28 @@ efi_status_t efi_parse_options(char const *cmdline) while (*str && *str != ' ') { if (!strncmp(str, "nochunk", 7)) { str += strlen("nochunk"); - __chunk_size = -1UL; + efi_chunk_size = -1UL; } if (!strncmp(str, "novamap", 7)) { str += strlen("novamap"); - __novamap = 1; + efi_novamap = true; } if (IS_ENABLED(CONFIG_EFI_SOFT_RESERVE) && !strncmp(str, "nosoftreserve", 13)) { str += strlen("nosoftreserve"); - efi_nosoftreserve = 1; + efi_nosoftreserve = true; + } + + if (!strncmp(str, "disable_early_pci_dma", 21)) { + str += strlen("disable_early_pci_dma"); + efi_disable_pci_dma = true; + } + + if (!strncmp(str, "no_disable_early_pci_dma", 24)) { + str += strlen("no_disable_early_pci_dma"); + efi_disable_pci_dma = false; } /* Group words together, delimited by "," */ @@ -520,8 +519,7 @@ efi_status_t efi_parse_options(char const *cmdline) * We only support loading a file from the same filesystem as * the kernel image.
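* (Files are opened via efi_open_volume() on the loaded image's device_handle, i.e. they are looked up on the volume the kernel itself was loaded from.)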
*/ -efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg, - efi_loaded_image_t *image, +efi_status_t handle_cmdline_files(efi_loaded_image_t *image, char *cmd_line, char *option_string, unsigned long max_addr, unsigned long *load_addr, @@ -570,10 +568,10 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg, if (!nr_files) return EFI_SUCCESS; - status = efi_call_early(allocate_pool, EFI_LOADER_DATA, - nr_files * sizeof(*files), (void **)&files); + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, + nr_files * sizeof(*files), (void **)&files); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table_arg, "Failed to alloc mem for file handle list\n"); + pr_efi_err("Failed to alloc mem for file handle list\n"); goto fail; } @@ -612,13 +610,13 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg, /* Only open the volume once. */ if (!i) { - status = efi_open_volume(sys_table_arg, image, &fh); + status = efi_open_volume(image, &fh); if (status != EFI_SUCCESS) goto free_files; } - status = efi_file_size(sys_table_arg, fh, filename_16, - (void **)&file->handle, &file->size); + status = efi_file_size(fh, filename_16, (void **)&file->handle, + &file->size); if (status != EFI_SUCCESS) goto close_handles; @@ -633,16 +631,16 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg, * so allocate enough memory for all the files. This is used * for loading multiple files. */ - status = efi_high_alloc(sys_table_arg, file_size_total, 0x1000, - &file_addr, max_addr); + status = efi_high_alloc(file_size_total, 0x1000, &file_addr, + max_addr); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table_arg, "Failed to alloc highmem for files\n"); + pr_efi_err("Failed to alloc highmem for files\n"); goto close_handles; } /* We've run out of free low memory. */ if (file_addr > max_addr) { - pr_efi_err(sys_table_arg, "We've run out of free low memory\n"); + pr_efi_err("We've run out of free low memory\n"); status = EFI_INVALID_PARAMETER; goto free_file_total; } @@ -655,8 +653,8 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg, while (size) { unsigned long chunksize; - if (IS_ENABLED(CONFIG_X86) && size > __chunk_size) - chunksize = __chunk_size; + if (IS_ENABLED(CONFIG_X86) && size > efi_chunk_size) + chunksize = efi_chunk_size; else chunksize = size; @@ -664,7 +662,7 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg, &chunksize, (void *)addr); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table_arg, "Failed to read file\n"); + pr_efi_err("Failed to read file\n"); goto free_file_total; } addr += chunksize; @@ -676,7 +674,7 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg, } - efi_call_early(free_pool, files); + efi_bs_call(free_pool, files); *load_addr = file_addr; *load_size = file_size_total; @@ -684,13 +682,13 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg, return status; free_file_total: - efi_free(sys_table_arg, file_size_total, file_addr); + efi_free(file_size_total, file_addr); close_handles: for (k = j; k < i; k++) efi_file_close(files[k].handle); free_files: - efi_call_early(free_pool, files); + efi_bs_call(free_pool, files); fail: *load_addr = 0; *load_size = 0; @@ -707,8 +705,7 @@ fail: * address is not available the lowest available address will * be used. 
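* (EFI_ALLOCATE_ADDRESS is tried first for the preferred address; efi_low_alloc_above() then supplies the lowest-address fallback, as seen below.)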
*/ -efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg, - unsigned long *image_addr, +efi_status_t efi_relocate_kernel(unsigned long *image_addr, unsigned long image_size, unsigned long alloc_size, unsigned long preferred_addr, @@ -737,20 +734,19 @@ efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg, * as possible while respecting the required alignment. */ nr_pages = round_up(alloc_size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE; - status = efi_call_early(allocate_pages, - EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA, - nr_pages, &efi_addr); + status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS, + EFI_LOADER_DATA, nr_pages, &efi_addr); new_addr = efi_addr; /* * If preferred address allocation failed, allocate as low as * possible. */ if (status != EFI_SUCCESS) { - status = efi_low_alloc_above(sys_table_arg, alloc_size, - alignment, &new_addr, min_addr); + status = efi_low_alloc_above(alloc_size, alignment, &new_addr, + min_addr); } if (status != EFI_SUCCESS) { - pr_efi_err(sys_table_arg, "Failed to allocate usable memory for kernel.\n"); + pr_efi_err("Failed to allocate usable memory for kernel.\n"); return status; } @@ -824,8 +820,7 @@ static u8 *efi_utf16_to_utf8(u8 *dst, const u16 *src, int n) * Size of memory allocated is returned in *cmd_line_len. * Returns NULL on error. */ -char *efi_convert_cmdline(efi_system_table_t *sys_table_arg, - efi_loaded_image_t *image, +char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len) { const u16 *s2; @@ -854,8 +849,8 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg, options_bytes++; /* NUL termination */ - status = efi_high_alloc(sys_table_arg, options_bytes, 0, - &cmdline_addr, MAX_CMDLINE_ADDRESS); + status = efi_high_alloc(options_bytes, 0, &cmdline_addr, + MAX_CMDLINE_ADDRESS); if (status != EFI_SUCCESS) return NULL; @@ -877,24 +872,26 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg, * specific structure may be passed to the function via priv. The client * function may be called multiple times. */ -efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table_arg, - void *handle, +efi_status_t efi_exit_boot_services(void *handle, struct efi_boot_memmap *map, void *priv, efi_exit_boot_map_processing priv_func) { efi_status_t status; - status = efi_get_memory_map(sys_table_arg, map); + status = efi_get_memory_map(map); if (status != EFI_SUCCESS) goto fail; - status = priv_func(sys_table_arg, map, priv); + status = priv_func(map, priv); if (status != EFI_SUCCESS) goto free_map; - status = efi_call_early(exit_boot_services, handle, *map->key_ptr); + if (efi_disable_pci_dma) + efi_pci_disable_bridge_busmaster(); + + status = efi_bs_call(exit_boot_services, handle, *map->key_ptr); if (status == EFI_INVALID_PARAMETER) { /* @@ -911,23 +908,23 @@ efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table_arg, * to get_memory_map() is expected to succeed here.
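* (Per the UEFI spec, only GetMemoryMap() and ExitBootServices() may be called after the first failed attempt, hence the single retry below with a refreshed map and no further allocations.)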
*/ *map->map_size = *map->buff_size; - status = efi_call_early(get_memory_map, - map->map_size, - *map->map, - map->key_ptr, - map->desc_size, - map->desc_ver); + status = efi_bs_call(get_memory_map, + map->map_size, + *map->map, + map->key_ptr, + map->desc_size, + map->desc_ver); /* exit_boot_services() was called, thus cannot free */ if (status != EFI_SUCCESS) goto fail; - status = priv_func(sys_table_arg, map, priv); + status = priv_func(map, priv); /* exit_boot_services() was called, thus cannot free */ if (status != EFI_SUCCESS) goto fail; - status = efi_call_early(exit_boot_services, handle, *map->key_ptr); + status = efi_bs_call(exit_boot_services, handle, *map->key_ptr); } /* exit_boot_services() was called, thus cannot free */ @@ -937,38 +934,31 @@ efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table_arg, return EFI_SUCCESS; free_map: - efi_call_early(free_pool, *map->map); + efi_bs_call(free_pool, *map->map); fail: return status; } -#define GET_EFI_CONFIG_TABLE(bits) \ -static void *get_efi_config_table##bits(efi_system_table_t *_sys_table, \ - efi_guid_t guid) \ -{ \ - efi_system_table_##bits##_t *sys_table; \ - efi_config_table_##bits##_t *tables; \ - int i; \ - \ - sys_table = (typeof(sys_table))_sys_table; \ - tables = (typeof(tables))(unsigned long)sys_table->tables; \ - \ - for (i = 0; i < sys_table->nr_tables; i++) { \ - if (efi_guidcmp(tables[i].guid, guid) != 0) \ - continue; \ - \ - return (void *)(unsigned long)tables[i].table; \ - } \ - \ - return NULL; \ +void *get_efi_config_table(efi_guid_t guid) +{ + unsigned long tables = efi_table_attr(efi_system_table(), tables); + int nr_tables = efi_table_attr(efi_system_table(), nr_tables); + int i; + + for (i = 0; i < nr_tables; i++) { + efi_config_table_t *t = (void *)tables; + + if (efi_guidcmp(t->guid, guid) == 0) + return efi_table_attr(t, table); + + tables += efi_is_native() ? 
sizeof(efi_config_table_t) + : sizeof(efi_config_table_32_t); + } + return NULL; } -GET_EFI_CONFIG_TABLE(32) -GET_EFI_CONFIG_TABLE(64) -void *get_efi_config_table(efi_system_table_t *sys_table, efi_guid_t guid) +void efi_char16_printk(efi_char16_t *str) { - if (efi_is_64bit()) - return get_efi_config_table64(sys_table, guid); - else - return get_efi_config_table32(sys_table, guid); + efi_call_proto(efi_table_attr(efi_system_table(), con_out), + output_string, str); } diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h index 05739ae013c8..c244b165005e 100644 --- a/drivers/firmware/efi/libstub/efistub.h +++ b/drivers/firmware/efi/libstub/efistub.h @@ -25,22 +25,30 @@ #define EFI_ALLOC_ALIGN EFI_PAGE_SIZE #endif -extern int __pure nokaslr(void); -extern int __pure is_quiet(void); -extern int __pure novamap(void); +#ifdef CONFIG_ARM +#define __efistub_global __section(.data) +#else +#define __efistub_global +#endif + +extern bool __pure nokaslr(void); +extern bool __pure is_quiet(void); +extern bool __pure novamap(void); + +extern __pure efi_system_table_t *efi_system_table(void); -#define pr_efi(sys_table, msg) do { \ - if (!is_quiet()) efi_printk(sys_table, "EFI stub: "msg); \ +#define pr_efi(msg) do { \ + if (!is_quiet()) efi_printk("EFI stub: "msg); \ } while (0) -#define pr_efi_err(sys_table, msg) efi_printk(sys_table, "EFI stub: ERROR: "msg) +#define pr_efi_err(msg) efi_printk("EFI stub: ERROR: "msg) -void efi_char16_printk(efi_system_table_t *, efi_char16_t *); +void efi_char16_printk(efi_char16_t *); -unsigned long get_dram_base(efi_system_table_t *sys_table_arg); +unsigned long get_dram_base(void); -efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, - void *handle, +efi_status_t allocate_new_fdt_and_exit_boot(void *handle, unsigned long *new_fdt_addr, unsigned long max_addr, u64 initrd_addr, u64 initrd_size, @@ -48,22 +56,20 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, unsigned long fdt_addr, unsigned long fdt_size); -void *get_fdt(efi_system_table_t *sys_table, unsigned long *fdt_size); +void *get_fdt(unsigned long *fdt_size); void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size, unsigned long desc_size, efi_memory_desc_t *runtime_map, int *count); -efi_status_t efi_get_random_bytes(efi_system_table_t *sys_table, - unsigned long size, u8 *out); +efi_status_t efi_get_random_bytes(unsigned long size, u8 *out); -efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg, - unsigned long size, unsigned long align, +efi_status_t efi_random_alloc(unsigned long size, unsigned long align, unsigned long *addr, unsigned long random_seed); -efi_status_t check_platform_features(efi_system_table_t *sys_table_arg); +efi_status_t check_platform_features(void); -void *get_efi_config_table(efi_system_table_t *sys_table, efi_guid_t guid); +void *get_efi_config_table(efi_guid_t guid); /* Helper macros for the usual case of using simple C variables: */ #ifndef fdt_setprop_inplace_var @@ -76,4 +82,12 @@ void *get_efi_config_table(efi_system_table_t *sys_table, efi_guid_t guid); fdt_setprop((fdt), (node_offset), (name), &(var), sizeof(var)) #endif +#define get_efi_var(name, vendor, ...) \ + efi_rt_call(get_variable, (efi_char16_t *)(name), \ + (efi_guid_t *)(vendor), __VA_ARGS__) + +#define set_efi_var(name, vendor, ...)
\ + efi_rt_call(set_variable, (efi_char16_t *)(name), \ + (efi_guid_t *)(vendor), __VA_ARGS__) + #endif diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c index 0bf0190917e0..0a91e5232127 100644 --- a/drivers/firmware/efi/libstub/fdt.c +++ b/drivers/firmware/efi/libstub/fdt.c @@ -16,7 +16,7 @@ #define EFI_DT_ADDR_CELLS_DEFAULT 2 #define EFI_DT_SIZE_CELLS_DEFAULT 2 -static void fdt_update_cell_size(efi_system_table_t *sys_table, void *fdt) +static void fdt_update_cell_size(void *fdt) { int offset; @@ -27,8 +27,7 @@ static void fdt_update_cell_size(efi_system_table_t *sys_table, void *fdt) fdt_setprop_u32(fdt, offset, "#size-cells", EFI_DT_SIZE_CELLS_DEFAULT); } -static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, - unsigned long orig_fdt_size, +static efi_status_t update_fdt(void *orig_fdt, unsigned long orig_fdt_size, void *fdt, int new_fdt_size, char *cmdline_ptr, u64 initrd_addr, u64 initrd_size) { @@ -40,7 +39,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, /* Do some checks on provided FDT, if it exists: */ if (orig_fdt) { if (fdt_check_header(orig_fdt)) { - pr_efi_err(sys_table, "Device Tree header not valid!\n"); + pr_efi_err("Device Tree header not valid!\n"); return EFI_LOAD_ERROR; } /* @@ -48,7 +47,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, * configuration table: */ if (orig_fdt_size && fdt_totalsize(orig_fdt) > orig_fdt_size) { - pr_efi_err(sys_table, "Truncated device tree! foo!\n"); + pr_efi_err("Truncated device tree! foo!\n"); return EFI_LOAD_ERROR; } } @@ -62,7 +61,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, * Any failure from the following function is * non-critical: */ - fdt_update_cell_size(sys_table, fdt); + fdt_update_cell_size(fdt); } } @@ -111,7 +110,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, /* Add FDT entries for EFI runtime services in chosen node. */ node = fdt_subnode_offset(fdt, 0, "chosen"); - fdt_val64 = cpu_to_fdt64((u64)(unsigned long)sys_table); + fdt_val64 = cpu_to_fdt64((u64)(unsigned long)efi_system_table()); status = fdt_setprop_var(fdt, node, "linux,uefi-system-table", fdt_val64); if (status) @@ -140,7 +139,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { efi_status_t efi_status; - efi_status = efi_get_random_bytes(sys_table, sizeof(fdt_val64), + efi_status = efi_get_random_bytes(sizeof(fdt_val64), (u8 *)&fdt_val64); if (efi_status == EFI_SUCCESS) { status = fdt_setprop_var(fdt, node, "kaslr-seed", fdt_val64); @@ -210,8 +209,7 @@ struct exit_boot_struct { void *new_fdt_addr; }; -static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg, - struct efi_boot_memmap *map, +static efi_status_t exit_boot_func(struct efi_boot_memmap *map, void *priv) { struct exit_boot_struct *p = priv; @@ -244,8 +242,7 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg, * with the final memory map in it. */ -efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, - void *handle, +efi_status_t allocate_new_fdt_and_exit_boot(void *handle, unsigned long *new_fdt_addr, unsigned long max_addr, u64 initrd_addr, u64 initrd_size, @@ -275,19 +272,19 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, * subsequent allocations adding entries, since they could not affect * the number of EFI_MEMORY_RUNTIME regions. 
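The hunks above and below rely on a small family of accessor helpers in place of the old sys_table_arg plumbing. Their definitions are not part of this excerpt (they live in the per-arch <asm/efi.h> headers, with x86 mixed mode routing them through a thunk), but on native builds they presumably reduce to plain indirect calls, roughly:

    /* Rough sketch of the native forms; an assumption, not from this diff */
    #define efi_bs_call(func, ...) \
            efi_system_table()->boottime->func(__VA_ARGS__)
    #define efi_rt_call(func, ...) \
            efi_system_table()->runtime->func(__VA_ARGS__)
    #define efi_table_attr(inst, attr)      ((inst)->attr)
    #define efi_call_proto(inst, func, ...) \
            (inst)->func((inst), ##__VA_ARGS__)

With every call site funneled through efi_system_table(), the explicit system-table parameter can be dropped from the stub-wide helpers, which is what the signature changes in the surrounding hunks implement.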
*/ - status = efi_get_memory_map(sys_table, &map); + status = efi_get_memory_map(&map); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table, "Unable to retrieve UEFI memory map.\n"); + pr_efi_err("Unable to retrieve UEFI memory map.\n"); return status; } - pr_efi(sys_table, "Exiting boot services and installing virtual address map...\n"); + pr_efi("Exiting boot services and installing virtual address map...\n"); map.map = &memory_map; - status = efi_high_alloc(sys_table, MAX_FDT_SIZE, EFI_FDT_ALIGN, + status = efi_high_alloc(MAX_FDT_SIZE, EFI_FDT_ALIGN, new_fdt_addr, max_addr); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table, "Unable to allocate memory for new device tree.\n"); + pr_efi_err("Unable to allocate memory for new device tree.\n"); goto fail; } @@ -295,16 +292,16 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, * Now that we have done our final memory allocation (and free) * we can get the memory map key needed for exit_boot_services(). */ - status = efi_get_memory_map(sys_table, &map); + status = efi_get_memory_map(&map); if (status != EFI_SUCCESS) goto fail_free_new_fdt; - status = update_fdt(sys_table, (void *)fdt_addr, fdt_size, + status = update_fdt((void *)fdt_addr, fdt_size, (void *)*new_fdt_addr, MAX_FDT_SIZE, cmdline_ptr, initrd_addr, initrd_size); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table, "Unable to construct new device tree.\n"); + pr_efi_err("Unable to construct new device tree.\n"); goto fail_free_new_fdt; } @@ -313,7 +310,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, priv.runtime_entry_count = &runtime_entry_count; priv.new_fdt_addr = (void *)*new_fdt_addr; - status = efi_exit_boot_services(sys_table, handle, &map, &priv, exit_boot_func); + status = efi_exit_boot_services(handle, &map, &priv, exit_boot_func); if (status == EFI_SUCCESS) { efi_set_virtual_address_map_t *svam; @@ -322,7 +319,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, return EFI_SUCCESS; /* Install the new virtual address map */ - svam = sys_table->runtime->set_virtual_address_map; + svam = efi_system_table()->runtime->set_virtual_address_map; status = svam(runtime_entry_count * desc_size, desc_size, desc_ver, runtime_map); @@ -350,28 +347,28 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, return EFI_SUCCESS; } - pr_efi_err(sys_table, "Exit boot services failed.\n"); + pr_efi_err("Exit boot services failed.\n"); fail_free_new_fdt: - efi_free(sys_table, MAX_FDT_SIZE, *new_fdt_addr); + efi_free(MAX_FDT_SIZE, *new_fdt_addr); fail: - sys_table->boottime->free_pool(runtime_map); + efi_system_table()->boottime->free_pool(runtime_map); return EFI_LOAD_ERROR; } -void *get_fdt(efi_system_table_t *sys_table, unsigned long *fdt_size) +void *get_fdt(unsigned long *fdt_size) { void *fdt; - fdt = get_efi_config_table(sys_table, DEVICE_TREE_GUID); + fdt = get_efi_config_table(DEVICE_TREE_GUID); if (!fdt) return NULL; if (fdt_check_header(fdt) != 0) { - pr_efi_err(sys_table, "Invalid header detected on UEFI supplied FDT, ignoring ...\n"); + pr_efi_err("Invalid header detected on UEFI supplied FDT, ignoring ...\n"); return NULL; } *fdt_size = fdt_totalsize(fdt); diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c index 0101ca4c13b1..55e6b3f286fe 100644 --- a/drivers/firmware/efi/libstub/gop.c +++ b/drivers/firmware/efi/libstub/gop.c @@ -10,6 +10,8 @@ #include <asm/efi.h> #include <asm/setup.h> +#include "efistub.h" + static void 
find_bits(unsigned long mask, u8 *pos, u8 *size) { u8 first, len; @@ -35,7 +37,7 @@ static void find_bits(unsigned long mask, u8 *pos, u8 *size) static void setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line, - struct efi_pixel_bitmask pixel_info, int pixel_format) + efi_pixel_bitmask_t pixel_info, int pixel_format) { if (pixel_format == PIXEL_RGB_RESERVED_8BIT_PER_COLOR) { si->lfb_depth = 32; @@ -83,189 +85,44 @@ setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line, } } -static efi_status_t -__gop_query32(efi_system_table_t *sys_table_arg, - struct efi_graphics_output_protocol_32 *gop32, - struct efi_graphics_output_mode_info **info, - unsigned long *size, u64 *fb_base) -{ - struct efi_graphics_output_protocol_mode_32 *mode; - efi_graphics_output_protocol_query_mode query_mode; - efi_status_t status; - unsigned long m; - - m = gop32->mode; - mode = (struct efi_graphics_output_protocol_mode_32 *)m; - query_mode = (void *)(unsigned long)gop32->query_mode; - - status = __efi_call_early(query_mode, (void *)gop32, mode->mode, size, - info); - if (status != EFI_SUCCESS) - return status; - - *fb_base = mode->frame_buffer_base; - return status; -} - -static efi_status_t -setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si, - efi_guid_t *proto, unsigned long size, void **gop_handle) +static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto, + unsigned long size, void **handles) { - struct efi_graphics_output_protocol_32 *gop32, *first_gop; - unsigned long nr_gops; + efi_graphics_output_protocol_t *gop, *first_gop; u16 width, height; u32 pixels_per_scan_line; u32 ext_lfb_base; - u64 fb_base; - struct efi_pixel_bitmask pixel_info; + efi_physical_addr_t fb_base; + efi_pixel_bitmask_t pixel_info; int pixel_format; - efi_status_t status = EFI_NOT_FOUND; - u32 *handles = (u32 *)(unsigned long)gop_handle; - int i; - - first_gop = NULL; - gop32 = NULL; - - nr_gops = size / sizeof(u32); - for (i = 0; i < nr_gops; i++) { - struct efi_graphics_output_mode_info *info = NULL; - efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID; - bool conout_found = false; - void *dummy = NULL; - efi_handle_t h = (efi_handle_t)(unsigned long)handles[i]; - u64 current_fb_base; - - status = efi_call_early(handle_protocol, h, - proto, (void **)&gop32); - if (status != EFI_SUCCESS) - continue; - - status = efi_call_early(handle_protocol, h, - &conout_proto, &dummy); - if (status == EFI_SUCCESS) - conout_found = true; - - status = __gop_query32(sys_table_arg, gop32, &info, &size, - ¤t_fb_base); - if (status == EFI_SUCCESS && (!first_gop || conout_found) && - info->pixel_format != PIXEL_BLT_ONLY) { - /* - * Systems that use the UEFI Console Splitter may - * provide multiple GOP devices, not all of which are - * backed by real hardware. The workaround is to search - * for a GOP implementing the ConOut protocol, and if - * one isn't found, to just fall back to the first GOP. - */ - width = info->horizontal_resolution; - height = info->vertical_resolution; - pixel_format = info->pixel_format; - pixel_info = info->pixel_information; - pixels_per_scan_line = info->pixels_per_scan_line; - fb_base = current_fb_base; - - /* - * Once we've found a GOP supporting ConOut, - * don't bother looking any further. - */ - first_gop = gop32; - if (conout_found) - break; - } - } - - /* Did we find any GOPs? 
*/ - if (!first_gop) - goto out; - - /* EFI framebuffer */ - si->orig_video_isVGA = VIDEO_TYPE_EFI; - - si->lfb_width = width; - si->lfb_height = height; - si->lfb_base = fb_base; - - ext_lfb_base = (u64)(unsigned long)fb_base >> 32; - if (ext_lfb_base) { - si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE; - si->ext_lfb_base = ext_lfb_base; - } - - si->pages = 1; - - setup_pixel_info(si, pixels_per_scan_line, pixel_info, pixel_format); - - si->lfb_size = si->lfb_linelength * si->lfb_height; - - si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS; -out: - return status; -} - -static efi_status_t -__gop_query64(efi_system_table_t *sys_table_arg, - struct efi_graphics_output_protocol_64 *gop64, - struct efi_graphics_output_mode_info **info, - unsigned long *size, u64 *fb_base) -{ - struct efi_graphics_output_protocol_mode_64 *mode; - efi_graphics_output_protocol_query_mode query_mode; efi_status_t status; - unsigned long m; - - m = gop64->mode; - mode = (struct efi_graphics_output_protocol_mode_64 *)m; - query_mode = (void *)(unsigned long)gop64->query_mode; - - status = __efi_call_early(query_mode, (void *)gop64, mode->mode, size, - info); - if (status != EFI_SUCCESS) - return status; - - *fb_base = mode->frame_buffer_base; - return status; -} - -static efi_status_t -setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si, - efi_guid_t *proto, unsigned long size, void **gop_handle) -{ - struct efi_graphics_output_protocol_64 *gop64, *first_gop; - unsigned long nr_gops; - u16 width, height; - u32 pixels_per_scan_line; - u32 ext_lfb_base; - u64 fb_base; - struct efi_pixel_bitmask pixel_info; - int pixel_format; - efi_status_t status = EFI_NOT_FOUND; - u64 *handles = (u64 *)(unsigned long)gop_handle; + efi_handle_t h; int i; first_gop = NULL; - gop64 = NULL; + gop = NULL; - nr_gops = size / sizeof(u64); - for (i = 0; i < nr_gops; i++) { - struct efi_graphics_output_mode_info *info = NULL; + for_each_efi_handle(h, handles, size, i) { + efi_graphics_output_protocol_mode_t *mode; + efi_graphics_output_mode_info_t *info = NULL; efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID; bool conout_found = false; void *dummy = NULL; - efi_handle_t h = (efi_handle_t)(unsigned long)handles[i]; - u64 current_fb_base; + efi_physical_addr_t current_fb_base; - status = efi_call_early(handle_protocol, h, - proto, (void **)&gop64); + status = efi_bs_call(handle_protocol, h, proto, (void **)&gop); if (status != EFI_SUCCESS) continue; - status = efi_call_early(handle_protocol, h, - &conout_proto, &dummy); + status = efi_bs_call(handle_protocol, h, &conout_proto, &dummy); if (status == EFI_SUCCESS) conout_found = true; - status = __gop_query64(sys_table_arg, gop64, &info, &size, - ¤t_fb_base); - if (status == EFI_SUCCESS && (!first_gop || conout_found) && + mode = efi_table_attr(gop, mode); + info = efi_table_attr(mode, info); + current_fb_base = efi_table_attr(mode, frame_buffer_base); + + if ((!first_gop || conout_found) && info->pixel_format != PIXEL_BLT_ONLY) { /* * Systems that use the UEFI Console Splitter may @@ -285,7 +142,7 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si, * Once we've found a GOP supporting ConOut, * don't bother looking any further. */ - first_gop = gop64; + first_gop = gop; if (conout_found) break; } @@ -293,7 +150,7 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si, /* Did we find any GOPs? 
*/ if (!first_gop) - goto out; + return EFI_NOT_FOUND; /* EFI framebuffer */ si->orig_video_isVGA = VIDEO_TYPE_EFI; @@ -315,40 +172,32 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si, si->lfb_size = si->lfb_linelength * si->lfb_height; si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS; -out: - return status; + + return EFI_SUCCESS; } /* * See if we have Graphics Output Protocol */ -efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg, - struct screen_info *si, efi_guid_t *proto, +efi_status_t efi_setup_gop(struct screen_info *si, efi_guid_t *proto, unsigned long size) { efi_status_t status; void **gop_handle = NULL; - status = efi_call_early(allocate_pool, EFI_LOADER_DATA, - size, (void **)&gop_handle); + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size, + (void **)&gop_handle); if (status != EFI_SUCCESS) return status; - status = efi_call_early(locate_handle, - EFI_LOCATE_BY_PROTOCOL, - proto, NULL, &size, gop_handle); + status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, proto, NULL, + &size, gop_handle); if (status != EFI_SUCCESS) goto free_handle; - if (efi_is_64bit()) { - status = setup_gop64(sys_table_arg, si, proto, size, - gop_handle); - } else { - status = setup_gop32(sys_table_arg, si, proto, size, - gop_handle); - } + status = setup_gop(si, proto, size, gop_handle); free_handle: - efi_call_early(free_pool, gop_handle); + efi_bs_call(free_pool, gop_handle); return status; } diff --git a/drivers/firmware/efi/libstub/pci.c b/drivers/firmware/efi/libstub/pci.c new file mode 100644 index 000000000000..b025e59b94df --- /dev/null +++ b/drivers/firmware/efi/libstub/pci.c @@ -0,0 +1,114 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCI-related functions used by the EFI stub on multiple + * architectures. + * + * Copyright 2019 Google, LLC + */ + +#include <linux/efi.h> +#include <linux/pci.h> + +#include <asm/efi.h> + +#include "efistub.h" + +void efi_pci_disable_bridge_busmaster(void) +{ + efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID; + unsigned long pci_handle_size = 0; + efi_handle_t *pci_handle = NULL; + efi_handle_t handle; + efi_status_t status; + u16 class, command; + int i; + + status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, &pci_proto, + NULL, &pci_handle_size, NULL); + + if (status != EFI_BUFFER_TOO_SMALL) { + if (status != EFI_SUCCESS && status != EFI_NOT_FOUND) + pr_efi_err("Failed to locate PCI I/O handles'\n"); + return; + } + + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, pci_handle_size, + (void **)&pci_handle); + if (status != EFI_SUCCESS) { + pr_efi_err("Failed to allocate memory for 'pci_handle'\n"); + return; + } + + status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, &pci_proto, + NULL, &pci_handle_size, pci_handle); + if (status != EFI_SUCCESS) { + pr_efi_err("Failed to locate PCI I/O handles'\n"); + goto free_handle; + } + + for_each_efi_handle(handle, pci_handle, pci_handle_size, i) { + efi_pci_io_protocol_t *pci; + unsigned long segment_nr, bus_nr, device_nr, func_nr; + + status = efi_bs_call(handle_protocol, handle, &pci_proto, + (void **)&pci); + if (status != EFI_SUCCESS) + continue; + + /* + * Disregard devices living on bus 0 - these are not behind a + * bridge so no point in disconnecting them from their drivers. + */ + status = efi_call_proto(pci, get_location, &segment_nr, &bus_nr, + &device_nr, &func_nr); + if (status != EFI_SUCCESS || bus_nr == 0) + continue; + + /* + * Don't disconnect VGA controllers so we don't risk losing + * access to the framebuffer. 
Drivers for true PCIe graphics + * controllers that are behind a PCIe root port do not use + * DMA to implement the GOP framebuffer anyway [although they + * may use it in their implementation of Gop->Blt()], and so + * disabling DMA in the PCI bridge should not interfere with + * normal operation of the device. + */ + status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16, + PCI_CLASS_DEVICE, 1, &class); + if (status != EFI_SUCCESS || class == PCI_CLASS_DISPLAY_VGA) + continue; + + /* Disconnect this handle from all its drivers */ + efi_bs_call(disconnect_controller, handle, NULL, NULL); + } + + for_each_efi_handle(handle, pci_handle, pci_handle_size, i) { + efi_pci_io_protocol_t *pci; + + status = efi_bs_call(handle_protocol, handle, &pci_proto, + (void **)&pci); + if (status != EFI_SUCCESS || !pci) + continue; + + status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16, + PCI_CLASS_DEVICE, 1, &class); + + if (status != EFI_SUCCESS || class != PCI_CLASS_BRIDGE_PCI) + continue; + + /* Disable busmastering */ + status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16, + PCI_COMMAND, 1, &command); + if (status != EFI_SUCCESS || !(command & PCI_COMMAND_MASTER)) + continue; + + command &= ~PCI_COMMAND_MASTER; + status = efi_call_proto(pci, pci.write, EfiPciIoWidthUint16, + PCI_COMMAND, 1, &command); + if (status != EFI_SUCCESS) + pr_efi_err("Failed to disable PCI busmastering\n"); + } + +free_handle: + efi_bs_call(free_pool, pci_handle); +} diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c index 35edd7cfb6a1..316ce9ff0193 100644 --- a/drivers/firmware/efi/libstub/random.c +++ b/drivers/firmware/efi/libstub/random.c @@ -9,38 +9,34 @@ #include "efistub.h" -typedef struct efi_rng_protocol efi_rng_protocol_t; - -typedef struct { - u32 get_info; - u32 get_rng; -} efi_rng_protocol_32_t; - -typedef struct { - u64 get_info; - u64 get_rng; -} efi_rng_protocol_64_t; - -struct efi_rng_protocol { - efi_status_t (*get_info)(struct efi_rng_protocol *, - unsigned long *, efi_guid_t *); - efi_status_t (*get_rng)(struct efi_rng_protocol *, - efi_guid_t *, unsigned long, u8 *out); +typedef union efi_rng_protocol efi_rng_protocol_t; + +union efi_rng_protocol { + struct { + efi_status_t (__efiapi *get_info)(efi_rng_protocol_t *, + unsigned long *, + efi_guid_t *); + efi_status_t (__efiapi *get_rng)(efi_rng_protocol_t *, + efi_guid_t *, unsigned long, + u8 *out); + }; + struct { + u32 get_info; + u32 get_rng; + } mixed_mode; }; -efi_status_t efi_get_random_bytes(efi_system_table_t *sys_table_arg, - unsigned long size, u8 *out) +efi_status_t efi_get_random_bytes(unsigned long size, u8 *out) { efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID; efi_status_t status; - struct efi_rng_protocol *rng; + efi_rng_protocol_t *rng = NULL; - status = efi_call_early(locate_protocol, &rng_proto, NULL, - (void **)&rng); + status = efi_bs_call(locate_protocol, &rng_proto, NULL, (void **)&rng); if (status != EFI_SUCCESS) return status; - return efi_call_proto(efi_rng_protocol, get_rng, rng, NULL, size, out); + return efi_call_proto(rng, get_rng, NULL, size, out); } /* @@ -81,8 +77,7 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md, */ #define MD_NUM_SLOTS(md) ((md)->virt_addr) -efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg, - unsigned long size, +efi_status_t efi_random_alloc(unsigned long size, unsigned long align, unsigned long *addr, unsigned long random_seed) @@ -101,7 +96,7 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg, 
map.key_ptr = NULL; map.buff_size = &buff_size; - status = efi_get_memory_map(sys_table_arg, &map); + status = efi_get_memory_map(&map); if (status != EFI_SUCCESS) return status; @@ -145,39 +140,38 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg, target = round_up(md->phys_addr, align) + target_slot * align; pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE; - status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS, - EFI_LOADER_DATA, pages, &target); + status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS, + EFI_LOADER_DATA, pages, &target); if (status == EFI_SUCCESS) *addr = target; break; } - efi_call_early(free_pool, memory_map); + efi_bs_call(free_pool, memory_map); return status; } -efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg) +efi_status_t efi_random_get_seed(void) { efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID; efi_guid_t rng_algo_raw = EFI_RNG_ALGORITHM_RAW; efi_guid_t rng_table_guid = LINUX_EFI_RANDOM_SEED_TABLE_GUID; - struct efi_rng_protocol *rng; - struct linux_efi_random_seed *seed; + efi_rng_protocol_t *rng = NULL; + struct linux_efi_random_seed *seed = NULL; efi_status_t status; - status = efi_call_early(locate_protocol, &rng_proto, NULL, - (void **)&rng); + status = efi_bs_call(locate_protocol, &rng_proto, NULL, (void **)&rng); if (status != EFI_SUCCESS) return status; - status = efi_call_early(allocate_pool, EFI_RUNTIME_SERVICES_DATA, - sizeof(*seed) + EFI_RANDOM_SEED_SIZE, - (void **)&seed); + status = efi_bs_call(allocate_pool, EFI_RUNTIME_SERVICES_DATA, + sizeof(*seed) + EFI_RANDOM_SEED_SIZE, + (void **)&seed); if (status != EFI_SUCCESS) return status; - status = efi_call_proto(efi_rng_protocol, get_rng, rng, &rng_algo_raw, + status = efi_call_proto(rng, get_rng, &rng_algo_raw, EFI_RANDOM_SEED_SIZE, seed->bits); if (status == EFI_UNSUPPORTED) @@ -185,21 +179,20 @@ efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg) * Use whatever algorithm we have available if the raw algorithm * is not implemented. */ - status = efi_call_proto(efi_rng_protocol, get_rng, rng, NULL, - EFI_RANDOM_SEED_SIZE, seed->bits); + status = efi_call_proto(rng, get_rng, NULL, + EFI_RANDOM_SEED_SIZE, seed->bits); if (status != EFI_SUCCESS) goto err_freepool; seed->size = EFI_RANDOM_SEED_SIZE; - status = efi_call_early(install_configuration_table, &rng_table_guid, - seed); + status = efi_bs_call(install_configuration_table, &rng_table_guid, seed); if (status != EFI_SUCCESS) goto err_freepool; return EFI_SUCCESS; err_freepool: - efi_call_early(free_pool, seed); + efi_bs_call(free_pool, seed); return status; } diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c index edba5e7a3743..a765378ad18c 100644 --- a/drivers/firmware/efi/libstub/secureboot.c +++ b/drivers/firmware/efi/libstub/secureboot.c @@ -21,18 +21,13 @@ static const efi_char16_t efi_SetupMode_name[] = L"SetupMode"; static const efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID; static const efi_char16_t shim_MokSBState_name[] = L"MokSBState"; -#define get_efi_var(name, vendor, ...) \ - efi_call_runtime(get_variable, \ - (efi_char16_t *)(name), (efi_guid_t *)(vendor), \ - __VA_ARGS__); - /* * Determine whether we're in secure boot mode. * * Please keep the logic in sync with * arch/x86/xen/efi.c:xen_efi_get_secureboot(). 
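As background for the signature change just below: with get_efi_var() now shared via efistub.h, the core of the secure boot check presumably comes down to two runtime-variable reads, sketched here (the name/GUID constants are the ones declared at the top of this file; the shim MokSBState override in the unchanged body of the function is omitted):

    u8 secboot, setupmode;
    unsigned long size;
    efi_status_t status;

    size = sizeof(secboot);
    status = get_efi_var(efi_SecureBoot_name, &efi_variable_guid,
                         NULL, &size, &secboot);
    if (status == EFI_NOT_FOUND)
            return efi_secureboot_mode_disabled;
    if (status != EFI_SUCCESS)
            goto out_efi_err;

    size = sizeof(setupmode);
    status = get_efi_var(efi_SetupMode_name, &efi_variable_guid,
                         NULL, &size, &setupmode);
    if (status != EFI_SUCCESS)
            goto out_efi_err;

    if (secboot == 0 || setupmode == 1)
            return efi_secureboot_mode_disabled;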
*/ -enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table_arg) +enum efi_secureboot_mode efi_get_secureboot(void) { u32 attr; u8 secboot, setupmode, moksbstate; @@ -72,10 +67,10 @@ enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table_arg) return efi_secureboot_mode_disabled; secure_boot_enabled: - pr_efi(sys_table_arg, "UEFI Secure Boot is enabled.\n"); + pr_efi("UEFI Secure Boot is enabled.\n"); return efi_secureboot_mode_enabled; out_efi_err: - pr_efi_err(sys_table_arg, "Could not determine UEFI Secure Boot status.\n"); + pr_efi_err("Could not determine UEFI Secure Boot status.\n"); return efi_secureboot_mode_unknown; } diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c index eb9af83e4d59..1d59e103a2e3 100644 --- a/drivers/firmware/efi/libstub/tpm.c +++ b/drivers/firmware/efi/libstub/tpm.c @@ -20,23 +20,13 @@ static const efi_char16_t efi_MemoryOverWriteRequest_name[] = #define MEMORY_ONLY_RESET_CONTROL_GUID \ EFI_GUID(0xe20939be, 0x32d4, 0x41be, 0xa1, 0x50, 0x89, 0x7f, 0x85, 0xd4, 0x98, 0x29) -#define get_efi_var(name, vendor, ...) \ - efi_call_runtime(get_variable, \ - (efi_char16_t *)(name), (efi_guid_t *)(vendor), \ - __VA_ARGS__) - -#define set_efi_var(name, vendor, ...) \ - efi_call_runtime(set_variable, \ - (efi_char16_t *)(name), (efi_guid_t *)(vendor), \ - __VA_ARGS__) - /* * Enable reboot attack mitigation. This requests that the firmware clear the * RAM on next reboot before proceeding with boot, ensuring that any secrets * are cleared. If userland has ensured that all secrets have been removed * from RAM before reboot it can simply reset this variable. */ -void efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg) +void efi_enable_reset_attack_mitigation(void) { u8 val = 1; efi_guid_t var_guid = MEMORY_ONLY_RESET_CONTROL_GUID; @@ -57,7 +47,7 @@ void efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg) #endif -void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg) +void efi_retrieve_tpm2_eventlog(void) { efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID; efi_guid_t linux_eventlog_guid = LINUX_EFI_TPM_EVENT_LOG_GUID; @@ -69,23 +59,22 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg) size_t log_size, last_entry_size; efi_bool_t truncated; int version = EFI_TCG2_EVENT_LOG_FORMAT_TCG_2; - void *tcg2_protocol = NULL; + efi_tcg2_protocol_t *tcg2_protocol = NULL; int final_events_size = 0; - status = efi_call_early(locate_protocol, &tcg2_guid, NULL, - &tcg2_protocol); + status = efi_bs_call(locate_protocol, &tcg2_guid, NULL, + (void **)&tcg2_protocol); if (status != EFI_SUCCESS) return; - status = efi_call_proto(efi_tcg2_protocol, get_event_log, - tcg2_protocol, version, &log_location, - &log_last_entry, &truncated); + status = efi_call_proto(tcg2_protocol, get_event_log, version, + &log_location, &log_last_entry, &truncated); if (status != EFI_SUCCESS || !log_location) { version = EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2; - status = efi_call_proto(efi_tcg2_protocol, get_event_log, - tcg2_protocol, version, &log_location, - &log_last_entry, &truncated); + status = efi_call_proto(tcg2_protocol, get_event_log, version, + &log_location, &log_last_entry, + &truncated); if (status != EFI_SUCCESS || !log_location) return; @@ -126,13 +115,11 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg) } /* Allocate space for the logs and copy them. 
*/ - status = efi_call_early(allocate_pool, EFI_LOADER_DATA, - sizeof(*log_tbl) + log_size, - (void **) &log_tbl); + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, + sizeof(*log_tbl) + log_size, (void **)&log_tbl); if (status != EFI_SUCCESS) { - efi_printk(sys_table_arg, - "Unable to allocate memory for event log\n"); + efi_printk("Unable to allocate memory for event log\n"); return; } @@ -140,8 +127,7 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg) * Figure out whether any events have already been logged to the * final events structure, and if so how much space they take up */ - final_events_table = get_efi_config_table(sys_table_arg, - LINUX_EFI_TPM_FINAL_LOG_GUID); + final_events_table = get_efi_config_table(LINUX_EFI_TPM_FINAL_LOG_GUID); if (final_events_table && final_events_table->nr_events) { struct tcg_pcr_event2_head *header; int offset; @@ -169,12 +155,12 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg) log_tbl->version = version; memcpy(log_tbl->log, (void *) first_entry_addr, log_size); - status = efi_call_early(install_configuration_table, - &linux_eventlog_guid, log_tbl); + status = efi_bs_call(install_configuration_table, + &linux_eventlog_guid, log_tbl); if (status != EFI_SUCCESS) goto err_free; return; err_free: - efi_call_early(free_pool, log_tbl); + efi_bs_call(free_pool, log_tbl); } diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c index 38b686c67b17..2ff1883dc788 100644 --- a/drivers/firmware/efi/memmap.c +++ b/drivers/firmware/efi/memmap.c @@ -29,9 +29,32 @@ static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size) return PFN_PHYS(page_to_pfn(p)); } +void __init __efi_memmap_free(u64 phys, unsigned long size, unsigned long flags) +{ + if (flags & EFI_MEMMAP_MEMBLOCK) { + if (slab_is_available()) + memblock_free_late(phys, size); + else + memblock_free(phys, size); + } else if (flags & EFI_MEMMAP_SLAB) { + struct page *p = pfn_to_page(PHYS_PFN(phys)); + unsigned int order = get_order(size); + + free_pages((unsigned long) page_address(p), order); + } +} + +static void __init efi_memmap_free(void) +{ + __efi_memmap_free(efi.memmap.phys_map, + efi.memmap.desc_size * efi.memmap.nr_map, + efi.memmap.flags); +} + /** * efi_memmap_alloc - Allocate memory for the EFI memory map * @num_entries: Number of entries in the allocated map. + * @data: efi memmap installation parameters * * Depending on whether mm_init() has already been invoked or not, * either memblock or "normal" page allocation is used. @@ -39,34 +62,47 @@ static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size) * Returns the physical address of the allocated memory map on * success, zero on failure. 
*/ -phys_addr_t __init efi_memmap_alloc(unsigned int num_entries) +int __init efi_memmap_alloc(unsigned int num_entries, + struct efi_memory_map_data *data) { - unsigned long size = num_entries * efi.memmap.desc_size; - - if (slab_is_available()) - return __efi_memmap_alloc_late(size); + /* Expect allocation parameters are zero initialized */ + WARN_ON(data->phys_map || data->size); + + data->size = num_entries * efi.memmap.desc_size; + data->desc_version = efi.memmap.desc_version; + data->desc_size = efi.memmap.desc_size; + data->flags &= ~(EFI_MEMMAP_SLAB | EFI_MEMMAP_MEMBLOCK); + data->flags |= efi.memmap.flags & EFI_MEMMAP_LATE; + + if (slab_is_available()) { + data->flags |= EFI_MEMMAP_SLAB; + data->phys_map = __efi_memmap_alloc_late(data->size); + } else { + data->flags |= EFI_MEMMAP_MEMBLOCK; + data->phys_map = __efi_memmap_alloc_early(data->size); + } - return __efi_memmap_alloc_early(size); + if (!data->phys_map) + return -ENOMEM; + return 0; } /** * __efi_memmap_init - Common code for mapping the EFI memory map * @data: EFI memory map data - * @late: Use early or late mapping function? * * This function takes care of figuring out which function to use to * map the EFI memory map in efi.memmap based on how far into the boot * we are. * - * During bootup @late should be %false since we only have access to - * the early_memremap*() functions as the vmalloc space isn't setup. - * Once the kernel is fully booted we can fallback to the more robust - * memremap*() API. + * During bootup EFI_MEMMAP_LATE in data->flags should be clear since we + * only have access to the early_memremap*() functions as the vmalloc + * space isn't setup. Once the kernel is fully booted we can fallback + * to the more robust memremap*() API. * * Returns zero on success, a negative error code on failure. 
*/ -static int __init -__efi_memmap_init(struct efi_memory_map_data *data, bool late) +static int __init __efi_memmap_init(struct efi_memory_map_data *data) { struct efi_memory_map map; phys_addr_t phys_map; @@ -76,7 +112,7 @@ __efi_memmap_init(struct efi_memory_map_data *data, bool late) phys_map = data->phys_map; - if (late) + if (data->flags & EFI_MEMMAP_LATE) map.map = memremap(phys_map, data->size, MEMREMAP_WB); else map.map = early_memremap(phys_map, data->size); @@ -86,13 +122,16 @@ __efi_memmap_init(struct efi_memory_map_data *data, bool late) return -ENOMEM; } + /* NOP if data->flags & (EFI_MEMMAP_MEMBLOCK | EFI_MEMMAP_SLAB) == 0 */ + efi_memmap_free(); + map.phys_map = data->phys_map; map.nr_map = data->size / data->desc_size; map.map_end = map.map + data->size; map.desc_version = data->desc_version; map.desc_size = data->desc_size; - map.late = late; + map.flags = data->flags; set_bit(EFI_MEMMAP, &efi.flags); @@ -111,9 +150,10 @@ __efi_memmap_init(struct efi_memory_map_data *data, bool late) int __init efi_memmap_init_early(struct efi_memory_map_data *data) { /* Cannot go backwards */ - WARN_ON(efi.memmap.late); + WARN_ON(efi.memmap.flags & EFI_MEMMAP_LATE); - return __efi_memmap_init(data, false); + data->flags = 0; + return __efi_memmap_init(data); } void __init efi_memmap_unmap(void) @@ -121,7 +161,7 @@ void __init efi_memmap_unmap(void) if (!efi_enabled(EFI_MEMMAP)) return; - if (!efi.memmap.late) { + if (!(efi.memmap.flags & EFI_MEMMAP_LATE)) { unsigned long size; size = efi.memmap.desc_size * efi.memmap.nr_map; @@ -162,13 +202,14 @@ int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size) struct efi_memory_map_data data = { .phys_map = addr, .size = size, + .flags = EFI_MEMMAP_LATE, }; /* Did we forget to unmap the early EFI memmap? */ WARN_ON(efi.memmap.map); /* Were we already called? */ - WARN_ON(efi.memmap.late); + WARN_ON(efi.memmap.flags & EFI_MEMMAP_LATE); /* * It makes no sense to allow callers to register different @@ -178,13 +219,12 @@ int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size) data.desc_version = efi.memmap.desc_version; data.desc_size = efi.memmap.desc_size; - return __efi_memmap_init(&data, true); + return __efi_memmap_init(&data); } /** * efi_memmap_install - Install a new EFI memory map in efi.memmap - * @addr: Physical address of the memory map - * @nr_map: Number of entries in the memory map + * @ctx: map allocation parameters (address, size, flags) * * Unlike efi_memmap_init_*(), this function does not allow the caller * to switch from early to late mappings. It simply uses the existing @@ -192,18 +232,11 @@ int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size) * * Returns zero on success, a negative error code on failure. 
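Putting efi_memmap_alloc() and efi_memmap_install() together, a caller replacing the memory map would presumably drive the reworked API along these lines (a sketch; nr_entries and the descriptor rewrite step are placeholders, not taken from this diff):

    struct efi_memory_map_data data = {};
    int ret;

    /* Picks memblock vs. page allocation and records it in data.flags */
    ret = efi_memmap_alloc(nr_entries, &data);
    if (ret)
            return ret;

    /* ... map data.phys_map and write the updated descriptors ... */

    /* Remaps efi.memmap; the old buffer is freed via __efi_memmap_free() */
    ret = efi_memmap_install(&data);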
*/ -int __init efi_memmap_install(phys_addr_t addr, unsigned int nr_map) +int __init efi_memmap_install(struct efi_memory_map_data *data) { - struct efi_memory_map_data data; - efi_memmap_unmap(); - data.phys_map = addr; - data.size = efi.memmap.desc_size * nr_map; - data.desc_version = efi.memmap.desc_version; - data.desc_size = efi.memmap.desc_size; - - return __efi_memmap_init(&data, efi.memmap.late); + return __efi_memmap_init(data); } /** diff --git a/drivers/firmware/efi/rci2-table.c b/drivers/firmware/efi/rci2-table.c index 76b0c354a027..de1a9a1f9f14 100644 --- a/drivers/firmware/efi/rci2-table.c +++ b/drivers/firmware/efi/rci2-table.c @@ -81,6 +81,9 @@ static int __init efi_rci2_sysfs_init(void) struct kobject *tables_kobj; int ret = -ENOMEM; + if (rci2_table_phys == EFI_INVALID_TABLE_ADDR) + return 0; + rci2_base = memremap(rci2_table_phys, sizeof(struct rci2_table_global_hdr), MEMREMAP_WB); diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 8adffd42f8cb..f57d95a3db02 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -479,6 +479,15 @@ config GPIO_SAMA5D2_PIOBU The difference from regular GPIOs is that they maintain their value during backup/self-refresh. +config GPIO_SIFIVE + bool "SiFive GPIO support" + depends on OF_GPIO && IRQ_DOMAIN_HIERARCHY + select GPIO_GENERIC + select GPIOLIB_IRQCHIP + select REGMAP_MMIO + help + Say yes here to support the GPIO device on SiFive SoCs. + config GPIO_SIOX tristate "SIOX GPIO support" depends on SIOX @@ -553,8 +562,8 @@ config GPIO_TEGRA config GPIO_TEGRA186 tristate "NVIDIA Tegra186 GPIO support" - default ARCH_TEGRA_186_SOC - depends on ARCH_TEGRA_186_SOC || COMPILE_TEST + default ARCH_TEGRA_186_SOC || ARCH_TEGRA_194_SOC + depends on ARCH_TEGRA_186_SOC || ARCH_TEGRA_194_SOC || COMPILE_TEST depends on OF_GPIO select GPIOLIB_IRQCHIP select IRQ_DOMAIN_HIERARCHY @@ -573,7 +582,6 @@ config GPIO_THUNDERX tristate "Cavium ThunderX/OCTEON-TX GPIO" depends on ARCH_THUNDER || (64BIT && COMPILE_TEST) depends on PCI_MSI - select GPIOLIB_IRQCHIP select IRQ_DOMAIN_HIERARCHY select IRQ_FASTEOI_HIERARCHY_HANDLERS help @@ -1148,6 +1156,7 @@ config GPIO_MADERA config GPIO_MAX77620 tristate "GPIO support for PMIC MAX77620 and MAX20024" depends on MFD_MAX77620 + select GPIOLIB_IRQCHIP help GPIO driver for MAX77620 and MAX20024 PMIC from Maxim Semiconductor. MAX77620 PMIC has 8 pins that can be configured as GPIOs. 
The diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index 34eb8b2b12dd..11eeeebbde0d 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -124,6 +124,7 @@ obj-$(CONFIG_ARCH_SA1100) += gpio-sa1100.o obj-$(CONFIG_GPIO_SAMA5D2_PIOBU) += gpio-sama5d2-piobu.o obj-$(CONFIG_GPIO_SCH311X) += gpio-sch311x.o obj-$(CONFIG_GPIO_SCH) += gpio-sch.o +obj-$(CONFIG_GPIO_SIFIVE) += gpio-sifive.o obj-$(CONFIG_GPIO_SIOX) += gpio-siox.o obj-$(CONFIG_GPIO_SODAVILLE) += gpio-sodaville.o obj-$(CONFIG_GPIO_SPEAR_SPICS) += gpio-spear-spics.o diff --git a/drivers/gpio/gpio-aspeed-sgpio.c b/drivers/gpio/gpio-aspeed-sgpio.c index 7e99860ca447..8319812593e3 100644 --- a/drivers/gpio/gpio-aspeed-sgpio.c +++ b/drivers/gpio/gpio-aspeed-sgpio.c @@ -107,7 +107,7 @@ static void __iomem *bank_reg(struct aspeed_sgpio *gpio, return gpio->base + bank->irq_regs + GPIO_IRQ_STATUS; default: /* acturally if code runs to here, it's an error case */ - BUG_ON(1); + BUG(); } } diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c index 56d647a30e3e..94b8d3ae27bc 100644 --- a/drivers/gpio/gpio-mockup.c +++ b/drivers/gpio/gpio-mockup.c @@ -156,7 +156,7 @@ static int gpio_mockup_apply_pull(struct gpio_mockup_chip *chip, mutex_lock(&chip->lock); if (test_bit(FLAG_REQUESTED, &desc->flags) && - !test_bit(FLAG_IS_OUT, &desc->flags)) { + !test_bit(FLAG_IS_OUT, &desc->flags)) { curr = __gpio_mockup_get(chip, offset); if (curr == value) goto out; @@ -165,7 +165,7 @@ static int gpio_mockup_apply_pull(struct gpio_mockup_chip *chip, irq_type = irq_get_trigger_type(irq); if ((value == 1 && (irq_type & IRQ_TYPE_EDGE_RISING)) || - (value == 0 && (irq_type & IRQ_TYPE_EDGE_FALLING))) + (value == 0 && (irq_type & IRQ_TYPE_EDGE_FALLING))) irq_sim_fire(sim, offset); } @@ -226,7 +226,7 @@ static int gpio_mockup_get_direction(struct gpio_chip *gc, unsigned int offset) int direction; mutex_lock(&chip->lock); - direction = !chip->lines[offset].dir; + direction = chip->lines[offset].dir; mutex_unlock(&chip->lock); return direction; @@ -395,7 +395,7 @@ static int gpio_mockup_probe(struct platform_device *pdev) struct gpio_chip *gc; struct device *dev; const char *name; - int rv, base; + int rv, base, i; u16 ngpio; dev = &pdev->dev; @@ -447,6 +447,9 @@ static int gpio_mockup_probe(struct platform_device *pdev) if (!chip->lines) return -ENOMEM; + for (i = 0; i < gc->ngpio; i++) + chip->lines[i].dir = GPIO_LINE_DIRECTION_IN; + if (device_property_read_bool(dev, "named-gpio-lines")) { rv = gpio_mockup_name_lines(dev, chip); if (rv) diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c index f1e164cecff8..5ae30de3490a 100644 --- a/drivers/gpio/gpio-mpc8xxx.c +++ b/drivers/gpio/gpio-mpc8xxx.c @@ -346,6 +346,7 @@ static int mpc8xxx_probe(struct platform_device *pdev) return -ENOMEM; gc = &mpc8xxx_gc->gc; + gc->parent = &pdev->dev; if (of_property_read_bool(np, "little-endian")) { ret = bgpio_init(gc, &pdev->dev, 4, diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index 6652bee01966..9853547e7276 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -568,16 +568,18 @@ static void pca953x_irq_mask(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct pca953x_chip *chip = gpiochip_get_data(gc); + irq_hw_number_t hwirq = irqd_to_hwirq(d); - chip->irq_mask[d->hwirq / BANK_SZ] &= ~BIT(d->hwirq % BANK_SZ); + clear_bit(hwirq, chip->irq_mask); } static void pca953x_irq_unmask(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); 
struct pca953x_chip *chip = gpiochip_get_data(gc); + irq_hw_number_t hwirq = irqd_to_hwirq(d); - chip->irq_mask[d->hwirq / BANK_SZ] |= BIT(d->hwirq % BANK_SZ); + set_bit(hwirq, chip->irq_mask); } static int pca953x_irq_set_wake(struct irq_data *d, unsigned int on) @@ -635,8 +637,7 @@ static int pca953x_irq_set_type(struct irq_data *d, unsigned int type) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct pca953x_chip *chip = gpiochip_get_data(gc); - int bank_nb = d->hwirq / BANK_SZ; - u8 mask = BIT(d->hwirq % BANK_SZ); + irq_hw_number_t hwirq = irqd_to_hwirq(d); if (!(type & IRQ_TYPE_EDGE_BOTH)) { dev_err(&chip->client->dev, "irq %d: unsupported type %d\n", @@ -644,15 +645,8 @@ static int pca953x_irq_set_type(struct irq_data *d, unsigned int type) return -EINVAL; } - if (type & IRQ_TYPE_EDGE_FALLING) - chip->irq_trig_fall[bank_nb] |= mask; - else - chip->irq_trig_fall[bank_nb] &= ~mask; - - if (type & IRQ_TYPE_EDGE_RISING) - chip->irq_trig_raise[bank_nb] |= mask; - else - chip->irq_trig_raise[bank_nb] &= ~mask; + assign_bit(hwirq, chip->irq_trig_fall, type & IRQ_TYPE_EDGE_FALLING); + assign_bit(hwirq, chip->irq_trig_raise, type & IRQ_TYPE_EDGE_RISING); return 0; } @@ -661,10 +655,10 @@ static void pca953x_irq_shutdown(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct pca953x_chip *chip = gpiochip_get_data(gc); - u8 mask = BIT(d->hwirq % BANK_SZ); + irq_hw_number_t hwirq = irqd_to_hwirq(d); - chip->irq_trig_raise[d->hwirq / BANK_SZ] &= ~mask; - chip->irq_trig_fall[d->hwirq / BANK_SZ] &= ~mask; + clear_bit(hwirq, chip->irq_trig_raise); + clear_bit(hwirq, chip->irq_trig_fall); } static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pending) diff --git a/drivers/gpio/gpio-sifive.c b/drivers/gpio/gpio-sifive.c new file mode 100644 index 000000000000..147a1bd04515 --- /dev/null +++ b/drivers/gpio/gpio-sifive.c @@ -0,0 +1,252 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 SiFive + */ + +#include <linux/bitops.h> +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/of_irq.h> +#include <linux/gpio/driver.h> +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/regmap.h> + +#define SIFIVE_GPIO_INPUT_VAL 0x00 +#define SIFIVE_GPIO_INPUT_EN 0x04 +#define SIFIVE_GPIO_OUTPUT_EN 0x08 +#define SIFIVE_GPIO_OUTPUT_VAL 0x0C +#define SIFIVE_GPIO_RISE_IE 0x18 +#define SIFIVE_GPIO_RISE_IP 0x1C +#define SIFIVE_GPIO_FALL_IE 0x20 +#define SIFIVE_GPIO_FALL_IP 0x24 +#define SIFIVE_GPIO_HIGH_IE 0x28 +#define SIFIVE_GPIO_HIGH_IP 0x2C +#define SIFIVE_GPIO_LOW_IE 0x30 +#define SIFIVE_GPIO_LOW_IP 0x34 +#define SIFIVE_GPIO_OUTPUT_XOR 0x40 + +#define SIFIVE_GPIO_MAX 32 +#define SIFIVE_GPIO_IRQ_OFFSET 7 + +struct sifive_gpio { + void __iomem *base; + struct gpio_chip gc; + struct regmap *regs; + u32 irq_state; + unsigned int trigger[SIFIVE_GPIO_MAX]; + unsigned int irq_parent[SIFIVE_GPIO_MAX]; +}; + +static void sifive_gpio_set_ie(struct sifive_gpio *chip, unsigned int offset) +{ + unsigned long flags; + unsigned int trigger; + + spin_lock_irqsave(&chip->gc.bgpio_lock, flags); + trigger = (chip->irq_state & BIT(offset)) ? chip->trigger[offset] : 0; + regmap_update_bits(chip->regs, SIFIVE_GPIO_RISE_IE, BIT(offset), + (trigger & IRQ_TYPE_EDGE_RISING) ? BIT(offset) : 0); + regmap_update_bits(chip->regs, SIFIVE_GPIO_FALL_IE, BIT(offset), + (trigger & IRQ_TYPE_EDGE_FALLING) ? 
BIT(offset) : 0); + regmap_update_bits(chip->regs, SIFIVE_GPIO_HIGH_IE, BIT(offset), + (trigger & IRQ_TYPE_LEVEL_HIGH) ? BIT(offset) : 0); + regmap_update_bits(chip->regs, SIFIVE_GPIO_LOW_IE, BIT(offset), + (trigger & IRQ_TYPE_LEVEL_LOW) ? BIT(offset) : 0); + spin_unlock_irqrestore(&chip->gc.bgpio_lock, flags); +} + +static int sifive_gpio_irq_set_type(struct irq_data *d, unsigned int trigger) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct sifive_gpio *chip = gpiochip_get_data(gc); + int offset = irqd_to_hwirq(d); + + if (offset < 0 || offset >= gc->ngpio) + return -EINVAL; + + chip->trigger[offset] = trigger; + sifive_gpio_set_ie(chip, offset); + return 0; +} + +static void sifive_gpio_irq_enable(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct sifive_gpio *chip = gpiochip_get_data(gc); + int offset = irqd_to_hwirq(d) % SIFIVE_GPIO_MAX; + u32 bit = BIT(offset); + unsigned long flags; + + irq_chip_enable_parent(d); + + /* Switch to input */ + gc->direction_input(gc, offset); + + spin_lock_irqsave(&gc->bgpio_lock, flags); + /* Clear any sticky pending interrupts */ + regmap_write(chip->regs, SIFIVE_GPIO_RISE_IP, bit); + regmap_write(chip->regs, SIFIVE_GPIO_FALL_IP, bit); + regmap_write(chip->regs, SIFIVE_GPIO_HIGH_IP, bit); + regmap_write(chip->regs, SIFIVE_GPIO_LOW_IP, bit); + spin_unlock_irqrestore(&gc->bgpio_lock, flags); + + /* Enable interrupts */ + assign_bit(offset, (unsigned long *)&chip->irq_state, 1); + sifive_gpio_set_ie(chip, offset); +} + +static void sifive_gpio_irq_disable(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct sifive_gpio *chip = gpiochip_get_data(gc); + int offset = irqd_to_hwirq(d) % SIFIVE_GPIO_MAX; + + assign_bit(offset, (unsigned long *)&chip->irq_state, 0); + sifive_gpio_set_ie(chip, offset); + irq_chip_disable_parent(d); +} + +static void sifive_gpio_irq_eoi(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct sifive_gpio *chip = gpiochip_get_data(gc); + int offset = irqd_to_hwirq(d) % SIFIVE_GPIO_MAX; + u32 bit = BIT(offset); + unsigned long flags; + + spin_lock_irqsave(&gc->bgpio_lock, flags); + /* Clear all pending interrupts */ + regmap_write(chip->regs, SIFIVE_GPIO_RISE_IP, bit); + regmap_write(chip->regs, SIFIVE_GPIO_FALL_IP, bit); + regmap_write(chip->regs, SIFIVE_GPIO_HIGH_IP, bit); + regmap_write(chip->regs, SIFIVE_GPIO_LOW_IP, bit); + spin_unlock_irqrestore(&gc->bgpio_lock, flags); + + irq_chip_eoi_parent(d); +} + +static struct irq_chip sifive_gpio_irqchip = { + .name = "sifive-gpio", + .irq_set_type = sifive_gpio_irq_set_type, + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent, + .irq_enable = sifive_gpio_irq_enable, + .irq_disable = sifive_gpio_irq_disable, + .irq_eoi = sifive_gpio_irq_eoi, +}; + +static int sifive_gpio_child_to_parent_hwirq(struct gpio_chip *gc, + unsigned int child, + unsigned int child_type, + unsigned int *parent, + unsigned int *parent_type) +{ + *parent_type = IRQ_TYPE_NONE; + *parent = child + SIFIVE_GPIO_IRQ_OFFSET; + return 0; +} + +static const struct regmap_config sifive_gpio_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .fast_io = true, + .disable_locking = true, +}; + +static int sifive_gpio_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = pdev->dev.of_node; + struct device_node *irq_parent; + struct irq_domain *parent; + struct gpio_irq_chip *girq; + struct sifive_gpio *chip; + 
int ret, ngpio; + + chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + chip->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(chip->base)) { + dev_err(dev, "failed to allocate device memory\n"); + return PTR_ERR(chip->base); + } + + chip->regs = devm_regmap_init_mmio(dev, chip->base, + &sifive_gpio_regmap_config); + if (IS_ERR(chip->regs)) + return PTR_ERR(chip->regs); + + ngpio = of_irq_count(node); + if (ngpio >= SIFIVE_GPIO_MAX) { + dev_err(dev, "Too many GPIO interrupts (max=%d)\n", + SIFIVE_GPIO_MAX); + return -ENXIO; + } + + irq_parent = of_irq_find_parent(node); + if (!irq_parent) { + dev_err(dev, "no IRQ parent node\n"); + return -ENODEV; + } + parent = irq_find_host(irq_parent); + if (!parent) { + dev_err(dev, "no IRQ parent domain\n"); + return -ENODEV; + } + + ret = bgpio_init(&chip->gc, dev, 4, + chip->base + SIFIVE_GPIO_INPUT_VAL, + chip->base + SIFIVE_GPIO_OUTPUT_VAL, + NULL, + chip->base + SIFIVE_GPIO_OUTPUT_EN, + chip->base + SIFIVE_GPIO_INPUT_EN, + 0); + if (ret) { + dev_err(dev, "unable to init generic GPIO\n"); + return ret; + } + + /* Disable all GPIO interrupts before enabling parent interrupts */ + regmap_write(chip->regs, SIFIVE_GPIO_RISE_IE, 0); + regmap_write(chip->regs, SIFIVE_GPIO_FALL_IE, 0); + regmap_write(chip->regs, SIFIVE_GPIO_HIGH_IE, 0); + regmap_write(chip->regs, SIFIVE_GPIO_LOW_IE, 0); + chip->irq_state = 0; + + chip->gc.base = -1; + chip->gc.ngpio = ngpio; + chip->gc.label = dev_name(dev); + chip->gc.parent = dev; + chip->gc.owner = THIS_MODULE; + girq = &chip->gc.irq; + girq->chip = &sifive_gpio_irqchip; + girq->fwnode = of_node_to_fwnode(node); + girq->parent_domain = parent; + girq->child_to_parent_hwirq = sifive_gpio_child_to_parent_hwirq; + girq->handler = handle_bad_irq; + girq->default_type = IRQ_TYPE_NONE; + + platform_set_drvdata(pdev, chip); + return gpiochip_add_data(&chip->gc, chip); +} + +static const struct of_device_id sifive_gpio_match[] = { + { .compatible = "sifive,gpio0" }, + { .compatible = "sifive,fu540-c000-gpio" }, + { }, +}; + +static struct platform_driver sifive_gpio_driver = { + .probe = sifive_gpio_probe, + .driver = { + .name = "sifive_gpio", + .of_match_table = of_match_ptr(sifive_gpio_match), + }, +}; +builtin_platform_driver(sifive_gpio_driver) diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c index d08d86a22b1f..462770479045 100644 --- a/drivers/gpio/gpio-thunderx.c +++ b/drivers/gpio/gpio-thunderx.c @@ -53,6 +53,7 @@ struct thunderx_line { struct thunderx_gpio { struct gpio_chip chip; u8 __iomem *register_base; + struct irq_domain *irqd; struct msix_entry *msix_entries; /* per line MSI-X */ struct thunderx_line *line_entries; /* per line irq info */ raw_spinlock_t lock; @@ -285,60 +286,54 @@ static void thunderx_gpio_set_multiple(struct gpio_chip *chip, } } -static void thunderx_gpio_irq_ack(struct irq_data *d) +static void thunderx_gpio_irq_ack(struct irq_data *data) { - struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - struct thunderx_gpio *txgpio = gpiochip_get_data(gc); + struct thunderx_line *txline = irq_data_get_irq_chip_data(data); writeq(GPIO_INTR_INTR, - txgpio->register_base + intr_reg(irqd_to_hwirq(d))); + txline->txgpio->register_base + intr_reg(txline->line)); } -static void thunderx_gpio_irq_mask(struct irq_data *d) +static void thunderx_gpio_irq_mask(struct irq_data *data) { - struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - struct thunderx_gpio *txgpio = gpiochip_get_data(gc); + struct thunderx_line *txline = 
irq_data_get_irq_chip_data(data); writeq(GPIO_INTR_ENA_W1C, - txgpio->register_base + intr_reg(irqd_to_hwirq(d))); + txline->txgpio->register_base + intr_reg(txline->line)); } -static void thunderx_gpio_irq_mask_ack(struct irq_data *d) +static void thunderx_gpio_irq_mask_ack(struct irq_data *data) { - struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - struct thunderx_gpio *txgpio = gpiochip_get_data(gc); + struct thunderx_line *txline = irq_data_get_irq_chip_data(data); writeq(GPIO_INTR_ENA_W1C | GPIO_INTR_INTR, - txgpio->register_base + intr_reg(irqd_to_hwirq(d))); + txline->txgpio->register_base + intr_reg(txline->line)); } -static void thunderx_gpio_irq_unmask(struct irq_data *d) +static void thunderx_gpio_irq_unmask(struct irq_data *data) { - struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - struct thunderx_gpio *txgpio = gpiochip_get_data(gc); + struct thunderx_line *txline = irq_data_get_irq_chip_data(data); writeq(GPIO_INTR_ENA_W1S, - txgpio->register_base + intr_reg(irqd_to_hwirq(d))); + txline->txgpio->register_base + intr_reg(txline->line)); } -static int thunderx_gpio_irq_set_type(struct irq_data *d, +static int thunderx_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type) { - struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - struct thunderx_gpio *txgpio = gpiochip_get_data(gc); - struct thunderx_line *txline = - &txgpio->line_entries[irqd_to_hwirq(d)]; + struct thunderx_line *txline = irq_data_get_irq_chip_data(data); + struct thunderx_gpio *txgpio = txline->txgpio; u64 bit_cfg; - irqd_set_trigger_type(d, flow_type); + irqd_set_trigger_type(data, flow_type); bit_cfg = txline->fil_bits | GPIO_BIT_CFG_INT_EN; if (flow_type & IRQ_TYPE_EDGE_BOTH) { - irq_set_handler_locked(d, handle_fasteoi_ack_irq); + irq_set_handler_locked(data, handle_fasteoi_ack_irq); bit_cfg |= GPIO_BIT_CFG_INT_TYPE; } else { - irq_set_handler_locked(d, handle_fasteoi_mask_irq); + irq_set_handler_locked(data, handle_fasteoi_mask_irq); } raw_spin_lock(&txgpio->lock); @@ -367,6 +362,33 @@ static void thunderx_gpio_irq_disable(struct irq_data *data) irq_chip_disable_parent(data); } +static int thunderx_gpio_irq_request_resources(struct irq_data *data) +{ + struct thunderx_line *txline = irq_data_get_irq_chip_data(data); + struct thunderx_gpio *txgpio = txline->txgpio; + int r; + + r = gpiochip_lock_as_irq(&txgpio->chip, txline->line); + if (r) + return r; + + r = irq_chip_request_resources_parent(data); + if (r) + gpiochip_unlock_as_irq(&txgpio->chip, txline->line); + + return r; +} + +static void thunderx_gpio_irq_release_resources(struct irq_data *data) +{ + struct thunderx_line *txline = irq_data_get_irq_chip_data(data); + struct thunderx_gpio *txgpio = txline->txgpio; + + irq_chip_release_resources_parent(data); + + gpiochip_unlock_as_irq(&txgpio->chip, txline->line); +} + /* * Interrupts are chained from underlying MSI-X vectors. 
We have * these irq_chip functions to be able to handle level triggering @@ -383,24 +405,50 @@ static struct irq_chip thunderx_gpio_irq_chip = { .irq_unmask = thunderx_gpio_irq_unmask, .irq_eoi = irq_chip_eoi_parent, .irq_set_affinity = irq_chip_set_affinity_parent, + .irq_request_resources = thunderx_gpio_irq_request_resources, + .irq_release_resources = thunderx_gpio_irq_release_resources, .irq_set_type = thunderx_gpio_irq_set_type, .flags = IRQCHIP_SET_TYPE_MASKED }; -static int thunderx_gpio_child_to_parent_hwirq(struct gpio_chip *gc, - unsigned int child, - unsigned int child_type, - unsigned int *parent, - unsigned int *parent_type) +static int thunderx_gpio_irq_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, + irq_hw_number_t *hwirq, + unsigned int *type) { - struct thunderx_gpio *txgpio = gpiochip_get_data(gc); - - *parent = txgpio->base_msi + (2 * child); - *parent_type = IRQ_TYPE_LEVEL_HIGH; + struct thunderx_gpio *txgpio = d->host_data; + + if (WARN_ON(fwspec->param_count < 2)) + return -EINVAL; + if (fwspec->param[0] >= txgpio->chip.ngpio) + return -EINVAL; + *hwirq = fwspec->param[0]; + *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK; return 0; } +static int thunderx_gpio_irq_alloc(struct irq_domain *d, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + struct thunderx_line *txline = arg; + + return irq_domain_set_hwirq_and_chip(d, virq, txline->line, + &thunderx_gpio_irq_chip, txline); +} + +static const struct irq_domain_ops thunderx_gpio_irqd_ops = { + .alloc = thunderx_gpio_irq_alloc, + .translate = thunderx_gpio_irq_translate +}; + +static int thunderx_gpio_to_irq(struct gpio_chip *chip, unsigned int offset) +{ + struct thunderx_gpio *txgpio = gpiochip_get_data(chip); + + return irq_find_mapping(txgpio->irqd, offset); +} + static int thunderx_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id) { @@ -408,7 +456,6 @@ static int thunderx_gpio_probe(struct pci_dev *pdev, struct device *dev = &pdev->dev; struct thunderx_gpio *txgpio; struct gpio_chip *chip; - struct gpio_irq_chip *girq; int ngpio, i; int err = 0; @@ -453,8 +500,8 @@ static int thunderx_gpio_probe(struct pci_dev *pdev, } txgpio->msix_entries = devm_kcalloc(dev, - ngpio, sizeof(struct msix_entry), - GFP_KERNEL); + ngpio, sizeof(struct msix_entry), + GFP_KERNEL); if (!txgpio->msix_entries) { err = -ENOMEM; goto out; @@ -495,6 +542,27 @@ static int thunderx_gpio_probe(struct pci_dev *pdev, if (err < 0) goto out; + /* + * Push GPIO specific irqdomain on hierarchy created as a side + * effect of the pci_enable_msix() + */ + txgpio->irqd = irq_domain_create_hierarchy(irq_get_irq_data(txgpio->msix_entries[0].vector)->domain, + 0, 0, of_node_to_fwnode(dev->of_node), + &thunderx_gpio_irqd_ops, txgpio); + if (!txgpio->irqd) { + err = -ENOMEM; + goto out; + } + + /* Push on irq_data and the domain for each line. 
*/ + for (i = 0; i < ngpio; i++) { + err = irq_domain_push_irq(txgpio->irqd, + txgpio->msix_entries[i].vector, + &txgpio->line_entries[i]); + if (err < 0) + dev_err(dev, "irq_domain_push_irq: %d\n", err); + } + chip->label = KBUILD_MODNAME; chip->parent = dev; chip->owner = THIS_MODULE; @@ -509,28 +577,11 @@ static int thunderx_gpio_probe(struct pci_dev *pdev, chip->set = thunderx_gpio_set; chip->set_multiple = thunderx_gpio_set_multiple; chip->set_config = thunderx_gpio_set_config; - girq = &chip->irq; - girq->chip = &thunderx_gpio_irq_chip; - girq->fwnode = of_node_to_fwnode(dev->of_node); - girq->parent_domain = - irq_get_irq_data(txgpio->msix_entries[0].vector)->domain; - girq->child_to_parent_hwirq = thunderx_gpio_child_to_parent_hwirq; - girq->handler = handle_bad_irq; - girq->default_type = IRQ_TYPE_NONE; - + chip->to_irq = thunderx_gpio_to_irq; err = devm_gpiochip_add_data(dev, chip, txgpio); if (err) goto out; - /* Push on irq_data and the domain for each line. */ - for (i = 0; i < ngpio; i++) { - err = irq_domain_push_irq(chip->irq.domain, - txgpio->msix_entries[i].vector, - chip); - if (err < 0) - dev_err(dev, "irq_domain_push_irq: %d\n", err); - } - dev_info(dev, "ThunderX GPIO: %d lines with base %d.\n", ngpio, chip->base); return 0; @@ -545,10 +596,10 @@ static void thunderx_gpio_remove(struct pci_dev *pdev) struct thunderx_gpio *txgpio = pci_get_drvdata(pdev); for (i = 0; i < txgpio->chip.ngpio; i++) - irq_domain_pop_irq(txgpio->chip.irq.domain, + irq_domain_pop_irq(txgpio->irqd, txgpio->msix_entries[i].vector); - irq_domain_remove(txgpio->chip.irq.domain); + irq_domain_remove(txgpio->irqd); pci_set_drvdata(pdev, NULL); } diff --git a/drivers/gpio/gpio-xgs-iproc.c b/drivers/gpio/gpio-xgs-iproc.c index 773e5c24309e..b21c2e436b61 100644 --- a/drivers/gpio/gpio-xgs-iproc.c +++ b/drivers/gpio/gpio-xgs-iproc.c @@ -280,7 +280,7 @@ static int iproc_gpio_probe(struct platform_device *pdev) return 0; } -static int __exit iproc_gpio_remove(struct platform_device *pdev) +static int iproc_gpio_remove(struct platform_device *pdev) { struct iproc_gpio_chip *chip; diff --git a/drivers/gpio/gpio-xtensa.c b/drivers/gpio/gpio-xtensa.c index 08d7c3b32038..c8af34a6368f 100644 --- a/drivers/gpio/gpio-xtensa.c +++ b/drivers/gpio/gpio-xtensa.c @@ -44,15 +44,14 @@ static inline unsigned long enable_cp(unsigned long *cpenable) unsigned long flags; local_irq_save(flags); - RSR_CPENABLE(*cpenable); - WSR_CPENABLE(*cpenable | BIT(XCHAL_CP_ID_XTIOP)); - + *cpenable = xtensa_get_sr(cpenable); + xtensa_set_sr(*cpenable | BIT(XCHAL_CP_ID_XTIOP), cpenable); return flags; } static inline void disable_cp(unsigned long flags, unsigned long cpenable) { - WSR_CPENABLE(cpenable); + xtensa_set_sr(cpenable, cpenable); local_irq_restore(flags); } diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c index 4c3f6370eab4..05ba16fffdad 100644 --- a/drivers/gpio/gpio-zynq.c +++ b/drivers/gpio/gpio-zynq.c @@ -684,6 +684,8 @@ static void zynq_gpio_restore_context(struct zynq_gpio *gpio) unsigned int bank_num; for (bank_num = 0; bank_num < gpio->p_data->max_bank; bank_num++) { + writel_relaxed(ZYNQ_GPIO_IXR_DISABLE_ALL, gpio->base_addr + + ZYNQ_GPIO_INTDIS_OFFSET(bank_num)); writel_relaxed(gpio->context.datalsw[bank_num], gpio->base_addr + ZYNQ_GPIO_DATA_LSW_OFFSET(bank_num)); @@ -693,9 +695,6 @@ static void zynq_gpio_restore_context(struct zynq_gpio *gpio) writel_relaxed(gpio->context.dirm[bank_num], gpio->base_addr + ZYNQ_GPIO_DIRM_OFFSET(bank_num)); - writel_relaxed(gpio->context.int_en[bank_num], - 
gpio->base_addr + - ZYNQ_GPIO_INTEN_OFFSET(bank_num)); writel_relaxed(gpio->context.int_type[bank_num], gpio->base_addr + ZYNQ_GPIO_INTTYPE_OFFSET(bank_num)); @@ -705,6 +704,9 @@ static void zynq_gpio_restore_context(struct zynq_gpio *gpio) writel_relaxed(gpio->context.int_any[bank_num], gpio->base_addr + ZYNQ_GPIO_INTANY_OFFSET(bank_num)); + writel_relaxed(~(gpio->context.int_en[bank_num]), + gpio->base_addr + + ZYNQ_GPIO_INTEN_OFFSET(bank_num)); } } diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index d30e57dc755c..31fee5e918b7 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -21,11 +21,19 @@ #include "gpiolib.h" #include "gpiolib-acpi.h" +#define QUIRK_NO_EDGE_EVENTS_ON_BOOT 0x01l +#define QUIRK_NO_WAKEUP 0x02l + static int run_edge_events_on_boot = -1; module_param(run_edge_events_on_boot, int, 0444); MODULE_PARM_DESC(run_edge_events_on_boot, "Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto"); +static int honor_wakeup = -1; +module_param(honor_wakeup, int, 0444); +MODULE_PARM_DESC(honor_wakeup, + "Honor the ACPI wake-capable flag: 0=no, 1=yes, -1=auto"); + /** * struct acpi_gpio_event - ACPI GPIO event handler data * @@ -281,7 +289,7 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares, event->handle = evt_handle; event->handler = handler; event->irq = irq; - event->irq_is_wake = agpio->wake_capable == ACPI_WAKE_CAPABLE; + event->irq_is_wake = honor_wakeup && agpio->wake_capable == ACPI_WAKE_CAPABLE; event->pin = pin; event->desc = desc; @@ -1309,7 +1317,7 @@ static int acpi_gpio_handle_deferred_request_irqs(void) /* We must use _sync so that this runs after the first deferred_probe run */ late_initcall_sync(acpi_gpio_handle_deferred_request_irqs); -static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = { +static const struct dmi_system_id gpiolib_acpi_quirks[] = { { /* * The Minix Neo Z83-4 has a micro-USB-B id-pin handler for @@ -1319,7 +1327,8 @@ static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "MINIX"), DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"), - } + }, + .driver_data = (void *)QUIRK_NO_EDGE_EVENTS_ON_BOOT, }, { /* @@ -1331,20 +1340,52 @@ static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Wortmann_AG"), DMI_MATCH(DMI_PRODUCT_NAME, "TERRA_PAD_1061"), - } + }, + .driver_data = (void *)QUIRK_NO_EDGE_EVENTS_ON_BOOT, + }, + { + /* + * Various HP X2 10 Cherry Trail models use an external + * embedded-controller connected via I2C + an ACPI GPIO + * event handler. The embedded controller generates various + * spurious wakeup events when suspended. So disable wakeup + * for its handler (it uses the only ACPI GPIO event handler). + * This breaks wakeup when opening the lid; the user needs + * to press the power button to wake up the system. The + * alternative is suspend simply not working, which is worse.
+ */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP x2 Detachable 10-p0XX"), + }, + .driver_data = (void *)QUIRK_NO_WAKEUP, }, {} /* Terminating entry */ }; static int acpi_gpio_setup_params(void) { + const struct dmi_system_id *id; + long quirks = 0; + + id = dmi_first_match(gpiolib_acpi_quirks); + if (id) + quirks = (long)id->driver_data; + if (run_edge_events_on_boot < 0) { - if (dmi_check_system(run_edge_events_on_boot_blacklist)) + if (quirks & QUIRK_NO_EDGE_EVENTS_ON_BOOT) run_edge_events_on_boot = 0; else run_edge_events_on_boot = 1; } + if (honor_wakeup < 0) { + if (quirks & QUIRK_NO_WAKEUP) + honor_wakeup = 0; + else + honor_wakeup = 1; + } + return 0; } diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index dc27b1a88e93..1b3f217a35e2 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -23,6 +23,29 @@ #include "gpiolib.h" #include "gpiolib-of.h" +/** + * of_gpio_spi_cs_get_count() - special GPIO counting for SPI + * Some older GPIO controllers need special quirks. Currently we handle + * the Freescale GPIO controller, whose bindings don't use the + * established "cs-gpios" property for chip selects but instead rely on + * "gpios" for the chip-select lines. If we detect this, we redirect + * the counting of "cs-gpios" to count "gpios" transparently to the + * driver. + */ +static int of_gpio_spi_cs_get_count(struct device *dev, const char *con_id) +{ + struct device_node *np = dev->of_node; + + if (!IS_ENABLED(CONFIG_SPI_MASTER)) + return 0; + if (!con_id || strcmp(con_id, "cs")) + return 0; + if (!of_device_is_compatible(np, "fsl,spi") && + !of_device_is_compatible(np, "aeroflexgaisler,spictrl")) + return 0; + return of_gpio_named_count(np, "gpios"); +} + /* * This is used by external users of of_gpio_count() from <linux/of_gpio.h> * @@ -35,6 +58,10 @@ int of_gpio_get_count(struct device *dev, const char *con_id) char propname[32]; unsigned int i; + ret = of_gpio_spi_cs_get_count(dev, con_id); + if (ret > 0) + return ret; + for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) { if (con_id) snprintf(propname, sizeof(propname), "%s-%s", @@ -105,27 +132,6 @@ static void of_gpio_flags_quirks(struct device_node *np, int index) { /* - * Handle MMC "cd-inverted" and "wp-inverted" semantics. - */ - if (IS_ENABLED(CONFIG_MMC)) { - /* - * Active low is the default according to the - * SDHCI specification and the device tree - * bindings. However the code in the current - * kernel was written such that the phandle - * flags were always respected, and "cd-inverted" - * would invert the flag from the device phandle. - */ - if (!strcmp(propname, "cd-gpios")) { - if (of_property_read_bool(np, "cd-inverted")) - *flags ^= OF_GPIO_ACTIVE_LOW; - } - if (!strcmp(propname, "wp-gpios")) { - if (of_property_read_bool(np, "wp-inverted")) - *flags ^= OF_GPIO_ACTIVE_LOW; - } - } - /* * Some GPIO fixed regulator quirks. * Note that active low is the default. */ diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 9913886ede90..bcfbfded9ba3 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -220,6 +220,14 @@ int gpiod_get_direction(struct gpio_desc *desc) chip = gpiod_to_chip(desc); offset = gpio_chip_hwgpio(desc); + /* + * Open drain emulation using input mode may incorrectly report + * input here; fix that up.
+ */ + if (test_bit(FLAG_OPEN_DRAIN, &desc->flags) && + test_bit(FLAG_IS_OUT, &desc->flags)) + return 0; + if (!chip->get_direction) return -ENOTSUPP; @@ -3363,6 +3371,17 @@ int gpiod_is_active_low(const struct gpio_desc *desc) } EXPORT_SYMBOL_GPL(gpiod_is_active_low); +/** + * gpiod_toggle_active_low - toggle whether a GPIO is active-low or not + * @desc: the gpio descriptor to change + */ +void gpiod_toggle_active_low(struct gpio_desc *desc) +{ + VALIDATE_DESC_VOID(desc); + change_bit(FLAG_ACTIVE_LOW, &desc->flags); +} +EXPORT_SYMBOL_GPL(gpiod_toggle_active_low); + /* I/O calls are only valid after configuration completed; the relevant * "is this a valid GPIO" error checks should already have been done. * @@ -4472,8 +4491,9 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id, if (chip->ngpio <= p->chip_hwnum) { dev_err(dev, - "requested GPIO %d is out of range [0..%d] for chip %s\n", - idx, chip->ngpio, chip->label); + "requested GPIO %u (%u) is out of range [0..%u] for chip %s\n", + idx, p->chip_hwnum, chip->ngpio - 1, + chip->label); return ERR_PTR(-EINVAL); } diff --git a/drivers/gpu/drm/amd/acp/Kconfig b/drivers/gpu/drm/amd/acp/Kconfig index d968c2471412..0d12ebf66174 100644 --- a/drivers/gpu/drm/amd/acp/Kconfig +++ b/drivers/gpu/drm/amd/acp/Kconfig @@ -1,4 +1,4 @@ -# SPDX-License-Identifier: GPL-2.0-only +# SPDX-License-Identifier: MIT menu "ACP (Audio CoProcessor) Configuration" config DRM_AMD_ACP diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig index 2e98c016cb47..9375e7f12420 100644 --- a/drivers/gpu/drm/amd/amdgpu/Kconfig +++ b/drivers/gpu/drm/amd/amdgpu/Kconfig @@ -1,4 +1,4 @@ -# SPDX-License-Identifier: GPL-2.0-only +# SPDX-License-Identifier: MIT config DRM_AMDGPU_SI bool "Enable amdgpu support for SI parts" depends on DRM_AMDGPU diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index a97fb759e2f4..3e35a8f2c5e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c @@ -613,7 +613,17 @@ static bool amdgpu_atpx_detect(void) bool d3_supported = false; struct pci_dev *parent_pdev; - while ((pdev = pci_get_class(PCI_BASE_CLASS_DISPLAY << 16, pdev)) != NULL) { + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { + vga_count++; + + has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true); + + parent_pdev = pci_upstream_bridge(pdev); + d3_supported |= parent_pdev && parent_pdev->bridge_d3; + amdgpu_atpx_get_quirks(pdev); + } + + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) { vga_count++; has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 2cdaf3b2a721..6614d8a6f4c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -604,11 +604,8 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr) continue; } - for (i = 0; i < num_entities; i++) { - mutex_lock(&ctx->adev->lock_reset); + for (i = 0; i < num_entities; i++) drm_sched_entity_fini(&ctx->entities[0][i].entity); - mutex_unlock(&ctx->adev->lock_reset); - } } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 0ffc9447b573..30a1e3ac21d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -142,7 +142,7 @@ int amdgpu_async_gfx_ring = 1; int 
amdgpu_mcbp = 0; int amdgpu_discovery = -1; int amdgpu_mes = 0; -int amdgpu_noretry = 1; +int amdgpu_noretry; int amdgpu_force_asic_type = -1; struct amdgpu_mgpu_info mgpu_info = { @@ -588,7 +588,7 @@ MODULE_PARM_DESC(mes, module_param_named(mes, amdgpu_mes, int, 0444); MODULE_PARM_DESC(noretry, - "Disable retry faults (0 = retry enabled, 1 = retry disabled (default))"); + "Disable retry faults (0 = retry enabled (default), 1 = retry disabled)"); module_param_named(noretry, amdgpu_noretry, int, 0644); /** @@ -1004,7 +1004,7 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14}, /* Renoir */ - {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU}, /* Navi12 */ {0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT}, @@ -1359,7 +1359,8 @@ static struct drm_driver kms_driver = { .driver_features = DRIVER_USE_AGP | DRIVER_ATOMIC | DRIVER_GEM | - DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ, + DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ | + DRIVER_SYNCOBJ_TIMELINE, .load = amdgpu_driver_load_kms, .open = amdgpu_driver_open_kms, .postclose = amdgpu_driver_postclose_kms, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 44be3a45b25e..e1b8d8daeafc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -1488,7 +1488,7 @@ out: /* Start rlc autoload after psp received all the gfx firmware */ if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ? - AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM)) { + AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_G)) { ret = psp_rlc_autoload(psp); if (ret) { DRM_ERROR("Failed to start rlc autoload\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index 410587b950f3..914acecda5cf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h @@ -292,10 +292,10 @@ enum AMDGPU_UCODE_ID { AMDGPU_UCODE_ID_CP_MEC2_JT, AMDGPU_UCODE_ID_CP_MES, AMDGPU_UCODE_ID_CP_MES_DATA, - AMDGPU_UCODE_ID_RLC_G, AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL, AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM, AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM, + AMDGPU_UCODE_ID_RLC_G, AMDGPU_UCODE_ID_STORAGE, AMDGPU_UCODE_ID_SMC, AMDGPU_UCODE_ID_UVD, diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c index 16fbd2bc8ad1..4043ebcea5de 100644 --- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c +++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c @@ -268,23 +268,29 @@ static void df_v3_6_update_medium_grain_clock_gating(struct amdgpu_device *adev, { u32 tmp; - /* Put DF on broadcast mode */ - adev->df_funcs->enable_broadcast_mode(adev, true); - - if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) { - tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater); - tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK; - tmp |= DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY; - WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp); - } else { - tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater); - tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK; - tmp |= DF_V3_6_MGCG_DISABLE; - WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp); - } + if (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG) { + /* Put DF on broadcast mode */ + adev->df_funcs->enable_broadcast_mode(adev, true); + + if 
(enable) { + tmp = RREG32_SOC15(DF, 0, + mmDF_PIE_AON0_DfGlobalClkGater); + tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK; + tmp |= DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY; + WREG32_SOC15(DF, 0, + mmDF_PIE_AON0_DfGlobalClkGater, tmp); + } else { + tmp = RREG32_SOC15(DF, 0, + mmDF_PIE_AON0_DfGlobalClkGater); + tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK; + tmp |= DF_V3_6_MGCG_DISABLE; + WREG32_SOC15(DF, 0, + mmDF_PIE_AON0_DfGlobalClkGater, tmp); + } - /* Exit broadcast mode */ - adev->df_funcs->enable_broadcast_mode(adev, false); + /* Exit broadcast mode */ + adev->df_funcs->enable_broadcast_mode(adev, false); + } } static void df_v3_6_get_clockgating_state(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index f2c1b026397b..ba9e53a1abc3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -117,10 +117,13 @@ static const struct soc15_reg_golden golden_settings_gc_10_1[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CGTT_SCLK_CTRL, 0x10000000, 0x10000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL2, 0xffffffff, 0x1402002f), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xffff9fff, 0x00001188), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_TIMEOUT_COUNTER, 0xffffffff, 0x00000800), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x08000009), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00400000, 0x04440000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000800, 0x00000820), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL, 0x001f0000, 0x00070104), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000100, 0x00000130), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff), @@ -162,10 +165,13 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_1[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CGTT_SCLK_CTRL, 0xffff0fff, 0x10000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL2, 0xffffffff, 0x1402002f), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xffffbfff, 0x00000188), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_TIMEOUT_COUNTER, 0xffffffff, 0x00000800), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x08000009), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00400000, 0x04440000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000800, 0x00000820), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL, 0x001f0000, 0x00070105), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000133, 0x00000130), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff), diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 983db77999e7..52a647d7022d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -6146,7 +6146,23 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; bool int_sel = flags & AMDGPU_FENCE_FLAG_INT; - /* EVENT_WRITE_EOP - flush caches, send int */ + /* Workaround for cache flush 
problems. First send a dummy EOP + * event down the pipe with seq one below. + */ + amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); + amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN | + EOP_TC_ACTION_EN | + EOP_TC_WB_ACTION_EN | + EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | + EVENT_INDEX(5))); + amdgpu_ring_write(ring, addr & 0xfffffffc); + amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) | + DATA_SEL(1) | INT_SEL(0)); + amdgpu_ring_write(ring, lower_32_bits(seq - 1)); + amdgpu_ring_write(ring, upper_32_bits(seq - 1)); + + /* Then send the real EOP event down the pipe: + * EVENT_WRITE_EOP - flush caches, send int */ amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN | EOP_TC_ACTION_EN | @@ -6888,7 +6904,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { 5 + /* COND_EXEC */ 7 + /* PIPELINE_SYNC */ VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 9 + /* VM_FLUSH */ - 8 + /* FENCE for VM_FLUSH */ + 12 + /* FENCE for VM_FLUSH */ 20 + /* GDS switch */ 4 + /* double SWITCH_BUFFER, the first COND_EXEC jump to the place just @@ -6900,7 +6916,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { 31 + /* DE_META */ 3 + /* CNTX_CTRL */ 5 + /* HDP_INVL */ - 8 + 8 + /* FENCE x2 */ + 12 + 12 + /* FENCE x2 */ 2, /* SWITCH_BUFFER */ .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */ .emit_ib = gfx_v8_0_ring_emit_ib_gfx, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 66328ffa395a..97105a5bb246 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1052,17 +1052,10 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev) case CHIP_VEGA20: break; case CHIP_RAVEN: - /* Disable GFXOFF on original raven. There are combinations - * of sbios and platforms that are not stable. - */ - if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)) - adev->pm.pp_feature &= ~PP_GFXOFF_MASK; - else if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) - &&((adev->gfx.rlc_fw_version != 106 && - adev->gfx.rlc_fw_version < 531) || - (adev->gfx.rlc_fw_version == 53815) || - (adev->gfx.rlc_feature_version < 1) || - !adev->gfx.rlc.is_rlc_v2_1)) + if (!(adev->rev_id >= 0x8 || + adev->pdev->device == 0x15d8) && + (adev->pm.fw_version < 0x41e2b || /* not raven1 fresh */ + !adev->gfx.rlc.is_rlc_v2_1)) /* without rlc save restore ucodes */ adev->pm.pp_feature &= ~PP_GFXOFF_MASK; if (adev->pm.pp_feature & PP_GFXOFF_MASK) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 232469507446..f5725336a5f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -219,6 +219,21 @@ static uint32_t gmc_v10_0_get_invalidate_req(unsigned int vmid, return req; } +/** + * gmc_v10_0_use_invalidate_semaphore - judge whether to use semaphore + * + * @adev: amdgpu_device pointer + * @vmhub: vmhub type + * + */ +static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev, + uint32_t vmhub) +{ + return ((vmhub == AMDGPU_MMHUB_0 || + vmhub == AMDGPU_MMHUB_1) && + (!amdgpu_sriov_vf(adev))); +} + /* * GART * VMID 0 is the physical GPU addresses as used by the kernel. 
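The gmc_v10_0_use_invalidate_semaphore() helper above centralizes the decision that the flush hunks below apply: the MMHUB invalidation engines expose a hardware semaphore register, and the flush path brackets the invalidation request with an acquire (poll until a read returns 1) and a release (write 0). A minimal sketch of that CPU-side pattern, using the register fields and accessors visible in the surrounding hunks; the function name and the omitted ack polling are illustrative simplifications, not the driver's literal code:

static void example_flush_with_semaphore(struct amdgpu_device *adev,
					 struct amdgpu_vmhub *hub,
					 unsigned int eng, u32 req)
{
	u32 tmp;
	int i;

	/* acquire: a read return value of 1 means the semaphore is held */
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng);
		if (tmp & 0x1)
			break;
		udelay(1);
	}

	/* issue the invalidation request itself */
	WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, req);

	/* release: writing 0 hands the semaphore back after invalidation */
	WREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng, 0);
}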
@@ -229,6 +244,7 @@ static uint32_t gmc_v10_0_get_invalidate_req(unsigned int vmid, static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, unsigned int vmhub, uint32_t flush_type) { + bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub); struct amdgpu_vmhub *hub = &adev->vmhub[vmhub]; u32 tmp = gmc_v10_0_get_invalidate_req(vmid, flush_type); /* Use register 17 for GART */ @@ -244,8 +260,7 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, */ /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ - if (vmhub == AMDGPU_MMHUB_0 || - vmhub == AMDGPU_MMHUB_1) { + if (use_semaphore) { for (i = 0; i < adev->usec_timeout; i++) { /* a read return value of 1 means semaphore acquire */ tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng); @@ -278,8 +293,7 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, } /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ - if (vmhub == AMDGPU_MMHUB_0 || - vmhub == AMDGPU_MMHUB_1) + if (use_semaphore) /* * add semaphore release after invalidation, * write with 0 means semaphore release @@ -369,6 +383,7 @@ error_alloc: static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, unsigned vmid, uint64_t pd_addr) { + bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub); struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; uint32_t req = gmc_v10_0_get_invalidate_req(vmid, 0); unsigned eng = ring->vm_inv_eng; @@ -381,8 +396,7 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, */ /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ - if (ring->funcs->vmhub == AMDGPU_MMHUB_0 || - ring->funcs->vmhub == AMDGPU_MMHUB_1) + if (use_semaphore) /* a read return value of 1 means semaphore acquire */ amdgpu_ring_emit_reg_wait(ring, hub->vm_inv_eng0_sem + eng, 0x1, 0x1); @@ -398,8 +412,7 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, req, 1 << vmid); /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ - if (ring->funcs->vmhub == AMDGPU_MMHUB_0 || - ring->funcs->vmhub == AMDGPU_MMHUB_1) + if (use_semaphore) /* * add semaphore release after invalidation, * write with 0 means semaphore release diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 3c355fb5d2b4..a5b68b5e452f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -416,6 +416,24 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid, return req; } +/** + * gmc_v9_0_use_invalidate_semaphore - judge whether to use semaphore + * + * @adev: amdgpu_device pointer + * @vmhub: vmhub type + * + */ +static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev, + uint32_t vmhub) +{ + return ((vmhub == AMDGPU_MMHUB_0 || + vmhub == AMDGPU_MMHUB_1) && + (!amdgpu_sriov_vf(adev)) && + (!(adev->asic_type == CHIP_RAVEN && + adev->rev_id < 0x8 && + adev->pdev->device == 0x15d8))); +} + /* * GART * VMID 0 is the physical GPU addresses as used by the kernel.
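gmc_v9_0_use_invalidate_semaphore() above gates the semaphore on one extra condition (the early-Raven 0x15d8 parts are excluded), but both drivers consume the result the same way on the command-stream path: the acquire and release are emitted into the ring instead of being performed by the CPU. A hedged sketch of that GPU-side ordering, built from the ring helpers visible in the emit_flush hunks; the function name and the omitted page-directory programming are illustrative:

static void example_emit_flush_with_semaphore(struct amdgpu_ring *ring,
					      struct amdgpu_vmhub *hub,
					      unsigned int eng, u32 req,
					      bool use_semaphore)
{
	if (use_semaphore)
		/* stall the engine until a read of the sem register returns 1 */
		amdgpu_ring_emit_reg_wait(ring, hub->vm_inv_eng0_sem + eng,
					  0x1, 0x1);

	/* the invalidation request, ordered after the acquire */
	amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_req + eng, req);

	if (use_semaphore)
		/* write 0 to release the semaphore once the request is queued */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + eng, 0);
}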
@@ -435,6 +453,7 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid, static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, uint32_t vmhub, uint32_t flush_type) { + bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub); const unsigned eng = 17; u32 j, tmp; struct amdgpu_vmhub *hub; @@ -468,8 +487,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, */ /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ - if (vmhub == AMDGPU_MMHUB_0 || - vmhub == AMDGPU_MMHUB_1) { + if (use_semaphore) { for (j = 0; j < adev->usec_timeout; j++) { /* a read return value of 1 means semaphore acquire */ tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng); @@ -499,8 +517,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, } /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ - if (vmhub == AMDGPU_MMHUB_0 || - vmhub == AMDGPU_MMHUB_1) + if (use_semaphore) /* * add semaphore release after invalidation, * write with 0 means semaphore release @@ -518,6 +535,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, unsigned vmid, uint64_t pd_addr) { + bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub); struct amdgpu_device *adev = ring->adev; struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub]; uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0); @@ -531,8 +549,7 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, */ /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ - if (ring->funcs->vmhub == AMDGPU_MMHUB_0 || - ring->funcs->vmhub == AMDGPU_MMHUB_1) + if (use_semaphore) /* a read return value of 1 means semaphore acquire */ amdgpu_ring_emit_reg_wait(ring, hub->vm_inv_eng0_sem + eng, 0x1, 0x1); @@ -548,8 +565,7 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, req, 1 << vmid); /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well.
*/ - if (ring->funcs->vmhub == AMDGPU_MMHUB_0 || - ring->funcs->vmhub == AMDGPU_MMHUB_1) + if (use_semaphore) /* * add semaphore release after invalidation, * write with 0 means semaphore release diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 4ef4d31f5231..2f52b7f4d25c 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -254,7 +254,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4_3[] = { SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0), - SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000) + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x03fbe1fe) }; static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig index ba0e68057a89..b3672d10ea54 100644 --- a/drivers/gpu/drm/amd/amdkfd/Kconfig +++ b/drivers/gpu/drm/amd/amdkfd/Kconfig @@ -1,4 +1,4 @@ -# SPDX-License-Identifier: GPL-2.0-only +# SPDX-License-Identifier: MIT # # Heterogenous system architecture configuration # diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig index 313183b80032..ae161fe86ebb 100644 --- a/drivers/gpu/drm/amd/display/Kconfig +++ b/drivers/gpu/drm/amd/display/Kconfig @@ -1,4 +1,4 @@ -# SPDX-License-Identifier: GPL-2.0-only +# SPDX-License-Identifier: MIT menu "Display Engine Configuration" depends on DRM && DRM_AMDGPU diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 7aac9568d3be..803e59d97411 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -3356,27 +3356,21 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing) return color_space; } -static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out) -{ - if (timing_out->display_color_depth <= COLOR_DEPTH_888) - return; - - timing_out->display_color_depth--; -} - -static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out, - const struct drm_display_info *info) +static bool adjust_colour_depth_from_display_info( + struct dc_crtc_timing *timing_out, + const struct drm_display_info *info) { + enum dc_color_depth depth = timing_out->display_color_depth; int normalized_clk; - if (timing_out->display_color_depth <= COLOR_DEPTH_888) - return; do { normalized_clk = timing_out->pix_clk_100hz / 10; /* YCbCr 4:2:0 requires additional adjustment of 1/2 */ if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420) normalized_clk /= 2; /* Adjusting pix clock following on HDMI spec based on colour depth */ - switch (timing_out->display_color_depth) { + switch (depth) { + case COLOR_DEPTH_888: + break; case COLOR_DEPTH_101010: normalized_clk = (normalized_clk * 30) / 24; break; @@ -3387,14 +3381,15 @@ static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_ normalized_clk = (normalized_clk * 48) / 24; break; default: - return; + /* The above depths are the only ones valid for HDMI. 
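+ * For reference, and not stated in the hunk itself, the scaling factors in the switch above follow the HDMI deep-color ratios: the TMDS clock grows by depth/24, so 30 bpp + * runs at 30/24 = 1.25x, 36 bpp at 1.5x and 48 bpp at 2x the 24 bpp rate (after the clock was halved for YCbCr 4:2:0 where applicable).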
*/ + return false; } - if (normalized_clk <= info->max_tmds_clock) - return; - reduce_mode_colour_depth(timing_out); - - } while (timing_out->display_color_depth > COLOR_DEPTH_888); - + if (normalized_clk <= info->max_tmds_clock) { + timing_out->display_color_depth = depth; + return true; + } + } while (--depth > COLOR_DEPTH_666); + return false; } static void fill_stream_properties_from_drm_display_mode( @@ -3474,8 +3469,14 @@ static void fill_stream_properties_from_drm_display_mode( stream->out_transfer_func->type = TF_TYPE_PREDEFINED; stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB; - if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) - adjust_colour_depth_from_display_info(timing_out, info); + if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { + if (!adjust_colour_depth_from_display_info(timing_out, info) && + drm_mode_is_420_also(info, mode_in) && + timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) { + timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; + adjust_colour_depth_from_display_info(timing_out, info); + } + } } static void fill_audio_info(struct audio_info *audio_info, diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 7873abea4112..5c3fcaa47410 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -1625,6 +1625,7 @@ static enum bp_result construct_integrated_info( /* Don't need to check major revision as they are all 1 */ switch (revision.minor) { case 11: + case 12: result = get_integrated_info_v11(bp, info); break; default: diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index 790a2d211bd6..35c55e54eac0 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -471,12 +471,28 @@ static void rn_notify_wm_ranges(struct clk_mgr *clk_mgr_base) } +static bool rn_are_clock_states_equal(struct dc_clocks *a, + struct dc_clocks *b) +{ + if (a->dispclk_khz != b->dispclk_khz) + return false; + else if (a->dppclk_khz != b->dppclk_khz) + return false; + else if (a->dcfclk_khz != b->dcfclk_khz) + return false; + else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz) + return false; + + return true; +} + + static struct clk_mgr_funcs dcn21_funcs = { .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, .update_clocks = rn_update_clocks, .init_clocks = rn_init_clocks, .enable_pme_wa = rn_enable_pme_wa, - /* .dump_clk_registers = rn_dump_clk_registers, */ + .are_clock_states_equal = rn_are_clock_states_equal, .notify_wm_ranges = rn_notify_wm_ranges }; @@ -518,36 +534,83 @@ struct clk_bw_params rn_bw_params = { .num_entries = 4, }, - .wm_table = { - .entries = { - { - .wm_inst = WM_A, - .wm_type = WM_TYPE_PSTATE_CHG, - .pstate_latency_us = 23.84, - .valid = true, - }, - { - .wm_inst = WM_B, - .wm_type = WM_TYPE_PSTATE_CHG, - .pstate_latency_us = 23.84, - .valid = true, - }, - { - .wm_inst = WM_C, - .wm_type = WM_TYPE_PSTATE_CHG, - .pstate_latency_us = 23.84, - .valid = true, - }, - { - .wm_inst = WM_D, - .wm_type = WM_TYPE_PSTATE_CHG, - .pstate_latency_us = 23.84, - .valid = true, - }, +}; + +struct wm_table ddr4_wm_table = { + .entries = { + { + .wm_inst = WM_A, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 11.72, + .sr_exit_time_us = 6.09, + .sr_enter_plus_exit_time_us = 7.14, + .valid = true, + }, + { + .wm_inst = WM_B, + .wm_type = WM_TYPE_PSTATE_CHG, + 
.pstate_latency_us = 11.72, + .sr_exit_time_us = 10.12, + .sr_enter_plus_exit_time_us = 11.48, + .valid = true, + }, + { + .wm_inst = WM_C, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 11.72, + .sr_exit_time_us = 10.12, + .sr_enter_plus_exit_time_us = 11.48, + .valid = true, + }, + { + .wm_inst = WM_D, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 11.72, + .sr_exit_time_us = 10.12, + .sr_enter_plus_exit_time_us = 11.48, + .valid = true, }, } }; +struct wm_table lpddr4_wm_table = { + .entries = { + { + .wm_inst = WM_A, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 23.84, + .sr_exit_time_us = 12.5, + .sr_enter_plus_exit_time_us = 17.0, + .valid = true, + }, + { + .wm_inst = WM_B, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 23.84, + .sr_exit_time_us = 12.5, + .sr_enter_plus_exit_time_us = 17.0, + .valid = true, + }, + { + .wm_inst = WM_C, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 23.84, + .sr_exit_time_us = 12.5, + .sr_enter_plus_exit_time_us = 17.0, + .valid = true, + }, + { + .wm_inst = WM_D, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 23.84, + .sr_exit_time_us = 12.5, + .sr_enter_plus_exit_time_us = 17.0, + .valid = true, + }, + } +}; + + static unsigned int find_dcfclk_for_voltage(struct dpm_clocks *clock_table, unsigned int voltage) { int i; @@ -561,7 +624,7 @@ static unsigned int find_dcfclk_for_voltage(struct dpm_clocks *clock_table, unsi return 0; } -static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct dpm_clocks *clock_table, struct hw_asic_id *asic_id) +static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct dpm_clocks *clock_table, struct integrated_info *bios_info) { int i, j = 0; @@ -593,8 +656,8 @@ static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params bw_params->clk_table.entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, clock_table->FClocks[j].Vol); } - bw_params->vram_type = asic_id->vram_type; - bw_params->num_channels = asic_id->vram_width / DDR4_DRAM_WIDTH; + bw_params->vram_type = bios_info->memory_type; + bw_params->num_channels = bios_info->ma_channel_number; for (i = 0; i < WM_SET_COUNT; i++) { bw_params->wm_table.entries[i].wm_inst = i; @@ -669,15 +732,24 @@ void rn_clk_mgr_construct( ASSERT(clk_mgr->base.dprefclk_khz == 600000); clk_mgr->base.dprefclk_khz = 600000; } + + if (ctx->dc_bios->integrated_info->memory_type == LpDdr4MemType) { + rn_bw_params.wm_table = lpddr4_wm_table; + } else { + rn_bw_params.wm_table = ddr4_wm_table; + } } dce_clock_read_ss_info(clk_mgr); + clk_mgr->base.bw_params = &rn_bw_params; if (pp_smu && pp_smu->rn_funcs.get_dpm_clock_table) { pp_smu->rn_funcs.get_dpm_clock_table(&pp_smu->rn_funcs.pp_smu, &clock_table); - rn_clk_mgr_helper_populate_bw_params(clk_mgr->base.bw_params, &clock_table, &ctx->asic_id); + if (ctx->dc_bios && ctx->dc_bios->integrated_info) { + rn_clk_mgr_helper_populate_bw_params (clk_mgr->base.bw_params, &clock_table, ctx->dc_bios->integrated_info); + } } if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment) && clk_mgr->smu_ver >= 0x00371500) { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 12ba6fdf89b7..4619f94f0ac7 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -372,7 +372,7 @@ bool dc_link_is_dp_sink_present(struct dc_link *link) if (GPIO_RESULT_OK != dal_ddc_open( ddc, GPIO_MODE_INPUT, GPIO_DDC_CONFIG_TYPE_MODE_I2C)) { - 
dal_gpio_destroy_ddc(&ddc); + dal_ddc_close(ddc); return present; } @@ -817,8 +817,8 @@ static bool dc_link_detect_helper(struct dc_link *link, } case SIGNAL_TYPE_EDP: { - read_current_link_settings_on_detect(link); detect_edp_sink_caps(link); + read_current_link_settings_on_detect(link); sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX; sink_caps.signal = SIGNAL_TYPE_EDP; break; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c index 7f904d55c1bc..81789191d4ec 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c @@ -586,7 +586,7 @@ bool dal_ddc_service_query_ddc_data( bool dal_ddc_submit_aux_command(struct ddc_service *ddc, struct aux_payload *payload) { - uint8_t retrieved = 0; + uint32_t retrieved = 0; bool ret = 0; if (!ddc) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 0f59b68aa4c2..504055fc70e8 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -3522,7 +3522,14 @@ void dp_set_fec_enable(struct dc_link *link, bool enable) if (link_enc->funcs->fec_set_enable && link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) { if (link->fec_state == dc_link_fec_ready && enable) { - msleep(1); + /* According to the DP spec, the FEC enable sequence can first + * be transmitted anytime after 1000 LL codes have + * been transmitted on the link after link training + * completion. Using 1 lane RBR gives the maximum + * time for transmitting 1000 LL codes, which is 6.173 us. + * So use a 7 microsecond delay instead. + */ + udelay(7); link_enc->funcs->fec_set_enable(link_enc, true); link->fec_state = dc_link_fec_enabled; } else if (link->fec_state == dc_link_fec_enabled && !enable) { diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c index e472608faf33..793c0cec407f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c @@ -583,6 +583,8 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc, uint8_t reply; bool payload_reply = true; enum aux_channel_operation_result operation_result; + bool retry_on_defer = false; + int aux_ack_retries = 0, aux_defer_retries = 0, aux_i2c_defer_retries = 0, @@ -613,8 +615,10 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc, break; case AUX_TRANSACTION_REPLY_AUX_DEFER: - case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK: case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER: + retry_on_defer = true; + /* fall through */ + case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK: if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES) { goto fail; } else { @@ -647,15 +651,24 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc, break; case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT: - if (++aux_timeout_retries >= AUX_MAX_TIMEOUT_RETRIES) - goto fail; - else { - /* - * DP 1.4, 2.8.2: AUX Transaction Response/Reply Timeouts - * According to the DP spec there should be 3 retries total - * with a 400us wait inbetween each. Hardware already waits - * for 550us therefore no wait is required here. - */ + // Check whether a DEFER had occurred before the timeout. + // If so, treat timeout as a DEFER.
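+ // (An aside on the udelay(7) added in dp_set_fec_enable() above, + // assuming one LL code corresponds to one 8b/10b link symbol, which + // the hunk itself does not state: at RBR a lane carries + // 1.62e9 / 10 = 162e6 symbols/s, so 1000 symbols take + // 1000 / 162e6 s ~= 6.173 us, and udelay(7) rounds that single-lane + // worst case up to a whole microsecond.)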
+ if (retry_on_defer) { + if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES) + goto fail; + else if (payload->defer_delay > 0) + msleep(payload->defer_delay); + } else { + if (++aux_timeout_retries >= AUX_MAX_TIMEOUT_RETRIES) + goto fail; + else { + /* + * DP 1.4, 2.8.2: AUX Transaction Response/Reply Timeouts + * According to the DP spec there should be 3 retries total + * with a 400us wait in between each. Hardware already waits + * for 550us, therefore no wait is required here. + */ + } } break; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile index 63f3bddba7da..10b47986526b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: MIT # # Makefile for DCN. diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 09793336d84f..23ff2f1c75b5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -923,7 +923,9 @@ static const struct resource_caps res_cap_nv14 = { .num_dwb = 1, .num_ddc = 5, .num_vmid = 16, +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT .num_dsc = 5, +#endif }; static const struct dc_debug_options debug_defaults_drv = { @@ -1536,13 +1538,20 @@ enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state static void acquire_dsc(struct resource_context *res_ctx, const struct resource_pool *pool, - struct display_stream_compressor **dsc) + struct display_stream_compressor **dsc, + int pipe_idx) { int i; ASSERT(*dsc == NULL); *dsc = NULL; + if (pool->res_cap->num_dsc == pool->res_cap->num_opp) { + *dsc = pool->dscs[pipe_idx]; + res_ctx->is_dsc_acquired[pipe_idx] = true; + return; + } + /* Find first free DSC */ for (i = 0; i < pool->res_cap->num_dsc; i++) if (!res_ctx->is_dsc_acquired[i]) { @@ -1585,7 +1594,7 @@ static enum dc_status add_dsc_to_stream_resource(struct dc *dc, if (pipe_ctx->stream != dc_stream) continue; - acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc); + acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc, i); /* The number of DSCs can be less than the number of pipes */ if (!pipe_ctx->stream_res.dsc) { @@ -1785,7 +1794,7 @@ bool dcn20_split_stream_for_odm( next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx]; #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT if (next_odm_pipe->stream->timing.flags.DSC == 1) { - acquire_dsc(res_ctx, pool, &next_odm_pipe->stream_res.dsc); + acquire_dsc(res_ctx, pool, &next_odm_pipe->stream_res.dsc, next_odm_pipe->pipe_idx); ASSERT(next_odm_pipe->stream_res.dsc); if (next_odm_pipe->stream_res.dsc == NULL) return false; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c index 4b3401616434..fcb3877b4fcb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c @@ -492,15 +492,23 @@ void enc2_stream_encoder_dp_unblank( DP_VID_N_MUL, n_multiply); } - /* set DIG_START to 0x1 to reset FIFO */ + /* make sure stream is disabled before resetting steer fifo */ + REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, false); + REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, 0, 10, 5000); + /* set DIG_START to 0x1 to reset FIFO */ REG_UPDATE(DIG_FE_CNTL, DIG_START, 1); + udelay(1); /* write 0 to take the FIFO out of reset */ REG_UPDATE(DIG_FE_CNTL, 
DIG_START, 0); - /* switch DP encoder to CRTC data */ + /* switch DP encoder to CRTC data, but reset the fifo first. It may happen + * that it overflows during mode transition, and sometimes doesn't recover. + */ + REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 1); + udelay(10); REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0); diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile index 14113ccf498d..5b8c17564bc1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: MIT # # Makefile for DCN21. diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index 459bd9a5caed..b29b2c99a564 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -23,6 +23,8 @@ * */ +#include <linux/slab.h> + #include "dm_services.h" #include "dc.h" @@ -257,7 +259,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = { .vmm_page_size_bytes = 4096, .dram_clock_change_latency_us = 23.84, .return_bus_width_bytes = 64, - .dispclk_dppclk_vco_speed_mhz = 3550, + .dispclk_dppclk_vco_speed_mhz = 3600, .xfc_bus_transport_time_us = 4, .xfc_xbuf_latency_tolerance_us = 4, .use_urgent_burst_bw = 1, @@ -1000,6 +1002,8 @@ static void calculate_wm_set_for_vlevel( pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz; dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us; + dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us; + dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us; wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000; wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000; @@ -1017,14 +1021,21 @@ static void calculate_wm_set_for_vlevel( static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb) { + int i; + kernel_fpu_begin(); if (dc->bb_overrides.sr_exit_time_ns) { - bb->sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0; + for (i = 0; i < WM_SET_COUNT; i++) { + dc->clk_mgr->bw_params->wm_table.entries[i].sr_exit_time_us = + dc->bb_overrides.sr_exit_time_ns / 1000.0; + } } if (dc->bb_overrides.sr_enter_plus_exit_time_ns) { - bb->sr_enter_plus_exit_time_us = - dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; + for (i = 0; i < WM_SET_COUNT; i++) { + dc->clk_mgr->bw_params->wm_table.entries[i].sr_enter_plus_exit_time_us = + dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; + } } if (dc->bb_overrides.urgent_latency_ns) { @@ -1032,9 +1043,12 @@ static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s } if (dc->bb_overrides.dram_clock_change_latency_ns) { - bb->dram_clock_change_latency_us = + for (i = 0; i < WM_SET_COUNT; i++) { + dc->clk_mgr->bw_params->wm_table.entries[i].pstate_latency_us = + dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; + } } + kernel_fpu_end(); } diff --git a/drivers/gpu/drm/amd/display/dc/dsc/Makefile b/drivers/gpu/drm/amd/display/dc/dsc/Makefile index 970737217e53..641ffb7cfaed 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dsc/Makefile @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: MIT # # Makefile for the 'dsc' sub-component of DAL. 
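The patch_bounding_box() rework above also shows why the function gained kernel_fpu_begin()/kernel_fpu_end(): the watermark overrides are computed in double precision, and kernel code may only touch FPU state inside such a bracket. A self-contained sketch of the pattern, with an illustrative stand-in struct rather than DC's real wm_table types:

#include <asm/fpu/api.h>

struct example_wm_entry {
	double pstate_latency_us;
};

/* Convert a nanosecond override to microseconds for every watermark set. */
static void example_apply_pstate_override(struct example_wm_entry *entries,
					  int count, int override_ns)
{
	int i;

	kernel_fpu_begin();	/* save FPU state; FP math is now allowed */
	for (i = 0; i < count; i++)
		entries[i].pstate_latency_us = override_ns / 1000.0;
	kernel_fpu_end();	/* restore FPU state */
}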
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h index 4e18e77dcf42..026e6a2a2c44 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h @@ -69,6 +69,8 @@ struct wm_range_table_entry { unsigned int wm_inst; unsigned int wm_type; double pstate_latency_us; + double sr_exit_time_us; + double sr_enter_plus_exit_time_us; bool valid; }; diff --git a/drivers/gpu/drm/amd/display/include/i2caux_interface.h b/drivers/gpu/drm/amd/display/include/i2caux_interface.h index bb012cb1a9f5..c7fbb9c3ad6b 100644 --- a/drivers/gpu/drm/amd/display/include/i2caux_interface.h +++ b/drivers/gpu/drm/amd/display/include/i2caux_interface.h @@ -42,7 +42,7 @@ struct aux_payload { bool write; bool mot; uint32_t address; - uint8_t length; + uint32_t length; uint8_t *data; /* * used to return the reply type of the transaction diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index 16e69bbc69aa..5437b50e9f90 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -37,8 +37,8 @@ #define STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME ((1000 / 60) * 65) /* Number of elements in the render times cache array */ #define RENDER_TIMES_MAX_COUNT 10 -/* Threshold to exit/exit BTR (to avoid frequent enter-exits at the lower limit) */ -#define BTR_MAX_MARGIN 2500 +/* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */ +#define BTR_EXIT_MARGIN 2000 /* Threshold to change BTR multiplier (to avoid frequent changes) */ #define BTR_DRIFT_MARGIN 2000 /*Threshold to exit fixed refresh rate*/ @@ -254,22 +254,24 @@ static void apply_below_the_range(struct core_freesync *core_freesync, unsigned int delta_from_mid_point_in_us_1 = 0xFFFFFFFF; unsigned int delta_from_mid_point_in_us_2 = 0xFFFFFFFF; unsigned int frames_to_insert = 0; + unsigned int min_frame_duration_in_ns = 0; + unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us; unsigned int delta_from_mid_point_delta_in_us; - unsigned int max_render_time_in_us = - in_out_vrr->max_duration_in_us - in_out_vrr->btr.margin_in_us; + + min_frame_duration_in_ns = ((unsigned int) (div64_u64( + (1000000000ULL * 1000000), + in_out_vrr->max_refresh_in_uhz))); /* Program BTR */ - if ((last_render_time_in_us + in_out_vrr->btr.margin_in_us / 2) < max_render_time_in_us) { + if (last_render_time_in_us + BTR_EXIT_MARGIN < max_render_time_in_us) { /* Exit Below the Range */ if (in_out_vrr->btr.btr_active) { in_out_vrr->btr.frame_counter = 0; in_out_vrr->btr.btr_active = false; } - } else if (last_render_time_in_us > (max_render_time_in_us + in_out_vrr->btr.margin_in_us / 2)) { + } else if (last_render_time_in_us > max_render_time_in_us) { /* Enter Below the Range */ - if (!in_out_vrr->btr.btr_active) { - in_out_vrr->btr.btr_active = true; - } + in_out_vrr->btr.btr_active = true; } /* BTR set to "not active" so disengage */ @@ -325,9 +327,7 @@ static void apply_below_the_range(struct core_freesync *core_freesync, /* Choose number of frames to insert based on how close it * can get to the mid point of the variable range. 
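 * (For illustration, with numbers not taken from the patch: a 40 ms render time against a 12 ms mid-point gives 4 inserted frames of * 10 ms for ceil and 3 of 13.3 ms for floor; floor lands about 0.7 ms * closer to the mid-point, so it wins the comparison below.)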
*/ - if ((frame_time_in_us / mid_point_frames_ceil) > in_out_vrr->min_duration_in_us && - (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2 || - mid_point_frames_floor < 2)) { + if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) { frames_to_insert = mid_point_frames_ceil; delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_2 - delta_from_mid_point_in_us_1; @@ -343,7 +343,7 @@ static void apply_below_the_range(struct core_freesync *core_freesync, if (in_out_vrr->btr.frames_to_insert != 0 && delta_from_mid_point_delta_in_us < BTR_DRIFT_MARGIN) { if (((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) < - max_render_time_in_us) && + in_out_vrr->max_duration_in_us) && ((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) > in_out_vrr->min_duration_in_us)) frames_to_insert = in_out_vrr->btr.frames_to_insert; @@ -796,11 +796,6 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, refresh_range = in_out_vrr->max_refresh_in_uhz - in_out_vrr->min_refresh_in_uhz; - in_out_vrr->btr.margin_in_us = in_out_vrr->max_duration_in_us - - 2 * in_out_vrr->min_duration_in_us; - if (in_out_vrr->btr.margin_in_us > BTR_MAX_MARGIN) - in_out_vrr->btr.margin_in_us = BTR_MAX_MARGIN; - in_out_vrr->supported = true; } @@ -816,7 +811,6 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, in_out_vrr->btr.inserted_duration_in_us = 0; in_out_vrr->btr.frames_to_insert = 0; in_out_vrr->btr.frame_counter = 0; - in_out_vrr->btr.mid_point_in_us = (in_out_vrr->min_duration_in_us + in_out_vrr->max_duration_in_us) / 2; diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h index dbe7835aabcf..dc187844d10b 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h @@ -92,7 +92,6 @@ struct mod_vrr_params_btr { uint32_t inserted_duration_in_us; uint32_t frames_to_insert; uint32_t frame_counter; - uint32_t margin_in_us; }; struct mod_vrr_params_fixed_refresh { diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 5ff7ccedfbed..a23729d3174b 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -866,6 +866,7 @@ static int smu_sw_init(void *handle) smu->smu_baco.platform_support = false; mutex_init(&smu->sensor_lock); + mutex_init(&smu->metrics_lock); smu->watermarks_bitmap = 0; smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c index ce3566ca3e24..472e9fed411a 100644 --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c @@ -862,18 +862,21 @@ static int arcturus_get_metrics_table(struct smu_context *smu, struct smu_table_context *smu_table= &smu->smu_table; int ret = 0; + mutex_lock(&smu->metrics_lock); if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + HZ / 1000)) { ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)smu_table->metrics_table, false); if (ret) { pr_info("Failed to export SMU metrics table!\n"); + mutex_unlock(&smu->metrics_lock); return ret; } smu_table->metrics_time = jiffies; } memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t)); + mutex_unlock(&smu->metrics_lock); return ret; } @@ -1313,12 +1316,17 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu, 
"VR", "COMPUTE", "CUSTOM"}; + static const char *title[] = { + "PROFILE_INDEX(NAME)"}; uint32_t i, size = 0; int16_t workload_type = 0; if (!smu->pm_enabled || !buf) return -EINVAL; + size += sprintf(buf + size, "%16s\n", + title[0]); + for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) { /* * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index ac9758305ab3..41fce75b263f 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -349,6 +349,7 @@ struct smu_context const struct pptable_funcs *ppt_funcs; struct mutex mutex; struct mutex sensor_lock; + struct mutex metrics_lock; uint64_t pool_size; struct smu_table_context smu_table; diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index 4a14fd1f9fd5..ca62e92e5a4f 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -562,17 +562,20 @@ static int navi10_get_metrics_table(struct smu_context *smu, struct smu_table_context *smu_table= &smu->smu_table; int ret = 0; + mutex_lock(&smu->metrics_lock); if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100))) { ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)smu_table->metrics_table, false); if (ret) { pr_info("Failed to export SMU metrics table!\n"); + mutex_unlock(&smu->metrics_lock); return ret; } smu_table->metrics_time = jiffies; } memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t)); + mutex_unlock(&smu->metrics_lock); return ret; } diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c index 60b9ff097142..0d3a3b0a934e 100644 --- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c @@ -1678,17 +1678,20 @@ static int vega20_get_metrics_table(struct smu_context *smu, struct smu_table_context *smu_table= &smu->smu_table; int ret = 0; + mutex_lock(&smu->metrics_lock); if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + HZ / 1000)) { ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)smu_table->metrics_table, false); if (ret) { pr_info("Failed to export SMU metrics table!\n"); + mutex_unlock(&smu->metrics_lock); return ret; } smu_table->metrics_time = jiffies; } memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t)); + mutex_unlock(&smu->metrics_lock); return ret; } diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c index 875a3a9eabfa..7d0e7b031e44 100644 --- a/drivers/gpu/drm/arm/malidp_mw.c +++ b/drivers/gpu/drm/arm/malidp_mw.c @@ -56,7 +56,7 @@ malidp_mw_connector_mode_valid(struct drm_connector *connector, return MODE_OK; } -const struct drm_connector_helper_funcs malidp_mw_connector_helper_funcs = { +static const struct drm_connector_helper_funcs malidp_mw_connector_helper_funcs = { .get_modes = malidp_mw_connector_get_modes, .mode_valid = malidp_mw_connector_mode_valid, }; diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 273dd80fabf3..e6afe4faeca6 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -393,7 +393,7 @@ drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req, memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes); idx 
+= req->u.i2c_read.transactions[i].num_bytes; - buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5; + buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4; buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf); idx++; } @@ -1190,6 +1190,8 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb, txmsg->state == DRM_DP_SIDEBAND_TX_SENT) { mstb->tx_slots[txmsg->seqno] = NULL; } + mgr->is_waiting_for_dwn_reply = false; + } out: if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) { @@ -1199,6 +1201,7 @@ out: } mutex_unlock(&mgr->qlock); + drm_dp_mst_kick_tx(mgr); return ret; } @@ -1913,73 +1916,90 @@ static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port, return parent_lct + 1; } -static int drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt) +static bool drm_dp_mst_is_dp_mst_end_device(u8 pdt, bool mcs) +{ + switch (pdt) { + case DP_PEER_DEVICE_DP_LEGACY_CONV: + case DP_PEER_DEVICE_SST_SINK: + return true; + case DP_PEER_DEVICE_MST_BRANCHING: + /* For sst branch device */ + if (!mcs) + return true; + + return false; + } + return true; +} + +static int +drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt, + bool new_mcs) { struct drm_dp_mst_topology_mgr *mgr = port->mgr; struct drm_dp_mst_branch *mstb; u8 rad[8], lct; int ret = 0; - if (port->pdt == new_pdt) + if (port->pdt == new_pdt && port->mcs == new_mcs) return 0; /* Teardown the old pdt, if there is one */ - switch (port->pdt) { - case DP_PEER_DEVICE_DP_LEGACY_CONV: - case DP_PEER_DEVICE_SST_SINK: - /* - * If the new PDT would also have an i2c bus, don't bother - * with reregistering it - */ - if (new_pdt == DP_PEER_DEVICE_DP_LEGACY_CONV || - new_pdt == DP_PEER_DEVICE_SST_SINK) { - port->pdt = new_pdt; - return 0; - } + if (port->pdt != DP_PEER_DEVICE_NONE) { + if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) { + /* + * If the new PDT would also have an i2c bus, + * don't bother with reregistering it + */ + if (new_pdt != DP_PEER_DEVICE_NONE && + drm_dp_mst_is_dp_mst_end_device(new_pdt, new_mcs)) { + port->pdt = new_pdt; + port->mcs = new_mcs; + return 0; + } - /* remove i2c over sideband */ - drm_dp_mst_unregister_i2c_bus(&port->aux); - break; - case DP_PEER_DEVICE_MST_BRANCHING: - mutex_lock(&mgr->lock); - drm_dp_mst_topology_put_mstb(port->mstb); - port->mstb = NULL; - mutex_unlock(&mgr->lock); - break; + /* remove i2c over sideband */ + drm_dp_mst_unregister_i2c_bus(&port->aux); + } else { + mutex_lock(&mgr->lock); + drm_dp_mst_topology_put_mstb(port->mstb); + port->mstb = NULL; + mutex_unlock(&mgr->lock); + } } port->pdt = new_pdt; - switch (port->pdt) { - case DP_PEER_DEVICE_DP_LEGACY_CONV: - case DP_PEER_DEVICE_SST_SINK: - /* add i2c over sideband */ - ret = drm_dp_mst_register_i2c_bus(&port->aux); - break; + port->mcs = new_mcs; - case DP_PEER_DEVICE_MST_BRANCHING: - lct = drm_dp_calculate_rad(port, rad); - mstb = drm_dp_add_mst_branch_device(lct, rad); - if (!mstb) { - ret = -ENOMEM; - DRM_ERROR("Failed to create MSTB for port %p", port); - goto out; - } + if (port->pdt != DP_PEER_DEVICE_NONE) { + if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) { + /* add i2c over sideband */ + ret = drm_dp_mst_register_i2c_bus(&port->aux); + } else { + lct = drm_dp_calculate_rad(port, rad); + mstb = drm_dp_add_mst_branch_device(lct, rad); + if (!mstb) { + ret = -ENOMEM; + DRM_ERROR("Failed to create MSTB for port %p", + port); + goto out; + } - mutex_lock(&mgr->lock); - port->mstb = mstb; - mstb->mgr = port->mgr; - 
mstb->port_parent = port; + mutex_lock(&mgr->lock); + port->mstb = mstb; + mstb->mgr = port->mgr; + mstb->port_parent = port; - /* - * Make sure this port's memory allocation stays - * around until its child MSTB releases it - */ - drm_dp_mst_get_port_malloc(port); - mutex_unlock(&mgr->lock); + /* + * Make sure this port's memory allocation stays + * around until its child MSTB releases it + */ + drm_dp_mst_get_port_malloc(port); + mutex_unlock(&mgr->lock); - /* And make sure we send a link address for this */ - ret = 1; - break; + /* And make sure we send a link address for this */ + ret = 1; + } } out: @@ -2132,9 +2152,8 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb, goto error; } - if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV || - port->pdt == DP_PEER_DEVICE_SST_SINK) && - port->port_num >= DP_MST_LOGICAL_PORT_0) { + if (port->pdt != DP_PEER_DEVICE_NONE && + drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) { port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc); drm_connector_set_tile_property(port->connector); @@ -2198,6 +2217,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *port; int old_ddps = 0, ret; u8 new_pdt = DP_PEER_DEVICE_NONE; + bool new_mcs = 0; bool created = false, send_link_addr = false, changed = false; port = drm_dp_get_port(mstb, port_msg->port_number); @@ -2242,7 +2262,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb, port->input = port_msg->input_port; if (!port->input) new_pdt = port_msg->peer_device_type; - port->mcs = port_msg->mcs; + new_mcs = port_msg->mcs; port->ddps = port_msg->ddps; port->ldps = port_msg->legacy_device_plug_status; port->dpcd_rev = port_msg->dpcd_revision; @@ -2269,7 +2289,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb, } } - ret = drm_dp_port_set_pdt(port, new_pdt); + ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs); if (ret == 1) { send_link_addr = true; } else if (ret < 0) { @@ -2283,7 +2303,8 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb, * we're coming out of suspend. In this case, always resend the link * address if there's an MSTB on this port */ - if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING) + if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING && + port->mcs) send_link_addr = true; if (port->connector) @@ -2318,8 +2339,9 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb, { struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; struct drm_dp_mst_port *port; - int old_ddps, ret; + int old_ddps, old_input, ret, i; u8 new_pdt; + bool new_mcs; bool dowork = false, create_connector = false; port = drm_dp_get_port(mstb, conn_stat->port_number); @@ -2349,8 +2371,8 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb, } old_ddps = port->ddps; + old_input = port->input; port->input = conn_stat->input_port; - port->mcs = conn_stat->message_capability_status; port->ldps = conn_stat->legacy_device_plug_status; port->ddps = conn_stat->displayport_device_plug_status; @@ -2363,8 +2385,8 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb, } new_pdt = port->input ? 
DP_PEER_DEVICE_NONE : conn_stat->peer_device_type; - - ret = drm_dp_port_set_pdt(port, new_pdt); + new_mcs = conn_stat->message_capability_status; + ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs); if (ret == 1) { dowork = true; } else if (ret < 0) { @@ -2373,6 +2395,28 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb, dowork = false; } + if (!old_input && old_ddps != port->ddps && !port->ddps) { + for (i = 0; i < mgr->max_payloads; i++) { + struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i]; + struct drm_dp_mst_port *port_validated; + + if (!vcpi) + continue; + + port_validated = + container_of(vcpi, struct drm_dp_mst_port, vcpi); + port_validated = + drm_dp_mst_topology_get_port_validated(mgr, port_validated); + if (!port_validated) { + mutex_lock(&mgr->payload_lock); + vcpi->num_slots = 0; + mutex_unlock(&mgr->payload_lock); + } else { + drm_dp_mst_topology_put_port(port_validated); + } + } + } + if (port->connector) drm_modeset_unlock(&mgr->base.lock); else if (create_connector) @@ -2718,9 +2762,11 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) ret = process_single_tx_qlock(mgr, txmsg, false); if (ret == 1) { /* txmsg is sent it should be in the slots now */ + mgr->is_waiting_for_dwn_reply = true; list_del(&txmsg->next); } else if (ret) { DRM_DEBUG_KMS("failed to send msg in q %d\n", ret); + mgr->is_waiting_for_dwn_reply = false; list_del(&txmsg->next); if (txmsg->seqno != -1) txmsg->dst->tx_slots[txmsg->seqno] = NULL; @@ -2760,7 +2806,8 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr, drm_dp_mst_dump_sideband_msg_tx(&p, txmsg); } - if (list_is_singular(&mgr->tx_msg_downq)) + if (list_is_singular(&mgr->tx_msg_downq) && + !mgr->is_waiting_for_dwn_reply) process_single_down_tx_qlock(mgr); mutex_unlock(&mgr->qlock); } @@ -3678,6 +3725,7 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) mutex_lock(&mgr->qlock); txmsg->state = DRM_DP_SIDEBAND_TX_RX; mstb->tx_slots[slot] = NULL; + mgr->is_waiting_for_dwn_reply = false; mutex_unlock(&mgr->qlock); wake_up_all(&mgr->tx_waitq); @@ -3687,6 +3735,9 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) no_msg: drm_dp_mst_topology_put_mstb(mstb); clear_down_rep_recv: + mutex_lock(&mgr->qlock); + mgr->is_waiting_for_dwn_reply = false; + mutex_unlock(&mgr->qlock); memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); return 0; @@ -3896,6 +3947,8 @@ drm_dp_mst_detect_port(struct drm_connector *connector, switch (port->pdt) { case DP_PEER_DEVICE_NONE: case DP_PEER_DEVICE_MST_BRANCHING: + if (!port->mcs) + ret = connector_status_connected; break; case DP_PEER_DEVICE_SST_SINK: @@ -4497,7 +4550,7 @@ static void drm_dp_tx_work(struct work_struct *work) struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work); mutex_lock(&mgr->qlock); - if (!list_empty(&mgr->tx_msg_downq)) + if (!list_empty(&mgr->tx_msg_downq) && !mgr->is_waiting_for_dwn_reply) process_single_down_tx_qlock(mgr); mutex_unlock(&mgr->qlock); } @@ -4508,7 +4561,7 @@ drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port) if (port->connector) port->mgr->cbs->destroy_connector(port->mgr, port->connector); - drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE); + drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs); drm_dp_mst_put_port_malloc(port); } diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 8ebeccdeed23..d8e8f3960f4d 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ 
b/drivers/gpu/drm/drm_fb_helper.c @@ -1283,7 +1283,7 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var, * Changes struct fb_var_screeninfo are currently not pushed back * to KMS, hence fail if different settings are requested. */ - if (var->bits_per_pixel != fb->format->cpp[0] * 8 || + if (var->bits_per_pixel > fb->format->cpp[0] * 8 || var->xres > fb->width || var->yres > fb->height || var->xres_virtual > fb->width || var->yres_virtual > fb->height) { DRM_DEBUG("fb requested width/height/bpp can't fit in current fb " @@ -1309,6 +1309,11 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var, } /* + * Likewise, bits_per_pixel should be rounded up to a supported value. + */ + var->bits_per_pixel = fb->format->cpp[0] * 8; + + /* * drm fbdev emulation doesn't support changing the pixel format at all, * so reject all pixel format changing requests. */ diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 7ae087b0504d..88b6fcaa20be 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -1313,6 +1313,7 @@ static int gsc_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; + component_del(dev, &gsc_component_ops); pm_runtime_dont_use_autosuspend(dev); pm_runtime_disable(dev); diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c index afaf4bea21cf..9278bcfad1bf 100644 --- a/drivers/gpu/drm/gma500/gtt.c +++ b/drivers/gpu/drm/gma500/gtt.c @@ -503,7 +503,7 @@ int psb_gtt_init(struct drm_device *dev, int resume) * Map the GTT and the stolen memory area */ if (!resume) - dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start, + dev_priv->gtt_map = ioremap(pg->gtt_phys_start, gtt_pages << PAGE_SHIFT); if (!dev_priv->gtt_map) { dev_err(dev->dev, "Failure to map gtt.\n"); diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index 7005f8f69c68..0900052fc484 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -256,7 +256,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags) PSB_AUX_RESOURCE); resource_len = pci_resource_len(dev_priv->aux_pdev, PSB_AUX_RESOURCE); - dev_priv->aux_reg = ioremap_nocache(resource_start, + dev_priv->aux_reg = ioremap(resource_start, resource_len); if (!dev_priv->aux_reg) goto out_err; diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c index 2fd4ca91a62d..8dd5a43e5486 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c @@ -211,7 +211,7 @@ static int hibmc_hw_map(struct hibmc_drm_private *priv) ioaddr = pci_resource_start(pdev, 1); iosize = pci_resource_len(pdev, 1); - priv->mmio = devm_ioremap_nocache(dev->dev, ioaddr, iosize); + priv->mmio = devm_ioremap(dev->dev, ioaddr, iosize); if (!priv->mmio) { DRM_ERROR("Cannot map mmio region\n"); return -ENOMEM; diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c index 85e6b2bbb34f..3a5ac13d5801 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.c +++ b/drivers/gpu/drm/i915/display/intel_audio.c @@ -856,7 +856,7 @@ static unsigned long i915_audio_component_get_power(struct device *kdev) } /* Force CDCLK to 2*BCLK as long as we need audio powered. 
*/ - if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) + if (IS_GEMINILAKE(dev_priv)) glk_force_audio_cdclk(dev_priv, true); if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) @@ -875,7 +875,7 @@ static void i915_audio_component_put_power(struct device *kdev, /* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */ if (--dev_priv->audio_power_refcount == 0) - if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) + if (IS_GEMINILAKE(dev_priv)) glk_force_audio_cdclk(dev_priv, false); intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO, cookie); diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index c7c2b349858d..2a27fb5d7dc6 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -3986,6 +3986,7 @@ static void intel_enable_ddi(struct intel_encoder *encoder, if (conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) intel_hdcp_enable(to_intel_connector(conn_state->connector), + crtc_state->cpu_transcoder, (u8)conn_state->hdcp_content_type); } @@ -4089,7 +4090,9 @@ static void intel_ddi_update_pipe(struct intel_encoder *encoder, if (conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED || content_protection_type_changed) - intel_hdcp_enable(connector, (u8)conn_state->hdcp_content_type); + intel_hdcp_enable(connector, + crtc_state->cpu_transcoder, + (u8)conn_state->hdcp_content_type); } static void diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 6f5e3bd13ad1..301897791627 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -4515,8 +4515,6 @@ static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_ { struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - i915_reg_t reg; - u32 trans_ddi_func_ctl2_val; if (old_crtc_state->master_transcoder == INVALID_TRANSCODER) return; @@ -4524,10 +4522,7 @@ static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_ DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n", transcoder_name(old_crtc_state->cpu_transcoder)); - reg = TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder); - trans_ddi_func_ctl2_val = ~(PORT_SYNC_MODE_ENABLE | - PORT_SYNC_MODE_MASTER_SELECT_MASK); - I915_WRITE(reg, trans_ddi_func_ctl2_val); + I915_WRITE(TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder), 0); } static void intel_fdi_normal_train(struct intel_crtc *crtc) @@ -15112,7 +15107,7 @@ intel_prepare_plane_fb(struct drm_plane *plane, return ret; fb_obj_bump_render_priority(obj); - intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_DIRTYFB); + i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB); if (!new_plane_state->base.fence) { /* implicit fencing */ struct dma_fence *fence; diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index ce1b64f4dd44..12ba74788cce 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -3688,6 +3688,151 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }; +static const struct i915_power_well_desc ehl_power_wells[] = { + { + .name = "always-on", + .always_on = true, + .domains = POWER_DOMAIN_MASK, + .ops = &i9xx_always_on_power_well_ops, + .id = DISP_PW_ID_NONE, + }, + { + .name = "power 
well 1", + /* Handled by the DMC firmware */ + .always_on = true, + .domains = 0, + .ops = &hsw_power_well_ops, + .id = SKL_DISP_PW_1, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_PW_1, + .hsw.has_fuses = true, + }, + }, + { + .name = "DC off", + .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS, + .ops = &gen9_dc_off_power_well_ops, + .id = SKL_DISP_DC_OFF, + }, + { + .name = "power well 2", + .domains = ICL_PW_2_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = SKL_DISP_PW_2, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_PW_2, + .hsw.has_fuses = true, + }, + }, + { + .name = "power well 3", + .domains = ICL_PW_3_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_PW_3, + .hsw.irq_pipe_mask = BIT(PIPE_B), + .hsw.has_vga = true, + .hsw.has_fuses = true, + }, + }, + { + .name = "DDI A IO", + .domains = ICL_DDI_IO_A_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_DDI_A, + }, + }, + { + .name = "DDI B IO", + .domains = ICL_DDI_IO_B_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_DDI_B, + }, + }, + { + .name = "DDI C IO", + .domains = ICL_DDI_IO_C_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_DDI_C, + }, + }, + { + .name = "DDI D IO", + .domains = ICL_DDI_IO_D_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_DDI_D, + }, + }, + { + .name = "AUX A", + .domains = ICL_AUX_A_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_A, + }, + }, + { + .name = "AUX B", + .domains = ICL_AUX_B_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_B, + }, + }, + { + .name = "AUX C", + .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_C, + }, + }, + { + .name = "AUX D", + .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_D, + }, + }, + { + .name = "power well 4", + .domains = ICL_PW_4_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_PW_4, + .hsw.has_fuses = true, + .hsw.irq_pipe_mask = BIT(PIPE_C), + }, + }, +}; + static const struct i915_power_well_desc tgl_power_wells[] = { { .name = "always-on", @@ -3832,7 +3977,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { { .name = "AUX A", .domains = TGL_AUX_A_IO_POWER_DOMAINS, - .ops = &icl_combo_phy_aux_power_well_ops, + .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -3842,7 +3987,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { { .name = "AUX B", .domains = TGL_AUX_B_IO_POWER_DOMAINS, - .ops = &icl_combo_phy_aux_power_well_ops, + .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -3852,7 +3997,7 @@ static const struct 
i915_power_well_desc tgl_power_wells[] = { { .name = "AUX C", .domains = TGL_AUX_C_IO_POWER_DOMAINS, - .ops = &icl_combo_phy_aux_power_well_ops, + .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -4162,6 +4307,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) */ if (IS_GEN(dev_priv, 12)) { err = set_power_wells(power_domains, tgl_power_wells); + } else if (IS_ELKHARTLAKE(dev_priv)) { + err = set_power_wells(power_domains, ehl_power_wells); } else if (IS_GEN(dev_priv, 11)) { err = set_power_wells(power_domains, icl_power_wells); } else if (IS_CANNONLAKE(dev_priv)) { diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 050655a1a3d8..b05b2191b919 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -2414,9 +2414,6 @@ intel_dp_compute_config(struct intel_encoder *encoder, intel_psr_compute_config(intel_dp, pipe_config); - intel_hdcp_transcoder_config(intel_connector, - pipe_config->cpu_transcoder); - return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index 3111ecaeabd0..20616639b8ab 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -1284,7 +1284,7 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv) return 0; /* https://bugs.freedesktop.org/show_bug.cgi?id=108085 */ - if (IS_GEMINILAKE(dev_priv)) + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) return 0; if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c index 84b164f31895..6cb02c912acc 100644 --- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c +++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c @@ -229,11 +229,11 @@ static void frontbuffer_release(struct kref *ref) vma->display_alignment = I915_GTT_MIN_ALIGNMENT; spin_unlock(&obj->vma.lock); - obj->frontbuffer = NULL; + RCU_INIT_POINTER(obj->frontbuffer, NULL); spin_unlock(&to_i915(obj->base.dev)->fb_tracking.lock); i915_gem_object_put(obj); - kfree(front); + kfree_rcu(front, rcu); } struct intel_frontbuffer * @@ -242,11 +242,7 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj) struct drm_i915_private *i915 = to_i915(obj->base.dev); struct intel_frontbuffer *front; - spin_lock(&i915->fb_tracking.lock); - front = obj->frontbuffer; - if (front) - kref_get(&front->ref); - spin_unlock(&i915->fb_tracking.lock); + front = __intel_frontbuffer_get(obj); if (front) return front; @@ -262,13 +258,13 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj) i915_active_may_sleep(frontbuffer_retire)); spin_lock(&i915->fb_tracking.lock); - if (obj->frontbuffer) { + if (rcu_access_pointer(obj->frontbuffer)) { kfree(front); - front = obj->frontbuffer; + front = rcu_dereference_protected(obj->frontbuffer, true); kref_get(&front->ref); } else { i915_gem_object_get(obj); - obj->frontbuffer = front; + rcu_assign_pointer(obj->frontbuffer, front); } spin_unlock(&i915->fb_tracking.lock); diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.h b/drivers/gpu/drm/i915/display/intel_frontbuffer.h index adc64d61a4a5..6d41f5394425 100644 --- a/drivers/gpu/drm/i915/display/intel_frontbuffer.h +++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.h @@ -27,10 +27,10 @@ #include <linux/atomic.h> #include <linux/kref.h> +#include "gem/i915_gem_object_types.h" #include "i915_active.h" struct 
drm_i915_private; -struct drm_i915_gem_object; enum fb_op_origin { ORIGIN_GTT, @@ -45,6 +45,7 @@ struct intel_frontbuffer { atomic_t bits; struct i915_active write; struct drm_i915_gem_object *obj; + struct rcu_head rcu; }; void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915, @@ -54,6 +55,35 @@ void intel_frontbuffer_flip_complete(struct drm_i915_private *i915, void intel_frontbuffer_flip(struct drm_i915_private *i915, unsigned frontbuffer_bits); +void intel_frontbuffer_put(struct intel_frontbuffer *front); + +static inline struct intel_frontbuffer * +__intel_frontbuffer_get(const struct drm_i915_gem_object *obj) +{ + struct intel_frontbuffer *front; + + if (likely(!rcu_access_pointer(obj->frontbuffer))) + return NULL; + + rcu_read_lock(); + do { + front = rcu_dereference(obj->frontbuffer); + if (!front) + break; + + if (unlikely(!kref_get_unless_zero(&front->ref))) + continue; + + if (likely(front == rcu_access_pointer(obj->frontbuffer))) + break; + + intel_frontbuffer_put(front); + } while (1); + rcu_read_unlock(); + + return front; +} + struct intel_frontbuffer * intel_frontbuffer_get(struct drm_i915_gem_object *obj); @@ -119,6 +149,4 @@ void intel_frontbuffer_track(struct intel_frontbuffer *old, struct intel_frontbuffer *new, unsigned int frontbuffer_bits); -void intel_frontbuffer_put(struct intel_frontbuffer *front); - #endif /* __INTEL_FRONTBUFFER_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index f1f41ca8402b..a448815d8fc2 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -1821,23 +1821,6 @@ enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder) } } -void intel_hdcp_transcoder_config(struct intel_connector *connector, - enum transcoder cpu_transcoder) -{ - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_hdcp *hdcp = &connector->hdcp; - - if (!hdcp->shim) - return; - - if (INTEL_GEN(dev_priv) >= 12) { - mutex_lock(&hdcp->mutex); - hdcp->cpu_transcoder = cpu_transcoder; - hdcp->port_data.fw_tc = intel_get_mei_fw_tc(cpu_transcoder); - mutex_unlock(&hdcp->mutex); - } -} - static inline int initialize_hdcp_port_data(struct intel_connector *connector, const struct intel_hdcp_shim *shim) { @@ -1959,8 +1942,10 @@ int intel_hdcp_init(struct intel_connector *connector, return 0; } -int intel_hdcp_enable(struct intel_connector *connector, u8 content_type) +int intel_hdcp_enable(struct intel_connector *connector, + enum transcoder cpu_transcoder, u8 content_type) { + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS; int ret = -EINVAL; @@ -1972,6 +1957,11 @@ int intel_hdcp_enable(struct intel_connector *connector, u8 content_type) WARN_ON(hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED); hdcp->content_type = content_type; + if (INTEL_GEN(dev_priv) >= 12) { + hdcp->cpu_transcoder = cpu_transcoder; + hdcp->port_data.fw_tc = intel_get_mei_fw_tc(cpu_transcoder); + } + /* * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup * is capable of HDCP2.2, it is preferred to use HDCP2.2. 
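The __intel_frontbuffer_get() helper added to intel_frontbuffer.h above is an instance of the standard RCU lookup idiom: dereference the __rcu pointer under rcu_read_lock(), take a reference only via kref_get_unless_zero() (because the final reference may be dropping concurrently), then re-check the pointer in case a different object was installed while the reference was taken. What follows is a minimal, self-contained sketch of that idiom, not kernel code from this series; struct owner, struct thing and the thing_get()/thing_put() helpers are hypothetical names used purely for illustration.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct thing;

struct owner {
	spinlock_t lock;
	struct thing __rcu *thing;
};

struct thing {
	struct kref ref;
	struct rcu_head rcu;
	struct owner *owner;
};

static void thing_release(struct kref *ref)
{
	struct thing *t = container_of(ref, struct thing, ref);

	/* Called with t->owner->lock held by kref_put_lock(). */
	RCU_INIT_POINTER(t->owner->thing, NULL);
	spin_unlock(&t->owner->lock);

	/* Defer the free until all current RCU readers are done. */
	kfree_rcu(t, rcu);
}

static void thing_put(struct thing *t)
{
	kref_put_lock(&t->ref, thing_release, &t->owner->lock);
}

static void owner_set_thing(struct owner *o, struct thing *t)
{
	t->owner = o;

	/* Publish; pairs with rcu_dereference() in thing_get(). */
	spin_lock(&o->lock);
	rcu_assign_pointer(o->thing, t);
	spin_unlock(&o->lock);
}

static struct thing *thing_get(struct owner *o)
{
	struct thing *t;

	rcu_read_lock();
	do {
		t = rcu_dereference(o->thing);
		if (!t)
			break;

		/* The final reference may be dropping right now. */
		if (!kref_get_unless_zero(&t->ref))
			continue;

		/* Still the published object? Then we are done. */
		if (t == rcu_access_pointer(o->thing))
			break;

		/* A new object was installed; drop ours and retry. */
		thing_put(t);
	} while (1);
	rcu_read_unlock();

	return t;
}

Freeing through kfree_rcu(), as frontbuffer_release() now does in the hunk above, is what makes the unlocked rcu_dereference() safe: the object stays valid for at least a grace period after the pointer is cleared, so the worst a racing reader can observe is a refcount that has already hit zero, in which case kref_get_unless_zero() fails and the lookup retries or returns NULL.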
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h index 41c1053d9e38..f3c3272e712a 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.h +++ b/drivers/gpu/drm/i915/display/intel_hdcp.h @@ -21,11 +21,10 @@ enum transcoder; void intel_hdcp_atomic_check(struct drm_connector *connector, struct drm_connector_state *old_state, struct drm_connector_state *new_state); -void intel_hdcp_transcoder_config(struct intel_connector *connector, - enum transcoder cpu_transcoder); int intel_hdcp_init(struct intel_connector *connector, const struct intel_hdcp_shim *hdcp_shim); -int intel_hdcp_enable(struct intel_connector *connector, u8 content_type); +int intel_hdcp_enable(struct intel_connector *connector, + enum transcoder cpu_transcoder, u8 content_type); int intel_hdcp_disable(struct intel_connector *connector); bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port); bool intel_hdcp_capable(struct intel_connector *connector); diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index f6f5312205c4..f56fffc474fa 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -2489,9 +2489,6 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, return -EINVAL; } - intel_hdcp_transcoder_config(intel_hdmi->attached_connector, - pipe_config->cpu_transcoder); - return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c index 848ce07a8ec2..8a98a1aa7adc 100644 --- a/drivers/gpu/drm/i915/display/intel_overlay.c +++ b/drivers/gpu/drm/i915/display/intel_overlay.c @@ -279,12 +279,21 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay, struct i915_vma *vma) { enum pipe pipe = overlay->crtc->pipe; + struct intel_frontbuffer *from = NULL, *to = NULL; WARN_ON(overlay->old_vma); - intel_frontbuffer_track(overlay->vma ? overlay->vma->obj->frontbuffer : NULL, - vma ? 
vma->obj->frontbuffer : NULL, - INTEL_FRONTBUFFER_OVERLAY(pipe)); + if (overlay->vma) + from = intel_frontbuffer_get(overlay->vma->obj); + if (vma) + to = intel_frontbuffer_get(vma->obj); + + intel_frontbuffer_track(from, to, INTEL_FRONTBUFFER_OVERLAY(pipe)); + + if (to) + intel_frontbuffer_put(to); + if (from) + intel_frontbuffer_put(from); intel_frontbuffer_flip_prepare(overlay->i915, INTEL_FRONTBUFFER_OVERLAY(pipe)); @@ -766,7 +775,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, ret = PTR_ERR(vma); goto out_pin_section; } - intel_frontbuffer_flush(new_bo->frontbuffer, ORIGIN_DIRTYFB); + i915_gem_object_flush_frontbuffer(new_bo, ORIGIN_DIRTYFB); if (!overlay->active) { u32 oconfig; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c index 3d4f5775a4ba..25235ef630c1 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c @@ -9,16 +9,16 @@ #include "i915_gem_ioctls.h" #include "i915_gem_object.h" -static __always_inline u32 __busy_read_flag(u8 id) +static __always_inline u32 __busy_read_flag(u16 id) { - if (id == (u8)I915_ENGINE_CLASS_INVALID) + if (id == (u16)I915_ENGINE_CLASS_INVALID) return 0xffff0000u; GEM_BUG_ON(id >= 16); return 0x10000u << id; } -static __always_inline u32 __busy_write_id(u8 id) +static __always_inline u32 __busy_write_id(u16 id) { /* * The uABI guarantees an active writer is also amongst the read @@ -29,14 +29,14 @@ static __always_inline u32 __busy_write_id(u8 id) * last_read - hence we always set both read and write busy for * last_write. */ - if (id == (u8)I915_ENGINE_CLASS_INVALID) + if (id == (u16)I915_ENGINE_CLASS_INVALID) return 0xffffffffu; return (id + 1) | __busy_read_flag(id); } static __always_inline unsigned int -__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id)) +__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u16 id)) { const struct i915_request *rq; @@ -57,7 +57,7 @@ __busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id)) return 0; /* Beware type-expansion follies! 
*/ - BUILD_BUG_ON(!typecheck(u8, rq->engine->uabi_class)); + BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class)); return flag(rq->engine->uabi_class); } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c index b9f504ba3b32..18ee708585a9 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c @@ -20,7 +20,8 @@ static void __do_clflush(struct drm_i915_gem_object *obj) { GEM_BUG_ON(!i915_gem_object_has_pages(obj)); drm_clflush_sg(obj->mm.pages); - intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU); + + i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU); } static int clflush_work(struct dma_fence_work *base) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index 337ba17b1e0e..42385277c684 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -2167,8 +2167,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, ext_data.fpriv = file->driver_priv; if (client_is_banned(ext_data.fpriv)) { DRM_DEBUG("client %s[%d] banned from creating ctx\n", - current->comm, - pid_nr(get_task_pid(current, PIDTYPE_PID))); + current->comm, task_pid_nr(current)); return -EIO; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c index 9937b4c341f1..f86400a191b0 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c @@ -664,7 +664,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, i915_gem_object_unlock(obj); if (write_domain) - intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU); + i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU); out_unpin: i915_gem_object_unpin_pages(obj); @@ -784,7 +784,7 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj, } out: - intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU); + i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU); obj->mm.dirty = true; /* return with the pages pinned */ return 0; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index f0998f1225af..bc3a67226163 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -2694,6 +2694,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, err = eb_submit(&eb); err_request: add_to_client(eb.request, file); + i915_request_get(eb.request); i915_request_add(eb.request); if (fences) @@ -2709,6 +2710,7 @@ err_request: fput(out_fence->file); } } + i915_request_put(eb.request); err_batch_unpin: if (eb.batch_flags & I915_DISPATCH_SECURE) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index a50296cce0d8..a596548c07bf 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -280,7 +280,7 @@ i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj, for_each_ggtt_vma(vma, obj) intel_gt_flush_ggtt_writes(vma->vm->gt); - intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU); + i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU); for_each_ggtt_vma(vma, obj) { if (vma->iomap) @@ -308,6 +308,30 @@ i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj, obj->write_domain = 0; } +void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj, + enum fb_op_origin origin) +{ + struct intel_frontbuffer *front; + + 
front = __intel_frontbuffer_get(obj); + if (front) { + intel_frontbuffer_flush(front, origin); + intel_frontbuffer_put(front); + } +} + +void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj, + enum fb_op_origin origin) +{ + struct intel_frontbuffer *front; + + front = __intel_frontbuffer_get(obj); + if (front) { + intel_frontbuffer_invalidate(front, origin); + intel_frontbuffer_put(front); + } +} + void i915_gem_init__objects(struct drm_i915_private *i915) { INIT_WORK(&i915->mm.free_work, __i915_gem_free_work); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index 458cd51331f1..4b93591fd5c7 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -13,8 +13,8 @@ #include <drm/i915_drm.h> +#include "display/intel_frontbuffer.h" #include "i915_gem_object_types.h" - #include "i915_gem_gtt.h" void i915_gem_init__objects(struct drm_i915_private *i915); @@ -463,4 +463,25 @@ int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, unsigned int flags, const struct i915_sched_attr *attr); +void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj, + enum fb_op_origin origin); +void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj, + enum fb_op_origin origin); + +static inline void +i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj, + enum fb_op_origin origin) +{ + if (unlikely(rcu_access_pointer(obj->frontbuffer))) + __i915_gem_object_flush_frontbuffer(obj, origin); +} + +static inline void +i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj, + enum fb_op_origin origin) +{ + if (unlikely(rcu_access_pointer(obj->frontbuffer))) + __i915_gem_object_invalidate_frontbuffer(obj, origin); +} + #endif diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index 96008374a412..e3f3944fbd90 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -150,7 +150,7 @@ struct drm_i915_gem_object { */ u16 write_domain; - struct intel_frontbuffer *frontbuffer; + struct intel_frontbuffer __rcu *frontbuffer; /** Current tiling stride for the object, if it's tiled. 
*/ unsigned int tiling_and_stride; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 4c72d74d6576..0dbb44d30885 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -402,7 +402,7 @@ struct get_pages_work { static struct sg_table * __i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj, - struct page **pvec, int num_pages) + struct page **pvec, unsigned long num_pages) { unsigned int max_segment = i915_sg_segment_size(); struct sg_table *st; @@ -448,9 +448,10 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) { struct get_pages_work *work = container_of(_work, typeof(*work), work); struct drm_i915_gem_object *obj = work->obj; - const int npages = obj->base.size >> PAGE_SHIFT; + const unsigned long npages = obj->base.size >> PAGE_SHIFT; + unsigned long pinned; struct page **pvec; - int pinned, ret; + int ret; ret = -ENOMEM; pinned = 0; @@ -553,7 +554,7 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj) static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) { - const int num_pages = obj->base.size >> PAGE_SHIFT; + const unsigned long num_pages = obj->base.size >> PAGE_SHIFT; struct mm_struct *mm = obj->userptr.mm->mm; struct page **pvec; struct sg_table *pages; diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index ef7bc41ffffa..5b7ff3ccfa8e 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -123,6 +123,10 @@ static int __context_pin_state(struct i915_vma *vma) if (err) return err; + err = i915_active_acquire(&vma->active); + if (err) + goto err_unpin; + /* * And mark it as a globally pinned object to let the shrinker know * it cannot reclaim the object until we release it. 
@@ -131,14 +135,44 @@ static int __context_pin_state(struct i915_vma *vma) vma->obj->mm.dirty = true; return 0; + +err_unpin: + i915_vma_unpin(vma); + return err; } static void __context_unpin_state(struct i915_vma *vma) { i915_vma_make_shrinkable(vma); + i915_active_release(&vma->active); __i915_vma_unpin(vma); } +static int __ring_active(struct intel_ring *ring) +{ + int err; + + err = i915_active_acquire(&ring->vma->active); + if (err) + return err; + + err = intel_ring_pin(ring); + if (err) + goto err_active; + + return 0; + +err_active: + i915_active_release(&ring->vma->active); + return err; +} + +static void __ring_retire(struct intel_ring *ring) +{ + intel_ring_unpin(ring); + i915_active_release(&ring->vma->active); +} + __i915_active_call static void __intel_context_retire(struct i915_active *active) { @@ -151,7 +185,7 @@ static void __intel_context_retire(struct i915_active *active) __context_unpin_state(ce->state); intel_timeline_unpin(ce->timeline); - intel_ring_unpin(ce->ring); + __ring_retire(ce->ring); intel_context_put(ce); } @@ -163,7 +197,7 @@ static int __intel_context_active(struct i915_active *active) intel_context_get(ce); - err = intel_ring_pin(ce->ring); + err = __ring_active(ce->ring); if (err) goto err_put; @@ -183,7 +217,7 @@ static int __intel_context_active(struct i915_active *active) err_timeline: intel_timeline_unpin(ce->timeline); err_ring: - intel_ring_unpin(ce->ring); + __ring_retire(ce->ring); err_put: intel_context_put(ce); return err; diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index 17f1f1441efc..2b446474e010 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -274,8 +274,8 @@ struct intel_engine_cs { u8 class; u8 instance; - u8 uabi_class; - u8 uabi_instance; + u16 uabi_class; + u16 uabi_instance; u32 uabi_capabilities; u32 context_size; diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index a459a42ad5c2..7e64b7d7d330 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -94,8 +94,9 @@ static int __gt_park(struct intel_wakeref *wf) intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL); } + /* Defer dropping the display power well for 100ms, it's slow! */ GEM_BUG_ON(!wakeref); - intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref); + intel_display_power_put_async(i915, POWER_DOMAIN_GT_IRQ, wakeref); i915_globals_park(); diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 9fdefbdc3546..d925a1035c9d 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -845,12 +845,6 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine) } } -static void unwind_wa_tail(struct i915_request *rq) -{ - rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES); - assert_ring_tail_valid(rq->ring, rq->tail); -} - static struct i915_request * __unwind_incomplete_requests(struct intel_engine_cs *engine) { @@ -863,12 +857,10 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine) list_for_each_entry_safe_reverse(rq, rn, &engine->active.requests, sched.link) { - if (i915_request_completed(rq)) continue; /* XXX */ __i915_request_unsubmit(rq); - unwind_wa_tail(rq); /* * Push the request back into the queue for later resubmission. 
@@ -1161,13 +1153,29 @@ execlists_schedule_out(struct i915_request *rq) i915_request_put(rq); } -static u64 execlists_update_context(const struct i915_request *rq) +static u64 execlists_update_context(struct i915_request *rq) { struct intel_context *ce = rq->hw_context; - u64 desc; + u64 desc = ce->lrc_desc; + u32 tail; - ce->lrc_reg_state[CTX_RING_TAIL] = - intel_ring_set_tail(rq->ring, rq->tail); + /* + * WaIdleLiteRestore:bdw,skl + * + * We should never submit the context with the same RING_TAIL twice + * just in case we submit an empty ring, which confuses the HW. + * + * We append a couple of NOOPs (gen8_emit_wa_tail) after the end of + * the normal request to be able to always advance the RING_TAIL on + * subsequent resubmissions (for lite restore). Should that fail us, + * and we try and submit the same tail again, force the context + * reload. + */ + tail = intel_ring_set_tail(rq->ring, rq->tail); + if (unlikely(ce->lrc_reg_state[CTX_RING_TAIL] == tail)) + desc |= CTX_DESC_FORCE_RESTORE; + ce->lrc_reg_state[CTX_RING_TAIL] = tail; + rq->tail = rq->wa_tail; /* * Make sure the context image is complete before we submit it to HW. @@ -1186,13 +1194,11 @@ static u64 execlists_update_context(const struct i915_request *rq) */ mb(); - desc = ce->lrc_desc; - ce->lrc_desc &= ~CTX_DESC_FORCE_RESTORE; - /* Wa_1607138340:tgl */ if (IS_TGL_REVID(rq->i915, TGL_REVID_A0, TGL_REVID_A0)) desc |= CTX_DESC_FORCE_RESTORE; + ce->lrc_desc &= ~CTX_DESC_FORCE_RESTORE; return desc; } @@ -1703,16 +1709,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine) return; } - - /* - * WaIdleLiteRestore:bdw,skl - * Apply the wa NOOPs to prevent - * ring:HEAD == rq:TAIL as we resubmit the - * request. See gen8_emit_fini_breadcrumb() for - * where we prepare the padding after the - * end of the request. 
- */ - last->tail = last->wa_tail; } } @@ -2668,6 +2664,14 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */ batch = gen8_emit_flush_coherentl3_wa(engine, batch); + /* WaClearSlmSpaceAtContextSwitch:skl,bxt,kbl,glk,cfl */ + batch = gen8_emit_pipe_control(batch, + PIPE_CONTROL_FLUSH_L3 | + PIPE_CONTROL_STORE_DATA_INDEX | + PIPE_CONTROL_CS_STALL | + PIPE_CONTROL_QW_WRITE, + LRC_PPHWSP_SCRATCH_ADDR); + batch = emit_lri(batch, lri, ARRAY_SIZE(lri)); /* WaMediaPoolStateCmdInWABB:bxt,glk */ @@ -4120,17 +4124,18 @@ static void virtual_context_destroy(struct kref *kref) for (n = 0; n < ve->num_siblings; n++) { struct intel_engine_cs *sibling = ve->siblings[n]; struct rb_node *node = &ve->nodes[sibling->id].rb; + unsigned long flags; if (RB_EMPTY_NODE(node)) continue; - spin_lock_irq(&sibling->active.lock); + spin_lock_irqsave(&sibling->active.lock, flags); /* Detachment is lazily performed in the execlists tasklet */ if (!RB_EMPTY_NODE(node)) rb_erase_cached(node, &sibling->execlists.virtual); - spin_unlock_irq(&sibling->active.lock); + spin_unlock_irqrestore(&sibling->active.lock, flags); } GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet)); @@ -4419,9 +4424,11 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx, ve->base.gt = siblings[0]->gt; ve->base.uncore = siblings[0]->uncore; ve->base.id = -1; + ve->base.class = OTHER_CLASS; ve->base.uabi_class = I915_ENGINE_CLASS_INVALID; ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL; + ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL; /* * The decision on whether to submit a request using semaphores diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c index a47d5a7c32c9..93026217c121 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c @@ -1413,14 +1413,6 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags) int len; u32 *cs; - flags |= MI_MM_SPACE_GTT; - if (IS_HASWELL(i915)) - /* These flags are for resource streamer on HSW+ */ - flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN; - else - /* We need to save the extended state for powersaving modes */ - flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN; - len = 4; if (IS_GEN(i915, 7)) len += 2 + (num_engines ? 4 * num_engines + 6 : 0); @@ -1589,22 +1581,21 @@ static int switch_context(struct i915_request *rq) } if (ce->state) { - u32 hw_flags; + u32 flags; GEM_BUG_ON(rq->engine->id != RCS0); - /* - * The kernel context(s) is treated as pure scratch and is not - * expected to retain any state (as we sacrifice it during - * suspend and on resume it may be corrupted). This is ok, - * as nothing actually executes using the kernel context; it - * is purely used for flushing user contexts. 
- */ - hw_flags = 0; - if (i915_gem_context_is_kernel(rq->gem_context)) - hw_flags = MI_RESTORE_INHIBIT; + /* For resource streamer on HSW+ and power context elsewhere */ + BUILD_BUG_ON(HSW_MI_RS_SAVE_STATE_EN != MI_SAVE_EXT_STATE_EN); + BUILD_BUG_ON(HSW_MI_RS_RESTORE_STATE_EN != MI_RESTORE_EXT_STATE_EN); + + flags = MI_SAVE_EXT_STATE_EN | MI_MM_SPACE_GTT; + if (!i915_gem_context_is_kernel(rq->gem_context)) + flags |= MI_RESTORE_EXT_STATE_EN; + else + flags |= MI_RESTORE_INHIBIT; - ret = mi_set_context(rq, hw_flags); + ret = mi_set_context(rq, flags); if (ret) return ret; } diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index e451298d11c3..2477a1e5a166 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c @@ -36,13 +36,32 @@ #define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12)) +static int vgpu_pin_dma_address(struct intel_vgpu *vgpu, + unsigned long size, + dma_addr_t dma_addr) +{ + int ret = 0; + + if (intel_gvt_hypervisor_dma_pin_guest_page(vgpu, dma_addr)) + ret = -EINVAL; + + return ret; +} + +static void vgpu_unpin_dma_address(struct intel_vgpu *vgpu, + dma_addr_t dma_addr) +{ + intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, dma_addr); +} + static int vgpu_gem_get_pages( struct drm_i915_gem_object *obj) { struct drm_i915_private *dev_priv = to_i915(obj->base.dev); + struct intel_vgpu *vgpu; struct sg_table *st; struct scatterlist *sg; - int i, ret; + int i, j, ret; gen8_pte_t __iomem *gtt_entries; struct intel_vgpu_fb_info *fb_info; u32 page_num; @@ -51,6 +70,10 @@ static int vgpu_gem_get_pages( if (WARN_ON(!fb_info)) return -ENODEV; + vgpu = fb_info->obj->vgpu; + if (WARN_ON(!vgpu)) + return -ENODEV; + st = kmalloc(sizeof(*st), GFP_KERNEL); if (unlikely(!st)) return -ENOMEM; @@ -64,21 +87,53 @@ static int vgpu_gem_get_pages( gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + (fb_info->start >> PAGE_SHIFT); for_each_sg(st->sgl, sg, page_num, i) { + dma_addr_t dma_addr = + GEN8_DECODE_PTE(readq(&gtt_entries[i])); + if (vgpu_pin_dma_address(vgpu, PAGE_SIZE, dma_addr)) { + ret = -EINVAL; + goto out; + } + sg->offset = 0; sg->length = PAGE_SIZE; - sg_dma_address(sg) = - GEN8_DECODE_PTE(readq(&gtt_entries[i])); sg_dma_len(sg) = PAGE_SIZE; + sg_dma_address(sg) = dma_addr; } __i915_gem_object_set_pages(obj, st, PAGE_SIZE); +out: + if (ret) { + dma_addr_t dma_addr; + + for_each_sg(st->sgl, sg, i, j) { + dma_addr = sg_dma_address(sg); + if (dma_addr) + vgpu_unpin_dma_address(vgpu, dma_addr); + } + sg_free_table(st); + kfree(st); + } + + return ret; - return 0; } static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages) { + struct scatterlist *sg; + + if (obj->base.dma_buf) { + struct intel_vgpu_fb_info *fb_info = obj->gvt_info; + struct intel_vgpu_dmabuf_obj *obj = fb_info->obj; + struct intel_vgpu *vgpu = obj->vgpu; + int i; + + for_each_sg(pages->sgl, sg, fb_info->size, i) + vgpu_unpin_dma_address(vgpu, + sg_dma_address(sg)); + } + sg_free_table(pages); kfree(pages); } @@ -163,6 +218,7 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev, drm_gem_private_object_init(dev, &obj->base, roundup(info->size, PAGE_SIZE)); i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class); + i915_gem_object_set_readonly(obj); obj->read_domains = I915_GEM_DOMAIN_GTT; obj->write_domain = 0; diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index bb9fe6bf5275..1043e6d564df 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ 
b/drivers/gpu/drm/i915/gvt/handlers.c @@ -341,6 +341,10 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id); engine_mask |= BIT(VCS1); } + if (data & GEN9_GRDOM_GUC) { + gvt_dbg_mmio("vgpu%d: request GUC Reset\n", vgpu->id); + vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET; + } engine_mask &= INTEL_INFO(vgpu->gvt->dev_priv)->engine_mask; } @@ -1636,6 +1640,16 @@ static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu, return 0; } +static int guc_status_read(struct intel_vgpu *vgpu, + unsigned int offset, void *p_data, + unsigned int bytes) +{ + /* keep MIA_IN_RESET before clearing */ + read_vreg(vgpu, offset, p_data, bytes); + vgpu_vreg(vgpu, offset) &= ~GS_MIA_IN_RESET; + return 0; +} + static int mmio_read_from_hw(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { @@ -2672,6 +2686,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DH(EDP_PSR_IMR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write); MMIO_DH(EDP_PSR_IIR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write); + MMIO_DH(GUC_STATUS, D_ALL, guc_status_read, NULL); + return 0; } diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index 4862fb12778e..b19a3b1ea4c1 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -62,6 +62,8 @@ struct intel_gvt_mpt { unsigned long size, dma_addr_t *dma_addr); void (*dma_unmap_guest_page)(unsigned long handle, dma_addr_t dma_addr); + int (*dma_pin_guest_page)(unsigned long handle, dma_addr_t dma_addr); + int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn, unsigned long mfn, unsigned int nr, bool map); int (*set_trap_area)(unsigned long handle, u64 start, u64 end, diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 04a5a0d90823..3259a1fa69e1 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1916,6 +1916,28 @@ err_unlock: return ret; } +static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr) +{ + struct kvmgt_guest_info *info; + struct gvt_dma *entry; + int ret = 0; + + if (!handle_valid(handle)) + return -ENODEV; + + info = (struct kvmgt_guest_info *)handle; + + mutex_lock(&info->vgpu->vdev.cache_lock); + entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr); + if (entry) + kref_get(&entry->ref); + else + ret = -ENOMEM; + mutex_unlock(&info->vgpu->vdev.cache_lock); + + return ret; +} + static void __gvt_dma_release(struct kref *ref) { struct gvt_dma *entry = container_of(ref, typeof(*entry), ref); @@ -2027,6 +2049,7 @@ static struct intel_gvt_mpt kvmgt_mpt = { .gfn_to_mfn = kvmgt_gfn_to_pfn, .dma_map_guest_page = kvmgt_dma_map_guest_page, .dma_unmap_guest_page = kvmgt_dma_unmap_guest_page, + .dma_pin_guest_page = kvmgt_dma_pin_guest_page, .set_opregion = kvmgt_set_opregion, .set_edid = kvmgt_set_edid, .get_vfio_device = kvmgt_get_vfio_device, diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 0f9440128123..9ad224df9c68 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -255,6 +255,21 @@ static inline void intel_gvt_hypervisor_dma_unmap_guest_page( } /** + * intel_gvt_hypervisor_dma_pin_guest_page - pin guest dma buf + * @vgpu: a vGPU + * @dma_addr: guest dma addr + * + * Returns: + * 0 on success, negative error code if failed. 
+ */ +static inline int +intel_gvt_hypervisor_dma_pin_guest_page(struct intel_vgpu *vgpu, + dma_addr_t dma_addr) +{ + return intel_gvt_host.mpt->dma_pin_guest_page(vgpu->handle, dma_addr); +} + +/** * intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN * @vgpu: a vGPU * @gfn: guest PFN diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index d5a6e4e3d0fd..85bd9bf4f6ee 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -212,9 +212,9 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt) */ void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu) { - mutex_lock(&vgpu->gvt->lock); + mutex_lock(&vgpu->vgpu_lock); vgpu->active = true; - mutex_unlock(&vgpu->gvt->lock); + mutex_unlock(&vgpu->vgpu_lock); } /** diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index e29bc137e7ba..21aa08f55811 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1660,8 +1660,10 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, (IS_BROADWELL(dev_priv) || IS_GEN(dev_priv, 9)) /* WaRsDisableCoarsePowerGating:skl,cnl */ -#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \ - (IS_CANNONLAKE(dev_priv) || IS_GEN(dev_priv, 9)) +#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \ + (IS_CANNONLAKE(dev_priv) || \ + IS_SKL_GT3(dev_priv) || \ + IS_SKL_GT4(dev_priv)) #define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4) #define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \ diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index b9eb6b3149b7..905890e3ac24 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -45,6 +45,7 @@ #include "gem/i915_gem_context.h" #include "gem/i915_gem_ioctls.h" #include "gem/i915_gem_pm.h" +#include "gt/intel_context.h" #include "gt/intel_engine_user.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" @@ -160,7 +161,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, * We manually control the domain here and pretend that it * remains coherent i.e. in the GTT domain, like shmem_pwrite. 
*/ - intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU); + i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU); if (copy_from_user(vaddr, user_data, args->size)) return -EFAULT; @@ -168,7 +169,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, drm_clflush_virt_range(vaddr, args->size); intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt); - intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU); + i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU); return 0; } @@ -588,7 +589,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, goto out_unpin; } - intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU); + i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU); user_data = u64_to_user_ptr(args->data_ptr); offset = args->offset; @@ -630,7 +631,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, user_data += page_length; offset += page_length; } - intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU); + i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU); i915_gem_object_unlock_fence(obj, fence); out_unpin: @@ -720,7 +721,7 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj, offset = 0; } - intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU); + i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU); i915_gem_object_unlock_fence(obj, fence); return ret; @@ -1053,6 +1054,18 @@ out: return err; } +static int __intel_context_flush_retire(struct intel_context *ce) +{ + struct intel_timeline *tl; + + tl = intel_context_timeline_lock(ce); + if (IS_ERR(tl)) + return PTR_ERR(tl); + + intel_context_timeline_unlock(tl); + return 0; +} + static int __intel_engines_record_defaults(struct intel_gt *gt) { struct i915_request *requests[I915_NUM_ENGINES] = {}; @@ -1121,13 +1134,20 @@ err_rq: if (!rq) continue; - /* We want to be able to unbind the state from the GGTT */ - GEM_BUG_ON(intel_context_is_pinned(rq->hw_context)); - + GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, + &rq->hw_context->flags)); state = rq->hw_context->state; if (!state) continue; + /* Serialise with retirement on another CPU */ + err = __intel_context_flush_retire(rq->hw_context); + if (err) + goto out; + + /* We want to be able to unbind the state from the GGTT */ + GEM_BUG_ON(intel_context_is_pinned(rq->hw_context)); + /* * As we will hold a reference to the logical state, it will * not be torn down with the context, and importantly the diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 6239a9adbf14..d6ce57d30958 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1177,6 +1177,7 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt, pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2)); vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1))); do { + GEM_BUG_ON(iter->sg->length < I915_GTT_PAGE_SIZE); vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma; iter->dma += I915_GTT_PAGE_SIZE; @@ -1660,6 +1661,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt)); do { + GEM_BUG_ON(iter.sg->length < I915_GTT_PAGE_SIZE); vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma); iter.dma += I915_GTT_PAGE_SIZE; @@ -2847,7 +2849,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) * readback check when writing GTT PTE entries. 
*/ if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) - ggtt->gsm = ioremap_nocache(phys_addr, size); + ggtt->gsm = ioremap(phys_addr, size); else ggtt->gsm = ioremap_wc(phys_addr, size); if (!ggtt->gsm) { @@ -3304,7 +3306,7 @@ void i915_ggtt_disable_guc(struct i915_ggtt *ggtt) static void ggtt_restore_mappings(struct i915_ggtt *ggtt) { - struct i915_vma *vma, *vn; + struct i915_vma *vma; bool flush = false; int open; @@ -3319,15 +3321,12 @@ static void ggtt_restore_mappings(struct i915_ggtt *ggtt) open = atomic_xchg(&ggtt->vm.open, 0); /* clflush objects bound into the GGTT and rebind them. */ - list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) { + list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) { struct drm_i915_gem_object *obj = vma->obj; if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) continue; - if (!__i915_vma_unbind(vma)) - continue; - clear_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma)); WARN_ON(i915_vma_bind(vma, obj ? obj->cache_level : 0, diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 65d7c2e599de..2ae14bc14931 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -2078,20 +2078,12 @@ gen8_update_reg_state_unlocked(const struct intel_context *ce, u32 *reg_state = ce->lrc_reg_state; int i; - if (IS_GEN(stream->perf->i915, 12)) { - u32 format = stream->oa_buffer.format; + reg_state[ctx_oactxctrl + 1] = + (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | + (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) | + GEN8_OA_COUNTER_RESUME; - reg_state[ctx_oactxctrl + 1] = - (format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) | - (stream->oa_config ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0); - } else { - reg_state[ctx_oactxctrl + 1] = - (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | - (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) | - GEN8_OA_COUNTER_RESUME; - } - - for (i = 0; !!ctx_flexeu0 && i < ARRAY_SIZE(flex_regs); i++) + for (i = 0; i < ARRAY_SIZE(flex_regs); i++) reg_state[ctx_flexeu0 + i * 2 + 1] = oa_config_flex_reg(stream->oa_config, flex_regs[i]); @@ -2224,34 +2216,51 @@ static int gen8_configure_context(struct i915_gem_context *ctx, return err; } -static int gen12_emit_oar_config(struct intel_context *ce, bool enable) +static int gen12_configure_oar_context(struct i915_perf_stream *stream, bool enable) { - struct i915_request *rq; - u32 *cs; - int err = 0; - - rq = i915_request_create(ce); - if (IS_ERR(rq)) - return PTR_ERR(rq); - - cs = intel_ring_begin(rq, 4); - if (IS_ERR(cs)) { - err = PTR_ERR(cs); - goto out; - } - - *cs++ = MI_LOAD_REGISTER_IMM(1); - *cs++ = i915_mmio_reg_offset(RING_CONTEXT_CONTROL(ce->engine->mmio_base)); - *cs++ = _MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE, - enable ? GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE : 0); - *cs++ = MI_NOOP; + int err; + struct intel_context *ce = stream->pinned_ctx; + u32 format = stream->oa_buffer.format; + struct flex regs_context[] = { + { + GEN8_OACTXCONTROL, + stream->perf->ctx_oactxctrl_offset + 1, + enable ? GEN8_OA_COUNTER_RESUME : 0, + }, + }; + /* Offsets in regs_lri are not used since this configuration is only + * applied using LRI. Initialize the correct offsets for posterity. + */ +#define GEN12_OAR_OACONTROL_OFFSET 0x5B0 + struct flex regs_lri[] = { + { + GEN12_OAR_OACONTROL, + GEN12_OAR_OACONTROL_OFFSET + 1, + (format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) | + (enable ? 
GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0) + }, + { + RING_CONTEXT_CONTROL(ce->engine->mmio_base), + CTX_CONTEXT_CONTROL, + _MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE, + enable ? + GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE : + 0) + }, + }; - intel_ring_advance(rq, cs); + /* Modify the context image of pinned context with regs_context*/ + err = intel_context_lock_pinned(ce); + if (err) + return err; -out: - i915_request_add(rq); + err = gen8_modify_context(ce, regs_context, ARRAY_SIZE(regs_context)); + intel_context_unlock_pinned(ce); + if (err) + return err; - return err; + /* Apply regs_lri using LRI with pinned context */ + return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri)); } /* @@ -2277,53 +2286,16 @@ out: * per-context OA state. * * Note: it's only the RCS/Render context that has any OA state. + * Note: the first flex register passed must always be R_PWR_CLK_STATE */ -static int lrc_configure_all_contexts(struct i915_perf_stream *stream, - const struct i915_oa_config *oa_config) +static int oa_configure_all_contexts(struct i915_perf_stream *stream, + struct flex *regs, + size_t num_regs) { struct drm_i915_private *i915 = stream->perf->i915; - /* The MMIO offsets for Flex EU registers aren't contiguous */ - const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset; -#define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1) - struct flex regs[] = { - { - GEN8_R_PWR_CLK_STATE, - CTX_R_PWR_CLK_STATE, - }, - { - IS_GEN(i915, 12) ? - GEN12_OAR_OACONTROL : GEN8_OACTXCONTROL, - stream->perf->ctx_oactxctrl_offset + 1, - }, - { EU_PERF_CNTL0, ctx_flexeuN(0) }, - { EU_PERF_CNTL1, ctx_flexeuN(1) }, - { EU_PERF_CNTL2, ctx_flexeuN(2) }, - { EU_PERF_CNTL3, ctx_flexeuN(3) }, - { EU_PERF_CNTL4, ctx_flexeuN(4) }, - { EU_PERF_CNTL5, ctx_flexeuN(5) }, - { EU_PERF_CNTL6, ctx_flexeuN(6) }, - }; -#undef ctx_flexeuN struct intel_engine_cs *engine; struct i915_gem_context *ctx, *cn; - size_t array_size = IS_GEN(i915, 12) ? 2 : ARRAY_SIZE(regs); - int i, err; - - if (IS_GEN(i915, 12)) { - u32 format = stream->oa_buffer.format; - - regs[1].value = - (format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) | - (oa_config ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0); - } else { - regs[1].value = - (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | - (stream->periodic ? 
GEN8_OA_TIMER_ENABLE : 0) | - GEN8_OA_COUNTER_RESUME; - } - - for (i = 2; !!ctx_flexeu0 && i < array_size; i++) - regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg); + int err; lockdep_assert_held(&stream->perf->lock); @@ -2353,7 +2325,7 @@ static int lrc_configure_all_contexts(struct i915_perf_stream *stream, spin_unlock(&i915->gem.contexts.lock); - err = gen8_configure_context(ctx, regs, array_size); + err = gen8_configure_context(ctx, regs, num_regs); if (err) { i915_gem_context_put(ctx); return err; @@ -2378,7 +2350,7 @@ static int lrc_configure_all_contexts(struct i915_perf_stream *stream, regs[0].value = intel_sseu_make_rpcs(i915, &ce->sseu); - err = gen8_modify_self(ce, regs, array_size); + err = gen8_modify_self(ce, regs, num_regs); if (err) return err; } @@ -2386,6 +2358,56 @@ static int lrc_configure_all_contexts(struct i915_perf_stream *stream, return 0; } +static int gen12_configure_all_contexts(struct i915_perf_stream *stream, + const struct i915_oa_config *oa_config) +{ + struct flex regs[] = { + { + GEN8_R_PWR_CLK_STATE, + CTX_R_PWR_CLK_STATE, + }, + }; + + return oa_configure_all_contexts(stream, regs, ARRAY_SIZE(regs)); +} + +static int lrc_configure_all_contexts(struct i915_perf_stream *stream, + const struct i915_oa_config *oa_config) +{ + /* The MMIO offsets for Flex EU registers aren't contiguous */ + const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset; +#define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1) + struct flex regs[] = { + { + GEN8_R_PWR_CLK_STATE, + CTX_R_PWR_CLK_STATE, + }, + { + GEN8_OACTXCONTROL, + stream->perf->ctx_oactxctrl_offset + 1, + }, + { EU_PERF_CNTL0, ctx_flexeuN(0) }, + { EU_PERF_CNTL1, ctx_flexeuN(1) }, + { EU_PERF_CNTL2, ctx_flexeuN(2) }, + { EU_PERF_CNTL3, ctx_flexeuN(3) }, + { EU_PERF_CNTL4, ctx_flexeuN(4) }, + { EU_PERF_CNTL5, ctx_flexeuN(5) }, + { EU_PERF_CNTL6, ctx_flexeuN(6) }, + }; +#undef ctx_flexeuN + int i; + + regs[1].value = + (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | + (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) | + GEN8_OA_COUNTER_RESUME; + + for (i = 2; i < ARRAY_SIZE(regs); i++) + regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg); + + return oa_configure_all_contexts(stream, regs, ARRAY_SIZE(regs)); +} + static int gen8_enable_metric_set(struct i915_perf_stream *stream) { struct intel_uncore *uncore = stream->uncore; @@ -2464,7 +2486,7 @@ static int gen12_enable_metric_set(struct i915_perf_stream *stream) * to make sure all slices/subslices are ON before writing to NOA * registers. */ - ret = lrc_configure_all_contexts(stream, oa_config); + ret = gen12_configure_all_contexts(stream, oa_config); if (ret) return ret; @@ -2474,8 +2496,7 @@ static int gen12_enable_metric_set(struct i915_perf_stream *stream) * requested this. */ if (stream->ctx) { - ret = gen12_emit_oar_config(stream->pinned_ctx, - oa_config != NULL); + ret = gen12_configure_oar_context(stream, true); if (ret) return ret; } @@ -2509,11 +2530,11 @@ static void gen12_disable_metric_set(struct i915_perf_stream *stream) struct intel_uncore *uncore = stream->uncore; /* Reset all contexts' slices/subslices configurations. */ - lrc_configure_all_contexts(stream, NULL); + gen12_configure_all_contexts(stream, NULL); /* disable the context save/restore or OAR counters */ if (stream->ctx) - gen12_emit_oar_config(stream->pinned_ctx, false); + gen12_configure_oar_context(stream, false); /* Make sure we disable noa to save power. 
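
The perf refactor above is a table-driven split: lrc_configure_all_contexts() and gen12_configure_all_contexts() now own only their per-generation register tables, while oa_configure_all_contexts() performs the shared walk and rewrites regs[0].value per context, which is why the first entry must always be R_PWR_CLK_STATE. A runnable model of the shape, with illustrative names and values:

#include <stdio.h>
#include <stddef.h>

struct flex { const char *reg; unsigned int value; };

/* Shared walk: entry 0 is patched per context, the rest applied verbatim. */
static void apply_to_all_contexts(struct flex *regs, size_t n)
{
	int ctx;
	size_t i;

	for (ctx = 0; ctx < 2; ctx++) {
		regs[0].value = 0x1000u + ctx;	/* per-context RPCS stand-in */
		for (i = 0; i < n; i++)
			printf("ctx%d: %s <- %#x\n", ctx, regs[i].reg, regs[i].value);
	}
}

int main(void)
{
	struct flex gen12_regs[] = { { "R_PWR_CLK_STATE", 0 } };
	struct flex gen8_regs[]  = { { "R_PWR_CLK_STATE", 0 },
				     { "OACTXCONTROL",    0x2 } };

	apply_to_all_contexts(gen12_regs, 1);	/* gen12: RPCS only */
	apply_to_all_contexts(gen8_regs, 2);	/* gen8-11: RPCS + OA registers */
	return 0;
}
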
*/ intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0); @@ -2713,7 +2734,8 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, return -EINVAL; } - if (!(props->sample_flags & SAMPLE_OA_REPORT)) { + if (!(props->sample_flags & SAMPLE_OA_REPORT) && + (INTEL_GEN(perf->i915) < 12 || !stream->ctx)) { DRM_DEBUG("Only OA report sampling supported\n"); return -EINVAL; } @@ -2745,7 +2767,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, format_size = perf->oa_formats[props->oa_format].size; - stream->sample_flags |= SAMPLE_OA_REPORT; + stream->sample_flags = props->sample_flags; stream->sample_size += format_size; stream->oa_buffer.format_size = format_size; @@ -2854,7 +2876,11 @@ void i915_oa_init_reg_state(const struct intel_context *ce, return; stream = engine->i915->perf.exclusive_stream; - if (stream) + /* + * For gen12, only CTX_R_PWR_CLK_STATE needs update, but the caller + * is already doing that, so nothing to be done for gen12 here. + */ + if (stream && INTEL_GEN(stream->perf->i915) < 12) gen8_update_reg_state_unlocked(ce, stream); } diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 2814218c5ba1..d6d2e6fb8674 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -144,61 +144,40 @@ static inline s64 ktime_since(const ktime_t kt) return ktime_to_ns(ktime_sub(ktime_get(), kt)); } -static u64 __pmu_estimate_rc6(struct i915_pmu *pmu) -{ - u64 val; - - /* - * We think we are runtime suspended. - * - * Report the delta from when the device was suspended to now, - * on top of the last known real value, as the approximated RC6 - * counter value. - */ - val = ktime_since(pmu->sleep_last); - val += pmu->sample[__I915_SAMPLE_RC6].cur; - - pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val; - - return val; -} - -static u64 __pmu_update_rc6(struct i915_pmu *pmu, u64 val) -{ - /* - * If we are coming back from being runtime suspended we must - * be careful not to report a larger value than returned - * previously. - */ - if (val >= pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) { - pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0; - pmu->sample[__I915_SAMPLE_RC6].cur = val; - } else { - val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur; - } - - return val; -} - static u64 get_rc6(struct intel_gt *gt) { struct drm_i915_private *i915 = gt->i915; struct i915_pmu *pmu = &i915->pmu; unsigned long flags; + bool awake = false; u64 val; - val = 0; if (intel_gt_pm_get_if_awake(gt)) { val = __get_rc6(gt); intel_gt_pm_put_async(gt); + awake = true; } spin_lock_irqsave(&pmu->lock, flags); - if (val) - val = __pmu_update_rc6(pmu, val); + if (awake) { + pmu->sample[__I915_SAMPLE_RC6].cur = val; + } else { + /* + * We think we are runtime suspended. + * + * Report the delta from when the device was suspended to now, + * on top of the last known real value, as the approximated RC6 + * counter value. 
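
An estimate computed this way can land below a real counter value that was already reported, so the code following this comment clamps against __I915_SAMPLE_RC6_LAST_REPORTED to keep the PMU counter monotonic. A standalone model of that clamp:

#include <stdint.h>
#include <stdio.h>

static uint64_t last_reported;

static uint64_t report_rc6(uint64_t raw)
{
	if (raw < last_reported)
		raw = last_reported;	/* never let the counter go backwards */
	else
		last_reported = raw;	/* remember the new high-water mark */
	return raw;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)report_rc6(100));	/* 100 */
	printf("%llu\n", (unsigned long long)report_rc6(90));	/* clamped: 100 */
	printf("%llu\n", (unsigned long long)report_rc6(150));	/* 150 */
	return 0;
}
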
+ */ + val = ktime_since(pmu->sleep_last); + val += pmu->sample[__I915_SAMPLE_RC6].cur; + } + + if (val < pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur) + val = pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur; else - val = __pmu_estimate_rc6(pmu); + pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = val; spin_unlock_irqrestore(&pmu->lock, flags); @@ -210,20 +189,11 @@ static void park_rc6(struct drm_i915_private *i915) struct i915_pmu *pmu = &i915->pmu; if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY)) - __pmu_update_rc6(pmu, __get_rc6(&i915->gt)); + pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt); pmu->sleep_last = ktime_get(); } -static void unpark_rc6(struct drm_i915_private *i915) -{ - struct i915_pmu *pmu = &i915->pmu; - - /* Estimate how long we slept and accumulate that into rc6 counters */ - if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY)) - __pmu_estimate_rc6(pmu); -} - #else static u64 get_rc6(struct intel_gt *gt) @@ -232,7 +202,6 @@ static u64 get_rc6(struct intel_gt *gt) } static void park_rc6(struct drm_i915_private *i915) {} -static void unpark_rc6(struct drm_i915_private *i915) {} #endif @@ -281,8 +250,6 @@ void i915_pmu_gt_unparked(struct drm_i915_private *i915) */ __i915_pmu_maybe_start_timer(pmu); - unpark_rc6(i915); - spin_unlock_irq(&pmu->lock); } @@ -1107,12 +1074,17 @@ void i915_pmu_register(struct drm_i915_private *i915) hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); pmu->timer.function = i915_sample; - if (!is_igp(i915)) + if (!is_igp(i915)) { pmu->name = kasprintf(GFP_KERNEL, - "i915-%s", + "i915_%s", dev_name(i915->drm.dev)); - else + if (pmu->name) { + /* tools/perf reserves colons as special. */ + strreplace((char *)pmu->name, ':', '_'); + } + } else { pmu->name = "i915"; + } if (!pmu->name) goto err; diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h index bf52e3983631..6c1647c5daf2 100644 --- a/drivers/gpu/drm/i915/i915_pmu.h +++ b/drivers/gpu/drm/i915/i915_pmu.h @@ -18,7 +18,7 @@ enum { __I915_SAMPLE_FREQ_ACT = 0, __I915_SAMPLE_FREQ_REQ, __I915_SAMPLE_RC6, - __I915_SAMPLE_RC6_ESTIMATED, + __I915_SAMPLE_RC6_LAST_REPORTED, __I915_NUM_PMU_SAMPLERS }; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 73079b503724..094011b8f64d 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4177,7 +4177,13 @@ enum { #define CPSSUNIT_CLKGATE_DIS REG_BIT(9) #define UNSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9434) -#define VFUNIT_CLKGATE_DIS (1 << 20) +#define VFUNIT_CLKGATE_DIS REG_BIT(20) +#define HSUNIT_CLKGATE_DIS REG_BIT(8) +#define VSUNIT_CLKGATE_DIS REG_BIT(3) + +#define UNSLICE_UNIT_LEVEL_CLKGATE2 _MMIO(0x94e4) +#define VSUNIT_CLKGATE_DIS_TGL REG_BIT(19) +#define PSDUNIT_CLKGATE_DIS REG_BIT(5) #define INF_UNIT_LEVEL_CLKGATE _MMIO(0x9560) #define CGPSF_CLKGATE_DIS (1 << 3) @@ -9405,11 +9411,9 @@ enum skl_power_gate { #define _ICL_AUX_REG_IDX(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A) #define _ICL_AUX_ANAOVRD1_A 0x162398 #define _ICL_AUX_ANAOVRD1_B 0x6C398 -#define _TGL_AUX_ANAOVRD1_C 0x160398 #define ICL_AUX_ANAOVRD1(pw_idx) _MMIO(_PICK(_ICL_AUX_REG_IDX(pw_idx), \ _ICL_AUX_ANAOVRD1_A, \ - _ICL_AUX_ANAOVRD1_B, \ - _TGL_AUX_ANAOVRD1_C)) + _ICL_AUX_ANAOVRD1_B)) #define ICL_AUX_ANAOVRD1_LDO_BYPASS (1 << 7) #define ICL_AUX_ANAOVRD1_ENABLE (1 << 0) @@ -11994,7 +11998,7 @@ enum skl_power_gate { /* This register controls the Display State Buffer (DSB) engines. 
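
The hunk under this comment fixes a decimal-for-hex slip: the per-instance stride of the DSB register block is 0x100 bytes, but it was written as decimal 100, leaving every engine other than instance 0 addressed 156 bytes short. A quick check of the arithmetic:

#include <stdio.h>

#define DSB_BASE		0x70B00
#define DSB_OLD(pipe, id)	(DSB_BASE + (pipe) * 0x1000 + (id) * 100)
#define DSB_NEW(pipe, id)	(DSB_BASE + (pipe) * 0x1000 + (id) * 0x100)

int main(void)
{
	printf("old: %#x\n", (unsigned)DSB_OLD(0, 1));	/* 0x70b64 - wrong */
	printf("new: %#x\n", (unsigned)DSB_NEW(0, 1));	/* 0x70c00 - intended */
	return 0;
}
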
*/ #define _DSBSL_INSTANCE_BASE 0x70B00 #define DSBSL_INSTANCE(pipe, id) (_DSBSL_INSTANCE_BASE + \ - (pipe) * 0x1000 + (id) * 100) + (pipe) * 0x1000 + (id) * 0x100) #define DSB_HEAD(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x0) #define DSB_TAIL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x4) #define DSB_CTRL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x8) diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index bbd71af00a91..765bec89fc0d 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -300,11 +300,11 @@ void i915_request_retire_upto(struct i915_request *rq) } static int -__i915_request_await_execution(struct i915_request *rq, - struct i915_request *signal, - void (*hook)(struct i915_request *rq, - struct dma_fence *signal), - gfp_t gfp) +__await_execution(struct i915_request *rq, + struct i915_request *signal, + void (*hook)(struct i915_request *rq, + struct dma_fence *signal), + gfp_t gfp) { struct execute_cb *cb; @@ -341,6 +341,8 @@ __i915_request_await_execution(struct i915_request *rq, } spin_unlock_irq(&signal->lock); + /* Copy across semaphore status as we need the same behaviour */ + rq->sched.flags |= signal->sched.flags; return 0; } @@ -811,31 +813,21 @@ already_busywaiting(struct i915_request *rq) } static int -emit_semaphore_wait(struct i915_request *to, - struct i915_request *from, - gfp_t gfp) +__emit_semaphore_wait(struct i915_request *to, + struct i915_request *from, + u32 seqno) { const int has_token = INTEL_GEN(to->i915) >= 12; u32 hwsp_offset; - int len; + int len, err; u32 *cs; GEM_BUG_ON(INTEL_GEN(to->i915) < 8); - /* Just emit the first semaphore we see as request space is limited. */ - if (already_busywaiting(to) & from->engine->mask) - goto await_fence; - - if (i915_request_await_start(to, from) < 0) - goto await_fence; - - /* Only submit our spinner after the signaler is running! */ - if (__i915_request_await_execution(to, from, NULL, gfp)) - goto await_fence; - /* We need to pin the signaler's HWSP until we are finished reading. */ - if (intel_timeline_read_hwsp(from, to, &hwsp_offset)) - goto await_fence; + err = intel_timeline_read_hwsp(from, to, &hwsp_offset); + if (err) + return err; len = 4; if (has_token) @@ -858,7 +850,7 @@ emit_semaphore_wait(struct i915_request *to, MI_SEMAPHORE_POLL | MI_SEMAPHORE_SAD_GTE_SDD) + has_token; - *cs++ = from->fence.seqno; + *cs++ = seqno; *cs++ = hwsp_offset; *cs++ = 0; if (has_token) { @@ -867,6 +859,28 @@ emit_semaphore_wait(struct i915_request *to, } intel_ring_advance(to, cs); + return 0; +} + +static int +emit_semaphore_wait(struct i915_request *to, + struct i915_request *from, + gfp_t gfp) +{ + /* Just emit the first semaphore we see as request space is limited. */ + if (already_busywaiting(to) & from->engine->mask) + goto await_fence; + + if (i915_request_await_start(to, from) < 0) + goto await_fence; + + /* Only submit our spinner after the signaler is running! 
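
The "spinner" in the comment above is the MI_SEMAPHORE_WAIT poll that __emit_semaphore_wait() emits with MI_SEMAPHORE_SAD_GTE_SDD: the waiting ring busy-waits until the value in the signaler's HWSP slot is greater than or equal to a target seqno. A userspace model of that semantic, assuming an atomic counter stands in for the HWSP:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int hwsp;	/* stand-in for the signaler's HWSP slot */

static void semaphore_wait_gte(unsigned int seqno)
{
	/* MI_SEMAPHORE_POLL | MI_SEMAPHORE_SAD_GTE_SDD, in spirit */
	while (atomic_load_explicit(&hwsp, memory_order_acquire) < seqno)
		;
}

static void *signaler(void *arg)
{
	(void)arg;
	atomic_store_explicit(&hwsp, 1, memory_order_release);	/* seqno 1 complete */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, signaler, NULL);
	semaphore_wait_gte(1);	/* returns once the signaler advances the counter */
	pthread_join(t, NULL);
	puts("semaphore satisfied");
	return 0;
}
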
*/ + if (__await_execution(to, from, NULL, gfp)) + goto await_fence; + + if (__emit_semaphore_wait(to, from, from->fence.seqno)) + goto await_fence; + to->sched.semaphores |= from->engine->mask; to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN; return 0; @@ -980,6 +994,57 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence) return 0; } +static bool intel_timeline_sync_has_start(struct intel_timeline *tl, + struct dma_fence *fence) +{ + return __intel_timeline_sync_is_later(tl, + fence->context, + fence->seqno - 1); +} + +static int intel_timeline_sync_set_start(struct intel_timeline *tl, + const struct dma_fence *fence) +{ + return __intel_timeline_sync_set(tl, fence->context, fence->seqno - 1); +} + +static int +__i915_request_await_execution(struct i915_request *to, + struct i915_request *from, + void (*hook)(struct i915_request *rq, + struct dma_fence *signal)) +{ + int err; + + /* Submit both requests at the same time */ + err = __await_execution(to, from, hook, I915_FENCE_GFP); + if (err) + return err; + + /* Squash repeated dependencies to the same timelines */ + if (intel_timeline_sync_has_start(i915_request_timeline(to), + &from->fence)) + return 0; + + /* Ensure both start together [after all semaphores in signal] */ + if (intel_engine_has_semaphores(to->engine)) + err = __emit_semaphore_wait(to, from, from->fence.seqno - 1); + else + err = i915_request_await_start(to, from); + if (err < 0) + return err; + + /* Couple the dependency tree for PI on this exposed to->fence */ + if (to->engine->schedule) { + err = i915_sched_node_add_dependency(&to->sched, &from->sched); + if (err < 0) + return err; + } + + return intel_timeline_sync_set_start(i915_request_timeline(to), + &from->fence); +} + int i915_request_await_execution(struct i915_request *rq, struct dma_fence *fence, @@ -1013,8 +1078,7 @@ i915_request_await_execution(struct i915_request *rq, if (dma_fence_is_i915(fence)) ret = __i915_request_await_execution(rq, to_request(fence), - hook, - I915_FENCE_GFP); + hook); else ret = i915_sw_fence_await_dma_fence(&rq->submit, fence, I915_FENCE_TIMEOUT, diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c index 010d67f48ad9..247a9671bca5 100644 --- a/drivers/gpu/drm/i915/i915_scheduler.c +++ b/drivers/gpu/drm/i915/i915_scheduler.c @@ -474,7 +474,6 @@ void i915_sched_node_fini(struct i915_sched_node *node) * so we may be called out-of-order. 
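
The seqno arithmetic in __i915_request_await_execution() above is the subtle part: seqnos on a timeline complete in order, so once the HWSP shows seqno - 1, every request ahead of the signaler has retired and the signaler itself must be executing. Waiting on seqno - 1 therefore awaits the signaler's start, not its completion. In terms of the poll sketch a few hunks back (helper names hypothetical):

/* Wait for the signaler to start: everything before it has completed. */
static void await_start(unsigned int signal_seqno)
{
	semaphore_wait_gte(signal_seqno - 1);
}

/* Wait for the signaler to finish: its own seqno has been written back. */
static void await_completion(unsigned int signal_seqno)
{
	semaphore_wait_gte(signal_seqno);
}
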
*/ list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) { - GEM_BUG_ON(!node_signaled(dep->signaler)); GEM_BUG_ON(!list_empty(&dep->dfs_link)); list_del(&dep->wait_link); diff --git a/drivers/gpu/drm/i915/i915_sw_fence_work.c b/drivers/gpu/drm/i915/i915_sw_fence_work.c index 07552cd544f2..8538ee7a521d 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence_work.c +++ b/drivers/gpu/drm/i915/i915_sw_fence_work.c @@ -78,12 +78,11 @@ static const struct dma_fence_ops fence_ops = { void dma_fence_work_init(struct dma_fence_work *f, const struct dma_fence_work_ops *ops) { + f->ops = ops; spin_lock_init(&f->lock); dma_fence_init(&f->dma, &fence_ops, &f->lock, 0, 0); i915_sw_fence_init(&f->chain, fence_notify); INIT_WORK(&f->work, fence_work); - - f->ops = ops; } int dma_fence_work_chain(struct dma_fence_work *f, struct dma_fence *signal) diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index e5512f26e20a..01c822256b39 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -1104,8 +1104,14 @@ int i915_vma_move_to_active(struct i915_vma *vma, return err; if (flags & EXEC_OBJECT_WRITE) { - if (intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CS)) - i915_active_add_request(&obj->frontbuffer->write, rq); + struct intel_frontbuffer *front; + + front = __intel_frontbuffer_get(obj); + if (unlikely(front)) { + if (intel_frontbuffer_invalidate(front, ORIGIN_CS)) + i915_active_add_request(&front->write, rq); + intel_frontbuffer_put(front); + } dma_resv_add_excl_fence(vma->resv, &rq->fence); obj->write_domain = I915_GEM_DOMAIN_RENDER; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 809bff955b5a..86379eddc908 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -4291,8 +4291,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state, &crtc_state->wm.skl.optimal.planes[plane_id]; if (plane_id == PLANE_CURSOR) { - if (WARN_ON(wm->wm[level].min_ddb_alloc > - total[PLANE_CURSOR])) { + if (wm->wm[level].min_ddb_alloc > total[PLANE_CURSOR]) { + WARN_ON(wm->wm[level].min_ddb_alloc != U16_MAX); blocks = U32_MAX; break; } @@ -6565,6 +6565,17 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv) /* WaEnable32PlaneMode:icl */ I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE)); + + /* + * Wa_1408615072:icl,ehl (vsunit) + * Wa_1407596294:icl,ehl (hsunit) + */ + intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE, + 0, VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS); + + /* Wa_1407352427:icl,ehl */ + intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE2, + 0, PSDUNIT_CLKGATE_DIS); } static void tgl_init_clock_gating(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/selftests/i915_random.h b/drivers/gpu/drm/i915/selftests/i915_random.h index 35cc69a3a1b9..05364eca20f7 100644 --- a/drivers/gpu/drm/i915/selftests/i915_random.h +++ b/drivers/gpu/drm/i915/selftests/i915_random.h @@ -25,6 +25,7 @@ #ifndef __I915_SELFTESTS_RANDOM_H__ #define __I915_SELFTESTS_RANDOM_H__ +#include <linux/math64.h> #include <linux/random.h> #include "../i915_selftest.h" diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c index d6214d3c8b33..ef4c630afe3f 100644 --- a/drivers/gpu/drm/mcde/mcde_dsi.c +++ b/drivers/gpu/drm/mcde/mcde_dsi.c @@ -935,11 +935,13 @@ static int mcde_dsi_bind(struct device *dev, struct device *master, for_each_available_child_of_node(dev->of_node, child) { panel = 
of_drm_find_panel(child); if (IS_ERR(panel)) { - dev_err(dev, "failed to find panel try bridge (%lu)\n", + dev_err(dev, "failed to find panel try bridge (%ld)\n", PTR_ERR(panel)); + panel = NULL; + bridge = of_drm_find_bridge(child); if (IS_ERR(bridge)) { - dev_err(dev, "failed to find bridge (%lu)\n", + dev_err(dev, "failed to find bridge (%ld)\n", PTR_ERR(bridge)); return PTR_ERR(bridge); } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index f80a8ba75977..3305a94fc930 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -215,11 +215,12 @@ struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc, struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); struct mtk_ddp_comp *comp; int i, count = 0; + unsigned int local_index = plane - mtk_crtc->planes; for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { comp = mtk_crtc->ddp_comp[i]; - if (plane->index < (count + mtk_ddp_comp_layer_nr(comp))) { - *local_layer = plane->index - count; + if (local_index < (count + mtk_ddp_comp_layer_nr(comp))) { + *local_layer = local_index - count; return comp; } count += mtk_ddp_comp_layer_nr(comp); @@ -310,7 +311,9 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc) plane_state = to_mtk_plane_state(plane->state); comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer); - mtk_ddp_comp_layer_config(comp, local_layer, plane_state); + if (comp) + mtk_ddp_comp_layer_config(comp, local_layer, + plane_state); } return 0; @@ -386,8 +389,9 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc) comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer); - mtk_ddp_comp_layer_config(comp, local_layer, - plane_state); + if (comp) + mtk_ddp_comp_layer_config(comp, local_layer, + plane_state); plane_state->pending.config = false; } mtk_crtc->pending_planes = false; @@ -401,7 +405,9 @@ int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane, struct mtk_ddp_comp *comp; comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer); - return mtk_ddp_comp_layer_check(comp, local_layer, state); + if (comp) + return mtk_ddp_comp_layer_check(comp, local_layer, state); + return 0; } static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc, diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index e9931bbbe846..d77c9f484ce3 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -230,28 +230,25 @@ static void mtk_dsi_mask(struct mtk_dsi *dsi, u32 offset, u32 mask, u32 data) static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi) { u32 timcon0, timcon1, timcon2, timcon3; - u32 ui, cycle_time; + u32 data_rate_mhz = DIV_ROUND_UP(dsi->data_rate, 1000000); struct mtk_phy_timing *timing = &dsi->phy_timing; - ui = DIV_ROUND_UP(1000000000, dsi->data_rate); - cycle_time = div_u64(8000000000ULL, dsi->data_rate); + timing->lpx = (60 * data_rate_mhz / (8 * 1000)) + 1; + timing->da_hs_prepare = (80 * data_rate_mhz + 4 * 1000) / 8000; + timing->da_hs_zero = (170 * data_rate_mhz + 10 * 1000) / 8000 + 1 - + timing->da_hs_prepare; + timing->da_hs_trail = timing->da_hs_prepare + 1; - timing->lpx = NS_TO_CYCLE(60, cycle_time); - timing->da_hs_prepare = NS_TO_CYCLE(50 + 5 * ui, cycle_time); - timing->da_hs_zero = NS_TO_CYCLE(110 + 6 * ui, cycle_time); - timing->da_hs_trail = NS_TO_CYCLE(77 + 4 * ui, cycle_time); + timing->ta_go = 4 * timing->lpx - 2; + timing->ta_sure = timing->lpx + 2; + timing->ta_get = 4 * timing->lpx; + timing->da_hs_exit 
= 2 * timing->lpx + 1; - timing->ta_go = 4 * timing->lpx; - timing->ta_sure = 3 * timing->lpx / 2; - timing->ta_get = 5 * timing->lpx; - timing->da_hs_exit = 2 * timing->lpx; - - timing->clk_hs_zero = NS_TO_CYCLE(336, cycle_time); - timing->clk_hs_trail = NS_TO_CYCLE(100, cycle_time) + 10; - - timing->clk_hs_prepare = NS_TO_CYCLE(64, cycle_time); - timing->clk_hs_post = NS_TO_CYCLE(80 + 52 * ui, cycle_time); - timing->clk_hs_exit = 2 * timing->lpx; + timing->clk_hs_prepare = 70 * data_rate_mhz / (8 * 1000); + timing->clk_hs_post = timing->clk_hs_prepare + 8; + timing->clk_hs_trail = timing->clk_hs_prepare; + timing->clk_hs_zero = timing->clk_hs_trail * 4; + timing->clk_hs_exit = 2 * timing->clk_hs_trail; timcon0 = timing->lpx | timing->da_hs_prepare << 8 | timing->da_hs_zero << 16 | timing->da_hs_trail << 24; @@ -482,27 +479,39 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi) dsi_tmp_buf_bpp - 10); data_phy_cycles = timing->lpx + timing->da_hs_prepare + - timing->da_hs_zero + timing->da_hs_exit + 2; + timing->da_hs_zero + timing->da_hs_exit + 3; if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) { - if (vm->hfront_porch * dsi_tmp_buf_bpp > + if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp > data_phy_cycles * dsi->lanes + 18) { - horizontal_frontporch_byte = vm->hfront_porch * - dsi_tmp_buf_bpp - - data_phy_cycles * - dsi->lanes - 18; + horizontal_frontporch_byte = + vm->hfront_porch * dsi_tmp_buf_bpp - + (data_phy_cycles * dsi->lanes + 18) * + vm->hfront_porch / + (vm->hfront_porch + vm->hback_porch); + + horizontal_backporch_byte = + horizontal_backporch_byte - + (data_phy_cycles * dsi->lanes + 18) * + vm->hback_porch / + (vm->hfront_porch + vm->hback_porch); } else { DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n"); horizontal_frontporch_byte = vm->hfront_porch * dsi_tmp_buf_bpp; } } else { - if (vm->hfront_porch * dsi_tmp_buf_bpp > + if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp > data_phy_cycles * dsi->lanes + 12) { - horizontal_frontporch_byte = vm->hfront_porch * - dsi_tmp_buf_bpp - - data_phy_cycles * - dsi->lanes - 12; + horizontal_frontporch_byte = + vm->hfront_porch * dsi_tmp_buf_bpp - + (data_phy_cycles * dsi->lanes + 12) * + vm->hfront_porch / + (vm->hfront_porch + vm->hback_porch); + horizontal_backporch_byte = horizontal_backporch_byte - + (data_phy_cycles * dsi->lanes + 12) * + vm->hback_porch / + (vm->hfront_porch + vm->hback_porch); } else { DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n"); horizontal_frontporch_byte = vm->hfront_porch * diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.c b/drivers/gpu/drm/meson/meson_venc_cvbs.c index 9ab27aecfcf3..1bd6b6d15ffb 100644 --- a/drivers/gpu/drm/meson/meson_venc_cvbs.c +++ b/drivers/gpu/drm/meson/meson_venc_cvbs.c @@ -64,6 +64,25 @@ struct meson_cvbs_mode meson_cvbs_modes[MESON_CVBS_MODES_COUNT] = { }, }; +static const struct meson_cvbs_mode * +meson_cvbs_get_mode(const struct drm_display_mode *req_mode) +{ + int i; + + for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) { + struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i]; + + if (drm_mode_match(req_mode, &meson_mode->mode, + DRM_MODE_MATCH_TIMINGS | + DRM_MODE_MATCH_CLOCK | + DRM_MODE_MATCH_FLAGS | + DRM_MODE_MATCH_3D_FLAGS)) + return meson_mode; + } + + return NULL; +} + /* Connector */ static void meson_cvbs_connector_destroy(struct drm_connector *connector) @@ -136,14 +155,8 @@ static int meson_venc_cvbs_encoder_atomic_check(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state, struct drm_connector_state 
*conn_state) { - int i; - - for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) { - struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i]; - - if (drm_mode_equal(&crtc_state->mode, &meson_mode->mode)) - return 0; - } + if (meson_cvbs_get_mode(&crtc_state->mode)) + return 0; return -EINVAL; } @@ -191,24 +204,17 @@ static void meson_venc_cvbs_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { + const struct meson_cvbs_mode *meson_mode = meson_cvbs_get_mode(mode); struct meson_venc_cvbs *meson_venc_cvbs = encoder_to_meson_venc_cvbs(encoder); struct meson_drm *priv = meson_venc_cvbs->priv; - int i; - for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) { - struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i]; + if (meson_mode) { + meson_venci_cvbs_mode_set(priv, meson_mode->enci); - if (drm_mode_equal(mode, &meson_mode->mode)) { - meson_venci_cvbs_mode_set(priv, - meson_mode->enci); - - /* Setup 27MHz vclk2 for ENCI and VDAC */ - meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS, - MESON_VCLK_CVBS, MESON_VCLK_CVBS, - MESON_VCLK_CVBS, true); - break; - } + /* Setup 27MHz vclk2 for ENCI and VDAC */ + meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS, MESON_VCLK_CVBS, + MESON_VCLK_CVBS, MESON_VCLK_CVBS, true); } } diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index d43951caeea0..b113876c2428 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -30,9 +30,8 @@ module_param_named(modeset, mgag200_modeset, int, 0400); static struct drm_driver driver; static const struct pci_device_id pciidlist[] = { - { PCI_VENDOR_ID_MATROX, 0x522, PCI_VENDOR_ID_SUN, 0x4852, 0, 0, + { PCI_VENDOR_ID_MATROX, 0x522, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_A | MGAG200_FLAG_HW_BUG_NO_STARTADD}, - { PCI_VENDOR_ID_MATROX, 0x522, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_A }, { PCI_VENDOR_ID_MATROX, 0x524, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_B }, { PCI_VENDOR_ID_MATROX, 0x530, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EV }, { PCI_VENDOR_ID_MATROX, 0x532, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_WB }, diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index c84f0a8b3f2c..ac678ace09a3 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -138,7 +138,7 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name, size = resource_size(res); - ptr = devm_ioremap_nocache(&pdev->dev, res->start, size); + ptr = devm_ioremap(&pdev->dev, res->start, size); if (!ptr) { DRM_DEV_ERROR(&pdev->dev, "failed to ioremap: %s\n", name); return ERR_PTR(-ENOMEM); diff --git a/drivers/gpu/drm/nouveau/dispnv50/atom.h b/drivers/gpu/drm/nouveau/dispnv50/atom.h index 43df86c38f58..24f7700768da 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/atom.h +++ b/drivers/gpu/drm/nouveau/dispnv50/atom.h @@ -114,6 +114,7 @@ struct nv50_head_atom { u8 nhsync:1; u8 nvsync:1; u8 depth:4; + u8 bpc; } or; /* Currently only used for MST */ diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 549486f1d937..63425e246018 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -326,9 +326,9 @@ nv50_outp_atomic_check_view(struct drm_encoder *encoder, * same size as the native one (e.g. 
different * refresh rate) */ - if (adjusted_mode->hdisplay == native_mode->hdisplay && - adjusted_mode->vdisplay == native_mode->vdisplay && - adjusted_mode->type & DRM_MODE_TYPE_DRIVER) + if (mode->hdisplay == native_mode->hdisplay && + mode->vdisplay == native_mode->vdisplay && + mode->type & DRM_MODE_TYPE_DRIVER) break; mode = native_mode; asyc->scaler.full = true; @@ -353,10 +353,20 @@ nv50_outp_atomic_check(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { - struct nouveau_connector *nv_connector = - nouveau_connector(conn_state->connector); - return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state, - nv_connector->native_mode); + struct drm_connector *connector = conn_state->connector; + struct nouveau_connector *nv_connector = nouveau_connector(connector); + struct nv50_head_atom *asyh = nv50_head_atom(crtc_state); + int ret; + + ret = nv50_outp_atomic_check_view(encoder, crtc_state, conn_state, + nv_connector->native_mode); + if (ret) + return ret; + + if (crtc_state->mode_changed || crtc_state->connectors_changed) + asyh->or.bpc = connector->display_info.bpc; + + return 0; } /****************************************************************************** @@ -770,32 +780,54 @@ nv50_msto_atomic_check(struct drm_encoder *encoder, struct nv50_mstm *mstm = mstc->mstm; struct nv50_head_atom *asyh = nv50_head_atom(crtc_state); int slots; + int ret; + + ret = nv50_outp_atomic_check_view(encoder, crtc_state, conn_state, + mstc->native); + if (ret) + return ret; + + if (!crtc_state->mode_changed && !crtc_state->connectors_changed) + return 0; + + /* + * When restoring duplicated states, we need to make sure that the bw + * remains the same and avoid recalculating it, as the connector's bpc + * may have changed after the state was duplicated + */ + if (!state->duplicated) { + const int clock = crtc_state->adjusted_mode.clock; - if (crtc_state->mode_changed || crtc_state->connectors_changed) { /* - * When restoring duplicated states, we need to make sure that - * the bw remains the same and avoid recalculating it, as the - * connector's bpc may have changed after the state was - * duplicated + * XXX: Since we don't use HDR in userspace quite yet, limit + * the bpc to 8 to save bandwidth on the topology. 
In the + * future, we'll want to properly fix this by dynamically + * selecting the highest possible bpc that would fit in the + * topology */ - if (!state->duplicated) { - const int bpp = connector->display_info.bpc * 3; - const int clock = crtc_state->adjusted_mode.clock; + asyh->or.bpc = min(connector->display_info.bpc, 8U); + asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3); + } - asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, bpp); - } + slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr, mstc->port, + asyh->dp.pbn); + if (slots < 0) + return slots; - slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr, - mstc->port, - asyh->dp.pbn); - if (slots < 0) - return slots; + asyh->dp.tu = slots; - asyh->dp.tu = slots; - } + return 0; +} - return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state, - mstc->native); +static u8 +nv50_dp_bpc_to_depth(unsigned int bpc) +{ + switch (bpc) { + case 6: return 0x2; + case 8: return 0x5; + case 10: /* fall-through */ + default: return 0x6; + } } static void @@ -808,7 +840,7 @@ nv50_msto_enable(struct drm_encoder *encoder) struct nv50_mstm *mstm = NULL; struct drm_connector *connector; struct drm_connector_list_iter conn_iter; - u8 proto, depth; + u8 proto; bool r; drm_connector_list_iter_begin(encoder->dev, &conn_iter); @@ -837,14 +869,8 @@ nv50_msto_enable(struct drm_encoder *encoder) else proto = 0x9; - switch (mstc->connector.display_info.bpc) { - case 6: depth = 0x2; break; - case 8: depth = 0x5; break; - case 10: - default: depth = 0x6; break; - } - - mstm->outp->update(mstm->outp, head->base.index, armh, proto, depth); + mstm->outp->update(mstm->outp, head->base.index, armh, proto, + nv50_dp_bpc_to_depth(armh->or.bpc)); msto->head = head; msto->mstc = mstc; @@ -1498,20 +1524,14 @@ nv50_sor_enable(struct drm_encoder *encoder) lvds.lvds.script |= 0x0200; } - if (nv_connector->base.display_info.bpc == 8) + if (asyh->or.bpc == 8) lvds.lvds.script |= 0x0200; } nvif_mthd(&disp->disp->object, 0, &lvds, sizeof(lvds)); break; case DCB_OUTPUT_DP: - if (nv_connector->base.display_info.bpc == 6) - depth = 0x2; - else - if (nv_connector->base.display_info.bpc == 8) - depth = 0x5; - else - depth = 0x6; + depth = nv50_dp_bpc_to_depth(asyh->or.bpc); if (nv_encoder->link & 1) proto = 0x8; @@ -1662,7 +1682,7 @@ nv50_pior_enable(struct drm_encoder *encoder) nv50_outp_acquire(nv_encoder); nv_connector = nouveau_encoder_connector_get(nv_encoder); - switch (nv_connector->base.display_info.bpc) { + switch (asyh->or.bpc) { case 10: asyh->or.depth = 0x6; break; case 8: asyh->or.depth = 0x5; break; case 6: asyh->or.depth = 0x2; break; diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c index 71c23bf1fe25..c9692df2b76c 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head.c +++ b/drivers/gpu/drm/nouveau/dispnv50/head.c @@ -81,18 +81,17 @@ nv50_head_atomic_check_dither(struct nv50_head_atom *armh, struct nv50_head_atom *asyh, struct nouveau_conn_atom *asyc) { - struct drm_connector *connector = asyc->state.connector; u32 mode = 0x00; if (asyc->dither.mode == DITHERING_MODE_AUTO) { - if (asyh->base.depth > connector->display_info.bpc * 3) + if (asyh->base.depth > asyh->or.bpc * 3) mode = DITHERING_MODE_DYNAMIC2X2; } else { mode = asyc->dither.mode; } if (asyc->dither.depth == DITHERING_DEPTH_AUTO) { - if (connector->display_info.bpc >= 8) + if (asyh->or.bpc >= 8) mode |= DITHERING_DEPTH_8BPC; } else { mode |= asyc->dither.depth; diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c 
b/drivers/gpu/drm/nouveau/nouveau_connector.c index 5b413588b823..9a9a7f5003d3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -245,14 +245,22 @@ nouveau_conn_atomic_duplicate_state(struct drm_connector *connector) void nouveau_conn_reset(struct drm_connector *connector) { + struct nouveau_connector *nv_connector = nouveau_connector(connector); struct nouveau_conn_atom *asyc; - if (WARN_ON(!(asyc = kzalloc(sizeof(*asyc), GFP_KERNEL)))) - return; + if (drm_drv_uses_atomic_modeset(connector->dev)) { + if (WARN_ON(!(asyc = kzalloc(sizeof(*asyc), GFP_KERNEL)))) + return; + + if (connector->state) + nouveau_conn_atomic_destroy_state(connector, + connector->state); + + __drm_atomic_helper_connector_reset(connector, &asyc->state); + } else { + asyc = &nv_connector->properties_state; + } - if (connector->state) - nouveau_conn_atomic_destroy_state(connector, connector->state); - __drm_atomic_helper_connector_reset(connector, &asyc->state); asyc->dither.mode = DITHERING_MODE_AUTO; asyc->dither.depth = DITHERING_DEPTH_AUTO; asyc->scaler.mode = DRM_MODE_SCALE_NONE; @@ -276,8 +284,14 @@ void nouveau_conn_attach_properties(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct nouveau_conn_atom *armc = nouveau_conn_atom(connector->state); struct nouveau_display *disp = nouveau_display(dev); + struct nouveau_connector *nv_connector = nouveau_connector(connector); + struct nouveau_conn_atom *armc; + + if (drm_drv_uses_atomic_modeset(connector->dev)) + armc = nouveau_conn_atom(connector->state); + else + armc = &nv_connector->properties_state; /* Init DVI-I specific properties. */ if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) @@ -748,9 +762,9 @@ static int nouveau_connector_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t value) { - struct nouveau_conn_atom *asyc = nouveau_conn_atom(connector->state); struct nouveau_connector *nv_connector = nouveau_connector(connector); struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder; + struct nouveau_conn_atom *asyc = &nv_connector->properties_state; struct drm_encoder *encoder = to_drm_encoder(nv_encoder); int ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h index f43a8d63aef8..de84fb4708c7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.h +++ b/drivers/gpu/drm/nouveau/nouveau_connector.h @@ -29,6 +29,7 @@ #include <nvif/notify.h> +#include <drm/drm_crtc.h> #include <drm/drm_edid.h> #include <drm/drm_encoder.h> #include <drm/drm_dp_helper.h> @@ -44,6 +45,60 @@ struct dcb_output; struct nouveau_backlight; #endif +#define nouveau_conn_atom(p) \ + container_of((p), struct nouveau_conn_atom, state) + +struct nouveau_conn_atom { + struct drm_connector_state state; + + struct { + /* The enum values specifically defined here match nv50/gf119 + * hw values, and the code relies on this. 
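
The nouveau_conn_atom() macro added just above is the standard container_of() idiom: given a pointer to an embedded member, recover the enclosing structure. A self-contained model of how it works:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct state { int dummy; };
struct conn_atom { int dither; struct state state; };

int main(void)
{
	struct conn_atom a = { .dither = 2 };
	struct state *s = &a.state;	/* only the member pointer is known */

	printf("%d\n", container_of(s, struct conn_atom, state)->dither); /* 2 */
	return 0;
}
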
+ */ + enum { + DITHERING_MODE_OFF = 0x00, + DITHERING_MODE_ON = 0x01, + DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON, + DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON, + DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON, + DITHERING_MODE_AUTO + } mode; + enum { + DITHERING_DEPTH_6BPC = 0x00, + DITHERING_DEPTH_8BPC = 0x02, + DITHERING_DEPTH_AUTO + } depth; + } dither; + + struct { + int mode; /* DRM_MODE_SCALE_* */ + struct { + enum { + UNDERSCAN_OFF, + UNDERSCAN_ON, + UNDERSCAN_AUTO, + } mode; + u32 hborder; + u32 vborder; + } underscan; + bool full; + } scaler; + + struct { + int color_vibrance; + int vibrant_hue; + } procamp; + + union { + struct { + bool dither:1; + bool scaler:1; + bool procamp:1; + }; + u8 mask; + } set; +}; + struct nouveau_connector { struct drm_connector base; enum dcb_connector_type type; @@ -63,6 +118,12 @@ struct nouveau_connector { #ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT struct nouveau_backlight *backlight; #endif + /* + * Our connector property code expects a nouveau_conn_atom struct + * even on pre-nv50 where we do not support atomic. This embedded + * version gets used in the non atomic modeset case. + */ + struct nouveau_conn_atom properties_state; }; static inline struct nouveau_connector *nouveau_connector( @@ -121,61 +182,6 @@ extern int nouveau_ignorelid; extern int nouveau_duallink; extern int nouveau_hdmimhz; -#include <drm/drm_crtc.h> -#define nouveau_conn_atom(p) \ - container_of((p), struct nouveau_conn_atom, state) - -struct nouveau_conn_atom { - struct drm_connector_state state; - - struct { - /* The enum values specifically defined here match nv50/gf119 - * hw values, and the code relies on this. - */ - enum { - DITHERING_MODE_OFF = 0x00, - DITHERING_MODE_ON = 0x01, - DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON, - DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON, - DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON, - DITHERING_MODE_AUTO - } mode; - enum { - DITHERING_DEPTH_6BPC = 0x00, - DITHERING_DEPTH_8BPC = 0x02, - DITHERING_DEPTH_AUTO - } depth; - } dither; - - struct { - int mode; /* DRM_MODE_SCALE_* */ - struct { - enum { - UNDERSCAN_OFF, - UNDERSCAN_ON, - UNDERSCAN_AUTO, - } mode; - u32 hborder; - u32 vborder; - } underscan; - bool full; - } scaler; - - struct { - int color_vibrance; - int vibrant_hue; - } procamp; - - union { - struct { - bool dither:1; - bool scaler:1; - bool procamp:1; - }; - u8 mask; - } set; -}; - void nouveau_conn_attach_properties(struct drm_connector *); void nouveau_conn_reset(struct drm_connector *); struct drm_connector_state * diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c index 4c4e8a30a1ac..536ba93b0f46 100644 --- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c +++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c @@ -18,15 +18,18 @@ static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev); static int panfrost_devfreq_target(struct device *dev, unsigned long *freq, u32 flags) { - struct panfrost_device *pfdev = dev_get_drvdata(dev); + struct dev_pm_opp *opp; int err; + opp = devfreq_recommended_opp(dev, freq, flags); + if (IS_ERR(opp)) + return PTR_ERR(opp); + dev_pm_opp_put(opp); + err = dev_pm_opp_set_rate(dev, *freq); if (err) return err; - *freq = clk_get_rate(pfdev->clock); - return 0; } @@ -60,20 +63,10 @@ static int panfrost_devfreq_get_dev_status(struct device *dev, return 0; } -static int panfrost_devfreq_get_cur_freq(struct device *dev, unsigned long *freq) -{ - struct panfrost_device *pfdev = 
platform_get_drvdata(to_platform_device(dev)); - - *freq = clk_get_rate(pfdev->clock); - - return 0; -} - static struct devfreq_dev_profile panfrost_devfreq_profile = { .polling_ms = 50, /* ~3 frames */ .target = panfrost_devfreq_target, .get_dev_status = panfrost_devfreq_get_dev_status, - .get_cur_freq = panfrost_devfreq_get_cur_freq, }; int panfrost_devfreq_init(struct panfrost_device *pfdev) diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index 9458dc6c750c..88b431a267af 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -78,8 +78,10 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data, struct drm_file *file) { + struct panfrost_file_priv *priv = file->driver_priv; struct panfrost_gem_object *bo; struct drm_panfrost_create_bo *args = data; + struct panfrost_gem_mapping *mapping; if (!args->size || args->pad || (args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP))) @@ -95,7 +97,14 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data, if (IS_ERR(bo)) return PTR_ERR(bo); - args->offset = bo->node.start << PAGE_SHIFT; + mapping = panfrost_gem_mapping_get(bo, priv); + if (!mapping) { + drm_gem_object_put_unlocked(&bo->base.base); + return -EINVAL; + } + + args->offset = mapping->mmnode.start << PAGE_SHIFT; + panfrost_gem_mapping_put(mapping); return 0; } @@ -119,6 +128,11 @@ panfrost_lookup_bos(struct drm_device *dev, struct drm_panfrost_submit *args, struct panfrost_job *job) { + struct panfrost_file_priv *priv = file_priv->driver_priv; + struct panfrost_gem_object *bo; + unsigned int i; + int ret; + job->bo_count = args->bo_handle_count; if (!job->bo_count) @@ -130,9 +144,32 @@ panfrost_lookup_bos(struct drm_device *dev, if (!job->implicit_fences) return -ENOMEM; - return drm_gem_objects_lookup(file_priv, - (void __user *)(uintptr_t)args->bo_handles, - job->bo_count, &job->bos); + ret = drm_gem_objects_lookup(file_priv, + (void __user *)(uintptr_t)args->bo_handles, + job->bo_count, &job->bos); + if (ret) + return ret; + + job->mappings = kvmalloc_array(job->bo_count, + sizeof(struct panfrost_gem_mapping *), + GFP_KERNEL | __GFP_ZERO); + if (!job->mappings) + return -ENOMEM; + + for (i = 0; i < job->bo_count; i++) { + struct panfrost_gem_mapping *mapping; + + bo = to_panfrost_bo(job->bos[i]); + mapping = panfrost_gem_mapping_get(bo, priv); + if (!mapping) { + ret = -EINVAL; + break; + } + + job->mappings[i] = mapping; + } + + return ret; } /** @@ -303,21 +340,26 @@ static int panfrost_ioctl_mmap_bo(struct drm_device *dev, void *data, } /* Don't allow mmapping of heap objects as pages are not pinned. 
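
Several panfrost ioctls in these hunks converge on the same pattern: resolve the caller's per-FD mapping, read the GPU virtual offset from its drm_mm node, then drop the reference. A hedged sketch of that shape, using only the helpers this series introduces (the function name here is illustrative):

static int sketch_bo_offset(struct panfrost_gem_object *bo,
			    struct panfrost_file_priv *priv,
			    u64 *offset)
{
	struct panfrost_gem_mapping *mapping;

	mapping = panfrost_gem_mapping_get(bo, priv);
	if (!mapping)
		return -EINVAL;		/* this FD never opened the BO */

	*offset = mapping->mmnode.start << PAGE_SHIFT;
	panfrost_gem_mapping_put(mapping);	/* balance the _get() */
	return 0;
}
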
*/ - if (to_panfrost_bo(gem_obj)->is_heap) - return -EINVAL; + if (to_panfrost_bo(gem_obj)->is_heap) { + ret = -EINVAL; + goto out; + } ret = drm_gem_create_mmap_offset(gem_obj); if (ret == 0) args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node); - drm_gem_object_put_unlocked(gem_obj); +out: + drm_gem_object_put_unlocked(gem_obj); return ret; } static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data, struct drm_file *file_priv) { + struct panfrost_file_priv *priv = file_priv->driver_priv; struct drm_panfrost_get_bo_offset *args = data; + struct panfrost_gem_mapping *mapping; struct drm_gem_object *gem_obj; struct panfrost_gem_object *bo; @@ -328,18 +370,26 @@ static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data, } bo = to_panfrost_bo(gem_obj); - args->offset = bo->node.start << PAGE_SHIFT; - + mapping = panfrost_gem_mapping_get(bo, priv); drm_gem_object_put_unlocked(gem_obj); + + if (!mapping) + return -EINVAL; + + args->offset = mapping->mmnode.start << PAGE_SHIFT; + panfrost_gem_mapping_put(mapping); return 0; } static int panfrost_ioctl_madvise(struct drm_device *dev, void *data, struct drm_file *file_priv) { + struct panfrost_file_priv *priv = file_priv->driver_priv; struct drm_panfrost_madvise *args = data; struct panfrost_device *pfdev = dev->dev_private; struct drm_gem_object *gem_obj; + struct panfrost_gem_object *bo; + int ret = 0; gem_obj = drm_gem_object_lookup(file_priv, args->handle); if (!gem_obj) { @@ -347,23 +397,48 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data, return -ENOENT; } - args->retained = drm_gem_shmem_madvise(gem_obj, args->madv); + bo = to_panfrost_bo(gem_obj); - if (args->retained) { - struct panfrost_gem_object *bo = to_panfrost_bo(gem_obj); + mutex_lock(&pfdev->shrinker_lock); + mutex_lock(&bo->mappings.lock); + if (args->madv == PANFROST_MADV_DONTNEED) { + struct panfrost_gem_mapping *first; + + first = list_first_entry(&bo->mappings.list, + struct panfrost_gem_mapping, + node); + + /* + * If we want to mark the BO purgeable, there must be only one + * user: the caller FD. + * We could do something smarter and mark the BO purgeable only + * when all its users have marked it purgeable, but globally + * visible/shared BOs are likely to never be marked purgeable + * anyway, so let's not bother. 
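
The single-user check below relies on list_is_singular(), which is true exactly when a list holds one entry; the kernel definition is equivalent to "not empty, and first equals last". A standalone model:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static int list_is_singular(const struct list_head *head)
{
	return head->next != head && head->next == head->prev;
}

int main(void)
{
	struct list_head head, a, b;

	head.next = head.prev = &head;			/* empty */
	printf("%d\n", list_is_singular(&head));	/* 0 */

	head.next = head.prev = &a;
	a.next = a.prev = &head;			/* one entry */
	printf("%d\n", list_is_singular(&head));	/* 1 */

	head.next = &a; a.prev = &head; a.next = &b;
	b.prev = &a; b.next = &head; head.prev = &b;	/* two entries */
	printf("%d\n", list_is_singular(&head));	/* 0 */
	return 0;
}
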
+ */ + if (!list_is_singular(&bo->mappings.list) || + WARN_ON_ONCE(first->mmu != &priv->mmu)) { + ret = -EINVAL; + goto out_unlock_mappings; + } + } - mutex_lock(&pfdev->shrinker_lock); + args->retained = drm_gem_shmem_madvise(gem_obj, args->madv); + if (args->retained) { if (args->madv == PANFROST_MADV_DONTNEED) - list_add_tail(&bo->base.madv_list, &pfdev->shrinker_list); + list_add_tail(&bo->base.madv_list, + &pfdev->shrinker_list); else if (args->madv == PANFROST_MADV_WILLNEED) list_del_init(&bo->base.madv_list); - - mutex_unlock(&pfdev->shrinker_lock); } +out_unlock_mappings: + mutex_unlock(&bo->mappings.lock); + mutex_unlock(&pfdev->shrinker_lock); + drm_gem_object_put_unlocked(gem_obj); - return 0; + return ret; } int panfrost_unstable_ioctl_check(void) @@ -443,7 +518,7 @@ panfrost_postclose(struct drm_device *dev, struct drm_file *file) { struct panfrost_file_priv *panfrost_priv = file->driver_priv; - panfrost_perfcnt_close(panfrost_priv); + panfrost_perfcnt_close(file); panfrost_job_close(panfrost_priv); panfrost_mmu_pgtable_free(panfrost_priv); diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c index deca0c30bbd4..17b654e1eb94 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem.c @@ -19,6 +19,22 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj) struct panfrost_gem_object *bo = to_panfrost_bo(obj); struct panfrost_device *pfdev = obj->dev->dev_private; + /* + * Make sure the BO is no longer inserted in the shrinker list before + * taking care of the destruction itself. If we don't do that we have a + * race condition between this function and what's done in + * panfrost_gem_shrinker_scan(). + */ + mutex_lock(&pfdev->shrinker_lock); + list_del_init(&bo->base.madv_list); + mutex_unlock(&pfdev->shrinker_lock); + + /* + * If we still have mappings attached to the BO, there's a problem in + * our refcounting. 
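
The ordering rule the comment above spells out generalizes: unpublish an object from every shared structure, under the same lock the concurrent walker takes, before starting teardown. A minimal userspace model of that discipline, with hypothetical names (a pthread mutex stands in for shrinker_lock):

#include <pthread.h>
#include <stdlib.h>

struct object { struct object *next, *prev; };	/* embedded list links */

static pthread_mutex_t shrinker_lock = PTHREAD_MUTEX_INITIALIZER;

static void list_del_init(struct object *obj)
{
	obj->next->prev = obj->prev;
	obj->prev->next = obj->next;
	obj->next = obj->prev = obj;	/* safe to delete twice, as in the kernel */
}

static void destroy_object(struct object *obj)
{
	pthread_mutex_lock(&shrinker_lock);
	list_del_init(obj);		/* 1. unpublish under the scanner's lock */
	pthread_mutex_unlock(&shrinker_lock);

	free(obj);			/* 2. only now tear the object down */
}
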
+ */ + WARN_ON_ONCE(!list_empty(&bo->mappings.list)); + if (bo->sgts) { int i; int n_sgt = bo->base.base.size / SZ_2M; @@ -33,15 +49,73 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj) kfree(bo->sgts); } - mutex_lock(&pfdev->shrinker_lock); - if (!list_empty(&bo->base.madv_list)) - list_del(&bo->base.madv_list); - mutex_unlock(&pfdev->shrinker_lock); - drm_gem_shmem_free_object(obj); } -static int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv) +struct panfrost_gem_mapping * +panfrost_gem_mapping_get(struct panfrost_gem_object *bo, + struct panfrost_file_priv *priv) +{ + struct panfrost_gem_mapping *iter, *mapping = NULL; + + mutex_lock(&bo->mappings.lock); + list_for_each_entry(iter, &bo->mappings.list, node) { + if (iter->mmu == &priv->mmu) { + kref_get(&iter->refcount); + mapping = iter; + break; + } + } + mutex_unlock(&bo->mappings.lock); + + return mapping; +} + +static void +panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping) +{ + struct panfrost_file_priv *priv; + + if (mapping->active) + panfrost_mmu_unmap(mapping); + + priv = container_of(mapping->mmu, struct panfrost_file_priv, mmu); + spin_lock(&priv->mm_lock); + if (drm_mm_node_allocated(&mapping->mmnode)) + drm_mm_remove_node(&mapping->mmnode); + spin_unlock(&priv->mm_lock); +} + +static void panfrost_gem_mapping_release(struct kref *kref) +{ + struct panfrost_gem_mapping *mapping; + + mapping = container_of(kref, struct panfrost_gem_mapping, refcount); + + panfrost_gem_teardown_mapping(mapping); + drm_gem_object_put_unlocked(&mapping->obj->base.base); + kfree(mapping); +} + +void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping) +{ + if (!mapping) + return; + + kref_put(&mapping->refcount, panfrost_gem_mapping_release); +} + +void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo) +{ + struct panfrost_gem_mapping *mapping; + + mutex_lock(&bo->mappings.lock); + list_for_each_entry(mapping, &bo->mappings.list, node) + panfrost_gem_teardown_mapping(mapping); + mutex_unlock(&bo->mappings.lock); +} + +int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv) { int ret; size_t size = obj->size; @@ -49,6 +123,16 @@ static int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_p struct panfrost_gem_object *bo = to_panfrost_bo(obj); unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0; struct panfrost_file_priv *priv = file_priv->driver_priv; + struct panfrost_gem_mapping *mapping; + + mapping = kzalloc(sizeof(*mapping), GFP_KERNEL); + if (!mapping) + return -ENOMEM; + + INIT_LIST_HEAD(&mapping->node); + kref_init(&mapping->refcount); + drm_gem_object_get(obj); + mapping->obj = bo; /* * Executable buffers cannot cross a 16MB boundary as the program @@ -61,37 +145,48 @@ static int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_p else align = size >= SZ_2M ? 
SZ_2M >> PAGE_SHIFT : 0; - bo->mmu = &priv->mmu; + mapping->mmu = &priv->mmu; spin_lock(&priv->mm_lock); - ret = drm_mm_insert_node_generic(&priv->mm, &bo->node, + ret = drm_mm_insert_node_generic(&priv->mm, &mapping->mmnode, size >> PAGE_SHIFT, align, color, 0); spin_unlock(&priv->mm_lock); if (ret) - return ret; + goto err; if (!bo->is_heap) { - ret = panfrost_mmu_map(bo); - if (ret) { - spin_lock(&priv->mm_lock); - drm_mm_remove_node(&bo->node); - spin_unlock(&priv->mm_lock); - } + ret = panfrost_mmu_map(mapping); + if (ret) + goto err; } + + mutex_lock(&bo->mappings.lock); + WARN_ON(bo->base.madv != PANFROST_MADV_WILLNEED); + list_add_tail(&mapping->node, &bo->mappings.list); + mutex_unlock(&bo->mappings.lock); + +err: + if (ret) + panfrost_gem_mapping_put(mapping); return ret; } -static void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv) +void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv) { - struct panfrost_gem_object *bo = to_panfrost_bo(obj); struct panfrost_file_priv *priv = file_priv->driver_priv; + struct panfrost_gem_object *bo = to_panfrost_bo(obj); + struct panfrost_gem_mapping *mapping = NULL, *iter; - if (bo->is_mapped) - panfrost_mmu_unmap(bo); + mutex_lock(&bo->mappings.lock); + list_for_each_entry(iter, &bo->mappings.list, node) { + if (iter->mmu == &priv->mmu) { + mapping = iter; + list_del(&iter->node); + break; + } + } + mutex_unlock(&bo->mappings.lock); - spin_lock(&priv->mm_lock); - if (drm_mm_node_allocated(&bo->node)) - drm_mm_remove_node(&bo->node); - spin_unlock(&priv->mm_lock); + panfrost_gem_mapping_put(mapping); } static int panfrost_gem_pin(struct drm_gem_object *obj) @@ -131,6 +226,8 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t if (!obj) return NULL; + INIT_LIST_HEAD(&obj->mappings.list); + mutex_init(&obj->mappings.lock); obj->base.base.funcs = &panfrost_gem_funcs; return &obj->base.base; diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h index 50920819cc16..ca1bc9019600 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.h +++ b/drivers/gpu/drm/panfrost/panfrost_gem.h @@ -13,23 +13,46 @@ struct panfrost_gem_object { struct drm_gem_shmem_object base; struct sg_table *sgts; - struct panfrost_mmu *mmu; - struct drm_mm_node node; - bool is_mapped :1; + /* + * Use a list for now. If searching a mapping ever becomes the + * bottleneck, we should consider using an RB-tree, or even better, + * let the core store drm_gem_object_mapping entries (where we + * could place driver-specific data) instead of drm_gem_object ones + * in its drm_file->object_idr table. + * + * struct drm_gem_object_mapping { + * struct drm_gem_object *obj; + * void *driver_priv; + * }; + */ + struct { + struct list_head list; + struct mutex lock; + } mappings; + bool noexec :1; bool is_heap :1; }; +struct panfrost_gem_mapping { + struct list_head node; + struct kref refcount; + struct panfrost_gem_object *obj; + struct drm_mm_node mmnode; + struct panfrost_mmu *mmu; + bool active :1; +};
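The kref embedded in struct panfrost_gem_mapping is what lets a mapping outlive panfrost_gem_close(): the file that created the mapping holds one reference, and every in-flight job using the BO takes its own, so teardown happens only when the last user drops out. A toy, single-threaded model of that lifetime (invented names, not driver code; in the kernel, kref_get()/kref_put() plus mappings.lock provide the real atomicity):

#include <stdio.h>
#include <stdlib.h>

struct mapping {
	int refcount;
	const char *owner;
};

static struct mapping *mapping_get(struct mapping *m)
{
	m->refcount++;			/* kref_get() in the driver */
	return m;
}

static void mapping_put(struct mapping *m)
{
	if (!m)
		return;
	if (--m->refcount == 0) {	/* kref_put() firing the release callback */
		printf("tearing down mapping owned by %s\n", m->owner);
		free(m);
	}
}

int main(void)
{
	struct mapping *m = calloc(1, sizeof(*m));

	if (!m)
		return 1;
	m->refcount = 1;		/* kref_init(): the creator holds one reference */
	m->owner = "file A";

	mapping_get(m);			/* a submitted job grabs the mapping */
	mapping_put(m);			/* the file closes, dropping its reference */
	mapping_put(m);			/* job completion drops the last one */
	return 0;
}

The release step corresponds to panfrost_gem_mapping_release() above, which unmaps the pages, frees the drm_mm node and drops the BO reference only once every holder is gone.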
 static inline struct panfrost_gem_object *to_panfrost_bo(struct drm_gem_object *obj) { return container_of(to_drm_gem_shmem_obj(obj), struct panfrost_gem_object, base); } -static inline -struct panfrost_gem_object *drm_mm_node_to_panfrost_bo(struct drm_mm_node *node) +static inline struct panfrost_gem_mapping * +drm_mm_node_to_panfrost_mapping(struct drm_mm_node *node) { - return container_of(node, struct panfrost_gem_object, node); + return container_of(node, struct panfrost_gem_mapping, mmnode); } struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size); @@ -45,6 +68,16 @@ panfrost_gem_create_with_handle(struct drm_file *file_priv, u32 flags, uint32_t *handle); +int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv); +void panfrost_gem_close(struct drm_gem_object *obj, + struct drm_file *file_priv); + +struct panfrost_gem_mapping * +panfrost_gem_mapping_get(struct panfrost_gem_object *bo, + struct panfrost_file_priv *priv); +void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping); +void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo); + void panfrost_gem_shrinker_init(struct drm_device *dev); void panfrost_gem_shrinker_cleanup(struct drm_device *dev); diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c index 458f0fa68111..f5dd7b29bc95 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c @@ -39,11 +39,12 @@ panfrost_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc static bool panfrost_gem_purge(struct drm_gem_object *obj) { struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + struct panfrost_gem_object *bo = to_panfrost_bo(obj); if (!mutex_trylock(&shmem->pages_lock)) return false; - panfrost_mmu_unmap(to_panfrost_bo(obj)); + panfrost_gem_teardown_mappings(bo); drm_gem_shmem_purge_locked(obj); mutex_unlock(&shmem->pages_lock); diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c index d411eb6c8eb9..e364ee00f3d0 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.c +++ b/drivers/gpu/drm/panfrost/panfrost_job.c @@ -268,9 +268,20 @@ static void panfrost_job_cleanup(struct kref *ref) dma_fence_put(job->done_fence); dma_fence_put(job->render_done_fence); - if (job->bos) { + if (job->mappings) { for (i = 0; i < job->bo_count; i++) + panfrost_gem_mapping_put(job->mappings[i]); + kvfree(job->mappings); + } + + if (job->bos) { + struct panfrost_gem_object *bo; + + for (i = 0; i < job->bo_count; i++) { + bo = to_panfrost_bo(job->bos[i]); drm_gem_object_put_unlocked(job->bos[i]); + } + kvfree(job->bos); } diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h index 62454128a792..bbd3ba97ff67 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.h +++ b/drivers/gpu/drm/panfrost/panfrost_job.h @@ -32,6 +32,7 @@ struct panfrost_job { /* Exclusive fences we have taken from the BOs to wait for */ struct dma_fence **implicit_fences; + struct panfrost_gem_mapping **mappings; struct
drm_gem_object **bos; u32 bo_count; diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index a3ed64a1f15e..763cfca886a7 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -269,14 +269,15 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu, return 0; } -int panfrost_mmu_map(struct panfrost_gem_object *bo) +int panfrost_mmu_map(struct panfrost_gem_mapping *mapping) { + struct panfrost_gem_object *bo = mapping->obj; struct drm_gem_object *obj = &bo->base.base; struct panfrost_device *pfdev = to_panfrost_device(obj->dev); struct sg_table *sgt; int prot = IOMMU_READ | IOMMU_WRITE; - if (WARN_ON(bo->is_mapped)) + if (WARN_ON(mapping->active)) return 0; if (bo->noexec) @@ -286,25 +287,28 @@ int panfrost_mmu_map(struct panfrost_gem_object *bo) if (WARN_ON(IS_ERR(sgt))) return PTR_ERR(sgt); - mmu_map_sg(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, prot, sgt); - bo->is_mapped = true; + mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT, + prot, sgt); + mapping->active = true; return 0; } -void panfrost_mmu_unmap(struct panfrost_gem_object *bo) +void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping) { + struct panfrost_gem_object *bo = mapping->obj; struct drm_gem_object *obj = &bo->base.base; struct panfrost_device *pfdev = to_panfrost_device(obj->dev); - struct io_pgtable_ops *ops = bo->mmu->pgtbl_ops; - u64 iova = bo->node.start << PAGE_SHIFT; - size_t len = bo->node.size << PAGE_SHIFT; + struct io_pgtable_ops *ops = mapping->mmu->pgtbl_ops; + u64 iova = mapping->mmnode.start << PAGE_SHIFT; + size_t len = mapping->mmnode.size << PAGE_SHIFT; size_t unmapped_len = 0; - if (WARN_ON(!bo->is_mapped)) + if (WARN_ON(!mapping->active)) return; - dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx", bo->mmu->as, iova, len); + dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx", + mapping->mmu->as, iova, len); while (unmapped_len < len) { size_t unmapped_page; @@ -318,8 +322,9 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo) unmapped_len += pgsize; } - panfrost_mmu_flush_range(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, len); - bo->is_mapped = false; + panfrost_mmu_flush_range(pfdev, mapping->mmu, + mapping->mmnode.start << PAGE_SHIFT, len); + mapping->active = false; } static void mmu_tlb_inv_context_s1(void *cookie) @@ -394,10 +399,10 @@ void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv) free_io_pgtable_ops(mmu->pgtbl_ops); } -static struct panfrost_gem_object * -addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr) +static struct panfrost_gem_mapping * +addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr) { - struct panfrost_gem_object *bo = NULL; + struct panfrost_gem_mapping *mapping = NULL; struct panfrost_file_priv *priv; struct drm_mm_node *node; u64 offset = addr >> PAGE_SHIFT; @@ -418,8 +423,9 @@ found_mmu: drm_mm_for_each_node(node, &priv->mm) { if (offset >= node->start && offset < (node->start + node->size)) { - bo = drm_mm_node_to_panfrost_bo(node); - drm_gem_object_get(&bo->base.base); + mapping = drm_mm_node_to_panfrost_mapping(node); + + kref_get(&mapping->refcount); break; } } @@ -427,7 +433,7 @@ found_mmu: spin_unlock(&priv->mm_lock); out: spin_unlock(&pfdev->as_lock); - return bo; + return mapping; } #define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE) @@ -436,28 +442,30 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr) { int ret, i; + struct 
panfrost_gem_mapping *bomapping; struct panfrost_gem_object *bo; struct address_space *mapping; pgoff_t page_offset; struct sg_table *sgt; struct page **pages; - bo = addr_to_drm_mm_node(pfdev, as, addr); - if (!bo) + bomapping = addr_to_mapping(pfdev, as, addr); + if (!bomapping) return -ENOENT; + bo = bomapping->obj; if (!bo->is_heap) { dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)", - bo->node.start << PAGE_SHIFT); + bomapping->mmnode.start << PAGE_SHIFT); ret = -EINVAL; goto err_bo; } - WARN_ON(bo->mmu->as != as); + WARN_ON(bomapping->mmu->as != as); /* Assume 2MB alignment and size multiple */ addr &= ~((u64)SZ_2M - 1); page_offset = addr >> PAGE_SHIFT; - page_offset -= bo->node.start; + page_offset -= bomapping->mmnode.start; mutex_lock(&bo->base.pages_lock); @@ -509,13 +517,14 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, goto err_map; } - mmu_map_sg(pfdev, bo->mmu, addr, IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt); + mmu_map_sg(pfdev, bomapping->mmu, addr, + IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt); - bo->is_mapped = true; + bomapping->active = true; dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr); - drm_gem_object_put_unlocked(&bo->base.base); + panfrost_gem_mapping_put(bomapping); return 0; diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.h b/drivers/gpu/drm/panfrost/panfrost_mmu.h index 7c5b6775ae23..44fc2edf63ce 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.h +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.h @@ -4,12 +4,12 @@ #ifndef __PANFROST_MMU_H__ #define __PANFROST_MMU_H__ -struct panfrost_gem_object; +struct panfrost_gem_mapping; struct panfrost_file_priv; struct panfrost_mmu; -int panfrost_mmu_map(struct panfrost_gem_object *bo); -void panfrost_mmu_unmap(struct panfrost_gem_object *bo); +int panfrost_mmu_map(struct panfrost_gem_mapping *mapping); +void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping); int panfrost_mmu_init(struct panfrost_device *pfdev); void panfrost_mmu_fini(struct panfrost_device *pfdev); diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c index 2dba192bf198..684820448be3 100644 --- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c +++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c @@ -25,7 +25,7 @@ #define V4_SHADERS_PER_COREGROUP 4 struct panfrost_perfcnt { - struct panfrost_gem_object *bo; + struct panfrost_gem_mapping *mapping; size_t bosize; void *buf; struct panfrost_file_priv *user; @@ -49,7 +49,7 @@ static int panfrost_perfcnt_dump_locked(struct panfrost_device *pfdev) int ret; reinit_completion(&pfdev->perfcnt->dump_comp); - gpuva = pfdev->perfcnt->bo->node.start << PAGE_SHIFT; + gpuva = pfdev->perfcnt->mapping->mmnode.start << PAGE_SHIFT; gpu_write(pfdev, GPU_PERFCNT_BASE_LO, gpuva); gpu_write(pfdev, GPU_PERFCNT_BASE_HI, gpuva >> 32); gpu_write(pfdev, GPU_INT_CLEAR, @@ -67,9 +67,10 @@ static int panfrost_perfcnt_dump_locked(struct panfrost_device *pfdev) } static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev, - struct panfrost_file_priv *user, + struct drm_file *file_priv, unsigned int counterset) { + struct panfrost_file_priv *user = file_priv->driver_priv; struct panfrost_perfcnt *perfcnt = pfdev->perfcnt; struct drm_gem_shmem_object *bo; u32 cfg; @@ -88,17 +89,22 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev, if (IS_ERR(bo)) return PTR_ERR(bo); - perfcnt->bo = to_panfrost_bo(&bo->base); - /* Map the perfcnt buf in the address space attached to file_priv. 
*/ - ret = panfrost_mmu_map(perfcnt->bo); + ret = panfrost_gem_open(&bo->base, file_priv); if (ret) goto err_put_bo; + perfcnt->mapping = panfrost_gem_mapping_get(to_panfrost_bo(&bo->base), + user); + if (!perfcnt->mapping) { + ret = -EINVAL; + goto err_close_bo; + } + perfcnt->buf = drm_gem_shmem_vmap(&bo->base); if (IS_ERR(perfcnt->buf)) { ret = PTR_ERR(perfcnt->buf); - goto err_put_bo; + goto err_put_mapping; } /* @@ -153,18 +159,26 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev, if (panfrost_has_hw_issue(pfdev, HW_ISSUE_8186)) gpu_write(pfdev, GPU_PRFCNT_TILER_EN, 0xffffffff); + /* The BO ref is retained by the mapping. */ + drm_gem_object_put_unlocked(&bo->base); + return 0; err_vunmap: - drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf); + drm_gem_shmem_vunmap(&bo->base, perfcnt->buf); +err_put_mapping: + panfrost_gem_mapping_put(perfcnt->mapping); +err_close_bo: + panfrost_gem_close(&bo->base, file_priv); err_put_bo: drm_gem_object_put_unlocked(&bo->base); return ret; } static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev, - struct panfrost_file_priv *user) + struct drm_file *file_priv) { + struct panfrost_file_priv *user = file_priv->driver_priv; struct panfrost_perfcnt *perfcnt = pfdev->perfcnt; if (user != perfcnt->user) @@ -178,10 +192,11 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev, GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF)); perfcnt->user = NULL; - drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf); + drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base.base, perfcnt->buf); perfcnt->buf = NULL; - drm_gem_object_put_unlocked(&perfcnt->bo->base.base); - perfcnt->bo = NULL; + panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv); + panfrost_gem_mapping_put(perfcnt->mapping); + perfcnt->mapping = NULL; pm_runtime_mark_last_busy(pfdev->dev); pm_runtime_put_autosuspend(pfdev->dev); @@ -191,7 +206,6 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev, int panfrost_ioctl_perfcnt_enable(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct panfrost_file_priv *pfile = file_priv->driver_priv; struct panfrost_device *pfdev = dev->dev_private; struct panfrost_perfcnt *perfcnt = pfdev->perfcnt; struct drm_panfrost_perfcnt_enable *req = data; @@ -207,10 +221,10 @@ int panfrost_ioctl_perfcnt_enable(struct drm_device *dev, void *data, mutex_lock(&perfcnt->lock); if (req->enable) - ret = panfrost_perfcnt_enable_locked(pfdev, pfile, + ret = panfrost_perfcnt_enable_locked(pfdev, file_priv, req->counterset); else - ret = panfrost_perfcnt_disable_locked(pfdev, pfile); + ret = panfrost_perfcnt_disable_locked(pfdev, file_priv); mutex_unlock(&perfcnt->lock); return ret; @@ -248,15 +262,16 @@ out: return ret; } -void panfrost_perfcnt_close(struct panfrost_file_priv *pfile) +void panfrost_perfcnt_close(struct drm_file *file_priv) { + struct panfrost_file_priv *pfile = file_priv->driver_priv; struct panfrost_device *pfdev = pfile->pfdev; struct panfrost_perfcnt *perfcnt = pfdev->perfcnt; pm_runtime_get_sync(pfdev->dev); mutex_lock(&perfcnt->lock); if (perfcnt->user == pfile) - panfrost_perfcnt_disable_locked(pfdev, pfile); + panfrost_perfcnt_disable_locked(pfdev, file_priv); mutex_unlock(&perfcnt->lock); pm_runtime_mark_last_busy(pfdev->dev); pm_runtime_put_autosuspend(pfdev->dev); diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.h b/drivers/gpu/drm/panfrost/panfrost_perfcnt.h index 13b8fdaa1b43..8bbcf5f5fb33 100644 --- 
a/drivers/gpu/drm/panfrost/panfrost_perfcnt.h +++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.h @@ -9,7 +9,7 @@ void panfrost_perfcnt_sample_done(struct panfrost_device *pfdev); void panfrost_perfcnt_clean_cache_done(struct panfrost_device *pfdev); int panfrost_perfcnt_init(struct panfrost_device *pfdev); void panfrost_perfcnt_fini(struct panfrost_device *pfdev); -void panfrost_perfcnt_close(struct panfrost_file_priv *pfile); +void panfrost_perfcnt_close(struct drm_file *file_priv); int panfrost_ioctl_perfcnt_enable(struct drm_device *dev, void *data, struct drm_file *file_priv); int panfrost_ioctl_perfcnt_dump(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 098bc9f40b98..15bf8a207cb0 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -443,7 +443,7 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_ mem->bus.size); else mem->bus.addr = - ioremap_nocache(mem->bus.base + mem->bus.offset, + ioremap(mem->bus.base + mem->bus.offset, mem->bus.size); if (!mem->bus.addr) return -ENOMEM; diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.h b/drivers/gpu/drm/rockchip/cdn-dp-core.h index 83c4586665b4..81ac9b658a70 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-core.h +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.h @@ -95,7 +95,7 @@ struct cdn_dp_device { struct cdn_dp_port *port[MAX_PHY]; u8 ports; u8 max_lanes; - u8 max_rate; + unsigned int max_rate; u8 lanes; int active_port; diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c index 68289b0b063a..68261c7f8c5f 100644 --- a/drivers/gpu/drm/sti/sti_dvo.c +++ b/drivers/gpu/drm/sti/sti_dvo.c @@ -534,7 +534,7 @@ static int sti_dvo_probe(struct platform_device *pdev) DRM_ERROR("Invalid dvo resource\n"); return -ENOMEM; } - dvo->regs = devm_ioremap_nocache(dev, res->start, + dvo->regs = devm_ioremap(dev, res->start, resource_size(res)); if (!dvo->regs) return -ENOMEM; diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c index 8f7bf33815fd..2bb32009d117 100644 --- a/drivers/gpu/drm/sti/sti_hda.c +++ b/drivers/gpu/drm/sti/sti_hda.c @@ -759,14 +759,14 @@ static int sti_hda_probe(struct platform_device *pdev) DRM_ERROR("Invalid hda resource\n"); return -ENOMEM; } - hda->regs = devm_ioremap_nocache(dev, res->start, resource_size(res)); + hda->regs = devm_ioremap(dev, res->start, resource_size(res)); if (!hda->regs) return -ENOMEM; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "video-dacs-ctrl"); if (res) { - hda->video_dacs_ctrl = devm_ioremap_nocache(dev, res->start, + hda->video_dacs_ctrl = devm_ioremap(dev, res->start, resource_size(res)); if (!hda->video_dacs_ctrl) return -ENOMEM; diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c index 814560ead4e1..64ed102033c8 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.c +++ b/drivers/gpu/drm/sti/sti_hdmi.c @@ -1393,7 +1393,7 @@ static int sti_hdmi_probe(struct platform_device *pdev) ret = -ENOMEM; goto release_adapter; } - hdmi->regs = devm_ioremap_nocache(dev, res->start, resource_size(res)); + hdmi->regs = devm_ioremap(dev, res->start, resource_size(res)); if (!hdmi->regs) { ret = -ENOMEM; goto release_adapter; diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c index 5767e93dd1cd..c36a8da373cb 100644 --- a/drivers/gpu/drm/sti/sti_tvout.c +++ b/drivers/gpu/drm/sti/sti_tvout.c @@ -860,7 +860,7 @@ static int sti_tvout_probe(struct platform_device *pdev) 
DRM_ERROR("Invalid glue resource\n"); return -ENOMEM; } - tvout->regs = devm_ioremap_nocache(dev, res->start, resource_size(res)); + tvout->regs = devm_ioremap(dev, res->start, resource_size(res)); if (!tvout->regs) return -ENOMEM; diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c index 0b17ac8a3faa..5e5f82b6a5d9 100644 --- a/drivers/gpu/drm/sti/sti_vtg.c +++ b/drivers/gpu/drm/sti/sti_vtg.c @@ -393,7 +393,7 @@ static int vtg_probe(struct platform_device *pdev) DRM_ERROR("Get memory resource failed\n"); return -ENOMEM; } - vtg->regs = devm_ioremap_nocache(dev, res->start, resource_size(res)); + vtg->regs = devm_ioremap(dev, res->start, resource_size(res)); if (!vtg->regs) { DRM_ERROR("failed to remap I/O memory\n"); return -ENOMEM; diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c index a7c4654445c7..68d4644ac2dc 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c @@ -685,8 +685,6 @@ static void sun4i_hdmi_unbind(struct device *dev, struct device *master, struct sun4i_hdmi *hdmi = dev_get_drvdata(dev); cec_unregister_adapter(hdmi->cec_adap); - drm_connector_cleanup(&hdmi->connector); - drm_encoder_cleanup(&hdmi->encoder); i2c_del_adapter(hdmi->i2c); i2c_put_adapter(hdmi->ddc_i2c); clk_disable_unprepare(hdmi->mod_clk); diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index 42651d737c55..c81cdce6ed55 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c @@ -489,7 +489,7 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon, WARN_ON(!tcon->quirks->has_channel_0); - tcon->dclk_min_div = 1; + tcon->dclk_min_div = tcon->quirks->dclk_min_div; tcon->dclk_max_div = 127; sun4i_tcon0_mode_set_common(tcon, mode); @@ -1426,12 +1426,14 @@ static int sun8i_r40_tcon_tv_set_mux(struct sun4i_tcon *tcon, static const struct sun4i_tcon_quirks sun4i_a10_quirks = { .has_channel_0 = true, .has_channel_1 = true, + .dclk_min_div = 4, .set_mux = sun4i_a10_tcon_set_mux, }; static const struct sun4i_tcon_quirks sun5i_a13_quirks = { .has_channel_0 = true, .has_channel_1 = true, + .dclk_min_div = 4, .set_mux = sun5i_a13_tcon_set_mux, }; @@ -1440,6 +1442,7 @@ static const struct sun4i_tcon_quirks sun6i_a31_quirks = { .has_channel_1 = true, .has_lvds_alt = true, .needs_de_be_mux = true, + .dclk_min_div = 1, .set_mux = sun6i_tcon_set_mux, }; @@ -1447,11 +1450,13 @@ static const struct sun4i_tcon_quirks sun6i_a31s_quirks = { .has_channel_0 = true, .has_channel_1 = true, .needs_de_be_mux = true, + .dclk_min_div = 1, }; static const struct sun4i_tcon_quirks sun7i_a20_quirks = { .has_channel_0 = true, .has_channel_1 = true, + .dclk_min_div = 4, /* Same display pipeline structure as A10 */ .set_mux = sun4i_a10_tcon_set_mux, }; @@ -1459,11 +1464,13 @@ static const struct sun4i_tcon_quirks sun7i_a20_quirks = { static const struct sun4i_tcon_quirks sun8i_a33_quirks = { .has_channel_0 = true, .has_lvds_alt = true, + .dclk_min_div = 1, }; static const struct sun4i_tcon_quirks sun8i_a83t_lcd_quirks = { .supports_lvds = true, .has_channel_0 = true, + .dclk_min_div = 1, }; static const struct sun4i_tcon_quirks sun8i_a83t_tv_quirks = { @@ -1477,11 +1484,13 @@ static const struct sun4i_tcon_quirks sun8i_r40_tv_quirks = { static const struct sun4i_tcon_quirks sun8i_v3s_quirks = { .has_channel_0 = true, + .dclk_min_div = 1, }; static const struct sun4i_tcon_quirks sun9i_a80_tcon_lcd_quirks = { - .has_channel_0 = true, - .needs_edp_reset = true, + 
.has_channel_0 = true, + .needs_edp_reset = true, + .dclk_min_div = 1, }; static const struct sun4i_tcon_quirks sun9i_a80_tcon_tv_quirks = { diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h index f9f1fe80b206..a62ec826ae71 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.h +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h @@ -224,6 +224,7 @@ struct sun4i_tcon_quirks { bool needs_de_be_mux; /* sun6i needs mux to select backend */ bool needs_edp_reset; /* a80 edp reset needed for tcon0 access */ bool supports_lvds; /* Does the TCON support an LVDS output? */ + u8 dclk_min_div; /* minimum divider for TCON0 DCLK */ /* callback to handle tcon muxing options */ int (*set_mux)(struct sun4i_tcon *, const struct drm_encoder *); diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c index 2a9e67597375..a3612369750f 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c @@ -256,7 +256,7 @@ static int tilcdc_init(struct drm_driver *ddrv, struct device *dev) goto init_failed; } - priv->mmio = ioremap_nocache(res->start, resource_size(res)); + priv->mmio = ioremap(res->start, resource_size(res)); if (!priv->mmio) { dev_err(dev, "failed to ioremap\n"); ret = -ENOMEM; diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 6b0883a1776e..97fd1dafc3e8 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -218,7 +218,7 @@ static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *m if (mem->placement & TTM_PL_FLAG_WC) addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size); else - addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size); + addr = ioremap(mem->bus.base + mem->bus.offset, mem->bus.size); if (!addr) { (void) ttm_mem_io_lock(man, false); ttm_mem_io_free(bdev, mem); @@ -565,7 +565,7 @@ static int ttm_bo_ioremap(struct ttm_buffer_object *bo, map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset, size); else - map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset, + map->virtual = ioremap(bo->mem.bus.base + bo->mem.bus.offset + offset, size); } return (!map->virtual) ? 
-ENOMEM : 0; diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c index 390524143139..1635a9ff4794 100644 --- a/drivers/gpu/drm/virtio/virtgpu_plane.c +++ b/drivers/gpu/drm/virtio/virtgpu_plane.c @@ -232,6 +232,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane, if (!objs) return; virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]); + virtio_gpu_array_lock_resv(objs); virtio_gpu_cmd_transfer_to_host_2d (vgdev, 0, plane->state->crtc_w, diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c index 8063b1d567b1..e6e4c841fb06 100644 --- a/drivers/hid/hid-asus.c +++ b/drivers/hid/hid-asus.c @@ -261,7 +261,8 @@ static int asus_event(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage, __s32 value) { if ((usage->hid & HID_USAGE_PAGE) == 0xff310000 && - (usage->hid & HID_USAGE) != 0x00 && !usage->type) { + (usage->hid & HID_USAGE) != 0x00 && + (usage->hid & HID_USAGE) != 0xff && !usage->type) { hid_warn(hdev, "Unmapped Asus vendor usagepage code 0x%02x\n", usage->hid & HID_USAGE); } diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index e0b241bd3070..851fe54ea59e 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -288,6 +288,12 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign offset = report->size; report->size += parser->global.report_size * parser->global.report_count; + /* Total size check: Allow for possible report index byte */ + if (report->size > (HID_MAX_BUFFER_SIZE - 1) << 3) { + hid_err(parser->device, "report is too long\n"); + return -1; + } + if (!parser->local.usage_index) /* Ignore padding fields */ return 0; diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 7e1689ef35f5..3a400ce603c4 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -631,6 +631,7 @@ #define USB_VENDOR_ID_ITE 0x048d #define USB_DEVICE_ID_ITE_LENOVO_YOGA 0x8386 #define USB_DEVICE_ID_ITE_LENOVO_YOGA2 0x8350 +#define I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720 0x837a #define USB_DEVICE_ID_ITE_LENOVO_YOGA900 0x8396 #define USB_DEVICE_ID_ITE8595 0x8595 @@ -730,6 +731,7 @@ #define USB_DEVICE_ID_LG_MULTITOUCH 0x0064 #define USB_DEVICE_ID_LG_MELFAS_MT 0x6007 #define I2C_DEVICE_ID_LG_8001 0x8001 +#define I2C_DEVICE_ID_LG_7010 0x7010 #define USB_VENDOR_ID_LOGITECH 0x046d #define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e @@ -1102,6 +1104,7 @@ #define USB_DEVICE_ID_SYNAPTICS_LTS2 0x1d10 #define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3 #define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3 +#define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012 0x2968 #define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710 #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5 0x81a7 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 63855f275a38..dea9cc65bf80 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -1132,9 +1132,15 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel } mapped: - if (device->driver->input_mapped && device->driver->input_mapped(device, - hidinput, field, usage, &bit, &max) < 0) - goto ignore; + if (device->driver->input_mapped && + device->driver->input_mapped(device, hidinput, field, usage, + &bit, &max) < 0) { + /* + * The driver indicated that no further generic handling + * of the usage is desired. 
+ */ + return; + } set_bit(usage->type, input->evbit); @@ -1215,9 +1221,11 @@ mapped: set_bit(MSC_SCAN, input->mscbit); } -ignore: return; +ignore: + usage->type = 0; + usage->code = 0; } static void hidinput_handle_scroll(struct hid_usage *usage, diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c index a45f2352618d..c436e12feb23 100644 --- a/drivers/hid/hid-ite.c +++ b/drivers/hid/hid-ite.c @@ -40,6 +40,9 @@ static int ite_event(struct hid_device *hdev, struct hid_field *field, static const struct hid_device_id ite_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) }, { HID_USB_DEVICE(USB_VENDOR_ID_258A, USB_DEVICE_ID_258A_6A88) }, + /* ITE8595 USB kbd ctlr, with Synaptics touchpad connected to it. */ + { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, + USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012) }, { } }; MODULE_DEVICE_TABLE(hid, ite_devices); diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index cd9193078525..70e1cb928bf0 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -49,6 +49,10 @@ MODULE_PARM_DESC(disable_tap_to_click, #define HIDPP_REPORT_LONG_LENGTH 20 #define HIDPP_REPORT_VERY_LONG_MAX_LENGTH 64 +#define HIDPP_REPORT_SHORT_SUPPORTED BIT(0) +#define HIDPP_REPORT_LONG_SUPPORTED BIT(1) +#define HIDPP_REPORT_VERY_LONG_SUPPORTED BIT(2) + #define HIDPP_SUB_ID_CONSUMER_VENDOR_KEYS 0x03 #define HIDPP_SUB_ID_ROLLER 0x05 #define HIDPP_SUB_ID_MOUSE_EXTRA_BTNS 0x06 @@ -87,6 +91,7 @@ MODULE_PARM_DESC(disable_tap_to_click, #define HIDPP_CAPABILITY_HIDPP20_BATTERY BIT(1) #define HIDPP_CAPABILITY_BATTERY_MILEAGE BIT(2) #define HIDPP_CAPABILITY_BATTERY_LEVEL_STATUS BIT(3) +#define HIDPP_CAPABILITY_BATTERY_VOLTAGE BIT(4) /* * There are two hidpp protocols in use, the first version hidpp10 is known @@ -135,12 +140,15 @@ struct hidpp_report { struct hidpp_battery { u8 feature_index; u8 solar_feature_index; + u8 voltage_feature_index; struct power_supply_desc desc; struct power_supply *ps; char name[64]; int status; int capacity; int level; + int voltage; + int charge_type; bool online; }; @@ -183,9 +191,12 @@ struct hidpp_device { unsigned long quirks; unsigned long capabilities; + u8 supported_reports; struct hidpp_battery battery; struct hidpp_scroll_counter vertical_wheel_counter; + + u8 wireless_feature_index; }; /* HID++ 1.0 error codes */ @@ -340,6 +351,11 @@ static int hidpp_send_rap_command_sync(struct hidpp_device *hidpp_dev, struct hidpp_report *message; int ret, max_count; + /* Send as long report if short reports are not supported. 
*/ + if (report_id == REPORT_ID_HIDPP_SHORT && + !(hidpp_dev->supported_reports & HIDPP_REPORT_SHORT_SUPPORTED)) + report_id = REPORT_ID_HIDPP_LONG; + switch (report_id) { case REPORT_ID_HIDPP_SHORT: max_count = HIDPP_REPORT_SHORT_LENGTH - 4; @@ -393,10 +409,13 @@ static inline bool hidpp_match_error(struct hidpp_report *question, (answer->fap.params[0] == question->fap.funcindex_clientid); } -static inline bool hidpp_report_is_connect_event(struct hidpp_report *report) +static inline bool hidpp_report_is_connect_event(struct hidpp_device *hidpp, + struct hidpp_report *report) { - return (report->report_id == REPORT_ID_HIDPP_SHORT) && - (report->rap.sub_id == 0x41); + return (hidpp->wireless_feature_index && + (report->fap.feature_index == hidpp->wireless_feature_index)) || + ((report->report_id == REPORT_ID_HIDPP_SHORT) && + (report->rap.sub_id == 0x41)); } /** @@ -1222,6 +1241,144 @@ static int hidpp20_battery_event(struct hidpp_device *hidpp, return 0; } +/* -------------------------------------------------------------------------- */ +/* 0x1001: Battery voltage */ +/* -------------------------------------------------------------------------- */ + +#define HIDPP_PAGE_BATTERY_VOLTAGE 0x1001 + +#define CMD_BATTERY_VOLTAGE_GET_BATTERY_VOLTAGE 0x00 + +#define EVENT_BATTERY_VOLTAGE_STATUS_BROADCAST 0x00 + +static int hidpp20_battery_map_status_voltage(u8 data[3], int *voltage, + int *level, int *charge_type) +{ + int status; + + long charge_sts = (long)data[2]; + + *level = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN; + switch (data[2] & 0xe0) { + case 0x00: + status = POWER_SUPPLY_STATUS_CHARGING; + break; + case 0x20: + status = POWER_SUPPLY_STATUS_FULL; + *level = POWER_SUPPLY_CAPACITY_LEVEL_FULL; + break; + case 0x40: + status = POWER_SUPPLY_STATUS_DISCHARGING; + break; + case 0xe0: + status = POWER_SUPPLY_STATUS_NOT_CHARGING; + break; + default: + status = POWER_SUPPLY_STATUS_UNKNOWN; + } + + *charge_type = POWER_SUPPLY_CHARGE_TYPE_STANDARD; + if (test_bit(3, &charge_sts)) { + *charge_type = POWER_SUPPLY_CHARGE_TYPE_FAST; + } + if (test_bit(4, &charge_sts)) { + *charge_type = POWER_SUPPLY_CHARGE_TYPE_TRICKLE; + } + + if (test_bit(5, &charge_sts)) { + *level = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL; + } + + *voltage = get_unaligned_be16(data); + + return status; +}
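The status byte decoded above packs several fields: the top three bits select the charge state (hence the 0xe0 mask), bit 3 flags fast charging, bit 4 trickle charging, and bit 5 doubles as a critical-level indicator, while bytes 0 and 1 carry the voltage in millivolts, big-endian. A standalone sanity check of that mapping (the payload bytes are invented for illustration):

#include <stdio.h>

int main(void)
{
	unsigned char data[3] = { 0x0f, 0xa0, 0x08 };	/* made-up broadcast payload */
	int voltage_mv = (data[0] << 8) | data[1];	/* as get_unaligned_be16() */
	const char *state;

	switch (data[2] & 0xe0) {
	case 0x00: state = "charging"; break;
	case 0x20: state = "full"; break;
	case 0x40: state = "discharging"; break;
	case 0xe0: state = "not charging"; break;
	default:   state = "unknown";
	}

	/* prints "4000 mV, charging, fast" for the bytes above */
	printf("%d mV, %s%s%s%s\n", voltage_mv, state,
	       (data[2] & 0x08) ? ", fast" : "",
	       (data[2] & 0x10) ? ", trickle" : "",
	       (data[2] & 0x20) ? ", critical level" : "");
	return 0;
}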
+ +static int hidpp20_battery_get_battery_voltage(struct hidpp_device *hidpp, + u8 feature_index, + int *status, int *voltage, + int *level, int *charge_type) +{ + struct hidpp_report response; + int ret; + u8 *params = (u8 *)response.fap.params; + + ret = hidpp_send_fap_command_sync(hidpp, feature_index, + CMD_BATTERY_VOLTAGE_GET_BATTERY_VOLTAGE, + NULL, 0, &response); + + if (ret > 0) { + hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n", + __func__, ret); + return -EPROTO; + } + if (ret) + return ret; + + hidpp->capabilities |= HIDPP_CAPABILITY_BATTERY_VOLTAGE; + + *status = hidpp20_battery_map_status_voltage(params, voltage, + level, charge_type); + + return 0; +} + +static int hidpp20_query_battery_voltage_info(struct hidpp_device *hidpp) +{ + u8 feature_type; + int ret; + int status, voltage, level, charge_type; + + if (hidpp->battery.voltage_feature_index == 0xff) { + ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_BATTERY_VOLTAGE, + &hidpp->battery.voltage_feature_index, + &feature_type); + if (ret) + return ret; + } + + ret = hidpp20_battery_get_battery_voltage(hidpp, + hidpp->battery.voltage_feature_index, + &status, &voltage, &level, &charge_type); + + if (ret) + return ret; + + hidpp->battery.status = status; + hidpp->battery.voltage = voltage; + hidpp->battery.level = level; + hidpp->battery.charge_type = charge_type; + hidpp->battery.online = status != POWER_SUPPLY_STATUS_NOT_CHARGING; + + return 0; +} + +static int hidpp20_battery_voltage_event(struct hidpp_device *hidpp, + u8 *data, int size) +{ + struct hidpp_report *report = (struct hidpp_report *)data; + int status, voltage, level, charge_type; + + if (report->fap.feature_index != hidpp->battery.voltage_feature_index || + report->fap.funcindex_clientid != EVENT_BATTERY_VOLTAGE_STATUS_BROADCAST) + return 0; + + status = hidpp20_battery_map_status_voltage(report->fap.params, &voltage, + &level, &charge_type); + + hidpp->battery.online = status != POWER_SUPPLY_STATUS_NOT_CHARGING; + + if (voltage != hidpp->battery.voltage || status != hidpp->battery.status) { + hidpp->battery.voltage = voltage; + hidpp->battery.status = status; + hidpp->battery.level = level; + hidpp->battery.charge_type = charge_type; + if (hidpp->battery.ps) + power_supply_changed(hidpp->battery.ps); + } + return 0; +} + static enum power_supply_property hidpp_battery_props[] = { POWER_SUPPLY_PROP_ONLINE, POWER_SUPPLY_PROP_STATUS, @@ -1231,6 +1388,7 @@ static enum power_supply_property hidpp_battery_props[] = { POWER_SUPPLY_PROP_SERIAL_NUMBER, 0, /* placeholder for POWER_SUPPLY_PROP_CAPACITY, */ 0, /* placeholder for POWER_SUPPLY_PROP_CAPACITY_LEVEL, */ + 0, /* placeholder for POWER_SUPPLY_PROP_VOLTAGE_NOW, */ }; static int hidpp_battery_get_property(struct power_supply *psy, @@ -1268,6 +1426,13 @@ static int hidpp_battery_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_SERIAL_NUMBER: val->strval = hidpp->hid_dev->uniq; break; + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + /* hardware reports voltage in mV. sysfs expects uV */ + val->intval = hidpp->battery.voltage * 1000; + break; + case POWER_SUPPLY_PROP_CHARGE_TYPE: + val->intval = hidpp->battery.charge_type; + break; default: ret = -EINVAL; break; @@ -1277,6 +1442,24 @@ static int hidpp_battery_get_property(struct power_supply *psy, } /* -------------------------------------------------------------------------- */ +/* 0x1d4b: Wireless device status */ +/* -------------------------------------------------------------------------- */ +#define HIDPP_PAGE_WIRELESS_DEVICE_STATUS 0x1d4b + +static int hidpp_set_wireless_feature_index(struct hidpp_device *hidpp) +{ + u8 feature_type; + int ret; + + ret = hidpp_root_get_feature(hidpp, + HIDPP_PAGE_WIRELESS_DEVICE_STATUS, + &hidpp->wireless_feature_index, + &feature_type); + + return ret; +} + +/* -------------------------------------------------------------------------- */ /* 0x2120: Hi-resolution scrolling */ /* -------------------------------------------------------------------------- */ @@ -3091,7 +3274,7 @@ static int hidpp_raw_hidpp_event(struct hidpp_device *hidpp, u8 *data, } } - if (unlikely(hidpp_report_is_connect_event(report))) { + if (unlikely(hidpp_report_is_connect_event(hidpp, report))) { atomic_set(&hidpp->connected, !(report->rap.params[0] & (1 << 6))); if (schedule_work(&hidpp->work) == 0) @@ -3106,6 +3289,9 @@ static int hidpp_raw_hidpp_event(struct hidpp_device *hidpp, u8 *data, ret = hidpp_solar_battery_event(hidpp, data, size); if (ret != 0) return ret; + ret = hidpp20_battery_voltage_event(hidpp, data, size); + if (ret != 0) + return ret; } if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP10_BATTERY) { @@ -3227,12 +3413,16 @@ static int hidpp_initialize_battery(struct hidpp_device *hidpp) hidpp->battery.feature_index = 0xff;
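The placeholder entries appended to hidpp_battery_props above work together with the ARRAY_SIZE(hidpp_battery_props) - 3 arithmetic further down: the static table reserves dummy tail slots, probe takes a copy, and the optional properties are written into those slots only for capabilities the device actually reported. A standalone sketch of the same pattern (names and probe results are invented):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum prop { PROP_ONLINE = 1, PROP_STATUS, PROP_CAPACITY, PROP_VOLTAGE_NOW };

static const enum prop base_props[] = {
	PROP_ONLINE,
	PROP_STATUS,
	0, /* placeholder for PROP_CAPACITY */
	0, /* placeholder for PROP_VOLTAGE_NOW */
};

int main(void)
{
	int has_capacity = 0, has_voltage = 1;	/* pretend probe results */
	size_t n = sizeof(base_props) / sizeof(base_props[0]) - 2; /* drop placeholders */
	enum prop *props = malloc(sizeof(base_props));

	if (!props)
		return 1;
	memcpy(props, base_props, sizeof(base_props));
	if (has_capacity)
		props[n++] = PROP_CAPACITY;
	if (has_voltage)
		props[n++] = PROP_VOLTAGE_NOW;

	printf("registering %zu properties\n", n);	/* 3 with these results */
	free(props);
	return 0;
}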
hidpp->battery.solar_feature_index = 0xff; + hidpp->battery.voltage_feature_index = 0xff; if (hidpp->protocol_major >= 2) { if (hidpp->quirks & HIDPP_QUIRK_CLASS_K750) ret = hidpp_solar_request_battery_event(hidpp); - else - ret = hidpp20_query_battery_info(hidpp); + else { + ret = hidpp20_query_battery_voltage_info(hidpp); + if (ret) + ret = hidpp20_query_battery_info(hidpp); + } if (ret) return ret; @@ -3257,7 +3447,7 @@ static int hidpp_initialize_battery(struct hidpp_device *hidpp) if (!battery_props) return -ENOMEM; - num_battery_props = ARRAY_SIZE(hidpp_battery_props) - 2; + num_battery_props = ARRAY_SIZE(hidpp_battery_props) - 3; if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_MILEAGE) battery_props[num_battery_props++] = @@ -3267,6 +3457,10 @@ static int hidpp_initialize_battery(struct hidpp_device *hidpp) battery_props[num_battery_props++] = POWER_SUPPLY_PROP_CAPACITY_LEVEL; + if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_VOLTAGE) + battery_props[num_battery_props++] = + POWER_SUPPLY_PROP_VOLTAGE_NOW; + battery = &hidpp->battery; n = atomic_inc_return(&battery_no) - 1; @@ -3430,7 +3624,10 @@ static void hidpp_connect_event(struct hidpp_device *hidpp) else hidpp10_query_battery_status(hidpp); } else if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP20_BATTERY) { - hidpp20_query_battery_info(hidpp); + if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_VOLTAGE) + hidpp20_query_battery_voltage_info(hidpp); + else + hidpp20_query_battery_info(hidpp); } if (hidpp->battery.ps) power_supply_changed(hidpp->battery.ps); @@ -3481,10 +3678,11 @@ static int hidpp_get_report_length(struct hid_device *hdev, int id) return report->field[0]->report_count + 1; } -static bool hidpp_validate_device(struct hid_device *hdev) +static u8 hidpp_validate_device(struct hid_device *hdev) { struct hidpp_device *hidpp = hid_get_drvdata(hdev); - int id, report_length, supported_reports = 0; + int id, report_length; + u8 supported_reports = 0; id = REPORT_ID_HIDPP_SHORT; report_length = hidpp_get_report_length(hdev, id); @@ -3492,7 +3690,7 @@ static bool hidpp_validate_device(struct hid_device *hdev) if (report_length < HIDPP_REPORT_SHORT_LENGTH) goto bad_device; - supported_reports++; + supported_reports |= HIDPP_REPORT_SHORT_SUPPORTED; } id = REPORT_ID_HIDPP_LONG; @@ -3501,7 +3699,7 @@ static bool hidpp_validate_device(struct hid_device *hdev) if (report_length < HIDPP_REPORT_LONG_LENGTH) goto bad_device; - supported_reports++; + supported_reports |= HIDPP_REPORT_LONG_SUPPORTED; } id = REPORT_ID_HIDPP_VERY_LONG; @@ -3511,7 +3709,7 @@ static bool hidpp_validate_device(struct hid_device *hdev) report_length > HIDPP_REPORT_VERY_LONG_MAX_LENGTH) goto bad_device; - supported_reports++; + supported_reports |= HIDPP_REPORT_VERY_LONG_SUPPORTED; hidpp->very_long_report_length = report_length; } @@ -3560,7 +3758,9 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) /* * Make sure the device is HID++ capable, otherwise treat as generic HID */ - if (!hidpp_validate_device(hdev)) { + hidpp->supported_reports = hidpp_validate_device(hdev); + + if (!hidpp->supported_reports) { hid_set_drvdata(hdev, NULL); devm_kfree(&hdev->dev, hidpp); return hid_hw_start(hdev, HID_CONNECT_DEFAULT); @@ -3617,7 +3817,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) if (ret < 0) { dev_err(&hdev->dev, "%s:hid_hw_open returned error:%d\n", __func__, ret); - hid_hw_stop(hdev); goto hid_hw_open_fail; } @@ -3639,6 +3838,14 @@ static int hidpp_probe(struct hid_device *hdev, 
const struct hid_device_id *id) hidpp_overwrite_name(hdev); } + if (connected && hidpp->protocol_major >= 2) { + ret = hidpp_set_wireless_feature_index(hidpp); + if (ret == -ENOENT) + hidpp->wireless_feature_index = 0; + else if (ret) + goto hid_hw_init_fail; + } + if (connected && (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)) { ret = wtp_get_config(hidpp); if (ret) @@ -3752,6 +3959,8 @@ static const struct hid_device_id hidpp_devices[] = { { LDJ_DEVICE(0x4071), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, { /* Mouse Logitech MX Master 2S */ LDJ_DEVICE(0x4069), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, + { /* Mouse Logitech MX Master 3 */ + LDJ_DEVICE(0x4082), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, { /* Mouse Logitech Performance MX */ LDJ_DEVICE(0x101a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 }, { /* Keyboard logitech K400 */ @@ -3808,6 +4017,14 @@ static const struct hid_device_id hidpp_devices[] = { { /* MX5500 keyboard over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb30b), .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS }, + { /* MX Master mouse over Bluetooth */ + HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb012), + .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01e), + .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, + { /* MX Master 3 mouse over Bluetooth */ + HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb023), + .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, {} }; diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 3cfeb1629f79..362805ddf377 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -1019,7 +1019,7 @@ static int mt_process_slot(struct mt_device *td, struct input_dev *input, tool = MT_TOOL_DIAL; else if (unlikely(!confidence_state)) { tool = MT_TOOL_PALM; - if (!active && + if (!active && mt && input_mt_is_active(&mt->slots[slotnum])) { /* * The non-confidence was reported for @@ -1985,6 +1985,9 @@ static const struct hid_device_id mt_devices[] = { { .driver_data = MT_CLS_LG, HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MELFAS_MT) }, + { .driver_data = MT_CLS_LG, + HID_DEVICE(BUS_I2C, HID_GROUP_GENERIC, + USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_7010) }, /* MosArt panels */ { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE, diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index d1b39c29e353..0e7b2d998395 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -174,6 +174,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD2, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE), HID_QUIRK_MULTI_INPUT }, { 0 } }; diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c index 8dae0f9b819e..6286204d4c56 100644 --- a/drivers/hid/hid-steam.c +++ b/drivers/hid/hid-steam.c @@ -768,8 +768,12 @@ static int steam_probe(struct hid_device *hdev, if (steam->quirks & STEAM_QUIRK_WIRELESS) { hid_info(hdev, "Steam wireless receiver connected"); + /* If using a wireless adaptor ask for connection status */ + steam->connected = false; steam_request_conn_status(steam); } else { + /* A wired connection is always present */ + 
steam->connected = true; ret = steam_register(steam); if (ret) { hid_err(hdev, diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index c3fc0ceb8096..2eee5e31c2b7 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c @@ -249,13 +249,14 @@ out: static __poll_t hidraw_poll(struct file *file, poll_table *wait) { struct hidraw_list *list = file->private_data; + __poll_t mask = EPOLLOUT | EPOLLWRNORM; /* hidraw is always writable */ poll_wait(file, &list->hidraw->wait, wait); if (list->head != list->tail) - return EPOLLIN | EPOLLRDNORM | EPOLLOUT; + mask |= EPOLLIN | EPOLLRDNORM; if (!list->hidraw->exist) - return EPOLLERR | EPOLLHUP; - return 0; + mask |= EPOLLERR | EPOLLHUP; + return mask; } static int hidraw_open(struct inode *inode, struct file *file) @@ -450,6 +451,15 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd, -EFAULT : len; break; } + + if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGRAWUNIQ(0))) { + int len = strlen(hid->uniq) + 1; + if (len > _IOC_SIZE(cmd)) + len = _IOC_SIZE(cmd); + ret = copy_to_user(user_arg, hid->uniq, len) ? + -EFAULT : len; + break; + } } ret = -ENOTTY; diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c index a358e61fbc82..009000c5d55c 100644 --- a/drivers/hid/i2c-hid/i2c-hid-core.c +++ b/drivers/hid/i2c-hid/i2c-hid-core.c @@ -49,6 +49,8 @@ #define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1) #define I2C_HID_QUIRK_BOGUS_IRQ BIT(4) #define I2C_HID_QUIRK_RESET_ON_RESUME BIT(5) +#define I2C_HID_QUIRK_BAD_INPUT_SIZE BIT(6) + /* flags */ #define I2C_HID_STARTED 0 @@ -175,6 +177,8 @@ static const struct i2c_hid_quirks { I2C_HID_QUIRK_BOGUS_IRQ }, { USB_VENDOR_ID_ALPS_JP, HID_ANY_ID, I2C_HID_QUIRK_RESET_ON_RESUME }, + { USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720, + I2C_HID_QUIRK_BAD_INPUT_SIZE }, { 0, 0 } }; @@ -496,9 +500,15 @@ static void i2c_hid_get_input(struct i2c_hid *ihid) } if ((ret_size > size) || (ret_size < 2)) { - dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n", - __func__, size, ret_size); - return; + if (ihid->quirks & I2C_HID_QUIRK_BAD_INPUT_SIZE) { + ihid->inbuf[0] = size & 0xff; + ihid->inbuf[1] = size >> 8; + ret_size = size; + } else { + dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n", + __func__, size, ret_size); + return; + } } i2c_hid_dbg(ihid, "input: %*ph\n", ret_size, ihid->inbuf); diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h index 6c1e6110867f..1fb294ca463e 100644 --- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h +++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h @@ -24,7 +24,9 @@ #define ICL_MOBILE_DEVICE_ID 0x34FC #define SPT_H_DEVICE_ID 0xA135 #define CML_LP_DEVICE_ID 0x02FC +#define CMP_H_DEVICE_ID 0x06FC #define EHL_Ax_DEVICE_ID 0x4BB3 +#define TGL_LP_DEVICE_ID 0xA0FC #define REVISION_ID_CHT_A0 0x6 #define REVISION_ID_CHT_Ax_SI 0x0 diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c index 784dcc8c7022..f491d8b4e24c 100644 --- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c +++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c @@ -34,7 +34,9 @@ static const struct pci_device_id ish_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CML_LP_DEVICE_ID)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CMP_H_DEVICE_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, EHL_Ax_DEVICE_ID)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, TGL_LP_DEVICE_ID)}, {0, } }; MODULE_DEVICE_TABLE(pci, ish_pci_tbl); diff --git 
a/drivers/hid/uhid.c b/drivers/hid/uhid.c index fa0cc0899827..8fe3efcb8327 100644 --- a/drivers/hid/uhid.c +++ b/drivers/hid/uhid.c @@ -766,13 +766,14 @@ unlock: static __poll_t uhid_char_poll(struct file *file, poll_table *wait) { struct uhid_device *uhid = file->private_data; + __poll_t mask = EPOLLOUT | EPOLLWRNORM; /* uhid is always writable */ poll_wait(file, &uhid->waitq, wait); if (uhid->head != uhid->tail) - return EPOLLIN | EPOLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; - return 0; + return mask; } static const struct file_operations uhid_fops = { diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index e421cdf2d1a4..a970b809d778 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c @@ -241,12 +241,51 @@ static int hiddev_release(struct inode * inode, struct file * file) return 0; } +static int __hiddev_open(struct hiddev *hiddev, struct file *file) +{ + struct hiddev_list *list; + int error; + + lockdep_assert_held(&hiddev->existancelock); + + list = vzalloc(sizeof(*list)); + if (!list) + return -ENOMEM; + + mutex_init(&list->thread_lock); + list->hiddev = hiddev; + + if (!hiddev->open++) { + error = hid_hw_power(hiddev->hid, PM_HINT_FULLON); + if (error < 0) + goto err_drop_count; + + error = hid_hw_open(hiddev->hid); + if (error < 0) + goto err_normal_power; + } + + spin_lock_irq(&hiddev->list_lock); + list_add_tail(&list->node, &hiddev->list); + spin_unlock_irq(&hiddev->list_lock); + + file->private_data = list; + + return 0; + +err_normal_power: + hid_hw_power(hiddev->hid, PM_HINT_NORMAL); +err_drop_count: + hiddev->open--; + vfree(list); + return error; +} + /* * open file op */ static int hiddev_open(struct inode *inode, struct file *file) { - struct hiddev_list *list; struct usb_interface *intf; struct hid_device *hid; struct hiddev *hiddev; @@ -255,66 +294,14 @@ static int hiddev_open(struct inode *inode, struct file *file) intf = usbhid_find_interface(iminor(inode)); if (!intf) return -ENODEV; + hid = usb_get_intfdata(intf); hiddev = hid->hiddev; - if (!(list = vzalloc(sizeof(struct hiddev_list)))) - return -ENOMEM; - mutex_init(&list->thread_lock); - list->hiddev = hiddev; - file->private_data = list; - - /* - * no need for locking because the USB major number - * is shared which usbcore guards against disconnect - */ - if (list->hiddev->exist) { - if (!list->hiddev->open++) { - res = hid_hw_open(hiddev->hid); - if (res < 0) - goto bail; - } - } else { - res = -ENODEV; - goto bail; - } - - spin_lock_irq(&list->hiddev->list_lock); - list_add_tail(&list->node, &hiddev->list); - spin_unlock_irq(&list->hiddev->list_lock); - mutex_lock(&hiddev->existancelock); - /* - * recheck exist with existance lock held to - * avoid opening a disconnected device - */ - if (!list->hiddev->exist) { - res = -ENODEV; - goto bail_unlock; - } - if (!list->hiddev->open++) - if (list->hiddev->exist) { - struct hid_device *hid = hiddev->hid; - res = hid_hw_power(hid, PM_HINT_FULLON); - if (res < 0) - goto bail_unlock; - res = hid_hw_open(hid); - if (res < 0) - goto bail_normal_power; - } - mutex_unlock(&hiddev->existancelock); - return 0; -bail_normal_power: - hid_hw_power(hid, PM_HINT_NORMAL); -bail_unlock: + res = hiddev->exist ? 
__hiddev_open(hiddev, file) : -ENODEV; mutex_unlock(&hiddev->existancelock); - spin_lock_irq(&list->hiddev->list_lock); - list_del(&list->node); - spin_unlock_irq(&list->hiddev->list_lock); -bail: - file->private_data = NULL; - vfree(list); return res; } diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index ccb74529bc78..d99a9d407671 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -2096,14 +2096,16 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field (hdev->product == 0x34d || hdev->product == 0x34e || /* MobileStudio Pro */ hdev->product == 0x357 || hdev->product == 0x358 || /* Intuos Pro 2 */ hdev->product == 0x392 || /* Intuos Pro 2 */ - hdev->product == 0x398 || hdev->product == 0x399)) { /* MobileStudio Pro */ + hdev->product == 0x398 || hdev->product == 0x399 || /* MobileStudio Pro */ + hdev->product == 0x3AA)) { /* MobileStudio Pro */ value = (field->logical_maximum - value); if (hdev->product == 0x357 || hdev->product == 0x358 || hdev->product == 0x392) value = wacom_offset_rotation(input, usage, value, 3, 16); else if (hdev->product == 0x34d || hdev->product == 0x34e || - hdev->product == 0x398 || hdev->product == 0x399) + hdev->product == 0x398 || hdev->product == 0x399 || + hdev->product == 0x3AA) value = wacom_offset_rotation(input, usage, value, 1, 2); } else { diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c index 766bd8457346..296f9098c9e4 100644 --- a/drivers/hv/hv_util.c +++ b/drivers/hv/hv_util.c @@ -211,7 +211,7 @@ static struct timespec64 hv_get_adj_host_time(void) unsigned long flags; spin_lock_irqsave(&host_ts.lock, flags); - reftime = hyperv_cs->read(hyperv_cs); + reftime = hv_read_reference_counter(); newtime = host_ts.host_time + (reftime - host_ts.ref_time); ts = ns_to_timespec64((newtime - WLTIMEDELTA) * 100); spin_unlock_irqrestore(&host_ts.lock, flags); @@ -250,7 +250,7 @@ static inline void adj_guesttime(u64 hosttime, u64 reftime, u8 adj_flags) */ spin_lock_irqsave(&host_ts.lock, flags); - cur_reftime = hyperv_cs->read(hyperv_cs); + cur_reftime = hv_read_reference_counter(); host_ts.host_time = hosttime; host_ts.ref_time = cur_reftime; @@ -315,7 +315,7 @@ static void timesync_onchannelcallback(void *context) sizeof(struct vmbuspipe_hdr) + sizeof(struct icmsg_hdr)]; adj_guesttime(timedatap->parenttime, - hyperv_cs->read(hyperv_cs), + hv_read_reference_counter(), timedatap->flags); } } @@ -524,7 +524,7 @@ static struct ptp_clock *hv_ptp_clock; static int hv_timesync_init(struct hv_util_service *srv) { /* TimeSync requires Hyper-V clocksource. */ - if (!hyperv_cs) + if (!hv_read_reference_counter) return -ENODEV; spin_lock_init(&host_ts.lock); diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 23dfe848979a..47ac20aee06f 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -164,6 +164,16 @@ config SENSORS_ADM1031 This driver can also be built as a module. If so, the module will be called adm1031. +config SENSORS_ADM1177 + tristate "Analog Devices ADM1177 and compatibles" + depends on I2C + help + If you say yes here you get support for Analog Devices ADM1177 + sensor chips. + + This driver can also be built as a module. If so, the module + will be called adm1177. + config SENSORS_ADM9240 tristate "Analog Devices ADM9240 and compatibles" depends on I2C @@ -385,6 +395,16 @@ config SENSORS_ATXP1 This driver can also be built as a module. If so, the module will be called atxp1. 
+config SENSORS_DRIVETEMP + tristate "Hard disk drives with temperature sensors" + depends on SCSI && ATA + help + If you say yes you get support for the temperature sensor on + hard disk drives. + + This driver can also be built as a module. If so, the module + will be called drivetemp. + config SENSORS_DS620 tristate "Dallas Semiconductor DS620" depends on I2C @@ -889,7 +909,7 @@ config SENSORS_MAX197 will be called max197. config SENSORS_MAX31722 -tristate "MAX31722 temperature sensor" + tristate "MAX31722 temperature sensor" depends on SPI help Support for the Maxim Integrated MAX31722/MAX31723 digital @@ -898,6 +918,16 @@ tristate "MAX31722 temperature sensor" This driver can also be built as a module. If so, the module will be called max31722. +config SENSORS_MAX31730 + tristate "MAX31730 temperature sensor" + depends on I2C + help + Support for the Maxim Integrated MAX31730 3-Channel Remote + Temperature Sensor. + + This driver can also be built as a module. If so, the module + will be called max31730. + config SENSORS_MAX6621 tristate "Maxim MAX6621 sensor chip" depends on I2C @@ -1905,7 +1935,7 @@ config SENSORS_W83627HF will be called w83627hf. config SENSORS_W83627EHF - tristate "Winbond W83627EHF/EHG/DHG/UHG, W83667HG, NCT6775F, NCT6776F" + tristate "Winbond W83627EHF/EHG/DHG/UHG, W83667HG" depends on !PPC select HWMON_VID help @@ -1918,8 +1948,7 @@ config SENSORS_W83627EHF the Core 2 Duo. And also the W83627UHG, which is a stripped down version of the W83627DHG (as far as hardware monitoring goes.) - This driver also supports Nuvoton W83667HG, W83667HG-B, NCT6775F - (also known as W83667HG-I), and NCT6776F. + This driver also supports Nuvoton W83667HG and W83667HG-B. This driver can also be built as a module. If so, the module will be called w83627ehf. diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index 6db5db9cdc29..613f50987965 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -34,6 +34,7 @@ obj-$(CONFIG_SENSORS_ADM1025) += adm1025.o obj-$(CONFIG_SENSORS_ADM1026) += adm1026.o obj-$(CONFIG_SENSORS_ADM1029) += adm1029.o obj-$(CONFIG_SENSORS_ADM1031) += adm1031.o +obj-$(CONFIG_SENSORS_ADM1177) += adm1177.o obj-$(CONFIG_SENSORS_ADM9240) += adm9240.o obj-$(CONFIG_SENSORS_ADS7828) += ads7828.o obj-$(CONFIG_SENSORS_ADS7871) += ads7871.o @@ -56,6 +57,7 @@ obj-$(CONFIG_SENSORS_DA9052_ADC)+= da9052-hwmon.o obj-$(CONFIG_SENSORS_DA9055)+= da9055-hwmon.o obj-$(CONFIG_SENSORS_DELL_SMM) += dell-smm-hwmon.o obj-$(CONFIG_SENSORS_DME1737) += dme1737.o +obj-$(CONFIG_SENSORS_DRIVETEMP) += drivetemp.o obj-$(CONFIG_SENSORS_DS620) += ds620.o obj-$(CONFIG_SENSORS_DS1621) += ds1621.o obj-$(CONFIG_SENSORS_EMC1403) += emc1403.o @@ -123,6 +125,7 @@ obj-$(CONFIG_SENSORS_MAX1619) += max1619.o obj-$(CONFIG_SENSORS_MAX1668) += max1668.o obj-$(CONFIG_SENSORS_MAX197) += max197.o obj-$(CONFIG_SENSORS_MAX31722) += max31722.o +obj-$(CONFIG_SENSORS_MAX31730) += max31730.o obj-$(CONFIG_SENSORS_MAX6621) += max6621.o obj-$(CONFIG_SENSORS_MAX6639) += max6639.o obj-$(CONFIG_SENSORS_MAX6642) += max6642.o diff --git a/drivers/hwmon/adm1177.c b/drivers/hwmon/adm1177.c new file mode 100644 index 000000000000..d314223a404a --- /dev/null +++ b/drivers/hwmon/adm1177.c @@ -0,0 +1,288 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ADM1177 Hot Swap Controller and Digital Power Monitor with Soft Start Pin + * + * Copyright 2015-2019 Analog Devices Inc. + */ + +#include <linux/bits.h> +#include <linux/device.h> +#include <linux/hwmon.h> +#include <linux/i2c.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/regulator/consumer.h> + +/* Command Byte Operations */ +#define ADM1177_CMD_V_CONT BIT(0) +#define ADM1177_CMD_I_CONT BIT(2) +#define ADM1177_CMD_VRANGE BIT(4) + +/* Extended Register */ +#define ADM1177_REG_ALERT_TH 2 + +#define ADM1177_BITS 12 + +/** + * struct adm1177_state - driver instance specific data + * @client pointer to i2c client + * @reg regulator info for the power supply of the device + * @r_sense_uohm current sense resistor value + * @alert_threshold_ua current limit for shutdown + * @vrange_high internal voltage divider + */ +struct adm1177_state { + struct i2c_client *client; + struct regulator *reg; + u32 r_sense_uohm; + u32 alert_threshold_ua; + bool vrange_high; +}; + +static int adm1177_read_raw(struct adm1177_state *st, u8 num, u8 *data) +{ + return i2c_master_recv(st->client, data, num); +} + +static int adm1177_write_cmd(struct adm1177_state *st, u8 cmd) +{ + return i2c_smbus_write_byte(st->client, cmd); +} + +static int adm1177_write_alert_thr(struct adm1177_state *st, + u32 alert_threshold_ua) +{ + u64 val; + int ret; + + val = 0xFFULL * alert_threshold_ua * st->r_sense_uohm; + val = div_u64(val, 105840000U); + val = div_u64(val, 1000U); + if (val > 0xFF) + val = 0xFF; + + ret = i2c_smbus_write_byte_data(st->client, ADM1177_REG_ALERT_TH, + val); + if (ret) + return ret; + + st->alert_threshold_ua = alert_threshold_ua; + return 0; +}
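The fixed-point scaling above is easier to trust with numbers plugged in. Full scale across the sense input is 105.84 mV, so a hypothetical 50 mohm shunt gives 2.1168 A full scale, and a 1 A limit should encode as 255 * 1 / 2.1168, roughly 120. A standalone recomputation of the register value (the shunt and limit are assumptions for illustration, not datasheet defaults):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t r_sense_uohm = 50000;		/* assumed 50 mohm shunt */
	uint64_t alert_threshold_ua = 1000000;	/* assumed 1 A limit */
	uint64_t val = 0xFFULL * alert_threshold_ua * r_sense_uohm;

	val /= 105840000U;	/* full-scale sense voltage, expressed in nV */
	val /= 1000U;		/* uA * uohm yields pV; shed the leftover factor */
	if (val > 0xFF)
		val = 0xFF;

	printf("ALERT_TH code = %llu\n", (unsigned long long)val);	/* 120 */
	return 0;
}

The same 105840000 constant reappears in adm1177_read() below, where the 12-bit current sample is scaled the other way, from raw counts back to milliamperes.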
+ +#include <linux/bits.h> +#include <linux/device.h> +#include <linux/hwmon.h> +#include <linux/i2c.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/regulator/consumer.h> + +/* Command Byte Operations */ +#define ADM1177_CMD_V_CONT BIT(0) +#define ADM1177_CMD_I_CONT BIT(2) +#define ADM1177_CMD_VRANGE BIT(4) + +/* Extended Register */ +#define ADM1177_REG_ALERT_TH 2 + +#define ADM1177_BITS 12 + +/** + * struct adm1177_state - driver instance specific data + * @client: pointer to i2c client + * @reg: regulator info for the power supply of the device + * @r_sense_uohm: current sense resistor value + * @alert_threshold_ua: current limit for shutdown + * @vrange_high: internal voltage divider + */ +struct adm1177_state { + struct i2c_client *client; + struct regulator *reg; + u32 r_sense_uohm; + u32 alert_threshold_ua; + bool vrange_high; +}; + +static int adm1177_read_raw(struct adm1177_state *st, u8 num, u8 *data) +{ + return i2c_master_recv(st->client, data, num); +} + +static int adm1177_write_cmd(struct adm1177_state *st, u8 cmd) +{ + return i2c_smbus_write_byte(st->client, cmd); +} + +static int adm1177_write_alert_thr(struct adm1177_state *st, + u32 alert_threshold_ua) +{ + u64 val; + int ret; + + val = 0xFFULL * alert_threshold_ua * st->r_sense_uohm; + val = div_u64(val, 105840000U); + val = div_u64(val, 1000U); + if (val > 0xFF) + val = 0xFF; + + ret = i2c_smbus_write_byte_data(st->client, ADM1177_REG_ALERT_TH, + val); + if (ret) + return ret; + + st->alert_threshold_ua = alert_threshold_ua; + return 0; +} + +static int adm1177_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + struct adm1177_state *st = dev_get_drvdata(dev); + u8 data[3]; + long dummy; + int ret; + + switch (type) { + case hwmon_curr: + switch (attr) { + case hwmon_curr_input: + ret = adm1177_read_raw(st, 3, data); + if (ret < 0) + return ret; + dummy = (data[1] << 4) | (data[2] & 0xF); + /* + * convert to milliamperes + * ((105.84mV / 4096) x raw) / senseResistor(ohm) + */ + *val = div_u64((105840000ull * dummy), + 4096 * st->r_sense_uohm); + return 0; + case hwmon_curr_max_alarm: + *val = st->alert_threshold_ua; + return 0; + default: + return -EOPNOTSUPP; + } + case hwmon_in: + ret = adm1177_read_raw(st, 3, data); + if (ret < 0) + return ret; + dummy = (data[0] << 4) | (data[2] >> 4); + /* + * convert to millivolts based on resistor division + * (V_fullscale / 4096) * raw + */ + if (st->vrange_high) + dummy *= 26350; + else + dummy *= 6650; + + *val = DIV_ROUND_CLOSEST(dummy, 4096); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int adm1177_write(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long val) +{ + struct adm1177_state *st = dev_get_drvdata(dev); + + switch (type) { + case hwmon_curr: + switch (attr) { + case hwmon_curr_max_alarm: + adm1177_write_alert_thr(st, val); + return 0; + default: + return -EOPNOTSUPP; + } + default: + return -EOPNOTSUPP; + } +} + +static umode_t adm1177_is_visible(const void *data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + const struct adm1177_state *st = data; + + switch (type) { + case hwmon_in: + switch (attr) { + case hwmon_in_input: + return 0444; + } + break; + case hwmon_curr: + switch (attr) { + case hwmon_curr_input: + if (st->r_sense_uohm) + return 0444; + return 0; + case hwmon_curr_max_alarm: + if (st->r_sense_uohm) + return 0644; + return 0; + } + break; + default: + break; + } + return 0; +} + +static const struct hwmon_channel_info
*adm1177_info[] = { + HWMON_CHANNEL_INFO(curr, + HWMON_C_INPUT | HWMON_C_MAX_ALARM), + HWMON_CHANNEL_INFO(in, + HWMON_I_INPUT), + NULL +}; + +static const struct hwmon_ops adm1177_hwmon_ops = { + .is_visible = adm1177_is_visible, + .read = adm1177_read, + .write = adm1177_write, +}; + +static const struct hwmon_chip_info adm1177_chip_info = { + .ops = &adm1177_hwmon_ops, + .info = adm1177_info, +}; + +static void adm1177_remove(void *data) +{ + struct adm1177_state *st = data; + + regulator_disable(st->reg); +} + +static int adm1177_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + struct device *hwmon_dev; + struct adm1177_state *st; + u32 alert_threshold_ua; + int ret; + + st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL); + if (!st) + return -ENOMEM; + + st->client = client; + + st->reg = devm_regulator_get_optional(&client->dev, "vref"); + if (IS_ERR(st->reg)) { + if (PTR_ERR(st->reg) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + st->reg = NULL; + } else { + ret = regulator_enable(st->reg); + if (ret) + return ret; + ret = devm_add_action_or_reset(&client->dev, adm1177_remove, + st); + if (ret) + return ret; + } + + if (device_property_read_u32(dev, "shunt-resistor-micro-ohms", + &st->r_sense_uohm)) + st->r_sense_uohm = 0; + if (device_property_read_u32(dev, "adi,shutdown-threshold-microamp", + &alert_threshold_ua)) { + if (st->r_sense_uohm) + /* + * set maximum default value from datasheet based on + * shunt-resistor + */ + alert_threshold_ua = div_u64(105840000000, + st->r_sense_uohm); + else + alert_threshold_ua = 0; + } + st->vrange_high = device_property_read_bool(dev, + "adi,vrange-high-enable"); + if (alert_threshold_ua && st->r_sense_uohm) + adm1177_write_alert_thr(st, alert_threshold_ua); + + ret = adm1177_write_cmd(st, ADM1177_CMD_V_CONT | + ADM1177_CMD_I_CONT | + (st->vrange_high ? 
0 : ADM1177_CMD_VRANGE)); + if (ret) + return ret; + + hwmon_dev = + devm_hwmon_device_register_with_info(dev, client->name, st, + &adm1177_chip_info, NULL); + return PTR_ERR_OR_ZERO(hwmon_dev); +} + +static const struct i2c_device_id adm1177_id[] = { + {"adm1177", 0}, + {} +}; +MODULE_DEVICE_TABLE(i2c, adm1177_id); + +static const struct of_device_id adm1177_dt_ids[] = { + { .compatible = "adi,adm1177" }, + {}, +}; +MODULE_DEVICE_TABLE(of, adm1177_dt_ids); + +static struct i2c_driver adm1177_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "adm1177", + .of_match_table = adm1177_dt_ids, + }, + .probe = adm1177_probe, + .id_table = adm1177_id, +}; +module_i2c_driver(adm1177_driver); + +MODULE_AUTHOR("Beniamin Bia <beniamin.bia@analog.com>"); +MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>"); +MODULE_DESCRIPTION("Analog Devices ADM1177 ADC driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c index 6c64d50c9aae..01c2eeb02aa9 100644 --- a/drivers/hwmon/adt7475.c +++ b/drivers/hwmon/adt7475.c @@ -294,9 +294,10 @@ static inline u16 volt2reg(int channel, long volt, u8 bypass_attn) long reg; if (bypass_attn & (1 << channel)) - reg = (volt * 1024) / 2250; + reg = DIV_ROUND_CLOSEST(volt * 1024, 2250); else - reg = (volt * r[1] * 1024) / ((r[0] + r[1]) * 2250); + reg = DIV_ROUND_CLOSEST(volt * r[1] * 1024, + (r[0] + r[1]) * 2250); return clamp_val(reg, 0, 1023) & (0xff << 2); } diff --git a/drivers/hwmon/drivetemp.c b/drivers/hwmon/drivetemp.c new file mode 100644 index 000000000000..370d0c74eb01 --- /dev/null +++ b/drivers/hwmon/drivetemp.c @@ -0,0 +1,574 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Hwmon client for disk and solid state drives with temperature sensors + * Copyright (C) 2019 Zodiac Inflight Innovations + * + * With input from: + * Hwmon client for S.M.A.R.T. hard disk drives with temperature sensors. + * (C) 2018 Linus Walleij + * + * hwmon: Driver for SCSI/ATA temperature sensors + * by Constantin Baranov <const@mimas.ru>, submitted September 2009 + * + * This driver supports reporting the temperature of SATA drives. It can be + * easily extended to report the temperature of SCSI drives. + * + * The primary means to read drive temperatures and temperature limits + * for ATA drives is the SCT Command Transport feature set as specified in + * ATA8-ACS. + * It can be used to read the current drive temperature, temperature limits, + * and historic minimum and maximum temperatures. The SCT Command Transport + * feature set is documented in "AT Attachment 8 - ATA/ATAPI Command Set + * (ATA8-ACS)". + * + * If the SCT Command Transport feature set is not available, drive temperatures + * may be readable through SMART attributes. Since SMART attributes are not well + * defined, this method is only used as a fallback mechanism. + * + * There are three SMART attributes which may report drive temperatures. + * Those are defined as follows (from + * http://www.cropel.com/library/smart-attribute-list.aspx). + * + * 190 Temperature Temperature, monitored by a sensor somewhere inside + * the drive. Raw value typically holds the actual + * temperature (hexadecimal) in its rightmost two digits. + * + * 194 Temperature Temperature, monitored by a sensor somewhere inside + * the drive. Raw value typically holds the actual + * temperature (hexadecimal) in its rightmost two digits. + * + * 231 Temperature Temperature, monitored by a sensor somewhere inside + * the drive.
Raw value typically holds the actual + * temperature (hexadecimal) in its rightmost two digits. + * + * Wikipedia defines attributes a bit differently. + * + * 190 Temperature Value is equal to (100-temp. °C), allowing manufacturer + * Difference or to set a minimum threshold which corresponds to a + * Airflow maximum temperature. This also follows the convention of + * Temperature 100 being a best-case value and lower values being + * undesirable. However, some older drives may instead + * report raw Temperature (identical to 0xC2) or + * Temperature minus 50 here. + * 194 Temperature or Indicates the device temperature, if the appropriate + * Temperature sensor is fitted. Lowest byte of the raw value contains + * Celsius the exact temperature value (Celsius degrees). + * 231 Life Left Indicates the approximate SSD life left, in terms of + * (SSDs) or program/erase cycles or available reserved blocks. + * Temperature A normalized value of 100 represents a new drive, with + * a threshold value at 10 indicating a need for + * replacement. A value of 0 may mean that the drive is + * operating in read-only mode to allow data recovery. + * Previously (pre-2010) occasionally used for Drive + * Temperature (more typically reported at 0xC2). + * + * Common denominator is that the first raw byte reports the temperature + * in degrees C on almost all drives. Some drives may report a fractional + * temperature in the second raw byte. + * + * Known exceptions (from libatasmart): + * - SAMSUNG SV0412H and SAMSUNG SV1204H report the temperature in tenths + * of a degree C in the first two raw bytes. + * - A few Maxtor drives report an unknown or bad value in attribute 194. + * - Certain Apple SSD drives report an unknown value in attribute 190. + * Only certain firmware versions are affected. + * + * Those exceptions affect older ATA drives and are currently ignored. + * Also, the second raw byte (possibly reporting the fractional temperature) + * is currently ignored. + * + * Many drives also report temperature limits in additional SMART data raw + * bytes. The format of those is not well defined and varies widely. + * The driver does not currently attempt to report those limits. + * + * According to data in smartmontools, attribute 231 is rarely used to report + * drive temperatures. At the same time, several drives report SSD life left + * in attribute 231, but do not support temperature sensors. For this reason, + * attribute 231 is currently ignored. + * + * Following the above definitions, temperatures are reported as follows. + * - If SCT Command Transport is supported, it is used to read the + * temperature and, if available, temperature limits. + * - Otherwise, if SMART attribute 194 is supported, it is used to read + * the temperature. + * - Otherwise, if SMART attribute 190 is supported, it is used to read + * the temperature. + */
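The reporting policy above maps onto very little code. A standalone sketch of the SMART fallback path, taking the first raw byte as whole degrees C and preferring attribute 194 over 190; the entry layout (id at byte 0, raw data from byte 5, 12 bytes per entry) is the conventional SMART vendor attribute format assumed here, and the names are illustrative rather than the driver's:

#include <stdbool.h>
#include <stdint.h>

#define SMART_ENTRY_SIZE 12	/* one vendor attribute table entry */

/* Fetch the temperature in millidegrees C from one entry, if it matches. */
static bool smart_entry_temp(const uint8_t *entry, int id, long *mc)
{
	if (entry[0] != id)		/* byte 0: attribute id */
		return false;
	*mc = (long)entry[5] * 1000;	/* byte 5: first raw byte, degrees C */
	return true;
}

/* Prefer attribute 194, fall back to 190, mirroring the policy above. */
static bool smart_table_temp(const uint8_t *table, int nattrs, long *mc)
{
	bool found = false;
	int i;

	for (i = 0; i < nattrs; i++) {
		const uint8_t *entry = table + i * SMART_ENTRY_SIZE;

		if (smart_entry_temp(entry, 194, mc))
			return true;	/* 194 wins outright */
		if (!found)
			found = smart_entry_temp(entry, 190, mc);
	}
	return found;
}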
+ +#include <linux/ata.h> +#include <linux/bits.h> +#include <linux/device.h> +#include <linux/hwmon.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_driver.h> +#include <scsi/scsi_proto.h> + +struct drivetemp_data { + struct list_head list; /* list of instantiated devices */ + struct mutex lock; /* protect data buffer accesses */ + struct scsi_device *sdev; /* SCSI device */ + struct device *dev; /* instantiating device */ + struct device *hwdev; /* hardware monitoring device */ + u8 smartdata[ATA_SECT_SIZE]; /* local buffer */ + int (*get_temp)(struct drivetemp_data *st, u32 attr, long *val); + bool have_temp_lowest; /* lowest temp in SCT status */ + bool have_temp_highest; /* highest temp in SCT status */ + bool have_temp_min; /* have min temp */ + bool have_temp_max; /* have max temp */ + bool have_temp_lcrit; /* have lower critical limit */ + bool have_temp_crit; /* have critical limit */ + int temp_min; /* min temp */ + int temp_max; /* max temp */ + int temp_lcrit; /* lower critical limit */ + int temp_crit; /* critical limit */ +}; + +static LIST_HEAD(drivetemp_devlist); + +#define ATA_MAX_SMART_ATTRS 30 +#define SMART_TEMP_PROP_190 190 +#define SMART_TEMP_PROP_194 194 + +#define SCT_STATUS_REQ_ADDR 0xe0 +#define SCT_STATUS_VERSION_LOW 0 /* log byte offsets */ +#define SCT_STATUS_VERSION_HIGH 1 +#define SCT_STATUS_TEMP 200 +#define SCT_STATUS_TEMP_LOWEST 201 +#define SCT_STATUS_TEMP_HIGHEST 202 +#define SCT_READ_LOG_ADDR 0xe1 +#define SMART_READ_LOG 0xd5 +#define SMART_WRITE_LOG 0xd6 + +#define INVALID_TEMP 0x80 + +#define temp_is_valid(temp) ((temp) != INVALID_TEMP) +#define temp_from_sct(temp) (((s8)(temp)) * 1000) + +static inline bool ata_id_smart_supported(u16 *id) +{ + return id[ATA_ID_COMMAND_SET_1] & BIT(0); +} + +static inline bool ata_id_smart_enabled(u16 *id) +{ + return id[ATA_ID_CFS_ENABLE_1] & BIT(0); +} + +static int drivetemp_scsi_command(struct drivetemp_data *st, + u8 ata_command, u8 feature, + u8 lba_low, u8 lba_mid, u8 lba_high) +{ + u8 scsi_cmd[MAX_COMMAND_SIZE]; + int data_dir; + + memset(scsi_cmd, 0, sizeof(scsi_cmd)); + scsi_cmd[0] = ATA_16; + if (ata_command == ATA_CMD_SMART && feature == SMART_WRITE_LOG) { + scsi_cmd[1] = (5 << 1); /* PIO Data-out */ + /* + * No off.line or cc, write to dev, block count in sector count + * field. + */ + scsi_cmd[2] = 0x06; + data_dir = DMA_TO_DEVICE; + } else { + scsi_cmd[1] = (4 << 1); /* PIO Data-in */ + /* + * No off.line or cc, read from dev, block count in sector count + * field.
+ */ + scsi_cmd[2] = 0x0e; + data_dir = DMA_FROM_DEVICE; + } + scsi_cmd[4] = feature; + scsi_cmd[6] = 1; /* 1 sector */ + scsi_cmd[8] = lba_low; + scsi_cmd[10] = lba_mid; + scsi_cmd[12] = lba_high; + scsi_cmd[14] = ata_command; + + return scsi_execute_req(st->sdev, scsi_cmd, data_dir, + st->smartdata, ATA_SECT_SIZE, NULL, HZ, 5, + NULL); +} + +static int drivetemp_ata_command(struct drivetemp_data *st, u8 feature, + u8 select) +{ + return drivetemp_scsi_command(st, ATA_CMD_SMART, feature, select, + ATA_SMART_LBAM_PASS, ATA_SMART_LBAH_PASS); +} + +static int drivetemp_get_smarttemp(struct drivetemp_data *st, u32 attr, + long *temp) +{ + u8 *buf = st->smartdata; + bool have_temp = false; + u8 temp_raw; + u8 csum; + int err; + int i; + + err = drivetemp_ata_command(st, ATA_SMART_READ_VALUES, 0); + if (err) + return err; + + /* Checksum the read value table */ + csum = 0; + for (i = 0; i < ATA_SECT_SIZE; i++) + csum += buf[i]; + if (csum) { + dev_dbg(&st->sdev->sdev_gendev, + "checksum error reading SMART values\n"); + return -EIO; + } + + for (i = 0; i < ATA_MAX_SMART_ATTRS; i++) { + u8 *attr = buf + i * 12; + int id = attr[2]; + + if (!id) + continue; + + if (id == SMART_TEMP_PROP_190) { + temp_raw = attr[7]; + have_temp = true; + } + if (id == SMART_TEMP_PROP_194) { + temp_raw = attr[7]; + have_temp = true; + break; + } + } + + if (have_temp) { + *temp = temp_raw * 1000; + return 0; + } + + return -ENXIO; +} + +static int drivetemp_get_scttemp(struct drivetemp_data *st, u32 attr, long *val) +{ + u8 *buf = st->smartdata; + int err; + + err = drivetemp_ata_command(st, SMART_READ_LOG, SCT_STATUS_REQ_ADDR); + if (err) + return err; + switch (attr) { + case hwmon_temp_input: + *val = temp_from_sct(buf[SCT_STATUS_TEMP]); + break; + case hwmon_temp_lowest: + *val = temp_from_sct(buf[SCT_STATUS_TEMP_LOWEST]); + break; + case hwmon_temp_highest: + *val = temp_from_sct(buf[SCT_STATUS_TEMP_HIGHEST]); + break; + default: + err = -EINVAL; + break; + } + return err; +} + +static int drivetemp_identify_sata(struct drivetemp_data *st) +{ + struct scsi_device *sdev = st->sdev; + u8 *buf = st->smartdata; + struct scsi_vpd *vpd; + bool is_ata, is_sata; + bool have_sct_data_table; + bool have_sct_temp; + bool have_smart; + bool have_sct; + u16 *ata_id; + u16 version; + long temp; + int err; + + /* SCSI-ATA Translation present? */ + rcu_read_lock(); + vpd = rcu_dereference(sdev->vpd_pg89); + + /* + * Verify that ATA IDENTIFY DEVICE data is included in ATA Information + * VPD and that the drive implements the SATA protocol. 
+ */ + if (!vpd || vpd->len < 572 || vpd->data[56] != ATA_CMD_ID_ATA || + vpd->data[36] != 0x34) { + rcu_read_unlock(); + return -ENODEV; + } + ata_id = (u16 *)&vpd->data[60]; + is_ata = ata_id_is_ata(ata_id); + is_sata = ata_id_is_sata(ata_id); + have_sct = ata_id_sct_supported(ata_id); + have_sct_data_table = ata_id_sct_data_tables(ata_id); + have_smart = ata_id_smart_supported(ata_id) && + ata_id_smart_enabled(ata_id); + + rcu_read_unlock(); + + /* bail out if this is not a SATA device */ + if (!is_ata || !is_sata) + return -ENODEV; + if (!have_sct) + goto skip_sct; + + err = drivetemp_ata_command(st, SMART_READ_LOG, SCT_STATUS_REQ_ADDR); + if (err) + goto skip_sct; + + version = (buf[SCT_STATUS_VERSION_HIGH] << 8) | + buf[SCT_STATUS_VERSION_LOW]; + if (version != 2 && version != 3) + goto skip_sct; + + have_sct_temp = temp_is_valid(buf[SCT_STATUS_TEMP]); + if (!have_sct_temp) + goto skip_sct; + + st->have_temp_lowest = temp_is_valid(buf[SCT_STATUS_TEMP_LOWEST]); + st->have_temp_highest = temp_is_valid(buf[SCT_STATUS_TEMP_HIGHEST]); + + if (!have_sct_data_table) + goto skip_sct; + + /* Request and read temperature history table */ + memset(buf, '\0', sizeof(st->smartdata)); + buf[0] = 5; /* data table command */ + buf[2] = 1; /* read table */ + buf[4] = 2; /* temperature history table */ + + err = drivetemp_ata_command(st, SMART_WRITE_LOG, SCT_STATUS_REQ_ADDR); + if (err) + goto skip_sct_data; + + err = drivetemp_ata_command(st, SMART_READ_LOG, SCT_READ_LOG_ADDR); + if (err) + goto skip_sct_data; + + /* + * Temperature limits per AT Attachment 8 - + * ATA/ATAPI Command Set (ATA8-ACS) + */ + st->have_temp_max = temp_is_valid(buf[6]); + st->have_temp_crit = temp_is_valid(buf[7]); + st->have_temp_min = temp_is_valid(buf[8]); + st->have_temp_lcrit = temp_is_valid(buf[9]); + + st->temp_max = temp_from_sct(buf[6]); + st->temp_crit = temp_from_sct(buf[7]); + st->temp_min = temp_from_sct(buf[8]); + st->temp_lcrit = temp_from_sct(buf[9]); + +skip_sct_data: + if (have_sct_temp) { + st->get_temp = drivetemp_get_scttemp; + return 0; + } +skip_sct: + if (!have_smart) + return -ENODEV; + st->get_temp = drivetemp_get_smarttemp; + return drivetemp_get_smarttemp(st, hwmon_temp_input, &temp); +} + +static int drivetemp_identify(struct drivetemp_data *st) +{ + struct scsi_device *sdev = st->sdev; + + /* Bail out immediately if there is no inquiry data */ + if (!sdev->inquiry || sdev->inquiry_len < 16) + return -ENODEV; + + /* Disk device? 
*/ + if (sdev->type != TYPE_DISK && sdev->type != TYPE_ZBC) + return -ENODEV; + + return drivetemp_identify_sata(st); +} + +static int drivetemp_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + struct drivetemp_data *st = dev_get_drvdata(dev); + int err = 0; + + if (type != hwmon_temp) + return -EINVAL; + + switch (attr) { + case hwmon_temp_input: + case hwmon_temp_lowest: + case hwmon_temp_highest: + mutex_lock(&st->lock); + err = st->get_temp(st, attr, val); + mutex_unlock(&st->lock); + break; + case hwmon_temp_lcrit: + *val = st->temp_lcrit; + break; + case hwmon_temp_min: + *val = st->temp_min; + break; + case hwmon_temp_max: + *val = st->temp_max; + break; + case hwmon_temp_crit: + *val = st->temp_crit; + break; + default: + err = -EINVAL; + break; + } + return err; +} + +static umode_t drivetemp_is_visible(const void *data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + const struct drivetemp_data *st = data; + + switch (type) { + case hwmon_temp: + switch (attr) { + case hwmon_temp_input: + return 0444; + case hwmon_temp_lowest: + if (st->have_temp_lowest) + return 0444; + break; + case hwmon_temp_highest: + if (st->have_temp_highest) + return 0444; + break; + case hwmon_temp_min: + if (st->have_temp_min) + return 0444; + break; + case hwmon_temp_max: + if (st->have_temp_max) + return 0444; + break; + case hwmon_temp_lcrit: + if (st->have_temp_lcrit) + return 0444; + break; + case hwmon_temp_crit: + if (st->have_temp_crit) + return 0444; + break; + default: + break; + } + break; + default: + break; + } + return 0; +} + +static const struct hwmon_channel_info *drivetemp_info[] = { + HWMON_CHANNEL_INFO(chip, + HWMON_C_REGISTER_TZ), + HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | + HWMON_T_LOWEST | HWMON_T_HIGHEST | + HWMON_T_MIN | HWMON_T_MAX | + HWMON_T_LCRIT | HWMON_T_CRIT), + NULL +}; + +static const struct hwmon_ops drivetemp_ops = { + .is_visible = drivetemp_is_visible, + .read = drivetemp_read, +}; + +static const struct hwmon_chip_info drivetemp_chip_info = { + .ops = &drivetemp_ops, + .info = drivetemp_info, +}; + +/* + * The device argument points to sdev->sdev_dev. Its parent is + * sdev->sdev_gendev, which we can use to get the scsi_device pointer. 
+ */ +static int drivetemp_add(struct device *dev, struct class_interface *intf) +{ + struct scsi_device *sdev = to_scsi_device(dev->parent); + struct drivetemp_data *st; + int err; + + st = kzalloc(sizeof(*st), GFP_KERNEL); + if (!st) + return -ENOMEM; + + st->sdev = sdev; + st->dev = dev; + mutex_init(&st->lock); + + if (drivetemp_identify(st)) { + err = -ENODEV; + goto abort; + } + + st->hwdev = hwmon_device_register_with_info(dev->parent, "drivetemp", + st, &drivetemp_chip_info, + NULL); + if (IS_ERR(st->hwdev)) { + err = PTR_ERR(st->hwdev); + goto abort; + } + + list_add(&st->list, &drivetemp_devlist); + return 0; + +abort: + kfree(st); + return err; +} + +static void drivetemp_remove(struct device *dev, struct class_interface *intf) +{ + struct drivetemp_data *st, *tmp; + + list_for_each_entry_safe(st, tmp, &drivetemp_devlist, list) { + if (st->dev == dev) { + list_del(&st->list); + hwmon_device_unregister(st->hwdev); + kfree(st); + break; + } + } +} + +static struct class_interface drivetemp_interface = { + .add_dev = drivetemp_add, + .remove_dev = drivetemp_remove, +}; + +static int __init drivetemp_init(void) +{ + return scsi_register_interface(&drivetemp_interface); +} + +static void __exit drivetemp_exit(void) +{ + scsi_unregister_interface(&drivetemp_interface); +} + +module_init(drivetemp_init); +module_exit(drivetemp_exit); + +MODULE_AUTHOR("Guenter Roeck <linus@roeck-us.net>"); +MODULE_DESCRIPTION("Hard drive temperature monitor"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c index 1f3b30b085b9..6a30fb453f7a 100644 --- a/drivers/hwmon/hwmon.c +++ b/drivers/hwmon/hwmon.c @@ -51,6 +51,7 @@ struct hwmon_device_attribute { #define to_hwmon_attr(d) \ container_of(d, struct hwmon_device_attribute, dev_attr) +#define to_dev_attr(a) container_of(a, struct device_attribute, attr) /* * Thermal zone information @@ -58,7 +59,7 @@ struct hwmon_device_attribute { * also provides the sensor index. 
*/ struct hwmon_thermal_data { - struct hwmon_device *hwdev; /* Reference to hwmon device */ + struct device *dev; /* Reference to hwmon device */ int index; /* sensor index */ }; @@ -95,9 +96,27 @@ static const struct attribute_group *hwmon_dev_attr_groups[] = { NULL }; +static void hwmon_free_attrs(struct attribute **attrs) +{ + int i; + + for (i = 0; attrs[i]; i++) { + struct device_attribute *dattr = to_dev_attr(attrs[i]); + struct hwmon_device_attribute *hattr = to_hwmon_attr(dattr); + + kfree(hattr); + } + kfree(attrs); +} + static void hwmon_dev_release(struct device *dev) { - kfree(to_hwmon_device(dev)); + struct hwmon_device *hwdev = to_hwmon_device(dev); + + if (hwdev->group.attrs) + hwmon_free_attrs(hwdev->group.attrs); + kfree(hwdev->groups); + kfree(hwdev); } static struct class hwmon_class = { @@ -119,11 +138,11 @@ static DEFINE_IDA(hwmon_ida); static int hwmon_thermal_get_temp(void *data, int *temp) { struct hwmon_thermal_data *tdata = data; - struct hwmon_device *hwdev = tdata->hwdev; + struct hwmon_device *hwdev = to_hwmon_device(tdata->dev); int ret; long t; - ret = hwdev->chip->ops->read(&hwdev->dev, hwmon_temp, hwmon_temp_input, + ret = hwdev->chip->ops->read(tdata->dev, hwmon_temp, hwmon_temp_input, tdata->index, &t); if (ret < 0) return ret; @@ -137,8 +156,7 @@ static const struct thermal_zone_of_device_ops hwmon_thermal_ops = { .get_temp = hwmon_thermal_get_temp, }; -static int hwmon_thermal_add_sensor(struct device *dev, - struct hwmon_device *hwdev, int index) +static int hwmon_thermal_add_sensor(struct device *dev, int index) { struct hwmon_thermal_data *tdata; struct thermal_zone_device *tzd; @@ -147,10 +165,10 @@ static int hwmon_thermal_add_sensor(struct device *dev, if (!tdata) return -ENOMEM; - tdata->hwdev = hwdev; + tdata->dev = dev; tdata->index = index; - tzd = devm_thermal_zone_of_sensor_register(&hwdev->dev, index, tdata, + tzd = devm_thermal_zone_of_sensor_register(dev, index, tdata, &hwmon_thermal_ops); /* * If CONFIG_THERMAL_OF is disabled, this returns -ENODEV, @@ -162,8 +180,7 @@ static int hwmon_thermal_add_sensor(struct device *dev, return 0; } #else -static int hwmon_thermal_add_sensor(struct device *dev, - struct hwmon_device *hwdev, int index) +static int hwmon_thermal_add_sensor(struct device *dev, int index) { return 0; } @@ -171,7 +188,7 @@ static int hwmon_thermal_add_sensor(struct device *dev, static int hwmon_attr_base(enum hwmon_sensor_types type) { - if (type == hwmon_in) + if (type == hwmon_in || type == hwmon_intrusion) return 0; return 1; } @@ -250,8 +267,7 @@ static bool is_string_attr(enum hwmon_sensor_types type, u32 attr) (type == hwmon_fan && attr == hwmon_fan_label); } -static struct attribute *hwmon_genattr(struct device *dev, - const void *drvdata, +static struct attribute *hwmon_genattr(const void *drvdata, enum hwmon_sensor_types type, u32 attr, int index, @@ -279,7 +295,7 @@ static struct attribute *hwmon_genattr(struct device *dev, if ((mode & 0222) && !ops->write) return ERR_PTR(-EINVAL); - hattr = devm_kzalloc(dev, sizeof(*hattr), GFP_KERNEL); + hattr = kzalloc(sizeof(*hattr), GFP_KERNEL); if (!hattr) return ERR_PTR(-ENOMEM); @@ -327,6 +343,7 @@ static const char * const hwmon_chip_attrs[] = { }; static const char * const hwmon_temp_attr_templates[] = { + [hwmon_temp_enable] = "temp%d_enable", [hwmon_temp_input] = "temp%d_input", [hwmon_temp_type] = "temp%d_type", [hwmon_temp_lcrit] = "temp%d_lcrit", @@ -354,6 +371,7 @@ static const char * const hwmon_temp_attr_templates[] = { }; static const char * const 
hwmon_in_attr_templates[] = { + [hwmon_in_enable] = "in%d_enable", [hwmon_in_input] = "in%d_input", [hwmon_in_min] = "in%d_min", [hwmon_in_max] = "in%d_max", @@ -369,10 +387,10 @@ static const char * const hwmon_in_attr_templates[] = { [hwmon_in_max_alarm] = "in%d_max_alarm", [hwmon_in_lcrit_alarm] = "in%d_lcrit_alarm", [hwmon_in_crit_alarm] = "in%d_crit_alarm", - [hwmon_in_enable] = "in%d_enable", }; static const char * const hwmon_curr_attr_templates[] = { + [hwmon_curr_enable] = "curr%d_enable", [hwmon_curr_input] = "curr%d_input", [hwmon_curr_min] = "curr%d_min", [hwmon_curr_max] = "curr%d_max", @@ -391,6 +409,7 @@ static const char * const hwmon_curr_attr_templates[] = { }; static const char * const hwmon_power_attr_templates[] = { + [hwmon_power_enable] = "power%d_enable", [hwmon_power_average] = "power%d_average", [hwmon_power_average_interval] = "power%d_average_interval", [hwmon_power_average_interval_max] = "power%d_interval_max", @@ -422,11 +441,13 @@ static const char * const hwmon_power_attr_templates[] = { }; static const char * const hwmon_energy_attr_templates[] = { + [hwmon_energy_enable] = "energy%d_enable", [hwmon_energy_input] = "energy%d_input", [hwmon_energy_label] = "energy%d_label", }; static const char * const hwmon_humidity_attr_templates[] = { + [hwmon_humidity_enable] = "humidity%d_enable", [hwmon_humidity_input] = "humidity%d_input", [hwmon_humidity_label] = "humidity%d_label", [hwmon_humidity_min] = "humidity%d_min", @@ -438,6 +459,7 @@ static const char * const hwmon_humidity_attr_templates[] = { }; static const char * const hwmon_fan_attr_templates[] = { + [hwmon_fan_enable] = "fan%d_enable", [hwmon_fan_input] = "fan%d_input", [hwmon_fan_label] = "fan%d_label", [hwmon_fan_min] = "fan%d_min", @@ -458,6 +480,11 @@ static const char * const hwmon_pwm_attr_templates[] = { [hwmon_pwm_freq] = "pwm%d_freq", }; +static const char * const hwmon_intrusion_attr_templates[] = { + [hwmon_intrusion_alarm] = "intrusion%d_alarm", + [hwmon_intrusion_beep] = "intrusion%d_beep", +}; + static const char * const *__templates[] = { [hwmon_chip] = hwmon_chip_attrs, [hwmon_temp] = hwmon_temp_attr_templates, @@ -468,6 +495,7 @@ static const char * const *__templates[] = { [hwmon_humidity] = hwmon_humidity_attr_templates, [hwmon_fan] = hwmon_fan_attr_templates, [hwmon_pwm] = hwmon_pwm_attr_templates, + [hwmon_intrusion] = hwmon_intrusion_attr_templates, }; static const int __templates_size[] = { @@ -480,6 +508,7 @@ static const int __templates_size[] = { [hwmon_humidity] = ARRAY_SIZE(hwmon_humidity_attr_templates), [hwmon_fan] = ARRAY_SIZE(hwmon_fan_attr_templates), [hwmon_pwm] = ARRAY_SIZE(hwmon_pwm_attr_templates), + [hwmon_intrusion] = ARRAY_SIZE(hwmon_intrusion_attr_templates), }; static int hwmon_num_channel_attrs(const struct hwmon_channel_info *info) @@ -492,8 +521,7 @@ static int hwmon_num_channel_attrs(const struct hwmon_channel_info *info) return n; } -static int hwmon_genattrs(struct device *dev, - const void *drvdata, +static int hwmon_genattrs(const void *drvdata, struct attribute **attrs, const struct hwmon_ops *ops, const struct hwmon_channel_info *info) @@ -519,7 +547,7 @@ static int hwmon_genattrs(struct device *dev, attr_mask &= ~BIT(attr); if (attr >= template_size) return -EINVAL; - a = hwmon_genattr(dev, drvdata, info->type, attr, i, + a = hwmon_genattr(drvdata, info->type, attr, i, templates[attr], ops); if (IS_ERR(a)) { if (PTR_ERR(a) != -ENOENT) @@ -533,8 +561,7 @@ static int hwmon_genattrs(struct device *dev, } static struct attribute ** 
-__hwmon_create_attrs(struct device *dev, const void *drvdata, - const struct hwmon_chip_info *chip) +__hwmon_create_attrs(const void *drvdata, const struct hwmon_chip_info *chip) { int ret, i, aindex = 0, nattrs = 0; struct attribute **attrs; @@ -545,15 +572,17 @@ __hwmon_create_attrs(struct device *dev, const void *drvdata, if (nattrs == 0) return ERR_PTR(-EINVAL); - attrs = devm_kcalloc(dev, nattrs + 1, sizeof(*attrs), GFP_KERNEL); + attrs = kcalloc(nattrs + 1, sizeof(*attrs), GFP_KERNEL); if (!attrs) return ERR_PTR(-ENOMEM); for (i = 0; chip->info[i]; i++) { - ret = hwmon_genattrs(dev, drvdata, &attrs[aindex], chip->ops, + ret = hwmon_genattrs(drvdata, &attrs[aindex], chip->ops, chip->info[i]); - if (ret < 0) + if (ret < 0) { + hwmon_free_attrs(attrs); return ERR_PTR(ret); + } aindex += ret; } @@ -595,14 +624,13 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata, for (i = 0; groups[i]; i++) ngroups++; - hwdev->groups = devm_kcalloc(dev, ngroups, sizeof(*groups), - GFP_KERNEL); + hwdev->groups = kcalloc(ngroups, sizeof(*groups), GFP_KERNEL); if (!hwdev->groups) { err = -ENOMEM; goto free_hwmon; } - attrs = __hwmon_create_attrs(dev, drvdata, chip); + attrs = __hwmon_create_attrs(drvdata, chip); if (IS_ERR(attrs)) { err = PTR_ERR(attrs); goto free_hwmon; @@ -647,8 +675,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata, hwmon_temp_input, j)) continue; if (info[i]->config[j] & HWMON_T_INPUT) { - err = hwmon_thermal_add_sensor(dev, - hwdev, j); + err = hwmon_thermal_add_sensor(hdev, j); if (err) { device_unregister(hdev); /* @@ -667,7 +694,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata, return hdev; free_hwmon: - kfree(hwdev); + hwmon_dev_release(hdev); ida_remove: ida_simple_remove(&hwmon_ida, id); return ERR_PTR(err); diff --git a/drivers/hwmon/i5k_amb.c b/drivers/hwmon/i5k_amb.c index b09c39abd3a8..eeac4b04df27 100644 --- a/drivers/hwmon/i5k_amb.c +++ b/drivers/hwmon/i5k_amb.c @@ -528,7 +528,7 @@ static int i5k_amb_probe(struct platform_device *pdev) goto err; } - data->amb_mmio = ioremap_nocache(data->amb_base, data->amb_len); + data->amb_mmio = ioremap(data->amb_base, data->amb_len); if (!data->amb_mmio) { res = -EBUSY; goto err_map_failed; diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index 5c1dddde193c..e39354ffe973 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c @@ -1,13 +1,29 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * k10temp.c - AMD Family 10h/11h/12h/14h/15h/16h processor hardware monitoring + * k10temp.c - AMD Family 10h/11h/12h/14h/15h/16h/17h + * processor hardware monitoring * * Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de> + * Copyright (c) 2020 Guenter Roeck <linux@roeck-us.net> + * + * Implementation notes: + * - CCD register address information as well as the calculation to + * convert raw register values is from https://github.com/ocerman/zenpower. + * The information is not confirmed from chip datasheets, but experiments + * suggest that it provides reasonable temperature values. + * - Register addresses to read chip voltage and current are also from + * https://github.com/ocerman/zenpower, and not confirmed from chip + * datasheets. Current calibration is board specific and not typically + * shared by board vendors. For this reason, current values are + * normalized to report 1A/LSB for core current and 0.25A/LSB for SoC + * current. Reported values can be adjusted using the sensors configuration + * file. */
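The note above on normalized current factors translates into two small fixed-point conversions in the new k10temp_read_in()/k10temp_read_curr() callbacks further down: bits 23:16 of an SVI telemetry register carry the voltage VID (1.55 V base, minus 6.25 mV per step) and bits 7:0 the current code. A standalone sketch of both conversions, with DIV_ROUND_CLOSEST written out for positive values; the register layout is as used by the driver, the function names are illustrative:

#include <stdint.h>

#define CFACTOR_ICORE 1000000	/* 1 A / LSB, normalized, in uA */
#define CFACTOR_ISOC 250000	/* 0.25 A / LSB, normalized, in uA */

/* Voltage in mV from bits 23:16: 1.55 V base, minus 6.25 mV per VID step. */
static long svi_to_mv(uint32_t regval)
{
	uint32_t vid = (regval >> 16) & 0xff;

	return (155000 - vid * 625 + 50) / 100;	/* DIV_ROUND_CLOSEST(x, 100) */
}

/* Current in mA from bits 7:0, scaled by the normalized factor in uA/LSB. */
static long svi_to_ma(uint32_t regval, long cfactor)
{
	return (cfactor * (regval & 0xff) + 500) / 1000;	/* DIV_ROUND_CLOSEST(x, 1000) */
}

/* e.g. VID 0x40 -> (155000 - 64 * 625 + 50) / 100 = 1150 mV */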
+#include <linux/bitops.h> +#include <linux/debugfs.h> #include <linux/err.h> #include <linux/hwmon.h> -#include <linux/hwmon-sysfs.h> #include <linux/init.h> #include <linux/module.h> #include <linux/pci.h> @@ -31,22 +47,22 @@ static DEFINE_MUTEX(nb_smu_ind_mutex); #endif /* CPUID function 0x80000001, ebx */ -#define CPUID_PKGTYPE_MASK 0xf0000000 +#define CPUID_PKGTYPE_MASK GENMASK(31, 28) #define CPUID_PKGTYPE_F 0x00000000 #define CPUID_PKGTYPE_AM2R2_AM3 0x10000000 /* DRAM controller (PCI function 2) */ #define REG_DCT0_CONFIG_HIGH 0x094 -#define DDR3_MODE 0x00000100 +#define DDR3_MODE BIT(8) /* miscellaneous (PCI function 3) */ #define REG_HARDWARE_THERMAL_CONTROL 0x64 -#define HTC_ENABLE 0x00000001 +#define HTC_ENABLE BIT(0) #define REG_REPORTED_TEMPERATURE 0xa4 #define REG_NORTHBRIDGE_CAPABILITIES 0xe8 -#define NB_CAP_HTC 0x00000400 +#define NB_CAP_HTC BIT(10) /* * For F15h M60h and M70h, REG_HARDWARE_THERMAL_CONTROL @@ -60,6 +76,20 @@ static DEFINE_MUTEX(nb_smu_ind_mutex); /* F17h M01h Access through SMN */ #define F17H_M01H_REPORTED_TEMP_CTRL_OFFSET 0x00059800 +#define F17H_M70H_CCD_TEMP(x) (0x00059954 + ((x) * 4)) +#define F17H_M70H_CCD_TEMP_VALID BIT(11) +#define F17H_M70H_CCD_TEMP_MASK GENMASK(10, 0) + +#define F17H_M01H_SVI 0x0005A000 +#define F17H_M01H_SVI_TEL_PLANE0 (F17H_M01H_SVI + 0xc) +#define F17H_M01H_SVI_TEL_PLANE1 (F17H_M01H_SVI + 0x10) + +#define CUR_TEMP_SHIFT 21 +#define CUR_TEMP_RANGE_SEL_MASK BIT(19) + +#define CFACTOR_ICORE 1000000 /* 1A / LSB */ +#define CFACTOR_ISOC 250000 /* 0.25A / LSB */ + struct k10temp_data { struct pci_dev *pdev; void (*read_htcreg)(struct pci_dev *pdev, u32 *regval); @@ -67,6 +97,10 @@ struct k10temp_data { int temp_offset; u32 temp_adjust_mask; bool show_tdie; + u32 show_tccd; + u32 svi_addr[2]; + bool show_current; + int cfactor[2]; }; struct tctl_offset { @@ -84,6 +118,16 @@ static const struct tctl_offset tctl_offset_table[] = { { 0x17, "AMD Ryzen Threadripper 29", 27000 }, /* 29{20,50,70,90}[W]X */ }; +static bool is_threadripper(void) +{ + return strstr(boot_cpu_data.x86_model_id, "Threadripper"); +} + +static bool is_epyc(void) +{ + return strstr(boot_cpu_data.x86_model_id, "EPYC"); +} + static void read_htcreg_pci(struct pci_dev *pdev, u32 *regval) { pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL, regval); @@ -123,130 +167,237 @@ static void read_tempreg_nb_f17(struct pci_dev *pdev, u32 *regval) F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval); } -static unsigned int get_raw_temp(struct k10temp_data *data) +static long get_raw_temp(struct k10temp_data *data) { - unsigned int temp; u32 regval; + long temp; data->read_tempreg(data->pdev, &regval); - temp = (regval >> 21) * 125; + temp = (regval >> CUR_TEMP_SHIFT) * 125; if (regval & data->temp_adjust_mask) temp -= 49000; return temp; } -static ssize_t temp1_input_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct k10temp_data *data = dev_get_drvdata(dev); - unsigned int temp = get_raw_temp(data); +static const char *k10temp_temp_label[] = { + "Tdie", + "Tctl", + "Tccd1", + "Tccd2", + "Tccd3", + "Tccd4", + "Tccd5", + "Tccd6", + "Tccd7", + "Tccd8", +}; - if (temp > data->temp_offset) - temp -= data->temp_offset; - else - temp = 0; +static const char *k10temp_in_label[] = { + "Vcore", + "Vsoc", +}; - return sprintf(buf, "%u\n", temp); -} +static const char *k10temp_curr_label[] = { + "Icore", + "Isoc", +}; -static ssize_t temp2_input_show(struct device *dev, - struct device_attribute *devattr, char *buf) +static int k10temp_read_labels(struct device *dev, + enum
hwmon_sensor_types type, + u32 attr, int channel, const char **str) { - struct k10temp_data *data = dev_get_drvdata(dev); - unsigned int temp = get_raw_temp(data); - - return sprintf(buf, "%u\n", temp); + switch (type) { + case hwmon_temp: + *str = k10temp_temp_label[channel]; + break; + case hwmon_in: + *str = k10temp_in_label[channel]; + break; + case hwmon_curr: + *str = k10temp_curr_label[channel]; + break; + default: + return -EOPNOTSUPP; + } + return 0; } -static ssize_t temp_label_show(struct device *dev, - struct device_attribute *devattr, char *buf) +static int k10temp_read_curr(struct device *dev, u32 attr, int channel, + long *val) { - struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + struct k10temp_data *data = dev_get_drvdata(dev); + u32 regval; - return sprintf(buf, "%s\n", attr->index ? "Tctl" : "Tdie"); + switch (attr) { + case hwmon_curr_input: + amd_smn_read(amd_pci_dev_to_node_id(data->pdev), + data->svi_addr[channel], &regval); + *val = DIV_ROUND_CLOSEST(data->cfactor[channel] * + (regval & 0xff), + 1000); + break; + default: + return -EOPNOTSUPP; + } + return 0; } -static ssize_t temp1_max_show(struct device *dev, - struct device_attribute *attr, char *buf) +static int k10temp_read_in(struct device *dev, u32 attr, int channel, long *val) { - return sprintf(buf, "%d\n", 70 * 1000); + struct k10temp_data *data = dev_get_drvdata(dev); + u32 regval; + + switch (attr) { + case hwmon_in_input: + amd_smn_read(amd_pci_dev_to_node_id(data->pdev), + data->svi_addr[channel], &regval); + regval = (regval >> 16) & 0xff; + *val = DIV_ROUND_CLOSEST(155000 - regval * 625, 100); + break; + default: + return -EOPNOTSUPP; + } + return 0; }
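get_raw_temp() above packs the whole decode into two steps: bits 31:21 of the reported-temperature register hold the raw value in 0.125 °C units, and on Family 17h the range-select bit (bit 19, installed as temp_adjust_mask there) pulls the result down by 49 °C. A standalone sketch with a worked value; only the constants are taken from the driver:

#include <stdint.h>

#define CUR_TEMP_SHIFT 21
#define CUR_TEMP_RANGE_SEL_MASK (1u << 19)

/* Raw register -> millidegrees C: 0.125 C per LSB, optional -49 C offset. */
static long k10temp_raw_to_mc(uint32_t regval)
{
	long temp = (regval >> CUR_TEMP_SHIFT) * 125;

	if (regval & CUR_TEMP_RANGE_SEL_MASK)
		temp -= 49000;
	return temp;
}

/* e.g. regval = 0x30000000 -> 0x180 * 125 = 48000, i.e. 48.0 C */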
-static ssize_t temp_crit_show(struct device *dev, - struct device_attribute *devattr, char *buf) +static int k10temp_read_temp(struct device *dev, u32 attr, int channel, + long *val) { - struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct k10temp_data *data = dev_get_drvdata(dev); - int show_hyst = attr->index; u32 regval; - int value; - data->read_htcreg(data->pdev, &regval); - value = ((regval >> 16) & 0x7f) * 500 + 52000; - if (show_hyst) - value -= ((regval >> 24) & 0xf) * 500; - return sprintf(buf, "%d\n", value); + switch (attr) { + case hwmon_temp_input: + switch (channel) { + case 0: /* Tdie */ + *val = get_raw_temp(data) - data->temp_offset; + if (*val < 0) + *val = 0; + break; + case 1: /* Tctl */ + *val = get_raw_temp(data); + if (*val < 0) + *val = 0; + break; + case 2 ... 9: /* Tccd{1-8} */ + amd_smn_read(amd_pci_dev_to_node_id(data->pdev), + F17H_M70H_CCD_TEMP(channel - 2), &regval); + *val = (regval & F17H_M70H_CCD_TEMP_MASK) * 125 - 49000; + break; + default: + return -EOPNOTSUPP; + } + break; + case hwmon_temp_max: + *val = 70 * 1000; + break; + case hwmon_temp_crit: + data->read_htcreg(data->pdev, &regval); + *val = ((regval >> 16) & 0x7f) * 500 + 52000; + break; + case hwmon_temp_crit_hyst: + data->read_htcreg(data->pdev, &regval); + *val = (((regval >> 16) & 0x7f) + - ((regval >> 24) & 0xf)) * 500 + 52000; + break; + default: + return -EOPNOTSUPP; + } + return 0; } -static DEVICE_ATTR_RO(temp1_input); -static DEVICE_ATTR_RO(temp1_max); -static SENSOR_DEVICE_ATTR_RO(temp1_crit, temp_crit, 0); -static SENSOR_DEVICE_ATTR_RO(temp1_crit_hyst, temp_crit, 1); - -static SENSOR_DEVICE_ATTR_RO(temp1_label, temp_label, 0); -static DEVICE_ATTR_RO(temp2_input); -static SENSOR_DEVICE_ATTR_RO(temp2_label, temp_label, 1); +static int k10temp_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + switch (type) { + case hwmon_temp: + return k10temp_read_temp(dev, attr, channel, val); + case hwmon_in: + return k10temp_read_in(dev, attr, channel, val); + case hwmon_curr: + return k10temp_read_curr(dev, attr, channel, val); + default: + return -EOPNOTSUPP; + } +} -static umode_t k10temp_is_visible(struct kobject *kobj, - struct attribute *attr, int index) +static umode_t k10temp_is_visible(const void *_data, + enum hwmon_sensor_types type, + u32 attr, int channel) { - struct device *dev = container_of(kobj, struct device, kobj); - struct k10temp_data *data = dev_get_drvdata(dev); + const struct k10temp_data *data = _data; struct pci_dev *pdev = data->pdev; u32 reg; - switch (index) { - case 0 ... 1: /* temp1_input, temp1_max */ - default: - break; - case 2 ... 3: /* temp1_crit, temp1_crit_hyst */ - if (!data->read_htcreg) - return 0; - - pci_read_config_dword(pdev, REG_NORTHBRIDGE_CAPABILITIES, - &reg); - if (!(reg & NB_CAP_HTC)) - return 0; - - data->read_htcreg(data->pdev, &reg); - if (!(reg & HTC_ENABLE)) + switch (type) { + case hwmon_temp: + switch (attr) { + case hwmon_temp_input: + switch (channel) { + case 0: /* Tdie, or Tctl if we don't show it */ + break; + case 1: /* Tctl */ + if (!data->show_tdie) + return 0; + break; + case 2 ... 9: /* Tccd{1-8} */ + if (!(data->show_tccd & BIT(channel - 2))) + return 0; + break; + default: + return 0; + } + break; + case hwmon_temp_max: + if (channel || data->show_tdie) + return 0; + break; + case hwmon_temp_crit: + case hwmon_temp_crit_hyst: + if (channel || !data->read_htcreg) + return 0; + + pci_read_config_dword(pdev, + REG_NORTHBRIDGE_CAPABILITIES, + &reg); + if (!(reg & NB_CAP_HTC)) + return 0; + + data->read_htcreg(data->pdev, &reg); + if (!(reg & HTC_ENABLE)) + return 0; + break; + case hwmon_temp_label: + /* No labels if we don't show the die temperature */ + if (!data->show_tdie) + return 0; + switch (channel) { + case 0: /* Tdie */ + case 1: /* Tctl */ + break; + case 2 ... 9: /* Tccd{1-8} */ + if (!(data->show_tccd & BIT(channel - 2))) + return 0; + break; + default: + return 0; + } + break; + default: + return 0; + } break; - case 4 ...
6: /* temp1_label, temp2_input, temp2_label */ - if (!data->show_tdie) + case hwmon_in: + case hwmon_curr: + if (!data->show_current) return 0; break; + default: + return 0; } - return attr->mode; + return 0444; } -static struct attribute *k10temp_attrs[] = { - &dev_attr_temp1_input.attr, - &dev_attr_temp1_max.attr, - &sensor_dev_attr_temp1_crit.dev_attr.attr, - &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, - &sensor_dev_attr_temp1_label.dev_attr.attr, - &dev_attr_temp2_input.attr, - &sensor_dev_attr_temp2_label.dev_attr.attr, - NULL -}; - -static const struct attribute_group k10temp_group = { - .attrs = k10temp_attrs, - .is_visible = k10temp_is_visible, -}; -__ATTRIBUTE_GROUPS(k10temp); - static bool has_erratum_319(struct pci_dev *pdev) { u32 pkg_type, reg_dram_cfg; @@ -281,8 +432,125 @@ static bool has_erratum_319(struct pci_dev *pdev) (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_stepping <= 2); } -static int k10temp_probe(struct pci_dev *pdev, - const struct pci_device_id *id) +#ifdef CONFIG_DEBUG_FS + +static void k10temp_smn_regs_show(struct seq_file *s, struct pci_dev *pdev, + u32 addr, int count) +{ + u32 reg; + int i; + + for (i = 0; i < count; i++) { + if (!(i & 3)) + seq_printf(s, "0x%06x: ", addr + i * 4); + amd_smn_read(amd_pci_dev_to_node_id(pdev), addr + i * 4, &reg); + seq_printf(s, "%08x ", reg); + if ((i & 3) == 3) + seq_puts(s, "\n"); + } +} + +static int svi_show(struct seq_file *s, void *unused) +{ + struct k10temp_data *data = s->private; + + k10temp_smn_regs_show(s, data->pdev, F17H_M01H_SVI, 32); + return 0; +} +DEFINE_SHOW_ATTRIBUTE(svi); + +static int thm_show(struct seq_file *s, void *unused) +{ + struct k10temp_data *data = s->private; + + k10temp_smn_regs_show(s, data->pdev, + F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, 256); + return 0; +} +DEFINE_SHOW_ATTRIBUTE(thm); + +static void k10temp_debugfs_cleanup(void *ddir) +{ + debugfs_remove_recursive(ddir); +} + +static void k10temp_init_debugfs(struct k10temp_data *data) +{ + struct dentry *debugfs; + char name[32]; + + /* Only show debugfs data for Family 17h/18h CPUs */ + if (!data->show_tdie) + return; + + scnprintf(name, sizeof(name), "k10temp-%s", pci_name(data->pdev)); + + debugfs = debugfs_create_dir(name, NULL); + if (debugfs) { + debugfs_create_file("svi", 0444, debugfs, data, &svi_fops); + debugfs_create_file("thm", 0444, debugfs, data, &thm_fops); + devm_add_action_or_reset(&data->pdev->dev, + k10temp_debugfs_cleanup, debugfs); + } +} + +#else + +static void k10temp_init_debugfs(struct k10temp_data *data) +{ +} + +#endif + +static const struct hwmon_channel_info *k10temp_info[] = { + HWMON_CHANNEL_INFO(temp, + HWMON_T_INPUT | HWMON_T_MAX | + HWMON_T_CRIT | HWMON_T_CRIT_HYST | + HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL), + HWMON_CHANNEL_INFO(in, + HWMON_I_INPUT | HWMON_I_LABEL, + HWMON_I_INPUT | HWMON_I_LABEL), + HWMON_CHANNEL_INFO(curr, + HWMON_C_INPUT | HWMON_C_LABEL, + HWMON_C_INPUT | HWMON_C_LABEL), + NULL +}; + +static const struct hwmon_ops k10temp_hwmon_ops = { + .is_visible = k10temp_is_visible, + .read = k10temp_read, + .read_string = k10temp_read_labels, +}; + +static const struct hwmon_chip_info k10temp_chip_info = { + .ops = &k10temp_hwmon_ops, + .info = k10temp_info, +}; +
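Note that the converted k10temp now follows the same registration pattern as the other hwmon drivers added in this series (adm1177, drivetemp, max31730): a const struct hwmon_ops carrying the is_visible/read callbacks, a channel table built with HWMON_CHANNEL_INFO(), and a single devm_hwmon_device_register_with_info() call from probe. A minimal sketch of the pattern, using a hypothetical "foo" driver with one read-only temperature channel:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/hwmon.h>

static int foo_read(struct device *dev, enum hwmon_sensor_types type,
		    u32 attr, int channel, long *val)
{
	if (type != hwmon_temp || attr != hwmon_temp_input)
		return -EOPNOTSUPP;
	*val = 42000;	/* temp1_input, millidegrees C */
	return 0;
}

static umode_t foo_is_visible(const void *data, enum hwmon_sensor_types type,
			      u32 attr, int channel)
{
	return 0444;	/* everything declared below is read-only */
}

static const struct hwmon_ops foo_hwmon_ops = {
	.is_visible = foo_is_visible,
	.read = foo_read,
};

static const struct hwmon_channel_info *foo_info[] = {
	HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
	NULL
};

static const struct hwmon_chip_info foo_chip_info = {
	.ops = &foo_hwmon_ops,
	.info = foo_info,
};

/* in foo_probe():
 *	hwmon_dev = devm_hwmon_device_register_with_info(dev, "foo", data,
 *							 &foo_chip_info, NULL);
 *	return PTR_ERR_OR_ZERO(hwmon_dev);
 */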
+static void k10temp_get_ccd_support(struct pci_dev *pdev, + struct k10temp_data *data, int limit) +{ + u32 regval; + int i; + + for (i = 0; i < limit; i++) { + amd_smn_read(amd_pci_dev_to_node_id(pdev), + F17H_M70H_CCD_TEMP(i), &regval); + if (regval & F17H_M70H_CCD_TEMP_VALID) + data->show_tccd |= BIT(i); + } +} + +static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int unreliable = has_erratum_319(pdev); struct device *dev = &pdev->dev; @@ -312,9 +580,32 @@ static int k10temp_probe(struct pci_dev *pdev, data->read_htcreg = read_htcreg_nb_f15; data->read_tempreg = read_tempreg_nb_f15; } else if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) { - data->temp_adjust_mask = 0x80000; + data->temp_adjust_mask = CUR_TEMP_RANGE_SEL_MASK; data->read_tempreg = read_tempreg_nb_f17; data->show_tdie = true; + + switch (boot_cpu_data.x86_model) { + case 0x1: /* Zen */ + case 0x8: /* Zen+ */ + case 0x11: /* Zen APU */ + case 0x18: /* Zen+ APU */ + data->show_current = !is_threadripper() && !is_epyc(); + data->svi_addr[0] = F17H_M01H_SVI_TEL_PLANE0; + data->svi_addr[1] = F17H_M01H_SVI_TEL_PLANE1; + data->cfactor[0] = CFACTOR_ICORE; + data->cfactor[1] = CFACTOR_ISOC; + k10temp_get_ccd_support(pdev, data, 4); + break; + case 0x31: /* Zen2 Threadripper */ + case 0x71: /* Zen2 */ + data->show_current = !is_threadripper() && !is_epyc(); + data->cfactor[0] = CFACTOR_ICORE; + data->cfactor[1] = CFACTOR_ISOC; + data->svi_addr[0] = F17H_M01H_SVI_TEL_PLANE1; + data->svi_addr[1] = F17H_M01H_SVI_TEL_PLANE0; + k10temp_get_ccd_support(pdev, data, 8); + break; + } } else { data->read_htcreg = read_htcreg_pci; data->read_tempreg = read_tempreg_pci; @@ -330,9 +621,15 @@ static int k10temp_probe(struct pci_dev *pdev, } } - hwmon_dev = devm_hwmon_device_register_with_groups(dev, "k10temp", data, - k10temp_groups); - return PTR_ERR_OR_ZERO(hwmon_dev); + hwmon_dev = devm_hwmon_device_register_with_info(dev, "k10temp", data, + &k10temp_chip_info, + NULL); + if (IS_ERR(hwmon_dev)) + return PTR_ERR(hwmon_dev); + + k10temp_init_debugfs(data); + + return 0; } static const struct pci_device_id k10temp_id_table[] = { diff --git a/drivers/hwmon/max31730.c b/drivers/hwmon/max31730.c new file mode 100644 index 000000000000..eb22a34dc36b --- /dev/null +++ b/drivers/hwmon/max31730.c @@ -0,0 +1,440 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Driver for MAX31730 3-Channel Remote Temperature Sensor + * + * Copyright (c) 2019 Guenter Roeck <linux@roeck-us.net> + */ + +#include <linux/bits.h> +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/init.h> +#include <linux/hwmon.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/of.h> +#include <linux/slab.h> + +/* Addresses scanned */ +static const unsigned short normal_i2c[] = { 0x1c, 0x1d, 0x1e, 0x1f, 0x4c, + 0x4d, 0x4e, 0x4f, I2C_CLIENT_END }; + +/* The MAX31730 registers */ +#define MAX31730_REG_TEMP 0x00 +#define MAX31730_REG_CONF 0x13 +#define MAX31730_STOP BIT(7) +#define MAX31730_EXTRANGE BIT(1) +#define MAX31730_REG_TEMP_OFFSET 0x16 +#define MAX31730_TEMP_OFFSET_BASELINE 0x77 +#define MAX31730_REG_OFFSET_ENABLE 0x17 +#define MAX31730_REG_TEMP_MAX 0x20 +#define MAX31730_REG_TEMP_MIN 0x30 +#define MAX31730_REG_STATUS_HIGH 0x32 +#define MAX31730_REG_STATUS_LOW 0x33 +#define MAX31730_REG_CHANNEL_ENABLE 0x35 +#define MAX31730_REG_TEMP_FAULT 0x36 + +#define MAX31730_REG_MFG_ID 0x50 +#define MAX31730_MFG_ID 0x4d +#define MAX31730_REG_MFG_REV 0x51 +#define MAX31730_MFG_REV 0x01 + +#define MAX31730_TEMP_MIN (-128000) +#define MAX31730_TEMP_MAX 127937 + +/*
Each client has this additional data */ +struct max31730_data { + struct i2c_client *client; + u8 orig_conf; + u8 current_conf; + u8 offset_enable; + u8 channel_enable; +}; + +/*-----------------------------------------------------------------------*/ + +static inline long max31730_reg_to_mc(s16 temp) +{ + return DIV_ROUND_CLOSEST((temp >> 4) * 1000, 16); +} + +static int max31730_write_config(struct max31730_data *data, u8 set_mask, + u8 clr_mask) +{ + u8 value; + + clr_mask |= MAX31730_EXTRANGE; + value = data->current_conf & ~clr_mask; + value |= set_mask; + + if (data->current_conf != value) { + s32 err; + + err = i2c_smbus_write_byte_data(data->client, MAX31730_REG_CONF, + value); + if (err) + return err; + data->current_conf = value; + } + return 0; +} + +static int max31730_set_enable(struct i2c_client *client, int reg, + u8 *confdata, int channel, bool enable) +{ + u8 regval = *confdata; + int err; + + if (enable) + regval |= BIT(channel); + else + regval &= ~BIT(channel); + + if (regval != *confdata) { + err = i2c_smbus_write_byte_data(client, reg, regval); + if (err) + return err; + *confdata = regval; + } + return 0; +} + +static int max31730_set_offset_enable(struct max31730_data *data, int channel, + bool enable) +{ + return max31730_set_enable(data->client, MAX31730_REG_OFFSET_ENABLE, + &data->offset_enable, channel, enable); +} + +static int max31730_set_channel_enable(struct max31730_data *data, int channel, + bool enable) +{ + return max31730_set_enable(data->client, MAX31730_REG_CHANNEL_ENABLE, + &data->channel_enable, channel, enable); +} + +static int max31730_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + struct max31730_data *data = dev_get_drvdata(dev); + int regval, reg, offset; + + if (type != hwmon_temp) + return -EINVAL; + + switch (attr) { + case hwmon_temp_input: + if (!(data->channel_enable & BIT(channel))) + return -ENODATA; + reg = MAX31730_REG_TEMP + (channel * 2); + break; + case hwmon_temp_max: + reg = MAX31730_REG_TEMP_MAX + (channel * 2); + break; + case hwmon_temp_min: + reg = MAX31730_REG_TEMP_MIN; + break; + case hwmon_temp_enable: + *val = !!(data->channel_enable & BIT(channel)); + return 0; + case hwmon_temp_offset: + if (!channel) + return -EINVAL; + if (!(data->offset_enable & BIT(channel))) { + *val = 0; + return 0; + } + offset = i2c_smbus_read_byte_data(data->client, + MAX31730_REG_TEMP_OFFSET); + if (offset < 0) + return offset; + *val = (offset - MAX31730_TEMP_OFFSET_BASELINE) * 125; + return 0; + case hwmon_temp_fault: + regval = i2c_smbus_read_byte_data(data->client, + MAX31730_REG_TEMP_FAULT); + if (regval < 0) + return regval; + *val = !!(regval & BIT(channel)); + return 0; + case hwmon_temp_min_alarm: + regval = i2c_smbus_read_byte_data(data->client, + MAX31730_REG_STATUS_LOW); + if (regval < 0) + return regval; + *val = !!(regval & BIT(channel)); + return 0; + case hwmon_temp_max_alarm: + regval = i2c_smbus_read_byte_data(data->client, + MAX31730_REG_STATUS_HIGH); + if (regval < 0) + return regval; + *val = !!(regval & BIT(channel)); + return 0; + default: + return -EINVAL; + } + regval = i2c_smbus_read_word_swapped(data->client, reg); + if (regval < 0) + return regval; + + *val = max31730_reg_to_mc(regval); + + return 0; +} + +static int max31730_write(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long val) +{ + struct max31730_data *data = dev_get_drvdata(dev); + int reg, err; + + if (type != hwmon_temp) + return -EINVAL; + + switch (attr) { + case 
hwmon_temp_max: + reg = MAX31730_REG_TEMP_MAX + channel * 2; + break; + case hwmon_temp_min: + reg = MAX31730_REG_TEMP_MIN; + break; + case hwmon_temp_enable: + if (val != 0 && val != 1) + return -EINVAL; + return max31730_set_channel_enable(data, channel, val); + case hwmon_temp_offset: + val = clamp_val(val, -14875, 17000) + 14875; + val = DIV_ROUND_CLOSEST(val, 125); + err = max31730_set_offset_enable(data, channel, + val != MAX31730_TEMP_OFFSET_BASELINE); + if (err) + return err; + return i2c_smbus_write_byte_data(data->client, + MAX31730_REG_TEMP_OFFSET, val); + default: + return -EINVAL; + } + + val = clamp_val(val, MAX31730_TEMP_MIN, MAX31730_TEMP_MAX); + val = DIV_ROUND_CLOSEST(val << 4, 1000) << 4; + + return i2c_smbus_write_word_swapped(data->client, reg, (u16)val); +} + +static umode_t max31730_is_visible(const void *data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + switch (type) { + case hwmon_temp: + switch (attr) { + case hwmon_temp_input: + case hwmon_temp_min_alarm: + case hwmon_temp_max_alarm: + case hwmon_temp_fault: + return 0444; + case hwmon_temp_min: + return channel ? 0444 : 0644; + case hwmon_temp_offset: + case hwmon_temp_enable: + case hwmon_temp_max: + return 0644; + } + break; + default: + break; + } + return 0; +} + +static const struct hwmon_channel_info *max31730_info[] = { + HWMON_CHANNEL_INFO(chip, + HWMON_C_REGISTER_TZ), + HWMON_CHANNEL_INFO(temp, + HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX | + HWMON_T_ENABLE | + HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM, + HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX | + HWMON_T_OFFSET | HWMON_T_ENABLE | + HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM | + HWMON_T_FAULT, + HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX | + HWMON_T_OFFSET | HWMON_T_ENABLE | + HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM | + HWMON_T_FAULT, + HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX | + HWMON_T_OFFSET | HWMON_T_ENABLE | + HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM | + HWMON_T_FAULT + ), + NULL +}; + +static const struct hwmon_ops max31730_hwmon_ops = { + .is_visible = max31730_is_visible, + .read = max31730_read, + .write = max31730_write, +}; + +static const struct hwmon_chip_info max31730_chip_info = { + .ops = &max31730_hwmon_ops, + .info = max31730_info, +}; + +static void max31730_remove(void *data) +{ + struct max31730_data *max31730 = data; + struct i2c_client *client = max31730->client; + + i2c_smbus_write_byte_data(client, MAX31730_REG_CONF, + max31730->orig_conf); +} + +static int +max31730_probe(struct i2c_client *client, const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + struct device *hwmon_dev; + struct max31730_data *data; + int status, err; + + if (!i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA)) + return -EIO; + + data = devm_kzalloc(dev, sizeof(struct max31730_data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->client = client; + + /* Cache original configuration and enable status */ + status = i2c_smbus_read_byte_data(client, MAX31730_REG_CHANNEL_ENABLE); + if (status < 0) + return status; + data->channel_enable = status; + + status = i2c_smbus_read_byte_data(client, MAX31730_REG_OFFSET_ENABLE); + if (status < 0) + return status; + data->offset_enable = status; + + status = i2c_smbus_read_byte_data(client, MAX31730_REG_CONF); + if (status < 0) + return status; + data->orig_conf = status; + data->current_conf = status; + + err = max31730_write_config(data, + data->channel_enable ? 0 : MAX31730_STOP, + data->channel_enable ? 
MAX31730_STOP : 0); + if (err) + return err; + + dev_set_drvdata(dev, data); + + err = devm_add_action_or_reset(dev, max31730_remove, data); + if (err) + return err; + + hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, + data, + &max31730_chip_info, + NULL); + return PTR_ERR_OR_ZERO(hwmon_dev); +} + +static const struct i2c_device_id max31730_ids[] = { + { "max31730", 0, }, + { } +}; +MODULE_DEVICE_TABLE(i2c, max31730_ids); + +static const struct of_device_id __maybe_unused max31730_of_match[] = { + { + .compatible = "maxim,max31730", + }, + { }, +}; +MODULE_DEVICE_TABLE(of, max31730_of_match); + +static bool max31730_check_reg_temp(struct i2c_client *client, + int reg) +{ + int regval; + + regval = i2c_smbus_read_byte_data(client, reg + 1); + return regval < 0 || (regval & 0x0f); +} + +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int max31730_detect(struct i2c_client *client, + struct i2c_board_info *info) +{ + struct i2c_adapter *adapter = client->adapter; + int regval; + int i; + + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | + I2C_FUNC_SMBUS_WORD_DATA)) + return -ENODEV; + + regval = i2c_smbus_read_byte_data(client, MAX31730_REG_MFG_ID); + if (regval != MAX31730_MFG_ID) + return -ENODEV; + regval = i2c_smbus_read_byte_data(client, MAX31730_REG_MFG_REV); + if (regval != MAX31730_MFG_REV) + return -ENODEV; + + /* lower 4 bits of temperature and limit registers must be 0 */ + if (max31730_check_reg_temp(client, MAX31730_REG_TEMP_MIN)) + return -ENODEV; + + for (i = 0; i < 4; i++) { + if (max31730_check_reg_temp(client, MAX31730_REG_TEMP + i * 2)) + return -ENODEV; + if (max31730_check_reg_temp(client, + MAX31730_REG_TEMP_MAX + i * 2)) + return -ENODEV; + } + + strlcpy(info->type, "max31730", I2C_NAME_SIZE); + + return 0; +} + +static int __maybe_unused max31730_suspend(struct device *dev) +{ + struct max31730_data *data = dev_get_drvdata(dev); + + return max31730_write_config(data, MAX31730_STOP, 0); +} + +static int __maybe_unused max31730_resume(struct device *dev) +{ + struct max31730_data *data = dev_get_drvdata(dev); + + return max31730_write_config(data, 0, MAX31730_STOP); +} + +static SIMPLE_DEV_PM_OPS(max31730_pm_ops, max31730_suspend, max31730_resume); + +static struct i2c_driver max31730_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "max31730", + .of_match_table = of_match_ptr(max31730_of_match), + .pm = &max31730_pm_ops, + }, + .probe = max31730_probe, + .id_table = max31730_ids, + .detect = max31730_detect, + .address_list = normal_i2c, +}; + +module_i2c_driver(max31730_driver); + +MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>"); +MODULE_DESCRIPTION("MAX31730 driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c index f3dd2a17bd42..2e97e56c72c7 100644 --- a/drivers/hwmon/nct7802.c +++ b/drivers/hwmon/nct7802.c @@ -23,8 +23,8 @@ static const u8 REG_VOLTAGE[5] = { 0x09, 0x0a, 0x0c, 0x0d, 0x0e }; static const u8 REG_VOLTAGE_LIMIT_LSB[2][5] = { - { 0x40, 0x00, 0x42, 0x44, 0x46 }, - { 0x3f, 0x00, 0x41, 0x43, 0x45 }, + { 0x46, 0x00, 0x40, 0x42, 0x44 }, + { 0x45, 0x00, 0x3f, 0x41, 0x43 }, }; static const u8 REG_VOLTAGE_LIMIT_MSB[5] = { 0x48, 0x00, 0x47, 0x47, 0x48 }; @@ -58,6 +58,8 @@ static const u8 REG_VOLTAGE_LIMIT_MSB_SHIFT[2][5] = { struct nct7802_data { struct regmap *regmap; struct mutex access_lock; /* for multi-byte read and write operations */ + u8 in_status; + struct mutex in_alarm_lock; }; static ssize_t temp_type_show(struct device *dev, @@ -368,6
+370,66 @@ static ssize_t in_store(struct device *dev, struct device_attribute *attr, return err ? : count; } +static ssize_t in_alarm_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); + struct nct7802_data *data = dev_get_drvdata(dev); + int volt, min, max, ret; + unsigned int val; + + mutex_lock(&data->in_alarm_lock); + + /* + * The SMI Voltage status register is the only register giving a status + * for voltages. A bit is set for each input crossing a threshold, in + * both directions, but the "inside" or "outside" limits info is not + * available. Also this register is cleared on read. + * Note: this is not explicitly spelled out in the datasheet, but + * was determined by experiment. + * To deal with this we use a status cache with one validity bit and + * one status bit for each input. Validity is cleared at startup and + * each time the register reports a change, and the status is processed + * by software based on current input value and limits. + */ + ret = regmap_read(data->regmap, 0x1e, &val); /* SMI Voltage status */ + if (ret < 0) + goto abort; + + /* invalidate cached status for all inputs crossing a threshold */ + data->in_status &= ~((val & 0x0f) << 4); + + /* if cached status for requested input is invalid, update it */ + if (!(data->in_status & (0x10 << sattr->index))) { + ret = nct7802_read_voltage(data, sattr->nr, 0); + if (ret < 0) + goto abort; + volt = ret; + + ret = nct7802_read_voltage(data, sattr->nr, 1); + if (ret < 0) + goto abort; + min = ret; + + ret = nct7802_read_voltage(data, sattr->nr, 2); + if (ret < 0) + goto abort; + max = ret; + + if (volt < min || volt > max) + data->in_status |= (1 << sattr->index); + else + data->in_status &= ~(1 << sattr->index); + + data->in_status |= 0x10 << sattr->index; + } + + ret = sprintf(buf, "%u\n", !!(data->in_status & (1 << sattr->index))); +abort: + mutex_unlock(&data->in_alarm_lock); + return ret; +} + static ssize_t temp_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -660,7 +722,7 @@ static const struct attribute_group nct7802_temp_group = { static SENSOR_DEVICE_ATTR_2_RO(in0_input, in, 0, 0); static SENSOR_DEVICE_ATTR_2_RW(in0_min, in, 0, 1); static SENSOR_DEVICE_ATTR_2_RW(in0_max, in, 0, 2); -static SENSOR_DEVICE_ATTR_2_RO(in0_alarm, alarm, 0x1e, 3); +static SENSOR_DEVICE_ATTR_2_RO(in0_alarm, in_alarm, 0, 3); static SENSOR_DEVICE_ATTR_2_RW(in0_beep, beep, 0x5a, 3); static SENSOR_DEVICE_ATTR_2_RO(in1_input, in, 1, 0); @@ -668,19 +730,19 @@ static SENSOR_DEVICE_ATTR_2_RO(in1_input, in, 1, 0); static SENSOR_DEVICE_ATTR_2_RO(in2_input, in, 2, 0); static SENSOR_DEVICE_ATTR_2_RW(in2_min, in, 2, 1); static SENSOR_DEVICE_ATTR_2_RW(in2_max, in, 2, 2); -static SENSOR_DEVICE_ATTR_2_RO(in2_alarm, alarm, 0x1e, 0); +static SENSOR_DEVICE_ATTR_2_RO(in2_alarm, in_alarm, 2, 0); static SENSOR_DEVICE_ATTR_2_RW(in2_beep, beep, 0x5a, 0); static SENSOR_DEVICE_ATTR_2_RO(in3_input, in, 3, 0); static SENSOR_DEVICE_ATTR_2_RW(in3_min, in, 3, 1); static SENSOR_DEVICE_ATTR_2_RW(in3_max, in, 3, 2); -static SENSOR_DEVICE_ATTR_2_RO(in3_alarm, alarm, 0x1e, 1); +static SENSOR_DEVICE_ATTR_2_RO(in3_alarm, in_alarm, 3, 1); static SENSOR_DEVICE_ATTR_2_RW(in3_beep, beep, 0x5a, 1); static SENSOR_DEVICE_ATTR_2_RO(in4_input, in, 4, 0); static SENSOR_DEVICE_ATTR_2_RW(in4_min, in, 4, 1); static SENSOR_DEVICE_ATTR_2_RW(in4_max, in, 4, 2); -static SENSOR_DEVICE_ATTR_2_RO(in4_alarm, alarm, 0x1e, 2); +static SENSOR_DEVICE_ATTR_2_RO(in4_alarm, in_alarm, 4, 2);
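The cache protocol implemented by in_alarm_show() above is easy to misread in diff form, so here is the same idea as a minimal standalone sketch. The function name in_alarm_cached and the out_of_limits parameter (which stands in for the three nct7802_read_voltage() calls) are illustrative assumptions, not driver code; the bit layout matches the driver: bits 0-3 cache the computed alarm state, bits 4-7 mark which of those states are still valid.

	/*
	 * Standalone model of the in_alarm status cache. smi_status is the
	 * freshly read (and thereby cleared) SMI Voltage status register;
	 * index selects the input. Assumes <linux/bits.h> for BIT().
	 */
	static bool in_alarm_cached(u8 *cache, u8 smi_status, int index,
				    bool out_of_limits)
	{
		/* any input flagged in the status register loses its validity bit */
		*cache &= ~((smi_status & 0x0f) << 4);

		/* recompute and revalidate the state if it is no longer valid */
		if (!(*cache & (0x10 << index))) {
			if (out_of_limits)
				*cache |= BIT(index);
			else
				*cache &= ~BIT(index);
			*cache |= 0x10 << index;
		}
		return *cache & BIT(index);
	}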
static SENSOR_DEVICE_ATTR_2_RW(in4_beep, beep, 0x5a, 2); static struct attribute *nct7802_in_attrs[] = { @@ -1011,6 +1073,7 @@ static int nct7802_probe(struct i2c_client *client, return PTR_ERR(data->regmap); mutex_init(&data->access_lock); + mutex_init(&data->in_alarm_lock); ret = nct7802_init_chip(data); if (ret < 0) diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig index 59859979571d..a9ea06204767 100644 --- a/drivers/hwmon/pmbus/Kconfig +++ b/drivers/hwmon/pmbus/Kconfig @@ -20,8 +20,8 @@ config SENSORS_PMBUS help If you say yes here you get hardware monitoring support for generic PMBus devices, including but not limited to ADP4000, BMR453, BMR454, - MDT040, NCP4200, NCP4208, PDT003, PDT006, PDT012, TPS40400, TPS544B20, - TPS544B25, TPS544C20, TPS544C25, and UDT020. + MAX20796, MDT040, NCP4200, NCP4208, PDT003, PDT006, PDT012, TPS40400, + TPS544B20, TPS544B25, TPS544C20, TPS544C25, and UDT020. This driver can also be built as a module. If so, the module will be called pmbus. @@ -145,6 +145,15 @@ config SENSORS_MAX16064 This driver can also be built as a module. If so, the module will be called max16064. +config SENSORS_MAX20730 + tristate "Maxim MAX20730, MAX20734, MAX20743" + help + If you say yes here you get hardware monitoring support for Maxim + MAX20730, MAX20734, and MAX20743. + + This driver can also be built as a module. If so, the module will + be called max20730. + config SENSORS_MAX20751 tristate "Maxim MAX20751" help @@ -200,20 +209,20 @@ config SENSORS_TPS40422 be called tps40422. config SENSORS_TPS53679 - tristate "TI TPS53679" + tristate "TI TPS53679, TPS53688" help If you say yes here you get hardware monitoring support for TI - TPS53679. + TPS53679 and TPS53688. This driver can also be built as a module. If so, the module will be called tps53679. config SENSORS_UCD9000 - tristate "TI UCD90120, UCD90124, UCD90160, UCD9090, UCD90910" + tristate "TI UCD90120, UCD90124, UCD90160, UCD90320, UCD9090, UCD90910" help If you say yes here you get hardware monitoring support for TI - UCD90120, UCD90124, UCD90160, UCD9090, UCD90910, Sequencer and System - Health Controllers. + UCD90120, UCD90124, UCD90160, UCD90320, UCD9090, UCD90910, Sequencer + and System Health Controllers. This driver can also be built as a module. If so, the module will be called ucd9000. @@ -228,6 +237,15 @@ config SENSORS_UCD9200 This driver can also be built as a module. If so, the module will be called ucd9200. +config SENSORS_XDPE122 + tristate "Infineon XDPE122 family" + help + If you say yes here you get hardware monitoring support for Infineon + XDPE12254 and XDPE12284 devices. + + This driver can also be built as a module. If so, the module will + be called xdpe12284.
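The new SENSORS_MAX20730 entry above selects a driver (added later in this patch) whose limit handling uses the PMBus "direct" data format, Y = (m * X + b) * 10^R. As a worked example of the encoding done by val_to_direct() below, using the MAX20730 temperature coefficients quoted there (m = 21, b = 5887, R = -1, per Maxim AN6042) and the driver's milli-unit handling (R - 3, b * 1000); this is a standalone sketch for illustration, not kernel code:

	#include <stdio.h>

	/* Encode 150 degrees C (150000 millidegrees) as a PMBus direct word. */
	int main(void)
	{
		long v = 150000;		/* value in milli-units */
		long m = 21;
		long b = 5887 * 1000L;		/* b scaled to milli-units */
		int R = -1 - 3;			/* 10^R, with milli-units folded in */
		long d = v * m + b;		/* 9037000 */

		while (R < 0) {			/* apply 10^R with rounding */
			d = (d + 5) / 10;	/* like DIV_ROUND_CLOSEST(d, 10) */
			R++;
		}
		printf("%ld\n", d);		/* prints 904 */
		return 0;
	}

Decoding goes the other way, X = (Y * 10^-R - b) / m, which is what direct_to_val() below implements before comparing a written limit against the max_current table.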
+ config SENSORS_ZL6100 tristate "Intersil ZL6100 and compatibles" help diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile index 3f8c1014938b..5feb45806123 100644 --- a/drivers/hwmon/pmbus/Makefile +++ b/drivers/hwmon/pmbus/Makefile @@ -17,6 +17,7 @@ obj-$(CONFIG_SENSORS_LM25066) += lm25066.o obj-$(CONFIG_SENSORS_LTC2978) += ltc2978.o obj-$(CONFIG_SENSORS_LTC3815) += ltc3815.o obj-$(CONFIG_SENSORS_MAX16064) += max16064.o +obj-$(CONFIG_SENSORS_MAX20730) += max20730.o obj-$(CONFIG_SENSORS_MAX20751) += max20751.o obj-$(CONFIG_SENSORS_MAX31785) += max31785.o obj-$(CONFIG_SENSORS_MAX34440) += max34440.o @@ -26,4 +27,5 @@ obj-$(CONFIG_SENSORS_TPS40422) += tps40422.o obj-$(CONFIG_SENSORS_TPS53679) += tps53679.o obj-$(CONFIG_SENSORS_UCD9000) += ucd9000.o obj-$(CONFIG_SENSORS_UCD9200) += ucd9200.o +obj-$(CONFIG_SENSORS_XDPE122) += xdpe12284.o obj-$(CONFIG_SENSORS_ZL6100) += zl6100.o diff --git a/drivers/hwmon/pmbus/ibm-cffps.c b/drivers/hwmon/pmbus/ibm-cffps.c index d359b76bcb36..3795fe55b84f 100644 --- a/drivers/hwmon/pmbus/ibm-cffps.c +++ b/drivers/hwmon/pmbus/ibm-cffps.c @@ -20,12 +20,15 @@ #define CFFPS_FRU_CMD 0x9A #define CFFPS_PN_CMD 0x9B +#define CFFPS_HEADER_CMD 0x9C #define CFFPS_SN_CMD 0x9E +#define CFFPS_MAX_POWER_OUT_CMD 0xA7 #define CFFPS_CCIN_CMD 0xBD #define CFFPS_FW_CMD 0xFA #define CFFPS1_FW_NUM_BYTES 4 #define CFFPS2_FW_NUM_WORDS 3 #define CFFPS_SYS_CONFIG_CMD 0xDA +#define CFFPS_12VCS_VOUT_CMD 0xDE #define CFFPS_INPUT_HISTORY_CMD 0xD6 #define CFFPS_INPUT_HISTORY_SIZE 100 @@ -44,22 +47,21 @@ #define CFFPS_MFR_VAUX_FAULT BIT(6) #define CFFPS_MFR_CURRENT_SHARE_WARNING BIT(7) -/* - * LED off state actually relinquishes LED control to PSU firmware, so it can - * turn on the LED for faults. - */ -#define CFFPS_LED_OFF 0 #define CFFPS_LED_BLINK BIT(0) #define CFFPS_LED_ON BIT(1) +#define CFFPS_LED_OFF BIT(2) #define CFFPS_BLINK_RATE_MS 250 enum { CFFPS_DEBUGFS_INPUT_HISTORY = 0, CFFPS_DEBUGFS_FRU, CFFPS_DEBUGFS_PN, + CFFPS_DEBUGFS_HEADER, CFFPS_DEBUGFS_SN, + CFFPS_DEBUGFS_MAX_POWER_OUT, CFFPS_DEBUGFS_CCIN, CFFPS_DEBUGFS_FW, + CFFPS_DEBUGFS_ON_OFF_CONFIG, CFFPS_DEBUGFS_NUM_ENTRIES }; @@ -136,15 +138,15 @@ static ssize_t ibm_cffps_read_input_history(struct ibm_cffps *psu, psu->input_history.byte_count); } -static ssize_t ibm_cffps_debugfs_op(struct file *file, char __user *buf, - size_t count, loff_t *ppos) +static ssize_t ibm_cffps_debugfs_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) { u8 cmd; int i, rc; int *idxp = file->private_data; int idx = *idxp; struct ibm_cffps *psu = to_psu(idxp, idx); - char data[I2C_SMBUS_BLOCK_MAX] = { 0 }; + char data[I2C_SMBUS_BLOCK_MAX + 2] = { 0 }; pmbus_set_page(psu->client, 0); @@ -157,9 +159,20 @@ static ssize_t ibm_cffps_debugfs_op(struct file *file, char __user *buf, case CFFPS_DEBUGFS_PN: cmd = CFFPS_PN_CMD; break; + case CFFPS_DEBUGFS_HEADER: + cmd = CFFPS_HEADER_CMD; + break; case CFFPS_DEBUGFS_SN: cmd = CFFPS_SN_CMD; break; + case CFFPS_DEBUGFS_MAX_POWER_OUT: + rc = i2c_smbus_read_word_swapped(psu->client, + CFFPS_MAX_POWER_OUT_CMD); + if (rc < 0) + return rc; + + rc = snprintf(data, I2C_SMBUS_BLOCK_MAX, "%d", rc); + goto done; case CFFPS_DEBUGFS_CCIN: rc = i2c_smbus_read_word_swapped(psu->client, CFFPS_CCIN_CMD); if (rc < 0) @@ -199,6 +212,14 @@ static ssize_t ibm_cffps_debugfs_op(struct file *file, char __user *buf, return -EOPNOTSUPP; } goto done; + case CFFPS_DEBUGFS_ON_OFF_CONFIG: + rc = i2c_smbus_read_byte_data(psu->client, + PMBUS_ON_OFF_CONFIG); + if (rc < 0) + return rc; + + rc = 
snprintf(data, 3, "%02x", rc); + goto done; default: return -EINVAL; } @@ -214,9 +235,42 @@ done: return simple_read_from_buffer(buf, count, ppos, data, rc); } +static ssize_t ibm_cffps_debugfs_write(struct file *file, + const char __user *buf, size_t count, + loff_t *ppos) +{ + u8 data; + ssize_t rc; + int *idxp = file->private_data; + int idx = *idxp; + struct ibm_cffps *psu = to_psu(idxp, idx); + + switch (idx) { + case CFFPS_DEBUGFS_ON_OFF_CONFIG: + pmbus_set_page(psu->client, 0); + + rc = simple_write_to_buffer(&data, 1, ppos, buf, count); + if (rc <= 0) + return rc; + + rc = i2c_smbus_write_byte_data(psu->client, + PMBUS_ON_OFF_CONFIG, data); + if (rc) + return rc; + + rc = 1; + break; + default: + return -EINVAL; + } + + return rc; +} + static const struct file_operations ibm_cffps_fops = { .llseek = noop_llseek, - .read = ibm_cffps_debugfs_op, + .read = ibm_cffps_debugfs_read, + .write = ibm_cffps_debugfs_write, .open = simple_open, }; @@ -293,6 +347,9 @@ static int ibm_cffps_read_word_data(struct i2c_client *client, int page, if (mfr & CFFPS_MFR_PS_KILL) rc |= PB_STATUS_OFF; break; + case PMBUS_VIRT_READ_VMON: + rc = pmbus_read_word_data(client, page, CFFPS_12VCS_VOUT_CMD); + break; default: rc = -ENODATA; break; @@ -375,6 +432,9 @@ static void ibm_cffps_create_led_class(struct ibm_cffps *psu) rc = devm_led_classdev_register(dev, &psu->led); if (rc) dev_warn(dev, "failed to register led class: %d\n", rc); + else + i2c_smbus_write_byte_data(client, CFFPS_SYS_CONFIG_CMD, + CFFPS_LED_OFF); } static struct pmbus_driver_info ibm_cffps_info[] = { @@ -396,7 +456,7 @@ static struct pmbus_driver_info ibm_cffps_info[] = { PMBUS_HAVE_TEMP2 | PMBUS_HAVE_TEMP3 | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_STATUS_TEMP | - PMBUS_HAVE_STATUS_FAN12, + PMBUS_HAVE_STATUS_FAN12 | PMBUS_HAVE_VMON, .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT | PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 | PMBUS_HAVE_TEMP3 | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT, @@ -486,15 +546,24 @@ static int ibm_cffps_probe(struct i2c_client *client, debugfs_create_file("part_number", 0444, ibm_cffps_dir, &psu->debugfs_entries[CFFPS_DEBUGFS_PN], &ibm_cffps_fops); + debugfs_create_file("header", 0444, ibm_cffps_dir, + &psu->debugfs_entries[CFFPS_DEBUGFS_HEADER], + &ibm_cffps_fops); debugfs_create_file("serial_number", 0444, ibm_cffps_dir, &psu->debugfs_entries[CFFPS_DEBUGFS_SN], &ibm_cffps_fops); + debugfs_create_file("max_power_out", 0444, ibm_cffps_dir, + &psu->debugfs_entries[CFFPS_DEBUGFS_MAX_POWER_OUT], + &ibm_cffps_fops); debugfs_create_file("ccin", 0444, ibm_cffps_dir, &psu->debugfs_entries[CFFPS_DEBUGFS_CCIN], &ibm_cffps_fops); debugfs_create_file("fw_version", 0444, ibm_cffps_dir, &psu->debugfs_entries[CFFPS_DEBUGFS_FW], &ibm_cffps_fops); + debugfs_create_file("on_off_config", 0644, ibm_cffps_dir, + &psu->debugfs_entries[CFFPS_DEBUGFS_ON_OFF_CONFIG], + &ibm_cffps_fops); return 0; } diff --git a/drivers/hwmon/pmbus/max20730.c b/drivers/hwmon/pmbus/max20730.c new file mode 100644 index 000000000000..294e2212f61e --- /dev/null +++ b/drivers/hwmon/pmbus/max20730.c @@ -0,0 +1,372 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Driver for MAX20730, MAX20734, and MAX20743 Integrated, Step-Down + * Switching Regulators + * + * Copyright 2019 Google LLC. 
+ */ + +#include <linux/bits.h> +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of_device.h> +#include <linux/pmbus.h> +#include <linux/util_macros.h> +#include "pmbus.h" + +enum chips { + max20730, + max20734, + max20743 +}; + +struct max20730_data { + enum chips id; + struct pmbus_driver_info info; + struct mutex lock; /* Used to protect against parallel writes */ + u16 mfr_devset1; +}; + +#define to_max20730_data(x) container_of(x, struct max20730_data, info) + +#define MAX20730_MFR_DEVSET1 0xd2 + +/* + * Convert discrete value to direct data format. Strictly speaking, all passed + * values are constants, so we could do that calculation manually. On the + * downside, that would make the driver more difficult to maintain, so let's + * use this approach. + */ +static u16 val_to_direct(int v, enum pmbus_sensor_classes class, + const struct pmbus_driver_info *info) +{ + int R = info->R[class] - 3; /* take milli-units into account */ + int b = info->b[class] * 1000; + long d; + + d = v * info->m[class] + b; + /* + * R < 0 is true for all callers, so we don't need to bother + * about the R > 0 case. + */ + while (R < 0) { + d = DIV_ROUND_CLOSEST(d, 10); + R++; + } + return (u16)d; +} + +static long direct_to_val(u16 w, enum pmbus_sensor_classes class, + const struct pmbus_driver_info *info) +{ + int R = info->R[class] - 3; + int b = info->b[class] * 1000; + int m = info->m[class]; + long d = (s16)w; + + if (m == 0) + return 0; + + while (R < 0) { + d *= 10; + R++; + } + d = (d - b) / m; + return d; +} + +static u32 max_current[][5] = { + [max20730] = { 13000, 16600, 20100, 23600 }, + [max20734] = { 21000, 27000, 32000, 38000 }, + [max20743] = { 18900, 24100, 29200, 34100 }, +}; + +static int max20730_read_word_data(struct i2c_client *client, int page, int reg) +{ + const struct pmbus_driver_info *info = pmbus_get_driver_info(client); + const struct max20730_data *data = to_max20730_data(info); + int ret = 0; + u32 max_c; + + switch (reg) { + case PMBUS_OT_FAULT_LIMIT: + switch ((data->mfr_devset1 >> 11) & 0x3) { + case 0x0: + ret = val_to_direct(150000, PSC_TEMPERATURE, info); + break; + case 0x1: + ret = val_to_direct(130000, PSC_TEMPERATURE, info); + break; + default: + ret = -ENODATA; + break; + } + break; + case PMBUS_IOUT_OC_FAULT_LIMIT: + max_c = max_current[data->id][(data->mfr_devset1 >> 5) & 0x3]; + ret = val_to_direct(max_c, PSC_CURRENT_OUT, info); + break; + default: + ret = -ENODATA; + break; + } + return ret; +} + +static int max20730_write_word_data(struct i2c_client *client, int page, + int reg, u16 word) +{ + struct pmbus_driver_info *info; + struct max20730_data *data; + u16 devset1; + int ret = 0; + int idx; + + info = (struct pmbus_driver_info *)pmbus_get_driver_info(client); + data = to_max20730_data(info); + + mutex_lock(&data->lock); + devset1 = data->mfr_devset1; + + switch (reg) { + case PMBUS_OT_FAULT_LIMIT: + devset1 &= ~(BIT(11) | BIT(12)); + if (direct_to_val(word, PSC_TEMPERATURE, info) < 140000) + devset1 |= BIT(11); + break; + case PMBUS_IOUT_OC_FAULT_LIMIT: + devset1 &= ~(BIT(5) | BIT(6)); + + idx = find_closest(direct_to_val(word, PSC_CURRENT_OUT, info), + max_current[data->id], 4); + devset1 |= (idx << 5); + break; + default: + ret = -ENODATA; + break; + } + + if (!ret && devset1 != data->mfr_devset1) { + ret = i2c_smbus_write_word_data(client, MAX20730_MFR_DEVSET1, + devset1); + if (!ret) { + data->mfr_devset1 = devset1; +
pmbus_clear_cache(client); + } + } + mutex_unlock(&data->lock); + return ret; +} + +static const struct pmbus_driver_info max20730_info[] = { + [max20730] = { + .pages = 1, + .read_word_data = max20730_read_word_data, + .write_word_data = max20730_write_word_data, + + /* Source : Maxim AN6042 */ + .format[PSC_TEMPERATURE] = direct, + .m[PSC_TEMPERATURE] = 21, + .b[PSC_TEMPERATURE] = 5887, + .R[PSC_TEMPERATURE] = -1, + + .format[PSC_VOLTAGE_IN] = direct, + .m[PSC_VOLTAGE_IN] = 3609, + .b[PSC_VOLTAGE_IN] = 0, + .R[PSC_VOLTAGE_IN] = -2, + + /* + * Values in the datasheet are adjusted for temperature and + * for the relationship between Vin and Vout. + * Unfortunately, the data sheet suggests that Vout measurement + * may be scaled with a resistor array. This is indeed the case + * at least on the evaluation boards. As a result, any in-driver + * adjustments would either be wrong or require elaborate means + * to configure the scaling. Instead of doing that, just report + * raw values and let userspace handle adjustments. + */ + .format[PSC_CURRENT_OUT] = direct, + .m[PSC_CURRENT_OUT] = 153, + .b[PSC_CURRENT_OUT] = 4976, + .R[PSC_CURRENT_OUT] = -1, + + .format[PSC_VOLTAGE_OUT] = linear, + + .func[0] = PMBUS_HAVE_VIN | + PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | + PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | + PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + }, + [max20734] = { + .pages = 1, + .read_word_data = max20730_read_word_data, + .write_word_data = max20730_write_word_data, + + /* Source : Maxim AN6209 */ + .format[PSC_TEMPERATURE] = direct, + .m[PSC_TEMPERATURE] = 21, + .b[PSC_TEMPERATURE] = 5887, + .R[PSC_TEMPERATURE] = -1, + + .format[PSC_VOLTAGE_IN] = direct, + .m[PSC_VOLTAGE_IN] = 3592, + .b[PSC_VOLTAGE_IN] = 0, + .R[PSC_VOLTAGE_IN] = -2, + + .format[PSC_CURRENT_OUT] = direct, + .m[PSC_CURRENT_OUT] = 111, + .b[PSC_CURRENT_OUT] = 3461, + .R[PSC_CURRENT_OUT] = -1, + + .format[PSC_VOLTAGE_OUT] = linear, + + .func[0] = PMBUS_HAVE_VIN | + PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | + PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | + PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + }, + [max20743] = { + .pages = 1, + .read_word_data = max20730_read_word_data, + .write_word_data = max20730_write_word_data, + + /* Source : Maxim AN6042 */ + .format[PSC_TEMPERATURE] = direct, + .m[PSC_TEMPERATURE] = 21, + .b[PSC_TEMPERATURE] = 5887, + .R[PSC_TEMPERATURE] = -1, + + .format[PSC_VOLTAGE_IN] = direct, + .m[PSC_VOLTAGE_IN] = 3597, + .b[PSC_VOLTAGE_IN] = 0, + .R[PSC_VOLTAGE_IN] = -2, + + .format[PSC_CURRENT_OUT] = direct, + .m[PSC_CURRENT_OUT] = 95, + .b[PSC_CURRENT_OUT] = 5014, + .R[PSC_CURRENT_OUT] = -1, + + .format[PSC_VOLTAGE_OUT] = linear, + + .func[0] = PMBUS_HAVE_VIN | + PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | + PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | + PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + }, +}; + +static int max20730_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + u8 buf[I2C_SMBUS_BLOCK_MAX + 1]; + struct max20730_data *data; + enum chips chip_id; + int ret; + + if (!i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_READ_BYTE_DATA | + I2C_FUNC_SMBUS_READ_WORD_DATA | + I2C_FUNC_SMBUS_BLOCK_DATA)) + return -ENODEV; + + ret = i2c_smbus_read_block_data(client, PMBUS_MFR_ID, buf); + if (ret < 0) { + dev_err(&client->dev, "Failed to read Manufacturer ID\n"); + return ret; + } + if (ret != 5 || strncmp(buf, "MAXIM", 5)) { + buf[ret] = '\0'; + dev_err(dev, "Unsupported Manufacturer ID '%s'\n", buf); + return -ENODEV; + } + + /* + *
The chips support reading PMBUS_MFR_MODEL. On both MAX20730 + * and MAX20734, reading it returns M20743. Presumably that is + * the reason why the command is not documented. Unfortunately, + * that means that there is no reliable means to detect the chip. + * However, we can at least detect the chip series. Compare + * the returned value against 'M20743' and bail out if there is + * a mismatch. If that doesn't work for all chips, we may have + * to remove this check. + */ + ret = i2c_smbus_read_block_data(client, PMBUS_MFR_MODEL, buf); + if (ret < 0) { + dev_err(dev, "Failed to read Manufacturer Model\n"); + return ret; + } + if (ret != 6 || strncmp(buf, "M20743", 6)) { + buf[ret] = '\0'; + dev_err(dev, "Unsupported Manufacturer Model '%s'\n", buf); + return -ENODEV; + } + + ret = i2c_smbus_read_block_data(client, PMBUS_MFR_REVISION, buf); + if (ret < 0) { + dev_err(dev, "Failed to read Manufacturer Revision\n"); + return ret; + } + if (ret != 1 || buf[0] != 'F') { + buf[ret] = '\0'; + dev_err(dev, "Unsupported Manufacturer Revision '%s'\n", buf); + return -ENODEV; + } + + if (client->dev.of_node) + chip_id = (enum chips)of_device_get_match_data(dev); + else + chip_id = id->driver_data; + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + data->id = chip_id; + mutex_init(&data->lock); + memcpy(&data->info, &max20730_info[chip_id], sizeof(data->info)); + + ret = i2c_smbus_read_word_data(client, MAX20730_MFR_DEVSET1); + if (ret < 0) + return ret; + data->mfr_devset1 = ret; + + return pmbus_do_probe(client, id, &data->info); +} + +static const struct i2c_device_id max20730_id[] = { + { "max20730", max20730 }, + { "max20734", max20734 }, + { "max20743", max20743 }, + { }, +}; + +MODULE_DEVICE_TABLE(i2c, max20730_id); + +static const struct of_device_id max20730_of_match[] = { + { .compatible = "maxim,max20730", .data = (void *)max20730 }, + { .compatible = "maxim,max20734", .data = (void *)max20734 }, + { .compatible = "maxim,max20743", .data = (void *)max20743 }, + { }, +}; + +MODULE_DEVICE_TABLE(of, max20730_of_match); + +static struct i2c_driver max20730_driver = { + .driver = { + .name = "max20730", + .of_match_table = max20730_of_match, + }, + .probe = max20730_probe, + .remove = pmbus_do_remove, + .id_table = max20730_id, +}; + +module_i2c_driver(max20730_driver); + +MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>"); +MODULE_DESCRIPTION("PMBus driver for Maxim MAX20730 / MAX20734 / MAX20743"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/pmbus/max20751.c b/drivers/hwmon/pmbus/max20751.c index ee5f0cdbde06..da3c38cb9a5c 100644 --- a/drivers/hwmon/pmbus/max20751.c +++ b/drivers/hwmon/pmbus/max20751.c @@ -16,7 +16,7 @@ static struct pmbus_driver_info max20751_info = { .pages = 1, .format[PSC_VOLTAGE_IN] = linear, .format[PSC_VOLTAGE_OUT] = vid, - .vrm_version = vr12, + .vrm_version[0] = vr12, .format[PSC_TEMPERATURE] = linear, .format[PSC_CURRENT_OUT] = linear, .format[PSC_POWER] = linear, diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c index c0bc43d01018..51e8312b6c2d 100644 --- a/drivers/hwmon/pmbus/pmbus.c +++ b/drivers/hwmon/pmbus/pmbus.c @@ -115,7 +115,7 @@ static int pmbus_identify(struct i2c_client *client, } if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE)) { - int vout_mode; + int vout_mode, i; vout_mode = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE); if (vout_mode >= 0 && vout_mode != 0xff) { @@ -124,7 +124,8 @@ static int pmbus_identify(struct i2c_client *client, break; case 1: 
info->format[PSC_VOLTAGE_OUT] = vid; - info->vrm_version = vr11; + for (i = 0; i < info->pages; i++) + info->vrm_version[i] = vr11; break; case 2: info->format[PSC_VOLTAGE_OUT] = direct; @@ -210,6 +211,7 @@ static const struct i2c_device_id pmbus_id[] = { {"dps460", (kernel_ulong_t)&pmbus_info_one_skip}, {"dps650ab", (kernel_ulong_t)&pmbus_info_one_skip}, {"dps800", (kernel_ulong_t)&pmbus_info_one_skip}, + {"max20796", (kernel_ulong_t)&pmbus_info_one}, {"mdt040", (kernel_ulong_t)&pmbus_info_one}, {"ncp4200", (kernel_ulong_t)&pmbus_info_one}, {"ncp4208", (kernel_ulong_t)&pmbus_info_one}, diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h index d198af3a92b6..13b34bd67f23 100644 --- a/drivers/hwmon/pmbus/pmbus.h +++ b/drivers/hwmon/pmbus/pmbus.h @@ -22,6 +22,8 @@ enum pmbus_regs { PMBUS_CLEAR_FAULTS = 0x03, PMBUS_PHASE = 0x04, + PMBUS_WRITE_PROTECT = 0x10, + PMBUS_CAPABILITY = 0x19, PMBUS_QUERY = 0x1A, @@ -226,6 +228,15 @@ enum pmbus_regs { #define PB_OPERATION_CONTROL_ON BIT(7) /* + * WRITE_PROTECT + */ +#define PB_WP_ALL BIT(7) /* all but WRITE_PROTECT */ +#define PB_WP_OP BIT(6) /* all but WP, OPERATION, PAGE */ +#define PB_WP_VOUT BIT(5) /* all but WP, OPERATION, PAGE, VOUT, ON_OFF */ + +#define PB_WP_ANY (PB_WP_ALL | PB_WP_OP | PB_WP_VOUT) + +/* * CAPABILITY */ #define PB_CAPABILITY_SMBALERT BIT(4) @@ -377,12 +388,12 @@ enum pmbus_sensor_classes { #define PMBUS_PAGE_VIRTUAL BIT(31) enum pmbus_data_format { linear = 0, direct, vid }; -enum vrm_version { vr11 = 0, vr12, vr13 }; +enum vrm_version { vr11 = 0, vr12, vr13, imvp9, amd625mv }; struct pmbus_driver_info { int pages; /* Total number of pages */ enum pmbus_data_format format[PSC_NUM_CLASSES]; - enum vrm_version vrm_version; + enum vrm_version vrm_version[PMBUS_PAGES]; /* vrm version per page */ /* * Support one set of coefficients for each sensor type * Used for chips providing data in direct mode. diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c index 8470097907bc..d9c17feb7b4a 100644 --- a/drivers/hwmon/pmbus/pmbus_core.c +++ b/drivers/hwmon/pmbus/pmbus_core.c @@ -696,7 +696,7 @@ static long pmbus_reg2data_vid(struct pmbus_data *data, long val = sensor->data; long rv = 0; - switch (data->info->vrm_version) { + switch (data->info->vrm_version[sensor->page]) { case vr11: if (val >= 0x02 && val <= 0xb2) rv = DIV_ROUND_CLOSEST(160000 - (val - 2) * 625, 100); @@ -709,6 +709,14 @@ static long pmbus_reg2data_vid(struct pmbus_data *data, if (val >= 0x01) rv = 500 + (val - 1) * 10; break; + case imvp9: + if (val >= 0x01) + rv = 200 + (val - 1) * 10; + break; + case amd625mv: + if (val >= 0x0 && val <= 0xd8) + rv = DIV_ROUND_CLOSEST(155000 - val * 625, 100); + break; } return rv; } @@ -1088,6 +1096,9 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data, snprintf(sensor->name, sizeof(sensor->name), "%s%d", name, seq); + if (data->flags & PMBUS_WRITE_PROTECTED) + readonly = true; + sensor->page = page; sensor->reg = reg; sensor->class = class; @@ -2141,6 +2152,15 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data, if (ret >= 0 && (ret & PB_CAPABILITY_ERROR_CHECK)) client->flags |= I2C_CLIENT_PEC; + /* + * Check if the chip is write protected. If it is, we cannot clear + * faults, and we should not try it. Also, in that case, writes into + * limit registers need to be disabled.
+ */ + ret = i2c_smbus_read_byte_data(client, PMBUS_WRITE_PROTECT); + if (ret > 0 && (ret & PB_WP_ANY)) + data->flags |= PMBUS_WRITE_PROTECTED | PMBUS_SKIP_STATUS_CHECK; + if (data->info->pages) pmbus_clear_faults(client); else diff --git a/drivers/hwmon/pmbus/pxe1610.c b/drivers/hwmon/pmbus/pxe1610.c index ebe3f023f840..517584cff3de 100644 --- a/drivers/hwmon/pmbus/pxe1610.c +++ b/drivers/hwmon/pmbus/pxe1610.c @@ -19,26 +19,30 @@ static int pxe1610_identify(struct i2c_client *client, struct pmbus_driver_info *info) { - if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE)) { - u8 vout_mode; - int ret; - - /* Read the register with VOUT scaling value.*/ - ret = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE); - if (ret < 0) - return ret; - - vout_mode = ret & GENMASK(4, 0); - - switch (vout_mode) { - case 1: - info->vrm_version = vr12; - break; - case 2: - info->vrm_version = vr13; - break; - default: - return -ENODEV; + int i; + + for (i = 0; i < PXE1610_NUM_PAGES; i++) { + if (pmbus_check_byte_register(client, i, PMBUS_VOUT_MODE)) { + u8 vout_mode; + int ret; + + /* Read the register with VOUT scaling value.*/ + ret = pmbus_read_byte_data(client, i, PMBUS_VOUT_MODE); + if (ret < 0) + return ret; + + vout_mode = ret & GENMASK(4, 0); + + switch (vout_mode) { + case 1: + info->vrm_version[i] = vr12; + break; + case 2: + info->vrm_version[i] = vr13; + break; + default: + return -ENODEV; + } } } diff --git a/drivers/hwmon/pmbus/tps53679.c b/drivers/hwmon/pmbus/tps53679.c index 86bb3aca09ed..9c22e9013dd7 100644 --- a/drivers/hwmon/pmbus/tps53679.c +++ b/drivers/hwmon/pmbus/tps53679.c @@ -24,27 +24,29 @@ static int tps53679_identify(struct i2c_client *client, struct pmbus_driver_info *info) { u8 vout_params; - int ret; - - /* Read the register with VOUT scaling value.*/ - ret = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE); - if (ret < 0) - return ret; - - vout_params = ret & GENMASK(4, 0); - - switch (vout_params) { - case TPS53679_PROT_VR13_10MV: - case TPS53679_PROT_VR12_5_10MV: - info->vrm_version = vr13; - break; - case TPS53679_PROT_VR13_5MV: - case TPS53679_PROT_VR12_5MV: - case TPS53679_PROT_IMVP8_5MV: - info->vrm_version = vr12; - break; - default: - return -EINVAL; + int i, ret; + + for (i = 0; i < TPS53679_PAGE_NUM; i++) { + /* Read the register with VOUT scaling value.*/ + ret = pmbus_read_byte_data(client, i, PMBUS_VOUT_MODE); + if (ret < 0) + return ret; + + vout_params = ret & GENMASK(4, 0); + + switch (vout_params) { + case TPS53679_PROT_VR13_10MV: + case TPS53679_PROT_VR12_5_10MV: + info->vrm_version[i] = vr13; + break; + case TPS53679_PROT_VR13_5MV: + case TPS53679_PROT_VR12_5MV: + case TPS53679_PROT_IMVP8_5MV: + info->vrm_version[i] = vr12; + break; + default: + return -EINVAL; + } } return 0; @@ -83,6 +85,7 @@ static int tps53679_probe(struct i2c_client *client, static const struct i2c_device_id tps53679_id[] = { {"tps53679", 0}, + {"tps53688", 0}, {} }; @@ -90,6 +93,7 @@ MODULE_DEVICE_TABLE(i2c, tps53679_id); static const struct of_device_id __maybe_unused tps53679_of_match[] = { {.compatible = "ti,tps53679"}, + {.compatible = "ti,tps53688"}, {} }; MODULE_DEVICE_TABLE(of, tps53679_of_match); diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c index a9229c6b0e84..23ea3415f166 100644 --- a/drivers/hwmon/pmbus/ucd9000.c +++ b/drivers/hwmon/pmbus/ucd9000.c @@ -18,7 +18,8 @@ #include <linux/gpio/driver.h> #include "pmbus.h" -enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd9090, ucd90910 }; +enum chips { ucd9000, ucd90120, ucd90124, ucd90160, 
ucd90320, ucd9090, + ucd90910 }; #define UCD9000_MONITOR_CONFIG 0xd5 #define UCD9000_NUM_PAGES 0xd6 @@ -38,7 +39,7 @@ enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd9090, ucd90910 }; #define UCD9000_GPIO_OUTPUT 1 #define UCD9000_MON_TYPE(x) (((x) >> 5) & 0x07) -#define UCD9000_MON_PAGE(x) ((x) & 0x0f) +#define UCD9000_MON_PAGE(x) ((x) & 0x1f) #define UCD9000_MON_VOLTAGE 1 #define UCD9000_MON_TEMPERATURE 2 @@ -50,10 +51,12 @@ enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd9090, ucd90910 }; #define UCD9000_GPIO_NAME_LEN 16 #define UCD9090_NUM_GPIOS 23 #define UCD901XX_NUM_GPIOS 26 +#define UCD90320_NUM_GPIOS 84 #define UCD90910_NUM_GPIOS 26 #define UCD9000_DEBUGFS_NAME_LEN 24 #define UCD9000_GPI_COUNT 8 +#define UCD90320_GPI_COUNT 32 struct ucd9000_data { u8 fan_data[UCD9000_NUM_FAN][I2C_SMBUS_BLOCK_MAX]; @@ -131,6 +134,7 @@ static const struct i2c_device_id ucd9000_id[] = { {"ucd90120", ucd90120}, {"ucd90124", ucd90124}, {"ucd90160", ucd90160}, + {"ucd90320", ucd90320}, {"ucd9090", ucd9090}, {"ucd90910", ucd90910}, {} @@ -155,6 +159,10 @@ static const struct of_device_id __maybe_unused ucd9000_of_match[] = { .data = (void *)ucd90160 }, { + .compatible = "ti,ucd90320", + .data = (void *)ucd90320 + }, + { .compatible = "ti,ucd9090", .data = (void *)ucd9090 }, @@ -322,6 +330,9 @@ static void ucd9000_probe_gpio(struct i2c_client *client, case ucd90160: data->gpio.ngpio = UCD901XX_NUM_GPIOS; break; + case ucd90320: + data->gpio.ngpio = UCD90320_NUM_GPIOS; + break; case ucd90910: data->gpio.ngpio = UCD90910_NUM_GPIOS; break; @@ -372,17 +383,18 @@ static int ucd9000_debugfs_show_mfr_status_bit(void *data, u64 *val) struct ucd9000_debugfs_entry *entry = data; struct i2c_client *client = entry->client; u8 buffer[I2C_SMBUS_BLOCK_MAX]; - int ret; + int ret, i; ret = ucd9000_get_mfr_status(client, buffer); if (ret < 0) return ret; /* - * Attribute only created for devices with gpi fault bits at bits - * 16-23, which is the second byte of the response. + * GPI fault bits are in sets of 8, two bytes from end of response. */ - *val = !!(buffer[1] & BIT(entry->index)); + i = ret - 3 - entry->index / 8; + if (i >= 0) + *val = !!(buffer[i] & BIT(entry->index % 8)); return 0; } @@ -422,7 +434,7 @@ static int ucd9000_init_debugfs(struct i2c_client *client, { struct dentry *debugfs; struct ucd9000_debugfs_entry *entries; - int i; + int i, gpi_count; char name[UCD9000_DEBUGFS_NAME_LEN]; debugfs = pmbus_get_debugfs_dir(client); @@ -435,18 +447,21 @@ static int ucd9000_init_debugfs(struct i2c_client *client, /* * Of the chips this driver supports, only the UCD9090, UCD90160, - * and UCD90910 report GPI faults in their MFR_STATUS register, so only - * create the GPI fault debugfs attributes for those chips. + * UCD90320, and UCD90910 report GPI faults in their MFR_STATUS + * register, so only create the GPI fault debugfs attributes for those + * chips. */ if (mid->driver_data == ucd9090 || mid->driver_data == ucd90160 || - mid->driver_data == ucd90910) { + mid->driver_data == ucd90320 || mid->driver_data == ucd90910) { + gpi_count = mid->driver_data == ucd90320 ? 
UCD90320_GPI_COUNT + : UCD9000_GPI_COUNT; entries = devm_kcalloc(&client->dev, - UCD9000_GPI_COUNT, sizeof(*entries), + gpi_count, sizeof(*entries), GFP_KERNEL); if (!entries) return -ENOMEM; - for (i = 0; i < UCD9000_GPI_COUNT; i++) { + for (i = 0; i < gpi_count; i++) { entries[i].client = client; entries[i].index = i; scnprintf(name, UCD9000_DEBUGFS_NAME_LEN, diff --git a/drivers/hwmon/pmbus/xdpe12284.c b/drivers/hwmon/pmbus/xdpe12284.c new file mode 100644 index 000000000000..3d47806ff4d3 --- /dev/null +++ b/drivers/hwmon/pmbus/xdpe12284.c @@ -0,0 +1,117 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Hardware monitoring driver for Infineon Multi-phase Digital VR Controllers + * + * Copyright (c) 2020 Mellanox Technologies. All rights reserved. + */ + +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include "pmbus.h" + +#define XDPE122_PROT_VR12_5MV 0x01 /* VR12.0 mode, 5-mV DAC */ +#define XDPE122_PROT_VR12_5_10MV 0x02 /* VR12.5 mode, 10-mV DAC */ +#define XDPE122_PROT_IMVP9_10MV 0x03 /* IMVP9 mode, 10-mV DAC */ +#define XDPE122_AMD_625MV 0x10 /* AMD mode 6.25mV */ +#define XDPE122_PAGE_NUM 2 + +static int xdpe122_identify(struct i2c_client *client, + struct pmbus_driver_info *info) +{ + u8 vout_params; + int i, ret; + + for (i = 0; i < XDPE122_PAGE_NUM; i++) { + /* Read the register with VOUT scaling value.*/ + ret = pmbus_read_byte_data(client, i, PMBUS_VOUT_MODE); + if (ret < 0) + return ret; + + vout_params = ret & GENMASK(4, 0); + + switch (vout_params) { + case XDPE122_PROT_VR12_5_10MV: + info->vrm_version[i] = vr13; + break; + case XDPE122_PROT_VR12_5MV: + info->vrm_version[i] = vr12; + break; + case XDPE122_PROT_IMVP9_10MV: + info->vrm_version[i] = imvp9; + break; + case XDPE122_AMD_625MV: + info->vrm_version[i] = amd625mv; + break; + default: + return -EINVAL; + } + } + + return 0; +} + +static struct pmbus_driver_info xdpe122_info = { + .pages = XDPE122_PAGE_NUM, + .format[PSC_VOLTAGE_IN] = linear, + .format[PSC_VOLTAGE_OUT] = vid, + .format[PSC_TEMPERATURE] = linear, + .format[PSC_CURRENT_IN] = linear, + .format[PSC_CURRENT_OUT] = linear, + .format[PSC_POWER] = linear, + .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | + PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | + PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP | + PMBUS_HAVE_POUT | PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT, + .func[1] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | + PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | + PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP | + PMBUS_HAVE_POUT | PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT, + .identify = xdpe122_identify, +}; + +static int xdpe122_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct pmbus_driver_info *info; + + info = devm_kmemdup(&client->dev, &xdpe122_info, sizeof(*info), + GFP_KERNEL); + if (!info) + return -ENOMEM; + + return pmbus_do_probe(client, id, info); +} + +static const struct i2c_device_id xdpe122_id[] = { + {"xdpe12254", 0}, + {"xdpe12284", 0}, + {} +}; + +MODULE_DEVICE_TABLE(i2c, xdpe122_id); + +static const struct of_device_id __maybe_unused xdpe122_of_match[] = { + {.compatible = "infineon,xdpe12254"}, + {.compatible = "infineon,xdpe12284"}, + {} +}; +MODULE_DEVICE_TABLE(of, xdpe122_of_match); + +static struct i2c_driver xdpe122_driver = { + .driver = { + .name = "xdpe12284", + .of_match_table = of_match_ptr(xdpe122_of_match), + }, + .probe = xdpe122_probe, + .remove =
pmbus_do_remove, + .id_table = xdpe122_id, +}; + +module_i2c_driver(xdpe122_driver); + +MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>"); +MODULE_DESCRIPTION("PMBus driver for Infineon XDPE122 family"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c index 42ffd2e5182d..30b7b3ea8836 100644 --- a/drivers/hwmon/pwm-fan.c +++ b/drivers/hwmon/pwm-fan.c @@ -390,8 +390,7 @@ static int pwm_fan_probe(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM_SLEEP -static int pwm_fan_suspend(struct device *dev) +static int pwm_fan_disable(struct device *dev) { struct pwm_fan_ctx *ctx = dev_get_drvdata(dev); struct pwm_args args; @@ -418,6 +417,17 @@ static int pwm_fan_suspend(struct device *dev) return 0; } +static void pwm_fan_shutdown(struct platform_device *pdev) +{ + pwm_fan_disable(&pdev->dev); +} + +#ifdef CONFIG_PM_SLEEP +static int pwm_fan_suspend(struct device *dev) +{ + return pwm_fan_disable(dev); +} + static int pwm_fan_resume(struct device *dev) { struct pwm_fan_ctx *ctx = dev_get_drvdata(dev); @@ -455,6 +465,7 @@ MODULE_DEVICE_TABLE(of, of_pwm_fan_match); static struct platform_driver pwm_fan_driver = { .probe = pwm_fan_probe, + .shutdown = pwm_fan_shutdown, .driver = { .name = "pwm-fan", .pm = &pwm_fan_pm, diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c index eb171d15ac48..7ffadc2da57b 100644 --- a/drivers/hwmon/w83627ehf.c +++ b/drivers/hwmon/w83627ehf.c @@ -28,8 +28,6 @@ * w83627uhg 8 2 2 3 0xa230 0xc1 0x5ca3 * w83667hg 9 5 3 3 0xa510 0xc1 0x5ca3 * w83667hg-b 9 5 3 4 0xb350 0xc1 0x5ca3 - * nct6775f 9 4 3 9 0xb470 0xc1 0x5ca3 - * nct6776f 9 5 3 9 0xC330 0xc1 0x5ca3 */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -50,7 +48,7 @@ enum kinds { w83627ehf, w83627dhg, w83627dhg_p, w83627uhg, - w83667hg, w83667hg_b, nct6775, nct6776, + w83667hg, w83667hg_b, }; /* used to set data->name = w83627ehf_device_names[data->sio_kind] */ @@ -61,18 +59,12 @@ static const char * const w83627ehf_device_names[] = { "w83627uhg", "w83667hg", "w83667hg", - "nct6775", - "nct6776", }; static unsigned short force_id; module_param(force_id, ushort, 0); MODULE_PARM_DESC(force_id, "Override the detected device ID"); -static unsigned short fan_debounce; -module_param(fan_debounce, ushort, 0); -MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal"); - #define DRVNAME "w83627ehf" /* @@ -97,8 +89,6 @@ MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal"); #define SIO_W83627UHG_ID 0xa230 #define SIO_W83667HG_ID 0xa510 #define SIO_W83667HG_B_ID 0xb350 -#define SIO_NCT6775_ID 0xb470 -#define SIO_NCT6776_ID 0xc330 #define SIO_ID_MASK 0xFFF0 static inline void @@ -187,11 +177,6 @@ static const u16 W83627EHF_REG_TEMP_CONFIG[] = { 0, 0x152, 0x252, 0 }; #define W83627EHF_REG_DIODE 0x59 #define W83627EHF_REG_SMI_OVT 0x4C -/* NCT6775F has its own fan divider registers */ -#define NCT6775_REG_FANDIV1 0x506 -#define NCT6775_REG_FANDIV2 0x507 -#define NCT6775_REG_FAN_DEBOUNCE 0xf0 - #define W83627EHF_REG_ALARM1 0x459 #define W83627EHF_REG_ALARM2 0x45A #define W83627EHF_REG_ALARM3 0x45B @@ -235,28 +220,6 @@ static const u16 W83627EHF_REG_FAN_STEP_OUTPUT_W83667_B[] static const u16 W83627EHF_REG_TEMP_OFFSET[] = { 0x454, 0x455, 0x456 }; -static const u16 NCT6775_REG_TARGET[] = { 0x101, 0x201, 0x301 }; -static const u16 NCT6775_REG_FAN_MODE[] = { 0x102, 0x202, 0x302 }; -static const u16 NCT6775_REG_FAN_STOP_OUTPUT[] = { 0x105, 0x205, 0x305 }; -static const u16 NCT6775_REG_FAN_START_OUTPUT[] = { 0x106, 0x206, 0x306 }; -static const 
u16 NCT6775_REG_FAN_STOP_TIME[] = { 0x107, 0x207, 0x307 }; -static const u16 NCT6775_REG_PWM[] = { 0x109, 0x209, 0x309 }; -static const u16 NCT6775_REG_FAN_MAX_OUTPUT[] = { 0x10a, 0x20a, 0x30a }; -static const u16 NCT6775_REG_FAN_STEP_OUTPUT[] = { 0x10b, 0x20b, 0x30b }; -static const u16 NCT6775_REG_FAN[] = { 0x630, 0x632, 0x634, 0x636, 0x638 }; -static const u16 NCT6776_REG_FAN_MIN[] = { 0x63a, 0x63c, 0x63e, 0x640, 0x642}; - -static const u16 NCT6775_REG_TEMP[] - = { 0x27, 0x150, 0x250, 0x73, 0x75, 0x77, 0x62b, 0x62c, 0x62d }; -static const u16 NCT6775_REG_TEMP_CONFIG[] - = { 0, 0x152, 0x252, 0, 0, 0, 0x628, 0x629, 0x62A }; -static const u16 NCT6775_REG_TEMP_HYST[] - = { 0x3a, 0x153, 0x253, 0, 0, 0, 0x673, 0x678, 0x67D }; -static const u16 NCT6775_REG_TEMP_OVER[] - = { 0x39, 0x155, 0x255, 0, 0, 0, 0x672, 0x677, 0x67C }; -static const u16 NCT6775_REG_TEMP_SOURCE[] - = { 0x621, 0x622, 0x623, 0x100, 0x200, 0x300, 0x624, 0x625, 0x626 }; - static const char *const w83667hg_b_temp_label[] = { "SYSTIN", "CPUTIN", @@ -268,57 +231,7 @@ static const char *const w83667hg_b_temp_label[] = { "PECI Agent 4" }; -static const char *const nct6775_temp_label[] = { - "", - "SYSTIN", - "CPUTIN", - "AUXTIN", - "AMD SB-TSI", - "PECI Agent 0", - "PECI Agent 1", - "PECI Agent 2", - "PECI Agent 3", - "PECI Agent 4", - "PECI Agent 5", - "PECI Agent 6", - "PECI Agent 7", - "PCH_CHIP_CPU_MAX_TEMP", - "PCH_CHIP_TEMP", - "PCH_CPU_TEMP", - "PCH_MCH_TEMP", - "PCH_DIM0_TEMP", - "PCH_DIM1_TEMP", - "PCH_DIM2_TEMP", - "PCH_DIM3_TEMP" -}; - -static const char *const nct6776_temp_label[] = { - "", - "SYSTIN", - "CPUTIN", - "AUXTIN", - "SMBUSMASTER 0", - "SMBUSMASTER 1", - "SMBUSMASTER 2", - "SMBUSMASTER 3", - "SMBUSMASTER 4", - "SMBUSMASTER 5", - "SMBUSMASTER 6", - "SMBUSMASTER 7", - "PECI Agent 0", - "PECI Agent 1", - "PCH_CHIP_CPU_MAX_TEMP", - "PCH_CHIP_TEMP", - "PCH_CPU_TEMP", - "PCH_MCH_TEMP", - "PCH_DIM0_TEMP", - "PCH_DIM1_TEMP", - "PCH_DIM2_TEMP", - "PCH_DIM3_TEMP", - "BYTE_TEMP" -}; - -#define NUM_REG_TEMP ARRAY_SIZE(NCT6775_REG_TEMP) +#define NUM_REG_TEMP ARRAY_SIZE(W83627EHF_REG_TEMP) static int is_word_sized(u16 reg) { @@ -358,31 +271,6 @@ static unsigned int fan_from_reg8(u16 reg, unsigned int divreg) return 1350000U / (reg << divreg); } -static unsigned int fan_from_reg13(u16 reg, unsigned int divreg) -{ - if ((reg & 0xff1f) == 0xff1f) - return 0; - - reg = (reg & 0x1f) | ((reg & 0xff00) >> 3); - - if (reg == 0) - return 0; - - return 1350000U / reg; -} - -static unsigned int fan_from_reg16(u16 reg, unsigned int divreg) -{ - if (reg == 0 || reg == 0xffff) - return 0; - - /* - * Even though the registers are 16 bit wide, the fan divisor - * still applies. 
- */ - return 1350000U / (reg << divreg); -} - static inline unsigned int div_from_reg(u8 reg) { @@ -418,7 +306,6 @@ struct w83627ehf_data { int addr; /* IO base of hw monitor block */ const char *name; - struct device *hwmon_dev; struct mutex lock; u16 reg_temp[NUM_REG_TEMP]; @@ -428,20 +315,10 @@ struct w83627ehf_data { u8 temp_src[NUM_REG_TEMP]; const char * const *temp_label; - const u16 *REG_PWM; - const u16 *REG_TARGET; - const u16 *REG_FAN; - const u16 *REG_FAN_MIN; - const u16 *REG_FAN_START_OUTPUT; - const u16 *REG_FAN_STOP_OUTPUT; - const u16 *REG_FAN_STOP_TIME; const u16 *REG_FAN_MAX_OUTPUT; const u16 *REG_FAN_STEP_OUTPUT; const u16 *scale_in; - unsigned int (*fan_from_reg)(u16 reg, unsigned int divreg); - unsigned int (*fan_from_reg_min)(u16 reg, unsigned int divreg); - struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ @@ -457,7 +334,6 @@ struct w83627ehf_data { u8 fan_div[5]; u8 has_fan; /* some fan inputs can be disabled */ u8 has_fan_min; /* some fans don't have min register */ - bool has_fan_div; u8 temp_type[3]; s8 temp_offset[3]; s16 temp[9]; @@ -494,6 +370,7 @@ struct w83627ehf_data { u16 have_temp_offset; u8 in6_skip:1; u8 temp3_val_only:1; + u8 have_vid:1; #ifdef CONFIG_PM /* Remember extra register values over suspend/resume */ @@ -584,35 +461,6 @@ static int w83627ehf_write_temp(struct w83627ehf_data *data, u16 reg, } /* This function assumes that the caller holds data->update_lock */ -static void nct6775_write_fan_div(struct w83627ehf_data *data, int nr) -{ - u8 reg; - - switch (nr) { - case 0: - reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV1) & 0x70) - | (data->fan_div[0] & 0x7); - w83627ehf_write_value(data, NCT6775_REG_FANDIV1, reg); - break; - case 1: - reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV1) & 0x7) - | ((data->fan_div[1] << 4) & 0x70); - w83627ehf_write_value(data, NCT6775_REG_FANDIV1, reg); - break; - case 2: - reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV2) & 0x70) - | (data->fan_div[2] & 0x7); - w83627ehf_write_value(data, NCT6775_REG_FANDIV2, reg); - break; - case 3: - reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV2) & 0x7) - | ((data->fan_div[3] << 4) & 0x70); - w83627ehf_write_value(data, NCT6775_REG_FANDIV2, reg); - break; - } -} - -/* This function assumes that the caller holds data->update_lock */ static void w83627ehf_write_fan_div(struct w83627ehf_data *data, int nr) { u8 reg; @@ -663,32 +511,6 @@ static void w83627ehf_write_fan_div(struct w83627ehf_data *data, int nr) } } -static void w83627ehf_write_fan_div_common(struct device *dev, - struct w83627ehf_data *data, int nr) -{ - struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev); - - if (sio_data->kind == nct6776) - ; /* no dividers, do nothing */ - else if (sio_data->kind == nct6775) - nct6775_write_fan_div(data, nr); - else - w83627ehf_write_fan_div(data, nr); -} - -static void nct6775_update_fan_div(struct w83627ehf_data *data) -{ - u8 i; - - i = w83627ehf_read_value(data, NCT6775_REG_FANDIV1); - data->fan_div[0] = i & 0x7; - data->fan_div[1] = (i & 0x70) >> 4; - i = w83627ehf_read_value(data, NCT6775_REG_FANDIV2); - data->fan_div[2] = i & 0x7; - if (data->has_fan & (1<<3)) - data->fan_div[3] = (i & 0x70) >> 4; -} - static void w83627ehf_update_fan_div(struct w83627ehf_data *data) { int i; @@ -714,37 +536,6 @@ static void w83627ehf_update_fan_div(struct w83627ehf_data *data) } } -static void w83627ehf_update_fan_div_common(struct device *dev, - struct w83627ehf_data *data) -{ - 
struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev); - - if (sio_data->kind == nct6776) - ; /* no dividers, do nothing */ - else if (sio_data->kind == nct6775) - nct6775_update_fan_div(data); - else - w83627ehf_update_fan_div(data); -} - -static void nct6775_update_pwm(struct w83627ehf_data *data) -{ - int i; - int pwmcfg, fanmodecfg; - - for (i = 0; i < data->pwm_num; i++) { - pwmcfg = w83627ehf_read_value(data, - W83627EHF_REG_PWM_ENABLE[i]); - fanmodecfg = w83627ehf_read_value(data, - NCT6775_REG_FAN_MODE[i]); - data->pwm_mode[i] = - ((pwmcfg >> W83627EHF_PWM_MODE_SHIFT[i]) & 1) ? 0 : 1; - data->pwm_enable[i] = ((fanmodecfg >> 4) & 7) + 1; - data->tolerance[i] = fanmodecfg & 0x0f; - data->pwm[i] = w83627ehf_read_value(data, data->REG_PWM[i]); - } -} - static void w83627ehf_update_pwm(struct w83627ehf_data *data) { int i; @@ -765,28 +556,15 @@ static void w83627ehf_update_pwm(struct w83627ehf_data *data) ((pwmcfg >> W83627EHF_PWM_MODE_SHIFT[i]) & 1) ? 0 : 1; data->pwm_enable[i] = ((pwmcfg >> W83627EHF_PWM_ENABLE_SHIFT[i]) & 3) + 1; - data->pwm[i] = w83627ehf_read_value(data, data->REG_PWM[i]); + data->pwm[i] = w83627ehf_read_value(data, W83627EHF_REG_PWM[i]); data->tolerance[i] = (tolerance >> (i == 1 ? 4 : 0)) & 0x0f; } } -static void w83627ehf_update_pwm_common(struct device *dev, - struct w83627ehf_data *data) -{ - struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev); - - if (sio_data->kind == nct6775 || sio_data->kind == nct6776) - nct6775_update_pwm(data); - else - w83627ehf_update_pwm(data); -} - static struct w83627ehf_data *w83627ehf_update_device(struct device *dev) { struct w83627ehf_data *data = dev_get_drvdata(dev); - struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev); - int i; mutex_lock(&data->update_lock); @@ -794,7 +572,7 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev) if (time_after(jiffies, data->last_updated + HZ + HZ/2) || !data->valid) { /* Fan clock dividers */ - w83627ehf_update_fan_div_common(dev, data); + w83627ehf_update_fan_div(data); /* Measured voltages and limits */ for (i = 0; i < data->in_num; i++) { @@ -816,40 +594,36 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev) if (!(data->has_fan & (1 << i))) continue; - reg = w83627ehf_read_value(data, data->REG_FAN[i]); - data->rpm[i] = data->fan_from_reg(reg, - data->fan_div[i]); + reg = w83627ehf_read_value(data, W83627EHF_REG_FAN[i]); + data->rpm[i] = fan_from_reg8(reg, data->fan_div[i]); if (data->has_fan_min & (1 << i)) data->fan_min[i] = w83627ehf_read_value(data, - data->REG_FAN_MIN[i]); + W83627EHF_REG_FAN_MIN[i]); /* * If we failed to measure the fan speed and clock * divider can be increased, let's try that for next * time */ - if (data->has_fan_div - && (reg >= 0xff || (sio_data->kind == nct6775 - && reg == 0x00)) - && data->fan_div[i] < 0x07) { + if (reg >= 0xff && data->fan_div[i] < 0x07) { dev_dbg(dev, "Increasing fan%d clock divider from %u to %u\n", i + 1, div_from_reg(data->fan_div[i]), div_from_reg(data->fan_div[i] + 1)); data->fan_div[i]++; - w83627ehf_write_fan_div_common(dev, data, i); + w83627ehf_write_fan_div(data, i); /* Preserve min limit if possible */ if ((data->has_fan_min & (1 << i)) && data->fan_min[i] >= 2 && data->fan_min[i] != 255) w83627ehf_write_value(data, - data->REG_FAN_MIN[i], + W83627EHF_REG_FAN_MIN[i], (data->fan_min[i] /= 2)); } } - w83627ehf_update_pwm_common(dev, data); + w83627ehf_update_pwm(data); for (i = 0; i < data->pwm_num; i++) { if (!(data->has_fan & (1 << i))) @@ -857,13 +631,13 @@ 
static struct w83627ehf_data *w83627ehf_update_device(struct device *dev) data->fan_start_output[i] = w83627ehf_read_value(data, - data->REG_FAN_START_OUTPUT[i]); + W83627EHF_REG_FAN_START_OUTPUT[i]); data->fan_stop_output[i] = w83627ehf_read_value(data, - data->REG_FAN_STOP_OUTPUT[i]); + W83627EHF_REG_FAN_STOP_OUTPUT[i]); data->fan_stop_time[i] = w83627ehf_read_value(data, - data->REG_FAN_STOP_TIME[i]); + W83627EHF_REG_FAN_STOP_TIME[i]); if (data->REG_FAN_MAX_OUTPUT && data->REG_FAN_MAX_OUTPUT[i] != 0xff) @@ -879,7 +653,7 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev) data->target_temp[i] = w83627ehf_read_value(data, - data->REG_TARGET[i]) & + W83627EHF_REG_TARGET[i]) & (data->pwm_mode[i] == 1 ? 0x7f : 0xff); } @@ -923,199 +697,61 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev) return data; } -/* - * Sysfs callback functions - */ -#define show_in_reg(reg) \ -static ssize_t \ -show_##reg(struct device *dev, struct device_attribute *attr, \ - char *buf) \ -{ \ - struct w83627ehf_data *data = w83627ehf_update_device(dev); \ - struct sensor_device_attribute *sensor_attr = \ - to_sensor_dev_attr(attr); \ - int nr = sensor_attr->index; \ - return sprintf(buf, "%ld\n", in_from_reg(data->reg[nr], nr, \ - data->scale_in)); \ -} -show_in_reg(in) -show_in_reg(in_min) -show_in_reg(in_max) - #define store_in_reg(REG, reg) \ -static ssize_t \ -store_in_##reg(struct device *dev, struct device_attribute *attr, \ - const char *buf, size_t count) \ +static int \ +store_in_##reg(struct device *dev, struct w83627ehf_data *data, int channel, \ + long val) \ { \ - struct w83627ehf_data *data = dev_get_drvdata(dev); \ - struct sensor_device_attribute *sensor_attr = \ - to_sensor_dev_attr(attr); \ - int nr = sensor_attr->index; \ - unsigned long val; \ - int err; \ - err = kstrtoul(buf, 10, &val); \ - if (err < 0) \ - return err; \ + if (val < 0) \ + return -EINVAL; \ mutex_lock(&data->update_lock); \ - data->in_##reg[nr] = in_to_reg(val, nr, data->scale_in); \ - w83627ehf_write_value(data, W83627EHF_REG_IN_##REG(nr), \ - data->in_##reg[nr]); \ + data->in_##reg[channel] = in_to_reg(val, channel, data->scale_in); \ + w83627ehf_write_value(data, W83627EHF_REG_IN_##REG(channel), \ + data->in_##reg[channel]); \ mutex_unlock(&data->update_lock); \ - return count; \ + return 0; \ } store_in_reg(MIN, min) store_in_reg(MAX, max) -static ssize_t show_alarm(struct device *dev, struct device_attribute *attr, - char *buf) +static int +store_fan_min(struct device *dev, struct w83627ehf_data *data, int channel, + long val) { - struct w83627ehf_data *data = w83627ehf_update_device(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - int nr = sensor_attr->index; - return sprintf(buf, "%u\n", (data->alarms >> nr) & 0x01); -} - -static struct sensor_device_attribute sda_in_input[] = { - SENSOR_ATTR(in0_input, S_IRUGO, show_in, NULL, 0), - SENSOR_ATTR(in1_input, S_IRUGO, show_in, NULL, 1), - SENSOR_ATTR(in2_input, S_IRUGO, show_in, NULL, 2), - SENSOR_ATTR(in3_input, S_IRUGO, show_in, NULL, 3), - SENSOR_ATTR(in4_input, S_IRUGO, show_in, NULL, 4), - SENSOR_ATTR(in5_input, S_IRUGO, show_in, NULL, 5), - SENSOR_ATTR(in6_input, S_IRUGO, show_in, NULL, 6), - SENSOR_ATTR(in7_input, S_IRUGO, show_in, NULL, 7), - SENSOR_ATTR(in8_input, S_IRUGO, show_in, NULL, 8), - SENSOR_ATTR(in9_input, S_IRUGO, show_in, NULL, 9), -}; - -static struct sensor_device_attribute sda_in_alarm[] = { - SENSOR_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0), - 
SENSOR_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1), - SENSOR_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2), - SENSOR_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3), - SENSOR_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8), - SENSOR_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 21), - SENSOR_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 20), - SENSOR_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 16), - SENSOR_ATTR(in8_alarm, S_IRUGO, show_alarm, NULL, 17), - SENSOR_ATTR(in9_alarm, S_IRUGO, show_alarm, NULL, 19), -}; - -static struct sensor_device_attribute sda_in_min[] = { - SENSOR_ATTR(in0_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 0), - SENSOR_ATTR(in1_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 1), - SENSOR_ATTR(in2_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 2), - SENSOR_ATTR(in3_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 3), - SENSOR_ATTR(in4_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 4), - SENSOR_ATTR(in5_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 5), - SENSOR_ATTR(in6_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 6), - SENSOR_ATTR(in7_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 7), - SENSOR_ATTR(in8_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 8), - SENSOR_ATTR(in9_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 9), -}; - -static struct sensor_device_attribute sda_in_max[] = { - SENSOR_ATTR(in0_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 0), - SENSOR_ATTR(in1_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 1), - SENSOR_ATTR(in2_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 2), - SENSOR_ATTR(in3_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 3), - SENSOR_ATTR(in4_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 4), - SENSOR_ATTR(in5_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 5), - SENSOR_ATTR(in6_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 6), - SENSOR_ATTR(in7_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 7), - SENSOR_ATTR(in8_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 8), - SENSOR_ATTR(in9_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 9), -}; - -static ssize_t -show_fan(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct w83627ehf_data *data = w83627ehf_update_device(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - int nr = sensor_attr->index; - return sprintf(buf, "%d\n", data->rpm[nr]); -} - -static ssize_t -show_fan_min(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct w83627ehf_data *data = w83627ehf_update_device(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - int nr = sensor_attr->index; - return sprintf(buf, "%d\n", - data->fan_from_reg_min(data->fan_min[nr], - data->fan_div[nr])); -} - -static ssize_t -show_fan_div(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct w83627ehf_data *data = w83627ehf_update_device(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - int nr = sensor_attr->index; - return sprintf(buf, "%u\n", div_from_reg(data->fan_div[nr])); -} - -static ssize_t -store_fan_min(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct w83627ehf_data *data = dev_get_drvdata(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - int nr = sensor_attr->index; - unsigned long val; - int err; unsigned int reg; u8 new_div; - err = kstrtoul(buf, 10, &val); - if (err < 0) - return err; + if (val < 0) + return -EINVAL; 
mutex_lock(&data->update_lock); - if (!data->has_fan_div) { - /* - * Only NCT6776F for now, so we know that this is a 13 bit - * register - */ - if (!val) { - val = 0xff1f; - } else { - if (val > 1350000U) - val = 135000U; - val = 1350000U / val; - val = (val & 0x1f) | ((val << 3) & 0xff00); - } - data->fan_min[nr] = val; - goto done; /* Leave fan divider alone */ - } if (!val) { /* No min limit, alarm disabled */ - data->fan_min[nr] = 255; - new_div = data->fan_div[nr]; /* No change */ - dev_info(dev, "fan%u low limit and alarm disabled\n", nr + 1); + data->fan_min[channel] = 255; + new_div = data->fan_div[channel]; /* No change */ + dev_info(dev, "fan%u low limit and alarm disabled\n", + channel + 1); } else if ((reg = 1350000U / val) >= 128 * 255) { /* * Speed below this value cannot possibly be represented, * even with the highest divider (128) */ - data->fan_min[nr] = 254; + data->fan_min[channel] = 254; new_div = 7; /* 128 == (1 << 7) */ dev_warn(dev, "fan%u low limit %lu below minimum %u, set to minimum\n", - nr + 1, val, data->fan_from_reg_min(254, 7)); + channel + 1, val, fan_from_reg8(254, 7)); } else if (!reg) { /* * Speed above this value cannot possibly be represented, * even with the lowest divider (1) */ - data->fan_min[nr] = 1; + data->fan_min[channel] = 1; new_div = 0; /* 1 == (1 << 0) */ dev_warn(dev, "fan%u low limit %lu above maximum %u, set to maximum\n", - nr + 1, val, data->fan_from_reg_min(1, 0)); + channel + 1, val, fan_from_reg8(1, 0)); } else { /* * Automatically pick the best divider, i.e. the one such @@ -1127,362 +763,117 @@ store_fan_min(struct device *dev, struct device_attribute *attr, reg >>= 1; new_div++; } - data->fan_min[nr] = reg; + data->fan_min[channel] = reg; } /* * Write both the fan clock divider (if it changed) and the new * fan min (unconditionally) */ - if (new_div != data->fan_div[nr]) { + if (new_div != data->fan_div[channel]) { dev_dbg(dev, "fan%u clock divider changed from %u to %u\n", - nr + 1, div_from_reg(data->fan_div[nr]), + channel + 1, div_from_reg(data->fan_div[channel]), div_from_reg(new_div)); - data->fan_div[nr] = new_div; - w83627ehf_write_fan_div_common(dev, data, nr); + data->fan_div[channel] = new_div; + w83627ehf_write_fan_div(data, channel); /* Give the chip time to sample a new speed value */ data->last_updated = jiffies; } -done: - w83627ehf_write_value(data, data->REG_FAN_MIN[nr], - data->fan_min[nr]); - mutex_unlock(&data->update_lock); - return count; -} - -static struct sensor_device_attribute sda_fan_input[] = { - SENSOR_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0), - SENSOR_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1), - SENSOR_ATTR(fan3_input, S_IRUGO, show_fan, NULL, 2), - SENSOR_ATTR(fan4_input, S_IRUGO, show_fan, NULL, 3), - SENSOR_ATTR(fan5_input, S_IRUGO, show_fan, NULL, 4), -}; - -static struct sensor_device_attribute sda_fan_alarm[] = { - SENSOR_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6), - SENSOR_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7), - SENSOR_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL, 11), - SENSOR_ATTR(fan4_alarm, S_IRUGO, show_alarm, NULL, 10), - SENSOR_ATTR(fan5_alarm, S_IRUGO, show_alarm, NULL, 23), -}; - -static struct sensor_device_attribute sda_fan_min[] = { - SENSOR_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan_min, - store_fan_min, 0), - SENSOR_ATTR(fan2_min, S_IWUSR | S_IRUGO, show_fan_min, - store_fan_min, 1), - SENSOR_ATTR(fan3_min, S_IWUSR | S_IRUGO, show_fan_min, - store_fan_min, 2), - SENSOR_ATTR(fan4_min, S_IWUSR | S_IRUGO, show_fan_min, - store_fan_min, 3), - 
SENSOR_ATTR(fan5_min, S_IWUSR | S_IRUGO, show_fan_min, - store_fan_min, 4), -}; - -static struct sensor_device_attribute sda_fan_div[] = { - SENSOR_ATTR(fan1_div, S_IRUGO, show_fan_div, NULL, 0), - SENSOR_ATTR(fan2_div, S_IRUGO, show_fan_div, NULL, 1), - SENSOR_ATTR(fan3_div, S_IRUGO, show_fan_div, NULL, 2), - SENSOR_ATTR(fan4_div, S_IRUGO, show_fan_div, NULL, 3), - SENSOR_ATTR(fan5_div, S_IRUGO, show_fan_div, NULL, 4), -}; - -static ssize_t -show_temp_label(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct w83627ehf_data *data = w83627ehf_update_device(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - int nr = sensor_attr->index; - return sprintf(buf, "%s\n", data->temp_label[data->temp_src[nr]]); -} + w83627ehf_write_value(data, W83627EHF_REG_FAN_MIN[channel], + data->fan_min[channel]); + mutex_unlock(&data->update_lock); -#define show_temp_reg(addr, reg) \ -static ssize_t \ -show_##reg(struct device *dev, struct device_attribute *attr, \ - char *buf) \ -{ \ - struct w83627ehf_data *data = w83627ehf_update_device(dev); \ - struct sensor_device_attribute *sensor_attr = \ - to_sensor_dev_attr(attr); \ - int nr = sensor_attr->index; \ - return sprintf(buf, "%d\n", LM75_TEMP_FROM_REG(data->reg[nr])); \ + return 0; } -show_temp_reg(reg_temp, temp); -show_temp_reg(reg_temp_over, temp_max); -show_temp_reg(reg_temp_hyst, temp_max_hyst); #define store_temp_reg(addr, reg) \ -static ssize_t \ -store_##reg(struct device *dev, struct device_attribute *attr, \ - const char *buf, size_t count) \ +static int \ +store_##reg(struct device *dev, struct w83627ehf_data *data, int channel, \ + long val) \ { \ - struct w83627ehf_data *data = dev_get_drvdata(dev); \ - struct sensor_device_attribute *sensor_attr = \ - to_sensor_dev_attr(attr); \ - int nr = sensor_attr->index; \ - int err; \ - long val; \ - err = kstrtol(buf, 10, &val); \ - if (err < 0) \ - return err; \ mutex_lock(&data->update_lock); \ - data->reg[nr] = LM75_TEMP_TO_REG(val); \ - w83627ehf_write_temp(data, data->addr[nr], data->reg[nr]); \ + data->reg[channel] = LM75_TEMP_TO_REG(val); \ + w83627ehf_write_temp(data, data->addr[channel], data->reg[channel]); \ mutex_unlock(&data->update_lock); \ - return count; \ + return 0; \ } store_temp_reg(reg_temp_over, temp_max); store_temp_reg(reg_temp_hyst, temp_max_hyst); -static ssize_t -show_temp_offset(struct device *dev, struct device_attribute *attr, char *buf) +static int +store_temp_offset(struct device *dev, struct w83627ehf_data *data, int channel, + long val) { - struct w83627ehf_data *data = w83627ehf_update_device(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - - return sprintf(buf, "%d\n", - data->temp_offset[sensor_attr->index] * 1000); -} - -static ssize_t -store_temp_offset(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct w83627ehf_data *data = dev_get_drvdata(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - int nr = sensor_attr->index; - long val; - int err; - - err = kstrtol(buf, 10, &val); - if (err < 0) - return err; - val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127); mutex_lock(&data->update_lock); - data->temp_offset[nr] = val; - w83627ehf_write_value(data, W83627EHF_REG_TEMP_OFFSET[nr], val); + data->temp_offset[channel] = val; + w83627ehf_write_value(data, W83627EHF_REG_TEMP_OFFSET[channel], val); mutex_unlock(&data->update_lock); - return count; -} - -static ssize_t -show_temp_type(struct device *dev, 
struct device_attribute *attr, char *buf) -{ - struct w83627ehf_data *data = w83627ehf_update_device(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - int nr = sensor_attr->index; - return sprintf(buf, "%d\n", (int)data->temp_type[nr]); -} - -static struct sensor_device_attribute sda_temp_input[] = { - SENSOR_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0), - SENSOR_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1), - SENSOR_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2), - SENSOR_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 3), - SENSOR_ATTR(temp5_input, S_IRUGO, show_temp, NULL, 4), - SENSOR_ATTR(temp6_input, S_IRUGO, show_temp, NULL, 5), - SENSOR_ATTR(temp7_input, S_IRUGO, show_temp, NULL, 6), - SENSOR_ATTR(temp8_input, S_IRUGO, show_temp, NULL, 7), - SENSOR_ATTR(temp9_input, S_IRUGO, show_temp, NULL, 8), -}; - -static struct sensor_device_attribute sda_temp_label[] = { - SENSOR_ATTR(temp1_label, S_IRUGO, show_temp_label, NULL, 0), - SENSOR_ATTR(temp2_label, S_IRUGO, show_temp_label, NULL, 1), - SENSOR_ATTR(temp3_label, S_IRUGO, show_temp_label, NULL, 2), - SENSOR_ATTR(temp4_label, S_IRUGO, show_temp_label, NULL, 3), - SENSOR_ATTR(temp5_label, S_IRUGO, show_temp_label, NULL, 4), - SENSOR_ATTR(temp6_label, S_IRUGO, show_temp_label, NULL, 5), - SENSOR_ATTR(temp7_label, S_IRUGO, show_temp_label, NULL, 6), - SENSOR_ATTR(temp8_label, S_IRUGO, show_temp_label, NULL, 7), - SENSOR_ATTR(temp9_label, S_IRUGO, show_temp_label, NULL, 8), -}; - -static struct sensor_device_attribute sda_temp_max[] = { - SENSOR_ATTR(temp1_max, S_IRUGO | S_IWUSR, show_temp_max, - store_temp_max, 0), - SENSOR_ATTR(temp2_max, S_IRUGO | S_IWUSR, show_temp_max, - store_temp_max, 1), - SENSOR_ATTR(temp3_max, S_IRUGO | S_IWUSR, show_temp_max, - store_temp_max, 2), - SENSOR_ATTR(temp4_max, S_IRUGO | S_IWUSR, show_temp_max, - store_temp_max, 3), - SENSOR_ATTR(temp5_max, S_IRUGO | S_IWUSR, show_temp_max, - store_temp_max, 4), - SENSOR_ATTR(temp6_max, S_IRUGO | S_IWUSR, show_temp_max, - store_temp_max, 5), - SENSOR_ATTR(temp7_max, S_IRUGO | S_IWUSR, show_temp_max, - store_temp_max, 6), - SENSOR_ATTR(temp8_max, S_IRUGO | S_IWUSR, show_temp_max, - store_temp_max, 7), - SENSOR_ATTR(temp9_max, S_IRUGO | S_IWUSR, show_temp_max, - store_temp_max, 8), -}; - -static struct sensor_device_attribute sda_temp_max_hyst[] = { - SENSOR_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, - store_temp_max_hyst, 0), - SENSOR_ATTR(temp2_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, - store_temp_max_hyst, 1), - SENSOR_ATTR(temp3_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, - store_temp_max_hyst, 2), - SENSOR_ATTR(temp4_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, - store_temp_max_hyst, 3), - SENSOR_ATTR(temp5_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, - store_temp_max_hyst, 4), - SENSOR_ATTR(temp6_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, - store_temp_max_hyst, 5), - SENSOR_ATTR(temp7_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, - store_temp_max_hyst, 6), - SENSOR_ATTR(temp8_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, - store_temp_max_hyst, 7), - SENSOR_ATTR(temp9_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, - store_temp_max_hyst, 8), -}; - -static struct sensor_device_attribute sda_temp_alarm[] = { - SENSOR_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4), - SENSOR_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 5), - SENSOR_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 13), -}; - -static struct sensor_device_attribute sda_temp_type[] = { - SENSOR_ATTR(temp1_type, S_IRUGO, 
show_temp_type, NULL, 0), - SENSOR_ATTR(temp2_type, S_IRUGO, show_temp_type, NULL, 1), - SENSOR_ATTR(temp3_type, S_IRUGO, show_temp_type, NULL, 2), -}; - -static struct sensor_device_attribute sda_temp_offset[] = { - SENSOR_ATTR(temp1_offset, S_IRUGO | S_IWUSR, show_temp_offset, - store_temp_offset, 0), - SENSOR_ATTR(temp2_offset, S_IRUGO | S_IWUSR, show_temp_offset, - store_temp_offset, 1), - SENSOR_ATTR(temp3_offset, S_IRUGO | S_IWUSR, show_temp_offset, - store_temp_offset, 2), -}; - -#define show_pwm_reg(reg) \ -static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \ - char *buf) \ -{ \ - struct w83627ehf_data *data = w83627ehf_update_device(dev); \ - struct sensor_device_attribute *sensor_attr = \ - to_sensor_dev_attr(attr); \ - int nr = sensor_attr->index; \ - return sprintf(buf, "%d\n", data->reg[nr]); \ + return 0; } -show_pwm_reg(pwm_mode) -show_pwm_reg(pwm_enable) -show_pwm_reg(pwm) - -static ssize_t -store_pwm_mode(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +static int +store_pwm_mode(struct device *dev, struct w83627ehf_data *data, int channel, + long val) { - struct w83627ehf_data *data = dev_get_drvdata(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev); - int nr = sensor_attr->index; - unsigned long val; - int err; u16 reg; - err = kstrtoul(buf, 10, &val); - if (err < 0) - return err; - - if (val > 1) - return -EINVAL; - - /* On NCT67766F, DC mode is only supported for pwm1 */ - if (sio_data->kind == nct6776 && nr && val != 1) + if (val < 0 || val > 1) return -EINVAL; mutex_lock(&data->update_lock); - reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]); - data->pwm_mode[nr] = val; - reg &= ~(1 << W83627EHF_PWM_MODE_SHIFT[nr]); + reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[channel]); + data->pwm_mode[channel] = val; + reg &= ~(1 << W83627EHF_PWM_MODE_SHIFT[channel]); if (!val) - reg |= 1 << W83627EHF_PWM_MODE_SHIFT[nr]; - w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[nr], reg); + reg |= 1 << W83627EHF_PWM_MODE_SHIFT[channel]; + w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[channel], reg); mutex_unlock(&data->update_lock); - return count; + return 0; } -static ssize_t -store_pwm(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +static int +store_pwm(struct device *dev, struct w83627ehf_data *data, int channel, + long val) { - struct w83627ehf_data *data = dev_get_drvdata(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - int nr = sensor_attr->index; - unsigned long val; - int err; - - err = kstrtoul(buf, 10, &val); - if (err < 0) - return err; - val = clamp_val(val, 0, 255); mutex_lock(&data->update_lock); - data->pwm[nr] = val; - w83627ehf_write_value(data, data->REG_PWM[nr], val); + data->pwm[channel] = val; + w83627ehf_write_value(data, W83627EHF_REG_PWM[channel], val); mutex_unlock(&data->update_lock); - return count; + return 0; } -static ssize_t -store_pwm_enable(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +static int +store_pwm_enable(struct device *dev, struct w83627ehf_data *data, int channel, + long val) { - struct w83627ehf_data *data = dev_get_drvdata(dev); - struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - int nr = sensor_attr->index; - unsigned long val; - int err; u16 reg; - err = 
kstrtoul(buf, 10, &val); - if (err < 0) - return err; - - if (!val || (val > 4 && val != data->pwm_enable_orig[nr])) - return -EINVAL; - /* SmartFan III mode is not supported on NCT6776F */ - if (sio_data->kind == nct6776 && val == 4) + if (!val || val < 0 || + (val > 4 && val != data->pwm_enable_orig[channel])) return -EINVAL; mutex_lock(&data->update_lock); - data->pwm_enable[nr] = val; - if (sio_data->kind == nct6775 || sio_data->kind == nct6776) { - reg = w83627ehf_read_value(data, - NCT6775_REG_FAN_MODE[nr]); - reg &= 0x0f; - reg |= (val - 1) << 4; - w83627ehf_write_value(data, - NCT6775_REG_FAN_MODE[nr], reg); - } else { - reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]); - reg &= ~(0x03 << W83627EHF_PWM_ENABLE_SHIFT[nr]); - reg |= (val - 1) << W83627EHF_PWM_ENABLE_SHIFT[nr]; - w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[nr], reg); - } + data->pwm_enable[channel] = val; + reg = w83627ehf_read_value(data, + W83627EHF_REG_PWM_ENABLE[channel]); + reg &= ~(0x03 << W83627EHF_PWM_ENABLE_SHIFT[channel]); + reg |= (val - 1) << W83627EHF_PWM_ENABLE_SHIFT[channel]; + w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[channel], + reg); mutex_unlock(&data->update_lock); - return count; + return 0; } - #define show_tol_temp(reg) \ static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ - struct w83627ehf_data *data = w83627ehf_update_device(dev); \ + struct w83627ehf_data *data = w83627ehf_update_device(dev->parent); \ struct sensor_device_attribute *sensor_attr = \ to_sensor_dev_attr(attr); \ int nr = sensor_attr->index; \ @@ -1510,7 +901,7 @@ store_target_temp(struct device *dev, struct device_attribute *attr, mutex_lock(&data->update_lock); data->target_temp[nr] = val; - w83627ehf_write_value(data, data->REG_TARGET[nr], val); + w83627ehf_write_value(data, W83627EHF_REG_TARGET[nr], val); mutex_unlock(&data->update_lock); return count; } @@ -1520,7 +911,6 @@ store_tolerance(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct w83627ehf_data *data = dev_get_drvdata(dev); - struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev); struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; u16 reg; @@ -1535,76 +925,34 @@ store_tolerance(struct device *dev, struct device_attribute *attr, val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 15); mutex_lock(&data->update_lock); - if (sio_data->kind == nct6775 || sio_data->kind == nct6776) { - /* Limit tolerance further for NCT6776F */ - if (sio_data->kind == nct6776 && val > 7) - val = 7; - reg = w83627ehf_read_value(data, NCT6775_REG_FAN_MODE[nr]); + reg = w83627ehf_read_value(data, W83627EHF_REG_TOLERANCE[nr]); + if (nr == 1) + reg = (reg & 0x0f) | (val << 4); + else reg = (reg & 0xf0) | val; - w83627ehf_write_value(data, NCT6775_REG_FAN_MODE[nr], reg); - } else { - reg = w83627ehf_read_value(data, W83627EHF_REG_TOLERANCE[nr]); - if (nr == 1) - reg = (reg & 0x0f) | (val << 4); - else - reg = (reg & 0xf0) | val; - w83627ehf_write_value(data, W83627EHF_REG_TOLERANCE[nr], reg); - } + w83627ehf_write_value(data, W83627EHF_REG_TOLERANCE[nr], reg); data->tolerance[nr] = val; mutex_unlock(&data->update_lock); return count; } -static struct sensor_device_attribute sda_pwm[] = { - SENSOR_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 0), - SENSOR_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 1), - SENSOR_ATTR(pwm3, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 2), - SENSOR_ATTR(pwm4, S_IWUSR | S_IRUGO, show_pwm, 
store_pwm, 3), -}; - -static struct sensor_device_attribute sda_pwm_mode[] = { - SENSOR_ATTR(pwm1_mode, S_IWUSR | S_IRUGO, show_pwm_mode, - store_pwm_mode, 0), - SENSOR_ATTR(pwm2_mode, S_IWUSR | S_IRUGO, show_pwm_mode, - store_pwm_mode, 1), - SENSOR_ATTR(pwm3_mode, S_IWUSR | S_IRUGO, show_pwm_mode, - store_pwm_mode, 2), - SENSOR_ATTR(pwm4_mode, S_IWUSR | S_IRUGO, show_pwm_mode, - store_pwm_mode, 3), -}; - -static struct sensor_device_attribute sda_pwm_enable[] = { - SENSOR_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, show_pwm_enable, - store_pwm_enable, 0), - SENSOR_ATTR(pwm2_enable, S_IWUSR | S_IRUGO, show_pwm_enable, - store_pwm_enable, 1), - SENSOR_ATTR(pwm3_enable, S_IWUSR | S_IRUGO, show_pwm_enable, - store_pwm_enable, 2), - SENSOR_ATTR(pwm4_enable, S_IWUSR | S_IRUGO, show_pwm_enable, - store_pwm_enable, 3), -}; - -static struct sensor_device_attribute sda_target_temp[] = { - SENSOR_ATTR(pwm1_target, S_IWUSR | S_IRUGO, show_target_temp, - store_target_temp, 0), - SENSOR_ATTR(pwm2_target, S_IWUSR | S_IRUGO, show_target_temp, - store_target_temp, 1), - SENSOR_ATTR(pwm3_target, S_IWUSR | S_IRUGO, show_target_temp, - store_target_temp, 2), - SENSOR_ATTR(pwm4_target, S_IWUSR | S_IRUGO, show_target_temp, - store_target_temp, 3), -}; - -static struct sensor_device_attribute sda_tolerance[] = { - SENSOR_ATTR(pwm1_tolerance, S_IWUSR | S_IRUGO, show_tolerance, - store_tolerance, 0), - SENSOR_ATTR(pwm2_tolerance, S_IWUSR | S_IRUGO, show_tolerance, - store_tolerance, 1), - SENSOR_ATTR(pwm3_tolerance, S_IWUSR | S_IRUGO, show_tolerance, - store_tolerance, 2), - SENSOR_ATTR(pwm4_tolerance, S_IWUSR | S_IRUGO, show_tolerance, - store_tolerance, 3), -}; +static SENSOR_DEVICE_ATTR(pwm1_target, 0644, show_target_temp, + store_target_temp, 0); +static SENSOR_DEVICE_ATTR(pwm2_target, 0644, show_target_temp, + store_target_temp, 1); +static SENSOR_DEVICE_ATTR(pwm3_target, 0644, show_target_temp, + store_target_temp, 2); +static SENSOR_DEVICE_ATTR(pwm4_target, 0644, show_target_temp, + store_target_temp, 3); + +static SENSOR_DEVICE_ATTR(pwm1_tolerance, 0644, show_tolerance, + store_tolerance, 0); +static SENSOR_DEVICE_ATTR(pwm2_tolerance, 0644, show_tolerance, + store_tolerance, 1); +static SENSOR_DEVICE_ATTR(pwm3_tolerance, 0644, show_tolerance, + store_tolerance, 2); +static SENSOR_DEVICE_ATTR(pwm4_tolerance, 0644, show_tolerance, + store_tolerance, 3); /* Smart Fan registers */ @@ -1612,7 +960,7 @@ static struct sensor_device_attribute sda_tolerance[] = { static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ - struct w83627ehf_data *data = w83627ehf_update_device(dev); \ + struct w83627ehf_data *data = w83627ehf_update_device(dev->parent); \ struct sensor_device_attribute *sensor_attr = \ to_sensor_dev_attr(attr); \ int nr = sensor_attr->index; \ @@ -1634,21 +982,21 @@ store_##reg(struct device *dev, struct device_attribute *attr, \ val = clamp_val(val, 1, 255); \ mutex_lock(&data->update_lock); \ data->reg[nr] = val; \ - w83627ehf_write_value(data, data->REG_##REG[nr], val); \ + w83627ehf_write_value(data, REG[nr], val); \ mutex_unlock(&data->update_lock); \ return count; \ } -fan_functions(fan_start_output, FAN_START_OUTPUT) -fan_functions(fan_stop_output, FAN_STOP_OUTPUT) -fan_functions(fan_max_output, FAN_MAX_OUTPUT) -fan_functions(fan_step_output, FAN_STEP_OUTPUT) +fan_functions(fan_start_output, W83627EHF_REG_FAN_START_OUTPUT) +fan_functions(fan_stop_output, W83627EHF_REG_FAN_STOP_OUTPUT) +fan_functions(fan_max_output, data->REG_FAN_MAX_OUTPUT) 
+fan_functions(fan_step_output, data->REG_FAN_STEP_OUTPUT) #define fan_time_functions(reg, REG) \ static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ - struct w83627ehf_data *data = w83627ehf_update_device(dev); \ + struct w83627ehf_data *data = w83627ehf_update_device(dev->parent); \ struct sensor_device_attribute *sensor_attr = \ to_sensor_dev_attr(attr); \ int nr = sensor_attr->index; \ @@ -1673,78 +1021,61 @@ store_##reg(struct device *dev, struct device_attribute *attr, \ val = step_time_to_reg(val, data->pwm_mode[nr]); \ mutex_lock(&data->update_lock); \ data->reg[nr] = val; \ - w83627ehf_write_value(data, data->REG_##REG[nr], val); \ + w83627ehf_write_value(data, REG[nr], val); \ mutex_unlock(&data->update_lock); \ return count; \ } \ -fan_time_functions(fan_stop_time, FAN_STOP_TIME) - -static ssize_t name_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct w83627ehf_data *data = dev_get_drvdata(dev); - - return sprintf(buf, "%s\n", data->name); -} -static DEVICE_ATTR_RO(name); - -static struct sensor_device_attribute sda_sf3_arrays_fan4[] = { - SENSOR_ATTR(pwm4_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time, - store_fan_stop_time, 3), - SENSOR_ATTR(pwm4_start_output, S_IWUSR | S_IRUGO, show_fan_start_output, - store_fan_start_output, 3), - SENSOR_ATTR(pwm4_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output, - store_fan_stop_output, 3), - SENSOR_ATTR(pwm4_max_output, S_IWUSR | S_IRUGO, show_fan_max_output, - store_fan_max_output, 3), - SENSOR_ATTR(pwm4_step_output, S_IWUSR | S_IRUGO, show_fan_step_output, - store_fan_step_output, 3), -}; - -static struct sensor_device_attribute sda_sf3_arrays_fan3[] = { - SENSOR_ATTR(pwm3_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time, - store_fan_stop_time, 2), - SENSOR_ATTR(pwm3_start_output, S_IWUSR | S_IRUGO, show_fan_start_output, - store_fan_start_output, 2), - SENSOR_ATTR(pwm3_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output, - store_fan_stop_output, 2), -}; - -static struct sensor_device_attribute sda_sf3_arrays[] = { - SENSOR_ATTR(pwm1_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time, - store_fan_stop_time, 0), - SENSOR_ATTR(pwm2_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time, - store_fan_stop_time, 1), - SENSOR_ATTR(pwm1_start_output, S_IWUSR | S_IRUGO, show_fan_start_output, - store_fan_start_output, 0), - SENSOR_ATTR(pwm2_start_output, S_IWUSR | S_IRUGO, show_fan_start_output, - store_fan_start_output, 1), - SENSOR_ATTR(pwm1_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output, - store_fan_stop_output, 0), - SENSOR_ATTR(pwm2_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output, - store_fan_stop_output, 1), -}; +fan_time_functions(fan_stop_time, W83627EHF_REG_FAN_STOP_TIME) + +static SENSOR_DEVICE_ATTR(pwm4_stop_time, 0644, show_fan_stop_time, + store_fan_stop_time, 3); +static SENSOR_DEVICE_ATTR(pwm4_start_output, 0644, show_fan_start_output, + store_fan_start_output, 3); +static SENSOR_DEVICE_ATTR(pwm4_stop_output, 0644, show_fan_stop_output, + store_fan_stop_output, 3); +static SENSOR_DEVICE_ATTR(pwm4_max_output, 0644, show_fan_max_output, + store_fan_max_output, 3); +static SENSOR_DEVICE_ATTR(pwm4_step_output, 0644, show_fan_step_output, + store_fan_step_output, 3); + +static SENSOR_DEVICE_ATTR(pwm3_stop_time, 0644, show_fan_stop_time, + store_fan_stop_time, 2); +static SENSOR_DEVICE_ATTR(pwm3_start_output, 0644, show_fan_start_output, + store_fan_start_output, 2); +static SENSOR_DEVICE_ATTR(pwm3_stop_output, 0644, show_fan_stop_output, + 
store_fan_stop_output, 2); + +static SENSOR_DEVICE_ATTR(pwm1_stop_time, 0644, show_fan_stop_time, + store_fan_stop_time, 0); +static SENSOR_DEVICE_ATTR(pwm2_stop_time, 0644, show_fan_stop_time, + store_fan_stop_time, 1); +static SENSOR_DEVICE_ATTR(pwm1_start_output, 0644, show_fan_start_output, + store_fan_start_output, 0); +static SENSOR_DEVICE_ATTR(pwm2_start_output, 0644, show_fan_start_output, + store_fan_start_output, 1); +static SENSOR_DEVICE_ATTR(pwm1_stop_output, 0644, show_fan_stop_output, + store_fan_stop_output, 0); +static SENSOR_DEVICE_ATTR(pwm2_stop_output, 0644, show_fan_stop_output, + store_fan_stop_output, 1); /* * pwm1 and pwm3 don't support max and step settings on all chips. * Need to check support while generating/removing attribute files. */ -static struct sensor_device_attribute sda_sf3_max_step_arrays[] = { - SENSOR_ATTR(pwm1_max_output, S_IWUSR | S_IRUGO, show_fan_max_output, - store_fan_max_output, 0), - SENSOR_ATTR(pwm1_step_output, S_IWUSR | S_IRUGO, show_fan_step_output, - store_fan_step_output, 0), - SENSOR_ATTR(pwm2_max_output, S_IWUSR | S_IRUGO, show_fan_max_output, - store_fan_max_output, 1), - SENSOR_ATTR(pwm2_step_output, S_IWUSR | S_IRUGO, show_fan_step_output, - store_fan_step_output, 1), - SENSOR_ATTR(pwm3_max_output, S_IWUSR | S_IRUGO, show_fan_max_output, - store_fan_max_output, 2), - SENSOR_ATTR(pwm3_step_output, S_IWUSR | S_IRUGO, show_fan_step_output, - store_fan_step_output, 2), -}; +static SENSOR_DEVICE_ATTR(pwm1_max_output, 0644, show_fan_max_output, + store_fan_max_output, 0); +static SENSOR_DEVICE_ATTR(pwm1_step_output, 0644, show_fan_step_output, + store_fan_step_output, 0); +static SENSOR_DEVICE_ATTR(pwm2_max_output, 0644, show_fan_max_output, + store_fan_max_output, 1); +static SENSOR_DEVICE_ATTR(pwm2_step_output, 0644, show_fan_step_output, + store_fan_step_output, 1); +static SENSOR_DEVICE_ATTR(pwm3_max_output, 0644, show_fan_max_output, + store_fan_max_output, 2); +static SENSOR_DEVICE_ATTR(pwm3_step_output, 0644, show_fan_step_output, + store_fan_step_output, 2); static ssize_t cpu0_vid_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -1752,33 +1083,20 @@ cpu0_vid_show(struct device *dev, struct device_attribute *attr, char *buf) struct w83627ehf_data *data = dev_get_drvdata(dev); return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm)); } -static DEVICE_ATTR_RO(cpu0_vid); +DEVICE_ATTR_RO(cpu0_vid); /* Case open detection */ - -static ssize_t -show_caseopen(struct device *dev, struct device_attribute *attr, char *buf) +static int +clear_caseopen(struct device *dev, struct w83627ehf_data *data, int channel, + long val) { - struct w83627ehf_data *data = w83627ehf_update_device(dev); - - return sprintf(buf, "%d\n", - !!(data->caseopen & to_sensor_dev_attr_2(attr)->index)); -} - -static ssize_t -clear_caseopen(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct w83627ehf_data *data = dev_get_drvdata(dev); - unsigned long val; - u16 reg, mask; + const u16 mask = 0x80; + u16 reg; - if (kstrtoul(buf, 10, &val) || val != 0) + if (val != 0 || channel != 0) return -EINVAL; - mask = to_sensor_dev_attr_2(attr)->nr; - mutex_lock(&data->update_lock); reg = w83627ehf_read_value(data, W83627EHF_REG_CASEOPEN_CLR); w83627ehf_write_value(data, W83627EHF_REG_CASEOPEN_CLR, reg | mask); @@ -1786,85 +1104,116 @@ clear_caseopen(struct device *dev, struct device_attribute *attr, data->valid = 0; /* Force cache refresh */ mutex_unlock(&data->update_lock); - return count; + return 0; } 
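The hunks above and below remove the last of the hand-rolled sysfs attribute arrays: from this point on, all standard sensor attributes are generated by the hwmon core from a hwmon_chip_info description, and only the non-standard pwm*_target/tolerance/stop_time/start_output files remain in an extra attribute group. For orientation, here is a minimal sketch of the with_info registration pattern this conversion adopts; it is not part of the patch, the demo_* identifiers are hypothetical stand-ins, and the real driver wires up w83627ehf_is_visible(), w83627ehf_read() and w83627ehf_write() further down in this diff. The payoff of the pattern is that a single is_visible() callback gates every attribute per channel, and devm-based registration removes all error-path and remove-path unwinding.

/*
 * Sketch only -- not part of the patch. Assumes a platform driver
 * exposing one temperature channel; all demo_* names are hypothetical.
 */
#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct demo_data {
	long temp;	/* cached value in millidegrees Celsius */
};

/*
 * One callback decides, per channel and attribute, whether a sysfs
 * file exists and with what mode, replacing long chains of
 * device_create_file()/device_remove_file() calls.
 */
static umode_t demo_is_visible(const void *drvdata,
			       enum hwmon_sensor_types type,
			       u32 attr, int channel)
{
	if (type == hwmon_temp && attr == hwmon_temp_input)
		return 0444;
	return 0;	/* everything else stays hidden */
}

static int demo_read(struct device *dev, enum hwmon_sensor_types type,
		     u32 attr, int channel, long *val)
{
	struct demo_data *data = dev_get_drvdata(dev);

	if (type == hwmon_temp && attr == hwmon_temp_input) {
		*val = data->temp;
		return 0;
	}
	return -EOPNOTSUPP;
}

static const struct hwmon_ops demo_ops = {
	.is_visible = demo_is_visible,
	.read = demo_read,
};

static const struct hwmon_channel_info *demo_info[] = {
	HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
	NULL
};

static const struct hwmon_chip_info demo_chip_info = {
	.ops = &demo_ops,
	.info = demo_info,
};

static int demo_probe(struct platform_device *pdev)
{
	struct demo_data *data;
	struct device *hwmon_dev;

	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->temp = 42000;	/* placeholder reading */

	/*
	 * The hwmon core creates the sysfs files and removes them
	 * automatically on unbind; no unwinding code is needed.
	 */
	hwmon_dev = devm_hwmon_device_register_with_info(&pdev->dev, "demo",
							 data,
							 &demo_chip_info,
							 NULL);
	return PTR_ERR_OR_ZERO(hwmon_dev);
}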
-static struct sensor_device_attribute_2 sda_caseopen[] = { - SENSOR_ATTR_2(intrusion0_alarm, S_IWUSR | S_IRUGO, show_caseopen, - clear_caseopen, 0x80, 0x10), - SENSOR_ATTR_2(intrusion1_alarm, S_IWUSR | S_IRUGO, show_caseopen, - clear_caseopen, 0x40, 0x40), -}; - -/* - * Driver and device management - */ - -static void w83627ehf_device_remove_files(struct device *dev) +static umode_t w83627ehf_attrs_visible(struct kobject *kobj, + struct attribute *a, int n) { - /* - * some entries in the following arrays may not have been used in - * device_create_file(), but device_remove_file() will ignore them - */ - int i; + struct device *dev = container_of(kobj, struct device, kobj); struct w83627ehf_data *data = dev_get_drvdata(dev); + struct device_attribute *devattr; + struct sensor_device_attribute *sda; + + devattr = container_of(a, struct device_attribute, attr); + + /* Not sensor */ + if (devattr->show == cpu0_vid_show && data->have_vid) + return a->mode; + + sda = (struct sensor_device_attribute *)devattr; + + if (sda->index < 2 && + (devattr->show == show_fan_stop_time || + devattr->show == show_fan_start_output || + devattr->show == show_fan_stop_output)) + return a->mode; + + if (sda->index < 3 && + (devattr->show == show_fan_max_output || + devattr->show == show_fan_step_output) && + data->REG_FAN_STEP_OUTPUT && + data->REG_FAN_STEP_OUTPUT[sda->index] != 0xff) + return a->mode; + + /* if fan3 and fan4 are enabled create the files for them */ + if (sda->index == 2 && + (data->has_fan & (1 << 2)) && data->pwm_num >= 3 && + (devattr->show == show_fan_stop_time || + devattr->show == show_fan_start_output || + devattr->show == show_fan_stop_output)) + return a->mode; + + if (sda->index == 3 && + (data->has_fan & (1 << 3)) && data->pwm_num >= 4 && + (devattr->show == show_fan_stop_time || + devattr->show == show_fan_start_output || + devattr->show == show_fan_stop_output || + devattr->show == show_fan_max_output || + devattr->show == show_fan_step_output)) + return a->mode; + + if ((devattr->show == show_target_temp || + devattr->show == show_tolerance) && + (data->has_fan & (1 << sda->index)) && + sda->index < data->pwm_num) + return a->mode; - for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays); i++) - device_remove_file(dev, &sda_sf3_arrays[i].dev_attr); - for (i = 0; i < ARRAY_SIZE(sda_sf3_max_step_arrays); i++) { - struct sensor_device_attribute *attr = - &sda_sf3_max_step_arrays[i]; - if (data->REG_FAN_STEP_OUTPUT && - data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff) - device_remove_file(dev, &attr->dev_attr); - } - for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan3); i++) - device_remove_file(dev, &sda_sf3_arrays_fan3[i].dev_attr); - for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan4); i++) - device_remove_file(dev, &sda_sf3_arrays_fan4[i].dev_attr); - for (i = 0; i < data->in_num; i++) { - if ((i == 6) && data->in6_skip) - continue; - device_remove_file(dev, &sda_in_input[i].dev_attr); - device_remove_file(dev, &sda_in_alarm[i].dev_attr); - device_remove_file(dev, &sda_in_min[i].dev_attr); - device_remove_file(dev, &sda_in_max[i].dev_attr); - } - for (i = 0; i < 5; i++) { - device_remove_file(dev, &sda_fan_input[i].dev_attr); - device_remove_file(dev, &sda_fan_alarm[i].dev_attr); - device_remove_file(dev, &sda_fan_div[i].dev_attr); - device_remove_file(dev, &sda_fan_min[i].dev_attr); - } - for (i = 0; i < data->pwm_num; i++) { - device_remove_file(dev, &sda_pwm[i].dev_attr); - device_remove_file(dev, &sda_pwm_mode[i].dev_attr); - device_remove_file(dev, &sda_pwm_enable[i].dev_attr); - 
device_remove_file(dev, &sda_target_temp[i].dev_attr); - device_remove_file(dev, &sda_tolerance[i].dev_attr); - } - for (i = 0; i < NUM_REG_TEMP; i++) { - if (!(data->have_temp & (1 << i))) - continue; - device_remove_file(dev, &sda_temp_input[i].dev_attr); - device_remove_file(dev, &sda_temp_label[i].dev_attr); - if (i == 2 && data->temp3_val_only) - continue; - device_remove_file(dev, &sda_temp_max[i].dev_attr); - device_remove_file(dev, &sda_temp_max_hyst[i].dev_attr); - if (i > 2) - continue; - device_remove_file(dev, &sda_temp_alarm[i].dev_attr); - device_remove_file(dev, &sda_temp_type[i].dev_attr); - device_remove_file(dev, &sda_temp_offset[i].dev_attr); - } + return 0; +} + +/* These groups handle non-standard attributes used in this device */ +static struct attribute *w83627ehf_attrs[] = { + + &sensor_dev_attr_pwm1_stop_time.dev_attr.attr, + &sensor_dev_attr_pwm1_start_output.dev_attr.attr, + &sensor_dev_attr_pwm1_stop_output.dev_attr.attr, + &sensor_dev_attr_pwm1_max_output.dev_attr.attr, + &sensor_dev_attr_pwm1_step_output.dev_attr.attr, + &sensor_dev_attr_pwm1_target.dev_attr.attr, + &sensor_dev_attr_pwm1_tolerance.dev_attr.attr, + + &sensor_dev_attr_pwm2_stop_time.dev_attr.attr, + &sensor_dev_attr_pwm2_start_output.dev_attr.attr, + &sensor_dev_attr_pwm2_stop_output.dev_attr.attr, + &sensor_dev_attr_pwm2_max_output.dev_attr.attr, + &sensor_dev_attr_pwm2_step_output.dev_attr.attr, + &sensor_dev_attr_pwm2_target.dev_attr.attr, + &sensor_dev_attr_pwm2_tolerance.dev_attr.attr, + + &sensor_dev_attr_pwm3_stop_time.dev_attr.attr, + &sensor_dev_attr_pwm3_start_output.dev_attr.attr, + &sensor_dev_attr_pwm3_stop_output.dev_attr.attr, + &sensor_dev_attr_pwm3_max_output.dev_attr.attr, + &sensor_dev_attr_pwm3_step_output.dev_attr.attr, + &sensor_dev_attr_pwm3_target.dev_attr.attr, + &sensor_dev_attr_pwm3_tolerance.dev_attr.attr, + + &sensor_dev_attr_pwm4_stop_time.dev_attr.attr, + &sensor_dev_attr_pwm4_start_output.dev_attr.attr, + &sensor_dev_attr_pwm4_stop_output.dev_attr.attr, + &sensor_dev_attr_pwm4_max_output.dev_attr.attr, + &sensor_dev_attr_pwm4_step_output.dev_attr.attr, + &sensor_dev_attr_pwm4_target.dev_attr.attr, + &sensor_dev_attr_pwm4_tolerance.dev_attr.attr, + + &dev_attr_cpu0_vid.attr, + NULL +}; - device_remove_file(dev, &sda_caseopen[0].dev_attr); - device_remove_file(dev, &sda_caseopen[1].dev_attr); +static const struct attribute_group w83627ehf_group = { + .attrs = w83627ehf_attrs, + .is_visible = w83627ehf_attrs_visible, +}; - device_remove_file(dev, &dev_attr_name); - device_remove_file(dev, &dev_attr_cpu0_vid); -} +static const struct attribute_group *w83627ehf_groups[] = { + &w83627ehf_group, + NULL +}; + +/* + * Driver and device management + */ /* Get the monitoring functions started */ static inline void w83627ehf_init_device(struct w83627ehf_data *data, @@ -1927,16 +1276,6 @@ static inline void w83627ehf_init_device(struct w83627ehf_data *data, } } -static void w82627ehf_swap_tempreg(struct w83627ehf_data *data, - int r1, int r2) -{ - swap(data->temp_src[r1], data->temp_src[r2]); - swap(data->reg_temp[r1], data->reg_temp[r2]); - swap(data->reg_temp_over[r1], data->reg_temp_over[r2]); - swap(data->reg_temp_hyst[r1], data->reg_temp_hyst[r2]); - swap(data->reg_temp_config[r1], data->reg_temp_config[r2]); -} - static void w83627ehf_set_temp_reg_ehf(struct w83627ehf_data *data, int n_temp) { @@ -1954,7 +1293,7 @@ static void w83627ehf_check_fan_inputs(const struct w83627ehf_sio_data *sio_data, struct w83627ehf_data *data) { - int fan3pin, fan4pin, fan4min, fan5pin, 
regval; + int fan3pin, fan4pin, fan5pin, regval; /* The W83627UHG is simple, only two fan inputs, no config */ if (sio_data->kind == w83627uhg) { @@ -1964,77 +1303,392 @@ w83627ehf_check_fan_inputs(const struct w83627ehf_sio_data *sio_data, } /* fan4 and fan5 share some pins with the GPIO and serial flash */ - if (sio_data->kind == nct6775) { - /* On NCT6775, fan4 shares pins with the fdc interface */ - fan3pin = 1; - fan4pin = !(superio_inb(sio_data->sioreg, 0x2A) & 0x80); - fan4min = 0; - fan5pin = 0; - } else if (sio_data->kind == nct6776) { - bool gpok = superio_inb(sio_data->sioreg, 0x27) & 0x80; - - superio_select(sio_data->sioreg, W83627EHF_LD_HWM); - regval = superio_inb(sio_data->sioreg, SIO_REG_ENABLE); - - if (regval & 0x80) - fan3pin = gpok; - else - fan3pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x40); - - if (regval & 0x40) - fan4pin = gpok; - else - fan4pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x01); - - if (regval & 0x20) - fan5pin = gpok; - else - fan5pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x02); - - fan4min = fan4pin; - } else if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) { + if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) { fan3pin = 1; fan4pin = superio_inb(sio_data->sioreg, 0x27) & 0x40; fan5pin = superio_inb(sio_data->sioreg, 0x27) & 0x20; - fan4min = fan4pin; } else { fan3pin = 1; fan4pin = !(superio_inb(sio_data->sioreg, 0x29) & 0x06); fan5pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x02); - fan4min = fan4pin; } data->has_fan = data->has_fan_min = 0x03; /* fan1 and fan2 */ data->has_fan |= (fan3pin << 2); data->has_fan_min |= (fan3pin << 2); - if (sio_data->kind == nct6775 || sio_data->kind == nct6776) { - /* - * NCT6775F and NCT6776F don't have the W83627EHF_REG_FANDIV1 - * register - */ - data->has_fan |= (fan4pin << 3) | (fan5pin << 4); - data->has_fan_min |= (fan4min << 3) | (fan5pin << 4); - } else { - /* - * It looks like fan4 and fan5 pins can be alternatively used - * as fan on/off switches, but fan5 control is write only :/ - * We assume that if the serial interface is disabled, designers - * connected fan5 as input unless they are emitting log 1, which - * is not the default. - */ - regval = w83627ehf_read_value(data, W83627EHF_REG_FANDIV1); - if ((regval & (1 << 2)) && fan4pin) { - data->has_fan |= (1 << 3); - data->has_fan_min |= (1 << 3); + /* + * It looks like fan4 and fan5 pins can be alternatively used + * as fan on/off switches, but fan5 control is write only :/ + * We assume that if the serial interface is disabled, designers + * connected fan5 as input unless they are emitting log 1, which + * is not the default. + */ + regval = w83627ehf_read_value(data, W83627EHF_REG_FANDIV1); + if ((regval & (1 << 2)) && fan4pin) { + data->has_fan |= (1 << 3); + data->has_fan_min |= (1 << 3); + } + if (!(regval & (1 << 1)) && fan5pin) { + data->has_fan |= (1 << 4); + data->has_fan_min |= (1 << 4); + } +} + +static umode_t +w83627ehf_is_visible(const void *drvdata, enum hwmon_sensor_types type, + u32 attr, int channel) +{ + const struct w83627ehf_data *data = drvdata; + + switch (type) { + case hwmon_temp: + /* channel 0.., name 1.. 
*/ + if (!(data->have_temp & (1 << channel))) + return 0; + if (attr == hwmon_temp_input || attr == hwmon_temp_label) + return 0444; + if (channel == 2 && data->temp3_val_only) + return 0; + if (attr == hwmon_temp_max) { + if (data->reg_temp_over[channel]) + return 0644; + else + return 0; + } + if (attr == hwmon_temp_max_hyst) { + if (data->reg_temp_hyst[channel]) + return 0644; + else + return 0; + } + if (channel > 2) + return 0; + if (attr == hwmon_temp_alarm || attr == hwmon_temp_type) + return 0444; + if (attr == hwmon_temp_offset) { + if (data->have_temp_offset & (1 << channel)) + return 0644; + else + return 0; + } + break; + + case hwmon_fan: + /* channel 0.., name 1.. */ + if (!(data->has_fan & (1 << channel))) + return 0; + if (attr == hwmon_fan_input || attr == hwmon_fan_alarm) + return 0444; + if (attr == hwmon_fan_div) { + return 0444; } - if (!(regval & (1 << 1)) && fan5pin) { - data->has_fan |= (1 << 4); - data->has_fan_min |= (1 << 4); + if (attr == hwmon_fan_min) { + if (data->has_fan_min & (1 << channel)) + return 0644; + else + return 0; } + break; + + case hwmon_in: + /* channel 0.., name 0.. */ + if (channel >= data->in_num) + return 0; + if (channel == 6 && data->in6_skip) + return 0; + if (attr == hwmon_in_alarm || attr == hwmon_in_input) + return 0444; + if (attr == hwmon_in_min || attr == hwmon_in_max) + return 0644; + break; + + case hwmon_pwm: + /* channel 0.., name 1.. */ + if (!(data->has_fan & (1 << channel)) || + channel >= data->pwm_num) + return 0; + if (attr == hwmon_pwm_mode || attr == hwmon_pwm_enable || + attr == hwmon_pwm_input) + return 0644; + break; + + case hwmon_intrusion: + return 0644; + + default: /* Shouldn't happen */ + return 0; } + + return 0; /* Shouldn't happen */ } +static int +w83627ehf_do_read_temp(struct w83627ehf_data *data, u32 attr, + int channel, long *val) +{ + switch (attr) { + case hwmon_temp_input: + *val = LM75_TEMP_FROM_REG(data->temp[channel]); + return 0; + case hwmon_temp_max: + *val = LM75_TEMP_FROM_REG(data->temp_max[channel]); + return 0; + case hwmon_temp_max_hyst: + *val = LM75_TEMP_FROM_REG(data->temp_max_hyst[channel]); + return 0; + case hwmon_temp_offset: + *val = data->temp_offset[channel] * 1000; + return 0; + case hwmon_temp_type: + *val = (int)data->temp_type[channel]; + return 0; + case hwmon_temp_alarm: + if (channel < 3) { + int bit[] = { 4, 5, 13 }; + *val = (data->alarms >> bit[channel]) & 1; + return 0; + } + break; + + default: + break; + } + + return -EOPNOTSUPP; +} + +static int +w83627ehf_do_read_in(struct w83627ehf_data *data, u32 attr, + int channel, long *val) +{ + switch (attr) { + case hwmon_in_input: + *val = in_from_reg(data->in[channel], channel, data->scale_in); + return 0; + case hwmon_in_min: + *val = in_from_reg(data->in_min[channel], channel, + data->scale_in); + return 0; + case hwmon_in_max: + *val = in_from_reg(data->in_max[channel], channel, + data->scale_in); + return 0; + case hwmon_in_alarm: + if (channel < 10) { + int bit[] = { 0, 1, 2, 3, 8, 21, 20, 16, 17, 19 }; + *val = (data->alarms >> bit[channel]) & 1; + return 0; + } + break; + default: + break; + } + return -EOPNOTSUPP; +} + +static int +w83627ehf_do_read_fan(struct w83627ehf_data *data, u32 attr, + int channel, long *val) +{ + switch (attr) { + case hwmon_fan_input: + *val = data->rpm[channel]; + return 0; + case hwmon_fan_min: + *val = fan_from_reg8(data->fan_min[channel], + data->fan_div[channel]); + return 0; + case hwmon_fan_div: + *val = div_from_reg(data->fan_div[channel]); + return 0; + case hwmon_fan_alarm: + 
if (channel < 5) {
+			int bit[] = { 6, 7, 11, 10, 23 };
+			*val = (data->alarms >> bit[channel]) & 1;
+			return 0;
+		}
+		break;
+	default:
+		break;
+	}
+	return -EOPNOTSUPP;
+}
+
+static int
+w83627ehf_do_read_pwm(struct w83627ehf_data *data, u32 attr,
+		      int channel, long *val)
+{
+	switch (attr) {
+	case hwmon_pwm_input:
+		*val = data->pwm[channel];
+		return 0;
+	case hwmon_pwm_enable:
+		*val = data->pwm_enable[channel];
+		return 0;
+	case hwmon_pwm_mode:
+		*val = data->pwm_mode[channel];
+		return 0;
+	default:
+		break;
+	}
+	return -EOPNOTSUPP;
+}
+
+static int
+w83627ehf_do_read_intrusion(struct w83627ehf_data *data, u32 attr,
+			    int channel, long *val)
+{
+	if (attr != hwmon_intrusion_alarm || channel != 0)
+		return -EOPNOTSUPP; /* shouldn't happen */
+
+	*val = !!(data->caseopen & 0x10);
+	return 0;
+}
+
+static int
+w83627ehf_read(struct device *dev, enum hwmon_sensor_types type,
+	       u32 attr, int channel, long *val)
+{
+	struct w83627ehf_data *data = w83627ehf_update_device(dev->parent);
+
+	switch (type) {
+	case hwmon_fan:
+		return w83627ehf_do_read_fan(data, attr, channel, val);
+
+	case hwmon_in:
+		return w83627ehf_do_read_in(data, attr, channel, val);
+
+	case hwmon_pwm:
+		return w83627ehf_do_read_pwm(data, attr, channel, val);
+
+	case hwmon_temp:
+		return w83627ehf_do_read_temp(data, attr, channel, val);
+
+	case hwmon_intrusion:
+		return w83627ehf_do_read_intrusion(data, attr, channel, val);
+
+	default:
+		break;
+	}
+
+	return -EOPNOTSUPP;
+}
+
+static int
+w83627ehf_read_string(struct device *dev, enum hwmon_sensor_types type,
+		      u32 attr, int channel, const char **str)
+{
+	struct w83627ehf_data *data = dev_get_drvdata(dev);
+
+	switch (type) {
+	case hwmon_temp:
+		if (attr == hwmon_temp_label) {
+			*str = data->temp_label[data->temp_src[channel]];
+			return 0;
+		}
+		break;
+
+	default:
+		break;
+	}
+	/* Nothing else should be read as a string */
+	return -EOPNOTSUPP;
+}
+
+static int
+w83627ehf_write(struct device *dev, enum hwmon_sensor_types type,
+		u32 attr, int channel, long val)
+{
+	struct w83627ehf_data *data = dev_get_drvdata(dev);
+
+	if (type == hwmon_in && attr == hwmon_in_min)
+		return store_in_min(dev, data, channel, val);
+	if (type == hwmon_in && attr == hwmon_in_max)
+		return store_in_max(dev, data, channel, val);
+
+	if (type == hwmon_fan && attr == hwmon_fan_min)
+		return store_fan_min(dev, data, channel, val);
+
+	if (type == hwmon_temp && attr == hwmon_temp_max)
+		return store_temp_max(dev, data, channel, val);
+	if (type == hwmon_temp && attr == hwmon_temp_max_hyst)
+		return store_temp_max_hyst(dev, data, channel, val);
+	if (type == hwmon_temp && attr == hwmon_temp_offset)
+		return store_temp_offset(dev, data, channel, val);
+
+	if (type == hwmon_pwm && attr == hwmon_pwm_mode)
+		return store_pwm_mode(dev, data, channel, val);
+	if (type == hwmon_pwm && attr == hwmon_pwm_enable)
+		return store_pwm_enable(dev, data, channel, val);
+	if (type == hwmon_pwm && attr == hwmon_pwm_input)
+		return store_pwm(dev, data, channel, val);
+
+	if (type == hwmon_intrusion && attr == hwmon_intrusion_alarm)
+		return clear_caseopen(dev, data, channel, val);
+
+	return -EOPNOTSUPP;
+}
+
+static const struct hwmon_ops w83627ehf_ops = {
+	.is_visible = w83627ehf_is_visible,
+	.read = w83627ehf_read,
+	.read_string = w83627ehf_read_string,
+	.write = w83627ehf_write,
+};
+
+static const struct hwmon_channel_info *w83627ehf_info[] = {
+	HWMON_CHANNEL_INFO(fan,
+		HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN,
+		HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN,
+ HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN, + HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN, + HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN), + HWMON_CHANNEL_INFO(in, + HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN, + HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN, + HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN, + HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN, + HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN, + HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN, + HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN, + HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN, + HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN, + HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN), + HWMON_CHANNEL_INFO(pwm, + HWMON_PWM_ENABLE | HWMON_PWM_INPUT | HWMON_PWM_MODE, + HWMON_PWM_ENABLE | HWMON_PWM_INPUT | HWMON_PWM_MODE, + HWMON_PWM_ENABLE | HWMON_PWM_INPUT | HWMON_PWM_MODE, + HWMON_PWM_ENABLE | HWMON_PWM_INPUT | HWMON_PWM_MODE), + HWMON_CHANNEL_INFO(temp, + HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX | + HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE, + HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX | + HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE, + HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX | + HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE, + HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX | + HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE, + HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX | + HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE, + HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX | + HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE, + HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX | + HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE, + HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX | + HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE, + HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX | + HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE), + HWMON_CHANNEL_INFO(intrusion, + HWMON_INTRUSION_ALARM), + NULL +}; + +static const struct hwmon_chip_info w83627ehf_chip_info = { + .ops = &w83627ehf_ops, + .info = w83627ehf_info, +}; + static int w83627ehf_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -2043,6 +1697,7 @@ static int w83627ehf_probe(struct platform_device *pdev) struct resource *res; u8 en_vrm10; int i, err = 0; + struct device *hwmon_dev; res = platform_get_resource(pdev, IORESOURCE_IO, 0); if (!request_region(res->start, IOREGION_LENGTH, DRVNAME)) { @@ -2069,15 +1724,13 @@ static int w83627ehf_probe(struct platform_device *pdev) /* 627EHG and 627EHF have 10 voltage inputs; 627DHG and 667HG have 9 */ data->in_num = (sio_data->kind == w83627ehf) ? 10 : 9; - /* 667HG, NCT6775F, and NCT6776F have 3 pwms, and 627UHG has only 2 */ + /* 667HG has 3 pwms, and 627UHG has only 2 */ switch (sio_data->kind) { default: data->pwm_num = 4; break; case w83667hg: case w83667hg_b: - case nct6775: - case nct6776: data->pwm_num = 3; break; case w83627uhg: @@ -2089,83 +1742,7 @@ static int w83627ehf_probe(struct platform_device *pdev) data->have_temp = 0x07; /* Deal with temperature register setup first. */ - if (sio_data->kind == nct6775 || sio_data->kind == nct6776) { - int mask = 0; - - /* - * Display temperature sensor output only if it monitors - * a source other than one already reported. 
Always display - * first three temperature registers, though. - */ - for (i = 0; i < NUM_REG_TEMP; i++) { - u8 src; - - data->reg_temp[i] = NCT6775_REG_TEMP[i]; - data->reg_temp_over[i] = NCT6775_REG_TEMP_OVER[i]; - data->reg_temp_hyst[i] = NCT6775_REG_TEMP_HYST[i]; - data->reg_temp_config[i] = NCT6775_REG_TEMP_CONFIG[i]; - - src = w83627ehf_read_value(data, - NCT6775_REG_TEMP_SOURCE[i]); - src &= 0x1f; - if (src && !(mask & (1 << src))) { - data->have_temp |= 1 << i; - mask |= 1 << src; - } - - data->temp_src[i] = src; - - /* - * Now do some register swapping if index 0..2 don't - * point to SYSTIN(1), CPUIN(2), and AUXIN(3). - * Idea is to have the first three attributes - * report SYSTIN, CPUIN, and AUXIN if possible - * without overriding the basic system configuration. - */ - if (i > 0 && data->temp_src[0] != 1 - && data->temp_src[i] == 1) - w82627ehf_swap_tempreg(data, 0, i); - if (i > 1 && data->temp_src[1] != 2 - && data->temp_src[i] == 2) - w82627ehf_swap_tempreg(data, 1, i); - if (i > 2 && data->temp_src[2] != 3 - && data->temp_src[i] == 3) - w82627ehf_swap_tempreg(data, 2, i); - } - if (sio_data->kind == nct6776) { - /* - * On NCT6776, AUXTIN and VIN3 pins are shared. - * Only way to detect it is to check if AUXTIN is used - * as a temperature source, and if that source is - * enabled. - * - * If that is the case, disable in6, which reports VIN3. - * Otherwise disable temp3. - */ - if (data->temp_src[2] == 3) { - u8 reg; - - if (data->reg_temp_config[2]) - reg = w83627ehf_read_value(data, - data->reg_temp_config[2]); - else - reg = 0; /* Assume AUXTIN is used */ - - if (reg & 0x01) - data->have_temp &= ~(1 << 2); - else - data->in6_skip = 1; - } - data->temp_label = nct6776_temp_label; - } else { - data->temp_label = nct6775_temp_label; - } - data->have_temp_offset = data->have_temp & 0x07; - for (i = 0; i < 3; i++) { - if (data->temp_src[i] > 3) - data->have_temp_offset &= ~(1 << i); - } - } else if (sio_data->kind == w83667hg_b) { + if (sio_data->kind == w83667hg_b) { u8 reg; w83627ehf_set_temp_reg_ehf(data, 4); @@ -2275,56 +1852,12 @@ static int w83627ehf_probe(struct platform_device *pdev) data->have_temp_offset = data->have_temp & 0x07; } - if (sio_data->kind == nct6775) { - data->has_fan_div = true; - data->fan_from_reg = fan_from_reg16; - data->fan_from_reg_min = fan_from_reg8; - data->REG_PWM = NCT6775_REG_PWM; - data->REG_TARGET = NCT6775_REG_TARGET; - data->REG_FAN = NCT6775_REG_FAN; - data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN; - data->REG_FAN_START_OUTPUT = NCT6775_REG_FAN_START_OUTPUT; - data->REG_FAN_STOP_OUTPUT = NCT6775_REG_FAN_STOP_OUTPUT; - data->REG_FAN_STOP_TIME = NCT6775_REG_FAN_STOP_TIME; - data->REG_FAN_MAX_OUTPUT = NCT6775_REG_FAN_MAX_OUTPUT; - data->REG_FAN_STEP_OUTPUT = NCT6775_REG_FAN_STEP_OUTPUT; - } else if (sio_data->kind == nct6776) { - data->has_fan_div = false; - data->fan_from_reg = fan_from_reg13; - data->fan_from_reg_min = fan_from_reg13; - data->REG_PWM = NCT6775_REG_PWM; - data->REG_TARGET = NCT6775_REG_TARGET; - data->REG_FAN = NCT6775_REG_FAN; - data->REG_FAN_MIN = NCT6776_REG_FAN_MIN; - data->REG_FAN_START_OUTPUT = NCT6775_REG_FAN_START_OUTPUT; - data->REG_FAN_STOP_OUTPUT = NCT6775_REG_FAN_STOP_OUTPUT; - data->REG_FAN_STOP_TIME = NCT6775_REG_FAN_STOP_TIME; - } else if (sio_data->kind == w83667hg_b) { - data->has_fan_div = true; - data->fan_from_reg = fan_from_reg8; - data->fan_from_reg_min = fan_from_reg8; - data->REG_PWM = W83627EHF_REG_PWM; - data->REG_TARGET = W83627EHF_REG_TARGET; - data->REG_FAN = W83627EHF_REG_FAN; - 
data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN; - data->REG_FAN_START_OUTPUT = W83627EHF_REG_FAN_START_OUTPUT; - data->REG_FAN_STOP_OUTPUT = W83627EHF_REG_FAN_STOP_OUTPUT; - data->REG_FAN_STOP_TIME = W83627EHF_REG_FAN_STOP_TIME; + if (sio_data->kind == w83667hg_b) { data->REG_FAN_MAX_OUTPUT = W83627EHF_REG_FAN_MAX_OUTPUT_W83667_B; data->REG_FAN_STEP_OUTPUT = W83627EHF_REG_FAN_STEP_OUTPUT_W83667_B; } else { - data->has_fan_div = true; - data->fan_from_reg = fan_from_reg8; - data->fan_from_reg_min = fan_from_reg8; - data->REG_PWM = W83627EHF_REG_PWM; - data->REG_TARGET = W83627EHF_REG_TARGET; - data->REG_FAN = W83627EHF_REG_FAN; - data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN; - data->REG_FAN_START_OUTPUT = W83627EHF_REG_FAN_START_OUTPUT; - data->REG_FAN_STOP_OUTPUT = W83627EHF_REG_FAN_STOP_OUTPUT; - data->REG_FAN_STOP_TIME = W83627EHF_REG_FAN_STOP_TIME; data->REG_FAN_MAX_OUTPUT = W83627EHF_REG_FAN_MAX_OUTPUT_COMMON; data->REG_FAN_STEP_OUTPUT = @@ -2347,8 +1880,7 @@ static int w83627ehf_probe(struct platform_device *pdev) goto exit_release; /* Read VID value */ - if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b || - sio_data->kind == nct6775 || sio_data->kind == nct6776) { + if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) { /* * W83667HG has different pins for VID input and output, so * we can get the VID input values directly at logical device D @@ -2356,11 +1888,7 @@ static int w83627ehf_probe(struct platform_device *pdev) */ superio_select(sio_data->sioreg, W83667HG_LD_VID); data->vid = superio_inb(sio_data->sioreg, 0xe3); - err = device_create_file(dev, &dev_attr_cpu0_vid); - if (err) { - superio_exit(sio_data->sioreg); - goto exit_release; - } + data->have_vid = true; } else if (sio_data->kind != w83627uhg) { superio_select(sio_data->sioreg, W83627EHF_LD_HWM); if (superio_inb(sio_data->sioreg, SIO_REG_VID_CTRL) & 0x80) { @@ -2394,190 +1922,33 @@ static int w83627ehf_probe(struct platform_device *pdev) SIO_REG_VID_DATA); if (sio_data->kind == w83627ehf) /* 6 VID pins only */ data->vid &= 0x3f; - - err = device_create_file(dev, &dev_attr_cpu0_vid); - if (err) { - superio_exit(sio_data->sioreg); - goto exit_release; - } + data->have_vid = true; } else { dev_info(dev, "VID pins in output mode, CPU VID not available\n"); } } - if (fan_debounce && - (sio_data->kind == nct6775 || sio_data->kind == nct6776)) { - u8 tmp; - - superio_select(sio_data->sioreg, W83627EHF_LD_HWM); - tmp = superio_inb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE); - if (sio_data->kind == nct6776) - superio_outb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE, - 0x3e | tmp); - else - superio_outb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE, - 0x1e | tmp); - pr_info("Enabled fan debounce for chip %s\n", data->name); - } - w83627ehf_check_fan_inputs(sio_data, data); superio_exit(sio_data->sioreg); /* Read fan clock dividers immediately */ - w83627ehf_update_fan_div_common(dev, data); + w83627ehf_update_fan_div(data); /* Read pwm data to save original values */ - w83627ehf_update_pwm_common(dev, data); + w83627ehf_update_pwm(data); for (i = 0; i < data->pwm_num; i++) data->pwm_enable_orig[i] = data->pwm_enable[i]; - /* Register sysfs hooks */ - for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays); i++) { - err = device_create_file(dev, &sda_sf3_arrays[i].dev_attr); - if (err) - goto exit_remove; - } - - for (i = 0; i < ARRAY_SIZE(sda_sf3_max_step_arrays); i++) { - struct sensor_device_attribute *attr = - &sda_sf3_max_step_arrays[i]; - if (data->REG_FAN_STEP_OUTPUT && - data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff) { 
- err = device_create_file(dev, &attr->dev_attr); - if (err) - goto exit_remove; - } - } - /* if fan3 and fan4 are enabled create the sf3 files for them */ - if ((data->has_fan & (1 << 2)) && data->pwm_num >= 3) - for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan3); i++) { - err = device_create_file(dev, - &sda_sf3_arrays_fan3[i].dev_attr); - if (err) - goto exit_remove; - } - if ((data->has_fan & (1 << 3)) && data->pwm_num >= 4) - for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan4); i++) { - err = device_create_file(dev, - &sda_sf3_arrays_fan4[i].dev_attr); - if (err) - goto exit_remove; - } - - for (i = 0; i < data->in_num; i++) { - if ((i == 6) && data->in6_skip) - continue; - if ((err = device_create_file(dev, &sda_in_input[i].dev_attr)) - || (err = device_create_file(dev, - &sda_in_alarm[i].dev_attr)) - || (err = device_create_file(dev, - &sda_in_min[i].dev_attr)) - || (err = device_create_file(dev, - &sda_in_max[i].dev_attr))) - goto exit_remove; - } - - for (i = 0; i < 5; i++) { - if (data->has_fan & (1 << i)) { - if ((err = device_create_file(dev, - &sda_fan_input[i].dev_attr)) - || (err = device_create_file(dev, - &sda_fan_alarm[i].dev_attr))) - goto exit_remove; - if (sio_data->kind != nct6776) { - err = device_create_file(dev, - &sda_fan_div[i].dev_attr); - if (err) - goto exit_remove; - } - if (data->has_fan_min & (1 << i)) { - err = device_create_file(dev, - &sda_fan_min[i].dev_attr); - if (err) - goto exit_remove; - } - if (i < data->pwm_num && - ((err = device_create_file(dev, - &sda_pwm[i].dev_attr)) - || (err = device_create_file(dev, - &sda_pwm_mode[i].dev_attr)) - || (err = device_create_file(dev, - &sda_pwm_enable[i].dev_attr)) - || (err = device_create_file(dev, - &sda_target_temp[i].dev_attr)) - || (err = device_create_file(dev, - &sda_tolerance[i].dev_attr)))) - goto exit_remove; - } - } + hwmon_dev = devm_hwmon_device_register_with_info(&pdev->dev, + data->name, + data, + &w83627ehf_chip_info, + w83627ehf_groups); - for (i = 0; i < NUM_REG_TEMP; i++) { - if (!(data->have_temp & (1 << i))) - continue; - err = device_create_file(dev, &sda_temp_input[i].dev_attr); - if (err) - goto exit_remove; - if (data->temp_label) { - err = device_create_file(dev, - &sda_temp_label[i].dev_attr); - if (err) - goto exit_remove; - } - if (i == 2 && data->temp3_val_only) - continue; - if (data->reg_temp_over[i]) { - err = device_create_file(dev, - &sda_temp_max[i].dev_attr); - if (err) - goto exit_remove; - } - if (data->reg_temp_hyst[i]) { - err = device_create_file(dev, - &sda_temp_max_hyst[i].dev_attr); - if (err) - goto exit_remove; - } - if (i > 2) - continue; - if ((err = device_create_file(dev, - &sda_temp_alarm[i].dev_attr)) - || (err = device_create_file(dev, - &sda_temp_type[i].dev_attr))) - goto exit_remove; - if (data->have_temp_offset & (1 << i)) { - err = device_create_file(dev, - &sda_temp_offset[i].dev_attr); - if (err) - goto exit_remove; - } - } - - err = device_create_file(dev, &sda_caseopen[0].dev_attr); - if (err) - goto exit_remove; - - if (sio_data->kind == nct6776) { - err = device_create_file(dev, &sda_caseopen[1].dev_attr); - if (err) - goto exit_remove; - } - - err = device_create_file(dev, &dev_attr_name); - if (err) - goto exit_remove; - - data->hwmon_dev = hwmon_device_register(dev); - if (IS_ERR(data->hwmon_dev)) { - err = PTR_ERR(data->hwmon_dev); - goto exit_remove; - } + return PTR_ERR_OR_ZERO(hwmon_dev); - return 0; - -exit_remove: - w83627ehf_device_remove_files(dev); exit_release: release_region(res->start, IOREGION_LENGTH); exit: @@ -2588,8 +1959,6 @@ 
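The w83627ehf conversion above replaces dozens of device_create_file() calls, and the matching error-unwind ladder, with a single devm-managed registration. A minimal sketch of that pattern, with placeholder callback and table names rather than the driver's real ones:

	#include <linux/hwmon.h>

	/* Placeholder callbacks; the real driver routes these to chip registers. */
	static const struct hwmon_ops my_hwmon_ops = {
		.is_visible	= my_is_visible,	/* per-channel visibility */
		.read		= my_read,
		.write		= my_write,
	};

	static const struct hwmon_channel_info *my_info[] = {
		HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX),
		NULL
	};

	static const struct hwmon_chip_info my_chip_info = {
		.ops	= &my_hwmon_ops,
		.info	= my_info,
	};

	/* In probe: device-managed, so remove() needs no unregister call. */
	hwmon_dev = devm_hwmon_device_register_with_info(dev, data->name, data,
							 &my_chip_info, groups);
	return PTR_ERR_OR_ZERO(hwmon_dev);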
static int w83627ehf_remove(struct platform_device *pdev) { struct w83627ehf_data *data = platform_get_drvdata(pdev); - hwmon_device_unregister(data->hwmon_dev); - w83627ehf_device_remove_files(&pdev->dev); release_region(data->addr, IOREGION_LENGTH); return 0; @@ -2599,14 +1968,9 @@ static int w83627ehf_remove(struct platform_device *pdev) static int w83627ehf_suspend(struct device *dev) { struct w83627ehf_data *data = w83627ehf_update_device(dev); - struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev); mutex_lock(&data->update_lock); data->vbat = w83627ehf_read_value(data, W83627EHF_REG_VBAT); - if (sio_data->kind == nct6775) { - data->fandiv1 = w83627ehf_read_value(data, NCT6775_REG_FANDIV1); - data->fandiv2 = w83627ehf_read_value(data, NCT6775_REG_FANDIV2); - } mutex_unlock(&data->update_lock); return 0; @@ -2615,7 +1979,6 @@ static int w83627ehf_suspend(struct device *dev) static int w83627ehf_resume(struct device *dev) { struct w83627ehf_data *data = dev_get_drvdata(dev); - struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev); int i; mutex_lock(&data->update_lock); @@ -2636,7 +1999,7 @@ static int w83627ehf_resume(struct device *dev) if (!(data->has_fan_min & (1 << i))) continue; - w83627ehf_write_value(data, data->REG_FAN_MIN[i], + w83627ehf_write_value(data, W83627EHF_REG_FAN_MIN[i], data->fan_min[i]); } @@ -2660,10 +2023,6 @@ static int w83627ehf_resume(struct device *dev) /* Restore other settings */ w83627ehf_write_value(data, W83627EHF_REG_VBAT, data->vbat); - if (sio_data->kind == nct6775) { - w83627ehf_write_value(data, NCT6775_REG_FANDIV1, data->fandiv1); - w83627ehf_write_value(data, NCT6775_REG_FANDIV2, data->fandiv2); - } /* Force re-reading all values */ data->valid = 0; @@ -2704,8 +2063,6 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr, static const char sio_name_W83627UHG[] __initconst = "W83627UHG"; static const char sio_name_W83667HG[] __initconst = "W83667HG"; static const char sio_name_W83667HG_B[] __initconst = "W83667HG-B"; - static const char sio_name_NCT6775[] __initconst = "NCT6775F"; - static const char sio_name_NCT6776[] __initconst = "NCT6776F"; u16 val; const char *sio_name; @@ -2749,14 +2106,6 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr, sio_data->kind = w83667hg_b; sio_name = sio_name_W83667HG_B; break; - case SIO_NCT6775_ID: - sio_data->kind = nct6775; - sio_name = sio_name_NCT6775; - break; - case SIO_NCT6776_ID: - sio_data->kind = nct6776; - sio_name = sio_name_NCT6776; - break; default: if (val != 0xffff) pr_debug("unsupported chip ID: 0x%04x\n", val); diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c index dc3f507e7562..a90d757f7043 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.c +++ b/drivers/hwtracing/coresight/coresight-etm4x.c @@ -1132,7 +1132,6 @@ static void etm4_init_trace_id(struct etmv4_drvdata *drvdata) drvdata->trcid = coresight_get_trace_id(drvdata->cpu); } -#ifdef CONFIG_CPU_PM static int etm4_cpu_save(struct etmv4_drvdata *drvdata) { int i, ret = 0; @@ -1402,17 +1401,17 @@ static struct notifier_block etm4_cpu_pm_nb = { static int etm4_cpu_pm_register(void) { - return cpu_pm_register_notifier(&etm4_cpu_pm_nb); + if (IS_ENABLED(CONFIG_CPU_PM)) + return cpu_pm_register_notifier(&etm4_cpu_pm_nb); + + return 0; } static void etm4_cpu_pm_unregister(void) { - cpu_pm_unregister_notifier(&etm4_cpu_pm_nb); + if (IS_ENABLED(CONFIG_CPU_PM)) + cpu_pm_unregister_notifier(&etm4_cpu_pm_nb); } -#else -static int 
etm4_cpu_pm_register(void) { return 0; } -static void etm4_cpu_pm_unregister(void) { } -#endif static int etm4_probe(struct amba_device *adev, const struct amba_id *id) { diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c index 0dfd97bbde9e..ca232ec565e8 100644 --- a/drivers/hwtracing/intel_th/core.c +++ b/drivers/hwtracing/intel_th/core.c @@ -834,9 +834,6 @@ static irqreturn_t intel_th_irq(int irq, void *data) ret |= d->irq(th->thdev[i]); } - if (ret == IRQ_NONE) - pr_warn_ratelimited("nobody cared for irq\n"); - return ret; } @@ -887,6 +884,7 @@ intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata, if (th->irq == -1) th->irq = devres[r].start; + th->num_irqs++; break; default: dev_warn(dev, "Unknown resource type %lx\n", @@ -940,6 +938,9 @@ void intel_th_free(struct intel_th *th) th->num_thdevs = 0; + for (i = 0; i < th->num_irqs; i++) + devm_free_irq(th->dev, th->irq + i, th); + pm_runtime_get_sync(th->dev); pm_runtime_forbid(th->dev); diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h index 0df480072b6c..6f4f5486fe6d 100644 --- a/drivers/hwtracing/intel_th/intel_th.h +++ b/drivers/hwtracing/intel_th/intel_th.h @@ -261,6 +261,7 @@ enum th_mmio_idx { * @num_thdevs: number of devices in the @thdev array * @num_resources: number of resources in the @resource array * @irq: irq number + * @num_irqs: number of IRQs is use * @id: this Intel TH controller's device ID in the system * @major: device node major for output devices */ @@ -277,6 +278,7 @@ struct intel_th { unsigned int num_thdevs; unsigned int num_resources; int irq; + int num_irqs; int id; int major; diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c index 6d240dfae9d9..8e48c7458aa3 100644 --- a/drivers/hwtracing/intel_th/msu.c +++ b/drivers/hwtracing/intel_th/msu.c @@ -1676,10 +1676,13 @@ static int intel_th_msc_init(struct msc *msc) return 0; } -static void msc_win_switch(struct msc *msc) +static int msc_win_switch(struct msc *msc) { struct msc_window *first; + if (list_empty(&msc->win_list)) + return -EINVAL; + first = list_first_entry(&msc->win_list, struct msc_window, entry); if (msc_is_last_win(msc->cur_win)) @@ -1691,6 +1694,8 @@ static void msc_win_switch(struct msc *msc) msc->base_addr = msc_win_base_dma(msc->cur_win); intel_th_trace_switch(msc->thdev); + + return 0; } /** @@ -2025,16 +2030,15 @@ win_switch_store(struct device *dev, struct device_attribute *attr, if (val != 1) return -EINVAL; + ret = -EINVAL; mutex_lock(&msc->buf_mutex); /* * Window switch can only happen in the "multi" mode. * If a external buffer is engaged, they have the full * control over window switching. */ - if (msc->mode != MSC_MODE_MULTI || msc->mbuf) - ret = -ENOTSUPP; - else - msc_win_switch(msc); + if (msc->mode == MSC_MODE_MULTI && !msc->mbuf) + ret = msc_win_switch(msc); mutex_unlock(&msc->buf_mutex); return ret ? 
ret : size; diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c index ebf3e30e989a..e9d90b53bbc4 100644 --- a/drivers/hwtracing/intel_th/pci.c +++ b/drivers/hwtracing/intel_th/pci.c @@ -205,6 +205,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = { .driver_data = (kernel_ulong_t)&intel_th_2x, }, { + /* Comet Lake PCH-V */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa3a6), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { /* Ice Lake NNPI */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x45c5), .driver_data = (kernel_ulong_t)&intel_th_2x, @@ -229,6 +234,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4da6), .driver_data = (kernel_ulong_t)&intel_th_2x, }, + { + /* Elkhart Lake */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4b26), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, { 0 }, }; diff --git a/drivers/i2c/busses/i2c-at91-core.c b/drivers/i2c/busses/i2c-at91-core.c index e13af4874976..5137e6297022 100644 --- a/drivers/i2c/busses/i2c-at91-core.c +++ b/drivers/i2c/busses/i2c-at91-core.c @@ -174,7 +174,7 @@ static struct at91_twi_pdata sama5d2_config = { static struct at91_twi_pdata sam9x60_config = { .clk_max_div = 7, - .clk_offset = 4, + .clk_offset = 3, .has_unre_flag = true, .has_alt_cmd = true, .has_hold_field = true, diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c index e01b2b57e724..5ab901ad615d 100644 --- a/drivers/i2c/busses/i2c-bcm2835.c +++ b/drivers/i2c/busses/i2c-bcm2835.c @@ -58,6 +58,7 @@ struct bcm2835_i2c_dev { struct i2c_adapter adapter; struct completion completion; struct i2c_msg *curr_msg; + struct clk *bus_clk; int num_msgs; u32 msg_err; u8 *msg_buf; @@ -404,7 +405,6 @@ static int bcm2835_i2c_probe(struct platform_device *pdev) struct resource *mem, *irq; int ret; struct i2c_adapter *adap; - struct clk *bus_clk; struct clk *mclk; u32 bus_clk_rate; @@ -427,11 +427,11 @@ static int bcm2835_i2c_probe(struct platform_device *pdev) return PTR_ERR(mclk); } - bus_clk = bcm2835_i2c_register_div(&pdev->dev, mclk, i2c_dev); + i2c_dev->bus_clk = bcm2835_i2c_register_div(&pdev->dev, mclk, i2c_dev); - if (IS_ERR(bus_clk)) { + if (IS_ERR(i2c_dev->bus_clk)) { dev_err(&pdev->dev, "Could not register clock\n"); - return PTR_ERR(bus_clk); + return PTR_ERR(i2c_dev->bus_clk); } ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency", @@ -442,13 +442,13 @@ static int bcm2835_i2c_probe(struct platform_device *pdev) bus_clk_rate = 100000; } - ret = clk_set_rate_exclusive(bus_clk, bus_clk_rate); + ret = clk_set_rate_exclusive(i2c_dev->bus_clk, bus_clk_rate); if (ret < 0) { dev_err(&pdev->dev, "Could not set clock frequency\n"); return ret; } - ret = clk_prepare_enable(bus_clk); + ret = clk_prepare_enable(i2c_dev->bus_clk); if (ret) { dev_err(&pdev->dev, "Couldn't prepare clock"); return ret; @@ -491,10 +491,9 @@ static int bcm2835_i2c_probe(struct platform_device *pdev) static int bcm2835_i2c_remove(struct platform_device *pdev) { struct bcm2835_i2c_dev *i2c_dev = platform_get_drvdata(pdev); - struct clk *bus_clk = devm_clk_get(i2c_dev->dev, "div"); - clk_rate_exclusive_put(bus_clk); - clk_disable_unprepare(bus_clk); + clk_rate_exclusive_put(i2c_dev->bus_clk); + clk_disable_unprepare(i2c_dev->bus_clk); free_irq(i2c_dev->irq, i2c_dev); i2c_del_adapter(&i2c_dev->adapter); diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c index ff340d7ae2e5..abfe3094c047 100644 --- a/drivers/i2c/busses/i2c-highlander.c +++ 
b/drivers/i2c/busses/i2c-highlander.c @@ -369,7 +369,7 @@ static int highlander_i2c_probe(struct platform_device *pdev) if (unlikely(!dev)) return -ENOMEM; - dev->base = ioremap_nocache(res->start, resource_size(res)); + dev->base = ioremap(res->start, resource_size(res)); if (unlikely(!dev->base)) { ret = -ENXIO; goto err; diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c index 38556381f4ca..2f8b8050a223 100644 --- a/drivers/i2c/busses/i2c-iop3xx.c +++ b/drivers/i2c/busses/i2c-iop3xx.c @@ -433,13 +433,17 @@ iop3xx_i2c_probe(struct platform_device *pdev) adapter_data->gpio_scl = devm_gpiod_get_optional(&pdev->dev, "scl", GPIOD_ASIS); - if (IS_ERR(adapter_data->gpio_scl)) - return PTR_ERR(adapter_data->gpio_scl); + if (IS_ERR(adapter_data->gpio_scl)) { + ret = PTR_ERR(adapter_data->gpio_scl); + goto free_both; + } adapter_data->gpio_sda = devm_gpiod_get_optional(&pdev->dev, "sda", GPIOD_ASIS); - if (IS_ERR(adapter_data->gpio_sda)) - return PTR_ERR(adapter_data->gpio_sda); + if (IS_ERR(adapter_data->gpio_sda)) { + ret = PTR_ERR(adapter_data->gpio_sda); + goto free_both; + } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c index 0829cb696d9d..4fde74eb34a7 100644 --- a/drivers/i2c/busses/i2c-pmcmsp.c +++ b/drivers/i2c/busses/i2c-pmcmsp.c @@ -281,7 +281,7 @@ static int pmcmsptwi_probe(struct platform_device *pldev) } /* remap the memory */ - pmcmsptwi_data.iobase = ioremap_nocache(res->start, + pmcmsptwi_data.iobase = ioremap(res->start, resource_size(res)); if (!pmcmsptwi_data.iobase) { dev_err(&pldev->dev, diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index a98bf31d0e5c..61339c665ebd 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -1608,14 +1608,18 @@ static int tegra_i2c_probe(struct platform_device *pdev) } pm_runtime_enable(&pdev->dev); - if (!pm_runtime_enabled(&pdev->dev)) + if (!pm_runtime_enabled(&pdev->dev)) { ret = tegra_i2c_runtime_resume(&pdev->dev); - else + if (ret < 0) { + dev_err(&pdev->dev, "runtime resume failed\n"); + goto unprepare_div_clk; + } + } else { ret = pm_runtime_get_sync(i2c_dev->dev); - - if (ret < 0) { - dev_err(&pdev->dev, "runtime resume failed\n"); - goto unprepare_div_clk; + if (ret < 0) { + dev_err(&pdev->dev, "runtime resume failed\n"); + goto disable_rpm; + } } if (i2c_dev->is_multimaster_mode) { @@ -1623,7 +1627,7 @@ static int tegra_i2c_probe(struct platform_device *pdev) if (ret < 0) { dev_err(i2c_dev->dev, "div_clk enable failed %d\n", ret); - goto disable_rpm; + goto put_rpm; } } @@ -1671,11 +1675,16 @@ disable_div_clk: if (i2c_dev->is_multimaster_mode) clk_disable(i2c_dev->div_clk); -disable_rpm: - pm_runtime_disable(&pdev->dev); - if (!pm_runtime_status_suspended(&pdev->dev)) +put_rpm: + if (pm_runtime_enabled(&pdev->dev)) + pm_runtime_put_sync(&pdev->dev); + else tegra_i2c_runtime_suspend(&pdev->dev); +disable_rpm: + if (pm_runtime_enabled(&pdev->dev)) + pm_runtime_disable(&pdev->dev); + unprepare_div_clk: clk_unprepare(i2c_dev->div_clk); @@ -1710,9 +1719,14 @@ static int tegra_i2c_remove(struct platform_device *pdev) static int __maybe_unused tegra_i2c_suspend(struct device *dev) { struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev); + int err; i2c_mark_adapter_suspended(&i2c_dev->adapter); + err = pm_runtime_force_suspend(dev); + if (err < 0) + return err; + return 0; } @@ -1733,6 +1747,10 @@ static int __maybe_unused tegra_i2c_resume(struct device *dev) 
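In the i2c-tegra probe hunk above, the old code funnelled both the manual tegra_i2c_runtime_resume() result and the pm_runtime_get_sync() result into one shared error path, so a failure never disabled runtime PM. A condensed sketch of the corrected flow; labels follow the hunk, while my_runtime_resume/my_runtime_suspend stand in for the driver's callbacks:

	pm_runtime_enable(dev);
	if (!pm_runtime_enabled(dev)) {
		ret = my_runtime_resume(dev);	/* bring hardware up by hand */
		if (ret < 0)
			goto unprepare_div_clk;
	} else {
		ret = pm_runtime_get_sync(dev);
		if (ret < 0)
			goto disable_rpm;
	}
	/* ... later failures unwind in reverse order ... */
	put_rpm:
		if (pm_runtime_enabled(dev))
			pm_runtime_put_sync(dev);
		else
			my_runtime_suspend(dev);
	disable_rpm:
		if (pm_runtime_enabled(dev))
			pm_runtime_disable(dev);
	unprepare_div_clk:
		clk_unprepare(div_clk);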
if (err) return err; + err = pm_runtime_force_resume(dev); + if (err < 0) + return err; + i2c_mark_adapter_resumed(&i2c_dev->adapter); return 0; diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index 9333c865d4a9..35b209797d7b 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -186,10 +186,11 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap) * If we can set SDA, we will always create a STOP to ensure additional * pulses will do no harm. This is achieved by letting SDA follow SCL * half a cycle later. Check the 'incomplete_write_byte' fault injector - * for details. + * for details. Note that we must honour tsu:sto, 4us, but lets use 5us + * here for simplicity. */ bri->set_scl(adap, scl); - ndelay(RECOVERY_NDELAY / 2); + ndelay(RECOVERY_NDELAY); if (bri->set_sda) bri->set_sda(adap, scl); ndelay(RECOVERY_NDELAY / 2); @@ -211,7 +212,13 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap) scl = !scl; bri->set_scl(adap, scl); /* Creating STOP again, see above */ - ndelay(RECOVERY_NDELAY / 2); + if (scl) { + /* Honour minimum tsu:sto */ + ndelay(RECOVERY_NDELAY); + } else { + /* Honour minimum tf and thd:dat */ + ndelay(RECOVERY_NDELAY / 2); + } if (bri->set_sda) bri->set_sda(adap, scl); ndelay(RECOVERY_NDELAY / 2); @@ -896,29 +903,6 @@ struct i2c_client *i2c_new_dummy_device(struct i2c_adapter *adapter, u16 address } EXPORT_SYMBOL_GPL(i2c_new_dummy_device); -/** - * i2c_new_dummy - return a new i2c device bound to a dummy driver - * @adapter: the adapter managing the device - * @address: seven bit address to be used - * Context: can sleep - * - * This deprecated function has the same functionality as @i2c_new_dummy_device, - * it just returns NULL instead of an ERR_PTR in case of an error for - * compatibility with current I2C API. It will be removed once all users are - * converted. - * - * This returns the new i2c client, which should be saved for later use with - * i2c_unregister_device(); or NULL to indicate an error. - */ -struct i2c_client *i2c_new_dummy(struct i2c_adapter *adapter, u16 address) -{ - struct i2c_client *ret; - - ret = i2c_new_dummy_device(adapter, address); - return IS_ERR(ret) ? 
NULL : ret; -} -EXPORT_SYMBOL_GPL(i2c_new_dummy); - struct i2c_dummy_devres { struct i2c_client *client; }; diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c index 043691656245..7f8f896fa0c3 100644 --- a/drivers/i3c/master.c +++ b/drivers/i3c/master.c @@ -527,8 +527,8 @@ static const struct device_type i3c_masterdev_type = { .groups = i3c_masterdev_groups, }; -int i3c_bus_set_mode(struct i3c_bus *i3cbus, enum i3c_bus_mode mode, - unsigned long max_i2c_scl_rate) +static int i3c_bus_set_mode(struct i3c_bus *i3cbus, enum i3c_bus_mode mode, + unsigned long max_i2c_scl_rate) { struct i3c_master_controller *master = i3c_bus_to_i3c_master(i3cbus); diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c index b0ff0e12d84c..bd26c3b9634e 100644 --- a/drivers/i3c/master/dw-i3c-master.c +++ b/drivers/i3c/master/dw-i3c-master.c @@ -899,6 +899,22 @@ static int dw_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev, struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev); struct i3c_master_controller *m = i3c_dev_get_master(dev); struct dw_i3c_master *master = to_dw_i3c_master(m); + int pos; + + pos = dw_i3c_master_get_free_pos(master); + + if (data->index > pos && pos > 0) { + writel(0, + master->regs + + DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); + + master->addrs[data->index] = 0; + master->free_pos |= BIT(data->index); + + data->index = pos; + master->addrs[pos] = dev->info.dyn_addr; + master->free_pos &= ~BIT(pos); + } writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr), master->regs + @@ -1100,15 +1116,13 @@ static const struct i3c_master_controller_ops dw_mipi_i3c_ops = { static int dw_i3c_probe(struct platform_device *pdev) { struct dw_i3c_master *master; - struct resource *res; int ret, irq; master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL); if (!master) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - master->regs = devm_ioremap_resource(&pdev->dev, res); + master->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(master->regs)) return PTR_ERR(master->regs); diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c index 10db0bf0655a..54712793709e 100644 --- a/drivers/i3c/master/i3c-master-cdns.c +++ b/drivers/i3c/master/i3c-master-cdns.c @@ -22,6 +22,7 @@ #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/workqueue.h> +#include <linux/of_device.h> #define DEV_ID 0x0 #define DEV_ID_I3C_MASTER 0x5034 @@ -60,6 +61,7 @@ #define CTRL_HALT_EN BIT(30) #define CTRL_MCS BIT(29) #define CTRL_MCS_EN BIT(28) +#define CTRL_THD_DELAY(x) (((x) << 24) & GENMASK(25, 24)) #define CTRL_HJ_DISEC BIT(8) #define CTRL_MST_ACK BIT(7) #define CTRL_HJ_ACK BIT(6) @@ -70,6 +72,7 @@ #define CTRL_MIXED_FAST_BUS_MODE 2 #define CTRL_MIXED_SLOW_BUS_MODE 3 #define CTRL_BUS_MODE_MASK GENMASK(1, 0) +#define THD_DELAY_MAX 3 #define PRESCL_CTRL0 0x14 #define PRESCL_CTRL0_I2C(x) ((x) << 16) @@ -388,6 +391,10 @@ struct cdns_i3c_xfer { struct cdns_i3c_cmd cmds[0]; }; +struct cdns_i3c_data { + u8 thd_delay_ns; +}; + struct cdns_i3c_master { struct work_struct hj_work; struct i3c_master_controller base; @@ -408,6 +415,7 @@ struct cdns_i3c_master { struct clk *pclk; struct cdns_i3c_master_caps caps; unsigned long i3c_scl_lim; + const struct cdns_i3c_data *devdata; }; static inline struct cdns_i3c_master * @@ -1181,6 +1189,20 @@ static int cdns_i3c_master_do_daa(struct i3c_master_controller *m) return 0; } +static u8 cdns_i3c_master_calculate_thd_delay(struct cdns_i3c_master *master) 
+{ + unsigned long sysclk_rate = clk_get_rate(master->sysclk); + u8 thd_delay = DIV_ROUND_UP(master->devdata->thd_delay_ns, + (NSEC_PER_SEC / sysclk_rate)); + + /* Every value greater than 3 is not valid. */ + if (thd_delay > THD_DELAY_MAX) + thd_delay = THD_DELAY_MAX; + + /* CTLR_THD_DEL value is encoded. */ + return (THD_DELAY_MAX - thd_delay); +} + static int cdns_i3c_master_bus_init(struct i3c_master_controller *m) { struct cdns_i3c_master *master = to_cdns_i3c_master(m); @@ -1264,6 +1286,15 @@ static int cdns_i3c_master_bus_init(struct i3c_master_controller *m) * We will issue ENTDAA afterwards from the threaded IRQ handler. */ ctrl |= CTRL_HJ_ACK | CTRL_HJ_DISEC | CTRL_HALT_EN | CTRL_MCS_EN; + + /* + * Configure data hold delay based on device-specific data. + * + * MIPI I3C Specification 1.0 defines non-zero minimal tHD_PP timing on + * master output. This setting allows to meet this timing on master's + * SoC outputs, regardless of PCB balancing. + */ + ctrl |= CTRL_THD_DELAY(cdns_i3c_master_calculate_thd_delay(master)); writel(ctrl, master->regs + CTRL); cdns_i3c_master_enable(master); @@ -1521,10 +1552,18 @@ static void cdns_i3c_master_hj(struct work_struct *work) i3c_master_do_daa(&master->base); } +static struct cdns_i3c_data cdns_i3c_devdata = { + .thd_delay_ns = 10, +}; + +static const struct of_device_id cdns_i3c_master_of_ids[] = { + { .compatible = "cdns,i3c-master", .data = &cdns_i3c_devdata }, + { /* sentinel */ }, +}; + static int cdns_i3c_master_probe(struct platform_device *pdev) { struct cdns_i3c_master *master; - struct resource *res; int ret, irq; u32 val; @@ -1532,8 +1571,11 @@ static int cdns_i3c_master_probe(struct platform_device *pdev) if (!master) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - master->regs = devm_ioremap_resource(&pdev->dev, res); + master->devdata = of_device_get_match_data(&pdev->dev); + if (!master->devdata) + return -EINVAL; + + master->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(master->regs)) return PTR_ERR(master->regs); @@ -1631,11 +1673,6 @@ static int cdns_i3c_master_remove(struct platform_device *pdev) return 0; } -static const struct of_device_id cdns_i3c_master_of_ids[] = { - { .compatible = "cdns,i3c-master" }, - { /* sentinel */ }, -}; - static struct platform_driver cdns_i3c_master = { .probe = cdns_i3c_master_probe, .remove = cdns_i3c_master_remove, diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 75fd2a7b0842..7833e650789f 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -41,6 +41,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/acpi.h> #include <linux/kernel.h> #include <linux/cpuidle.h> #include <linux/tick.h> @@ -79,6 +80,7 @@ struct idle_cpu { unsigned long auto_demotion_disable_flags; bool byt_auto_demotion_disable_flag; bool disable_promotion_to_c1e; + bool use_acpi; }; static const struct idle_cpu *icpu; @@ -90,6 +92,11 @@ static void intel_idle_s2idle(struct cpuidle_device *dev, static struct cpuidle_state *cpuidle_state_table; /* + * Enable this state by default even if the ACPI _CST does not list it. + */ +#define CPUIDLE_FLAG_ALWAYS_ENABLE BIT(15) + +/* * Set this flag for states where the HW flushes the TLB for us * and so we don't need cross-calls to keep it consistent. 
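As a worked example for the Cadence tHD;PP configuration above, assume a hypothetical 100 MHz sysclk: one cycle is 10 ns, so the 10 ns hold time from the devdata needs DIV_ROUND_UP(10, 10) = 1 cycle, and the register field stores the value inverted:

	/* assumed: clk_get_rate(master->sysclk) == 100000000 (100 MHz) */
	u8 thd_delay = DIV_ROUND_UP(10, NSEC_PER_SEC / 100000000);	/* = 1 */

	if (thd_delay > THD_DELAY_MAX)		/* the field only encodes 0..3 */
		thd_delay = THD_DELAY_MAX;

	ctrl |= CTRL_THD_DELAY(THD_DELAY_MAX - thd_delay);	/* writes 2 */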
* If this flag is set, SW flushes the TLB, so even if the @@ -124,7 +131,7 @@ static struct cpuidle_state nehalem_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -161,7 +168,7 @@ static struct cpuidle_state snb_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -296,7 +303,7 @@ static struct cpuidle_state ivb_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -341,7 +348,7 @@ static struct cpuidle_state ivt_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 80, .enter = &intel_idle, @@ -378,7 +385,7 @@ static struct cpuidle_state ivt_cstates_4s[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 250, .enter = &intel_idle, @@ -415,7 +422,7 @@ static struct cpuidle_state ivt_cstates_8s[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 500, .enter = &intel_idle, @@ -452,7 +459,7 @@ static struct cpuidle_state hsw_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -520,7 +527,7 @@ static struct cpuidle_state bdw_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -589,7 +596,7 @@ static struct cpuidle_state skl_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -658,7 +665,7 @@ static struct cpuidle_state skx_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -808,7 +815,7 @@ static struct cpuidle_state bxt_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -869,7 +876,7 @@ static struct cpuidle_state dnv_cstates[] = { { .name = "C1E", .desc = "MWAIT 0x01", - .flags = MWAIT2flg(0x01), + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, .exit_latency = 10, .target_residency = 20, .enter = &intel_idle, @@ -944,37 +951,19 @@ static void intel_idle_s2idle(struct cpuidle_device *dev, mwait_idle_with_hints(eax, ecx); } -static void __setup_broadcast_timer(bool on) -{ - if (on) - tick_broadcast_enable(); - else - tick_broadcast_disable(); -} - -static void auto_demotion_disable(void) -{ - unsigned long long msr_bits; - - rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits); - msr_bits &= 
~(icpu->auto_demotion_disable_flags); - wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits); -} -static void c1e_promotion_disable(void) -{ - unsigned long long msr_bits; - - rdmsrl(MSR_IA32_POWER_CTL, msr_bits); - msr_bits &= ~0x2; - wrmsrl(MSR_IA32_POWER_CTL, msr_bits); -} - static const struct idle_cpu idle_cpu_nehalem = { .state_table = nehalem_cstates, .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE, .disable_promotion_to_c1e = true, }; +static const struct idle_cpu idle_cpu_nhx = { + .state_table = nehalem_cstates, + .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE, + .disable_promotion_to_c1e = true, + .use_acpi = true, +}; + static const struct idle_cpu idle_cpu_atom = { .state_table = atom_cstates, }; @@ -993,6 +982,12 @@ static const struct idle_cpu idle_cpu_snb = { .disable_promotion_to_c1e = true, }; +static const struct idle_cpu idle_cpu_snx = { + .state_table = snb_cstates, + .disable_promotion_to_c1e = true, + .use_acpi = true, +}; + static const struct idle_cpu idle_cpu_byt = { .state_table = byt_cstates, .disable_promotion_to_c1e = true, @@ -1013,6 +1008,7 @@ static const struct idle_cpu idle_cpu_ivb = { static const struct idle_cpu idle_cpu_ivt = { .state_table = ivt_cstates, .disable_promotion_to_c1e = true, + .use_acpi = true, }; static const struct idle_cpu idle_cpu_hsw = { @@ -1020,11 +1016,23 @@ static const struct idle_cpu idle_cpu_hsw = { .disable_promotion_to_c1e = true, }; +static const struct idle_cpu idle_cpu_hsx = { + .state_table = hsw_cstates, + .disable_promotion_to_c1e = true, + .use_acpi = true, +}; + static const struct idle_cpu idle_cpu_bdw = { .state_table = bdw_cstates, .disable_promotion_to_c1e = true, }; +static const struct idle_cpu idle_cpu_bdx = { + .state_table = bdw_cstates, + .disable_promotion_to_c1e = true, + .use_acpi = true, +}; + static const struct idle_cpu idle_cpu_skl = { .state_table = skl_cstates, .disable_promotion_to_c1e = true, @@ -1033,15 +1041,18 @@ static const struct idle_cpu idle_cpu_skl = { static const struct idle_cpu idle_cpu_skx = { .state_table = skx_cstates, .disable_promotion_to_c1e = true, + .use_acpi = true, }; static const struct idle_cpu idle_cpu_avn = { .state_table = avn_cstates, .disable_promotion_to_c1e = true, + .use_acpi = true, }; static const struct idle_cpu idle_cpu_knl = { .state_table = knl_cstates, + .use_acpi = true, }; static const struct idle_cpu idle_cpu_bxt = { @@ -1052,20 +1063,21 @@ static const struct idle_cpu idle_cpu_bxt = { static const struct idle_cpu idle_cpu_dnv = { .state_table = dnv_cstates, .disable_promotion_to_c1e = true, + .use_acpi = true, }; static const struct x86_cpu_id intel_idle_ids[] __initconst = { - INTEL_CPU_FAM6(NEHALEM_EP, idle_cpu_nehalem), + INTEL_CPU_FAM6(NEHALEM_EP, idle_cpu_nhx), INTEL_CPU_FAM6(NEHALEM, idle_cpu_nehalem), INTEL_CPU_FAM6(NEHALEM_G, idle_cpu_nehalem), INTEL_CPU_FAM6(WESTMERE, idle_cpu_nehalem), - INTEL_CPU_FAM6(WESTMERE_EP, idle_cpu_nehalem), - INTEL_CPU_FAM6(NEHALEM_EX, idle_cpu_nehalem), + INTEL_CPU_FAM6(WESTMERE_EP, idle_cpu_nhx), + INTEL_CPU_FAM6(NEHALEM_EX, idle_cpu_nhx), INTEL_CPU_FAM6(ATOM_BONNELL, idle_cpu_atom), INTEL_CPU_FAM6(ATOM_BONNELL_MID, idle_cpu_lincroft), - INTEL_CPU_FAM6(WESTMERE_EX, idle_cpu_nehalem), + INTEL_CPU_FAM6(WESTMERE_EX, idle_cpu_nhx), INTEL_CPU_FAM6(SANDYBRIDGE, idle_cpu_snb), - INTEL_CPU_FAM6(SANDYBRIDGE_X, idle_cpu_snb), + INTEL_CPU_FAM6(SANDYBRIDGE_X, idle_cpu_snx), INTEL_CPU_FAM6(ATOM_SALTWELL, idle_cpu_atom), INTEL_CPU_FAM6(ATOM_SILVERMONT, idle_cpu_byt), 
INTEL_CPU_FAM6(ATOM_SILVERMONT_MID, idle_cpu_tangier), @@ -1073,14 +1085,14 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = { INTEL_CPU_FAM6(IVYBRIDGE, idle_cpu_ivb), INTEL_CPU_FAM6(IVYBRIDGE_X, idle_cpu_ivt), INTEL_CPU_FAM6(HASWELL, idle_cpu_hsw), - INTEL_CPU_FAM6(HASWELL_X, idle_cpu_hsw), + INTEL_CPU_FAM6(HASWELL_X, idle_cpu_hsx), INTEL_CPU_FAM6(HASWELL_L, idle_cpu_hsw), INTEL_CPU_FAM6(HASWELL_G, idle_cpu_hsw), INTEL_CPU_FAM6(ATOM_SILVERMONT_D, idle_cpu_avn), INTEL_CPU_FAM6(BROADWELL, idle_cpu_bdw), INTEL_CPU_FAM6(BROADWELL_G, idle_cpu_bdw), - INTEL_CPU_FAM6(BROADWELL_X, idle_cpu_bdw), - INTEL_CPU_FAM6(BROADWELL_D, idle_cpu_bdw), + INTEL_CPU_FAM6(BROADWELL_X, idle_cpu_bdx), + INTEL_CPU_FAM6(BROADWELL_D, idle_cpu_bdx), INTEL_CPU_FAM6(SKYLAKE_L, idle_cpu_skl), INTEL_CPU_FAM6(SKYLAKE, idle_cpu_skl), INTEL_CPU_FAM6(KABYLAKE_L, idle_cpu_skl), @@ -1095,76 +1107,169 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = { {} }; -/* - * intel_idle_probe() +#define INTEL_CPU_FAM6_MWAIT \ + { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_MWAIT, 0 } + +static const struct x86_cpu_id intel_mwait_ids[] __initconst = { + INTEL_CPU_FAM6_MWAIT, + {} +}; + +static bool __init intel_idle_max_cstate_reached(int cstate) +{ + if (cstate + 1 > max_cstate) { + pr_info("max_cstate %d reached\n", max_cstate); + return true; + } + return false; +} + +#ifdef CONFIG_ACPI_PROCESSOR_CSTATE +#include <acpi/processor.h> + +static bool no_acpi __read_mostly; +module_param(no_acpi, bool, 0444); +MODULE_PARM_DESC(no_acpi, "Do not use ACPI _CST for building the idle states list"); + +static struct acpi_processor_power acpi_state_table __initdata; + +/** + * intel_idle_cst_usable - Check if the _CST information can be used. + * + * Check if all of the C-states listed by _CST in the max_cstate range are + * ACPI_CSTATE_FFH, which means that they should be entered via MWAIT. 
*/ -static int __init intel_idle_probe(void) +static bool __init intel_idle_cst_usable(void) { - unsigned int eax, ebx, ecx; - const struct x86_cpu_id *id; + int cstate, limit; - if (max_cstate == 0) { - pr_debug("disabled\n"); - return -EPERM; - } + limit = min_t(int, min_t(int, CPUIDLE_STATE_MAX, max_cstate + 1), + acpi_state_table.count); - id = x86_match_cpu(intel_idle_ids); - if (!id) { - if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && - boot_cpu_data.x86 == 6) - pr_debug("does not run on family %d model %d\n", - boot_cpu_data.x86, boot_cpu_data.x86_model); - return -ENODEV; + for (cstate = 1; cstate < limit; cstate++) { + struct acpi_processor_cx *cx = &acpi_state_table.states[cstate]; + + if (cx->entry_method != ACPI_CSTATE_FFH) + return false; } - if (!boot_cpu_has(X86_FEATURE_MWAIT)) { - pr_debug("Please enable MWAIT in BIOS SETUP\n"); - return -ENODEV; + return true; +} + +static bool __init intel_idle_acpi_cst_extract(void) +{ + unsigned int cpu; + + if (no_acpi) { + pr_debug("Not allowed to use ACPI _CST\n"); + return false; } - if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) - return -ENODEV; + for_each_possible_cpu(cpu) { + struct acpi_processor *pr = per_cpu(processors, cpu); - cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates); + if (!pr) + continue; - if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) || - !(ecx & CPUID5_ECX_INTERRUPT_BREAK) || - !mwait_substates) - return -ENODEV; + if (acpi_processor_evaluate_cst(pr->handle, cpu, &acpi_state_table)) + continue; - pr_debug("MWAIT substates: 0x%x\n", mwait_substates); + acpi_state_table.count++; - icpu = (const struct idle_cpu *)id->driver_data; - cpuidle_state_table = icpu->state_table; + if (!intel_idle_cst_usable()) + continue; - pr_debug("v" INTEL_IDLE_VERSION " model 0x%X\n", - boot_cpu_data.x86_model); + if (!acpi_processor_claim_cst_control()) { + acpi_state_table.count = 0; + return false; + } - return 0; + return true; + } + + pr_debug("ACPI _CST not found or not usable\n"); + return false; } -/* - * intel_idle_cpuidle_devices_uninit() - * Unregisters the cpuidle devices. - */ -static void intel_idle_cpuidle_devices_uninit(void) +static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { - int i; - struct cpuidle_device *dev; + int cstate, limit = min_t(int, CPUIDLE_STATE_MAX, acpi_state_table.count); + + /* + * If limit > 0, intel_idle_cst_usable() has returned 'true', so all of + * the interesting states are ACPI_CSTATE_FFH. + */ + for (cstate = 1; cstate < limit; cstate++) { + struct acpi_processor_cx *cx; + struct cpuidle_state *state; - for_each_online_cpu(i) { - dev = per_cpu_ptr(intel_idle_cpuidle_devices, i); - cpuidle_unregister_device(dev); + if (intel_idle_max_cstate_reached(cstate)) + break; + + cx = &acpi_state_table.states[cstate]; + + state = &drv->states[drv->state_count++]; + + snprintf(state->name, CPUIDLE_NAME_LEN, "C%d_ACPI", cstate); + strlcpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); + state->exit_latency = cx->latency; + /* + * For C1-type C-states use the same number for both the exit + * latency and target residency, because that is the case for + * C1 in the majority of the static C-states tables above. + * For the other types of C-states, however, set the target + * residency to 3 times the exit latency which should lead to + * a reasonable balance between energy-efficiency and + * performance in the majority of interesting cases. 
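Concretely, the residency heuristic described above maps each _CST entry's latency as follows; the numbers here are purely illustrative:

	state->exit_latency = cx->latency;	/* e.g. C1: 2 us, C3: 100 us */
	state->target_residency = cx->latency;	/* C1-type: residency == latency */
	if (cx->type > ACPI_STATE_C1)
		state->target_residency *= 3;	/* C3 example: 100 -> 300 us */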
+ */ + state->target_residency = cx->latency; + if (cx->type > ACPI_STATE_C1) + state->target_residency *= 3; + + state->flags = MWAIT2flg(cx->address); + if (cx->type > ACPI_STATE_C2) + state->flags |= CPUIDLE_FLAG_TLB_FLUSHED; + + state->enter = intel_idle; + state->enter_s2idle = intel_idle_s2idle; } } +static bool __init intel_idle_off_by_default(u32 mwait_hint) +{ + int cstate, limit; + + /* + * If there are no _CST C-states, do not disable any C-states by + * default. + */ + if (!acpi_state_table.count) + return false; + + limit = min_t(int, CPUIDLE_STATE_MAX, acpi_state_table.count); + /* + * If limit > 0, intel_idle_cst_usable() has returned 'true', so all of + * the interesting states are ACPI_CSTATE_FFH. + */ + for (cstate = 1; cstate < limit; cstate++) { + if (acpi_state_table.states[cstate].address == mwait_hint) + return false; + } + return true; +} +#else /* !CONFIG_ACPI_PROCESSOR_CSTATE */ +static inline bool intel_idle_acpi_cst_extract(void) { return false; } +static inline void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { } +static inline bool intel_idle_off_by_default(u32 mwait_hint) { return false; } +#endif /* !CONFIG_ACPI_PROCESSOR_CSTATE */ + /* * ivt_idle_state_table_update(void) * * Tune IVT multi-socket targets * Assumption: num_sockets == (max_package_num + 1) */ -static void ivt_idle_state_table_update(void) +static void __init ivt_idle_state_table_update(void) { /* IVT uses a different table for 1-2, 3-4, and > 4 sockets */ int cpu, package_num, num_sockets = 1; @@ -1187,15 +1292,17 @@ static void ivt_idle_state_table_update(void) /* else, 1 and 2 socket systems use default ivt_cstates */ } -/* - * Translate IRTL (Interrupt Response Time Limit) MSR to usec +/** + * irtl_2_usec - IRTL to microseconds conversion. + * @irtl: IRTL MSR value. + * + * Translate the IRTL (Interrupt Response Time Limit) MSR value to microseconds. */ - -static unsigned int irtl_ns_units[] = { - 1, 32, 1024, 32768, 1048576, 33554432, 0, 0 }; - -static unsigned long long irtl_2_usec(unsigned long long irtl) +static unsigned long long __init irtl_2_usec(unsigned long long irtl) { + static const unsigned int irtl_ns_units[] __initconst = { + 1, 32, 1024, 32768, 1048576, 33554432, 0, 0 + }; unsigned long long ns; if (!irtl) @@ -1203,15 +1310,16 @@ static unsigned long long irtl_2_usec(unsigned long long irtl) ns = irtl_ns_units[(irtl >> 10) & 0x7]; - return div64_u64((irtl & 0x3FF) * ns, 1000); + return div_u64((irtl & 0x3FF) * ns, NSEC_PER_USEC); } + /* * bxt_idle_state_table_update(void) * * On BXT, we trust the IRTL to show the definitive maximum latency * We use the same value for target_residency. 
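The IRTL layout that irtl_2_usec() above decodes keeps a 10-bit count in bits [9:0] and a time-unit index in bits [12:10]. An illustrative decode, assuming an MSR value of 0x864:

	unsigned long long irtl = 0x864;
	/* (0x864 >> 10) & 0x7 = 2 -> irtl_ns_units[2] = 1024 ns */
	unsigned long long ns = irtl_ns_units[(irtl >> 10) & 0x7];
	/* 0x864 & 0x3FF = 100; 100 * 1024 ns = 102400 ns -> 102 us */
	unsigned long long usec = div_u64((irtl & 0x3FF) * ns, NSEC_PER_USEC);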
*/ -static void bxt_idle_state_table_update(void) +static void __init bxt_idle_state_table_update(void) { unsigned long long msr; unsigned int usec; @@ -1258,7 +1366,7 @@ static void bxt_idle_state_table_update(void) * On SKL-H (model 0x5e) disable C8 and C9 if: * C10 is enabled and SGX disabled */ -static void sklh_idle_state_table_update(void) +static void __init sklh_idle_state_table_update(void) { unsigned long long msr; unsigned int eax, ebx, ecx, edx; @@ -1284,7 +1392,7 @@ static void sklh_idle_state_table_update(void) /* if SGX is present */ if (ebx & (1 << 2)) { - rdmsrl(MSR_IA32_FEATURE_CONTROL, msr); + rdmsrl(MSR_IA32_FEAT_CTL, msr); /* if SGX is enabled */ if (msr & (1 << 18)) @@ -1294,16 +1402,28 @@ static void sklh_idle_state_table_update(void) skl_cstates[5].flags |= CPUIDLE_FLAG_UNUSABLE; /* C8-SKL */ skl_cstates[6].flags |= CPUIDLE_FLAG_UNUSABLE; /* C9-SKL */ } -/* - * intel_idle_state_table_update() - * - * Update the default state_table for this CPU-id - */ -static void intel_idle_state_table_update(void) +static bool __init intel_idle_verify_cstate(unsigned int mwait_hint) { - switch (boot_cpu_data.x86_model) { + unsigned int mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint) + 1; + unsigned int num_substates = (mwait_substates >> mwait_cstate * 4) & + MWAIT_SUBSTATE_MASK; + + /* Ignore the C-state if there are NO sub-states in CPUID for it. */ + if (num_substates == 0) + return false; + + if (mwait_cstate > 2 && !boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) + mark_tsc_unstable("TSC halts in idle states deeper than C2"); + return true; +} + +static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv) +{ + int cstate; + + switch (boot_cpu_data.x86_model) { case INTEL_FAM6_IVYBRIDGE_X: ivt_idle_state_table_update(); break; @@ -1315,62 +1435,36 @@ static void intel_idle_state_table_update(void) sklh_idle_state_table_update(); break; } -} - -/* - * intel_idle_cpuidle_driver_init() - * allocate, initialize cpuidle_states - */ -static void __init intel_idle_cpuidle_driver_init(void) -{ - int cstate; - struct cpuidle_driver *drv = &intel_idle_driver; - - intel_idle_state_table_update(); - - cpuidle_poll_state_init(drv); - drv->state_count = 1; for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) { - int num_substates, mwait_hint, mwait_cstate; + unsigned int mwait_hint; - if ((cpuidle_state_table[cstate].enter == NULL) && - (cpuidle_state_table[cstate].enter_s2idle == NULL)) + if (intel_idle_max_cstate_reached(cstate)) break; - if (cstate + 1 > max_cstate) { - pr_info("max_cstate %d reached\n", max_cstate); + if (!cpuidle_state_table[cstate].enter && + !cpuidle_state_table[cstate].enter_s2idle) break; - } - - mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags); - mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint); - - /* number of sub-states for this state in CPUID.MWAIT */ - num_substates = (mwait_substates >> ((mwait_cstate + 1) * 4)) - & MWAIT_SUBSTATE_MASK; - - /* if NO sub-states for this state in CPUID, skip it */ - if (num_substates == 0) - continue; - /* if state marked as disabled, skip it */ + /* If marked as unusable, skip this state. */ if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_UNUSABLE) { pr_debug("state %s is disabled\n", cpuidle_state_table[cstate].name); continue; } + mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags); + if (!intel_idle_verify_cstate(mwait_hint)) + continue; - if (((mwait_cstate + 1) > 2) && - !boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) - mark_tsc_unstable("TSC halts in idle" - " states deeper than C2"); + /* Structure copy. 
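In intel_idle_verify_cstate() above, the high nibble of the MWAIT hint selects the C-state, and CPUID leaf 5 reports the number of sub-states per C-state as one EDX nibble each. An illustrative check, assuming hint 0x20 and mwait_substates already read from CPUID:

	unsigned int mwait_hint = 0x20;		/* assumed example hint */
	unsigned int mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint) + 1;	/* = 3 */
	unsigned int num_substates =
		(mwait_substates >> (mwait_cstate * 4)) & MWAIT_SUBSTATE_MASK;

	if (num_substates == 0)
		return false;	/* CPUID lists no sub-states: drop the state */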
*/ + drv->states[drv->state_count] = cpuidle_state_table[cstate]; - drv->states[drv->state_count] = /* structure copy */ - cpuidle_state_table[cstate]; + if (icpu->use_acpi && intel_idle_off_by_default(mwait_hint) && + !(cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_ALWAYS_ENABLE)) + drv->states[drv->state_count].flags |= CPUIDLE_FLAG_OFF; - drv->state_count += 1; + drv->state_count++; } if (icpu->byt_auto_demotion_disable_flag) { @@ -1379,6 +1473,38 @@ static void __init intel_idle_cpuidle_driver_init(void) } } +/* + * intel_idle_cpuidle_driver_init() + * allocate, initialize cpuidle_states + */ +static void __init intel_idle_cpuidle_driver_init(struct cpuidle_driver *drv) +{ + cpuidle_poll_state_init(drv); + drv->state_count = 1; + + if (icpu) + intel_idle_init_cstates_icpu(drv); + else + intel_idle_init_cstates_acpi(drv); +} + +static void auto_demotion_disable(void) +{ + unsigned long long msr_bits; + + rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits); + msr_bits &= ~(icpu->auto_demotion_disable_flags); + wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits); +} + +static void c1e_promotion_disable(void) +{ + unsigned long long msr_bits; + + rdmsrl(MSR_IA32_POWER_CTL, msr_bits); + msr_bits &= ~0x2; + wrmsrl(MSR_IA32_POWER_CTL, msr_bits); +} /* * intel_idle_cpu_init() @@ -1397,6 +1523,9 @@ static int intel_idle_cpu_init(unsigned int cpu) return -EIO; } + if (!icpu) + return 0; + if (icpu->auto_demotion_disable_flags) auto_demotion_disable(); @@ -1411,7 +1540,7 @@ static int intel_idle_cpu_online(unsigned int cpu) struct cpuidle_device *dev; if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) - __setup_broadcast_timer(true); + tick_broadcast_enable(); /* * Some systems can hotplug a cpu at runtime after @@ -1425,23 +1554,74 @@ static int intel_idle_cpu_online(unsigned int cpu) return 0; } +/** + * intel_idle_cpuidle_devices_uninit - Unregister all cpuidle devices. 
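The rewritten intel_idle_init() below boils down to a two-way match: CPUs in intel_idle_ids get their static state table, optionally cross-checked against _CST when use_acpi is set, while any other Intel CPU with MWAIT is accepted only if a usable _CST is found. Condensed from the hunk (MWAIT-availability checks omitted):

	id = x86_match_cpu(intel_idle_ids);
	if (!id && !(id = x86_match_cpu(intel_mwait_ids)))
		return -ENODEV;

	icpu = (const struct idle_cpu *)id->driver_data;
	if (icpu) {
		cpuidle_state_table = icpu->state_table;
		if (icpu->use_acpi)
			intel_idle_acpi_cst_extract();	/* may turn states off */
	} else if (!intel_idle_acpi_cst_extract()) {
		return -ENODEV;	/* no static table and no usable _CST */
	}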
+ */ +static void __init intel_idle_cpuidle_devices_uninit(void) +{ + int i; + + for_each_online_cpu(i) + cpuidle_unregister_device(per_cpu_ptr(intel_idle_cpuidle_devices, i)); +} + static int __init intel_idle_init(void) { + const struct x86_cpu_id *id; + unsigned int eax, ebx, ecx; int retval; /* Do not load intel_idle at all for now if idle= is passed */ if (boot_option_idle_override != IDLE_NO_OVERRIDE) return -ENODEV; - retval = intel_idle_probe(); - if (retval) - return retval; + if (max_cstate == 0) { + pr_debug("disabled\n"); + return -EPERM; + } + + id = x86_match_cpu(intel_idle_ids); + if (id) { + if (!boot_cpu_has(X86_FEATURE_MWAIT)) { + pr_debug("Please enable MWAIT in BIOS SETUP\n"); + return -ENODEV; + } + } else { + id = x86_match_cpu(intel_mwait_ids); + if (!id) + return -ENODEV; + } + + if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) + return -ENODEV; + + cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates); + + if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) || + !(ecx & CPUID5_ECX_INTERRUPT_BREAK) || + !mwait_substates) + return -ENODEV; + + pr_debug("MWAIT substates: 0x%x\n", mwait_substates); + + icpu = (const struct idle_cpu *)id->driver_data; + if (icpu) { + cpuidle_state_table = icpu->state_table; + if (icpu->use_acpi) + intel_idle_acpi_cst_extract(); + } else if (!intel_idle_acpi_cst_extract()) { + return -ENODEV; + } + + pr_debug("v" INTEL_IDLE_VERSION " model 0x%X\n", + boot_cpu_data.x86_model); intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device); - if (intel_idle_cpuidle_devices == NULL) + if (!intel_idle_cpuidle_devices) return -ENOMEM; - intel_idle_cpuidle_driver_init(); + intel_idle_cpuidle_driver_init(&intel_idle_driver); + retval = cpuidle_register_driver(&intel_idle_driver); if (retval) { struct cpuidle_driver *drv = cpuidle_get_driver(); diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c index 7b837641f166..7320275c7e56 100644 --- a/drivers/iio/accel/st_accel_core.c +++ b/drivers/iio/accel/st_accel_core.c @@ -992,6 +992,7 @@ static const struct iio_trigger_ops st_accel_trigger_ops = { #define ST_ACCEL_TRIGGER_OPS NULL #endif +#ifdef CONFIG_ACPI static const struct iio_mount_matrix * get_mount_matrix(const struct iio_dev *indio_dev, const struct iio_chan_spec *chan) @@ -1012,7 +1013,6 @@ static const struct iio_chan_spec_ext_info mount_matrix_ext_info[] = { static int apply_acpi_orientation(struct iio_dev *indio_dev, struct iio_chan_spec *channels) { -#ifdef CONFIG_ACPI struct st_sensor_data *adata = iio_priv(indio_dev); struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; struct acpi_device *adev; @@ -1140,10 +1140,14 @@ static int apply_acpi_orientation(struct iio_dev *indio_dev, out: kfree(buffer.pointer); return ret; +} #else /* !CONFIG_ACPI */ +static int apply_acpi_orientation(struct iio_dev *indio_dev, + struct iio_chan_spec *channels) +{ return 0; -#endif } +#endif /* * st_accel_get_settings() - get sensor settings from device name diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c index edc6f1cc90b2..306bf15023a7 100644 --- a/drivers/iio/adc/ad7124.c +++ b/drivers/iio/adc/ad7124.c @@ -39,6 +39,8 @@ #define AD7124_STATUS_POR_FLAG_MSK BIT(4) /* AD7124_ADC_CONTROL */ +#define AD7124_ADC_CTRL_REF_EN_MSK BIT(8) +#define AD7124_ADC_CTRL_REF_EN(x) FIELD_PREP(AD7124_ADC_CTRL_REF_EN_MSK, x) #define AD7124_ADC_CTRL_PWR_MSK GENMASK(7, 6) #define AD7124_ADC_CTRL_PWR(x) FIELD_PREP(AD7124_ADC_CTRL_PWR_MSK, x) #define AD7124_ADC_CTRL_MODE_MSK GENMASK(5, 2) @@ -424,7 +426,10 @@ static int 
ad7124_init_channel_vref(struct ad7124_state *st, break; case AD7124_INT_REF: st->channel_config[channel_number].vref_mv = 2500; - break; + st->adc_control &= ~AD7124_ADC_CTRL_REF_EN_MSK; + st->adc_control |= AD7124_ADC_CTRL_REF_EN(1); + return ad_sd_write_reg(&st->sd, AD7124_ADC_CONTROL, + 2, st->adc_control); default: dev_err(&st->sd.spi->dev, "Invalid reference %d\n", refsel); return -EINVAL; @@ -489,13 +494,11 @@ static int ad7124_of_parse_channel_config(struct iio_dev *indio_dev, st->channel_config[channel].buf_negative = of_property_read_bool(child, "adi,buffered-negative"); - *chan = ad7124_channel_template; - chan->address = channel; - chan->scan_index = channel; - chan->channel = ain[0]; - chan->channel2 = ain[1]; - - chan++; + chan[channel] = ad7124_channel_template; + chan[channel].address = channel; + chan[channel].scan_index = channel; + chan[channel].channel = ain[0]; + chan[channel].channel2 = ain[1]; } return 0; diff --git a/drivers/iio/adc/ad7606.c b/drivers/iio/adc/ad7606.c index f5ba94c03a8d..e4683a68522a 100644 --- a/drivers/iio/adc/ad7606.c +++ b/drivers/iio/adc/ad7606.c @@ -85,7 +85,7 @@ err_unlock: static int ad7606_read_samples(struct ad7606_state *st) { - unsigned int num = st->chip_info->num_channels; + unsigned int num = st->chip_info->num_channels - 1; u16 *data = st->data; int ret; diff --git a/drivers/iio/adc/ad7949.c b/drivers/iio/adc/ad7949.c index 5c2b3446fa4a..2c6f60edb7ce 100644 --- a/drivers/iio/adc/ad7949.c +++ b/drivers/iio/adc/ad7949.c @@ -89,6 +89,7 @@ static int ad7949_spi_read_channel(struct ad7949_adc_chip *ad7949_adc, int *val, unsigned int channel) { int ret; + int i; int bits_per_word = ad7949_adc->resolution; int mask = GENMASK(ad7949_adc->resolution, 0); struct spi_message msg; @@ -100,12 +101,23 @@ static int ad7949_spi_read_channel(struct ad7949_adc_chip *ad7949_adc, int *val, }, }; - ret = ad7949_spi_write_cfg(ad7949_adc, - channel << AD7949_OFFSET_CHANNEL_SEL, - AD7949_MASK_CHANNEL_SEL); - if (ret) - return ret; + /* + * 1: write CFG for sample N and read old data (sample N-2) + * 2: if CFG was not changed since sample N-1 then we'll get good data + * at the next xfer, so we bail out now, otherwise we write something + * and we read garbage (sample N-1 configuration). 
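The AD7949 conversion pipeline here is two transfers deep: a CFG word written in SPI transfer N first affects the sample read back in transfer N+2. The fix therefore issues the channel-select CFG up to twice before the real read, skipping the second write when the configuration is already current. In outline, condensed from the hunk:

	/* xfer 1: write CFG for the new channel; the data read back is stale */
	/* xfer 2: if CFG was unchanged, the next read is already valid */
	for (i = 0; i < 2; i++) {
		ret = ad7949_spi_write_cfg(ad7949_adc,
					   channel << AD7949_OFFSET_CHANNEL_SEL,
					   AD7949_MASK_CHANNEL_SEL);
		if (ret)
			return ret;
		if (channel == ad7949_adc->current_channel)
			break;
	}
	/* xfer 3: dummy write, read the valid sample for the new channel */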
+ */ + for (i = 0; i < 2; i++) { + ret = ad7949_spi_write_cfg(ad7949_adc, + channel << AD7949_OFFSET_CHANNEL_SEL, + AD7949_MASK_CHANNEL_SEL); + if (ret) + return ret; + if (channel == ad7949_adc->current_channel) + break; + } + /* 3: write something and read actual data */ ad7949_adc->buffer = 0; spi_message_init_with_transfers(&msg, tx, 1); ret = spi_sync(ad7949_adc->spi, &msg); diff --git a/drivers/iio/adc/intel_mrfld_adc.c b/drivers/iio/adc/intel_mrfld_adc.c index 67d096f8180d..c35a1beb817c 100644 --- a/drivers/iio/adc/intel_mrfld_adc.c +++ b/drivers/iio/adc/intel_mrfld_adc.c @@ -185,7 +185,7 @@ static int mrfld_adc_probe(struct platform_device *pdev) int irq; int ret; - indio_dev = devm_iio_device_alloc(dev, sizeof(*indio_dev)); + indio_dev = devm_iio_device_alloc(dev, sizeof(struct mrfld_adc)); if (!indio_dev) return -ENOMEM; diff --git a/drivers/iio/adc/max1027.c b/drivers/iio/adc/max1027.c index e171db20c04a..02834ca3e1ce 100644 --- a/drivers/iio/adc/max1027.c +++ b/drivers/iio/adc/max1027.c @@ -478,7 +478,13 @@ static int max1027_probe(struct spi_device *spi) st->trig->ops = &max1027_trigger_ops; st->trig->dev.parent = &spi->dev; iio_trigger_set_drvdata(st->trig, indio_dev); - iio_trigger_register(st->trig); + ret = devm_iio_trigger_register(&indio_dev->dev, + st->trig); + if (ret < 0) { + dev_err(&indio_dev->dev, + "Failed to register iio trigger\n"); + return ret; + } ret = devm_request_threaded_irq(&spi->dev, spi->irq, iio_trigger_generic_data_rdy_poll, diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c index da073d72f649..e480529b3f04 100644 --- a/drivers/iio/adc/max9611.c +++ b/drivers/iio/adc/max9611.c @@ -89,6 +89,12 @@ #define MAX9611_TEMP_SCALE_NUM 1000000 #define MAX9611_TEMP_SCALE_DIV 2083 +/* + * Conversion time is 2 ms (typically) at Ta=25 degreeC + * No maximum value is known, so play it safe. + */ +#define MAX9611_CONV_TIME_US_RANGE 3000, 3300 + struct max9611_dev { struct device *dev; struct i2c_client *i2c_client; @@ -236,11 +242,9 @@ static int max9611_read_single(struct max9611_dev *max9611, return ret; } - /* - * need a delay here to make register configuration - * stabilize. 1 msec at least, from empirical testing. - */ - usleep_range(1000, 2000); + /* need a delay here to make register configuration stabilize. 
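The MAX9611 change here replaces a bare 1-2 ms delay with a constant derived from the datasheet's typical 2 ms conversion time plus safety margin. Note the idiom: the macro expands to both arguments of usleep_range(), keeping min and max in one definition:

	#define MAX9611_CONV_TIME_US_RANGE	3000, 3300	/* min us, max us */

	usleep_range(MAX9611_CONV_TIME_US_RANGE);	/* usleep_range(3000, 3300) */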
*/ + + usleep_range(MAX9611_CONV_TIME_US_RANGE); ret = i2c_smbus_read_word_swapped(max9611->i2c_client, reg_addr); if (ret < 0) { @@ -507,7 +511,7 @@ static int max9611_init(struct max9611_dev *max9611) MAX9611_REG_CTRL2, 0); return ret; } - usleep_range(1000, 2000); + usleep_range(MAX9611_CONV_TIME_US_RANGE); return 0; } diff --git a/drivers/iio/chemical/Kconfig b/drivers/iio/chemical/Kconfig index fa4586037bb8..0b91de4df8f4 100644 --- a/drivers/iio/chemical/Kconfig +++ b/drivers/iio/chemical/Kconfig @@ -65,6 +65,7 @@ config IAQCORE config PMS7003 tristate "Plantower PMS7003 particulate matter sensor" depends on SERIAL_DEV_BUS + select IIO_BUFFER select IIO_TRIGGERED_BUFFER help Say Y here to build support for the Plantower PMS7003 particulate diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c index 963ff043eecf..7ecd2ffa3132 100644 --- a/drivers/iio/humidity/hdc100x.c +++ b/drivers/iio/humidity/hdc100x.c @@ -229,7 +229,7 @@ static int hdc100x_read_raw(struct iio_dev *indio_dev, *val2 = 65536; return IIO_VAL_FRACTIONAL; } else { - *val = 100; + *val = 100000; *val2 = 65536; return IIO_VAL_FRACTIONAL; } diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c index 45e77b308238..0686e41bb8a1 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c @@ -117,6 +117,7 @@ static const struct inv_mpu6050_hw hw_info[] = { .reg = ®_set_6050, .config = &chip_config_6050, .fifo_size = 1024, + .temp = {INV_MPU6050_TEMP_OFFSET, INV_MPU6050_TEMP_SCALE}, }, { .whoami = INV_MPU6500_WHOAMI_VALUE, @@ -124,6 +125,7 @@ static const struct inv_mpu6050_hw hw_info[] = { .reg = ®_set_6500, .config = &chip_config_6050, .fifo_size = 512, + .temp = {INV_MPU6500_TEMP_OFFSET, INV_MPU6500_TEMP_SCALE}, }, { .whoami = INV_MPU6515_WHOAMI_VALUE, @@ -131,6 +133,7 @@ static const struct inv_mpu6050_hw hw_info[] = { .reg = ®_set_6500, .config = &chip_config_6050, .fifo_size = 512, + .temp = {INV_MPU6500_TEMP_OFFSET, INV_MPU6500_TEMP_SCALE}, }, { .whoami = INV_MPU6000_WHOAMI_VALUE, @@ -138,6 +141,7 @@ static const struct inv_mpu6050_hw hw_info[] = { .reg = ®_set_6050, .config = &chip_config_6050, .fifo_size = 1024, + .temp = {INV_MPU6050_TEMP_OFFSET, INV_MPU6050_TEMP_SCALE}, }, { .whoami = INV_MPU9150_WHOAMI_VALUE, @@ -145,6 +149,7 @@ static const struct inv_mpu6050_hw hw_info[] = { .reg = ®_set_6050, .config = &chip_config_6050, .fifo_size = 1024, + .temp = {INV_MPU6050_TEMP_OFFSET, INV_MPU6050_TEMP_SCALE}, }, { .whoami = INV_MPU9250_WHOAMI_VALUE, @@ -152,6 +157,7 @@ static const struct inv_mpu6050_hw hw_info[] = { .reg = ®_set_6500, .config = &chip_config_6050, .fifo_size = 512, + .temp = {INV_MPU6500_TEMP_OFFSET, INV_MPU6500_TEMP_SCALE}, }, { .whoami = INV_MPU9255_WHOAMI_VALUE, @@ -159,6 +165,7 @@ static const struct inv_mpu6050_hw hw_info[] = { .reg = ®_set_6500, .config = &chip_config_6050, .fifo_size = 512, + .temp = {INV_MPU6500_TEMP_OFFSET, INV_MPU6500_TEMP_SCALE}, }, { .whoami = INV_ICM20608_WHOAMI_VALUE, @@ -166,6 +173,7 @@ static const struct inv_mpu6050_hw hw_info[] = { .reg = ®_set_6500, .config = &chip_config_6050, .fifo_size = 512, + .temp = {INV_ICM20608_TEMP_OFFSET, INV_ICM20608_TEMP_SCALE}, }, { .whoami = INV_ICM20602_WHOAMI_VALUE, @@ -173,6 +181,7 @@ static const struct inv_mpu6050_hw hw_info[] = { .reg = ®_set_icm20602, .config = &chip_config_6050, .fifo_size = 1008, + .temp = {INV_ICM20608_TEMP_OFFSET, INV_ICM20608_TEMP_SCALE}, }, }; @@ -481,12 +490,8 @@ inv_mpu6050_read_raw(struct iio_dev 
*indio_dev, return IIO_VAL_INT_PLUS_MICRO; case IIO_TEMP: - *val = 0; - if (st->chip_type == INV_ICM20602) - *val2 = INV_ICM20602_TEMP_SCALE; - else - *val2 = INV_MPU6050_TEMP_SCALE; - + *val = st->hw->temp.scale / 1000000; + *val2 = st->hw->temp.scale % 1000000; return IIO_VAL_INT_PLUS_MICRO; case IIO_MAGN: return inv_mpu_magn_get_scale(st, chan, val, val2); @@ -496,11 +501,7 @@ inv_mpu6050_read_raw(struct iio_dev *indio_dev, case IIO_CHAN_INFO_OFFSET: switch (chan->type) { case IIO_TEMP: - if (st->chip_type == INV_ICM20602) - *val = INV_ICM20602_TEMP_OFFSET; - else - *val = INV_MPU6050_TEMP_OFFSET; - + *val = st->hw->temp.offset; return IIO_VAL_INT; default: return -EINVAL; diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h index f1fb7b6bdab1..b096e010d4ee 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h @@ -107,6 +107,7 @@ struct inv_mpu6050_chip_config { * @reg: register map of the chip. * @config: configuration of the chip. * @fifo_size: size of the FIFO in bytes. + * @temp: offset and scale to apply to raw temperature. */ struct inv_mpu6050_hw { u8 whoami; @@ -114,6 +115,10 @@ struct inv_mpu6050_hw { const struct inv_mpu6050_reg_map *reg; const struct inv_mpu6050_chip_config *config; size_t fifo_size; + struct { + int offset; + int scale; + } temp; }; /* @@ -279,16 +284,19 @@ struct inv_mpu6050_state { #define INV_MPU6050_REG_UP_TIME_MIN 5000 #define INV_MPU6050_REG_UP_TIME_MAX 10000 -#define INV_MPU6050_TEMP_OFFSET 12421 -#define INV_MPU6050_TEMP_SCALE 2941 +#define INV_MPU6050_TEMP_OFFSET 12420 +#define INV_MPU6050_TEMP_SCALE 2941176 #define INV_MPU6050_MAX_GYRO_FS_PARAM 3 #define INV_MPU6050_MAX_ACCL_FS_PARAM 3 #define INV_MPU6050_THREE_AXIS 3 #define INV_MPU6050_GYRO_CONFIG_FSR_SHIFT 3 #define INV_MPU6050_ACCL_CONFIG_FSR_SHIFT 3 -#define INV_ICM20602_TEMP_OFFSET 8170 -#define INV_ICM20602_TEMP_SCALE 3060 +#define INV_MPU6500_TEMP_OFFSET 7011 +#define INV_MPU6500_TEMP_SCALE 2995178 + +#define INV_ICM20608_TEMP_OFFSET 8170 +#define INV_ICM20608_TEMP_SCALE 3059976 /* 6 + 6 + 7 (for MPU9x50) = 19 round up to 24 and plus 8 */ #define INV_MPU6050_OUTPUT_DATA_SIZE 32 diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h index c605b153be41..dc55d7dff3eb 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h @@ -320,7 +320,6 @@ enum st_lsm6dsx_fifo_mode { * @odr: Output data rate of the sensor [Hz]. * @watermark: Sensor watermark level. * @sip: Number of samples in a given pattern. - * @decimator: FIFO decimation factor. * @ts_ref: Sensor timestamp reference for hw one. * @ext_info: Sensor settings if it is connected to i2c controller */ @@ -334,7 +333,6 @@ struct st_lsm6dsx_sensor { u16 watermark; u8 sip; - u8 decimator; s64 ts_ref; struct { @@ -351,9 +349,9 @@ struct st_lsm6dsx_sensor { * @fifo_lock: Mutex to prevent concurrent access to the hw FIFO. * @conf_lock: Mutex to prevent concurrent FIFO configuration update. * @page_lock: Mutex to prevent concurrent memory page configuration. - * @fifo_mode: FIFO operating mode supported by the device. * @suspend_mask: Suspended sensor bitmask. * @enable_mask: Enabled sensor bitmask. + * @fifo_mask: Enabled hw FIFO bitmask. * @ts_gain: Hw timestamp rate after internal calibration. * @ts_sip: Total number of timestamp samples in a given pattern. * @sip: Total number of samples (acc/gyro/ts) in a given pattern. 
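The inv_mpu6050 hunks above move the temperature offset and scale into the per-chip hw_info entry and report the scale as IIO_VAL_INT_PLUS_MICRO. A minimal standalone sketch (not driver code) of how the exported values combine under the usual IIO convention, milli-degrees C = (raw + offset) * scale; the constants are the MPU6050 values from this diff and the raw sample is hypothetical:

#include <stdio.h>

int main(void)
{
	int raw = 0;                /* hypothetical in_temp_raw sample */
	int offset = 12420;         /* INV_MPU6050_TEMP_OFFSET */
	int scale = 2941176;        /* INV_MPU6050_TEMP_SCALE, in micro-units */
	int val = scale / 1000000;  /* integer part returned in *val */
	int val2 = scale % 1000000; /* micro part returned in *val2 */
	double mdeg = (raw + offset) * (val + val2 / 1e6);

	printf("%.1f mC (%.2f C)\n", mdeg, mdeg / 1000.0); /* ~36529.4 -> 36.53 C */
	return 0;
}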
@@ -373,9 +371,9 @@ struct st_lsm6dsx_hw { struct mutex conf_lock; struct mutex page_lock; - enum st_lsm6dsx_fifo_mode fifo_mode; u8 suspend_mask; u8 enable_mask; + u8 fifo_mask; s64 ts_gain; u8 ts_sip; u8 sip; diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c index d416990ae309..cb536b81a1c2 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c @@ -78,14 +78,20 @@ struct st_lsm6dsx_decimator_entry st_lsm6dsx_decimator_table[] = { { 32, 0x7 }, }; -static int st_lsm6dsx_get_decimator_val(u8 val) +static int +st_lsm6dsx_get_decimator_val(struct st_lsm6dsx_sensor *sensor, u32 max_odr) { const int max_size = ARRAY_SIZE(st_lsm6dsx_decimator_table); + u32 decimator = max_odr / sensor->odr; int i; - for (i = 0; i < max_size; i++) - if (st_lsm6dsx_decimator_table[i].decimator == val) + if (decimator > 1) + decimator = round_down(decimator, 2); + + for (i = 0; i < max_size; i++) { + if (st_lsm6dsx_decimator_table[i].decimator == decimator) break; + } return i == max_size ? 0 : st_lsm6dsx_decimator_table[i].val; } @@ -111,6 +117,13 @@ static void st_lsm6dsx_get_max_min_odr(struct st_lsm6dsx_hw *hw, } } +static u8 st_lsm6dsx_get_sip(struct st_lsm6dsx_sensor *sensor, u32 min_odr) +{ + u8 sip = sensor->odr / min_odr; + + return sip > 1 ? round_down(sip, 2) : sip; +} + static int st_lsm6dsx_update_decimators(struct st_lsm6dsx_hw *hw) { const struct st_lsm6dsx_reg *ts_dec_reg; @@ -131,12 +144,10 @@ static int st_lsm6dsx_update_decimators(struct st_lsm6dsx_hw *hw) sensor = iio_priv(hw->iio_devs[i]); /* update fifo decimators and sample in pattern */ if (hw->enable_mask & BIT(sensor->id)) { - sensor->sip = sensor->odr / min_odr; - sensor->decimator = max_odr / sensor->odr; - data = st_lsm6dsx_get_decimator_val(sensor->decimator); + sensor->sip = st_lsm6dsx_get_sip(sensor, min_odr); + data = st_lsm6dsx_get_decimator_val(sensor, max_odr); } else { sensor->sip = 0; - sensor->decimator = 0; data = 0; } ts_sip = max_t(u16, ts_sip, sensor->sip); @@ -176,17 +187,10 @@ int st_lsm6dsx_set_fifo_mode(struct st_lsm6dsx_hw *hw, enum st_lsm6dsx_fifo_mode fifo_mode) { unsigned int data; - int err; data = FIELD_PREP(ST_LSM6DSX_FIFO_MODE_MASK, fifo_mode); - err = st_lsm6dsx_update_bits_locked(hw, ST_LSM6DSX_REG_FIFO_MODE_ADDR, - ST_LSM6DSX_FIFO_MODE_MASK, data); - if (err < 0) - return err; - - hw->fifo_mode = fifo_mode; - - return 0; + return st_lsm6dsx_update_bits_locked(hw, ST_LSM6DSX_REG_FIFO_MODE_ADDR, + ST_LSM6DSX_FIFO_MODE_MASK, data); } static int st_lsm6dsx_set_fifo_odr(struct st_lsm6dsx_sensor *sensor, @@ -608,11 +612,17 @@ int st_lsm6dsx_flush_fifo(struct st_lsm6dsx_hw *hw) int st_lsm6dsx_update_fifo(struct st_lsm6dsx_sensor *sensor, bool enable) { struct st_lsm6dsx_hw *hw = sensor->hw; + u8 fifo_mask; int err; mutex_lock(&hw->conf_lock); - if (hw->fifo_mode != ST_LSM6DSX_FIFO_BYPASS) { + if (enable) + fifo_mask = hw->fifo_mask | BIT(sensor->id); + else + fifo_mask = hw->fifo_mask & ~BIT(sensor->id); + + if (hw->fifo_mask) { err = st_lsm6dsx_flush_fifo(hw); if (err < 0) goto out; @@ -642,15 +652,19 @@ int st_lsm6dsx_update_fifo(struct st_lsm6dsx_sensor *sensor, bool enable) if (err < 0) goto out; - if (hw->enable_mask) { + if (fifo_mask) { /* reset hw ts counter */ err = st_lsm6dsx_reset_hw_ts(hw); if (err < 0) goto out; err = st_lsm6dsx_set_fifo_mode(hw, ST_LSM6DSX_FIFO_CONT); + if (err < 0) + goto out; } + hw->fifo_mask = fifo_mask; + out: mutex_unlock(&hw->conf_lock); diff --git 
a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c index 11b2c7bc8041..b921dd9e108f 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c @@ -1301,7 +1301,8 @@ static int st_lsm6dsx_check_whoami(struct st_lsm6dsx_hw *hw, int id, for (i = 0; i < ARRAY_SIZE(st_lsm6dsx_sensor_settings); i++) { for (j = 0; j < ST_LSM6DSX_MAX_ID; j++) { - if (id == st_lsm6dsx_sensor_settings[i].id[j].hw_id) + if (st_lsm6dsx_sensor_settings[i].id[j].name && + id == st_lsm6dsx_sensor_settings[i].id[j].hw_id) break; } if (j < ST_LSM6DSX_MAX_ID) @@ -1447,8 +1448,9 @@ st_lsm6dsx_set_odr(struct st_lsm6dsx_sensor *sensor, u32 req_odr) return st_lsm6dsx_update_bits_locked(hw, reg->addr, reg->mask, data); } -int st_lsm6dsx_sensor_set_enable(struct st_lsm6dsx_sensor *sensor, - bool enable) +static int +__st_lsm6dsx_sensor_set_enable(struct st_lsm6dsx_sensor *sensor, + bool enable) { struct st_lsm6dsx_hw *hw = sensor->hw; u32 odr = enable ? sensor->odr : 0; @@ -1466,6 +1468,26 @@ int st_lsm6dsx_sensor_set_enable(struct st_lsm6dsx_sensor *sensor, return 0; } +static int +st_lsm6dsx_check_events(struct st_lsm6dsx_sensor *sensor, bool enable) +{ + struct st_lsm6dsx_hw *hw = sensor->hw; + + if (sensor->id == ST_LSM6DSX_ID_GYRO || enable) + return 0; + + return hw->enable_event; +} + +int st_lsm6dsx_sensor_set_enable(struct st_lsm6dsx_sensor *sensor, + bool enable) +{ + if (st_lsm6dsx_check_events(sensor, enable)) + return 0; + + return __st_lsm6dsx_sensor_set_enable(sensor, enable); +} + static int st_lsm6dsx_read_oneshot(struct st_lsm6dsx_sensor *sensor, u8 addr, int *val) { @@ -1661,7 +1683,7 @@ st_lsm6dsx_write_event_config(struct iio_dev *iio_dev, struct st_lsm6dsx_sensor *sensor = iio_priv(iio_dev); struct st_lsm6dsx_hw *hw = sensor->hw; u8 enable_event; - int err = 0; + int err; if (type != IIO_EV_TYPE_THRESH) return -EINVAL; @@ -1689,7 +1711,8 @@ st_lsm6dsx_write_event_config(struct iio_dev *iio_dev, return err; mutex_lock(&hw->conf_lock); - err = st_lsm6dsx_sensor_set_enable(sensor, state); + if (enable_event || !(hw->fifo_mask & BIT(sensor->id))) + err = __st_lsm6dsx_sensor_set_enable(sensor, state); mutex_unlock(&hw->conf_lock); if (err < 0) return err; @@ -2300,7 +2323,7 @@ static int __maybe_unused st_lsm6dsx_suspend(struct device *dev) hw->suspend_mask |= BIT(sensor->id); } - if (hw->fifo_mode != ST_LSM6DSX_FIFO_BYPASS) + if (hw->fifo_mask) err = st_lsm6dsx_flush_fifo(hw); return err; @@ -2336,7 +2359,7 @@ static int __maybe_unused st_lsm6dsx_resume(struct device *dev) hw->suspend_mask &= ~BIT(sensor->id); } - if (hw->enable_mask) + if (hw->fifo_mask) err = st_lsm6dsx_set_fifo_mode(hw, ST_LSM6DSX_FIFO_CONT); return err; diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c index c193d64e5217..112225c0e486 100644 --- a/drivers/iio/industrialio-buffer.c +++ b/drivers/iio/industrialio-buffer.c @@ -566,7 +566,7 @@ static int iio_compute_scan_bytes(struct iio_dev *indio_dev, const unsigned long *mask, bool timestamp) { unsigned bytes = 0; - int length, i; + int length, i, largest = 0; /* How much space will the demuxed element take? 
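A note on the st_lsm6dsx buffer rework above: the per-sensor decimation factor is no longer cached in the sensor state but derived on the fly as the ratio of the fastest enabled ODR to the sensor's own ODR, rounded down to an even value so it can match a decimator-table entry. A standalone sketch of just that arithmetic, with illustrative ODR values; the table lookup itself is omitted:

#include <stdio.h>

/* mirrors the decimator math in st_lsm6dsx_get_decimator_val() */
static unsigned int decimator(unsigned int max_odr, unsigned int odr)
{
	unsigned int dec = max_odr / odr;

	return dec > 1 ? dec & ~1u : dec; /* round_down(dec, 2) */
}

int main(void)
{
	/* e.g. accel enabled at 416 Hz, gyro at 104 Hz */
	printf("%u\n", decimator(416, 104)); /* 4: keep 1 gyro sample in 4 */
	printf("%u\n", decimator(416, 416)); /* 1: no decimation */
	return 0;
}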
*/ for_each_set_bit(i, mask, @@ -574,13 +574,17 @@ static int iio_compute_scan_bytes(struct iio_dev *indio_dev, length = iio_storage_bytes_for_si(indio_dev, i); bytes = ALIGN(bytes, length); bytes += length; + largest = max(largest, length); } if (timestamp) { length = iio_storage_bytes_for_timestamp(indio_dev); bytes = ALIGN(bytes, length); bytes += length; + largest = max(largest, length); } + + bytes = ALIGN(bytes, largest); return bytes; } diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c index 16dacea9eadf..b0e241aaefb4 100644 --- a/drivers/iio/light/vcnl4000.c +++ b/drivers/iio/light/vcnl4000.c @@ -163,7 +163,6 @@ static int vcnl4200_init(struct vcnl4000_data *data) if (ret < 0) return ret; - data->al_scale = 24000; data->vcnl4200_al.reg = VCNL4200_AL_DATA; data->vcnl4200_ps.reg = VCNL4200_PS_DATA; switch (id) { @@ -172,11 +171,13 @@ static int vcnl4200_init(struct vcnl4000_data *data) /* show 54ms in total. */ data->vcnl4200_al.sampling_rate = ktime_set(0, 54000 * 1000); data->vcnl4200_ps.sampling_rate = ktime_set(0, 4200 * 1000); + data->al_scale = 24000; break; case VCNL4040_PROD_ID: /* Integration time is 80ms, add 10ms. */ data->vcnl4200_al.sampling_rate = ktime_set(0, 100000 * 1000); data->vcnl4200_ps.sampling_rate = ktime_set(0, 100000 * 1000); + data->al_scale = 120000; break; } data->vcnl4200_al.last_measurement = ktime_set(0, 0); diff --git a/drivers/iio/temperature/ltc2983.c b/drivers/iio/temperature/ltc2983.c index ddf47023364b..d39c0d6b77f1 100644 --- a/drivers/iio/temperature/ltc2983.c +++ b/drivers/iio/temperature/ltc2983.c @@ -444,8 +444,10 @@ static struct ltc2983_custom_sensor *__ltc2983_custom_sensor_new( else temp = __convert_to_raw(temp, resolution); } else { - of_property_read_u32_index(np, propname, index, - (u32 *)&temp); + u32 t32; + + of_property_read_u32_index(np, propname, index, &t32); + temp = t32; } for (j = 0; j < n_size; j++) diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 25f2b70fd8ef..43a6f07e0afe 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -4763,6 +4763,7 @@ err_ib: err: unregister_netdevice_notifier(&cma_nb); ib_sa_unregister_client(&sa_client); + unregister_pernet_subsys(&cma_pernet_operations); err_wq: destroy_workqueue(cma_wq); return ret; diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c index 8434ec082c3a..2257d7f7810f 100644 --- a/drivers/infiniband/core/counters.c +++ b/drivers/infiniband/core/counters.c @@ -286,6 +286,9 @@ int rdma_counter_bind_qp_auto(struct ib_qp *qp, u8 port) struct rdma_counter *counter; int ret; + if (!qp->res.valid) + return 0; + if (!rdma_is_port_valid(dev, port)) return -EINVAL; diff --git a/drivers/infiniband/core/ib_core_uverbs.c b/drivers/infiniband/core/ib_core_uverbs.c index f509c478b469..b7cb59844ece 100644 --- a/drivers/infiniband/core/ib_core_uverbs.c +++ b/drivers/infiniband/core/ib_core_uverbs.c @@ -238,28 +238,32 @@ void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry) EXPORT_SYMBOL(rdma_user_mmap_entry_remove); /** - * rdma_user_mmap_entry_insert() - Insert an entry to the mmap_xa + * rdma_user_mmap_entry_insert_range() - Insert an entry to the mmap_xa + * in a given range. * * @ucontext: associated user context. 
* @entry: the entry to insert into the mmap_xa * @length: length of the address that will be mmapped + * @min_pgoff: minimum pgoff to be returned + * @max_pgoff: maximum pgoff to be returned * * This function should be called by drivers that use the rdma_user_mmap * interface for implementing their mmap syscall A database of mmap offsets is * handled in the core and helper functions are provided to insert entries * into the database and extract entries when the user calls mmap with the - * given offset. The function allocates a unique page offset that should be - * provided to user, the user will use the offset to retrieve information such - * as address to be mapped and how. + * given offset. The function allocates a unique page offset in a given range + * that should be provided to user, the user will use the offset to retrieve + * information such as address to be mapped and how. * * Return: 0 on success and -ENOMEM on failure */ -int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext, - struct rdma_user_mmap_entry *entry, - size_t length) +int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext, + struct rdma_user_mmap_entry *entry, + size_t length, u32 min_pgoff, + u32 max_pgoff) { struct ib_uverbs_file *ufile = ucontext->ufile; - XA_STATE(xas, &ucontext->mmap_xa, 0); + XA_STATE(xas, &ucontext->mmap_xa, min_pgoff); u32 xa_first, xa_last, npages; int err; u32 i; @@ -285,7 +289,7 @@ int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext, entry->npages = npages; while (true) { /* First find an empty index */ - xas_find_marked(&xas, U32_MAX, XA_FREE_MARK); + xas_find_marked(&xas, max_pgoff, XA_FREE_MARK); if (xas.xa_node == XAS_RESTART) goto err_unlock; @@ -332,4 +336,30 @@ err_unlock: mutex_unlock(&ufile->umap_lock); return -ENOMEM; } +EXPORT_SYMBOL(rdma_user_mmap_entry_insert_range); + +/** + * rdma_user_mmap_entry_insert() - Insert an entry to the mmap_xa. + * + * @ucontext: associated user context. + * @entry: the entry to insert into the mmap_xa + * @length: length of the address that will be mmapped + * + * This function should be called by drivers that use the rdma_user_mmap + * interface for handling user mmapped addresses. The database is handled in + * the core and helper functions are provided to insert entries into the + * database and extract entries when the user calls mmap with the given offset. + * The function allocates a unique page offset that should be provided to user, + * the user will use the offset to retrieve information such as address to + * be mapped and how. 
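The range variant documented above exists so a driver can carve the 32-bit pgoff space into per-command windows and later recover the command from vm_pgoff alone. A minimal sketch of the resulting encode/decode arithmetic; the command-in-the-high-bits split mirrors the mlx5 usage further down in this diff, and the exact field widths are otherwise illustrative:

#include <stdio.h>
#include <stdint.h>

static uint32_t encode_pgoff(uint8_t command, uint16_t index)
{
	return ((uint32_t)command << 16) | index; /* one 64K window per command */
}

static uint8_t pgoff_command(uint32_t pgoff)
{
	return pgoff >> 16;
}

static uint16_t pgoff_index(uint32_t pgoff)
{
	return pgoff & 0xffff;
}

int main(void)
{
	uint32_t pgoff = encode_pgoff(5, 42);

	printf("cmd=%u idx=%u\n", pgoff_command(pgoff), pgoff_index(pgoff));
	return 0;
}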
+ * + * Return: 0 on success and -ENOMEM on failure + */ +int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext, + struct rdma_user_mmap_entry *entry, + size_t length) +{ + return rdma_user_mmap_entry_insert_range(ucontext, entry, length, 0, + U32_MAX); +} EXPORT_SYMBOL(rdma_user_mmap_entry_insert); diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 9b6ca15a183c..ad5112a2325f 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -3305,8 +3305,10 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata) int rc; rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); - if (rc) + if (rc) { dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc); + return rc; + } if (mr->pages) { rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res, diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index 958c1ff9c515..020f70e6865e 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -442,7 +442,7 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq, goto fail; } /* Unconditionally map 8 bytes to support 57500 series */ - nq->bar_reg_iomem = ioremap_nocache(nq_base + nq->bar_reg_off, 8); + nq->bar_reg_iomem = ioremap(nq_base + nq->bar_reg_off, 8); if (!nq->bar_reg_iomem) { rc = -ENOMEM; goto fail; @@ -2283,13 +2283,13 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq, /* Add qp to flush list of the CQ */ bnxt_qplib_add_flush_qp(qp); } else { + /* Before we complete, do WA 9060 */ + if (do_wa9060(qp, cq, cq_cons, sw_sq_cons, + cqe_sq_cons)) { + *lib_qp = qp; + goto out; + } if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) { - /* Before we complete, do WA 9060 */ - if (do_wa9060(qp, cq, cq_cons, sw_sq_cons, - cqe_sq_cons)) { - *lib_qp = qp; - goto out; - } cqe->status = CQ_REQ_STATUS_OK; cqe++; (*budget)--; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index 5cdfa84faf85..1291b12287a5 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c @@ -717,7 +717,7 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, if (!res_base) return -ENOMEM; - rcfw->cmdq_bar_reg_iomem = ioremap_nocache(res_base + + rcfw->cmdq_bar_reg_iomem = ioremap(res_base + RCFW_COMM_BASE_OFFSET, RCFW_COMM_SIZE); if (!rcfw->cmdq_bar_reg_iomem) { @@ -739,7 +739,7 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, "CREQ BAR region %d resc start is 0!\n", rcfw->creq_bar_reg); /* Unconditionally map 8 bytes to support 57500 series */ - rcfw->creq_bar_reg_iomem = ioremap_nocache(res_base + cp_bar_reg_off, + rcfw->creq_bar_reg_iomem = ioremap(res_base + cp_bar_reg_off, 8); if (!rcfw->creq_bar_reg_iomem) { dev_err(&rcfw->pdev->dev, "CREQ BAR region %d mapping failed\n", diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c index bdbde8e22420..60ea1b924b67 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c @@ -704,7 +704,7 @@ static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res, return -ENOMEM; } - dpit->dbr_bar_reg_iomem = ioremap_nocache(bar_reg_base + dbr_offset, + dpit->dbr_bar_reg_iomem = ioremap(bar_reg_base + dbr_offset, dbr_len); if (!dpit->dbr_bar_reg_iomem) { dev_err(&res->pdev->dev, diff --git a/drivers/infiniband/hw/efa/efa_verbs.c 
b/drivers/infiniband/hw/efa/efa_verbs.c index c9d294caa27a..50c22575aed6 100644 --- a/drivers/infiniband/hw/efa/efa_verbs.c +++ b/drivers/infiniband/hw/efa/efa_verbs.c @@ -145,7 +145,7 @@ static inline bool is_rdma_read_cap(struct efa_dev *dev) } #define field_avail(x, fld, sz) (offsetof(typeof(x), fld) + \ - FIELD_SIZEOF(typeof(x), fld) <= (sz)) + sizeof_field(typeof(x), fld) <= (sz)) #define is_reserved_cleared(reserved) \ !memchr_inv(reserved, 0, sizeof(reserved)) diff --git a/drivers/infiniband/hw/hfi1/iowait.c b/drivers/infiniband/hw/hfi1/iowait.c index adb4a1ba921b..5836fe7b2817 100644 --- a/drivers/infiniband/hw/hfi1/iowait.c +++ b/drivers/infiniband/hw/hfi1/iowait.c @@ -81,7 +81,9 @@ void iowait_init(struct iowait *wait, u32 tx_limit, void iowait_cancel_work(struct iowait *w) { cancel_work_sync(&iowait_get_ib_work(w)->iowork); - cancel_work_sync(&iowait_get_tid_work(w)->iowork); + /* Make sure that the iowork for TID RDMA is used */ + if (iowait_get_tid_work(w)->iowork.func) + cancel_work_sync(&iowait_get_tid_work(w)->iowork); } /** diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c index 61362bd6d3ce..1a6268d61977 100644 --- a/drivers/infiniband/hw/hfi1/pcie.c +++ b/drivers/infiniband/hw/hfi1/pcie.c @@ -161,7 +161,7 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev) return -EINVAL; } - dd->kregbase1 = ioremap_nocache(addr, RCV_ARRAY); + dd->kregbase1 = ioremap(addr, RCV_ARRAY); if (!dd->kregbase1) { dd_dev_err(dd, "UC mapping of kregbase1 failed\n"); return -ENOMEM; @@ -179,7 +179,7 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev) dd_dev_info(dd, "RcvArray count: %u\n", rcv_array_count); dd->base2_start = RCV_ARRAY + rcv_array_count * 8; - dd->kregbase2 = ioremap_nocache( + dd->kregbase2 = ioremap( addr + dd->base2_start, TXE_PIO_SEND - dd->base2_start); if (!dd->kregbase2) { diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c index 5774dfc22e18..a51525647ac8 100644 --- a/drivers/infiniband/hw/hfi1/sdma.c +++ b/drivers/infiniband/hw/hfi1/sdma.c @@ -848,7 +848,7 @@ static const struct rhashtable_params sdma_rht_params = { .nelem_hint = NR_CPUS_HINT, .head_offset = offsetof(struct sdma_rht_node, node), .key_offset = offsetof(struct sdma_rht_node, cpu_id), - .key_len = FIELD_SIZEOF(struct sdma_rht_node, cpu_id), + .key_len = sizeof_field(struct sdma_rht_node, cpu_id), .max_size = NR_CPUS, .min_size = 8, .automatic_shrinking = true, diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c index e53f542b60af..8a2e0d9351e9 100644 --- a/drivers/infiniband/hw/hfi1/tid_rdma.c +++ b/drivers/infiniband/hw/hfi1/tid_rdma.c @@ -4633,6 +4633,15 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet) */ fpsn = full_flow_psn(flow, flow->flow_state.spsn); req->r_ack_psn = psn; + /* + * If resync_psn points to the last flow PSN for a + * segment and the new segment (likely from a new + * request) starts with a new generation number, we + * need to adjust resync_psn accordingly. 
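The resync_psn adjustment above depends on 24-bit PSN arithmetic wrapping correctly across a generation boundary. A standalone sketch of the helpers it leans on, reimplemented here purely for illustration under the assumption that mask_psn()/delta_psn() follow the usual 24-bit-PSN definitions:

#include <stdio.h>
#include <stdint.h>

#define PSN_MASK 0xffffffu /* PSNs are 24 bits wide */

static uint32_t mask_psn(uint32_t psn)
{
	return psn & PSN_MASK;
}

/* signed distance between two PSNs, wrapped into [-2^23, 2^23) */
static int32_t delta_psn(uint32_t a, uint32_t b)
{
	return ((int32_t)((a - b) << 8)) >> 8;
}

int main(void)
{
	/* 3 apart even though the counter wrapped past 2^24 */
	printf("%d\n", delta_psn(mask_psn(0x000001), 0xfffffe));
	return 0;
}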
+ */ + if (flow->flow_state.generation != + (resync_psn >> HFI1_KDETH_BTH_SEQ_SHIFT)) + resync_psn = mask_psn(fpsn - 1); flow->resync_npkts += delta_psn(mask_psn(resync_psn + 1), fpsn); /* diff --git a/drivers/infiniband/hw/hfi1/trace_tid.h b/drivers/infiniband/hw/hfi1/trace_tid.h index 343fb9894a82..985ffa9cc958 100644 --- a/drivers/infiniband/hw/hfi1/trace_tid.h +++ b/drivers/infiniband/hw/hfi1/trace_tid.h @@ -138,10 +138,10 @@ TRACE_EVENT(/* put_tid */ TP_ARGS(dd, index, type, pa, order), TP_STRUCT__entry(/* entry */ DD_DEV_ENTRY(dd) - __field(unsigned long, pa); - __field(u32, index); - __field(u32, type); - __field(u16, order); + __field(unsigned long, pa) + __field(u32, index) + __field(u32, type) + __field(u16, order) ), TP_fast_assign(/* assign */ DD_DEV_ASSIGN(dd); diff --git a/drivers/infiniband/hw/hfi1/trace_tx.h b/drivers/infiniband/hw/hfi1/trace_tx.h index 09eb0c9ada00..769e5e4710c6 100644 --- a/drivers/infiniband/hw/hfi1/trace_tx.h +++ b/drivers/infiniband/hw/hfi1/trace_tx.h @@ -588,7 +588,7 @@ TRACE_EVENT(hfi1_sdma_user_reqinfo, TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 *i), TP_ARGS(dd, ctxt, subctxt, i), TP_STRUCT__entry( - DD_DEV_ENTRY(dd); + DD_DEV_ENTRY(dd) __field(u16, ctxt) __field(u8, subctxt) __field(u8, ver_opcode) diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h index b0e9bf7cd150..d36e3e14896d 100644 --- a/drivers/infiniband/hw/hfi1/verbs.h +++ b/drivers/infiniband/hw/hfi1/verbs.h @@ -107,9 +107,9 @@ enum { HFI1_HAS_GRH = (1 << 0), }; -#define LRH_16B_BYTES (FIELD_SIZEOF(struct hfi1_16b_header, lrh)) +#define LRH_16B_BYTES (sizeof_field(struct hfi1_16b_header, lrh)) #define LRH_16B_DWORDS (LRH_16B_BYTES / sizeof(u32)) -#define LRH_9B_BYTES (FIELD_SIZEOF(struct ib_header, lrh)) +#define LRH_9B_BYTES (sizeof_field(struct ib_header, lrh)) #define LRH_9B_DWORDS (LRH_9B_BYTES / sizeof(u32)) /* 24Bits for qpn, upper 8Bits reserved */ diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index 86375947bc67..dbd96d029d8b 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -169,8 +169,7 @@ static void i40iw_dealloc_ucontext(struct ib_ucontext *context) static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) { struct i40iw_ucontext *ucontext; - u64 db_addr_offset; - u64 push_offset; + u64 db_addr_offset, push_offset, pfn; ucontext = to_ucontext(context); if (ucontext->iwdev->sc_dev.is_pf) { @@ -189,7 +188,6 @@ static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) { vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - vma->vm_private_data = ucontext; } else { if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2) vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); @@ -197,12 +195,12 @@ static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); } - if (io_remap_pfn_range(vma, vma->vm_start, - vma->vm_pgoff + (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >> PAGE_SHIFT), - PAGE_SIZE, vma->vm_page_prot)) - return -EAGAIN; + pfn = vma->vm_pgoff + + (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >> + PAGE_SHIFT); - return 0; + return rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE, + vma->vm_page_prot, NULL); } /** diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 0b5dc1d5928f..34055cbab38c 
100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -3018,16 +3018,17 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr) ibdev->ib_active = false; flush_workqueue(wq); - mlx4_ib_close_sriov(ibdev); - mlx4_ib_mad_cleanup(ibdev); - ib_unregister_device(&ibdev->ib_dev); - mlx4_ib_diag_cleanup(ibdev); if (ibdev->iboe.nb.notifier_call) { if (unregister_netdevice_notifier(&ibdev->iboe.nb)) pr_warn("failure unregistering notifier\n"); ibdev->iboe.nb.notifier_call = NULL; } + mlx4_ib_close_sriov(ibdev); + mlx4_ib_mad_cleanup(ibdev); + ib_unregister_device(&ibdev->ib_dev); + mlx4_ib_diag_cleanup(ibdev); + mlx4_qp_release_range(dev, ibdev->steer_qpn_base, ibdev->steer_qpn_count); kfree(ibdev->ib_uc_qpns_bitmap); diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c index 4937947400cd..4c26492ab8a3 100644 --- a/drivers/infiniband/hw/mlx5/cmd.c +++ b/drivers/infiniband/hw/mlx5/cmd.c @@ -157,7 +157,7 @@ int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr, return -ENOMEM; } -int mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length) +void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length) { struct mlx5_core_dev *dev = dm->dev; u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr); @@ -175,15 +175,13 @@ int mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length) MLX5_SET(dealloc_memic_in, in, memic_size, length); err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + if (err) + return; - if (!err) { - spin_lock(&dm->lock); - bitmap_clear(dm->memic_alloc_pages, - start_page_idx, num_pages); - spin_unlock(&dm->lock); - } - - return err; + spin_lock(&dm->lock); + bitmap_clear(dm->memic_alloc_pages, + start_page_idx, num_pages); + spin_unlock(&dm->lock); } int mlx5_cmd_query_ext_ppcnt_counters(struct mlx5_core_dev *dev, void *out) diff --git a/drivers/infiniband/hw/mlx5/cmd.h b/drivers/infiniband/hw/mlx5/cmd.h index 169cab4915e3..945ebce73613 100644 --- a/drivers/infiniband/hw/mlx5/cmd.h +++ b/drivers/infiniband/hw/mlx5/cmd.h @@ -46,7 +46,7 @@ int mlx5_cmd_modify_cong_params(struct mlx5_core_dev *mdev, void *in, int in_size); int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr, u64 length, u32 alignment); -int mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length); +void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length); void mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid); void mlx5_cmd_destroy_tir(struct mlx5_core_dev *dev, u32 tirn, u16 uid); void mlx5_cmd_destroy_tis(struct mlx5_core_dev *dev, u32 tisn, u16 uid); diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 51100350b688..df88bfbd7fed 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -40,7 +40,7 @@ #include <linux/slab.h> #include <linux/bitmap.h> #if defined(CONFIG_X86) -#include <asm/pat.h> +#include <asm/memtype.h> #endif #include <linux/sched.h> #include <linux/sched/mm.h> @@ -2074,6 +2074,24 @@ static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev, virt_to_page(dev->mdev->clock_info)); } +static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry) +{ + struct mlx5_user_mmap_entry *mentry = to_mmmap(entry); + struct mlx5_ib_dev *dev = to_mdev(entry->ucontext->device); + struct mlx5_ib_dm *mdm; + + switch (mentry->mmap_flag) { + case MLX5_IB_MMAP_TYPE_MEMIC: + mdm = container_of(mentry, struct mlx5_ib_dm, mentry); + 
mlx5_cmd_dealloc_memic(&dev->dm, mdm->dev_addr, + mdm->size); + kfree(mdm); + break; + default: + WARN_ON(true); + } +} + static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd, struct vm_area_struct *vma, struct mlx5_ib_ucontext *context) @@ -2186,26 +2204,55 @@ free_bfreg: return err; } -static int dm_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) +static int add_dm_mmap_entry(struct ib_ucontext *context, + struct mlx5_ib_dm *mdm, + u64 address) +{ + mdm->mentry.mmap_flag = MLX5_IB_MMAP_TYPE_MEMIC; + mdm->mentry.address = address; + return rdma_user_mmap_entry_insert_range( + context, &mdm->mentry.rdma_entry, + mdm->size, + MLX5_IB_MMAP_DEVICE_MEM << 16, + (MLX5_IB_MMAP_DEVICE_MEM << 16) + (1UL << 16) - 1); +} + +static unsigned long mlx5_vma_to_pgoff(struct vm_area_struct *vma) { - struct mlx5_ib_ucontext *mctx = to_mucontext(context); - struct mlx5_ib_dev *dev = to_mdev(context->device); - u16 page_idx = get_extended_index(vma->vm_pgoff); - size_t map_size = vma->vm_end - vma->vm_start; - u32 npages = map_size >> PAGE_SHIFT; + unsigned long idx; + u8 command; + + command = get_command(vma->vm_pgoff); + idx = get_extended_index(vma->vm_pgoff); + + return (command << 16 | idx); +} + +static int mlx5_ib_mmap_offset(struct mlx5_ib_dev *dev, + struct vm_area_struct *vma, + struct ib_ucontext *ucontext) +{ + struct mlx5_user_mmap_entry *mentry; + struct rdma_user_mmap_entry *entry; + unsigned long pgoff; + pgprot_t prot; phys_addr_t pfn; + int ret; - if (find_next_zero_bit(mctx->dm_pages, page_idx + npages, page_idx) != - page_idx + npages) + pgoff = mlx5_vma_to_pgoff(vma); + entry = rdma_user_mmap_entry_get_pgoff(ucontext, pgoff); + if (!entry) return -EINVAL; - pfn = ((dev->mdev->bar_addr + - MLX5_CAP64_DEV_MEM(dev->mdev, memic_bar_start_addr)) >> - PAGE_SHIFT) + - page_idx; - return rdma_user_mmap_io(context, vma, pfn, map_size, - pgprot_writecombine(vma->vm_page_prot), - NULL); + mentry = to_mmmap(entry); + pfn = (mentry->address >> PAGE_SHIFT); + prot = pgprot_writecombine(vma->vm_page_prot); + ret = rdma_user_mmap_io(ucontext, vma, pfn, + entry->npages * PAGE_SIZE, + prot, + entry); + rdma_user_mmap_entry_put(&mentry->rdma_entry); + return ret; } static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma) @@ -2248,11 +2295,8 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm case MLX5_IB_MMAP_CLOCK_INFO: return mlx5_ib_mmap_clock_info_page(dev, vma, context); - case MLX5_IB_MMAP_DEVICE_MEM: - return dm_mmap(ibcontext, vma); - default: - return -EINVAL; + return mlx5_ib_mmap_offset(dev, vma, ibcontext); } return 0; @@ -2288,8 +2332,9 @@ static int handle_alloc_dm_memic(struct ib_ucontext *ctx, { struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm; u64 start_offset; - u32 page_idx; + u16 page_idx; int err; + u64 address; dm->size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE); @@ -2298,28 +2343,30 @@ static int handle_alloc_dm_memic(struct ib_ucontext *ctx, if (err) return err; - page_idx = (dm->dev_addr - pci_resource_start(dm_db->dev->pdev, 0) - - MLX5_CAP64_DEV_MEM(dm_db->dev, memic_bar_start_addr)) >> - PAGE_SHIFT; + address = dm->dev_addr & PAGE_MASK; + err = add_dm_mmap_entry(ctx, dm, address); + if (err) + goto err_dealloc; + page_idx = dm->mentry.rdma_entry.start_pgoff & 0xFFFF; err = uverbs_copy_to(attrs, MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX, - &page_idx, sizeof(page_idx)); + &page_idx, + sizeof(page_idx)); if (err) - goto err_dealloc; + goto err_copy; start_offset = dm->dev_addr & ~PAGE_MASK; 
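handle_alloc_dm_memic() above now hands userspace two pieces: a page index taken from the new mmap-entry machinery and the sub-page start offset, because a MEMIC allocation need not begin on a page boundary. A standalone sketch of the address split, with a hypothetical device address and a fixed 4 KiB page size:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ULL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	uint64_t dev_addr = 0x12345678; /* hypothetical MEMIC address */
	uint64_t address = dev_addr & PAGE_MASK;       /* stored in the mmap entry */
	uint64_t start_offset = dev_addr & ~PAGE_MASK; /* copied back to userspace */

	printf("address=0x%llx start_offset=0x%llx\n",
	       (unsigned long long)address,
	       (unsigned long long)start_offset);
	return 0;
}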
err = uverbs_copy_to(attrs, MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET, &start_offset, sizeof(start_offset)); if (err) - goto err_dealloc; - - bitmap_set(to_mucontext(ctx)->dm_pages, page_idx, - DIV_ROUND_UP(dm->size, PAGE_SIZE)); + goto err_copy; return 0; +err_copy: + rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry); err_dealloc: mlx5_cmd_dealloc_memic(dm_db, dm->dev_addr, dm->size); @@ -2423,23 +2470,13 @@ int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs) struct mlx5_ib_ucontext *ctx = rdma_udata_to_drv_context( &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext); struct mlx5_core_dev *dev = to_mdev(ibdm->device)->mdev; - struct mlx5_dm *dm_db = &to_mdev(ibdm->device)->dm; struct mlx5_ib_dm *dm = to_mdm(ibdm); - u32 page_idx; int ret; switch (dm->type) { case MLX5_IB_UAPI_DM_TYPE_MEMIC: - ret = mlx5_cmd_dealloc_memic(dm_db, dm->dev_addr, dm->size); - if (ret) - return ret; - - page_idx = (dm->dev_addr - pci_resource_start(dev->pdev, 0) - - MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr)) >> - PAGE_SHIFT; - bitmap_clear(ctx->dm_pages, page_idx, - DIV_ROUND_UP(dm->size, PAGE_SIZE)); - break; + rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry); + return 0; case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM: ret = mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_STEERING, dm->size, ctx->devx_uid, dm->dev_addr, @@ -3544,10 +3581,6 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev, } INIT_LIST_HEAD(&handler->list); - if (dst) { - memcpy(&dest_arr[0], dst, sizeof(*dst)); - dest_num++; - } for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) { err = parse_flow_attr(dev->mdev, spec, @@ -3560,6 +3593,11 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev, ib_flow += ((union ib_flow_spec *)ib_flow)->size; } + if (dst && !(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP)) { + memcpy(&dest_arr[0], dst, sizeof(*dst)); + dest_num++; + } + if (!flow_is_multicast_only(flow_attr)) set_underlay_qp(dev, spec, underlay_qpn); @@ -3600,10 +3638,8 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev, } if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP) { - if (!(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT)) { + if (!dest_num) rule_dst = NULL; - dest_num = 0; - } } else { if (is_egress) flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW; @@ -6236,6 +6272,7 @@ static const struct ib_device_ops mlx5_ib_dev_ops = { .map_mr_sg = mlx5_ib_map_mr_sg, .map_mr_sg_pi = mlx5_ib_map_mr_sg_pi, .mmap = mlx5_ib_mmap, + .mmap_free = mlx5_ib_mmap_free, .modify_cq = mlx5_ib_modify_cq, .modify_device = mlx5_ib_modify_device, .modify_port = mlx5_ib_modify_port, diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 5986953ec2fa..b06f32ff5748 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -118,6 +118,10 @@ enum { MLX5_MEMIC_BASE_SIZE = 1 << MLX5_MEMIC_BASE_ALIGN, }; +enum mlx5_ib_mmap_type { + MLX5_IB_MMAP_TYPE_MEMIC = 1, +}; + #define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) \ (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity)) #define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)) @@ -135,7 +139,6 @@ struct mlx5_ib_ucontext { u32 tdn; u64 lib_caps; - DECLARE_BITMAP(dm_pages, MLX5_MAX_MEMIC_PAGES); u16 devx_uid; /* For RoCE LAG TX affinity */ atomic_t tx_port_affinity; @@ -556,6 +559,12 @@ enum mlx5_ib_mtt_access_flags { MLX5_IB_MTT_WRITE = (1 << 1), }; +struct mlx5_user_mmap_entry { + struct 
rdma_user_mmap_entry rdma_entry; + u8 mmap_flag; + u64 address; +}; + struct mlx5_ib_dm { struct ib_dm ibdm; phys_addr_t dev_addr; @@ -567,6 +576,7 @@ struct mlx5_ib_dm { } icm_dm; /* other dm types specific params should be added here */ }; + struct mlx5_user_mmap_entry mentry; }; #define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE) @@ -1101,6 +1111,13 @@ to_mflow_act(struct ib_flow_action *ibact) return container_of(ibact, struct mlx5_ib_flow_action, ib_action); } +static inline struct mlx5_user_mmap_entry * +to_mmmap(struct rdma_user_mmap_entry *rdma_entry) +{ + return container_of(rdma_entry, + struct mlx5_user_mmap_entry, rdma_entry); +} + int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, struct ib_udata *udata, unsigned long virt, struct mlx5_db *db); diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index dd4843379f51..91d64dd71a8a 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -6630,7 +6630,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd) /* vl15 buffers start just after the 4k buffers */ vl15off = dd->physaddr + (dd->piobufbase >> 32) + dd->piobcnt4k * dd->align4k; - dd->piovl15base = ioremap_nocache(vl15off, + dd->piovl15base = ioremap(vl15off, NUM_VL15_BUFS * dd->align4k); if (!dd->piovl15base) { ret = -ENOMEM; diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c index d4fd8a6cff7b..43c8ee1f46e0 100644 --- a/drivers/infiniband/hw/qib/qib_init.c +++ b/drivers/infiniband/hw/qib/qib_init.c @@ -1759,7 +1759,7 @@ int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen) qib_userlen = dd->ureg_align * dd->cfgctxts; /* Sanity checks passed, now create the new mappings */ - qib_kregbase = ioremap_nocache(qib_physaddr, qib_kreglen); + qib_kregbase = ioremap(qib_physaddr, qib_kreglen); if (!qib_kregbase) goto bail; @@ -1768,7 +1768,7 @@ int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen) goto bail_kregbase; if (qib_userlen) { - qib_userbase = ioremap_nocache(qib_physaddr + dd->uregbase, + qib_userbase = ioremap(qib_physaddr + dd->uregbase, qib_userlen); if (!qib_userbase) goto bail_piobase; diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c index 864f2af171f7..3dc6ce033319 100644 --- a/drivers/infiniband/hw/qib/qib_pcie.c +++ b/drivers/infiniband/hw/qib/qib_pcie.c @@ -145,7 +145,7 @@ int qib_pcie_ddinit(struct qib_devdata *dd, struct pci_dev *pdev, addr = pci_resource_start(pdev, 0); len = pci_resource_len(pdev, 0); - dd->kregbase = ioremap_nocache(addr, len); + dd->kregbase = ioremap(addr, len); if (!dd->kregbase) return -ENOMEM; diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c index f9a492ed900b..831ad578a7b2 100644 --- a/drivers/infiniband/sw/rxe/rxe_recv.c +++ b/drivers/infiniband/sw/rxe/rxe_recv.c @@ -389,7 +389,7 @@ void rxe_rcv(struct sk_buff *skb) calc_icrc = rxe_icrc_hdr(pkt, skb); calc_icrc = rxe_crc32(rxe, calc_icrc, (u8 *)payload_addr(pkt), - payload_size(pkt)); + payload_size(pkt) + bth_pad(pkt)); calc_icrc = (__force u32)cpu_to_be32(~calc_icrc); if (unlikely(calc_icrc != pack_icrc)) { if (skb->protocol == htons(ETH_P_IPV6)) diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c index c5d9b558fa90..e5031172c019 100644 --- a/drivers/infiniband/sw/rxe/rxe_req.c +++ b/drivers/infiniband/sw/rxe/rxe_req.c @@ -500,6 +500,12 @@ static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe 
*wqe, if (err) return err; } + if (bth_pad(pkt)) { + u8 *pad = payload_addr(pkt) + paylen; + + memset(pad, 0, bth_pad(pkt)); + crc = rxe_crc32(rxe, crc, pad, bth_pad(pkt)); + } } p = payload_addr(pkt) + paylen + bth_pad(pkt); diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c index 1cbfbd98eb22..c4a8195bf670 100644 --- a/drivers/infiniband/sw/rxe/rxe_resp.c +++ b/drivers/infiniband/sw/rxe/rxe_resp.c @@ -732,6 +732,13 @@ static enum resp_states read_reply(struct rxe_qp *qp, if (err) pr_err("Failed copying memory\n"); + if (bth_pad(&ack_pkt)) { + struct rxe_dev *rxe = to_rdev(qp->ibqp.device); + u8 *pad = payload_addr(&ack_pkt) + payload; + + memset(pad, 0, bth_pad(&ack_pkt)); + icrc = rxe_crc32(rxe, icrc, pad, bth_pad(&ack_pkt)); + } p = payload_addr(&ack_pkt) + payload + bth_pad(&ack_pkt); *p = ~icrc; diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index a1a035270cab..b273e421e910 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -2575,17 +2575,6 @@ isert_wait4logout(struct isert_conn *isert_conn) } } -static void -isert_wait4cmds(struct iscsi_conn *conn) -{ - isert_info("iscsi_conn %p\n", conn); - - if (conn->sess) { - target_sess_cmd_list_set_waiting(conn->sess->se_sess); - target_wait_for_sess_cmds(conn->sess->se_sess); - } -} - /** * isert_put_unsol_pending_cmds() - Drop commands waiting for * unsolicitate dataout @@ -2633,7 +2622,6 @@ static void isert_wait_conn(struct iscsi_conn *conn) ib_drain_qp(isert_conn->qp); isert_put_unsol_pending_cmds(conn); - isert_wait4cmds(conn); isert_wait4logout(isert_conn); queue_work(isert_release_wq, &isert_conn->release_work); diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c index 62390e9e0023..8ad7da989a0e 100644 --- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c +++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c @@ -63,7 +63,7 @@ struct vnic_stats { }; }; -#define VNIC_STAT(m) { FIELD_SIZEOF(struct opa_vnic_stats, m), \ +#define VNIC_STAT(m) { sizeof_field(struct opa_vnic_stats, m), \ offsetof(struct opa_vnic_stats, m) } static struct vnic_stats vnic_gstrings_stats[] = { diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c index d7dd6fcf2db0..cb6e3a5f509c 100644 --- a/drivers/input/evdev.c +++ b/drivers/input/evdev.c @@ -224,13 +224,13 @@ static void __pass_event(struct evdev_client *client, */ client->tail = (client->head - 2) & (client->bufsize - 1); - client->buffer[client->tail].input_event_sec = - event->input_event_sec; - client->buffer[client->tail].input_event_usec = - event->input_event_usec; - client->buffer[client->tail].type = EV_SYN; - client->buffer[client->tail].code = SYN_DROPPED; - client->buffer[client->tail].value = 0; + client->buffer[client->tail] = (struct input_event) { + .input_event_sec = event->input_event_sec, + .input_event_usec = event->input_event_usec, + .type = EV_SYN, + .code = SYN_DROPPED, + .value = 0, + }; client->packet_head = client->tail; } @@ -484,10 +484,7 @@ static int evdev_open(struct inode *inode, struct file *file) struct evdev_client *client; int error; - client = kzalloc(struct_size(client, buffer, bufsize), - GFP_KERNEL | __GFP_NOWARN); - if (!client) - client = vzalloc(struct_size(client, buffer, bufsize)); + client = kvzalloc(struct_size(client, buffer, bufsize), GFP_KERNEL); if (!client) return -ENOMEM; diff --git a/drivers/input/input.c b/drivers/input/input.c index 
55086279d044..ee6c3234df36 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -878,16 +878,18 @@ static int input_default_setkeycode(struct input_dev *dev, } } - __clear_bit(*old_keycode, dev->keybit); - __set_bit(ke->keycode, dev->keybit); - - for (i = 0; i < dev->keycodemax; i++) { - if (input_fetch_keycode(dev, i) == *old_keycode) { - __set_bit(*old_keycode, dev->keybit); - break; /* Setting the bit twice is useless, so break */ + if (*old_keycode <= KEY_MAX) { + __clear_bit(*old_keycode, dev->keybit); + for (i = 0; i < dev->keycodemax; i++) { + if (input_fetch_keycode(dev, i) == *old_keycode) { + __set_bit(*old_keycode, dev->keybit); + /* Setting the bit twice is useless, so break */ + break; + } } } + __set_bit(ke->keycode, dev->keybit); return 0; } @@ -943,9 +945,13 @@ int input_set_keycode(struct input_dev *dev, * Simulate keyup event if keycode is not present * in the keymap anymore */ - if (test_bit(EV_KEY, dev->evbit) && - !is_event_supported(old_keycode, dev->keybit, KEY_MAX) && - __test_and_clear_bit(old_keycode, dev->key)) { + if (old_keycode > KEY_MAX) { + dev_warn(dev->dev.parent ?: &dev->dev, + "%s: got too big old keycode %#x\n", + __func__, old_keycode); + } else if (test_bit(EV_KEY, dev->evbit) && + !is_event_supported(old_keycode, dev->keybit, KEY_MAX) && + __test_and_clear_bit(old_keycode, dev->key)) { struct input_value vals[] = { { EV_KEY, old_keycode, 0 }, input_value_sync diff --git a/drivers/input/keyboard/imx_sc_key.c b/drivers/input/keyboard/imx_sc_key.c index 53799527dc75..9f809aeb785c 100644 --- a/drivers/input/keyboard/imx_sc_key.c +++ b/drivers/input/keyboard/imx_sc_key.c @@ -78,7 +78,13 @@ static void imx_sc_check_for_events(struct work_struct *work) return; } - state = (bool)msg.state; + /* + * The response data from SCU firmware is 4 bytes, + * but ONLY the first byte is the key state, other + * 3 bytes could be some dirty data, so we should + * ONLY take the first byte as key state. 
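To make the point of the imx_sc_key comment above concrete: a plain (bool) cast of the whole 32-bit SCU response can be fooled by junk in the upper bytes, which is why the line that follows converts only the low byte. A minimal sketch with a fabricated response word whose key-state byte is zero:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

int main(void)
{
	uint32_t msg_state = 0x00a50000;       /* fabricated: dirty upper bytes, key released */
	bool wrong = (bool)msg_state;          /* 1: fooled by the dirty data */
	bool right = (bool)(msg_state & 0xff); /* 0: only the first byte counts */

	printf("wrong=%d right=%d\n", wrong, right);
	return 0;
}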
+ */ + state = (bool)(msg.state & 0xff); if (state ^ priv->keystate) { priv->keystate = state; diff --git a/drivers/input/keyboard/pxa930_rotary.c b/drivers/input/keyboard/pxa930_rotary.c index f7414091d94e..2fe9dcfe0a6f 100644 --- a/drivers/input/keyboard/pxa930_rotary.c +++ b/drivers/input/keyboard/pxa930_rotary.c @@ -107,7 +107,7 @@ static int pxa930_rotary_probe(struct platform_device *pdev) if (!r) return -ENOMEM; - r->mmio_base = ioremap_nocache(res->start, resource_size(res)); + r->mmio_base = ioremap(res->start, resource_size(res)); if (r->mmio_base == NULL) { dev_err(&pdev->dev, "failed to remap IO memory\n"); err = -ENXIO; diff --git a/drivers/input/keyboard/sh_keysc.c b/drivers/input/keyboard/sh_keysc.c index 27ad73f43451..c155adebf96e 100644 --- a/drivers/input/keyboard/sh_keysc.c +++ b/drivers/input/keyboard/sh_keysc.c @@ -195,7 +195,7 @@ static int sh_keysc_probe(struct platform_device *pdev) memcpy(&priv->pdata, dev_get_platdata(&pdev->dev), sizeof(priv->pdata)); pdata = &priv->pdata; - priv->iomem_base = ioremap_nocache(res->start, resource_size(res)); + priv->iomem_base = ioremap(res->start, resource_size(res)); if (priv->iomem_base == NULL) { dev_err(&pdev->dev, "failed to remap I/O memory\n"); error = -ENXIO; diff --git a/drivers/input/misc/keyspan_remote.c b/drivers/input/misc/keyspan_remote.c index 83368f1e7c4e..4650f4a94989 100644 --- a/drivers/input/misc/keyspan_remote.c +++ b/drivers/input/misc/keyspan_remote.c @@ -336,7 +336,8 @@ static int keyspan_setup(struct usb_device* dev) int retval = 0; retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), - 0x11, 0x40, 0x5601, 0x0, NULL, 0, 0); + 0x11, 0x40, 0x5601, 0x0, NULL, 0, + USB_CTRL_SET_TIMEOUT); if (retval) { dev_dbg(&dev->dev, "%s - failed to set bit rate due to error: %d\n", __func__, retval); @@ -344,7 +345,8 @@ static int keyspan_setup(struct usb_device* dev) } retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), - 0x44, 0x40, 0x0, 0x0, NULL, 0, 0); + 0x44, 0x40, 0x0, 0x0, NULL, 0, + USB_CTRL_SET_TIMEOUT); if (retval) { dev_dbg(&dev->dev, "%s - failed to set resume sensitivity due to error: %d\n", __func__, retval); @@ -352,7 +354,8 @@ static int keyspan_setup(struct usb_device* dev) } retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), - 0x22, 0x40, 0x0, 0x0, NULL, 0, 0); + 0x22, 0x40, 0x0, 0x0, NULL, 0, + USB_CTRL_SET_TIMEOUT); if (retval) { dev_dbg(&dev->dev, "%s - failed to turn receive on due to error: %d\n", __func__, retval); diff --git a/drivers/input/misc/max77650-onkey.c b/drivers/input/misc/max77650-onkey.c index 4d875f2ac13d..ee55f22dbca5 100644 --- a/drivers/input/misc/max77650-onkey.c +++ b/drivers/input/misc/max77650-onkey.c @@ -108,9 +108,16 @@ static int max77650_onkey_probe(struct platform_device *pdev) return input_register_device(onkey->input); } +static const struct of_device_id max77650_onkey_of_match[] = { + { .compatible = "maxim,max77650-onkey" }, + { } +}; +MODULE_DEVICE_TABLE(of, max77650_onkey_of_match); + static struct platform_driver max77650_onkey_driver = { .driver = { .name = "max77650-onkey", + .of_match_table = max77650_onkey_of_match, }, .probe = max77650_onkey_probe, }; diff --git a/drivers/input/misc/pm8xxx-vibrator.c b/drivers/input/misc/pm8xxx-vibrator.c index ecd762f93732..53ad25eaf1a2 100644 --- a/drivers/input/misc/pm8xxx-vibrator.c +++ b/drivers/input/misc/pm8xxx-vibrator.c @@ -90,7 +90,7 @@ static int pm8xxx_vib_set(struct pm8xxx_vib *vib, bool on) if (regs->enable_mask) rc = regmap_update_bits(vib->regmap, regs->enable_addr, - on ? 
regs->enable_mask : 0, val); + regs->enable_mask, on ? ~0 : 0); return rc; } diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index fd253781be71..f2593133e524 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c @@ -74,12 +74,16 @@ static int uinput_dev_event(struct input_dev *dev, struct uinput_device *udev = input_get_drvdata(dev); struct timespec64 ts; - udev->buff[udev->head].type = type; - udev->buff[udev->head].code = code; - udev->buff[udev->head].value = value; ktime_get_ts64(&ts); - udev->buff[udev->head].input_event_sec = ts.tv_sec; - udev->buff[udev->head].input_event_usec = ts.tv_nsec / NSEC_PER_USEC; + + udev->buff[udev->head] = (struct input_event) { + .input_event_sec = ts.tv_sec, + .input_event_usec = ts.tv_nsec / NSEC_PER_USEC, + .type = type, + .code = code, + .value = value, + }; + udev->head = (udev->head + 1) % UINPUT_BUFFER_SIZE; wake_up_interruptible(&udev->waitq); @@ -689,13 +693,14 @@ static ssize_t uinput_read(struct file *file, char __user *buffer, static __poll_t uinput_poll(struct file *file, poll_table *wait) { struct uinput_device *udev = file->private_data; + __poll_t mask = EPOLLOUT | EPOLLWRNORM; /* uinput is always writable */ poll_wait(file, &udev->waitq, wait); if (udev->head != udev->tail) - return EPOLLIN | EPOLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; - return EPOLLOUT | EPOLLWRNORM; + return mask; } static int uinput_release(struct inode *inode, struct file *file) diff --git a/drivers/input/mouse/pxa930_trkball.c b/drivers/input/mouse/pxa930_trkball.c index 41acde60b60f..3332b77eef2a 100644 --- a/drivers/input/mouse/pxa930_trkball.c +++ b/drivers/input/mouse/pxa930_trkball.c @@ -167,7 +167,7 @@ static int pxa930_trkball_probe(struct platform_device *pdev) goto failed; } - trkball->mmio_base = ioremap_nocache(res->start, resource_size(res)); + trkball->mmio_base = ioremap(res->start, resource_size(res)); if (!trkball->mmio_base) { dev_err(&pdev->dev, "failed to ioremap registers\n"); error = -ENXIO; diff --git a/drivers/input/rmi4/rmi_f54.c b/drivers/input/rmi4/rmi_f54.c index 0bc01cfc2b51..6b23e679606e 100644 --- a/drivers/input/rmi4/rmi_f54.c +++ b/drivers/input/rmi4/rmi_f54.c @@ -24,6 +24,12 @@ #define F54_NUM_TX_OFFSET 1 #define F54_NUM_RX_OFFSET 0 +/* + * The smbus protocol can read only 32 bytes max at a time. + * But this should be fine for i2c/spi as well. 
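The 32-byte ceiling described in the rmi_f54 comment above is what drives the chunked report read added further down in rmi_f54_work(). A standalone sketch of that loop shape; read_block() is a stand-in for the rmi_write_block()/rmi_read_block() pair and fabricates data instead of touching hardware:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define CHUNK 32 /* F54_REPORT_DATA_SIZE: SMBus transfers top out at 32 bytes */

/* stand-in for the transport accessors; fabricates data */
static int read_block(unsigned int offset, uint8_t *buf, int len)
{
	memset(buf, 0xa5, len);
	return 0;
}

static int read_report(uint8_t *report, int report_size)
{
	for (int i = 0; i < report_size; i += CHUNK) {
		int size = report_size - i < CHUNK ? report_size - i : CHUNK;

		/* the driver also rewrites the FIFO start offset to i here */
		if (read_block(i, report + i, size))
			return -1;
	}
	return 0;
}

int main(void)
{
	uint8_t report[100];

	printf("%d\n", read_report(report, sizeof(report)));
	return 0;
}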
+ */ +#define F54_REPORT_DATA_SIZE 32 + /* F54 commands */ #define F54_GET_REPORT 1 #define F54_FORCE_CAL 2 @@ -526,6 +532,7 @@ static void rmi_f54_work(struct work_struct *work) int report_size; u8 command; int error; + int i; report_size = rmi_f54_get_report_size(f54); if (report_size == 0) { @@ -558,23 +565,27 @@ static void rmi_f54_work(struct work_struct *work) rmi_dbg(RMI_DEBUG_FN, &fn->dev, "Get report command completed, reading data\n"); - fifo[0] = 0; - fifo[1] = 0; - error = rmi_write_block(fn->rmi_dev, - fn->fd.data_base_addr + F54_FIFO_OFFSET, - fifo, sizeof(fifo)); - if (error) { - dev_err(&fn->dev, "Failed to set fifo start offset\n"); - goto abort; - } + for (i = 0; i < report_size; i += F54_REPORT_DATA_SIZE) { + int size = min(F54_REPORT_DATA_SIZE, report_size - i); + + fifo[0] = i & 0xff; + fifo[1] = i >> 8; + error = rmi_write_block(fn->rmi_dev, + fn->fd.data_base_addr + F54_FIFO_OFFSET, + fifo, sizeof(fifo)); + if (error) { + dev_err(&fn->dev, "Failed to set fifo start offset\n"); + goto abort; + } - error = rmi_read_block(fn->rmi_dev, fn->fd.data_base_addr + - F54_REPORT_DATA_OFFSET, f54->report_data, - report_size); - if (error) { - dev_err(&fn->dev, "%s: read [%d bytes] returned %d\n", - __func__, report_size, error); - goto abort; + error = rmi_read_block(fn->rmi_dev, fn->fd.data_base_addr + + F54_REPORT_DATA_OFFSET, + f54->report_data + i, size); + if (error) { + dev_err(&fn->dev, "%s: read [%d bytes] returned %d\n", + __func__, size, error); + goto abort; + } } abort: diff --git a/drivers/input/rmi4/rmi_smbus.c b/drivers/input/rmi4/rmi_smbus.c index b313c579914f..2407ea43de59 100644 --- a/drivers/input/rmi4/rmi_smbus.c +++ b/drivers/input/rmi4/rmi_smbus.c @@ -163,6 +163,7 @@ static int rmi_smb_write_block(struct rmi_transport_dev *xport, u16 rmiaddr, /* prepare to write next block of bytes */ cur_len -= SMB_MAX_COUNT; databuff += SMB_MAX_COUNT; + rmiaddr += SMB_MAX_COUNT; } exit: mutex_unlock(&rmi_smb->page_mutex); @@ -214,6 +215,7 @@ static int rmi_smb_read_block(struct rmi_transport_dev *xport, u16 rmiaddr, /* prepare to read next block of bytes */ cur_len -= SMB_MAX_COUNT; databuff += SMB_MAX_COUNT; + rmiaddr += SMB_MAX_COUNT; } retval = 0; diff --git a/drivers/input/serio/gscps2.c b/drivers/input/serio/gscps2.c index 96f9b5397367..2f9775de3c5b 100644 --- a/drivers/input/serio/gscps2.c +++ b/drivers/input/serio/gscps2.c @@ -349,7 +349,7 @@ static int __init gscps2_probe(struct parisc_device *dev) ps2port->port = serio; ps2port->padev = dev; - ps2port->addr = ioremap_nocache(hpa, GSC_STATUS + 4); + ps2port->addr = ioremap(hpa, GSC_STATUS + 4); spin_lock_init(&ps2port->lock); gscps2_reset(ps2port); diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c index 2ca586fb914f..e08b0ef078e8 100644 --- a/drivers/input/tablet/aiptek.c +++ b/drivers/input/tablet/aiptek.c @@ -1713,7 +1713,7 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id) aiptek->inputdev = inputdev; aiptek->intf = intf; - aiptek->ifnum = intf->altsetting[0].desc.bInterfaceNumber; + aiptek->ifnum = intf->cur_altsetting->desc.bInterfaceNumber; aiptek->inDelay = 0; aiptek->endDelay = 0; aiptek->previousJitterable = 0; @@ -1802,14 +1802,14 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id) input_set_abs_params(inputdev, ABS_WHEEL, AIPTEK_WHEEL_MIN, AIPTEK_WHEEL_MAX - 1, 0, 0); /* Verify that a device really has an endpoint */ - if (intf->altsetting[0].desc.bNumEndpoints < 1) { + if (intf->cur_altsetting->desc.bNumEndpoints < 1) { 
dev_err(&intf->dev, "interface has %d endpoints, but must have minimum 1\n", - intf->altsetting[0].desc.bNumEndpoints); + intf->cur_altsetting->desc.bNumEndpoints); err = -EINVAL; goto fail3; } - endpoint = &intf->altsetting[0].endpoint[0].desc; + endpoint = &intf->cur_altsetting->endpoint[0].desc; /* Go set up our URB, which is called when the tablet receives * input. diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c index 35031228a6d0..96d65575f75a 100644 --- a/drivers/input/tablet/gtco.c +++ b/drivers/input/tablet/gtco.c @@ -875,18 +875,14 @@ static int gtco_probe(struct usb_interface *usbinterface, } /* Sanity check that a device has an endpoint */ - if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) { + if (usbinterface->cur_altsetting->desc.bNumEndpoints < 1) { dev_err(&usbinterface->dev, "Invalid number of endpoints\n"); error = -EINVAL; goto err_free_urb; } - /* - * The endpoint is always altsetting 0, we know this since we know - * this device only has one interrupt endpoint - */ - endpoint = &usbinterface->altsetting[0].endpoint[0].desc; + endpoint = &usbinterface->cur_altsetting->endpoint[0].desc; /* Some debug */ dev_dbg(&usbinterface->dev, "gtco # interfaces: %d\n", usbinterface->num_altsetting); @@ -896,7 +892,8 @@ static int gtco_probe(struct usb_interface *usbinterface, if (usb_endpoint_xfer_int(endpoint)) dev_dbg(&usbinterface->dev, "endpoint: we have interrupt endpoint\n"); - dev_dbg(&usbinterface->dev, "endpoint extra len:%d\n", usbinterface->altsetting[0].extralen); + dev_dbg(&usbinterface->dev, "interface extra len:%d\n", + usbinterface->cur_altsetting->extralen); /* * Find the HID descriptor so we can find out the size of the @@ -973,8 +970,6 @@ static int gtco_probe(struct usb_interface *usbinterface, input_dev->dev.parent = &usbinterface->dev; /* Setup the URB, it will be posted later on open of input device */ - endpoint = &usbinterface->altsetting[0].endpoint[0].desc; - usb_fill_int_urb(gtco->urbinfo, udev, usb_rcvintpipe(udev, diff --git a/drivers/input/tablet/pegasus_notetaker.c b/drivers/input/tablet/pegasus_notetaker.c index a1f3a0cb197e..38f087404f7a 100644 --- a/drivers/input/tablet/pegasus_notetaker.c +++ b/drivers/input/tablet/pegasus_notetaker.c @@ -275,7 +275,7 @@ static int pegasus_probe(struct usb_interface *intf, return -ENODEV; /* Sanity check that the device has an endpoint */ - if (intf->altsetting[0].desc.bNumEndpoints < 1) { + if (intf->cur_altsetting->desc.bNumEndpoints < 1) { dev_err(&intf->dev, "Invalid number of endpoints\n"); return -EINVAL; } diff --git a/drivers/input/touchscreen/sun4i-ts.c b/drivers/input/touchscreen/sun4i-ts.c index 0af0fe8c40d7..742a7e96c1b5 100644 --- a/drivers/input/touchscreen/sun4i-ts.c +++ b/drivers/input/touchscreen/sun4i-ts.c @@ -237,6 +237,7 @@ static int sun4i_ts_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct device *hwmon; + struct thermal_zone_device *thermal; int error; u32 reg; bool ts_attached; @@ -355,7 +356,10 @@ static int sun4i_ts_probe(struct platform_device *pdev) if (IS_ERR(hwmon)) return PTR_ERR(hwmon); - devm_thermal_zone_of_sensor_register(ts->dev, 0, ts, &sun4i_ts_tz_ops); + thermal = devm_thermal_zone_of_sensor_register(ts->dev, 0, ts, + &sun4i_ts_tz_ops); + if (IS_ERR(thermal)) + return PTR_ERR(thermal); writel(TEMP_IRQ_EN(1), ts->base + TP_INT_FIFOC); diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c index 1dd47dda71cd..34d31c7ec8ba 100644 --- 
a/drivers/input/touchscreen/sur40.c +++ b/drivers/input/touchscreen/sur40.c @@ -661,7 +661,7 @@ static int sur40_probe(struct usb_interface *interface, int error; /* Check if we really have the right interface. */ - iface_desc = &interface->altsetting[0]; + iface_desc = interface->cur_altsetting; if (iface_desc->desc.bInterfaceClass != 0xFF) return -ENODEV; diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig index c49afbea3458..2f9304d1db49 100644 --- a/drivers/interconnect/qcom/Kconfig +++ b/drivers/interconnect/qcom/Kconfig @@ -6,13 +6,13 @@ config INTERCONNECT_QCOM Support for Qualcomm's Network-on-Chip interconnect hardware. config INTERCONNECT_QCOM_MSM8974 - tristate "Qualcomm MSM8974 interconnect driver" - depends on INTERCONNECT_QCOM - depends on QCOM_SMD_RPM - select INTERCONNECT_QCOM_SMD_RPM - help - This is a driver for the Qualcomm Network-on-Chip on msm8974-based - platforms. + tristate "Qualcomm MSM8974 interconnect driver" + depends on INTERCONNECT_QCOM + depends on QCOM_SMD_RPM + select INTERCONNECT_QCOM_SMD_RPM + help + This is a driver for the Qualcomm Network-on-Chip on msm8974-based + platforms. config INTERCONNECT_QCOM_QCS404 tristate "Qualcomm QCS404 interconnect driver" diff --git a/drivers/interconnect/qcom/msm8974.c b/drivers/interconnect/qcom/msm8974.c index ce599a0c83d9..bf8bd1aee358 100644 --- a/drivers/interconnect/qcom/msm8974.c +++ b/drivers/interconnect/qcom/msm8974.c @@ -652,7 +652,7 @@ static int msm8974_icc_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct icc_onecell_data *data; struct icc_provider *provider; - struct icc_node *node; + struct icc_node *node, *tmp; size_t num_nodes, i; int ret; @@ -732,7 +732,7 @@ static int msm8974_icc_probe(struct platform_device *pdev) return 0; err_del_icc: - list_for_each_entry(node, &provider->nodes, node_list) { + list_for_each_entry_safe(node, tmp, &provider->nodes, node_list) { icc_node_del(node); icc_node_destroy(node->id); } @@ -748,9 +748,9 @@ static int msm8974_icc_remove(struct platform_device *pdev) { struct msm8974_icc_provider *qp = platform_get_drvdata(pdev); struct icc_provider *provider = &qp->provider; - struct icc_node *n; + struct icc_node *n, *tmp; - list_for_each_entry(n, &provider->nodes, node_list) { + list_for_each_entry_safe(n, tmp, &provider->nodes, node_list) { icc_node_del(n); icc_node_destroy(n->id); } diff --git a/drivers/interconnect/qcom/qcs404.c b/drivers/interconnect/qcom/qcs404.c index b4966d8f3348..8e0735a87040 100644 --- a/drivers/interconnect/qcom/qcs404.c +++ b/drivers/interconnect/qcom/qcs404.c @@ -414,7 +414,7 @@ static int qnoc_probe(struct platform_device *pdev) struct icc_provider *provider; struct qcom_icc_node **qnodes; struct qcom_icc_provider *qp; - struct icc_node *node; + struct icc_node *node, *tmp; size_t num_nodes, i; int ret; @@ -494,7 +494,7 @@ static int qnoc_probe(struct platform_device *pdev) return 0; err: - list_for_each_entry(node, &provider->nodes, node_list) { + list_for_each_entry_safe(node, tmp, &provider->nodes, node_list) { icc_node_del(node); icc_node_destroy(node->id); } @@ -508,9 +508,9 @@ static int qnoc_remove(struct platform_device *pdev) { struct qcom_icc_provider *qp = platform_get_drvdata(pdev); struct icc_provider *provider = &qp->provider; - struct icc_node *n; + struct icc_node *n, *tmp; - list_for_each_entry(n, &provider->nodes, node_list) { + list_for_each_entry_safe(n, tmp, &provider->nodes, node_list) { icc_node_del(n); icc_node_destroy(n->id); } diff --git 
a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c index 502a6c22b41e..387267ee9648 100644 --- a/drivers/interconnect/qcom/sdm845.c +++ b/drivers/interconnect/qcom/sdm845.c @@ -868,9 +868,9 @@ static int qnoc_remove(struct platform_device *pdev) { struct qcom_icc_provider *qp = platform_get_drvdata(pdev); struct icc_provider *provider = &qp->provider; - struct icc_node *n; + struct icc_node *n, *tmp; - list_for_each_entry(n, &provider->nodes, node_list) { + list_for_each_entry_safe(n, tmp, &provider->nodes, node_list) { icc_node_del(n); icc_node_destroy(n->id); } diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 568c52317757..823cc4ef51fd 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -440,7 +440,7 @@ static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end) return NULL; } - return (u8 __iomem *)ioremap_nocache(address, end); + return (u8 __iomem *)ioremap(address, end); } static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu) @@ -1655,27 +1655,39 @@ static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, static void init_iommu_perf_ctr(struct amd_iommu *iommu) { struct pci_dev *pdev = iommu->dev; - u64 val = 0xabcd, val2 = 0; + u64 val = 0xabcd, val2 = 0, save_reg = 0; if (!iommu_feature(iommu, FEATURE_PC)) return; amd_iommu_pc_present = true; + /* save the value to restore, if writable */ + if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false)) + goto pc_false; + /* Check if the performance counters can be written to */ if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) || (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) || - (val != val2)) { - pci_err(pdev, "Unable to write to IOMMU perf counter.\n"); - amd_iommu_pc_present = false; - return; - } + (val != val2)) + goto pc_false; + + /* restore */ + if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true)) + goto pc_false; pci_info(pdev, "IOMMU performance counters supported\n"); val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET); iommu->max_banks = (u8) ((val >> 12) & 0x3f); iommu->max_counters = (u8) ((val >> 7) & 0xf); + + return; + +pc_false: + pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n"); + amd_iommu_pc_present = false; + return; } static ssize_t amd_iommu_show_cap(struct device *dev, diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 0cc702a70a96..a2e96a5fd9a7 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -19,6 +19,7 @@ #include <linux/iova.h> #include <linux/irq.h> #include <linux/mm.h> +#include <linux/mutex.h> #include <linux/pci.h> #include <linux/scatterlist.h> #include <linux/vmalloc.h> @@ -44,7 +45,6 @@ struct iommu_dma_cookie { dma_addr_t msi_iova; }; struct list_head msi_page_list; - spinlock_t msi_lock; /* Domain for flush queue callback; NULL if flush queue not in use */ struct iommu_domain *fq_domain; @@ -63,7 +63,6 @@ static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type) cookie = kzalloc(sizeof(*cookie), GFP_KERNEL); if (cookie) { - spin_lock_init(&cookie->msi_lock); INIT_LIST_HEAD(&cookie->msi_page_list); cookie->type = type; } @@ -399,7 +398,7 @@ static int dma_info_to_prot(enum dma_data_direction dir, bool coherent, } static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain, - size_t size, dma_addr_t dma_limit, struct device *dev) + size_t size, u64 dma_limit, struct device *dev) { struct iommu_dma_cookie *cookie = domain->iova_cookie; struct iova_domain *iovad = 
&cookie->iovad; @@ -424,7 +423,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain, dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit); if (domain->geometry.force_aperture) - dma_limit = min(dma_limit, domain->geometry.aperture_end); + dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end); /* Try to get PCI devices a SAC address */ if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev)) @@ -477,7 +476,7 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr, } static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys, - size_t size, int prot, dma_addr_t dma_mask) + size_t size, int prot, u64 dma_mask) { struct iommu_domain *domain = iommu_get_dma_domain(dev); struct iommu_dma_cookie *cookie = domain->iova_cookie; @@ -1176,7 +1175,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev, if (msi_page->phys == msi_addr) return msi_page; - msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC); + msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL); if (!msi_page) return NULL; @@ -1204,25 +1203,22 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr) { struct device *dev = msi_desc_to_dev(desc); struct iommu_domain *domain = iommu_get_domain_for_dev(dev); - struct iommu_dma_cookie *cookie; struct iommu_dma_msi_page *msi_page; - unsigned long flags; + static DEFINE_MUTEX(msi_prepare_lock); /* see below */ if (!domain || !domain->iova_cookie) { desc->iommu_cookie = NULL; return 0; } - cookie = domain->iova_cookie; - /* - * We disable IRQs to rule out a possible inversion against - * irq_desc_lock if, say, someone tries to retarget the affinity - * of an MSI from within an IPI handler. + * In fact the whole prepare operation should already be serialised by + * irq_domain_mutex further up the callchain, but that's pretty subtle + * on its own, so consider this locking as failsafe documentation... */ - spin_lock_irqsave(&cookie->msi_lock, flags); + mutex_lock(&msi_prepare_lock); msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain); - spin_unlock_irqrestore(&cookie->msi_lock, flags); + mutex_unlock(&msi_prepare_lock); msi_desc_set_iommu_cookie(desc, msi_page); diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 0c8d81f56a30..932267f49f9a 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -5163,7 +5163,8 @@ static void dmar_remove_one_dev_info(struct device *dev) spin_lock_irqsave(&device_domain_lock, flags); info = dev->archdata.iommu; - if (info) + if (info && info != DEFER_DEVICE_DOMAIN_INFO + && info != DUMMY_DEVICE_DOMAIN_INFO) __dmar_remove_one_dev_info(info); spin_unlock_irqrestore(&device_domain_lock, flags); } @@ -5478,9 +5479,6 @@ static int intel_iommu_map(struct iommu_domain *domain, int prot = 0; int ret; - if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN) - return -EINVAL; - if (iommu_prot & IOMMU_READ) prot |= DMA_PTE_READ; if (iommu_prot & IOMMU_WRITE) @@ -5523,8 +5521,6 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain, /* Cope with horrid API which requires us to unmap more than the size argument if it happens to be a large-page mapping. 
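
A note on the dma-iommu hunk above: dropping msi_lock pairs with the GFP_ATOMIC -> GFP_KERNEL change in iommu_dma_get_msi_page(). Once the lookup-or-create path may sleep, a spinlock can no longer cover it, so serialization moves to a (static) mutex. A minimal sketch of the resulting pattern; the helper and list names here are illustrative, not part of the patch:

	#include <linux/list.h>
	#include <linux/mutex.h>
	#include <linux/slab.h>

	static DEFINE_MUTEX(prepare_lock);
	static LIST_HEAD(msi_pages);

	struct msi_page {
		struct list_head list;
		phys_addr_t phys;
	};

	/* Lookup-or-create under a sleeping lock: the allocation may
	 * sleep (GFP_KERNEL), which a spinlock would forbid. */
	static struct msi_page *msi_page_get(phys_addr_t phys)
	{
		struct msi_page *p;

		mutex_lock(&prepare_lock);
		list_for_each_entry(p, &msi_pages, list)
			if (p->phys == phys)
				goto out;

		p = kzalloc(sizeof(*p), GFP_KERNEL);	/* sleeping allocation */
		if (p) {
			p->phys = phys;
			list_add(&p->list, &msi_pages);
		}
	out:
		mutex_unlock(&prepare_lock);
		return p;
	}
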
*/ BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level)); - if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN) - return 0; if (size < VTD_PAGE_SIZE << level_to_offset_bits(level)) size = VTD_PAGE_SIZE << level_to_offset_bits(level); @@ -5556,9 +5552,6 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, int level = 0; u64 phys = 0; - if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN) - return 0; - pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level); if (pte) phys = dma_pte_addr(pte); @@ -5632,8 +5625,10 @@ static int intel_iommu_add_device(struct device *dev) group = iommu_group_get_for_dev(dev); - if (IS_ERR(group)) - return PTR_ERR(group); + if (IS_ERR(group)) { + ret = PTR_ERR(group); + goto unlink; + } iommu_group_put(group); @@ -5659,7 +5654,8 @@ static int intel_iommu_add_device(struct device *dev) if (!get_private_domain_for_dev(dev)) { dev_warn(dev, "Failed to get a private domain.\n"); - return -ENOMEM; + ret = -ENOMEM; + goto unlink; } dev_info(dev, @@ -5674,6 +5670,10 @@ static int intel_iommu_add_device(struct device *dev) } return 0; + +unlink: + iommu_device_unlink(&iommu->iommu, dev); + return ret; } static void intel_iommu_remove_device(struct device *dev) @@ -5736,8 +5736,8 @@ static void intel_iommu_get_resv_regions(struct device *device, struct pci_dev *pdev = to_pci_dev(device); if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) { - reg = iommu_alloc_resv_region(0, 1UL << 24, 0, - IOMMU_RESV_DIRECT); + reg = iommu_alloc_resv_region(0, 1UL << 24, prot, + IOMMU_RESV_DIRECT_RELAXABLE); if (reg) list_add_tail(®->list, head); } @@ -5825,6 +5825,13 @@ static void intel_iommu_apply_resv_region(struct device *dev, WARN_ON_ONCE(!reserve_iova(&dmar_domain->iovad, start, end)); } +static struct iommu_group *intel_iommu_device_group(struct device *dev) +{ + if (dev_is_pci(dev)) + return pci_device_group(dev); + return generic_device_group(dev); +} + #ifdef CONFIG_INTEL_IOMMU_SVM struct intel_iommu *intel_svm_device_to_iommu(struct device *dev) { @@ -5997,7 +6004,7 @@ const struct iommu_ops intel_iommu_ops = { .get_resv_regions = intel_iommu_get_resv_regions, .put_resv_regions = intel_iommu_put_resv_regions, .apply_resv_region = intel_iommu_apply_resv_region, - .device_group = pci_device_group, + .device_group = intel_iommu_device_group, .dev_has_feat = intel_iommu_dev_has_feat, .dev_feat_enabled = intel_iommu_dev_feat_enabled, .dev_enable_feat = intel_iommu_dev_enable_feat, diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c index 9b159132405d..dca88f9fdf29 100644 --- a/drivers/iommu/intel-svm.c +++ b/drivers/iommu/intel-svm.c @@ -104,11 +104,7 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d { struct qi_desc desc; - /* - * Do PASID granu IOTLB invalidation if page selective capability is - * not available. 
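
In the dmar_remove_one_dev_info() hunk earlier in this file, dev->archdata.iommu may hold the DEFER_DEVICE_DOMAIN_INFO or DUMMY_DEVICE_DOMAIN_INFO sentinel cookies rather than a real pointer, so both must be filtered out before the info is dereferenced. A hedged sketch of that guard as a helper (the helper name is illustrative; the sentinel macros are the driver's own):

	/* archdata.iommu can carry sentinel cookies, not just real
	 * device_domain_info pointers; filter them before any use. */
	static inline bool dev_info_is_real(void *info)
	{
		return info && info != DEFER_DEVICE_DOMAIN_INFO &&
		       info != DUMMY_DEVICE_DOMAIN_INFO;
	}
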
- */ - if (pages == -1 || !cap_pgsel_inv(svm->iommu->cap)) { + if (pages == -1) { desc.qw0 = QI_EIOTLB_PASID(svm->pasid) | QI_EIOTLB_DID(sdev->did) | QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) | diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index db7bfd4f2d20..3ead597e1c57 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -312,8 +312,8 @@ int iommu_insert_resv_region(struct iommu_resv_region *new, list_for_each_entry_safe(iter, tmp, regions, list) { phys_addr_t top_end, iter_end = iter->start + iter->length - 1; - /* no merge needed on elements of different types than @nr */ - if (iter->type != nr->type) { + /* no merge needed on elements of different types than @new */ + if (iter->type != new->type) { list_move_tail(&iter->list, &stack); continue; } @@ -751,6 +751,7 @@ err_put_group: mutex_unlock(&group->mutex); dev->iommu_group = NULL; kobject_put(group->devices_kobj); + sysfs_remove_link(group->devices_kobj, device->name); err_free_name: kfree(device->name); err_remove_link: @@ -2282,13 +2283,13 @@ request_default_domain_for_dev(struct device *dev, unsigned long type) goto out; } - iommu_group_create_direct_mappings(group, dev); - /* Make the domain the default for this group */ if (group->default_domain) iommu_domain_free(group->default_domain); group->default_domain = domain; + iommu_group_create_direct_mappings(group, dev); + dev_info(dev, "Using iommu %s mapping\n", type == IOMMU_DOMAIN_DMA ? "dma" : "direct"); diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index 41c605b0058f..c7a914b9bbbc 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -233,7 +233,7 @@ static DEFINE_MUTEX(iova_cache_mutex); struct iova *alloc_iova_mem(void) { - return kmem_cache_alloc(iova_cache, GFP_ATOMIC); + return kmem_cache_zalloc(iova_cache, GFP_ATOMIC); } EXPORT_SYMBOL(alloc_iova_mem); diff --git a/drivers/ipack/carriers/tpci200.c b/drivers/ipack/carriers/tpci200.c index d246d74ec3a5..23445ebfda5c 100644 --- a/drivers/ipack/carriers/tpci200.c +++ b/drivers/ipack/carriers/tpci200.c @@ -298,7 +298,7 @@ static int tpci200_register(struct tpci200_board *tpci200) /* Map internal tpci200 driver user space */ tpci200->info->interface_regs = - ioremap_nocache(pci_resource_start(tpci200->info->pdev, + ioremap(pci_resource_start(tpci200->info->pdev, TPCI200_IP_INTERFACE_BAR), TPCI200_IFACE_SIZE); if (!tpci200->info->interface_regs) { @@ -541,7 +541,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev, ret = -EBUSY; goto out_err_pci_request; } - tpci200->info->cfg_regs = ioremap_nocache( + tpci200->info->cfg_regs = ioremap( pci_resource_start(pdev, TPCI200_CFG_MEM_BAR), pci_resource_len(pdev, TPCI200_CFG_MEM_BAR)); if (!tpci200->info->cfg_regs) { diff --git a/drivers/ipack/devices/ipoctal.c b/drivers/ipack/devices/ipoctal.c index 9c2a4b5d30cf..d480a514c983 100644 --- a/drivers/ipack/devices/ipoctal.c +++ b/drivers/ipack/devices/ipoctal.c @@ -276,7 +276,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr, ipoctal->board_id = ipoctal->dev->id_device; region = &ipoctal->dev->region[IPACK_IO_SPACE]; - addr = devm_ioremap_nocache(&ipoctal->dev->dev, + addr = devm_ioremap(&ipoctal->dev->dev, region->start, region->size); if (!addr) { dev_err(&ipoctal->dev->dev, @@ -292,7 +292,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr, region = &ipoctal->dev->region[IPACK_INT_SPACE]; ipoctal->int_space = - devm_ioremap_nocache(&ipoctal->dev->dev, + devm_ioremap(&ipoctal->dev->dev, region->start, region->size); if 
(!ipoctal->int_space) { dev_err(&ipoctal->dev->dev, @@ -303,7 +303,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr, region = &ipoctal->dev->region[IPACK_MEM8_SPACE]; ipoctal->mem8_space = - devm_ioremap_nocache(&ipoctal->dev->dev, + devm_ioremap(&ipoctal->dev->dev, region->start, 0x8000); if (!ipoctal->mem8_space) { dev_err(&ipoctal->dev->dev, diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 697e6a8ccaae..1006c694d9fb 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -457,6 +457,12 @@ config IMX_IRQSTEER help Support for the i.MX IRQSTEER interrupt multiplexer/remapper. +config IMX_INTMUX + def_bool y if ARCH_MXC + select IRQ_DOMAIN + help + Support for the i.MX INTMUX interrupt multiplexer. + config LS1X_IRQ bool "Loongson-1 Interrupt Controller" depends on MACH_LOONGSON32 @@ -490,6 +496,7 @@ config TI_SCI_INTA_IRQCHIP config SIFIVE_PLIC bool "SiFive Platform-Level Interrupt Controller" depends on RISCV + select IRQ_DOMAIN_HIERARCHY help This enables support for the PLIC chip found in SiFive (and potentially other) RISC-V systems. The PLIC controls devices @@ -499,4 +506,11 @@ config SIFIVE_PLIC If you don't know what to do here, say Y. +config EXYNOS_IRQ_COMBINER + bool "Samsung Exynos IRQ combiner support" if COMPILE_TEST + depends on (ARCH_EXYNOS && ARM) || COMPILE_TEST + help + Say yes here to add support for the IRQ combiner devices embedded + in Samsung Exynos chips. + endmenu diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index e806dda690ea..eae0d78cbf22 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -9,7 +9,7 @@ obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2836.o obj-$(CONFIG_DAVINCI_AINTC) += irq-davinci-aintc.o obj-$(CONFIG_DAVINCI_CP_INTC) += irq-davinci-cp-intc.o -obj-$(CONFIG_ARCH_EXYNOS) += exynos-combiner.o +obj-$(CONFIG_EXYNOS_IRQ_COMBINER) += exynos-combiner.o obj-$(CONFIG_FARADAY_FTINTC010) += irq-ftintc010.o obj-$(CONFIG_ARCH_HIP04) += irq-hip04.o obj-$(CONFIG_ARCH_LPC32XX) += irq-lpc32xx.o @@ -87,7 +87,7 @@ obj-$(CONFIG_MVEBU_SEI) += irq-mvebu-sei.o obj-$(CONFIG_LS_EXTIRQ) += irq-ls-extirq.o obj-$(CONFIG_LS_SCFG_MSI) += irq-ls-scfg-msi.o obj-$(CONFIG_EZNPS_GIC) += irq-eznps.o -obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o +obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o irq-aspeed-scu-ic.o obj-$(CONFIG_STM32_EXTI) += irq-stm32-exti.o obj-$(CONFIG_QCOM_IRQ_COMBINER) += qcom-irq-combiner.o obj-$(CONFIG_IRQ_UNIPHIER_AIDET) += irq-uniphier-aidet.o @@ -100,6 +100,7 @@ obj-$(CONFIG_CSKY_MPINTC) += irq-csky-mpintc.o obj-$(CONFIG_CSKY_APB_INTC) += irq-csky-apb-intc.o obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o obj-$(CONFIG_IMX_IRQSTEER) += irq-imx-irqsteer.o +obj-$(CONFIG_IMX_INTMUX) += irq-imx-intmux.o obj-$(CONFIG_MADERA_IRQ) += irq-madera.o obj-$(CONFIG_LS1X_IRQ) += irq-ls1x.o obj-$(CONFIG_TI_SCI_INTR_IRQCHIP) += irq-ti-sci-intr.o diff --git a/drivers/irqchip/irq-aspeed-scu-ic.c b/drivers/irqchip/irq-aspeed-scu-ic.c new file mode 100644 index 000000000000..c90a3346b985 --- /dev/null +++ b/drivers/irqchip/irq-aspeed-scu-ic.c @@ -0,0 +1,239 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Aspeed AST24XX, AST25XX, and AST26XX SCU Interrupt Controller + * Copyright 2019 IBM Corporation + * + * Eddie James <eajames@linux.ibm.com> + */ + +#include <linux/bitops.h> +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> 
+#include <linux/mfd/syscon.h> +#include <linux/of_irq.h> +#include <linux/regmap.h> + +#define ASPEED_SCU_IC_REG 0x018 +#define ASPEED_SCU_IC_SHIFT 0 +#define ASPEED_SCU_IC_ENABLE GENMASK(6, ASPEED_SCU_IC_SHIFT) +#define ASPEED_SCU_IC_NUM_IRQS 7 +#define ASPEED_SCU_IC_STATUS_SHIFT 16 + +#define ASPEED_AST2600_SCU_IC0_REG 0x560 +#define ASPEED_AST2600_SCU_IC0_SHIFT 0 +#define ASPEED_AST2600_SCU_IC0_ENABLE \ + GENMASK(5, ASPEED_AST2600_SCU_IC0_SHIFT) +#define ASPEED_AST2600_SCU_IC0_NUM_IRQS 6 + +#define ASPEED_AST2600_SCU_IC1_REG 0x570 +#define ASPEED_AST2600_SCU_IC1_SHIFT 4 +#define ASPEED_AST2600_SCU_IC1_ENABLE \ + GENMASK(5, ASPEED_AST2600_SCU_IC1_SHIFT) +#define ASPEED_AST2600_SCU_IC1_NUM_IRQS 2 + +struct aspeed_scu_ic { + unsigned long irq_enable; + unsigned long irq_shift; + unsigned int num_irqs; + unsigned int reg; + struct regmap *scu; + struct irq_domain *irq_domain; +}; + +static void aspeed_scu_ic_irq_handler(struct irq_desc *desc) +{ + unsigned int irq; + unsigned int sts; + unsigned long bit; + unsigned long enabled; + unsigned long max; + unsigned long status; + struct aspeed_scu_ic *scu_ic = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + unsigned int mask = scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT; + + chained_irq_enter(chip, desc); + + /* + * The SCU IC has just one register to control its operation and read + * status. The interrupt enable bits occupy the lower 16 bits of the + * register, while the interrupt status bits occupy the upper 16 bits. + * The status bit for a given interrupt is always 16 bits shifted from + * the enable bit for the same interrupt. + * Therefore, perform the IRQ operations in the enable bit space by + * shifting the status down to get the mapping and then back up to + * clear the bit. + */ + regmap_read(scu_ic->scu, scu_ic->reg, &sts); + enabled = sts & scu_ic->irq_enable; + status = (sts >> ASPEED_SCU_IC_STATUS_SHIFT) & enabled; + + bit = scu_ic->irq_shift; + max = scu_ic->num_irqs + bit; + + for_each_set_bit_from(bit, &status, max) { + irq = irq_find_mapping(scu_ic->irq_domain, + bit - scu_ic->irq_shift); + generic_handle_irq(irq); + + regmap_update_bits(scu_ic->scu, scu_ic->reg, mask, + BIT(bit + ASPEED_SCU_IC_STATUS_SHIFT)); + } + + chained_irq_exit(chip, desc); +} + +static void aspeed_scu_ic_irq_mask(struct irq_data *data) +{ + struct aspeed_scu_ic *scu_ic = irq_data_get_irq_chip_data(data); + unsigned int mask = BIT(data->hwirq + scu_ic->irq_shift) | + (scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT); + + /* + * Status bits are cleared by writing 1. In order to prevent the mask + * operation from clearing the status bits, they should be under the + * mask and written with 0. + */ + regmap_update_bits(scu_ic->scu, scu_ic->reg, mask, 0); +} + +static void aspeed_scu_ic_irq_unmask(struct irq_data *data) +{ + struct aspeed_scu_ic *scu_ic = irq_data_get_irq_chip_data(data); + unsigned int bit = BIT(data->hwirq + scu_ic->irq_shift); + unsigned int mask = bit | + (scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT); + + /* + * Status bits are cleared by writing 1. In order to prevent the unmask + * operation from clearing the status bits, they should be under the + * mask and written with 0. 
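
The mask/unmask pair in this new driver hinges on the register layout spelled out in the handler comment: enable bits live in the low half of the register, and their write-1-to-clear status twins sit 16 bits higher. A condensed sketch of the arithmetic, assuming a variant with irq_shift == 0 (the helper name is illustrative):

	#include <linux/bits.h>
	#include <linux/regmap.h>

	#define SCU_IC_STATUS_SHIFT	16

	/* Enable bit for hwirq n is BIT(n); its W1C status bit is
	 * BIT(n + 16). Keeping every status bit inside @mask with a 0
	 * value guarantees the read-modify-write never writes 1 to
	 * (and thereby clears) a pending status bit. */
	static void scu_ic_set_source(struct regmap *map, unsigned int reg,
				      unsigned long enable_field,
				      unsigned int hwirq, bool enable)
	{
		unsigned int bit = BIT(hwirq);
		unsigned int mask = bit | (enable_field << SCU_IC_STATUS_SHIFT);

		regmap_update_bits(map, reg, mask, enable ? bit : 0);
	}
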
+ */ + regmap_update_bits(scu_ic->scu, scu_ic->reg, mask, bit); +} + +static int aspeed_scu_ic_irq_set_affinity(struct irq_data *data, + const struct cpumask *dest, + bool force) +{ + return -EINVAL; +} + +static struct irq_chip aspeed_scu_ic_chip = { + .name = "aspeed-scu-ic", + .irq_mask = aspeed_scu_ic_irq_mask, + .irq_unmask = aspeed_scu_ic_irq_unmask, + .irq_set_affinity = aspeed_scu_ic_irq_set_affinity, +}; + +static int aspeed_scu_ic_map(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &aspeed_scu_ic_chip, handle_level_irq); + irq_set_chip_data(irq, domain->host_data); + + return 0; +} + +static const struct irq_domain_ops aspeed_scu_ic_domain_ops = { + .map = aspeed_scu_ic_map, +}; + +static int aspeed_scu_ic_of_init_common(struct aspeed_scu_ic *scu_ic, + struct device_node *node) +{ + int irq; + int rc = 0; + + if (!node->parent) { + rc = -ENODEV; + goto err; + } + + scu_ic->scu = syscon_node_to_regmap(node->parent); + if (IS_ERR(scu_ic->scu)) { + rc = PTR_ERR(scu_ic->scu); + goto err; + } + + irq = irq_of_parse_and_map(node, 0); + if (irq < 0) { + rc = irq; + goto err; + } + + scu_ic->irq_domain = irq_domain_add_linear(node, scu_ic->num_irqs, + &aspeed_scu_ic_domain_ops, + scu_ic); + if (!scu_ic->irq_domain) { + rc = -ENOMEM; + goto err; + } + + irq_set_chained_handler_and_data(irq, aspeed_scu_ic_irq_handler, + scu_ic); + + return 0; + +err: + kfree(scu_ic); + + return rc; +} + +static int __init aspeed_scu_ic_of_init(struct device_node *node, + struct device_node *parent) +{ + struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL); + + if (!scu_ic) + return -ENOMEM; + + scu_ic->irq_enable = ASPEED_SCU_IC_ENABLE; + scu_ic->irq_shift = ASPEED_SCU_IC_SHIFT; + scu_ic->num_irqs = ASPEED_SCU_IC_NUM_IRQS; + scu_ic->reg = ASPEED_SCU_IC_REG; + + return aspeed_scu_ic_of_init_common(scu_ic, node); +} + +static int __init aspeed_ast2600_scu_ic0_of_init(struct device_node *node, + struct device_node *parent) +{ + struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL); + + if (!scu_ic) + return -ENOMEM; + + scu_ic->irq_enable = ASPEED_AST2600_SCU_IC0_ENABLE; + scu_ic->irq_shift = ASPEED_AST2600_SCU_IC0_SHIFT; + scu_ic->num_irqs = ASPEED_AST2600_SCU_IC0_NUM_IRQS; + scu_ic->reg = ASPEED_AST2600_SCU_IC0_REG; + + return aspeed_scu_ic_of_init_common(scu_ic, node); +} + +static int __init aspeed_ast2600_scu_ic1_of_init(struct device_node *node, + struct device_node *parent) +{ + struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL); + + if (!scu_ic) + return -ENOMEM; + + scu_ic->irq_enable = ASPEED_AST2600_SCU_IC1_ENABLE; + scu_ic->irq_shift = ASPEED_AST2600_SCU_IC1_SHIFT; + scu_ic->num_irqs = ASPEED_AST2600_SCU_IC1_NUM_IRQS; + scu_ic->reg = ASPEED_AST2600_SCU_IC1_REG; + + return aspeed_scu_ic_of_init_common(scu_ic, node); +} + +IRQCHIP_DECLARE(ast2400_scu_ic, "aspeed,ast2400-scu-ic", aspeed_scu_ic_of_init); +IRQCHIP_DECLARE(ast2500_scu_ic, "aspeed,ast2500-scu-ic", aspeed_scu_ic_of_init); +IRQCHIP_DECLARE(ast2600_scu_ic0, "aspeed,ast2600-scu-ic0", + aspeed_ast2600_scu_ic0_of_init); +IRQCHIP_DECLARE(ast2600_scu_ic1, "aspeed,ast2600-scu-ic1", + aspeed_ast2600_scu_ic1_of_init); diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index e05673bcd52b..f71758632f8d 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -106,6 +106,7 @@ struct its_node { u64 typer; u64 cbaser_save; u32 ctlr_save; + u32 mpidr; struct list_head its_device_list; u64 
flags; unsigned long list_nr; @@ -116,12 +117,22 @@ struct its_node { }; #define is_v4(its) (!!((its)->typer & GITS_TYPER_VLPIS)) +#define is_v4_1(its) (!!((its)->typer & GITS_TYPER_VMAPP)) #define device_ids(its) (FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1) #define ITS_ITT_ALIGN SZ_256 /* The maximum number of VPEID bits supported by VLPI commands */ -#define ITS_MAX_VPEID_BITS (16) +#define ITS_MAX_VPEID_BITS \ + ({ \ + int nvpeid = 16; \ + if (gic_rdists->has_rvpeid && \ + gic_rdists->gicd_typer2 & GICD_TYPER2_VIL) \ + nvpeid = 1 + (gic_rdists->gicd_typer2 & \ + GICD_TYPER2_VID); \ + \ + nvpeid; \ + }) #define ITS_MAX_VPEID (1 << (ITS_MAX_VPEID_BITS)) /* Convert page order to size in bytes */ @@ -216,11 +227,27 @@ static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev, return &its_dev->event_map.vlpi_maps[event]; } -static struct its_collection *irq_to_col(struct irq_data *d) +static struct its_vlpi_map *get_vlpi_map(struct irq_data *d) +{ + if (irqd_is_forwarded_to_vcpu(d)) { + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + + return dev_event_to_vlpi_map(its_dev, event); + } + + return NULL; +} + +static int irq_to_cpuid(struct irq_data *d) { struct its_device *its_dev = irq_data_get_irq_chip_data(d); + struct its_vlpi_map *map = get_vlpi_map(d); - return dev_event_to_col(its_dev, its_get_event_id(d)); + if (map) + return map->vpe->col_idx; + + return its_dev->event_map.col_map[its_get_event_id(d)]; } static struct its_collection *valid_col(struct its_collection *col) @@ -322,6 +349,10 @@ struct its_cmd_desc { u16 seq_num; u16 its_list; } its_vmovp_cmd; + + struct { + struct its_vpe *vpe; + } its_invdb_cmd; }; }; @@ -438,6 +469,38 @@ static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size) its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0); } +static void its_encode_vconf_addr(struct its_cmd_block *cmd, u64 vconf_pa) +{ + its_mask_encode(&cmd->raw_cmd[0], vconf_pa >> 16, 51, 16); +} + +static void its_encode_alloc(struct its_cmd_block *cmd, bool alloc) +{ + its_mask_encode(&cmd->raw_cmd[0], alloc, 8, 8); +} + +static void its_encode_ptz(struct its_cmd_block *cmd, bool ptz) +{ + its_mask_encode(&cmd->raw_cmd[0], ptz, 9, 9); +} + +static void its_encode_vmapp_default_db(struct its_cmd_block *cmd, + u32 vpe_db_lpi) +{ + its_mask_encode(&cmd->raw_cmd[1], vpe_db_lpi, 31, 0); +} + +static void its_encode_vmovp_default_db(struct its_cmd_block *cmd, + u32 vpe_db_lpi) +{ + its_mask_encode(&cmd->raw_cmd[3], vpe_db_lpi, 31, 0); +} + +static void its_encode_db(struct its_cmd_block *cmd, bool db) +{ + its_mask_encode(&cmd->raw_cmd[2], db, 63, 63); +} + static inline void its_fixup_cmd(struct its_cmd_block *cmd) { /* Let's fixup BE commands */ @@ -621,19 +684,45 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its, struct its_cmd_block *cmd, struct its_cmd_desc *desc) { - unsigned long vpt_addr; + unsigned long vpt_addr, vconf_addr; u64 target; - - vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page)); - target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset; + bool alloc; its_encode_cmd(cmd, GITS_CMD_VMAPP); its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id); its_encode_valid(cmd, desc->its_vmapp_cmd.valid); + + if (!desc->its_vmapp_cmd.valid) { + if (is_v4_1(its)) { + alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count); + its_encode_alloc(cmd, alloc); + } + + goto out; + } + + vpt_addr = 
virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page)); + target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset; + its_encode_target(cmd, target); its_encode_vpt_addr(cmd, vpt_addr); its_encode_vpt_size(cmd, LPI_NRBITS - 1); + if (!is_v4_1(its)) + goto out; + + vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page)); + + alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count); + + its_encode_alloc(cmd, alloc); + + /* We can only signal PTZ when alloc==1. Why do we have two bits? */ + its_encode_ptz(cmd, alloc); + its_encode_vconf_addr(cmd, vconf_addr); + its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi); + +out: its_fixup_cmd(cmd); return valid_vpe(its, desc->its_vmapp_cmd.vpe); @@ -645,7 +734,7 @@ static struct its_vpe *its_build_vmapti_cmd(struct its_node *its, { u32 db; - if (desc->its_vmapti_cmd.db_enabled) + if (!is_v4_1(its) && desc->its_vmapti_cmd.db_enabled) db = desc->its_vmapti_cmd.vpe->vpe_db_lpi; else db = 1023; @@ -668,7 +757,7 @@ static struct its_vpe *its_build_vmovi_cmd(struct its_node *its, { u32 db; - if (desc->its_vmovi_cmd.db_enabled) + if (!is_v4_1(its) && desc->its_vmovi_cmd.db_enabled) db = desc->its_vmovi_cmd.vpe->vpe_db_lpi; else db = 1023; @@ -698,6 +787,11 @@ static struct its_vpe *its_build_vmovp_cmd(struct its_node *its, its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id); its_encode_target(cmd, target); + if (is_v4_1(its)) { + its_encode_db(cmd, true); + its_encode_vmovp_default_db(cmd, desc->its_vmovp_cmd.vpe->vpe_db_lpi); + } + its_fixup_cmd(cmd); return valid_vpe(its, desc->its_vmovp_cmd.vpe); @@ -757,6 +851,21 @@ static struct its_vpe *its_build_vclear_cmd(struct its_node *its, return valid_vpe(its, map->vpe); } +static struct its_vpe *its_build_invdb_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + if (WARN_ON(!is_v4_1(its))) + return NULL; + + its_encode_cmd(cmd, GITS_CMD_INVDB); + its_encode_vpeid(cmd, desc->its_invdb_cmd.vpe->vpe_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_invdb_cmd.vpe); +} + static u64 its_cmd_ptr_to_offset(struct its_node *its, struct its_cmd_block *ptr) { @@ -1165,20 +1274,17 @@ static void its_send_vclear(struct its_device *dev, u32 event_id) its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc); } -/* - * irqchip functions - assumes MSI, mostly. - */ -static struct its_vlpi_map *get_vlpi_map(struct irq_data *d) +static void its_send_invdb(struct its_node *its, struct its_vpe *vpe) { - struct its_device *its_dev = irq_data_get_irq_chip_data(d); - u32 event = its_get_event_id(d); - - if (!irqd_is_forwarded_to_vcpu(d)) - return NULL; + struct its_cmd_desc desc; - return dev_event_to_vlpi_map(its_dev, event); + desc.its_invdb_cmd.vpe = vpe; + its_send_single_vcommand(its, its_build_invdb_cmd, &desc); } +/* + * irqchip functions - assumes MSI, mostly. 
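
For reference, the direct_lpi_inv() rework further down distinguishes physical from virtual LPIs by the value written to GICR_INVLPIR. A sketch of the encoding, assuming the GICR_INVLPIR_* field masks this series adds to the GICv3 header and FIELD_PREP() from <linux/bitfield.h>:

	#include <linux/bitfield.h>

	/* GICR_INVLPIR: a bare INTID invalidates a physical LPI; setting
	 * V and adding the vPE ID targets a virtual LPI on a GICv4.1
	 * redistributor. */
	static u64 gicr_invlpir_val(bool virt, u16 vpeid, u32 vintid)
	{
		if (!virt)
			return vintid;

		return GICR_INVLPIR_V |
		       FIELD_PREP(GICR_INVLPIR_VPEID, vpeid) |
		       FIELD_PREP(GICR_INVLPIR_INTID, vintid);
	}
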
+ */ static void lpi_write_config(struct irq_data *d, u8 clr, u8 set) { struct its_vlpi_map *map = get_vlpi_map(d); @@ -1221,13 +1327,25 @@ static void wait_for_syncr(void __iomem *rdbase) static void direct_lpi_inv(struct irq_data *d) { - struct its_collection *col; + struct its_vlpi_map *map = get_vlpi_map(d); void __iomem *rdbase; + u64 val; + + if (map) { + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + + WARN_ON(!is_v4_1(its_dev->its)); + + val = GICR_INVLPIR_V; + val |= FIELD_PREP(GICR_INVLPIR_VPEID, map->vpe->vpe_id); + val |= FIELD_PREP(GICR_INVLPIR_INTID, map->vintid); + } else { + val = d->hwirq; + } /* Target the redistributor this LPI is currently routed to */ - col = irq_to_col(d); - rdbase = per_cpu_ptr(gic_rdists->rdist, col->col_id)->rd_base; - gic_write_lpir(d->hwirq, rdbase + GICR_INVLPIR); + rdbase = per_cpu_ptr(gic_rdists->rdist, irq_to_cpuid(d))->rd_base; + gic_write_lpir(val, rdbase + GICR_INVLPIR); wait_for_syncr(rdbase); } @@ -1237,7 +1355,8 @@ static void lpi_update_config(struct irq_data *d, u8 clr, u8 set) struct its_device *its_dev = irq_data_get_irq_chip_data(d); lpi_write_config(d, clr, set); - if (gic_rdists->has_direct_lpi && !irqd_is_forwarded_to_vcpu(d)) + if (gic_rdists->has_direct_lpi && + (is_v4_1(its_dev->its) || !irqd_is_forwarded_to_vcpu(d))) direct_lpi_inv(d); else if (!irqd_is_forwarded_to_vcpu(d)) its_send_inv(its_dev, its_get_event_id(d)); @@ -1251,6 +1370,13 @@ static void its_vlpi_set_doorbell(struct irq_data *d, bool enable) u32 event = its_get_event_id(d); struct its_vlpi_map *map; + /* + * GICv4.1 does away with the per-LPI nonsense, nothing to do + * here. + */ + if (is_v4_1(its_dev->its)) + return; + map = dev_event_to_vlpi_map(its_dev, event); if (map->db_enabled == enable) @@ -2090,6 +2216,65 @@ static bool its_parse_indirect_baser(struct its_node *its, return indirect; } +static u32 compute_common_aff(u64 val) +{ + u32 aff, clpiaff; + + aff = FIELD_GET(GICR_TYPER_AFFINITY, val); + clpiaff = FIELD_GET(GICR_TYPER_COMMON_LPI_AFF, val); + + return aff & ~(GENMASK(31, 0) >> (clpiaff * 8)); +} + +static u32 compute_its_aff(struct its_node *its) +{ + u64 val; + u32 svpet; + + /* + * Reencode the ITS SVPET and MPIDR as a GICR_TYPER, and compute + * the resulting affinity. We then use that to see if this match + * our own affinity. 
+ */ + svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer); + val = FIELD_PREP(GICR_TYPER_COMMON_LPI_AFF, svpet); + val |= FIELD_PREP(GICR_TYPER_AFFINITY, its->mpidr); + return compute_common_aff(val); +} + +static struct its_node *find_sibling_its(struct its_node *cur_its) +{ + struct its_node *its; + u32 aff; + + if (!FIELD_GET(GITS_TYPER_SVPET, cur_its->typer)) + return NULL; + + aff = compute_its_aff(cur_its); + + list_for_each_entry(its, &its_nodes, entry) { + u64 baser; + + if (!is_v4_1(its) || its == cur_its) + continue; + + if (!FIELD_GET(GITS_TYPER_SVPET, its->typer)) + continue; + + if (aff != compute_its_aff(its)) + continue; + + /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */ + baser = its->tables[2].val; + if (!(baser & GITS_BASER_VALID)) + continue; + + return its; + } + + return NULL; +} + static void its_free_tables(struct its_node *its) { int i; @@ -2132,6 +2317,17 @@ static int its_alloc_tables(struct its_node *its) break; case GITS_BASER_TYPE_VCPU: + if (is_v4_1(its)) { + struct its_node *sibling; + + WARN_ON(i != 2); + if ((sibling = find_sibling_its(its))) { + *baser = sibling->tables[2]; + its_write_baser(its, baser, baser->val); + continue; + } + } + indirect = its_parse_indirect_baser(its, baser, psz, &order, ITS_MAX_VPEID_BITS); @@ -2153,6 +2349,220 @@ static int its_alloc_tables(struct its_node *its) return 0; } +static u64 inherit_vpe_l1_table_from_its(void) +{ + struct its_node *its; + u64 val; + u32 aff; + + val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); + aff = compute_common_aff(val); + + list_for_each_entry(its, &its_nodes, entry) { + u64 baser, addr; + + if (!is_v4_1(its)) + continue; + + if (!FIELD_GET(GITS_TYPER_SVPET, its->typer)) + continue; + + if (aff != compute_its_aff(its)) + continue; + + /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */ + baser = its->tables[2].val; + if (!(baser & GITS_BASER_VALID)) + continue; + + /* We have a winner! */ + val = GICR_VPROPBASER_4_1_VALID; + if (baser & GITS_BASER_INDIRECT) + val |= GICR_VPROPBASER_4_1_INDIRECT; + val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, + FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)); + switch (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)) { + case GIC_PAGE_SIZE_64K: + addr = GITS_BASER_ADDR_48_to_52(baser); + break; + default: + addr = baser & GENMASK_ULL(47, 12); + break; + } + val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12); + val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK, + FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser)); + val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK, + FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser)); + val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1); + + return val; + } + + return 0; +} + +static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask) +{ + u32 aff; + u64 val; + int cpu; + + val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); + aff = compute_common_aff(val); + + for_each_possible_cpu(cpu) { + void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base; + u32 tmp; + + if (!base || cpu == smp_processor_id()) + continue; + + val = gic_read_typer(base + GICR_TYPER); + tmp = compute_common_aff(val); + if (tmp != aff) + continue; + + /* + * At this point, we have a victim. This particular CPU + * has already booted, and has an affinity that matches + * ours wrt CommonLPIAff. Let's use its own VPROPBASER. + * Make sure we don't write the Z bit in that case. 
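
compute_common_aff() above boils down to a byte-granular prefix match on the affinity value; the masking works out as follows for each CommonLPIAff level:

	/* aff & ~(GENMASK(31, 0) >> (clpiaff * 8)): */
	aff & 0x00000000	/* clpiaff 0: all RDs share one vPE table */
	aff & 0xff000000	/* clpiaff 1: match on Aff3               */
	aff & 0xffff0000	/* clpiaff 2: match on Aff3.Aff2          */
	aff & 0xffffff00	/* clpiaff 3: match on Aff3.Aff2.Aff1     */
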
+ */ + val = gits_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER); + val &= ~GICR_VPROPBASER_4_1_Z; + + *mask = gic_data_rdist_cpu(cpu)->vpe_table_mask; + + return val; + } + + return 0; +} + +static int allocate_vpe_l1_table(void) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val, gpsz, npg, pa; + unsigned int psz = SZ_64K; + unsigned int np, epp, esz; + struct page *page; + + if (!gic_rdists->has_rvpeid) + return 0; + + /* + * if VPENDBASER.Valid is set, disable any previously programmed + * VPE by setting PendingLast while clearing Valid. This has the + * effect of making sure no doorbell will be generated and we can + * then safely clear VPROPBASER.Valid. + */ + if (gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER) & GICR_VPENDBASER_Valid) + gits_write_vpendbaser(GICR_VPENDBASER_PendingLast, + vlpi_base + GICR_VPENDBASER); + + /* + * If we can inherit the configuration from another RD, let's do + * so. Otherwise, we have to go through the allocation process. We + * assume that all RDs have the exact same requirements, as + * nothing will work otherwise. + */ + val = inherit_vpe_l1_table_from_rd(&gic_data_rdist()->vpe_table_mask); + if (val & GICR_VPROPBASER_4_1_VALID) + goto out; + + gic_data_rdist()->vpe_table_mask = kzalloc(sizeof(cpumask_t), GFP_KERNEL); + if (!gic_data_rdist()->vpe_table_mask) + return -ENOMEM; + + val = inherit_vpe_l1_table_from_its(); + if (val & GICR_VPROPBASER_4_1_VALID) + goto out; + + /* First probe the page size */ + val = FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, GIC_PAGE_SIZE_64K); + gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); + val = gits_read_vpropbaser(vlpi_base + GICR_VPROPBASER); + gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val); + esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val); + + switch (gpsz) { + default: + gpsz = GIC_PAGE_SIZE_4K; + /* fall through */ + case GIC_PAGE_SIZE_4K: + psz = SZ_4K; + break; + case GIC_PAGE_SIZE_16K: + psz = SZ_16K; + break; + case GIC_PAGE_SIZE_64K: + psz = SZ_64K; + break; + } + + /* + * Start populating the register from scratch, including RO fields + * (which we want to print in debug cases...) + */ + val = 0; + val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, gpsz); + val |= FIELD_PREP(GICR_VPROPBASER_4_1_ENTRY_SIZE, esz); + + /* How many entries per GIC page? */ + esz++; + epp = psz / (esz * SZ_8); + + /* + * If we need more than just a single L1 page, flag the table + * as indirect and compute the number of required L1 pages. 
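
The table-sizing arithmetic in allocate_vpe_l1_table() below is easiest to see with concrete numbers. Assuming 64K GIC pages (psz = SZ_64K), an 8-byte entry (ENTRY_SIZE field 0, so esz = 1 after the increment) and the default 16-bit VPEID space (ITS_MAX_VPEID = 65536):

	/*
	 *   epp = 65536 / (1 * 8)             =  8192  entries per page
	 *   epp < ITS_MAX_VPEID               ->  indirect (two-level) table
	 *   nl2 = DIV_ROUND_UP(65536, 8192)   =  8     L2 pages to cover the IDs
	 *   npg = DIV_ROUND_UP(8 * 8, 65536)  =  1     L1 page of pointers
	 *   np  = DIV_ROUND_UP(1 * SZ_64K, PAGE_SIZE) = 16 CPU pages (4K pages)
	 */
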
+ */ + if (epp < ITS_MAX_VPEID) { + int nl2; + + val |= GICR_VPROPBASER_4_1_INDIRECT; + + /* Number of L2 pages required to cover the VPEID space */ + nl2 = DIV_ROUND_UP(ITS_MAX_VPEID, epp); + + /* Number of L1 pages to point to the L2 pages */ + npg = DIV_ROUND_UP(nl2 * SZ_8, psz); + } else { + npg = 1; + } + + val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, npg); + + /* Right, that's the number of CPU pages we need for L1 */ + np = DIV_ROUND_UP(npg * psz, PAGE_SIZE); + + pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n", + np, npg, psz, epp, esz); + page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(np * PAGE_SIZE)); + if (!page) + return -ENOMEM; + + gic_data_rdist()->vpe_l1_page = page; + pa = virt_to_phys(page_address(page)); + WARN_ON(!IS_ALIGNED(pa, psz)); + + val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12); + val |= GICR_VPROPBASER_RaWb; + val |= GICR_VPROPBASER_InnerShareable; + val |= GICR_VPROPBASER_4_1_Z; + val |= GICR_VPROPBASER_4_1_VALID; + +out: + gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); + cpumask_set_cpu(smp_processor_id(), gic_data_rdist()->vpe_table_mask); + + pr_debug("CPU%d: VPROPBASER = %llx %*pbl\n", + smp_processor_id(), val, + cpumask_pr_args(gic_data_rdist()->vpe_table_mask)); + + return 0; +} + static int its_alloc_collections(struct its_node *its) { int i; @@ -2244,7 +2654,7 @@ static int __init allocate_lpi_tables(void) return 0; } -static u64 its_clear_vpend_valid(void __iomem *vlpi_base) +static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set) { u32 count = 1000000; /* 1s! */ bool clean; @@ -2252,6 +2662,8 @@ static u64 its_clear_vpend_valid(void __iomem *vlpi_base) val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); val &= ~GICR_VPENDBASER_Valid; + val &= ~clr; + val |= set; gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); do { @@ -2264,6 +2676,11 @@ static u64 its_clear_vpend_valid(void __iomem *vlpi_base) } } while (!clean && count); + if (unlikely(val & GICR_VPENDBASER_Dirty)) { + pr_err_ratelimited("ITS virtual pending table not cleaning\n"); + val |= GICR_VPENDBASER_PendingLast; + } + return val; } @@ -2352,7 +2769,7 @@ static void its_cpu_init_lpis(void) val |= GICR_CTLR_ENABLE_LPIS; writel_relaxed(val, rbase + GICR_CTLR); - if (gic_rdists->has_vlpis) { + if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) { void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); /* @@ -2372,10 +2789,20 @@ static void its_cpu_init_lpis(void) * ancient programming gets left in and has possibility of * corrupting memory. */ - val = its_clear_vpend_valid(vlpi_base); + val = its_clear_vpend_valid(vlpi_base, 0, 0); WARN_ON(val & GICR_VPENDBASER_Dirty); } + if (allocate_vpe_l1_table()) { + /* + * If the allocation has failed, we're in massive trouble. + * Disable direct injection, and pray that no VM was + * already running... + */ + gic_rdists->has_rvpeid = false; + gic_rdists->has_vlpis = false; + } + /* Make sure the GIC has seen the above */ dsb(sy); out: @@ -2859,7 +3286,7 @@ static const struct irq_domain_ops its_domain_ops = { /* * This is insane. * - * If a GICv4 doesn't implement Direct LPIs (which is extremely + * If a GICv4.0 doesn't implement Direct LPIs (which is extremely * likely), the only way to perform an invalidate is to use a fake * device to issue an INV command, implying that the LPI has first * been mapped to some event on that device. 
Since this is not exactly @@ -2867,9 +3294,20 @@ static const struct irq_domain_ops its_domain_ops = { * only issue an UNMAP if we're short on available slots. * * Broken by design(tm). + * + * GICv4.1, on the other hand, mandates that we're able to invalidate + * by writing to a MMIO register. It doesn't implement the whole of + * DirectLPI, but that's good enough. And most of the time, we don't + * even have to invalidate anything, as the redistributor can be told + * whether to generate a doorbell or not (we thus leave it enabled, + * always). */ static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe) { + /* GICv4.1 doesn't use a proxy, so nothing to do here */ + if (gic_rdists->has_rvpeid) + return; + /* Already unmapped? */ if (vpe->vpe_proxy_event == -1) return; @@ -2892,6 +3330,10 @@ static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe) static void its_vpe_db_proxy_unmap(struct its_vpe *vpe) { + /* GICv4.1 doesn't use a proxy, so nothing to do here */ + if (gic_rdists->has_rvpeid) + return; + if (!gic_rdists->has_direct_lpi) { unsigned long flags; @@ -2903,6 +3345,10 @@ static void its_vpe_db_proxy_unmap(struct its_vpe *vpe) static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe) { + /* GICv4.1 doesn't use a proxy, so nothing to do here */ + if (gic_rdists->has_rvpeid) + return; + /* Already mapped? */ if (vpe->vpe_proxy_event != -1) return; @@ -2925,6 +3371,10 @@ static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to) unsigned long flags; struct its_collection *target_col; + /* GICv4.1 doesn't use a proxy, so nothing to do here */ + if (gic_rdists->has_rvpeid) + return; + if (gic_rdists->has_direct_lpi) { void __iomem *rdbase; @@ -2951,7 +3401,7 @@ static int its_vpe_set_affinity(struct irq_data *d, bool force) { struct its_vpe *vpe = irq_data_get_irq_chip_data(d); - int cpu = cpumask_first(mask_val); + int from, cpu = cpumask_first(mask_val); /* * Changing affinity is mega expensive, so let's be as lazy as @@ -2959,14 +3409,24 @@ static int its_vpe_set_affinity(struct irq_data *d, * into the proxy device, we need to move the doorbell * interrupt to its new location. */ - if (vpe->col_idx != cpu) { - int from = vpe->col_idx; + if (vpe->col_idx == cpu) + goto out; - vpe->col_idx = cpu; - its_send_vmovp(vpe); - its_vpe_db_proxy_move(vpe, from, cpu); - } + from = vpe->col_idx; + vpe->col_idx = cpu; + /* + * GICv4.1 allows us to skip VMOVP if moving to a cpu whose RD + * is sharing its VPE table with the current one. 
+ */ + if (gic_data_rdist_cpu(cpu)->vpe_table_mask && + cpumask_test_cpu(from, gic_data_rdist_cpu(cpu)->vpe_table_mask)) + goto out; + + its_send_vmovp(vpe); + its_vpe_db_proxy_move(vpe, from, cpu); + +out: irq_data_update_effective_affinity(d, cpumask_of(cpu)); return IRQ_SET_MASK_OK_DONE; @@ -3009,16 +3469,10 @@ static void its_vpe_deschedule(struct its_vpe *vpe) void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); u64 val; - val = its_clear_vpend_valid(vlpi_base); + val = its_clear_vpend_valid(vlpi_base, 0, 0); - if (unlikely(val & GICR_VPENDBASER_Dirty)) { - pr_err_ratelimited("ITS virtual pending table not cleaning\n"); - vpe->idai = false; - vpe->pending_last = true; - } else { - vpe->idai = !!(val & GICR_VPENDBASER_IDAI); - vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); - } + vpe->idai = !!(val & GICR_VPENDBASER_IDAI); + vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); } static void its_vpe_invall(struct its_vpe *vpe) @@ -3151,6 +3605,139 @@ static struct irq_chip its_vpe_irq_chip = { .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity, }; +static struct its_node *find_4_1_its(void) +{ + static struct its_node *its = NULL; + + if (!its) { + list_for_each_entry(its, &its_nodes, entry) { + if (is_v4_1(its)) + return its; + } + + /* Oops? */ + its = NULL; + } + + return its; +} + +static void its_vpe_4_1_send_inv(struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_node *its; + + /* + * GICv4.1 wants doorbells to be invalidated using the + * INVDB command in order to be broadcast to all RDs. Send + * it to the first valid ITS, and let the HW do its magic. + */ + its = find_4_1_its(); + if (its) + its_send_invdb(its, vpe); +} + +static void its_vpe_4_1_mask_irq(struct irq_data *d) +{ + lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0); + its_vpe_4_1_send_inv(d); +} + +static void its_vpe_4_1_unmask_irq(struct irq_data *d) +{ + lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED); + its_vpe_4_1_send_inv(d); +} + +static void its_vpe_4_1_schedule(struct its_vpe *vpe, + struct its_cmd_info *info) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val = 0; + + /* Schedule the VPE */ + val |= GICR_VPENDBASER_Valid; + val |= info->g0en ? GICR_VPENDBASER_4_1_VGRP0EN : 0; + val |= info->g1en ? GICR_VPENDBASER_4_1_VGRP1EN : 0; + val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id); + + gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); +} + +static void its_vpe_4_1_deschedule(struct its_vpe *vpe, + struct its_cmd_info *info) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val; + + if (info->req_db) { + /* + * vPE is going to block: make the vPE non-resident with + * PendingLast clear and DB set. The GIC guarantees that if + * we read-back PendingLast clear, then a doorbell will be + * delivered when an interrupt comes. + */ + val = its_clear_vpend_valid(vlpi_base, + GICR_VPENDBASER_PendingLast, + GICR_VPENDBASER_4_1_DB); + vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); + } else { + /* + * We're not blocking, so just make the vPE non-resident + * with PendingLast set, indicating that we'll be back. 
+ */ + val = its_clear_vpend_valid(vlpi_base, + 0, + GICR_VPENDBASER_PendingLast); + vpe->pending_last = true; + } +} + +static void its_vpe_4_1_invall(struct its_vpe *vpe) +{ + void __iomem *rdbase; + u64 val; + + val = GICR_INVALLR_V; + val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id); + + /* Target the redistributor this vPE is currently known on */ + rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; + gic_write_lpir(val, rdbase + GICR_INVALLR); +} + +static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_cmd_info *info = vcpu_info; + + switch (info->cmd_type) { + case SCHEDULE_VPE: + its_vpe_4_1_schedule(vpe, info); + return 0; + + case DESCHEDULE_VPE: + its_vpe_4_1_deschedule(vpe, info); + return 0; + + case INVALL_VPE: + its_vpe_4_1_invall(vpe); + return 0; + + default: + return -EINVAL; + } +} + +static struct irq_chip its_vpe_4_1_irq_chip = { + .name = "GICv4.1-vpe", + .irq_mask = its_vpe_4_1_mask_irq, + .irq_unmask = its_vpe_4_1_unmask_irq, + .irq_eoi = irq_chip_eoi_parent, + .irq_set_affinity = its_vpe_set_affinity, + .irq_set_vcpu_affinity = its_vpe_4_1_set_vcpu_affinity, +}; + static int its_vpe_id_alloc(void) { return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL); @@ -3186,7 +3773,10 @@ static int its_vpe_init(struct its_vpe *vpe) vpe->vpe_id = vpe_id; vpe->vpt_page = vpt_page; - vpe->vpe_proxy_event = -1; + if (gic_rdists->has_rvpeid) + atomic_set(&vpe->vmapp_count, 0); + else + vpe->vpe_proxy_event = -1; return 0; } @@ -3228,6 +3818,7 @@ static void its_vpe_irq_domain_free(struct irq_domain *domain, static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *args) { + struct irq_chip *irqchip = &its_vpe_irq_chip; struct its_vm *vm = args; unsigned long *bitmap; struct page *vprop_page; @@ -3255,6 +3846,9 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq vm->nr_db_lpis = nr_ids; vm->vprop_page = vprop_page; + if (gic_rdists->has_rvpeid) + irqchip = &its_vpe_4_1_irq_chip; + for (i = 0; i < nr_irqs; i++) { vm->vpes[i]->vpe_db_lpi = base + i; err = its_vpe_init(vm->vpes[i]); @@ -3265,7 +3859,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq if (err) break; irq_domain_set_hwirq_and_chip(domain, virq + i, i, - &its_vpe_irq_chip, vm->vpes[i]); + irqchip, vm->vpes[i]); set_bit(i, bitmap); } @@ -3778,6 +4372,14 @@ static int __init its_probe_one(struct resource *res, } else { pr_info("ITS@%pa: Single VMOVP capable\n", &res->start); } + + if (is_v4_1(its)) { + u32 svpet = FIELD_GET(GITS_TYPER_SVPET, typer); + its->mpidr = readl_relaxed(its_base + GITS_MPIDR); + + pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n", + &res->start, its->mpidr, svpet); + } } its->numa_node = numa_node; @@ -4138,6 +4740,8 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists, bool has_v4 = false; int err; + gic_rdists = rdists; + its_parent = parent_domain; of_node = to_of_node(handle); if (of_node) @@ -4150,8 +4754,6 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists, return -ENXIO; } - gic_rdists = rdists; - err = allocate_lpi_tables(); if (err) return err; diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index d6218012097b..286f98222878 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -858,8 +858,21 @@ static int __gic_update_rdist_properties(struct redist_region 
*region, void __iomem *ptr) { u64 typer = gic_read_typer(ptr + GICR_TYPER); + gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS); - gic_data.rdists.has_direct_lpi &= !!(typer & GICR_TYPER_DirectLPIS); + + /* RVPEID implies some form of DirectLPI, no matter what the doc says... :-/ */ + gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID); + gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) | + gic_data.rdists.has_rvpeid); + + /* Detect non-sensical configurations */ + if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) { + gic_data.rdists.has_direct_lpi = false; + gic_data.rdists.has_vlpis = false; + gic_data.rdists.has_rvpeid = false; + } + gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr); return 1; @@ -872,9 +885,10 @@ static void gic_update_rdist_properties(void) if (WARN_ON(gic_data.ppi_nr == UINT_MAX)) gic_data.ppi_nr = 0; pr_info("%d PPIs implemented\n", gic_data.ppi_nr); - pr_info("%sVLPI support, %sdirect LPI support\n", + pr_info("%sVLPI support, %sdirect LPI support, %sRVPEID support\n", !gic_data.rdists.has_vlpis ? "no " : "", - !gic_data.rdists.has_direct_lpi ? "no " : ""); + !gic_data.rdists.has_direct_lpi ? "no " : "", + !gic_data.rdists.has_rvpeid ? "no " : ""); } /* Check whether it's single security state view */ @@ -1562,10 +1576,14 @@ static int __init gic_init_bases(void __iomem *dist_base, pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32); pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR); + + gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2); + gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops, &gic_data); irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED); gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist)); + gic_data.rdists.has_rvpeid = true; gic_data.rdists.has_vlpis = true; gic_data.rdists.has_direct_lpi = true; diff --git a/drivers/irqchip/irq-imx-intmux.c b/drivers/irqchip/irq-imx-intmux.c new file mode 100644 index 000000000000..c27577c81126 --- /dev/null +++ b/drivers/irqchip/irq-imx-intmux.c @@ -0,0 +1,309 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright 2017 NXP + +/* INTMUX Block Diagram + * + * ________________ + * interrupt source # 0 +---->| | + * | | | + * interrupt source # 1 +++-->| | + * ... | | | channel # 0 |--------->interrupt out # 0 + * ... | | | | + * ... | | | | + * interrupt source # X-1 +++-->|________________| + * | | | + * | | | + * | | | ________________ + * +---->| | + * | | | | | + * | +-->| | + * | | | | channel # 1 |--------->interrupt out # 1 + * | | +>| | + * | | | | | + * | | | |________________| + * | | | + * | | | + * | | | ... + * | | | ... + * | | | + * | | | ________________ + * +---->| | + * | | | | + * +-->| | + * | | channel # N |--------->interrupt out # N + * +>| | + * | | + * |________________| + * + * + * N: Interrupt Channel Instance Number (N=7) + * X: Interrupt Source Number for each channel (X=32) + * + * The INTMUX interrupt multiplexer has 8 channels, each channel receives 32 + * interrupt sources and generates 1 interrupt output. 
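
Each INTMUX channel occupies a 0x40-byte register window, so the CHANIER()/CHANIPR() macros defined below reduce to simple stride arithmetic; for example:

	/* channel 0: CHANIER(0) = 0x10,  CHANIPR(0) = 0x20
	 * channel 3: CHANIER(3) = 0xd0,  CHANIPR(3) = 0xe0 */

Consumers select a source with a two-cell interrupt specifier, <hwirq channel>; the channel cell is what imx_intmux_irq_select() matches against each per-channel domain.
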
+ * + */ + +#include <linux/clk.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/kernel.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> +#include <linux/spinlock.h> + +#define CHANIER(n) (0x10 + (0x40 * n)) +#define CHANIPR(n) (0x20 + (0x40 * n)) + +#define CHAN_MAX_NUM 0x8 + +struct intmux_irqchip_data { + int chanidx; + int irq; + struct irq_domain *domain; +}; + +struct intmux_data { + raw_spinlock_t lock; + void __iomem *regs; + struct clk *ipg_clk; + int channum; + struct intmux_irqchip_data irqchip_data[]; +}; + +static void imx_intmux_irq_mask(struct irq_data *d) +{ + struct intmux_irqchip_data *irqchip_data = d->chip_data; + int idx = irqchip_data->chanidx; + struct intmux_data *data = container_of(irqchip_data, struct intmux_data, + irqchip_data[idx]); + unsigned long flags; + void __iomem *reg; + u32 val; + + raw_spin_lock_irqsave(&data->lock, flags); + reg = data->regs + CHANIER(idx); + val = readl_relaxed(reg); + /* disable the interrupt source of this channel */ + val &= ~BIT(d->hwirq); + writel_relaxed(val, reg); + raw_spin_unlock_irqrestore(&data->lock, flags); +} + +static void imx_intmux_irq_unmask(struct irq_data *d) +{ + struct intmux_irqchip_data *irqchip_data = d->chip_data; + int idx = irqchip_data->chanidx; + struct intmux_data *data = container_of(irqchip_data, struct intmux_data, + irqchip_data[idx]); + unsigned long flags; + void __iomem *reg; + u32 val; + + raw_spin_lock_irqsave(&data->lock, flags); + reg = data->regs + CHANIER(idx); + val = readl_relaxed(reg); + /* enable the interrupt source of this channel */ + val |= BIT(d->hwirq); + writel_relaxed(val, reg); + raw_spin_unlock_irqrestore(&data->lock, flags); +} + +static struct irq_chip imx_intmux_irq_chip = { + .name = "intmux", + .irq_mask = imx_intmux_irq_mask, + .irq_unmask = imx_intmux_irq_unmask, +}; + +static int imx_intmux_irq_map(struct irq_domain *h, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_chip_data(irq, h->host_data); + irq_set_chip_and_handler(irq, &imx_intmux_irq_chip, handle_level_irq); + + return 0; +} + +static int imx_intmux_irq_xlate(struct irq_domain *d, struct device_node *node, + const u32 *intspec, unsigned int intsize, + unsigned long *out_hwirq, unsigned int *out_type) +{ + struct intmux_irqchip_data *irqchip_data = d->host_data; + int idx = irqchip_data->chanidx; + struct intmux_data *data = container_of(irqchip_data, struct intmux_data, + irqchip_data[idx]); + + /* + * two cells needed in interrupt specifier: + * the 1st cell: hw interrupt number + * the 2nd cell: channel index + */ + if (WARN_ON(intsize != 2)) + return -EINVAL; + + if (WARN_ON(intspec[1] >= data->channum)) + return -EINVAL; + + *out_hwirq = intspec[0]; + *out_type = IRQ_TYPE_LEVEL_HIGH; + + return 0; +} + +static int imx_intmux_irq_select(struct irq_domain *d, struct irq_fwspec *fwspec, + enum irq_domain_bus_token bus_token) +{ + struct intmux_irqchip_data *irqchip_data = d->host_data; + + /* Not for us */ + if (fwspec->fwnode != d->fwnode) + return false; + + return irqchip_data->chanidx == fwspec->param[1]; +} + +static const struct irq_domain_ops imx_intmux_domain_ops = { + .map = imx_intmux_irq_map, + .xlate = imx_intmux_irq_xlate, + .select = imx_intmux_irq_select, +}; + +static void imx_intmux_irq_handler(struct irq_desc *desc) +{ + struct intmux_irqchip_data *irqchip_data = irq_desc_get_handler_data(desc); + int idx = irqchip_data->chanidx; + struct intmux_data *data = 
container_of(irqchip_data, struct intmux_data, + irqchip_data[idx]); + unsigned long irqstat; + int pos, virq; + + chained_irq_enter(irq_desc_get_chip(desc), desc); + + /* read the interrupt source pending status of this channel */ + irqstat = readl_relaxed(data->regs + CHANIPR(idx)); + + for_each_set_bit(pos, &irqstat, 32) { + virq = irq_find_mapping(irqchip_data->domain, pos); + if (virq) + generic_handle_irq(virq); + } + + chained_irq_exit(irq_desc_get_chip(desc), desc); +} + +static int imx_intmux_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct irq_domain *domain; + struct intmux_data *data; + int channum; + int i, ret; + + channum = platform_irq_count(pdev); + if (channum == -EPROBE_DEFER) { + return -EPROBE_DEFER; + } else if (channum > CHAN_MAX_NUM) { + dev_err(&pdev->dev, "supports up to %d multiplex channels\n", + CHAN_MAX_NUM); + return -EINVAL; + } + + data = devm_kzalloc(&pdev->dev, sizeof(*data) + + channum * sizeof(data->irqchip_data[0]), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(data->regs)) { + dev_err(&pdev->dev, "failed to initialize reg\n"); + return PTR_ERR(data->regs); + } + + data->ipg_clk = devm_clk_get(&pdev->dev, "ipg"); + if (IS_ERR(data->ipg_clk)) { + ret = PTR_ERR(data->ipg_clk); + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, "failed to get ipg clk: %d\n", ret); + return ret; + } + + data->channum = channum; + raw_spin_lock_init(&data->lock); + + ret = clk_prepare_enable(data->ipg_clk); + if (ret) { + dev_err(&pdev->dev, "failed to enable ipg clk: %d\n", ret); + return ret; + } + + for (i = 0; i < channum; i++) { + data->irqchip_data[i].chanidx = i; + + data->irqchip_data[i].irq = irq_of_parse_and_map(np, i); + if (data->irqchip_data[i].irq <= 0) { + ret = -EINVAL; + dev_err(&pdev->dev, "failed to get irq\n"); + goto out; + } + + domain = irq_domain_add_linear(np, 32, &imx_intmux_domain_ops, + &data->irqchip_data[i]); + if (!domain) { + ret = -ENOMEM; + dev_err(&pdev->dev, "failed to create IRQ domain\n"); + goto out; + } + data->irqchip_data[i].domain = domain; + + /* disable all interrupt sources of this channel firstly */ + writel_relaxed(0, data->regs + CHANIER(i)); + + irq_set_chained_handler_and_data(data->irqchip_data[i].irq, + imx_intmux_irq_handler, + &data->irqchip_data[i]); + } + + platform_set_drvdata(pdev, data); + + return 0; +out: + clk_disable_unprepare(data->ipg_clk); + return ret; +} + +static int imx_intmux_remove(struct platform_device *pdev) +{ + struct intmux_data *data = platform_get_drvdata(pdev); + int i; + + for (i = 0; i < data->channum; i++) { + /* disable all interrupt sources of this channel */ + writel_relaxed(0, data->regs + CHANIER(i)); + + irq_set_chained_handler_and_data(data->irqchip_data[i].irq, + NULL, NULL); + + irq_domain_remove(data->irqchip_data[i].domain); + } + + clk_disable_unprepare(data->ipg_clk); + + return 0; +} + +static const struct of_device_id imx_intmux_id[] = { + { .compatible = "fsl,imx-intmux", }, + { /* sentinel */ }, +}; + +static struct platform_driver imx_intmux_driver = { + .driver = { + .name = "imx-intmux", + .of_match_table = imx_intmux_id, + }, + .probe = imx_intmux_probe, + .remove = imx_intmux_remove, +}; +builtin_platform_driver(imx_intmux_driver); diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c index 01d18b39069e..c5589ee0dfb3 100644 --- a/drivers/irqchip/irq-ingenic.c +++ b/drivers/irqchip/irq-ingenic.c @@ -17,7 +17,6 @@ #include <linux/delay.h> 
#include <asm/io.h> -#include <asm/mach-jz4740/irq.h> struct ingenic_intc_data { void __iomem *base; @@ -50,7 +49,7 @@ static irqreturn_t intc_cascade(int irq, void *data) while (pending) { int bit = __fls(pending); - irq = irq_find_mapping(domain, bit + (i * 32)); + irq = irq_linear_revmap(domain, bit + (i * 32)); generic_handle_irq(irq); pending &= ~BIT(bit); } @@ -97,8 +96,7 @@ static int __init ingenic_intc_of_init(struct device_node *node, goto out_unmap_irq; } - domain = irq_domain_add_legacy(node, num_chips * 32, - JZ4740_IRQ_BASE, 0, + domain = irq_domain_add_linear(node, num_chips * 32, &irq_generic_chip_ops, NULL); if (!domain) { err = -ENOMEM; diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c index 3f09f658e8e2..6b566bba263b 100644 --- a/drivers/irqchip/irq-mbigen.c +++ b/drivers/irqchip/irq-mbigen.c @@ -374,6 +374,7 @@ static struct platform_driver mbigen_platform_driver = { .name = "Hisilicon MBIGEN-V2", .of_match_table = mbigen_of_match, .acpi_match_table = ACPI_PTR(mbigen_acpi_match), + .suppress_bind_attrs = true, }, .probe = mbigen_device_probe, }; diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c index 829084b568fa..ccc7f823911b 100644 --- a/drivers/irqchip/irq-meson-gpio.c +++ b/drivers/irqchip/irq-meson-gpio.c @@ -24,50 +24,101 @@ #define REG_PIN_47_SEL 0x08 #define REG_FILTER_SEL 0x0c +/* use for A1 like chips */ +#define REG_PIN_A1_SEL 0x04 + /* * Note: The S905X3 datasheet reports that BOTH_EDGE is controlled by * bits 24 to 31. Tests on the actual HW show that these bits are * stuck at 0. Bits 8 to 15 are responsive and have the expected * effect. */ -#define REG_EDGE_POL_EDGE(x) BIT(x) -#define REG_EDGE_POL_LOW(x) BIT(16 + (x)) -#define REG_BOTH_EDGE(x) BIT(8 + (x)) -#define REG_EDGE_POL_MASK(x) ( \ - REG_EDGE_POL_EDGE(x) | \ - REG_EDGE_POL_LOW(x) | \ - REG_BOTH_EDGE(x)) +#define REG_EDGE_POL_EDGE(params, x) BIT((params)->edge_single_offset + (x)) +#define REG_EDGE_POL_LOW(params, x) BIT((params)->pol_low_offset + (x)) +#define REG_BOTH_EDGE(params, x) BIT((params)->edge_both_offset + (x)) +#define REG_EDGE_POL_MASK(params, x) ( \ + REG_EDGE_POL_EDGE(params, x) | \ + REG_EDGE_POL_LOW(params, x) | \ + REG_BOTH_EDGE(params, x)) #define REG_PIN_SEL_SHIFT(x) (((x) % 4) * 8) #define REG_FILTER_SEL_SHIFT(x) ((x) * 4) +struct meson_gpio_irq_controller; +static void meson8_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl, + unsigned int channel, unsigned long hwirq); +static void meson_gpio_irq_init_dummy(struct meson_gpio_irq_controller *ctl); +static void meson_a1_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl, + unsigned int channel, + unsigned long hwirq); +static void meson_a1_gpio_irq_init(struct meson_gpio_irq_controller *ctl); + +struct irq_ctl_ops { + void (*gpio_irq_sel_pin)(struct meson_gpio_irq_controller *ctl, + unsigned int channel, unsigned long hwirq); + void (*gpio_irq_init)(struct meson_gpio_irq_controller *ctl); +}; + struct meson_gpio_irq_params { unsigned int nr_hwirq; bool support_edge_both; + unsigned int edge_both_offset; + unsigned int edge_single_offset; + unsigned int pol_low_offset; + unsigned int pin_sel_mask; + struct irq_ctl_ops ops; }; +#define INIT_MESON_COMMON(irqs, init, sel) \ + .nr_hwirq = irqs, \ + .ops = { \ + .gpio_irq_init = init, \ + .gpio_irq_sel_pin = sel, \ + }, + +#define INIT_MESON8_COMMON_DATA(irqs) \ + INIT_MESON_COMMON(irqs, meson_gpio_irq_init_dummy, \ + meson8_gpio_irq_sel_pin) \ + .edge_single_offset = 0, \ + .pol_low_offset = 16, \ + .pin_sel_mask = 
0xff, \ + +#define INIT_MESON_A1_COMMON_DATA(irqs) \ + INIT_MESON_COMMON(irqs, meson_a1_gpio_irq_init, \ + meson_a1_gpio_irq_sel_pin) \ + .support_edge_both = true, \ + .edge_both_offset = 16, \ + .edge_single_offset = 8, \ + .pol_low_offset = 0, \ + .pin_sel_mask = 0x7f, \ + static const struct meson_gpio_irq_params meson8_params = { - .nr_hwirq = 134, + INIT_MESON8_COMMON_DATA(134) }; static const struct meson_gpio_irq_params meson8b_params = { - .nr_hwirq = 119, + INIT_MESON8_COMMON_DATA(119) }; static const struct meson_gpio_irq_params gxbb_params = { - .nr_hwirq = 133, + INIT_MESON8_COMMON_DATA(133) }; static const struct meson_gpio_irq_params gxl_params = { - .nr_hwirq = 110, + INIT_MESON8_COMMON_DATA(110) }; static const struct meson_gpio_irq_params axg_params = { - .nr_hwirq = 100, + INIT_MESON8_COMMON_DATA(100) }; static const struct meson_gpio_irq_params sm1_params = { - .nr_hwirq = 100, + INIT_MESON8_COMMON_DATA(100) .support_edge_both = true, + .edge_both_offset = 8, +}; + +static const struct meson_gpio_irq_params a1_params = { + INIT_MESON_A1_COMMON_DATA(62) }; static const struct of_device_id meson_irq_gpio_matches[] = { @@ -78,6 +129,7 @@ static const struct of_device_id meson_irq_gpio_matches[] = { { .compatible = "amlogic,meson-axg-gpio-intc", .data = &axg_params }, { .compatible = "amlogic,meson-g12a-gpio-intc", .data = &axg_params }, { .compatible = "amlogic,meson-sm1-gpio-intc", .data = &sm1_params }, + { .compatible = "amlogic,meson-a1-gpio-intc", .data = &a1_params }, { } }; @@ -100,9 +152,43 @@ static void meson_gpio_irq_update_bits(struct meson_gpio_irq_controller *ctl, writel_relaxed(tmp, ctl->base + reg); } -static unsigned int meson_gpio_irq_channel_to_reg(unsigned int channel) +static void meson_gpio_irq_init_dummy(struct meson_gpio_irq_controller *ctl) +{ +} + +static void meson8_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl, + unsigned int channel, unsigned long hwirq) +{ + unsigned int reg_offset; + unsigned int bit_offset; + + reg_offset = (channel < 4) ? REG_PIN_03_SEL : REG_PIN_47_SEL; + bit_offset = REG_PIN_SEL_SHIFT(channel); + + meson_gpio_irq_update_bits(ctl, reg_offset, + ctl->params->pin_sel_mask << bit_offset, + hwirq << bit_offset); +} + +static void meson_a1_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl, + unsigned int channel, + unsigned long hwirq) { - return (channel < 4) ? REG_PIN_03_SEL : REG_PIN_47_SEL; + unsigned int reg_offset; + unsigned int bit_offset; + + bit_offset = ((channel % 2) == 0) ? 
0 : 16; + reg_offset = REG_PIN_A1_SEL + ((channel / 2) << 2); + + meson_gpio_irq_update_bits(ctl, reg_offset, + ctl->params->pin_sel_mask << bit_offset, + hwirq << bit_offset); +} + +/* For A1 and later chips there is a switch to enable/disable the irq */ +static void meson_a1_gpio_irq_init(struct meson_gpio_irq_controller *ctl) +{ + meson_gpio_irq_update_bits(ctl, REG_EDGE_POL, BIT(31), BIT(31)); } static int @@ -110,7 +196,7 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl, unsigned long hwirq, u32 **channel_hwirq) { - unsigned int reg, idx; + unsigned int idx; spin_lock(&ctl->lock); @@ -129,10 +215,7 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl, * Setup the mux of the channel to route the signal of the pad * to the appropriate input of the GIC */ - reg = meson_gpio_irq_channel_to_reg(idx); - meson_gpio_irq_update_bits(ctl, reg, - 0xff << REG_PIN_SEL_SHIFT(idx), - hwirq << REG_PIN_SEL_SHIFT(idx)); + ctl->params->ops.gpio_irq_sel_pin(ctl, idx, hwirq); /* * Get the hwirq number assigned to this channel through @@ -173,7 +256,9 @@ static int meson_gpio_irq_type_setup(struct meson_gpio_irq_controller *ctl, { u32 val = 0; unsigned int idx; + const struct meson_gpio_irq_params *params; + params = ctl->params; idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq); /* @@ -190,22 +275,22 @@ static int meson_gpio_irq_type_setup(struct meson_gpio_irq_controller *ctl, * precedence over the other edge/polarity settings */ if (type == IRQ_TYPE_EDGE_BOTH) { - if (!ctl->params->support_edge_both) + if (!params->support_edge_both) return -EINVAL; - val |= REG_BOTH_EDGE(idx); + val |= REG_BOTH_EDGE(params, idx); } else { if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) - val |= REG_EDGE_POL_EDGE(idx); + val |= REG_EDGE_POL_EDGE(params, idx); if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) - val |= REG_EDGE_POL_LOW(idx); + val |= REG_EDGE_POL_LOW(params, idx); } spin_lock(&ctl->lock); meson_gpio_irq_update_bits(ctl, REG_EDGE_POL, - REG_EDGE_POL_MASK(idx), val); + REG_EDGE_POL_MASK(params, idx), val); spin_unlock(&ctl->lock); @@ -371,6 +456,8 @@ static int __init meson_gpio_irq_parse_dt(struct device_node *node, return ret; } + ctl->params->ops.gpio_irq_init(ctl); + return 0; } diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index f3985469c221..d70507133c1d 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -716,7 +716,7 @@ static int __init gic_of_init(struct device_node *node, __sync(); } - mips_gic_base = ioremap_nocache(gic_base, gic_len); + mips_gic_base = ioremap(gic_base, gic_len); gicconfig = read_gic_config(); gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS; diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c index a166d30deea2..f747e2209ea9 100644 --- a/drivers/irqchip/irq-nvic.c +++ b/drivers/irqchip/irq-nvic.c @@ -45,17 +45,6 @@ nvic_handle_irq(irq_hw_number_t hwirq, struct pt_regs *regs) handle_IRQ(irq, regs); } -static int nvic_irq_domain_translate(struct irq_domain *d, - struct irq_fwspec *fwspec, - unsigned long *hwirq, unsigned int *type) -{ - if (WARN_ON(fwspec->param_count < 1)) - return -EINVAL; - *hwirq = fwspec->param[0]; - *type = IRQ_TYPE_NONE; - return 0; -} - static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *arg) { @@ -64,7 +53,7 @@ static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int type = IRQ_TYPE_NONE; struct irq_fwspec
*fwspec = arg; - ret = nvic_irq_domain_translate(domain, fwspec, &hwirq, &type); + ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type); if (ret) return ret; @@ -75,7 +64,7 @@ static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, } static const struct irq_domain_ops nvic_irq_domain_ops = { - .translate = nvic_irq_domain_translate, + .translate = irq_domain_translate_onecell, .alloc = nvic_irq_domain_alloc, .free = irq_domain_free_irqs_top, }; diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c index f82bc60a6793..6e5e3172796b 100644 --- a/drivers/irqchip/irq-renesas-intc-irqpin.c +++ b/drivers/irqchip/irq-renesas-intc-irqpin.c @@ -460,7 +460,7 @@ static int intc_irqpin_probe(struct platform_device *pdev) goto err0; } - i->iomem = devm_ioremap_nocache(dev, io[k]->start, + i->iomem = devm_ioremap(dev, io[k]->start, resource_size(io[k])); if (!i->iomem) { dev_err(dev, "failed to remap IOMEM\n"); diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c index 8df547d2d935..aa4af886e43a 100644 --- a/drivers/irqchip/irq-sifive-plic.c +++ b/drivers/irqchip/irq-sifive-plic.c @@ -154,15 +154,37 @@ static struct irq_chip plic_chip = { static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq) { - irq_set_chip_and_handler(irq, &plic_chip, handle_fasteoi_irq); - irq_set_chip_data(irq, NULL); + irq_domain_set_info(d, irq, hwirq, &plic_chip, d->host_data, + handle_fasteoi_irq, NULL, NULL); irq_set_noprobe(irq); return 0; } +static int plic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + int i, ret; + irq_hw_number_t hwirq; + unsigned int type; + struct irq_fwspec *fwspec = arg; + + ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type); + if (ret) + return ret; + + for (i = 0; i < nr_irqs; i++) { + ret = plic_irqdomain_map(domain, virq + i, hwirq + i); + if (ret) + return ret; + } + + return 0; +} + static const struct irq_domain_ops plic_irqdomain_ops = { - .map = plic_irqdomain_map, - .xlate = irq_domain_xlate_onecell, + .translate = irq_domain_translate_onecell, + .alloc = plic_irq_domain_alloc, + .free = irq_domain_free_irqs_top, }; static struct irq_domain *plic_irqdomain; @@ -256,7 +278,7 @@ static int __init plic_init(struct device_node *node, * Skip contexts other than external interrupts for our * privilege level. 
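Both the nvic and plic conversions above lean on the same generic helper; for reference, irq_domain_translate_onecell() behaves exactly like the nvic_irq_domain_translate() being deleted (sketched from kernel/irq/irqdomain.c, check the tree for the authoritative body):

int irq_domain_translate_onecell(struct irq_domain *d,
				 struct irq_fwspec *fwspec,
				 unsigned long *out_hwirq,
				 unsigned int *out_type)
{
	if (WARN_ON(fwspec->param_count < 1))
		return -EINVAL;
	/* one cell: just the hwirq number, no trigger type encoded */
	*out_hwirq = fwspec->param[0];
	*out_type = IRQ_TYPE_NONE;
	return 0;
}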
*/ - if (parent.args[0] != IRQ_EXT) + if (parent.args[0] != RV_IRQ_EXT) continue; hartid = plic_find_hart_id(parent.np); diff --git a/drivers/leds/leds-as3645a.c b/drivers/leds/leds-as3645a.c index b7e0ae1af8fa..e8922fa03379 100644 --- a/drivers/leds/leds-as3645a.c +++ b/drivers/leds/leds-as3645a.c @@ -493,16 +493,17 @@ static int as3645a_parse_node(struct as3645a *flash, switch (id) { case AS_LED_FLASH: flash->flash_node = child; + fwnode_handle_get(child); break; case AS_LED_INDICATOR: flash->indicator_node = child; + fwnode_handle_get(child); break; default: dev_warn(&flash->client->dev, "unknown LED %u encountered, ignoring\n", id); break; } - fwnode_handle_get(child); } if (!flash->flash_node) { diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c index a5c73f3d5f79..2bf74595610f 100644 --- a/drivers/leds/leds-gpio.c +++ b/drivers/leds/leds-gpio.c @@ -151,9 +151,14 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev) struct gpio_led led = {}; const char *state = NULL; + /* + * Acquire gpiod from DT with an uninitialized label, which + * will be updated after the LED class device is registered. + * Only then is the final LED name known. + */ led.gpiod = devm_fwnode_get_gpiod_from_child(dev, NULL, child, GPIOD_ASIS, - led.name); + NULL); if (IS_ERR(led.gpiod)) { fwnode_handle_put(child); return ERR_CAST(led.gpiod); @@ -186,6 +191,9 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev) fwnode_handle_put(child); return ERR_PTR(ret); } + /* Set the gpiod label to match the corresponding LED name. */ + gpiod_set_consumer_name(led_dat->gpiod, + led_dat->cdev.dev->kobj.name); priv->num_leds++; } diff --git a/drivers/leds/leds-lm3532.c b/drivers/leds/leds-lm3532.c index 0507c6575c08..491268bb34a7 100644 --- a/drivers/leds/leds-lm3532.c +++ b/drivers/leds/leds-lm3532.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 // TI LM3532 LED driver // Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/ +// http://www.ti.com/lit/ds/symlink/lm3532.pdf #include <linux/i2c.h> #include <linux/leds.h> @@ -623,7 +624,7 @@ static int lm3532_parse_node(struct lm3532_data *priv) led->num_leds = fwnode_property_count_u32(child, "led-sources"); if (led->num_leds > LM3532_MAX_LED_STRINGS) { - dev_err(&priv->client->dev, "To many LED string defined\n"); + dev_err(&priv->client->dev, "Too many LED strings defined\n"); continue; } diff --git a/drivers/leds/leds-max77650.c b/drivers/leds/leds-max77650.c index 4c2d0b3c6dad..a0d4b725c917 100644 --- a/drivers/leds/leds-max77650.c +++ b/drivers/leds/leds-max77650.c @@ -135,9 +135,16 @@ err_node_put: return rv; } +static const struct of_device_id max77650_led_of_match[] = { + { .compatible = "maxim,max77650-led" }, + { } +}; +MODULE_DEVICE_TABLE(of, max77650_led_of_match); + static struct platform_driver max77650_led_driver = { .driver = { .name = "max77650-led", + .of_match_table = max77650_led_of_match, }, .probe = max77650_led_probe, }; diff --git a/drivers/leds/leds-rb532.c b/drivers/leds/leds-rb532.c index db5af83f0cec..b6447c1721b4 100644 --- a/drivers/leds/leds-rb532.c +++ b/drivers/leds/leds-rb532.c @@ -21,7 +21,6 @@ static void rb532_led_set(struct led_classdev *cdev, { if (brightness) set_latch_u5(LO_ULED, 0); - else set_latch_u5(0, LO_ULED); } diff --git a/drivers/leds/trigger/ledtrig-pattern.c b/drivers/leds/trigger/ledtrig-pattern.c index 718729c89440..3abcafe46278 100644 --- a/drivers/leds/trigger/ledtrig-pattern.c +++ b/drivers/leds/trigger/ledtrig-pattern.c @@ -455,7 +455,7 @@ static
void __exit pattern_trig_exit(void) module_init(pattern_trig_init); module_exit(pattern_trig_exit); -MODULE_AUTHOR("Raphael Teysseyre <rteysseyre@gmail.com"); -MODULE_AUTHOR("Baolin Wang <baolin.wang@linaro.org"); +MODULE_AUTHOR("Raphael Teysseyre <rteysseyre@gmail.com>"); +MODULE_AUTHOR("Baolin Wang <baolin.wang@linaro.org>"); MODULE_DESCRIPTION("LED Pattern trigger"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/lightnvm/pblk-trace.h b/drivers/lightnvm/pblk-trace.h index 9534503b69d9..47b67c6bff7a 100644 --- a/drivers/lightnvm/pblk-trace.h +++ b/drivers/lightnvm/pblk-trace.h @@ -46,7 +46,7 @@ TRACE_EVENT(pblk_chunk_reset, TP_STRUCT__entry( __string(name, name) __field(u64, ppa) - __field(int, state); + __field(int, state) ), TP_fast_assign( @@ -72,7 +72,7 @@ TRACE_EVENT(pblk_chunk_state, TP_STRUCT__entry( __string(name, name) __field(u64, ppa) - __field(int, state); + __field(int, state) ), TP_fast_assign( @@ -98,7 +98,7 @@ TRACE_EVENT(pblk_line_state, TP_STRUCT__entry( __string(name, name) __field(int, line) - __field(int, state); + __field(int, state) ), TP_fast_assign( @@ -121,7 +121,7 @@ TRACE_EVENT(pblk_state, TP_STRUCT__entry( __string(name, name) - __field(int, state); + __field(int, state) ), TP_fast_assign( diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 9198c1b480d9..adf26a21fcd1 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -301,6 +301,7 @@ struct cached_dev { struct block_device *bdev; struct cache_sb sb; + struct cache_sb_disk *sb_disk; struct bio sb_bio; struct bio_vec sb_bv[1]; struct closure sb_write; @@ -403,6 +404,7 @@ enum alloc_reserve { struct cache { struct cache_set *set; struct cache_sb sb; + struct cache_sb_disk *sb_disk; struct bio sb_bio; struct bio_vec sb_bv[1]; diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c index cffcdc9feefb..4385303836d8 100644 --- a/drivers/md/bcache/bset.c +++ b/drivers/md/bcache/bset.c @@ -1257,6 +1257,11 @@ static void __btree_sort(struct btree_keys *b, struct btree_iter *iter, * Our temporary buffer is the same size as the btree node's * buffer, we can just swap buffers instead of doing a big * memcpy() + * + * Don't worry even if 'out' is allocated from the mempool, it + * can still be swapped here, because state->pool is a page + * mempool created by mempool_init_page_pool(), which allocates + * pages directly with alloc_pages(). */ out->magic = b->set->data->magic;
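The bset.c comment above relies on a property of page mempools worth spelling out: mempool_init_page_pool() backs the pool with bare alloc_pages() pages, so mempool_free() accepts any page of the right order, not the specific page that was handed out. A self-contained sketch of that property (illustrative, not bcache code):

/* why a page-mempool element can be swapped before being freed */
static int page_pool_swap_demo(void)
{
	mempool_t pool;
	struct page *page;
	void *out;

	if (mempool_init_page_pool(&pool, 4, 0))	/* order-0 pages */
		return -ENOMEM;

	page = mempool_alloc(&pool, GFP_NOIO);
	out = page_address(page);

	/*
	 * 'out' may now be exchanged with any other page-backed buffer;
	 * the pool only sees "a page" coming back, not *the* page.
	 */
	mempool_free(virt_to_page(out), &pool);
	mempool_exit(&pool);
	return 0;
}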
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 14d6c33b0957..fa872df4e770 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -734,34 +734,32 @@ static unsigned long bch_mca_scan(struct shrinker *shrink, i = 0; btree_cache_used = c->btree_cache_used; - list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) { + list_for_each_entry_safe_reverse(b, t, &c->btree_cache_freeable, list) { if (nr <= 0) goto out; - if (++i > 3 && - !mca_reap(b, 0, false)) { + if (!mca_reap(b, 0, false)) { mca_data_free(b); rw_unlock(true, b); freed++; } nr--; + i++; } - for (; (nr--) && i < btree_cache_used; i++) { - if (list_empty(&c->btree_cache)) + list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) { + if (nr <= 0 || i >= btree_cache_used) goto out; - b = list_first_entry(&c->btree_cache, struct btree, list); - list_rotate_left(&c->btree_cache); - - if (!b->accessed && - !mca_reap(b, 0, false)) { + if (!mca_reap(b, 0, false)) { mca_bucket_free(b); mca_data_free(b); rw_unlock(true, b); freed++; - } else - b->accessed = 0; + } + + nr--; + i++; } out: mutex_unlock(&c->bucket_lock); @@ -1069,7 +1067,6 @@ retry: BUG_ON(!b->written); b->parent = parent; - b->accessed = 1; for (; i <= b->keys.nsets && b->keys.set[i].size; i++) { prefetch(b->keys.set[i].tree); @@ -1160,7 +1157,6 @@ retry: goto retry; } - b->accessed = 1; b->parent = parent; bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb)); diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h index 76cfd121a486..f4dcca449391 100644 --- a/drivers/md/bcache/btree.h +++ b/drivers/md/bcache/btree.h @@ -121,8 +121,6 @@ struct btree { /* Key/pointer for this btree node */ BKEY_PADDED(key); - /* Single bit - set when accessed, cleared by shrinker */ - unsigned long accessed; unsigned long seq; struct rw_semaphore lock; struct cache_set *c; diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index be2a2a201603..33ddc5269e8d 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -417,10 +417,14 @@ err: /* Journalling */ +#define nr_to_fifo_front(p, front_p, mask) (((p) - (front_p)) & (mask)) + static void btree_flush_write(struct cache_set *c) { struct btree *b, *t, *btree_nodes[BTREE_FLUSH_NR]; - unsigned int i, n; + unsigned int i, nr, ref_nr; + atomic_t *fifo_front_p, *now_fifo_front_p; + size_t mask; if (c->journal.btree_flushing) return; @@ -433,12 +437,50 @@ static void btree_flush_write(struct cache_set *c) c->journal.btree_flushing = true; spin_unlock(&c->journal.flush_write_lock); + /* get the oldest journal entry and check its refcount */ + spin_lock(&c->journal.lock); + fifo_front_p = &fifo_front(&c->journal.pin); + ref_nr = atomic_read(fifo_front_p); + if (ref_nr <= 0) { + /* + * do nothing if no btree node references + * the oldest journal entry + */ + spin_unlock(&c->journal.lock); + goto out; + } + spin_unlock(&c->journal.lock); + + mask = c->journal.pin.mask; + nr = 0; atomic_long_inc(&c->flush_write); memset(btree_nodes, 0, sizeof(btree_nodes)); - n = 0; mutex_lock(&c->bucket_lock); list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) { + /* + * It is safe to read now_fifo_front_p without holding + * c->journal.lock here, because we don't need an exact + * value, just to check whether the front pointer of + * c->journal.pin has changed. + */
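The nr_to_fifo_front() macro above is plain circular-ring distance: journal pins live in a power-of-two fifo, so the subtraction-and-mask survives wrap-around. A worked example with illustrative values:

/*
 * 8-slot fifo => mask = 7. Front (oldest journal entry) at slot 6,
 * a btree node's journal pin at slot 1:
 *
 *	(1 - 6) & 7 = (-5) & 7 = 3
 *
 * slots 6, 7, 0, 1: the pin is three entries behind the front. Only
 * a distance of 0 marks a node that references the oldest journal
 * entry, and those are the only nodes selected below.
 */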
+ now_fifo_front_p = &fifo_front(&c->journal.pin); + /* + * If the oldest journal entry is reclaimed and the front + * pointer of c->journal.pin changes, it is unnecessary + * to scan c->btree_cache any further, just quit the loop + * and flush out what we have already. + */ + if (now_fifo_front_p != fifo_front_p) + break; + /* + * quit this loop if all matching btree nodes are + * scanned and recorded in btree_nodes[] already. + */ + ref_nr = atomic_read(fifo_front_p); + if (nr >= ref_nr) + break; + if (btree_node_journal_flush(b)) pr_err("BUG: flush_write bit should not be set here!"); @@ -454,17 +496,44 @@ continue; } + /* + * Only select the btree node which exactly references + * the oldest journal entry. + * + * If the journal entry pointed to by fifo_front_p is + * reclaimed in parallel, don't worry: + * - the list_for_each_xxx loop will quit when it next + * checks now_fifo_front_p. + * - If there are matched nodes recorded in btree_nodes[], + * they are clean now (this is why and how the oldest + * journal entry can be reclaimed). These selected nodes + * will be ignored and skipped in the following for-loop. + */ + if (nr_to_fifo_front(btree_current_write(b)->journal, + fifo_front_p, + mask) != 0) { + mutex_unlock(&b->write_lock); + continue; + } + set_btree_node_journal_flush(b); mutex_unlock(&b->write_lock); - btree_nodes[n++] = b; - if (n == BTREE_FLUSH_NR) + btree_nodes[nr++] = b; + /* + * To avoid holding c->bucket_lock for too long, scan at + * most BTREE_FLUSH_NR matched btree nodes. If more btree + * nodes still reference the oldest journal entry, flush + * them the next time btree_flush_write() is called. + */ + if (nr == BTREE_FLUSH_NR) break; } mutex_unlock(&c->bucket_lock); - for (i = 0; i < n; i++) { + for (i = 0; i < nr; i++) { b = btree_nodes[i]; if (!b) { pr_err("BUG: btree_nodes[%d] is NULL", i); @@ -497,6 +566,7 @@ static void btree_flush_write(struct cache_set *c) mutex_unlock(&b->write_lock); } +out: spin_lock(&c->journal.flush_write_lock); c->journal.btree_flushing = false; spin_unlock(&c->journal.flush_write_lock); diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 77e9869345e7..3dea1d5acd5c 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -15,7 +15,6 @@ #include "writeback.h" #include <linux/blkdev.h> -#include <linux/buffer_head.h> #include <linux/debugfs.h> #include <linux/genhd.h> #include <linux/idr.h> @@ -60,17 +59,18 @@ struct workqueue_struct *bch_journal_wq; /* Superblock */ static const char *read_super(struct cache_sb *sb, struct block_device *bdev, - struct page **res) + struct cache_sb_disk **res) { const char *err; - struct cache_sb *s; - struct buffer_head *bh = __bread(bdev, 1, SB_SIZE); + struct cache_sb_disk *s; + struct page *page; unsigned int i; - if (!bh) + page = read_cache_page_gfp(bdev->bd_inode->i_mapping, + SB_OFFSET >> PAGE_SHIFT, GFP_KERNEL); + if (IS_ERR(page)) return "IO error"; - - s = (struct cache_sb *) bh->b_data; + s = page_address(page) + offset_in_page(SB_OFFSET); sb->offset = le64_to_cpu(s->offset); sb->version = le64_to_cpu(s->version); @@ -188,12 +188,10 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev, } sb->last_mount = (u32)ktime_get_real_seconds(); - err = NULL; - - get_page(bh->b_page); - *res = bh->b_page; + *res = s; + return NULL; err: - put_bh(bh); + put_page(page); return err; } @@ -207,15 +205,15 @@ static void write_bdev_super_endio(struct bio *bio)
closure_put(&dc->sb_write); } -static void __write_super(struct cache_sb *sb, struct bio *bio) +static void __write_super(struct cache_sb *sb, struct cache_sb_disk *out, + struct bio *bio) { - struct cache_sb *out = page_address(bio_first_page_all(bio)); unsigned int i; + bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_META; bio->bi_iter.bi_sector = SB_SECTOR; - bio->bi_iter.bi_size = SB_SIZE; - bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META); - bch_bio_map(bio, NULL); + __bio_add_page(bio, virt_to_page(out), SB_SIZE, + offset_in_page(out)); out->offset = cpu_to_le64(sb->offset); out->version = cpu_to_le64(sb->version); @@ -257,14 +255,14 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent) down(&dc->sb_write_mutex); closure_init(cl, parent); - bio_reset(bio); + bio_init(bio, dc->sb_bv, 1); bio_set_dev(bio, dc->bdev); bio->bi_end_io = write_bdev_super_endio; bio->bi_private = dc; closure_get(cl); /* I/O request sent to backing device */ - __write_super(&dc->sb, bio); + __write_super(&dc->sb, dc->sb_disk, bio); closure_return_with_destructor(cl, bch_write_bdev_super_unlock); } @@ -306,13 +304,13 @@ void bcache_write_super(struct cache_set *c) SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb)); - bio_reset(bio); + bio_init(bio, ca->sb_bv, 1); bio_set_dev(bio, ca->bdev); bio->bi_end_io = write_super_endio; bio->bi_private = ca; closure_get(cl); - __write_super(&ca->sb, bio); + __write_super(&ca->sb, ca->sb_disk, bio); } closure_return_with_destructor(cl, bcache_write_super_unlock); @@ -1275,6 +1273,9 @@ static void cached_dev_free(struct closure *cl) mutex_unlock(&bch_register_lock); + if (dc->sb_disk) + put_page(virt_to_page(dc->sb_disk)); + if (!IS_ERR_OR_NULL(dc->bdev)) blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); @@ -1350,7 +1351,7 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size) /* Cached device - bcache superblock */ -static int register_bdev(struct cache_sb *sb, struct page *sb_page, +static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk, struct block_device *bdev, struct cached_dev *dc) { @@ -1362,11 +1363,7 @@ static int register_bdev(struct cache_sb *sb, struct page *sb_page, memcpy(&dc->sb, sb, sizeof(struct cache_sb)); dc->bdev = bdev; dc->bdev->bd_holder = dc; - - bio_init(&dc->sb_bio, dc->sb_bio.bi_inline_vecs, 1); - bio_first_bvec_all(&dc->sb_bio)->bv_page = sb_page; - get_page(sb_page); - + dc->sb_disk = sb_disk; if (cached_dev_init(dc, sb->block_size << 9)) goto err; @@ -2136,8 +2133,8 @@ void bch_cache_release(struct kobject *kobj) for (i = 0; i < RESERVE_NR; i++) free_fifo(&ca->free[i]); - if (ca->sb_bio.bi_inline_vecs[0].bv_page) - put_page(bio_first_page_all(&ca->sb_bio)); + if (ca->sb_disk) + put_page(virt_to_page(ca->sb_disk)); if (!IS_ERR_OR_NULL(ca->bdev)) blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); @@ -2259,7 +2256,7 @@ err_free: return ret; } -static int register_cache(struct cache_sb *sb, struct page *sb_page, +static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk, struct block_device *bdev, struct cache *ca) { const char *err = NULL; /* must be set for any error case */ @@ -2269,10 +2266,7 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page, memcpy(&ca->sb, sb, sizeof(struct cache_sb)); ca->bdev = bdev; ca->bdev->bd_holder = ca; - - bio_init(&ca->sb_bio, ca->sb_bio.bi_inline_vecs, 1); - bio_first_bvec_all(&ca->sb_bio)->bv_page = sb_page; - get_page(sb_page); + ca->sb_disk = sb_disk; if (blk_queue_discard(bdev_get_queue(bdev))) 
ca->discard = CACHE_DISCARD(&ca->sb); @@ -2372,29 +2366,35 @@ static bool bch_is_open(struct block_device *bdev) static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, const char *buffer, size_t size) { - ssize_t ret = -EINVAL; - const char *err = "cannot allocate memory"; + const char *err; char *path = NULL; - struct cache_sb *sb = NULL; - struct block_device *bdev = NULL; - struct page *sb_page = NULL; + struct cache_sb *sb; + struct cache_sb_disk *sb_disk; + struct block_device *bdev; + ssize_t ret; + ret = -EBUSY; + err = "failed to reference bcache module"; if (!try_module_get(THIS_MODULE)) - return -EBUSY; + goto out; /* For latest state of bcache_is_reboot */ smp_mb(); + err = "bcache is in reboot"; if (bcache_is_reboot) - return -EBUSY; + goto out_module_put; + ret = -ENOMEM; + err = "cannot allocate memory"; path = kstrndup(buffer, size, GFP_KERNEL); if (!path) - goto err; + goto out_module_put; sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL); if (!sb) - goto err; + goto out_free_path; + ret = -EINVAL; err = "failed to open device"; bdev = blkdev_get_by_path(strim(path), FMODE_READ|FMODE_WRITE|FMODE_EXCL, @@ -2411,57 +2411,63 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, if (!IS_ERR(bdev)) bdput(bdev); if (attr == &ksysfs_register_quiet) - goto quiet_out; + goto done; } - goto err; + goto out_free_sb; } err = "failed to set blocksize"; if (set_blocksize(bdev, 4096)) - goto err_close; + goto out_blkdev_put; - err = read_super(sb, bdev, &sb_page); + err = read_super(sb, bdev, &sb_disk); if (err) - goto err_close; + goto out_blkdev_put; err = "failed to register device"; if (SB_IS_BDEV(sb)) { struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL); if (!dc) - goto err_close; + goto out_put_sb_page; mutex_lock(&bch_register_lock); - ret = register_bdev(sb, sb_page, bdev, dc); + ret = register_bdev(sb, sb_disk, bdev, dc); mutex_unlock(&bch_register_lock); /* blkdev_put() will be called in cached_dev_free() */ if (ret < 0) - goto err; + goto out_free_sb; } else { struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL); if (!ca) - goto err_close; + goto out_put_sb_page; /* blkdev_put() will be called in bch_cache_release() */ - if (register_cache(sb, sb_page, bdev, ca) != 0) - goto err; + if (register_cache(sb, sb_disk, bdev, ca) != 0) + goto out_free_sb; } -quiet_out: - ret = size; -out: - if (sb_page) - put_page(sb_page); + +done: kfree(sb); kfree(path); module_put(THIS_MODULE); - return ret; + return size; -err_close: - blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); -err: - pr_info("error %s: %s", path, err); - goto out; +out_put_sb_page: + put_page(virt_to_page(sb_disk)); +out_blkdev_put: + blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); +out_free_sb: + kfree(sb); +out_free_path: + kfree(path); + path = NULL; +out_module_put: + module_put(THIS_MODULE); +out: + pr_info("error %s: %s", path?path:"", err); + return ret; } diff --git a/drivers/md/dm-clone-metadata.c b/drivers/md/dm-clone-metadata.c index 08c552e5e41b..c05b12110456 100644 --- a/drivers/md/dm-clone-metadata.c +++ b/drivers/md/dm-clone-metadata.c @@ -67,23 +67,34 @@ struct superblock_disk { * To save constantly doing look ups on disk we keep an in core copy of the * on-disk bitmap, the region_map. * - * To further reduce metadata I/O overhead we use a second bitmap, the dmap - * (dirty bitmap), which tracks the dirty words, i.e. longs, of the region_map. 
+ * In order to track which regions are hydrated during a metadata transaction, + * we use a second set of bitmaps, the dmap (dirty bitmap), which includes two + * bitmaps, namely dirty_regions and dirty_words. The dirty_regions bitmap + * tracks the regions that got hydrated during the current metadata + * transaction. The dirty_words bitmap tracks the dirty words, i.e. longs, of + * the dirty_regions bitmap. + * + * This allows us to precisely track the regions that were hydrated during the + * current metadata transaction and update the metadata accordingly, when we + * commit the current transaction. This is important because dm-clone should + * only commit the metadata of regions that were properly flushed to the + * destination device beforehand. Otherwise, in case of a crash, we could end + * up with a corrupted dm-clone device. * * When a region finishes hydrating dm-clone calls * dm_clone_set_region_hydrated(), or for discard requests * dm_clone_cond_set_range(), which sets the corresponding bits in region_map * and dmap. * - * During a metadata commit we scan the dmap for dirty region_map words (longs) - * and update accordingly the on-disk metadata. Thus, we don't have to flush to - * disk the whole region_map. We can just flush the dirty region_map words. + * During a metadata commit we scan dmap->dirty_words and dmap->dirty_regions + * and update the on-disk metadata accordingly. Thus, we don't have to flush to + * disk the whole region_map. We can just flush the dirty region_map bits. * - * We use a dirty bitmap, which is smaller than the original region_map, to - * reduce the amount of memory accesses during a metadata commit. As dm-bitset - * accesses the on-disk bitmap in 64-bit word granularity, there is no - * significant benefit in tracking the dirty region_map bits with a smaller - * granularity. + * We use the helper dmap->dirty_words bitmap, which is smaller than the + * original region_map, to reduce the amount of memory accesses during a + * metadata commit. Moreover, as dm-bitset also accesses the on-disk bitmap in + * 64-bit word granularity, the dirty_words bitmap helps us avoid useless disk + * accesses. * * We could update the on-disk bitmap directly when dm-clone calls either * dm_clone_set_region_hydrated() or dm_clone_cond_set_range(), but this @@ -92,12 +103,13 @@ struct superblock_disk { * e.g., in a hooked overwrite bio's completion routine, and further reduce the * I/O completion latency. * - * We maintain two dirty bitmaps. During a metadata commit we atomically swap - * the currently used dmap with the unused one. This allows the metadata update - * functions to run concurrently with an ongoing commit. + * We maintain two dirty bitmap sets. During a metadata commit we atomically + * swap the currently used dmap with the unused one. This allows the metadata + * update functions to run concurrently with an ongoing commit. */ struct dirty_map { unsigned long *dirty_words; + unsigned long *dirty_regions; unsigned int changed; }; @@ -115,6 +127,9 @@ struct dm_clone_metadata { struct dirty_map dmap[2]; struct dirty_map *current_dmap; + /* Protected by lock */ + struct dirty_map *committing_dmap; + /* * In core copy of the on-disk bitmap to save constantly doing look ups * on disk.
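To make the two-level dirty tracking above concrete, here is a sketch of what marking one region hydrated does and what the commit scan then looks at (the index math mirrors dm_clone_set_region_hydrated() further down; the loop shape is illustrative):

/* region 130, BITS_PER_LONG == 64: it lives in word 2 of region_map */
unsigned long region_nr = 130;
unsigned long word = region_nr / BITS_PER_LONG;		/* == 2 */

__set_bit(word, dmap->dirty_words);		/* word 2 changed this transaction */
__set_bit(region_nr, dmap->dirty_regions);	/* region 130 newly hydrated */
__set_bit(region_nr, cmd->region_map);		/* in-core copy of on-disk bitmap */

/* commit: walk only the dirty words, then only the dirty bits within each */
for_each_set_bit(word, dmap->dirty_words, cmd->nr_words)
	__update_metadata_word(cmd, dmap->dirty_regions, word);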
@@ -461,34 +476,53 @@ static size_t bitmap_size(unsigned long nr_bits) return BITS_TO_LONGS(nr_bits) * sizeof(long); } -static int dirty_map_init(struct dm_clone_metadata *cmd) +static int __dirty_map_init(struct dirty_map *dmap, unsigned long nr_words, + unsigned long nr_regions) { - cmd->dmap[0].changed = 0; - cmd->dmap[0].dirty_words = kvzalloc(bitmap_size(cmd->nr_words), GFP_KERNEL); + dmap->changed = 0; - if (!cmd->dmap[0].dirty_words) { - DMERR("Failed to allocate dirty bitmap"); + dmap->dirty_words = kvzalloc(bitmap_size(nr_words), GFP_KERNEL); + if (!dmap->dirty_words) + return -ENOMEM; + + dmap->dirty_regions = kvzalloc(bitmap_size(nr_regions), GFP_KERNEL); + if (!dmap->dirty_regions) { + kvfree(dmap->dirty_words); return -ENOMEM; } - cmd->dmap[1].changed = 0; - cmd->dmap[1].dirty_words = kvzalloc(bitmap_size(cmd->nr_words), GFP_KERNEL); + return 0; +} + +static void __dirty_map_exit(struct dirty_map *dmap) +{ + kvfree(dmap->dirty_words); + kvfree(dmap->dirty_regions); +} + +static int dirty_map_init(struct dm_clone_metadata *cmd) +{ + if (__dirty_map_init(&cmd->dmap[0], cmd->nr_words, cmd->nr_regions)) { + DMERR("Failed to allocate dirty bitmap"); + return -ENOMEM; + } - if (!cmd->dmap[1].dirty_words) { + if (__dirty_map_init(&cmd->dmap[1], cmd->nr_words, cmd->nr_regions)) { DMERR("Failed to allocate dirty bitmap"); - kvfree(cmd->dmap[0].dirty_words); + __dirty_map_exit(&cmd->dmap[0]); return -ENOMEM; } cmd->current_dmap = &cmd->dmap[0]; + cmd->committing_dmap = NULL; return 0; } static void dirty_map_exit(struct dm_clone_metadata *cmd) { - kvfree(cmd->dmap[0].dirty_words); - kvfree(cmd->dmap[1].dirty_words); + __dirty_map_exit(&cmd->dmap[0]); + __dirty_map_exit(&cmd->dmap[1]); } static int __load_bitset_in_core(struct dm_clone_metadata *cmd) @@ -633,21 +667,23 @@ unsigned long dm_clone_find_next_unhydrated_region(struct dm_clone_metadata *cmd return find_next_zero_bit(cmd->region_map, cmd->nr_regions, start); } -static int __update_metadata_word(struct dm_clone_metadata *cmd, unsigned long word) +static int __update_metadata_word(struct dm_clone_metadata *cmd, + unsigned long *dirty_regions, + unsigned long word) { int r; unsigned long index = word * BITS_PER_LONG; unsigned long max_index = min(cmd->nr_regions, (word + 1) * BITS_PER_LONG); while (index < max_index) { - if (test_bit(index, cmd->region_map)) { + if (test_bit(index, dirty_regions)) { r = dm_bitset_set_bit(&cmd->bitset_info, cmd->bitset_root, index, &cmd->bitset_root); - if (r) { DMERR("dm_bitset_set_bit failed"); return r; } + __clear_bit(index, dirty_regions); } index++; } @@ -721,7 +757,7 @@ static int __flush_dmap(struct dm_clone_metadata *cmd, struct dirty_map *dmap) if (word == cmd->nr_words) break; - r = __update_metadata_word(cmd, word); + r = __update_metadata_word(cmd, dmap->dirty_regions, word); if (r) return r; @@ -743,15 +779,17 @@ static int __flush_dmap(struct dm_clone_metadata *cmd, struct dirty_map *dmap) return 0; } -int dm_clone_metadata_commit(struct dm_clone_metadata *cmd) +int dm_clone_metadata_pre_commit(struct dm_clone_metadata *cmd) { - int r = -EPERM; + int r = 0; struct dirty_map *dmap, *next_dmap; down_write(&cmd->lock); - if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) + if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { + r = -EPERM; goto out; + } /* Get current dirty bitmap */ dmap = cmd->current_dmap; @@ -763,7 +801,7 @@ int dm_clone_metadata_commit(struct dm_clone_metadata *cmd) * The last commit failed, so we don't have a clean dirty-bitmap to * use. 
*/ - if (WARN_ON(next_dmap->changed)) { + if (WARN_ON(next_dmap->changed || cmd->committing_dmap)) { r = -EINVAL; goto out; } @@ -773,11 +811,33 @@ int dm_clone_metadata_commit(struct dm_clone_metadata *cmd) cmd->current_dmap = next_dmap; spin_unlock_irq(&cmd->bitmap_lock); - /* - * No one is accessing the old dirty bitmap anymore, so we can flush - * it. - */ - r = __flush_dmap(cmd, dmap); + /* Set old dirty bitmap as currently committing */ + cmd->committing_dmap = dmap; +out: + up_write(&cmd->lock); + + return r; +} + +int dm_clone_metadata_commit(struct dm_clone_metadata *cmd) +{ + int r = -EPERM; + + down_write(&cmd->lock); + + if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) + goto out; + + if (WARN_ON(!cmd->committing_dmap)) { + r = -EINVAL; + goto out; + } + + r = __flush_dmap(cmd, cmd->committing_dmap); + if (!r) { + /* Clear committing dmap */ + cmd->committing_dmap = NULL; + } out: up_write(&cmd->lock); @@ -802,6 +862,7 @@ int dm_clone_set_region_hydrated(struct dm_clone_metadata *cmd, unsigned long re dmap = cmd->current_dmap; __set_bit(word, dmap->dirty_words); + __set_bit(region_nr, dmap->dirty_regions); __set_bit(region_nr, cmd->region_map); dmap->changed = 1; @@ -830,6 +891,7 @@ int dm_clone_cond_set_range(struct dm_clone_metadata *cmd, unsigned long start, if (!test_bit(region_nr, cmd->region_map)) { word = region_nr / BITS_PER_LONG; __set_bit(word, dmap->dirty_words); + __set_bit(region_nr, dmap->dirty_regions); __set_bit(region_nr, cmd->region_map); dmap->changed = 1; } diff --git a/drivers/md/dm-clone-metadata.h b/drivers/md/dm-clone-metadata.h index 3fe50a781c11..14af1ebd853f 100644 --- a/drivers/md/dm-clone-metadata.h +++ b/drivers/md/dm-clone-metadata.h @@ -75,7 +75,23 @@ void dm_clone_metadata_close(struct dm_clone_metadata *cmd); /* * Commit dm-clone metadata to disk. + * + * We use a two phase commit: + * + * 1. dm_clone_metadata_pre_commit(): Prepare the current transaction for + * committing. After this is called, all subsequent metadata updates, done + * through either dm_clone_set_region_hydrated() or + * dm_clone_cond_set_range(), will be part of the **next** transaction. + * + * 2. dm_clone_metadata_commit(): Actually commit the current transaction to + * disk and start a new transaction. + * + * This allows dm-clone to flush the destination device after step (1) to + * ensure that all freshly hydrated regions, for which we are updating the + * metadata, are properly written to non-volatile storage and won't be lost in + * case of a crash. */ +int dm_clone_metadata_pre_commit(struct dm_clone_metadata *cmd); int dm_clone_metadata_commit(struct dm_clone_metadata *cmd); /* @@ -112,6 +128,7 @@ int dm_clone_metadata_abort(struct dm_clone_metadata *cmd); * Switches metadata to a read only mode. Once read-only mode has been entered * the following functions will return -EPERM: * + * dm_clone_metadata_pre_commit() * dm_clone_metadata_commit() * dm_clone_set_region_hydrated() * dm_clone_cond_set_range() diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c index b3d89072d21c..d1e1b5b56b1b 100644 --- a/drivers/md/dm-clone-target.c +++ b/drivers/md/dm-clone-target.c @@ -86,6 +86,12 @@ struct clone { struct dm_clone_metadata *cmd; + /* + * bio used to flush the destination device, before committing the + * metadata. + */ + struct bio flush_bio; + /* Region hydration hash table */ struct hash_table_bucket *ht; @@ -1108,10 +1114,13 @@ static bool need_commit_due_to_time(struct clone *clone) /* * A non-zero return indicates read-only or fail mode. 
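Taken together, the calling convention that the dm-clone-metadata.h comment above spells out looks like this from the target's side (a condensed sketch; flush_destination_device() is a placeholder for the empty-flush bio that commit_metadata() below actually submits):

int r = dm_clone_metadata_pre_commit(cmd);	/* seal the current transaction */
if (r)
	return r;

r = flush_destination_device();		/* hydrated data must be stable first */
if (r)
	return r;

return dm_clone_metadata_commit(cmd);	/* write the sealed transaction to disk */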
*/ -static int commit_metadata(struct clone *clone) +static int commit_metadata(struct clone *clone, bool *dest_dev_flushed) { int r = 0; + if (dest_dev_flushed) + *dest_dev_flushed = false; + mutex_lock(&clone->commit_lock); if (!dm_clone_changed_this_transaction(clone->cmd)) @@ -1122,8 +1131,26 @@ static int commit_metadata(struct clone *clone) goto out; } - r = dm_clone_metadata_commit(clone->cmd); + r = dm_clone_metadata_pre_commit(clone->cmd); + if (unlikely(r)) { + __metadata_operation_failed(clone, "dm_clone_metadata_pre_commit", r); + goto out; + } + bio_reset(&clone->flush_bio); + bio_set_dev(&clone->flush_bio, clone->dest_dev->bdev); + clone->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; + + r = submit_bio_wait(&clone->flush_bio); + if (unlikely(r)) { + __metadata_operation_failed(clone, "flush destination device", r); + goto out; + } + + if (dest_dev_flushed) + *dest_dev_flushed = true; + + r = dm_clone_metadata_commit(clone->cmd); if (unlikely(r)) { __metadata_operation_failed(clone, "dm_clone_metadata_commit", r); goto out; @@ -1194,6 +1221,7 @@ static void process_deferred_bios(struct clone *clone) static void process_deferred_flush_bios(struct clone *clone) { struct bio *bio; + bool dest_dev_flushed; struct bio_list bios = BIO_EMPTY_LIST; struct bio_list bio_completions = BIO_EMPTY_LIST; @@ -1213,7 +1241,7 @@ static void process_deferred_flush_bios(struct clone *clone) !(dm_clone_changed_this_transaction(clone->cmd) && need_commit_due_to_time(clone))) return; - if (commit_metadata(clone)) { + if (commit_metadata(clone, &dest_dev_flushed)) { bio_list_merge(&bios, &bio_completions); while ((bio = bio_list_pop(&bios))) @@ -1227,8 +1255,17 @@ static void process_deferred_flush_bios(struct clone *clone) while ((bio = bio_list_pop(&bio_completions))) bio_endio(bio); - while ((bio = bio_list_pop(&bios))) - generic_make_request(bio); + while ((bio = bio_list_pop(&bios))) { + if ((bio->bi_opf & REQ_PREFLUSH) && dest_dev_flushed) { + /* We just flushed the destination device as part of + * the metadata commit, so there is no reason to send + * another flush. 
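The flush issued inside commit_metadata() above is the stock empty-preflush idiom: a payload-less bio marked REQ_OP_WRITE | REQ_PREFLUSH, submitted synchronously. As a standalone sketch (hypothetical helper name, same calls as the code above):

/* synchronously drain a device's volatile write cache */
static int flush_dev_cache(struct block_device *bdev, struct bio *flush_bio)
{
	bio_reset(flush_bio);				/* reuse an embedded bio */
	bio_set_dev(flush_bio, bdev);
	flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;	/* no data, flush only */

	return submit_bio_wait(flush_bio);		/* 0 on success */
}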
+ */ + bio_endio(bio); + } else { + generic_make_request(bio); + } + } } static void do_worker(struct work_struct *work) @@ -1400,7 +1437,7 @@ static void clone_status(struct dm_target *ti, status_type_t type, /* Commit to ensure statistics aren't out-of-date */ if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti)) - (void) commit_metadata(clone); + (void) commit_metadata(clone, NULL); r = dm_clone_get_free_metadata_block_count(clone->cmd, &nr_free_metadata_blocks); @@ -1834,6 +1871,7 @@ static int clone_ctr(struct dm_target *ti, unsigned int argc, char **argv) bio_list_init(&clone->deferred_flush_completions); clone->hydration_offset = 0; atomic_set(&clone->hydrations_in_flight, 0); + bio_init(&clone->flush_bio, NULL, 0); clone->wq = alloc_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM, 0); if (!clone->wq) { @@ -1907,6 +1945,7 @@ static void clone_dtr(struct dm_target *ti) struct clone *clone = ti->private; mutex_destroy(&clone->commit_lock); + bio_uninit(&clone->flush_bio); for (i = 0; i < clone->nr_ctr_args; i++) kfree(clone->ctr_args[i]); @@ -1961,7 +2000,7 @@ static void clone_postsuspend(struct dm_target *ti) wait_event(clone->hydration_stopped, !atomic_read(&clone->hydrations_in_flight)); flush_workqueue(clone->wq); - (void) commit_metadata(clone); + (void) commit_metadata(clone, NULL); } static void clone_resume(struct dm_target *ti) diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index dbcc1e41cd57..e0c32793c248 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -599,45 +599,10 @@ static struct pgpath *__map_bio(struct multipath *m, struct bio *bio) return pgpath; } -static struct pgpath *__map_bio_fast(struct multipath *m, struct bio *bio) -{ - struct pgpath *pgpath; - unsigned long flags; - - /* Do we need to select a new pgpath? */ - /* - * FIXME: currently only switching path if no path (due to failure, etc) - * - which negates the point of using a path selector - */ - pgpath = READ_ONCE(m->current_pgpath); - if (!pgpath) - pgpath = choose_pgpath(m, bio->bi_iter.bi_size); - - if (!pgpath) { - if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) { - /* Queue for the daemon to resubmit */ - spin_lock_irqsave(&m->lock, flags); - bio_list_add(&m->queued_bios, bio); - spin_unlock_irqrestore(&m->lock, flags); - queue_work(kmultipathd, &m->process_queued_bios); - - return ERR_PTR(-EAGAIN); - } - return NULL; - } - - return pgpath; -} - static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_mpath_io *mpio) { - struct pgpath *pgpath; - - if (!m->hw_handler_name) - pgpath = __map_bio_fast(m, bio); - else - pgpath = __map_bio(m, bio); + struct pgpath *pgpath = __map_bio(m, bio); if (IS_ERR(pgpath)) return DM_MAPIO_SUBMITTED; diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index 3c50c4e4da8f..963d3774c93e 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c @@ -17,7 +17,7 @@ #include <linux/dm-bufio.h> #define DM_MSG_PREFIX "persistent snapshot" -#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32 /* 16KB */ +#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32U /* 16KB */ #define DM_PREFETCH_CHUNKS 12 diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index 4c68a7b93d5e..b88d6d701f5b 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -189,6 +189,15 @@ struct dm_pool_metadata { sector_t data_block_size; /* + * Pre-commit callback. + * + * This allows the thin provisioning target to run a callback before + * the metadata are committed. 
+ */ + dm_pool_pre_commit_fn pre_commit_fn; + void *pre_commit_context; + + /* * We reserve a section of the metadata for commit overhead. * All reported space does *not* include this. */ @@ -826,6 +835,14 @@ static int __commit_transaction(struct dm_pool_metadata *pmd) if (unlikely(!pmd->in_service)) return 0; + if (pmd->pre_commit_fn) { + r = pmd->pre_commit_fn(pmd->pre_commit_context); + if (r < 0) { + DMERR("pre-commit callback failed"); + return r; + } + } + r = __write_changed_details(pmd); if (r < 0) return r; @@ -892,6 +909,8 @@ struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev, pmd->in_service = false; pmd->bdev = bdev; pmd->data_block_size = data_block_size; + pmd->pre_commit_fn = NULL; + pmd->pre_commit_context = NULL; r = __create_persistent_data_objects(pmd, format_device); if (r) { @@ -2044,6 +2063,16 @@ int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd, return r; } +void dm_pool_register_pre_commit_callback(struct dm_pool_metadata *pmd, + dm_pool_pre_commit_fn fn, + void *context) +{ + pmd_write_lock_in_core(pmd); + pmd->pre_commit_fn = fn; + pmd->pre_commit_context = context; + pmd_write_unlock(pmd); +} + int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd) { int r = -EINVAL; diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h index f6be0d733c20..7ef56bd2a7e3 100644 --- a/drivers/md/dm-thin-metadata.h +++ b/drivers/md/dm-thin-metadata.h @@ -230,6 +230,13 @@ bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd); */ void dm_pool_issue_prefetches(struct dm_pool_metadata *pmd); +/* Pre-commit callback */ +typedef int (*dm_pool_pre_commit_fn)(void *context); + +void dm_pool_register_pre_commit_callback(struct dm_pool_metadata *pmd, + dm_pool_pre_commit_fn fn, + void *context); + /*----------------------------------------------------------------*/ #endif diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 5a2c494cb552..57626c27a54b 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -328,6 +328,7 @@ struct pool_c { dm_block_t low_water_blocks; struct pool_features requested_pf; /* Features requested during table load */ struct pool_features adjusted_pf; /* Features used after adjusting for constituent devices */ + struct bio flush_bio; }; /* @@ -2383,8 +2384,16 @@ static void process_deferred_bios(struct pool *pool) while ((bio = bio_list_pop(&bio_completions))) bio_endio(bio); - while ((bio = bio_list_pop(&bios))) - generic_make_request(bio); + while ((bio = bio_list_pop(&bios))) { + /* + * The data device was flushed as part of metadata commit, + * so complete redundant flushes immediately. + */ + if (bio->bi_opf & REQ_PREFLUSH) + bio_endio(bio); + else + generic_make_request(bio); + } } static void do_worker(struct work_struct *ws) @@ -3115,6 +3124,7 @@ static void pool_dtr(struct dm_target *ti) __pool_dec(pt->pool); dm_put_device(ti, pt->metadata_dev); dm_put_device(ti, pt->data_dev); + bio_uninit(&pt->flush_bio); kfree(pt); mutex_unlock(&dm_thin_pool_table.mutex); @@ -3180,6 +3190,29 @@ static void metadata_low_callback(void *context) dm_table_event(pool->ti->table); } +/* + * We need to flush the data device **before** committing the metadata. + * + * This ensures that the data blocks of any newly inserted mappings are + * properly written to non-volatile storage and won't be lost in case of a + * crash. 
+ * + * Failure to do so can result in data corruption in the case of internal or + * external snapshots and in the case of newly provisioned blocks, when block + * zeroing is enabled. + */ +static int metadata_pre_commit_callback(void *context) +{ + struct pool_c *pt = context; + struct bio *flush_bio = &pt->flush_bio; + + bio_reset(flush_bio); + bio_set_dev(flush_bio, pt->data_dev->bdev); + flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; + + return submit_bio_wait(flush_bio); +} + static sector_t get_dev_size(struct block_device *bdev) { return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT; @@ -3348,6 +3381,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) pt->data_dev = data_dev; pt->low_water_blocks = low_water_blocks; pt->adjusted_pf = pt->requested_pf = pf; + bio_init(&pt->flush_bio, NULL, 0); ti->num_flush_bios = 1; /* @@ -3374,6 +3408,10 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) if (r) goto out_flags_changed; + dm_pool_register_pre_commit_callback(pt->pool->pmd, + metadata_pre_commit_callback, + pt); + pt->callbacks.congested_fn = pool_is_congested; dm_table_add_target_callbacks(ti->table, &pt->callbacks); diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c index 3ad18246fcb3..e230052c2107 100644 --- a/drivers/md/md-bitmap.c +++ b/drivers/md/md-bitmap.c @@ -1019,8 +1019,6 @@ void md_bitmap_unplug(struct bitmap *bitmap) /* look at each page to see if there are any set bits that need to be * flushed out to disk */ for (i = 0; i < bitmap->storage.file_pages; i++) { - if (!bitmap->storage.filemap) - return; dirty = test_and_clear_page_attr(bitmap, i, BITMAP_PAGE_DIRTY); need_write = test_and_clear_page_attr(bitmap, i, BITMAP_PAGE_NEEDWRITE); @@ -1338,7 +1336,8 @@ void md_bitmap_daemon_work(struct mddev *mddev) BITMAP_PAGE_DIRTY)) /* bitmap_unplug will handle the rest */ break; - if (test_and_clear_page_attr(bitmap, j, + if (bitmap->storage.filemap && + test_and_clear_page_attr(bitmap, j, BITMAP_PAGE_NEEDWRITE)) { write_page(bitmap, bitmap->storage.filemap[j], 0); } @@ -1790,8 +1789,8 @@ void md_bitmap_destroy(struct mddev *mddev) return; md_bitmap_wait_behind_writes(mddev); - mempool_destroy(mddev->wb_info_pool); - mddev->wb_info_pool = NULL; + if (!mddev->serialize_policy) + mddev_destroy_serial_pool(mddev, NULL, true); mutex_lock(&mddev->bitmap_info.mutex); spin_lock(&mddev->lock); @@ -1908,7 +1907,7 @@ int md_bitmap_load(struct mddev *mddev) goto out; rdev_for_each(rdev, mddev) - mddev_create_wb_pool(mddev, rdev, true); + mddev_create_serial_pool(mddev, rdev, true); if (mddev_is_clustered(mddev)) md_cluster_ops->load_bitmaps(mddev, mddev->bitmap_info.nodes); @@ -2475,16 +2474,16 @@ backlog_store(struct mddev *mddev, const char *buf, size_t len) if (backlog > COUNTER_MAX) return -EINVAL; mddev->bitmap_info.max_write_behind = backlog; - if (!backlog && mddev->wb_info_pool) { - /* wb_info_pool is not needed if backlog is zero */ - mempool_destroy(mddev->wb_info_pool); - mddev->wb_info_pool = NULL; - } else if (backlog && !mddev->wb_info_pool) { - /* wb_info_pool is needed since backlog is not zero */ + if (!backlog && mddev->serial_info_pool) { + /* serial_info_pool is not needed if backlog is zero */ + if (!mddev->serialize_policy) + mddev_destroy_serial_pool(mddev, NULL, false); + } else if (backlog && !mddev->serial_info_pool) { + /* serial_info_pool is needed since backlog is not zero */ struct md_rdev *rdev; rdev_for_each(rdev, mddev) - mddev_create_wb_pool(mddev, rdev, false); + mddev_create_serial_pool(mddev, rdev, 
false); } if (old_mwb != backlog) md_bitmap_update_sb(mddev->bitmap); } diff --git a/drivers/md/md.c b/drivers/md/md.c index 805b33e27496..4824d50526fa 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -125,74 +125,165 @@ static inline int speed_max(struct mddev *mddev) mddev->sync_speed_max : sysctl_speed_limit_max; } -static int rdev_init_wb(struct md_rdev *rdev) +static void rdev_uninit_serial(struct md_rdev *rdev) { - if (rdev->bdev->bd_queue->nr_hw_queues == 1) + if (!test_and_clear_bit(CollisionCheck, &rdev->flags)) + return; + + kvfree(rdev->serial); + rdev->serial = NULL; +} + +static void rdevs_uninit_serial(struct mddev *mddev) +{ + struct md_rdev *rdev; + + rdev_for_each(rdev, mddev) + rdev_uninit_serial(rdev); +} + +static int rdev_init_serial(struct md_rdev *rdev) +{ + /* serial_nums equals BARRIER_BUCKETS_NR */ + int i, serial_nums = 1 << ((PAGE_SHIFT - ilog2(sizeof(atomic_t)))); + struct serial_in_rdev *serial = NULL; + + if (test_bit(CollisionCheck, &rdev->flags)) return 0; - spin_lock_init(&rdev->wb_list_lock); - INIT_LIST_HEAD(&rdev->wb_list); - init_waitqueue_head(&rdev->wb_io_wait); - set_bit(WBCollisionCheck, &rdev->flags); + serial = kvmalloc(sizeof(struct serial_in_rdev) * serial_nums, + GFP_KERNEL); + if (!serial) + return -ENOMEM; - return 1; + for (i = 0; i < serial_nums; i++) { + struct serial_in_rdev *serial_tmp = &serial[i]; + + spin_lock_init(&serial_tmp->serial_lock); + serial_tmp->serial_rb = RB_ROOT_CACHED; + init_waitqueue_head(&serial_tmp->serial_io_wait); + } + + rdev->serial = serial; + set_bit(CollisionCheck, &rdev->flags); + + return 0; +} + +static int rdevs_init_serial(struct mddev *mddev) +{ + struct md_rdev *rdev; + int ret = 0; + + rdev_for_each(rdev, mddev) { + ret = rdev_init_serial(rdev); + if (ret) + break; + } + + /* Free all resources if the pool does not exist */ + if (ret && !mddev->serial_info_pool) + rdevs_uninit_serial(mddev); + + return ret; } /* - * Create wb_info_pool if rdev is the first multi-queue device flaged - * with writemostly, also write-behind mode is enabled. + * rdev needs serialization enabled if it meets both conditions: + * 1. it is a multi-queue device flagged with writemostly. + * 2. the write-behind mode is enabled. */ -void mddev_create_wb_pool(struct mddev *mddev, struct md_rdev *rdev, - bool is_suspend) +static int rdev_need_serial(struct md_rdev *rdev) { - if (mddev->bitmap_info.max_write_behind == 0) - return; + return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 && + rdev->bdev->bd_queue->nr_hw_queues != 1 && + test_bit(WriteMostly, &rdev->flags)); +} + +/* + * Init resources for rdev(s), then create serial_info_pool if: + * 1. rdev is the first device that returns true from rdev_need_serial. + * 2. rdev is NULL, meaning we want to enable serialization for all rdevs. + */
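The predicate and the one-shot pool allocation are easier to see outside kernel context. Below is a minimal userspace sketch of the same gating logic; the struct names are invented for illustration, and malloc() stands in for the kernel mempool, so this models the control flow rather than the kernel API:

#include <stdbool.h>
#include <stdlib.h>

struct rdev_model {
	bool write_mostly;      /* models the WriteMostly flag */
	int nr_hw_queues;       /* != 1 means multi-queue */
	bool collision_check;   /* models the CollisionCheck flag */
};

struct array_model {
	int max_write_behind;   /* write-behind enabled when > 0 */
	void *serial_info_pool; /* stands in for the mempool */
};

/* Mirrors rdev_need_serial(): only a multi-queue, write-mostly member of
 * an array with write-behind enabled needs serialization. */
static bool need_serial(const struct array_model *a, const struct rdev_model *r)
{
	return a->max_write_behind > 0 && r->nr_hw_queues != 1 && r->write_mostly;
}

/* Mirrors the create path: per-device state may be initialized for many
 * devices, but the shared pool is allocated at most once. */
static int create_serial_pool(struct array_model *a, struct rdev_model *r)
{
	if (!need_serial(a, r) && !r->collision_check)
		return 0;
	r->collision_check = true;
	if (!a->serial_info_pool) {
		a->serial_info_pool = malloc(64);
		if (!a->serial_info_pool)
			return -1;
	}
	return 0;
}

int main(void)
{
	struct array_model a = { .max_write_behind = 8, .serial_info_pool = NULL };
	struct rdev_model r = { .write_mostly = true, .nr_hw_queues = 4 };

	return create_serial_pool(&a, &r); /* 0 on success */
}

As in the kernel path, calling create_serial_pool() again for further devices only flips their per-device flag; the pool itself is created once and shared.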
+void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev, + bool is_suspend) +{ + int ret = 0; - if (!test_bit(WriteMostly, &rdev->flags) || !rdev_init_wb(rdev)) + if (rdev && !rdev_need_serial(rdev) && + !test_bit(CollisionCheck, &rdev->flags)) return; - if (mddev->wb_info_pool == NULL) { + if (!is_suspend) + mddev_suspend(mddev); + + if (!rdev) + ret = rdevs_init_serial(mddev); + else + ret = rdev_init_serial(rdev); + if (ret) + goto abort; + + if (mddev->serial_info_pool == NULL) { unsigned int noio_flag; - if (!is_suspend) - mddev_suspend(mddev); noio_flag = memalloc_noio_save(); - mddev->wb_info_pool = mempool_create_kmalloc_pool(NR_WB_INFOS, - sizeof(struct wb_info)); + mddev->serial_info_pool = + mempool_create_kmalloc_pool(NR_SERIAL_INFOS, + sizeof(struct serial_info)); memalloc_noio_restore(noio_flag); - if (!mddev->wb_info_pool) - pr_err("can't alloc memory pool for writemostly\n"); - if (!is_suspend) - mddev_resume(mddev); + if (!mddev->serial_info_pool) { + rdevs_uninit_serial(mddev); + pr_err("can't alloc memory pool for serialization\n"); + } } + +abort: + if (!is_suspend) + mddev_resume(mddev); } -EXPORT_SYMBOL_GPL(mddev_create_wb_pool);
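With the pool in place, raid1 (later in this patch) records every in-flight write as a closed [start, last] interval in a per-rdev tree and makes any overlapping write wait. A self-contained sketch of that collision test, using a plain linked list in place of the kernel's cached rb-tree (names are illustrative):

#include <stdbool.h>
#include <stddef.h>

struct range {
	unsigned long start, last; /* closed interval of sectors */
	struct range *next;
};

/* A new write [lo, hi] collides when some recorded interval overlaps it;
 * two closed intervals overlap iff each one starts no later than the
 * other ends. */
static bool collides(const struct range *head, unsigned long lo, unsigned long hi)
{
	const struct range *r;

	for (r = head; r != NULL; r = r->next)
		if (r->start <= hi && lo <= r->last)
			return true;
	return false;
}

The kernel version answers the same question in O(log n) via raid1_rb_iter_first(); a colliding writer sleeps on serial_io_wait and is woken by remove_serial() once the conflicting interval is erased.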
/* - * destroy wb_info_pool if rdev is the last device flaged with WBCollisionCheck. + * Free resources from rdev(s), and destroy serial_info_pool under these conditions: + * 1. rdev is the last device flagged with CollisionCheck. + * 2. the bitmap is destroyed while the policy is not enabled. + * 3. when disabling the policy, the pool is destroyed only if no rdev needs it. */ -static void mddev_destroy_wb_pool(struct mddev *mddev, struct md_rdev *rdev) +void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev, + bool is_suspend) { - if (!test_and_clear_bit(WBCollisionCheck, &rdev->flags)) + if (rdev && !test_bit(CollisionCheck, &rdev->flags)) return; - if (mddev->wb_info_pool) { + if (mddev->serial_info_pool) { struct md_rdev *temp; - int num = 0; + int num = 0; /* used to track if other rdevs need the pool */ - /* - * Check if other rdevs need wb_info_pool. - */ - rdev_for_each(temp, mddev) - if (temp != rdev && - test_bit(WBCollisionCheck, &temp->flags)) + if (!is_suspend) + mddev_suspend(mddev); + rdev_for_each(temp, mddev) { + if (!rdev) { + if (!mddev->serialize_policy || + !rdev_need_serial(temp)) + rdev_uninit_serial(temp); + else + num++; + } else if (temp != rdev && + test_bit(CollisionCheck, &temp->flags)) num++; - if (!num) { - mddev_suspend(rdev->mddev); - mempool_destroy(mddev->wb_info_pool); - mddev->wb_info_pool = NULL; - mddev_resume(rdev->mddev); } + + if (rdev) + rdev_uninit_serial(rdev); + + if (num) + pr_info("The mempool could be used by other devices\n"); + else { + mempool_destroy(mddev->serial_info_pool); + mddev->serial_info_pool = NULL; + } + if (!is_suspend) + mddev_resume(mddev); } } @@ -1159,6 +1250,7 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor /* not spare disk, or LEVEL_MULTIPATH */ if (sb->level == LEVEL_MULTIPATH || (rdev->desc_nr >= 0 && + rdev->desc_nr < MD_SB_DISKS && sb->disks[rdev->desc_nr].state & ((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))) spare_disk = false; @@ -2336,7 +2428,7 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev) pr_debug("md: bind<%s>\n", b); if (mddev->raid_disks) - mddev_create_wb_pool(mddev, rdev, false); + mddev_create_serial_pool(mddev, rdev, false); if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b))) goto fail; @@ -2374,7 +2466,7 @@ static void unbind_rdev_from_array(struct md_rdev *rdev) bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk); list_del_rcu(&rdev->same_set); pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b)); - mddev_destroy_wb_pool(rdev->mddev, rdev); + mddev_destroy_serial_pool(rdev->mddev, rdev, false); rdev->mddev = NULL; sysfs_remove_link(&rdev->kobj, "block"); sysfs_put(rdev->sysfs_state); @@ -2887,10 +2979,10 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len) } } else if (cmd_match(buf, "writemostly")) { set_bit(WriteMostly, &rdev->flags); - mddev_create_wb_pool(rdev->mddev, rdev, false); + mddev_create_serial_pool(rdev->mddev, rdev, false); err = 0; } else if (cmd_match(buf, "-writemostly")) { - mddev_destroy_wb_pool(rdev->mddev, rdev); + mddev_destroy_serial_pool(rdev->mddev, rdev, false); clear_bit(WriteMostly, &rdev->flags); err = 0; } else if (cmd_match(buf, "blocked")) { @@ -5276,6 +5368,57 @@ static struct md_sysfs_entry md_fail_last_dev = __ATTR(fail_last_dev, S_IRUGO | S_IWUSR, fail_last_dev_show, fail_last_dev_store); +static ssize_t serialize_policy_show(struct mddev *mddev, char *page) +{ + if (mddev->pers == NULL || (mddev->pers->level != 1)) + return sprintf(page, "n/a\n"); + else + return sprintf(page, "%d\n", mddev->serialize_policy); +} + +/* + * Setting serialize_policy to true enforces that write IO is not reordered + * for raid1.
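+ * For example (illustrative, assuming the usual md sysfs layout):
+ * "echo 1 > /sys/block/md0/md/serialize_policy" turns serialization on
+ * for a running raid1 array, and writing 0 releases the per-rdev trees
+ * and the mempool again.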
+ */ +static ssize_t +serialize_policy_store(struct mddev *mddev, const char *buf, size_t len) +{ + int err; + bool value; + + err = kstrtobool(buf, &value); + if (err) + return err; + + if (value == mddev->serialize_policy) + return len; + + err = mddev_lock(mddev); + if (err) + return err; + if (mddev->pers == NULL || (mddev->pers->level != 1)) { + pr_err("md: serialize_policy is only effective for raid1\n"); + err = -EINVAL; + goto unlock; + } + + mddev_suspend(mddev); + if (value) + mddev_create_serial_pool(mddev, NULL, true); + else + mddev_destroy_serial_pool(mddev, NULL, true); + mddev->serialize_policy = value; + mddev_resume(mddev); +unlock: + mddev_unlock(mddev); + return err ?: len; +} + +static struct md_sysfs_entry md_serialize_policy = +__ATTR(serialize_policy, S_IRUGO | S_IWUSR, serialize_policy_show, + serialize_policy_store); + + static struct attribute *md_default_attrs[] = { &md_level.attr, &md_layout.attr, @@ -5293,6 +5436,7 @@ static struct attribute *md_default_attrs[] = { &max_corr_read_errors.attr, &md_consistency_policy.attr, &md_fail_last_dev.attr, + &md_serialize_policy.attr, NULL, }; @@ -5768,18 +5912,18 @@ int md_run(struct mddev *mddev) goto bitmap_abort; if (mddev->bitmap_info.max_write_behind > 0) { - bool creat_pool = false; + bool create_pool = false; rdev_for_each(rdev, mddev) { if (test_bit(WriteMostly, &rdev->flags) && - rdev_init_wb(rdev)) - creat_pool = true; - } - if (creat_pool && mddev->wb_info_pool == NULL) { - mddev->wb_info_pool = - mempool_create_kmalloc_pool(NR_WB_INFOS, - sizeof(struct wb_info)); - if (!mddev->wb_info_pool) { + rdev_init_serial(rdev)) + create_pool = true; + } + if (create_pool && mddev->serial_info_pool == NULL) { + mddev->serial_info_pool = + mempool_create_kmalloc_pool(NR_SERIAL_INFOS, + sizeof(struct serial_info)); + if (!mddev->serial_info_pool) { err = -ENOMEM; goto bitmap_abort; } @@ -6024,8 +6168,9 @@ static void __md_stop_writes(struct mddev *mddev) mddev->in_sync = 1; md_update_sb(mddev, 1); } - mempool_destroy(mddev->wb_info_pool); - mddev->wb_info_pool = NULL; + /* disable policy to guarantee rdevs free resources for serialization */ + mddev->serialize_policy = 0; + mddev_destroy_serial_pool(mddev, NULL, true); } void md_stop_writes(struct mddev *mddev) diff --git a/drivers/md/md.h b/drivers/md/md.h index 5f86f8adb0a4..acd681939112 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -32,6 +32,16 @@ * be retried. */ #define MD_FAILFAST (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT) + +/* + * The struct embedded in rdev is used to serialize IO. + */ +struct serial_in_rdev { + struct rb_root_cached serial_rb; + spinlock_t serial_lock; + wait_queue_head_t serial_io_wait; +}; + /* * MD's 'extended' device */ @@ -110,12 +120,7 @@ struct md_rdev { * in superblock. */ - /* - * The members for check collision of write behind IOs. - */ - struct list_head wb_list; - spinlock_t wb_list_lock; - wait_queue_head_t wb_io_wait; + struct serial_in_rdev *serial; /* used for raid1 io serialization */ struct work_struct del_work; /* used for delayed sysfs removal */ @@ -201,9 +206,9 @@ enum flag_bits { * it didn't fail, so don't use FailFast * any more for metadata */ - WBCollisionCheck, /* - * multiqueue device should check if there - * is collision between write behind bios. + CollisionCheck, /* + * check if there is collision between raid1 + * serial bios. 
 */ }; @@ -263,12 +268,13 @@ enum mddev_sb_flags { MD_SB_NEED_REWRITE, /* metadata write needs to be repeated */ }; -#define NR_WB_INFOS 8 -/* record current range of write behind IOs */ -struct wb_info { - sector_t lo; - sector_t hi; - struct list_head list; +#define NR_SERIAL_INFOS 8 +/* record the current range of serialized IOs */ +struct serial_info { + struct rb_node node; + sector_t start; /* start sector of rb node */ + sector_t last; /* end sector of rb node */ + sector_t _subtree_last; /* highest sector in subtree of rb node */ }; struct mddev { @@ -487,13 +493,14 @@ struct mddev { */ struct work_struct flush_work; struct work_struct event_work; /* used by dm to report failure event */ - mempool_t *wb_info_pool; + mempool_t *serial_info_pool; void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev); struct md_cluster_info *cluster_info; unsigned int good_device_nr; /* good device num within cluster raid */ bool has_superblocks:1; bool fail_last_dev:1; + bool serialize_policy:1; }; enum recovery_flags { @@ -737,8 +744,10 @@ extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, extern void md_reload_sb(struct mddev *mddev, int raid_disk); extern void md_update_sb(struct mddev *mddev, int force); extern void md_kick_rdev_from_array(struct md_rdev * rdev); -extern void mddev_create_wb_pool(struct mddev *mddev, struct md_rdev *rdev, - bool is_suspend); +extern void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev, + bool is_suspend); +extern void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev, + bool is_suspend); struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr); struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev); diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c index 21ea537bd55e..eff04fa23dfa 100644 --- a/drivers/md/persistent-data/dm-btree-remove.c +++ b/drivers/md/persistent-data/dm-btree-remove.c @@ -203,7 +203,13 @@ static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent, struct btree_node *right = r->n; uint32_t nr_left = le32_to_cpu(left->header.nr_entries); uint32_t nr_right = le32_to_cpu(right->header.nr_entries); - unsigned threshold = 2 * merge_threshold(left) + 1; + /* + * Ensure the number of entries in each child will be greater + * than or equal to (max_entries / 3 + 1), so no matter which + * child is used for removal, the number will still be no + * less than (max_entries / 3).
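+ * For example (illustrative): with max_entries = 126, merge_threshold()
+ * is 42 and threshold = 2 * (42 + 1) = 86, so siblings holding at least
+ * 86 entries in total are rebalanced rather than merged, leaving each
+ * with no fewer than 43.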
+ */ + unsigned int threshold = 2 * (merge_threshold(left) + 1); if (nr_left + nr_right < threshold) { /* diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index b7c20979bd19..322386ff5d22 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -87,7 +87,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf) char b[BDEVNAME_SIZE]; char b2[BDEVNAME_SIZE]; struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL); - unsigned short blksize = 512; + unsigned blksize = 512; *private_conf = ERR_PTR(-ENOMEM); if (!conf) diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index a409ab6f30bc..cd810e195086 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -29,6 +29,7 @@ #include <linux/module.h> #include <linux/seq_file.h> #include <linux/ratelimit.h> +#include <linux/interval_tree_generic.h> #include <trace/events/block.h> @@ -50,55 +51,71 @@ static void lower_barrier(struct r1conf *conf, sector_t sector_nr); #include "raid1-10.c" -static int check_and_add_wb(struct md_rdev *rdev, sector_t lo, sector_t hi) +#define START(node) ((node)->start) +#define LAST(node) ((node)->last) +INTERVAL_TREE_DEFINE(struct serial_info, node, sector_t, _subtree_last, + START, LAST, static inline, raid1_rb); + +static int check_and_add_serial(struct md_rdev *rdev, struct r1bio *r1_bio, + struct serial_info *si, int idx) { - struct wb_info *wi, *temp_wi; unsigned long flags; int ret = 0; - struct mddev *mddev = rdev->mddev; - - wi = mempool_alloc(mddev->wb_info_pool, GFP_NOIO); - - spin_lock_irqsave(&rdev->wb_list_lock, flags); - list_for_each_entry(temp_wi, &rdev->wb_list, list) { - /* collision happened */ - if (hi > temp_wi->lo && lo < temp_wi->hi) { - ret = -EBUSY; - break; - } + sector_t lo = r1_bio->sector; + sector_t hi = lo + r1_bio->sectors; + struct serial_in_rdev *serial = &rdev->serial[idx]; + + spin_lock_irqsave(&serial->serial_lock, flags); + /* collision happened */ + if (raid1_rb_iter_first(&serial->serial_rb, lo, hi)) + ret = -EBUSY; + else { + si->start = lo; + si->last = hi; + raid1_rb_insert(si, &serial->serial_rb); } - - if (!ret) { - wi->lo = lo; - wi->hi = hi; - list_add(&wi->list, &rdev->wb_list); - } else - mempool_free(wi, mddev->wb_info_pool); - spin_unlock_irqrestore(&rdev->wb_list_lock, flags); + spin_unlock_irqrestore(&serial->serial_lock, flags); return ret; } -static void remove_wb(struct md_rdev *rdev, sector_t lo, sector_t hi) +static void wait_for_serialization(struct md_rdev *rdev, struct r1bio *r1_bio) +{ + struct mddev *mddev = rdev->mddev; + struct serial_info *si; + int idx = sector_to_idx(r1_bio->sector); + struct serial_in_rdev *serial = &rdev->serial[idx]; + + if (WARN_ON(!mddev->serial_info_pool)) + return; + si = mempool_alloc(mddev->serial_info_pool, GFP_NOIO); + wait_event(serial->serial_io_wait, + check_and_add_serial(rdev, r1_bio, si, idx) == 0); +} + +static void remove_serial(struct md_rdev *rdev, sector_t lo, sector_t hi) { - struct wb_info *wi; + struct serial_info *si; unsigned long flags; int found = 0; struct mddev *mddev = rdev->mddev; - - spin_lock_irqsave(&rdev->wb_list_lock, flags); - list_for_each_entry(wi, &rdev->wb_list, list) - if (hi == wi->hi && lo == wi->lo) { - list_del(&wi->list); - mempool_free(wi, mddev->wb_info_pool); + int idx = sector_to_idx(lo); + struct serial_in_rdev *serial = &rdev->serial[idx]; + + spin_lock_irqsave(&serial->serial_lock, flags); + for (si = raid1_rb_iter_first(&serial->serial_rb, lo, hi); + si; si = raid1_rb_iter_next(si, lo, hi)) { + if (si->start == lo && si->last == hi) { + 
raid1_rb_remove(si, &serial->serial_rb); + mempool_free(si, mddev->serial_info_pool); found = 1; break; } - + } if (!found) - WARN(1, "The write behind IO is not recorded\n"); - spin_unlock_irqrestore(&rdev->wb_list_lock, flags); - wake_up(&rdev->wb_io_wait); + WARN(1, "The write IO is not recorded for serialization\n"); + spin_unlock_irqrestore(&serial->serial_lock, flags); + wake_up(&serial->serial_io_wait); } /* @@ -430,6 +447,8 @@ static void raid1_end_write_request(struct bio *bio) int mirror = find_bio_disk(r1_bio, bio); struct md_rdev *rdev = conf->mirrors[mirror].rdev; bool discard_error; + sector_t lo = r1_bio->sector; + sector_t hi = r1_bio->sector + r1_bio->sectors; discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD; @@ -499,12 +518,8 @@ static void raid1_end_write_request(struct bio *bio) } if (behind) { - if (test_bit(WBCollisionCheck, &rdev->flags)) { - sector_t lo = r1_bio->sector; - sector_t hi = r1_bio->sector + r1_bio->sectors; - - remove_wb(rdev, lo, hi); - } + if (test_bit(CollisionCheck, &rdev->flags)) + remove_serial(rdev, lo, hi); if (test_bit(WriteMostly, &rdev->flags)) atomic_dec(&r1_bio->behind_remaining); @@ -527,7 +542,8 @@ static void raid1_end_write_request(struct bio *bio) call_bio_endio(r1_bio); } } - } + } else if (rdev->mddev->serialize_policy) + remove_serial(rdev, lo, hi); if (r1_bio->bios[mirror] == NULL) rdev_dec_pending(rdev, conf->mddev); @@ -1479,6 +1495,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, for (i = 0; i < disks; i++) { struct bio *mbio = NULL; + struct md_rdev *rdev = conf->mirrors[i].rdev; if (!r1_bio->bios[i]) continue; @@ -1506,18 +1523,12 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set); if (r1_bio->behind_master_bio) { - struct md_rdev *rdev = conf->mirrors[i].rdev; - - if (test_bit(WBCollisionCheck, &rdev->flags)) { - sector_t lo = r1_bio->sector; - sector_t hi = r1_bio->sector + r1_bio->sectors; - - wait_event(rdev->wb_io_wait, - check_and_add_wb(rdev, lo, hi) == 0); - } + if (test_bit(CollisionCheck, &rdev->flags)) + wait_for_serialization(rdev, r1_bio); if (test_bit(WriteMostly, &rdev->flags)) atomic_inc(&r1_bio->behind_remaining); - } + } else if (mddev->serialize_policy) + wait_for_serialization(rdev, r1_bio); r1_bio->bios[i] = mbio; @@ -2782,7 +2793,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, write_targets++; } } - if (bio->bi_end_io) { + if (rdev && bio->bi_end_io) { atomic_inc(&rdev->nr_pending); bio->bi_iter.bi_sector = sector_nr + rdev->data_offset; bio_set_dev(bio, rdev->bdev); diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c index cab5b1352892..d50238d0a85d 100644 --- a/drivers/md/raid5-ppl.c +++ b/drivers/md/raid5-ppl.c @@ -1360,7 +1360,7 @@ int ppl_init_log(struct r5conf *conf) return -EINVAL; } - max_disks = FIELD_SIZEOF(struct ppl_log, disk_flush_bitmap) * + max_disks = sizeof_field(struct ppl_log, disk_flush_bitmap) * BITS_PER_BYTE; if (conf->raid_disks > max_disks) { pr_warn("md/raid:%s PPL doesn't support over %d disks in the array\n", diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index f0fc538bfe59..ba00e9877f02 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -5726,7 +5726,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi) do_flush = false; } - if (!sh->batch_head) + if (!sh->batch_head || sh == sh->batch_head) set_bit(STRIPE_HANDLE, &sh->state); clear_bit(STRIPE_DELAYED, &sh->state); if ((!sh->batch_head 
|| sh == sh->batch_head) && @@ -6598,7 +6598,6 @@ raid5_show_group_thread_cnt(struct mddev *mddev, char *page) static int alloc_thread_groups(struct r5conf *conf, int cnt, int *group_cnt, - int *worker_cnt_per_group, struct r5worker_group **worker_groups); static ssize_t raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) @@ -6607,7 +6606,7 @@ raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) unsigned int new; int err; struct r5worker_group *new_groups, *old_groups; - int group_cnt, worker_cnt_per_group; + int group_cnt; if (len >= PAGE_SIZE) return -EINVAL; @@ -6630,13 +6629,11 @@ raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) if (old_groups) flush_workqueue(raid5_wq); - err = alloc_thread_groups(conf, new, - &group_cnt, &worker_cnt_per_group, - &new_groups); + err = alloc_thread_groups(conf, new, &group_cnt, &new_groups); if (!err) { spin_lock_irq(&conf->device_lock); conf->group_cnt = group_cnt; - conf->worker_cnt_per_group = worker_cnt_per_group; + conf->worker_cnt_per_group = new; conf->worker_groups = new_groups; spin_unlock_irq(&conf->device_lock); @@ -6672,16 +6669,13 @@ static struct attribute_group raid5_attrs_group = { .attrs = raid5_attrs, }; -static int alloc_thread_groups(struct r5conf *conf, int cnt, - int *group_cnt, - int *worker_cnt_per_group, +static int alloc_thread_groups(struct r5conf *conf, int cnt, int *group_cnt, struct r5worker_group **worker_groups) { int i, j, k; ssize_t size; struct r5worker *workers; - *worker_cnt_per_group = cnt; if (cnt == 0) { *group_cnt = 0; *worker_groups = NULL; @@ -6882,7 +6876,7 @@ static struct r5conf *setup_conf(struct mddev *mddev) struct disk_info *disk; char pers_name[6]; int i; - int group_cnt, worker_cnt_per_group; + int group_cnt; struct r5worker_group *new_group; int ret; @@ -6928,10 +6922,9 @@ static struct r5conf *setup_conf(struct mddev *mddev) for (i = 0; i < PENDING_IO_MAX; i++) list_add(&conf->pending_data[i].sibling, &conf->free_list); /* Don't enable multi-threading by default*/ - if (!alloc_thread_groups(conf, 0, &group_cnt, &worker_cnt_per_group, - &new_group)) { + if (!alloc_thread_groups(conf, 0, &group_cnt, &new_group)) { conf->group_cnt = group_cnt; - conf->worker_cnt_per_group = worker_cnt_per_group; + conf->worker_cnt_per_group = 0; conf->worker_groups = new_group; } else goto abort; diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c index 9340435a94a0..6c95dc471d4c 100644 --- a/drivers/media/cec/cec-adap.c +++ b/drivers/media/cec/cec-adap.c @@ -380,7 +380,8 @@ static void cec_data_cancel(struct cec_data *data, u8 tx_status) } else { list_del_init(&data->list); if (!(data->msg.tx_status & CEC_TX_STATUS_OK)) - data->adap->transmit_queue_sz--; + if (!WARN_ON(!data->adap->transmit_queue_sz)) + data->adap->transmit_queue_sz--; } if (data->msg.tx_status & CEC_TX_STATUS_OK) { @@ -432,6 +433,14 @@ static void cec_flush(struct cec_adapter *adap) * need to do anything special in that case. */ } + /* + * If something went wrong and this counter isn't what it should + * be, then this will reset it back to 0. Warn if it is not 0, + * since it indicates a bug, either in this framework or in a + * CEC driver. 
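+ * (cec_data_cancel() and the transmit-queue pop in cec_thread_func()
+ * guard their decrements with the same WARN_ON to catch underflow early.)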
+ */ + if (WARN_ON(adap->transmit_queue_sz)) + adap->transmit_queue_sz = 0; } /* @@ -456,7 +465,7 @@ int cec_thread_func(void *_adap) bool timeout = false; u8 attempts; - if (adap->transmitting) { + if (adap->transmit_in_progress) { int err; /* @@ -491,7 +500,7 @@ int cec_thread_func(void *_adap) goto unlock; } - if (adap->transmitting && timeout) { + if (adap->transmit_in_progress && timeout) { /* * If we timeout, then log that. Normally this does * not happen and it is an indication of a faulty CEC @@ -500,14 +509,18 @@ int cec_thread_func(void *_adap) * so much traffic on the bus that the adapter was * unable to transmit for CEC_XFER_TIMEOUT_MS (2.1s). */ - pr_warn("cec-%s: message %*ph timed out\n", adap->name, - adap->transmitting->msg.len, - adap->transmitting->msg.msg); + if (adap->transmitting) { + pr_warn("cec-%s: message %*ph timed out\n", adap->name, + adap->transmitting->msg.len, + adap->transmitting->msg.msg); + /* Just give up on this. */ + cec_data_cancel(adap->transmitting, + CEC_TX_STATUS_TIMEOUT); + } else { + pr_warn("cec-%s: transmit timed out\n", adap->name); + } adap->transmit_in_progress = false; adap->tx_timeouts++; - /* Just give up on this. */ - cec_data_cancel(adap->transmitting, - CEC_TX_STATUS_TIMEOUT); goto unlock; } @@ -522,7 +535,8 @@ int cec_thread_func(void *_adap) data = list_first_entry(&adap->transmit_queue, struct cec_data, list); list_del_init(&data->list); - adap->transmit_queue_sz--; + if (!WARN_ON(!data->adap->transmit_queue_sz)) + adap->transmit_queue_sz--; /* Make this the current transmitting message */ adap->transmitting = data; @@ -1085,11 +1099,11 @@ void cec_received_msg_ts(struct cec_adapter *adap, valid_la = false; else if (!cec_msg_is_broadcast(msg) && !(dir_fl & DIRECTED)) valid_la = false; - else if (cec_msg_is_broadcast(msg) && !(dir_fl & BCAST1_4)) + else if (cec_msg_is_broadcast(msg) && !(dir_fl & BCAST)) valid_la = false; else if (cec_msg_is_broadcast(msg) && - adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0 && - !(dir_fl & BCAST2_0)) + adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0 && + !(dir_fl & BCAST1_4)) valid_la = false; } if (valid_la && min_len) { diff --git a/drivers/media/common/videobuf2/videobuf2-vmalloc.c b/drivers/media/common/videobuf2/videobuf2-vmalloc.c index 04d51ca63223..4c8c96a35282 100644 --- a/drivers/media/common/videobuf2/videobuf2-vmalloc.c +++ b/drivers/media/common/videobuf2/videobuf2-vmalloc.c @@ -105,7 +105,7 @@ static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr, if (nums[i-1] + 1 != nums[i]) goto fail_map; buf->vaddr = (__force void *) - ioremap_nocache(__pfn_to_phys(nums[0]), size + offset); + ioremap(__pfn_to_phys(nums[0]), size + offset); } else { buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1, PAGE_KERNEL); diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c index fd47bd07ffd8..2f1eeeb6e7c7 100644 --- a/drivers/media/pci/cx18/cx18-driver.c +++ b/drivers/media/pci/cx18/cx18-driver.c @@ -938,7 +938,7 @@ static int cx18_probe(struct pci_dev *pci_dev, /* map io memory */ CX18_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n", (u64)cx->base_addr + CX18_MEM_OFFSET, CX18_MEM_SIZE); - cx->enc_mem = ioremap_nocache(cx->base_addr + CX18_MEM_OFFSET, + cx->enc_mem = ioremap(cx->base_addr + CX18_MEM_OFFSET, CX18_MEM_SIZE); if (!cx->enc_mem) { CX18_ERR("ioremap failed. 
Can't get a window into CX23418 memory and register space\n"); diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c index 3f3f40ea890b..1f79700a6307 100644 --- a/drivers/media/pci/ivtv/ivtv-driver.c +++ b/drivers/media/pci/ivtv/ivtv-driver.c @@ -1042,7 +1042,7 @@ static int ivtv_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) /* map io memory */ IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n", (u64)itv->base_addr + IVTV_ENCODER_OFFSET, IVTV_ENCODER_SIZE); - itv->enc_mem = ioremap_nocache(itv->base_addr + IVTV_ENCODER_OFFSET, + itv->enc_mem = ioremap(itv->base_addr + IVTV_ENCODER_OFFSET, IVTV_ENCODER_SIZE); if (!itv->enc_mem) { IVTV_ERR("ioremap failed. Can't get a window into CX23415/6 encoder memory\n"); @@ -1056,7 +1056,7 @@ static int ivtv_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) if (itv->has_cx23415) { IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n", (u64)itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE); - itv->dec_mem = ioremap_nocache(itv->base_addr + IVTV_DECODER_OFFSET, + itv->dec_mem = ioremap(itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE); if (!itv->dec_mem) { IVTV_ERR("ioremap failed. Can't get a window into CX23415 decoder memory\n"); @@ -1075,7 +1075,7 @@ static int ivtv_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n", (u64)itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE); itv->reg_mem = - ioremap_nocache(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE); + ioremap(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE); if (!itv->reg_mem) { IVTV_ERR("ioremap failed. Can't get a window into CX23415/6 register space\n"); IVTV_ERR("Each capture card with a CX23415/6 needs 64 kB of vmalloc address space for this window\n"); diff --git a/drivers/media/pci/ivtv/ivtvfb.c b/drivers/media/pci/ivtv/ivtvfb.c index 95a56cce9b65..1daf9e07cad7 100644 --- a/drivers/media/pci/ivtv/ivtvfb.c +++ b/drivers/media/pci/ivtv/ivtvfb.c @@ -37,7 +37,7 @@ #include <linux/ivtvfb.h> #ifdef CONFIG_X86_64 -#include <asm/pat.h> +#include <asm/memtype.h> #endif /* card parameters */ diff --git a/drivers/media/platform/davinci/dm355_ccdc.c b/drivers/media/platform/davinci/dm355_ccdc.c index f299baf7cbe0..e06d113dfe96 100644 --- a/drivers/media/platform/davinci/dm355_ccdc.c +++ b/drivers/media/platform/davinci/dm355_ccdc.c @@ -883,7 +883,7 @@ static int dm355_ccdc_probe(struct platform_device *pdev) goto fail_nores; } - ccdc_cfg.base_addr = ioremap_nocache(res->start, resource_size(res)); + ccdc_cfg.base_addr = ioremap(res->start, resource_size(res)); if (!ccdc_cfg.base_addr) { status = -ENOMEM; goto fail_nomem; diff --git a/drivers/media/platform/davinci/dm644x_ccdc.c b/drivers/media/platform/davinci/dm644x_ccdc.c index 2fc6c9c38f9c..c6378c4e0074 100644 --- a/drivers/media/platform/davinci/dm644x_ccdc.c +++ b/drivers/media/platform/davinci/dm644x_ccdc.c @@ -817,7 +817,7 @@ static int dm644x_ccdc_probe(struct platform_device *pdev) goto fail_nores; } - ccdc_cfg.base_addr = ioremap_nocache(res->start, resource_size(res)); + ccdc_cfg.base_addr = ioremap(res->start, resource_size(res)); if (!ccdc_cfg.base_addr) { status = -ENOMEM; goto fail_nomem; diff --git a/drivers/media/platform/davinci/isif.c b/drivers/media/platform/davinci/isif.c index e2e7ab7b7f45..b49378b18e5d 100644 --- a/drivers/media/platform/davinci/isif.c +++ b/drivers/media/platform/davinci/isif.c @@ -1045,7 +1045,7 @@ static int isif_probe(struct platform_device 
*pdev) status = -EBUSY; goto fail_nobase_res; } - addr = ioremap_nocache(res->start, resource_size(res)); + addr = ioremap(res->start, resource_size(res)); if (!addr) { status = -ENOMEM; goto fail_base_iomap; diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c index 97d660606d98..4dbdf3180d10 100644 --- a/drivers/media/platform/omap3isp/isppreview.c +++ b/drivers/media/platform/omap3isp/isppreview.c @@ -753,7 +753,7 @@ static const struct preview_update update_attrs[] = { preview_config_luma_enhancement, preview_enable_luma_enhancement, offsetof(struct prev_params, luma), - FIELD_SIZEOF(struct prev_params, luma), + sizeof_field(struct prev_params, luma), offsetof(struct omap3isp_prev_update_config, luma), }, /* OMAP3ISP_PREV_INVALAW */ { NULL, @@ -762,55 +762,55 @@ static const struct preview_update update_attrs[] = { preview_config_hmed, preview_enable_hmed, offsetof(struct prev_params, hmed), - FIELD_SIZEOF(struct prev_params, hmed), + sizeof_field(struct prev_params, hmed), offsetof(struct omap3isp_prev_update_config, hmed), }, /* OMAP3ISP_PREV_CFA */ { preview_config_cfa, NULL, offsetof(struct prev_params, cfa), - FIELD_SIZEOF(struct prev_params, cfa), + sizeof_field(struct prev_params, cfa), offsetof(struct omap3isp_prev_update_config, cfa), }, /* OMAP3ISP_PREV_CHROMA_SUPP */ { preview_config_chroma_suppression, preview_enable_chroma_suppression, offsetof(struct prev_params, csup), - FIELD_SIZEOF(struct prev_params, csup), + sizeof_field(struct prev_params, csup), offsetof(struct omap3isp_prev_update_config, csup), }, /* OMAP3ISP_PREV_WB */ { preview_config_whitebalance, NULL, offsetof(struct prev_params, wbal), - FIELD_SIZEOF(struct prev_params, wbal), + sizeof_field(struct prev_params, wbal), offsetof(struct omap3isp_prev_update_config, wbal), }, /* OMAP3ISP_PREV_BLKADJ */ { preview_config_blkadj, NULL, offsetof(struct prev_params, blkadj), - FIELD_SIZEOF(struct prev_params, blkadj), + sizeof_field(struct prev_params, blkadj), offsetof(struct omap3isp_prev_update_config, blkadj), }, /* OMAP3ISP_PREV_RGB2RGB */ { preview_config_rgb_blending, NULL, offsetof(struct prev_params, rgb2rgb), - FIELD_SIZEOF(struct prev_params, rgb2rgb), + sizeof_field(struct prev_params, rgb2rgb), offsetof(struct omap3isp_prev_update_config, rgb2rgb), }, /* OMAP3ISP_PREV_COLOR_CONV */ { preview_config_csc, NULL, offsetof(struct prev_params, csc), - FIELD_SIZEOF(struct prev_params, csc), + sizeof_field(struct prev_params, csc), offsetof(struct omap3isp_prev_update_config, csc), }, /* OMAP3ISP_PREV_YC_LIMIT */ { preview_config_yc_range, NULL, offsetof(struct prev_params, yclimit), - FIELD_SIZEOF(struct prev_params, yclimit), + sizeof_field(struct prev_params, yclimit), offsetof(struct omap3isp_prev_update_config, yclimit), }, /* OMAP3ISP_PREV_DEFECT_COR */ { preview_config_dcor, preview_enable_dcor, offsetof(struct prev_params, dcor), - FIELD_SIZEOF(struct prev_params, dcor), + sizeof_field(struct prev_params, dcor), offsetof(struct omap3isp_prev_update_config, dcor), }, /* Previously OMAP3ISP_PREV_GAMMABYPASS, not used anymore */ { NULL, @@ -828,13 +828,13 @@ static const struct preview_update update_attrs[] = { preview_config_noisefilter, preview_enable_noisefilter, offsetof(struct prev_params, nf), - FIELD_SIZEOF(struct prev_params, nf), + sizeof_field(struct prev_params, nf), offsetof(struct omap3isp_prev_update_config, nf), }, /* OMAP3ISP_PREV_GAMMA */ { preview_config_gammacorrn, preview_enable_gammacorrn, offsetof(struct prev_params, gamma), - 
FIELD_SIZEOF(struct prev_params, gamma), + sizeof_field(struct prev_params, gamma), offsetof(struct omap3isp_prev_update_config, gamma), }, /* OMAP3ISP_PREV_CONTRAST */ { preview_config_contrast, diff --git a/drivers/media/platform/tegra-cec/tegra_cec.c b/drivers/media/platform/tegra-cec/tegra_cec.c index a99caac59f44..1ac0c70a5981 100644 --- a/drivers/media/platform/tegra-cec/tegra_cec.c +++ b/drivers/media/platform/tegra-cec/tegra_cec.c @@ -351,7 +351,7 @@ static int tegra_cec_probe(struct platform_device *pdev) if (cec->tegra_cec_irq <= 0) return -EBUSY; - cec->cec_base = devm_ioremap_nocache(&pdev->dev, res->start, + cec->cec_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!cec->cec_base) { diff --git a/drivers/media/usb/pulse8-cec/pulse8-cec.c b/drivers/media/usb/pulse8-cec/pulse8-cec.c index ac88ade94cda..59609556d969 100644 --- a/drivers/media/usb/pulse8-cec/pulse8-cec.c +++ b/drivers/media/usb/pulse8-cec/pulse8-cec.c @@ -116,6 +116,7 @@ struct pulse8 { unsigned int vers; struct completion cmd_done; struct work_struct work; + u8 work_result; struct delayed_work ping_eeprom_work; struct cec_msg rx_msg; u8 data[DATA_SIZE]; @@ -137,8 +138,10 @@ static void pulse8_irq_work_handler(struct work_struct *work) { struct pulse8 *pulse8 = container_of(work, struct pulse8, work); + u8 result = pulse8->work_result; - switch (pulse8->data[0] & 0x3f) { + pulse8->work_result = 0; + switch (result & 0x3f) { case MSGCODE_FRAME_DATA: cec_received_msg(pulse8->adap, &pulse8->rx_msg); break; @@ -172,12 +175,12 @@ static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data, pulse8->escape = false; } else if (data == MSGEND) { struct cec_msg *msg = &pulse8->rx_msg; + u8 msgcode = pulse8->buf[0]; if (debug) dev_info(pulse8->dev, "received: %*ph\n", pulse8->idx, pulse8->buf); - pulse8->data[0] = pulse8->buf[0]; - switch (pulse8->buf[0] & 0x3f) { + switch (msgcode & 0x3f) { case MSGCODE_FRAME_START: msg->len = 1; msg->msg[0] = pulse8->buf[1]; @@ -186,14 +189,20 @@ static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data, if (msg->len == CEC_MAX_MSG_SIZE) break; msg->msg[msg->len++] = pulse8->buf[1]; - if (pulse8->buf[0] & MSGCODE_FRAME_EOM) + if (msgcode & MSGCODE_FRAME_EOM) { + WARN_ON(pulse8->work_result); + pulse8->work_result = msgcode; schedule_work(&pulse8->work); + break; + } break; case MSGCODE_TRANSMIT_SUCCEEDED: case MSGCODE_TRANSMIT_FAILED_LINE: case MSGCODE_TRANSMIT_FAILED_ACK: case MSGCODE_TRANSMIT_FAILED_TIMEOUT_DATA: case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE: + WARN_ON(pulse8->work_result); + pulse8->work_result = msgcode; schedule_work(&pulse8->work); break; case MSGCODE_HIGH_ERROR: diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index 4e700583659b..003b7422aeef 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -2652,7 +2652,7 @@ struct v4l2_ioctl_info { /* Zero struct from after the field to the end */ #define INFO_FL_CLEAR(v4l2_struct, field) \ ((offsetof(struct v4l2_struct, field) + \ - FIELD_SIZEOF(struct v4l2_struct, field)) << 16) + sizeof_field(struct v4l2_struct, field)) << 16) #define INFO_FL_CLEAR_MASK (_IOC_SIZEMASK << 16) #define DEFINE_V4L_STUB_FUNC(_vidioc) \ diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c index f9ac22413000..1074b882c57c 100644 --- a/drivers/message/fusion/mptctl.c +++ b/drivers/message/fusion/mptctl.c @@ -100,19 +100,19 @@ struct buflist { * Function prototypes. 
Called from OS entry point mptctl_ioctl. * arg contents specific to function. */ -static int mptctl_fw_download(unsigned long arg); -static int mptctl_getiocinfo(unsigned long arg, unsigned int cmd); -static int mptctl_gettargetinfo(unsigned long arg); -static int mptctl_readtest(unsigned long arg); -static int mptctl_mpt_command(unsigned long arg); -static int mptctl_eventquery(unsigned long arg); -static int mptctl_eventenable(unsigned long arg); -static int mptctl_eventreport(unsigned long arg); -static int mptctl_replace_fw(unsigned long arg); - -static int mptctl_do_reset(unsigned long arg); -static int mptctl_hp_hostinfo(unsigned long arg, unsigned int cmd); -static int mptctl_hp_targetinfo(unsigned long arg); +static int mptctl_fw_download(MPT_ADAPTER *iocp, unsigned long arg); +static int mptctl_getiocinfo(MPT_ADAPTER *iocp, unsigned long arg, unsigned int cmd); +static int mptctl_gettargetinfo(MPT_ADAPTER *iocp, unsigned long arg); +static int mptctl_readtest(MPT_ADAPTER *iocp, unsigned long arg); +static int mptctl_mpt_command(MPT_ADAPTER *iocp, unsigned long arg); +static int mptctl_eventquery(MPT_ADAPTER *iocp, unsigned long arg); +static int mptctl_eventenable(MPT_ADAPTER *iocp, unsigned long arg); +static int mptctl_eventreport(MPT_ADAPTER *iocp, unsigned long arg); +static int mptctl_replace_fw(MPT_ADAPTER *iocp, unsigned long arg); + +static int mptctl_do_reset(MPT_ADAPTER *iocp, unsigned long arg); +static int mptctl_hp_hostinfo(MPT_ADAPTER *iocp, unsigned long arg, unsigned int cmd); +static int mptctl_hp_targetinfo(MPT_ADAPTER *iocp, unsigned long arg); static int mptctl_probe(struct pci_dev *, const struct pci_device_id *); static void mptctl_remove(struct pci_dev *); @@ -123,8 +123,8 @@ static long compat_mpctl_ioctl(struct file *f, unsigned cmd, unsigned long arg); /* * Private function calls. */ -static int mptctl_do_mpt_command(struct mpt_ioctl_command karg, void __user *mfPtr); -static int mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen); +static int mptctl_do_mpt_command(MPT_ADAPTER *iocp, struct mpt_ioctl_command karg, void __user *mfPtr); +static int mptctl_do_fw_download(MPT_ADAPTER *iocp, char __user *ufwbuf, size_t fwlen); static MptSge_t *kbuf_alloc_2_sgl(int bytes, u32 dir, int sge_offset, int *frags, struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc); static void kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, @@ -656,19 +656,19 @@ __mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) * by TM and FW reloads. 
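 * (Each helper below now receives the validated iocp from __mptctl_ioctl
 * instead of re-deriving the adapter from the user-supplied iocnum.)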
*/ if ((cmd & ~IOCSIZE_MASK) == (MPTIOCINFO & ~IOCSIZE_MASK)) { - return mptctl_getiocinfo(arg, _IOC_SIZE(cmd)); + return mptctl_getiocinfo(iocp, arg, _IOC_SIZE(cmd)); } else if (cmd == MPTTARGETINFO) { - return mptctl_gettargetinfo(arg); + return mptctl_gettargetinfo(iocp, arg); } else if (cmd == MPTTEST) { - return mptctl_readtest(arg); + return mptctl_readtest(iocp, arg); } else if (cmd == MPTEVENTQUERY) { - return mptctl_eventquery(arg); + return mptctl_eventquery(iocp, arg); } else if (cmd == MPTEVENTENABLE) { - return mptctl_eventenable(arg); + return mptctl_eventenable(iocp, arg); } else if (cmd == MPTEVENTREPORT) { - return mptctl_eventreport(arg); + return mptctl_eventreport(iocp, arg); } else if (cmd == MPTFWREPLACE) { - return mptctl_replace_fw(arg); + return mptctl_replace_fw(iocp, arg); } /* All of these commands require an interrupt or @@ -678,15 +678,15 @@ __mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return ret; if (cmd == MPTFWDOWNLOAD) - ret = mptctl_fw_download(arg); + ret = mptctl_fw_download(iocp, arg); else if (cmd == MPTCOMMAND) - ret = mptctl_mpt_command(arg); + ret = mptctl_mpt_command(iocp, arg); else if (cmd == MPTHARDRESET) - ret = mptctl_do_reset(arg); + ret = mptctl_do_reset(iocp, arg); else if ((cmd & ~IOCSIZE_MASK) == (HP_GETHOSTINFO & ~IOCSIZE_MASK)) - ret = mptctl_hp_hostinfo(arg, _IOC_SIZE(cmd)); + ret = mptctl_hp_hostinfo(iocp, arg, _IOC_SIZE(cmd)); else if (cmd == HP_GETTARGETINFO) - ret = mptctl_hp_targetinfo(arg); + ret = mptctl_hp_targetinfo(iocp, arg); else ret = -EINVAL; @@ -705,11 +705,10 @@ mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return ret; } -static int mptctl_do_reset(unsigned long arg) +static int mptctl_do_reset(MPT_ADAPTER *iocp, unsigned long arg) { struct mpt_ioctl_diag_reset __user *urinfo = (void __user *) arg; struct mpt_ioctl_diag_reset krinfo; - MPT_ADAPTER *iocp; if (copy_from_user(&krinfo, urinfo, sizeof(struct mpt_ioctl_diag_reset))) { printk(KERN_ERR MYNAM "%s@%d::mptctl_do_reset - " @@ -718,12 +717,6 @@ static int mptctl_do_reset(unsigned long arg) return -EFAULT; } - if (mpt_verify_adapter(krinfo.hdr.iocnum, &iocp) < 0) { - printk(KERN_DEBUG MYNAM "%s@%d::mptctl_do_reset - ioc%d not found!\n", - __FILE__, __LINE__, krinfo.hdr.iocnum); - return -ENODEV; /* (-6) No such device or address */ - } - dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "mptctl_do_reset called.\n", iocp->name)); @@ -754,7 +747,7 @@ static int mptctl_do_reset(unsigned long arg) * -ENOMSG if FW upload returned bad status */ static int -mptctl_fw_download(unsigned long arg) +mptctl_fw_download(MPT_ADAPTER *iocp, unsigned long arg) { struct mpt_fw_xfer __user *ufwdl = (void __user *) arg; struct mpt_fw_xfer kfwdl; @@ -766,7 +759,7 @@ mptctl_fw_download(unsigned long arg) return -EFAULT; } - return mptctl_do_fw_download(kfwdl.iocnum, kfwdl.bufp, kfwdl.fwlen); + return mptctl_do_fw_download(iocp, kfwdl.bufp, kfwdl.fwlen); } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ @@ -784,11 +777,10 @@ mptctl_fw_download(unsigned long arg) * -ENOMSG if FW upload returned bad status */ static int -mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen) +mptctl_do_fw_download(MPT_ADAPTER *iocp, char __user *ufwbuf, size_t fwlen) { FWDownload_t *dlmsg; MPT_FRAME_HDR *mf; - MPT_ADAPTER *iocp; FWDownloadTCSGE_t *ptsge; MptSge_t *sgl, *sgIn; char *sgOut; @@ -808,17 +800,10 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen) pFWDownloadReply_t ReplyMsg = NULL; unsigned long 
timeleft; - if (mpt_verify_adapter(ioc, &iocp) < 0) { - printk(KERN_DEBUG MYNAM "ioctl_fwdl - ioc%d not found!\n", - ioc); - return -ENODEV; /* (-6) No such device or address */ - } else { - - /* Valid device. Get a message frame and construct the FW download message. - */ - if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL) - return -EAGAIN; - } + /* Valid device. Get a message frame and construct the FW download message. + */ + if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL) + return -EAGAIN; dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "mptctl_do_fwdl called. mptctl_id = %xh.\n", iocp->name, mptctl_id)); @@ -826,8 +811,6 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen) iocp->name, ufwbuf)); dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.fwlen = %d\n", iocp->name, (int)fwlen)); - dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.ioc = %04xh\n", - iocp->name, ioc)); dlmsg = (FWDownload_t*) mf; ptsge = (FWDownloadTCSGE_t *) &dlmsg->SGL; @@ -1238,13 +1221,11 @@ kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, struct buflist *buflist, MPT_ADAPTE * -ENODEV if no such device/adapter */ static int -mptctl_getiocinfo (unsigned long arg, unsigned int data_size) +mptctl_getiocinfo (MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size) { struct mpt_ioctl_iocinfo __user *uarg = (void __user *) arg; struct mpt_ioctl_iocinfo *karg; - MPT_ADAPTER *ioc; struct pci_dev *pdev; - int iocnum; unsigned int port; int cim_rev; struct scsi_device *sdev; @@ -1272,14 +1253,6 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size) return PTR_ERR(karg); } - if (((iocnum = mpt_verify_adapter(karg->hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_getiocinfo() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - kfree(karg); - return -ENODEV; - } - /* Verify the data transfer size is correct. 
*/ if (karg->hdr.maxDataSize != data_size) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_getiocinfo - " @@ -1385,15 +1358,13 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size) * -ENODEV if no such device/adapter */ static int -mptctl_gettargetinfo (unsigned long arg) +mptctl_gettargetinfo (MPT_ADAPTER *ioc, unsigned long arg) { struct mpt_ioctl_targetinfo __user *uarg = (void __user *) arg; struct mpt_ioctl_targetinfo karg; - MPT_ADAPTER *ioc; VirtDevice *vdevice; char *pmem; int *pdata; - int iocnum; int numDevices = 0; int lun; int maxWordsLeft; @@ -1408,13 +1379,6 @@ mptctl_gettargetinfo (unsigned long arg) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_gettargetinfo() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } - dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_gettargetinfo called.\n", ioc->name)); /* Get the port number and set the maximum number of bytes @@ -1510,12 +1474,10 @@ mptctl_gettargetinfo (unsigned long arg) * -ENODEV if no such device/adapter */ static int -mptctl_readtest (unsigned long arg) +mptctl_readtest (MPT_ADAPTER *ioc, unsigned long arg) { struct mpt_ioctl_test __user *uarg = (void __user *) arg; struct mpt_ioctl_test karg; - MPT_ADAPTER *ioc; - int iocnum; if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_test))) { printk(KERN_ERR MYNAM "%s@%d::mptctl_readtest - " @@ -1524,13 +1486,6 @@ mptctl_readtest (unsigned long arg) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_readtest() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } - dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_readtest called.\n", ioc->name)); /* Fill in the data and return the structure to the calling @@ -1571,12 +1526,10 @@ mptctl_readtest (unsigned long arg) * -ENODEV if no such device/adapter */ static int -mptctl_eventquery (unsigned long arg) +mptctl_eventquery (MPT_ADAPTER *ioc, unsigned long arg) { struct mpt_ioctl_eventquery __user *uarg = (void __user *) arg; struct mpt_ioctl_eventquery karg; - MPT_ADAPTER *ioc; - int iocnum; if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventquery))) { printk(KERN_ERR MYNAM "%s@%d::mptctl_eventquery - " @@ -1585,13 +1538,6 @@ mptctl_eventquery (unsigned long arg) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_eventquery() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } - dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventquery called.\n", ioc->name)); karg.eventEntries = MPTCTL_EVENT_LOG_SIZE; @@ -1610,12 +1556,10 @@ mptctl_eventquery (unsigned long arg) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static int -mptctl_eventenable (unsigned long arg) +mptctl_eventenable (MPT_ADAPTER *ioc, unsigned long arg) { struct mpt_ioctl_eventenable __user *uarg = (void __user *) arg; struct mpt_ioctl_eventenable karg; - MPT_ADAPTER *ioc; - int iocnum; if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventenable))) { printk(KERN_ERR MYNAM "%s@%d::mptctl_eventenable - " @@ -1624,13 +1568,6 @@ mptctl_eventenable (unsigned long arg) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_eventenable() @%d - ioc%d not found!\n", - 
__FILE__, __LINE__, iocnum); - return -ENODEV; - } - dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventenable called.\n", ioc->name)); if (ioc->events == NULL) { @@ -1658,12 +1595,10 @@ mptctl_eventenable (unsigned long arg) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static int -mptctl_eventreport (unsigned long arg) +mptctl_eventreport (MPT_ADAPTER *ioc, unsigned long arg) { struct mpt_ioctl_eventreport __user *uarg = (void __user *) arg; struct mpt_ioctl_eventreport karg; - MPT_ADAPTER *ioc; - int iocnum; int numBytes, maxEvents, max; if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventreport))) { @@ -1673,12 +1608,6 @@ mptctl_eventreport (unsigned long arg) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_eventreport() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventreport called.\n", ioc->name)); @@ -1712,12 +1641,10 @@ mptctl_eventreport (unsigned long arg) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static int -mptctl_replace_fw (unsigned long arg) +mptctl_replace_fw (MPT_ADAPTER *ioc, unsigned long arg) { struct mpt_ioctl_replace_fw __user *uarg = (void __user *) arg; struct mpt_ioctl_replace_fw karg; - MPT_ADAPTER *ioc; - int iocnum; int newFwSize; if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_replace_fw))) { @@ -1727,13 +1654,6 @@ mptctl_replace_fw (unsigned long arg) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_replace_fw() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } - dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_replace_fw called.\n", ioc->name)); /* If caching FW, Free the old FW image @@ -1780,12 +1700,10 @@ mptctl_replace_fw (unsigned long arg) * -ENOMEM if memory allocation error */ static int -mptctl_mpt_command (unsigned long arg) +mptctl_mpt_command (MPT_ADAPTER *ioc, unsigned long arg) { struct mpt_ioctl_command __user *uarg = (void __user *) arg; struct mpt_ioctl_command karg; - MPT_ADAPTER *ioc; - int iocnum; int rc; @@ -1796,14 +1714,7 @@ mptctl_mpt_command (unsigned long arg) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_mpt_command() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } - - rc = mptctl_do_mpt_command (karg, &uarg->MF); + rc = mptctl_do_mpt_command (ioc, karg, &uarg->MF); return rc; } @@ -1821,9 +1732,8 @@ mptctl_mpt_command (unsigned long arg) * -EPERM if SCSI I/O and target is untagged */ static int -mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) +mptctl_do_mpt_command (MPT_ADAPTER *ioc, struct mpt_ioctl_command karg, void __user *mfPtr) { - MPT_ADAPTER *ioc; MPT_FRAME_HDR *mf = NULL; MPIHeader_t *hdr; char *psge; @@ -1832,7 +1742,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) dma_addr_t dma_addr_in; dma_addr_t dma_addr_out; int sgSize = 0; /* Num SG elements */ - int iocnum, flagsLength; + int flagsLength; int sz, rc = 0; int msgContext; u16 req_idx; @@ -1847,13 +1757,6 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) bufIn.kptr = bufOut.kptr = NULL; bufIn.len = bufOut.len = 0; - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, 
&ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_do_mpt_command() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } - spin_lock_irqsave(&ioc->taskmgmt_lock, flags); if (ioc->ioc_reset_in_progress) { spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); @@ -2418,17 +2321,15 @@ done_free_mem: * -ENOMEM if memory allocation error */ static int -mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size) +mptctl_hp_hostinfo(MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size) { hp_host_info_t __user *uarg = (void __user *) arg; - MPT_ADAPTER *ioc; struct pci_dev *pdev; char *pbuf=NULL; dma_addr_t buf_dma; hp_host_info_t karg; CONFIGPARMS cfg; ConfigPageHeader_t hdr; - int iocnum; int rc, cim_rev; ToolboxIstwiReadWriteRequest_t *IstwiRWRequest; MPT_FRAME_HDR *mf = NULL; @@ -2452,12 +2353,6 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_hp_hostinfo() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": mptctl_hp_hostinfo called.\n", ioc->name)); @@ -2659,15 +2554,13 @@ retry_wait: * -ENOMEM if memory allocation error */ static int -mptctl_hp_targetinfo(unsigned long arg) +mptctl_hp_targetinfo(MPT_ADAPTER *ioc, unsigned long arg) { hp_target_info_t __user *uarg = (void __user *) arg; SCSIDevicePage0_t *pg0_alloc; SCSIDevicePage3_t *pg3_alloc; - MPT_ADAPTER *ioc; MPT_SCSI_HOST *hd = NULL; hp_target_info_t karg; - int iocnum; int data_sz; dma_addr_t page_dma; CONFIGPARMS cfg; @@ -2681,12 +2574,6 @@ mptctl_hp_targetinfo(unsigned long arg) return -EFAULT; } - if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || - (ioc == NULL)) { - printk(KERN_DEBUG MYNAM "%s::mptctl_hp_targetinfo() @%d - ioc%d not found!\n", - __FILE__, __LINE__, iocnum); - return -ENODEV; - } if (karg.hdr.id >= MPT_MAX_FC_DEVICES) return -EINVAL; dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_hp_targetinfo called.\n", @@ -2854,7 +2741,7 @@ compat_mptfwxfer_ioctl(struct file *filp, unsigned int cmd, kfw.fwlen = kfw32.fwlen; kfw.bufp = compat_ptr(kfw32.bufp); - ret = mptctl_do_fw_download(kfw.iocnum, kfw.bufp, kfw.fwlen); + ret = mptctl_do_fw_download(iocp, kfw.bufp, kfw.fwlen); mutex_unlock(&iocp->ioctl_cmds.mutex); @@ -2908,7 +2795,7 @@ compat_mpt_command(struct file *filp, unsigned int cmd, /* Pass new structure to do_mpt_command */ - ret = mptctl_do_mpt_command (karg, &uarg->MF); + ret = mptctl_do_mpt_command (iocp, karg, &uarg->MF); mutex_unlock(&iocp->ioctl_cmds.mutex); diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c index fd7b2167103d..06038b325b02 100644 --- a/drivers/misc/cardreader/rtsx_pcr.c +++ b/drivers/misc/cardreader/rtsx_pcr.c @@ -1512,7 +1512,7 @@ static int rtsx_pci_probe(struct pci_dev *pcidev, bar = 1; len = pci_resource_len(pcidev, bar); base = pci_resource_start(pcidev, bar); - pcr->remap_addr = ioremap_nocache(base, len); + pcr->remap_addr = ioremap(base, len); if (!pcr->remap_addr) { ret = -ENOMEM; goto free_handle; diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c index 6d27ccfe0680..3c2d405bc79b 100644 --- a/drivers/misc/enclosure.c +++ b/drivers/misc/enclosure.c @@ -406,10 +406,9 @@ int enclosure_remove_device(struct enclosure_device *edev, struct device *dev) cdev = &edev->component[i]; if (cdev->dev == dev) { enclosure_remove_links(cdev); - 
device_del(&cdev->cdev); put_device(dev); cdev->dev = NULL; - return device_add(&cdev->cdev); + return 0; } } return -ENODEV; diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c index 8850f475a413..0bf08678431b 100644 --- a/drivers/misc/habanalabs/command_submission.c +++ b/drivers/misc/habanalabs/command_submission.c @@ -824,8 +824,9 @@ int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data) memset(args, 0, sizeof(*args)); if (rc < 0) { - dev_err(hdev->dev, "Error %ld on waiting for CS handle %llu\n", - rc, seq); + dev_err_ratelimited(hdev->dev, + "Error %ld on waiting for CS handle %llu\n", + rc, seq); if (rc == -ERESTARTSYS) { args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED; rc = -EINTR; diff --git a/drivers/misc/habanalabs/context.c b/drivers/misc/habanalabs/context.c index 17db7b3dfb4c..2df6fb87e7ff 100644 --- a/drivers/misc/habanalabs/context.c +++ b/drivers/misc/habanalabs/context.c @@ -176,7 +176,7 @@ struct dma_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq) spin_lock(&ctx->cs_lock); if (seq >= ctx->cs_sequence) { - dev_notice(hdev->dev, + dev_notice_ratelimited(hdev->dev, "Can't wait on seq %llu because current CS is at seq %llu\n", seq, ctx->cs_sequence); spin_unlock(&ctx->cs_lock); diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index c8d16aa4382c..7344e8a222ae 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -2192,7 +2192,7 @@ static int goya_push_linux_to_device(struct hl_device *hdev) static int goya_pldm_init_cpu(struct hl_device *hdev) { - u32 val, unit_rst_val; + u32 unit_rst_val; int rc; /* Must initialize SRAM scrambler before pushing u-boot to SRAM */ @@ -2200,14 +2200,14 @@ static int goya_pldm_init_cpu(struct hl_device *hdev) /* Put ARM cores into reset */ WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL, CPU_RESET_ASSERT); - val = RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL); + RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL); /* Reset the CA53 MACRO */ unit_rst_val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N); WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, CA53_RESET); - val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N); + RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N); WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, unit_rst_val); - val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N); + RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N); rc = goya_push_uboot_to_device(hdev); if (rc) @@ -2228,7 +2228,7 @@ static int goya_pldm_init_cpu(struct hl_device *hdev) /* Release ARM core 0 from reset */ WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL, CPU_RESET_CORE0_DEASSERT); - val = RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL); + RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL); return 0; } @@ -2502,13 +2502,12 @@ err: static int goya_hw_init(struct hl_device *hdev) { struct asic_fixed_properties *prop = &hdev->asic_prop; - u32 val; int rc; dev_info(hdev->dev, "Starting initialization of H/W\n"); /* Perform read from the device to make sure device is up */ - val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG); + RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG); /* * Let's mark in the H/W that we have reached this point. 
We check @@ -2560,7 +2559,7 @@ static int goya_hw_init(struct hl_device *hdev) goto disable_queues; /* Perform read from the device to flush all MSI-X configuration */ - val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG); + RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG); return 0; diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c index a4fdad04809a..de87693cf557 100644 --- a/drivers/misc/lkdtm/bugs.c +++ b/drivers/misc/lkdtm/bugs.c @@ -278,7 +278,7 @@ void lkdtm_STACK_GUARD_PAGE_TRAILING(void) void lkdtm_UNSET_SMEP(void) { -#ifdef CONFIG_X86_64 +#if IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_UML) #define MOV_CR4_DEPTH 64 void (*direct_write_cr4)(unsigned long val); unsigned char *insn; @@ -338,13 +338,13 @@ void lkdtm_UNSET_SMEP(void) native_write_cr4(cr4); } #else - pr_err("FAIL: this test is x86_64-only\n"); + pr_err("XFAIL: this test is x86_64-only\n"); #endif } -#ifdef CONFIG_X86_32 void lkdtm_DOUBLE_FAULT(void) { +#ifdef CONFIG_X86_32 /* * Trigger #DF by setting the stack limit to zero. This clobbers * a GDT TLS slot, which is okay because the current task will die @@ -373,6 +373,8 @@ void lkdtm_DOUBLE_FAULT(void) asm volatile ("movw %0, %%ss; addl $0, (%%esp)" :: "r" ((unsigned short)(GDT_ENTRY_TLS_MIN << 3))); - panic("tried to double fault but didn't die\n"); -} + pr_err("FAIL: tried to double fault but didn't die\n"); +#else + pr_err("XFAIL: this test is ia32-only\n"); #endif +} diff --git a/drivers/misc/mic/scif/scif_nodeqp.c b/drivers/misc/mic/scif/scif_nodeqp.c index c25fd40f3bd0..fcd999f50d14 100644 --- a/drivers/misc/mic/scif/scif_nodeqp.c +++ b/drivers/misc/mic/scif/scif_nodeqp.c @@ -788,7 +788,7 @@ scif_node_add(struct scif_dev *scifdev, struct scifmsg *msg) "failed to setup interrupts for %d\n", msg->src.node); goto interrupt_setup_error; } - newdev->mmio.va = ioremap_nocache(msg->payload[1], sdev->mmio->len); + newdev->mmio.va = ioremap(msg->payload[1], sdev->mmio->len); if (!newdev->mmio.va) { dev_err(&scifdev->sdev->dev, "failed to map mmio for %d\n", msg->src.node); diff --git a/drivers/misc/ocxl/context.c b/drivers/misc/ocxl/context.c index 994563a078eb..de8a66b9d76b 100644 --- a/drivers/misc/ocxl/context.c +++ b/drivers/misc/ocxl/context.c @@ -10,18 +10,17 @@ int ocxl_context_alloc(struct ocxl_context **context, struct ocxl_afu *afu, int pasid; struct ocxl_context *ctx; - *context = kzalloc(sizeof(struct ocxl_context), GFP_KERNEL); - if (!*context) + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) return -ENOMEM; - ctx = *context; - ctx->afu = afu; mutex_lock(&afu->contexts_lock); pasid = idr_alloc(&afu->contexts_idr, ctx, afu->pasid_base, afu->pasid_base + afu->pasid_max, GFP_KERNEL); if (pasid < 0) { mutex_unlock(&afu->contexts_lock); + kfree(ctx); return pasid; } afu->pasid_count++; @@ -43,6 +42,7 @@ int ocxl_context_alloc(struct ocxl_context **context, struct ocxl_afu *afu, * duration of the life of the context */ ocxl_afu_get(afu); + *context = ctx; return 0; } EXPORT_SYMBOL_GPL(ocxl_context_alloc); diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c index 2870c25da166..4d1b44de1492 100644 --- a/drivers/misc/ocxl/file.c +++ b/drivers/misc/ocxl/file.c @@ -18,18 +18,15 @@ static struct class *ocxl_class; static struct mutex minors_idr_lock; static struct idr minors_idr; -static struct ocxl_file_info *find_file_info(dev_t devno) +static struct ocxl_file_info *find_and_get_file_info(dev_t devno) { struct ocxl_file_info *info; - /* - * We don't declare an RCU critical section here, as our AFU - * is protected by a reference counter 
on the device. By the time the - * info reference is removed from the idr, the ref count of - * the device is already at 0, so no user API will access that AFU and - * this function can't return it. - */ + mutex_lock(&minors_idr_lock); info = idr_find(&minors_idr, MINOR(devno)); + if (info) + get_device(&info->dev); + mutex_unlock(&minors_idr_lock); return info; } @@ -58,14 +55,16 @@ static int afu_open(struct inode *inode, struct file *file) pr_debug("%s for device %x\n", __func__, inode->i_rdev); - info = find_file_info(inode->i_rdev); + info = find_and_get_file_info(inode->i_rdev); if (!info) return -ENODEV; rc = ocxl_context_alloc(&ctx, info->afu, inode->i_mapping); - if (rc) + if (rc) { + put_device(&info->dev); return rc; - + } + put_device(&info->dev); file->private_data = ctx; return 0; } @@ -487,7 +486,6 @@ static void info_release(struct device *dev) { struct ocxl_file_info *info = container_of(dev, struct ocxl_file_info, dev); - free_minor(info); ocxl_afu_put(info->afu); kfree(info); } @@ -577,6 +575,7 @@ void ocxl_file_unregister_afu(struct ocxl_afu *afu) ocxl_file_make_invisible(info); ocxl_sysfs_unregister_afu(info); + free_minor(info); device_unregister(&info->dev); } diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c index 359c5bab45ac..063e4419cd7e 100644 --- a/drivers/misc/pti.c +++ b/drivers/misc/pti.c @@ -834,7 +834,7 @@ static int pti_pci_probe(struct pci_dev *pdev, } drv_data->aperture_base = drv_data->pti_addr+APERTURE_14; drv_data->pti_ioaddr = - ioremap_nocache((u32)drv_data->aperture_base, + ioremap((u32)drv_data->aperture_base, APERTURE_LEN); if (!drv_data->pti_ioaddr) { retval = -ENOMEM; diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c index 5e6be1527571..b837e7eba5f7 100644 --- a/drivers/misc/vmw_balloon.c +++ b/drivers/misc/vmw_balloon.c @@ -17,6 +17,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/types.h> +#include <linux/io.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/vmalloc.h> diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 95b41c0891d0..663d87924e5e 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -1107,7 +1107,7 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) card->erase_arg == MMC_TRIM_ARG ? INAND_CMD38_ARG_TRIM : INAND_CMD38_ARG_ERASE, - 0); + card->ext_csd.generic_cmd6_time); } if (!err) err = mmc_erase(card, from, nr, card->erase_arg); @@ -1149,7 +1149,7 @@ retry: arg == MMC_SECURE_TRIM1_ARG ? 
INAND_CMD38_ARG_SECTRIM1 : INAND_CMD38_ARG_SECERASE, - 0); + card->ext_csd.generic_cmd6_time); if (err) goto out_retry; } @@ -1167,7 +1167,7 @@ retry: err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, INAND_CMD38_ARG_EXT_CSD, INAND_CMD38_ARG_SECTRIM2, - 0); + card->ext_csd.generic_cmd6_time); if (err) goto out_retry; } diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index abf8f5eb0a1c..aa54d359dab7 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -2330,7 +2330,13 @@ void mmc_rescan(struct work_struct *work) } for (i = 0; i < ARRAY_SIZE(freqs); i++) { - if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min))) + unsigned int freq = freqs[i]; + if (freq > host->f_max) { + if (i + 1 < ARRAY_SIZE(freqs)) + continue; + freq = host->f_max; + } + if (!mmc_rescan_try_freq(host, max(freq, host->f_min))) break; if (freqs[i] <= host->f_min) break; @@ -2344,7 +2350,7 @@ void mmc_rescan(struct work_struct *work) void mmc_start_host(struct mmc_host *host) { - host->f_init = max(freqs[0], host->f_min); + host->f_init = max(min(freqs[0], host->f_max), host->f_min); host->rescan_disable = 0; host->ios.power_mode = MMC_POWER_UNDEFINED; diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index 105b7a7c0251..c8768726d925 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -175,8 +175,6 @@ int mmc_of_parse(struct mmc_host *host) struct device *dev = host->parent; u32 bus_width, drv_type, cd_debounce_delay_ms; int ret; - bool cd_cap_invert, cd_gpio_invert = false; - bool ro_cap_invert, ro_gpio_invert = false; if (!dev || !dev_fwnode(dev)) return 0; @@ -219,10 +217,12 @@ int mmc_of_parse(struct mmc_host *host) */ /* Parse Card Detection */ + if (device_property_read_bool(dev, "non-removable")) { host->caps |= MMC_CAP_NONREMOVABLE; } else { - cd_cap_invert = device_property_read_bool(dev, "cd-inverted"); + if (device_property_read_bool(dev, "cd-inverted")) + host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH; if (device_property_read_u32(dev, "cd-debounce-delay-ms", &cd_debounce_delay_ms)) @@ -232,32 +232,19 @@ int mmc_of_parse(struct mmc_host *host) host->caps |= MMC_CAP_NEEDS_POLL; ret = mmc_gpiod_request_cd(host, "cd", 0, false, - cd_debounce_delay_ms * 1000, - &cd_gpio_invert); + cd_debounce_delay_ms * 1000); if (!ret) dev_info(host->parent, "Got CD GPIO\n"); else if (ret != -ENOENT && ret != -ENOSYS) return ret; - - /* - * There are two ways to flag that the CD line is inverted: - * through the cd-inverted flag and by the GPIO line itself - * being inverted from the GPIO subsystem. This is a leftover - * from the times when the GPIO subsystem did not make it - * possible to flag a line as inverted. - * - * If the capability on the host AND the GPIO line are - * both inverted, the end result is that the CD line is - * not inverted. 
- */ - if (cd_cap_invert ^ cd_gpio_invert) - host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH; } /* Parse Write Protection */ - ro_cap_invert = device_property_read_bool(dev, "wp-inverted"); - ret = mmc_gpiod_request_ro(host, "wp", 0, 0, &ro_gpio_invert); + if (device_property_read_bool(dev, "wp-inverted")) + host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; + + ret = mmc_gpiod_request_ro(host, "wp", 0, 0); if (!ret) dev_info(host->parent, "Got WP GPIO\n"); else if (ret != -ENOENT && ret != -ENOSYS) @@ -266,10 +253,6 @@ int mmc_of_parse(struct mmc_host *host) if (device_property_read_bool(dev, "disable-wp")) host->caps2 |= MMC_CAP2_NO_WRITE_PROTECT; - /* See the comment on CD inversion above */ - if (ro_cap_invert ^ ro_gpio_invert) - host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; - if (device_property_read_bool(dev, "cap-sd-highspeed")) host->caps |= MMC_CAP_SD_HIGHSPEED; if (device_property_read_bool(dev, "cap-mmc-highspeed")) diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index 09113b9ad679..da425ee2d9bf 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c @@ -19,7 +19,9 @@ #include "host.h" #include "mmc_ops.h" -#define MMC_OPS_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */ +#define MMC_OPS_TIMEOUT_MS (10 * 60 * 1000) /* 10min*/ +#define MMC_BKOPS_TIMEOUT_MS (120 * 1000) /* 120s */ +#define MMC_CACHE_FLUSH_TIMEOUT_MS (30 * 1000) /* 30s */ static const u8 tuning_blk_pattern_4bit[] = { 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc, @@ -458,10 +460,6 @@ static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms, bool expired = false; bool busy = false; - /* We have an unspecified cmd timeout, use the fallback value. */ - if (!timeout_ms) - timeout_ms = MMC_OPS_TIMEOUT_MS; - /* * In cases when not allowed to poll by using CMD13 or because we aren't * capable of polling by using ->card_busy(), then rely on waiting the @@ -534,14 +532,19 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, mmc_retune_hold(host); + if (!timeout_ms) { + pr_warn("%s: unspecified timeout for CMD6 - use generic\n", + mmc_hostname(host)); + timeout_ms = card->ext_csd.generic_cmd6_time; + } + /* - * If the cmd timeout and the max_busy_timeout of the host are both - * specified, let's validate them. A failure means we need to prevent - * the host from doing hw busy detection, which is done by converting - * to a R1 response instead of a R1B. + * If the max_busy_timeout of the host is specified, make sure it's + * enough to fit the used timeout_ms. In case it's not, let's instruct + * the host to avoid HW busy detection, by converting to a R1 response + * instead of a R1B. */ - if (timeout_ms && host->max_busy_timeout && - (timeout_ms > host->max_busy_timeout)) + if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout)) use_r1b_resp = false; cmd.opcode = MMC_SWITCH; @@ -552,10 +555,6 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, cmd.flags = MMC_CMD_AC; if (use_r1b_resp) { cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B; - /* - * A busy_timeout of zero means the host can decide to use - * whatever value it finds suitable. - */ cmd.busy_timeout = timeout_ms; } else { cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1; @@ -941,7 +940,7 @@ void mmc_run_bkops(struct mmc_card *card) * urgent levels by using an asynchronous background task, when idle. 
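 *
 * A note on the hunk below: the catch-all ten-minute MMC_OPS_TIMEOUT_MS
 * is replaced by the dedicated 120 s MMC_BKOPS_TIMEOUT_MS, so hosts
 * that validate the busy period against ->max_busy_timeout in
 * __mmc_switch() now compare against a realistic value instead of
 * needlessly falling back from R1B to R1 polling.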
*/ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, - EXT_CSD_BKOPS_START, 1, MMC_OPS_TIMEOUT_MS); + EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS); if (err) pr_warn("%s: Error %d starting bkops\n", mmc_hostname(card->host), err); @@ -961,7 +960,8 @@ int mmc_flush_cache(struct mmc_card *card) (card->ext_csd.cache_size > 0) && (card->ext_csd.cache_ctrl & 1)) { err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, - EXT_CSD_FLUSH_CACHE, 1, 0); + EXT_CSD_FLUSH_CACHE, 1, + MMC_CACHE_FLUSH_TIMEOUT_MS); if (err) pr_err("%s: cache flush error %d\n", mmc_hostname(card->host), err); diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c index da2596c5fa28..05e907451df9 100644 --- a/drivers/mmc/core/slot-gpio.c +++ b/drivers/mmc/core/slot-gpio.c @@ -19,7 +19,6 @@ struct mmc_gpio { struct gpio_desc *ro_gpio; struct gpio_desc *cd_gpio; - bool override_cd_active_level; irqreturn_t (*cd_gpio_isr)(int irq, void *dev_id); char *ro_label; char *cd_label; @@ -80,13 +79,6 @@ int mmc_gpio_get_cd(struct mmc_host *host) return -ENOSYS; cansleep = gpiod_cansleep(ctx->cd_gpio); - if (ctx->override_cd_active_level) { - int value = cansleep ? - gpiod_get_raw_value_cansleep(ctx->cd_gpio) : - gpiod_get_raw_value(ctx->cd_gpio); - return !value ^ !!(host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH); - } - return cansleep ? gpiod_get_value_cansleep(ctx->cd_gpio) : gpiod_get_value(ctx->cd_gpio); @@ -168,8 +160,6 @@ EXPORT_SYMBOL(mmc_gpio_set_cd_isr); * @idx: index of the GPIO to obtain in the consumer * @override_active_level: ignore %GPIO_ACTIVE_LOW flag * @debounce: debounce time in microseconds - * @gpio_invert: will return whether the GPIO line is inverted or not, set - * to NULL to ignore * * Note that this must be called prior to mmc_add_host() * otherwise the caller must also call mmc_gpiod_request_cd_irq(). @@ -178,7 +168,7 @@ EXPORT_SYMBOL(mmc_gpio_set_cd_isr); */ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id, unsigned int idx, bool override_active_level, - unsigned int debounce, bool *gpio_invert) + unsigned int debounce) { struct mmc_gpio *ctx = host->slot.handler_priv; struct gpio_desc *desc; @@ -194,10 +184,14 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id, ctx->cd_debounce_delay_ms = debounce / 1000; } - if (gpio_invert) - *gpio_invert = !gpiod_is_active_low(desc); + /* override forces default (active-low) polarity ... */ + if (override_active_level && !gpiod_is_active_low(desc)) + gpiod_toggle_active_low(desc); + + /* ... or active-high */ + if (host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH) + gpiod_toggle_active_low(desc); - ctx->override_cd_active_level = override_active_level; ctx->cd_gpio = desc; return 0; @@ -218,14 +212,11 @@ EXPORT_SYMBOL(mmc_can_gpio_cd); * @con_id: function within the GPIO consumer * @idx: index of the GPIO to obtain in the consumer * @debounce: debounce time in microseconds - * @gpio_invert: will return whether the GPIO line is inverted or not, - * set to NULL to ignore * * Returns zero on success, else an error. 
*/ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id, - unsigned int idx, - unsigned int debounce, bool *gpio_invert) + unsigned int idx, unsigned int debounce) { struct mmc_gpio *ctx = host->slot.handler_priv; struct gpio_desc *desc; @@ -241,8 +232,8 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id, return ret; } - if (gpio_invert) - *gpio_invert = !gpiod_is_active_low(desc); + if (host->caps2 & MMC_CAP2_RO_ACTIVE_HIGH) + gpiod_toggle_active_low(desc); ctx->ro_gpio = desc; diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index d06b2dfe3c95..3a5089f0332c 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -501,6 +501,7 @@ config MMC_SDHCI_MSM depends on ARCH_QCOM || (ARM && COMPILE_TEST) depends on MMC_SDHCI_PLTFM select MMC_SDHCI_IO_ACCESSORS + select MMC_CQHCI help This selects the Secure Digital Host Controller Interface (SDHCI) support present in Qualcomm SOCs. The controller supports @@ -990,6 +991,7 @@ config MMC_SDHCI_BRCMSTB tristate "Broadcom SDIO/SD/MMC support" depends on ARCH_BRCMSTB || BMIPS_GENERIC depends on MMC_SDHCI_PLTFM + select MMC_CQHCI default y help This selects support for the SDIO/SD/MMC Host Controller on @@ -1010,6 +1012,7 @@ config MMC_SDHCI_OMAP depends on MMC_SDHCI_PLTFM && OF select THERMAL imply TI_SOC_THERMAL + select MMC_SDHCI_EXTERNAL_DMA if DMA_ENGINE help This selects the Secure Digital Host Controller Interface (SDHCI) support present in TI's DRA7 SOCs. The controller supports @@ -1040,3 +1043,6 @@ config MMC_OWL help This selects support for the SD/MMC Host Controller on Actions Semi Owl SoCs. + +config MMC_SDHCI_EXTERNAL_DMA + bool diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index 6f065bb5c55a..aeaaa5314924 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -2645,7 +2645,7 @@ static int atmci_runtime_resume(struct device *dev) { struct atmel_mci *host = dev_get_drvdata(dev); - pinctrl_pm_select_default_state(dev); + pinctrl_select_default_state(dev); return clk_prepare_enable(host->mck); } diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c index bc8aeb47a7b4..8823680ca42c 100644 --- a/drivers/mmc/host/au1xmmc.c +++ b/drivers/mmc/host/au1xmmc.c @@ -984,12 +984,9 @@ static int au1xmmc_probe(struct platform_device *pdev) goto out2; } - r = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!r) { - dev_err(&pdev->dev, "no IRQ defined\n"); + host->irq = platform_get_irq(pdev, 0); + if (host->irq < 0) goto out3; - } - host->irq = r->start; mmc->ops = &au1xmmc_ops; diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c index 99f61fd2a658..c3d949847cbd 100644 --- a/drivers/mmc/host/bcm2835.c +++ b/drivers/mmc/host/bcm2835.c @@ -1393,7 +1393,17 @@ static int bcm2835_probe(struct platform_device *pdev) host->dma_chan = NULL; host->dma_desc = NULL; - host->dma_chan_rxtx = dma_request_slave_channel(dev, "rx-tx"); + host->dma_chan_rxtx = dma_request_chan(dev, "rx-tx"); + if (IS_ERR(host->dma_chan_rxtx)) { + ret = PTR_ERR(host->dma_chan_rxtx); + host->dma_chan_rxtx = NULL; + + if (ret == -EPROBE_DEFER) + goto err; + + /* Ignore errors to fall back to PIO mode */ + } + clk = devm_clk_get(dev, NULL); if (IS_ERR(clk)) { diff --git a/drivers/mmc/host/cavium-thunderx.c b/drivers/mmc/host/cavium-thunderx.c index eee08d81b242..76013bbbcff3 100644 --- a/drivers/mmc/host/cavium-thunderx.c +++ b/drivers/mmc/host/cavium-thunderx.c @@ -76,8 +76,10 @@ static int thunder_mmc_probe(struct pci_dev *pdev, return ret; 
host->base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0)); - if (!host->base) - return -EINVAL; + if (!host->base) { + ret = -EINVAL; + goto error; + } /* On ThunderX these are identical */ host->dma_base = host->base; @@ -86,12 +88,14 @@ static int thunder_mmc_probe(struct pci_dev *pdev, host->reg_off_dma = 0x160; host->clk = devm_clk_get(dev, NULL); - if (IS_ERR(host->clk)) - return PTR_ERR(host->clk); + if (IS_ERR(host->clk)) { + ret = PTR_ERR(host->clk); + goto error; + } ret = clk_prepare_enable(host->clk); if (ret) - return ret; + goto error; host->sys_freq = clk_get_rate(host->clk); spin_lock_init(&host->irq_handler_lock); @@ -157,6 +161,7 @@ error: } } clk_disable_unprepare(host->clk); + pci_release_regions(pdev); return ret; } @@ -175,6 +180,7 @@ static void thunder_mmc_remove(struct pci_dev *pdev) writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host)); clk_disable_unprepare(host->clk); + pci_release_regions(pdev); } static const struct pci_device_id thunder_mmc_id_table[] = { diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c index ebfaeb33bc8c..f01fecd75833 100644 --- a/drivers/mmc/host/davinci_mmc.c +++ b/drivers/mmc/host/davinci_mmc.c @@ -1174,13 +1174,13 @@ static int mmc_davinci_parse_pdata(struct mmc_host *mmc) mmc->caps |= pdata->caps; /* Register a cd gpio, if there is not one, enable polling */ - ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL); + ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0); if (ret == -EPROBE_DEFER) return ret; else if (ret) mmc->caps |= MMC_CAP_NEEDS_POLL; - ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL); + ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0); if (ret == -EPROBE_DEFER) return ret; diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index fc9d4d000f97..bc5278ab5707 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -833,12 +833,14 @@ static int dw_mci_edmac_init(struct dw_mci *host) if (!host->dms) return -ENOMEM; - host->dms->ch = dma_request_slave_channel(host->dev, "rx-tx"); - if (!host->dms->ch) { + host->dms->ch = dma_request_chan(host->dev, "rx-tx"); + if (IS_ERR(host->dms->ch)) { + int ret = PTR_ERR(host->dms->ch); + dev_err(host->dev, "Failed to get external DMA channel.\n"); kfree(host->dms); host->dms = NULL; - return -ENXIO; + return ret; } return 0; diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c index 78383f60a3dc..fbae87d1f017 100644 --- a/drivers/mmc/host/jz4740_mmc.c +++ b/drivers/mmc/host/jz4740_mmc.c @@ -1108,7 +1108,7 @@ static int jz4740_mmc_suspend(struct device *dev) static int jz4740_mmc_resume(struct device *dev) { - return pinctrl_pm_select_default_state(dev); + return pinctrl_select_default_state(dev); } static SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend, diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index e712315c7e8d..35400cf2a2e4 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c @@ -161,7 +161,6 @@ struct meson_host { bool dram_access_quirk; struct pinctrl *pinctrl; - struct pinctrl_state *pins_default; struct pinctrl_state *pins_clk_gate; unsigned int bounce_buf_size; @@ -327,7 +326,7 @@ static void meson_mmc_clk_ungate(struct meson_host *host) u32 cfg; if (host->pins_clk_gate) - pinctrl_select_state(host->pinctrl, host->pins_default); + pinctrl_select_default_state(host->dev); /* Make sure the clock is not stopped in the controller */ cfg = readl(host->regs + SD_EMMC_CFG); @@ -1101,13 +1100,6 @@ static int meson_mmc_probe(struct 
platform_device *pdev) goto free_host; } - host->pins_default = pinctrl_lookup_state(host->pinctrl, - PINCTRL_STATE_DEFAULT); - if (IS_ERR(host->pins_default)) { - ret = PTR_ERR(host->pins_default); - goto free_host; - } - host->pins_clk_gate = pinctrl_lookup_state(host->pinctrl, "clk-gate"); if (IS_ERR(host->pins_clk_gate)) { diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c index ba9a63db73da..8b038e7b2cd3 100644 --- a/drivers/mmc/host/meson-mx-sdio.c +++ b/drivers/mmc/host/meson-mx-sdio.c @@ -638,7 +638,6 @@ static int meson_mx_mmc_probe(struct platform_device *pdev) struct platform_device *slot_pdev; struct mmc_host *mmc; struct meson_mx_mmc_host *host; - struct resource *res; int ret, irq; u32 conf; @@ -663,8 +662,7 @@ static int meson_mx_mmc_probe(struct platform_device *pdev) platform_set_drvdata(pdev, host); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - host->base = devm_ioremap_resource(host->controller_dev, res); + host->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(host->base)) { ret = PTR_ERR(host->base); goto error_free_mmc; diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index 74c6cfbf9172..951f76dc1ddd 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -1134,17 +1134,22 @@ static void mmc_spi_initsequence(struct mmc_spi_host *host) * SPI protocol. Another is that when chipselect is released while * the card returns BUSY status, the clock must issue several cycles * with chipselect high before the card will stop driving its output. + * + * SPI_CS_HIGH means "asserted" here. In some cases like when using + * GPIOs for chip select, SPI_CS_HIGH is set but this will be logically + * inverted by gpiolib, so if we want to ascertain to drive it high + * we should toggle the default with an XOR as we do here. */ - host->spi->mode |= SPI_CS_HIGH; + host->spi->mode ^= SPI_CS_HIGH; if (spi_setup(host->spi) != 0) { /* Just warn; most cards work without it. */ dev_warn(&host->spi->dev, "can't change chip-select polarity\n"); - host->spi->mode &= ~SPI_CS_HIGH; + host->spi->mode ^= SPI_CS_HIGH; } else { mmc_spi_readbytes(host, 18); - host->spi->mode &= ~SPI_CS_HIGH; + host->spi->mode ^= SPI_CS_HIGH; if (spi_setup(host->spi) != 0) { /* Wot, we can't get the same setup we had before? 
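 * The mode was only XOR-toggled back to the value the first
 * spi_setup() accepted, so a failure here is unexpected.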
*/ dev_err(&host->spi->dev, @@ -1421,7 +1426,7 @@ static int mmc_spi_probe(struct spi_device *spi) * Index 0 is card detect * Old boardfiles were specifying 1 ms as debounce */ - status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000, NULL); + status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000); if (status == -EPROBE_DEFER) goto fail_add_host; if (!status) { @@ -1436,7 +1441,7 @@ static int mmc_spi_probe(struct spi_device *spi) mmc_detect_change(mmc, 0); /* Index 1 is write protect/read only */ - status = mmc_gpiod_request_ro(mmc, NULL, 1, 0, NULL); + status = mmc_gpiod_request_ro(mmc, NULL, 1, 0); if (status == -EPROBE_DEFER) goto fail_add_host; if (!status) diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 40e72c30ea84..e9ffce8d41ea 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -169,6 +169,8 @@ static struct variant_data variant_ux500 = { .cmdreg_srsp = MCI_CPSM_RESPONSE, .datalength_bits = 24, .datactrl_blocksz = 11, + .datactrl_any_blocksz = true, + .dma_power_of_2 = true, .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN, .st_sdio = true, .st_clkdiv = true, @@ -202,6 +204,8 @@ static struct variant_data variant_ux500v2 = { .datactrl_mask_ddrmode = MCI_DPSM_ST_DDRMODE, .datalength_bits = 24, .datactrl_blocksz = 11, + .datactrl_any_blocksz = true, + .dma_power_of_2 = true, .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN, .st_sdio = true, .st_clkdiv = true, @@ -261,6 +265,7 @@ static struct variant_data variant_stm32_sdmmc = { .datacnt_useless = true, .datalength_bits = 25, .datactrl_blocksz = 14, + .datactrl_any_blocksz = true, .stm32_idmabsize_mask = GENMASK(12, 5), .busy_timeout = true, .busy_detect = true, @@ -284,6 +289,7 @@ static struct variant_data variant_qcom = { .data_cmd_enable = MCI_CPSM_QCOM_DATCMD, .datalength_bits = 24, .datactrl_blocksz = 11, + .datactrl_any_blocksz = true, .pwrreg_powerup = MCI_PWR_UP, .f_max = 208000000, .explicit_mclk_control = true, @@ -452,10 +458,11 @@ static void mmci_dma_setup(struct mmci_host *host) static int mmci_validate_data(struct mmci_host *host, struct mmc_data *data) { + struct variant_data *variant = host->variant; + if (!data) return 0; - - if (!is_power_of_2(data->blksz)) { + if (!is_power_of_2(data->blksz) && !variant->datactrl_any_blocksz) { dev_err(mmc_dev(host->mmc), "unsupported block size (%d bytes)\n", data->blksz); return -EINVAL; @@ -520,7 +527,9 @@ static int mmci_dma_start(struct mmci_host *host, unsigned int datactrl) "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n", data->sg_len, data->blksz, data->blocks, data->flags); - host->ops->dma_start(host, &datactrl); + ret = host->ops->dma_start(host, &datactrl); + if (ret) + return ret; /* Trigger the DMA transfer */ mmci_write_datactrlreg(host, datactrl); @@ -706,10 +715,20 @@ int mmci_dmae_setup(struct mmci_host *host) host->dma_priv = dmae; - dmae->rx_channel = dma_request_slave_channel(mmc_dev(host->mmc), - "rx"); - dmae->tx_channel = dma_request_slave_channel(mmc_dev(host->mmc), - "tx"); + dmae->rx_channel = dma_request_chan(mmc_dev(host->mmc), "rx"); + if (IS_ERR(dmae->rx_channel)) { + int ret = PTR_ERR(dmae->rx_channel); + dmae->rx_channel = NULL; + return ret; + } + + dmae->tx_channel = dma_request_chan(mmc_dev(host->mmc), "tx"); + if (IS_ERR(dmae->tx_channel)) { + if (PTR_ERR(dmae->tx_channel) == -EPROBE_DEFER) + dev_warn(mmc_dev(host->mmc), + "Deferred probe for TX channel ignored\n"); + dmae->tx_channel = NULL; + } /* * If only an RX channel is specified, the driver will @@ -888,6 +907,18 @@ static int 
_mmci_dmae_prep_data(struct mmci_host *host, struct mmc_data *data, if (data->blksz * data->blocks <= variant->fifosize) return -EINVAL; + /* + * This is necessary to get SDIO working on the Ux500. We do not yet + * know if this is a bug in: + * - The Ux500 DMA controller (DMA40) + * - The MMCI DMA interface on the Ux500 + * some power of two blocks (such as 64 bytes) are sent regularly + * during SDIO traffic and those work fine so for these we enable DMA + * transfers. + */ + if (host->variant->dma_power_of_2 && !is_power_of_2(data->blksz)) + return -EINVAL; + device = chan->device; nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, mmc_get_dma_dir(data)); @@ -938,9 +969,14 @@ int mmci_dmae_prep_data(struct mmci_host *host, int mmci_dmae_start(struct mmci_host *host, unsigned int *datactrl) { struct mmci_dmae_priv *dmae = host->dma_priv; + int ret; host->dma_in_progress = true; - dmaengine_submit(dmae->desc_current); + ret = dma_submit_error(dmaengine_submit(dmae->desc_current)); + if (ret < 0) { + host->dma_in_progress = false; + return ret; + } dma_async_issue_pending(dmae->cur); *datactrl |= MCI_DPSM_DMAENABLE; @@ -1321,6 +1357,7 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd, } else if (host->variant->busy_timeout && busy_resp && status & MCI_DATATIMEOUT) { cmd->error = -ETIMEDOUT; + host->irq_action = IRQ_WAKE_THREAD; } else { cmd->resp[0] = readl(base + MMCIRESPONSE0); cmd->resp[1] = readl(base + MMCIRESPONSE1); @@ -1339,7 +1376,10 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd, return; } } - mmci_request_end(host, host->mrq); + + if (host->irq_action != IRQ_WAKE_THREAD) + mmci_request_end(host, host->mrq); + } else if (sbc) { mmci_start_command(host, host->mrq->cmd, 0); } else if (!host->variant->datactrl_first && @@ -1532,9 +1572,9 @@ static irqreturn_t mmci_irq(int irq, void *dev_id) { struct mmci_host *host = dev_id; u32 status; - int ret = 0; spin_lock(&host->lock); + host->irq_action = IRQ_HANDLED; do { status = readl(host->base + MMCISTATUS); @@ -1574,12 +1614,41 @@ static irqreturn_t mmci_irq(int irq, void *dev_id) if (host->variant->busy_detect_flag) status &= ~host->variant->busy_detect_flag; - ret = 1; } while (status); spin_unlock(&host->lock); - return IRQ_RETVAL(ret); + return host->irq_action; +} + +/* + * mmci_irq_thread() - A threaded IRQ handler that manages a reset of the HW. + * + * A reset is needed for some variants, where a datatimeout for a R1B request + * causes the DPSM to stay busy (non-functional). 
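+ *
+ * The recovery below is two-step: pulse the optional reset controller
+ * to get the DPSM out of the stuck state, then, under the host lock,
+ * rewrite MMCICLOCK, MMCIPOWER and MMCIMASK0 (which the reset cleared)
+ * before completing the timed-out request. mmci_cmd_irq() arms this
+ * handler by setting host->irq_action to IRQ_WAKE_THREAD when a busy
+ * response hits MCI_DATATIMEOUT.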
+ */ +static irqreturn_t mmci_irq_thread(int irq, void *dev_id) +{ + struct mmci_host *host = dev_id; + unsigned long flags; + + if (host->rst) { + reset_control_assert(host->rst); + udelay(2); + reset_control_deassert(host->rst); + } + + spin_lock_irqsave(&host->lock, flags); + writel(host->clk_reg, host->base + MMCICLOCK); + writel(host->pwr_reg, host->base + MMCIPOWER); + writel(MCI_IRQENABLE | host->variant->start_err, + host->base + MMCIMASK0); + + host->irq_action = IRQ_HANDLED; + mmci_request_end(host, host->mrq); + spin_unlock_irqrestore(&host->lock, flags); + + return host->irq_action; } static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq) @@ -1704,7 +1773,7 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) pinctrl_select_state(host->pinctrl, host->pins_opendrain); else - pinctrl_select_state(host->pinctrl, host->pins_default); + pinctrl_select_default_state(mmc_dev(mmc)); } /* @@ -1877,14 +1946,6 @@ static int mmci_probe(struct amba_device *dev, goto host_free; } - host->pins_default = pinctrl_lookup_state(host->pinctrl, - PINCTRL_STATE_DEFAULT); - if (IS_ERR(host->pins_default)) { - dev_err(mmc_dev(mmc), "Can't select default pins\n"); - ret = PTR_ERR(host->pins_default); - goto host_free; - } - host->pins_opendrain = pinctrl_lookup_state(host->pinctrl, MMCI_PINCTRL_STATE_OPENDRAIN); if (IS_ERR(host->pins_opendrain)) { @@ -2062,17 +2123,18 @@ static int mmci_probe(struct amba_device *dev, * silently of these do not exist */ if (!np) { - ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL); + ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0); if (ret == -EPROBE_DEFER) goto clk_disable; - ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL); + ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0); if (ret == -EPROBE_DEFER) goto clk_disable; } - ret = devm_request_irq(&dev->dev, dev->irq[0], mmci_irq, IRQF_SHARED, - DRIVER_NAME " (cmd)", host); + ret = devm_request_threaded_irq(&dev->dev, dev->irq[0], mmci_irq, + mmci_irq_thread, IRQF_SHARED, + DRIVER_NAME " (cmd)", host); if (ret) goto clk_disable; @@ -2203,7 +2265,7 @@ static int mmci_runtime_resume(struct device *dev) struct mmci_host *host = mmc_priv(mmc); clk_prepare_enable(host->clk); mmci_restore(host); - pinctrl_pm_select_default_state(dev); + pinctrl_select_default_state(dev); } return 0; diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h index 158e1231aa23..ea6a0b5779d4 100644 --- a/drivers/mmc/host/mmci.h +++ b/drivers/mmc/host/mmci.h @@ -279,7 +279,11 @@ struct mmci_host; * @stm32_clkdiv: true if using a STM32-specific clock divider algorithm * @datactrl_mask_ddrmode: ddr mode mask in datactrl register. * @datactrl_mask_sdio: SDIO enable mask in datactrl register - * @datactrl_blksz: block size in power of two + * @datactrl_blocksz: block size in power of two + * @datactrl_any_blocksz: true if any block sizes are accepted by + * hardware, such as with some SDIO traffic that sends + * odd packets. + * @dma_power_of_2: DMA only works with blocks that are a power of 2.
* @datactrl_first: true if data must be setup before send command * @datacnt_useless: true if you could not use datacnt register to read * remaining data @@ -326,6 +330,8 @@ struct variant_data { unsigned int datactrl_mask_ddrmode; unsigned int datactrl_mask_sdio; unsigned int datactrl_blocksz; + u8 datactrl_any_blocksz:1; + u8 dma_power_of_2:1; u8 datactrl_first:1; u8 datacnt_useless:1; u8 st_sdio:1; @@ -404,7 +410,6 @@ struct mmci_host { struct mmci_host_ops *ops; struct variant_data *variant; struct pinctrl *pinctrl; - struct pinctrl_state *pins_default; struct pinctrl_state *pins_opendrain; u8 hw_designer; @@ -412,6 +417,7 @@ struct mmci_host { struct timer_list timer; unsigned int oldstat; + u32 irq_action; /* pio stuff */ struct sg_mapping_iter sg_miter; diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index 189e42674d85..7726dcf48f2c 100644 --- a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c @@ -228,6 +228,7 @@ #define MSDC_PATCH_BIT_SPCPUSH (0x1 << 29) /* RW */ #define MSDC_PATCH_BIT_DECRCTMO (0x1 << 30) /* RW */ +#define MSDC_PATCH_BIT1_CMDTA (0x7 << 3) /* RW */ #define MSDC_PATCH_BIT1_STOP_DLY (0xf << 8) /* RW */ #define MSDC_PATCH_BIT2_CFGRESP (0x1 << 15) /* RW */ @@ -1881,6 +1882,7 @@ static int hs400_tune_response(struct mmc_host *mmc, u32 opcode) /* select EMMC50 PAD CMD tune */ sdr_set_bits(host->base + PAD_CMD_TUNE, BIT(0)); + sdr_set_field(host->base + MSDC_PATCH_BIT1, MSDC_PATCH_BIT1_CMDTA, 2); if (mmc->ios.timing == MMC_TIMING_MMC_HS200 || mmc->ios.timing == MMC_TIMING_UHS_SDR104) @@ -2192,8 +2194,7 @@ static int msdc_drv_probe(struct platform_device *pdev) if (ret) goto host_free; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - host->base = devm_ioremap_resource(&pdev->dev, res); + host->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(host->base)) { ret = PTR_ERR(host->base); goto host_free; diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c index 74a0a7fbbf7f..203b61712601 100644 --- a/drivers/mmc/host/mvsdio.c +++ b/drivers/mmc/host/mvsdio.c @@ -696,16 +696,14 @@ static int mvsd_probe(struct platform_device *pdev) struct mmc_host *mmc = NULL; struct mvsd_host *host = NULL; const struct mbus_dram_target_info *dram; - struct resource *r; int ret, irq; if (!np) { dev_err(&pdev->dev, "no DT node\n"); return -ENODEV; } - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); - if (!r || irq < 0) + if (irq < 0) return -ENXIO; mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev); @@ -758,7 +756,7 @@ static int mvsd_probe(struct platform_device *pdev) spin_lock_init(&host->lock); - host->base = devm_ioremap_resource(&pdev->dev, r); + host->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(host->base)) { ret = PTR_ERR(host->base); goto out; diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c index 011b59a3602e..b3d654c688e5 100644 --- a/drivers/mmc/host/mxcmmc.c +++ b/drivers/mmc/host/mxcmmc.c @@ -1121,7 +1121,16 @@ static int mxcmci_probe(struct platform_device *pdev) mxcmci_writel(host, host->default_irq_mask, MMC_REG_INT_CNTR); if (!host->pdata) { - host->dma = dma_request_slave_channel(&pdev->dev, "rx-tx"); + host->dma = dma_request_chan(&pdev->dev, "rx-tx"); + if (IS_ERR(host->dma)) { + if (PTR_ERR(host->dma) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto out_clk_put; + } + + /* Ignore errors to fall back to PIO mode */ + host->dma = NULL; + } } else { res = platform_get_resource(pdev, IORESOURCE_DMA, 0); if (res) { diff --git 
a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c index 4031217d21c3..d82674aed447 100644 --- a/drivers/mmc/host/mxs-mmc.c +++ b/drivers/mmc/host/mxs-mmc.c @@ -623,11 +623,11 @@ static int mxs_mmc_probe(struct platform_device *pdev) goto out_clk_disable; } - ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx"); - if (!ssp->dmach) { + ssp->dmach = dma_request_chan(&pdev->dev, "rx-tx"); + if (IS_ERR(ssp->dmach)) { dev_err(mmc_dev(host->mmc), "%s: failed to request dma\n", __func__); - ret = -ENODEV; + ret = PTR_ERR(ssp->dmach); goto out_clk_disable; } diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 767e964ca5a2..a379c45b985c 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -1605,12 +1605,6 @@ static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host) ret = PTR_ERR(p); goto err_free_irq; } - if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT))) { - dev_info(host->dev, "missing default pinctrl state\n"); - devm_pinctrl_put(p); - ret = -EINVAL; - goto err_free_irq; - } if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_IDLE))) { dev_info(host->dev, "missing idle pinctrl state\n"); @@ -2153,14 +2147,14 @@ static int omap_hsmmc_runtime_resume(struct device *dev) if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) && (host->flags & HSMMC_SDIO_IRQ_ENABLED)) { - pinctrl_pm_select_default_state(host->dev); + pinctrl_select_default_state(host->dev); /* irq lost, if pinmux incorrect */ OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); OMAP_HSMMC_WRITE(host->base, ISE, CIRQ_EN); OMAP_HSMMC_WRITE(host->base, IE, CIRQ_EN); } else { - pinctrl_pm_select_default_state(host->dev); + pinctrl_select_default_state(host->dev); } spin_unlock_irqrestore(&host->irq_lock, flags); return 0; diff --git a/drivers/mmc/host/owl-mmc.c b/drivers/mmc/host/owl-mmc.c index 771e3d00f1bb..01ffe51f413d 100644 --- a/drivers/mmc/host/owl-mmc.c +++ b/drivers/mmc/host/owl-mmc.c @@ -616,10 +616,10 @@ static int owl_mmc_probe(struct platform_device *pdev) pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; - owl_host->dma = dma_request_slave_channel(&pdev->dev, "mmc"); - if (!owl_host->dma) { + owl_host->dma = dma_request_chan(&pdev->dev, "mmc"); + if (IS_ERR(owl_host->dma)) { dev_err(owl_host->dev, "Failed to get external DMA channel.\n"); - ret = -ENXIO; + ret = PTR_ERR(owl_host->dma); goto err_free_host; } diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index 024acc1b0a2e..3a9333475a2b 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c @@ -710,17 +710,19 @@ static int pxamci_probe(struct platform_device *pdev) platform_set_drvdata(pdev, mmc); - host->dma_chan_rx = dma_request_slave_channel(dev, "rx"); - if (host->dma_chan_rx == NULL) { + host->dma_chan_rx = dma_request_chan(dev, "rx"); + if (IS_ERR(host->dma_chan_rx)) { dev_err(dev, "unable to request rx dma channel\n"); - ret = -ENODEV; + ret = PTR_ERR(host->dma_chan_rx); + host->dma_chan_rx = NULL; goto out; } - host->dma_chan_tx = dma_request_slave_channel(dev, "tx"); - if (host->dma_chan_tx == NULL) { + host->dma_chan_tx = dma_request_chan(dev, "tx"); + if (IS_ERR(host->dma_chan_tx)) { dev_err(dev, "unable to request tx dma channel\n"); - ret = -ENODEV; + ret = PTR_ERR(host->dma_chan_tx); + host->dma_chan_tx = NULL; goto out; } @@ -734,22 +736,22 @@ static int pxamci_probe(struct platform_device *pdev) } /* FIXME: should we pass detection delay to debounce? 
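 * (mmc_gpiod_request_cd() takes the debounce in microseconds; mmc_spi
 * above passes 1000 for the old 1 ms boardfile value, so scaling the
 * platform data's detect-delay field by 1000 would presumably be the
 * equivalent here.)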
*/ - ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL); + ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0); if (ret && ret != -ENOENT) { dev_err(dev, "Failed requesting gpio_cd\n"); goto out; } - ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL); + if (!host->pdata->gpio_card_ro_invert) + mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; + + ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0); if (ret && ret != -ENOENT) { dev_err(dev, "Failed requesting gpio_ro\n"); goto out; } - if (!ret) { + if (!ret) host->use_ro_gpio = true; - mmc->caps2 |= host->pdata->gpio_card_ro_invert ? - 0 : MMC_CAP2_RO_ACTIVE_HIGH; - } if (host->pdata->init) host->pdata->init(dev, pxamci_detect_irq, mmc); diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h index c0504aa90857..f524251d5113 100644 --- a/drivers/mmc/host/renesas_sdhi.h +++ b/drivers/mmc/host/renesas_sdhi.h @@ -14,8 +14,8 @@ struct renesas_sdhi_scc { unsigned long clk_rate; /* clock rate for SDR104 */ - u32 tap; /* sampling clock position for SDR104 */ - u32 tap_hs400; /* sampling clock position for HS400 */ + u32 tap; /* sampling clock position for SDR104/HS400 (8 TAP) */ + u32 tap_hs400_4tap; /* sampling clock position for HS400 (4 TAP) */ }; struct renesas_sdhi_of_data { @@ -33,6 +33,11 @@ struct renesas_sdhi_of_data { unsigned short max_segs; }; +struct renesas_sdhi_quirks { + bool hs400_disabled; + bool hs400_4taps; +}; + struct tmio_mmc_dma { enum dma_slave_buswidth dma_buswidth; bool (*filter)(struct dma_chan *chan, void *arg); @@ -46,6 +51,7 @@ struct renesas_sdhi { struct clk *clk_cd; struct tmio_mmc_data mmc_data; struct tmio_mmc_dma dma_priv; + const struct renesas_sdhi_quirks *quirks; struct pinctrl *pinctrl; struct pinctrl_state *pins_default, *pins_uhs; void __iomem *scc_ctl; diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index 234551a68739..35cb24cd45b4 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -46,11 +46,6 @@ #define SDHI_VER_GEN3_SD 0xcc10 #define SDHI_VER_GEN3_SDMMC 0xcd10 -struct renesas_sdhi_quirks { - bool hs400_disabled; - bool hs400_4taps; -}; - static void renesas_sdhi_sdbuf_width(struct tmio_mmc_host *host, int width) { u32 val; @@ -355,7 +350,7 @@ static void renesas_sdhi_hs400_complete(struct tmio_mmc_host *host) 0x4 << SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT); - if (host->pdata->flags & TMIO_MMC_HAVE_4TAP_HS400) + if (priv->quirks && priv->quirks->hs400_4taps) sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, host->tap_set / 2); @@ -493,7 +488,7 @@ static int renesas_sdhi_select_tuning(struct tmio_mmc_host *host) static bool renesas_sdhi_check_scc_error(struct tmio_mmc_host *host) { struct renesas_sdhi *priv = host_to_priv(host); - bool use_4tap = host->pdata->flags & TMIO_MMC_HAVE_4TAP_HS400; + bool use_4tap = priv->quirks && priv->quirks->hs400_4taps; /* * Skip checking SCC errors when running on 4 taps in HS400 mode as @@ -627,10 +622,10 @@ static const struct renesas_sdhi_quirks sdhi_quirks_nohs400 = { }; static const struct soc_device_attribute sdhi_quirks_match[] = { + { .soc_id = "r8a774a1", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 }, { .soc_id = "r8a7795", .revision = "ES1.*", .data = &sdhi_quirks_4tap_nohs400 }, { .soc_id = "r8a7795", .revision = "ES2.0", .data = &sdhi_quirks_4tap }, { .soc_id = "r8a7796", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 }, - { .soc_id = "r8a774a1", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 }, { .soc_id = "r8a77980", 
.data = &sdhi_quirks_nohs400 }, { /* Sentinel. */ }, }; @@ -665,6 +660,7 @@ int renesas_sdhi_probe(struct platform_device *pdev, if (!priv) return -ENOMEM; + priv->quirks = quirks; mmc_data = &priv->mmc_data; dma_priv = &priv->dma_priv; @@ -724,9 +720,6 @@ int renesas_sdhi_probe(struct platform_device *pdev, if (quirks && quirks->hs400_disabled) host->mmc->caps2 &= ~(MMC_CAP2_HS400 | MMC_CAP2_HS400_ES); - if (quirks && quirks->hs400_4taps) - mmc_data->flags |= TMIO_MMC_HAVE_4TAP_HS400; - /* For some SoC, we disable internal WP. GPIO may override this */ if (mmc_can_gpio_ro(host->mmc)) mmc_data->capabilities2 &= ~MMC_CAP2_NO_WRITE_PROTECT; @@ -800,20 +793,23 @@ int renesas_sdhi_probe(struct platform_device *pdev, host->mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V))) { const struct renesas_sdhi_scc *taps = of_data->taps; + bool use_4tap = priv->quirks && priv->quirks->hs400_4taps; bool hit = false; for (i = 0; i < of_data->taps_num; i++) { if (taps[i].clk_rate == 0 || taps[i].clk_rate == host->mmc->f_max) { priv->scc_tappos = taps->tap; - priv->scc_tappos_hs400 = taps->tap_hs400; + priv->scc_tappos_hs400 = use_4tap ? + taps->tap_hs400_4tap : + taps->tap; hit = true; break; } } if (!hit) - dev_warn(&host->pdev->dev, "Unknown clock rate for SDR104\n"); + dev_warn(&host->pdev->dev, "Unknown clock rate for tuning\n"); host->init_tuning = renesas_sdhi_init_tuning; host->prepare_tuning = renesas_sdhi_prepare_tuning; diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c index 18839a10594c..47ac53e91241 100644 --- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c @@ -82,7 +82,7 @@ static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = { { .clk_rate = 0, .tap = 0x00000300, - .tap_hs400 = 0x00000704, + .tap_hs400_4tap = 0x00000100, }, }; @@ -298,38 +298,23 @@ static const struct tmio_mmc_dma_ops renesas_sdhi_internal_dmac_dma_ops = { * Whitelist of specific R-Car Gen3 SoC ES versions to use this DMAC * implementation as others may use a different implementation. 
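 *
 * (After the change below, the table only carries quirk flags for the
 * known-problematic ES revisions; an SoC that does not match simply
 * runs without extra flags instead of failing the probe with -ENODEV.)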
*/ -static const struct soc_device_attribute soc_whitelist[] = { - /* specific ones */ +static const struct soc_device_attribute soc_dma_quirks[] = { { .soc_id = "r7s9210", .data = (void *)BIT(SDHI_INTERNAL_DMAC_ADDR_MODE_FIXED_ONLY) }, { .soc_id = "r8a7795", .revision = "ES1.*", .data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) }, { .soc_id = "r8a7796", .revision = "ES1.0", .data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) }, - /* generic ones */ - { .soc_id = "r8a774a1" }, - { .soc_id = "r8a774b1" }, - { .soc_id = "r8a774c0" }, - { .soc_id = "r8a77470" }, - { .soc_id = "r8a7795" }, - { .soc_id = "r8a7796" }, - { .soc_id = "r8a77965" }, - { .soc_id = "r8a77970" }, - { .soc_id = "r8a77980" }, - { .soc_id = "r8a77990" }, - { .soc_id = "r8a77995" }, { /* sentinel */ } }; static int renesas_sdhi_internal_dmac_probe(struct platform_device *pdev) { - const struct soc_device_attribute *soc = soc_device_match(soc_whitelist); + const struct soc_device_attribute *soc = soc_device_match(soc_dma_quirks); struct device *dev = &pdev->dev; - if (!soc) - return -ENODEV; - - global_flags |= (unsigned long)soc->data; + if (soc) + global_flags |= (unsigned long)soc->data; dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL); if (!dev->dma_parms) diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c index bce9c33bc4b5..1e616ae56b13 100644 --- a/drivers/mmc/host/s3cmci.c +++ b/drivers/mmc/host/s3cmci.c @@ -1505,14 +1505,14 @@ static int s3cmci_probe_pdata(struct s3cmci_host *host) mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; /* If we get -ENOENT we have no card detect GPIO line */ - ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL); + ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0); if (ret != -ENOENT) { dev_err(&pdev->dev, "error requesting GPIO for CD %d\n", ret); return ret; } - ret = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0, NULL); + ret = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0); if (ret != -ENOENT) { dev_err(&pdev->dev, "error requesting GPIO for WP %d\n", ret); diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index 105e73d4a3b9..9651dca6863e 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c @@ -719,7 +719,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev) goto err_free; } - host->ioaddr = devm_ioremap_nocache(dev, iomem->start, + host->ioaddr = devm_ioremap(dev, iomem->start, resource_size(iomem)); if (host->ioaddr == NULL) { err = -ENOMEM; @@ -752,7 +752,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev) if (sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD)) { bool v = sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL); - err = mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0, NULL); + err = mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0); if (err) { if (err == -EPROBE_DEFER) goto err_free; diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c index 73bb440aaf93..ad01f6451a95 100644 --- a/drivers/mmc/host/sdhci-brcmstb.c +++ b/drivers/mmc/host/sdhci-brcmstb.c @@ -9,29 +9,236 @@ #include <linux/mmc/host.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/bitops.h> +#include <linux/delay.h> #include "sdhci-pltfm.h" +#include "cqhci.h" -static const struct sdhci_ops sdhci_brcmstb_ops = { +#define SDHCI_VENDOR 0x78 +#define SDHCI_VENDOR_ENHANCED_STRB 0x1 + +#define BRCMSTB_PRIV_FLAGS_NO_64BIT BIT(0) +#define BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT BIT(1) + +#define SDHCI_ARASAN_CQE_BASE_ADDR 0x200 + +struct sdhci_brcmstb_priv { + void __iomem *cfg_regs; + bool 
has_cqe; +}; + +struct brcmstb_match_priv { + void (*hs400es)(struct mmc_host *mmc, struct mmc_ios *ios); + struct sdhci_ops *ops; + unsigned int flags; +}; + +static void sdhci_brcmstb_hs400es(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct sdhci_host *host = mmc_priv(mmc); + + u32 reg; + + dev_dbg(mmc_dev(mmc), "%s(): Setting HS400-Enhanced-Strobe mode\n", + __func__); + reg = readl(host->ioaddr + SDHCI_VENDOR); + if (ios->enhanced_strobe) + reg |= SDHCI_VENDOR_ENHANCED_STRB; + else + reg &= ~SDHCI_VENDOR_ENHANCED_STRB; + writel(reg, host->ioaddr + SDHCI_VENDOR); +} + +static void sdhci_brcmstb_set_clock(struct sdhci_host *host, unsigned int clock) +{ + u16 clk; + + host->mmc->actual_clock = 0; + + clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock); + sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); + + if (clock == 0) + return; + + sdhci_enable_clk(host, clk); +} + +static void sdhci_brcmstb_set_uhs_signaling(struct sdhci_host *host, + unsigned int timing) +{ + u16 ctrl_2; + + dev_dbg(mmc_dev(host->mmc), "%s: Setting UHS signaling for %d timing\n", + __func__, timing); + ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); + /* Select Bus Speed Mode for host */ + ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; + if ((timing == MMC_TIMING_MMC_HS200) || + (timing == MMC_TIMING_UHS_SDR104)) + ctrl_2 |= SDHCI_CTRL_UHS_SDR104; + else if (timing == MMC_TIMING_UHS_SDR12) + ctrl_2 |= SDHCI_CTRL_UHS_SDR12; + else if (timing == MMC_TIMING_SD_HS || + timing == MMC_TIMING_MMC_HS || + timing == MMC_TIMING_UHS_SDR25) + ctrl_2 |= SDHCI_CTRL_UHS_SDR25; + else if (timing == MMC_TIMING_UHS_SDR50) + ctrl_2 |= SDHCI_CTRL_UHS_SDR50; + else if ((timing == MMC_TIMING_UHS_DDR50) || + (timing == MMC_TIMING_MMC_DDR52)) + ctrl_2 |= SDHCI_CTRL_UHS_DDR50; + else if (timing == MMC_TIMING_MMC_HS400) + ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */ + sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); +} + +static void sdhci_brcmstb_dumpregs(struct mmc_host *mmc) +{ + sdhci_dumpregs(mmc_priv(mmc)); +} + +static void sdhci_brcmstb_cqe_enable(struct mmc_host *mmc) +{ + struct sdhci_host *host = mmc_priv(mmc); + u32 reg; + + reg = sdhci_readl(host, SDHCI_PRESENT_STATE); + while (reg & SDHCI_DATA_AVAILABLE) { + sdhci_readl(host, SDHCI_BUFFER); + reg = sdhci_readl(host, SDHCI_PRESENT_STATE); + } + + sdhci_cqe_enable(mmc); +} + +static const struct cqhci_host_ops sdhci_brcmstb_cqhci_ops = { + .enable = sdhci_brcmstb_cqe_enable, + .disable = sdhci_cqe_disable, + .dumpregs = sdhci_brcmstb_dumpregs, +}; + +static struct sdhci_ops sdhci_brcmstb_ops = { .set_clock = sdhci_set_clock, .set_bus_width = sdhci_set_bus_width, .reset = sdhci_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, }; -static const struct sdhci_pltfm_data sdhci_brcmstb_pdata = { +static struct sdhci_ops sdhci_brcmstb_ops_7216 = { + .set_clock = sdhci_brcmstb_set_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_brcmstb_set_uhs_signaling, +}; + +static struct brcmstb_match_priv match_priv_7425 = { + .flags = BRCMSTB_PRIV_FLAGS_NO_64BIT | + BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT, .ops = &sdhci_brcmstb_ops, }; +static struct brcmstb_match_priv match_priv_7445 = { + .flags = BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT, + .ops = &sdhci_brcmstb_ops, +}; + +static const struct brcmstb_match_priv match_priv_7216 = { + .hs400es = sdhci_brcmstb_hs400es, + .ops = &sdhci_brcmstb_ops_7216, +}; + +static const struct of_device_id sdhci_brcm_of_match[] = { + { .compatible = "brcm,bcm7425-sdhci", .data = &match_priv_7425 }, + { .compatible = 
"brcm,bcm7445-sdhci", .data = &match_priv_7445 }, + { .compatible = "brcm,bcm7216-sdhci", .data = &match_priv_7216 }, + {}, +}; + +static u32 sdhci_brcmstb_cqhci_irq(struct sdhci_host *host, u32 intmask) +{ + int cmd_error = 0; + int data_error = 0; + + if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error)) + return intmask; + + cqhci_irq(host->mmc, intmask, cmd_error, data_error); + + return 0; +} + +static int sdhci_brcmstb_add_host(struct sdhci_host *host, + struct sdhci_brcmstb_priv *priv) +{ + struct cqhci_host *cq_host; + bool dma64; + int ret; + + if (!priv->has_cqe) + return sdhci_add_host(host); + + dev_dbg(mmc_dev(host->mmc), "CQE is enabled\n"); + host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD; + ret = sdhci_setup_host(host); + if (ret) + return ret; + + cq_host = devm_kzalloc(mmc_dev(host->mmc), + sizeof(*cq_host), GFP_KERNEL); + if (!cq_host) { + ret = -ENOMEM; + goto cleanup; + } + + cq_host->mmio = host->ioaddr + SDHCI_ARASAN_CQE_BASE_ADDR; + cq_host->ops = &sdhci_brcmstb_cqhci_ops; + + dma64 = host->flags & SDHCI_USE_64_BIT_DMA; + if (dma64) { + dev_dbg(mmc_dev(host->mmc), "Using 64 bit DMA\n"); + cq_host->caps |= CQHCI_TASK_DESC_SZ_128; + cq_host->quirks |= CQHCI_QUIRK_SHORT_TXFR_DESC_SZ; + } + + ret = cqhci_init(cq_host, host->mmc, dma64); + if (ret) + goto cleanup; + + ret = __sdhci_add_host(host); + if (ret) + goto cleanup; + + return 0; + +cleanup: + sdhci_cleanup_host(host); + return ret; +} + static int sdhci_brcmstb_probe(struct platform_device *pdev) { - struct sdhci_host *host; + const struct brcmstb_match_priv *match_priv; + struct sdhci_pltfm_data brcmstb_pdata; struct sdhci_pltfm_host *pltfm_host; + const struct of_device_id *match; + struct sdhci_brcmstb_priv *priv; + struct sdhci_host *host; + struct resource *iomem; + bool has_cqe = false; struct clk *clk; int res; + match = of_match_node(sdhci_brcm_of_match, pdev->dev.of_node); + match_priv = match->data; + + dev_dbg(&pdev->dev, "Probe found match for %s\n", match->compatible); + clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(clk)) { + if (PTR_ERR(clk) == -EPROBE_DEFER) + return -EPROBE_DEFER; dev_err(&pdev->dev, "Clock not found in Device Tree\n"); clk = NULL; } @@ -39,36 +246,64 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev) if (res) return res; - host = sdhci_pltfm_init(pdev, &sdhci_brcmstb_pdata, 0); + memset(&brcmstb_pdata, 0, sizeof(brcmstb_pdata)); + if (device_property_read_bool(&pdev->dev, "supports-cqe")) { + has_cqe = true; + match_priv->ops->irq = sdhci_brcmstb_cqhci_irq; + } + brcmstb_pdata.ops = match_priv->ops; + host = sdhci_pltfm_init(pdev, &brcmstb_pdata, + sizeof(struct sdhci_brcmstb_priv)); if (IS_ERR(host)) { res = PTR_ERR(host); goto err_clk; } + pltfm_host = sdhci_priv(host); + priv = sdhci_pltfm_priv(pltfm_host); + priv->has_cqe = has_cqe; + + /* Map in the non-standard CFG registers */ + iomem = platform_get_resource(pdev, IORESOURCE_MEM, 1); + priv->cfg_regs = devm_ioremap_resource(&pdev->dev, iomem); + if (IS_ERR(priv->cfg_regs)) { + res = PTR_ERR(priv->cfg_regs); + goto err; + } + sdhci_get_of_property(pdev); res = mmc_of_parse(host->mmc); if (res) goto err; /* + * If the chip has enhanced strobe and it's enabled, add + * callback + */ + if (match_priv->hs400es && + (host->mmc->caps2 & MMC_CAP2_HS400_ES)) + host->mmc_host_ops.hs400_enhanced_strobe = match_priv->hs400es; + + /* * Supply the existing CAPS, but clear the UHS modes. This * will allow these modes to be specified by device tree * properties through mmc_of_parse(). 
*/ host->caps = sdhci_readl(host, SDHCI_CAPABILITIES); - if (of_device_is_compatible(pdev->dev.of_node, "brcm,bcm7425-sdhci")) + if (match_priv->flags & BRCMSTB_PRIV_FLAGS_NO_64BIT) host->caps &= ~SDHCI_CAN_64BIT; host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1); host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 | - SDHCI_SUPPORT_DDR50); - host->quirks |= SDHCI_QUIRK_MISSING_CAPS | - SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; + SDHCI_SUPPORT_DDR50); + host->quirks |= SDHCI_QUIRK_MISSING_CAPS; + + if (match_priv->flags & BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT) + host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; - res = sdhci_add_host(host); + res = sdhci_brcmstb_add_host(host, priv); if (res) goto err; - pltfm_host = sdhci_priv(host); pltfm_host->clk = clk; return res; @@ -79,11 +314,15 @@ err_clk: return res; } -static const struct of_device_id sdhci_brcm_of_match[] = { - { .compatible = "brcm,bcm7425-sdhci" }, - { .compatible = "brcm,bcm7445-sdhci" }, - {}, -}; +static void sdhci_brcmstb_shutdown(struct platform_device *pdev) +{ + int ret; + + ret = sdhci_pltfm_unregister(pdev); + if (ret) + dev_err(&pdev->dev, "failed to shutdown\n"); +} + MODULE_DEVICE_TABLE(of, sdhci_brcm_of_match); static struct platform_driver sdhci_brcmstb_driver = { @@ -94,6 +333,7 @@ static struct platform_driver sdhci_brcmstb_driver = { }, .probe = sdhci_brcmstb_probe, .remove = sdhci_pltfm_unregister, + .shutdown = sdhci_brcmstb_shutdown, }; module_platform_driver(sdhci_brcmstb_driver); diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c index ae0ec27dd7cc..5827d3751b81 100644 --- a/drivers/mmc/host/sdhci-cadence.c +++ b/drivers/mmc/host/sdhci-cadence.c @@ -158,7 +158,7 @@ static int sdhci_cdns_phy_init(struct sdhci_cdns_priv *priv) return 0; } -static inline void *sdhci_cdns_priv(struct sdhci_host *host) +static void *sdhci_cdns_priv(struct sdhci_host *host) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 1c988d6a2433..382f25b2fa45 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -224,7 +224,6 @@ static struct esdhc_soc_data usdhc_imx8qxp_data = { struct pltfm_imx_data { u32 scratchpad; struct pinctrl *pinctrl; - struct pinctrl_state *pins_default; struct pinctrl_state *pins_100mhz; struct pinctrl_state *pins_200mhz; const struct esdhc_soc_data *socdata; @@ -951,7 +950,6 @@ static int esdhc_change_pinstate(struct sdhci_host *host, dev_dbg(mmc_dev(host->mmc), "change pinctrl state for uhs %d\n", uhs); if (IS_ERR(imx_data->pinctrl) || - IS_ERR(imx_data->pins_default) || IS_ERR(imx_data->pins_100mhz) || IS_ERR(imx_data->pins_200mhz)) return -EINVAL; @@ -968,7 +966,7 @@ static int esdhc_change_pinstate(struct sdhci_host *host, break; default: /* back to default state for other legacy timing */ - pinctrl = imx_data->pins_default; + return pinctrl_select_default_state(mmc_dev(host->mmc)); } return pinctrl_select_state(imx_data->pinctrl, pinctrl); @@ -1338,7 +1336,7 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, mmc_of_parse_voltage(np, &host->ocr_mask); - if (esdhc_is_usdhc(imx_data) && !IS_ERR(imx_data->pins_default)) { + if (esdhc_is_usdhc(imx_data)) { imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl, ESDHC_PINCTRL_STATE_100MHZ); imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl, @@ -1381,19 +1379,20 @@ static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev, host->mmc->parent->platform_data); /* 
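
The esdhc-imx hunks above drop the driver's private copy of the "default" pinctrl state: the pinctrl core already caches it per device, so legacy timings can simply call pinctrl_select_default_state(). A sketch of the resulting selection logic (the state variables are placeholders, assumed to have been looked up at probe time):

        static int example_pins_for_timing(struct sdhci_host *host,
                                           struct pinctrl *p,
                                           struct pinctrl_state *s100,
                                           struct pinctrl_state *s200)
        {
                switch (host->mmc->ios.timing) {
                case MMC_TIMING_UHS_SDR50:
                case MMC_TIMING_UHS_DDR50:
                        return pinctrl_select_state(p, s100);
                case MMC_TIMING_UHS_SDR104:
                case MMC_TIMING_MMC_HS200:
                        return pinctrl_select_state(p, s200);
                default:
                        /* core-cached default state; no private lookup kept */
                        return pinctrl_select_default_state(mmc_dev(host->mmc));
                }
        }
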
write_protect */ if (boarddata->wp_type == ESDHC_WP_GPIO) { - err = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0, NULL); + host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; + + err = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0); if (err) { dev_err(mmc_dev(host->mmc), "failed to request write-protect gpio!\n"); return err; } - host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; } /* card_detect */ switch (boarddata->cd_type) { case ESDHC_CD_GPIO: - err = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0, NULL); + err = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0); if (err) { dev_err(mmc_dev(host->mmc), "failed to request card-detect gpio!\n"); @@ -1492,11 +1491,6 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) goto disable_ahb_clk; } - imx_data->pins_default = pinctrl_lookup_state(imx_data->pinctrl, - PINCTRL_STATE_DEFAULT); - if (IS_ERR(imx_data->pins_default)) - dev_warn(mmc_dev(host->mmc), "could not get default state\n"); - if (esdhc_is_usdhc(imx_data)) { host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN; host->mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR; diff --git a/drivers/mmc/host/sdhci-milbeaut.c b/drivers/mmc/host/sdhci-milbeaut.c index a1aa21b9ae1c..92f30a1db435 100644 --- a/drivers/mmc/host/sdhci-milbeaut.c +++ b/drivers/mmc/host/sdhci-milbeaut.c @@ -242,15 +242,12 @@ static int sdhci_milbeaut_probe(struct platform_device *pdev) { struct sdhci_host *host; struct device *dev = &pdev->dev; - struct resource *res; int irq, ret = 0; struct f_sdhost_priv *priv; irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "%s: no irq specified\n", __func__); + if (irq < 0) return irq; - } host = sdhci_alloc_host(dev, sizeof(struct f_sdhost_priv)); if (IS_ERR(host)) @@ -280,8 +277,7 @@ static int sdhci_milbeaut_probe(struct platform_device *pdev) host->ops = &sdhci_milbeaut_ops; host->irq = irq; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - host->ioaddr = devm_ioremap_resource(&pdev->dev, res); + host->ioaddr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(host->ioaddr)) { ret = PTR_ERR(host->ioaddr); goto err; diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index b75c82d8d6c1..c3a160c18047 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -15,6 +15,7 @@ #include <linux/regulator/consumer.h> #include "sdhci-pltfm.h" +#include "cqhci.h" #define CORE_MCI_VERSION 0x50 #define CORE_VERSION_MAJOR_SHIFT 28 @@ -99,7 +100,7 @@ #define CORE_PWRSAVE_DLL BIT(3) -#define DDR_CONFIG_POR_VAL 0x80040853 +#define DDR_CONFIG_POR_VAL 0x80040873 #define INVALID_TUNING_PHASE -1 @@ -122,6 +123,10 @@ #define msm_host_writel(msm_host, val, host, offset) \ msm_host->var_ops->msm_writel_relaxed(val, host, offset) +/* CQHCI vendor specific registers */ +#define CQHCI_VENDOR_CFG1 0xA00 +#define CQHCI_VENDOR_DIS_RST_ON_CQ_EN (0x3 << 13) + struct sdhci_msm_offset { u32 core_hc_mode; u32 core_mci_data_cnt; @@ -148,8 +153,9 @@ struct sdhci_msm_offset { u32 core_ddr_200_cfg; u32 core_vendor_spec3; u32 core_dll_config_2; + u32 core_dll_config_3; + u32 core_ddr_config_old; /* Applicable to sdcc minor ver < 0x49 */ u32 core_ddr_config; - u32 core_ddr_config_2; }; static const struct sdhci_msm_offset sdhci_msm_v5_offset = { @@ -177,8 +183,8 @@ static const struct sdhci_msm_offset sdhci_msm_v5_offset = { .core_ddr_200_cfg = 0x224, .core_vendor_spec3 = 0x250, .core_dll_config_2 = 0x254, - .core_ddr_config = 0x258, - .core_ddr_config_2 = 0x25c, + .core_dll_config_3 = 0x258, + .core_ddr_config = 0x25c, }; static const struct 
sdhci_msm_offset sdhci_msm_mci_offset = { @@ -207,8 +213,8 @@ static const struct sdhci_msm_offset sdhci_msm_mci_offset = { .core_ddr_200_cfg = 0x184, .core_vendor_spec3 = 0x1b0, .core_dll_config_2 = 0x1b4, - .core_ddr_config = 0x1b8, - .core_ddr_config_2 = 0x1bc, + .core_ddr_config_old = 0x1b8, + .core_ddr_config = 0x1bc, }; struct sdhci_msm_variant_ops { @@ -253,6 +259,7 @@ struct sdhci_msm_host { const struct sdhci_msm_offset *offset; bool use_cdr; u32 transfer_mode; + bool updated_ddr_cfg; }; static const struct sdhci_msm_offset *sdhci_priv_msm_offset(struct sdhci_host *host) @@ -924,8 +931,10 @@ out: static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host) { struct mmc_host *mmc = host->mmc; - u32 dll_status, config; + u32 dll_status, config, ddr_cfg_offset; int ret; + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); const struct sdhci_msm_offset *msm_offset = sdhci_priv_msm_offset(host); @@ -938,8 +947,11 @@ static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host) * bootloaders. In the future, if this changes, then the desired * values will need to be programmed appropriately. */ - writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr + - msm_offset->core_ddr_config); + if (msm_host->updated_ddr_cfg) + ddr_cfg_offset = msm_offset->core_ddr_config; + else + ddr_cfg_offset = msm_offset->core_ddr_config_old; + writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr + ddr_cfg_offset); if (mmc->ios.enhanced_strobe) { config = readl_relaxed(host->ioaddr + @@ -1560,6 +1572,127 @@ out: __sdhci_msm_set_clock(host, clock); } +/*****************************************************************************\ + * * + * MSM Command Queue Engine (CQE) * + * * +\*****************************************************************************/ + +static u32 sdhci_msm_cqe_irq(struct sdhci_host *host, u32 intmask) +{ + int cmd_error = 0; + int data_error = 0; + + if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error)) + return intmask; + + cqhci_irq(host->mmc, intmask, cmd_error, data_error); + return 0; +} + +void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery) +{ + struct sdhci_host *host = mmc_priv(mmc); + unsigned long flags; + u32 ctrl; + + /* + * When CQE is halted, the legacy SDHCI path operates only + * on 16-byte descriptors in 64bit mode. + */ + if (host->flags & SDHCI_USE_64_BIT_DMA) + host->desc_sz = 16; + + spin_lock_irqsave(&host->lock, flags); + + /* + * During CQE command transfers, command complete bit gets latched. + * So s/w should clear command complete interrupt status when CQE is + * either halted or disabled. Otherwise unexpected SDCHI legacy + * interrupt gets triggered when CQE is halted/disabled. + */ + ctrl = sdhci_readl(host, SDHCI_INT_ENABLE); + ctrl |= SDHCI_INT_RESPONSE; + sdhci_writel(host, ctrl, SDHCI_INT_ENABLE); + sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS); + + spin_unlock_irqrestore(&host->lock, flags); + + sdhci_cqe_disable(mmc, recovery); +} + +static const struct cqhci_host_ops sdhci_msm_cqhci_ops = { + .enable = sdhci_cqe_enable, + .disable = sdhci_msm_cqe_disable, +}; + +static int sdhci_msm_cqe_add_host(struct sdhci_host *host, + struct platform_device *pdev) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + struct cqhci_host *cq_host; + bool dma64; + u32 cqcfg; + int ret; + + /* + * When CQE is halted, SDHC operates only on 16byte ADMA descriptors. 
+ * So ensure ADMA table is allocated for 16byte descriptors. + */ + if (host->caps & SDHCI_CAN_64BIT) + host->alloc_desc_sz = 16; + + ret = sdhci_setup_host(host); + if (ret) + return ret; + + cq_host = cqhci_pltfm_init(pdev); + if (IS_ERR(cq_host)) { + ret = PTR_ERR(cq_host); + dev_err(&pdev->dev, "cqhci-pltfm init: failed: %d\n", ret); + goto cleanup; + } + + msm_host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD; + cq_host->ops = &sdhci_msm_cqhci_ops; + + dma64 = host->flags & SDHCI_USE_64_BIT_DMA; + + ret = cqhci_init(cq_host, host->mmc, dma64); + if (ret) { + dev_err(&pdev->dev, "%s: CQE init: failed (%d)\n", + mmc_hostname(host->mmc), ret); + goto cleanup; + } + + /* Disable cqe reset due to cqe enable signal */ + cqcfg = cqhci_readl(cq_host, CQHCI_VENDOR_CFG1); + cqcfg |= CQHCI_VENDOR_DIS_RST_ON_CQ_EN; + cqhci_writel(cq_host, cqcfg, CQHCI_VENDOR_CFG1); + + /* + * SDHC expects 12byte ADMA descriptors till CQE is enabled. + * So limit desc_sz to 12 so that the data commands that are sent + * during card initialization (before CQE gets enabled) would + * get executed without any issues. + */ + if (host->flags & SDHCI_USE_64_BIT_DMA) + host->desc_sz = 12; + + ret = __sdhci_add_host(host); + if (ret) + goto cleanup; + + dev_info(&pdev->dev, "%s: CQE init: success\n", + mmc_hostname(host->mmc)); + return ret; + +cleanup: + sdhci_cleanup_host(host); + return ret; +} + /* * Platform specific register write functions. This is so that, if any * register write needs to be followed up by platform specific actions, @@ -1724,6 +1857,7 @@ static const struct sdhci_ops sdhci_msm_ops = { .set_uhs_signaling = sdhci_msm_set_uhs_signaling, .write_w = sdhci_msm_writew, .write_b = sdhci_msm_writeb, + .irq = sdhci_msm_cqe_irq, }; static const struct sdhci_pltfm_data sdhci_msm_pdata = { @@ -1739,7 +1873,6 @@ static int sdhci_msm_probe(struct platform_device *pdev) struct sdhci_host *host; struct sdhci_pltfm_host *pltfm_host; struct sdhci_msm_host *msm_host; - struct resource *core_memres; struct clk *clk; int ret; u16 host_version, core_minor; @@ -1747,6 +1880,7 @@ static int sdhci_msm_probe(struct platform_device *pdev) u8 core_major; const struct sdhci_msm_offset *msm_offset; const struct sdhci_msm_variant_info *var_info; + struct device_node *node = pdev->dev.of_node; host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host)); if (IS_ERR(host)) @@ -1840,10 +1974,7 @@ static int sdhci_msm_probe(struct platform_device *pdev) } if (!msm_host->mci_removed) { - core_memres = platform_get_resource(pdev, IORESOURCE_MEM, 1); - msm_host->core_mem = devm_ioremap_resource(&pdev->dev, - core_memres); - + msm_host->core_mem = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(msm_host->core_mem)) { ret = PTR_ERR(msm_host->core_mem); goto clk_disable; @@ -1899,6 +2030,9 @@ static int sdhci_msm_probe(struct platform_device *pdev) msm_offset->core_vendor_spec_capabilities0); } + if (core_major == 1 && core_minor >= 0x49) + msm_host->updated_ddr_cfg = true; + /* * Power on reset state may trigger power irq if previous status of * PWRCTL was either BUS_ON or IO_HIGH_V. 
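
To restate the descriptor-sizing rules these MSM CQE hunks encode: the ADMA table is always allocated for the larger 16-byte format so either layout fits, while desc_sz flips depending on whether the command queue is active. A condensed summary, with the values taken straight from the hunks above:

        /* probe, before __sdhci_add_host(), 64-bit DMA capable hardware: */
        host->alloc_desc_sz = 16;   /* table sized for the larger format  */
        host->desc_sz = 12;         /* pre-CQE init commands use 12 bytes */

        /* sdhci_msm_cqe_disable(), i.e. CQE halted at runtime: */
        host->desc_sz = 16;         /* legacy path after CQE has run      */
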
So before enabling pwr irq @@ -1942,7 +2076,10 @@ static int sdhci_msm_probe(struct platform_device *pdev) pm_runtime_use_autosuspend(&pdev->dev); host->mmc_host_ops.execute_tuning = sdhci_msm_execute_tuning; - ret = sdhci_add_host(host); + if (of_property_read_bool(node, "supports-cqe")) + ret = sdhci_msm_cqe_add_host(host, pdev); + else + ret = sdhci_add_host(host); if (ret) goto pm_runtime_disable; sdhci_msm_set_regulator_caps(msm_host); diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c index 5959e394b416..ab2bd314a390 100644 --- a/drivers/mmc/host/sdhci-of-at91.c +++ b/drivers/mmc/host/sdhci-of-at91.c @@ -33,7 +33,14 @@ #define SDHCI_AT91_PRESET_COMMON_CONF 0x400 /* drv type B, programmable clock mode */ +struct sdhci_at91_soc_data { + const struct sdhci_pltfm_data *pdata; + bool baseclk_is_generated_internally; + unsigned int divider_for_baseclk; +}; + struct sdhci_at91_priv { + const struct sdhci_at91_soc_data *soc_data; struct clk *hclock; struct clk *gck; struct clk *mainck; @@ -141,12 +148,24 @@ static const struct sdhci_ops sdhci_at91_sama5d2_ops = { .set_power = sdhci_at91_set_power, }; -static const struct sdhci_pltfm_data soc_data_sama5d2 = { +static const struct sdhci_pltfm_data sdhci_sama5d2_pdata = { .ops = &sdhci_at91_sama5d2_ops, }; +static const struct sdhci_at91_soc_data soc_data_sama5d2 = { + .pdata = &sdhci_sama5d2_pdata, + .baseclk_is_generated_internally = false, +}; + +static const struct sdhci_at91_soc_data soc_data_sam9x60 = { + .pdata = &sdhci_sama5d2_pdata, + .baseclk_is_generated_internally = true, + .divider_for_baseclk = 2, +}; + static const struct of_device_id sdhci_at91_dt_match[] = { { .compatible = "atmel,sama5d2-sdhci", .data = &soc_data_sama5d2 }, + { .compatible = "microchip,sam9x60-sdhci", .data = &soc_data_sam9x60 }, {} }; MODULE_DEVICE_TABLE(of, sdhci_at91_dt_match); @@ -156,50 +175,37 @@ static int sdhci_at91_set_clks_presets(struct device *dev) struct sdhci_host *host = dev_get_drvdata(dev); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host); - int ret; unsigned int caps0, caps1; unsigned int clk_base, clk_mul; - unsigned int gck_rate, real_gck_rate; + unsigned int gck_rate, clk_base_rate; unsigned int preset_div; - /* - * The mult clock is provided by as a generated clock by the PMC - * controller. In order to set the rate of gck, we have to get the - * base clock rate and the clock mult from capabilities. - */ clk_prepare_enable(priv->hclock); caps0 = readl(host->ioaddr + SDHCI_CAPABILITIES); caps1 = readl(host->ioaddr + SDHCI_CAPABILITIES_1); - clk_base = (caps0 & SDHCI_CLOCK_V3_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT; - clk_mul = (caps1 & SDHCI_CLOCK_MUL_MASK) >> SDHCI_CLOCK_MUL_SHIFT; - gck_rate = clk_base * 1000000 * (clk_mul + 1); - ret = clk_set_rate(priv->gck, gck_rate); - if (ret < 0) { - dev_err(dev, "failed to set gck"); - clk_disable_unprepare(priv->hclock); - return ret; - } - /* - * We need to check if we have the requested rate for gck because in - * some cases this rate could be not supported. If it happens, the rate - * is the closest one gck can provide. We have to update the value - * of clk mul. - */ - real_gck_rate = clk_get_rate(priv->gck); - if (real_gck_rate != gck_rate) { - clk_mul = real_gck_rate / (clk_base * 1000000) - 1; - caps1 &= (~SDHCI_CLOCK_MUL_MASK); - caps1 |= ((clk_mul << SDHCI_CLOCK_MUL_SHIFT) & - SDHCI_CLOCK_MUL_MASK); - /* Set capabilities in r/w mode. 
*/ - writel(SDMMC_CACR_KEY | SDMMC_CACR_CAPWREN, - host->ioaddr + SDMMC_CACR); - writel(caps1, host->ioaddr + SDHCI_CAPABILITIES_1); - /* Set capabilities in ro mode. */ - writel(0, host->ioaddr + SDMMC_CACR); - dev_info(dev, "update clk mul to %u as gck rate is %u Hz\n", - clk_mul, real_gck_rate); - } + + gck_rate = clk_get_rate(priv->gck); + if (priv->soc_data->baseclk_is_generated_internally) + clk_base_rate = gck_rate / priv->soc_data->divider_for_baseclk; + else + clk_base_rate = clk_get_rate(priv->mainck); + + clk_base = clk_base_rate / 1000000; + clk_mul = gck_rate / clk_base_rate - 1; + + caps0 &= ~SDHCI_CLOCK_V3_BASE_MASK; + caps0 |= (clk_base << SDHCI_CLOCK_BASE_SHIFT) & SDHCI_CLOCK_V3_BASE_MASK; + caps1 &= ~SDHCI_CLOCK_MUL_MASK; + caps1 |= (clk_mul << SDHCI_CLOCK_MUL_SHIFT) & SDHCI_CLOCK_MUL_MASK; + /* Set capabilities in r/w mode. */ + writel(SDMMC_CACR_KEY | SDMMC_CACR_CAPWREN, host->ioaddr + SDMMC_CACR); + writel(caps0, host->ioaddr + SDHCI_CAPABILITIES); + writel(caps1, host->ioaddr + SDHCI_CAPABILITIES_1); + /* Set capabilities in ro mode. */ + writel(0, host->ioaddr + SDMMC_CACR); + + dev_info(dev, "update clk mul to %u as gck rate is %u Hz and clk base is %u Hz\n", + clk_mul, gck_rate, clk_base_rate); /* * We have to set preset values because it depends on the clk_mul @@ -207,19 +213,19 @@ static int sdhci_at91_set_clks_presets(struct device *dev) * maximum sd clock value is 120 MHz instead of 208 MHz. For that * reason, we need to use presets to support SDR104. */ - preset_div = DIV_ROUND_UP(real_gck_rate, 24000000) - 1; + preset_div = DIV_ROUND_UP(gck_rate, 24000000) - 1; writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, host->ioaddr + SDHCI_PRESET_FOR_SDR12); - preset_div = DIV_ROUND_UP(real_gck_rate, 50000000) - 1; + preset_div = DIV_ROUND_UP(gck_rate, 50000000) - 1; writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, host->ioaddr + SDHCI_PRESET_FOR_SDR25); - preset_div = DIV_ROUND_UP(real_gck_rate, 100000000) - 1; + preset_div = DIV_ROUND_UP(gck_rate, 100000000) - 1; writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, host->ioaddr + SDHCI_PRESET_FOR_SDR50); - preset_div = DIV_ROUND_UP(real_gck_rate, 120000000) - 1; + preset_div = DIV_ROUND_UP(gck_rate, 120000000) - 1; writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, host->ioaddr + SDHCI_PRESET_FOR_SDR104); - preset_div = DIV_ROUND_UP(real_gck_rate, 50000000) - 1; + preset_div = DIV_ROUND_UP(gck_rate, 50000000) - 1; writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, host->ioaddr + SDHCI_PRESET_FOR_DDR50); @@ -314,7 +320,7 @@ static const struct dev_pm_ops sdhci_at91_dev_pm_ops = { static int sdhci_at91_probe(struct platform_device *pdev) { const struct of_device_id *match; - const struct sdhci_pltfm_data *soc_data; + const struct sdhci_at91_soc_data *soc_data; struct sdhci_host *host; struct sdhci_pltfm_host *pltfm_host; struct sdhci_at91_priv *priv; @@ -325,29 +331,37 @@ static int sdhci_at91_probe(struct platform_device *pdev) return -EINVAL; soc_data = match->data; - host = sdhci_pltfm_init(pdev, soc_data, sizeof(*priv)); + host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*priv)); if (IS_ERR(host)) return PTR_ERR(host); pltfm_host = sdhci_priv(host); priv = sdhci_pltfm_priv(pltfm_host); + priv->soc_data = soc_data; priv->mainck = devm_clk_get(&pdev->dev, "baseclk"); if (IS_ERR(priv->mainck)) { - dev_err(&pdev->dev, "failed to get baseclk\n"); - return PTR_ERR(priv->mainck); + if (soc_data->baseclk_is_generated_internally) { + priv->mainck = NULL; + } else { + dev_err(&pdev->dev, "failed to get baseclk\n"); + 
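
A worked pass through the new sam9x60 clock derivation, using hypothetical rates (240 MHz gck, internal base-clock divider of 2; none of these numbers come from the patch):

        gck_rate      = 240000000;                 /* clk_get_rate(priv->gck)  */
        clk_base_rate = gck_rate / 2;              /* divider_for_baseclk == 2 */
        clk_base      = clk_base_rate / 1000000;   /* 120 -> CAPS base field   */
        clk_mul       = gck_rate / clk_base_rate - 1;  /* 1 -> gck is 2x base  */

        /* Presets: DIV_ROUND_UP(rate, target) - 1 never exceeds the target. */
        preset_div = DIV_ROUND_UP(240000000, 100000000) - 1;   /* SDR50:  2  */
        preset_div = DIV_ROUND_UP(240000000, 120000000) - 1;   /* SDR104: 1  */
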
ret = PTR_ERR(priv->mainck); + goto sdhci_pltfm_free; + } } priv->hclock = devm_clk_get(&pdev->dev, "hclock"); if (IS_ERR(priv->hclock)) { dev_err(&pdev->dev, "failed to get hclock\n"); - return PTR_ERR(priv->hclock); + ret = PTR_ERR(priv->hclock); + goto sdhci_pltfm_free; } priv->gck = devm_clk_get(&pdev->dev, "multclk"); if (IS_ERR(priv->gck)) { dev_err(&pdev->dev, "failed to get multclk\n"); - return PTR_ERR(priv->gck); + ret = PTR_ERR(priv->gck); + goto sdhci_pltfm_free; } ret = sdhci_at91_set_clks_presets(&pdev->dev); diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index 5cca3fa4610b..5d8dd870bd44 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -80,6 +80,7 @@ struct sdhci_esdhc { bool quirk_tuning_erratum_type1; bool quirk_tuning_erratum_type2; bool quirk_ignore_data_inhibit; + bool quirk_delay_before_data_reset; bool in_sw_tuning; unsigned int peripheral_clock; const struct esdhc_clk_fixup *clk_fixup; @@ -172,6 +173,9 @@ static u16 esdhc_readw_fixup(struct sdhci_host *host, u16 ret; int shift = (spec_reg & 0x2) * 8; + if (spec_reg == SDHCI_TRANSFER_MODE) + return pltfm_host->xfer_mode_shadow; + if (spec_reg == SDHCI_HOST_VERSION) ret = value & 0xffff; else @@ -561,32 +565,46 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host) static void esdhc_clock_enable(struct sdhci_host *host, bool enable) { - u32 val; + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); ktime_t timeout; + u32 val, clk_en; + + clk_en = ESDHC_CLOCK_SDCLKEN; + + /* + * IPGEN/HCKEN/PEREN bits exist on eSDHC whose vendor version + * is 2.2 or lower. + */ + if (esdhc->vendor_ver <= VENDOR_V_22) + clk_en |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | + ESDHC_CLOCK_PEREN); val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); if (enable) - val |= ESDHC_CLOCK_SDCLKEN; + val |= clk_en; else - val &= ~ESDHC_CLOCK_SDCLKEN; + val &= ~clk_en; sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL); - /* Wait max 20 ms */ + /* + * Wait max 20 ms. If vendor version is 2.2 or lower, do not + * wait clock stable bit which does not exist. + */ timeout = ktime_add_ms(ktime_get(), 20); - val = ESDHC_CLOCK_STABLE; - while (1) { + while (esdhc->vendor_ver > VENDOR_V_22) { bool timedout = ktime_after(ktime_get(), timeout); - if (sdhci_readl(host, ESDHC_PRSSTAT) & val) + if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE) break; if (timedout) { pr_err("%s: Internal clock never stabilised.\n", mmc_hostname(host->mmc)); break; } - udelay(10); + usleep_range(10, 20); } } @@ -620,77 +638,97 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); - int pre_div = 1; - int div = 1; - int division; + unsigned int pre_div = 1, div = 1; + unsigned int clock_fixup = 0; ktime_t timeout; - long fixup = 0; u32 temp; - host->mmc->actual_clock = 0; - if (clock == 0) { + host->mmc->actual_clock = 0; esdhc_clock_enable(host, false); return; } - /* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */ + /* Start pre_div at 2 for vendor version < 2.3. */ if (esdhc->vendor_ver < VENDOR_V_23) pre_div = 2; + /* Fix clock value. 
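
The reworked esdhc_clock_enable() polls for clock stability only on controllers that actually implement the bit (vendor version above 2.2), bounded to 20 ms with ktime. The polling idiom, extracted as a standalone sketch (helper name is a placeholder; register macros are the driver-local ones used above):

        static int example_wait_clk_stable(struct sdhci_host *host)
        {
                ktime_t timeout = ktime_add_ms(ktime_get(), 20);

                while (!(sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)) {
                        if (ktime_after(ktime_get(), timeout))
                                return -ETIMEDOUT;   /* report, don't hang */
                        usleep_range(10, 20);
                }
                return 0;
        }
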
*/ if (host->mmc->card && mmc_card_sd(host->mmc->card) && - esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY) - fixup = esdhc->clk_fixup->sd_dflt_max_clk; + esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY) + clock_fixup = esdhc->clk_fixup->sd_dflt_max_clk; else if (esdhc->clk_fixup) - fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing]; + clock_fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing]; - if (fixup && clock > fixup) - clock = fixup; + if (clock_fixup == 0 || clock < clock_fixup) + clock_fixup = clock; - temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); - temp &= ~(ESDHC_CLOCK_SDCLKEN | ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | - ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK); - sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); - - while (host->max_clk / pre_div / 16 > clock && pre_div < 256) + /* Calculate pre_div and div. */ + while (host->max_clk / pre_div / 16 > clock_fixup && pre_div < 256) pre_div *= 2; - while (host->max_clk / pre_div / div > clock && div < 16) + while (host->max_clk / pre_div / div > clock_fixup && div < 16) div++; + esdhc->div_ratio = pre_div * div; + + /* Limit clock division for HS400 200MHz clock for quirk. */ if (esdhc->quirk_limited_clk_division && clock == MMC_HS200_MAX_DTR && (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 || host->flags & SDHCI_HS400_TUNING)) { - division = pre_div * div; - if (division <= 4) { + if (esdhc->div_ratio <= 4) { pre_div = 4; div = 1; - } else if (division <= 8) { + } else if (esdhc->div_ratio <= 8) { pre_div = 4; div = 2; - } else if (division <= 12) { + } else if (esdhc->div_ratio <= 12) { pre_div = 4; div = 3; } else { pr_warn("%s: using unsupported clock division.\n", mmc_hostname(host->mmc)); } + esdhc->div_ratio = pre_div * div; } + host->mmc->actual_clock = host->max_clk / esdhc->div_ratio; + dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n", - clock, host->max_clk / pre_div / div); - host->mmc->actual_clock = host->max_clk / pre_div / div; - esdhc->div_ratio = pre_div * div; + clock, host->mmc->actual_clock); + + /* Set clock division into register. */ pre_div >>= 1; div--; + esdhc_clock_enable(host, false); + temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); - temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN - | (div << ESDHC_DIVIDER_SHIFT) - | (pre_div << ESDHC_PREDIV_SHIFT)); + temp &= ~ESDHC_CLOCK_MASK; + temp |= ((div << ESDHC_DIVIDER_SHIFT) | + (pre_div << ESDHC_PREDIV_SHIFT)); sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); + /* + * Wait max 20 ms. If vendor version is 2.2 or lower, do not + * wait clock stable bit which does not exist. + */ + timeout = ktime_add_ms(ktime_get(), 20); + while (esdhc->vendor_ver > VENDOR_V_22) { + bool timedout = ktime_after(ktime_get(), timeout); + + if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE) + break; + if (timedout) { + pr_err("%s: Internal clock never stabilised.\n", + mmc_hostname(host->mmc)); + break; + } + usleep_range(10, 20); + } + + /* Additional setting for HS400. 
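
The pre_div/div search above is a coarse-then-fine walk: pre_div doubles until max_clk / pre_div / 16 no longer exceeds the target, then div counts up from 1. A worked example with a hypothetical max_clk of 400 MHz and a 50 MHz target:

        /* 400 MHz / 1 / 16 = 25 MHz <= 50 MHz, so pre_div stays at 1.   */
        pre_div = 1;
        /* Smallest div with 400 MHz / 1 / div <= 50 MHz is div = 8.     */
        div = 8;
        /* actual_clock = 400 MHz / (1 * 8) = 50 MHz; div_ratio = 8.     */
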
*/ if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 && clock == MMC_HS200_MAX_DTR) { temp = sdhci_readl(host, ESDHC_TBCTL); @@ -710,25 +748,7 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) esdhc_clock_enable(host, false); esdhc_flush_async_fifo(host); } - - /* Wait max 20 ms */ - timeout = ktime_add_ms(ktime_get(), 20); - while (1) { - bool timedout = ktime_after(ktime_get(), timeout); - - if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE) - break; - if (timedout) { - pr_err("%s: Internal clock never stabilised.\n", - mmc_hostname(host->mmc)); - return; - } - udelay(10); - } - - temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); - temp |= ESDHC_CLOCK_SDCLKEN; - sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); + esdhc_clock_enable(host, true); } static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width) @@ -757,21 +777,58 @@ static void esdhc_reset(struct sdhci_host *host, u8 mask) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); - u32 val; + u32 val, bus_width = 0; + + /* + * Add delay to make sure all the DMA transfers are finished + * for quirk. + */ + if (esdhc->quirk_delay_before_data_reset && + (mask & SDHCI_RESET_DATA) && + (host->flags & SDHCI_REQ_USE_DMA)) + mdelay(5); + + /* + * Save bus-width for eSDHC whose vendor version is 2.2 + * or lower for data reset. + */ + if ((mask & SDHCI_RESET_DATA) && + (esdhc->vendor_ver <= VENDOR_V_22)) { + val = sdhci_readl(host, ESDHC_PROCTL); + bus_width = val & ESDHC_CTRL_BUSWIDTH_MASK; + } sdhci_reset(host, mask); - sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); - sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); + /* + * Restore bus-width setting and interrupt registers for eSDHC + * whose vendor version is 2.2 or lower for data reset. + */ + if ((mask & SDHCI_RESET_DATA) && + (esdhc->vendor_ver <= VENDOR_V_22)) { + val = sdhci_readl(host, ESDHC_PROCTL); + val &= ~ESDHC_CTRL_BUSWIDTH_MASK; + val |= bus_width; + sdhci_writel(host, val, ESDHC_PROCTL); - if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) - mdelay(5); + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); + } - if (mask & SDHCI_RESET_ALL) { + /* + * Some bits have to be cleaned manually for eSDHC whose spec + * version is higher than 3.0 for all reset. + */ + if ((mask & SDHCI_RESET_ALL) && + (esdhc->spec_ver >= SDHCI_SPEC_300)) { val = sdhci_readl(host, ESDHC_TBCTL); val &= ~ESDHC_TB_EN; sdhci_writel(host, val, ESDHC_TBCTL); + /* + * Initialize eSDHC_DLLCFG1[DLL_PD_PULSE_STRETCH_SEL] to + * 0 for quirk. 
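
On eSDHC with vendor version 2.2 or lower, a data reset clobbers the bus-width field, so the new esdhc_reset() brackets the reset with a save/restore. The pattern, reduced to its core:

        u32 proctl = sdhci_readl(host, ESDHC_PROCTL);
        u32 width  = proctl & ESDHC_CTRL_BUSWIDTH_MASK;   /* save */

        sdhci_reset(host, SDHCI_RESET_DATA);

        proctl = sdhci_readl(host, ESDHC_PROCTL);
        proctl &= ~ESDHC_CTRL_BUSWIDTH_MASK;
        sdhci_writel(host, proctl | width, ESDHC_PROCTL); /* restore */
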
+ */ if (esdhc->quirk_unreliable_pulse_detection) { val = sdhci_readl(host, ESDHC_DLLCFG1); val &= ~ESDHC_DLL_PD_PULSE_STRETCH_SEL; @@ -851,20 +908,20 @@ static int esdhc_signal_voltage_switch(struct mmc_host *mmc, } static struct soc_device_attribute soc_tuning_erratum_type1[] = { - { .family = "QorIQ T1023", .revision = "1.0", }, - { .family = "QorIQ T1040", .revision = "1.0", }, - { .family = "QorIQ T2080", .revision = "1.0", }, - { .family = "QorIQ LS1021A", .revision = "1.0", }, + { .family = "QorIQ T1023", }, + { .family = "QorIQ T1040", }, + { .family = "QorIQ T2080", }, + { .family = "QorIQ LS1021A", }, { }, }; static struct soc_device_attribute soc_tuning_erratum_type2[] = { - { .family = "QorIQ LS1012A", .revision = "1.0", }, - { .family = "QorIQ LS1043A", .revision = "1.*", }, - { .family = "QorIQ LS1046A", .revision = "1.0", }, - { .family = "QorIQ LS1080A", .revision = "1.0", }, - { .family = "QorIQ LS2080A", .revision = "1.0", }, - { .family = "QorIQ LA1575A", .revision = "1.0", }, + { .family = "QorIQ LS1012A", }, + { .family = "QorIQ LS1043A", }, + { .family = "QorIQ LS1046A", }, + { .family = "QorIQ LS1080A", }, + { .family = "QorIQ LS2080A", }, + { .family = "QorIQ LA1575A", }, { }, }; @@ -885,20 +942,11 @@ static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable) esdhc_clock_enable(host, true); } -static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start, +static void esdhc_tuning_window_ptr(struct sdhci_host *host, u8 *window_start, u8 *window_end) { - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); - u8 tbstat_15_8, tbstat_7_0; u32 val; - if (esdhc->quirk_tuning_erratum_type1) { - *window_start = 5 * esdhc->div_ratio; - *window_end = 3 * esdhc->div_ratio; - return; - } - /* Write TBCTL[11:8]=4'h8 */ val = sdhci_readl(host, ESDHC_TBCTL); val &= ~(0xf << 8); @@ -917,20 +965,37 @@ static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start, val = sdhci_readl(host, ESDHC_TBSTAT); val = sdhci_readl(host, ESDHC_TBSTAT); + *window_end = val & 0xff; + *window_start = (val >> 8) & 0xff; +} + +static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start, + u8 *window_end) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); + u8 start_ptr, end_ptr; + + if (esdhc->quirk_tuning_erratum_type1) { + *window_start = 5 * esdhc->div_ratio; + *window_end = 3 * esdhc->div_ratio; + return; + } + + esdhc_tuning_window_ptr(host, &start_ptr, &end_ptr); + /* Reset data lines by setting ESDHCCTL[RSTD] */ sdhci_reset(host, SDHCI_RESET_DATA); /* Write 32'hFFFF_FFFF to IRQSTAT register */ sdhci_writel(host, 0xFFFFFFFF, SDHCI_INT_STATUS); - /* If TBSTAT[15:8]-TBSTAT[7:0] > 4 * div_ratio - * or TBSTAT[7:0]-TBSTAT[15:8] > 4 * div_ratio, + /* If TBSTAT[15:8]-TBSTAT[7:0] > (4 * div_ratio) + 2 + * or TBSTAT[7:0]-TBSTAT[15:8] > (4 * div_ratio) + 2, * then program TBPTR[TB_WNDW_END_PTR] = 4 * div_ratio * and program TBPTR[TB_WNDW_START_PTR] = 8 * div_ratio. 
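
The window-width test described in the comment above rejects hardware-reported tuning windows that are implausibly wide for the current divider. With a hypothetical div_ratio of 4, the threshold works out as follows:

        /* threshold = 4 * div_ratio + 2 = 18 taps when div_ratio == 4.  */
        if (abs(start_ptr - end_ptr) > 18) {
                window_start = 8 * 4;   /* 32: fall back to a fixed window */
                window_end   = 4 * 4;   /* 16                              */
        }
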
*/ - tbstat_7_0 = val & 0xff; - tbstat_15_8 = (val >> 8) & 0xff; - if (abs(tbstat_15_8 - tbstat_7_0) > (4 * esdhc->div_ratio)) { + if (abs(start_ptr - end_ptr) > (4 * esdhc->div_ratio + 2)) { *window_start = 8 * esdhc->div_ratio; *window_end = 4 * esdhc->div_ratio; } else { @@ -1003,6 +1068,19 @@ static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode) if (ret) break; + /* For type2 affected platforms of the tuning erratum, + * tuning may succeed although eSDHC might not have + * tuned properly. Need to check tuning window. + */ + if (esdhc->quirk_tuning_erratum_type2 && + !host->tuning_err) { + esdhc_tuning_window_ptr(host, &window_start, + &window_end); + if (abs(window_start - window_end) > + (4 * esdhc->div_ratio + 2)) + host->tuning_err = -EAGAIN; + } + /* If HW tuning fails and triggers erratum, * try workaround. */ @@ -1221,6 +1299,10 @@ static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host) if (match) esdhc->clk_fixup = match->data; np = pdev->dev.of_node; + + if (of_device_is_compatible(np, "fsl,p2020-esdhc")) + esdhc->quirk_delay_before_data_reset = true; + clk = of_clk_get(np, 0); if (!IS_ERR(clk)) { /* @@ -1231,7 +1313,8 @@ static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host) * 1/2 peripheral clock. */ if (of_device_is_compatible(np, "fsl,ls1046a-esdhc") || - of_device_is_compatible(np, "fsl,ls1028a-esdhc")) + of_device_is_compatible(np, "fsl,ls1028a-esdhc") || + of_device_is_compatible(np, "fsl,ls1088a-esdhc")) esdhc->peripheral_clock = clk_get_rate(clk) / 2; else esdhc->peripheral_clock = clk_get_rate(clk); @@ -1303,8 +1386,8 @@ static int sdhci_esdhc_probe(struct platform_device *pdev) host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ; if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) { - host->quirks2 |= SDHCI_QUIRK_RESET_AFTER_REQUEST; - host->quirks2 |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; + host->quirks |= SDHCI_QUIRK_RESET_AFTER_REQUEST; + host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; } if (of_device_is_compatible(np, "fsl,p5040-esdhc") || diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c index 083e7e053c95..882053151a47 100644 --- a/drivers/mmc/host/sdhci-omap.c +++ b/drivers/mmc/host/sdhci-omap.c @@ -7,6 +7,7 @@ */ #include <linux/delay.h> +#include <linux/mmc/mmc.h> #include <linux/mmc/slot-gpio.h> #include <linux/module.h> #include <linux/of.h> @@ -85,6 +86,7 @@ /* sdhci-omap controller flags */ #define SDHCI_OMAP_REQUIRE_IODELAY BIT(0) +#define SDHCI_OMAP_SPECIAL_RESET BIT(1) struct sdhci_omap_data { u32 offset; @@ -685,7 +687,11 @@ static int sdhci_omap_enable_dma(struct sdhci_host *host) struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host); reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON); - reg |= CON_DMA_MASTER; + reg &= ~CON_DMA_MASTER; + /* Switch to DMA slave mode when using external DMA */ + if (!host->use_external_dma) + reg |= CON_DMA_MASTER; + sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg); return 0; @@ -774,15 +780,35 @@ static void sdhci_omap_set_uhs_signaling(struct sdhci_host *host, sdhci_omap_start_clock(omap_host); } +#define MMC_TIMEOUT_US 20000 /* 20000 micro Sec */ static void sdhci_omap_reset(struct sdhci_host *host, u8 mask) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host); + unsigned long limit = MMC_TIMEOUT_US; + unsigned long i = 0; /* Don't reset data lines during tuning operation */ if (omap_host->is_tuning) mask &= ~SDHCI_RESET_DATA; + if (omap_host->flags & 
SDHCI_OMAP_SPECIAL_RESET) { + sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET); + while ((!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask)) && + (i++ < limit)) + udelay(1); + i = 0; + while ((sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) && + (i++ < limit)) + udelay(1); + + if (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) + dev_err(mmc_dev(host->mmc), + "Timeout waiting on controller reset in %s\n", + __func__); + return; + } + sdhci_reset(host, mask); } @@ -823,6 +849,15 @@ static u32 sdhci_omap_irq(struct sdhci_host *host, u32 intmask) return intmask; } +static void sdhci_omap_set_timeout(struct sdhci_host *host, + struct mmc_command *cmd) +{ + if (cmd->opcode == MMC_ERASE) + sdhci_set_data_timeout_irq(host, false); + + __sdhci_set_timeout(host, cmd); +} + static struct sdhci_ops sdhci_omap_ops = { .set_clock = sdhci_omap_set_clock, .set_power = sdhci_omap_set_power, @@ -834,6 +869,7 @@ static struct sdhci_ops sdhci_omap_ops = { .reset = sdhci_omap_reset, .set_uhs_signaling = sdhci_omap_set_uhs_signaling, .irq = sdhci_omap_irq, + .set_timeout = sdhci_omap_set_timeout, }; static int sdhci_omap_set_capabilities(struct sdhci_omap_host *omap_host) @@ -883,6 +919,16 @@ static const struct sdhci_omap_data k2g_data = { .offset = 0x200, }; +static const struct sdhci_omap_data am335_data = { + .offset = 0x200, + .flags = SDHCI_OMAP_SPECIAL_RESET, +}; + +static const struct sdhci_omap_data am437_data = { + .offset = 0x200, + .flags = SDHCI_OMAP_SPECIAL_RESET, +}; + static const struct sdhci_omap_data dra7_data = { .offset = 0x200, .flags = SDHCI_OMAP_REQUIRE_IODELAY, @@ -891,6 +937,8 @@ static const struct sdhci_omap_data dra7_data = { static const struct of_device_id omap_sdhci_match[] = { { .compatible = "ti,dra7-sdhci", .data = &dra7_data }, { .compatible = "ti,k2g-sdhci", .data = &k2g_data }, + { .compatible = "ti,am335-sdhci", .data = &am335_data }, + { .compatible = "ti,am437-sdhci", .data = &am437_data }, {}, }; MODULE_DEVICE_TABLE(of, omap_sdhci_match); @@ -1037,6 +1085,7 @@ static int sdhci_omap_probe(struct platform_device *pdev) const struct of_device_id *match; struct sdhci_omap_data *data; const struct soc_device_attribute *soc; + struct resource *regs; match = of_match_device(omap_sdhci_match, dev); if (!match) @@ -1049,6 +1098,10 @@ static int sdhci_omap_probe(struct platform_device *pdev) } offset = data->offset; + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!regs) + return -ENXIO; + host = sdhci_pltfm_init(pdev, &sdhci_omap_pdata, sizeof(*omap_host)); if (IS_ERR(host)) { @@ -1065,6 +1118,7 @@ static int sdhci_omap_probe(struct platform_device *pdev) omap_host->timing = MMC_TIMING_LEGACY; omap_host->flags = data->flags; host->ioaddr += offset; + host->mapbase = regs->start + offset; mmc = host->mmc; sdhci_get_of_property(pdev); @@ -1134,6 +1188,10 @@ static int sdhci_omap_probe(struct platform_device *pdev) host->mmc_host_ops.execute_tuning = sdhci_omap_execute_tuning; host->mmc_host_ops.enable_sdio_irq = sdhci_omap_enable_sdio_irq; + /* Switch to external DMA only if there is the "dmas" property */ + if (of_find_property(dev->of_node, "dmas", NULL)) + sdhci_switch_external_dma(host, true); + ret = sdhci_setup_host(host); if (ret) goto err_put_sync; diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index acefb76b4e15..525de2454a4d 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -27,6 +27,7 @@ #include <linux/mmc/slot-gpio.h> #include <linux/mmc/sdhci-pci-data.h> #include <linux/acpi.h> 
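
The AM335x/AM437x "special reset" earlier in this hunk has to poll in two phases because on those SoCs the reset bits first take time to latch as set, then to self-clear. Hoisted into a standalone helper for clarity (name and return convention are illustrative only):

        static int example_special_reset(struct sdhci_host *host, u8 mask)
        {
                unsigned long i;

                sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

                for (i = 0; i < MMC_TIMEOUT_US; i++) {     /* phase 1: latch */
                        if (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask)
                                break;
                        udelay(1);
                }
                for (i = 0; i < MMC_TIMEOUT_US; i++) {     /* phase 2: clear */
                        if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
                                return 0;
                        udelay(1);
                }
                return -ETIMEDOUT;
        }
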
+#include <linux/dmi.h> #ifdef CONFIG_X86 #include <asm/iosf_mbi.h> @@ -783,11 +784,18 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) return 0; } +static bool glk_broken_cqhci(struct sdhci_pci_slot *slot) +{ + return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC && + dmi_match(DMI_BIOS_VENDOR, "LENOVO"); +} + static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot) { int ret = byt_emmc_probe_slot(slot); - slot->host->mmc->caps2 |= MMC_CAP2_CQE; + if (!glk_broken_cqhci(slot)) + slot->host->mmc->caps2 |= MMC_CAP2_CQE; if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_GLK_EMMC) { slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES, @@ -1983,12 +1991,12 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot( if (slot->cd_idx >= 0) { ret = mmc_gpiod_request_cd(host->mmc, "cd", slot->cd_idx, - slot->cd_override_level, 0, NULL); + slot->cd_override_level, 0); if (ret && ret != -EPROBE_DEFER) ret = mmc_gpiod_request_cd(host->mmc, NULL, slot->cd_idx, slot->cd_override_level, - 0, NULL); + 0); if (ret == -EPROBE_DEFER) goto remove; diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c index 51e096f27388..64200c78e90d 100644 --- a/drivers/mmc/host/sdhci-s3c.c +++ b/drivers/mmc/host/sdhci-s3c.c @@ -117,7 +117,6 @@ struct sdhci_s3c { struct s3c_sdhci_platdata *pdata; int cur_clk; int ext_cd_irq; - int ext_cd_gpio; struct clk *clk_io; struct clk *clk_bus[MAX_BUS_CLK]; @@ -481,7 +480,6 @@ static int sdhci_s3c_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct sdhci_host *host; struct sdhci_s3c *sc; - struct resource *res; int ret, irq, ptr, clks; if (!pdev->dev.platform_data && !pdev->dev.of_node) { @@ -512,7 +510,6 @@ static int sdhci_s3c_probe(struct platform_device *pdev) goto err_pdata_io_clk; } else { memcpy(pdata, pdev->dev.platform_data, sizeof(*pdata)); - sc->ext_cd_gpio = -1; /* invalid gpio number */ } drv_data = sdhci_s3c_get_driver_data(pdev); @@ -555,8 +552,7 @@ static int sdhci_s3c_probe(struct platform_device *pdev) goto err_no_busclks; } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - host->ioaddr = devm_ioremap_resource(&pdev->dev, res); + host->ioaddr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(host->ioaddr)) { ret = PTR_ERR(host->ioaddr); goto err_req_regs; diff --git a/drivers/mmc/host/sdhci-sirf.c b/drivers/mmc/host/sdhci-sirf.c index e43143223320..f4b05dd6c20a 100644 --- a/drivers/mmc/host/sdhci-sirf.c +++ b/drivers/mmc/host/sdhci-sirf.c @@ -194,7 +194,7 @@ static int sdhci_sirf_probe(struct platform_device *pdev) * We must request the IRQ after sdhci_add_host(), as the tasklet only * gets setup in sdhci_add_host() and we oops. 
*/ - ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0, NULL); + ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0); if (ret == -EPROBE_DEFER) goto err_request_cd; if (!ret) diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c index 916b5b09c3d1..b4b63089a4e2 100644 --- a/drivers/mmc/host/sdhci-spear.c +++ b/drivers/mmc/host/sdhci-spear.c @@ -43,7 +43,6 @@ static const struct sdhci_ops sdhci_pltfm_ops = { static int sdhci_probe(struct platform_device *pdev) { struct sdhci_host *host; - struct resource *iomem; struct spear_sdhci *sdhci; struct device *dev; int ret; @@ -56,8 +55,7 @@ static int sdhci_probe(struct platform_device *pdev) goto err; } - iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - host->ioaddr = devm_ioremap_resource(&pdev->dev, iomem); + host->ioaddr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(host->ioaddr)) { ret = PTR_ERR(host->ioaddr); dev_dbg(&pdev->dev, "unable to map iomem: %d\n", ret); @@ -98,7 +96,7 @@ static int sdhci_probe(struct platform_device *pdev) * It is optional to use GPIOs for sdhci card detection. If we * find a descriptor using slot GPIO, we use it. */ - ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0, NULL); + ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0); if (ret == -EPROBE_DEFER) goto disable_clk; diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c index 7bc950520fd9..403ac44a7378 100644 --- a/drivers/mmc/host/sdhci-tegra.c +++ b/drivers/mmc/host/sdhci-tegra.c @@ -386,7 +386,7 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask) misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50; if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104) misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104; - if (soc_data->nvquirks & SDHCI_MISC_CTRL_ENABLE_SDR50) + if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50) clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE; } diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 3140fe2e5dba..63db84481dff 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -10,6 +10,7 @@ */ #include <linux/delay.h> +#include <linux/dmaengine.h> #include <linux/ktime.h> #include <linux/highmem.h> #include <linux/io.h> @@ -992,7 +993,7 @@ static void sdhci_set_transfer_irqs(struct sdhci_host *host) sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); } -static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable) +void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable) { if (enable) host->ier |= SDHCI_INT_DATA_TIMEOUT; @@ -1001,42 +1002,36 @@ static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable) sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); } +EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq); -static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) +void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) { - u8 count; - - if (host->ops->set_timeout) { - host->ops->set_timeout(host, cmd); - } else { - bool too_big = false; - - count = sdhci_calc_timeout(host, cmd, &too_big); - - if (too_big && - host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) { - sdhci_calc_sw_timeout(host, cmd); - sdhci_set_data_timeout_irq(host, false); - } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) { - sdhci_set_data_timeout_irq(host, true); - } - - sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL); + bool too_big = false; + u8 count = sdhci_calc_timeout(host, cmd, &too_big); + + if (too_big && + host->quirks2 & 
SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) { + sdhci_calc_sw_timeout(host, cmd); + sdhci_set_data_timeout_irq(host, false); + } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) { + sdhci_set_data_timeout_irq(host, true); } + + sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL); } +EXPORT_SYMBOL_GPL(__sdhci_set_timeout); -static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) +static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) { - struct mmc_data *data = cmd->data; - - host->data_timeout = 0; - - if (sdhci_data_line_cmd(cmd)) - sdhci_set_timeout(host, cmd); - - if (!data) - return; + if (host->ops->set_timeout) + host->ops->set_timeout(host, cmd); + else + __sdhci_set_timeout(host, cmd); +} +static void sdhci_initialize_data(struct sdhci_host *host, + struct mmc_data *data) +{ WARN_ON(host->data); /* Sanity checks */ @@ -1047,6 +1042,34 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) host->data = data; host->data_early = 0; host->data->bytes_xfered = 0; +} + +static inline void sdhci_set_block_info(struct sdhci_host *host, + struct mmc_data *data) +{ + /* Set the DMA boundary value and block size */ + sdhci_writew(host, + SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz), + SDHCI_BLOCK_SIZE); + /* + * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count + * can be supported, in that case 16-bit block count register must be 0. + */ + if (host->version >= SDHCI_SPEC_410 && host->v4_mode && + (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) { + if (sdhci_readw(host, SDHCI_BLOCK_COUNT)) + sdhci_writew(host, 0, SDHCI_BLOCK_COUNT); + sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT); + } else { + sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT); + } +} + +static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) +{ + struct mmc_data *data = cmd->data; + + sdhci_initialize_data(host, data); if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { struct scatterlist *sg; @@ -1133,24 +1156,192 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) sdhci_set_transfer_irqs(host); - /* Set the DMA boundary value and block size */ - sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz), - SDHCI_BLOCK_SIZE); + sdhci_set_block_info(host, data); +} - /* - * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count - * can be supported, in that case 16-bit block count register must be 0. 
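
Exporting sdhci_set_data_timeout_irq() and __sdhci_set_timeout() lets vendor glue special-case one command and then fall back to the core logic; the OMAP driver's erase handling earlier in this series is exactly that shape:

        /* Vendor hook: disable the HW data-timeout IRQ for erase, then
         * defer to the core's timeout calculation. */
        static void example_set_timeout(struct sdhci_host *host,
                                        struct mmc_command *cmd)
        {
                if (cmd->opcode == MMC_ERASE)
                        sdhci_set_data_timeout_irq(host, false);

                __sdhci_set_timeout(host, cmd);
        }
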
- */ - if (host->version >= SDHCI_SPEC_410 && host->v4_mode && - (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) { - if (sdhci_readw(host, SDHCI_BLOCK_COUNT)) - sdhci_writew(host, 0, SDHCI_BLOCK_COUNT); - sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT); +#if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA) + +static int sdhci_external_dma_init(struct sdhci_host *host) +{ + int ret = 0; + struct mmc_host *mmc = host->mmc; + + host->tx_chan = dma_request_chan(mmc->parent, "tx"); + if (IS_ERR(host->tx_chan)) { + ret = PTR_ERR(host->tx_chan); + if (ret != -EPROBE_DEFER) + pr_warn("Failed to request TX DMA channel.\n"); + host->tx_chan = NULL; + return ret; + } + + host->rx_chan = dma_request_chan(mmc->parent, "rx"); + if (IS_ERR(host->rx_chan)) { + if (host->tx_chan) { + dma_release_channel(host->tx_chan); + host->tx_chan = NULL; + } + + ret = PTR_ERR(host->rx_chan); + if (ret != -EPROBE_DEFER) + pr_warn("Failed to request RX DMA channel.\n"); + host->rx_chan = NULL; + } + + return ret; +} + +static struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host, + struct mmc_data *data) +{ + return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan; +} + +static int sdhci_external_dma_setup(struct sdhci_host *host, + struct mmc_command *cmd) +{ + int ret, i; + enum dma_transfer_direction dir; + struct dma_async_tx_descriptor *desc; + struct mmc_data *data = cmd->data; + struct dma_chan *chan; + struct dma_slave_config cfg; + dma_cookie_t cookie; + int sg_cnt; + + if (!host->mapbase) + return -EINVAL; + + cfg.src_addr = host->mapbase + SDHCI_BUFFER; + cfg.dst_addr = host->mapbase + SDHCI_BUFFER; + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.src_maxburst = data->blksz / 4; + cfg.dst_maxburst = data->blksz / 4; + + /* Sanity check: all the SG entries must be aligned by block size. */ + for (i = 0; i < data->sg_len; i++) { + if ((data->sg + i)->length % data->blksz) + return -EINVAL; + } + + chan = sdhci_external_dma_channel(host, data); + + ret = dmaengine_slave_config(chan, &cfg); + if (ret) + return ret; + + sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED); + if (sg_cnt <= 0) + return -EINVAL; + + dir = data->flags & MMC_DATA_WRITE ? 
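
The channel-request code above follows the usual dmaengine probe etiquette: -EPROBE_DEFER propagates silently so the probe retries later, while any other failure is logged and the driver degrades to internal DMA. Reduced to one channel (variable names assumed):

        struct dma_chan *chan = dma_request_chan(mmc->parent, "tx");

        if (IS_ERR(chan)) {
                int ret = PTR_ERR(chan);

                if (ret != -EPROBE_DEFER)
                        pr_warn("Failed to request TX DMA channel.\n");
                return ret;   /* defer quietly, warn on real errors */
        }
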
DMA_MEM_TO_DEV : DMA_DEV_TO_MEM; + desc = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, dir, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) + return -EINVAL; + + desc->callback = NULL; + desc->callback_param = NULL; + + cookie = dmaengine_submit(desc); + if (dma_submit_error(cookie)) + ret = cookie; + + return ret; +} + +static void sdhci_external_dma_release(struct sdhci_host *host) +{ + if (host->tx_chan) { + dma_release_channel(host->tx_chan); + host->tx_chan = NULL; + } + + if (host->rx_chan) { + dma_release_channel(host->rx_chan); + host->rx_chan = NULL; + } + + sdhci_switch_external_dma(host, false); +} + +static void __sdhci_external_dma_prepare_data(struct sdhci_host *host, + struct mmc_command *cmd) +{ + struct mmc_data *data = cmd->data; + + sdhci_initialize_data(host, data); + + host->flags |= SDHCI_REQ_USE_DMA; + sdhci_set_transfer_irqs(host); + + sdhci_set_block_info(host, data); +} + +static void sdhci_external_dma_prepare_data(struct sdhci_host *host, + struct mmc_command *cmd) +{ + if (!sdhci_external_dma_setup(host, cmd)) { + __sdhci_external_dma_prepare_data(host, cmd); } else { - sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT); + sdhci_external_dma_release(host); + pr_err("%s: Cannot use external DMA, switch to the DMA/PIO which standard SDHCI provides.\n", + mmc_hostname(host->mmc)); + sdhci_prepare_data(host, cmd); } } +static void sdhci_external_dma_pre_transfer(struct sdhci_host *host, + struct mmc_command *cmd) +{ + struct dma_chan *chan; + + if (!cmd->data) + return; + + chan = sdhci_external_dma_channel(host, cmd->data); + if (chan) + dma_async_issue_pending(chan); +} + +#else + +static inline int sdhci_external_dma_init(struct sdhci_host *host) +{ + return -EOPNOTSUPP; +} + +static inline void sdhci_external_dma_release(struct sdhci_host *host) +{ +} + +static inline void sdhci_external_dma_prepare_data(struct sdhci_host *host, + struct mmc_command *cmd) +{ + /* This should never happen */ + WARN_ON_ONCE(1); +} + +static inline void sdhci_external_dma_pre_transfer(struct sdhci_host *host, + struct mmc_command *cmd) +{ +} + +static inline struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host, + struct mmc_data *data) +{ + return NULL; +} + +#endif + +void sdhci_switch_external_dma(struct sdhci_host *host, bool en) +{ + host->use_external_dma = en; +} +EXPORT_SYMBOL_GPL(sdhci_switch_external_dma); + static inline bool sdhci_auto_cmd12(struct sdhci_host *host, struct mmc_request *mrq) { @@ -1245,22 +1436,10 @@ static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq) (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))); } -static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq) +static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq) { int i; - if (host->cmd && host->cmd->mrq == mrq) - host->cmd = NULL; - - if (host->data_cmd && host->data_cmd->mrq == mrq) - host->data_cmd = NULL; - - if (host->data && host->data->mrq == mrq) - host->data = NULL; - - if (sdhci_needs_reset(host, mrq)) - host->pending_reset = true; - for (i = 0; i < SDHCI_MAX_MRQS; i++) { if (host->mrqs_done[i] == mrq) { WARN_ON(1); @@ -1276,6 +1455,23 @@ static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq) } WARN_ON(i >= SDHCI_MAX_MRQS); +} + +static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq) +{ + if (host->cmd && host->cmd->mrq == mrq) + host->cmd = NULL; + + if (host->data_cmd && host->data_cmd->mrq == mrq) + host->data_cmd = NULL; + + if 
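
For orientation, the external-DMA lifecycle spread across these hunks is the standard dmaengine sequence; issue-pending is deliberately delayed until just before the command register is written so the transfer cannot start early:

        /* prepare + queue (sdhci_external_dma_setup): */
        desc = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, dir,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        cookie = dmaengine_submit(desc);

        /* kick it off right before SDHCI_COMMAND is written: */
        dma_async_issue_pending(chan);

        /* on cmd/data error (sdhci_request_done): */
        dmaengine_terminate_sync(chan);
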
(host->data && host->data->mrq == mrq) + host->data = NULL; + + if (sdhci_needs_reset(host, mrq)) + host->pending_reset = true; + + sdhci_set_mrq_done(host, mrq); sdhci_del_timer(host, mrq); @@ -1326,12 +1522,12 @@ static void sdhci_finish_data(struct sdhci_host *host) /* * Need to send CMD12 if - - * a) open-ended multiblock transfer (no CMD23) + * a) open-ended multiblock transfer not using auto CMD12 (no CMD23) * b) error in multiblock transfer */ if (data->stop && - (data->error || - !data->mrq->sbc)) { + ((!data->mrq->sbc && !sdhci_auto_cmd12(host, data->mrq)) || + data->error)) { /* * 'cap_cmd_during_tfr' request must not use the command line * after mmc_command_done() has been called. It is upper layer's @@ -1390,12 +1586,19 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) } host->cmd = cmd; + host->data_timeout = 0; if (sdhci_data_line_cmd(cmd)) { WARN_ON(host->data_cmd); host->data_cmd = cmd; + sdhci_set_timeout(host, cmd); } - sdhci_prepare_data(host, cmd); + if (cmd->data) { + if (host->use_external_dma) + sdhci_external_dma_prepare_data(host, cmd); + else + sdhci_prepare_data(host, cmd); + } sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT); @@ -1437,6 +1640,9 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) timeout += 10 * HZ; sdhci_mod_timer(host, cmd->mrq, timeout); + if (host->use_external_dma) + sdhci_external_dma_pre_transfer(host, cmd); + sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND); } EXPORT_SYMBOL_GPL(sdhci_send_command); @@ -1825,17 +2031,6 @@ void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) sdhci_led_activate(host); - /* - * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED - * requests if Auto-CMD12 is enabled. - */ - if (sdhci_auto_cmd12(host, mrq)) { - if (mrq->stop) { - mrq->data->stop = NULL; - mrq->stop = NULL; - } - } - if (!present || host->flags & SDHCI_DEVICE_DEAD) { mrq->cmd->error = -ENOMEDIUM; sdhci_finish_mrq(host, mrq); @@ -1882,9 +2077,7 @@ void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing) ctrl_2 |= SDHCI_CTRL_UHS_SDR104; else if (timing == MMC_TIMING_UHS_SDR12) ctrl_2 |= SDHCI_CTRL_UHS_SDR12; - else if (timing == MMC_TIMING_SD_HS || - timing == MMC_TIMING_MMC_HS || - timing == MMC_TIMING_UHS_SDR25) + else if (timing == MMC_TIMING_UHS_SDR25) ctrl_2 |= SDHCI_CTRL_UHS_SDR25; else if (timing == MMC_TIMING_UHS_SDR50) ctrl_2 |= SDHCI_CTRL_UHS_SDR50; @@ -2419,8 +2612,8 @@ static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) sdhci_send_tuning(host, opcode); if (!host->tuning_done) { - pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n", - mmc_hostname(host->mmc)); + pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n", + mmc_hostname(host->mmc)); sdhci_abort_tuning(host, opcode); return -ETIMEDOUT; } @@ -2663,6 +2856,17 @@ static bool sdhci_request_done(struct sdhci_host *host) if (host->flags & SDHCI_REQ_USE_DMA) { struct mmc_data *data = mrq->data; + if (host->use_external_dma && data && + (mrq->cmd->error || data->error)) { + struct dma_chan *chan = sdhci_external_dma_channel(host, data); + + host->mrqs_done[i] = NULL; + spin_unlock_irqrestore(&host->lock, flags); + dmaengine_terminate_sync(chan); + spin_lock_irqsave(&host->lock, flags); + sdhci_set_mrq_done(host, mrq); + } + if (data && data->host_cookie == COOKIE_MAPPED) { if (host->bounce_buffer) { /* @@ -3769,6 +3973,9 @@ int sdhci_setup_host(struct sdhci_host *host) mmc_hostname(mmc), host->version); } + if (host->quirks & 
SDHCI_QUIRK_BROKEN_CQE) + mmc->caps2 &= ~MMC_CAP2_CQE; + if (host->quirks & SDHCI_QUIRK_FORCE_DMA) host->flags |= SDHCI_USE_SDMA; else if (!(host->caps & SDHCI_CAN_DO_SDMA)) @@ -3795,6 +4002,21 @@ int sdhci_setup_host(struct sdhci_host *host) if (sdhci_can_64bit_dma(host)) host->flags |= SDHCI_USE_64_BIT_DMA; + if (host->use_external_dma) { + ret = sdhci_external_dma_init(host); + if (ret == -EPROBE_DEFER) + goto unreg; + /* + * Fall back to use the DMA/PIO integrated in standard SDHCI + * instead of external DMA devices. + */ + else if (ret) + sdhci_switch_external_dma(host, false); + /* Disable internal DMA sources */ + else + host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA); + } + if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { if (host->ops->set_dma_mask) ret = host->ops->set_dma_mask(host); @@ -3821,15 +4043,13 @@ int sdhci_setup_host(struct sdhci_host *host) dma_addr_t dma; void *buf; - if (host->flags & SDHCI_USE_64_BIT_DMA) { - host->adma_table_sz = host->adma_table_cnt * - SDHCI_ADMA2_64_DESC_SZ(host); - host->desc_sz = SDHCI_ADMA2_64_DESC_SZ(host); - } else { - host->adma_table_sz = host->adma_table_cnt * - SDHCI_ADMA2_32_DESC_SZ; - host->desc_sz = SDHCI_ADMA2_32_DESC_SZ; - } + if (!(host->flags & SDHCI_USE_64_BIT_DMA)) + host->alloc_desc_sz = SDHCI_ADMA2_32_DESC_SZ; + else if (!host->alloc_desc_sz) + host->alloc_desc_sz = SDHCI_ADMA2_64_DESC_SZ(host); + + host->desc_sz = host->alloc_desc_sz; + host->adma_table_sz = host->adma_table_cnt * host->desc_sz; host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN; /* @@ -3912,11 +4132,13 @@ int sdhci_setup_host(struct sdhci_host *host) if (host->ops->get_min_clock) mmc->f_min = host->ops->get_min_clock(host); else if (host->version >= SDHCI_SPEC_300) { - if (host->clk_mul) { - mmc->f_min = (host->max_clk * host->clk_mul) / 1024; + if (host->clk_mul) max_clk = host->max_clk * host->clk_mul; - } else - mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; + /* + * Divided Clock Mode minimum clock rate is always less than + * Programmable Clock Mode minimum clock rate. + */ + mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; } else mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; @@ -4275,6 +4497,10 @@ void sdhci_cleanup_host(struct sdhci_host *host) dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + host->adma_table_sz, host->align_buffer, host->align_addr); + + if (host->use_external_dma) + sdhci_external_dma_release(host); + host->adma_table = NULL; host->align_buffer = NULL; } @@ -4320,6 +4546,7 @@ int __sdhci_add_host(struct sdhci_host *host) pr_info("%s: SDHCI controller on %s [%s] using %s\n", mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)), + host->use_external_dma ? "External DMA" : (host->flags & SDHCI_USE_ADMA) ? (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" : (host->flags & SDHCI_USE_SDMA) ? 
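The mmc->f_min hunk above encodes a small invariant worth spelling out. With SDHCI_MAX_DIV_SPEC_300 = 2046 and the programmable-clock divider capped at 1024, divided mode always yields the lower floor: for example, with max_clk = 200 MHz and clk_mul = 4, divided mode reaches 200 MHz / 2046 ≈ 97.8 kHz while programmable mode bottoms out at (200 MHz × 4) / 1024 ≈ 781 kHz. Since clk_mul ≥ 1 implies max_clk·clk_mul/1024 > max_clk/2046, advertising the divided-mode value as f_min is always safe, which is exactly what the new comment asserts.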
"DMA" : "PIO"); @@ -4408,6 +4635,9 @@ void sdhci_remove_host(struct sdhci_host *host, int dead) host->adma_table_sz, host->align_buffer, host->align_addr); + if (host->use_external_dma) + sdhci_external_dma_release(host); + host->adma_table = NULL; host->align_buffer = NULL; } diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 0ed3e0eaef5f..a6a3ddcf97e7 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -409,6 +409,8 @@ struct sdhci_host { #define SDHCI_QUIRK_BROKEN_CARD_DETECTION (1<<15) /* Controller reports inverted write-protect state */ #define SDHCI_QUIRK_INVERTED_WRITE_PROTECT (1<<16) +/* Controller has unusable command queue engine */ +#define SDHCI_QUIRK_BROKEN_CQE (1<<17) /* Controller does not like fast PIO transfers */ #define SDHCI_QUIRK_PIO_NEEDS_DELAY (1<<18) /* Controller does not have a LED */ @@ -485,6 +487,7 @@ struct sdhci_host { int irq; /* Device IRQ */ void __iomem *ioaddr; /* Mapped address */ + phys_addr_t mapbase; /* physical address base */ char *bounce_buffer; /* For packing SDMA reads/writes */ dma_addr_t bounce_addr; unsigned int bounce_buffer_size; @@ -533,6 +536,7 @@ struct sdhci_host { bool pending_reset; /* Cmd/data reset is pending */ bool irq_wake_enabled; /* IRQ wakeup is enabled */ bool v4_mode; /* Host Version 4 Enable */ + bool use_external_dma; /* Host selects to use external DMA */ struct mmc_request *mrqs_done[SDHCI_MAX_MRQS]; /* Requests done */ struct mmc_command *cmd; /* Current command */ @@ -554,7 +558,8 @@ struct sdhci_host { dma_addr_t adma_addr; /* Mapped ADMA descr. table */ dma_addr_t align_addr; /* Mapped bounce buffer */ - unsigned int desc_sz; /* ADMA descriptor size */ + unsigned int desc_sz; /* ADMA current descriptor size */ + unsigned int alloc_desc_sz; /* ADMA descr. max size host supports */ struct workqueue_struct *complete_wq; /* Request completion wq */ struct work_struct complete_work; /* Request completion work */ @@ -562,6 +567,11 @@ struct sdhci_host { struct timer_list timer; /* Timer for timeouts */ struct timer_list data_timer; /* Timer for data timeouts */ +#if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA) + struct dma_chan *rx_chan; + struct dma_chan *tx_chan; +#endif + u32 caps; /* CAPABILITY_0 */ u32 caps1; /* CAPABILITY_1 */ bool read_caps; /* Capability flags have been read */ @@ -793,5 +803,8 @@ void sdhci_end_tuning(struct sdhci_host *host); void sdhci_reset_tuning(struct sdhci_host *host); void sdhci_send_tuning(struct sdhci_host *host, u32 opcode); void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode); +void sdhci_switch_external_dma(struct sdhci_host *host, bool en); +void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable); +void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd); #endif /* __SDHCI_HW_H */ diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c index b8e897e31e2e..3afea580fbea 100644 --- a/drivers/mmc/host/sdhci_am654.c +++ b/drivers/mmc/host/sdhci_am654.c @@ -240,6 +240,35 @@ static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg) writeb(val, host->ioaddr + reg); } +static int sdhci_am654_execute_tuning(struct mmc_host *mmc, u32 opcode) +{ + struct sdhci_host *host = mmc_priv(mmc); + int err = sdhci_execute_tuning(mmc, opcode); + + if (err) + return err; + /* + * Tuning data remains in the buffer after tuning. 
+ * Do a command and data reset to get rid of it + */ + sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); + + return 0; +} + +static u32 sdhci_am654_cqhci_irq(struct sdhci_host *host, u32 intmask) +{ + int cmd_error = 0; + int data_error = 0; + + if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error)) + return intmask; + + cqhci_irq(host->mmc, intmask, cmd_error, data_error); + + return 0; +} + static struct sdhci_ops sdhci_am654_ops = { .get_max_clock = sdhci_pltfm_clk_get_max_clock, .get_timeout_clock = sdhci_pltfm_clk_get_max_clock, @@ -248,13 +277,13 @@ static struct sdhci_ops sdhci_am654_ops = { .set_power = sdhci_am654_set_power, .set_clock = sdhci_am654_set_clock, .write_b = sdhci_am654_write_b, + .irq = sdhci_am654_cqhci_irq, .reset = sdhci_reset, }; static const struct sdhci_pltfm_data sdhci_am654_pdata = { .ops = &sdhci_am654_ops, - .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT | - SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, + .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, }; @@ -263,19 +292,6 @@ static const struct sdhci_am654_driver_data sdhci_am654_drvdata = { .flags = IOMUX_PRESENT | FREQSEL_2_BIT | STRBSEL_4_BIT | DLL_PRESENT, }; -static u32 sdhci_am654_cqhci_irq(struct sdhci_host *host, u32 intmask) -{ - int cmd_error = 0; - int data_error = 0; - - if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error)) - return intmask; - - cqhci_irq(host->mmc, intmask, cmd_error, data_error); - - return 0; -} - static struct sdhci_ops sdhci_j721e_8bit_ops = { .get_max_clock = sdhci_pltfm_clk_get_max_clock, .get_timeout_clock = sdhci_pltfm_clk_get_max_clock, @@ -290,8 +306,7 @@ static struct sdhci_ops sdhci_j721e_8bit_ops = { static const struct sdhci_pltfm_data sdhci_j721e_8bit_pdata = { .ops = &sdhci_j721e_8bit_ops, - .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT | - SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, + .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, }; @@ -314,8 +329,7 @@ static struct sdhci_ops sdhci_j721e_4bit_ops = { static const struct sdhci_pltfm_data sdhci_j721e_4bit_pdata = { .ops = &sdhci_j721e_4bit_ops, - .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT | - SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, + .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, }; @@ -491,7 +505,6 @@ static int sdhci_am654_probe(struct platform_device *pdev) struct sdhci_am654_data *sdhci_am654; const struct of_device_id *match; struct sdhci_host *host; - struct resource *res; struct clk *clk_xin; struct device *dev = &pdev->dev; void __iomem *base; @@ -524,8 +537,7 @@ static int sdhci_am654_probe(struct platform_device *pdev) goto pm_runtime_disable; } - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - base = devm_ioremap_resource(dev, res); + base = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(base)) { ret = PTR_ERR(base); goto pm_runtime_put; @@ -549,6 +561,8 @@ static int sdhci_am654_probe(struct platform_device *pdev) goto pm_runtime_put; } + host->mmc_host_ops.execute_tuning = sdhci_am654_execute_tuning; + ret = sdhci_am654_init(host); if (ret) goto pm_runtime_put; diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c index fa0dfc657c22..4625cc071b61 100644 --- a/drivers/mmc/host/sdhci_f_sdh30.c +++ b/drivers/mmc/host/sdhci_f_sdh30.c @@ -89,7 +89,6 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev) { struct sdhci_host *host; struct device *dev = &pdev->dev; - struct resource *res; int irq, ctrl = 0, ret = 0; struct 
f_sdhost_priv *priv; u32 reg = 0; @@ -123,8 +122,7 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev) host->ops = &sdhci_f_sdh30_ops; host->irq = irq; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - host->ioaddr = devm_ioremap_resource(&pdev->dev, res); + host->ioaddr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(host->ioaddr)) { ret = PTR_ERR(host->ioaddr); goto err; diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index 98c575de43c7..7e1fd557109c 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c @@ -432,8 +432,12 @@ static void sh_mmcif_request_dma(struct sh_mmcif_host *host) host->chan_rx = sh_mmcif_request_dma_pdata(host, pdata->slave_id_rx); } else { - host->chan_tx = dma_request_slave_channel(dev, "tx"); - host->chan_rx = dma_request_slave_channel(dev, "rx"); + host->chan_tx = dma_request_chan(dev, "tx"); + if (IS_ERR(host->chan_tx)) + host->chan_tx = NULL; + host->chan_rx = dma_request_chan(dev, "rx"); + if (IS_ERR(host->chan_rx)) + host->chan_rx = NULL; } dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx, host->chan_rx); @@ -1388,7 +1392,6 @@ static int sh_mmcif_probe(struct platform_device *pdev) struct sh_mmcif_host *host; struct device *dev = &pdev->dev; struct sh_mmcif_plat_data *pd = dev->platform_data; - struct resource *res; void __iomem *reg; const char *name; @@ -1397,8 +1400,7 @@ static int sh_mmcif_probe(struct platform_device *pdev) if (irq[0] < 0) return -ENXIO; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - reg = devm_ioremap_resource(dev, res); + reg = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(reg)) return PTR_ERR(reg); diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index d577a6b0ceae..f87d7967457f 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c @@ -1273,8 +1273,7 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, if (ret) return ret; - host->reg_base = devm_ioremap_resource(&pdev->dev, - platform_get_resource(pdev, IORESOURCE_MEM, 0)); + host->reg_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(host->reg_base)) return PTR_ERR(host->reg_base); diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index c4a1d49fbea4..1e424bcdbd5f 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -1109,12 +1109,10 @@ struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev, { struct tmio_mmc_host *host; struct mmc_host *mmc; - struct resource *res; void __iomem *ctl; int ret; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ctl = devm_ioremap_resource(&pdev->dev, res); + ctl = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ctl)) return ERR_CAST(ctl); @@ -1181,7 +1179,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host) * Look for a card detect GPIO, if it fails with anything * else than a probe deferral, just live without it. 
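Several hunks around here (sdhci_am654, sdhci_f_sdh30, sh_mmcif, sunxi-mmc, tmio) perform the same mechanical conversion: the platform_get_resource() + devm_ioremap_resource() pair collapses into devm_platform_ioremap_resource(), which does both steps and lets the local struct resource go away. A minimal probe skeleton showing the equivalence (illustrative driver, not from the patch):

#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	void __iomem *base;

	/*
	 * Old style:
	 *	struct resource *res;
	 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	 *	base = devm_ioremap_resource(&pdev->dev, res);
	 */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... rest of probe ... */
	return 0;
}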
*/ - ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL); + ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0); if (ret == -EPROBE_DEFER) return ret; diff --git a/drivers/mmc/host/uniphier-sd.c b/drivers/mmc/host/uniphier-sd.c index 0c72ec5546c3..a1683c49cb90 100644 --- a/drivers/mmc/host/uniphier-sd.c +++ b/drivers/mmc/host/uniphier-sd.c @@ -59,7 +59,6 @@ struct uniphier_sd_priv { struct tmio_mmc_data tmio_data; struct pinctrl *pinctrl; - struct pinctrl_state *pinstate_default; struct pinctrl_state *pinstate_uhs; struct clk *clk; struct reset_control *rst; @@ -500,13 +499,12 @@ static int uniphier_sd_start_signal_voltage_switch(struct mmc_host *mmc, { struct tmio_mmc_host *host = mmc_priv(mmc); struct uniphier_sd_priv *priv = uniphier_sd_priv(host); - struct pinctrl_state *pinstate; + struct pinctrl_state *pinstate = NULL; u32 val, tmp; switch (ios->signal_voltage) { case MMC_SIGNAL_VOLTAGE_330: val = UNIPHIER_SD_VOLT_330; - pinstate = priv->pinstate_default; break; case MMC_SIGNAL_VOLTAGE_180: val = UNIPHIER_SD_VOLT_180; @@ -521,7 +519,10 @@ static int uniphier_sd_start_signal_voltage_switch(struct mmc_host *mmc, tmp |= FIELD_PREP(UNIPHIER_SD_VOLT_MASK, val); writel(tmp, host->ctl + UNIPHIER_SD_VOLT); - pinctrl_select_state(priv->pinctrl, pinstate); + if (pinstate) + pinctrl_select_state(priv->pinctrl, pinstate); + else + pinctrl_select_default_state(mmc_dev(mmc)); return 0; } @@ -533,11 +534,6 @@ static int uniphier_sd_uhs_init(struct tmio_mmc_host *host, if (IS_ERR(priv->pinctrl)) return PTR_ERR(priv->pinctrl); - priv->pinstate_default = pinctrl_lookup_state(priv->pinctrl, - PINCTRL_STATE_DEFAULT); - if (IS_ERR(priv->pinstate_default)) - return PTR_ERR(priv->pinstate_default); - priv->pinstate_uhs = pinctrl_lookup_state(priv->pinctrl, "uhs"); if (IS_ERR(priv->pinstate_uhs)) return PTR_ERR(priv->pinstate_uhs); diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c index b11ac2314328..9a0b1e4e405d 100644 --- a/drivers/mmc/host/usdhi6rol0.c +++ b/drivers/mmc/host/usdhi6rol0.c @@ -199,7 +199,6 @@ struct usdhi6_host { /* Pin control */ struct pinctrl *pinctrl; - struct pinctrl_state *pins_default; struct pinctrl_state *pins_uhs; }; @@ -677,12 +676,14 @@ static void usdhi6_dma_request(struct usdhi6_host *host, phys_addr_t start) }; int ret; - host->chan_tx = dma_request_slave_channel(mmc_dev(host->mmc), "tx"); + host->chan_tx = dma_request_chan(mmc_dev(host->mmc), "tx"); dev_dbg(mmc_dev(host->mmc), "%s: TX: got channel %p\n", __func__, host->chan_tx); - if (!host->chan_tx) + if (IS_ERR(host->chan_tx)) { + host->chan_tx = NULL; return; + } cfg.direction = DMA_MEM_TO_DEV; cfg.dst_addr = start + USDHI6_SD_BUF0; @@ -692,12 +693,14 @@ static void usdhi6_dma_request(struct usdhi6_host *host, phys_addr_t start) if (ret < 0) goto e_release_tx; - host->chan_rx = dma_request_slave_channel(mmc_dev(host->mmc), "rx"); + host->chan_rx = dma_request_chan(mmc_dev(host->mmc), "rx"); dev_dbg(mmc_dev(host->mmc), "%s: RX: got channel %p\n", __func__, host->chan_rx); - if (!host->chan_rx) + if (IS_ERR(host->chan_rx)) { + host->chan_rx = NULL; goto e_release_tx; + } cfg.direction = DMA_DEV_TO_MEM; cfg.src_addr = cfg.dst_addr; @@ -1162,8 +1165,7 @@ static int usdhi6_set_pinstates(struct usdhi6_host *host, int voltage) host->pins_uhs); default: - return pinctrl_select_state(host->pinctrl, - host->pins_default); + return pinctrl_select_default_state(mmc_dev(host->mmc)); } } @@ -1770,17 +1772,6 @@ static int usdhi6_probe(struct platform_device *pdev) } host->pins_uhs = 
pinctrl_lookup_state(host->pinctrl, "state_uhs"); - if (!IS_ERR(host->pins_uhs)) { - host->pins_default = pinctrl_lookup_state(host->pinctrl, - PINCTRL_STATE_DEFAULT); - - if (IS_ERR(host->pins_default)) { - dev_err(dev, - "UHS pinctrl requires a default pin state.\n"); - ret = PTR_ERR(host->pins_default); - goto e_free_mmc; - } - } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); host->base = devm_ioremap_resource(dev, res); diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c index f4ac064ff471..e48bddd95ce6 100644 --- a/drivers/mmc/host/via-sdmmc.c +++ b/drivers/mmc/host/via-sdmmc.c @@ -1106,7 +1106,7 @@ static int via_sd_probe(struct pci_dev *pcidev, len = pci_resource_len(pcidev, 0); base = pci_resource_start(pcidev, 0); - sdhost->mmiobase = ioremap_nocache(base, len); + sdhost->mmiobase = ioremap(base, len); if (!sdhost->mmiobase) { ret = -ENOMEM; goto free_mmc_host; diff --git a/drivers/mtd/devices/bcm47xxsflash.c b/drivers/mtd/devices/bcm47xxsflash.c index eccf2e5d905e..3af50db8b21b 100644 --- a/drivers/mtd/devices/bcm47xxsflash.c +++ b/drivers/mtd/devices/bcm47xxsflash.c @@ -320,7 +320,7 @@ static int bcm47xxsflash_bcma_probe(struct platform_device *pdev) * ChipCommon revision. */ if (b47s->bcma_cc->core->id.rev == 54) - b47s->window = ioremap_nocache(res->start, resource_size(res)); + b47s->window = ioremap(res->start, resource_size(res)); else b47s->window = ioremap_cache(res->start, resource_size(res)); if (!b47s->window) { diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c index 462fadb56bdb..42a95ba40f2c 100644 --- a/drivers/mtd/maps/amd76xrom.c +++ b/drivers/mtd/maps/amd76xrom.c @@ -163,7 +163,7 @@ static int amd76xrom_init_one(struct pci_dev *pdev, /* FIXME handle registers 0x80 - 0x8C the bios region locks */ /* For write accesses caches are useless */ - window->virt = ioremap_nocache(window->phys, window->size); + window->virt = ioremap(window->phys, window->size); if (!window->virt) { printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n", window->phys, window->size); diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c index c9b7b4d5a923..460494212f6a 100644 --- a/drivers/mtd/maps/ck804xrom.c +++ b/drivers/mtd/maps/ck804xrom.c @@ -191,7 +191,7 @@ static int __init ck804xrom_init_one(struct pci_dev *pdev, /* FIXME handle registers 0x80 - 0x8C the bios region locks */ /* For write accesses caches are useless */ - window->virt = ioremap_nocache(window->phys, window->size); + window->virt = ioremap(window->phys, window->size); if (!window->virt) { printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n", window->phys, window->size); diff --git a/drivers/mtd/maps/esb2rom.c b/drivers/mtd/maps/esb2rom.c index 5c27c6994896..85e14150a073 100644 --- a/drivers/mtd/maps/esb2rom.c +++ b/drivers/mtd/maps/esb2rom.c @@ -249,7 +249,7 @@ static int __init esb2rom_init_one(struct pci_dev *pdev, } /* Map the firmware hub into my address space. */ - window->virt = ioremap_nocache(window->phys, window->size); + window->virt = ioremap(window->phys, window->size); if (!window->virt) { printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n", window->phys, window->size); diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c index 6b989f391baa..fda72c5fd8f9 100644 --- a/drivers/mtd/maps/ichxrom.c +++ b/drivers/mtd/maps/ichxrom.c @@ -184,7 +184,7 @@ static int __init ichxrom_init_one(struct pci_dev *pdev, } /* Map the firmware hub into my address space. 
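The dma_request_chan() conversions in the sh_mmcif and usdhi6rol0 hunks share one subtlety: unlike the deprecated dma_request_slave_channel(), dma_request_chan() reports failure as an ERR_PTR() rather than NULL, so drivers that treat DMA as optional must convert the error back to a NULL channel before falling back to PIO. A sketch of that shape (illustrative helper):

#include <linux/dmaengine.h>

/* Optional DMA: fall back to PIO when no "tx" channel is described. */
static struct dma_chan *example_request_tx(struct device *dev)
{
	struct dma_chan *chan = dma_request_chan(dev, "tx");

	if (IS_ERR(chan))
		return NULL;	/* e.g. -ENODEV: no channel in DT/ACPI */

	return chan;
}

Note that this simple fallback also swallows -EPROBE_DEFER; a driver that must wait for its DMA provider would need to check for that code explicitly and propagate it.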
*/ - window->virt = ioremap_nocache(window->phys, window->size); + window->virt = ioremap(window->phys, window->size); if (!window->virt) { printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n", window->phys, window->size); diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c index 69503aef981e..d67b845b0e89 100644 --- a/drivers/mtd/maps/intel_vr_nor.c +++ b/drivers/mtd/maps/intel_vr_nor.c @@ -133,7 +133,7 @@ static int vr_nor_init_maps(struct vr_nor_mtd *p) if (win_len < (CS0_START + CS0_SIZE)) return -ENXIO; - p->csr_base = ioremap_nocache(csr_phys, csr_len); + p->csr_base = ioremap(csr_phys, csr_len); if (!p->csr_base) return -ENOMEM; @@ -152,7 +152,7 @@ static int vr_nor_init_maps(struct vr_nor_mtd *p) p->map.bankwidth = (exp_timing_cs0 & TIMING_BYTE_EN) ? 1 : 2; p->map.phys = win_phys + CS0_START; p->map.size = CS0_SIZE; - p->map.virt = ioremap_nocache(p->map.phys, p->map.size); + p->map.virt = ioremap(p->map.phys, p->map.size); if (!p->map.virt) { err = -ENOMEM; goto release; diff --git a/drivers/mtd/maps/l440gx.c b/drivers/mtd/maps/l440gx.c index 0eeadfeb620d..832b880d1aaf 100644 --- a/drivers/mtd/maps/l440gx.c +++ b/drivers/mtd/maps/l440gx.c @@ -78,7 +78,7 @@ static int __init init_l440gx(void) return -ENODEV; } - l440gx_map.virt = ioremap_nocache(WINDOW_ADDR, WINDOW_SIZE); + l440gx_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE); if (!l440gx_map.virt) { printk(KERN_WARNING "Failed to ioremap L440GX flash region\n"); diff --git a/drivers/mtd/maps/netsc520.c b/drivers/mtd/maps/netsc520.c index abc52b70bb00..0bb651624f05 100644 --- a/drivers/mtd/maps/netsc520.c +++ b/drivers/mtd/maps/netsc520.c @@ -82,10 +82,10 @@ static int __init init_netsc520(void) printk(KERN_NOTICE "NetSc520 flash device: 0x%Lx at 0x%Lx\n", (unsigned long long)netsc520_map.size, (unsigned long long)netsc520_map.phys); - netsc520_map.virt = ioremap_nocache(netsc520_map.phys, netsc520_map.size); + netsc520_map.virt = ioremap(netsc520_map.phys, netsc520_map.size); if (!netsc520_map.virt) { - printk("Failed to ioremap_nocache\n"); + printk("Failed to ioremap\n"); return -EIO; } diff --git a/drivers/mtd/maps/nettel.c b/drivers/mtd/maps/nettel.c index 50046d497398..7d349874ffeb 100644 --- a/drivers/mtd/maps/nettel.c +++ b/drivers/mtd/maps/nettel.c @@ -176,7 +176,7 @@ static int __init nettel_init(void) #endif int rc = 0; - nettel_mmcrp = (void *) ioremap_nocache(0xfffef000, 4096); + nettel_mmcrp = (void *) ioremap(0xfffef000, 4096); if (nettel_mmcrp == NULL) { printk("SNAPGEAR: failed to disable MMCR cache??\n"); return(-EIO); @@ -217,7 +217,7 @@ static int __init nettel_init(void) __asm__ ("wbinvd"); nettel_amd_map.phys = amdaddr; - nettel_amd_map.virt = ioremap_nocache(amdaddr, maxsize); + nettel_amd_map.virt = ioremap(amdaddr, maxsize); if (!nettel_amd_map.virt) { printk("SNAPGEAR: failed to ioremap() BOOTCS\n"); iounmap(nettel_mmcrp); @@ -303,7 +303,7 @@ static int __init nettel_init(void) /* Probe for the size of the first Intel flash */ nettel_intel_map.size = maxsize; nettel_intel_map.phys = intel0addr; - nettel_intel_map.virt = ioremap_nocache(intel0addr, maxsize); + nettel_intel_map.virt = ioremap(intel0addr, maxsize); if (!nettel_intel_map.virt) { printk("SNAPGEAR: failed to ioremap() ROMCS1\n"); rc = -EIO; @@ -337,7 +337,7 @@ static int __init nettel_init(void) iounmap(nettel_intel_map.virt); nettel_intel_map.size = maxsize; - nettel_intel_map.virt = ioremap_nocache(intel0addr, maxsize); + nettel_intel_map.virt = ioremap(intel0addr, maxsize); if (!nettel_intel_map.virt) { 
printk("SNAPGEAR: failed to ioremap() ROMCS1/2\n"); rc = -EIO; diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c index 9a49f8a06fb8..377ef0fc4e3e 100644 --- a/drivers/mtd/maps/pci.c +++ b/drivers/mtd/maps/pci.c @@ -94,7 +94,7 @@ intel_iq80310_init(struct pci_dev *dev, struct map_pci_info *map) map->map.write = mtd_pci_write8, map->map.size = 0x00800000; - map->base = ioremap_nocache(pci_resource_start(dev, 0), + map->base = ioremap(pci_resource_start(dev, 0), pci_resource_len(dev, 0)); if (!map->base) @@ -188,7 +188,7 @@ intel_dc21285_init(struct pci_dev *dev, struct map_pci_info *map) map->map.read = mtd_pci_read32, map->map.write = mtd_pci_write32, map->map.size = len; - map->base = ioremap_nocache(base, len); + map->base = ioremap(base, len); if (!map->base) return -ENOMEM; diff --git a/drivers/mtd/maps/sc520cdp.c b/drivers/mtd/maps/sc520cdp.c index 03af2df90d47..9902b37e18b4 100644 --- a/drivers/mtd/maps/sc520cdp.c +++ b/drivers/mtd/maps/sc520cdp.c @@ -174,8 +174,8 @@ static void sc520cdp_setup_par(void) int i, j; /* map in SC520's MMCR area */ - mmcr = ioremap_nocache(SC520_MMCR_BASE, SC520_MMCR_EXTENT); - if(!mmcr) { /* ioremap_nocache failed: skip the PAR reprogramming */ + mmcr = ioremap(SC520_MMCR_BASE, SC520_MMCR_EXTENT); + if(!mmcr) { /* ioremap failed: skip the PAR reprogramming */ /* force physical address fields to BIOS defaults: */ for(i = 0; i < NUM_FLASH_BANKS; i++) sc520cdp_map[i].phys = par_table[i].default_address; @@ -225,10 +225,10 @@ static int __init init_sc520cdp(void) (unsigned long long)sc520cdp_map[i].size, (unsigned long long)sc520cdp_map[i].phys); - sc520cdp_map[i].virt = ioremap_nocache(sc520cdp_map[i].phys, sc520cdp_map[i].size); + sc520cdp_map[i].virt = ioremap(sc520cdp_map[i].phys, sc520cdp_map[i].size); if (!sc520cdp_map[i].virt) { - printk("Failed to ioremap_nocache\n"); + printk("Failed to ioremap\n"); for (j = 0; j < i; j++) { if (mymtd[j]) { map_destroy(mymtd[j]); diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c index 2afb253bf456..57303f904bc1 100644 --- a/drivers/mtd/maps/scb2_flash.c +++ b/drivers/mtd/maps/scb2_flash.c @@ -152,7 +152,7 @@ static int scb2_flash_probe(struct pci_dev *dev, } /* remap the IO window (w/o caching) */ - scb2_ioaddr = ioremap_nocache(SCB2_ADDR, SCB2_WINDOW); + scb2_ioaddr = ioremap(SCB2_ADDR, SCB2_WINDOW); if (!scb2_ioaddr) { printk(KERN_ERR MODNAME ": Failed to ioremap window!\n"); if (!region_fail) diff --git a/drivers/mtd/maps/ts5500_flash.c b/drivers/mtd/maps/ts5500_flash.c index 6cfc8783c0e5..70d6e865f555 100644 --- a/drivers/mtd/maps/ts5500_flash.c +++ b/drivers/mtd/maps/ts5500_flash.c @@ -56,10 +56,10 @@ static int __init init_ts5500_map(void) { int rc = 0; - ts5500_map.virt = ioremap_nocache(ts5500_map.phys, ts5500_map.size); + ts5500_map.virt = ioremap(ts5500_map.phys, ts5500_map.size); if (!ts5500_map.virt) { - printk(KERN_ERR "Failed to ioremap_nocache\n"); + printk(KERN_ERR "Failed to ioremap\n"); rc = -EIO; goto err2; } diff --git a/drivers/mtd/nand/onenand/omap2.c b/drivers/mtd/nand/onenand/omap2.c index edf94ee54ec7..aa9368bf7a0c 100644 --- a/drivers/mtd/nand/onenand/omap2.c +++ b/drivers/mtd/nand/onenand/omap2.c @@ -148,13 +148,13 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state) unsigned long timeout; u32 syscfg; - if (state == FL_RESETING || state == FL_PREPARING_ERASE || + if (state == FL_RESETTING || state == FL_PREPARING_ERASE || state == FL_VERIFYING_ERASE) { int i = 21; unsigned int intr_flags = ONENAND_INT_MASTER; switch (state) { - case 
FL_RESETING: + case FL_RESETTING: intr_flags |= ONENAND_INT_RESET; break; case FL_PREPARING_ERASE: @@ -328,7 +328,8 @@ static inline int omap2_onenand_dma_transfer(struct omap2_onenand *c, struct dma_async_tx_descriptor *tx; dma_cookie_t cookie; - tx = dmaengine_prep_dma_memcpy(c->dma_chan, dst, src, count, 0); + tx = dmaengine_prep_dma_memcpy(c->dma_chan, dst, src, count, + DMA_CTRL_ACK | DMA_PREP_INTERRUPT); if (!tx) { dev_err(&c->pdev->dev, "Failed to prepare DMA memcpy\n"); return -EIO; @@ -375,7 +376,7 @@ static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area, * context fallback to PIO mode. */ if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 || - count < 384 || in_interrupt() || oops_in_progress ) + count < 384 || in_interrupt() || oops_in_progress) goto out_copy; xtra = count & 3; @@ -422,7 +423,7 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area, * context fallback to PIO mode. */ if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 || - count < 384 || in_interrupt() || oops_in_progress ) + count < 384 || in_interrupt() || oops_in_progress) goto out_copy; dma_src = dma_map_single(dev, buf, count, DMA_TO_DEVICE); @@ -528,7 +529,8 @@ static int omap2_onenand_probe(struct platform_device *pdev) c->gpmc_cs, c->phys_base, c->onenand.base, c->dma_chan ? "DMA" : "PIO"); - if ((r = onenand_scan(&c->mtd, 1)) < 0) + r = onenand_scan(&c->mtd, 1); + if (r < 0) goto err_release_dma; freq = omap2_onenand_get_freq(c->onenand.version_id); diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c index 77bd32a683e1..85640ee11c86 100644 --- a/drivers/mtd/nand/onenand/onenand_base.c +++ b/drivers/mtd/nand/onenand/onenand_base.c @@ -2853,7 +2853,7 @@ static int onenand_otp_write_oob_nolock(struct mtd_info *mtd, loff_t to, /* Exit OTP access mode */ this->command(mtd, ONENAND_CMD_RESET, 0, 0); - this->wait(mtd, FL_RESETING); + this->wait(mtd, FL_RESETTING); status = this->read_word(this->base + ONENAND_REG_CTRL_STATUS); status &= 0x60; @@ -2924,7 +2924,7 @@ static int do_otp_read(struct mtd_info *mtd, loff_t from, size_t len, /* Exit OTP access mode */ this->command(mtd, ONENAND_CMD_RESET, 0, 0); - this->wait(mtd, FL_RESETING); + this->wait(mtd, FL_RESETTING); return ret; } @@ -2968,7 +2968,7 @@ static int do_otp_write(struct mtd_info *mtd, loff_t to, size_t len, /* Exit OTP access mode */ this->command(mtd, ONENAND_CMD_RESET, 0, 0); - this->wait(mtd, FL_RESETING); + this->wait(mtd, FL_RESETTING); return ret; } @@ -3008,7 +3008,7 @@ static int do_otp_lock(struct mtd_info *mtd, loff_t from, size_t len, /* Exit OTP access mode */ this->command(mtd, ONENAND_CMD_RESET, 0, 0); - this->wait(mtd, FL_RESETING); + this->wait(mtd, FL_RESETTING); } else { ops.mode = MTD_OPS_PLACE_OOB; ops.ooblen = len; @@ -3413,7 +3413,7 @@ static int flexonenand_get_boundary(struct mtd_info *mtd) this->boundary[die] = bdry & FLEXONENAND_PI_MASK; this->command(mtd, ONENAND_CMD_RESET, 0, 0); - this->wait(mtd, FL_RESETING); + this->wait(mtd, FL_RESETTING); printk(KERN_INFO "Die %d boundary: %d%s\n", die, this->boundary[die], locked ? 
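The omap2 OneNAND DMA fix above replaces a bare flags value of 0 with DMA_CTRL_ACK | DMA_PREP_INTERRUPT: without DMA_PREP_INTERRUPT some engines never raise the completion callback, and without DMA_CTRL_ACK the descriptor cannot be acknowledged and recycled promptly. A sketch of the corrected memcpy-offload sequence, assuming the caller waits on a completion (names illustrative):

#include <linux/completion.h>
#include <linux/dmaengine.h>

static void example_dma_done(void *arg)
{
	complete(arg);
}

static int example_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
			      dma_addr_t src, size_t len,
			      struct completion *done)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
				       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	if (!tx)
		return -EIO;

	tx->callback = example_dma_done;	/* needs DMA_PREP_INTERRUPT */
	tx->callback_param = done;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}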
"(Locked)" : "(Unlocked)"); @@ -3635,7 +3635,7 @@ static int flexonenand_set_boundary(struct mtd_info *mtd, int die, ret = this->wait(mtd, FL_WRITING); out: this->write_word(ONENAND_CMD_RESET, this->base + ONENAND_REG_COMMAND); - this->wait(mtd, FL_RESETING); + this->wait(mtd, FL_RESETTING); if (!ret) /* Recalculate device size on boundary change*/ flexonenand_get_size(mtd); @@ -3671,7 +3671,7 @@ static int onenand_chip_probe(struct mtd_info *mtd) /* Reset OneNAND to read default register values */ this->write_word(ONENAND_CMD_RESET, this->base + ONENAND_BOOTRAM); /* Wait reset */ - this->wait(mtd, FL_RESETING); + this->wait(mtd, FL_RESETTING); /* Restore system configuration 1 */ this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1); diff --git a/drivers/mtd/nand/onenand/samsung_mtd.c b/drivers/mtd/nand/onenand/samsung_mtd.c index 55e5536a5850..beb7987e4c2b 100644 --- a/drivers/mtd/nand/onenand/samsung_mtd.c +++ b/drivers/mtd/nand/onenand/samsung_mtd.c @@ -675,12 +675,12 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area, normal: if (count != mtd->writesize) { /* Copy the bufferram to memory to prevent unaligned access */ - memcpy(this->page_buf, p, mtd->writesize); - p = this->page_buf + offset; + memcpy_fromio(this->page_buf, p, mtd->writesize); + memcpy(buffer, this->page_buf + offset, count); + } else { + memcpy_fromio(buffer, p, count); } - memcpy(buffer, p, count); - return 0; } diff --git a/drivers/mtd/nand/raw/au1550nd.c b/drivers/mtd/nand/raw/au1550nd.c index e10b76089048..75eb3e97fae3 100644 --- a/drivers/mtd/nand/raw/au1550nd.c +++ b/drivers/mtd/nand/raw/au1550nd.c @@ -404,7 +404,7 @@ static int au1550nd_probe(struct platform_device *pdev) goto out1; } - ctx->base = ioremap_nocache(r->start, 0x1000); + ctx->base = ioremap(r->start, 0x1000); if (!ctx->base) { dev_err(&pdev->dev, "cannot remap NAND memory area\n"); ret = -ENODEV; diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c index 3a36285a8d8a..f6c7102a1e32 100644 --- a/drivers/mtd/nand/raw/cadence-nand-controller.c +++ b/drivers/mtd/nand/raw/cadence-nand-controller.c @@ -914,8 +914,8 @@ static void cadence_nand_get_caps(struct cdns_nand_ctrl *cdns_ctrl) /* Prepare CDMA descriptor. 
*/ static void cadence_nand_cdma_desc_prepare(struct cdns_nand_ctrl *cdns_ctrl, - char nf_mem, u32 flash_ptr, char *mem_ptr, - char *ctrl_data_ptr, u16 ctype) + char nf_mem, u32 flash_ptr, dma_addr_t mem_ptr, + dma_addr_t ctrl_data_ptr, u16 ctype) { struct cadence_nand_cdma_desc *cdma_desc = cdns_ctrl->cdma_desc; @@ -931,13 +931,13 @@ cadence_nand_cdma_desc_prepare(struct cdns_nand_ctrl *cdns_ctrl, cdma_desc->command_flags |= CDMA_CF_DMA_MASTER; cdma_desc->command_flags |= CDMA_CF_INT; - cdma_desc->memory_pointer = (uintptr_t)mem_ptr; + cdma_desc->memory_pointer = mem_ptr; cdma_desc->status = 0; cdma_desc->sync_flag_pointer = 0; cdma_desc->sync_arguments = 0; cdma_desc->command_type = ctype; - cdma_desc->ctrl_data_ptr = (uintptr_t)ctrl_data_ptr; + cdma_desc->ctrl_data_ptr = ctrl_data_ptr; } static u8 cadence_nand_check_desc_error(struct cdns_nand_ctrl *cdns_ctrl, @@ -1280,8 +1280,7 @@ cadence_nand_cdma_transfer(struct cdns_nand_ctrl *cdns_ctrl, u8 chip_nr, } cadence_nand_cdma_desc_prepare(cdns_ctrl, chip_nr, page, - (void *)dma_buf, (void *)dma_ctrl_dat, - ctype); + dma_buf, dma_ctrl_dat, ctype); status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr); @@ -1360,7 +1359,7 @@ static int cadence_nand_erase(struct nand_chip *chip, u32 page) cadence_nand_cdma_desc_prepare(cdns_ctrl, cdns_chip->cs[chip->cur_cs], - page, NULL, NULL, + page, 0, 0, CDMA_CT_ERASE); status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr); if (status) { diff --git a/drivers/mtd/nand/raw/denali_pci.c b/drivers/mtd/nand/raw/denali_pci.c index d62aa5271753..2f77ee55e1bf 100644 --- a/drivers/mtd/nand/raw/denali_pci.c +++ b/drivers/mtd/nand/raw/denali_pci.c @@ -74,15 +74,15 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) return ret; } - denali->reg = ioremap_nocache(csr_base, csr_len); + denali->reg = ioremap(csr_base, csr_len); if (!denali->reg) { dev_err(&dev->dev, "Spectra: Unable to remap memory region\n"); return -ENOMEM; } - denali->host = ioremap_nocache(mem_base, mem_len); + denali->host = ioremap(mem_base, mem_len); if (!denali->host) { - dev_err(&dev->dev, "Spectra: ioremap_nocache failed!"); + dev_err(&dev->dev, "Spectra: ioremap failed!"); ret = -ENOMEM; goto out_unmap_reg; } diff --git a/drivers/mtd/nand/raw/fsl_upm.c b/drivers/mtd/nand/raw/fsl_upm.c index 1054cc070747..f31fae3a4c68 100644 --- a/drivers/mtd/nand/raw/fsl_upm.c +++ b/drivers/mtd/nand/raw/fsl_upm.c @@ -285,7 +285,7 @@ static int fun_probe(struct platform_device *ofdev) fun->wait_flags = FSL_UPM_WAIT_RUN_PATTERN | FSL_UPM_WAIT_WRITE_BYTE; - fun->io_base = devm_ioremap_nocache(&ofdev->dev, io_res.start, + fun->io_base = devm_ioremap(&ofdev->dev, io_res.start, resource_size(&io_res)); if (!fun->io_base) { ret = -ENOMEM; diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index 334fe3130285..b9d5d55a5edb 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -148,6 +148,10 @@ static int gpmi_init(struct gpmi_nand_data *this) struct resources *r = &this->resources; int ret; + ret = pm_runtime_get_sync(this->dev); + if (ret < 0) + return ret; + ret = gpmi_reset_block(r->gpmi_regs, false); if (ret) goto err_out; @@ -179,8 +183,9 @@ static int gpmi_init(struct gpmi_nand_data *this) */ writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET); - return 0; err_out: + pm_runtime_mark_last_busy(this->dev); + pm_runtime_put_autosuspend(this->dev); return ret; } @@ -2722,6 +2727,10 @@ static int 
gpmi_pm_resume(struct device *dev) return ret; } + /* Set flag to get timing setup restored for next exec_op */ + if (this->hw.clk_rate) + this->hw.must_apply_timings = true; + /* re-init the BCH registers */ ret = bch_set_geometry(this); if (ret) { diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c index 9e63800f768a..3ba73f18841f 100644 --- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c +++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c @@ -37,6 +37,7 @@ /* Max ECC buffer length */ #define FMC2_MAX_ECC_BUF_LEN (FMC2_BCHDSRS_LEN * FMC2_MAX_SG) +#define FMC2_TIMEOUT_US 1000 #define FMC2_TIMEOUT_MS 1000 /* Timings */ @@ -53,6 +54,8 @@ #define FMC2_PMEM 0x88 #define FMC2_PATT 0x8c #define FMC2_HECCR 0x94 +#define FMC2_ISR 0x184 +#define FMC2_ICR 0x188 #define FMC2_CSQCR 0x200 #define FMC2_CSQCFGR1 0x204 #define FMC2_CSQCFGR2 0x208 @@ -118,6 +121,12 @@ #define FMC2_PATT_ATTHIZ(x) (((x) & 0xff) << 24) #define FMC2_PATT_DEFAULT 0x0a0a0a0a +/* Register: FMC2_ISR */ +#define FMC2_ISR_IHLF BIT(1) + +/* Register: FMC2_ICR */ +#define FMC2_ICR_CIHLF BIT(1) + /* Register: FMC2_CSQCR */ #define FMC2_CSQCR_CSQSTART BIT(0) @@ -1322,6 +1331,31 @@ static void stm32_fmc2_write_data(struct nand_chip *chip, const void *buf, stm32_fmc2_set_buswidth_16(fmc2, true); } +static int stm32_fmc2_waitrdy(struct nand_chip *chip, unsigned long timeout_ms) +{ + struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + const struct nand_sdr_timings *timings; + u32 isr, sr; + + /* Check if there is no pending requests to the NAND flash */ + if (readl_relaxed_poll_timeout_atomic(fmc2->io_base + FMC2_SR, sr, + sr & FMC2_SR_NWRF, 1, + FMC2_TIMEOUT_US)) + dev_warn(fmc2->dev, "Waitrdy timeout\n"); + + /* Wait tWB before R/B# signal is low */ + timings = nand_get_sdr_timings(&chip->data_interface); + ndelay(PSEC_TO_NSEC(timings->tWB_max)); + + /* R/B# signal is low, clear high level flag */ + writel_relaxed(FMC2_ICR_CIHLF, fmc2->io_base + FMC2_ICR); + + /* Wait R/B# signal is high */ + return readl_relaxed_poll_timeout_atomic(fmc2->io_base + FMC2_ISR, + isr, isr & FMC2_ISR_IHLF, + 5, 1000 * timeout_ms); +} + static int stm32_fmc2_exec_op(struct nand_chip *chip, const struct nand_operation *op, bool check_only) @@ -1366,8 +1400,8 @@ static int stm32_fmc2_exec_op(struct nand_chip *chip, break; case NAND_OP_WAITRDY_INSTR: - ret = nand_soft_waitrdy(chip, - instr->ctx.waitrdy.timeout_ms); + ret = stm32_fmc2_waitrdy(chip, + instr->ctx.waitrdy.timeout_ms); break; } } diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c index 4744bf94ad9a..b9f272408c4d 100644 --- a/drivers/mtd/sm_ftl.c +++ b/drivers/mtd/sm_ftl.c @@ -247,7 +247,8 @@ static int sm_read_sector(struct sm_ftl *ftl, /* FTL can contain -1 entries that are by default filled with bits */ if (block == -1) { - memset(buffer, 0xFF, SM_SECTOR_SIZE); + if (buffer) + memset(buffer, 0xFF, SM_SECTOR_SIZE); return 0; } diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index f4afe123e9dc..b0cd443dd758 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -2124,6 +2124,8 @@ static int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor) if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1) return 0; + nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1; + return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]); } @@ -4596,6 +4598,7 @@ static void sst_set_default_init(struct spi_nor *nor) static void st_micron_set_default_init(struct spi_nor *nor) { nor->flags |= SNOR_F_HAS_LOCK; + nor->flags &= ~SNOR_F_HAS_16BIT_SR; 
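The new stm32_fmc2_waitrdy() above is built on readl_relaxed_poll_timeout_atomic() from <linux/iopoll.h>, which busy-polls a register until a condition holds and returns 0 on success or -ETIMEDOUT. Its general shape, with illustrative register and bit names:

#include <linux/bits.h>
#include <linux/iopoll.h>

#define EXAMPLE_ISR		0x184		/* illustrative offset */
#define EXAMPLE_ISR_READY	BIT(1)		/* illustrative bit */

/* Poll ISR until READY is set: 5us between reads, 1000us total budget. */
static int example_wait_ready(void __iomem *regs)
{
	u32 isr;

	return readl_relaxed_poll_timeout_atomic(regs + EXAMPLE_ISR, isr,
						 isr & EXAMPLE_ISR_READY,
						 5, 1000);
}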
nor->params.quad_enable = NULL; nor->params.set_4byte = st_micron_set_4byte; } @@ -4768,9 +4771,7 @@ static void spi_nor_info_init_params(struct spi_nor *nor) static void spansion_post_sfdp_fixups(struct spi_nor *nor) { - struct mtd_info *mtd = &nor->mtd; - - if (mtd->size <= SZ_16M) + if (nor->params.size <= SZ_16M) return; nor->flags |= SNOR_F_4B_OPCODES; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index fcb7c2f7f001..48d5ec770b94 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2272,9 +2272,6 @@ static void bond_miimon_commit(struct bonding *bond) } else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { /* make it immediately active */ bond_set_active_slave(slave); - } else if (slave != primary) { - /* prevent it from being the active one */ - bond_set_backup_slave(slave); } slave_info(bond->dev, slave->dev, "link status definitely up, %u Mbps %s duplex\n", @@ -3702,32 +3699,35 @@ static int bond_neigh_init(struct neighbour *n) const struct net_device_ops *slave_ops; struct neigh_parms parms; struct slave *slave; - int ret; + int ret = 0; - slave = bond_first_slave(bond); + rcu_read_lock(); + slave = bond_first_slave_rcu(bond); if (!slave) - return 0; + goto out; slave_ops = slave->dev->netdev_ops; if (!slave_ops->ndo_neigh_setup) - return 0; - - parms.neigh_setup = NULL; - parms.neigh_cleanup = NULL; - ret = slave_ops->ndo_neigh_setup(slave->dev, &parms); - if (ret) - return ret; + goto out; - /* Assign slave's neigh_cleanup to neighbour in case cleanup is called - * after the last slave has been detached. Assumes that all slaves - * utilize the same neigh_cleanup (true at this writing as only user - * is ipoib). + /* TODO: find another way [1] to implement this. + * Passing a zeroed structure is fragile, + * but at least we do not pass garbage. 
+ * + * [1] One way would be that ndo_neigh_setup() never touch + * struct neigh_parms, but propagate the new neigh_setup() + * back to ___neigh_create() / neigh_parms_alloc() */ - n->parms->neigh_cleanup = parms.neigh_cleanup; + memset(&parms, 0, sizeof(parms)); + ret = slave_ops->ndo_neigh_setup(slave->dev, &parms); - if (!parms.neigh_setup) - return 0; + if (ret) + goto out; - return parms.neigh_setup(n); + if (parms.neigh_setup) + ret = parms.neigh_setup(n); +out: + rcu_read_unlock(); + return ret; } /* The bonding ndo_neigh_setup is called at init time beofre any diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index c8e1a04ba384..9df2007b5e56 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -1302,7 +1302,7 @@ static int at91_can_probe(struct platform_device *pdev) goto exit_put; } - addr = ioremap_nocache(res->start, resource_size(res)); + addr = ioremap(res->start, resource_size(res)); if (!addr) { err = -ENOMEM; goto exit_release; diff --git a/drivers/net/can/cc770/cc770_isa.c b/drivers/net/can/cc770/cc770_isa.c index b9047d8110d5..194c86e0f340 100644 --- a/drivers/net/can/cc770/cc770_isa.c +++ b/drivers/net/can/cc770/cc770_isa.c @@ -175,7 +175,7 @@ static int cc770_isa_probe(struct platform_device *pdev) err = -EBUSY; goto exit; } - base = ioremap_nocache(mem[idx], iosize); + base = ioremap(mem[idx], iosize); if (!base) { err = -ENOMEM; goto exit_release; diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index a929cdda9ab2..94d10ec954a0 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -389,6 +389,34 @@ static struct flexcan_mb __iomem *flexcan_get_mb(const struct flexcan_priv *priv (&priv->regs->mb[bank][priv->mb_size * mb_index]); } +static int flexcan_low_power_enter_ack(struct flexcan_priv *priv) +{ + struct flexcan_regs __iomem *regs = priv->regs; + unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; + + while (timeout-- && !(priv->read(®s->mcr) & FLEXCAN_MCR_LPM_ACK)) + udelay(10); + + if (!(priv->read(®s->mcr) & FLEXCAN_MCR_LPM_ACK)) + return -ETIMEDOUT; + + return 0; +} + +static int flexcan_low_power_exit_ack(struct flexcan_priv *priv) +{ + struct flexcan_regs __iomem *regs = priv->regs; + unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; + + while (timeout-- && (priv->read(®s->mcr) & FLEXCAN_MCR_LPM_ACK)) + udelay(10); + + if (priv->read(®s->mcr) & FLEXCAN_MCR_LPM_ACK) + return -ETIMEDOUT; + + return 0; +} + static void flexcan_enable_wakeup_irq(struct flexcan_priv *priv, bool enable) { struct flexcan_regs __iomem *regs = priv->regs; @@ -407,7 +435,6 @@ static void flexcan_enable_wakeup_irq(struct flexcan_priv *priv, bool enable) static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->regs; - unsigned int ackval; u32 reg_mcr; reg_mcr = priv->read(®s->mcr); @@ -418,36 +445,24 @@ static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv) regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr, 1 << priv->stm.req_bit, 1 << priv->stm.req_bit); - /* get stop acknowledgment */ - if (regmap_read_poll_timeout(priv->stm.gpr, priv->stm.ack_gpr, - ackval, ackval & (1 << priv->stm.ack_bit), - 0, FLEXCAN_TIMEOUT_US)) - return -ETIMEDOUT; - - return 0; + return flexcan_low_power_enter_ack(priv); } static inline int flexcan_exit_stop_mode(struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->regs; - unsigned int ackval; u32 reg_mcr; /* remove stop request */ regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr, 1 << 
priv->stm.req_bit, 0); - /* get stop acknowledgment */ - if (regmap_read_poll_timeout(priv->stm.gpr, priv->stm.ack_gpr, - ackval, !(ackval & (1 << priv->stm.ack_bit)), - 0, FLEXCAN_TIMEOUT_US)) - return -ETIMEDOUT; reg_mcr = priv->read(®s->mcr); reg_mcr &= ~FLEXCAN_MCR_SLF_WAK; priv->write(reg_mcr, ®s->mcr); - return 0; + return flexcan_low_power_exit_ack(priv); } static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv) @@ -506,39 +521,25 @@ static inline int flexcan_transceiver_disable(const struct flexcan_priv *priv) static int flexcan_chip_enable(struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->regs; - unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; u32 reg; reg = priv->read(®s->mcr); reg &= ~FLEXCAN_MCR_MDIS; priv->write(reg, ®s->mcr); - while (timeout-- && (priv->read(®s->mcr) & FLEXCAN_MCR_LPM_ACK)) - udelay(10); - - if (priv->read(®s->mcr) & FLEXCAN_MCR_LPM_ACK) - return -ETIMEDOUT; - - return 0; + return flexcan_low_power_exit_ack(priv); } static int flexcan_chip_disable(struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->regs; - unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; u32 reg; reg = priv->read(®s->mcr); reg |= FLEXCAN_MCR_MDIS; priv->write(reg, ®s->mcr); - while (timeout-- && !(priv->read(®s->mcr) & FLEXCAN_MCR_LPM_ACK)) - udelay(10); - - if (!(priv->read(®s->mcr) & FLEXCAN_MCR_LPM_ACK)) - return -ETIMEDOUT; - - return 0; + return flexcan_low_power_enter_ack(priv); } static int flexcan_chip_freeze(struct flexcan_priv *priv) @@ -1722,6 +1723,9 @@ static int __maybe_unused flexcan_resume(struct device *device) netif_start_queue(dev); if (device_may_wakeup(device)) { disable_irq_wake(dev->irq); + err = flexcan_exit_stop_mode(priv); + if (err) + return err; } else { err = pm_runtime_force_resume(device); if (err) @@ -1767,14 +1771,9 @@ static int __maybe_unused flexcan_noirq_resume(struct device *device) { struct net_device *dev = dev_get_drvdata(device); struct flexcan_priv *priv = netdev_priv(dev); - int err; - if (netif_running(dev) && device_may_wakeup(device)) { + if (netif_running(dev) && device_may_wakeup(device)) flexcan_enable_wakeup_irq(priv, false); - err = flexcan_exit_stop_mode(priv); - if (err) - return err; - } return 0; } diff --git a/drivers/net/can/m_can/tcan4x5x.c b/drivers/net/can/m_can/tcan4x5x.c index 3db619209fe1..eacd428e07e9 100644 --- a/drivers/net/can/m_can/tcan4x5x.c +++ b/drivers/net/can/m_can/tcan4x5x.c @@ -101,6 +101,9 @@ #define TCAN4X5X_MODE_STANDBY BIT(6) #define TCAN4X5X_MODE_NORMAL BIT(7) +#define TCAN4X5X_DISABLE_WAKE_MSK (BIT(31) | BIT(30)) +#define TCAN4X5X_DISABLE_INH_MSK BIT(9) + #define TCAN4X5X_SW_RESET BIT(2) #define TCAN4X5X_MCAN_CONFIGURED BIT(5) @@ -164,6 +167,28 @@ static void tcan4x5x_check_wake(struct tcan4x5x_priv *priv) } } +static int tcan4x5x_reset(struct tcan4x5x_priv *priv) +{ + int ret = 0; + + if (priv->reset_gpio) { + gpiod_set_value(priv->reset_gpio, 1); + + /* tpulse_width minimum 30us */ + usleep_range(30, 100); + gpiod_set_value(priv->reset_gpio, 0); + } else { + ret = regmap_write(priv->regmap, TCAN4X5X_CONFIG, + TCAN4X5X_SW_RESET); + if (ret) + return ret; + } + + usleep_range(700, 1000); + + return ret; +} + static int regmap_spi_gather_write(void *context, const void *reg, size_t reg_len, const void *val, size_t val_len) @@ -338,15 +363,34 @@ static int tcan4x5x_init(struct m_can_classdev *cdev) return ret; } +static int tcan4x5x_disable_wake(struct m_can_classdev *cdev) +{ + struct tcan4x5x_priv *tcan4x5x = cdev->device_data; + + return 
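The flexcan refactor above folds four open-coded copies of the same LPM_ACK busy-wait into flexcan_low_power_enter_ack()/flexcan_low_power_exit_ack(). The underlying idiom is a bounded udelay() poll with one final re-check after the loop; a generic sketch (illustrative helper, not from the patch):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Poll until cond(ctx) holds, in 10us steps, for at most timeout_us. */
static int example_poll_ack(bool (*cond)(void *ctx), void *ctx,
			    unsigned int timeout_us)
{
	unsigned int timeout = timeout_us / 10;

	while (timeout-- && !cond(ctx))
		udelay(10);

	/* One last look, in case the flag flipped during the final delay. */
	return cond(ctx) ? 0 : -ETIMEDOUT;
}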
regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG, + TCAN4X5X_DISABLE_WAKE_MSK, 0x00); +} + +static int tcan4x5x_disable_state(struct m_can_classdev *cdev) +{ + struct tcan4x5x_priv *tcan4x5x = cdev->device_data; + + return regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG, + TCAN4X5X_DISABLE_INH_MSK, 0x01); +} + static int tcan4x5x_parse_config(struct m_can_classdev *cdev) { struct tcan4x5x_priv *tcan4x5x = cdev->device_data; + int ret; tcan4x5x->device_wake_gpio = devm_gpiod_get(cdev->dev, "device-wake", GPIOD_OUT_HIGH); if (IS_ERR(tcan4x5x->device_wake_gpio)) { - dev_err(cdev->dev, "device-wake gpio not defined\n"); - return -EINVAL; + if (PTR_ERR(tcan4x5x->device_wake_gpio) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + tcan4x5x_disable_wake(cdev); } tcan4x5x->reset_gpio = devm_gpiod_get_optional(cdev->dev, "reset", @@ -354,16 +398,17 @@ static int tcan4x5x_parse_config(struct m_can_classdev *cdev) if (IS_ERR(tcan4x5x->reset_gpio)) tcan4x5x->reset_gpio = NULL; + ret = tcan4x5x_reset(tcan4x5x); + if (ret) + return ret; + tcan4x5x->device_state_gpio = devm_gpiod_get_optional(cdev->dev, "device-state", GPIOD_IN); - if (IS_ERR(tcan4x5x->device_state_gpio)) + if (IS_ERR(tcan4x5x->device_state_gpio)) { tcan4x5x->device_state_gpio = NULL; - - tcan4x5x->power = devm_regulator_get_optional(cdev->dev, - "vsup"); - if (PTR_ERR(tcan4x5x->power) == -EPROBE_DEFER) - return -EPROBE_DEFER; + tcan4x5x_disable_state(cdev); + } return 0; } @@ -398,6 +443,12 @@ static int tcan4x5x_can_probe(struct spi_device *spi) if (!priv) return -ENOMEM; + priv->power = devm_regulator_get_optional(&spi->dev, "vsup"); + if (PTR_ERR(priv->power) == -EPROBE_DEFER) + return -EPROBE_DEFER; + else + priv->power = NULL; + mcan_class->device_data = priv; m_can_class_get_clocks(mcan_class); @@ -428,10 +479,6 @@ static int tcan4x5x_can_probe(struct spi_device *spi) spi_set_drvdata(spi, priv); - ret = tcan4x5x_parse_config(mcan_class); - if (ret) - goto out_clk; - /* Configure the SPI bus */ spi->bits_per_word = 32; ret = spi_setup(spi); @@ -441,7 +488,17 @@ static int tcan4x5x_can_probe(struct spi_device *spi) priv->regmap = devm_regmap_init(&spi->dev, &tcan4x5x_bus, &spi->dev, &tcan4x5x_regmap); - tcan4x5x_power_enable(priv->power, 1); + ret = tcan4x5x_power_enable(priv->power, 1); + if (ret) + goto out_clk; + + ret = tcan4x5x_parse_config(mcan_class); + if (ret) + goto out_power; + + ret = tcan4x5x_init(mcan_class); + if (ret) + goto out_power; ret = m_can_class_register(mcan_class); if (ret) diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c index 8caf7af0dee2..99101d7027a8 100644 --- a/drivers/net/can/mscan/mscan.c +++ b/drivers/net/can/mscan/mscan.c @@ -381,13 +381,12 @@ static int mscan_rx_poll(struct napi_struct *napi, int quota) struct net_device *dev = napi->dev; struct mscan_regs __iomem *regs = priv->reg_base; struct net_device_stats *stats = &dev->stats; - int npackets = 0; - int ret = 1; + int work_done = 0; struct sk_buff *skb; struct can_frame *frame; u8 canrflg; - while (npackets < quota) { + while (work_done < quota) { canrflg = in_8(®s->canrflg); if (!(canrflg & (MSCAN_RXF | MSCAN_ERR_IF))) break; @@ -408,18 +407,18 @@ static int mscan_rx_poll(struct napi_struct *napi, int quota) stats->rx_packets++; stats->rx_bytes += frame->can_dlc; - npackets++; + work_done++; netif_receive_skb(skb); } - if (!(in_8(®s->canrflg) & (MSCAN_RXF | MSCAN_ERR_IF))) { - napi_complete(&priv->napi); - clear_bit(F_RX_PROGRESS, &priv->flags); - if (priv->can.state < CAN_STATE_BUS_OFF) - out_8(®s->canrier, 
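tcan4x5x_disable_wake() above leans on regmap_update_bits(map, reg, mask, val), a read-modify-write that touches only the bits in mask; val is masked internally, so bits outside mask are ignored. That also explains the mechanics of tcan4x5x_disable_state(): its val of 0x01 does not overlap BIT(9), so the call effectively clears the INH bit rather than setting it. A sketch of the clear-bits usage (register names illustrative):

#include <linux/bits.h>
#include <linux/regmap.h>

#define EXAMPLE_CFG		0x0800			/* illustrative */
#define EXAMPLE_WAKE_MASK	(BIT(31) | BIT(30))	/* illustrative */

/* Clear the wake-config field; all other bits of the register survive. */
static int example_disable_wake(struct regmap *map)
{
	return regmap_update_bits(map, EXAMPLE_CFG, EXAMPLE_WAKE_MASK, 0);
}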
priv->shadow_canrier); - ret = 0; + if (work_done < quota) { + if (likely(napi_complete_done(&priv->napi, work_done))) { + clear_bit(F_RX_PROGRESS, &priv->flags); + if (priv->can.state < CAN_STATE_BUS_OFF) + out_8(®s->canrier, priv->shadow_canrier); + } } - return ret; + return work_done; } static irqreturn_t mscan_isr(int irq, void *dev_id) diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c index 1c4d32d1a542..d513fac50718 100644 --- a/drivers/net/can/sja1000/sja1000_isa.c +++ b/drivers/net/can/sja1000/sja1000_isa.c @@ -130,7 +130,7 @@ static int sja1000_isa_probe(struct platform_device *pdev) err = -EBUSY; goto exit; } - base = ioremap_nocache(mem[idx], iosize); + base = ioremap(mem[idx], iosize); if (!base) { err = -ENOMEM; goto exit_release; diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c index ff5a96f34085..d7222ba46622 100644 --- a/drivers/net/can/sja1000/sja1000_platform.c +++ b/drivers/net/can/sja1000/sja1000_platform.c @@ -229,7 +229,7 @@ static int sp_probe(struct platform_device *pdev) resource_size(res_mem), DRV_NAME)) return -EBUSY; - addr = devm_ioremap_nocache(&pdev->dev, res_mem->start, + addr = devm_ioremap(&pdev->dev, res_mem->start, resource_size(res_mem)); if (!addr) return -ENOMEM; diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index 2e57122f02fb..2f5c287eac95 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c @@ -344,9 +344,16 @@ static void slcan_transmit(struct work_struct *work) */ static void slcan_write_wakeup(struct tty_struct *tty) { - struct slcan *sl = tty->disc_data; + struct slcan *sl; + + rcu_read_lock(); + sl = rcu_dereference(tty->disc_data); + if (!sl) + goto out; schedule_work(&sl->tx_work); +out: + rcu_read_unlock(); } /* Send a can_frame to a TTY queue. 
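The slcan change above closes a race between the tty write_wakeup callback and slcan_close(): disc_data is now published and torn down under RCU, so a late wakeup either sees a valid pointer for its whole read-side section or sees NULL and bails, while close unpublishes the pointer and waits out all readers with synchronize_rcu() before the structure can go away. A condensed sketch of the pattern (types and names illustrative):

#include <linux/rcupdate.h>
#include <linux/workqueue.h>

struct example_priv {
	struct work_struct tx_work;
};

/* Reader (wakeup path): may legitimately observe NULL after close. */
static void example_write_wakeup(struct example_priv __rcu **slot)
{
	struct example_priv *priv;

	rcu_read_lock();
	priv = rcu_dereference(*slot);
	if (priv)
		schedule_work(&priv->tx_work);
	rcu_read_unlock();
}

/* Teardown: unpublish the pointer, wait out readers, then clean up. */
static void example_close(struct example_priv __rcu **slot,
			  struct example_priv *priv)
{
	rcu_assign_pointer(*slot, NULL);
	synchronize_rcu();		/* no reader can still hold priv */
	flush_work(&priv->tx_work);
}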
*/ @@ -644,10 +651,11 @@ static void slcan_close(struct tty_struct *tty) return; spin_lock_bh(&sl->lock); - tty->disc_data = NULL; + rcu_assign_pointer(tty->disc_data, NULL); sl->tty = NULL; spin_unlock_bh(&sl->lock); + synchronize_rcu(); flush_work(&sl->tx_work); /* Flush network side */ diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c index 8242fb287cbb..d1ddf763b188 100644 --- a/drivers/net/can/softing/softing_main.c +++ b/drivers/net/can/softing/softing_main.c @@ -777,7 +777,7 @@ static int softing_pdev_probe(struct platform_device *pdev) goto platform_resource_failed; card->dpram_phys = pres->start; card->dpram_size = resource_size(pres); - card->dpram = ioremap_nocache(card->dpram_phys, card->dpram_size); + card->dpram = ioremap(card->dpram_phys, card->dpram_size); if (!card->dpram) { dev_alert(&card->pdev->dev, "dpram ioremap failed\n"); goto ioremap_failed; diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 2f74f6704c12..a4b4b742c80c 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -918,7 +918,7 @@ static int gs_usb_probe(struct usb_interface *intf, GS_USB_BREQ_HOST_FORMAT, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 1, - intf->altsetting[0].desc.bInterfaceNumber, + intf->cur_altsetting->desc.bInterfaceNumber, hconf, sizeof(*hconf), 1000); @@ -941,7 +941,7 @@ static int gs_usb_probe(struct usb_interface *intf, GS_USB_BREQ_DEVICE_CONFIG, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 1, - intf->altsetting[0].desc.bInterfaceNumber, + intf->cur_altsetting->desc.bInterfaceNumber, dconf, sizeof(*dconf), 1000); diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c index 5fc0be564274..7ab87a758754 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c @@ -1590,7 +1590,7 @@ static int kvaser_usb_hydra_setup_endpoints(struct kvaser_usb *dev) struct usb_endpoint_descriptor *ep; int i; - iface_desc = &dev->intf->altsetting[0]; + iface_desc = dev->intf->cur_altsetting; for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { ep = &iface_desc->endpoint[i].desc; diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c index 07d2f3aa2c02..1b9957f12459 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c @@ -608,7 +608,7 @@ static int kvaser_usb_leaf_simple_cmd_async(struct kvaser_usb_net_priv *priv, struct kvaser_cmd *cmd; int err; - cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC); + cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); if (!cmd) return -ENOMEM; @@ -1140,7 +1140,7 @@ static int kvaser_usb_leaf_set_opt_mode(const struct kvaser_usb_net_priv *priv) struct kvaser_cmd *cmd; int rc; - cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) return -ENOMEM; @@ -1206,7 +1206,7 @@ static int kvaser_usb_leaf_flush_queue(struct kvaser_usb_net_priv *priv) struct kvaser_cmd *cmd; int rc; - cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) return -ENOMEM; @@ -1310,7 +1310,7 @@ static int kvaser_usb_leaf_setup_endpoints(struct kvaser_usb *dev) struct usb_endpoint_descriptor *endpoint; int i; - iface_desc = &dev->intf->altsetting[0]; + iface_desc = dev->intf->cur_altsetting; for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; diff --git 
a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index 464af939cd8a..c1dbab8c896d 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -60,6 +60,8 @@ enum xcan_reg { XCAN_TXMSG_BASE_OFFSET = 0x0100, /* TX Message Space */ XCAN_RXMSG_BASE_OFFSET = 0x1100, /* RX Message Space */ XCAN_RXMSG_2_BASE_OFFSET = 0x2100, /* RX Message Space */ + XCAN_AFR_2_MASK_OFFSET = 0x0A00, /* Acceptance Filter MASK */ + XCAN_AFR_2_ID_OFFSET = 0x0A04, /* Acceptance Filter ID */ }; #define XCAN_FRAME_ID_OFFSET(frame_base) ((frame_base) + 0x00) @@ -1809,6 +1811,11 @@ static int xcan_probe(struct platform_device *pdev) pm_runtime_put(&pdev->dev); + if (priv->devtype.flags & XCAN_FLAG_CANFD_2) { + priv->write_reg(priv, XCAN_AFR_2_ID_OFFSET, 0x00000000); + priv->write_reg(priv, XCAN_AFR_2_MASK_OFFSET, 0x00000000); + } + netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx buffers: actual %d, using %d\n", priv->reg_base, ndev->irq, priv->can.clock.freq, hw_tx_max, priv->tx_max); diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 36828f210030..edacacfc9365 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -347,7 +347,7 @@ static void b53_set_forwarding(struct b53_device *dev, int enable) * frames should be flooded or not. */ b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt); - mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN; + mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN; b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt); } @@ -526,6 +526,8 @@ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy) cpu_port = dsa_to_port(ds, port)->cpu_dp->index; + b53_br_egress_floods(ds, port, true, true); + if (dev->ops->irq_enable) ret = dev->ops->irq_enable(dev, port); if (ret) @@ -641,6 +643,8 @@ static void b53_enable_cpu_port(struct b53_device *dev, int port) b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), port_ctrl); b53_brcm_hdr_setup(dev->ds, port); + + b53_br_egress_floods(dev->ds, port, true, true); } static void b53_enable_mib(struct b53_device *dev) @@ -1821,19 +1825,26 @@ int b53_br_egress_floods(struct dsa_switch *ds, int port, struct b53_device *dev = ds->priv; u16 uc, mc; - b53_read16(dev, B53_CTRL_PAGE, B53_UC_FWD_EN, &uc); + b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc); if (unicast) uc |= BIT(port); else uc &= ~BIT(port); - b53_write16(dev, B53_CTRL_PAGE, B53_UC_FWD_EN, uc); + b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc); + + b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc); + if (multicast) + mc |= BIT(port); + else + mc &= ~BIT(port); + b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc); - b53_read16(dev, B53_CTRL_PAGE, B53_MC_FWD_EN, &mc); + b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc); if (multicast) mc |= BIT(port); else mc &= ~BIT(port); - b53_write16(dev, B53_CTRL_PAGE, B53_MC_FWD_EN, mc); + b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc); return 0; diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index e43040c9f9ee..3e8635311d0d 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -68,7 +68,7 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port) /* Force link status for IMP port */ reg = core_readl(priv, offset); - reg |= (MII_SW_OR | LINK_STS); + reg |= (MII_SW_OR | LINK_STS | GMII_SPEED_UP_2G); core_writel(priv, reg, offset); /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */ diff --git a/drivers/net/dsa/bcm_sf2_cfp.c 
b/drivers/net/dsa/bcm_sf2_cfp.c index f3f0c3f07391..1962c8330daa 100644 --- a/drivers/net/dsa/bcm_sf2_cfp.c +++ b/drivers/net/dsa/bcm_sf2_cfp.c @@ -358,7 +358,7 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port, return -EINVAL; } - ip_frag = be32_to_cpu(fs->m_ext.data[0]); + ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1); /* Locate the first rule available */ if (fs->location == RX_CLS_LOC_ANY) @@ -569,7 +569,7 @@ static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port, if (rule->fs.flow_type != fs->flow_type || rule->fs.ring_cookie != fs->ring_cookie || - rule->fs.m_ext.data[0] != fs->m_ext.data[0]) + rule->fs.h_ext.data[0] != fs->h_ext.data[0]) continue; switch (fs->flow_type & ~FLOW_EXT) { @@ -621,7 +621,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port, return -EINVAL; } - ip_frag = be32_to_cpu(fs->m_ext.data[0]); + ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1); layout = &udf_tcpip6_layout; slice_num = bcm_sf2_get_slice_number(layout, 0); diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c index 120a65d3e3ef..b016cc205f81 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.c +++ b/drivers/net/dsa/mv88e6xxx/global1.c @@ -360,6 +360,11 @@ int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port) { u16 ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST; + /* Use the default high priority for management frames sent to + * the CPU. + */ + port |= MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI; + return mv88e6390_g1_monitor_write(chip, ptr, port); } diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h index bc5a6b2bb1e4..5324c6f4ae90 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.h +++ b/drivers/net/dsa/mv88e6xxx/global1.h @@ -211,6 +211,7 @@ #define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_INGRESS_DEST 0x2000 #define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_EGRESS_DEST 0x2100 #define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST 0x3000 +#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI 0x00e0 #define MV88E6390_G1_MONITOR_MGMT_CTL_DATA_MASK 0x00ff /* Offset 0x1C: Global Control 2 */ diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index 7fe256c5739d..0b43c650e100 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c @@ -393,7 +393,7 @@ phy_interface_t mv88e6390x_port_max_speed_mode(int port) } static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port, - phy_interface_t mode) + phy_interface_t mode, bool force) { u8 lane; u16 cmode; @@ -427,8 +427,8 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port, cmode = 0; } - /* cmode doesn't change, nothing to do for us */ - if (cmode == chip->ports[port].cmode) + /* cmode doesn't change, nothing to do for us unless forced */ + if (cmode == chip->ports[port].cmode && !force) return 0; lane = mv88e6xxx_serdes_get_lane(chip, port); @@ -484,7 +484,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, if (port != 9 && port != 10) return -EOPNOTSUPP; - return mv88e6xxx_port_set_cmode(chip, port, mode); + return mv88e6xxx_port_set_cmode(chip, port, mode, false); } int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port, @@ -504,7 +504,7 @@ int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port, break; } - return mv88e6xxx_port_set_cmode(chip, port, mode); + return mv88e6xxx_port_set_cmode(chip, port, mode, false); } static int mv88e6341_port_set_cmode_writable(struct 
mv88e6xxx_chip *chip, @@ -555,7 +555,7 @@ int mv88e6341_port_set_cmode(struct mv88e6xxx_chip *chip, int port, if (err) return err; - return mv88e6xxx_port_set_cmode(chip, port, mode); + return mv88e6xxx_port_set_cmode(chip, port, mode, true); } int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode) diff --git a/drivers/net/dsa/ocelot/Kconfig b/drivers/net/dsa/ocelot/Kconfig index 0031ca814346..6f9804093150 100644 --- a/drivers/net/dsa/ocelot/Kconfig +++ b/drivers/net/dsa/ocelot/Kconfig @@ -2,6 +2,7 @@ config NET_DSA_MSCC_FELIX tristate "Ocelot / Felix Ethernet switch support" depends on NET_DSA && PCI + depends on NET_VENDOR_MICROSEMI select MSCC_OCELOT_SWITCH select NET_DSA_TAG_OCELOT help diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index a51ac088c0bc..bb91f3d17cf2 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -582,7 +582,7 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv, struct device *dev = &priv->spidev->dev; struct device_node *child; - for_each_child_of_node(ports_node, child) { + for_each_available_child_of_node(ports_node, child) { struct device_node *phy_node; phy_interface_t phy_mode; u32 index; @@ -1569,8 +1569,8 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled) if (enabled) { /* Enable VLAN filtering. */ - tpid = ETH_P_8021AD; - tpid2 = ETH_P_8021Q; + tpid = ETH_P_8021Q; + tpid2 = ETH_P_8021AD; } else { /* Disable VLAN filtering. */ tpid = ETH_P_SJA1105; @@ -1579,9 +1579,9 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled) table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; general_params = table->entries; - /* EtherType used to identify outer tagged (S-tag) VLAN traffic */ - general_params->tpid = tpid; /* EtherType used to identify inner tagged (C-tag) VLAN traffic */ + general_params->tpid = tpid; + /* EtherType used to identify outer tagged (S-tag) VLAN traffic */ general_params->tpid2 = tpid2; /* When VLAN filtering is on, we need to at least be able to * decode management traffic through the "backup plan". 
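
For context, the two EtherType constants swapped in the sja1105 VLAN
filtering hunk above are, per include/uapi/linux/if_ether.h:

	#define ETH_P_8021Q	0x8100	/* 802.1Q C-tag (customer VLAN) */
	#define ETH_P_8021AD	0x88A8	/* 802.1ad S-tag (service VLAN) */

so with VLAN filtering enabled, general_params->tpid now matches inner
(C-tagged) traffic and general_params->tpid2 matches outer (S-tagged)
traffic, consistent with the reordered comments in the same hunk.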
@@ -1855,7 +1855,7 @@ static netdev_tx_t sja1105_port_deferred_xmit(struct dsa_switch *ds, int port, if (!clone) goto out; - sja1105_ptp_txtstamp_skb(ds, slot, clone); + sja1105_ptp_txtstamp_skb(ds, port, clone); out: mutex_unlock(&priv->mgmt_lock); diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c index 54258a25031d..43ab7589d0d0 100644 --- a/drivers/net/dsa/sja1105/sja1105_ptp.c +++ b/drivers/net/dsa/sja1105/sja1105_ptp.c @@ -234,7 +234,7 @@ int sja1105_ptp_commit(struct dsa_switch *ds, struct sja1105_ptp_cmd *cmd, if (rw == SPI_WRITE) priv->info->ptp_cmd_packing(buf, cmd, PACK); - rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->ptp_control, buf, + rc = sja1105_xfer_buf(priv, rw, regs->ptp_control, buf, SJA1105_SIZE_PTP_CMD); if (rw == SPI_READ) @@ -659,7 +659,7 @@ void sja1105_ptp_clock_unregister(struct dsa_switch *ds) ptp_data->clock = NULL; } -void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot, +void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int port, struct sk_buff *skb) { struct sja1105_private *priv = ds->priv; @@ -679,7 +679,7 @@ void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot, goto out; } - rc = sja1105_ptpegr_ts_poll(ds, slot, &ts); + rc = sja1105_ptpegr_ts_poll(ds, port, &ts); if (rc < 0) { dev_err(ds->dev, "timed out polling for tstamp\n"); kfree_skb(skb); diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c index 0d03e13e9909..63d2311817c4 100644 --- a/drivers/net/dsa/sja1105/sja1105_static_config.c +++ b/drivers/net/dsa/sja1105/sja1105_static_config.c @@ -142,6 +142,9 @@ static size_t sja1105et_general_params_entry_packing(void *buf, void *entry_ptr, return size; } +/* TPID and TPID2 are intentionally reversed so that semantic + * compatibility with E/T is kept. 
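 *
 * sja1105_packing(buf, &field, start, end, size, op) is a thin wrapper
 * around the generic lib/packing.c helper: with op == PACK it writes the
 * field into bits [start:end] of the buffer, and with op == UNPACK it
 * extracts them. Swapping which struct field is bound to bits [74:59]
 * versus [57:42] therefore changes only the wire layout while the
 * software meaning of tpid/tpid2 stays the same. Hypothetical
 * stand-alone use of the underlying helper (buf_len illustrative):
 *
 *	u64 tpid = ETH_P_8021Q;
 *	packing(buf, &tpid, 57, 42, buf_len, PACK, QUIRK_LSW32_IS_FIRST);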
+ */ static size_t sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr, enum packing_op op) @@ -166,9 +169,9 @@ sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr, sja1105_packing(buf, &entry->mirr_port, 141, 139, size, op); sja1105_packing(buf, &entry->vlmarker, 138, 107, size, op); sja1105_packing(buf, &entry->vlmask, 106, 75, size, op); - sja1105_packing(buf, &entry->tpid, 74, 59, size, op); + sja1105_packing(buf, &entry->tpid2, 74, 59, size, op); sja1105_packing(buf, &entry->ignore2stf, 58, 58, size, op); - sja1105_packing(buf, &entry->tpid2, 57, 42, size, op); + sja1105_packing(buf, &entry->tpid, 57, 42, size, op); sja1105_packing(buf, &entry->queue_ts, 41, 41, size, op); sja1105_packing(buf, &entry->egrmirrvid, 40, 29, size, op); sja1105_packing(buf, &entry->egrmirrpcp, 28, 26, size, op); diff --git a/drivers/net/dsa/sja1105/sja1105_tas.c b/drivers/net/dsa/sja1105/sja1105_tas.c index 26b925b5dace..fa6750d973d7 100644 --- a/drivers/net/dsa/sja1105/sja1105_tas.c +++ b/drivers/net/dsa/sja1105/sja1105_tas.c @@ -477,11 +477,6 @@ int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port, if (admin->cycle_time_extension) return -ENOTSUPP; - if (!ns_to_sja1105_delta(admin->base_time)) { - dev_err(ds->dev, "A base time of zero is not hardware-allowed\n"); - return -ERANGE; - } - for (i = 0; i < admin->num_entries; i++) { s64 delta_ns = admin->entries[i].interval; s64 delta_cycles = ns_to_sja1105_delta(delta_ns); diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c index 80ef3e15bd22..9daef4c8feef 100644 --- a/drivers/net/ethernet/alacritech/slicoss.c +++ b/drivers/net/ethernet/alacritech/slicoss.c @@ -1791,7 +1791,7 @@ static int slic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) sdev->is_fiber = slic_is_fiber(pdev->subsystem_device); sdev->pdev = pdev; sdev->netdev = dev; - sdev->regs = ioremap_nocache(pci_resource_start(pdev, 0), + sdev->regs = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); if (!sdev->regs) { dev_err(&pdev->dev, "failed to map registers\n"); diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 4cd53fc338b5..1671c1f36691 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -1332,10 +1332,10 @@ static int request_and_map(struct platform_device *pdev, const char *name, return -EBUSY; } - *ptr = devm_ioremap_nocache(device, region->start, + *ptr = devm_ioremap(device, region->start, resource_size(region)); if (*ptr == NULL) { - dev_err(device, "ioremap_nocache of %s failed!", name); + dev_err(device, "ioremap of %s failed!", name); return -ENOMEM; } diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h index 7c941eba0bc9..0ce37d54ed10 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.h +++ b/drivers/net/ethernet/amazon/ena/ena_com.h @@ -72,7 +72,7 @@ /*****************************************************************************/ /* ENA adaptive interrupt moderation settings */ -#define ENA_INTR_INITIAL_TX_INTERVAL_USECS 196 +#define ENA_INTR_INITIAL_TX_INTERVAL_USECS 64 #define ENA_INTR_INITIAL_RX_INTERVAL_USECS 0 #define ENA_DEFAULT_INTR_DELAY_RESOLUTION 1 diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index a3250dcf7d53..fc96c66b44cb 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ 
b/drivers/net/ethernet/amazon/ena/ena_ethtool.c @@ -315,10 +315,9 @@ static int ena_get_coalesce(struct net_device *net_dev, ena_com_get_nonadaptive_moderation_interval_tx(ena_dev) * ena_dev->intr_delay_resolution; - if (!ena_com_get_adaptive_moderation_enabled(ena_dev)) - coalesce->rx_coalesce_usecs = - ena_com_get_nonadaptive_moderation_interval_rx(ena_dev) - * ena_dev->intr_delay_resolution; + coalesce->rx_coalesce_usecs = + ena_com_get_nonadaptive_moderation_interval_rx(ena_dev) + * ena_dev->intr_delay_resolution; coalesce->use_adaptive_rx_coalesce = ena_com_get_adaptive_moderation_enabled(ena_dev); @@ -367,12 +366,6 @@ static int ena_set_coalesce(struct net_device *net_dev, ena_update_tx_rings_intr_moderation(adapter); - if (coalesce->use_adaptive_rx_coalesce) { - if (!ena_com_get_adaptive_moderation_enabled(ena_dev)) - ena_com_enable_adaptive_moderation(ena_dev); - return 0; - } - rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev, coalesce->rx_coalesce_usecs); if (rc) @@ -380,10 +373,13 @@ static int ena_set_coalesce(struct net_device *net_dev, ena_update_rx_rings_intr_moderation(adapter); - if (!coalesce->use_adaptive_rx_coalesce) { - if (ena_com_get_adaptive_moderation_enabled(ena_dev)) - ena_com_disable_adaptive_moderation(ena_dev); - } + if (coalesce->use_adaptive_rx_coalesce && + !ena_com_get_adaptive_moderation_enabled(ena_dev)) + ena_com_enable_adaptive_moderation(ena_dev); + + if (!coalesce->use_adaptive_rx_coalesce && + ena_com_get_adaptive_moderation_enabled(ena_dev)) + ena_com_disable_adaptive_moderation(ena_dev); return 0; } diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index d46a912002ff..948583fdcc28 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -1238,8 +1238,8 @@ static int ena_io_poll(struct napi_struct *napi, int budget) struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi); struct ena_ring *tx_ring, *rx_ring; - u32 tx_work_done; - u32 rx_work_done; + int tx_work_done; + int rx_work_done = 0; int tx_budget; int napi_comp_call = 0; int ret; @@ -1256,7 +1256,11 @@ static int ena_io_poll(struct napi_struct *napi, int budget) } tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget); - rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget); + /* On netpoll the budget is zero and the handler should only clean the + * tx completions. 
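 *
 * This follows the NAPI contract: netpoll invokes ->poll() with a zero
 * budget and expects only Tx completions to be processed, never Rx work.
 * The resulting shape of the handler, as a sketch:
 *
 *	tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
 *	rx_work_done = 0;
 *	if (budget)
 *		rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);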
+ */ + if (likely(budget)) + rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget); /* If the device is about to reset or down, avoid unmask * the interrupt and return 0 so NAPI won't reschedule diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index 1793950f0582..307e402db8c9 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -1161,7 +1161,7 @@ static int au1000_probe(struct platform_device *pdev) /* aup->mac is the base address of the MAC's registers */ aup->mac = (struct mac_reg *) - ioremap_nocache(base->start, resource_size(base)); + ioremap(base->start, resource_size(base)); if (!aup->mac) { dev_err(&pdev->dev, "failed to ioremap MAC registers\n"); err = -ENXIO; @@ -1169,7 +1169,7 @@ static int au1000_probe(struct platform_device *pdev) } /* Setup some variables for quick register address access */ - aup->enable = (u32 *)ioremap_nocache(macen->start, + aup->enable = (u32 *)ioremap(macen->start, resource_size(macen)); if (!aup->enable) { dev_err(&pdev->dev, "failed to ioremap MAC enable register\n"); @@ -1178,7 +1178,7 @@ static int au1000_probe(struct platform_device *pdev) } aup->mac_id = pdev->id; - aup->macdma = ioremap_nocache(macdma->start, resource_size(macdma)); + aup->macdma = ioremap(macdma->start, resource_size(macdma)); if (!aup->macdma) { dev_err(&pdev->dev, "failed to ioremap MACDMA registers\n"); err = -ENXIO; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c index a880f10e3e70..8083173f1a8f 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c @@ -129,13 +129,13 @@ struct xgbe_stats { #define XGMAC_MMC_STAT(_string, _var) \ { _string, \ - FIELD_SIZEOF(struct xgbe_mmc_stats, _var), \ + sizeof_field(struct xgbe_mmc_stats, _var), \ offsetof(struct xgbe_prv_data, mmc_stats._var), \ } #define XGMAC_EXT_STAT(_string, _var) \ { _string, \ - FIELD_SIZEOF(struct xgbe_ext_stats, _var), \ + sizeof_field(struct xgbe_ext_stats, _var), \ offsetof(struct xgbe_prv_data, ext_stats._var), \ } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index a17a4da7bc15..c85e3e29012c 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -403,6 +403,8 @@ int aq_nic_start(struct aq_nic_s *self) if (err < 0) goto err_exit; + aq_nic_set_loopback(self); + err = self->aq_hw_ops->hw_start(self->aq_hw); if (err < 0) goto err_exit; @@ -413,8 +415,6 @@ int aq_nic_start(struct aq_nic_s *self) INIT_WORK(&self->service_task, aq_nic_service_task); - aq_nic_set_loopback(self); - timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0); aq_nic_service_timer_cb(&self->service_timer); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index 2bb329606794..6b27af0db499 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c @@ -253,7 +253,7 @@ static int aq_pci_probe(struct pci_dev *pdev, goto err_free_aq_hw; } - self->aq_hw->mmio = ioremap_nocache(mmio_pa, reg_sz); + self->aq_hw->mmio = ioremap(mmio_pa, reg_sz); if (!self->aq_hw->mmio) { err = -EIO; goto err_free_aq_hw; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index 58e891af6e09..ec041f78d063 100644 --- 
a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -1525,9 +1525,6 @@ const struct aq_hw_ops hw_atl_ops_b0 = { .rx_extract_ts = hw_atl_b0_rx_extract_ts, .extract_hwts = hw_atl_b0_extract_hwts, .hw_set_offload = hw_atl_b0_hw_offload_set, - .hw_get_hw_stats = hw_atl_utils_get_hw_stats, - .hw_get_fw_version = hw_atl_utils_get_fw_version, - .hw_set_offload = hw_atl_b0_hw_offload_set, .hw_set_loopback = hw_atl_b0_set_loopback, .hw_set_fc = hw_atl_b0_set_fc, }; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index 8910b62e67ed..f547baa6c954 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c @@ -667,9 +667,7 @@ int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self) u32 speed; mpi_state = hw_atl_utils_mpi_get_state(self); - speed = mpi_state & (FW2X_RATE_100M | FW2X_RATE_1G | - FW2X_RATE_2G5 | FW2X_RATE_5G | - FW2X_RATE_10G); + speed = mpi_state >> HW_ATL_MPI_SPEED_SHIFT; if (!speed) { link_status->mbps = 0U; diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c index 8f5021091eee..60ba69db48c6 100644 --- a/drivers/net/ethernet/atheros/ag71xx.c +++ b/drivers/net/ethernet/atheros/ag71xx.c @@ -313,7 +313,7 @@ struct ag71xx { struct ag71xx_desc *stop_desc; dma_addr_t stop_desc_dma; - int phy_if_mode; + phy_interface_t phy_if_mode; struct delayed_work restart_work; struct timer_list oom_timer; @@ -1687,8 +1687,7 @@ static int ag71xx_probe(struct platform_device *pdev) goto err_free; } - ag->mac_base = devm_ioremap_nocache(&pdev->dev, res->start, - resource_size(res)); + ag->mac_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!ag->mac_base) { err = -ENOMEM; goto err_free; @@ -1744,7 +1743,7 @@ static int ag71xx_probe(struct platform_device *pdev) eth_random_addr(ndev->dev_addr); } - err = of_get_phy_mode(np, ag->phy_if_mode); + err = of_get_phy_mode(np, &ag->phy_if_mode); if (err) { netif_err(ag, probe, ndev, "missing phy-mode property in DT\n"); goto err_free; diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index 035dbb1b2c98..ec25fd81985d 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -1516,8 +1516,10 @@ static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset) int ethaddr_bytes = ETH_ALEN; memset(ppattern + offset, 0xff, magicsync); - for (j = 0; j < magicsync; j++) - set_bit(len++, (unsigned long *) pmask); + for (j = 0; j < magicsync; j++) { + pmask[len >> 3] |= BIT(len & 7); + len++; + } for (j = 0; j < B44_MAX_PATTERNS; j++) { if ((B44_PATTERN_SIZE - len) >= ETH_ALEN) @@ -1529,7 +1531,8 @@ static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset) for (k = 0; k< ethaddr_bytes; k++) { ppattern[offset + magicsync + (j * ETH_ALEN) + k] = macaddr[k]; - set_bit(len++, (unsigned long *) pmask); + pmask[len >> 3] |= BIT(len & 7); + len++; } } return len - 1; diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 825af709708e..d6b1a153f9df 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -2323,7 +2323,7 @@ static int bcm_sysport_map_queues(struct notifier_block *nb, ring->switch_queue = qp; ring->switch_port = port; ring->inspect = true; - priv->ring_map[q + 
port * num_tx_queues] = ring; + priv->ring_map[qp + port * num_tx_queues] = ring; qp++; } @@ -2338,7 +2338,7 @@ static int bcm_sysport_unmap_queues(struct notifier_block *nb, struct net_device *slave_dev; unsigned int num_tx_queues; struct net_device *dev; - unsigned int q, port; + unsigned int q, qp, port; priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier); if (priv->netdev != info->master) @@ -2364,7 +2364,8 @@ static int bcm_sysport_unmap_queues(struct notifier_block *nb, continue; ring->inspect = false; - priv->ring_map[q + port * num_tx_queues] = NULL; + qp = ring->switch_queue; + priv->ring_map[qp + port * num_tx_queues] = NULL; } return 0; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 8b08cb18e363..3f63ffd7561b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -1109,7 +1109,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp) for (i = 0; i < E1H_FUNC_MAX / 2; i++) { u32 func_config = MF_CFG_RD(bp, - func_mf_config[BP_PORT(bp) + 2 * i]. + func_mf_config[BP_PATH(bp) + 2 * i]. config); func_num += ((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 192ff8d5da32..61fa32cdd3e3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -9976,10 +9976,18 @@ static void bnx2x_recovery_failed(struct bnx2x *bp) */ static void bnx2x_parity_recover(struct bnx2x *bp) { - bool global = false; u32 error_recovered, error_unrecovered; - bool is_parity; + bool is_parity, global = false; +#ifdef CONFIG_BNX2X_SRIOV + int vf_idx; + + for (vf_idx = 0; vf_idx < bp->requested_nr_virtfn; vf_idx++) { + struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); + if (vf) + vf->state = VF_LOST; + } +#endif DP(NETIF_MSG_HW, "Handling parity\n"); while (1) { switch (bp->recovery_state) { @@ -14045,7 +14053,7 @@ static int bnx2x_init_one(struct pci_dev *pdev, rc = -ENOMEM; goto init_one_freemem; } - bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2), + bp->doorbells = ioremap(pci_resource_start(pdev, 2), doorbell_size); } if (!bp->doorbells) { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index 7a6e82db4231..bacc8552bce1 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -1536,8 +1536,11 @@ void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj, ((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_MAC_CREDIT_CNT) / \ func_num + GET_NUM_VFS_PER_PF(bp) * VF_MAC_CREDIT_CNT) +#define BNX2X_VFS_VLAN_CREDIT(bp) \ + (GET_NUM_VFS_PER_PATH(bp) * VF_VLAN_CREDIT_CNT) + #define PF_VLAN_CREDIT_E2(bp, func_num) \ - ((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_VLAN_CREDIT_CNT) / \ + ((MAX_VLAN_CREDIT_E2 - 1 - BNX2X_VFS_VLAN_CREDIT(bp)) / \ func_num + GET_NUM_VFS_PER_PF(bp) * VF_VLAN_CREDIT_CNT) #endif /* BNX2X_SP_VERBS */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index b6ebd92ec565..3a716c015415 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -139,6 +139,7 @@ struct bnx2x_virtf { #define VF_ACQUIRED 1 /* VF acquired, but not initialized */ #define VF_ENABLED 2 /* VF Enabled */ #define VF_RESET 3 /* VF FLR'd, 
pending cleanup */ +#define VF_LOST 4 /* Recovery while VFs are loaded */ bool flr_clnup_stage; /* true during flr cleanup */ bool malicious; /* true if FW indicated so, until FLR */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 0752b7fa4d9c..ea0e9394f898 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -2107,6 +2107,18 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, { int i; + if (vf->state == VF_LOST) { + /* Just ack the FW and return if VFs are lost + * in case of parity error. VFs are supposed to be timedout + * on waiting for PF response. + */ + DP(BNX2X_MSG_IOV, + "VF 0x%x lost, not handling the request\n", vf->abs_vfid); + + storm_memset_vf_mbx_ack(bp, vf->abs_vfid); + return; + } + /* check if tlv type is known */ if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) { /* Lock the per vf op mutex and note the locker's identity. diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 85983f0e3134..e6f18f6070ef 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -2001,6 +2001,9 @@ static int bnxt_async_event_process(struct bnxt *bp, case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: { u32 data1 = le32_to_cpu(cmpl->event_data1); + if (!bp->fw_health) + goto async_event_process_exit; + bp->fw_reset_timestamp = jiffies; bp->fw_reset_min_dsecs = cmpl->timestamp_lo; if (!bp->fw_reset_min_dsecs) @@ -4421,8 +4424,9 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX); - flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE | - FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT; + flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE; + if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET) + flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT; if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT | FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT; @@ -6186,7 +6190,7 @@ static void bnxt_hwrm_set_coal_params(struct bnxt *bp, tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq); val = clamp_t(u16, tmr, 1, coal_cap->cmpl_aggr_dma_tmr_during_int_max); - req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(tmr); + req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val); req->enables |= cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE); } @@ -7115,14 +7119,6 @@ static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp) rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) goto err_recovery_out; - if (!fw_health) { - fw_health = kzalloc(sizeof(*fw_health), GFP_KERNEL); - bp->fw_health = fw_health; - if (!fw_health) { - rc = -ENOMEM; - goto err_recovery_out; - } - } fw_health->flags = le32_to_cpu(resp->flags); if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) && !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) { @@ -8796,6 +8792,9 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) if (fw_reset) { if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) bnxt_ulp_stop(bp); + bnxt_free_ctx_mem(bp); + kfree(bp->ctx); + bp->ctx = NULL; rc = bnxt_fw_init_one(bp); if (rc) { set_bit(BNXT_STATE_ABORT_ERR, &bp->state); @@ -9990,8 +9989,7 @@ static void bnxt_fw_health_check(struct bnxt *bp) struct bnxt_fw_health *fw_health = bp->fw_health; u32 val; - if (!fw_health || !fw_health->enabled || 
- test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) + if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) return; if (fw_health->tmr_counter) { @@ -10482,6 +10480,23 @@ static void bnxt_init_dflt_coal(struct bnxt *bp) bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; } +static void bnxt_alloc_fw_health(struct bnxt *bp) +{ + if (bp->fw_health) + return; + + if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) && + !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) + return; + + bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL); + if (!bp->fw_health) { + netdev_warn(bp->dev, "Failed to allocate fw_health\n"); + bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET; + bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; + } +} + static int bnxt_fw_init_one_p1(struct bnxt *bp) { int rc; @@ -10528,6 +10543,7 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp) netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n", rc); + bnxt_alloc_fw_health(bp); rc = bnxt_hwrm_error_recovery_qcfg(bp); if (rc) netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n", @@ -10609,6 +10625,12 @@ static int bnxt_fw_init_one(struct bnxt *bp) rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false); if (rc) return rc; + + /* In case fw capabilities have changed, destroy the unneeded + * reporters and create newly capable ones. + */ + bnxt_dl_fw_reporters_destroy(bp, false); + bnxt_dl_fw_reporters_create(bp); bnxt_fw_init_one_p3(bp); return 0; } @@ -10751,8 +10773,7 @@ static void bnxt_fw_reset_task(struct work_struct *work) bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10); return; case BNXT_FW_RESET_STATE_ENABLE_DEV: - if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) && - bp->fw_health) { + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) { u32 val; val = bnxt_fw_health_readl(bp, @@ -11044,11 +11065,23 @@ static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1, struct flow_keys *keys1 = &f1->fkeys; struct flow_keys *keys2 = &f2->fkeys; - if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src && - keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst && - keys1->ports.ports == keys2->ports.ports && - keys1->basic.ip_proto == keys2->basic.ip_proto && - keys1->basic.n_proto == keys2->basic.n_proto && + if (keys1->basic.n_proto != keys2->basic.n_proto || + keys1->basic.ip_proto != keys2->basic.ip_proto) + return false; + + if (keys1->basic.n_proto == htons(ETH_P_IP)) { + if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src || + keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst) + return false; + } else { + if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src, + sizeof(keys1->addrs.v6addrs.src)) || + memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst, + sizeof(keys1->addrs.v6addrs.dst))) + return false; + } + + if (keys1->ports.ports == keys2->ports.ports && keys1->control.flags == keys2->control.flags && ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) && ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr)) @@ -11340,7 +11373,7 @@ int bnxt_get_port_parent_id(struct net_device *dev, return -EOPNOTSUPP; /* The PF and it's VF-reps only support the switchdev framework */ - if (!BNXT_PF(bp)) + if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID)) return -EOPNOTSUPP; ppid->id_len = sizeof(bp->switch_id); @@ -11396,11 +11429,11 @@ static void bnxt_remove_one(struct pci_dev *pdev) struct net_device *dev = pci_get_drvdata(pdev); struct bnxt *bp = netdev_priv(dev); - if (BNXT_PF(bp)) { + if (BNXT_PF(bp)) bnxt_sriov_disable(bp); - bnxt_dl_unregister(bp); - } + 
bnxt_dl_fw_reporters_destroy(bp, true); + bnxt_dl_unregister(bp); pci_disable_pcie_error_reporting(pdev); unregister_netdev(dev); bnxt_shutdown_tc(bp); @@ -11415,6 +11448,8 @@ static void bnxt_remove_one(struct pci_dev *pdev) bnxt_dcb_free(bp); kfree(bp->edev); bp->edev = NULL; + kfree(bp->fw_health); + bp->fw_health = NULL; bnxt_cleanup_pci(bp); bnxt_free_ctx_mem(bp); kfree(bp->ctx); @@ -11711,6 +11746,7 @@ static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[]) put_unaligned_le32(dw, &dsn[0]); pci_read_config_dword(pdev, pos + 4, &dw); put_unaligned_le32(dw, &dsn[4]); + bp->flags |= BNXT_FLAG_DSN_VALID; return 0; } @@ -11822,9 +11858,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (BNXT_PF(bp)) { /* Read the adapter's DSN to use as the eswitch switch_id */ - rc = bnxt_pcie_dsn_get(bp, bp->switch_id); - if (rc) - goto init_err_pci_clean; + bnxt_pcie_dsn_get(bp, bp->switch_id); } /* MTU range: 60 - FW defined max */ @@ -11875,8 +11909,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) goto init_err_cleanup_tc; - if (BNXT_PF(bp)) - bnxt_dl_register(bp); + bnxt_dl_register(bp); + bnxt_dl_fw_reporters_create(bp); netdev_info(dev, "%s found at mem %lx, node addr %pM\n", board_info[ent->driver_data].name, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 505af5cfb1bd..f14335433a64 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1532,6 +1532,7 @@ struct bnxt { #define BNXT_FLAG_NO_AGG_RINGS 0x20000 #define BNXT_FLAG_RX_PAGE_MODE 0x40000 #define BNXT_FLAG_MULTI_HOST 0x100000 + #define BNXT_FLAG_DSN_VALID 0x200000 #define BNXT_FLAG_DOUBLE_DB 0x400000 #define BNXT_FLAG_CHIP_NITRO_A0 0x1000000 #define BNXT_FLAG_DIM 0x2000000 @@ -1936,9 +1937,6 @@ static inline bool bnxt_cfa_hwrm_message(u16 req_type) case HWRM_CFA_ENCAP_RECORD_FREE: case HWRM_CFA_DECAP_FILTER_ALLOC: case HWRM_CFA_DECAP_FILTER_FREE: - case HWRM_CFA_NTUPLE_FILTER_ALLOC: - case HWRM_CFA_NTUPLE_FILTER_FREE: - case HWRM_CFA_NTUPLE_FILTER_CFG: case HWRM_CFA_EM_FLOW_ALLOC: case HWRM_CFA_EM_FLOW_FREE: case HWRM_CFA_EM_FLOW_CFG: diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index acb2dd64c023..3eedd4477218 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -39,11 +39,10 @@ static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter, struct netlink_ext_ack *extack) { struct bnxt *bp = devlink_health_reporter_priv(reporter); - struct bnxt_fw_health *health = bp->fw_health; u32 val, health_status; int rc; - if (!health || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) + if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) return 0; val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); @@ -126,21 +125,15 @@ struct devlink_health_reporter_ops bnxt_dl_fw_fatal_reporter_ops = { .recover = bnxt_fw_fatal_recover, }; -static void bnxt_dl_fw_reporters_create(struct bnxt *bp) +void bnxt_dl_fw_reporters_create(struct bnxt *bp) { struct bnxt_fw_health *health = bp->fw_health; - if (!health) + if (!bp->dl || !health) return; - health->fw_reporter = - devlink_health_reporter_create(bp->dl, &bnxt_dl_fw_reporter_ops, - 0, false, bp); - if (IS_ERR(health->fw_reporter)) { - netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n", - PTR_ERR(health->fw_reporter)); - health->fw_reporter = NULL; - } + if 
(!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) || health->fw_reset_reporter) + goto err_recovery; health->fw_reset_reporter = devlink_health_reporter_create(bp->dl, @@ -150,8 +143,30 @@ static void bnxt_dl_fw_reporters_create(struct bnxt *bp) netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n", PTR_ERR(health->fw_reset_reporter)); health->fw_reset_reporter = NULL; + bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET; + } + +err_recovery: + if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) + return; + + if (!health->fw_reporter) { + health->fw_reporter = + devlink_health_reporter_create(bp->dl, + &bnxt_dl_fw_reporter_ops, + 0, false, bp); + if (IS_ERR(health->fw_reporter)) { + netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n", + PTR_ERR(health->fw_reporter)); + health->fw_reporter = NULL; + bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; + return; + } } + if (health->fw_fatal_reporter) + return; + health->fw_fatal_reporter = devlink_health_reporter_create(bp->dl, &bnxt_dl_fw_fatal_reporter_ops, @@ -160,24 +175,35 @@ static void bnxt_dl_fw_reporters_create(struct bnxt *bp) netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n", PTR_ERR(health->fw_fatal_reporter)); health->fw_fatal_reporter = NULL; + bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; } } -static void bnxt_dl_fw_reporters_destroy(struct bnxt *bp) +void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all) { struct bnxt_fw_health *health = bp->fw_health; - if (!health) + if (!bp->dl || !health) return; - if (health->fw_reporter) - devlink_health_reporter_destroy(health->fw_reporter); - - if (health->fw_reset_reporter) + if ((all || !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) && + health->fw_reset_reporter) { devlink_health_reporter_destroy(health->fw_reset_reporter); + health->fw_reset_reporter = NULL; + } - if (health->fw_fatal_reporter) + if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && !all) + return; + + if (health->fw_reporter) { + devlink_health_reporter_destroy(health->fw_reporter); + health->fw_reporter = NULL; + } + + if (health->fw_fatal_reporter) { devlink_health_reporter_destroy(health->fw_fatal_reporter); + health->fw_fatal_reporter = NULL; + } } void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event) @@ -185,9 +211,6 @@ void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event) struct bnxt_fw_health *fw_health = bp->fw_health; struct bnxt_fw_reporter_ctx fw_reporter_ctx; - if (!fw_health) - return; - fw_reporter_ctx.sp_event = event; switch (event) { case BNXT_FW_RESET_NOTIFY_SP_EVENT: @@ -247,6 +270,8 @@ static const struct devlink_ops bnxt_dl_ops = { .flash_update = bnxt_dl_flash_update, }; +static const struct devlink_ops bnxt_vf_dl_ops; + enum bnxt_dl_param_id { BNXT_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, @@ -460,7 +485,10 @@ int bnxt_dl_register(struct bnxt *bp) return -ENOTSUPP; } - dl = devlink_alloc(&bnxt_dl_ops, sizeof(struct bnxt_dl)); + if (BNXT_PF(bp)) + dl = devlink_alloc(&bnxt_dl_ops, sizeof(struct bnxt_dl)); + else + dl = devlink_alloc(&bnxt_vf_dl_ops, sizeof(struct bnxt_dl)); if (!dl) { netdev_warn(bp->dev, "devlink_alloc failed"); return -ENOMEM; @@ -479,6 +507,9 @@ int bnxt_dl_register(struct bnxt *bp) goto err_dl_free; } + if (!BNXT_PF(bp)) + return 0; + rc = devlink_params_register(dl, bnxt_dl_params, ARRAY_SIZE(bnxt_dl_params)); if (rc) { @@ -506,8 +537,6 @@ int bnxt_dl_register(struct bnxt *bp) devlink_params_publish(dl); - bnxt_dl_fw_reporters_create(bp); - return 0; err_dl_port_unreg: @@ 
-530,12 +559,14 @@ void bnxt_dl_unregister(struct bnxt *bp) if (!dl) return; - bnxt_dl_fw_reporters_destroy(bp); - devlink_port_params_unregister(&bp->dl_port, bnxt_dl_port_params, - ARRAY_SIZE(bnxt_dl_port_params)); - devlink_port_unregister(&bp->dl_port); - devlink_params_unregister(dl, bnxt_dl_params, - ARRAY_SIZE(bnxt_dl_params)); + if (BNXT_PF(bp)) { + devlink_port_params_unregister(&bp->dl_port, + bnxt_dl_port_params, + ARRAY_SIZE(bnxt_dl_port_params)); + devlink_port_unregister(&bp->dl_port); + devlink_params_unregister(dl, bnxt_dl_params, + ARRAY_SIZE(bnxt_dl_params)); + } devlink_unregister(dl); devlink_free(dl); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h index 665d4bdcd8c0..6db6c3dac472 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h @@ -58,6 +58,8 @@ struct bnxt_dl_nvm_param { void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event); void bnxt_dl_health_status_update(struct bnxt *bp, bool healthy); +void bnxt_dl_fw_reporters_create(struct bnxt *bp); +void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all); int bnxt_dl_register(struct bnxt *bp); void bnxt_dl_unregister(struct bnxt *bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 2ccf79cdcb1e..08d56ec7b68a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -3071,8 +3071,15 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len, } } - if (info->dest_buf) - memcpy(info->dest_buf + off, dma_buf, len); + if (info->dest_buf) { + if ((info->seg_start + off + len) <= + BNXT_COREDUMP_BUF_LEN(info->buf_len)) { + memcpy(info->dest_buf + off, dma_buf, len); + } else { + rc = -ENOBUFS; + break; + } + } if (cmn_req->req_type == cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE)) @@ -3126,7 +3133,7 @@ static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id, static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id, u16 segment_id, u32 *seg_len, - void *buf, u32 offset) + void *buf, u32 buf_len, u32 offset) { struct hwrm_dbg_coredump_retrieve_input req = {0}; struct bnxt_hwrm_dbg_dma_info info = {NULL}; @@ -3141,8 +3148,11 @@ static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id, seq_no); info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output, data_len); - if (buf) + if (buf) { info.dest_buf = buf + offset; + info.buf_len = buf_len; + info.seg_start = offset; + } rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info); if (!rc) @@ -3232,14 +3242,17 @@ bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record, static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len) { u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output); + u32 offset = 0, seg_hdr_len, seg_record_len, buf_len = 0; struct coredump_segment_record *seg_record = NULL; - u32 offset = 0, seg_hdr_len, seg_record_len; struct bnxt_coredump_segment_hdr seg_hdr; struct bnxt_coredump coredump = {NULL}; time64_t start_time; u16 start_utc; int rc = 0, i; + if (buf) + buf_len = *dump_len; + start_time = ktime_get_real_seconds(); start_utc = sys_tz.tz_minuteswest * 60; seg_hdr_len = sizeof(seg_hdr); @@ -3272,6 +3285,12 @@ static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len) u32 duration = 0, seg_len = 0; unsigned long start, end; + if (buf && 
((offset + seg_hdr_len) > + BNXT_COREDUMP_BUF_LEN(buf_len))) { + rc = -ENOBUFS; + goto err; + } + start = jiffies; rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id); @@ -3284,9 +3303,11 @@ static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len) /* Write segment data into the buffer */ rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id, - &seg_len, buf, + &seg_len, buf, buf_len, offset + seg_hdr_len); - if (rc) + if (rc && rc == -ENOBUFS) + goto err; + else if (rc) netdev_err(bp->dev, "Failed to retrieve coredump for seg = %d\n", seg_record->segment_id); @@ -3316,7 +3337,8 @@ err: rc); kfree(coredump.data); *dump_len += sizeof(struct bnxt_coredump_record); - + if (rc == -ENOBUFS) + netdev_err(bp->dev, "Firmware returned large coredump buffer"); return rc; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h index 4428d0abcbc1..3576d951727b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h @@ -31,6 +31,8 @@ struct bnxt_coredump { u16 total_segs; }; +#define BNXT_COREDUMP_BUF_LEN(len) ((len) - sizeof(struct bnxt_coredump_record)) + struct bnxt_hwrm_dbg_dma_info { void *dest_buf; int dest_buf_size; @@ -38,6 +40,8 @@ struct bnxt_hwrm_dbg_dma_info { u16 seq_off; u16 data_len_off; u16 segs; + u32 seg_start; + u32 buf_len; }; struct hwrm_dbg_cmn_input { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index c601ff7b8f61..4a316c4b3fa8 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -113,8 +113,10 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id, { struct net_device *dev = edev->net; struct bnxt *bp = netdev_priv(dev); + struct bnxt_hw_resc *hw_resc; int max_idx, max_cp_rings; int avail_msix, idx; + int total_vecs; int rc = 0; ASSERT_RTNL(); @@ -142,7 +144,10 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id, } edev->ulp_tbl[ulp_id].msix_base = idx; edev->ulp_tbl[ulp_id].msix_requested = avail_msix; - if (bp->total_irqs < (idx + avail_msix)) { + hw_resc = &bp->hw_resc; + total_vecs = idx + avail_msix; + if (bp->total_irqs < total_vecs || + (BNXT_NEW_RM(bp) && hw_resc->resv_irqs < total_vecs)) { if (netif_running(dev)) { bnxt_close_nic(bp, true, false); rc = bnxt_open_nic(bp, true, false); @@ -156,7 +161,6 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id, } if (BNXT_NEW_RM(bp)) { - struct bnxt_hw_resc *hw_resc = &bp->hw_resc; int resv_msix; resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index f9bf7d7250ab..b010b34cdaf8 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -398,6 +398,9 @@ static int bnxt_vf_reps_create(struct bnxt *bp) struct net_device *dev; int rc, i; + if (!(bp->flags & BNXT_FLAG_DSN_VALID)) + return -ENODEV; + bp->vf_reps = kcalloc(num_vfs, sizeof(vf_rep), GFP_KERNEL); if (!bp->vf_reps) return -ENOMEM; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 120fa05a39ff..0a8624be44a9 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -2164,8 +2164,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, DMA_END_ADDR); /* Initialize Tx NAPI 
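 *
 * netif_tx_napi_add() takes the same arguments as netif_napi_add() but
 * marks the instance as Tx-only, so it is kept out of the busy-poll
 * (napi_hash) machinery; the call below is simply:
 *
 *	netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll,
 *			  NAPI_POLL_WEIGHT);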
*/ - netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, - NAPI_POLL_WEIGHT); + netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, + NAPI_POLL_WEIGHT); } /* Initialize a RDMA ring */ diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index 1604ad32e920..f991537818fe 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -2537,7 +2537,7 @@ static int sbmac_probe(struct platform_device *pldev) res = platform_get_resource(pldev, IORESOURCE_MEM, 0); BUG_ON(!res); - sbm_base = ioremap_nocache(res->start, resource_size(res)); + sbm_base = ioremap(res->start, resource_size(res)); if (!sbm_base) { printk(KERN_ERR "%s: unable to map device registers\n", dev_name(&pldev->dev)); diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index e338272931d1..01a50a4b2113 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -3477,7 +3477,7 @@ bnad_init(struct bnad *bnad, bnad->pcidev = pdev; bnad->mmio_start = pci_resource_start(pdev, 0); bnad->mmio_len = pci_resource_len(pdev, 0); - bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len); + bnad->bar0 = ioremap(bnad->mmio_start, bnad->mmio_len); if (!bnad->bar0) { dev_err(&pdev->dev, "ioremap for bar0 failed\n"); return -ENOMEM; diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 9c767ee252ac..f7d87c71aaa9 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -611,21 +611,24 @@ static const struct phylink_mac_ops macb_phylink_ops = { .mac_link_up = macb_mac_link_up, }; +static bool macb_phy_handle_exists(struct device_node *dn) +{ + dn = of_parse_phandle(dn, "phy-handle", 0); + of_node_put(dn); + return dn != NULL; +} + static int macb_phylink_connect(struct macb *bp) { + struct device_node *dn = bp->pdev->dev.of_node; struct net_device *dev = bp->dev; struct phy_device *phydev; int ret; - if (bp->pdev->dev.of_node && - of_parse_phandle(bp->pdev->dev.of_node, "phy-handle", 0)) { - ret = phylink_of_phy_connect(bp->phylink, bp->pdev->dev.of_node, - 0); - if (ret) { - netdev_err(dev, "Could not attach PHY (%d)\n", ret); - return ret; - } - } else { + if (dn) + ret = phylink_of_phy_connect(bp->phylink, dn, 0); + + if (!dn || (ret && !macb_phy_handle_exists(dn))) { phydev = phy_find_first(bp->mii_bus); if (!phydev) { netdev_err(dev, "no PHY found\n"); @@ -634,10 +637,11 @@ static int macb_phylink_connect(struct macb *bp) /* attach the mac to the phy */ ret = phylink_connect_phy(bp->phylink, phydev); - if (ret) { - netdev_err(dev, "Could not attach to PHY (%d)\n", ret); - return ret; - } + } + + if (ret) { + netdev_err(dev, "Could not attach PHY (%d)\n", ret); + return ret; } phylink_start(bp->phylink); @@ -664,9 +668,30 @@ static int macb_mii_probe(struct net_device *dev) return 0; } +static int macb_mdiobus_register(struct macb *bp) +{ + struct device_node *child, *np = bp->pdev->dev.of_node; + + /* Only create the PHY from the device tree if at least one PHY is + * described. Otherwise scan the entire MDIO bus. We do this to support + * old device tree that did not follow the best practices and did not + * describe their network PHYs. + */ + for_each_available_child_of_node(np, child) + if (of_mdiobus_child_is_phy(child)) { + /* The loop increments the child refcount, + * decrement it before returning. 
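 *
 * for_each_available_child_of_node() takes a reference on each child as
 * it iterates and releases it when advancing, so leaving the loop early
 * must drop the last reference by hand, hence the of_node_put() here.
 * The general shape (match() and handle() are placeholders, not macb
 * functions):
 *
 *	for_each_available_child_of_node(np, child) {
 *		if (match(child)) {
 *			of_node_put(child);
 *			return handle(np);
 *		}
 *	}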
+ */ + of_node_put(child); + + return of_mdiobus_register(bp->mii_bus, np); + } + + return mdiobus_register(bp->mii_bus); +} + static int macb_mii_init(struct macb *bp) { - struct device_node *np; int err = -ENXIO; /* Enable management port */ @@ -688,9 +713,7 @@ static int macb_mii_init(struct macb *bp) dev_set_drvdata(&bp->dev->dev, bp->mii_bus); - np = bp->pdev->dev.of_node; - - err = of_mdiobus_register(bp->mii_bus, np); + err = macb_mdiobus_register(bp); if (err) goto err_out_free_mdiobus; @@ -4069,7 +4092,7 @@ static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk, mgmt->rate = 0; mgmt->hw.init = &init; - *tx_clk = clk_register(NULL, &mgmt->hw); + *tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw); if (IS_ERR(*tx_clk)) return PTR_ERR(*tx_clk); @@ -4397,7 +4420,6 @@ err_out_free_netdev: err_disable_clocks: clk_disable_unprepare(tx_clk); - clk_unregister(tx_clk); clk_disable_unprepare(hclk); clk_disable_unprepare(pclk); clk_disable_unprepare(rx_clk); @@ -4427,7 +4449,6 @@ static int macb_remove(struct platform_device *pdev) pm_runtime_dont_use_autosuspend(&pdev->dev); if (!pm_runtime_suspended(&pdev->dev)) { clk_disable_unprepare(bp->tx_clk); - clk_unregister(bp->tx_clk); clk_disable_unprepare(bp->hclk); clk_disable_unprepare(bp->pclk); clk_disable_unprepare(bp->rx_clk); diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c index 0cc2338d8d2a..dfc77507b159 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c @@ -205,11 +205,11 @@ static int __cvmx_bootmem_check_version(struct octeon_device *oct, major_version = (u32)__cvmx_bootmem_desc_get( oct, oct->bootmem_desc_addr, offsetof(struct cvmx_bootmem_desc, major_version), - FIELD_SIZEOF(struct cvmx_bootmem_desc, major_version)); + sizeof_field(struct cvmx_bootmem_desc, major_version)); minor_version = (u32)__cvmx_bootmem_desc_get( oct, oct->bootmem_desc_addr, offsetof(struct cvmx_bootmem_desc, minor_version), - FIELD_SIZEOF(struct cvmx_bootmem_desc, minor_version)); + sizeof_field(struct cvmx_bootmem_desc, minor_version)); dev_dbg(&oct->pci_dev->dev, "%s: major_version=%d\n", __func__, major_version); @@ -237,13 +237,13 @@ static const struct cvmx_bootmem_named_block_desc oct, named_addr, offsetof(struct cvmx_bootmem_named_block_desc, base_addr), - FIELD_SIZEOF( + sizeof_field( struct cvmx_bootmem_named_block_desc, base_addr)); desc->size = __cvmx_bootmem_desc_get(oct, named_addr, offsetof(struct cvmx_bootmem_named_block_desc, size), - FIELD_SIZEOF( + sizeof_field( struct cvmx_bootmem_named_block_desc, size)); @@ -268,20 +268,20 @@ static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct, oct, oct->bootmem_desc_addr, offsetof(struct cvmx_bootmem_desc, named_block_array_addr), - FIELD_SIZEOF(struct cvmx_bootmem_desc, + sizeof_field(struct cvmx_bootmem_desc, named_block_array_addr)); u32 num_blocks = (u32)__cvmx_bootmem_desc_get( oct, oct->bootmem_desc_addr, offsetof(struct cvmx_bootmem_desc, nb_num_blocks), - FIELD_SIZEOF(struct cvmx_bootmem_desc, + sizeof_field(struct cvmx_bootmem_desc, nb_num_blocks)); u32 name_length = (u32)__cvmx_bootmem_desc_get( oct, oct->bootmem_desc_addr, offsetof(struct cvmx_bootmem_desc, named_block_name_len), - FIELD_SIZEOF(struct cvmx_bootmem_desc, + sizeof_field(struct cvmx_bootmem_desc, named_block_name_len)); u64 named_addr = named_block_array_addr; @@ -292,7 +292,7 @@ static u64 cvmx_bootmem_phy_named_block_find(struct 
octeon_device *oct, offsetof( struct cvmx_bootmem_named_block_desc, size), - FIELD_SIZEOF( + sizeof_field( struct cvmx_bootmem_named_block_desc, size)); diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 58f89f6a040f..883cfa9c4b6d 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -2448,6 +2448,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) if (!is_offload(adapter)) return -EOPNOTSUPP; + if (!capable(CAP_NET_ADMIN)) + return -EPERM; if (!(adapter->flags & FULL_INIT_DONE)) return -EIO; /* need the memory controllers */ if (copy_from_user(&t, useraddr, sizeof(t))) @@ -3265,7 +3267,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_free_adapter; } - adapter->regs = ioremap_nocache(mmio_start, mmio_len); + adapter->regs = ioremap(mmio_start, mmio_len); if (!adapter->regs) { dev_err(&pdev->dev, "cannot map device registers\n"); err = -ENOMEM; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index a70ac2097892..becee29f5df7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -504,6 +504,7 @@ struct link_config { enum cc_pause requested_fc; /* flow control user has requested */ enum cc_pause fc; /* actual link flow control */ + enum cc_pause advertised_fc; /* actual advertised flow control */ enum cc_fec requested_fec; /* Forward Error Correction: */ enum cc_fec fec; /* requested and actual in use */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 93868dca186a..4144c230dc97 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -70,8 +70,7 @@ static void *seq_tab_start(struct seq_file *seq, loff_t *pos) static void *seq_tab_next(struct seq_file *seq, void *v, loff_t *pos) { v = seq_tab_get_idx(seq->private, *pos + 1); - if (v) - ++*pos; + ++(*pos); return v; } @@ -3048,6 +3047,9 @@ static int sge_queue_entries(const struct adapter *adap) int tot_uld_entries = 0; int i; + if (!is_uld(adap)) + goto lld_only; + mutex_lock(&uld_mutex); for (i = 0; i < CXGB4_TX_MAX; i++) tot_uld_entries += sge_qinfo_uld_txq_entries(adap, i); @@ -3058,6 +3060,7 @@ static int sge_queue_entries(const struct adapter *adap) } mutex_unlock(&uld_mutex); +lld_only: return DIV_ROUND_UP(adap->sge.ethqsets, 4) + (adap->sge.eohw_txq ? 
DIV_ROUND_UP(adap->sge.eoqsets, 4) : 0) + tot_uld_entries + diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 20ab3b6285a2..c837382ee522 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -807,8 +807,8 @@ static void get_pauseparam(struct net_device *dev, struct port_info *p = netdev_priv(dev); epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0; - epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0; - epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0; + epause->rx_pause = (p->link_cfg.advertised_fc & PAUSE_RX) != 0; + epause->tx_pause = (p->link_cfg.advertised_fc & PAUSE_TX) != 0; } static int set_pauseparam(struct net_device *dev, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 12ff69b3ba91..0dedd3e9c31e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -3135,9 +3135,9 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate) { struct port_info *pi = netdev_priv(dev); struct adapter *adap = pi->adapter; + struct ch_sched_queue qe = { 0 }; + struct ch_sched_params p = { 0 }; struct sched_class *e; - struct ch_sched_params p; - struct ch_sched_queue qe; u32 req_rate; int err = 0; @@ -3154,6 +3154,15 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate) return -EINVAL; } + qe.queue = index; + e = cxgb4_sched_queue_lookup(dev, &qe); + if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CL_RL) { + dev_err(adap->pdev_dev, + "Queue %u already bound to class %u of type: %u\n", + index, e->idx, e->info.u.params.level); + return -EBUSY; + } + /* Convert from Mbps to Kbps */ req_rate = rate * 1000; @@ -3183,7 +3192,6 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate) return 0; /* Fetch any available unused or matching scheduling class */ - memset(&p, 0, sizeof(p)); p.type = SCHED_CLASS_TYPE_PACKET; p.u.params.level = SCHED_CLASS_LEVEL_CL_RL; p.u.params.mode = SCHED_CLASS_MODE_CLASS; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c index 102b370fbd3e..6d485803ddbe 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c @@ -15,6 +15,8 @@ static int cxgb4_matchall_egress_validate(struct net_device *dev, struct flow_action *actions = &cls->rule->action; struct port_info *pi = netdev2pinfo(dev); struct flow_action_entry *entry; + struct ch_sched_queue qe; + struct sched_class *e; u64 max_link_rate; u32 i, speed; int ret; @@ -60,9 +62,61 @@ static int cxgb4_matchall_egress_validate(struct net_device *dev, } } + for (i = 0; i < pi->nqsets; i++) { + memset(&qe, 0, sizeof(qe)); + qe.queue = i; + + e = cxgb4_sched_queue_lookup(dev, &qe); + if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CH_RL) { + NL_SET_ERR_MSG_MOD(extack, + "Some queues are already bound to different class"); + return -EBUSY; + } + } + return 0; } +static int cxgb4_matchall_tc_bind_queues(struct net_device *dev, u32 tc) +{ + struct port_info *pi = netdev2pinfo(dev); + struct ch_sched_queue qe; + int ret; + u32 i; + + for (i = 0; i < pi->nqsets; i++) { + qe.queue = i; + qe.class = tc; + ret = cxgb4_sched_class_bind(dev, &qe, SCHED_QUEUE); + if (ret) + goto out_free; + } + + return 0; + +out_free: + while (i--) { + qe.queue 
= i; + qe.class = SCHED_CLS_NONE; + cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE); + } + + return ret; +} + +static void cxgb4_matchall_tc_unbind_queues(struct net_device *dev) +{ + struct port_info *pi = netdev2pinfo(dev); + struct ch_sched_queue qe; + u32 i; + + for (i = 0; i < pi->nqsets; i++) { + qe.queue = i; + qe.class = SCHED_CLS_NONE; + cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE); + } +} + static int cxgb4_matchall_alloc_tc(struct net_device *dev, struct tc_cls_matchall_offload *cls) { @@ -83,6 +137,7 @@ static int cxgb4_matchall_alloc_tc(struct net_device *dev, struct adapter *adap = netdev2adap(dev); struct flow_action_entry *entry; struct sched_class *e; + int ret; u32 i; tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; @@ -101,10 +156,21 @@ static int cxgb4_matchall_alloc_tc(struct net_device *dev, return -ENOMEM; } + ret = cxgb4_matchall_tc_bind_queues(dev, e->idx); + if (ret) { + NL_SET_ERR_MSG_MOD(extack, + "Could not bind queues to traffic class"); + goto out_free; + } + tc_port_matchall->egress.hwtc = e->idx; tc_port_matchall->egress.cookie = cls->cookie; tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_ENABLED; return 0; + +out_free: + cxgb4_sched_class_free(dev, e->idx); + return ret; } static void cxgb4_matchall_free_tc(struct net_device *dev) @@ -114,6 +180,7 @@ static void cxgb4_matchall_free_tc(struct net_device *dev) struct adapter *adap = netdev2adap(dev); tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; + cxgb4_matchall_tc_unbind_queues(dev); cxgb4_sched_class_free(dev, tc_port_matchall->egress.hwtc); tc_port_matchall->egress.hwtc = SCHED_CLS_NONE; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c index 477973d2e341..ec3eb45ee3b4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c @@ -12,8 +12,9 @@ static int cxgb4_mqprio_validate(struct net_device *dev, struct port_info *pi = netdev2pinfo(dev); struct adapter *adap = netdev2adap(dev); u32 speed, qcount = 0, qoffset = 0; + u32 start_a, start_b, end_a, end_b; int ret; - u8 i; + u8 i, j; if (!mqprio->qopt.num_tc) return 0; @@ -47,6 +48,31 @@ static int cxgb4_mqprio_validate(struct net_device *dev, qoffset = max_t(u16, mqprio->qopt.offset[i], qoffset); qcount += mqprio->qopt.count[i]; + start_a = mqprio->qopt.offset[i]; + end_a = start_a + mqprio->qopt.count[i] - 1; + for (j = i + 1; j < mqprio->qopt.num_tc; j++) { + start_b = mqprio->qopt.offset[j]; + end_b = start_b + mqprio->qopt.count[j] - 1; + + /* If queue count is 0, then the traffic + * belonging to this class will not use + * ETHOFLD queues. So, no need to validate + * further. 
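The max_t/min_t comparison a few lines below is the standard closed-interval intersection test. A self-contained sketch of the same predicate (hypothetical helper name, not part of the driver):

#include <stdbool.h>
#include <stdint.h>

/* Closed ranges [start_a, end_a] and [start_b, end_b] intersect
 * exactly when the larger of the two starts does not pass the
 * smaller of the two ends.
 */
static bool queue_ranges_overlap(uint32_t start_a, uint32_t end_a,
                                 uint32_t start_b, uint32_t end_b)
{
        uint32_t start = start_a > start_b ? start_a : start_b;
        uint32_t end = end_a < end_b ? end_a : end_b;

        return start <= end;
}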
+ */ + if (!mqprio->qopt.count[i]) + break; + + if (!mqprio->qopt.count[j]) + continue; + + if (max_t(u32, start_a, start_b) <= + min_t(u32, end_a, end_b)) { + netdev_err(dev, + "Queues can't overlap across tc\n"); + return -EINVAL; + } + } + /* Convert byte per second to bits per second */ min_rate += (mqprio->min_rate[i] * 8); max_rate += (mqprio->max_rate[i] * 8); @@ -145,6 +171,10 @@ static int cxgb4_mqprio_alloc_hw_resources(struct net_device *dev) kfree(adap->sge.eohw_rxq); return -ENOMEM; } + + refcount_set(&adap->tc_mqprio->refcnt, 1); + } else { + refcount_inc(&adap->tc_mqprio->refcnt); } if (!(adap->flags & CXGB4_USING_MSIX)) @@ -205,7 +235,6 @@ static int cxgb4_mqprio_alloc_hw_resources(struct net_device *dev) cxgb4_enable_rx(adap, &eorxq->rspq); } - refcount_inc(&adap->tc_mqprio->refcnt); return 0; out_free_msix: @@ -234,9 +263,10 @@ out_free_queues: t4_sge_free_ethofld_txq(adap, eotxq); } - kfree(adap->sge.eohw_txq); - kfree(adap->sge.eohw_rxq); - + if (refcount_dec_and_test(&adap->tc_mqprio->refcnt)) { + kfree(adap->sge.eohw_txq); + kfree(adap->sge.eohw_rxq); + } return ret; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c index e9e45006632d..1a16449e9deb 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c @@ -678,8 +678,7 @@ static void *l2t_seq_start(struct seq_file *seq, loff_t *pos) static void *l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos) { v = l2t_get_idx(seq, *pos); - if (v) - ++*pos; + ++(*pos); return v; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c index 3e61bd5d0c29..cebe1412d960 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sched.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c @@ -165,6 +165,22 @@ static void *t4_sched_entry_lookup(struct port_info *pi, return found; } +struct sched_class *cxgb4_sched_queue_lookup(struct net_device *dev, + struct ch_sched_queue *p) +{ + struct port_info *pi = netdev2pinfo(dev); + struct sched_queue_entry *qe = NULL; + struct adapter *adap = pi->adapter; + struct sge_eth_txq *txq; + + if (p->queue < 0 || p->queue >= pi->nqsets) + return NULL; + + txq = &adap->sge.ethtxq[pi->first_qset + p->queue]; + qe = t4_sched_entry_lookup(pi, SCHED_QUEUE, txq->q.cntxt_id); + return qe ? 
&pi->sched_tbl->tab[qe->param.class] : NULL; +} + static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p) { struct sched_queue_entry *qe = NULL; diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.h b/drivers/net/ethernet/chelsio/cxgb4/sched.h index e92ff68bdd0a..5cc74a5a1774 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sched.h +++ b/drivers/net/ethernet/chelsio/cxgb4/sched.h @@ -103,6 +103,8 @@ static inline bool valid_class_id(struct net_device *dev, u8 class_id) return true; } +struct sched_class *cxgb4_sched_queue_lookup(struct net_device *dev, + struct ch_sched_queue *p); int cxgb4_sched_class_bind(struct net_device *dev, void *arg, enum sched_bind_type type); int cxgb4_sched_class_unbind(struct net_device *dev, void *arg, diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 19d18acfc9a6..844fdcf55118 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -4089,7 +4089,8 @@ static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause) if (cc_pause & PAUSE_TX) fw_pause |= FW_PORT_CAP32_802_3_PAUSE; else - fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR; + fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR | + FW_PORT_CAP32_802_3_PAUSE; } else if (cc_pause & PAUSE_TX) { fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR; } @@ -8563,17 +8564,17 @@ static fw_port_cap32_t lstatus_to_fwcap(u32 lstatus) void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) { const struct fw_port_cmd *cmd = (const void *)rpl; - int action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16)); - struct adapter *adapter = pi->adapter; + fw_port_cap32_t pcaps, acaps, lpacaps, linkattr; struct link_config *lc = &pi->link_cfg; - int link_ok, linkdnrc; - enum fw_port_type port_type; + struct adapter *adapter = pi->adapter; + unsigned int speed, fc, fec, adv_fc; enum fw_port_module_type mod_type; - unsigned int speed, fc, fec; - fw_port_cap32_t pcaps, acaps, lpacaps, linkattr; + int action, link_ok, linkdnrc; + enum fw_port_type port_type; /* Extract the various fields from the Port Information message. 
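For context on the advertised_fc plumbing in the surrounding chelsio hunks: ethtool's pause parameters are now reported from what the port advertises (derived from acaps) rather than from the negotiated link attributes. A simplified sketch of the distinction, using illustrative types rather than the driver's own:

#include <stdbool.h>

/* Simplified pause bookkeeping: what the user requested, what the
 * port advertises to the peer, and what autonegotiation resolved.
 */
enum cc_pause {
        PAUSE_RX      = 1 << 0,
        PAUSE_TX      = 1 << 1,
        PAUSE_AUTONEG = 1 << 2,
};

struct link_fc {
        enum cc_pause requested_fc;   /* user configuration */
        enum cc_pause advertised_fc;  /* advertised to link partner */
        enum cc_pause fc;             /* negotiated result */
};

/* Reporting the advertised state keeps the query output consistent
 * with what the user configured, even while negotiation is pending.
 */
static void report_pauseparam(const struct link_fc *lc,
                              bool *rx_pause, bool *tx_pause)
{
        *rx_pause = (lc->advertised_fc & PAUSE_RX) != 0;
        *tx_pause = (lc->advertised_fc & PAUSE_TX) != 0;
}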
*/ + action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16)); switch (action) { case FW_PORT_ACTION_GET_PORT_INFO: { u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype); @@ -8611,6 +8612,7 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) } fec = fwcap_to_cc_fec(acaps); + adv_fc = fwcap_to_cc_pause(acaps); fc = fwcap_to_cc_pause(linkattr); speed = fwcap_to_speed(linkattr); @@ -8667,7 +8669,9 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) } if (link_ok != lc->link_ok || speed != lc->speed || - fc != lc->fc || fec != lc->fec) { /* something changed */ + fc != lc->fc || adv_fc != lc->advertised_fc || + fec != lc->fec) { + /* something changed */ if (!link_ok && lc->link_ok) { lc->link_down_rc = linkdnrc; dev_warn_ratelimited(adapter->pdev_dev, @@ -8677,6 +8681,7 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) } lc->link_ok = link_ok; lc->speed = speed; + lc->advertised_fc = adv_fc; lc->fc = fc; lc->fec = fec; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index f6fc0875d5b0..f4d41f968afa 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -1690,8 +1690,8 @@ static void cxgb4vf_get_pauseparam(struct net_device *dev, struct port_info *pi = netdev_priv(dev); pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0; - pauseparam->rx_pause = (pi->link_cfg.fc & PAUSE_RX) != 0; - pauseparam->tx_pause = (pi->link_cfg.fc & PAUSE_TX) != 0; + pauseparam->rx_pause = (pi->link_cfg.advertised_fc & PAUSE_RX) != 0; + pauseparam->tx_pause = (pi->link_cfg.advertised_fc & PAUSE_TX) != 0; } /* diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h index ccca67cf4487..57cfd10a99ec 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h @@ -135,6 +135,7 @@ struct link_config { enum cc_pause requested_fc; /* flow control user has requested */ enum cc_pause fc; /* actual link flow control */ + enum cc_pause advertised_fc; /* actual advertised flow control */ enum cc_fec auto_fec; /* Forward Error Correction: */ enum cc_fec requested_fec; /* "automatic" (IEEE 802.3), */ diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 8a389d617a23..9d49ff211cc1 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -1913,16 +1913,16 @@ static const char *t4vf_link_down_rc_str(unsigned char link_down_rc) static void t4vf_handle_get_port_info(struct port_info *pi, const struct fw_port_cmd *cmd) { - int action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16)); - struct adapter *adapter = pi->adapter; + fw_port_cap32_t pcaps, acaps, lpacaps, linkattr; struct link_config *lc = &pi->link_cfg; - int link_ok, linkdnrc; - enum fw_port_type port_type; + struct adapter *adapter = pi->adapter; + unsigned int speed, fc, fec, adv_fc; enum fw_port_module_type mod_type; - unsigned int speed, fc, fec; - fw_port_cap32_t pcaps, acaps, lpacaps, linkattr; + int action, link_ok, linkdnrc; + enum fw_port_type port_type; /* Extract the various fields from the Port Information message. 
*/ + action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16)); switch (action) { case FW_PORT_ACTION_GET_PORT_INFO: { u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype); @@ -1982,6 +1982,7 @@ static void t4vf_handle_get_port_info(struct port_info *pi, } fec = fwcap_to_cc_fec(acaps); + adv_fc = fwcap_to_cc_pause(acaps); fc = fwcap_to_cc_pause(linkattr); speed = fwcap_to_speed(linkattr); @@ -2012,7 +2013,9 @@ static void t4vf_handle_get_port_info(struct port_info *pi, } if (link_ok != lc->link_ok || speed != lc->speed || - fc != lc->fc || fec != lc->fec) { /* something changed */ + fc != lc->fc || adv_fc != lc->advertised_fc || + fec != lc->fec) { + /* something changed */ if (!link_ok && lc->link_ok) { lc->link_down_rc = linkdnrc; dev_warn_ratelimited(adapter->pdev_dev, @@ -2022,6 +2025,7 @@ static void t4vf_handle_get_port_info(struct port_info *pi, } lc->link_ok = link_ok; lc->speed = speed; + lc->advertised_fc = adv_fc; lc->fc = fc; lc->fec = fec; diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index a8f4c69252ff..2814b96751b4 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -576,6 +576,8 @@ static int gmac_setup_txqs(struct net_device *netdev) if (port->txq_dma_base & ~DMA_Q_BASE_MASK) { dev_warn(geth->dev, "TX queue base is not aligned\n"); + dma_free_coherent(geth->dev, len * sizeof(*desc_ring), + desc_ring, port->txq_dma_base); kfree(skb_tab); return -ENOMEM; } diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c index f1a2da15dd0a..7852a4308194 100644 --- a/drivers/net/ethernet/dec/tulip/de2104x.c +++ b/drivers/net/ethernet/dec/tulip/de2104x.c @@ -2039,7 +2039,7 @@ static int de_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } /* remap CSR registers */ - regs = ioremap_nocache(pciaddr, DE_REGS_SIZE); + regs = ioremap(pciaddr, DE_REGS_SIZE); if (!regs) { rc = -EIO; pr_err("Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n", diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index 5bb5abf99588..022a54a1805b 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -23,7 +23,7 @@ struct be_ethtool_stat { }; enum {DRVSTAT_TX, DRVSTAT_RX, DRVSTAT}; -#define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \ +#define FIELDINFO(_struct, field) sizeof_field(_struct, field), \ offsetof(_struct, field) #define DRVSTAT_TX_INFO(field) #field, DRVSTAT_TX,\ FIELDINFO(struct be_tx_stats, field) diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index ea4f17f5cce7..c6e74ae0ff0d 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -1087,7 +1087,7 @@ static int ethoc_probe(struct platform_device *pdev) priv = netdev_priv(netdev); priv->netdev = netdev; - priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr, + priv->iobase = devm_ioremap(&pdev->dev, netdev->base_addr, resource_size(mmio)); if (!priv->iobase) { dev_err(&pdev->dev, "cannot remap I/O memory space\n"); @@ -1096,7 +1096,7 @@ static int ethoc_probe(struct platform_device *pdev) } if (netdev->mem_end) { - priv->membase = devm_ioremap_nocache(&pdev->dev, + priv->membase = devm_ioremap(&pdev->dev, netdev->mem_start, resource_size(mem)); if (!priv->membase) { dev_err(&pdev->dev, "cannot remap memory space\n"); diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 
b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 6a9d12dad5d9..a301f0095223 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -1719,7 +1719,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv, int page_offset; unsigned int sz; int *count_ptr; - int i; + int i, j; vaddr = phys_to_virt(addr); WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES)); @@ -1736,14 +1736,14 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv, WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr, SMP_CACHE_BYTES)); + dma_unmap_page(priv->rx_dma_dev, sg_addr, + DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE); + /* We may use multiple Rx pools */ dpaa_bp = dpaa_bpid2pool(sgt[i].bpid); if (!dpaa_bp) goto free_buffers; - count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); - dma_unmap_page(priv->rx_dma_dev, sg_addr, - DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE); if (!skb) { sz = dpaa_bp->size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); @@ -1786,7 +1786,9 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv, skb_add_rx_frag(skb, i - 1, head_page, frag_off, frag_len, dpaa_bp->size); } + /* Update the pool count for the current {cpu x bpool} */ + count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); (*count_ptr)--; if (qm_sg_entry_is_final(&sgt[i])) @@ -1800,26 +1802,25 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv, return skb; free_buffers: - /* compensate sw bpool counter changes */ - for (i--; i >= 0; i--) { - dpaa_bp = dpaa_bpid2pool(sgt[i].bpid); - if (dpaa_bp) { - count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); - (*count_ptr)++; - } - } /* free all the SG entries */ - for (i = 0; i < DPAA_SGT_MAX_ENTRIES ; i++) { - sg_addr = qm_sg_addr(&sgt[i]); + for (j = 0; j < DPAA_SGT_MAX_ENTRIES ; j++) { + sg_addr = qm_sg_addr(&sgt[j]); sg_vaddr = phys_to_virt(sg_addr); + /* all pages 0..i were unmapped */ + if (j > i) + dma_unmap_page(priv->rx_dma_dev, qm_sg_addr(&sgt[j]), + DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE); free_pages((unsigned long)sg_vaddr, 0); - dpaa_bp = dpaa_bpid2pool(sgt[i].bpid); - if (dpaa_bp) { - count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); - (*count_ptr)--; + /* counters 0..i-1 were decremented */ + if (j >= i) { + dpaa_bp = dpaa_bpid2pool(sgt[j].bpid); + if (dpaa_bp) { + count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); + (*count_ptr)--; + } } - if (qm_sg_entry_is_final(&sgt[i])) + if (qm_sg_entry_is_final(&sgt[j])) break; } /* free the SGT fragment */ diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c index a9503aea527f..6437fe6b9abf 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c @@ -160,10 +160,10 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev) irq = mc_dev->irqs[0]; ptp_qoriq->irq = irq->msi_desc->irq; - err = devm_request_threaded_irq(dev, ptp_qoriq->irq, NULL, - dpaa2_ptp_irq_handler_thread, - IRQF_NO_SUSPEND | IRQF_ONESHOT, - dev_name(dev), ptp_qoriq); + err = request_threaded_irq(ptp_qoriq->irq, NULL, + dpaa2_ptp_irq_handler_thread, + IRQF_NO_SUSPEND | IRQF_ONESHOT, + dev_name(dev), ptp_qoriq); if (err < 0) { dev_err(dev, "devm_request_threaded_irq(): %d\n", err); goto err_free_mc_irq; } @@ -173,18 +173,20 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev) DPRTC_IRQ_INDEX, 1); if (err < 0) { dev_err(dev, "dprtc_set_irq_enable(): %d\n", err); - goto err_free_mc_irq; + goto err_free_threaded_irq; } err = ptp_qoriq_init(ptp_qoriq, base, 
&dpaa2_ptp_caps); if (err) - goto err_free_mc_irq; + goto err_free_threaded_irq; dpaa2_phc_index = ptp_qoriq->phc_index; dev_set_drvdata(dev, ptp_qoriq); return 0; +err_free_threaded_irq: + free_irq(ptp_qoriq->irq, ptp_qoriq); err_free_mc_irq: fsl_mc_free_irqs(mc_dev); err_unmap: diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 05c1899f6628..9294027e9d90 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -2199,8 +2199,14 @@ static void fec_enet_get_regs(struct net_device *ndev, { struct fec_enet_private *fep = netdev_priv(ndev); u32 __iomem *theregs = (u32 __iomem *)fep->hwp; + struct device *dev = &fep->pdev->dev; u32 *buf = (u32 *)regbuf; u32 i, off; + int ret; + + ret = pm_runtime_get_sync(dev); + if (ret < 0) + return; regs->version = fec_enet_register_version; @@ -2216,6 +2222,9 @@ static void fec_enet_get_regs(struct net_device *ndev, off >>= 2; buf[off] = readl(&theregs[off]); } + + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); } static int fec_enet_get_ts_info(struct net_device *ndev, diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index 41c6fa200e74..e1901874c19f 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -110,7 +110,7 @@ do { \ /* Interface Mode Register (IF_MODE) */ #define IF_MODE_MASK 0x00000003 /* 30-31 Mask on i/f mode bits */ -#define IF_MODE_XGMII 0x00000000 /* 30-31 XGMII (10G) interface */ +#define IF_MODE_10G 0x00000000 /* 30-31 10G interface */ #define IF_MODE_GMII 0x00000002 /* 30-31 GMII (1G) interface */ #define IF_MODE_RGMII 0x00000004 #define IF_MODE_RGMII_AUTO 0x00008000 @@ -440,7 +440,7 @@ static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg, tmp = 0; switch (phy_if) { case PHY_INTERFACE_MODE_XGMII: - tmp |= IF_MODE_XGMII; + tmp |= IF_MODE_10G; break; default: tmp |= IF_MODE_GMII; diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c index e03b30c60dcf..c82c85ef5fb3 100644 --- a/drivers/net/ethernet/freescale/xgmac_mdio.c +++ b/drivers/net/ethernet/freescale/xgmac_mdio.c @@ -49,6 +49,7 @@ struct tgec_mdio_controller { struct mdio_fsl_priv { struct tgec_mdio_controller __iomem *mdio_base; bool is_little_endian; + bool has_a011043; }; static u32 xgmac_read32(void __iomem *regs, @@ -226,7 +227,8 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum) return ret; /* Return all Fs if nothing was there */ - if (xgmac_read32(®s->mdio_stat, endian) & MDIO_STAT_RD_ER) { + if ((xgmac_read32(®s->mdio_stat, endian) & MDIO_STAT_RD_ER) && + !priv->has_a011043) { dev_err(&bus->dev, "Error while reading PHY%d reg at %d.%hhu\n", phy_id, dev_addr, regnum); @@ -274,6 +276,9 @@ static int xgmac_mdio_probe(struct platform_device *pdev) priv->is_little_endian = of_property_read_bool(pdev->dev.of_node, "little-endian"); + priv->has_a011043 = of_property_read_bool(pdev->dev.of_node, + "fsl,erratum-a011043"); + ret = of_mdiobus_register(bus, np); if (ret) { dev_err(&pdev->dev, "cannot register MDIO bus\n"); diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c index edec61dfc868..9f52e72ff641 100644 --- a/drivers/net/ethernet/google/gve/gve_rx.c +++ b/drivers/net/ethernet/google/gve/gve_rx.c @@ -418,8 +418,6 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget, rx->cnt = cnt; 
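The gve hunks just below drop explicit dma_wmb() barriers ahead of the doorbell writes, presumably because the doorbell helper uses a non-relaxed MMIO accessor, which on Linux is already ordered after prior writes to coherent DMA memory. Both patterns, sketched with hypothetical ring/doorbell names:

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical doorbell helpers for a descriptor ring. */

static void ring_doorbell(void __iomem *db, u32 tail)
{
        /* A non-relaxed MMIO write is ordered after all prior stores
         * to coherent DMA memory, so descriptors written above are
         * visible to the device before it observes the new tail.
         */
        writel(tail, db);
}

static void ring_doorbell_relaxed(void __iomem *db, u32 tail)
{
        /* With a relaxed accessor the ordering must be supplied by
         * hand, which is what the removed dma_wmb() calls used to do.
         */
        dma_wmb();
        writel_relaxed(tail, db);
}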
rx->fill_cnt += work_done; - /* restock desc ring slots */ - dma_wmb(); /* Ensure descs are visible before ringing doorbell */ gve_rx_write_doorbell(priv, rx); return gve_rx_work_pending(rx); } diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c index f4889431f9b7..d0244feb0301 100644 --- a/drivers/net/ethernet/google/gve/gve_tx.c +++ b/drivers/net/ethernet/google/gve/gve_tx.c @@ -487,10 +487,6 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev) * may have added descriptors without ringing the doorbell. */ - /* Ensure tx descs from a prior gve_tx are visible before - * ringing doorbell. - */ - dma_wmb(); gve_tx_put_doorbell(priv, tx->q_resources, tx->req); return NETDEV_TX_BUSY; } @@ -505,8 +501,6 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev) if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more()) return NETDEV_TX_OK; - /* Ensure tx descs are visible before ringing doorbell */ - dma_wmb(); gve_tx_put_doorbell(priv, tx->q_resources, tx->req); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index 3e9b6d543c77..150a8ccfb8b1 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -543,9 +543,9 @@ hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) skb_tx_timestamp(skb); hip04_set_xmit_desc(priv, phys); - priv->tx_head = TX_NEXT(tx_head); count++; netdev_sent_queue(ndev, skb->len); + priv->tx_head = TX_NEXT(tx_head); stats->tx_bytes += skb->len; stats->tx_packets++; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 14ab20491fd0..eb69e5c81a4d 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -565,7 +565,6 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data, skb = *out_skb = napi_alloc_skb(&ring_data->napi, HNS_RX_HEAD_SIZE); if (unlikely(!skb)) { - netdev_err(ndev, "alloc rx skb fail\n"); ring->stats.sw_err_cnt++; return -ENOMEM; } @@ -1056,7 +1055,6 @@ static int hns_nic_common_poll(struct napi_struct *napi, int budget) container_of(napi, struct hns_nic_ring_data, napi); struct hnae_ring *ring = ring_data->ring; -try_again: clean_complete += ring_data->poll_one( ring_data, budget - clean_complete, ring_data->ex_process); @@ -1066,7 +1064,7 @@ try_again: napi_complete(napi); ring->q->handle->dev->ops->toggle_ring_irq(ring, 0); } else { - goto try_again; + return budget; } } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 69545dd6c938..b3deb5e5ce29 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -54,6 +54,8 @@ MODULE_PARM_DESC(debug, " Network interface message level setting"); #define HNS3_INNER_VLAN_TAG 1 #define HNS3_OUTER_VLAN_TAG 2 +#define HNS3_MIN_TX_LEN 33U + /* hns3_pci_tbl - PCI Device ID Table * * Last entry must be all 0s @@ -1405,6 +1407,10 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) int bd_num = 0; int ret; + /* Hardware can only handle short frames above 32 bytes */ + if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) + return NETDEV_TX_OK; + /* Prefetch the data used later */ prefetch(skb->data); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 
d862e9ba27e1..13dbd249f35f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -10240,7 +10240,7 @@ static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len) return ret; } - data_len_per_desc = FIELD_SIZEOF(struct hclge_desc, data); + data_len_per_desc = sizeof_field(struct hclge_desc, data); *len = 0; for (i = 0; i < dfx_reg_type_num; i++) { bd_num = bd_num_list[i]; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index fbc39a2480d0..180224eab1ca 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -614,7 +614,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) } memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc, - FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc)); + sizeof_field(struct hnae3_knic_private_info, prio_tc)); } static void hclge_tm_vport_info_update(struct hclge_dev *hdev) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c index 60ec48fe4144..966aea949c0b 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c @@ -450,7 +450,7 @@ static u32 hinic_get_rxfh_indir_size(struct net_device *netdev) #define HINIC_FUNC_STAT(_stat_item) { \ .name = #_stat_item, \ - .size = FIELD_SIZEOF(struct hinic_vport_stats, _stat_item), \ + .size = sizeof_field(struct hinic_vport_stats, _stat_item), \ .offset = offsetof(struct hinic_vport_stats, _stat_item) \ } @@ -477,7 +477,7 @@ static struct hinic_stats hinic_function_stats[] = { #define HINIC_PORT_STAT(_stat_item) { \ .name = #_stat_item, \ - .size = FIELD_SIZEOF(struct hinic_phy_port_stats, _stat_item), \ + .size = sizeof_field(struct hinic_phy_port_stats, _stat_item), \ .offset = offsetof(struct hinic_phy_port_stats, _stat_item) \ } @@ -571,7 +571,7 @@ static struct hinic_stats hinic_port_stats[] = { #define HINIC_TXQ_STAT(_stat_item) { \ .name = "txq%d_"#_stat_item, \ - .size = FIELD_SIZEOF(struct hinic_txq_stats, _stat_item), \ + .size = sizeof_field(struct hinic_txq_stats, _stat_item), \ .offset = offsetof(struct hinic_txq_stats, _stat_item) \ } @@ -586,7 +586,7 @@ static struct hinic_stats hinic_tx_queue_stats[] = { #define HINIC_RXQ_STAT(_stat_item) { \ .name = "rxq%d_"#_stat_item, \ - .size = FIELD_SIZEOF(struct hinic_rxq_stats, _stat_item), \ + .size = sizeof_field(struct hinic_rxq_stats, _stat_item), \ .offset = offsetof(struct hinic_rxq_stats, _stat_item) \ } diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c index 6436a98c5953..22f5887578b2 100644 --- a/drivers/net/ethernet/i825xx/sni_82596.c +++ b/drivers/net/ethernet/i825xx/sni_82596.c @@ -91,10 +91,10 @@ static int sni_82596_probe(struct platform_device *dev) idprom = platform_get_resource(dev, IORESOURCE_MEM, 2); if (!res || !ca || !options || !idprom) return -ENODEV; - mpu_addr = ioremap_nocache(res->start, 4); + mpu_addr = ioremap(res->start, 4); if (!mpu_addr) return -ENOMEM; - ca_addr = ioremap_nocache(ca->start, 4); + ca_addr = ioremap(ca->start, 4); if (!ca_addr) goto probe_failed_free_mpu; @@ -110,7 +110,7 @@ static int sni_82596_probe(struct platform_device *dev) netdevice->base_addr = res->start; netdevice->irq = platform_get_irq(dev, 0); - eth_addr = ioremap_nocache(idprom->start, 0x10); + eth_addr = ioremap(idprom->start, 0x10); if 
(!eth_addr) goto probe_failed; diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index c90080781924..830791ab4619 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -184,7 +184,7 @@ static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter, netdev_err(netdev, "Device down!\n"); return -ENODEV; } - if (retry--) + if (!retry--) break; if (wait_for_completion_timeout(comp_done, div_timeout)) return 0; diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 6c51b1bad8c4..37a2314d3e6b 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -185,13 +185,12 @@ struct e1000_phy_regs { /* board specific private data structure */ struct e1000_adapter { + struct timer_list watchdog_timer; struct timer_list phy_info_timer; struct timer_list blink_timer; struct work_struct reset_task; - struct delayed_work watchdog_task; - - struct workqueue_struct *e1000_workqueue; + struct work_struct watchdog_task; const struct e1000_info *ei; diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index fe7997c18a10..7c5b18d87b49 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -1780,8 +1780,7 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data) } /* guard against interrupt when we're going down */ if (!test_bit(__E1000_DOWN, &adapter->state)) - mod_delayed_work(adapter->e1000_workqueue, - &adapter->watchdog_task, HZ); + mod_timer(&adapter->watchdog_timer, jiffies + 1); } /* Reset on uncorrectable ECC error */ @@ -1861,8 +1860,7 @@ static irqreturn_t e1000_intr(int __always_unused irq, void *data) } /* guard against interrupt when we're going down */ if (!test_bit(__E1000_DOWN, &adapter->state)) - mod_delayed_work(adapter->e1000_workqueue, - &adapter->watchdog_task, HZ); + mod_timer(&adapter->watchdog_timer, jiffies + 1); } /* Reset on uncorrectable ECC error */ @@ -1907,8 +1905,7 @@ static irqreturn_t e1000_msix_other(int __always_unused irq, void *data) hw->mac.get_link_status = true; /* guard against interrupt when we're going down */ if (!test_bit(__E1000_DOWN, &adapter->state)) - mod_delayed_work(adapter->e1000_workqueue, - &adapter->watchdog_task, HZ); + mod_timer(&adapter->watchdog_timer, jiffies + 1); } if (!test_bit(__E1000_DOWN, &adapter->state)) @@ -4284,6 +4281,7 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset) napi_synchronize(&adapter->napi); + del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_info_timer); spin_lock(&adapter->stats64_lock); @@ -5155,11 +5153,25 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter) } } +/** + * e1000_watchdog - Timer Call-back + * @t: pointer to the watchdog timer_list embedded in the adapter + **/ +static void e1000_watchdog(struct timer_list *t) +{ + struct e1000_adapter *adapter = from_timer(adapter, t, watchdog_timer); + + /* Do the rest outside of interrupt context */ + schedule_work(&adapter->watchdog_task); + + /* TODO: make this use queue_delayed_work() */ +} + static void e1000_watchdog_task(struct work_struct *work) { struct e1000_adapter *adapter = container_of(work, struct e1000_adapter, - watchdog_task.work); + watchdog_task); struct net_device *netdev = adapter->netdev; struct e1000_mac_info *mac = &adapter->hw.mac; struct e1000_phy_info *phy = &adapter->hw.phy; @@ -5407,9 +5419,8 @@ link_up: /* 
Reset the timer */ if (!test_bit(__E1000_DOWN, &adapter->state)) - queue_delayed_work(adapter->e1000_workqueue, - &adapter->watchdog_task, - round_jiffies(2 * HZ)); + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + 2 * HZ)); } #define E1000_TX_FLAGS_CSUM 0x00000001 @@ -7449,21 +7460,11 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_eeprom; } - adapter->e1000_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, - e1000e_driver_name); - - if (!adapter->e1000_workqueue) { - err = -ENOMEM; - goto err_workqueue; - } - - INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog_task); - queue_delayed_work(adapter->e1000_workqueue, &adapter->watchdog_task, - 0); - + timer_setup(&adapter->watchdog_timer, e1000_watchdog, 0); timer_setup(&adapter->phy_info_timer, e1000_update_phy_info, 0); INIT_WORK(&adapter->reset_task, e1000_reset_task); + INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround); INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task); INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang); @@ -7557,9 +7558,6 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; err_register: - flush_workqueue(adapter->e1000_workqueue); - destroy_workqueue(adapter->e1000_workqueue); -err_workqueue: if (!(adapter->flags & FLAG_HAS_AMT)) e1000e_release_hw_control(adapter); err_eeprom: @@ -7604,17 +7602,15 @@ static void e1000_remove(struct pci_dev *pdev) * from being rescheduled. */ set_bit(__E1000_DOWN, &adapter->state); + del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_info_timer); cancel_work_sync(&adapter->reset_task); + cancel_work_sync(&adapter->watchdog_task); cancel_work_sync(&adapter->downshift_task); cancel_work_sync(&adapter->update_phy_task); cancel_work_sync(&adapter->print_hang_task); - cancel_delayed_work(&adapter->watchdog_task); - flush_workqueue(adapter->e1000_workqueue); - destroy_workqueue(adapter->e1000_workqueue); - if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) { cancel_work_sync(&adapter->tx_hwtstamp_work); if (adapter->tx_hwtstamp_skb) { diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index c681d2d28107..68edf55ac906 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -18,7 +18,7 @@ struct fm10k_stats { #define FM10K_STAT_FIELDS(_type, _name, _stat) { \ .stat_string = _name, \ - .sizeof_stat = FIELD_SIZEOF(_type, _stat), \ + .sizeof_stat = sizeof_field(_type, _stat), \ .stat_offset = offsetof(_type, _stat) \ } diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index cb6367334ca7..4833187bd259 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -1152,7 +1152,7 @@ void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags); static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi) { - return !!vsi->xdp_prog; + return !!READ_ONCE(vsi->xdp_prog); } int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch); diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 9f0a4e92a231..37514a75f928 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -536,6 +536,11 @@ static void i40e_set_hw_flags(struct i40e_hw *hw) (aq->api_maj_ver == 1 && aq->api_min_ver 
>= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722)) hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE; + + if (aq->api_maj_ver > 1 || + (aq->api_maj_ver == 1 && + aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_X722)) + hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE; /* fall through */ default: break; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index d4055037af89..45b90eb11adb 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1113,7 +1113,7 @@ i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, */ pba_size--; if (pba_num_size < (((u32)pba_size * 2) + 1)) { - hw_dbg(hw, "Buffer to small for PBA data.\n"); + hw_dbg(hw, "Buffer too small for PBA data.\n"); return I40E_ERR_PARAM; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index d24d8731bef0..317f3f1458db 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -43,7 +43,7 @@ struct i40e_stats { */ #define I40E_STAT(_type, _name, _stat) { \ .stat_string = _name, \ - .sizeof_stat = FIELD_SIZEOF(_type, _stat), \ + .sizeof_stat = sizeof_field(_type, _stat), \ .stat_offset = offsetof(_type, _stat) \ } diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c index be24d42280d8..a3da422ab05b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c @@ -659,7 +659,7 @@ i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw) #define I40E_HMC_STORE(_struct, _ele) \ offsetof(struct _struct, _ele), \ - FIELD_SIZEOF(struct _struct, _ele) + sizeof_field(struct _struct, _ele) struct i40e_context_ele { u16 offset; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 1ccabeafa44c..2c5af6d4a6b1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -6823,8 +6823,8 @@ void i40e_down(struct i40e_vsi *vsi) for (i = 0; i < vsi->num_queue_pairs; i++) { i40e_clean_tx_ring(vsi->tx_rings[i]); if (i40e_enabled_xdp_vsi(vsi)) { - /* Make sure that in-progress ndo_xdp_xmit - * calls are completed. + /* Make sure that in-progress ndo_xdp_xmit and + * ndo_xsk_wakeup calls are completed. */ synchronize_rcu(); i40e_clean_tx_ring(vsi->xdp_rings[i]); @@ -12546,8 +12546,12 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, old_prog = xchg(&vsi->xdp_prog, prog); - if (need_reset) + if (need_reset) { + if (!prog) + /* Wait until ndo_xsk_wakeup completes. */ + synchronize_rcu(); i40e_reset_and_rebuild(pf, true, true); + } for (i = 0; i < vsi->num_queue_pairs; i++) WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog); diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 6a3f0fc56c3b..69523ac85639 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -2322,6 +2322,22 @@ static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map, } /** + * i40e_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL + * @vqs: virtchnl_queue_select structure containing bitmaps to validate + * + * Returns true if validation was successful, else false. 
+ */ +static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs) +{ + if ((!vqs->rx_queues && !vqs->tx_queues) || + vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) || + vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES)) + return false; + + return true; +} + +/** * i40e_vc_enable_queues_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer @@ -2346,7 +2362,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg) goto error_param; } - if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) { + if (!i40e_vc_validate_vqs_bitmaps(vqs)) { aq_ret = I40E_ERR_PARAM; goto error_param; } @@ -2408,9 +2424,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg) goto error_param; } - if ((vqs->rx_queues == 0 && vqs->tx_queues == 0) || - vqs->rx_queues > I40E_MAX_VF_QUEUES || - vqs->tx_queues > I40E_MAX_VF_QUEUES) { + if (!i40e_vc_validate_vqs_bitmaps(vqs)) { aq_ret = I40E_ERR_PARAM; goto error_param; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index d07e1a890428..f73cd917c44f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -787,8 +787,12 @@ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) { struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; struct i40e_ring *ring; + if (test_bit(__I40E_CONFIG_BUSY, pf->state)) + return -ENETDOWN; + if (test_bit(__I40E_VSI_DOWN, vsi->state)) return -ENETDOWN; diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index 29de3ae96ef2..bd1b1ed323f4 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -415,4 +415,6 @@ void iavf_enable_channels(struct iavf_adapter *adapter); void iavf_disable_channels(struct iavf_adapter *adapter); void iavf_add_cloud_filter(struct iavf_adapter *adapter); void iavf_del_cloud_filter(struct iavf_adapter *adapter); +struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter, + const u8 *macaddr); #endif /* _IAVF_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c index dad3eec8ccd8..84c3d8d97ef6 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -42,7 +42,7 @@ struct iavf_stats { */ #define IAVF_STAT(_type, _name, _stat) { \ .stat_string = _name, \ - .sizeof_stat = FIELD_SIZEOF(_type, _stat), \ + .sizeof_stat = sizeof_field(_type, _stat), \ .stat_offset = offsetof(_type, _stat) \ } diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 821987da5698..8e16be960e96 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -743,9 +743,8 @@ iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter, * * Returns ptr to the filter object or NULL when no memory available. 
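The i40e_vc_validate_vqs_bitmaps() helper above relies on the fact that a value whose set bits all lie in positions 0..n-1 is strictly less than BIT(n). A self-contained sketch of the same check, with an illustrative queue limit rather than the driver's:

#include <stdbool.h>
#include <stdint.h>

#define MAX_VF_QUEUES 16 /* illustrative limit, not necessarily the driver's */

/* A queue-select bitmap is valid when it requests at least one queue
 * and sets no bit at or above the queue limit: every value built only
 * from bits 0..n-1 is strictly below 1u << n.
 */
static bool validate_vqs_bitmaps(uint32_t rx_queues, uint32_t tx_queues)
{
        if (!rx_queues && !tx_queues)
                return false;

        if (rx_queues >= (1u << MAX_VF_QUEUES) ||
            tx_queues >= (1u << MAX_VF_QUEUES))
                return false;

        return true;
}

Call sites reject the request when this returns false, which is why the enable/disable handlers above negate the result.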
**/ -static struct -iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter, - const u8 *macaddr) +struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter, + const u8 *macaddr) { struct iavf_mac_filter *f; @@ -2065,9 +2064,9 @@ static void iavf_reset_task(struct work_struct *work) struct virtchnl_vf_resource *vfres = adapter->vf_res; struct net_device *netdev = adapter->netdev; struct iavf_hw *hw = &adapter->hw; + struct iavf_mac_filter *f, *ftmp; struct iavf_vlan_filter *vlf; struct iavf_cloud_filter *cf; - struct iavf_mac_filter *f; u32 reg_val; int i = 0, err; bool running; @@ -2181,6 +2180,16 @@ continue_reset: spin_lock_bh(&adapter->mac_vlan_list_lock); + /* Delete filter for the current MAC address, it could have + * been changed by the PF via administratively set MAC. + * Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES. + */ + list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { + if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) { + list_del(&f->list); + kfree(f); + } + } /* re-add all MAC filters */ list_for_each_entry(f, &adapter->mac_filter_list, list) { f->add = true; diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index c46770eba320..1ab9cb339acb 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -1359,6 +1359,9 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); } + spin_lock_bh(&adapter->mac_vlan_list_lock); + iavf_add_filter(adapter, adapter->hw.mac.addr); + spin_unlock_bh(&adapter->mac_vlan_list_lock); iavf_process_config(adapter); } break; diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index aec3c6c379df..9ebd93e79aeb 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -15,7 +15,7 @@ struct ice_stats { #define ICE_STAT(_type, _name, _stat) { \ .stat_string = _name, \ - .sizeof_stat = FIELD_SIZEOF(_type, _stat), \ + .sizeof_stat = sizeof_field(_type, _stat), \ .stat_offset = offsetof(_type, _stat) \ } @@ -36,10 +36,10 @@ static int ice_q_stats_len(struct net_device *netdev) #define ICE_VSI_STATS_LEN ARRAY_SIZE(ice_gstrings_vsi_stats) #define ICE_PFC_STATS_LEN ( \ - (FIELD_SIZEOF(struct ice_pf, stats.priority_xoff_rx) + \ - FIELD_SIZEOF(struct ice_pf, stats.priority_xon_rx) + \ - FIELD_SIZEOF(struct ice_pf, stats.priority_xoff_tx) + \ - FIELD_SIZEOF(struct ice_pf, stats.priority_xon_tx)) \ + (sizeof_field(struct ice_pf, stats.priority_xoff_rx) + \ + sizeof_field(struct ice_pf, stats.priority_xon_rx) + \ + sizeof_field(struct ice_pf, stats.priority_xoff_tx) + \ + sizeof_field(struct ice_pf, stats.priority_xon_tx)) \ / sizeof(u64)) #define ICE_ALL_STATS_LEN(n) (ICE_PF_STATS_LEN + ICE_PFC_STATS_LEN + \ ICE_VSI_STATS_LEN + ice_q_stats_len(n)) diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index ad34f22d44ef..0997d352709b 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -302,7 +302,7 @@ struct ice_ctx_ele { #define ICE_CTX_STORE(_struct, _ele, _width, _lsb) { \ .offset = offsetof(struct _struct, _ele), \ - .size_of = FIELD_SIZEOF(struct _struct, _ele), \ + .size_of = sizeof_field(struct _struct, _ele), \ .width = _width, \ .lsb = _lsb, \ } diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c 
b/drivers/net/ethernet/intel/igb/e1000_82575.c index 8a6ef3514129..438b42ce2cd9 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -530,7 +530,7 @@ static s32 igb_set_sfp_media_type_82575(struct e1000_hw *hw) dev_spec->module_plugged = true; if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) { hw->phy.media_type = e1000_media_type_internal_serdes; - } else if (eth_flags->e100_base_fx) { + } else if (eth_flags->e100_base_fx || eth_flags->e100_base_lx) { dev_spec->sgmii_active = true; hw->phy.media_type = e1000_media_type_internal_serdes; } else if (eth_flags->e1000_base_t) { @@ -657,14 +657,10 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) break; } - /* do not change link mode for 100BaseFX */ - if (dev_spec->eth_flags.e100_base_fx) - break; - /* change current link mode setting */ ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK; - if (hw->phy.media_type == e1000_media_type_copper) + if (dev_spec->sgmii_active) ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII; else ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 3182b059bf55..445fbdce3e25 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -26,7 +26,7 @@ struct igb_stats { #define IGB_STAT(_name, _stat) { \ .stat_string = _name, \ - .sizeof_stat = FIELD_SIZEOF(struct igb_adapter, _stat), \ + .sizeof_stat = sizeof_field(struct igb_adapter, _stat), \ .stat_offset = offsetof(struct igb_adapter, _stat) \ } static const struct igb_stats igb_gstrings_stats[] = { @@ -76,7 +76,7 @@ static const struct igb_stats igb_gstrings_stats[] = { #define IGB_NETDEV_STAT(_net_stat) { \ .stat_string = __stringify(_net_stat), \ - .sizeof_stat = FIELD_SIZEOF(struct rtnl_link_stats64, _net_stat), \ + .sizeof_stat = sizeof_field(struct rtnl_link_stats64, _net_stat), \ .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \ } static const struct igb_stats igb_gstrings_net_stats[] = { @@ -181,7 +181,7 @@ static int igb_get_link_ksettings(struct net_device *netdev, advertising &= ~ADVERTISED_1000baseKX_Full; } } - if (eth_flags->e100_base_fx) { + if (eth_flags->e100_base_fx || eth_flags->e100_base_lx) { supported |= SUPPORTED_100baseT_Full; advertising |= ADVERTISED_100baseT_Full; } diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index ac98f1d96892..455c1cdceb6e 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -16,7 +16,7 @@ struct igc_stats { #define IGC_STAT(_name, _stat) { \ .stat_string = _name, \ - .sizeof_stat = FIELD_SIZEOF(struct igc_adapter, _stat), \ + .sizeof_stat = sizeof_field(struct igc_adapter, _stat), \ .stat_offset = offsetof(struct igc_adapter, _stat) \ } @@ -67,7 +67,7 @@ static const struct igc_stats igc_gstrings_stats[] = { #define IGC_NETDEV_STAT(_net_stat) { \ .stat_string = __stringify(_net_stat), \ - .sizeof_stat = FIELD_SIZEOF(struct rtnl_link_stats64, _net_stat), \ + .sizeof_stat = sizeof_field(struct rtnl_link_stats64, _net_stat), \ .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \ } diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c index c8c93ac436d4..c65eb1afc8fb 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c @@ -19,10 +19,10 @@ struct 
ixgb_stats { }; #define IXGB_STAT(m) IXGB_STATS, \ - FIELD_SIZEOF(struct ixgb_adapter, m), \ + sizeof_field(struct ixgb_adapter, m), \ offsetof(struct ixgb_adapter, m) #define IXGB_NETDEV_STAT(m) NETDEV_STATS, \ - FIELD_SIZEOF(struct net_device, m), \ + sizeof_field(struct net_device, m), \ offsetof(struct net_device, m) static struct ixgb_stats ixgb_gstrings_stats[] = { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 25c097cd8100..a2b2ad1f60b1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -5239,7 +5239,7 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; struct hlist_node *node2; struct ixgbe_fdir_filter *filter; - u64 action; + u8 queue; spin_lock(&adapter->fdir_perfect_lock); @@ -5248,17 +5248,34 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter) hlist_for_each_entry_safe(filter, node2, &adapter->fdir_filter_list, fdir_node) { - action = filter->action; - if (action != IXGBE_FDIR_DROP_QUEUE && action != 0) - action = - (action >> ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) - 1; + if (filter->action == IXGBE_FDIR_DROP_QUEUE) { + queue = IXGBE_FDIR_DROP_QUEUE; + } else { + u32 ring = ethtool_get_flow_spec_ring(filter->action); + u8 vf = ethtool_get_flow_spec_ring_vf(filter->action); + + if (!vf && (ring >= adapter->num_rx_queues)) { + e_err(drv, "FDIR restore failed without VF, ring: %u\n", + ring); + continue; + } else if (vf && + ((vf > adapter->num_vfs) || + ring >= adapter->num_rx_queues_per_pool)) { + e_err(drv, "FDIR restore failed with VF, vf: %hhu, ring: %u\n", + vf, ring); + continue; + } + + /* Map the ring onto the absolute queue index */ + if (!vf) + queue = adapter->rx_ring[ring]->reg_idx; + else + queue = ((vf - 1) * + adapter->num_rx_queues_per_pool) + ring; + } ixgbe_fdir_write_perfect_filter_82599(hw, - &filter->filter, - filter->sw_idx, - (action == IXGBE_FDIR_DROP_QUEUE) ? - IXGBE_FDIR_DROP_QUEUE : - adapter->rx_ring[action]->reg_idx); + &filter->filter, filter->sw_idx, queue); } spin_unlock(&adapter->fdir_perfect_lock); @@ -10261,7 +10278,12 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog) /* If transitioning XDP modes reconfigure rings */ if (need_reset) { - int err = ixgbe_setup_tc(dev, adapter->hw_tcs); + int err; + + if (!prog) + /* Wait until ndo_xsk_wakeup completes. 
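The synchronize_rcu() calls that the i40e and ixgbe hunks in this series add before reconfiguring follow the usual unpublish-then-wait pattern: wakeup callers read the shared state under rcu_read_lock(), and the teardown path swaps the pointer out, then waits for every pre-existing reader to finish. A generic sketch with hypothetical names:

#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical per-device XDP state published to lockless readers. */
struct xdp_state {
        int queue_id;
};

static struct xdp_state __rcu *active_state;

/* Fast path, e.g. an ndo_xsk_wakeup handler: only dereference the
 * shared pointer inside an RCU read-side critical section.
 */
static int wakeup_fast_path(void)
{
        struct xdp_state *st;
        int ret = -ENETDOWN;

        rcu_read_lock();
        st = rcu_dereference(active_state);
        if (st)
                ret = st->queue_id; /* stand-in for the real doorbell work */
        rcu_read_unlock();

        return ret;
}

/* Teardown: unpublish first, then wait out every reader that may
 * still hold the old pointer before freeing or reconfiguring.
 */
static void teardown(void)
{
        struct xdp_state *st;

        st = rcu_dereference_protected(active_state, true);
        RCU_INIT_POINTER(active_state, NULL);
        synchronize_rcu();
        kfree(st);
}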
*/ + synchronize_rcu(); + err = ixgbe_setup_tc(dev, adapter->hw_tcs); if (err) { rcu_assign_pointer(adapter->xdp_prog, old_prog); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index d6feaacfbf89..b43be9f14105 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -709,10 +709,14 @@ int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) if (qid >= adapter->num_xdp_queues) return -ENXIO; - if (!adapter->xdp_ring[qid]->xsk_umem) + ring = adapter->xdp_ring[qid]; + + if (test_bit(__IXGBE_TX_DISABLED, &ring->state)) + return -ENETDOWN; + + if (!ring->xsk_umem) return -ENXIO; - ring = adapter->xdp_ring[qid]; if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) { u64 eics = BIT_ULL(ring->q_vector->v_idx); diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index 54459b69c948..f7f309c96fa8 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -31,14 +31,14 @@ struct ixgbe_stats { #define IXGBEVF_STAT(_name, _stat) { \ .stat_string = _name, \ .type = IXGBEVF_STATS, \ - .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, _stat), \ + .sizeof_stat = sizeof_field(struct ixgbevf_adapter, _stat), \ .stat_offset = offsetof(struct ixgbevf_adapter, _stat) \ } #define IXGBEVF_NETDEV_STAT(_net_stat) { \ .stat_string = #_net_stat, \ .type = NETDEV_STATS, \ - .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \ + .sizeof_stat = sizeof_field(struct net_device_stats, _net_stat), \ .stat_offset = offsetof(struct net_device_stats, _net_stat) \ } diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 076f2da36f27..64ec0e7c64b4 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -2081,11 +2081,6 @@ static int ixgbevf_write_uc_addr_list(struct net_device *netdev) struct ixgbe_hw *hw = &adapter->hw; int count = 0; - if ((netdev_uc_count(netdev)) > 10) { - pr_err("Too many unicast filters - No Space\n"); - return -ENOSPC; - } - if (!netdev_uc_empty(netdev)) { struct netdev_hw_addr *ha; diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index ae195f8adff5..d3164537b694 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -1043,7 +1043,7 @@ static int korina_probe(struct platform_device *pdev) r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_regs"); dev->base_addr = r->start; - lp->eth_regs = ioremap_nocache(r->start, resource_size(r)); + lp->eth_regs = ioremap(r->start, resource_size(r)); if (!lp->eth_regs) { printk(KERN_ERR DRV_NAME ": cannot remap registers\n"); rc = -ENXIO; @@ -1051,7 +1051,7 @@ static int korina_probe(struct platform_device *pdev) } r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_rx"); - lp->rx_dma_regs = ioremap_nocache(r->start, resource_size(r)); + lp->rx_dma_regs = ioremap(r->start, resource_size(r)); if (!lp->rx_dma_regs) { printk(KERN_ERR DRV_NAME ": cannot remap Rx DMA registers\n"); rc = -ENXIO; @@ -1059,7 +1059,7 @@ static int korina_probe(struct platform_device *pdev) } r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_tx"); - lp->tx_dma_regs = ioremap_nocache(r->start, resource_size(r)); + lp->tx_dma_regs = ioremap(r->start, resource_size(r)); if (!lp->tx_dma_regs) { printk(KERN_ERR DRV_NAME ": cannot 
remap Tx DMA registers\n"); rc = -ENXIO; diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 6e73ffe6f928..41f2f5480741 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -649,7 +649,7 @@ ltq_etop_probe(struct platform_device *pdev) goto err_out; } - ltq_etop_membase = devm_ioremap_nocache(&pdev->dev, + ltq_etop_membase = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!ltq_etop_membase) { dev_err(&pdev->dev, "failed to remap etop engine %d\n", diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index d5b644131cff..65a093216dac 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -1432,11 +1432,11 @@ struct mv643xx_eth_stats { }; #define SSTAT(m) \ - { #m, FIELD_SIZEOF(struct net_device_stats, m), \ + { #m, sizeof_field(struct net_device_stats, m), \ offsetof(struct net_device, stats.m), -1 } #define MIBSTAT(m) \ - { #m, FIELD_SIZEOF(struct mib_counters, m), \ + { #m, sizeof_field(struct mib_counters, m), \ -1, offsetof(struct mv643xx_eth_private, mib_counters.m) } static const struct mv643xx_eth_stats mv643xx_eth_stats[] = { diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 71a872d46bc4..67ad8b8b127d 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -2081,7 +2081,11 @@ static int mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, struct bpf_prog *prog, struct xdp_buff *xdp) { - u32 ret, act = bpf_prog_run_xdp(prog, xdp); + unsigned int len; + u32 ret, act; + + len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction; + act = bpf_prog_run_xdp(prog, xdp); switch (act) { case XDP_PASS: @@ -2094,9 +2098,8 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, if (err) { ret = MVNETA_XDP_DROPPED; __page_pool_put_page(rxq->page_pool, - virt_to_head_page(xdp->data), - xdp->data_end - xdp->data_hard_start, - true); + virt_to_head_page(xdp->data), + len, true); } else { ret = MVNETA_XDP_REDIR; } @@ -2106,9 +2109,8 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, ret = mvneta_xdp_xmit_back(pp, xdp); if (ret != MVNETA_XDP_TX) __page_pool_put_page(rxq->page_pool, - virt_to_head_page(xdp->data), - xdp->data_end - xdp->data_hard_start, - true); + virt_to_head_page(xdp->data), + len, true); break; default: bpf_warn_invalid_xdp_action(act); @@ -2119,8 +2121,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, case XDP_DROP: __page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data), - xdp->data_end - xdp->data_hard_start, - true); + len, true); ret = MVNETA_XDP_DROPPED; break; } diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 62dc2f362a16..14e372cda7f4 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -3680,7 +3680,7 @@ static int mvpp2_open(struct net_device *dev) valid = true; } - if (priv->hw_version == MVPP22 && port->link_irq && !port->phylink) { + if (priv->hw_version == MVPP22 && port->link_irq) { err = request_irq(port->link_irq, mvpp2_link_status_isr, 0, dev->name, port); if (err) { diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 095f6c71b4fa..7515d079c600 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ 
b/drivers/net/ethernet/marvell/skge.c @@ -3932,7 +3932,7 @@ static int skge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) spin_lock_init(&hw->phy_lock); tasklet_init(&hw->phy_task, skge_extirq, (unsigned long) hw); - hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); + hw->regs = ioremap(pci_resource_start(pdev, 0), 0x4000); if (!hw->regs) { dev_err(&pdev->dev, "cannot map device registers\n"); goto err_out_free_hw; diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 5f56ee83e3b1..535dee35e04e 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -5022,7 +5022,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->pdev = pdev; sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev)); - hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); + hw->regs = ioremap(pci_resource_start(pdev, 0), 0x4000); if (!hw->regs) { dev_err(&pdev->dev, "cannot map device registers\n"); goto err_out_free_hw; diff --git a/drivers/net/ethernet/mellanox/mlx4/crdump.c b/drivers/net/ethernet/mellanox/mlx4/crdump.c index eaf08f7ad128..64ed725aec28 100644 --- a/drivers/net/ethernet/mellanox/mlx4/crdump.c +++ b/drivers/net/ethernet/mellanox/mlx4/crdump.c @@ -182,7 +182,7 @@ int mlx4_crdump_collect(struct mlx4_dev *dev) crdump_enable_crspace_access(dev, cr_space); /* Get the available snapshot ID for the dumps */ - id = devlink_region_shapshot_id_get(devlink); + id = devlink_region_snapshot_id_get(devlink); /* Try to capture dumps */ mlx4_crdump_collect_crspace(dev, cr_space, id); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index a1202e53710c..8bf1f08fdee2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -611,7 +611,7 @@ static u32 ptys_get_active_port(struct mlx4_ptys_reg *ptys_reg) } #define MLX4_LINK_MODES_SZ \ - (FIELD_SIZEOF(struct mlx4_ptys_reg, eth_proto_cap) * 8) + (sizeof_field(struct mlx4_ptys_reg, eth_proto_cap) * 8) enum ethtool_report { SUPPORTED = 0, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 2c16add0b642..9c8427698238 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -760,7 +760,7 @@ enum { MLX5E_STATE_OPENED, MLX5E_STATE_DESTROYING, MLX5E_STATE_XDP_TX_ENABLED, - MLX5E_STATE_XDP_OPEN, + MLX5E_STATE_XDP_ACTIVE, }; struct mlx5e_rqt { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h index 68d593074f6c..d48292ccda29 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h @@ -122,6 +122,22 @@ enum { #endif }; +#define MLX5E_TTC_NUM_GROUPS 3 +#define MLX5E_TTC_GROUP1_SIZE (BIT(3) + MLX5E_NUM_TUNNEL_TT) +#define MLX5E_TTC_GROUP2_SIZE BIT(1) +#define MLX5E_TTC_GROUP3_SIZE BIT(0) +#define MLX5E_TTC_TABLE_SIZE (MLX5E_TTC_GROUP1_SIZE +\ + MLX5E_TTC_GROUP2_SIZE +\ + MLX5E_TTC_GROUP3_SIZE) + +#define MLX5E_INNER_TTC_NUM_GROUPS 3 +#define MLX5E_INNER_TTC_GROUP1_SIZE BIT(3) +#define MLX5E_INNER_TTC_GROUP2_SIZE BIT(1) +#define MLX5E_INNER_TTC_GROUP3_SIZE BIT(0) +#define MLX5E_INNER_TTC_TABLE_SIZE (MLX5E_INNER_TTC_GROUP1_SIZE +\ + MLX5E_INNER_TTC_GROUP2_SIZE +\ + MLX5E_INNER_TTC_GROUP3_SIZE) + #ifdef CONFIG_MLX5_EN_RXNFC struct mlx5e_ethtool_table { diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c index 1d6b58860da6..3a975641f902 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c @@ -197,9 +197,10 @@ int mlx5e_health_report(struct mlx5e_priv *priv, struct devlink_health_reporter *reporter, char *err_str, struct mlx5e_err_ctx *err_ctx) { - if (!reporter) { - netdev_err(priv->netdev, err_str); + netdev_err(priv->netdev, err_str); + + if (!reporter) return err_ctx->recover(&err_ctx->ctx); - } + return devlink_health_report(reporter, err_str, err_ctx); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h index 36ac1e3816b9..d7587f40ecae 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h @@ -75,12 +75,18 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, static inline void mlx5e_xdp_tx_enable(struct mlx5e_priv *priv) { set_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state); + + if (priv->channels.params.xdp_prog) + set_bit(MLX5E_STATE_XDP_ACTIVE, &priv->state); } static inline void mlx5e_xdp_tx_disable(struct mlx5e_priv *priv) { + if (priv->channels.params.xdp_prog) + clear_bit(MLX5E_STATE_XDP_ACTIVE, &priv->state); + clear_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state); - /* let other device's napi(s) see our new state */ + /* Let other device's napi(s) and XSK wakeups see our new state. */ synchronize_rcu(); } @@ -89,19 +95,9 @@ static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv) return test_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state); } -static inline void mlx5e_xdp_set_open(struct mlx5e_priv *priv) -{ - set_bit(MLX5E_STATE_XDP_OPEN, &priv->state); -} - -static inline void mlx5e_xdp_set_closed(struct mlx5e_priv *priv) -{ - clear_bit(MLX5E_STATE_XDP_OPEN, &priv->state); -} - -static inline bool mlx5e_xdp_is_open(struct mlx5e_priv *priv) +static inline bool mlx5e_xdp_is_active(struct mlx5e_priv *priv) { - return test_bit(MLX5E_STATE_XDP_OPEN, &priv->state); + return test_bit(MLX5E_STATE_XDP_ACTIVE, &priv->state); } static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c index 631af8dee517..c28cbae42331 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c @@ -144,6 +144,7 @@ void mlx5e_close_xsk(struct mlx5e_channel *c) { clear_bit(MLX5E_CHANNEL_STATE_XSK, c->state); napi_synchronize(&c->napi); + synchronize_rcu(); /* Sync with the XSK wakeup. 
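+ * mlx5e_xsk_wakeup() re-checks the channel state under rcu_read_lock(),
+ * so the grace period should make the cleared XSK bit visible before the
+ * queues are destroyed below (assumed rationale).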
*/ mlx5e_close_rq(&c->xskrq); mlx5e_close_cq(&c->xskrq.cq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c index 87827477d38c..fe2d596cb361 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c @@ -14,7 +14,7 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) struct mlx5e_channel *c; u16 ix; - if (unlikely(!mlx5e_xdp_is_open(priv))) + if (unlikely(!mlx5e_xdp_is_active(priv))) return -ENETDOWN; if (unlikely(!mlx5e_qid_get_ch_if_in_group(params, qid, MLX5E_RQ_GROUP_XSK, &ix))) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c index 778dab1af8fc..f260dd96873b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c @@ -180,7 +180,7 @@ mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq, struct tx_sync_info { u64 rcd_sn; - s32 sync_len; + u32 sync_len; int nr_frags; skb_frag_t frags[MAX_SKB_FRAGS]; }; @@ -193,13 +193,14 @@ enum mlx5e_ktls_sync_retval { static enum mlx5e_ktls_sync_retval tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx, - u32 tcp_seq, struct tx_sync_info *info) + u32 tcp_seq, int datalen, struct tx_sync_info *info) { struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx; enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE; struct tls_record_info *record; int remaining, i = 0; unsigned long flags; + bool ends_before; spin_lock_irqsave(&tx_ctx->lock, flags); record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn); @@ -209,9 +210,21 @@ tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx, goto out; } - if (unlikely(tcp_seq < tls_record_start_seq(record))) { - ret = tls_record_is_start_marker(record) ? - MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL; + /* There are the following cases: + * 1. packet ends before start marker: bypass offload. + * 2. packet starts before start marker and ends after it: drop, + * not supported, breaks contract with kernel. + * 3. packet ends before tls record info starts: drop, + * this packet was already acknowledged and its record info + * was released. + */ + ends_before = before(tcp_seq + datalen, tls_record_start_seq(record)); + + if (unlikely(tls_record_is_start_marker(record))) { + ret = ends_before ? 
MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL; + goto out; + } else if (ends_before) { + ret = MLX5E_KTLS_SYNC_FAIL; goto out; } @@ -337,7 +350,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx, u8 num_wqebbs; int i = 0; - ret = tx_sync_info_get(priv_tx, seq, &info); + ret = tx_sync_info_get(priv_tx, seq, datalen, &info); if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) { if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) { stats->tls_skip_no_sync_data++; @@ -351,14 +364,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx, goto err_out; } - if (unlikely(info.sync_len < 0)) { - if (likely(datalen <= -info.sync_len)) - return MLX5E_KTLS_SYNC_DONE; - - stats->tls_drop_bypass_req++; - goto err_out; - } - stats->tls_ooo++; tx_post_resync_params(sq, priv_tx, info.rcd_sn); @@ -378,8 +383,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx, if (unlikely(contig_wqebbs_room < num_wqebbs)) mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room); - tx_post_resync_params(sq, priv_tx, info.rcd_sn); - for (; i < info.nr_frags; i++) { unsigned int orig_fsz, frag_offset = 0, n = 0; skb_frag_t *f = &info.frags[i]; @@ -455,12 +458,18 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev, enum mlx5e_ktls_sync_retval ret = mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq); - if (likely(ret == MLX5E_KTLS_SYNC_DONE)) + switch (ret) { + case MLX5E_KTLS_SYNC_DONE: *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi); - else if (ret == MLX5E_KTLS_SYNC_FAIL) + break; + case MLX5E_KTLS_SYNC_SKIP_NO_DATA: + if (likely(!skb->decrypted)) + goto out; + WARN_ON_ONCE(1); + /* fall-through */ + default: /* MLX5E_KTLS_SYNC_FAIL */ goto err_out; - else /* ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA */ - goto out; + } } priv_tx->expected_seq = seq + datalen; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 15b7f0f1427c..73d3dc07331f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -904,22 +904,6 @@ del_rules: return err; } -#define MLX5E_TTC_NUM_GROUPS 3 -#define MLX5E_TTC_GROUP1_SIZE (BIT(3) + MLX5E_NUM_TUNNEL_TT) -#define MLX5E_TTC_GROUP2_SIZE BIT(1) -#define MLX5E_TTC_GROUP3_SIZE BIT(0) -#define MLX5E_TTC_TABLE_SIZE (MLX5E_TTC_GROUP1_SIZE +\ - MLX5E_TTC_GROUP2_SIZE +\ - MLX5E_TTC_GROUP3_SIZE) - -#define MLX5E_INNER_TTC_NUM_GROUPS 3 -#define MLX5E_INNER_TTC_GROUP1_SIZE BIT(3) -#define MLX5E_INNER_TTC_GROUP2_SIZE BIT(1) -#define MLX5E_INNER_TTC_GROUP3_SIZE BIT(0) -#define MLX5E_INNER_TTC_TABLE_SIZE (MLX5E_INNER_TTC_GROUP1_SIZE +\ - MLX5E_INNER_TTC_GROUP2_SIZE +\ - MLX5E_INNER_TTC_GROUP3_SIZE) - static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc, bool use_ipv) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 4980e80a5e85..4997b8a51994 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3000,12 +3000,9 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv) int mlx5e_open_locked(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); - bool is_xdp = priv->channels.params.xdp_prog; int err; set_bit(MLX5E_STATE_OPENED, &priv->state); - if (is_xdp) - mlx5e_xdp_set_open(priv); err = mlx5e_open_channels(priv, &priv->channels); if (err) @@ -3020,8 +3017,6 @@ int mlx5e_open_locked(struct net_device *netdev) return 0; err_clear_state_opened_flag: - if 
(is_xdp) - mlx5e_xdp_set_closed(priv); clear_bit(MLX5E_STATE_OPENED, &priv->state); return err; } @@ -3053,8 +3048,6 @@ int mlx5e_close_locked(struct net_device *netdev) if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) return 0; - if (priv->channels.params.xdp_prog) - mlx5e_xdp_set_closed(priv); clear_bit(MLX5E_STATE_OPENED, &priv->state); netif_carrier_off(priv->netdev); @@ -4371,16 +4364,6 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog) return 0; } -static int mlx5e_xdp_update_state(struct mlx5e_priv *priv) -{ - if (priv->channels.params.xdp_prog) - mlx5e_xdp_set_open(priv); - else - mlx5e_xdp_set_closed(priv); - - return 0; -} - static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -4415,7 +4398,7 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) mlx5e_set_rq_type(priv->mdev, &new_channels.params); old_prog = priv->channels.params.xdp_prog; - err = mlx5e_safe_switch_channels(priv, &new_channels, mlx5e_xdp_update_state); + err = mlx5e_safe_switch_channels(priv, &new_channels, NULL); if (err) goto unlock; } else { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 9b32a9c0f497..7e32b9e3667c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -592,7 +592,7 @@ static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp, for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) ttc_params->indir_tirn[tt] = hp->indir_tirn[tt]; - ft_attr->max_fte = MLX5E_NUM_TT; + ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE; ft_attr->level = MLX5E_TC_TTC_FT_LEVEL; ft_attr->prio = MLX5E_TC_PRIO; } @@ -2999,6 +2999,25 @@ static struct ip_tunnel_info *dup_tun_info(const struct ip_tunnel_info *tun_info return kmemdup(tun_info, tun_size, GFP_KERNEL); } +static bool is_duplicated_encap_entry(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + int out_index, + struct mlx5e_encap_entry *e, + struct netlink_ext_ack *extack) +{ + int i; + + for (i = 0; i < out_index; i++) { + if (flow->encaps[i].e != e) + continue; + NL_SET_ERR_MSG_MOD(extack, "can't duplicate encap action"); + netdev_err(priv->netdev, "can't duplicate encap action\n"); + return true; + } + + return false; +} + static int mlx5e_attach_encap(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow, struct net_device *mirred_dev, @@ -3034,6 +3053,12 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, /* must verify if encap is valid or not */ if (e) { + /* Check that entry was not already attached to this flow */ + if (is_duplicated_encap_entry(priv, flow, out_index, e, extack)) { + err = -EOPNOTSUPP; + goto out_err; + } + mutex_unlock(&esw->offloads.encap_tbl_lock); wait_for_completion(&e->res_ready); @@ -3220,6 +3245,26 @@ bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv, same_hw_devs(priv, netdev_priv(out_dev)); } +static bool is_duplicated_output_device(struct net_device *dev, + struct net_device *out_dev, + int *ifindexes, int if_count, + struct netlink_ext_ack *extack) +{ + int i; + + for (i = 0; i < if_count; i++) { + if (ifindexes[i] == out_dev->ifindex) { + NL_SET_ERR_MSG_MOD(extack, + "can't duplicate output to same device"); + netdev_err(dev, "can't duplicate output to same device: %s\n", + out_dev->name); + return true; + } + } + + return false; +} + static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct flow_action *flow_action, struct mlx5e_tc_flow *flow, @@ 
-3231,11 +3276,12 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr; struct mlx5e_rep_priv *rpriv = priv->ppriv; const struct ip_tunnel_info *info = NULL; + int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS]; bool ft_flow = mlx5e_is_ft_flow(flow); const struct flow_action_entry *act; + int err, i, if_count = 0; bool encap = false; u32 action = 0; - int err, i; if (!flow_action_has_entries(flow_action)) return -EINVAL; @@ -3312,6 +3358,16 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH); struct net_device *uplink_upper; + if (is_duplicated_output_device(priv->netdev, + out_dev, + ifindexes, + if_count, + extack)) + return -EOPNOTSUPP; + + ifindexes[if_count] = out_dev->ifindex; + if_count++; + rcu_read_lock(); uplink_upper = netdev_master_upper_dev_get_rcu(uplink_dev); @@ -3980,6 +4036,13 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate, u32 rate_mbps; int err; + vport_num = rpriv->rep->vport; + if (vport_num >= MLX5_VPORT_ECPF) { + NL_SET_ERR_MSG_MOD(extack, + "Ingress rate limit is supported only for Eswitch ports connected to VFs"); + return -EOPNOTSUPP; + } + esw = priv->mdev->priv.eswitch; /* rate is given in bytes/sec. * First convert to bits/sec and then round to the nearest mbit/secs. @@ -3988,8 +4051,6 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate, * 1 mbit/sec. */ rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0; - vport_num = rpriv->rep->vport; - err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps); if (err) NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 2c965ad0d744..3df3604e8929 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1928,8 +1928,10 @@ static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw) struct mlx5_vport *vport; int i; - mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) + mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) { memset(&vport->info, 0, sizeof(vport->info)); + vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO; + } } /* Public E-Switch API */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 243a5440867e..3e6412783078 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -866,7 +866,7 @@ out: */ #define ESW_SIZE (16 * 1024 * 1024) const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024, - 64 * 1024, 4 * 1024 }; + 64 * 1024, 128 }; static int get_sz_from_pool(struct mlx5_eswitch *esw) @@ -1377,7 +1377,7 @@ static int esw_offloads_start(struct mlx5_eswitch *esw, return -EINVAL; } - mlx5_eswitch_disable(esw, false); + mlx5_eswitch_disable(esw, true); mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs); err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS); if (err) { @@ -2220,7 +2220,8 @@ int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type int esw_offloads_enable(struct mlx5_eswitch *esw) { - int err; + struct mlx5_vport *vport; + int err, i; if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) && MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap)) @@ -2237,6 
+2238,10 @@ int esw_offloads_enable(struct mlx5_eswitch *esw) if (err) goto err_vport_metadata; + /* Representor will control the vport link state */ + mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) + vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN; + err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE); if (err) goto err_vports; @@ -2266,7 +2271,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw, { int err, err1; - mlx5_eswitch_disable(esw, false); + mlx5_eswitch_disable(esw, true); err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY); if (err) { NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c index c76da309506b..e4ec0e03c289 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c @@ -87,10 +87,10 @@ static const struct rhashtable_params rhash_sa = { * value is not constant during the lifetime * of the key object. */ - .key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) - - FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd), + .key_len = sizeof_field(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) - + sizeof_field(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd), .key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) + - FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd), + sizeof_field(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd), .head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash), .automatic_shrinking = true, .min_size = 1, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index d60577484567..8c5df6c7d7b6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -209,7 +209,7 @@ enum fs_i_lock_class { }; static const struct rhashtable_params rhash_fte = { - .key_len = FIELD_SIZEOF(struct fs_fte, val), + .key_len = sizeof_field(struct fs_fte, val), .key_offset = offsetof(struct fs_fte, val), .head_offset = offsetof(struct fs_fte, hash), .automatic_shrinking = true, @@ -217,7 +217,7 @@ static const struct rhashtable_params rhash_fte = { }; static const struct rhashtable_params rhash_fg = { - .key_len = FIELD_SIZEOF(struct mlx5_flow_group, mask), + .key_len = sizeof_field(struct mlx5_flow_group, mask), .key_offset = offsetof(struct mlx5_flow_group, mask), .head_offset = offsetof(struct mlx5_flow_group, hash), .automatic_shrinking = true, @@ -531,16 +531,9 @@ static void del_hw_fte(struct fs_node *node) } } -static void del_sw_fte_rcu(struct rcu_head *head) -{ - struct fs_fte *fte = container_of(head, struct fs_fte, rcu); - struct mlx5_flow_steering *steering = get_steering(&fte->node); - - kmem_cache_free(steering->ftes_cache, fte); -} - static void del_sw_fte(struct fs_node *node) { + struct mlx5_flow_steering *steering = get_steering(node); struct mlx5_flow_group *fg; struct fs_fte *fte; int err; @@ -553,8 +546,7 @@ static void del_sw_fte(struct fs_node *node) rhash_fte); WARN_ON(err); ida_simple_remove(&fg->fte_allocator, fte->index - fg->start_index); - - call_rcu(&fte->rcu, del_sw_fte_rcu); + kmem_cache_free(steering->ftes_cache, fte); } static void del_hw_flow_group(struct fs_node *node) @@ -1633,47 +1625,22 @@ static u64 matched_fgs_get_version(struct list_head *match_head) } static struct fs_fte * -lookup_fte_for_write_locked(struct mlx5_flow_group *g, const u32 *match_value) +lookup_fte_locked(struct 
mlx5_flow_group *g, + const u32 *match_value, + bool take_write) { struct fs_fte *fte_tmp; - nested_down_write_ref_node(&g->node, FS_LOCK_PARENT); - - fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value, rhash_fte); - if (!fte_tmp || !tree_get_node(&fte_tmp->node)) { - fte_tmp = NULL; - goto out; - } - - if (!fte_tmp->node.active) { - tree_put_node(&fte_tmp->node, false); - fte_tmp = NULL; - goto out; - } - nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD); - -out: - up_write_ref_node(&g->node, false); - return fte_tmp; -} - -static struct fs_fte * -lookup_fte_for_read_locked(struct mlx5_flow_group *g, const u32 *match_value) -{ - struct fs_fte *fte_tmp; - - if (!tree_get_node(&g->node)) - return NULL; - - rcu_read_lock(); - fte_tmp = rhashtable_lookup(&g->ftes_hash, match_value, rhash_fte); + if (take_write) + nested_down_write_ref_node(&g->node, FS_LOCK_PARENT); + else + nested_down_read_ref_node(&g->node, FS_LOCK_PARENT); + fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value, + rhash_fte); if (!fte_tmp || !tree_get_node(&fte_tmp->node)) { - rcu_read_unlock(); fte_tmp = NULL; goto out; } - rcu_read_unlock(); - if (!fte_tmp->node.active) { tree_put_node(&fte_tmp->node, false); fte_tmp = NULL; @@ -1681,19 +1648,12 @@ lookup_fte_for_read_locked(struct mlx5_flow_group *g, const u32 *match_value) } nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD); - out: - tree_put_node(&g->node, false); - return fte_tmp; -} - -static struct fs_fte * -lookup_fte_locked(struct mlx5_flow_group *g, const u32 *match_value, bool write) -{ - if (write) - return lookup_fte_for_write_locked(g, match_value); + if (take_write) + up_write_ref_node(&g->node, false); else - return lookup_fte_for_read_locked(g, match_value); + up_read_ref_node(&g->node); + return fte_tmp; } static struct mlx5_flow_handle * diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index e8cd997f413e..c2621b911563 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -203,7 +203,6 @@ struct fs_fte { enum fs_fte_status status; struct mlx5_fc *counter; struct rhash_head hash; - struct rcu_head rcu; int modify_mask; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 173e2c12e1c7..f554cfddcf4e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1193,6 +1193,12 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot) if (err) goto err_load; + if (boot) { + err = mlx5_devlink_register(priv_to_devlink(dev), dev->device); + if (err) + goto err_devlink_reg; + } + if (mlx5_device_registered(dev)) { mlx5_attach_device(dev); } else { @@ -1210,6 +1216,9 @@ out: return err; err_reg_dev: + if (boot) + mlx5_devlink_unregister(priv_to_devlink(dev)); +err_devlink_reg: mlx5_unload(dev); err_load: if (boot) @@ -1347,10 +1356,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id) request_module_nowait(MLX5_IB_MOD); - err = mlx5_devlink_register(devlink, &pdev->dev); - if (err) - goto clean_load; - err = mlx5_crdump_enable(dev); if (err) dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err); @@ -1358,9 +1363,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id) pci_save_state(pdev); return 0; -clean_load: - mlx5_unload_one(dev, true); - err_load_one: mlx5_pci_close(dev); pci_init_err: @@ -1561,6 +1563,7 @@ 
static const struct pci_device_id mlx5_core_pci_table[] = { { PCI_VDEVICE(MELLANOX, 0x101d) }, /* ConnectX-6 Dx */ { PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF}, /* ConnectX Family mlx5Gen Virtual Function */ { PCI_VDEVICE(MELLANOX, 0x101f) }, /* ConnectX-6 LX */ + { PCI_VDEVICE(MELLANOX, 0x1021) }, /* ConnectX-7 */ { PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */ { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */ { PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c index 32e94d2ee5e4..e4cff7abb348 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c @@ -209,7 +209,7 @@ static void dr_rule_rehash_copy_ste_ctrl(struct mlx5dr_matcher *matcher, /* We need to copy the refcount since this ste * may have been traversed several times */ - refcount_set(&new_ste->refcount, refcount_read(&cur_ste->refcount)); + new_ste->refcount = cur_ste->refcount; /* Link old STEs rule_mem list to the new ste */ mlx5dr_rule_update_rule_member(cur_ste, new_ste); @@ -638,6 +638,9 @@ static int dr_rule_add_member(struct mlx5dr_rule_rx_tx *nic_rule, if (!rule_mem) return -ENOMEM; + INIT_LIST_HEAD(&rule_mem->list); + INIT_LIST_HEAD(&rule_mem->use_ste_list); + rule_mem->ste = ste; list_add_tail(&rule_mem->list, &nic_rule->rule_members_list); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c index 51803eef13dd..c7f10d4f8f8d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* Copyright (c) 2019 Mellanox Technologies. 
*/ +#include <linux/smp.h> #include "dr_types.h" #define QUEUE_SIZE 128 @@ -729,7 +730,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev, if (!in) goto err_cqwq; - vector = smp_processor_id() % mlx5_comp_vectors_count(mdev); + vector = raw_smp_processor_id() % mlx5_comp_vectors_count(mdev); err = mlx5_vector2eqn(mdev, vector, &eqn, &irqn); if (err) { kvfree(in); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c index a5a266983dd3..c6c7d1defbd7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c @@ -348,7 +348,7 @@ static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src) if (dst->next_htbl) dst->next_htbl->pointing_ste = dst; - refcount_set(&dst->refcount, refcount_read(&src->refcount)); + dst->refcount = src->refcount; INIT_LIST_HEAD(&dst->rule_list); list_splice_tail_init(&src->rule_list, &dst->rule_list); @@ -565,7 +565,7 @@ bool mlx5dr_ste_is_not_valid_entry(u8 *p_hw_ste) bool mlx5dr_ste_not_used_ste(struct mlx5dr_ste *ste) { - return !refcount_read(&ste->refcount); + return !ste->refcount; } /* Init one ste as a pattern for ste data array */ @@ -689,14 +689,14 @@ struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool, htbl->ste_arr = chunk->ste_arr; htbl->hw_ste_arr = chunk->hw_ste_arr; htbl->miss_list = chunk->miss_list; - refcount_set(&htbl->refcount, 0); + htbl->refcount = 0; for (i = 0; i < chunk->num_of_entries; i++) { struct mlx5dr_ste *ste = &htbl->ste_arr[i]; ste->hw_ste = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED; ste->htbl = htbl; - refcount_set(&ste->refcount, 0); + ste->refcount = 0; INIT_LIST_HEAD(&ste->miss_list_node); INIT_LIST_HEAD(&htbl->miss_list[i]); INIT_LIST_HEAD(&ste->rule_list); @@ -713,7 +713,7 @@ out_free_htbl: int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl) { - if (refcount_read(&htbl->refcount)) + if (htbl->refcount) return -EBUSY; mlx5dr_icm_free_chunk(htbl->chunk); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index 290fe61c33d0..3fdf4a5eb031 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -123,7 +123,7 @@ struct mlx5dr_matcher_rx_tx; struct mlx5dr_ste { u8 *hw_ste; /* refcount: indicates the num of rules that using this ste */ - refcount_t refcount; + u32 refcount; /* attached to the miss_list head at each htbl entry */ struct list_head miss_list_node; @@ -155,7 +155,7 @@ struct mlx5dr_ste_htbl_ctrl { struct mlx5dr_ste_htbl { u8 lu_type; u16 byte_mask; - refcount_t refcount; + u32 refcount; struct mlx5dr_icm_chunk *chunk; struct mlx5dr_ste *ste_arr; u8 *hw_ste_arr; @@ -206,13 +206,14 @@ int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl); static inline void mlx5dr_htbl_put(struct mlx5dr_ste_htbl *htbl) { - if (refcount_dec_and_test(&htbl->refcount)) + htbl->refcount--; + if (!htbl->refcount) mlx5dr_ste_htbl_free(htbl); } static inline void mlx5dr_htbl_get(struct mlx5dr_ste_htbl *htbl) { - refcount_inc(&htbl->refcount); + htbl->refcount++; } /* STE utils */ @@ -254,14 +255,15 @@ static inline void mlx5dr_ste_put(struct mlx5dr_ste *ste, struct mlx5dr_matcher *matcher, struct mlx5dr_matcher_rx_tx *nic_matcher) { - if (refcount_dec_and_test(&ste->refcount)) + ste->refcount--; + if (!ste->refcount) mlx5dr_ste_free(ste, matcher, nic_matcher); } /* 
initial as 0, increased only when ste appears in a new rule */ static inline void mlx5dr_ste_get(struct mlx5dr_ste *ste) { - refcount_inc(&ste->refcount); + ste->refcount++; } void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c index 3d587d0bdbbe..1e32e2443f73 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c @@ -352,26 +352,16 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { list_for_each_entry(dst, &fte->node.children, node.list) { enum mlx5_flow_destination_type type = dst->dest_attr.type; - u32 id; if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { err = -ENOSPC; goto free_actions; } - switch (type) { - case MLX5_FLOW_DESTINATION_TYPE_COUNTER: - id = dst->dest_attr.counter_id; + if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) + continue; - tmp_action = - mlx5dr_action_create_flow_counter(id); - if (!tmp_action) { - err = -ENOMEM; - goto free_actions; - } - fs_dr_actions[fs_dr_num_actions++] = tmp_action; - actions[num_actions++] = tmp_action; - break; + switch (type) { case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE: tmp_action = create_ft_action(dev, dst); if (!tmp_action) { @@ -397,6 +387,32 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, } } + if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { + list_for_each_entry(dst, &fte->node.children, node.list) { + u32 id; + + if (dst->dest_attr.type != + MLX5_FLOW_DESTINATION_TYPE_COUNTER) + continue; + + if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { + err = -ENOSPC; + goto free_actions; + } + + id = dst->dest_attr.counter_id; + tmp_action = + mlx5dr_action_create_flow_counter(id); + if (!tmp_action) { + err = -ENOMEM; + goto free_actions; + } + + fs_dr_actions[fs_dr_num_actions++] = tmp_action; + actions[num_actions++] = tmp_action; + } + } + params.match_sz = match_sz; params.match_buf = (u64 *)fte->val; diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c index 544344ac4894..79057af4fe99 100644 --- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c +++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c @@ -6,6 +6,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/netlink.h> +#include <linux/vmalloc.h> #include <linux/xz.h> #include "mlxfw_mfa2.h" #include "mlxfw_mfa2_file.h" @@ -548,7 +549,7 @@ mlxfw_mfa2_file_component_get(const struct mlxfw_mfa2_file *mfa2_file, comp_size = be32_to_cpu(comp->size); comp_buf_size = comp_size + mlxfw_mfa2_comp_magic_len; - comp_data = kmalloc(sizeof(*comp_data) + comp_buf_size, GFP_KERNEL); + comp_data = vzalloc(sizeof(*comp_data) + comp_buf_size); if (!comp_data) return ERR_PTR(-ENOMEM); comp_data->comp.data_size = comp_size; @@ -570,7 +571,7 @@ mlxfw_mfa2_file_component_get(const struct mlxfw_mfa2_file *mfa2_file, comp_data->comp.data = comp_data->buff + mlxfw_mfa2_comp_magic_len; return &comp_data->comp; err_out: - kfree(comp_data); + vfree(comp_data); return ERR_PTR(err); } @@ -579,7 +580,7 @@ void mlxfw_mfa2_file_component_put(struct mlxfw_mfa2_component *comp) const struct mlxfw_mfa2_comp_data *comp_data; comp_data = container_of(comp, struct mlxfw_mfa2_comp_data, comp); - kfree(comp_data); + vfree(comp_data); } void mlxfw_mfa2_file_fini(struct mlxfw_mfa2_file *mfa2_file) diff --git 
a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 5294a1622643..af30e8a76682 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -5472,6 +5472,7 @@ enum mlxsw_reg_htgt_trap_group { MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR, MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0, MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1, + MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP, __MLXSW_REG_HTGT_TRAP_GROUP_MAX, MLXSW_REG_HTGT_TRAP_GROUP_MAX = __MLXSW_REG_HTGT_TRAP_GROUP_MAX - 1 diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 556dca328bb5..8ed15199eb4f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -860,23 +860,17 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, u64 len; int err; + if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) { + this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb)); if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info)) return NETDEV_TX_BUSY; - if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) { - struct sk_buff *skb_orig = skb; - - skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN); - if (!skb) { - this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); - dev_kfree_skb_any(skb_orig); - return NETDEV_TX_OK; - } - dev_consume_skb_any(skb_orig); - } - if (eth_skb_pad(skb)) { this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); return NETDEV_TX_OK; @@ -1215,6 +1209,9 @@ static void update_stats_cache(struct work_struct *work) periodic_hw_stats.update_dw.work); if (!netif_carrier_ok(mlxsw_sp_port->dev)) + /* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as + * necessary when port goes down. 
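+ * Skipping the refresh here is safe only because of that wipe; otherwise
+ * the last snapshot taken before the carrier loss would keep feeding a
+ * stale backlog into the Qdisc stats.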
+ */ goto out; mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev, @@ -4324,6 +4321,15 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port, return 0; } +static void +mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port) +{ + int i; + + for (i = 0; i < TC_MAX_QUEUE; i++) + mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0; +} + static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, char *pude_pl, void *priv) { @@ -4345,6 +4351,7 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, } else { netdev_info(mlxsw_sp_port->dev, "link down\n"); netif_carrier_off(mlxsw_sp_port->dev); + mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port); } } @@ -4542,8 +4549,8 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = { MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false), MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false), MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false), - MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false), - MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false), + MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, VRRP, false), + MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, VRRP, false), /* PKT Sample trap */ MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU, false, SP_IP2ME, DISCARD), @@ -4626,6 +4633,10 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) rate = 19 * 1024; burst_size = 12; break; + case MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP: + rate = 360; + burst_size = 7; + break; default: continue; } @@ -4665,6 +4676,7 @@ static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM: case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP: priority = 5; tc = 5; break; @@ -5127,6 +5139,27 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core, return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); } +static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core, + const struct mlxsw_bus_info *mlxsw_bus_info, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + + mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; + mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; + mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; + mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; + mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; + mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; + mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; + mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr; + mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; + mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; + mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; + + return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); +} + static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); @@ -5629,7 +5662,7 @@ static struct mlxsw_driver mlxsw_sp2_driver = { static struct mlxsw_driver mlxsw_sp3_driver = { .kind = mlxsw_sp3_driver_name, .priv_size = sizeof(struct mlxsw_sp), - .init = mlxsw_sp2_init, + .init = mlxsw_sp3_init, .fini = mlxsw_sp_fini, .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, .port_split = mlxsw_sp_port_split, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index 150b3a144b83..3d3cca596116 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ 
b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -8,6 +8,7 @@ #include <linux/string.h> #include <linux/rhashtable.h> #include <linux/netdevice.h> +#include <linux/mutex.h> #include <net/net_namespace.h> #include <net/tc_act/tc_vlan.h> @@ -25,6 +26,7 @@ struct mlxsw_sp_acl { struct mlxsw_sp_fid *dummy_fid; struct rhashtable ruleset_ht; struct list_head rules; + struct mutex rules_lock; /* Protects rules list */ struct { struct delayed_work dw; unsigned long interval; /* ms */ @@ -701,7 +703,9 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp, goto err_ruleset_block_bind; } + mutex_lock(&mlxsw_sp->acl->rules_lock); list_add_tail(&rule->list, &mlxsw_sp->acl->rules); + mutex_unlock(&mlxsw_sp->acl->rules_lock); block->rule_count++; block->egress_blocker_rule_count += rule->rulei->egress_bind_blocker; return 0; @@ -723,7 +727,9 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp, block->egress_blocker_rule_count -= rule->rulei->egress_bind_blocker; ruleset->ht_key.block->rule_count--; + mutex_lock(&mlxsw_sp->acl->rules_lock); list_del(&rule->list); + mutex_unlock(&mlxsw_sp->acl->rules_lock); if (!ruleset->ht_key.chain_index && mlxsw_sp_acl_ruleset_is_singular(ruleset)) mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset, @@ -783,19 +789,18 @@ static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl) struct mlxsw_sp_acl_rule *rule; int err; - /* Protect internal structures from changes */ - rtnl_lock(); + mutex_lock(&acl->rules_lock); list_for_each_entry(rule, &acl->rules, list) { err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp, rule); if (err) goto err_rule_update; } - rtnl_unlock(); + mutex_unlock(&acl->rules_lock); return 0; err_rule_update: - rtnl_unlock(); + mutex_unlock(&acl->rules_lock); return err; } @@ -880,6 +885,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp) acl->dummy_fid = fid; INIT_LIST_HEAD(&acl->rules); + mutex_init(&acl->rules_lock); err = mlxsw_sp_acl_tcam_init(mlxsw_sp, &acl->tcam); if (err) goto err_acl_ops_init; @@ -892,6 +898,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp) return 0; err_acl_ops_init: + mutex_destroy(&acl->rules_lock); mlxsw_sp_fid_put(fid); err_fid_get: rhashtable_destroy(&acl->ruleset_ht); @@ -908,6 +915,7 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp) cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw); mlxsw_sp_acl_tcam_fini(mlxsw_sp, &acl->tcam); + mutex_destroy(&acl->rules_lock); WARN_ON(!list_empty(&acl->rules)); mlxsw_sp_fid_put(acl->dummy_fid); rhashtable_destroy(&acl->ruleset_ht); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c index 68cc6737d45c..0124bfe1963b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c @@ -195,6 +195,20 @@ mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port, return -EOPNOTSUPP; } +static u64 +mlxsw_sp_xstats_backlog(struct mlxsw_sp_port_xstats *xstats, int tclass_num) +{ + return xstats->backlog[tclass_num] + + xstats->backlog[tclass_num + 8]; +} + +static u64 +mlxsw_sp_xstats_tail_drop(struct mlxsw_sp_port_xstats *xstats, int tclass_num) +{ + return xstats->tail_drop[tclass_num] + + xstats->tail_drop[tclass_num + 8]; +} + static void mlxsw_sp_qdisc_bstats_per_priority_get(struct mlxsw_sp_port_xstats *xstats, u8 prio_bitmap, u64 *tx_packets, @@ -269,7 +283,7 @@ mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port, &stats_base->tx_bytes); red_base->prob_mark = xstats->ecn; 
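/* The helpers above fold in TCs 8..15, which the device is assumed to use as the multicast counterparts of the unicast TCs 0..7; per-TC backlog and tail-drop are therefore the sum of both banks. */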
red_base->prob_drop = xstats->wred_drop[tclass_num]; - red_base->pdrop = xstats->tail_drop[tclass_num]; + red_base->pdrop = mlxsw_sp_xstats_tail_drop(xstats, tclass_num); stats_base->overlimits = red_base->prob_drop + red_base->prob_mark; stats_base->drops = red_base->prob_drop + red_base->pdrop; @@ -370,7 +384,8 @@ mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port, early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop; marks = xstats->ecn - xstats_base->prob_mark; - pdrops = xstats->tail_drop[tclass_num] - xstats_base->pdrop; + pdrops = mlxsw_sp_xstats_tail_drop(xstats, tclass_num) - + xstats_base->pdrop; res->pdrop += pdrops; res->prob_drop += early_drops; @@ -403,9 +418,10 @@ mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port, overlimits = xstats->wred_drop[tclass_num] + xstats->ecn - stats_base->overlimits; - drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] - + drops = xstats->wred_drop[tclass_num] + + mlxsw_sp_xstats_tail_drop(xstats, tclass_num) - stats_base->drops; - backlog = xstats->backlog[tclass_num]; + backlog = mlxsw_sp_xstats_backlog(xstats, tclass_num); _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets); stats_ptr->qstats->overlimits += overlimits; @@ -576,9 +592,9 @@ mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port, tx_packets = stats->tx_packets - stats_base->tx_packets; for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { - drops += xstats->tail_drop[i]; + drops += mlxsw_sp_xstats_tail_drop(xstats, i); drops += xstats->wred_drop[i]; - backlog += xstats->backlog[i]; + backlog += mlxsw_sp_xstats_backlog(xstats, i); } drops = drops - stats_base->drops; @@ -614,7 +630,7 @@ mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port, stats_base->drops = 0; for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { - stats_base->drops += xstats->tail_drop[i]; + stats_base->drops += mlxsw_sp_xstats_tail_drop(xstats, i); stats_base->drops += xstats->wred_drop[i]; } @@ -651,6 +667,13 @@ mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_port->tclass_qdiscs[tclass_num].handle == p->child_handle) return 0; + if (!p->child_handle) { + /* This is an invisible FIFO replacing the original Qdisc. + * Ignore it--the original Qdisc's destroy will follow. + */ + return 0; + } + /* See if the grafted qdisc is already offloaded on any tclass. If so, * unoffload it. */ diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 30bfe3880faf..8290e82240fc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -5742,8 +5742,13 @@ static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp, if (mlxsw_sp_fib6_rt_should_ignore(rt)) return; + /* Multipath routes are first added to the FIB trie and only then + * notified. If we vetoed the addition, we will get a delete + * notification for a route we do not have. Therefore, do not warn if + * route was not found. 
+ */ fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt); - if (WARN_ON(!fib6_entry)) + if (!fib6_entry) return; /* If not all the nexthops are deleted, then only reduce the nexthop @@ -7074,6 +7079,9 @@ static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp, for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) { rif = mlxsw_sp->router->rifs[i]; + if (rif && rif->ops && + rif->ops->type == MLXSW_SP_RIF_TYPE_IPIP_LB) + continue; if (rif && rif->dev && rif->dev != dev && !ether_addr_equal_masked(rif->dev->dev_addr, dev_addr, mlxsw_sp->mac_mask)) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index de6cb22f68b1..f0e98ec8f1ee 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -299,22 +299,17 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb, u64 len; int err; + if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) { + this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb)); if (mlxsw_core_skb_transmit_busy(mlxsw_sx->core, &tx_info)) return NETDEV_TX_BUSY; - if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) { - struct sk_buff *skb_orig = skb; - - skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN); - if (!skb) { - this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped); - dev_kfree_skb_any(skb_orig); - return NETDEV_TX_OK; - } - dev_consume_skb_any(skb_orig); - } mlxsw_sx_txhdr_construct(skb, &tx_info); /* TX header is consumed by HW on the way so we shouldn't count its * bytes as being sent. diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c index 6af9a7eee114..091254061052 100644 --- a/drivers/net/ethernet/natsemi/ns83820.c +++ b/drivers/net/ethernet/natsemi/ns83820.c @@ -1937,7 +1937,7 @@ static int ns83820_init_one(struct pci_dev *pci_dev, pci_set_master(pci_dev); addr = pci_resource_start(pci_dev, 1); - dev->base = ioremap_nocache(addr, PAGE_SIZE); + dev->base = ioremap(addr, PAGE_SIZE); dev->tx_descs = pci_alloc_consistent(pci_dev, 4 * DESC_SIZE * NR_TX_DESC, &dev->tx_phy_descs); dev->rx_info.descs = pci_alloc_consistent(pci_dev, diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c index b339125b2f09..05e760444a92 100644 --- a/drivers/net/ethernet/natsemi/sonic.c +++ b/drivers/net/ethernet/natsemi/sonic.c @@ -64,6 +64,8 @@ static int sonic_open(struct net_device *dev) netif_dbg(lp, ifup, dev, "%s: initializing sonic driver\n", __func__); + spin_lock_init(&lp->lock); + for (i = 0; i < SONIC_NUM_RRS; i++) { struct sk_buff *skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2); if (skb == NULL) { @@ -114,6 +116,24 @@ static int sonic_open(struct net_device *dev) return 0; } +/* Wait for the SONIC to become idle. */ +static void sonic_quiesce(struct net_device *dev, u16 mask) +{ + struct sonic_local * __maybe_unused lp = netdev_priv(dev); + int i; + u16 bits; + + for (i = 0; i < 1000; ++i) { + bits = SONIC_READ(SONIC_CMD) & mask; + if (!bits) + return; + if (irqs_disabled() || in_interrupt()) + udelay(20); + else + usleep_range(100, 200); + } + WARN_ONCE(1, "command deadline expired! 
0x%04x\n", bits); +} /* * Close the SONIC device @@ -130,6 +150,9 @@ static int sonic_close(struct net_device *dev) /* * stop the SONIC, disable interrupts */ + SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS); + sonic_quiesce(dev, SONIC_CR_ALL); + SONIC_WRITE(SONIC_IMR, 0); SONIC_WRITE(SONIC_ISR, 0x7fff); SONIC_WRITE(SONIC_CMD, SONIC_CR_RST); @@ -169,6 +192,9 @@ static void sonic_tx_timeout(struct net_device *dev) * put the Sonic into software-reset mode and * disable all interrupts before releasing DMA buffers */ + SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS); + sonic_quiesce(dev, SONIC_CR_ALL); + SONIC_WRITE(SONIC_IMR, 0); SONIC_WRITE(SONIC_ISR, 0x7fff); SONIC_WRITE(SONIC_CMD, SONIC_CR_RST); @@ -206,8 +232,6 @@ static void sonic_tx_timeout(struct net_device *dev) * wake the tx queue * Concurrently with all of this, the SONIC is potentially writing to * the status flags of the TDs. - * Until some mutual exclusion is added, this code will not work with SMP. However, - * MIPS Jazz machines and m68k Macs were all uni-processor machines. */ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) @@ -215,7 +239,8 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) struct sonic_local *lp = netdev_priv(dev); dma_addr_t laddr; int length; - int entry = lp->next_tx; + int entry; + unsigned long flags; netif_dbg(lp, tx_queued, dev, "%s: skb=%p\n", __func__, skb); @@ -237,6 +262,10 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } + spin_lock_irqsave(&lp->lock, flags); + + entry = lp->next_tx; + sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */ sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1); /* single fragment */ sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */ @@ -246,10 +275,6 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) sonic_tda_put(dev, entry, SONIC_TD_LINK, sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL); - /* - * Must set tx_skb[entry] only after clearing status, and - * before clearing EOL and before stopping queue - */ wmb(); lp->tx_len[entry] = length; lp->tx_laddr[entry] = laddr; @@ -272,6 +297,8 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP); + spin_unlock_irqrestore(&lp->lock, flags); + return NETDEV_TX_OK; } @@ -284,15 +311,28 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id) struct net_device *dev = dev_id; struct sonic_local *lp = netdev_priv(dev); int status; + unsigned long flags; + + /* The lock has two purposes. Firstly, it synchronizes sonic_interrupt() + * with sonic_send_packet() so that the two functions can share state. + * Secondly, it makes sonic_interrupt() re-entrant, as that is required + * by macsonic which must use two IRQs with different priority levels. 
+ */ + spin_lock_irqsave(&lp->lock, flags); + + status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT; + if (!status) { + spin_unlock_irqrestore(&lp->lock, flags); - if (!(status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT)) return IRQ_NONE; + } do { + SONIC_WRITE(SONIC_ISR, status); /* clear the interrupt(s) */ + if (status & SONIC_INT_PKTRX) { netif_dbg(lp, intr, dev, "%s: packet rx\n", __func__); sonic_rx(dev); /* got packet(s) */ - SONIC_WRITE(SONIC_ISR, SONIC_INT_PKTRX); /* clear the interrupt */ } if (status & SONIC_INT_TXDN) { @@ -300,11 +340,12 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id) int td_status; int freed_some = 0; - /* At this point, cur_tx is the index of a TD that is one of: - * unallocated/freed (status set & tx_skb[entry] clear) - * allocated and sent (status set & tx_skb[entry] set ) - * allocated and not yet sent (status clear & tx_skb[entry] set ) - * still being allocated by sonic_send_packet (status clear & tx_skb[entry] clear) + /* The state of a Transmit Descriptor may be inferred + * from { tx_skb[entry], td_status } as follows. + * { clear, clear } => the TD has never been used + * { set, clear } => the TD was handed to SONIC + * { set, set } => the TD was handed back + * { clear, set } => the TD is available for re-use */ netif_dbg(lp, intr, dev, "%s: tx done\n", __func__); @@ -313,18 +354,19 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id) if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0) break; - if (td_status & 0x0001) { + if (td_status & SONIC_TCR_PTX) { lp->stats.tx_packets++; lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE); } else { - lp->stats.tx_errors++; - if (td_status & 0x0642) + if (td_status & (SONIC_TCR_EXD | + SONIC_TCR_EXC | SONIC_TCR_BCM)) lp->stats.tx_aborted_errors++; - if (td_status & 0x0180) + if (td_status & + (SONIC_TCR_NCRS | SONIC_TCR_CRLS)) lp->stats.tx_carrier_errors++; - if (td_status & 0x0020) + if (td_status & SONIC_TCR_OWC) lp->stats.tx_window_errors++; - if (td_status & 0x0004) + if (td_status & SONIC_TCR_FU) lp->stats.tx_fifo_errors++; } @@ -346,7 +388,6 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id) if (freed_some || lp->tx_skb[entry] == NULL) netif_wake_queue(dev); /* The ring is no longer full */ lp->cur_tx = entry; - SONIC_WRITE(SONIC_ISR, SONIC_INT_TXDN); /* clear the interrupt */ } /* @@ -355,42 +396,37 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id) if (status & SONIC_INT_RFO) { netif_dbg(lp, rx_err, dev, "%s: rx fifo overrun\n", __func__); - lp->stats.rx_fifo_errors++; - SONIC_WRITE(SONIC_ISR, SONIC_INT_RFO); /* clear the interrupt */ } if (status & SONIC_INT_RDE) { netif_dbg(lp, rx_err, dev, "%s: rx descriptors exhausted\n", __func__); - lp->stats.rx_dropped++; - SONIC_WRITE(SONIC_ISR, SONIC_INT_RDE); /* clear the interrupt */ } if (status & SONIC_INT_RBAE) { netif_dbg(lp, rx_err, dev, "%s: rx buffer area exceeded\n", __func__); - lp->stats.rx_dropped++; - SONIC_WRITE(SONIC_ISR, SONIC_INT_RBAE); /* clear the interrupt */ } /* counter overruns; all counters are 16bit wide */ - if (status & SONIC_INT_FAE) { + if (status & SONIC_INT_FAE) lp->stats.rx_frame_errors += 65536; - SONIC_WRITE(SONIC_ISR, SONIC_INT_FAE); /* clear the interrupt */ - } - if (status & SONIC_INT_CRC) { + if (status & SONIC_INT_CRC) lp->stats.rx_crc_errors += 65536; - SONIC_WRITE(SONIC_ISR, SONIC_INT_CRC); /* clear the interrupt */ - } - if (status & SONIC_INT_MP) { + if (status & SONIC_INT_MP) lp->stats.rx_missed_errors += 65536; - SONIC_WRITE(SONIC_ISR, 
SONIC_INT_MP); /* clear the interrupt */ - } /* transmit error */ if (status & SONIC_INT_TXER) { - if (SONIC_READ(SONIC_TCR) & SONIC_TCR_FU) - netif_dbg(lp, tx_err, dev, "%s: tx fifo underrun\n", - __func__); - SONIC_WRITE(SONIC_ISR, SONIC_INT_TXER); /* clear the interrupt */ + u16 tcr = SONIC_READ(SONIC_TCR); + + netif_dbg(lp, tx_err, dev, "%s: TXER intr, TCR %04x\n", + __func__, tcr); + + if (tcr & (SONIC_TCR_EXD | SONIC_TCR_EXC | + SONIC_TCR_FU | SONIC_TCR_BCM)) { + /* Aborted transmission. Try again. */ + netif_stop_queue(dev); + SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP); + } } /* bus retry */ @@ -400,107 +436,164 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id) /* ... to help debug DMA problems causing endless interrupts. */ /* Bounce the eth interface to turn on the interrupt again. */ SONIC_WRITE(SONIC_IMR, 0); - SONIC_WRITE(SONIC_ISR, SONIC_INT_BR); /* clear the interrupt */ } - /* load CAM done */ - if (status & SONIC_INT_LCD) - SONIC_WRITE(SONIC_ISR, SONIC_INT_LCD); /* clear the interrupt */ - } while((status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT)); + status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT; + } while (status); + + spin_unlock_irqrestore(&lp->lock, flags); + return IRQ_HANDLED; } +/* Return the array index corresponding to a given Receive Buffer pointer. */ +static int index_from_addr(struct sonic_local *lp, dma_addr_t addr, + unsigned int last) +{ + unsigned int i = last; + + do { + i = (i + 1) & SONIC_RRS_MASK; + if (addr == lp->rx_laddr[i]) + return i; + } while (i != last); + + return -ENOENT; +} + +/* Allocate and map a new skb to be used as a receive buffer. */ +static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp, + struct sk_buff **new_skb, dma_addr_t *new_addr) +{ + *new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2); + if (!*new_skb) + return false; + + if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2) + skb_reserve(*new_skb, 2); + + *new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE), + SONIC_RBSIZE, DMA_FROM_DEVICE); + if (!*new_addr) { + dev_kfree_skb(*new_skb); + *new_skb = NULL; + return false; + } + + return true; +} + +/* Place a new receive resource in the Receive Resource Area and update RWP. */ +static void sonic_update_rra(struct net_device *dev, struct sonic_local *lp, + dma_addr_t old_addr, dma_addr_t new_addr) +{ + unsigned int entry = sonic_rr_entry(dev, SONIC_READ(SONIC_RWP)); + unsigned int end = sonic_rr_entry(dev, SONIC_READ(SONIC_RRP)); + u32 buf; + + /* The resources in the range [RRP, RWP) belong to the SONIC. This loop + * scans the other resources in the RRA, those in the range [RWP, RRP). + */ + do { + buf = (sonic_rra_get(dev, entry, SONIC_RR_BUFADR_H) << 16) | + sonic_rra_get(dev, entry, SONIC_RR_BUFADR_L); + + if (buf == old_addr) + break; + + entry = (entry + 1) & SONIC_RRS_MASK; + } while (entry != end); + + WARN_ONCE(buf != old_addr, "failed to find resource!\n"); + + sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, new_addr >> 16); + sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, new_addr & 0xffff); + + entry = (entry + 1) & SONIC_RRS_MASK; + + SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, entry)); +} + /* * We have a good packet(s), pass it/them up the network stack. 
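Both index_from_addr() and the loop in sonic_update_rra() above walk a power-of-two ring between two positions, using a mask to wrap the index. A stripped-down sketch of the pattern (hypothetical names, not the driver's own):

	/* mask must be ring_size - 1 with ring_size a power of two. */
	static int find_in_window(const unsigned int *ring, unsigned int mask,
				  unsigned int start, unsigned int end,
				  unsigned int value)
	{
		unsigned int i = start;

		do {
			if (ring[i] == value)
				return i;
			i = (i + 1) & mask;	/* branch-free wrap-around */
		} while (i != end);

		return -1;			/* not found in [start, end) */
	}

Because the ring size is a power of two, (i + 1) & mask is a branch-free equivalent of the modulo wrap-around.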
*/ static void sonic_rx(struct net_device *dev) { struct sonic_local *lp = netdev_priv(dev); - int status; int entry = lp->cur_rx; + int prev_entry = lp->eol_rx; + bool rbe = false; while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) { - struct sk_buff *used_skb; - struct sk_buff *new_skb; - dma_addr_t new_laddr; - u16 bufadr_l; - u16 bufadr_h; - int pkt_len; - - status = sonic_rda_get(dev, entry, SONIC_RD_STATUS); - if (status & SONIC_RCR_PRX) { - /* Malloc up new buffer. */ - new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2); - if (new_skb == NULL) { - lp->stats.rx_dropped++; + u16 status = sonic_rda_get(dev, entry, SONIC_RD_STATUS); + + /* If the RD has LPKT set, the chip has finished with the RB */ + if ((status & SONIC_RCR_PRX) && (status & SONIC_RCR_LPKT)) { + struct sk_buff *new_skb; + dma_addr_t new_laddr; + u32 addr = (sonic_rda_get(dev, entry, + SONIC_RD_PKTPTR_H) << 16) | + sonic_rda_get(dev, entry, SONIC_RD_PKTPTR_L); + int i = index_from_addr(lp, addr, entry); + + if (i < 0) { + WARN_ONCE(1, "failed to find buffer!\n"); break; } - /* provide 16 byte IP header alignment unless DMA requires otherwise */ - if(SONIC_BUS_SCALE(lp->dma_bitmode) == 2) - skb_reserve(new_skb, 2); - - new_laddr = dma_map_single(lp->device, skb_put(new_skb, SONIC_RBSIZE), - SONIC_RBSIZE, DMA_FROM_DEVICE); - if (!new_laddr) { - dev_kfree_skb(new_skb); - printk(KERN_ERR "%s: Failed to map rx buffer, dropping packet.\n", dev->name); + + if (sonic_alloc_rb(dev, lp, &new_skb, &new_laddr)) { + struct sk_buff *used_skb = lp->rx_skb[i]; + int pkt_len; + + /* Pass the used buffer up the stack */ + dma_unmap_single(lp->device, addr, SONIC_RBSIZE, + DMA_FROM_DEVICE); + + pkt_len = sonic_rda_get(dev, entry, + SONIC_RD_PKTLEN); + skb_trim(used_skb, pkt_len); + used_skb->protocol = eth_type_trans(used_skb, + dev); + netif_rx(used_skb); + lp->stats.rx_packets++; + lp->stats.rx_bytes += pkt_len; + + lp->rx_skb[i] = new_skb; + lp->rx_laddr[i] = new_laddr; + } else { + /* Failed to obtain a new buffer so re-use it */ + new_laddr = addr; lp->stats.rx_dropped++; - break; } - - /* now we have a new skb to replace it, pass the used one up the stack */ - dma_unmap_single(lp->device, lp->rx_laddr[entry], SONIC_RBSIZE, DMA_FROM_DEVICE); - used_skb = lp->rx_skb[entry]; - pkt_len = sonic_rda_get(dev, entry, SONIC_RD_PKTLEN); - skb_trim(used_skb, pkt_len); - used_skb->protocol = eth_type_trans(used_skb, dev); - netif_rx(used_skb); - lp->stats.rx_packets++; - lp->stats.rx_bytes += pkt_len; - - /* and insert the new skb */ - lp->rx_laddr[entry] = new_laddr; - lp->rx_skb[entry] = new_skb; - - bufadr_l = (unsigned long)new_laddr & 0xffff; - bufadr_h = (unsigned long)new_laddr >> 16; - sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, bufadr_l); - sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, bufadr_h); - } else { - /* This should only happen, if we enable accepting broken packets. */ - lp->stats.rx_errors++; - if (status & SONIC_RCR_FAER) - lp->stats.rx_frame_errors++; - if (status & SONIC_RCR_CRCR) - lp->stats.rx_crc_errors++; - } - if (status & SONIC_RCR_LPKT) { - /* - * this was the last packet out of the current receive buffer - * give the buffer back to the SONIC + /* If RBE is already asserted when RWP advances then + * it's safe to clear RBE after processing this packet. 
*/ - lp->cur_rwp += SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode); - if (lp->cur_rwp >= lp->rra_end) lp->cur_rwp = lp->rra_laddr & 0xffff; - SONIC_WRITE(SONIC_RWP, lp->cur_rwp); - if (SONIC_READ(SONIC_ISR) & SONIC_INT_RBE) { - netif_dbg(lp, rx_err, dev, "%s: rx buffer exhausted\n", - __func__); - SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE); /* clear the flag */ - } - } else - printk(KERN_ERR "%s: rx desc without RCR_LPKT. Shouldn't happen !?\n", - dev->name); + rbe = rbe || SONIC_READ(SONIC_ISR) & SONIC_INT_RBE; + sonic_update_rra(dev, lp, addr, new_laddr); + } /* * give back the descriptor */ - sonic_rda_put(dev, entry, SONIC_RD_LINK, - sonic_rda_get(dev, entry, SONIC_RD_LINK) | SONIC_EOL); + sonic_rda_put(dev, entry, SONIC_RD_STATUS, 0); sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1); - sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK, - sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK) & ~SONIC_EOL); - lp->eol_rx = entry; - lp->cur_rx = entry = (entry + 1) & SONIC_RDS_MASK; + + prev_entry = entry; + entry = (entry + 1) & SONIC_RDS_MASK; + } + + lp->cur_rx = entry; + + if (prev_entry != lp->eol_rx) { + /* Advance the EOL flag to put descriptors back into service */ + sonic_rda_put(dev, prev_entry, SONIC_RD_LINK, SONIC_EOL | + sonic_rda_get(dev, prev_entry, SONIC_RD_LINK)); + sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK, ~SONIC_EOL & + sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK)); + lp->eol_rx = prev_entry; } + + if (rbe) + SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE); /* * If any worth-while packets have been received, netif_rx() * has done a mark_bh(NET_BH) for us and will work on them @@ -550,6 +643,8 @@ static void sonic_multicast_list(struct net_device *dev) (netdev_mc_count(dev) > 15)) { rcr |= SONIC_RCR_AMC; } else { + unsigned long flags; + netif_dbg(lp, ifup, dev, "%s: mc_count %d\n", __func__, netdev_mc_count(dev)); sonic_set_cam_enable(dev, 1); /* always enable our own address */ @@ -563,9 +658,14 @@ static void sonic_multicast_list(struct net_device *dev) i++; } SONIC_WRITE(SONIC_CDC, 16); - /* issue Load CAM command */ SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff); + + /* LCAM and TXP commands can't be used simultaneously */ + spin_lock_irqsave(&lp->lock, flags); + sonic_quiesce(dev, SONIC_CR_TXP); SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM); + sonic_quiesce(dev, SONIC_CR_LCAM); + spin_unlock_irqrestore(&lp->lock, flags); } } @@ -580,7 +680,6 @@ static void sonic_multicast_list(struct net_device *dev) */ static int sonic_init(struct net_device *dev) { - unsigned int cmd; struct sonic_local *lp = netdev_priv(dev); int i; @@ -592,12 +691,16 @@ static int sonic_init(struct net_device *dev) SONIC_WRITE(SONIC_ISR, 0x7fff); SONIC_WRITE(SONIC_CMD, SONIC_CR_RST); + /* While in reset mode, clear CAM Enable register */ + SONIC_WRITE(SONIC_CE, 0); + /* * clear software reset flag, disable receiver, clear and * enable interrupts, then completely initialize the SONIC */ SONIC_WRITE(SONIC_CMD, 0); - SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS); + SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS | SONIC_CR_STP); + sonic_quiesce(dev, SONIC_CR_ALL); /* * initialize the receive resource area @@ -615,15 +718,10 @@ static int sonic_init(struct net_device *dev) } /* initialize all RRA registers */ - lp->rra_end = (lp->rra_laddr + SONIC_NUM_RRS * SIZEOF_SONIC_RR * - SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff; - lp->cur_rwp = (lp->rra_laddr + (SONIC_NUM_RRS - 1) * SIZEOF_SONIC_RR * - SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff; - - SONIC_WRITE(SONIC_RSA, lp->rra_laddr & 0xffff); - SONIC_WRITE(SONIC_REA, lp->rra_end); - 
SONIC_WRITE(SONIC_RRP, lp->rra_laddr & 0xffff); - SONIC_WRITE(SONIC_RWP, lp->cur_rwp); + SONIC_WRITE(SONIC_RSA, sonic_rr_addr(dev, 0)); + SONIC_WRITE(SONIC_REA, sonic_rr_addr(dev, SONIC_NUM_RRS)); + SONIC_WRITE(SONIC_RRP, sonic_rr_addr(dev, 0)); + SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, SONIC_NUM_RRS - 1)); SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16); SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 2 : 1)); @@ -631,14 +729,7 @@ static int sonic_init(struct net_device *dev) netif_dbg(lp, ifup, dev, "%s: issuing RRRA command\n", __func__); SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA); - i = 0; - while (i++ < 100) { - if (SONIC_READ(SONIC_CMD) & SONIC_CR_RRRA) - break; - } - - netif_dbg(lp, ifup, dev, "%s: status=%x, i=%d\n", __func__, - SONIC_READ(SONIC_CMD), i); + sonic_quiesce(dev, SONIC_CR_RRRA); /* * Initialize the receive descriptors so that they @@ -713,28 +804,17 @@ static int sonic_init(struct net_device *dev) * load the CAM */ SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM); - - i = 0; - while (i++ < 100) { - if (SONIC_READ(SONIC_ISR) & SONIC_INT_LCD) - break; - } - netif_dbg(lp, ifup, dev, "%s: CMD=%x, ISR=%x, i=%d\n", __func__, - SONIC_READ(SONIC_CMD), SONIC_READ(SONIC_ISR), i); + sonic_quiesce(dev, SONIC_CR_LCAM); /* * enable receiver, disable loopback * and enable all interrupts */ - SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN | SONIC_CR_STP); SONIC_WRITE(SONIC_RCR, SONIC_RCR_DEFAULT); SONIC_WRITE(SONIC_TCR, SONIC_TCR_DEFAULT); SONIC_WRITE(SONIC_ISR, 0x7fff); SONIC_WRITE(SONIC_IMR, SONIC_IMR_DEFAULT); - - cmd = SONIC_READ(SONIC_CMD); - if ((cmd & SONIC_CR_RXEN) == 0 || (cmd & SONIC_CR_STP) == 0) - printk(KERN_ERR "sonic_init: failed, status=%x\n", cmd); + SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN); netif_dbg(lp, ifup, dev, "%s: new status=%x\n", __func__, SONIC_READ(SONIC_CMD)); diff --git a/drivers/net/ethernet/natsemi/sonic.h b/drivers/net/ethernet/natsemi/sonic.h index 2b27f7049acb..1df6d2f06cc4 100644 --- a/drivers/net/ethernet/natsemi/sonic.h +++ b/drivers/net/ethernet/natsemi/sonic.h @@ -110,6 +110,9 @@ #define SONIC_CR_TXP 0x0002 #define SONIC_CR_HTX 0x0001 +#define SONIC_CR_ALL (SONIC_CR_LCAM | SONIC_CR_RRRA | \ + SONIC_CR_RXEN | SONIC_CR_TXP) + /* * SONIC data configuration bits */ @@ -175,6 +178,7 @@ #define SONIC_TCR_NCRS 0x0100 #define SONIC_TCR_CRLS 0x0080 #define SONIC_TCR_EXC 0x0040 +#define SONIC_TCR_OWC 0x0020 #define SONIC_TCR_PMB 0x0008 #define SONIC_TCR_FU 0x0004 #define SONIC_TCR_BCM 0x0002 @@ -274,8 +278,9 @@ #define SONIC_NUM_RDS SONIC_NUM_RRS /* number of receive descriptors */ #define SONIC_NUM_TDS 16 /* number of transmit descriptors */ -#define SONIC_RDS_MASK (SONIC_NUM_RDS-1) -#define SONIC_TDS_MASK (SONIC_NUM_TDS-1) +#define SONIC_RRS_MASK (SONIC_NUM_RRS - 1) +#define SONIC_RDS_MASK (SONIC_NUM_RDS - 1) +#define SONIC_TDS_MASK (SONIC_NUM_TDS - 1) #define SONIC_RBSIZE 1520 /* size of one resource buffer */ @@ -312,8 +317,6 @@ struct sonic_local { u32 rda_laddr; /* logical DMA address of RDA */ dma_addr_t rx_laddr[SONIC_NUM_RRS]; /* logical DMA addresses of rx skbuffs */ dma_addr_t tx_laddr[SONIC_NUM_TDS]; /* logical DMA addresses of tx skbuffs */ - unsigned int rra_end; - unsigned int cur_rwp; unsigned int cur_rx; unsigned int cur_tx; /* first unacked transmit packet */ unsigned int eol_rx; @@ -322,6 +325,7 @@ struct sonic_local { int msg_enable; struct device *device; /* generic device */ struct net_device_stats stats; + spinlock_t lock; }; #define TX_TIMEOUT (3 * HZ) @@ -344,30 +348,30 @@ static void sonic_msg_init(struct net_device *dev); as far as we can 
tell. */ /* OpenBSD calls this "SWO". I'd like to think that sonic_buf_put() is a much better name. */ -static inline void sonic_buf_put(void* base, int bitmode, +static inline void sonic_buf_put(u16 *base, int bitmode, int offset, __u16 val) { if (bitmode) #ifdef __BIG_ENDIAN - ((__u16 *) base + (offset*2))[1] = val; + __raw_writew(val, base + (offset * 2) + 1); #else - ((__u16 *) base + (offset*2))[0] = val; + __raw_writew(val, base + (offset * 2) + 0); #endif else - ((__u16 *) base)[offset] = val; + __raw_writew(val, base + (offset * 1) + 0); } -static inline __u16 sonic_buf_get(void* base, int bitmode, +static inline __u16 sonic_buf_get(u16 *base, int bitmode, int offset) { if (bitmode) #ifdef __BIG_ENDIAN - return ((volatile __u16 *) base + (offset*2))[1]; + return __raw_readw(base + (offset * 2) + 1); #else - return ((volatile __u16 *) base + (offset*2))[0]; + return __raw_readw(base + (offset * 2) + 0); #endif else - return ((volatile __u16 *) base)[offset]; + return __raw_readw(base + (offset * 1) + 0); } /* Inlines that you should actually use for reading/writing DMA buffers */ @@ -447,6 +451,22 @@ static inline __u16 sonic_rra_get(struct net_device* dev, int entry, (entry * SIZEOF_SONIC_RR) + offset); } +static inline u16 sonic_rr_addr(struct net_device *dev, int entry) +{ + struct sonic_local *lp = netdev_priv(dev); + + return lp->rra_laddr + + entry * SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode); +} + +static inline u16 sonic_rr_entry(struct net_device *dev, u16 addr) +{ + struct sonic_local *lp = netdev_priv(dev); + + return (addr - (u16)lp->rra_laddr) / (SIZEOF_SONIC_RR * + SONIC_BUS_SCALE(lp->dma_bitmode)); +} + static const char version[] = "sonic.c:v0.92 20.9.98 tsbogend@alpha.franken.de\n"; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index c80bb83c8ac9..0a721f6e8676 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -2652,17 +2652,17 @@ static int mem_ldx_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, switch (meta->insn.off) { case offsetof(struct __sk_buff, len): - if (size != FIELD_SIZEOF(struct __sk_buff, len)) + if (size != sizeof_field(struct __sk_buff, len)) return -EOPNOTSUPP; wrp_mov(nfp_prog, dst, plen_reg(nfp_prog)); break; case offsetof(struct __sk_buff, data): - if (size != FIELD_SIZEOF(struct __sk_buff, data)) + if (size != sizeof_field(struct __sk_buff, data)) return -EOPNOTSUPP; wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog)); break; case offsetof(struct __sk_buff, data_end): - if (size != FIELD_SIZEOF(struct __sk_buff, data_end)) + if (size != sizeof_field(struct __sk_buff, data_end)) return -EOPNOTSUPP; emit_alu(nfp_prog, dst, plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog)); @@ -2683,12 +2683,12 @@ static int mem_ldx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, switch (meta->insn.off) { case offsetof(struct xdp_md, data): - if (size != FIELD_SIZEOF(struct xdp_md, data)) + if (size != sizeof_field(struct xdp_md, data)) return -EOPNOTSUPP; wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog)); break; case offsetof(struct xdp_md, data_end): - if (size != FIELD_SIZEOF(struct xdp_md, data_end)) + if (size != sizeof_field(struct xdp_md, data_end)) return -EOPNOTSUPP; emit_alu(nfp_prog, dst, plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog)); diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index 8f732771d3fa..11c83a99b014 100644 --- 
a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -15,7 +15,7 @@ const struct rhashtable_params nfp_bpf_maps_neutral_params = { .nelem_hint = 4, - .key_len = FIELD_SIZEOF(struct bpf_map, id), + .key_len = sizeof_field(struct bpf_map, id), .key_offset = offsetof(struct nfp_bpf_neutral_map, map_id), .head_offset = offsetof(struct nfp_bpf_neutral_map, l), .automatic_shrinking = true, diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index 95a0d3910e31..ac02369174a9 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -374,7 +374,7 @@ nfp_bpf_map_alloc(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap) } use_map_size = DIV_ROUND_UP(offmap->map.value_size, 4) * - FIELD_SIZEOF(struct nfp_bpf_map, use_map[0]); + sizeof_field(struct nfp_bpf_map, use_map[0]); nfp_map = kzalloc(sizeof(*nfp_map) + use_map_size, GFP_USER); if (!nfp_map) diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 31d94592a7c0..e0c985fcaec1 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -24,7 +24,7 @@ struct nfp_app; #define NFP_FL_STAT_ID_MU_NUM GENMASK(31, 22) #define NFP_FL_STAT_ID_STAT GENMASK(21, 0) -#define NFP_FL_STATS_ELEM_RS FIELD_SIZEOF(struct nfp_fl_stats_id, \ +#define NFP_FL_STATS_ELEM_RS sizeof_field(struct nfp_fl_stats_id, \ init_unalloc) #define NFP_FLOWER_MASK_ENTRY_RS 256 #define NFP_FLOWER_MASK_ELEMENT_RS 1 diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c index 7c4a15e967df..5defd31d481c 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c +++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c @@ -65,17 +65,17 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id) freed_stats_id = priv->stats_ring_size; /* Check for unallocated entries first. */ if (priv->stats_ids.init_unalloc > 0) { - if (priv->active_mem_unit == priv->total_mem_units) { - priv->stats_ids.init_unalloc--; - priv->active_mem_unit = 0; - } - *stats_context_id = FIELD_PREP(NFP_FL_STAT_ID_STAT, priv->stats_ids.init_unalloc - 1) | FIELD_PREP(NFP_FL_STAT_ID_MU_NUM, priv->active_mem_unit); - priv->active_mem_unit++; + + if (++priv->active_mem_unit == priv->total_mem_units) { + priv->stats_ids.init_unalloc--; + priv->active_mem_unit = 0; + } + return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c index e4977cdf7678..c0e2f4394aef 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c @@ -106,7 +106,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, * first NFP_NET_CFG_BAR_SZ of the BAR. This keeps the code * the identical for PF and VF drivers. 
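The ioremap_nocache() to ioremap() conversions that recur throughout this series (here in nfp, and in the sfc, smsc911x and natsemi hunks) are mechanical: on the architectures these drivers run on, ioremap() already returns an uncached mapping, so the _nocache alias added nothing and was removed from the tree around this time. A hedged sketch of the equivalence, with placeholder names:

	#include <linux/io.h>

	static void __iomem *map_regs(resource_size_t phys_addr, size_t size)
	{
		/* was: ioremap_nocache(phys_addr, size);
		 * the mapping is still uncached MMIO, NULL on failure
		 */
		return ioremap(phys_addr, size);
	}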
*/ - ctrl_bar = ioremap_nocache(pci_resource_start(pdev, NFP_NET_CTRL_BAR), + ctrl_bar = ioremap(pci_resource_start(pdev, NFP_NET_CTRL_BAR), NFP_NET_CFG_BAR_SZ); if (!ctrl_bar) { dev_err(&pdev->dev, @@ -200,7 +200,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, bar_sz = (rx_bar_off + rx_bar_sz) - bar_off; map_addr = pci_resource_start(pdev, tx_bar_no) + bar_off; - vf->q_bar = ioremap_nocache(map_addr, bar_sz); + vf->q_bar = ioremap(map_addr, bar_sz); if (!vf->q_bar) { nn_err(nn, "Failed to map resource %d\n", tx_bar_no); err = -EIO; @@ -216,7 +216,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, /* TX queues */ map_addr = pci_resource_start(pdev, tx_bar_no) + tx_bar_off; - nn->tx_bar = ioremap_nocache(map_addr, tx_bar_sz); + nn->tx_bar = ioremap(map_addr, tx_bar_sz); if (!nn->tx_bar) { nn_err(nn, "Failed to map resource %d\n", tx_bar_no); err = -EIO; @@ -225,7 +225,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, /* RX queues */ map_addr = pci_resource_start(pdev, rx_bar_no) + rx_bar_off; - nn->rx_bar = ioremap_nocache(map_addr, rx_bar_sz); + nn->rx_bar = ioremap(map_addr, rx_bar_sz); if (!nn->rx_bar) { nn_err(nn, "Failed to map resource %d\n", rx_bar_no); err = -EIO; diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c index 85d46f206b3c..b454db283aef 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c @@ -611,7 +611,7 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) /* Configure, and lock, BAR0.0 for General Target use (MSI-X SRAM) */ bar = &nfp->bar[0]; if (nfp_bar_resource_len(bar) >= NFP_PCI_MIN_MAP_SIZE) - bar->iomem = ioremap_nocache(nfp_bar_resource_start(bar), + bar->iomem = ioremap(nfp_bar_resource_start(bar), nfp_bar_resource_len(bar)); if (bar->iomem) { int pf; @@ -677,7 +677,7 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) } bar = &nfp->bar[4 + i]; - bar->iomem = ioremap_nocache(nfp_bar_resource_start(bar), + bar->iomem = ioremap(nfp_bar_resource_start(bar), nfp_bar_resource_len(bar)); if (bar->iomem) { msg += snprintf(msg, end - msg, @@ -858,7 +858,7 @@ static int nfp6000_area_acquire(struct nfp_cpp_area *area) priv->iomem = priv->bar->iomem + priv->bar_offset; else /* Must have been too big. Sub-allocate. 
*/ - priv->iomem = ioremap_nocache(priv->phys, priv->size); + priv->iomem = ioremap(priv->phys, priv->size); if (IS_ERR_OR_NULL(priv->iomem)) { dev_err(nfp->dev, "Can't ioremap() a %d byte region of BAR %d\n", diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c index 1a3008e33182..b36aa5bf3c5f 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c @@ -20,7 +20,7 @@ struct pch_gbe_stats { #define PCH_GBE_STAT(m) \ { \ .string = #m, \ - .size = FIELD_SIZEOF(struct pch_gbe_hw_stats, m), \ + .size = sizeof_field(struct pch_gbe_hw_stats, m), \ .offset = offsetof(struct pch_gbe_hw_stats, m), \ } diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index c303a92d5b06..e8a1b27db84d 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -464,7 +464,7 @@ struct qede_fastpath { struct qede_tx_queue *txq; struct qede_tx_queue *xdp_tx; -#define VEC_NAME_SIZE (FIELD_SIZEOF(struct net_device, name) + 8) +#define VEC_NAME_SIZE (sizeof_field(struct net_device, name) + 8) char name[VEC_NAME_SIZE]; }; diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index d6cfe4ffbaf3..d1ce4531d01a 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -1230,7 +1230,7 @@ qede_configure_mcast_filtering(struct net_device *ndev, netif_addr_lock_bh(ndev); mc_count = netdev_mc_count(ndev); - if (mc_count < 64) { + if (mc_count <= 64) { netdev_for_each_mc_addr(ha, ndev) { ether_addr_copy(temp, ha->addr); temp += ETH_ALEN; diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 481b096e984d..34fa3917eb33 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -1406,6 +1406,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) rxq->rx_buf_seg_size = roundup_pow_of_two(size); } else { rxq->rx_buf_seg_size = PAGE_SIZE; + edev->ndev->features &= ~NETIF_F_GRO_HW; } /* Allocate the parallel driver ring for Rx buffers */ @@ -1450,6 +1451,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) } } + edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW); if (!edev->gro_disable) qede_set_tpa_param(rxq); err: @@ -1702,8 +1704,6 @@ static void qede_init_fp(struct qede_dev *edev) snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", edev->ndev->name, queue_id); } - - edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW); } static int qede_set_real_num_queues(struct qede_dev *edev) diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index b4b8ba00ee01..986f26578d34 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -2756,6 +2756,9 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev) int err; for (i = 0; i < qdev->num_large_buffers; i++) { + lrg_buf_cb = &qdev->lrg_buf[i]; + memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); + skb = netdev_alloc_skb(qdev->ndev, qdev->lrg_buffer_len); if (unlikely(!skb)) { @@ -2766,11 +2769,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev) ql_free_large_buffers(qdev); return -ENOMEM; } else { - - lrg_buf_cb = &qdev->lrg_buf[i]; - memset(lrg_buf_cb, 0, sizeof(struct 
ql_rcv_buf_cb)); lrg_buf_cb->index = i; - lrg_buf_cb->skb = skb; /* * We save some space to copy the ethhdr from first * buffer @@ -2792,6 +2791,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev) return -ENOMEM; } + lrg_buf_cb->skb = skb; dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); dma_unmap_len_set(lrg_buf_cb, maplen, qdev->lrg_buffer_len - diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index a496390b8632..07f9067affc6 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -2043,6 +2043,7 @@ static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev, break; } entry += p_hdr->size; + cond_resched(); } p_dev->ahw->reset.seq_index = index; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index a4cd6f2cfb86..75d83c3cbf27 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -20,7 +20,7 @@ struct qlcnic_stats { int stat_offset; }; -#define QLC_SIZEOF(m) FIELD_SIZEOF(struct qlcnic_adapter, m) +#define QLC_SIZEOF(m) sizeof_field(struct qlcnic_adapter, m) #define QLC_OFF(m) offsetof(struct qlcnic_adapter, m) static const u32 qlcnic_fw_dump_level[] = { 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c index afa10a163da1..f34ae8c75bc5 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c @@ -703,6 +703,7 @@ static u32 qlcnic_read_memory_test_agent(struct qlcnic_adapter *adapter, addr += 16; reg_read -= 16; ret += 16; + cond_resched(); } out: mutex_unlock(&adapter->ahw->mem_lock); @@ -1383,6 +1384,7 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter) buf_offset += entry->hdr.cap_size; entry_offset += entry->hdr.offset; buffer = fw_dump->data + buf_offset; + cond_resched(); } fw_dump->clr = 1; diff --git a/drivers/net/ethernet/realtek/r8169_firmware.c b/drivers/net/ethernet/realtek/r8169_firmware.c index 355cc810e322..cbc6b846ded5 100644 --- a/drivers/net/ethernet/realtek/r8169_firmware.c +++ b/drivers/net/ethernet/realtek/r8169_firmware.c @@ -37,7 +37,7 @@ struct fw_info { u8 chksum; } __packed; -#define FW_OPCODE_SIZE FIELD_SIZEOF(struct rtl_fw_phy_action, code[0]) +#define FW_OPCODE_SIZE sizeof_field(struct rtl_fw_phy_action, code[0]) static bool rtl_fw_format_ok(struct rtl_fw *rtl_fw) { diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index e19b49c4013e..3591285250e1 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -2204,24 +2204,28 @@ static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf) if (cd->tsu) { add_tsu_reg(ARSTR); add_tsu_reg(TSU_CTRST); - add_tsu_reg(TSU_FWEN0); - add_tsu_reg(TSU_FWEN1); - add_tsu_reg(TSU_FCM); - add_tsu_reg(TSU_BSYSL0); - add_tsu_reg(TSU_BSYSL1); - add_tsu_reg(TSU_PRISL0); - add_tsu_reg(TSU_PRISL1); - add_tsu_reg(TSU_FWSL0); - add_tsu_reg(TSU_FWSL1); + if (cd->dual_port) { + add_tsu_reg(TSU_FWEN0); + add_tsu_reg(TSU_FWEN1); + add_tsu_reg(TSU_FCM); + add_tsu_reg(TSU_BSYSL0); + add_tsu_reg(TSU_BSYSL1); + add_tsu_reg(TSU_PRISL0); + add_tsu_reg(TSU_PRISL1); + add_tsu_reg(TSU_FWSL0); + add_tsu_reg(TSU_FWSL1); + } add_tsu_reg(TSU_FWSLC); - add_tsu_reg(TSU_QTAGM0); - 
add_tsu_reg(TSU_QTAGM1); - add_tsu_reg(TSU_FWSR); - add_tsu_reg(TSU_FWINMK); - add_tsu_reg(TSU_ADQT0); - add_tsu_reg(TSU_ADQT1); - add_tsu_reg(TSU_VTAG0); - add_tsu_reg(TSU_VTAG1); + if (cd->dual_port) { + add_tsu_reg(TSU_QTAGM0); + add_tsu_reg(TSU_QTAGM1); + add_tsu_reg(TSU_FWSR); + add_tsu_reg(TSU_FWINMK); + add_tsu_reg(TSU_ADQT0); + add_tsu_reg(TSU_ADQT1); + add_tsu_reg(TSU_VTAG0); + add_tsu_reg(TSU_VTAG1); + } add_tsu_reg(TSU_ADSBSY); add_tsu_reg(TSU_TEN); add_tsu_reg(TSU_POST1); diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c index 0775b9464b4e..466483c4ac67 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c @@ -30,7 +30,7 @@ struct sxgbe_stats { #define SXGBE_STAT(m) \ { \ #m, \ - FIELD_SIZEOF(struct sxgbe_extra_stats, m), \ + sizeof_field(struct sxgbe_extra_stats, m), \ offsetof(struct sxgbe_priv_data, xstats.m) \ } diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c index c56fcbb37066..52ed111d98f4 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c @@ -2296,7 +2296,7 @@ __setup("sxgbeeth=", sxgbe_cmdline_opt); -MODULE_DESCRIPTION("SAMSUNG 10G/2.5G/1G Ethernet PLATFORM driver"); +MODULE_DESCRIPTION("Samsung 10G/2.5G/1G Ethernet PLATFORM driver"); MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)"); MODULE_PARM_DESC(eee_timer, "EEE-LPI Default LS timer value"); diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 4d9bbccc6f89..a6ae2cdc1986 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -1401,7 +1401,7 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx) } /* Shrink the original UC mapping of the memory BAR */ - membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size); + membase = ioremap(efx->membase_phys, uc_mem_map_size); if (!membase) { netif_err(efx, probe, efx->net_dev, "could not shrink memory BAR to %x\n", diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 992c773620ec..6891df471538 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -1338,7 +1338,7 @@ static int efx_init_io(struct efx_nic *efx) rc = -EIO; goto fail3; } - efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size); + efx->membase = ioremap(efx->membase_phys, mem_map_size); if (!efx->membase) { netif_err(efx, probe, efx->net_dev, "could not map memory BAR at %llx+%x\n", @@ -1472,6 +1472,12 @@ static int efx_allocate_msix_channels(struct efx_nic *efx, n_xdp_tx = num_possible_cpus(); n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, EFX_TXQ_TYPES); + vec_count = pci_msix_vec_count(efx->pci_dev); + if (vec_count < 0) + return vec_count; + + max_channels = min_t(unsigned int, vec_count, max_channels); + /* Check resources. * We need a channel per event queue, plus a VI per tx queue. * This may be more pessimistic than it needs to be. 
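The efx.c hunk above moves the pci_msix_vec_count() query ahead of the channel bookkeeping, so the requested channel count is clamped to the hardware's vector budget before it is split among RX, TX and XDP uses. Roughly, as a sketch with hypothetical names rather than the driver's own function:

	#include <linux/kernel.h>
	#include <linux/pci.h>

	static int clamp_channels(struct pci_dev *pdev, unsigned int wanted)
	{
		int vecs = pci_msix_vec_count(pdev);	/* -errno if no MSI-X */

		if (vecs < 0)
			return vecs;

		return min_t(unsigned int, wanted, vecs);
	}

Clamping first avoids distributing queues across channels that can never be backed by an MSI-X vector.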
@@ -1493,11 +1499,6 @@ static int efx_allocate_msix_channels(struct efx_nic *efx, n_xdp_tx, n_xdp_ev); } - n_channels = min(n_channels, max_channels); - - vec_count = pci_msix_vec_count(efx->pci_dev); - if (vec_count < 0) - return vec_count; if (vec_count < n_channels) { netif_err(efx, drv, efx->net_dev, "WARNING: Insufficient MSI-X vectors available (%d < %u).\n", @@ -1507,11 +1508,9 @@ static int efx_allocate_msix_channels(struct efx_nic *efx, n_channels = vec_count; } - efx->n_channels = n_channels; + n_channels = min(n_channels, max_channels); - /* Do not create the PTP TX queue(s) if PTP uses the MC directly. */ - if (extra_channels && !efx_ptp_use_mac_tx_timestamps(efx)) - n_channels--; + efx->n_channels = n_channels; /* Ignore XDP tx channels when creating rx channels. */ n_channels -= efx->n_xdp_channels; @@ -1531,11 +1530,10 @@ static int efx_allocate_msix_channels(struct efx_nic *efx, efx->n_rx_channels = n_channels; } - if (efx->n_xdp_channels) - efx->xdp_channel_offset = efx->tx_channel_offset + - efx->n_tx_channels; - else - efx->xdp_channel_offset = efx->n_channels; + efx->n_rx_channels = min(efx->n_rx_channels, parallelism); + efx->n_tx_channels = min(efx->n_tx_channels, parallelism); + + efx->xdp_channel_offset = n_channels; netif_dbg(efx, drv, efx->net_dev, "Allocating %u RX channels\n", @@ -1550,6 +1548,7 @@ static int efx_allocate_msix_channels(struct efx_nic *efx, static int efx_probe_interrupts(struct efx_nic *efx) { unsigned int extra_channels = 0; + unsigned int rss_spread; unsigned int i, j; int rc; @@ -1631,8 +1630,7 @@ static int efx_probe_interrupts(struct efx_nic *efx) for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) { if (!efx->extra_channel_type[i]) continue; - if (efx->interrupt_mode != EFX_INT_MODE_MSIX || - efx->n_channels <= extra_channels) { + if (j <= efx->tx_channel_offset + efx->n_tx_channels) { efx->extra_channel_type[i]->handle_no_channel(efx); } else { --j; @@ -1643,16 +1641,17 @@ static int efx_probe_interrupts(struct efx_nic *efx) } } + rss_spread = efx->n_rx_channels; /* RSS might be usable on VFs even if it is disabled on the PF */ #ifdef CONFIG_SFC_SRIOV if (efx->type->sriov_wanted) { - efx->rss_spread = ((efx->n_rx_channels > 1 || + efx->rss_spread = ((rss_spread > 1 || !efx->type->sriov_wanted(efx)) ? 
- efx->n_rx_channels : efx_vf_size(efx)); + rss_spread : efx_vf_size(efx)); return 0; } #endif - efx->rss_spread = efx->n_rx_channels; + efx->rss_spread = rss_spread; return 0; } diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c index eecc348b1c32..53ae9faeb4c3 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.c +++ b/drivers/net/ethernet/sfc/falcon/efx.c @@ -1265,7 +1265,7 @@ static int ef4_init_io(struct ef4_nic *efx) rc = -EIO; goto fail3; } - efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size); + efx->membase = ioremap(efx->membase_phys, mem_map_size); if (!efx->membase) { netif_err(efx, probe, efx->net_dev, "could not map memory BAR at %llx+%x\n", diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 1f88212be085..dfd5182d9e47 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -1533,9 +1533,7 @@ static inline bool efx_channel_is_xdp_tx(struct efx_channel *channel) static inline bool efx_channel_has_tx_queues(struct efx_channel *channel) { - return efx_channel_is_xdp_tx(channel) || - (channel->type && channel->type->want_txqs && - channel->type->want_txqs(channel)); + return true; } static inline struct efx_tx_queue * diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index ef52b24ad9e7..c29bf862a94c 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -96,11 +96,12 @@ static inline void efx_sync_rx_buffer(struct efx_nic *efx, void efx_rx_config_page_split(struct efx_nic *efx) { - efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align, + efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align + + XDP_PACKET_HEADROOM, EFX_RX_BUF_ALIGNMENT); efx->rx_bufs_per_page = efx->rx_buffer_order ? 
1 : ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) / - (efx->rx_page_buf_step + XDP_PACKET_HEADROOM)); + efx->rx_page_buf_step); efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) / efx->rx_bufs_per_page; efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH, @@ -190,14 +191,13 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic) page_offset = sizeof(struct efx_rx_page_state); do { - page_offset += XDP_PACKET_HEADROOM; - dma_addr += XDP_PACKET_HEADROOM; - index = rx_queue->added_count & rx_queue->ptr_mask; rx_buf = efx_rx_buffer(rx_queue, index); - rx_buf->dma_addr = dma_addr + efx->rx_ip_align; + rx_buf->dma_addr = dma_addr + efx->rx_ip_align + + XDP_PACKET_HEADROOM; rx_buf->page = page; - rx_buf->page_offset = page_offset + efx->rx_ip_align; + rx_buf->page_offset = page_offset + efx->rx_ip_align + + XDP_PACKET_HEADROOM; rx_buf->len = efx->rx_dma_len; rx_buf->flags = 0; ++rx_queue->added_count; diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 38068fc34141..6d90a097ce4e 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -2454,7 +2454,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev) pdata = netdev_priv(dev); dev->irq = irq; - pdata->ioaddr = ioremap_nocache(res->start, res_size); + pdata->ioaddr = ioremap(res->start, res_size); if (!pdata->ioaddr) { retval = -ENOMEM; goto out_ioremap_fail; diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c index f7e927ad67fa..b7032422393f 100644 --- a/drivers/net/ethernet/socionext/sni_ave.c +++ b/drivers/net/ethernet/socionext/sni_ave.c @@ -424,16 +424,22 @@ static void ave_ethtool_get_wol(struct net_device *ndev, phy_ethtool_get_wol(ndev->phydev, wol); } -static int ave_ethtool_set_wol(struct net_device *ndev, - struct ethtool_wolinfo *wol) +static int __ave_ethtool_set_wol(struct net_device *ndev, + struct ethtool_wolinfo *wol) { - int ret; - if (!ndev->phydev || (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))) return -EOPNOTSUPP; - ret = phy_ethtool_set_wol(ndev->phydev, wol); + return phy_ethtool_set_wol(ndev->phydev, wol); +} + +static int ave_ethtool_set_wol(struct net_device *ndev, + struct ethtool_wolinfo *wol) +{ + int ret; + + ret = __ave_ethtool_set_wol(ndev, wol); if (!ret) device_set_wakeup_enable(&ndev->dev, !!wol->wolopts); @@ -1216,7 +1222,7 @@ static int ave_init(struct net_device *ndev) /* set wol initial state disabled */ wol.wolopts = 0; - ave_ethtool_set_wol(ndev, &wol); + __ave_ethtool_set_wol(ndev, &wol); if (!phy_interface_is_rgmii(phydev)) phy_set_max_speed(phydev, SPEED_100); @@ -1768,7 +1774,7 @@ static int ave_resume(struct device *dev) ave_ethtool_get_wol(ndev, &wol); wol.wolopts = priv->wolopts; - ave_ethtool_set_wol(ndev, &wol); + __ave_ethtool_set_wol(ndev, &wol); if (ndev->phydev) { ret = phy_resume(ndev->phydev); diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index b210e987a1db..94f94686cf7d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -365,9 +365,8 @@ struct dma_features { unsigned int arpoffsel; }; -/* GMAC TX FIFO is 8K, Rx FIFO is 16K */ -#define BUF_SIZE_16KiB 16384 -/* RX Buffer size must be < 8191 and multiple of 4/8/16 bytes */ +/* RX Buffer size must be multiple of 4/8/16 bytes */ +#define BUF_SIZE_16KiB 16368 #define BUF_SIZE_8KiB 8188 #define BUF_SIZE_4KiB 4096 #define BUF_SIZE_2KiB 2048 diff 
--git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c index bd6c01004913..0e2fa14f1423 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c @@ -112,6 +112,14 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac) struct device *dev = dwmac->dev; const char *parent_name, *mux_parent_names[MUX_CLK_NUM_PARENTS]; struct meson8b_dwmac_clk_configs *clk_configs; + static const struct clk_div_table div_table[] = { + { .div = 2, .val = 2, }, + { .div = 3, .val = 3, }, + { .div = 4, .val = 4, }, + { .div = 5, .val = 5, }, + { .div = 6, .val = 6, }, + { .div = 7, .val = 7, }, + }; clk_configs = devm_kzalloc(dev, sizeof(*clk_configs), GFP_KERNEL); if (!clk_configs) @@ -146,9 +154,9 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac) clk_configs->m250_div.reg = dwmac->regs + PRG_ETH0; clk_configs->m250_div.shift = PRG_ETH0_CLK_M250_DIV_SHIFT; clk_configs->m250_div.width = PRG_ETH0_CLK_M250_DIV_WIDTH; - clk_configs->m250_div.flags = CLK_DIVIDER_ONE_BASED | - CLK_DIVIDER_ALLOW_ZERO | - CLK_DIVIDER_ROUND_CLOSEST; + clk_configs->m250_div.table = div_table; + clk_configs->m250_div.flags = CLK_DIVIDER_ALLOW_ZERO | + CLK_DIVIDER_ROUND_CLOSEST; clk = meson8b_dwmac_register_clk(dwmac, "m250_div", &parent_name, 1, &clk_divider_ops, &clk_configs->m250_div.hw); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 1c8d84ed8410..01b484cb177e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -957,6 +957,9 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv) /* default */ break; case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: reg |= SYSCON_EPIT | SYSCON_ETCS_INT_GMII; break; case PHY_INTERFACE_MODE_RMII: diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c index 26353ef616b8..7d40760e9ba8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c @@ -44,7 +44,7 @@ static int sun7i_gmac_init(struct platform_device *pdev, void *priv) * rate, which then uses the auto-reparenting feature of the * clock driver, and enabling/disabling the clock. 
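For the dwmac-sun8i and dwmac-sunxi hunks that follow: phy_interface_mode_is_rgmii() from <linux/phy.h> accepts all four RGMII variants, which a plain equality test against PHY_INTERFACE_MODE_RGMII does not. Its effect is equivalent to this open-coded range check (sketch only):

	#include <linux/phy.h>

	static bool any_rgmii(phy_interface_t mode)
	{
		return mode >= PHY_INTERFACE_MODE_RGMII &&
		       mode <= PHY_INTERFACE_MODE_RGMII_TXID;
	}

The internal-delay variants (_ID, _RXID, _TXID) still need the same MAC-side clock setup, which is why the bare equality tests were losing them.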
*/ - if (gmac->interface == PHY_INTERFACE_MODE_RGMII) { + if (phy_interface_mode_is_rgmii(gmac->interface)) { clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE); clk_prepare_enable(gmac->tx_clk); gmac->clk_enabled = 1; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h index 3b6e559aa0b9..ef8a07c68ca7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h @@ -343,6 +343,8 @@ #define XGMAC_DMA_CH_RX_CONTROL(x) (0x00003108 + (0x80 * (x))) #define XGMAC_RxPBL GENMASK(21, 16) #define XGMAC_RxPBL_SHIFT 16 +#define XGMAC_RBSZ GENMASK(14, 1) +#define XGMAC_RBSZ_SHIFT 1 #define XGMAC_RXST BIT(0) #define XGMAC_DMA_CH_TxDESC_HADDR(x) (0x00003110 + (0x80 * (x))) #define XGMAC_DMA_CH_TxDESC_LADDR(x) (0x00003114 + (0x80 * (x))) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c index 22a7f0cc1b90..f3f08ccc379b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c @@ -482,7 +482,8 @@ static void dwxgmac2_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan) u32 value; value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan)); - value |= bfsize << 1; + value &= ~XGMAC_RBSZ; + value |= bfsize << XGMAC_RBSZ_SHIFT; writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan)); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 1a768837ca72..b29603ec744c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -34,7 +34,7 @@ struct stmmac_stats { }; #define STMMAC_STAT(m) \ - { #m, FIELD_SIZEOF(struct stmmac_extra_stats, m), \ + { #m, sizeof_field(struct stmmac_extra_stats, m), \ offsetof(struct stmmac_priv, xstats.m)} static const struct stmmac_stats stmmac_gstrings_stats[] = { @@ -163,7 +163,7 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = { /* HW MAC Management counters (if supported) */ #define STMMAC_MMC_STAT(m) \ - { #m, FIELD_SIZEOF(struct stmmac_counters, m), \ + { #m, sizeof_field(struct stmmac_counters, m), \ offsetof(struct stmmac_priv, mmc.m)} static const struct stmmac_stats stmmac_mmc[] = { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index bbc65bd332a8..80d59b775907 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -46,7 +46,7 @@ #include "dwxgmac2.h" #include "hwif.h" -#define STMMAC_ALIGN(x) __ALIGN_KERNEL(x, SMP_CACHE_BYTES) +#define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16) #define TSO_MAX_BUFF_SIZE (SZ_16K - 1) /* Module parameters */ @@ -106,6 +106,7 @@ MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode"); static irqreturn_t stmmac_interrupt(int irq, void *dev_id); #ifdef CONFIG_DEBUG_FS +static const struct net_device_ops stmmac_netdev_ops; static void stmmac_init_fs(struct net_device *dev); static void stmmac_exit_fs(struct net_device *dev); #endif @@ -1109,7 +1110,9 @@ static int stmmac_set_bfsize(int mtu, int bufsize) { int ret = bufsize; - if (mtu >= BUF_SIZE_4KiB) + if (mtu >= BUF_SIZE_8KiB) + ret = BUF_SIZE_16KiB; + else if (mtu >= BUF_SIZE_4KiB) ret = BUF_SIZE_8KiB; else if (mtu >= BUF_SIZE_2KiB) ret = BUF_SIZE_4KiB; @@ -1293,19 +1296,9 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) struct 
stmmac_priv *priv = netdev_priv(dev); u32 rx_count = priv->plat->rx_queues_to_use; int ret = -ENOMEM; - int bfsize = 0; int queue; int i; - bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu); - if (bfsize < 0) - bfsize = 0; - - if (bfsize < BUF_SIZE_16KiB) - bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); - - priv->dma_buf_sz = bfsize; - /* RX INITIALIZATION */ netif_dbg(priv, probe, priv->dev, "SKB addresses:\nskb\t\tskb data\tdma data\n"); @@ -1347,8 +1340,6 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) } } - buf_sz = bfsize; - return 0; err_init_rx_buffers: @@ -2658,6 +2649,7 @@ static void stmmac_hw_teardown(struct net_device *dev) static int stmmac_open(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); + int bfsize = 0; u32 chan; int ret; @@ -2677,7 +2669,16 @@ static int stmmac_open(struct net_device *dev) memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats)); priv->xstats.threshold = tc; - priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); + bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu); + if (bfsize < 0) + bfsize = 0; + + if (bfsize < BUF_SIZE_16KiB) + bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); + + priv->dma_buf_sz = bfsize; + buf_sz = bfsize; + priv->rx_copybreak = STMMAC_RX_COPYBREAK; ret = alloc_dma_desc_resources(priv); @@ -3053,8 +3054,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) tx_q->tx_count_frames = 0; stmmac_set_tx_ic(priv, desc); priv->xstats.tx_set_ic_bit++; - } else { - stmmac_tx_timer_arm(priv, queue); } /* We've used all descriptors we need for this skb, however, @@ -3125,6 +3124,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc)); stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); + stmmac_tx_timer_arm(priv, queue); return NETDEV_TX_OK; @@ -3276,8 +3276,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) tx_q->tx_count_frames = 0; stmmac_set_tx_ic(priv, desc); priv->xstats.tx_set_ic_bit++; - } else { - stmmac_tx_timer_arm(priv, queue); } /* We've used all descriptors we need for this skb, however, @@ -3366,6 +3364,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc)); stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); + stmmac_tx_timer_arm(priv, queue); return NETDEV_TX_OK; @@ -3646,8 +3645,9 @@ read_again: * feature is always disabled and packets need to be * stripped manually. 
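The stmmac_rx() change below tightens FCS stripping: the 4-byte FCS trails the final segment of a frame, so it may only be subtracted when the descriptor is the last one (!(status & rx_not_ls)), never from an intermediate segment. A minimal sketch of the rule, using a hypothetical helper:

	#include <linux/if_ether.h>
	#include <linux/types.h>

	static void trim_fcs(bool last_segment, unsigned int *buf1_len,
			     unsigned int *buf2_len)
	{
		if (!last_segment)	/* FCS only trails the final segment */
			return;
		if (*buf2_len)
			*buf2_len -= ETH_FCS_LEN;
		else
			*buf1_len -= ETH_FCS_LEN;
	}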
*/ - if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) || - unlikely(status != llc_snap)) { + if (likely(!(status & rx_not_ls)) && + (likely(priv->synopsys_id >= DWMAC_CORE_4_00) || + unlikely(status != llc_snap))) { if (buf2_len) buf2_len -= ETH_FCS_LEN; else @@ -3829,12 +3829,24 @@ static void stmmac_set_rx_mode(struct net_device *dev) static int stmmac_change_mtu(struct net_device *dev, int new_mtu) { struct stmmac_priv *priv = netdev_priv(dev); + int txfifosz = priv->plat->tx_fifo_size; + + if (txfifosz == 0) + txfifosz = priv->dma_cap.tx_fifo_size; + + txfifosz /= priv->plat->tx_queues_to_use; if (netif_running(dev)) { netdev_err(priv->dev, "must be stopped to change its MTU\n"); return -EBUSY; } + new_mtu = STMMAC_ALIGN(new_mtu); + + /* If condition true, FIFO is too small or MTU too large */ + if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB)) + return -EINVAL; + dev->mtu = new_mtu; netdev_update_features(dev); @@ -4245,6 +4257,34 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v) } DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap); +/* Use network device events to rename debugfs file entries. + */ +static int stmmac_device_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct stmmac_priv *priv = netdev_priv(dev); + + if (dev->netdev_ops != &stmmac_netdev_ops) + goto done; + + switch (event) { + case NETDEV_CHANGENAME: + if (priv->dbgfs_dir) + priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir, + priv->dbgfs_dir, + stmmac_fs_dir, + dev->name); + break; + } +done: + return NOTIFY_DONE; +} + +static struct notifier_block stmmac_notifier = { + .notifier_call = stmmac_device_event, +}; + static void stmmac_init_fs(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); @@ -4259,12 +4299,15 @@ static void stmmac_init_fs(struct net_device *dev) /* Entry to report the DMA HW features */ debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev, &stmmac_dma_cap_fops); + + register_netdevice_notifier(&stmmac_notifier); } static void stmmac_exit_fs(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); + unregister_netdevice_notifier(&stmmac_notifier); debugfs_remove_recursive(priv->dbgfs_dir); } #endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index bedaff0c13bd..d10ac54bf385 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -320,7 +320,7 @@ out: static int stmmac_dt_phy(struct plat_stmmacenet_data *plat, struct device_node *np, struct device *dev) { - bool mdio = true; + bool mdio = !of_phy_is_fixed_link(np); static const struct of_device_id need_mdio_ids[] = { { .compatible = "snps,dwc-qos-ethernet-4.10" }, {}, @@ -412,9 +412,9 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) *mac = NULL; } - rc = of_get_phy_mode(np, &plat->phy_interface); - if (rc) - return ERR_PTR(rc); + plat->phy_interface = device_get_phy_mode(&pdev->dev); + if (plat->phy_interface < 0) + return ERR_PTR(plat->phy_interface); plat->interface = stmmac_of_get_mac_mode(np); if (plat->interface < 0) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c index f3d8b9336b8e..450d7dac3ea6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c @@ -80,7 +80,7 
@@ static struct sk_buff *stmmac_test_get_udp_skb(struct stmmac_priv *priv, if (attr->max_size && (attr->max_size > size)) size = attr->max_size; - skb = netdev_alloc_skb_ip_align(priv->dev, size); + skb = netdev_alloc_skb(priv->dev, size); if (!skb) return NULL; @@ -244,6 +244,8 @@ static int stmmac_test_loopback_validate(struct sk_buff *skb, struct net_device *orig_ndev) { struct stmmac_test_priv *tpriv = pt->af_packet_priv; + unsigned char *src = tpriv->packet->src; + unsigned char *dst = tpriv->packet->dst; struct stmmachdr *shdr; struct ethhdr *ehdr; struct udphdr *uhdr; @@ -260,15 +262,15 @@ static int stmmac_test_loopback_validate(struct sk_buff *skb, goto out; ehdr = (struct ethhdr *)skb_mac_header(skb); - if (tpriv->packet->dst) { - if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst)) + if (dst) { + if (!ether_addr_equal_unaligned(ehdr->h_dest, dst)) goto out; } if (tpriv->packet->sarc) { - if (!ether_addr_equal(ehdr->h_source, ehdr->h_dest)) + if (!ether_addr_equal_unaligned(ehdr->h_source, ehdr->h_dest)) goto out; - } else if (tpriv->packet->src) { - if (!ether_addr_equal(ehdr->h_source, tpriv->packet->src)) + } else if (src) { + if (!ether_addr_equal_unaligned(ehdr->h_source, src)) goto out; } @@ -624,6 +626,8 @@ static int stmmac_test_mcfilt(struct stmmac_priv *priv) return -EOPNOTSUPP; if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries) return -EOPNOTSUPP; + if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins) + return -EOPNOTSUPP; while (--tries) { /* We only need to check the mc_addr for collisions */ @@ -666,6 +670,8 @@ static int stmmac_test_ucfilt(struct stmmac_priv *priv) if (stmmac_filter_check(priv)) return -EOPNOTSUPP; + if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries) + return -EOPNOTSUPP; if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins) return -EOPNOTSUPP; @@ -710,7 +716,7 @@ static int stmmac_test_flowctrl_validate(struct sk_buff *skb, struct ethhdr *ehdr; ehdr = (struct ethhdr *)skb_mac_header(skb); - if (!ether_addr_equal(ehdr->h_source, orig_ndev->dev_addr)) + if (!ether_addr_equal_unaligned(ehdr->h_source, orig_ndev->dev_addr)) goto out; if (ehdr->h_proto != htons(ETH_P_PAUSE)) goto out; @@ -847,12 +853,16 @@ static int stmmac_test_vlan_validate(struct sk_buff *skb, if (tpriv->vlan_id) { if (skb->vlan_proto != htons(proto)) goto out; - if (skb->vlan_tci != tpriv->vlan_id) + if (skb->vlan_tci != tpriv->vlan_id) { + /* Means filter did not work. 
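The ether_addr_equal() to ether_addr_equal_unaligned() switches in these selftest hunks matter because ether_addr_equal() requires both 6-byte addresses to be 16-bit aligned, and the test-packet template fields carry no such guarantee. For reference, a thin wrapper with a hypothetical name:

	#include <linux/etherdevice.h>

	static bool mac_matches(const u8 *a, const u8 *b)
	{
		/* Safe regardless of the operands' alignment. */
		return ether_addr_equal_unaligned(a, b);
	}

On architectures without efficient unaligned loads, the unaligned variant falls back to a plain memcmp().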
*/ + tpriv->ok = false; + complete(&tpriv->comp); goto out; + } } ehdr = (struct ethhdr *)skb_mac_header(skb); - if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst)) + if (!ether_addr_equal_unaligned(ehdr->h_dest, tpriv->packet->dst)) goto out; ihdr = ip_hdr(skb); @@ -961,6 +971,9 @@ static int stmmac_test_vlanfilt_perfect(struct stmmac_priv *priv) { int ret, prev_cap = priv->dma_cap.vlhash; + if (!(priv->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) + return -EOPNOTSUPP; + priv->dma_cap.vlhash = 0; ret = __stmmac_test_vlanfilt(priv); priv->dma_cap.vlhash = prev_cap; @@ -1053,6 +1066,9 @@ static int stmmac_test_dvlanfilt_perfect(struct stmmac_priv *priv) { int ret, prev_cap = priv->dma_cap.vlhash; + if (!(priv->dev->features & NETIF_F_HW_VLAN_STAG_FILTER)) + return -EOPNOTSUPP; + priv->dma_cap.vlhash = 0; ret = __stmmac_test_dvlanfilt(priv); priv->dma_cap.vlhash = prev_cap; @@ -1319,16 +1335,19 @@ static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src, struct stmmac_packet_attrs attr = { }; struct flow_dissector *dissector; struct flow_cls_offload *cls; + int ret, old_enable = 0; struct flow_rule *rule; - int ret; if (!tc_can_offload(priv->dev)) return -EOPNOTSUPP; if (!priv->dma_cap.l3l4fnum) return -EOPNOTSUPP; - if (priv->rss.enable) + if (priv->rss.enable) { + old_enable = priv->rss.enable; + priv->rss.enable = false; stmmac_rss_configure(priv, priv->hw, NULL, priv->plat->rx_queues_to_use); + } dissector = kzalloc(sizeof(*dissector), GFP_KERNEL); if (!dissector) { @@ -1395,7 +1414,8 @@ cleanup_cls: cleanup_dissector: kfree(dissector); cleanup_rss: - if (priv->rss.enable) { + if (old_enable) { + priv->rss.enable = old_enable; stmmac_rss_configure(priv, priv->hw, &priv->rss, priv->plat->rx_queues_to_use); } @@ -1440,16 +1460,19 @@ static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src, struct stmmac_packet_attrs attr = { }; struct flow_dissector *dissector; struct flow_cls_offload *cls; + int ret, old_enable = 0; struct flow_rule *rule; - int ret; if (!tc_can_offload(priv->dev)) return -EOPNOTSUPP; if (!priv->dma_cap.l3l4fnum) return -EOPNOTSUPP; - if (priv->rss.enable) + if (priv->rss.enable) { + old_enable = priv->rss.enable; + priv->rss.enable = false; stmmac_rss_configure(priv, priv->hw, NULL, priv->plat->rx_queues_to_use); + } dissector = kzalloc(sizeof(*dissector), GFP_KERNEL); if (!dissector) { @@ -1521,7 +1544,8 @@ cleanup_cls: cleanup_dissector: kfree(dissector); cleanup_rss: - if (priv->rss.enable) { + if (old_enable) { + priv->rss.enable = old_enable; stmmac_rss_configure(priv, priv->hw, &priv->rss, priv->plat->rx_queues_to_use); } @@ -1574,7 +1598,7 @@ static int stmmac_test_arp_validate(struct sk_buff *skb, struct arphdr *ahdr; ehdr = (struct ethhdr *)skb_mac_header(skb); - if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->src)) + if (!ether_addr_equal_unaligned(ehdr->h_dest, tpriv->packet->src)) goto out; ahdr = arp_hdr(skb); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 7d972e0fd2b0..9ffae12a2122 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -577,6 +577,10 @@ static int tc_setup_cls(struct stmmac_priv *priv, { int ret = 0; + /* When RSS is enabled, the filtering will be bypassed */ + if (priv->rss.enable) + return -EBUSY; + switch (cls->command) { case FLOW_CLS_REPLACE: ret = tc_add_flow(priv, cls); diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index 
a46f4189fde3..bf98e0fa7d8b 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -63,6 +63,7 @@ config TI_CPSW_SWITCHDEV tristate "TI CPSW Switch Support with switchdev" depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST depends on NET_SWITCHDEV + select PAGE_POOL select TI_DAVINCI_MDIO select MFD_SYSCON select REGMAP diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile index d34df8e5cf94..ecf776ad8689 100644 --- a/drivers/net/ethernet/ti/Makefile +++ b/drivers/net/ethernet/ti/Makefile @@ -5,6 +5,7 @@ obj-$(CONFIG_TI_CPSW) += cpsw-common.o obj-$(CONFIG_TI_DAVINCI_EMAC) += cpsw-common.o +obj-$(CONFIG_TI_CPSW_SWITCHDEV) += cpsw-common.o obj-$(CONFIG_TLAN) += tlan.o obj-$(CONFIG_CPMAC) += cpmac.o diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c index 31248a6cc642..fa54efe3be63 100644 --- a/drivers/net/ethernet/ti/cpsw_ethtool.c +++ b/drivers/net/ethernet/ti/cpsw_ethtool.c @@ -73,13 +73,13 @@ enum { }; #define CPSW_STAT(m) CPSW_STATS, \ - FIELD_SIZEOF(struct cpsw_hw_stats, m), \ + sizeof_field(struct cpsw_hw_stats, m), \ offsetof(struct cpsw_hw_stats, m) #define CPDMA_RX_STAT(m) CPDMA_RX_STATS, \ - FIELD_SIZEOF(struct cpdma_chan_stats, m), \ + sizeof_field(struct cpdma_chan_stats, m), \ offsetof(struct cpdma_chan_stats, m) #define CPDMA_TX_STAT(m) CPDMA_TX_STATS, \ - FIELD_SIZEOF(struct cpdma_chan_stats, m), \ + sizeof_field(struct cpdma_chan_stats, m), \ offsetof(struct cpdma_chan_stats, m) static const struct cpsw_stats cpsw_gstrings_stats[] = { diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 37ba708ac781..6614fa3089b2 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -1018,7 +1018,6 @@ static int cpdma_chan_submit_si(struct submit_info *si) struct cpdma_chan *chan = si->chan; struct cpdma_ctlr *ctlr = chan->ctlr; int len = si->len; - int swlen = len; struct cpdma_desc __iomem *desc; dma_addr_t buffer; u32 mode; @@ -1046,7 +1045,6 @@ static int cpdma_chan_submit_si(struct submit_info *si) if (si->data_dma) { buffer = si->data_dma; dma_sync_single_for_device(ctlr->dev, buffer, len, chan->dir); - swlen |= CPDMA_DMA_EXT_MAP; } else { buffer = dma_map_single(ctlr->dev, si->data_virt, len, chan->dir); ret = dma_mapping_error(ctlr->dev, buffer); @@ -1065,7 +1063,8 @@ static int cpdma_chan_submit_si(struct submit_info *si) writel_relaxed(mode | len, &desc->hw_mode); writel_relaxed((uintptr_t)si->token, &desc->sw_token); writel_relaxed(buffer, &desc->sw_buffer); - writel_relaxed(swlen, &desc->sw_len); + writel_relaxed(si->data_dma ? 
len | CPDMA_DMA_EXT_MAP : len, + &desc->sw_len); desc_read(desc, sw_len); __cpdma_chan_submit(chan, desc); diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 1b2702f74455..675f31de59dd 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -2019,7 +2019,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device, goto quit; } - efuse = devm_ioremap_nocache(dev, res.start, size); + efuse = devm_ioremap(dev, res.start, size); if (!efuse) { dev_err(dev, "could not map resource\n"); devm_release_mem_region(dev, res.start, size); diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 86a3f42a3dcc..d6a192c1f337 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -783,28 +783,28 @@ struct netcp_ethtool_stat { #define GBE_STATSA_INFO(field) \ { \ "GBE_A:"#field, GBE_STATSA_MODULE, \ - FIELD_SIZEOF(struct gbe_hw_stats, field), \ + sizeof_field(struct gbe_hw_stats, field), \ offsetof(struct gbe_hw_stats, field) \ } #define GBE_STATSB_INFO(field) \ { \ "GBE_B:"#field, GBE_STATSB_MODULE, \ - FIELD_SIZEOF(struct gbe_hw_stats, field), \ + sizeof_field(struct gbe_hw_stats, field), \ offsetof(struct gbe_hw_stats, field) \ } #define GBE_STATSC_INFO(field) \ { \ "GBE_C:"#field, GBE_STATSC_MODULE, \ - FIELD_SIZEOF(struct gbe_hw_stats, field), \ + sizeof_field(struct gbe_hw_stats, field), \ offsetof(struct gbe_hw_stats, field) \ } #define GBE_STATSD_INFO(field) \ { \ "GBE_D:"#field, GBE_STATSD_MODULE, \ - FIELD_SIZEOF(struct gbe_hw_stats, field), \ + sizeof_field(struct gbe_hw_stats, field), \ offsetof(struct gbe_hw_stats, field) \ } @@ -957,7 +957,7 @@ static const struct netcp_ethtool_stat gbe13_et_stats[] = { #define GBENU_STATS_HOST(field) \ { \ "GBE_HOST:"#field, GBENU_STATS0_MODULE, \ - FIELD_SIZEOF(struct gbenu_hw_stats, field), \ + sizeof_field(struct gbenu_hw_stats, field), \ offsetof(struct gbenu_hw_stats, field) \ } @@ -967,56 +967,56 @@ static const struct netcp_ethtool_stat gbe13_et_stats[] = { #define GBENU_STATS_P1(field) \ { \ "GBE_P1:"#field, GBENU_STATS1_MODULE, \ - FIELD_SIZEOF(struct gbenu_hw_stats, field), \ + sizeof_field(struct gbenu_hw_stats, field), \ offsetof(struct gbenu_hw_stats, field) \ } #define GBENU_STATS_P2(field) \ { \ "GBE_P2:"#field, GBENU_STATS2_MODULE, \ - FIELD_SIZEOF(struct gbenu_hw_stats, field), \ + sizeof_field(struct gbenu_hw_stats, field), \ offsetof(struct gbenu_hw_stats, field) \ } #define GBENU_STATS_P3(field) \ { \ "GBE_P3:"#field, GBENU_STATS3_MODULE, \ - FIELD_SIZEOF(struct gbenu_hw_stats, field), \ + sizeof_field(struct gbenu_hw_stats, field), \ offsetof(struct gbenu_hw_stats, field) \ } #define GBENU_STATS_P4(field) \ { \ "GBE_P4:"#field, GBENU_STATS4_MODULE, \ - FIELD_SIZEOF(struct gbenu_hw_stats, field), \ + sizeof_field(struct gbenu_hw_stats, field), \ offsetof(struct gbenu_hw_stats, field) \ } #define GBENU_STATS_P5(field) \ { \ "GBE_P5:"#field, GBENU_STATS5_MODULE, \ - FIELD_SIZEOF(struct gbenu_hw_stats, field), \ + sizeof_field(struct gbenu_hw_stats, field), \ offsetof(struct gbenu_hw_stats, field) \ } #define GBENU_STATS_P6(field) \ { \ "GBE_P6:"#field, GBENU_STATS6_MODULE, \ - FIELD_SIZEOF(struct gbenu_hw_stats, field), \ + sizeof_field(struct gbenu_hw_stats, field), \ offsetof(struct gbenu_hw_stats, field) \ } #define GBENU_STATS_P7(field) \ { \ "GBE_P7:"#field, GBENU_STATS7_MODULE, \ - FIELD_SIZEOF(struct gbenu_hw_stats, field), \ + sizeof_field(struct 
gbenu_hw_stats, field), \ offsetof(struct gbenu_hw_stats, field) \ } #define GBENU_STATS_P8(field) \ { \ "GBE_P8:"#field, GBENU_STATS8_MODULE, \ - FIELD_SIZEOF(struct gbenu_hw_stats, field), \ + sizeof_field(struct gbenu_hw_stats, field), \ offsetof(struct gbenu_hw_stats, field) \ } @@ -1607,21 +1607,21 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = { #define XGBE_STATS0_INFO(field) \ { \ "GBE_0:"#field, XGBE_STATS0_MODULE, \ - FIELD_SIZEOF(struct xgbe_hw_stats, field), \ + sizeof_field(struct xgbe_hw_stats, field), \ offsetof(struct xgbe_hw_stats, field) \ } #define XGBE_STATS1_INFO(field) \ { \ "GBE_1:"#field, XGBE_STATS1_MODULE, \ - FIELD_SIZEOF(struct xgbe_hw_stats, field), \ + sizeof_field(struct xgbe_hw_stats, field), \ offsetof(struct xgbe_hw_stats, field) \ } #define XGBE_STATS2_INFO(field) \ { \ "GBE_2:"#field, XGBE_STATS2_MODULE, \ - FIELD_SIZEOF(struct xgbe_hw_stats, field), \ + sizeof_field(struct xgbe_hw_stats, field), \ offsetof(struct xgbe_hw_stats, field) \ } diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 21c1b4322ea7..c66aab78dcac 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -1202,7 +1202,7 @@ static int temac_probe(struct platform_device *pdev) /* map device registers */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - lp->regs = devm_ioremap_nocache(&pdev->dev, res->start, + lp->regs = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (IS_ERR(lp->regs)) { dev_err(&pdev->dev, "could not map TEMAC registers\n"); @@ -1296,7 +1296,7 @@ static int temac_probe(struct platform_device *pdev) } else if (pdata) { /* 2nd memory resource specifies DMA registers */ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - lp->sdma_regs = devm_ioremap_nocache(&pdev->dev, res->start, + lp->sdma_regs = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (IS_ERR(lp->sdma_regs)) { dev_err(&pdev->dev, diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c index 56b7791911bf..077c68498f04 100644 --- a/drivers/net/fddi/defxx.c +++ b/drivers/net/fddi/defxx.c @@ -614,7 +614,7 @@ static int dfx_register(struct device *bdev) /* Set up I/O base address. */ if (dfx_use_mmio) { - bp->base.mem = ioremap_nocache(bar_start[0], bar_len[0]); + bp->base.mem = ioremap(bar_start[0], bar_len[0]); if (!bp->base.mem) { printk(KERN_ERR "%s: Cannot map MMIO\n", print_name); err = -ENOMEM; diff --git a/drivers/net/fddi/defza.c b/drivers/net/fddi/defza.c index 060712c666bf..eaf85db53a5e 100644 --- a/drivers/net/fddi/defza.c +++ b/drivers/net/fddi/defza.c @@ -1318,7 +1318,7 @@ static int fza_probe(struct device *bdev) } /* MMIO mapping setup. 
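The long FIELD_SIZEOF → sizeof_field runs above (and in several later hunks) are a mechanical rename: both macros compute the size of a struct member without needing an instance, and sizeof_field() is the surviving spelling. For reference, the macro — this mirrors its <linux/stddef.h> definition — plus a tiny standalone demo with an invented struct:

#include <stdio.h>
#include <stddef.h>

/* Take a member of a NULL-based TYPE pointer inside sizeof(); the operand
 * of sizeof is never evaluated, so nothing is dereferenced. */
#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))

struct demo_stats {                     /* illustrative stand-in */
        unsigned long head_enqueue;
        unsigned long good_dequeue;
};

int main(void)
{
        printf("%zu\n", sizeof_field(struct demo_stats, head_enqueue));
        return 0;
}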
*/ - mmio = ioremap_nocache(start, len); + mmio = ioremap(start, len); if (!mmio) { pr_err("%s: cannot map MMIO\n", fp->name); ret = -ENOMEM; diff --git a/drivers/net/fjes/fjes_ethtool.c b/drivers/net/fjes/fjes_ethtool.c index 09f3604cfbf8..746736c83873 100644 --- a/drivers/net/fjes/fjes_ethtool.c +++ b/drivers/net/fjes/fjes_ethtool.c @@ -21,7 +21,7 @@ struct fjes_stats { #define FJES_STAT(name, stat) { \ .stat_string = name, \ - .sizeof_stat = FIELD_SIZEOF(struct fjes_adapter, stat), \ + .sizeof_stat = sizeof_field(struct fjes_adapter, stat), \ .stat_offset = offsetof(struct fjes_adapter, stat) \ } diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c index 8a4fbfacad7e..065bb0a40b1d 100644 --- a/drivers/net/fjes/fjes_hw.c +++ b/drivers/net/fjes/fjes_hw.c @@ -40,7 +40,7 @@ static u8 *fjes_hw_iomap(struct fjes_hw *hw) return NULL; } - base = (u8 *)ioremap_nocache(hw->hw_res.start, hw->hw_res.size); + base = (u8 *)ioremap(hw->hw_res.start, hw->hw_res.size); return base; } diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c index b517c1af9de0..91a1059517f5 100644 --- a/drivers/net/fjes/fjes_main.c +++ b/drivers/net/fjes/fjes_main.c @@ -166,6 +166,9 @@ static int fjes_acpi_add(struct acpi_device *device) /* create platform_device */ plat_dev = platform_device_register_simple(DRV_NAME, 0, fjes_resource, ARRAY_SIZE(fjes_resource)); + if (IS_ERR(plat_dev)) + return PTR_ERR(plat_dev); + device->driver_data = plat_dev; return 0; diff --git a/drivers/net/fjes/fjes_trace.h b/drivers/net/fjes/fjes_trace.h index c611b6a80b20..9237b69d8e21 100644 --- a/drivers/net/fjes/fjes_trace.h +++ b/drivers/net/fjes/fjes_trace.h @@ -28,7 +28,7 @@ TRACE_EVENT(fjes_hw_issue_request_command, __field(u8, cs_busy) __field(u8, cs_complete) __field(int, timeout) - __field(int, ret); + __field(int, ret) ), TP_fast_assign( __entry->cr_req = cr->bits.req_code; diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 5c6b7fc04ea6..75757e9954ba 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -1156,7 +1156,7 @@ static void geneve_setup(struct net_device *dev) static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = { [IFLA_GENEVE_ID] = { .type = NLA_U32 }, - [IFLA_GENEVE_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) }, + [IFLA_GENEVE_REMOTE] = { .len = sizeof_field(struct iphdr, daddr) }, [IFLA_GENEVE_REMOTE6] = { .len = sizeof(struct in6_addr) }, [IFLA_GENEVE_TTL] = { .type = NLA_U8 }, [IFLA_GENEVE_TOS] = { .type = NLA_U8 }, diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index ecfe26215935..9b3ba98726d7 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -38,7 +38,6 @@ struct pdp_ctx { struct hlist_node hlist_addr; union { - u64 tid; struct { u64 tid; u16 flow; @@ -541,7 +540,7 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev, mtu = dst_mtu(&rt->dst); } - rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu); + rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu, false); if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) && mtu < ntohs(iph->tot_len)) { @@ -641,9 +640,16 @@ static void gtp_link_setup(struct net_device *dev) } static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize); -static void gtp_hashtable_free(struct gtp_dev *gtp); static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]); +static void gtp_destructor(struct net_device *dev) +{ + struct gtp_dev *gtp = netdev_priv(dev); + + kfree(gtp->addr_hash); + kfree(gtp->tid_hash); +} + static int gtp_newlink(struct net *src_net, struct 
net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) @@ -661,10 +667,13 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev, if (err < 0) return err; - if (!data[IFLA_GTP_PDP_HASHSIZE]) + if (!data[IFLA_GTP_PDP_HASHSIZE]) { hashsize = 1024; - else + } else { hashsize = nla_get_u32(data[IFLA_GTP_PDP_HASHSIZE]); + if (!hashsize) + hashsize = 1024; + } err = gtp_hashtable_new(gtp, hashsize); if (err < 0) @@ -678,13 +687,15 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev, gn = net_generic(dev_net(dev), gtp_net_id); list_add_rcu(&gtp->list, &gn->gtp_dev_list); + dev->priv_destructor = gtp_destructor; netdev_dbg(dev, "registered new GTP interface\n"); return 0; out_hashtable: - gtp_hashtable_free(gtp); + kfree(gtp->addr_hash); + kfree(gtp->tid_hash); out_encap: gtp_encap_disable(gtp); return err; @@ -693,8 +704,13 @@ out_encap: static void gtp_dellink(struct net_device *dev, struct list_head *head) { struct gtp_dev *gtp = netdev_priv(dev); + struct pdp_ctx *pctx; + int i; + + for (i = 0; i < gtp->hash_size; i++) + hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) + pdp_context_delete(pctx); - gtp_hashtable_free(gtp); list_del_rcu(&gtp->list); unregister_netdevice_queue(dev, head); } @@ -772,20 +788,6 @@ err1: return -ENOMEM; } -static void gtp_hashtable_free(struct gtp_dev *gtp) -{ - struct pdp_ctx *pctx; - int i; - - for (i = 0; i < gtp->hash_size; i++) - hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) - pdp_context_delete(pctx); - - synchronize_rcu(); - kfree(gtp->addr_hash); - kfree(gtp->tid_hash); -} - static struct sock *gtp_encap_enable_socket(int fd, int type, struct gtp_dev *gtp) { @@ -802,19 +804,21 @@ static struct sock *gtp_encap_enable_socket(int fd, int type, return NULL; } - if (sock->sk->sk_protocol != IPPROTO_UDP) { + sk = sock->sk; + if (sk->sk_protocol != IPPROTO_UDP || + sk->sk_type != SOCK_DGRAM || + (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)) { pr_debug("socket fd=%d not UDP\n", fd); sk = ERR_PTR(-EINVAL); goto out_sock; } - lock_sock(sock->sk); - if (sock->sk->sk_user_data) { + lock_sock(sk); + if (sk->sk_user_data) { sk = ERR_PTR(-EBUSY); - goto out_sock; + goto out_rel_sock; } - sk = sock->sk; sock_hold(sk); tuncfg.sk_user_data = gtp; @@ -824,8 +828,9 @@ static struct sock *gtp_encap_enable_socket(int fd, int type, setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg); -out_sock: +out_rel_sock: release_sock(sock->sk); +out_sock: sockfd_put(sock); return sk; } @@ -926,24 +931,31 @@ static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info) } } -static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk, - struct genl_info *info) +static int gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk, + struct genl_info *info) { + struct pdp_ctx *pctx, *pctx_tid = NULL; struct net_device *dev = gtp->dev; u32 hash_ms, hash_tid = 0; - struct pdp_ctx *pctx; + unsigned int version; bool found = false; __be32 ms_addr; ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]); hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size; + version = nla_get_u32(info->attrs[GTPA_VERSION]); - hlist_for_each_entry_rcu(pctx, &gtp->addr_hash[hash_ms], hlist_addr) { - if (pctx->ms_addr_ip4.s_addr == ms_addr) { - found = true; - break; - } - } + pctx = ipv4_pdp_find(gtp, ms_addr); + if (pctx) + found = true; + if (version == GTP_V0) + pctx_tid = gtp0_pdp_find(gtp, + nla_get_u64(info->attrs[GTPA_TID])); + else if (version == GTP_V1) + pctx_tid = gtp1_pdp_find(gtp, 
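Two things happen in the gtp_newlink()/gtp_dellink() hunks here: a user-supplied hash size of zero now falls back to the default instead of creating an unusable table, and freeing of the hash tables moves into dev->priv_destructor, which the core runs from free_netdev() only once the device's last reference (and, on unregister, an RCU grace period) is gone. A minimal sketch of the destructor pattern — demo_* names are invented:

#include <linux/netdevice.h>
#include <linux/slab.h>

struct demo_priv {
        struct hlist_head *hash;        /* freed by the destructor below */
};

static void demo_destructor(struct net_device *dev)
{
        struct demo_priv *priv = netdev_priv(dev);

        /* Runs from free_netdev() after all references are dropped, i.e.
         * strictly later than dellink/ndo_uninit, so RCU readers that were
         * still walking the table cannot see freed memory. */
        kfree(priv->hash);
}

static int demo_newlink_setup(struct net_device *dev, u32 hashsize)
{
        struct demo_priv *priv = netdev_priv(dev);

        if (!hashsize)                  /* reject nonsense from userspace */
                hashsize = 1024;

        priv->hash = kcalloc(hashsize, sizeof(*priv->hash), GFP_KERNEL);
        if (!priv->hash)
                return -ENOMEM;

        dev->priv_destructor = demo_destructor;
        return 0;
}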
nla_get_u32(info->attrs[GTPA_I_TEI])); + if (pctx_tid) + found = true; if (found) { if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) @@ -951,6 +963,11 @@ static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk, if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE) return -EOPNOTSUPP; + if (pctx && pctx_tid) + return -EEXIST; + if (!pctx) + pctx = pctx_tid; + ipv4_pdp_fill(pctx, info); if (pctx->gtp_version == GTP_V0) @@ -1074,7 +1091,7 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info) goto out_unlock; } - err = ipv4_pdp_add(gtp, sk, info); + err = gtp_pdp_add(gtp, sk, info); out_unlock: rcu_read_unlock(); @@ -1232,43 +1249,46 @@ static int gtp_genl_dump_pdp(struct sk_buff *skb, struct netlink_callback *cb) { struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp; + int i, j, bucket = cb->args[0], skip = cb->args[1]; struct net *net = sock_net(skb->sk); - struct gtp_net *gn = net_generic(net, gtp_net_id); - unsigned long tid = cb->args[1]; - int i, k = cb->args[0], ret; struct pdp_ctx *pctx; + struct gtp_net *gn; + + gn = net_generic(net, gtp_net_id); if (cb->args[4]) return 0; + rcu_read_lock(); list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) { if (last_gtp && last_gtp != gtp) continue; else last_gtp = NULL; - for (i = k; i < gtp->hash_size; i++) { - hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) { - if (tid && tid != pctx->u.tid) - continue; - else - tid = 0; - - ret = gtp_genl_fill_info(skb, - NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, - cb->nlh->nlmsg_type, pctx); - if (ret < 0) { + for (i = bucket; i < gtp->hash_size; i++) { + j = 0; + hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], + hlist_tid) { + if (j >= skip && + gtp_genl_fill_info(skb, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, + cb->nlh->nlmsg_type, pctx)) { cb->args[0] = i; - cb->args[1] = pctx->u.tid; + cb->args[1] = j; cb->args[2] = (unsigned long)gtp; goto out; } + j++; } + skip = 0; } + bucket = 0; } cb->args[4] = 1; out: + rcu_read_unlock(); return skb->len; } diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index 23281aeeb222..71d6629e65c9 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -654,10 +654,10 @@ static void sixpack_close(struct tty_struct *tty) { struct sixpack *sp; - write_lock_bh(&disc_data_lock); + write_lock_irq(&disc_data_lock); sp = tty->disc_data; tty->disc_data = NULL; - write_unlock_bh(&disc_data_lock); + write_unlock_irq(&disc_data_lock); if (!sp) return; diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index c5bfa19ddb93..deef14215110 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c @@ -773,10 +773,10 @@ static void mkiss_close(struct tty_struct *tty) { struct mkiss *ax; - write_lock_bh(&disc_data_lock); + write_lock_irq(&disc_data_lock); ax = tty->disc_data; tty->disc_data = NULL; - write_unlock_bh(&disc_data_lock); + write_unlock_irq(&disc_data_lock); if (!ax) return; diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 9caa876ce6e8..dc44819946e6 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -169,7 +169,6 @@ struct rndis_device { u8 hw_mac_adr[ETH_ALEN]; u8 rss_key[NETVSC_HASH_KEYLEN]; - u16 rx_table[ITAB_NUM]; }; @@ -940,6 +939,8 @@ struct net_device_context { u32 tx_table[VRSS_SEND_TAB_SIZE]; + u16 rx_table[ITAB_NUM]; + /* Ethtool settings */ u8 duplex; u32 speed; diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 
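The rewritten gtp_genl_dump_pdp() above also changes how a partial netlink dump resumes: instead of remembering the last TID seen (which breaks if that context is deleted between two dump calls), it stashes a (bucket, items-to-skip) cursor in cb->args[]. The general shape of such a resumable dump, reduced to a sketch with invented demo_* types:

#include <linux/netlink.h>
#include <linux/rculist.h>

#define DEMO_HASH_SIZE 64

struct demo_entry { struct hlist_node node; };

static struct hlist_head demo_hash[DEMO_HASH_SIZE];

/* Elided: returns nonzero when the skb is full, as gtp_genl_fill_info()
 * does in the hunk above. */
static int demo_fill_one(struct sk_buff *skb, struct netlink_callback *cb,
                         struct demo_entry *e)
{
        return 0;
}

static int demo_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        int i, j, bucket = cb->args[0], skip = cb->args[1];
        struct demo_entry *e;

        rcu_read_lock();
        for (i = bucket; i < DEMO_HASH_SIZE; i++) {
                j = 0;
                hlist_for_each_entry_rcu(e, &demo_hash[i], node) {
                        if (j >= skip && demo_fill_one(skb, cb, e)) {
                                /* skb full: remember where to resume */
                                cb->args[0] = i;
                                cb->args[1] = j;
                                goto out;
                        }
                        j++;
                }
                skip = 0;       /* only the first bucket is partially done */
        }
out:
        rcu_read_unlock();
        return skb->len;
}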
eff8fef4f775..f3f9eb8a402a 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -571,7 +571,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) /* Use the skb control buffer for building up the packet */ BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) > - FIELD_SIZEOF(struct sk_buff, cb)); + sizeof_field(struct sk_buff, cb)); packet = (struct hv_netvsc_packet *)skb->cb; packet->q_idx = skb_get_queue_mapping(skb); @@ -1662,7 +1662,7 @@ static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, rndis_dev = ndev->extension; if (indir) { for (i = 0; i < ITAB_NUM; i++) - indir[i] = rndis_dev->rx_table[i]; + indir[i] = ndc->rx_table[i]; } if (key) @@ -1692,7 +1692,7 @@ static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir, return -EINVAL; for (i = 0; i < ITAB_NUM; i++) - rndis_dev->rx_table[i] = indir[i]; + ndc->rx_table[i] = indir[i]; } if (!key) { diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 206b4e77eaf0..e66d77dc28c8 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -773,6 +773,7 @@ static int rndis_set_rss_param_msg(struct rndis_device *rdev, const u8 *rss_key, u16 flag) { struct net_device *ndev = rdev->ndev; + struct net_device_context *ndc = netdev_priv(ndev); struct rndis_request *request; struct rndis_set_request *set; struct rndis_set_complete *set_complete; @@ -812,7 +813,7 @@ static int rndis_set_rss_param_msg(struct rndis_device *rdev, /* Set indirection table entries */ itab = (u32 *)(rssp + 1); for (i = 0; i < ITAB_NUM; i++) - itab[i] = rdev->rx_table[i]; + itab[i] = ndc->rx_table[i]; /* Set hask key values */ keyp = (u8 *)((unsigned long)rssp + rssp->hashkey_offset); @@ -1171,6 +1172,9 @@ int rndis_set_subchannel(struct net_device *ndev, wait_event(nvdev->subchan_open, atomic_read(&nvdev->open_chn) == nvdev->num_chn); + for (i = 0; i < VRSS_SEND_TAB_SIZE; i++) + ndev_ctx->tx_table[i] = i % nvdev->num_chn; + /* ignore failures from setting rss parameters, still have channels */ if (dev_info) rndis_filter_set_rss_param(rdev, dev_info->rss_key); @@ -1180,9 +1184,6 @@ int rndis_set_subchannel(struct net_device *ndev, netif_set_real_num_tx_queues(ndev, nvdev->num_chn); netif_set_real_num_rx_queues(ndev, nvdev->num_chn); - for (i = 0; i < VRSS_SEND_TAB_SIZE; i++) - ndev_ctx->tx_table[i] = i % nvdev->num_chn; - return 0; } @@ -1312,6 +1313,7 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, struct netvsc_device_info *device_info) { struct net_device *net = hv_get_drvdata(dev); + struct net_device_context *ndc = netdev_priv(net); struct netvsc_device *net_device; struct rndis_device *rndis_device; struct ndis_recv_scale_cap rsscap; @@ -1398,9 +1400,11 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, /* We will use the given number of channels if available. 
*/ net_device->num_chn = min(net_device->max_chn, device_info->num_chn); - for (i = 0; i < ITAB_NUM; i++) - rndis_device->rx_table[i] = ethtool_rxfh_indir_default( + if (!netif_is_rxfh_configured(net)) { + for (i = 0; i < ITAB_NUM; i++) + ndc->rx_table[i] = ethtool_rxfh_indir_default( i, net_device->num_chn); + } atomic_set(&net_device->open_chn, 1); vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open); @@ -1439,8 +1443,6 @@ void rndis_filter_device_remove(struct hv_device *dev, /* Halt and release the rndis device */ rndis_filter_halt_device(net_dev, rndis_dev); - net_dev->extension = NULL; - netvsc_device_remove(dev); } diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 05631d97eeb4..c5bf61565726 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -513,10 +513,11 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) const struct macvlan_dev *dest; if (vlan->mode == MACVLAN_MODE_BRIDGE) { - const struct ethhdr *eth = (void *)skb->data; + const struct ethhdr *eth = skb_eth_hdr(skb); /* send to other bridge ports directly */ if (is_multicast_ether_addr(eth->h_dest)) { + skb_reset_mac_header(skb); macvlan_broadcast(skb, port, dev, MACVLAN_MODE_BRIDGE); goto xmit_world; } diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c index 059711edfc61..4b39aba2e9c4 100644 --- a/drivers/net/netdevsim/dev.c +++ b/drivers/net/netdevsim/dev.c @@ -53,7 +53,7 @@ static ssize_t nsim_dev_take_snapshot_write(struct file *file, get_random_bytes(dummy_data, NSIM_DEV_DUMMY_REGION_SIZE); - id = devlink_region_shapshot_id_get(priv_to_devlink(nsim_dev)); + id = devlink_region_snapshot_id_get(priv_to_devlink(nsim_dev)); err = devlink_region_snapshot_create(nsim_dev->dummy_region, dummy_data, id, kfree); if (err) { diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 5848219005d7..8dc461f7574b 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -340,14 +340,14 @@ config DAVICOM_PHY Currently supports dm9161e and dm9131 config DP83822_PHY - tristate "Texas Instruments DP83822 PHY" + tristate "Texas Instruments DP83822/825 PHYs" ---help--- - Supports the DP83822 PHY. + Supports the DP83822 and DP83825I PHYs. config DP83TC811_PHY - tristate "Texas Instruments DP83TC822 PHY" + tristate "Texas Instruments DP83TC811 PHY" ---help--- - Supports the DP83TC822 PHY. + Supports the DP83TC811 PHY. config DP83848_PHY tristate "Texas Instruments DP83848 PHY" diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c index 3b29d381116f..975789d9349d 100644 --- a/drivers/net/phy/aquantia_main.c +++ b/drivers/net/phy/aquantia_main.c @@ -627,6 +627,8 @@ static struct phy_driver aqr_driver[] = { .config_intr = aqr_config_intr, .ack_interrupt = aqr_ack_interrupt, .read_status = aqr_read_status, + .suspend = aqr107_suspend, + .resume = aqr107_resume, }, { PHY_ID_MATCH_MODEL(PHY_ID_AQR106), diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index 9cd9dcee4eb2..01cf71358359 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -97,6 +97,7 @@ #define DP83867_PHYCR_FIFO_DEPTH_MAX 0x03 #define DP83867_PHYCR_FIFO_DEPTH_MASK GENMASK(15, 14) #define DP83867_PHYCR_RESERVED_MASK BIT(11) +#define DP83867_PHYCR_FORCE_LINK_GOOD BIT(10) /* RGMIIDCTL bits */ #define DP83867_RGMII_TX_CLK_DELAY_MAX 0xf @@ -599,7 +600,12 @@ static int dp83867_phy_reset(struct phy_device *phydev) usleep_range(10, 20); - return 0; + /* After reset FORCE_LINK_GOOD bit is set. 
Although the + * default value should be unset. Disable FORCE_LINK_GOOD + * for the phy to work properly. + */ + return phy_modify(phydev, MII_DP83867_PHYCTRL, + DP83867_PHYCR_FORCE_LINK_GOOD, 0); } static struct phy_driver dp83867_driver[] = { diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 0887ed2bb050..b13c52873ef5 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -553,7 +553,7 @@ static const struct device_type mdio_bus_phy_type = { .pm = MDIO_BUS_PHY_PM_OPS, }; -static int phy_request_driver_module(struct phy_device *dev, int phy_id) +static int phy_request_driver_module(struct phy_device *dev, u32 phy_id) { int ret; @@ -565,15 +565,15 @@ static int phy_request_driver_module(struct phy_device *dev, int phy_id) * then modprobe isn't available. */ if (IS_ENABLED(CONFIG_MODULES) && ret < 0 && ret != -ENOENT) { - phydev_err(dev, "error %d loading PHY driver module for ID 0x%08x\n", - ret, phy_id); + phydev_err(dev, "error %d loading PHY driver module for ID 0x%08lx\n", + ret, (unsigned long)phy_id); return ret; } return 0; } -struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, +struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id, bool is_c45, struct phy_c45_device_ids *c45_ids) { diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 9a616d6bc4eb..ee7a718662c6 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -442,8 +442,7 @@ static void phylink_mac_link_up(struct phylink *pl, pl->cur_interface = link_state.interface; pl->ops->mac_link_up(pl->config, pl->link_an_mode, - pl->phy_state.interface, - pl->phydev); + pl->cur_interface, pl->phydev); if (ndev) netif_carrier_on(ndev); @@ -567,6 +566,9 @@ static int phylink_register_sfp(struct phylink *pl, struct sfp_bus *bus; int ret; + if (!fwnode) + return 0; + bus = sfp_bus_find_fwnode(fwnode); if (IS_ERR(bus)) { ret = PTR_ERR(bus); diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c index 2a91c192659f..61d7e0d1d77d 100644 --- a/drivers/net/slip/slip.c +++ b/drivers/net/slip/slip.c @@ -452,9 +452,16 @@ static void slip_transmit(struct work_struct *work) */ static void slip_write_wakeup(struct tty_struct *tty) { - struct slip *sl = tty->disc_data; + struct slip *sl; + + rcu_read_lock(); + sl = rcu_dereference(tty->disc_data); + if (!sl) + goto out; schedule_work(&sl->tx_work); +out: + rcu_read_unlock(); } static void sl_tx_timeout(struct net_device *dev) @@ -882,10 +889,11 @@ static void slip_close(struct tty_struct *tty) return; spin_lock_bh(&sl->lock); - tty->disc_data = NULL; + rcu_assign_pointer(tty->disc_data, NULL); sl->tty = NULL; spin_unlock_bh(&sl->lock); + synchronize_rcu(); flush_work(&sl->tx_work); /* VSV = very important to remove timers */ diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 683d371e6e82..35e884a8242d 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1936,6 +1936,10 @@ drop: if (ret != XDP_PASS) { rcu_read_unlock(); local_bh_enable(); + if (frags) { + tfile->napi.skb = NULL; + mutex_unlock(&tfile->napi_mutex); + } return total_len; } } diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index cf1f3f0a4b9b..c2a58f05b9a1 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -20,6 +20,7 @@ #include <linux/mdio.h> #include <linux/phy.h> #include <net/ip6_checksum.h> +#include <net/vxlan.h> #include <linux/interrupt.h> #include <linux/irqdomain.h> #include <linux/irq.h> @@ -511,7 +512,7 @@ static int 
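The slip.c hunks above close a race between slip_write_wakeup() and slip_close(): disc_data is now read under rcu_read_lock(), cleared with rcu_assign_pointer(), and the close path waits out a grace period before flushing the work item that dereferences it. The essential pattern, stripped to a sketch (demo_* names invented; the __rcu annotation is omitted here for brevity):

#include <linux/rcupdate.h>
#include <linux/workqueue.h>

struct demo_link {
        struct work_struct tx_work;
};

/* writer side: detach, then wait for all readers before tearing down */
static void demo_detach(struct demo_link **slot, struct demo_link *link)
{
        rcu_assign_pointer(*slot, NULL);
        synchronize_rcu();              /* no reader can still see 'link' */
        flush_work(&link->tx_work);     /* any work queued by a reader runs now */
}

/* reader side: may run concurrently with demo_detach() */
static void demo_wakeup(struct demo_link **slot)
{
        struct demo_link *link;

        rcu_read_lock();
        link = rcu_dereference(*slot);
        if (link)
                schedule_work(&link->tx_work);
        rcu_read_unlock();
}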
lan78xx_read_stats(struct lan78xx_net *dev, } } else { netdev_warn(dev->net, - "Failed to read stat ret = 0x%x", ret); + "Failed to read stat ret = %d", ret); } kfree(stats); @@ -1808,6 +1809,7 @@ static int lan78xx_mdio_init(struct lan78xx_net *dev) dev->mdiobus->read = lan78xx_mdiobus_read; dev->mdiobus->write = lan78xx_mdiobus_write; dev->mdiobus->name = "lan78xx-mdiobus"; + dev->mdiobus->parent = &dev->udev->dev; snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d", dev->udev->bus->busnum, dev->udev->devnum); @@ -2723,11 +2725,6 @@ static int lan78xx_stop(struct net_device *net) return 0; } -static int lan78xx_linearize(struct sk_buff *skb) -{ - return skb_linearize(skb); -} - static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev, struct sk_buff *skb, gfp_t flags) { @@ -2739,8 +2736,10 @@ static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev, return NULL; } - if (lan78xx_linearize(skb) < 0) + if (skb_linearize(skb)) { + dev_kfree_skb_any(skb); return NULL; + } tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_; @@ -3670,6 +3669,19 @@ static void lan78xx_tx_timeout(struct net_device *net) tasklet_schedule(&dev->bh); } +static netdev_features_t lan78xx_features_check(struct sk_buff *skb, + struct net_device *netdev, + netdev_features_t features) +{ + if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE) + features &= ~NETIF_F_GSO_MASK; + + features = vlan_features_check(skb, features); + features = vxlan_features_check(skb, features); + + return features; +} + static const struct net_device_ops lan78xx_netdev_ops = { .ndo_open = lan78xx_open, .ndo_stop = lan78xx_stop, @@ -3683,6 +3695,7 @@ static const struct net_device_ops lan78xx_netdev_ops = { .ndo_set_features = lan78xx_set_features, .ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid, + .ndo_features_check = lan78xx_features_check, }; static void lan78xx_stat_monitor(struct timer_list *t) @@ -3752,6 +3765,7 @@ static int lan78xx_probe(struct usb_interface *intf, /* MTU range: 68 - 9000 */ netdev->max_mtu = MAX_SINGLE_PACKET_SIZE; + netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER); dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0; dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1; diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 4196c0e32740..9485c8d1de8a 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1062,6 +1062,7 @@ static const struct usb_device_id products[] = { {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */ {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */ {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */ + {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */ /* 3. Combined interface devices matching on interface number */ {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index c5ebf35d2488..3f425f974d03 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -31,7 +31,7 @@ #define NETNEXT_VERSION "11" /* Information for net */ -#define NET_VERSION "10" +#define NET_VERSION "11" #define DRIVER_VERSION "v1." NETNEXT_VERSION "." 
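lan78xx gains an .ndo_features_check above: GSO is disabled per-skb once the linearized frame would no longer fit a single USB transfer, and the generic VLAN/VXLAN helpers strip whatever the encapsulation headers rule out. The callback's shape, as a sketch with invented limits (DEMO_* constants stand in for the driver's TX_OVERHEAD and MAX_SINGLE_PACKET_SIZE):

#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <net/vxlan.h>

#define DEMO_TX_OVERHEAD  8
#define DEMO_MAX_FRAME    9000          /* invented hardware limit */

static netdev_features_t demo_features_check(struct sk_buff *skb,
                                             struct net_device *netdev,
                                             netdev_features_t features)
{
        /* Too big for one hardware buffer: force software GSO for this skb */
        if (skb->len + DEMO_TX_OVERHEAD > DEMO_MAX_FRAME)
                features &= ~NETIF_F_GSO_MASK;

        /* Let the generic helpers drop features the headers don't allow */
        features = vlan_features_check(skb, features);
        features = vxlan_features_check(skb, features);

        return features;
}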
NET_VERSION #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" @@ -68,6 +68,7 @@ #define PLA_LED_FEATURE 0xdd92 #define PLA_PHYAR 0xde00 #define PLA_BOOT_CTRL 0xe004 +#define PLA_LWAKE_CTRL_REG 0xe007 #define PLA_GPHY_INTR_IMR 0xe022 #define PLA_EEE_CR 0xe040 #define PLA_EEEP_CR 0xe080 @@ -95,6 +96,7 @@ #define PLA_TALLYCNT 0xe890 #define PLA_SFF_STS_7 0xe8de #define PLA_PHYSTATUS 0xe908 +#define PLA_CONFIG6 0xe90a /* CONFIG6 */ #define PLA_BP_BA 0xfc26 #define PLA_BP_0 0xfc28 #define PLA_BP_1 0xfc2a @@ -107,6 +109,7 @@ #define PLA_BP_EN 0xfc38 #define USB_USB2PHY 0xb41e +#define USB_SSPHYLINK1 0xb426 #define USB_SSPHYLINK2 0xb428 #define USB_U2P3_CTRL 0xb460 #define USB_CSR_DUMMY1 0xb464 @@ -300,6 +303,9 @@ #define LINK_ON_WAKE_EN 0x0010 #define LINK_OFF_WAKE_EN 0x0008 +/* PLA_CONFIG6 */ +#define LANWAKE_CLR_EN BIT(0) + /* PLA_CONFIG5 */ #define BWF_EN 0x0040 #define MWF_EN 0x0020 @@ -312,6 +318,7 @@ /* PLA_PHY_PWR */ #define TX_10M_IDLE_EN 0x0080 #define PFM_PWM_SWITCH 0x0040 +#define TEST_IO_OFF BIT(4) /* PLA_MAC_PWR_CTRL */ #define D3_CLK_GATED_EN 0x00004000 @@ -324,6 +331,7 @@ #define MAC_CLK_SPDWN_EN BIT(15) /* PLA_MAC_PWR_CTRL3 */ +#define PLA_MCU_SPDWN_EN BIT(14) #define PKT_AVAIL_SPDWN_EN 0x0100 #define SUSPEND_SPDWN_EN 0x0004 #define U1U2_SPDWN_EN 0x0002 @@ -354,6 +362,9 @@ /* PLA_BOOT_CTRL */ #define AUTOLOAD_DONE 0x0002 +/* PLA_LWAKE_CTRL_REG */ +#define LANWAKE_PIN BIT(7) + /* PLA_SUSPEND_FLAG */ #define LINK_CHG_EVENT BIT(0) @@ -365,13 +376,18 @@ #define DEBUG_LTSSM 0x0082 /* PLA_EXTRA_STATUS */ +#define CUR_LINK_OK BIT(15) #define U3P3_CHECK_EN BIT(7) /* RTL_VER_05 only */ #define LINK_CHANGE_FLAG BIT(8) +#define POLL_LINK_CHG BIT(0) /* USB_USB2PHY */ #define USB2PHY_SUSPEND 0x0001 #define USB2PHY_L1 0x0002 +/* USB_SSPHYLINK1 */ +#define DELAY_PHY_PWR_CHG BIT(1) + /* USB_SSPHYLINK2 */ #define pwd_dn_scale_mask 0x3ffe #define pwd_dn_scale(x) ((x) << 1) @@ -2863,6 +2879,17 @@ static int rtl8153_enable(struct r8152 *tp) r8153_set_rx_early_timeout(tp); r8153_set_rx_early_size(tp); + if (tp->version == RTL_VER_09) { + u32 ocp_data; + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK); + ocp_data &= ~FC_PATCH_TASK; + ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); + usleep_range(1000, 2000); + ocp_data |= FC_PATCH_TASK; + ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); + } + return rtl_enable(tp); } @@ -3376,8 +3403,8 @@ static void rtl8153b_runtime_enable(struct r8152 *tp, bool enable) r8153b_ups_en(tp, false); r8153_queue_wake(tp, false); rtl_runtime_suspend_enable(tp, false); - r8153_u2p3en(tp, true); - r8153b_u1u2en(tp, true); + if (tp->udev->speed != USB_SPEED_HIGH) + r8153b_u1u2en(tp, true); } } @@ -4675,7 +4702,6 @@ static void r8153b_hw_phy_cfg(struct r8152 *tp) r8153_aldps_en(tp, true); r8152b_enable_fc(tp); - r8153_u2p3en(tp, true); set_bit(PHY_RESET, &tp->flags); } @@ -4954,6 +4980,8 @@ static void rtl8152_down(struct r8152 *tp) static void rtl8153_up(struct r8152 *tp) { + u32 ocp_data; + if (test_bit(RTL8152_UNPLUG, &tp->flags)) return; @@ -4961,6 +4989,19 @@ static void rtl8153_up(struct r8152 *tp) r8153_u2p3en(tp, false); r8153_aldps_en(tp, false); r8153_first_init(tp); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6); + ocp_data |= LANWAKE_CLR_EN; + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG); + ocp_data &= ~LANWAKE_PIN; + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG, ocp_data); + + ocp_data = ocp_read_word(tp, 
MCU_TYPE_USB, USB_SSPHYLINK1); + ocp_data &= ~DELAY_PHY_PWR_CHG; + ocp_write_word(tp, MCU_TYPE_USB, USB_SSPHYLINK1, ocp_data); + r8153_aldps_en(tp, true); switch (tp->version) { @@ -4979,11 +5020,17 @@ static void rtl8153_up(struct r8152 *tp) static void rtl8153_down(struct r8152 *tp) { + u32 ocp_data; + if (test_bit(RTL8152_UNPLUG, &tp->flags)) { rtl_drop_queued_tx(tp); return; } + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6); + ocp_data &= ~LANWAKE_CLR_EN; + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data); + r8153_u1u2en(tp, false); r8153_u2p3en(tp, false); r8153_power_cut_en(tp, false); @@ -4994,6 +5041,8 @@ static void rtl8153_down(struct r8152 *tp) static void rtl8153b_up(struct r8152 *tp) { + u32 ocp_data; + if (test_bit(RTL8152_UNPLUG, &tp->flags)) return; @@ -5004,18 +5053,29 @@ static void rtl8153b_up(struct r8152 *tp) r8153_first_init(tp); ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_B); + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); + ocp_data &= ~PLA_MCU_SPDWN_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); + r8153_aldps_en(tp, true); - r8153_u2p3en(tp, true); - r8153b_u1u2en(tp, true); + + if (tp->udev->speed != USB_SPEED_HIGH) + r8153b_u1u2en(tp, true); } static void rtl8153b_down(struct r8152 *tp) { + u32 ocp_data; + if (test_bit(RTL8152_UNPLUG, &tp->flags)) { rtl_drop_queued_tx(tp); return; } + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); + ocp_data |= PLA_MCU_SPDWN_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); + r8153b_u1u2en(tp, false); r8153_u2p3en(tp, false); r8153b_power_cut_en(tp, false); @@ -5387,6 +5447,16 @@ static void r8153_init(struct r8152 *tp) else ocp_data |= DYNAMIC_BURST; ocp_write_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1, ocp_data); + + r8153_queue_wake(tp, false); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS); + if (rtl8152_get_speed(tp) & LINK_STATUS) + ocp_data |= CUR_LINK_OK; + else + ocp_data &= ~CUR_LINK_OK; + ocp_data |= POLL_LINK_CHG; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data); } ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY2); @@ -5416,10 +5486,19 @@ static void r8153_init(struct r8152 *tp) ocp_write_word(tp, MCU_TYPE_USB, USB_CONNECT_TIMER, 0x0001); r8153_power_cut_en(tp, false); + rtl_runtime_suspend_enable(tp, false); r8153_u1u2en(tp, true); r8153_mac_clk_spd(tp, false); usb_enable_lpm(tp->udev); + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6); + ocp_data |= LANWAKE_CLR_EN; + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG); + ocp_data &= ~LANWAKE_PIN; + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG, ocp_data); + /* rx aggregation */ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN); @@ -5484,7 +5563,17 @@ static void r8153b_init(struct r8152 *tp) r8153b_ups_en(tp, false); r8153_queue_wake(tp, false); rtl_runtime_suspend_enable(tp, false); - r8153b_u1u2en(tp, true); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS); + if (rtl8152_get_speed(tp) & LINK_STATUS) + ocp_data |= CUR_LINK_OK; + else + ocp_data &= ~CUR_LINK_OK; + ocp_data |= POLL_LINK_CHG; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data); + + if (tp->udev->speed != USB_SPEED_HIGH) + r8153b_u1u2en(tp, true); usb_enable_lpm(tp->udev); /* MAC clock speed down */ @@ -5492,6 +5581,19 @@ static void r8153b_init(struct r8152 *tp) ocp_data |= MAC_CLK_SPDWN_EN; 
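Nearly every r8152 hunk in this stretch is the same read-modify-write idiom against the vendor OCP register space. Condensed into a hypothetical helper — ocp_update_word() does not exist in the driver, while ocp_read_word()/ocp_write_word() do:

static void ocp_update_word(struct r8152 *tp, u16 type, u16 index,
                            u32 clear, u32 set)
{
        u32 ocp_data;

        ocp_data = ocp_read_word(tp, type, index);
        ocp_data &= ~clear;
        ocp_data |= set;
        ocp_write_word(tp, type, index, ocp_data);
}

/* e.g. the PLA_MAC_PWR_CTRL3 toggling above would collapse to:
 * ocp_update_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, PLA_MCU_SPDWN_EN, 0);
 */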
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data); + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); + ocp_data &= ~PLA_MCU_SPDWN_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); + + if (tp->version == RTL_VER_09) { + /* Disable Test IO for 32QFN */ + if (ocp_read_byte(tp, MCU_TYPE_PLA, 0xdc00) & BIT(5)) { + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR); + ocp_data |= TEST_IO_OFF; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data); + } + } + set_bit(GREEN_ETHERNET, &tp->flags); /* rx aggregation */ @@ -6597,6 +6699,9 @@ static int rtl8152_probe(struct usb_interface *intf, return -ENODEV; } + if (intf->cur_altsetting->desc.bNumEndpoints < 3) + return -ENODEV; + usb_reset_device(udev); netdev = alloc_etherdev(sizeof(struct r8152)); if (!netdev) { @@ -6704,6 +6809,11 @@ static int rtl8152_probe(struct usb_interface *intf, intf->needs_remote_wakeup = 1; + if (!rtl_can_wakeup(tp)) + __rtl_set_wol(tp, 0); + else + tp->saved_wolopts = __rtl_get_wol(tp); + tp->rtl_ops.init(tp); #if IS_BUILTIN(CONFIG_USB_RTL8152) /* Retry in case request_firmware() is not ready yet. */ @@ -6721,10 +6831,6 @@ static int rtl8152_probe(struct usb_interface *intf, goto out1; } - if (!rtl_can_wakeup(tp)) - __rtl_set_wol(tp, 0); - - tp->saved_wolopts = __rtl_get_wol(tp); if (tp->saved_wolopts) device_set_wakeup_enable(&udev->dev, true); else diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c index 34c1eaba536c..389d19dd7909 100644 --- a/drivers/net/usb/sierra_net.c +++ b/drivers/net/usb/sierra_net.c @@ -865,7 +865,7 @@ static struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, u16 len; bool need_tail; - BUILD_BUG_ON(FIELD_SIZEOF(struct usbnet, data) + BUILD_BUG_ON(sizeof_field(struct usbnet, data) < sizeof(struct cdc_state)); dev_dbg(&dev->udev->dev, "%s", __func__); diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 30e511c2c8d0..9ce6d30576dd 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -2184,7 +2184,7 @@ static int __init usbnet_init(void) { /* Compiler should optimize this out. */ BUILD_BUG_ON( - FIELD_SIZEOF(struct sk_buff, cb) < sizeof(struct skb_data)); + sizeof_field(struct sk_buff, cb) < sizeof(struct skb_data)); eth_random_addr(node_id); return 0; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 4c34375c2e22..1c5159dcc720 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2541,7 +2541,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, ndst = &rt->dst; skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM); - tos = ip_tunnel_ecn_encap(tos, old_iph, skb); + tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb); ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr), vni, md, flags, udp_sum); @@ -2581,7 +2581,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM); - tos = ip_tunnel_ecn_encap(tos, old_iph, skb); + tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb); ttl = ttl ? 
: ip6_dst_hoplimit(ndst); skb_scrub_packet(skb, xnet); err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr), @@ -3069,10 +3069,10 @@ static void vxlan_raw_setup(struct net_device *dev) static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = { [IFLA_VXLAN_ID] = { .type = NLA_U32 }, - [IFLA_VXLAN_GROUP] = { .len = FIELD_SIZEOF(struct iphdr, daddr) }, + [IFLA_VXLAN_GROUP] = { .len = sizeof_field(struct iphdr, daddr) }, [IFLA_VXLAN_GROUP6] = { .len = sizeof(struct in6_addr) }, [IFLA_VXLAN_LINK] = { .type = NLA_U32 }, - [IFLA_VXLAN_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) }, + [IFLA_VXLAN_LOCAL] = { .len = sizeof_field(struct iphdr, saddr) }, [IFLA_VXLAN_LOCAL6] = { .len = sizeof(struct in6_addr) }, [IFLA_VXLAN_TOS] = { .type = NLA_U8 }, [IFLA_VXLAN_TTL] = { .type = NLA_U8 }, diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index ca0f3be2b6bf..aef7de225783 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -73,7 +73,7 @@ static struct ucc_tdm_info utdm_primary_info = { }, }; -static struct ucc_tdm_info utdm_info[MAX_HDLC_NUM]; +static struct ucc_tdm_info utdm_info[UCC_MAX_NUM]; static int uhdlc_init(struct ucc_hdlc_private *priv) { diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index 0f1217b506ad..e30d91a38cfb 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -64,7 +64,7 @@ static struct lapbethdev *lapbeth_get_x25_dev(struct net_device *dev) { struct lapbethdev *lapbeth; - list_for_each_entry_rcu(lapbeth, &lapbeth_devices, node) { + list_for_each_entry_rcu(lapbeth, &lapbeth_devices, node, lockdep_rtnl_is_held()) { if (lapbeth->ethdev == dev) return lapbeth; } diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c index e2e679a01b65..77ccf3672ede 100644 --- a/drivers/net/wan/sdla.c +++ b/drivers/net/wan/sdla.c @@ -708,7 +708,7 @@ static netdev_tx_t sdla_transmit(struct sk_buff *skb, spin_lock_irqsave(&sdla_lock, flags); SDLA_WINDOW(dev, addr); - pbuf = (void *)(((int) dev->mem_start) + (addr & SDLA_ADDR_MASK)); + pbuf = (void *)(dev->mem_start + (addr & SDLA_ADDR_MASK)); __sdla_write(dev, pbuf->buf_addr, skb->data, skb->len); SDLA_WINDOW(dev, addr); pbuf->opp_flag = 1; diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c index 34e94ee806d6..23f93f1c815d 100644 --- a/drivers/net/wan/wanxl.c +++ b/drivers/net/wan/wanxl.c @@ -635,7 +635,7 @@ static int wanxl_pci_init_one(struct pci_dev *pdev, /* set up PLX mapping */ plx_phy = pci_resource_start(pdev, 0); - card->plx = ioremap_nocache(plx_phy, 0x70); + card->plx = ioremap(plx_phy, 0x70); if (!card->plx) { pr_err("ioremap() failed\n"); wanxl_pci_remove_one(pdev); @@ -704,7 +704,7 @@ static int wanxl_pci_init_one(struct pci_dev *pdev, PCI_DMA_FROMDEVICE); } - mem = ioremap_nocache(mem_phy, PDM_OFFSET + sizeof(firmware)); + mem = ioremap(mem_phy, PDM_OFFSET + sizeof(firmware)); if (!mem) { pr_err("ioremap() failed\n"); wanxl_pci_remove_one(pdev); diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c index f80854180e21..ed87bc00f2aa 100644 --- a/drivers/net/wireless/ath/ath10k/ahb.c +++ b/drivers/net/wireless/ath/ath10k/ahb.c @@ -458,7 +458,7 @@ static int ath10k_ahb_resource_init(struct ath10k *ar) ar_ahb->mem_len = resource_size(res); - ar_ahb->gcc_mem = ioremap_nocache(ATH10K_GCC_REG_BASE, + ar_ahb->gcc_mem = ioremap(ATH10K_GCC_REG_BASE, ATH10K_GCC_REG_SIZE); if (!ar_ahb->gcc_mem) { ath10k_err(ar, "gcc mem ioremap error\n"); @@ -466,7 +466,7 @@ static int 
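The vxlan fix above passes the tunnel tos through RT_TOS() before ECN encapsulation, so only the RFC 1349 ToS bits participate and the two low ECN bits (plus the high precedence bits) are masked off. RT_TOS() is a one-line mask; a standalone demonstration:

#include <stdio.h>
#include <stdint.h>

/* IPTOS_TOS_MASK is 0x1e in <uapi/linux/ip.h>: bits 1-4 are the ToS field,
 * bit 0 belongs to ECN and bits 5-7 to precedence. */
#define IPTOS_TOS_MASK 0x1e
#define RT_TOS(tos) ((tos) & IPTOS_TOS_MASK)

int main(void)
{
        uint8_t tos = 0xe3;     /* precedence, ToS and ECN bits set */

        printf("0x%02x -> 0x%02x\n", tos, RT_TOS(tos));  /* 0xe3 -> 0x02 */
        return 0;
}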
ath10k_ahb_resource_init(struct ath10k *ar) goto err_mem_unmap; } - ar_ahb->tcsr_mem = ioremap_nocache(ATH10K_TCSR_REG_BASE, + ar_ahb->tcsr_mem = ioremap(ATH10K_TCSR_REG_BASE, ATH10K_TCSR_REG_SIZE); if (!ar_ahb->tcsr_mem) { ath10k_err(ar, "tcsr mem ioremap error\n"); diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 83cc8778ca1e..978f0037ed52 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -8958,6 +8958,7 @@ int ath10k_mac_register(struct ath10k *ar) wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS); wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_SET_SCAN_DWELL); + wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_AQL); if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map) || test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, ar->wmi.svc_map)) diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h index ab916459d237..842e42ec814f 100644 --- a/drivers/net/wireless/ath/ath10k/trace.h +++ b/drivers/net/wireless/ath/ath10k/trace.h @@ -239,7 +239,7 @@ TRACE_EVENT(ath10k_wmi_dbglog, TP_STRUCT__entry( __string(device, dev_name(ar->dev)) __string(driver, dev_driver_string(ar->dev)) - __field(u8, hw_type); + __field(u8, hw_type) __field(size_t, buf_len) __dynamic_array(u8, buf, buf_len) ), @@ -269,7 +269,7 @@ TRACE_EVENT(ath10k_htt_pktlog, TP_STRUCT__entry( __string(device, dev_name(ar->dev)) __string(driver, dev_driver_string(ar->dev)) - __field(u8, hw_type); + __field(u8, hw_type) __field(u16, buf_len) __dynamic_array(u8, pktlog, buf_len) ), @@ -435,7 +435,7 @@ TRACE_EVENT(ath10k_htt_rx_desc, TP_STRUCT__entry( __string(device, dev_name(ar->dev)) __string(driver, dev_driver_string(ar->dev)) - __field(u8, hw_type); + __field(u8, hw_type) __field(u16, len) __dynamic_array(u8, rxdesc, len) ), diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c index c0794f5988b3..2c9cec8b53d9 100644 --- a/drivers/net/wireless/ath/ath5k/ahb.c +++ b/drivers/net/wireless/ath/ath5k/ahb.c @@ -106,7 +106,7 @@ static int ath_ahb_probe(struct platform_device *pdev) goto err_out; } - mem = ioremap_nocache(res->start, resource_size(res)); + mem = ioremap(res->start, resource_size(res)); if (mem == NULL) { dev_err(&pdev->dev, "ioremap failed\n"); ret = -ENOMEM; diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c index 63019c3de034..cdefb8e2daf1 100644 --- a/drivers/net/wireless/ath/ath9k/ahb.c +++ b/drivers/net/wireless/ath/ath9k/ahb.c @@ -92,7 +92,7 @@ static int ath_ahb_probe(struct platform_device *pdev) return -ENXIO; } - mem = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res)); + mem = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (mem == NULL) { dev_err(&pdev->dev, "ioremap failed\n"); return -ENOMEM; diff --git a/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c b/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c index 956fa7828d0c..56d1a7764b9f 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c +++ b/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c @@ -83,7 +83,7 @@ static int ath9k_pci_fixup(struct pci_dev *pdev, const u16 *cal_data, val = swahb32(val); } - __raw_writel(val, mem + reg); + iowrite32(val, mem + reg); usleep_range(100, 120); } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index f64ce5074a55..c85840cabebe 100644 --- 
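The ioremap_nocache() → ioremap() conversions scattered through this section (defxx, defza, wanxl, fjes, netcp, ll_temac, ath5k/ath9k/ath10k, brcmfmac) are tree-wide cleanup: ioremap() already returns an uncached mapping on every architecture, so the _nocache alias was redundant and slated for removal. Typical shape in a probe path, sketched with an invented demo_probe():

#include <linux/platform_device.h>
#include <linux/io.h>

static int demo_probe(struct platform_device *pdev)
{
        struct resource *res;
        void __iomem *regs;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -ENODEV;

        /* Identical semantics to the old devm_ioremap_nocache(): the
         * mapping is uncached by default. NULL, not ERR_PTR, on failure. */
        regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
        if (!regs)
                return -ENOMEM;

        return 0;
}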
a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -1643,8 +1643,8 @@ static int brcmf_pcie_get_resource(struct brcmf_pciedev_info *devinfo) return -EINVAL; } - devinfo->regs = ioremap_nocache(bar0_addr, BRCMF_PCIE_REG_MAP_SIZE); - devinfo->tcm = ioremap_nocache(bar1_addr, bar1_size); + devinfo->regs = ioremap(bar0_addr, BRCMF_PCIE_REG_MAP_SIZE); + devinfo->tcm = ioremap(bar1_addr, bar1_size); if (!devinfo->regs || !devinfo->tcm) { brcmf_err(bus, "ioremap() failed (%p,%p)\n", devinfo->regs, diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c index f43c06569ea1..c4c8f1b62e1e 100644 --- a/drivers/net/wireless/cisco/airo.c +++ b/drivers/net/wireless/cisco/airo.c @@ -7790,16 +7790,8 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) { case AIROGVLIST: ridcode = RID_APLIST; break; case AIROGDRVNAM: ridcode = RID_DRVNAME; break; case AIROGEHTENC: ridcode = RID_ETHERENCAP; break; - case AIROGWEPKTMP: ridcode = RID_WEP_TEMP; - /* Only super-user can read WEP keys */ - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - break; - case AIROGWEPKNV: ridcode = RID_WEP_PERM; - /* Only super-user can read WEP keys */ - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - break; + case AIROGWEPKTMP: ridcode = RID_WEP_TEMP; break; + case AIROGWEPKNV: ridcode = RID_WEP_PERM; break; case AIROGSTAT: ridcode = RID_STATUS; break; case AIROGSTATSD32: ridcode = RID_STATSDELTA; break; case AIROGSTATSC32: ridcode = RID_STATS; break; @@ -7813,7 +7805,13 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) { return -EINVAL; } - if ((iobuf = kmalloc(RIDSIZE, GFP_KERNEL)) == NULL) + if (ridcode == RID_WEP_TEMP || ridcode == RID_WEP_PERM) { + /* Only super-user can read WEP keys */ + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + } + + if ((iobuf = kzalloc(RIDSIZE, GFP_KERNEL)) == NULL) return -ENOMEM; PC4500_readrid(ai,ridcode,iobuf,RIDSIZE, 1); diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c index c4c83ab60cbc..e85858eec8ff 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c @@ -6167,7 +6167,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev, ioaddr = pci_iomap(pci_dev, 0, 0); if (!ioaddr) { printk(KERN_WARNING DRV_NAME - "Error calling ioremap_nocache.\n"); + "Error calling ioremap.\n"); err = -EIO; goto fail; } diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c index cd73fc5cfcbb..fd454836adbe 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c @@ -267,7 +267,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct iwl_station_priv *sta_priv = NULL; struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - struct iwl_device_cmd *dev_cmd; + struct iwl_device_tx_cmd *dev_cmd; struct iwl_tx_cmd *tx_cmd; __le16 fc; u8 hdr_len; @@ -348,7 +348,6 @@ int iwlagn_tx_skb(struct iwl_priv *priv, if (unlikely(!dev_cmd)) goto drop_unlock_priv; - memset(dev_cmd, 0, sizeof(*dev_cmd)); dev_cmd->hdr.cmd = REPLY_TX; tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c index 40fe2d667622..48d375a86d86 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c @@ -357,8 +357,8 
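The airo hunk above consolidates the CAP_NET_ADMIN check for the two WEP-key RIDs and swaps the transfer buffer from kmalloc() to kzalloc(); since the buffer is copied back to userspace, zero-filling it keeps stale heap bytes from leaking when the device fills less than RIDSIZE bytes. The distilled pattern, as a hypothetical helper (demo_read_secret() and DEMO_BUF_SIZE are invented):

#include <linux/capability.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#define DEMO_BUF_SIZE 2048              /* stand-in for RIDSIZE */

static int demo_read_secret(void __user *dst, bool sensitive,
                            int (*fill)(u8 *buf, size_t len))
{
        u8 *buf;
        int ret;

        if (sensitive && !capable(CAP_NET_ADMIN))
                return -EPERM;

        buf = kzalloc(DEMO_BUF_SIZE, GFP_KERNEL);  /* no stale heap data */
        if (!buf)
                return -ENOMEM;

        ret = fill(buf, DEMO_BUF_SIZE);
        if (!ret && copy_to_user(dst, buf, DEMO_BUF_SIZE))
                ret = -EFAULT;

        kfree(buf);
        return ret;
}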
@@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt) { union acpi_object *wifi_pkg, *data; bool enabled; - int i, n_profiles, tbl_rev; - int ret = 0; + int i, n_profiles, tbl_rev, pos; + int ret = 0; data = iwl_acpi_get_object(fwrt->dev, ACPI_EWRD_METHOD); if (IS_ERR(data)) @@ -390,10 +390,10 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt) goto out_free; } - for (i = 0; i < n_profiles; i++) { - /* the tables start at element 3 */ - int pos = 3; + /* the tables start at element 3 */ + pos = 3; + for (i = 0; i < n_profiles; i++) { /* The EWRD profiles officially go from 2 to 4, but we * save them in sar_profiles[1-3] (because we don't * have profile 0). So in the array we start from 1. diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index ed90dd104366..4c60f9959f7b 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -2669,12 +2669,7 @@ int iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt, { int ret = 0; - /* if the FW crashed or not debug monitor cfg was given, there is - * no point in changing the recording state - */ - if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status) || - (!fwrt->trans->dbg.dest_tlv && - fwrt->trans->dbg.ini_dest == IWL_FW_INI_LOCATION_INVALID)) + if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) return 0; if (fw_has_capa(&fwrt->fw->ucode_capa, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index 92d9898ab7c2..c2f7252ae4e7 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -379,7 +379,7 @@ enum { /* CSR GIO */ -#define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002) +#define CSR_GIO_REG_VAL_L0S_DISABLED (0x00000002) /* * UCODE-DRIVER GP (general purpose) mailbox register 1 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c index f266647dc08c..ce8f248c33ea 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c @@ -480,7 +480,14 @@ static int iwl_dbg_tlv_alloc_fragment(struct iwl_fw_runtime *fwrt, if (!frag || frag->size || !pages) return -EIO; - while (pages) { + /* + * We try to allocate as many pages as we can, starting with + * the requested amount and going down until we can allocate + * something. Because of DIV_ROUND_UP(), pages will never go + * down to 0 and stop the loop, so stop when pages reaches 1, + * which is too small anyway. 
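The comment above describes a back-off allocator: ask for the full request and, on failure, retry with roughly half the pages; because the retry divides with a round-up, the count converges to 1 rather than 0, which is why the loop guards on pages staying above 1. A malloc()-based model of that back-off, assuming the upstream DIV_ROUND_UP halving step:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ 4096
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static void *alloc_backoff(size_t *pages)
{
	while (*pages > 1) {
		void *block = malloc(*pages * PAGE_SZ); /* dma_alloc_coherent stand-in */
		if (block)
			return block;
		*pages = DIV_ROUND_UP(*pages, 2);  /* 3 -> 2 -> 1, never 0 */
	}
	return NULL;                               /* one page is too small anyway */
}

int main(void)
{
	size_t pages = 64;
	void *b = alloc_backoff(&pages);
	printf("got %zu pages at %p\n", pages, b);
	free(b);
	return 0;
}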
+ */ + while (pages > 1) { block = dma_alloc_coherent(fwrt->dev, pages * PAGE_SIZE, &physical, GFP_KERNEL | __GFP_NOWARN); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 4096ccf58b07..bc8c959588ca 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1817,9 +1817,6 @@ MODULE_PARM_DESC(antenna_coupling, module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, 0444); MODULE_PARM_DESC(nvm_file, "NVM file name"); -module_param_named(lar_disable, iwlwifi_mod_params.lar_disable, bool, 0444); -MODULE_PARM_DESC(lar_disable, "disable LAR functionality (default: N)"); - module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, uint, 0644); MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)"); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h index ebea3f308b5d..82e5cac23d8d 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h @@ -115,7 +115,6 @@ enum iwl_uapsd_disable { * @nvm_file: specifies an external NVM file * @uapsd_disable: disable U-APSD, see &enum iwl_uapsd_disable, default = * IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT - * @lar_disable: disable LAR (regulatory), default = 0 * @fw_monitor: allow to use firmware monitor * @disable_11ac: disable VHT capabilities, default = false. * @remove_when_gone: remove an inaccessible device from the PCIe bus. @@ -136,7 +135,6 @@ struct iwl_mod_params { int antenna_coupling; char *nvm_file; u32 uapsd_disable; - bool lar_disable; bool fw_monitor; bool disable_11ac; /** diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 1e240a2a8329..d4f834b52f50 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -224,6 +224,34 @@ enum iwl_nvm_channel_flags { NVM_CHANNEL_DC_HIGH = BIT(12), }; +/** + * enum iwl_reg_capa_flags - global flags applied for the whole regulatory + * domain. + * @REG_CAPA_BF_CCD_LOW_BAND: Beam-forming or Cyclic Delay Diversity in the + * 2.4GHz band is allowed. + * @REG_CAPA_BF_CCD_HIGH_BAND: Beam-forming or Cyclic Delay Diversity in the + * 5GHz band is allowed. + * @REG_CAPA_160MHZ_ALLOWED: 11ac channel with a width of 160MHz is allowed + * for this regulatory domain (valid only in 5GHz). + * @REG_CAPA_80MHZ_ALLOWED: 11ac channel with a width of 80MHz is allowed + * for this regulatory domain (valid only in 5GHz). + * @REG_CAPA_MCS_8_ALLOWED: 11ac with MCS 8 is allowed. + * @REG_CAPA_MCS_9_ALLOWED: 11ac with MCS 9 is allowed. + * @REG_CAPA_40MHZ_FORBIDDEN: 11n channel with a width of 40MHz is forbidden + * for this regulatory domain (valid only in 5GHz). + * @REG_CAPA_DC_HIGH_ENABLED: DC HIGH allowed.
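These REG_CAPA_* bits arrive once per regulatory domain, and later in this diff iwl_nvm_get_regdom_bw_flags() folds them into each 5 GHz channel's NL80211_RRF_* flags. A small standalone sketch of that folding, with the kernel flag values mocked as local constants (not the real register layout):

#include <stdio.h>
#include <stdint.h>

/* mocked stand-ins for the NL80211_RRF_* values used by the hunk */
#define RRF_NO_HT40   (1u << 0)
#define RRF_NO_80MHZ  (1u << 1)
#define RRF_NO_160MHZ (1u << 2)

#define CAPA_160MHZ_ALLOWED  (1u << 2)
#define CAPA_80MHZ_ALLOWED   (1u << 3)
#define CAPA_40MHZ_FORBIDDEN (1u << 7)

static uint32_t apply_reg_capa(uint32_t flags, uint16_t cap, int is_5ghz)
{
	if (!is_5ghz)               /* width capa bits are only meaningful in 5 GHz */
		return flags;
	if (cap & CAPA_40MHZ_FORBIDDEN)
		flags |= RRF_NO_HT40;
	if (!(cap & CAPA_80MHZ_ALLOWED))
		flags |= RRF_NO_80MHZ;
	if (!(cap & CAPA_160MHZ_ALLOWED))
		flags |= RRF_NO_160MHZ;
	return flags;
}

int main(void)
{
	/* domain allows 80 MHz but not 160 MHz; 40 MHz not forbidden */
	uint32_t f = apply_reg_capa(0, CAPA_80MHZ_ALLOWED, 1);
	printf("flags=0x%x\n", f);   /* RRF_NO_160MHZ only */
	return 0;
}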
+ */ +enum iwl_reg_capa_flags { + REG_CAPA_BF_CCD_LOW_BAND = BIT(0), + REG_CAPA_BF_CCD_HIGH_BAND = BIT(1), + REG_CAPA_160MHZ_ALLOWED = BIT(2), + REG_CAPA_80MHZ_ALLOWED = BIT(3), + REG_CAPA_MCS_8_ALLOWED = BIT(4), + REG_CAPA_MCS_9_ALLOWED = BIT(5), + REG_CAPA_40MHZ_FORBIDDEN = BIT(7), + REG_CAPA_DC_HIGH_ENABLED = BIT(9), +}; + static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level, int chan, u32 flags) { @@ -939,10 +967,11 @@ iwl_nvm_no_wide_in_5ghz(struct iwl_trans *trans, const struct iwl_cfg *cfg, struct iwl_nvm_data * iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, + const struct iwl_fw *fw, const __be16 *nvm_hw, const __le16 *nvm_sw, const __le16 *nvm_calib, const __le16 *regulatory, const __le16 *mac_override, const __le16 *phy_sku, - u8 tx_chains, u8 rx_chains, bool lar_fw_supported) + u8 tx_chains, u8 rx_chains) { struct iwl_nvm_data *data; bool lar_enabled; @@ -1022,7 +1051,8 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, return NULL; } - if (lar_fw_supported && lar_enabled) + if (lar_enabled && + fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT)) sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR; if (iwl_nvm_no_wide_in_5ghz(trans, cfg, nvm_hw)) @@ -1038,6 +1068,7 @@ IWL_EXPORT_SYMBOL(iwl_parse_nvm_data); static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan, int ch_idx, u16 nvm_flags, + u16 cap_flags, const struct iwl_cfg *cfg) { u32 flags = NL80211_RRF_NO_HT40; @@ -1076,13 +1107,27 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan, (flags & NL80211_RRF_NO_IR)) flags |= NL80211_RRF_GO_CONCURRENT; + /* + * cap_flags is per regulatory domain so apply it for every channel + */ + if (ch_idx >= NUM_2GHZ_CHANNELS) { + if (cap_flags & REG_CAPA_40MHZ_FORBIDDEN) + flags |= NL80211_RRF_NO_HT40; + + if (!(cap_flags & REG_CAPA_80MHZ_ALLOWED)) + flags |= NL80211_RRF_NO_80MHZ; + + if (!(cap_flags & REG_CAPA_160MHZ_ALLOWED)) + flags |= NL80211_RRF_NO_160MHZ; + } + return flags; } struct ieee80211_regdomain * iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, int num_of_ch, __le32 *channels, u16 fw_mcc, - u16 geo_info) + u16 geo_info, u16 cap) { int ch_idx; u16 ch_flags; @@ -1140,7 +1185,8 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, } reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx, - ch_flags, cfg); + ch_flags, cap, + cfg); /* we can't continue the same rule */ if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags || @@ -1405,9 +1451,6 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, .id = WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_GET_INFO) }; int ret; - bool lar_fw_supported = !iwlwifi_mod_params.lar_disable && - fw_has_capa(&fw->ucode_capa, - IWL_UCODE_TLV_CAPA_LAR_SUPPORT); bool empty_otp; u32 mac_flags; u32 sbands_flags = 0; @@ -1485,7 +1528,9 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, nvm->valid_tx_ant = (u8)le32_to_cpu(rsp->phy_sku.tx_chains); nvm->valid_rx_ant = (u8)le32_to_cpu(rsp->phy_sku.rx_chains); - if (le32_to_cpu(rsp->regulatory.lar_enabled) && lar_fw_supported) { + if (le32_to_cpu(rsp->regulatory.lar_enabled) && + fw_has_capa(&fw->ucode_capa, + IWL_UCODE_TLV_CAPA_LAR_SUPPORT)) { nvm->lar_enabled = true; sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR; } diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h index b7e1ddf8f177..fb0b385d10fd 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h +++ 
b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h @@ -7,7 +7,7 @@ * * Copyright(c) 2008 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -29,7 +29,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -85,10 +85,11 @@ enum iwl_nvm_sbands_flags { */ struct iwl_nvm_data * iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, + const struct iwl_fw *fw, const __be16 *nvm_hw, const __le16 *nvm_sw, const __le16 *nvm_calib, const __le16 *regulatory, const __le16 *mac_override, const __le16 *phy_sku, - u8 tx_chains, u8 rx_chains, bool lar_fw_supported); + u8 tx_chains, u8 rx_chains); /** * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW @@ -103,7 +104,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, struct ieee80211_regdomain * iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, int num_of_ch, __le32 *channels, u16 fw_mcc, - u16 geo_info); + u16 geo_info, u16 cap); /** * struct iwl_nvm_section - describes an NVM section in memory. diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c index 28bdc9a9617e..f91197e4ae40 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c @@ -66,7 +66,9 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, struct device *dev, - const struct iwl_trans_ops *ops) + const struct iwl_trans_ops *ops, + unsigned int cmd_pool_size, + unsigned int cmd_pool_align) { struct iwl_trans *trans; #ifdef CONFIG_LOCKDEP @@ -90,10 +92,8 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, "iwl_cmd_pool:%s", dev_name(trans->dev)); trans->dev_cmd_pool = kmem_cache_create(trans->dev_cmd_pool_name, - sizeof(struct iwl_device_cmd), - sizeof(void *), - SLAB_HWCACHE_ALIGN, - NULL); + cmd_pool_size, cmd_pool_align, + SLAB_HWCACHE_ALIGN, NULL); if (!trans->dev_cmd_pool) return NULL; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index 8cadad7364ac..e33df5ad00e0 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -193,6 +193,18 @@ struct iwl_device_cmd { }; } __packed; +/** + * struct iwl_device_tx_cmd - buffer for TX command + * @hdr: the header + * @payload: the payload placeholder + * + * The actual structure is sized dynamically according to need. 
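The new iwl_device_tx_cmd above is deliberately just a header plus a flexible payload, and iwl_trans_alloc() now takes the pool's object size and alignment from the caller, so each device family pays only for the TX command layout it actually uses; kmem_cache_zalloc() then replaces the hand-rolled memset()s at every call site. A userspace model of the sizing rule, with hypothetical layouts standing in for the real command structs:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* hypothetical stand-ins for the per-family TX command layouts */
struct cmd_hdr   { unsigned char b[4]; };
struct tx_gen3   { unsigned char b[48]; };

int main(void)
{
	/* header + body + worst-case 802.11 header, as in iwl_trans_pcie_alloc */
	size_t size  = sizeof(struct cmd_hdr) + sizeof(struct tx_gen3) + 36;
	size_t align = 128;

	/* gen2 rule from the hunk: a command must fit inside its alignment
	 * slot so it can never straddle a page boundary */
	if (size >= align) {
		fprintf(stderr, "command too big for its slot\n");
		return 1;
	}

	/* aligned_alloc + memset models kmem_cache_zalloc from the pool */
	void *cmd = aligned_alloc(align, align);
	if (!cmd)
		return 1;
	memset(cmd, 0, size);
	printf("cmd at %p, %zu bytes, align %zu\n", cmd, size, align);
	free(cmd);
	return 0;
}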
+ */ +struct iwl_device_tx_cmd { + struct iwl_cmd_header hdr; + u8 payload[]; +} __packed; + #define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd)) /* @@ -544,7 +556,7 @@ struct iwl_trans_ops { int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd); int (*tx)(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_cmd *dev_cmd, int queue); + struct iwl_device_tx_cmd *dev_cmd, int queue); void (*reclaim)(struct iwl_trans *trans, int queue, int ssn, struct sk_buff_head *skbs); @@ -948,22 +960,22 @@ iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask) return trans->ops->dump_data(trans, dump_mask); } -static inline struct iwl_device_cmd * +static inline struct iwl_device_tx_cmd * iwl_trans_alloc_tx_cmd(struct iwl_trans *trans) { - return kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC); + return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC); } int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans, - struct iwl_device_cmd *dev_cmd) + struct iwl_device_tx_cmd *dev_cmd) { kmem_cache_free(trans->dev_cmd_pool, dev_cmd); } static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_cmd *dev_cmd, int queue) + struct iwl_device_tx_cmd *dev_cmd, int queue) { if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status))) return -EIO; @@ -1271,7 +1283,9 @@ static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans) *****************************************************/ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, struct device *dev, - const struct iwl_trans_ops *ops); + const struct iwl_trans_ops *ops, + unsigned int cmd_pool_size, + unsigned int cmd_pool_align); void iwl_trans_free(struct iwl_trans *trans); /***************************************************** diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h index 60aff2ecec12..58df25e2fb32 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h @@ -154,5 +154,6 @@ #define IWL_MVM_D3_DEBUG false #define IWL_MVM_USE_TWT false #define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 10 +#define IWL_MVM_USE_NSSN_SYNC 0 #endif /* __MVM_CONSTANTS_H */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index dd685f7eb410..c09624d8d7ee 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -841,9 +841,13 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm) return 0; } + if (!mvm->fwrt.ppag_table.enabled) { + IWL_DEBUG_RADIO(mvm, + "PPAG not enabled, command not sent.\n"); + return 0; + } + IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n"); - IWL_DEBUG_RADIO(mvm, "PPAG is %s\n", - mvm->fwrt.ppag_table.enabled ? 
"enabled" : "disabled"); for (i = 0; i < ACPI_PPAG_NUM_CHAINS; i++) { for (j = 0; j < ACPI_PPAG_NUM_SUB_BANDS; j++) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 32dc9d6f0fb6..6717f25c46b1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -256,7 +256,8 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, __le32_to_cpu(resp->n_channels), resp->channels, __le16_to_cpu(resp->mcc), - __le16_to_cpu(resp->geo_info)); + __le16_to_cpu(resp->geo_info), + __le16_to_cpu(resp->cap)); /* Store the return source id */ src_id = resp->source_id; kfree(resp); @@ -754,6 +755,20 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) return ret; } +static void iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, + struct ieee80211_sta *sta) +{ + if (likely(sta)) { + if (likely(iwl_mvm_tx_skb_sta(mvm, skb, sta) == 0)) + return; + } else { + if (likely(iwl_mvm_tx_skb_non_sta(mvm, skb) == 0)) + return; + } + + ieee80211_free_txskb(mvm->hw, skb); +} + static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb) @@ -797,14 +812,7 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, } } - if (sta) { - if (iwl_mvm_tx_skb(mvm, skb, sta)) - goto drop; - return; - } - - if (iwl_mvm_tx_skb_non_sta(mvm, skb)) - goto drop; + iwl_mvm_tx_skb(mvm, skb, sta); return; drop: ieee80211_free_txskb(hw, skb); @@ -854,10 +862,7 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq) break; } - if (!txq->sta) - iwl_mvm_tx_skb_non_sta(mvm, skb); - else - iwl_mvm_tx_skb(mvm, skb, txq->sta); + iwl_mvm_tx_skb(mvm, skb, txq->sta); } } while (atomic_dec_return(&mvmtxq->tx_request)); rcu_read_unlock(); @@ -4771,6 +4776,125 @@ static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, return ret; } +static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo) +{ + switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { + case RATE_MCS_CHAN_WIDTH_20: + rinfo->bw = RATE_INFO_BW_20; + break; + case RATE_MCS_CHAN_WIDTH_40: + rinfo->bw = RATE_INFO_BW_40; + break; + case RATE_MCS_CHAN_WIDTH_80: + rinfo->bw = RATE_INFO_BW_80; + break; + case RATE_MCS_CHAN_WIDTH_160: + rinfo->bw = RATE_INFO_BW_160; + break; + } + + if (rate_n_flags & RATE_MCS_HT_MSK) { + rinfo->flags |= RATE_INFO_FLAGS_MCS; + rinfo->mcs = u32_get_bits(rate_n_flags, RATE_HT_MCS_INDEX_MSK); + rinfo->nss = u32_get_bits(rate_n_flags, + RATE_HT_MCS_NSS_MSK) + 1; + if (rate_n_flags & RATE_MCS_SGI_MSK) + rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; + } else if (rate_n_flags & RATE_MCS_VHT_MSK) { + rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS; + rinfo->mcs = u32_get_bits(rate_n_flags, + RATE_VHT_MCS_RATE_CODE_MSK); + rinfo->nss = u32_get_bits(rate_n_flags, + RATE_VHT_MCS_NSS_MSK) + 1; + if (rate_n_flags & RATE_MCS_SGI_MSK) + rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; + } else if (rate_n_flags & RATE_MCS_HE_MSK) { + u32 gi_ltf = u32_get_bits(rate_n_flags, + RATE_MCS_HE_GI_LTF_MSK); + + rinfo->flags |= RATE_INFO_FLAGS_HE_MCS; + rinfo->mcs = u32_get_bits(rate_n_flags, + RATE_VHT_MCS_RATE_CODE_MSK); + rinfo->nss = u32_get_bits(rate_n_flags, + RATE_VHT_MCS_NSS_MSK) + 1; + + if (rate_n_flags & RATE_MCS_HE_106T_MSK) { + rinfo->bw = RATE_INFO_BW_HE_RU; + rinfo->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_106; + } + + switch (rate_n_flags & RATE_MCS_HE_TYPE_MSK) { + case RATE_MCS_HE_TYPE_SU: + case RATE_MCS_HE_TYPE_EXT_SU: + if (gi_ltf == 0 || 
gi_ltf == 1) + rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8; + else if (gi_ltf == 2) + rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6; + else if (rate_n_flags & RATE_MCS_SGI_MSK) + rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8; + else + rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2; + break; + case RATE_MCS_HE_TYPE_MU: + if (gi_ltf == 0 || gi_ltf == 1) + rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8; + else if (gi_ltf == 2) + rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6; + else + rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2; + break; + case RATE_MCS_HE_TYPE_TRIG: + if (gi_ltf == 0 || gi_ltf == 1) + rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6; + else + rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2; + break; + } + + if (rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK) + rinfo->he_dcm = 1; + } else { + switch (u32_get_bits(rate_n_flags, RATE_LEGACY_RATE_MSK)) { + case IWL_RATE_1M_PLCP: + rinfo->legacy = 10; + break; + case IWL_RATE_2M_PLCP: + rinfo->legacy = 20; + break; + case IWL_RATE_5M_PLCP: + rinfo->legacy = 55; + break; + case IWL_RATE_11M_PLCP: + rinfo->legacy = 110; + break; + case IWL_RATE_6M_PLCP: + rinfo->legacy = 60; + break; + case IWL_RATE_9M_PLCP: + rinfo->legacy = 90; + break; + case IWL_RATE_12M_PLCP: + rinfo->legacy = 120; + break; + case IWL_RATE_18M_PLCP: + rinfo->legacy = 180; + break; + case IWL_RATE_24M_PLCP: + rinfo->legacy = 240; + break; + case IWL_RATE_36M_PLCP: + rinfo->legacy = 360; + break; + case IWL_RATE_48M_PLCP: + rinfo->legacy = 480; + break; + case IWL_RATE_54M_PLCP: + rinfo->legacy = 540; + break; + } + } +} + static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, @@ -4785,6 +4909,13 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); } + if (iwl_mvm_has_tlc_offload(mvm)) { + struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw; + + iwl_mvm_set_sta_rate(lq_sta->last_rate_n_flags, &sinfo->txrate); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); + } + /* if beacon filtering isn't on mac80211 does it anyway */ if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER)) return; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 3ec8de00f3aa..67ab7e7e9c9d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1298,9 +1298,6 @@ static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm) bool tlv_lar = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT); - if (iwlwifi_mod_params.lar_disable) - return false; - /* * Enable LAR only if it is supported by the FW (TLV) && * enabled in the NVM @@ -1508,8 +1505,8 @@ int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len, const void *data, u32 *status); -int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, - struct ieee80211_sta *sta); +int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb, + struct ieee80211_sta *sta); int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb); void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, struct iwl_tx_cmd *tx_cmd, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index 945c1ea5cda8..46128a2a9c6e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -277,11 +277,10 @@ 
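The iwl_mvm_set_sta_rate() helper added above is a straight field-by-field decode of the firmware's packed rate_n_flags word (width, HT/VHT/HE type, MCS, NSS, guard interval) into mac80211's rate_info. A toy decoder over a mocked two-field layout shows the approach without the real register masks:

#include <stdio.h>
#include <stdint.h>

/* mocked bit layout, loosely following the hunk's rate_n_flags fields */
#define CHAN_WIDTH_MSK  0x3u        /* 0=20, 1=40, 2=80, 3=160 MHz */
#define MCS_HT_FLAG     (1u << 8)
#define HT_MCS_IDX_MSK  0xf0u

static void decode(uint32_t r)
{
	static const int bw[] = { 20, 40, 80, 160 };

	printf("bw=%d MHz", bw[r & CHAN_WIDTH_MSK]);
	if (r & MCS_HT_FLAG)
		printf(" HT mcs=%u", (unsigned)((r & HT_MCS_IDX_MSK) >> 4));
	printf("\n");
}

int main(void)
{
	decode(2 | MCS_HT_FLAG | (7u << 4)); /* 80 MHz, HT MCS 7 */
	return 0;
}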
iwl_parse_nvm_sections(struct iwl_mvm *mvm) struct iwl_nvm_section *sections = mvm->nvm_sections; const __be16 *hw; const __le16 *sw, *calib, *regulatory, *mac_override, *phy_sku; - bool lar_enabled; int regulatory_type; /* Checking for required sections */ - if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) { + if (mvm->trans->cfg->nvm_type == IWL_NVM) { if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data || !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) { IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n"); @@ -327,14 +326,9 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm) (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY_SDP].data : (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data; - lar_enabled = !iwlwifi_mod_params.lar_disable && - fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_LAR_SUPPORT); - - return iwl_parse_nvm_data(mvm->trans, mvm->cfg, hw, sw, calib, + return iwl_parse_nvm_data(mvm->trans, mvm->cfg, mvm->fw, hw, sw, calib, regulatory, mac_override, phy_sku, - mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant, - lar_enabled); + mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant); } /* Loads the NVM data stored in mvm->nvm_sections into the NIC */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index ef99c49247b7..c15f7dbc9516 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -514,14 +514,17 @@ static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size) static void iwl_mvm_sync_nssn(struct iwl_mvm *mvm, u8 baid, u16 nssn) { - struct iwl_mvm_rss_sync_notif notif = { - .metadata.type = IWL_MVM_RXQ_NSSN_SYNC, - .metadata.sync = 0, - .nssn_sync.baid = baid, - .nssn_sync.nssn = nssn, - }; - - iwl_mvm_sync_rx_queues_internal(mvm, (void *)¬if, sizeof(notif)); + if (IWL_MVM_USE_NSSN_SYNC) { + struct iwl_mvm_rss_sync_notif notif = { + .metadata.type = IWL_MVM_RXQ_NSSN_SYNC, + .metadata.sync = 0, + .nssn_sync.baid = baid, + .nssn_sync.nssn = nssn, + }; + + iwl_mvm_sync_rx_queues_internal(mvm, (void *)¬if, + sizeof(notif)); + } } #define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index a046ac9fa852..a5af8f4128b1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -1213,7 +1213,7 @@ static int iwl_mvm_legacy_config_scan(struct iwl_mvm *mvm) cmd_size = sizeof(struct iwl_scan_config_v2); else cmd_size = sizeof(struct iwl_scan_config_v1); - cmd_size += num_channels; + cmd_size += mvm->fw->ucode_capa.n_scan_channels; cfg = kzalloc(cmd_size, GFP_KERNEL); if (!cfg) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index dc5c02fbc65a..ddfc9a668036 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -490,13 +490,13 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, /* * Allocates and sets the Tx cmd the driver data pointers in the skb */ -static struct iwl_device_cmd * +static struct iwl_device_tx_cmd * iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_tx_info *info, int hdrlen, struct ieee80211_sta *sta, u8 sta_id) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - struct iwl_device_cmd *dev_cmd; + struct iwl_device_tx_cmd *dev_cmd; struct iwl_tx_cmd *tx_cmd; dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans); @@ -504,11 +504,6 @@ 
iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, if (unlikely(!dev_cmd)) return NULL; - /* Make sure we zero enough of dev_cmd */ - BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) > sizeof(*tx_cmd)); - BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) > sizeof(*tx_cmd)); - - memset(dev_cmd, 0, sizeof(dev_cmd->hdr) + sizeof(*tx_cmd)); dev_cmd->hdr.cmd = TX_CMD; if (iwl_mvm_has_new_tx_api(mvm)) { @@ -597,7 +592,7 @@ out: } static void iwl_mvm_skb_prepare_status(struct sk_buff *skb, - struct iwl_device_cmd *cmd) + struct iwl_device_tx_cmd *cmd) { struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb); @@ -716,7 +711,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_tx_info info; - struct iwl_device_cmd *dev_cmd; + struct iwl_device_tx_cmd *dev_cmd; u8 sta_id; int hdrlen = ieee80211_hdrlen(hdr->frame_control); __le16 fc = hdr->frame_control; @@ -1078,7 +1073,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct iwl_mvm_sta *mvmsta; - struct iwl_device_cmd *dev_cmd; + struct iwl_device_tx_cmd *dev_cmd; __le16 fc; u16 seq_number = 0; u8 tid = IWL_MAX_TID_COUNT; @@ -1154,7 +1149,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, if (WARN_ONCE(txq_id == IWL_MVM_INVALID_QUEUE, "Invalid TXQ id")) { iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); spin_unlock(&mvmsta->lock); - return 0; + return -1; } if (!iwl_mvm_has_new_tx_api(mvm)) { @@ -1206,8 +1201,8 @@ drop: return -1; } -int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, - struct ieee80211_sta *sta) +int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb, + struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct ieee80211_tx_info info; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c index d38cefbb779e..e249e3fd14c6 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c @@ -57,6 +57,42 @@ #include "internal.h" #include "iwl-prph.h" +static void *_iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans, + size_t size, + dma_addr_t *phys, + int depth) +{ + void *result; + + if (WARN(depth > 2, + "failed to allocate DMA memory not crossing 2^32 boundary")) + return NULL; + + result = dma_alloc_coherent(trans->dev, size, phys, GFP_KERNEL); + + if (!result) + return NULL; + + if (unlikely(iwl_pcie_crosses_4g_boundary(*phys, size))) { + void *old = result; + dma_addr_t oldphys = *phys; + + result = _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size, + phys, + depth + 1); + dma_free_coherent(trans->dev, size, old, oldphys); + } + + return result; +} + +static void *iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans, + size_t size, + dma_addr_t *phys) +{ + return _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size, phys, 0); +} + void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans) { struct iwl_self_init_dram *dram = &trans->init_dram; @@ -161,14 +197,17 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, struct iwl_context_info *ctxt_info; struct iwl_context_info_rbd_cfg *rx_cfg; u32 control_flags = 0, rb_size; + dma_addr_t phys; int ret; - ctxt_info = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info), - &trans_pcie->ctxt_info_dma_addr, - GFP_KERNEL); + ctxt_info = 
iwl_pcie_ctxt_info_dma_alloc_coherent(trans, + sizeof(*ctxt_info), + &phys); if (!ctxt_info) return -ENOMEM; + trans_pcie->ctxt_info_dma_addr = phys; + ctxt_info->version.version = 0; ctxt_info->version.mac_id = cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV)); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 040cec17d3ad..b0b7eca1754e 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -1111,18 +1111,18 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* same thing for QuZ... */ if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) { - if (iwl_trans->cfg == &iwl_ax101_cfg_qu_hr) - iwl_trans->cfg = &iwl_ax101_cfg_quz_hr; - else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr) - iwl_trans->cfg = &iwl_ax201_cfg_quz_hr; - else if (iwl_trans->cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0) - iwl_trans->cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc; - else if (iwl_trans->cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0) - iwl_trans->cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc; - else if (iwl_trans->cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0) - iwl_trans->cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc; - else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0) - iwl_trans->cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc; + if (cfg == &iwl_ax101_cfg_qu_hr) + cfg = &iwl_ax101_cfg_quz_hr; + else if (cfg == &iwl_ax201_cfg_qu_hr) + cfg = &iwl_ax201_cfg_quz_hr; + else if (cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0) + cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc; + else if (cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0) + cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc; + else if (cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0) + cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc; + else if (cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0) + cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc; } #endif diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index a091690f6c79..f14bcef3495e 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -305,7 +305,7 @@ struct iwl_cmd_meta { #define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64) struct iwl_pcie_txq_entry { - struct iwl_device_cmd *cmd; + void *cmd; struct sk_buff *skb; /* buffer to free after command completes */ const void *free_buf; @@ -672,6 +672,16 @@ void iwl_pcie_disable_ict(struct iwl_trans *trans); /***************************************************** * TX / HCMD ******************************************************/ +/* + * We need this inline in case dma_addr_t is only 32-bits - since the + * hardware is always 64-bit, the issue can still occur in that case, + * so use u64 for 'phys' here to force the addition in 64-bit. 
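The helper this comment introduces does the boundary test with 64-bit arithmetic so it stays correct even when dma_addr_t is 32 bits; the ctxt-info allocator earlier in this diff uses it to allocate again while still holding the offending buffer (forcing the allocator to return a different region) for up to two retries before giving up. A standalone version of the predicate:

#include <stdio.h>
#include <stdint.h>

/* same shape as the new iwl_pcie_crosses_4g_boundary(): do the addition
 * in 64 bits so a 32-bit address type cannot hide the carry */
static int crosses_4g(uint64_t phys, uint16_t len)
{
	return (uint32_t)(phys >> 32) != (uint32_t)((phys + len) >> 32);
}

int main(void)
{
	printf("%d\n", crosses_4g(0xfffffff0ull, 0x20)); /* 1: wraps past 2^32 */
	printf("%d\n", crosses_4g(0x10000000ull, 0x20)); /* 0 */
	return 0;
}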
+ */ +static inline bool iwl_pcie_crosses_4g_boundary(u64 phys, u16 len) +{ + return upper_32_bits(phys) != upper_32_bits(phys + len); +} + int iwl_pcie_tx_init(struct iwl_trans *trans); int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id, int queue_size); @@ -688,7 +698,7 @@ void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id, void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq); int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_cmd *dev_cmd, int txq_id); + struct iwl_device_tx_cmd *dev_cmd, int txq_id); void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans); int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx); @@ -1082,7 +1092,8 @@ void iwl_pcie_apply_destination(struct iwl_trans *trans); void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie, struct sk_buff *skb); #ifdef CONFIG_INET -struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len); +struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len, + struct sk_buff *skb); #endif /* common functions that are used by gen3 transport */ @@ -1106,7 +1117,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, unsigned int timeout); void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue); int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_cmd *dev_cmd, int txq_id); + struct iwl_device_tx_cmd *dev_cmd, int txq_id); int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 452da44a21e0..f0b8ff67a1bc 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -1529,13 +1529,13 @@ out: napi = &rxq->napi; if (napi->poll) { + napi_gro_flush(napi, false); + if (napi->rx_count) { netif_receive_skb_list(&napi->rx_list); INIT_LIST_HEAD(&napi->rx_list); napi->rx_count = 0; } - - napi_gro_flush(napi, false); } iwl_pcie_rxq_restock(trans, rxq); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index 0252716c0b24..0d8b2a8ffa5d 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -57,24 +57,6 @@ #include "internal.h" #include "fw/dbg.h" -static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans) -{ - iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG, - HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE); - udelay(20); - iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG, - HPM_HIPM_GEN_CFG_CR_PG_EN | - HPM_HIPM_GEN_CFG_CR_SLP_EN); - udelay(20); - iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG, - HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE); - - iwl_trans_sw_reset(trans); - iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); - - return 0; -} - /* * Start up NIC's basic functionality after it has been reset * (e.g. 
after platform boot, or shutdown via iwl_pcie_apm_stop()) @@ -110,13 +92,6 @@ int iwl_pcie_gen2_apm_init(struct iwl_trans *trans) iwl_pcie_apm_config(trans); - if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 && - trans->cfg->integrated) { - ret = iwl_pcie_gen2_force_power_gating(trans); - if (ret) - return ret; - } - ret = iwl_finish_nic_init(trans, trans->trans_cfg); if (ret) return ret; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index af9bc6b64542..f60d66f1e55b 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -79,6 +79,7 @@ #include "iwl-agn-hw.h" #include "fw/error-dump.h" #include "fw/dbg.h" +#include "fw/api/tx.h" #include "internal.h" #include "iwl-fh.h" @@ -301,18 +302,13 @@ void iwl_pcie_apm_config(struct iwl_trans *trans) u16 cap; /* - * HW bug W/A for instability in PCIe bus L0S->L1 transition. - * Check if BIOS (or OS) enabled L1-ASPM on this device. - * If so (likely), disable L0S, so device moves directly L0->L1; - * costs negligible amount of power savings. - * If not (unlikely), enable L0S, so there is at least some - * power savings, even without L1. + * L0S states have been found to be unstable with our devices + * and in newer hardware they are not officially supported at + * all, so we must always set the L0S_DISABLED bit. */ + iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_DISABLED); + pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl); - if (lctl & PCI_EXP_LNKCTL_ASPM_L1) - iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); - else - iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S); pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap); @@ -1783,6 +1779,29 @@ static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans) return 0; } +static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans) +{ + int ret; + + ret = iwl_finish_nic_init(trans, trans->trans_cfg); + if (ret < 0) + return ret; + + iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG, + HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE); + udelay(20); + iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG, + HPM_HIPM_GEN_CFG_CR_PG_EN | + HPM_HIPM_GEN_CFG_CR_SLP_EN); + udelay(20); + iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG, + HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE); + + iwl_trans_pcie_sw_reset(trans); + + return 0; +} + static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -1802,6 +1821,13 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans) iwl_trans_pcie_sw_reset(trans); + if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 && + trans->cfg->integrated) { + err = iwl_pcie_gen2_force_power_gating(trans); + if (err) + return err; + } + err = iwl_pcie_apm_init(trans); if (err) return err; @@ -3430,19 +3456,34 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, { struct iwl_trans_pcie *trans_pcie; struct iwl_trans *trans; - int ret, addr_size; + int ret, addr_size, txcmd_size, txcmd_align; + const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2; + + if (!cfg_trans->gen2) { + ops = &trans_ops_pcie; + txcmd_size = sizeof(struct iwl_tx_cmd); + txcmd_align = sizeof(void *); + } else if (cfg_trans->device_family < IWL_DEVICE_FAMILY_AX210) { + txcmd_size = sizeof(struct iwl_tx_cmd_gen2); + txcmd_align = 64; + } else { + txcmd_size = sizeof(struct 
iwl_tx_cmd_gen3); + txcmd_align = 128; + } + + txcmd_size += sizeof(struct iwl_cmd_header); + txcmd_size += 36; /* biggest possible 802.11 header */ + + /* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */ + if (WARN_ON(cfg_trans->gen2 && txcmd_size >= txcmd_align)) + return ERR_PTR(-EINVAL); ret = pcim_enable_device(pdev); if (ret) return ERR_PTR(ret); - if (cfg_trans->gen2) - trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), - &pdev->dev, &trans_ops_pcie_gen2); - else - trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), - &pdev->dev, &trans_ops_pcie); - + trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops, + txcmd_size, txcmd_align); if (!trans) return ERR_PTR(-ENOMEM); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index 8ca0250de99e..bfb984b2e00c 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -221,6 +221,17 @@ static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans, int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd); struct iwl_tfh_tb *tb; + /* + * Only WARN here so we know about the issue, but we mess up our + * unmap path because not every place currently checks for errors + * returned from this function - it can only return an error if + * there's no more space, and so when we know there is enough we + * don't always check ... + */ + WARN(iwl_pcie_crosses_4g_boundary(addr, len), + "possible DMA problem with iova:0x%llx, len:%d\n", + (unsigned long long)addr, len); + if (WARN_ON(idx >= IWL_TFH_NUM_TBS)) return -EINVAL; tb = &tfd->tbs[idx]; @@ -240,13 +251,114 @@ static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans, return idx; } +static struct page *get_workaround_page(struct iwl_trans *trans, + struct sk_buff *skb) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct page **page_ptr; + struct page *ret; + + page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs); + + ret = alloc_page(GFP_ATOMIC); + if (!ret) + return NULL; + + /* set the chaining pointer to the previous page if there */ + *(void **)(page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr; + *page_ptr = ret; + + return ret; +} + +/* + * Add a TB and if needed apply the FH HW bug workaround; + * meta != NULL indicates that it's a page mapping and we + * need to dma_unmap_page() and set the meta->tbs bit in + * this case. + */ +static int iwl_pcie_gen2_set_tb_with_wa(struct iwl_trans *trans, + struct sk_buff *skb, + struct iwl_tfh_tfd *tfd, + dma_addr_t phys, void *virt, + u16 len, struct iwl_cmd_meta *meta) +{ + dma_addr_t oldphys = phys; + struct page *page; + int ret; + + if (unlikely(dma_mapping_error(trans->dev, phys))) + return -ENOMEM; + + if (likely(!iwl_pcie_crosses_4g_boundary(phys, len))) { + ret = iwl_pcie_gen2_set_tb(trans, tfd, phys, len); + + if (ret < 0) + goto unmap; + + if (meta) + meta->tbs |= BIT(ret); + + ret = 0; + goto trace; + } + + /* + * Work around a hardware bug. If (as expressed in the + * condition above) the TB ends on a 32-bit boundary, + * then the next TB may be accessed with the wrong + * address. + * To work around it, copy the data elsewhere and make + * a new mapping for it so the device will not fail. 
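The code that follows implements exactly that: when a transfer buffer would trip the boundary bug, its contents are copied into a freshly allocated workaround page and the copy is mapped instead. A malloc()-based model of the bounce step, with a fake physical address standing in for the DMA mapping:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#define PAGE_SZ 4096

static int crosses_4g(uint64_t phys, uint16_t len)
{
	return (phys >> 32) != ((phys + len) >> 32);
}

/* model of iwl_pcie_gen2_set_tb_with_wa(): if the buffer would hit the
 * hardware bug, copy it into a private bounce page and use that copy */
static const void *place_tb(uint64_t fake_phys, const void *virt, uint16_t len,
			    void **bounce_out)
{
	if (!crosses_4g(fake_phys, len))
		return virt;               /* common case: use as-is */

	if (len > PAGE_SZ)
		return NULL;               /* cannot bounce more than a page */

	void *bounce = malloc(PAGE_SZ);    /* get_workaround_page stand-in */
	if (!bounce)
		return NULL;
	memcpy(bounce, virt, len);
	*bounce_out = bounce;              /* caller frees after the "DMA" */
	return bounce;
}

int main(void)
{
	char payload[64] = "frame bytes";
	void *bounce = NULL;
	const void *tb = place_tb(0xffffffe0ull, payload, sizeof(payload), &bounce);
	printf("bounced: %s\n", tb == payload ? "no" : "yes");
	free(bounce);
	return 0;
}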
+ */ + + if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) { + ret = -ENOBUFS; + goto unmap; + } + + page = get_workaround_page(trans, skb); + if (!page) { + ret = -ENOMEM; + goto unmap; + } + + memcpy(page_address(page), virt, len); + + phys = dma_map_single(trans->dev, page_address(page), len, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(trans->dev, phys))) + return -ENOMEM; + ret = iwl_pcie_gen2_set_tb(trans, tfd, phys, len); + if (ret < 0) { + /* unmap the new allocation as single */ + oldphys = phys; + meta = NULL; + goto unmap; + } + IWL_WARN(trans, + "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n", + len, (unsigned long long)oldphys, (unsigned long long)phys); + + ret = 0; +unmap: + if (meta) + dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE); + else + dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE); +trace: + trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len); + + return ret; +} + static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_tfh_tfd *tfd, int start_len, - u8 hdr_len, struct iwl_device_cmd *dev_cmd) + u8 hdr_len, + struct iwl_device_tx_cmd *dev_cmd) { #ifdef CONFIG_INET - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload; struct ieee80211_hdr *hdr = (void *)skb->data; unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; @@ -254,7 +366,6 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans, u16 length, amsdu_pad; u8 *start_hdr; struct iwl_tso_hdr_page *hdr_page; - struct page **page_ptr; struct tso_t tso; trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), @@ -270,14 +381,11 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans, (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)); /* Our device supports 9 segments at most, it will fit in 1 page */ - hdr_page = get_page_hdr(trans, hdr_room); + hdr_page = get_page_hdr(trans, hdr_room, skb); if (!hdr_page) return -ENOMEM; - get_page(hdr_page->page); start_hdr = hdr_page->pos; - page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs); - *page_ptr = hdr_page->page; /* * Pull the ieee80211 header to be able to use TSO core, @@ -332,6 +440,11 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans, dev_kfree_skb(csum_skb); goto out_err; } + /* + * No need for _with_wa, this is from the TSO page and + * we leave some space at the end of it so can't hit + * the buggy scenario. 
+ */ iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len); trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, tb_phys, tb_len); @@ -343,16 +456,18 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans, /* put the payload */ while (data_left) { + int ret; + tb_len = min_t(unsigned int, tso.size, data_left); tb_phys = dma_map_single(trans->dev, tso.data, tb_len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, tb_phys))) { + ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, + tb_phys, tso.data, + tb_len, NULL); + if (ret) { dev_kfree_skb(csum_skb); goto out_err; } - iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len); - trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data, - tb_phys, tb_len); data_left -= tb_len; tso_build_data(skb, &tso, tb_len); @@ -372,7 +487,7 @@ out_err: static struct iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans, struct iwl_txq *txq, - struct iwl_device_cmd *dev_cmd, + struct iwl_device_tx_cmd *dev_cmd, struct sk_buff *skb, struct iwl_cmd_meta *out_meta, int hdr_len, @@ -386,6 +501,11 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans, tb_phys = iwl_pcie_get_first_tb_dma(txq, idx); + /* + * No need for _with_wa, the first TB allocation is aligned up + * to a 64-byte boundary and thus can't be at the end or cross + * a page boundary (much less a 2^32 boundary). + */ iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); /* @@ -404,6 +524,10 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans, tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, tb_phys))) goto out_err; + /* + * No need for _with_wa(), we ensure (via alignment) that the data + * here can never cross or end at a page boundary. + */ iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, len); if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd, @@ -430,24 +554,19 @@ static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans, for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; dma_addr_t tb_phys; - int tb_idx; + unsigned int fragsz = skb_frag_size(frag); + int ret; - if (!skb_frag_size(frag)) + if (!fragsz) continue; tb_phys = skb_frag_dma_map(trans->dev, frag, 0, - skb_frag_size(frag), DMA_TO_DEVICE); - - if (unlikely(dma_mapping_error(trans->dev, tb_phys))) - return -ENOMEM; - tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, - skb_frag_size(frag)); - trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag), - tb_phys, skb_frag_size(frag)); - if (tb_idx < 0) - return tb_idx; - - out_meta->tbs |= BIT(tb_idx); + fragsz, DMA_TO_DEVICE); + ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, + skb_frag_address(frag), + fragsz, out_meta); + if (ret) + return ret; } return 0; @@ -456,7 +575,7 @@ static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans, static struct iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans, struct iwl_txq *txq, - struct iwl_device_cmd *dev_cmd, + struct iwl_device_tx_cmd *dev_cmd, struct sk_buff *skb, struct iwl_cmd_meta *out_meta, int hdr_len, @@ -475,6 +594,11 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans, /* The first TB points to bi-directional DMA data */ memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE); + /* + * No need for _with_wa, the first TB allocation is aligned up + * to a 64-byte boundary and thus can't be at the end or cross + * a page boundary (much less a 2^32 boundary). 
+ */ iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); /* @@ -496,6 +620,10 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans, tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, tb_phys))) goto out_err; + /* + * No need for _with_wa(), we ensure (via alignment) that the data + * here can never cross or end at a page boundary. + */ iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len); trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, hdr_len); @@ -504,26 +632,30 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans, tb2_len = skb_headlen(skb) - hdr_len; if (tb2_len > 0) { + int ret; + tb_phys = dma_map_single(trans->dev, skb->data + hdr_len, tb2_len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, tb_phys))) + ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, + skb->data + hdr_len, tb2_len, + NULL); + if (ret) goto out_err; - iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len); - trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len, - tb_phys, tb2_len); } if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta)) goto out_err; skb_walk_frags(skb, frag) { + int ret; + tb_phys = dma_map_single(trans->dev, frag->data, skb_headlen(frag), DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, tb_phys))) + ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, + frag->data, + skb_headlen(frag), NULL); + if (ret) goto out_err; - iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, skb_headlen(frag)); - trace_iwlwifi_dev_tx_tb(trans->dev, skb, frag->data, - tb_phys, skb_headlen(frag)); if (iwl_pcie_gen2_tx_add_frags(trans, frag, tfd, out_meta)) goto out_err; } @@ -538,7 +670,7 @@ out_err: static struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, - struct iwl_device_cmd *dev_cmd, + struct iwl_device_tx_cmd *dev_cmd, struct sk_buff *skb, struct iwl_cmd_meta *out_meta) { @@ -578,7 +710,7 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans, } int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_cmd *dev_cmd, int txq_id) + struct iwl_device_tx_cmd *dev_cmd, int txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_cmd_meta *out_meta; @@ -603,7 +735,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, /* don't put the packet on the ring, if there is no room */ if (unlikely(iwl_queue_space(trans, txq) < 3)) { - struct iwl_device_cmd **dev_cmd_ptr; + struct iwl_device_tx_cmd **dev_cmd_ptr; dev_cmd_ptr = (void *)((u8 *)skb->cb + trans_pcie->dev_cmd_offs); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index f21f16ab2a97..b0eb52b4951b 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -213,8 +213,8 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans, u8 sec_ctl = 0; u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; __le16 bc_ent; - struct iwl_tx_cmd *tx_cmd = - (void *)txq->entries[txq->write_ptr].cmd->payload; + struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd; + struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; u8 sta_id = tx_cmd->sta_id; scd_bc_tbl = trans_pcie->scd_bc_tbls.addr; @@ -257,8 +257,8 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans, int read_ptr = txq->read_ptr; u8 
sta_id = 0; __le16 bc_ent; - struct iwl_tx_cmd *tx_cmd = - (void *)txq->entries[read_ptr].cmd->payload; + struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd; + struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); @@ -624,12 +624,18 @@ void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie, struct sk_buff *skb) { struct page **page_ptr; + struct page *next; page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs); + next = *page_ptr; + *page_ptr = NULL; - if (*page_ptr) { - __free_page(*page_ptr); - *page_ptr = NULL; + while (next) { + struct page *tmp = next; + + next = *(void **)(page_address(next) + PAGE_SIZE - + sizeof(void *)); + __free_page(tmp); } } @@ -1196,7 +1202,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, while (!skb_queue_empty(&overflow_skbs)) { struct sk_buff *skb = __skb_dequeue(&overflow_skbs); - struct iwl_device_cmd *dev_cmd_ptr; + struct iwl_device_tx_cmd *dev_cmd_ptr; dev_cmd_ptr = *(void **)((u8 *)skb->cb + trans_pcie->dev_cmd_offs); @@ -2052,17 +2058,34 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb, } #ifdef CONFIG_INET -struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len) +struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len, + struct sk_buff *skb) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page); + struct page **page_ptr; + + page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs); + + if (WARN_ON(*page_ptr)) + return NULL; if (!p->page) goto alloc; - /* enough room on this page */ - if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE) - return p; + /* + * Check if there's enough room on this page + * + * Note that we put a page chaining pointer *last* in the + * page - we need it somewhere, and if it's there then we + * avoid DMA mapping the last bits of the page which may + * trigger the 32-bit boundary hardware bug. + * + * (see also get_workaround_page() in tx-gen2.c) + */ + if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE - + sizeof(void *)) + goto out; /* We don't have enough room on this page, get a new one. 
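The chaining scheme described above stores the next-page pointer in the last sizeof(void *) bytes of every TSO header page, which is also why get_page_hdr() now stops short of the page end; iwl_pcie_free_tso_page() then walks the chain and frees every page. A userspace model of push and free-all:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ 4096

/* the chain pointer lives in the last sizeof(void *) bytes of each page,
 * exactly the bytes the header allocator refuses to hand out */
static void **tail_ptr(void *page)
{
	return (void **)((char *)page + PAGE_SZ - sizeof(void *));
}

static void *push_page(void **head)
{
	void *page = malloc(PAGE_SZ);   /* alloc_page stand-in */
	if (!page)
		return NULL;
	*tail_ptr(page) = *head;        /* chain to the previous page */
	*head = page;
	return page;
}

static void free_all(void **head)
{
	void *next = *head;
	*head = NULL;
	while (next) {                  /* mirrors iwl_pcie_free_tso_page */
		void *tmp = next;
		next = *tail_ptr(next);
		free(tmp);
	}
}

int main(void)
{
	void *head = NULL;
	push_page(&head);
	push_page(&head);
	free_all(&head);
	printf("chain freed, head=%p\n", head);
	return 0;
}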
*/ __free_page(p->page); @@ -2072,6 +2095,11 @@ alloc: if (!p->page) return NULL; p->pos = page_address(p->page); + /* set the chaining pointer to NULL */ + *(void **)(page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL; +out: + *page_ptr = p->page; + get_page(p->page); return p; } @@ -2097,7 +2125,8 @@ static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph, static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_txq *txq, u8 hdr_len, struct iwl_cmd_meta *out_meta, - struct iwl_device_cmd *dev_cmd, u16 tb1_len) + struct iwl_device_tx_cmd *dev_cmd, + u16 tb1_len) { struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; struct iwl_trans_pcie *trans_pcie = txq->trans_pcie; @@ -2107,7 +2136,6 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, u16 length, iv_len, amsdu_pad; u8 *start_hdr; struct iwl_tso_hdr_page *hdr_page; - struct page **page_ptr; struct tso_t tso; /* if the packet is protected, then it must be CCMP or GCMP */ @@ -2130,14 +2158,11 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len; /* Our device supports 9 segments at most, it will fit in 1 page */ - hdr_page = get_page_hdr(trans, hdr_room); + hdr_page = get_page_hdr(trans, hdr_room, skb); if (!hdr_page) return -ENOMEM; - get_page(hdr_page->page); start_hdr = hdr_page->pos; - page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs); - *page_ptr = hdr_page->page; memcpy(hdr_page->pos, skb->data + hdr_len, iv_len); hdr_page->pos += iv_len; @@ -2279,7 +2304,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_txq *txq, u8 hdr_len, struct iwl_cmd_meta *out_meta, - struct iwl_device_cmd *dev_cmd, u16 tb1_len) + struct iwl_device_tx_cmd *dev_cmd, + u16 tb1_len) { /* No A-MSDU without CONFIG_INET */ WARN_ON(1); @@ -2289,7 +2315,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, #endif /* CONFIG_INET */ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_cmd *dev_cmd, int txq_id) + struct iwl_device_tx_cmd *dev_cmd, int txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct ieee80211_hdr *hdr; @@ -2346,7 +2372,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, /* don't put the packet on the ring, if there is no room */ if (unlikely(iwl_queue_space(trans, txq) < 3)) { - struct iwl_device_cmd **dev_cmd_ptr; + struct iwl_device_tx_cmd **dev_cmd_ptr; dev_cmd_ptr = (void *)((u8 *)skb->cb + trans_pcie->dev_cmd_offs); diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c index 57edfada0665..c9401c121a14 100644 --- a/drivers/net/wireless/marvell/libertas/cfg.c +++ b/drivers/net/wireless/marvell/libertas/cfg.c @@ -273,6 +273,10 @@ add_ie_rates(u8 *tlv, const u8 *ie, int *nrates) int hw, ap, ap_max = ie[1]; u8 hw_rate; + if (ap_max > MAX_RATES) { + lbs_deb_assoc("invalid rates\n"); + return tlv; + } /* Advance past IE header */ ie += 2; @@ -1717,6 +1721,9 @@ static int lbs_ibss_join_existing(struct lbs_private *priv, struct cmd_ds_802_11_ad_hoc_join cmd; u8 preamble = RADIO_PREAMBLE_SHORT; int ret = 0; + int hw, i; + u8 rates_max; + u8 *rates; /* TODO: set preamble based on scan result */ ret = lbs_set_radio(priv, preamble, 1); @@ -1775,9 +1782,12 @@ static int 
lbs_ibss_join_existing(struct lbs_private *priv, if (!rates_eid) { lbs_add_rates(cmd.bss.rates); } else { - int hw, i; - u8 rates_max = rates_eid[1]; - u8 *rates = cmd.bss.rates; + rates_max = rates_eid[1]; + if (rates_max > MAX_RATES) { + lbs_deb_join("invalid rates"); + goto out; + } + rates = cmd.bss.rates; for (hw = 0; hw < ARRAY_SIZE(lbs_rates); hw++) { u8 hw_rate = lbs_rates[hw].bitrate / 5; for (i = 0; i < rates_max; i++) { diff --git a/drivers/net/wireless/marvell/libertas/debugfs.c b/drivers/net/wireless/marvell/libertas/debugfs.c index fe14814af300..c604613ab506 100644 --- a/drivers/net/wireless/marvell/libertas/debugfs.c +++ b/drivers/net/wireless/marvell/libertas/debugfs.c @@ -774,7 +774,7 @@ void lbs_debugfs_remove_one(struct lbs_private *priv) #ifdef PROC_DEBUG -#define item_size(n) (FIELD_SIZEOF(struct lbs_private, n)) +#define item_size(n) (sizeof_field(struct lbs_private, n)) #define item_addr(n) (offsetof(struct lbs_private, n)) diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c index 74e50566db1f..6dd835f1efc2 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c @@ -229,6 +229,14 @@ static int mwifiex_process_country_ie(struct mwifiex_private *priv, "11D: skip setting domain info in FW\n"); return 0; } + + if (country_ie_len > + (IEEE80211_COUNTRY_STRING_LEN + MWIFIEX_MAX_TRIPLET_802_11D)) { + mwifiex_dbg(priv->adapter, ERROR, + "11D: country_ie_len overflow!, deauth AP\n"); + return -EINVAL; + } + memcpy(priv->adapter->country_code, &country_ie[2], 2); domain_info->country_code[0] = country_ie[2]; @@ -272,8 +280,9 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss, priv->scan_block = false; if (bss) { - if (adapter->region_code == 0x00) - mwifiex_process_country_ie(priv, bss); + if (adapter->region_code == 0x00 && + mwifiex_process_country_ie(priv, bss)) + return -EINVAL; /* Allocate and fill new bss descriptor */ bss_desc = kzalloc(sizeof(struct mwifiex_bssdescriptor), diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c index 09313047beed..7caf1d26124a 100644 --- a/drivers/net/wireless/marvell/mwifiex/tdls.c +++ b/drivers/net/wireless/marvell/mwifiex/tdls.c @@ -953,59 +953,117 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv, switch (*pos) { case WLAN_EID_SUPP_RATES: + if (pos[1] > 32) + return; sta_ptr->tdls_cap.rates_len = pos[1]; for (i = 0; i < pos[1]; i++) sta_ptr->tdls_cap.rates[i] = pos[i + 2]; break; case WLAN_EID_EXT_SUPP_RATES: + if (pos[1] > 32) + return; basic = sta_ptr->tdls_cap.rates_len; + if (pos[1] > 32 - basic) + return; for (i = 0; i < pos[1]; i++) sta_ptr->tdls_cap.rates[basic + i] = pos[i + 2]; sta_ptr->tdls_cap.rates_len += pos[1]; break; case WLAN_EID_HT_CAPABILITY: - memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos, + if (pos > end - sizeof(struct ieee80211_ht_cap) - 2) + return; + if (pos[1] != sizeof(struct ieee80211_ht_cap)) + return; + /* copy the ie's value into ht_capb*/ + memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos + 2, sizeof(struct ieee80211_ht_cap)); sta_ptr->is_11n_enabled = 1; break; case WLAN_EID_HT_OPERATION: - memcpy(&sta_ptr->tdls_cap.ht_oper, pos, + if (pos > end - + sizeof(struct ieee80211_ht_operation) - 2) + return; + if (pos[1] != sizeof(struct ieee80211_ht_operation)) + return; + /* copy the ie's value into ht_oper*/ + memcpy(&sta_ptr->tdls_cap.ht_oper, pos + 2, sizeof(struct ieee80211_ht_operation)); 
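Every check added to this TDLS parser has the same shape: confirm the element fits inside the remaining frame body, then confirm its length byte matches the destination structure, and only then copy past the two-byte IE header. A hedged sketch of that pattern against a generic destination (copy_fixed_ie is illustrative, not the mwifiex API):

    /* pos points at an IE header (pos[0] = ID, pos[1] = length);
     * end points one past the last valid byte of the frame body.
     */
    static int copy_fixed_ie(const u8 *pos, const u8 *end,
                             void *dst, size_t dst_len)
    {
            if (pos > end - dst_len - 2)    /* header + payload must fit */
                    return -EINVAL;
            if (pos[1] != dst_len)          /* length byte must match */
                    return -EINVAL;
            memcpy(dst, pos + 2, dst_len);
            return 0;
    }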
break; case WLAN_EID_BSS_COEX_2040: + if (pos > end - 3) + return; + if (pos[1] != 1) + return; sta_ptr->tdls_cap.coex_2040 = pos[2]; break; case WLAN_EID_EXT_CAPABILITY: + if (pos > end - sizeof(struct ieee_types_header)) + return; + if (pos[1] < sizeof(struct ieee_types_header)) + return; + if (pos[1] > 8) + return; memcpy((u8 *)&sta_ptr->tdls_cap.extcap, pos, sizeof(struct ieee_types_header) + min_t(u8, pos[1], 8)); break; case WLAN_EID_RSN: + if (pos > end - sizeof(struct ieee_types_header)) + return; + if (pos[1] < sizeof(struct ieee_types_header)) + return; + if (pos[1] > IEEE_MAX_IE_SIZE - + sizeof(struct ieee_types_header)) + return; memcpy((u8 *)&sta_ptr->tdls_cap.rsn_ie, pos, sizeof(struct ieee_types_header) + min_t(u8, pos[1], IEEE_MAX_IE_SIZE - sizeof(struct ieee_types_header))); break; case WLAN_EID_QOS_CAPA: + if (pos > end - 3) + return; + if (pos[1] != 1) + return; sta_ptr->tdls_cap.qos_info = pos[2]; break; case WLAN_EID_VHT_OPERATION: - if (priv->adapter->is_hw_11ac_capable) - memcpy(&sta_ptr->tdls_cap.vhtoper, pos, + if (priv->adapter->is_hw_11ac_capable) { + if (pos > end - + sizeof(struct ieee80211_vht_operation) - 2) + return; + if (pos[1] != + sizeof(struct ieee80211_vht_operation)) + return; + /* copy the ie's value into vhtoper*/ + memcpy(&sta_ptr->tdls_cap.vhtoper, pos + 2, sizeof(struct ieee80211_vht_operation)); + } break; case WLAN_EID_VHT_CAPABILITY: if (priv->adapter->is_hw_11ac_capable) { - memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos, + if (pos > end - + sizeof(struct ieee80211_vht_cap) - 2) + return; + if (pos[1] != sizeof(struct ieee80211_vht_cap)) + return; + /* copy the ie's value into vhtcap*/ + memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos + 2, sizeof(struct ieee80211_vht_cap)); sta_ptr->is_11ac_enabled = 1; } break; case WLAN_EID_AID: - if (priv->adapter->is_hw_11ac_capable) + if (priv->adapter->is_hw_11ac_capable) { + if (pos > end - 4) + return; + if (pos[1] != 2) + return; sta_ptr->tdls_cap.aid = get_unaligned_le16((pos + 2)); + } + break; default: break; } diff --git a/drivers/net/wireless/marvell/mwifiex/util.h b/drivers/net/wireless/marvell/mwifiex/util.h index c386992abcdb..7cafcecd7b85 100644 --- a/drivers/net/wireless/marvell/mwifiex/util.h +++ b/drivers/net/wireless/marvell/mwifiex/util.h @@ -36,11 +36,11 @@ struct mwifiex_cb { }; /* size/addr for mwifiex_debug_info */ -#define item_size(n) (FIELD_SIZEOF(struct mwifiex_debug_info, n)) +#define item_size(n) (sizeof_field(struct mwifiex_debug_info, n)) #define item_addr(n) (offsetof(struct mwifiex_debug_info, n)) /* size/addr for struct mwifiex_adapter */ -#define adapter_item_size(n) (FIELD_SIZEOF(struct mwifiex_adapter, n)) +#define adapter_item_size(n) (sizeof_field(struct mwifiex_adapter, n)) #define adapter_item_addr(n) (offsetof(struct mwifiex_adapter, n)) struct mwifiex_debug_data { diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c index 53b5a4b2dcc5..59c187898132 100644 --- a/drivers/net/wireless/mediatek/mt76/agg-rx.c +++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c @@ -281,8 +281,8 @@ void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno) { struct mt76_rx_tid *tid = NULL; - rcu_swap_protected(wcid->aggr[tidno], tid, - lockdep_is_held(&dev->mutex)); + tid = rcu_replace_pointer(wcid->aggr[tidno], tid, + lockdep_is_held(&dev->mutex)); if (tid) { mt76_rx_aggr_shutdown(dev, tid); kfree_rcu(tid, rcu_head); diff --git a/drivers/net/wireless/mediatek/mt76/airtime.c b/drivers/net/wireless/mediatek/mt76/airtime.c index 
55116f395f9a..a4a785467748 100644 --- a/drivers/net/wireless/mediatek/mt76/airtime.c +++ b/drivers/net/wireless/mediatek/mt76/airtime.c @@ -242,7 +242,7 @@ u32 mt76_calc_rx_airtime(struct mt76_dev *dev, struct mt76_rx_status *status, return 0; sband = dev->hw->wiphy->bands[status->band]; - if (!sband || status->rate_idx > sband->n_bitrates) + if (!sband || status->rate_idx >= sband->n_bitrates) return 0; rate = &sband->bitrates[status->rate_idx]; diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index b9f2a401041a..96018fd65779 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -378,7 +378,8 @@ void mt76_unregister_device(struct mt76_dev *dev) { struct ieee80211_hw *hw = dev->hw; - mt76_led_cleanup(dev); + if (IS_ENABLED(CONFIG_MT76_LEDS)) + mt76_led_cleanup(dev); mt76_tx_status_check(dev, NULL, true); ieee80211_unregister_hw(hw); } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c index a03e2d01fba7..d1405528b504 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c @@ -342,8 +342,11 @@ int mt76x0_eeprom_init(struct mt76x02_dev *dev) dev_info(dev->mt76.dev, "EEPROM ver:%02hhx fae:%02hhx\n", version, fae); - mt76x02_mac_setaddr(dev, dev->mt76.eeprom.data + MT_EE_MAC_ADDR); + memcpy(dev->mt76.macaddr, (u8 *)dev->mt76.eeprom.data + MT_EE_MAC_ADDR, + ETH_ALEN); mt76_eeprom_override(&dev->mt76); + mt76x02_mac_setaddr(dev, dev->mt76.macaddr); + mt76x0_set_chip_cap(dev); mt76x0_set_freq_offset(dev); mt76x0_set_temp_offset(dev); diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 68dd7bb07ca6..f15ba3de6195 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -628,18 +628,6 @@ err: static void xenvif_disconnect_queue(struct xenvif_queue *queue) { - if (queue->tx_irq) { - unbind_from_irqhandler(queue->tx_irq, queue); - if (queue->tx_irq == queue->rx_irq) - queue->rx_irq = 0; - queue->tx_irq = 0; - } - - if (queue->rx_irq) { - unbind_from_irqhandler(queue->rx_irq, queue); - queue->rx_irq = 0; - } - if (queue->task) { kthread_stop(queue->task); queue->task = NULL; @@ -655,6 +643,18 @@ static void xenvif_disconnect_queue(struct xenvif_queue *queue) queue->napi.poll = NULL; } + if (queue->tx_irq) { + unbind_from_irqhandler(queue->tx_irq, queue); + if (queue->tx_irq == queue->rx_irq) + queue->rx_irq = 0; + queue->tx_irq = 0; + } + + if (queue->rx_irq) { + unbind_from_irqhandler(queue->rx_irq, queue); + queue->rx_irq = 0; + } + xenvif_unmap_frontend_data_rings(queue); } diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c index 4d1909aecd6c..9f60e4dc5a90 100644 --- a/drivers/nfc/nxp-nci/i2c.c +++ b/drivers/nfc/nxp-nci/i2c.c @@ -278,7 +278,7 @@ static int nxp_nci_i2c_probe(struct i2c_client *client, r = devm_acpi_dev_add_driver_gpios(dev, acpi_nxp_nci_gpios); if (r) - return r; + dev_dbg(dev, "Unable to add GPIO mapping table\n"); phy->gpiod_en = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW); if (IS_ERR(phy->gpiod_en)) { diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c index 4590fbf82dc2..f5bb7ace2ff5 100644 --- a/drivers/nfc/pn533/usb.c +++ b/drivers/nfc/pn533/usb.c @@ -391,7 +391,7 @@ static int pn533_acr122_poweron_rdr(struct pn533_usb_phy *phy) cmd, sizeof(cmd), false); rc = usb_bulk_msg(phy->udev, phy->out_urb->pipe, buffer, sizeof(cmd), - 
&transferred, 0); + &transferred, 5000); kfree(buffer); if (rc || (transferred != sizeof(cmd))) { nfc_err(&phy->udev->dev, diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c index be110d9cef02..de613c623a2c 100644 --- a/drivers/nfc/s3fwrn5/firmware.c +++ b/drivers/nfc/s3fwrn5/firmware.c @@ -507,7 +507,10 @@ int s3fwrn5_fw_recv_frame(struct nci_dev *ndev, struct sk_buff *skb) struct s3fwrn5_info *info = nci_get_drvdata(ndev); struct s3fwrn5_fw_info *fw_info = &info->fw_info; - BUG_ON(fw_info->rsp); + if (WARN_ON(fw_info->rsp)) { + kfree_skb(skb); + return -EINVAL; + } fw_info->rsp = skb; diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig index c6439638a419..b9358db83e96 100644 --- a/drivers/nvme/host/Kconfig +++ b/drivers/nvme/host/Kconfig @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only config NVME_CORE tristate + select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY config BLK_DEV_NVME tristate "NVM Express block device" diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index dfe37a525f3a..5dc32b72e7fa 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -222,6 +222,8 @@ static blk_status_t nvme_error_status(u16 status) case NVME_SC_CAP_EXCEEDED: return BLK_STS_NOSPC; case NVME_SC_LBA_RANGE: + case NVME_SC_CMD_INTERRUPTED: + case NVME_SC_NS_NOT_READY: return BLK_STS_TARGET; case NVME_SC_BAD_ATTRIBUTES: case NVME_SC_ONCS_NOT_SUPPORTED: @@ -1735,6 +1737,8 @@ static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid, if (ret) dev_warn(ctrl->device, "Identify Descriptors failed (%d)\n", ret); + if (ret > 0) + ret = 0; } return ret; } @@ -2852,6 +2856,10 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) * admin connect */ if (ctrl->cntlid != le16_to_cpu(id->cntlid)) { + dev_err(ctrl->device, + "Mismatching cntlid: Connect %u vs Identify " + "%u, rejecting\n", + ctrl->cntlid, le16_to_cpu(id->cntlid)); ret = -EINVAL; goto out_free; } diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 679a721ae229..5a70ac395d53 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -95,7 +95,7 @@ struct nvme_fc_fcp_op { struct nvme_fcp_op_w_sgl { struct nvme_fc_fcp_op op; - struct scatterlist sgl[SG_CHUNK_SIZE]; + struct scatterlist sgl[NVME_INLINE_SG_CNT]; uint8_t priv[0]; }; @@ -342,7 +342,8 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo, !template->ls_req || !template->fcp_io || !template->ls_abort || !template->fcp_abort || !template->max_hw_queues || !template->max_sgl_segments || - !template->max_dif_sgl_segments || !template->dma_boundary) { + !template->max_dif_sgl_segments || !template->dma_boundary || + !template->module) { ret = -EINVAL; goto out_reghost_failed; } @@ -2015,6 +2016,7 @@ nvme_fc_ctrl_free(struct kref *ref) { struct nvme_fc_ctrl *ctrl = container_of(ref, struct nvme_fc_ctrl, ref); + struct nvme_fc_lport *lport = ctrl->lport; unsigned long flags; if (ctrl->ctrl.tagset) { @@ -2041,6 +2043,7 @@ nvme_fc_ctrl_free(struct kref *ref) if (ctrl->ctrl.opts) nvmf_free_options(ctrl->ctrl.opts); kfree(ctrl); + module_put(lport->ops->module); } static void @@ -2141,7 +2144,7 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq, freq->sg_table.sgl = freq->first_sgl; ret = sg_alloc_table_chained(&freq->sg_table, blk_rq_nr_phys_segments(rq), freq->sg_table.sgl, - SG_CHUNK_SIZE); + NVME_INLINE_SG_CNT); if (ret) return -ENOMEM; @@ -2150,7 +2153,7 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq, freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, 
freq->sg_table.sgl, op->nents, rq_dma_dir(rq)); if (unlikely(freq->sg_cnt <= 0)) { - sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE); + sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT); freq->sg_cnt = 0; return -EFAULT; } @@ -2173,7 +2176,7 @@ nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq, fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents, rq_dma_dir(rq)); - sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE); + sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT); freq->sg_cnt = 0; } @@ -2910,10 +2913,22 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status) static void __nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl) { - nvme_stop_keep_alive(&ctrl->ctrl); + /* + * if state is connecting - the error occurred as part of a + * reconnect attempt. The create_association error paths will + * clean up any outstanding io. + * + * if it's a different state - ensure all pending io is + * terminated. Given this can delay while waiting for the + * aborted io to return, we recheck adapter state below + * before changing state. + */ + if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) { + nvme_stop_keep_alive(&ctrl->ctrl); - /* will block while waiting for io to terminate */ - nvme_fc_delete_association(ctrl); + /* will block while waiting for io to terminate */ + nvme_fc_delete_association(ctrl); + } if (ctrl->ctrl.state != NVME_CTRL_CONNECTING && !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) @@ -3059,10 +3074,15 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, goto out_fail; } + if (!try_module_get(lport->ops->module)) { + ret = -EUNATCH; + goto out_free_ctrl; + } + idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL); if (idx < 0) { ret = -ENOSPC; - goto out_free_ctrl; + goto out_mod_put; } ctrl->ctrl.opts = opts; @@ -3215,6 +3235,8 @@ out_free_queues: out_free_ida: put_device(ctrl->dev); ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum); +out_mod_put: + module_put(lport->ops->module); out_free_ctrl: kfree(ctrl); out_fail: diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 3b9cbe0668fa..1024fec7914c 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -28,6 +28,12 @@ extern unsigned int admin_timeout; #define NVME_DEFAULT_KATO 5 #define NVME_KATO_GRACE 10 +#ifdef CONFIG_ARCH_NO_SG_CHAIN +#define NVME_INLINE_SG_CNT 0 +#else +#define NVME_INLINE_SG_CNT 2 +#endif + extern struct workqueue_struct *nvme_wq; extern struct workqueue_struct *nvme_reset_wq; extern struct workqueue_struct *nvme_delete_wq; diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index dcaad5831cee..365a2ddbeaa7 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -68,14 +68,14 @@ static int io_queue_depth = 1024; module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644); MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2"); -static int write_queues; -module_param(write_queues, int, 0644); +static unsigned int write_queues; +module_param(write_queues, uint, 0644); MODULE_PARM_DESC(write_queues, "Number of queues to use for writes. 
If not set, reads and writes " "will share a queue set."); -static int poll_queues; -module_param(poll_queues, int, 0644); +static unsigned int poll_queues; +module_param(poll_queues, uint, 0644); MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO."); struct nvme_dev; @@ -176,7 +176,6 @@ struct nvme_queue { u16 sq_tail; u16 last_sq_tail; u16 cq_head; - u16 last_cq_head; u16 qid; u8 cq_phase; u8 sqes; @@ -1026,10 +1025,7 @@ static irqreturn_t nvme_irq(int irq, void *data) * the irq handler, even if that was on another CPU. */ rmb(); - if (nvmeq->cq_head != nvmeq->last_cq_head) - ret = IRQ_HANDLED; nvme_process_cq(nvmeq, &start, &end, -1); - nvmeq->last_cq_head = nvmeq->cq_head; wmb(); if (start != end) { @@ -1549,7 +1545,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled) result = adapter_alloc_sq(dev, qid, nvmeq); if (result < 0) return result; - else if (result) + if (result) goto release_cq; nvmeq->cq_vector = vector; @@ -2058,7 +2054,6 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues) .priv = dev, }; unsigned int irq_queues, this_p_queues; - unsigned int nr_cpus = num_possible_cpus(); /* * Poll queues don't need interrupts, but we need at least one IO @@ -2069,10 +2064,7 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues) this_p_queues = nr_io_queues - 1; irq_queues = 1; } else { - if (nr_cpus < nr_io_queues - this_p_queues) - irq_queues = nr_cpus + 1; - else - irq_queues = nr_io_queues - this_p_queues + 1; + irq_queues = nr_io_queues - this_p_queues + 1; } dev->io_queues[HCTX_TYPE_POLL] = this_p_queues; @@ -3142,6 +3134,9 @@ static int __init nvme_init(void) BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64); BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64); BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2); + + write_queues = min(write_queues, num_possible_cpus()); + poll_queues = min(poll_queues, num_possible_cpus()); return pci_register_driver(&nvme_driver); } diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index dce59459ed41..2a47c6c5007e 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -731,7 +731,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl, set->reserved_tags = 2; /* connect + keep-alive */ set->numa_node = nctrl->numa_node; set->cmd_size = sizeof(struct nvme_rdma_request) + - SG_CHUNK_SIZE * sizeof(struct scatterlist); + NVME_INLINE_SG_CNT * sizeof(struct scatterlist); set->driver_data = ctrl; set->nr_hw_queues = 1; set->timeout = ADMIN_TIMEOUT; @@ -745,7 +745,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl, set->numa_node = nctrl->numa_node; set->flags = BLK_MQ_F_SHOULD_MERGE; set->cmd_size = sizeof(struct nvme_rdma_request) + - SG_CHUNK_SIZE * sizeof(struct scatterlist); + NVME_INLINE_SG_CNT * sizeof(struct scatterlist); set->driver_data = ctrl; set->nr_hw_queues = nctrl->queue_count - 1; set->timeout = NVME_IO_TIMEOUT; @@ -1160,7 +1160,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue, } ib_dma_unmap_sg(ibdev, req->sg_table.sgl, req->nents, rq_dma_dir(rq)); - sg_free_table_chained(&req->sg_table, SG_CHUNK_SIZE); + sg_free_table_chained(&req->sg_table, NVME_INLINE_SG_CNT); } static int nvme_rdma_set_sg_null(struct nvme_command *c) @@ -1276,7 +1276,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue, req->sg_table.sgl = req->first_sgl; ret = sg_alloc_table_chained(&req->sg_table, blk_rq_nr_phys_segments(rq), req->sg_table.sgl, - 
SG_CHUNK_SIZE); + NVME_INLINE_SG_CNT); if (ret) return -ENOMEM; @@ -1314,7 +1314,7 @@ out: out_unmap_sg: ib_dma_unmap_sg(ibdev, req->sg_table.sgl, req->nents, rq_dma_dir(rq)); out_free_table: - sg_free_table_chained(&req->sg_table, SG_CHUNK_SIZE); + sg_free_table_chained(&req->sg_table, NVME_INLINE_SG_CNT); return ret; } diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index 56c21b501185..72a7e41f3018 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -24,6 +24,16 @@ u32 nvmet_get_log_page_len(struct nvme_command *cmd) return len; } +static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10) +{ + switch (cdw10 & 0xff) { + case NVME_FEAT_HOST_ID: + return sizeof(req->sq->ctrl->hostid); + default: + return 0; + } +} + u64 nvmet_get_log_page_offset(struct nvme_command *cmd) { return le64_to_cpu(cmd->get_log_page.lpo); @@ -778,7 +788,7 @@ static void nvmet_execute_get_features(struct nvmet_req *req) u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10); u16 status = 0; - if (!nvmet_check_data_len(req, 0)) + if (!nvmet_check_data_len(req, nvmet_feat_data_len(req, cdw10))) return; switch (cdw10 & 0xff) { diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c index b50b53db3746..1c50af6219f3 100644 --- a/drivers/nvme/target/fcloop.c +++ b/drivers/nvme/target/fcloop.c @@ -850,6 +850,7 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport) #define FCLOOP_DMABOUND_4G 0xFFFFFFFF static struct nvme_fc_port_template fctemplate = { + .module = THIS_MODULE, .localport_delete = fcloop_localport_delete, .remoteport_delete = fcloop_remoteport_delete, .create_queue = fcloop_create_queue, diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index a758bb3d5dd4..4df4ebde208a 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -76,7 +76,7 @@ static void nvme_loop_complete_rq(struct request *req) { struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req); - sg_free_table_chained(&iod->sg_table, SG_CHUNK_SIZE); + sg_free_table_chained(&iod->sg_table, NVME_INLINE_SG_CNT); nvme_complete_rq(req); } @@ -156,7 +156,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx, iod->sg_table.sgl = iod->first_sgl; if (sg_alloc_table_chained(&iod->sg_table, blk_rq_nr_phys_segments(req), - iod->sg_table.sgl, SG_CHUNK_SIZE)) { + iod->sg_table.sgl, NVME_INLINE_SG_CNT)) { nvme_cleanup_cmd(req); return BLK_STS_RESOURCE; } @@ -342,7 +342,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */ ctrl->admin_tag_set.numa_node = NUMA_NO_NODE; ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) + - SG_CHUNK_SIZE * sizeof(struct scatterlist); + NVME_INLINE_SG_CNT * sizeof(struct scatterlist); ctrl->admin_tag_set.driver_data = ctrl; ctrl->admin_tag_set.nr_hw_queues = 1; ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT; @@ -516,7 +516,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl) ctrl->tag_set.numa_node = NUMA_NO_NODE; ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) + - SG_CHUNK_SIZE * sizeof(struct scatterlist); + NVME_INLINE_SG_CNT * sizeof(struct scatterlist); ctrl->tag_set.driver_data = ctrl; ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1; ctrl->tag_set.timeout = NVME_IO_TIMEOUT; diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index c6b87ce2b0cc..fc757ef6eadc 100644 --- a/drivers/of/of_mdio.c +++ 
b/drivers/of/of_mdio.c @@ -162,7 +162,7 @@ static const struct of_device_id whitelist_phys[] = { * A device which is not a phy is expected to have a compatible string * indicating what sort of device it is. */ -static bool of_mdiobus_child_is_phy(struct device_node *child) +bool of_mdiobus_child_is_phy(struct device_node *child) { u32 phy_id; @@ -187,6 +187,7 @@ static bool of_mdiobus_child_is_phy(struct device_node *child) return false; } +EXPORT_SYMBOL(of_mdiobus_child_is_phy); /** * of_mdiobus_register - Register mii_bus and create PHYs from the device tree diff --git a/drivers/of/platform.c b/drivers/of/platform.c index d93891a05f60..3371e4a06248 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c @@ -518,10 +518,11 @@ static int __init of_platform_default_populate_init(void) { struct device_node *node; + device_links_supplier_sync_state_pause(); + if (!of_have_populated_dt()) return -ENODEV; - device_links_supplier_sync_state_pause(); /* * Handle certain compatibles explicitly, since we don't want to create * platform_devices for every node in /reserved-memory with a @@ -545,8 +546,7 @@ arch_initcall_sync(of_platform_default_populate_init); static int __init of_platform_sync_state_init(void) { - if (of_have_populated_dt()) - device_links_supplier_sync_state_resume(); + device_links_supplier_sync_state_resume(); return 0; } late_initcall_sync(of_platform_sync_state_init); diff --git a/drivers/opp/core.c b/drivers/opp/core.c index be7a7d332332..ba43e6a3dc0a 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -988,7 +988,6 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index) BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head); INIT_LIST_HEAD(&opp_table->opp_list); kref_init(&opp_table->kref); - kref_init(&opp_table->list_kref); /* Secure the device table modification */ list_add(&opp_table->node, &opp_tables); @@ -1072,33 +1071,6 @@ static void _opp_table_kref_release(struct kref *kref) mutex_unlock(&opp_table_lock); } -void _opp_remove_all_static(struct opp_table *opp_table) -{ - struct dev_pm_opp *opp, *tmp; - - list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) { - if (!opp->dynamic) - dev_pm_opp_put(opp); - } - - opp_table->parsed_static_opps = false; -} - -static void _opp_table_list_kref_release(struct kref *kref) -{ - struct opp_table *opp_table = container_of(kref, struct opp_table, - list_kref); - - _opp_remove_all_static(opp_table); - mutex_unlock(&opp_table_lock); -} - -void _put_opp_list_kref(struct opp_table *opp_table) -{ - kref_put_mutex(&opp_table->list_kref, _opp_table_list_kref_release, - &opp_table_lock); -} - void dev_pm_opp_put_opp_table(struct opp_table *opp_table) { kref_put_mutex(&opp_table->kref, _opp_table_kref_release, @@ -1202,6 +1174,24 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq) } EXPORT_SYMBOL_GPL(dev_pm_opp_remove); +void _opp_remove_all_static(struct opp_table *opp_table) +{ + struct dev_pm_opp *opp, *tmp; + + mutex_lock(&opp_table->lock); + + if (!opp_table->parsed_static_opps || --opp_table->parsed_static_opps) + goto unlock; + + list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) { + if (!opp->dynamic) + dev_pm_opp_put_unlocked(opp); + } + +unlock: + mutex_unlock(&opp_table->lock); +} + /** * dev_pm_opp_remove_all_dynamic() - Remove all dynamically created OPPs * @dev: device for which we do this operation @@ -2276,7 +2266,7 @@ void _dev_pm_opp_find_and_remove_table(struct device *dev) return; } - _put_opp_list_kref(opp_table); + _opp_remove_all_static(opp_table); /* 
Drop reference taken by _find_opp_table() */ dev_pm_opp_put_opp_table(opp_table); diff --git a/drivers/opp/of.c b/drivers/opp/of.c index 1cbb58240b80..9cd8f0adacae 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -658,17 +658,15 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table) struct dev_pm_opp *opp; /* OPP table is already initialized for the device */ + mutex_lock(&opp_table->lock); if (opp_table->parsed_static_opps) { - kref_get(&opp_table->list_kref); + opp_table->parsed_static_opps++; + mutex_unlock(&opp_table->lock); return 0; } - /* - * Re-initialize list_kref every time we add static OPPs to the OPP - * table as the reference count may be 0 after the last tie static OPPs - * were removed. - */ - kref_init(&opp_table->list_kref); + opp_table->parsed_static_opps = 1; + mutex_unlock(&opp_table->lock); /* We have opp-table node now, iterate over it and add OPPs */ for_each_available_child_of_node(opp_table->np, np) { @@ -678,15 +676,17 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table) dev_err(dev, "%s: Failed to add OPP, %d\n", __func__, ret); of_node_put(np); - return ret; + goto remove_static_opp; } else if (opp) { count++; } } /* There should be one of more OPP defined */ - if (WARN_ON(!count)) - return -ENOENT; + if (WARN_ON(!count)) { + ret = -ENOENT; + goto remove_static_opp; + } list_for_each_entry(opp, &opp_table->opp_list, node) pstate_count += !!opp->pstate; @@ -695,15 +695,19 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table) if (pstate_count && pstate_count != count) { dev_err(dev, "Not all nodes have performance state set (%d: %d)\n", count, pstate_count); - return -ENOENT; + ret = -ENOENT; + goto remove_static_opp; } if (pstate_count) opp_table->genpd_performance_state = true; - opp_table->parsed_static_opps = true; - return 0; + +remove_static_opp: + _opp_remove_all_static(opp_table); + + return ret; } /* Initializes OPP tables based on old-deprecated bindings */ @@ -738,6 +742,7 @@ static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table) if (ret) { dev_err(dev, "%s: Failed to add OPP %ld (%d)\n", __func__, freq, ret); + _opp_remove_all_static(opp_table); return ret; } nr -= 2; diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h index 01a500e2c40a..d14e27102730 100644 --- a/drivers/opp/opp.h +++ b/drivers/opp/opp.h @@ -127,11 +127,10 @@ enum opp_table_access { * @dev_list: list of devices that share these OPPs * @opp_list: table of opps * @kref: for reference count of the table. - * @list_kref: for reference count of the OPP list. * @lock: mutex protecting the opp_list and dev_list. * @np: struct device_node pointer for opp's DT node. * @clock_latency_ns_max: Max clock latency in nanoseconds. - * @parsed_static_opps: True if OPPs are initialized from DT. + * @parsed_static_opps: Count of devices for which OPPs are initialized from DT. * @shared_opp: OPP is shared between multiple devices. * @suspend_opp: Pointer to OPP to be used during device suspend. * @genpd_virt_dev_lock: Mutex protecting the genpd virtual device pointers. 
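As the updated kernel-doc says, parsed_static_opps turns from a bool into a count of users, and that counter plus the existing table mutex replaces the old list_kref. A minimal sketch of the resulting get/put discipline, assuming this struct layout; static_opps_remove() is a stand-in for the actual list teardown:

    static void static_opps_get(struct opp_table *t)
    {
            mutex_lock(&t->lock);
            t->parsed_static_opps++;
            mutex_unlock(&t->lock);
    }

    static void static_opps_put(struct opp_table *t)
    {
            mutex_lock(&t->lock);
            /* tear the static OPPs down only when the last user drops off */
            if (t->parsed_static_opps && --t->parsed_static_opps == 0)
                    static_opps_remove(t);
            mutex_unlock(&t->lock);
    }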
@@ -167,7 +166,6 @@ struct opp_table { struct list_head dev_list; struct list_head opp_list; struct kref kref; - struct kref list_kref; struct mutex lock; struct device_node *np; @@ -176,7 +174,7 @@ struct opp_table { /* For backward compatibility with v1 bindings */ unsigned int voltage_tolerance_v1; - bool parsed_static_opps; + unsigned int parsed_static_opps; enum opp_table_access shared_opp; struct dev_pm_opp *suspend_opp; diff --git a/drivers/opp/ti-opp-supply.c b/drivers/opp/ti-opp-supply.c index 1c69c404df11..e3357e91decb 100644 --- a/drivers/opp/ti-opp-supply.c +++ b/drivers/opp/ti-opp-supply.c @@ -90,7 +90,7 @@ static int _store_optimized_voltages(struct device *dev, goto out_map; } - base = ioremap_nocache(res->start, resource_size(res)); + base = ioremap(res->start, resource_size(res)); if (!base) { dev_err(dev, "Unable to map Efuse registers\n"); ret = -ENOMEM; diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index ad290f79983b..a5507f75b524 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c @@ -1534,7 +1534,7 @@ static int __init ccio_probe(struct parisc_device *dev) *ioc_p = ioc; ioc->hw_path = dev->hw_path; - ioc->ioc_regs = ioremap_nocache(dev->hpa.start, 4096); + ioc->ioc_regs = ioremap(dev->hpa.start, 4096); if (!ioc->ioc_regs) { kfree(ioc); return -ENOMEM; diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c index 2f1cac89ddf5..889d7ce282eb 100644 --- a/drivers/parisc/dino.c +++ b/drivers/parisc/dino.c @@ -974,7 +974,7 @@ static int __init dino_probe(struct parisc_device *dev) } dino_dev->hba.dev = dev; - dino_dev->hba.base_addr = ioremap_nocache(hpa, 4096); + dino_dev->hba.base_addr = ioremap(hpa, 4096); dino_dev->hba.lmmio_space_offset = PCI_F_EXTEND; spin_lock_init(&dino_dev->dinosaur_pen); dino_dev->hba.iommu = ccio_get_iommu(dev); diff --git a/drivers/parisc/eisa.c b/drivers/parisc/eisa.c index 37a2c5db761d..9d00a24277aa 100644 --- a/drivers/parisc/eisa.c +++ b/drivers/parisc/eisa.c @@ -354,10 +354,10 @@ static int __init eisa_probe(struct parisc_device *dev) eisa_dev.eeprom_addr = MIRAGE_EEPROM_BASE_ADDR; } } - eisa_eeprom_addr = ioremap_nocache(eisa_dev.eeprom_addr, HPEE_MAX_LENGTH); + eisa_eeprom_addr = ioremap(eisa_dev.eeprom_addr, HPEE_MAX_LENGTH); if (!eisa_eeprom_addr) { result = -ENOMEM; - printk(KERN_ERR "EISA: ioremap_nocache failed!\n"); + printk(KERN_ERR "EISA: ioremap failed!\n"); goto error_free_irq; } result = eisa_enumerator(eisa_dev.eeprom_addr, &eisa_dev.hba.io_space, diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c index 32f506f00c89..8a3b0c3a1e92 100644 --- a/drivers/parisc/iosapic.c +++ b/drivers/parisc/iosapic.c @@ -927,7 +927,7 @@ void *iosapic_register(unsigned long hpa) return NULL; } - isi->addr = ioremap_nocache(hpa, 4096); + isi->addr = ioremap(hpa, 4096); isi->isi_hpa = hpa; isi->isi_version = iosapic_rd_version(isi); isi->isi_num_vectors = IOSAPIC_IRDT_MAX_ENTRY(isi->isi_version) + 1; diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c index a99e385c68bd..732b516c7bf8 100644 --- a/drivers/parisc/lba_pci.c +++ b/drivers/parisc/lba_pci.c @@ -1134,7 +1134,7 @@ lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev) ** Postable I/O port space is per PCI host adapter. 
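These parisc hunks, like the ti-opp-supply and drivers/pci changes nearby, are one mechanical rename: plain ioremap() in Linux already returns an uncached MMIO mapping, so ioremap_nocache() was only an alias. An illustrative helper showing that the call shape is unchanged:

    static void __iomem *map_hpa(struct parisc_device *dev)
    {
            /* ioremap() maps MMIO uncached by default; _nocache added nothing */
            return ioremap(dev->hpa.start, 4096);
    }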
** base of 64MB PIOP region */ - lba_dev->iop_base = ioremap_nocache(p->start, 64 * 1024 * 1024); + lba_dev->iop_base = ioremap(p->start, 64 * 1024 * 1024); sprintf(lba_dev->hba.io_name, "PCI%02x Ports", (int)lba_dev->hba.bus_num.start); @@ -1476,7 +1476,7 @@ lba_driver_probe(struct parisc_device *dev) u32 func_class; void *tmp_obj; char *version; - void __iomem *addr = ioremap_nocache(dev->hpa.start, 4096); + void __iomem *addr = ioremap(dev->hpa.start, 4096); int max; /* Read HW Rev First */ @@ -1575,7 +1575,7 @@ lba_driver_probe(struct parisc_device *dev) } else { if (!astro_iop_base) { /* Sprockets PDC uses NPIOP region */ - astro_iop_base = ioremap_nocache(LBA_PORT_BASE, 64 * 1024); + astro_iop_base = ioremap(LBA_PORT_BASE, 64 * 1024); pci_port = &lba_astro_port_ops; } @@ -1693,7 +1693,7 @@ void __init lba_init(void) */ void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask) { - void __iomem * base_addr = ioremap_nocache(lba->hpa.start, 4096); + void __iomem * base_addr = ioremap(lba->hpa.start, 4096); imask <<= 2; /* adjust for hints - 2 more bits */ diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index de8e4e347249..7e112829d250 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c @@ -1513,7 +1513,7 @@ sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num) static void __iomem *ioc_remap(struct sba_device *sba_dev, unsigned int offset) { - return ioremap_nocache(sba_dev->dev->hpa.start + offset, SBA_FUNC_SIZE); + return ioremap(sba_dev->dev->hpa.start + offset, SBA_FUNC_SIZE); } static void sba_hw_init(struct sba_device *sba_dev) @@ -1883,7 +1883,7 @@ static int __init sba_driver_callback(struct parisc_device *dev) u32 func_class; int i; char *version; - void __iomem *sba_addr = ioremap_nocache(dev->hpa.start, SBA_FUNC_SIZE); + void __iomem *sba_addr = ioremap(dev->hpa.start, SBA_FUNC_SIZE); #ifdef CONFIG_PROC_FS struct proc_dir_entry *root; #endif diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c index b20651cea09f..9bf7fa99b103 100644 --- a/drivers/pci/controller/dwc/pci-dra7xx.c +++ b/drivers/pci/controller/dwc/pci-dra7xx.c @@ -719,7 +719,7 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf"); - base = devm_ioremap_nocache(dev, res->start, resource_size(res)); + base = devm_ioremap(dev, res->start, resource_size(res)); if (!base) return -ENOMEM; diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index 3dd2e2697294..cfeccd7e9fff 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -434,7 +434,7 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no, tbl_addr += (tbl_offset + ((interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE)); tbl_addr &= PCI_BASE_ADDRESS_MEM_MASK; - msix_tbl = ioremap_nocache(ep->phys_base + tbl_addr, + msix_tbl = ioremap(ep->phys_base + tbl_addr, PCI_MSIX_ENTRY_SIZE); if (!msix_tbl) return -EINVAL; diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c index d9b63bfa5dd7..94af6f5828a3 100644 --- a/drivers/pci/controller/pcie-rockchip-host.c +++ b/drivers/pci/controller/pcie-rockchip-host.c @@ -834,10 +834,12 @@ static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip) if (!entry) return -ENODEV; + /* store the register number offset to program RC io outbound ATU */ + offset = size >> 
20; + size = resource_size(entry->res); pci_addr = entry->res->start - entry->offset; - offset = size >> 20; for (reg_no = 0; reg_no < (size >> 20); reg_no++) { err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1 + offset, diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index c7709e49f0e4..6b43a5455c7a 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -688,7 +688,7 @@ static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries) table_offset &= PCI_MSIX_TABLE_OFFSET; phys_addr = pci_resource_start(dev, bir) + table_offset; - return ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE); + return ioremap(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE); } static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index e87196cc1a7f..df21e3227b57 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -184,7 +184,7 @@ void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar) pci_warn(pdev, "can't ioremap BAR %d: %pR\n", bar, res); return NULL; } - return ioremap_nocache(res->start, resource_size(res)); + return ioremap(res->start, resource_size(res)); } EXPORT_SYMBOL_GPL(pci_ioremap_bar); diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 4937a088d7d8..a3a1a0ea64f4 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -1571,7 +1571,7 @@ static void asus_hides_smbus_lpc_ich6_suspend(struct pci_dev *dev) pci_read_config_dword(dev, 0xF0, &rcba); /* use bits 31:14, 16 kB aligned */ - asus_rcba_base = ioremap_nocache(rcba & 0xFFFFC000, 0x4000); + asus_rcba_base = ioremap(rcba & 0xFFFFC000, 0x4000); if (asus_rcba_base == NULL) return; } @@ -4784,7 +4784,7 @@ static int pci_quirk_enable_intel_lpc_acs(struct pci_dev *dev) if (!(rcba & INTEL_LPC_RCBA_ENABLE)) return -EINVAL; - rcba_mem = ioremap_nocache(rcba & INTEL_LPC_RCBA_MASK, + rcba_mem = ioremap(rcba & INTEL_LPC_RCBA_MASK, PAGE_ALIGN(INTEL_UPDCR_REG)); if (!rcba_mem) return -ENOMEM; @@ -5074,18 +5074,25 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags); #ifdef CONFIG_PCI_ATS /* - * Some devices have a broken ATS implementation causing IOMMU stalls. - * Don't use ATS for those devices. + * Some devices require additional driver setup to enable ATS. Don't use + * ATS for those devices as ATS will be enabled before the driver has had a + * chance to load and configure the device. 
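The replacement quirk that follows narrows the old blanket disable: for Navi14 (device 0x7340) the early return keeps ATS on every revision except 0xc5, while the other listed GPUs still lose ATS unconditionally. A sketch of such a revision-gated fixup, with an illustrative function name:

    static void quirk_no_ats_rev_gated(struct pci_dev *pdev)
    {
            /* only revision 0xc5 of this device needs ATS hidden */
            if (pdev->revision != 0xc5)
                    return;

            pci_info(pdev, "disabling ATS\n");
            pdev->ats_cap = 0;      /* hide ATS before a driver binds */
    }
    DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_no_ats_rev_gated);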
*/ -static void quirk_no_ats(struct pci_dev *pdev) +static void quirk_amd_harvest_no_ats(struct pci_dev *pdev) { - pci_info(pdev, "disabling ATS (broken on this device)\n"); + if (pdev->device == 0x7340 && pdev->revision != 0xc5) + return; + + pci_info(pdev, "disabling ATS\n"); pdev->ats_cap = 0; } /* AMD Stoney platform GPU */ -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_no_ats); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats); +/* AMD Iceland dGPU */ +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats); +/* AMD Navi14 dGPU */ +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats); #endif /* CONFIG_PCI_ATS */ /* Freescale PCIe doesn't support MSI in RC mode */ diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c index 773128f411f1..d704eccc548f 100644 --- a/drivers/perf/arm_smmuv3_pmu.c +++ b/drivers/perf/arm_smmuv3_pmu.c @@ -814,7 +814,7 @@ static int smmu_pmu_probe(struct platform_device *pdev) if (err) { dev_err(dev, "Error %d registering hotplug, PMU @%pa\n", err, &res_0->start); - goto out_cpuhp_err; + return err; } err = perf_pmu_register(&smmu_pmu->pmu, name, -1); @@ -833,8 +833,6 @@ static int smmu_pmu_probe(struct platform_device *pdev) out_unregister: cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node); -out_cpuhp_err: - put_cpu(); return err; } diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c index 55083c67b2bb..95dca2cb5265 100644 --- a/drivers/perf/fsl_imx8_ddr_perf.c +++ b/drivers/perf/fsl_imx8_ddr_perf.c @@ -633,13 +633,17 @@ static int ddr_perf_probe(struct platform_device *pdev) if (ret < 0) { dev_err(&pdev->dev, "cpuhp_setup_state_multi failed\n"); - goto ddr_perf_err; + goto cpuhp_state_err; } pmu->cpuhp_state = ret; /* Register the pmu instance for cpu hotplug */ - cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node); + ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node); + if (ret) { + dev_err(&pdev->dev, "Error %d registering hotplug\n", ret); + goto cpuhp_instance_err; + } /* Request irq */ irq = of_irq_get(np, 0); @@ -673,9 +677,10 @@ static int ddr_perf_probe(struct platform_device *pdev) return 0; ddr_perf_err: - if (pmu->cpuhp_state) - cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node); - + cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node); +cpuhp_instance_err: + cpuhp_remove_multi_state(pmu->cpuhp_state); +cpuhp_state_err: ida_simple_remove(&ddr_ida, pmu->id); dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret); return ret; @@ -686,6 +691,7 @@ static int ddr_perf_remove(struct platform_device *pdev) struct ddr_pmu *pmu = platform_get_drvdata(pdev); cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node); + cpuhp_remove_multi_state(pmu->cpuhp_state); irq_set_affinity_hint(pmu->irq, NULL); perf_pmu_unregister(&pmu->pmu); diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c index 96183e31b96a..584de8f807cc 100644 --- a/drivers/perf/hisilicon/hisi_uncore_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c @@ -337,38 +337,44 @@ void hisi_uncore_pmu_disable(struct pmu *pmu) hisi_pmu->ops->stop_counters(hisi_pmu); } + /* - * Read Super CPU cluster and CPU cluster ID from MPIDR_EL1. 
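The hisi_uncore_pmu hunk continuing below replaces this comment with a three-case encoding table (MT TSV110, other MT parts, non-MT parts) and flattens the nested conditionals accordingly. A standalone worked example for the MT TSV110 split of Aff2 into SCCL = Aff2[7:3] and CCL = Aff2[2:0], using a hypothetical MPIDR affinity value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint8_t aff2 = 0x2b;    /* hypothetical Aff2 = 0b00101011 */

            /* prints "sccl=5 ccl=3" */
            printf("sccl=%u ccl=%u\n",
                   (unsigned int)(aff2 >> 3), (unsigned int)(aff2 & 0x7));
            return 0;
    }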
- * If multi-threading is supported, On Huawei Kunpeng 920 SoC whose cpu - * core is tsv110, CCL_ID is the low 3-bits in MPIDR[Aff2] and SCCL_ID - * is the upper 5-bits of Aff2 field; while for other cpu types, SCCL_ID - * is in MPIDR[Aff3] and CCL_ID is in MPIDR[Aff2], if not, SCCL_ID - * is in MPIDR[Aff2] and CCL_ID is in MPIDR[Aff1]. + * The Super CPU Cluster (SCCL) and CPU Cluster (CCL) IDs can be + * determined from the MPIDR_EL1, but the encoding varies by CPU: + * + * - For MT variants of TSV110: + * SCCL is Aff2[7:3], CCL is Aff2[2:0] + * + * - For other MT parts: + * SCCL is Aff3[7:0], CCL is Aff2[7:0] + * + * - For non-MT parts: + * SCCL is Aff2[7:0], CCL is Aff1[7:0] */ -static void hisi_read_sccl_and_ccl_id(int *sccl_id, int *ccl_id) +static void hisi_read_sccl_and_ccl_id(int *scclp, int *cclp) { u64 mpidr = read_cpuid_mpidr(); - - if (mpidr & MPIDR_MT_BITMASK) { - if (read_cpuid_part_number() == HISI_CPU_PART_TSV110) { - int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2); - - if (sccl_id) - *sccl_id = aff2 >> 3; - if (ccl_id) - *ccl_id = aff2 & 0x7; - } else { - if (sccl_id) - *sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 3); - if (ccl_id) - *ccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2); - } + int aff3 = MPIDR_AFFINITY_LEVEL(mpidr, 3); + int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2); + int aff1 = MPIDR_AFFINITY_LEVEL(mpidr, 1); + bool mt = mpidr & MPIDR_MT_BITMASK; + int sccl, ccl; + + if (mt && read_cpuid_part_number() == HISI_CPU_PART_TSV110) { + sccl = aff2 >> 3; + ccl = aff2 & 0x7; + } else if (mt) { + sccl = aff3; + ccl = aff2; } else { - if (sccl_id) - *sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2); - if (ccl_id) - *ccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 1); + sccl = aff2; + ccl = aff1; } + + if (scclp) + *scclp = sccl; + if (cclp) + *cclp = ccl; } /* diff --git a/drivers/phy/motorola/phy-cpcap-usb.c b/drivers/phy/motorola/phy-cpcap-usb.c index ead06c6c2601..12e71a315a2c 100644 --- a/drivers/phy/motorola/phy-cpcap-usb.c +++ b/drivers/phy/motorola/phy-cpcap-usb.c @@ -115,7 +115,7 @@ struct cpcap_usb_ints_state { enum cpcap_gpio_mode { CPCAP_DM_DP, CPCAP_MDM_RX_TX, - CPCAP_UNKNOWN, + CPCAP_UNKNOWN_DISABLED, /* Seems to disable USB lines */ CPCAP_OTG_DM_DP, }; @@ -134,6 +134,8 @@ struct cpcap_phy_ddata { struct iio_channel *id; struct regulator *vusb; atomic_t active; + unsigned int vbus_provider:1; + unsigned int docked:1; }; static bool cpcap_usb_vbus_valid(struct cpcap_phy_ddata *ddata) @@ -207,6 +209,19 @@ static int cpcap_phy_get_ints_state(struct cpcap_phy_ddata *ddata, static int cpcap_usb_set_uart_mode(struct cpcap_phy_ddata *ddata); static int cpcap_usb_set_usb_mode(struct cpcap_phy_ddata *ddata); +static void cpcap_usb_try_musb_mailbox(struct cpcap_phy_ddata *ddata, + enum musb_vbus_id_status status) +{ + int error; + + error = musb_mailbox(status); + if (!error) + return; + + dev_dbg(ddata->dev, "%s: musb_mailbox failed: %i\n", + __func__, error); +} + static void cpcap_usb_detect(struct work_struct *work) { struct cpcap_phy_ddata *ddata; @@ -220,16 +235,66 @@ static void cpcap_usb_detect(struct work_struct *work) if (error) return; - if (s.id_ground) { - dev_dbg(ddata->dev, "id ground, USB host mode\n"); + vbus = cpcap_usb_vbus_valid(ddata); + + /* We need to kick the VBUS as USB A-host */ + if (s.id_ground && ddata->vbus_provider) { + dev_dbg(ddata->dev, "still in USB A-host mode, kicking VBUS\n"); + + cpcap_usb_try_musb_mailbox(ddata, MUSB_ID_GROUND); + + error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC3, + CPCAP_BIT_VBUSSTBY_EN | + CPCAP_BIT_VBUSEN_SPI, + CPCAP_BIT_VBUSEN_SPI); + if 
(error) + goto out_err; + + return; + } + + if (vbus && s.id_ground && ddata->docked) { + dev_dbg(ddata->dev, "still docked as A-host, signal ID down\n"); + + cpcap_usb_try_musb_mailbox(ddata, MUSB_ID_GROUND); + + return; + } + + /* No VBUS needed with docks */ + if (vbus && s.id_ground && !ddata->vbus_provider) { + dev_dbg(ddata->dev, "connected to a dock\n"); + + ddata->docked = true; + error = cpcap_usb_set_usb_mode(ddata); if (error) goto out_err; - error = musb_mailbox(MUSB_ID_GROUND); + cpcap_usb_try_musb_mailbox(ddata, MUSB_ID_GROUND); + + /* + * Force check state again after musb has reoriented, + * otherwise devices won't enumerate after loading PHY + * driver. + */ + schedule_delayed_work(&ddata->detect_work, + msecs_to_jiffies(1000)); + + return; + } + + if (s.id_ground && !ddata->docked) { + dev_dbg(ddata->dev, "id ground, USB host mode\n"); + + ddata->vbus_provider = true; + + error = cpcap_usb_set_usb_mode(ddata); if (error) goto out_err; + cpcap_usb_try_musb_mailbox(ddata, MUSB_ID_GROUND); + error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC3, CPCAP_BIT_VBUSSTBY_EN | CPCAP_BIT_VBUSEN_SPI, @@ -248,43 +313,26 @@ static void cpcap_usb_detect(struct work_struct *work) vbus = cpcap_usb_vbus_valid(ddata); + /* Otherwise assume we're connected to a USB host */ if (vbus) { - /* Are we connected to a docking station with vbus? */ - if (s.id_ground) { - dev_dbg(ddata->dev, "connected to a dock\n"); - - /* No VBUS needed with docks */ - error = cpcap_usb_set_usb_mode(ddata); - if (error) - goto out_err; - error = musb_mailbox(MUSB_ID_GROUND); - if (error) - goto out_err; - - return; - } - - /* Otherwise assume we're connected to a USB host */ dev_dbg(ddata->dev, "connected to USB host\n"); error = cpcap_usb_set_usb_mode(ddata); if (error) goto out_err; - error = musb_mailbox(MUSB_VBUS_VALID); - if (error) - goto out_err; + cpcap_usb_try_musb_mailbox(ddata, MUSB_VBUS_VALID); return; } + ddata->vbus_provider = false; + ddata->docked = false; + cpcap_usb_try_musb_mailbox(ddata, MUSB_VBUS_OFF); + /* Default to debug UART mode */ error = cpcap_usb_set_uart_mode(ddata); if (error) goto out_err; - error = musb_mailbox(MUSB_VBUS_OFF); - if (error) - goto out_err; - dev_dbg(ddata->dev, "set UART mode\n"); return; @@ -376,7 +424,8 @@ static int cpcap_usb_set_uart_mode(struct cpcap_phy_ddata *ddata) { int error; - error = cpcap_usb_gpio_set_mode(ddata, CPCAP_DM_DP); + /* Disable lines to prevent glitches from waking up mdm6600 */ + error = cpcap_usb_gpio_set_mode(ddata, CPCAP_UNKNOWN_DISABLED); if (error) goto out_err; @@ -403,6 +452,11 @@ static int cpcap_usb_set_uart_mode(struct cpcap_phy_ddata *ddata) if (error) goto out_err; + /* Enable UART mode */ + error = cpcap_usb_gpio_set_mode(ddata, CPCAP_DM_DP); + if (error) + goto out_err; + return 0; out_err: @@ -415,7 +469,8 @@ static int cpcap_usb_set_usb_mode(struct cpcap_phy_ddata *ddata) { int error; - error = cpcap_usb_gpio_set_mode(ddata, CPCAP_OTG_DM_DP); + /* Disable lines to prevent glitches from waking up mdm6600 */ + error = cpcap_usb_gpio_set_mode(ddata, CPCAP_UNKNOWN_DISABLED); if (error) return error; @@ -434,12 +489,6 @@ static int cpcap_usb_set_usb_mode(struct cpcap_phy_ddata *ddata) if (error) goto out_err; - error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC2, - CPCAP_BIT_USBXCVREN, - CPCAP_BIT_USBXCVREN); - if (error) - goto out_err; - error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC3, CPCAP_BIT_PU_SPI | CPCAP_BIT_DMPD_SPI | @@ -455,6 +504,11 @@ static int cpcap_usb_set_usb_mode(struct cpcap_phy_ddata *ddata) if 
(error) goto out_err; + /* Enable USB mode */ + error = cpcap_usb_gpio_set_mode(ddata, CPCAP_OTG_DM_DP); + if (error) + goto out_err; + return 0; out_err: @@ -649,9 +703,7 @@ static int cpcap_usb_phy_remove(struct platform_device *pdev) if (error) dev_err(ddata->dev, "could not set UART mode\n"); - error = musb_mailbox(MUSB_VBUS_OFF); - if (error) - dev_err(ddata->dev, "could not set mailbox\n"); + cpcap_usb_try_musb_mailbox(ddata, MUSB_VBUS_OFF); usb_remove_phy(&ddata->phy); cancel_delayed_work_sync(&ddata->detect_work); diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c index ee184d5607bd..f20524f0c21d 100644 --- a/drivers/phy/motorola/phy-mapphone-mdm6600.c +++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c @@ -200,7 +200,7 @@ static void phy_mdm6600_status(struct work_struct *work) struct phy_mdm6600 *ddata; struct device *dev; DECLARE_BITMAP(values, PHY_MDM6600_NR_STATUS_LINES); - int error, i, val = 0; + int error; ddata = container_of(work, struct phy_mdm6600, status_work.work); dev = ddata->dev; @@ -212,16 +212,11 @@ static void phy_mdm6600_status(struct work_struct *work) if (error) return; - for (i = 0; i < PHY_MDM6600_NR_STATUS_LINES; i++) { - val |= test_bit(i, values) << i; - dev_dbg(ddata->dev, "XXX %s: i: %i values[i]: %i val: %i\n", - __func__, i, test_bit(i, values), val); - } - ddata->status = values[0]; + ddata->status = values[0] & ((1 << PHY_MDM6600_NR_STATUS_LINES) - 1); dev_info(dev, "modem status: %i %s\n", ddata->status, - phy_mdm6600_status_name[ddata->status & 7]); + phy_mdm6600_status_name[ddata->status]); complete(&ddata->ack); } diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c index 091e20303a14..66f91726b8b2 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp.c @@ -66,7 +66,7 @@ /* QPHY_V3_PCS_MISC_CLAMP_ENABLE register bits */ #define CLAMP_EN BIT(0) /* enables i/o clamp_n */ -#define PHY_INIT_COMPLETE_TIMEOUT 1000 +#define PHY_INIT_COMPLETE_TIMEOUT 10000 #define POWER_DOWN_DELAY_US_MIN 10 #define POWER_DOWN_DELAY_US_MAX 11 diff --git a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c index 2b97fb1185a0..9ca20c947283 100644 --- a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c +++ b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c @@ -603,6 +603,8 @@ static long inno_hdmi_phy_rk3228_clk_round_rate(struct clk_hw *hw, { const struct pre_pll_config *cfg = pre_pll_cfg_table; + rate = (rate / 1000) * 1000; + for (; cfg->pixclock != 0; cfg++) if (cfg->pixclock == rate && !cfg->fracdiv) break; @@ -755,6 +757,8 @@ static long inno_hdmi_phy_rk3328_clk_round_rate(struct clk_hw *hw, { const struct pre_pll_config *cfg = pre_pll_cfg_table; + rate = (rate / 1000) * 1000; + for (; cfg->pixclock != 0; cfg++) if (cfg->pixclock == rate) break; diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index 3bfbf2ff6e2b..df0ef69dd474 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig @@ -422,6 +422,7 @@ config PINCTRL_TB10X config PINCTRL_EQUILIBRIUM tristate "Generic pinctrl and GPIO driver for Intel Lightning Mountain SoC" + depends on OF && HAS_IOMEM select PINMUX select PINCONF select GPIOLIB diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c index c6800d220920..bb07024d22ed 100644 --- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c +++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c @@ -1088,60 +1088,52 @@ SSSF_PIN_DECL(AF15, GPIOV7, LPCSMI, 
SIG_DESC_SET(SCU434, 15)); #define AB7 176 SIG_EXPR_LIST_DECL_SESG(AB7, LAD0, LPC, SIG_DESC_SET(SCU434, 16), - SIG_DESC_CLEAR(SCU510, 6)); -SIG_EXPR_LIST_DECL_SESG(AB7, ESPID0, ESPI, SIG_DESC_SET(SCU434, 16), SIG_DESC_SET(SCU510, 6)); +SIG_EXPR_LIST_DECL_SESG(AB7, ESPID0, ESPI, SIG_DESC_SET(SCU434, 16)); PIN_DECL_2(AB7, GPIOW0, LAD0, ESPID0); #define AB8 177 SIG_EXPR_LIST_DECL_SESG(AB8, LAD1, LPC, SIG_DESC_SET(SCU434, 17), - SIG_DESC_CLEAR(SCU510, 6)); -SIG_EXPR_LIST_DECL_SESG(AB8, ESPID1, ESPI, SIG_DESC_SET(SCU434, 17), SIG_DESC_SET(SCU510, 6)); +SIG_EXPR_LIST_DECL_SESG(AB8, ESPID1, ESPI, SIG_DESC_SET(SCU434, 17)); PIN_DECL_2(AB8, GPIOW1, LAD1, ESPID1); #define AC8 178 SIG_EXPR_LIST_DECL_SESG(AC8, LAD2, LPC, SIG_DESC_SET(SCU434, 18), - SIG_DESC_CLEAR(SCU510, 6)); -SIG_EXPR_LIST_DECL_SESG(AC8, ESPID2, ESPI, SIG_DESC_SET(SCU434, 18), SIG_DESC_SET(SCU510, 6)); +SIG_EXPR_LIST_DECL_SESG(AC8, ESPID2, ESPI, SIG_DESC_SET(SCU434, 18)); PIN_DECL_2(AC8, GPIOW2, LAD2, ESPID2); #define AC7 179 SIG_EXPR_LIST_DECL_SESG(AC7, LAD3, LPC, SIG_DESC_SET(SCU434, 19), - SIG_DESC_CLEAR(SCU510, 6)); -SIG_EXPR_LIST_DECL_SESG(AC7, ESPID3, ESPI, SIG_DESC_SET(SCU434, 19), SIG_DESC_SET(SCU510, 6)); +SIG_EXPR_LIST_DECL_SESG(AC7, ESPID3, ESPI, SIG_DESC_SET(SCU434, 19)); PIN_DECL_2(AC7, GPIOW3, LAD3, ESPID3); #define AE7 180 SIG_EXPR_LIST_DECL_SESG(AE7, LCLK, LPC, SIG_DESC_SET(SCU434, 20), - SIG_DESC_CLEAR(SCU510, 6)); -SIG_EXPR_LIST_DECL_SESG(AE7, ESPICK, ESPI, SIG_DESC_SET(SCU434, 20), SIG_DESC_SET(SCU510, 6)); +SIG_EXPR_LIST_DECL_SESG(AE7, ESPICK, ESPI, SIG_DESC_SET(SCU434, 20)); PIN_DECL_2(AE7, GPIOW4, LCLK, ESPICK); #define AF7 181 SIG_EXPR_LIST_DECL_SESG(AF7, LFRAME, LPC, SIG_DESC_SET(SCU434, 21), - SIG_DESC_CLEAR(SCU510, 6)); -SIG_EXPR_LIST_DECL_SESG(AF7, ESPICS, ESPI, SIG_DESC_SET(SCU434, 21), SIG_DESC_SET(SCU510, 6)); +SIG_EXPR_LIST_DECL_SESG(AF7, ESPICS, ESPI, SIG_DESC_SET(SCU434, 21)); PIN_DECL_2(AF7, GPIOW5, LFRAME, ESPICS); #define AD7 182 SIG_EXPR_LIST_DECL_SESG(AD7, LSIRQ, LSIRQ, SIG_DESC_SET(SCU434, 22), - SIG_DESC_CLEAR(SCU510, 6)); -SIG_EXPR_LIST_DECL_SESG(AD7, ESPIALT, ESPIALT, SIG_DESC_SET(SCU434, 22), SIG_DESC_SET(SCU510, 6)); +SIG_EXPR_LIST_DECL_SESG(AD7, ESPIALT, ESPIALT, SIG_DESC_SET(SCU434, 22)); PIN_DECL_2(AD7, GPIOW6, LSIRQ, ESPIALT); FUNC_GROUP_DECL(LSIRQ, AD7); FUNC_GROUP_DECL(ESPIALT, AD7); #define AD8 183 SIG_EXPR_LIST_DECL_SESG(AD8, LPCRST, LPC, SIG_DESC_SET(SCU434, 23), - SIG_DESC_CLEAR(SCU510, 6)); -SIG_EXPR_LIST_DECL_SESG(AD8, ESPIRST, ESPI, SIG_DESC_SET(SCU434, 23), SIG_DESC_SET(SCU510, 6)); +SIG_EXPR_LIST_DECL_SESG(AD8, ESPIRST, ESPI, SIG_DESC_SET(SCU434, 23)); PIN_DECL_2(AD8, GPIOW7, LPCRST, ESPIRST); FUNC_GROUP_DECL(LPC, AB7, AB8, AC8, AC7, AE7, AF7, AD8); diff --git a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c index 32f268f173d1..57044ab376d3 100644 --- a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c +++ b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c @@ -1049,7 +1049,7 @@ static int ns2_pinmux_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!res) return -EINVAL; - pinctrl->base1 = devm_ioremap_nocache(&pdev->dev, res->start, + pinctrl->base1 = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!pinctrl->base1) { dev_err(&pdev->dev, "unable to map I/O space\n"); diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c index 3756fc9d5826..f1d60a708815 100644 --- a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c +++ b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c @@ -578,7 +578,7 @@ static int 
nsp_pinmux_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!res) return -EINVAL; - pinctrl->base1 = devm_ioremap_nocache(&pdev->dev, res->start, + pinctrl->base1 = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!pinctrl->base1) { dev_err(&pdev->dev, "unable to map I/O space\n"); diff --git a/drivers/pinctrl/cirrus/Kconfig b/drivers/pinctrl/cirrus/Kconfig index f1806fd781a0..530426a74f75 100644 --- a/drivers/pinctrl/cirrus/Kconfig +++ b/drivers/pinctrl/cirrus/Kconfig @@ -2,6 +2,7 @@ config PINCTRL_LOCHNAGAR tristate "Cirrus Logic Lochnagar pinctrl driver" depends on MFD_LOCHNAGAR + select GPIOLIB select PINMUX select PINCONF select GENERIC_PINCONF diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index 2bbd8ee93507..46600d9380ea 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -1535,15 +1535,8 @@ int pinctrl_init_done(struct device *dev) return ret; } -#ifdef CONFIG_PM - -/** - * pinctrl_pm_select_state() - select pinctrl state for PM - * @dev: device to select default state for - * @state: state to set - */ -static int pinctrl_pm_select_state(struct device *dev, - struct pinctrl_state *state) +static int pinctrl_select_bound_state(struct device *dev, + struct pinctrl_state *state) { struct dev_pin_info *pins = dev->pins; int ret; @@ -1558,15 +1551,27 @@ static int pinctrl_pm_select_state(struct device *dev, } /** - * pinctrl_pm_select_default_state() - select default pinctrl state for PM + * pinctrl_select_default_state() - select default pinctrl state * @dev: device to select default state for */ -int pinctrl_pm_select_default_state(struct device *dev) +int pinctrl_select_default_state(struct device *dev) { if (!dev->pins) return 0; - return pinctrl_pm_select_state(dev, dev->pins->default_state); + return pinctrl_select_bound_state(dev, dev->pins->default_state); +} +EXPORT_SYMBOL_GPL(pinctrl_select_default_state); + +#ifdef CONFIG_PM + +/** + * pinctrl_pm_select_default_state() - select default pinctrl state for PM + * @dev: device to select default state for + */ +int pinctrl_pm_select_default_state(struct device *dev) +{ + return pinctrl_select_default_state(dev); } EXPORT_SYMBOL_GPL(pinctrl_pm_select_default_state); @@ -1579,7 +1584,7 @@ int pinctrl_pm_select_sleep_state(struct device *dev) if (!dev->pins) return 0; - return pinctrl_pm_select_state(dev, dev->pins->sleep_state); + return pinctrl_select_bound_state(dev, dev->pins->sleep_state); } EXPORT_SYMBOL_GPL(pinctrl_pm_select_sleep_state); @@ -1592,7 +1597,7 @@ int pinctrl_pm_select_idle_state(struct device *dev) if (!dev->pins) return 0; - return pinctrl_pm_select_state(dev, dev->pins->idle_state); + return pinctrl_select_bound_state(dev, dev->pins->idle_state); } EXPORT_SYMBOL_GPL(pinctrl_pm_select_idle_state); #endif diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c index 7e29e3fecdb2..c00d0022d311 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c +++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c @@ -611,7 +611,7 @@ int imx1_pinctrl_core_probe(struct platform_device *pdev, if (!res) return -ENOENT; - ipctl->base = devm_ioremap_nocache(&pdev->dev, res->start, + ipctl->base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!ipctl->base) return -ENOMEM; diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c index 9ffb22211d2b..55141d5de29e 100644 --- a/drivers/pinctrl/intel/pinctrl-baytrail.c +++ 
b/drivers/pinctrl/intel/pinctrl-baytrail.c @@ -110,7 +110,6 @@ struct byt_gpio { struct platform_device *pdev; struct pinctrl_dev *pctl_dev; struct pinctrl_desc pctl_desc; - raw_spinlock_t lock; const struct intel_pinctrl_soc_data *soc_data; struct intel_community *communities_copy; struct byt_gpio_pin_context *saved_context; @@ -494,34 +493,34 @@ static const struct intel_pinctrl_soc_data byt_sus_soc_data = { }; static const struct pinctrl_pin_desc byt_ncore_pins[] = { - PINCTRL_PIN(0, "GPIO_NCORE0"), - PINCTRL_PIN(1, "GPIO_NCORE1"), - PINCTRL_PIN(2, "GPIO_NCORE2"), - PINCTRL_PIN(3, "GPIO_NCORE3"), - PINCTRL_PIN(4, "GPIO_NCORE4"), - PINCTRL_PIN(5, "GPIO_NCORE5"), - PINCTRL_PIN(6, "GPIO_NCORE6"), - PINCTRL_PIN(7, "GPIO_NCORE7"), - PINCTRL_PIN(8, "GPIO_NCORE8"), - PINCTRL_PIN(9, "GPIO_NCORE9"), - PINCTRL_PIN(10, "GPIO_NCORE10"), - PINCTRL_PIN(11, "GPIO_NCORE11"), - PINCTRL_PIN(12, "GPIO_NCORE12"), - PINCTRL_PIN(13, "GPIO_NCORE13"), - PINCTRL_PIN(14, "GPIO_NCORE14"), - PINCTRL_PIN(15, "GPIO_NCORE15"), - PINCTRL_PIN(16, "GPIO_NCORE16"), - PINCTRL_PIN(17, "GPIO_NCORE17"), - PINCTRL_PIN(18, "GPIO_NCORE18"), - PINCTRL_PIN(19, "GPIO_NCORE19"), - PINCTRL_PIN(20, "GPIO_NCORE20"), - PINCTRL_PIN(21, "GPIO_NCORE21"), - PINCTRL_PIN(22, "GPIO_NCORE22"), - PINCTRL_PIN(23, "GPIO_NCORE23"), - PINCTRL_PIN(24, "GPIO_NCORE24"), - PINCTRL_PIN(25, "GPIO_NCORE25"), - PINCTRL_PIN(26, "GPIO_NCORE26"), - PINCTRL_PIN(27, "GPIO_NCORE27"), + PINCTRL_PIN(0, "HV_DDI0_HPD"), + PINCTRL_PIN(1, "HV_DDI0_DDC_SDA"), + PINCTRL_PIN(2, "HV_DDI0_DDC_SCL"), + PINCTRL_PIN(3, "PANEL0_VDDEN"), + PINCTRL_PIN(4, "PANEL0_BKLTEN"), + PINCTRL_PIN(5, "PANEL0_BKLTCTL"), + PINCTRL_PIN(6, "HV_DDI1_HPD"), + PINCTRL_PIN(7, "HV_DDI1_DDC_SDA"), + PINCTRL_PIN(8, "HV_DDI1_DDC_SCL"), + PINCTRL_PIN(9, "PANEL1_VDDEN"), + PINCTRL_PIN(10, "PANEL1_BKLTEN"), + PINCTRL_PIN(11, "PANEL1_BKLTCTL"), + PINCTRL_PIN(12, "GP_INTD_DSI_TE1"), + PINCTRL_PIN(13, "HV_DDI2_DDC_SDA"), + PINCTRL_PIN(14, "HV_DDI2_DDC_SCL"), + PINCTRL_PIN(15, "GP_CAMERASB00"), + PINCTRL_PIN(16, "GP_CAMERASB01"), + PINCTRL_PIN(17, "GP_CAMERASB02"), + PINCTRL_PIN(18, "GP_CAMERASB03"), + PINCTRL_PIN(19, "GP_CAMERASB04"), + PINCTRL_PIN(20, "GP_CAMERASB05"), + PINCTRL_PIN(21, "GP_CAMERASB06"), + PINCTRL_PIN(22, "GP_CAMERASB07"), + PINCTRL_PIN(23, "GP_CAMERASB08"), + PINCTRL_PIN(24, "GP_CAMERASB09"), + PINCTRL_PIN(25, "GP_CAMERASB10"), + PINCTRL_PIN(26, "GP_CAMERASB11"), + PINCTRL_PIN(27, "GP_INTD_DSI_TE2"), }; static const unsigned int byt_ncore_pins_map[BYT_NGPIO_NCORE] = { @@ -549,6 +548,8 @@ static const struct intel_pinctrl_soc_data *byt_soc_data[] = { NULL }; +static DEFINE_RAW_SPINLOCK(byt_lock); + static struct intel_community *byt_get_community(struct byt_gpio *vg, unsigned int pin) { @@ -658,7 +659,7 @@ static void byt_set_group_simple_mux(struct byt_gpio *vg, unsigned long flags; int i; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); for (i = 0; i < group.npins; i++) { void __iomem *padcfg0; @@ -678,7 +679,7 @@ static void byt_set_group_simple_mux(struct byt_gpio *vg, writel(value, padcfg0); } - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); } static void byt_set_group_mixed_mux(struct byt_gpio *vg, @@ -688,7 +689,7 @@ static void byt_set_group_mixed_mux(struct byt_gpio *vg, unsigned long flags; int i; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); for (i = 0; i < group.npins; i++) { void __iomem *padcfg0; @@ -708,7 +709,7 @@ static void 
byt_set_group_mixed_mux(struct byt_gpio *vg, writel(value, padcfg0); } - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); } static int byt_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector, @@ -749,11 +750,11 @@ static void byt_gpio_clear_triggering(struct byt_gpio *vg, unsigned int offset) unsigned long flags; u32 value; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); value = readl(reg); value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL); writel(value, reg); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); } static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev, @@ -765,7 +766,7 @@ static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev, u32 value, gpio_mux; unsigned long flags; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); /* * In most cases, func pin mux 000 means GPIO function. @@ -787,7 +788,7 @@ static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev, "pin %u forcibly re-configured as GPIO\n", offset); } - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); pm_runtime_get(&vg->pdev->dev); @@ -815,7 +816,7 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev, unsigned long flags; u32 value; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); value = readl(val_reg); value &= ~BYT_DIR_MASK; @@ -832,7 +833,7 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev, "Potential Error: Setting GPIO with direct_irq_en to output"); writel(value, val_reg); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); return 0; } @@ -901,11 +902,11 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset, u32 conf, pull, val, debounce; u16 arg = 0; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); conf = readl(conf_reg); pull = conf & BYT_PULL_ASSIGN_MASK; val = readl(val_reg); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); switch (param) { case PIN_CONFIG_BIAS_DISABLE: @@ -932,9 +933,9 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset, if (!(conf & BYT_DEBOUNCE_EN)) return -EINVAL; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); debounce = readl(db_reg); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); switch (debounce & BYT_DEBOUNCE_PULSE_MASK) { case BYT_DEBOUNCE_PULSE_375US: @@ -986,7 +987,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev, u32 conf, val, debounce; int i, ret = 0; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); conf = readl(conf_reg); val = readl(val_reg); @@ -1094,7 +1095,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev, if (!ret) writel(conf, conf_reg); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); return ret; } @@ -1119,9 +1120,9 @@ static int byt_gpio_get(struct gpio_chip *chip, unsigned int offset) unsigned long flags; u32 val; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); val = readl(reg); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); return !!(val & BYT_LEVEL); } @@ -1136,13 +1137,13 @@ static void 
byt_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) if (!reg) return; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); old_val = readl(reg); if (value) writel(old_val | BYT_LEVEL, reg); else writel(old_val & ~BYT_LEVEL, reg); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); } static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) @@ -1155,9 +1156,9 @@ static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) if (!reg) return -EINVAL; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); value = readl(reg); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); if (!(value & BYT_OUTPUT_EN)) return 0; @@ -1200,14 +1201,14 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip) const char *label; unsigned int pin; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); pin = vg->soc_data->pins[i].number; reg = byt_gpio_reg(vg, pin, BYT_CONF0_REG); if (!reg) { seq_printf(s, "Could not retrieve pin %i conf0 reg\n", pin); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); continue; } conf0 = readl(reg); @@ -1216,11 +1217,11 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip) if (!reg) { seq_printf(s, "Could not retrieve pin %i val reg\n", pin); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); continue; } val = readl(reg); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); comm = byt_get_community(vg, pin); if (!comm) { @@ -1304,9 +1305,9 @@ static void byt_irq_ack(struct irq_data *d) if (!reg) return; - raw_spin_lock(&vg->lock); + raw_spin_lock(&byt_lock); writel(BIT(offset % 32), reg); - raw_spin_unlock(&vg->lock); + raw_spin_unlock(&byt_lock); } static void byt_irq_mask(struct irq_data *d) @@ -1330,7 +1331,7 @@ static void byt_irq_unmask(struct irq_data *d) if (!reg) return; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); value = readl(reg); switch (irqd_get_trigger_type(d)) { @@ -1353,7 +1354,7 @@ static void byt_irq_unmask(struct irq_data *d) writel(value, reg); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); } static int byt_irq_type(struct irq_data *d, unsigned int type) @@ -1367,7 +1368,7 @@ static int byt_irq_type(struct irq_data *d, unsigned int type) if (!reg || offset >= vg->chip.ngpio) return -EINVAL; - raw_spin_lock_irqsave(&vg->lock, flags); + raw_spin_lock_irqsave(&byt_lock, flags); value = readl(reg); WARN(value & BYT_DIRECT_IRQ_EN, @@ -1389,7 +1390,7 @@ static int byt_irq_type(struct irq_data *d, unsigned int type) else if (type & IRQ_TYPE_LEVEL_MASK) irq_set_handler_locked(d, handle_level_irq); - raw_spin_unlock_irqrestore(&vg->lock, flags); + raw_spin_unlock_irqrestore(&byt_lock, flags); return 0; } @@ -1425,9 +1426,9 @@ static void byt_gpio_irq_handler(struct irq_desc *desc) continue; } - raw_spin_lock(&vg->lock); + raw_spin_lock(&byt_lock); pending = readl(reg); - raw_spin_unlock(&vg->lock); + raw_spin_unlock(&byt_lock); for_each_set_bit(pin, &pending, 32) { virq = irq_find_mapping(vg->chip.irq.domain, base + pin); generic_handle_irq(virq); @@ -1450,9 +1451,9 @@ static void byt_init_irq_valid_mask(struct gpio_chip *chip, */ } -static void byt_gpio_irq_init_hw(struct 
byt_gpio *vg) +static int byt_gpio_irq_init_hw(struct gpio_chip *chip) { - struct gpio_chip *gc = &vg->chip; + struct byt_gpio *vg = gpiochip_get_data(chip); struct device *dev = &vg->pdev->dev; void __iomem *reg; u32 base, value; @@ -1476,7 +1477,7 @@ static void byt_gpio_irq_init_hw(struct byt_gpio *vg) value = readl(reg); if (value & BYT_DIRECT_IRQ_EN) { - clear_bit(i, gc->irq.valid_mask); + clear_bit(i, chip->irq.valid_mask); dev_dbg(dev, "excluding GPIO %d from IRQ domain\n", i); } else if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i)) { byt_gpio_clear_triggering(vg, i); @@ -1504,6 +1505,21 @@ static void byt_gpio_irq_init_hw(struct byt_gpio *vg) "GPIO interrupt error, pins misconfigured. INT_STAT%u: 0x%08x\n", base / 32, value); } + + return 0; +} + +static int byt_gpio_add_pin_ranges(struct gpio_chip *chip) +{ + struct byt_gpio *vg = gpiochip_get_data(chip); + struct device *dev = &vg->pdev->dev; + int ret; + + ret = gpiochip_add_pin_range(chip, dev_name(dev), 0, 0, vg->soc_data->npins); + if (ret) + dev_err(dev, "failed to add GPIO pin range\n"); + + return ret; } static int byt_gpio_probe(struct byt_gpio *vg) @@ -1518,6 +1534,7 @@ static int byt_gpio_probe(struct byt_gpio *vg) gc->label = dev_name(&vg->pdev->dev); gc->base = -1; gc->can_sleep = false; + gc->add_pin_ranges = byt_gpio_add_pin_ranges; gc->parent = &vg->pdev->dev; gc->ngpio = vg->soc_data->npins; gc->irq.init_valid_mask = byt_init_irq_valid_mask; @@ -1528,33 +1545,30 @@ static int byt_gpio_probe(struct byt_gpio *vg) if (!vg->saved_context) return -ENOMEM; #endif - ret = devm_gpiochip_add_data(&vg->pdev->dev, gc, vg); - if (ret) { - dev_err(&vg->pdev->dev, "failed adding byt-gpio chip\n"); - return ret; - } - - ret = gpiochip_add_pin_range(&vg->chip, dev_name(&vg->pdev->dev), - 0, 0, vg->soc_data->npins); - if (ret) { - dev_err(&vg->pdev->dev, "failed to add GPIO pin range\n"); - return ret; - } /* set up interrupts */ irq_rc = platform_get_resource(vg->pdev, IORESOURCE_IRQ, 0); if (irq_rc && irq_rc->start) { - byt_gpio_irq_init_hw(vg); - ret = gpiochip_irqchip_add(gc, &byt_irqchip, 0, - handle_bad_irq, IRQ_TYPE_NONE); - if (ret) { - dev_err(&vg->pdev->dev, "failed to add irqchip\n"); - return ret; - } + struct gpio_irq_chip *girq; + + girq = &gc->irq; + girq->chip = &byt_irqchip; + girq->init_hw = byt_gpio_irq_init_hw; + girq->parent_handler = byt_gpio_irq_handler; + girq->num_parents = 1; + girq->parents = devm_kcalloc(&vg->pdev->dev, girq->num_parents, + sizeof(*girq->parents), GFP_KERNEL); + if (!girq->parents) + return -ENOMEM; + girq->parents[0] = (unsigned int)irq_rc->start; + girq->default_type = IRQ_TYPE_NONE; + girq->handler = handle_bad_irq; + } - gpiochip_set_chained_irqchip(gc, &byt_irqchip, - (unsigned)irq_rc->start, - byt_gpio_irq_handler); + ret = devm_gpiochip_add_data(&vg->pdev->dev, gc, vg); + if (ret) { + dev_err(&vg->pdev->dev, "failed adding byt-gpio chip\n"); + return ret; } return ret; @@ -1638,8 +1652,6 @@ static int byt_pinctrl_probe(struct platform_device *pdev) return PTR_ERR(vg->pctl_dev); } - raw_spin_lock_init(&vg->lock); - ret = byt_gpio_probe(vg); if (ret) return ret; @@ -1654,8 +1666,11 @@ static int byt_pinctrl_probe(struct platform_device *pdev) static int byt_gpio_suspend(struct device *dev) { struct byt_gpio *vg = dev_get_drvdata(dev); + unsigned long flags; int i; + raw_spin_lock_irqsave(&byt_lock, flags); + for (i = 0; i < vg->soc_data->npins; i++) { void __iomem *reg; u32 value; @@ -1676,14 +1691,18 @@ static int byt_gpio_suspend(struct device *dev) vg->saved_context[i].val 
= value; } + raw_spin_unlock_irqrestore(&byt_lock, flags); return 0; } static int byt_gpio_resume(struct device *dev) { struct byt_gpio *vg = dev_get_drvdata(dev); + unsigned long flags; int i; + raw_spin_lock_irqsave(&byt_lock, flags); + for (i = 0; i < vg->soc_data->npins; i++) { void __iomem *reg; u32 value; @@ -1721,6 +1740,7 @@ static int byt_gpio_resume(struct device *dev) } } + raw_spin_unlock_irqrestore(&byt_lock, flags); return 0; } #endif diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index 582fa8a75559..60527b93a711 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c @@ -149,6 +149,7 @@ struct chv_pin_context { * @chip: GPIO chip in this pin controller * @irqchip: IRQ chip in this pin controller * @regs: MMIO registers + * @irq: Our parent irq * @intr_lines: Stores mapping between 16 HW interrupt wires and GPIO * offset (in GPIO number space) * @community: Community this pinctrl instance represents @@ -165,6 +166,7 @@ struct chv_pinctrl { struct gpio_chip chip; struct irq_chip irqchip; void __iomem *regs; + unsigned int irq; unsigned int intr_lines[16]; const struct chv_community *community; u32 saved_intmask; @@ -1555,39 +1557,9 @@ static void chv_init_irq_valid_mask(struct gpio_chip *chip, } } -static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) +static int chv_gpio_irq_init_hw(struct gpio_chip *chip) { - const struct chv_gpio_pinrange *range; - struct gpio_chip *chip = &pctrl->chip; - bool need_valid_mask = !dmi_check_system(chv_no_valid_mask); - const struct chv_community *community = pctrl->community; - int ret, i, irq_base; - - *chip = chv_gpio_chip; - - chip->ngpio = community->pins[community->npins - 1].number + 1; - chip->label = dev_name(pctrl->dev); - chip->parent = pctrl->dev; - chip->base = -1; - if (need_valid_mask) - chip->irq.init_valid_mask = chv_init_irq_valid_mask; - - ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl); - if (ret) { - dev_err(pctrl->dev, "Failed to register gpiochip\n"); - return ret; - } - - for (i = 0; i < community->ngpio_ranges; i++) { - range = &community->gpio_ranges[i]; - ret = gpiochip_add_pin_range(chip, dev_name(pctrl->dev), - range->base, range->base, - range->npins); - if (ret) { - dev_err(pctrl->dev, "failed to add GPIO pin range\n"); - return ret; - } - } + struct chv_pinctrl *pctrl = gpiochip_get_data(chip); /* * The same set of machines in chv_no_valid_mask[] have incorrectly @@ -1596,7 +1568,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) * * See also https://bugzilla.kernel.org/show_bug.cgi?id=197953. */ - if (!need_valid_mask) { + if (!pctrl->chip.irq.init_valid_mask) { /* * Mask all interrupts the community is able to generate * but leave the ones that can only generate GPEs unmasked. 
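Both Intel conversions in this patch, Bay Trail above and Cherry Trail in the surrounding hunks, follow the same gpiolib template: instead of registering the chip and then bolting the irqchip on with gpiochip_irqchip_add() and gpiochip_set_chained_irqchip(), the driver fills in struct gpio_irq_chip up front and devm_gpiochip_add_data() invokes the init_hw() and add_pin_ranges() callbacks at the right point during registration. A minimal sketch of the pattern, with hypothetical foo_* names standing in for driver specifics:

#include <linux/gpio/driver.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

static struct irq_chip foo_irqchip = {
	.name = "foo-gpio",
	/* .irq_ack, .irq_mask, .irq_unmask, .irq_set_type as usual */
};

/* Called by gpiolib during registration, before any IRQ can be requested. */
static int foo_gpio_irq_init_hw(struct gpio_chip *chip)
{
	/* mask and clear all interrupt lines here */
	return 0;
}

/* Chained handler demultiplexing the parent interrupt to per-pin virqs. */
static void foo_gpio_irq_handler(struct irq_desc *desc)
{
}

static int foo_gpio_register(struct platform_device *pdev,
			     struct gpio_chip *gc, void *drvdata,
			     unsigned int parent_irq)
{
	struct gpio_irq_chip *girq = &gc->irq;

	girq->chip = &foo_irqchip;
	girq->init_hw = foo_gpio_irq_init_hw;
	girq->parent_handler = foo_gpio_irq_handler;
	girq->num_parents = 1;
	girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents,
				     sizeof(*girq->parents), GFP_KERNEL);
	if (!girq->parents)
		return -ENOMEM;
	girq->parents[0] = parent_irq;
	girq->default_type = IRQ_TYPE_NONE;
	girq->handler = handle_bad_irq;

	/* gpiolib now performs the irqchip setup as part of this call */
	return devm_gpiochip_add_data(&pdev->dev, gc, drvdata);
}

The ordering is the point of the rework: the hardware is quiesced by init_hw() before any handler can be installed, and the probe error paths no longer have to unwind a half-registered chip by hand.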
@@ -1608,15 +1580,47 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) /* Clear all interrupts */ chv_writel(0xffff, pctrl->regs + CHV_INTSTAT); - if (!need_valid_mask) { - irq_base = devm_irq_alloc_descs(pctrl->dev, -1, 0, - community->npins, NUMA_NO_NODE); - if (irq_base < 0) { - dev_err(pctrl->dev, "Failed to allocate IRQ numbers\n"); - return irq_base; + return 0; +} + +static int chv_gpio_add_pin_ranges(struct gpio_chip *chip) +{ + struct chv_pinctrl *pctrl = gpiochip_get_data(chip); + const struct chv_community *community = pctrl->community; + const struct chv_gpio_pinrange *range; + int ret, i; + + for (i = 0; i < community->ngpio_ranges; i++) { + range = &community->gpio_ranges[i]; + ret = gpiochip_add_pin_range(chip, dev_name(pctrl->dev), + range->base, range->base, + range->npins); + if (ret) { + dev_err(pctrl->dev, "failed to add GPIO pin range\n"); + return ret; } } + return 0; +} + +static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) +{ + const struct chv_gpio_pinrange *range; + struct gpio_chip *chip = &pctrl->chip; + bool need_valid_mask = !dmi_check_system(chv_no_valid_mask); + const struct chv_community *community = pctrl->community; + int ret, i, irq_base; + + *chip = chv_gpio_chip; + + chip->ngpio = community->pins[community->npins - 1].number + 1; + chip->label = dev_name(pctrl->dev); + chip->add_pin_ranges = chv_gpio_add_pin_ranges; + chip->parent = pctrl->dev; + chip->base = -1; + + pctrl->irq = irq; pctrl->irqchip.name = "chv-gpio"; pctrl->irqchip.irq_startup = chv_gpio_irq_startup; pctrl->irqchip.irq_ack = chv_gpio_irq_ack; @@ -1625,10 +1629,27 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) pctrl->irqchip.irq_set_type = chv_gpio_irq_type; pctrl->irqchip.flags = IRQCHIP_SKIP_SET_WAKE; - ret = gpiochip_irqchip_add(chip, &pctrl->irqchip, 0, - handle_bad_irq, IRQ_TYPE_NONE); + chip->irq.chip = &pctrl->irqchip; + chip->irq.init_hw = chv_gpio_irq_init_hw; + chip->irq.parent_handler = chv_gpio_irq_handler; + chip->irq.num_parents = 1; + chip->irq.parents = &pctrl->irq; + chip->irq.default_type = IRQ_TYPE_NONE; + chip->irq.handler = handle_bad_irq; + if (need_valid_mask) { + chip->irq.init_valid_mask = chv_init_irq_valid_mask; + } else { + irq_base = devm_irq_alloc_descs(pctrl->dev, -1, 0, + community->npins, NUMA_NO_NODE); + if (irq_base < 0) { + dev_err(pctrl->dev, "Failed to allocate IRQ numbers\n"); + return irq_base; + } + } + + ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl); if (ret) { - dev_err(pctrl->dev, "failed to add IRQ chip\n"); + dev_err(pctrl->dev, "Failed to register gpiochip\n"); return ret; } @@ -1642,8 +1663,6 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) } } - gpiochip_set_chained_irqchip(chip, &pctrl->irqchip, irq, - chv_gpio_irq_handler); return 0; } diff --git a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c index 44d7f50bbc82..d936e7aa74c4 100644 --- a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c +++ b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c @@ -49,6 +49,7 @@ .padown_offset = SPT_PAD_OWN, \ .padcfglock_offset = SPT_PADCFGLOCK, \ .hostown_offset = SPT_HOSTSW_OWN, \ + .is_offset = SPT_GPI_IS, \ .ie_offset = SPT_GPI_IE, \ .pin_base = (s), \ .npins = ((e) - (s) + 1), \ diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c index 3c80828a5e50..bbc919bef2bf 100644 --- a/drivers/pinctrl/meson/pinctrl-meson.c +++ b/drivers/pinctrl/meson/pinctrl-meson.c @@ -441,6 +441,7 @@ static int 
meson_pinconf_get_drive_strength(struct meson_pinctrl *pc, return ret; meson_calc_reg_and_bit(bank, pin, REG_DS, &reg, &bit); + bit = bit << 1; ret = regmap_read(pc->reg_ds, reg, &val); if (ret) diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index eab078244a4c..73aff6591de2 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c @@ -866,7 +866,7 @@ static int amd_gpio_probe(struct platform_device *pdev) return -EINVAL; } - gpio_dev->base = devm_ioremap_nocache(&pdev->dev, res->start, + gpio_dev->base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!gpio_dev->base) return -ENOMEM; diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c index 24e0e2ef47a4..369e04350e3d 100644 --- a/drivers/pinctrl/pinctrl-ingenic.c +++ b/drivers/pinctrl/pinctrl-ingenic.c @@ -1809,7 +1809,7 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc, static void ingenic_set_output_level(struct ingenic_pinctrl *jzpc, unsigned int pin, bool high) { - if (jzpc->version >= ID_JZ4770) + if (jzpc->version >= ID_JZ4760) ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, high); else ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DATA, high); diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c index e914f6efd39e..9503ddf2edc7 100644 --- a/drivers/pinctrl/pinmux.c +++ b/drivers/pinctrl/pinmux.c @@ -85,7 +85,7 @@ bool pinmux_can_be_used_for_gpio(struct pinctrl_dev *pctldev, unsigned pin) const struct pinmux_ops *ops = pctldev->desc->pmxops; /* Can't inspect pin, assume it can be used */ - if (!desc) + if (!desc || !ops) return true; if (ops->strict && desc->mux_usecount) diff --git a/drivers/platform/chrome/wilco_ec/keyboard_leds.c b/drivers/platform/chrome/wilco_ec/keyboard_leds.c index bb0edf51dfda..5731d1b60e28 100644 --- a/drivers/platform/chrome/wilco_ec/keyboard_leds.c +++ b/drivers/platform/chrome/wilco_ec/keyboard_leds.c @@ -73,13 +73,6 @@ static int send_kbbl_msg(struct wilco_ec_device *ec, return ret; } - if (response->status) { - dev_err(ec->dev, - "EC reported failure sending keyboard LEDs command: %d", - response->status); - return -EIO; - } - return 0; } @@ -87,6 +80,7 @@ static int set_kbbl(struct wilco_ec_device *ec, enum led_brightness brightness) { struct wilco_keyboard_leds_msg request; struct wilco_keyboard_leds_msg response; + int ret; memset(&request, 0, sizeof(request)); request.command = WILCO_EC_COMMAND_KBBL; @@ -94,7 +88,18 @@ static int set_kbbl(struct wilco_ec_device *ec, enum led_brightness brightness) request.mode = WILCO_KBBL_MODE_FLAG_PWM; request.percent = brightness; - return send_kbbl_msg(ec, &request, &response); + ret = send_kbbl_msg(ec, &request, &response); + if (ret < 0) + return ret; + + if (response.status) { + dev_err(ec->dev, + "EC reported failure sending keyboard LEDs command: %d", + response.status); + return -EIO; + } + + return 0; } static int kbbl_exist(struct wilco_ec_device *ec, bool *exists) @@ -140,6 +145,13 @@ static int kbbl_init(struct wilco_ec_device *ec) if (ret < 0) return ret; + if (response.status) { + dev_err(ec->dev, + "EC reported failure sending keyboard LEDs command: %d", + response.status); + return -EIO; + } + if (response.mode & WILCO_KBBL_MODE_FLAG_PWM) return response.percent; diff --git a/drivers/platform/mellanox/mlxbf-bootctl.c b/drivers/platform/mellanox/mlxbf-bootctl.c index 61753b648506..5d21c6adf1ab 100644 --- a/drivers/platform/mellanox/mlxbf-bootctl.c +++ b/drivers/platform/mellanox/mlxbf-bootctl.c @@ -309,7 +309,7 @@ static struct
platform_driver mlxbf_bootctl_driver = { .probe = mlxbf_bootctl_probe, .driver = { .name = "mlxbf-bootctl", - .groups = mlxbf_bootctl_groups, + .dev_groups = mlxbf_bootctl_groups, .acpi_match_table = mlxbf_bootctl_acpi_ids, } }; diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c index 9a5c9fd2dbc6..5739a9669b29 100644 --- a/drivers/platform/mellanox/mlxbf-tmfifo.c +++ b/drivers/platform/mellanox/mlxbf-tmfifo.c @@ -149,7 +149,7 @@ struct mlxbf_tmfifo_irq_info { * @work: work struct for deferred process * @timer: background timer * @vring: Tx/Rx ring - * @spin_lock: spin lock + * @spin_lock: Tx/Rx spin lock * @is_ready: ready flag */ struct mlxbf_tmfifo { @@ -164,7 +164,7 @@ struct mlxbf_tmfifo { struct work_struct work; struct timer_list timer; struct mlxbf_tmfifo_vring *vring[2]; - spinlock_t spin_lock; /* spin lock */ + spinlock_t spin_lock[2]; /* Tx/Rx spin locks */ bool is_ready; }; @@ -525,7 +525,7 @@ static void mlxbf_tmfifo_console_tx(struct mlxbf_tmfifo *fifo, int avail) writeq(*(u64 *)&hdr, fifo->tx_base + MLXBF_TMFIFO_TX_DATA); /* Use spin-lock to protect the 'cons->tx_buf'. */ - spin_lock_irqsave(&fifo->spin_lock, flags); + spin_lock_irqsave(&fifo->spin_lock[0], flags); while (size > 0) { addr = cons->tx_buf.buf + cons->tx_buf.tail; @@ -552,7 +552,7 @@ static void mlxbf_tmfifo_console_tx(struct mlxbf_tmfifo *fifo, int avail) } } - spin_unlock_irqrestore(&fifo->spin_lock, flags); + spin_unlock_irqrestore(&fifo->spin_lock[0], flags); } /* Rx/Tx one word in the descriptor buffer. */ @@ -731,9 +731,9 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring, fifo->vring[is_rx] = NULL; /* Notify upper layer that packet is done. */ - spin_lock_irqsave(&fifo->spin_lock, flags); + spin_lock_irqsave(&fifo->spin_lock[is_rx], flags); vring_interrupt(0, vring->vq); - spin_unlock_irqrestore(&fifo->spin_lock, flags); + spin_unlock_irqrestore(&fifo->spin_lock[is_rx], flags); } mlxbf_tmfifo_desc_done: @@ -852,10 +852,10 @@ static bool mlxbf_tmfifo_virtio_notify(struct virtqueue *vq) * worker handler. */ if (vring->vdev_id == VIRTIO_ID_CONSOLE) { - spin_lock_irqsave(&fifo->spin_lock, flags); + spin_lock_irqsave(&fifo->spin_lock[0], flags); tm_vdev = fifo->vdev[VIRTIO_ID_CONSOLE]; mlxbf_tmfifo_console_output(tm_vdev, vring); - spin_unlock_irqrestore(&fifo->spin_lock, flags); + spin_unlock_irqrestore(&fifo->spin_lock[0], flags); } else if (test_and_set_bit(MLXBF_TM_TX_LWM_IRQ, &fifo->pend_events)) { return true; @@ -1189,7 +1189,8 @@ static int mlxbf_tmfifo_probe(struct platform_device *pdev) if (!fifo) return -ENOMEM; - spin_lock_init(&fifo->spin_lock); + spin_lock_init(&fifo->spin_lock[0]); + spin_lock_init(&fifo->spin_lock[1]); INIT_WORK(&fifo->work, mlxbf_tmfifo_work_handler); mutex_init(&fifo->lock); diff --git a/drivers/platform/mellanox/mlxreg-hotplug.c b/drivers/platform/mellanox/mlxreg-hotplug.c index 706207d192ae..77be37a1fbcf 100644 --- a/drivers/platform/mellanox/mlxreg-hotplug.c +++ b/drivers/platform/mellanox/mlxreg-hotplug.c @@ -504,6 +504,20 @@ static int mlxreg_hotplug_set_irq(struct mlxreg_hotplug_priv_data *priv) item = pdata->items; for (i = 0; i < pdata->counter; i++, item++) { + if (item->capability) { + /* + * Read group capability register to get actual number + * of interrupt capable components and set group mask + * accordingly. + */ + ret = regmap_read(priv->regmap, item->capability, + &regval); + if (ret) + goto out; + + item->mask = GENMASK((regval & item->mask) - 1, 0); + } + /* Clear group presence event.
*/ ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_EVENT_OFF, 0); diff --git a/drivers/platform/mips/Kconfig b/drivers/platform/mips/Kconfig index f4d0a86c00d0..5e77b0dc5fd6 100644 --- a/drivers/platform/mips/Kconfig +++ b/drivers/platform/mips/Kconfig @@ -18,7 +18,7 @@ if MIPS_PLATFORM_DEVICES config CPU_HWMON tristate "Loongson-3 CPU HWMon Driver" - depends on CONFIG_MACH_LOONGSON64 + depends on MACH_LOONGSON64 select HWMON default y help diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 27d5b40fb717..587403c44598 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -997,7 +997,6 @@ config INTEL_SCU_IPC config INTEL_SCU_IPC_UTIL tristate "Intel SCU IPC utility driver" depends on INTEL_SCU_IPC - default y ---help--- The IPC Util driver provides an interface with the SCU enabling low level access for debug work and updating the firmware. Say @@ -1299,9 +1298,9 @@ config INTEL_ATOMISP2_PM depends on PCI && IOSF_MBI && PM help Power-management driver for Intel's Image Signal Processor found on - Bay and Cherry Trail devices. This dummy driver's sole purpose is to - turn the ISP off (put it in D3) to save power and to allow entering - of S0ix modes. + Bay Trail and Cherry Trail devices. This dummy driver's sole purpose + is to turn the ISP off (put it in D3) to save power and to allow + entering of S0ix modes. To compile this driver as a module, choose M here: the module will be called intel_atomisp2_pm. @@ -1337,6 +1336,17 @@ config PCENGINES_APU2 To compile this driver as a module, choose M here: the module will be called pcengines-apuv2. +config INTEL_UNCORE_FREQ_CONTROL + tristate "Intel Uncore frequency control driver" + depends on X86_64 + help + This driver allows control of uncore frequency limits on + supported server platforms. + Uncore frequency controls RING/LLC (last-level cache) clocks. + + To compile this driver as a module, choose M here: the module + will be called intel-uncore-frequency. 
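For orientation before the driver lands below: everything this Kconfig entry enables funnels through a single MSR. intel-uncore-frequency.c encodes the limits as ratios in MSR 0x620, bits 6:0 for the maximum uncore ratio and bits 14:8 for the minimum, both in 100 MHz units. A self-contained sketch of the decode, mirroring the driver's uncore_read_ratio() (the example_* name is illustrative, not from the driver):

#include <linux/bits.h>
#include <linux/types.h>
#include <asm/msr.h>

#define MSR_UNCORE_RATIO_LIMIT		0x620
#define UNCORE_FREQ_KHZ_MULTIPLIER	100000

static int example_read_uncore_limits(int cpu, unsigned int *min_khz,
				      unsigned int *max_khz)
{
	u64 cap;
	int ret;

	/* rdmsrl_on_cpu() performs the read on the target CPU */
	ret = rdmsrl_on_cpu(cpu, MSR_UNCORE_RATIO_LIMIT, &cap);
	if (ret)
		return ret;

	*max_khz = (cap & GENMASK_ULL(6, 0)) * UNCORE_FREQ_KHZ_MULTIPLIER;
	*min_khz = ((cap & GENMASK_ULL(14, 8)) >> 8) * UNCORE_FREQ_KHZ_MULTIPLIER;

	return 0;
}

The driver publishes these values per die under /sys/devices/system/cpu/intel_uncore_frequency/package_XX_die_XX/ as the writable min_freq_khz and max_freq_khz attributes plus the read-only initial_min_freq_khz and initial_max_freq_khz snapshots.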
+ source "drivers/platform/x86/intel_speed_select_if/Kconfig" config SYSTEM76_ACPI diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index 42d85a00be4e..3747b1f07cf1 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile @@ -105,3 +105,4 @@ obj-$(CONFIG_INTEL_ATOMISP2_PM) += intel_atomisp2_pm.o obj-$(CONFIG_PCENGINES_APU2) += pcengines-apuv2.o obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += intel_speed_select_if/ obj-$(CONFIG_SYSTEM76_ACPI) += system76_acpi.o +obj-$(CONFIG_INTEL_UNCORE_FREQ_CONTROL) += intel-uncore-frequency.o diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index b361c73636a4..6f12747a359a 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -471,6 +471,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = { { KE_KEY, 0x67, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV */ { KE_KEY, 0x6B, { KEY_TOUCHPAD_TOGGLE } }, { KE_IGNORE, 0x6E, }, /* Low Battery notification */ + { KE_KEY, 0x71, { KEY_F13 } }, /* General-purpose button */ { KE_KEY, 0x7a, { KEY_ALS_TOGGLE } }, /* Ambient Light Sensor Toggle */ { KE_KEY, 0x7c, { KEY_MICMUTE } }, { KE_KEY, 0x7D, { KEY_BLUETOOTH } }, /* Bluetooth Enable */ diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index 821b08e01635..43bb15e05529 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -61,6 +61,7 @@ MODULE_LICENSE("GPL"); #define NOTIFY_KBD_BRTDWN 0xc5 #define NOTIFY_KBD_BRTTOGGLE 0xc7 #define NOTIFY_KBD_FBM 0x99 +#define NOTIFY_KBD_TTP 0xae #define ASUS_WMI_FNLOCK_BIOS_DISABLED BIT(0) @@ -81,6 +82,10 @@ MODULE_LICENSE("GPL"); #define ASUS_FAN_BOOST_MODE_SILENT_MASK 0x02 #define ASUS_FAN_BOOST_MODES_MASK 0x03 +#define ASUS_THROTTLE_THERMAL_POLICY_DEFAULT 0 +#define ASUS_THROTTLE_THERMAL_POLICY_OVERBOOST 1 +#define ASUS_THROTTLE_THERMAL_POLICY_SILENT 2 + #define USB_INTEL_XUSB2PR 0xD0 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31 @@ -198,6 +203,9 @@ struct asus_wmi { u8 fan_boost_mode_mask; u8 fan_boost_mode; + bool throttle_thermal_policy_available; + u8 throttle_thermal_policy_mode; + // The RSOC controls the maximum charging percentage. 
bool battery_rsoc_available; @@ -512,13 +520,7 @@ static void kbd_led_update(struct asus_wmi *asus) { int ctrl_param = 0; - /* - * bits 0-2: level - * bit 7: light on/off - */ - if (asus->kbd_led_wk > 0) - ctrl_param = 0x80 | (asus->kbd_led_wk & 0x7F); - + ctrl_param = 0x80 | (asus->kbd_led_wk & 0x7F); asus_wmi_set_devstate(ASUS_WMI_DEVID_KBD_BACKLIGHT, ctrl_param, NULL); } @@ -1724,6 +1726,107 @@ static ssize_t fan_boost_mode_store(struct device *dev, // Fan boost mode: 0 - normal, 1 - overboost, 2 - silent static DEVICE_ATTR_RW(fan_boost_mode); +/* Throttle thermal policy ****************************************************/ + +static int throttle_thermal_policy_check_present(struct asus_wmi *asus) +{ + u32 result; + int err; + + asus->throttle_thermal_policy_available = false; + + err = asus_wmi_get_devstate(asus, + ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY, + &result); + if (err) { + if (err == -ENODEV) + return 0; + return err; + } + + if (result & ASUS_WMI_DSTS_PRESENCE_BIT) + asus->throttle_thermal_policy_available = true; + + return 0; +} + +static int throttle_thermal_policy_write(struct asus_wmi *asus) +{ + int err; + u8 value; + u32 retval; + + value = asus->throttle_thermal_policy_mode; + + err = asus_wmi_set_devstate(ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY, + value, &retval); + if (err) { + pr_warn("Failed to set throttle thermal policy: %d\n", err); + return err; + } + + if (retval != 1) { + pr_warn("Failed to set throttle thermal policy (retval): 0x%x\n", + retval); + return -EIO; + } + + return 0; +} + +static int throttle_thermal_policy_set_default(struct asus_wmi *asus) +{ + if (!asus->throttle_thermal_policy_available) + return 0; + + asus->throttle_thermal_policy_mode = ASUS_THROTTLE_THERMAL_POLICY_DEFAULT; + return throttle_thermal_policy_write(asus); +} + +static int throttle_thermal_policy_switch_next(struct asus_wmi *asus) +{ + u8 new_mode = asus->throttle_thermal_policy_mode + 1; + + if (new_mode > ASUS_THROTTLE_THERMAL_POLICY_SILENT) + new_mode = ASUS_THROTTLE_THERMAL_POLICY_DEFAULT; + + asus->throttle_thermal_policy_mode = new_mode; + return throttle_thermal_policy_write(asus); +} + +static ssize_t throttle_thermal_policy_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct asus_wmi *asus = dev_get_drvdata(dev); + u8 mode = asus->throttle_thermal_policy_mode; + + return scnprintf(buf, PAGE_SIZE, "%d\n", mode); +} + +static ssize_t throttle_thermal_policy_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int result; + u8 new_mode; + struct asus_wmi *asus = dev_get_drvdata(dev); + + result = kstrtou8(buf, 10, &new_mode); + if (result < 0) + return result; + + if (new_mode > ASUS_THROTTLE_THERMAL_POLICY_SILENT) + return -EINVAL; + + asus->throttle_thermal_policy_mode = new_mode; + throttle_thermal_policy_write(asus); + + return count; +} + +// Throttle thermal policy: 0 - default, 1 - overboost, 2 - silent +static DEVICE_ATTR_RW(throttle_thermal_policy); + /* Backlight ******************************************************************/ static int read_backlight_power(struct asus_wmi *asus) @@ -2005,6 +2108,11 @@ static void asus_wmi_handle_event_code(int code, struct asus_wmi *asus) return; } + if (asus->throttle_thermal_policy_available && code == NOTIFY_KBD_TTP) { + throttle_thermal_policy_switch_next(asus); + return; + } + if (is_display_toggle(code) && asus->driver->quirks->no_display_toggle) return; @@ -2155,6 +2263,7 @@ static struct attribute *platform_attributes[] = { 
&dev_attr_lid_resume.attr, &dev_attr_als_enable.attr, &dev_attr_fan_boost_mode.attr, + &dev_attr_throttle_thermal_policy.attr, NULL }; @@ -2178,6 +2287,8 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj, devid = ASUS_WMI_DEVID_ALS_ENABLE; else if (attr == &dev_attr_fan_boost_mode.attr) ok = asus->fan_boost_mode_available; + else if (attr == &dev_attr_throttle_thermal_policy.attr) + ok = asus->throttle_thermal_policy_available; if (devid != -1) ok = !(asus_wmi_get_devstate_simple(asus, devid) < 0); @@ -2437,6 +2548,12 @@ static int asus_wmi_add(struct platform_device *pdev) if (err) goto fail_fan_boost_mode; + err = throttle_thermal_policy_check_present(asus); + if (err) + goto fail_throttle_thermal_policy; + else + throttle_thermal_policy_set_default(asus); + err = asus_wmi_sysfs_init(asus->platform_device); if (err) goto fail_sysfs; @@ -2521,6 +2638,7 @@ fail_hwmon: fail_input: asus_wmi_sysfs_exit(asus->platform_device); fail_sysfs: +fail_throttle_thermal_policy: fail_fan_boost_mode: fail_platform: kfree(asus); diff --git a/drivers/platform/x86/gpd-pocket-fan.c b/drivers/platform/x86/gpd-pocket-fan.c index be85ed966bf3..b471b86c28fe 100644 --- a/drivers/platform/x86/gpd-pocket-fan.c +++ b/drivers/platform/x86/gpd-pocket-fan.c @@ -16,17 +16,27 @@ #define MAX_SPEED 3 -static int temp_limits[3] = { 55000, 60000, 65000 }; +#define TEMP_LIMIT0_DEFAULT 55000 +#define TEMP_LIMIT1_DEFAULT 60000 +#define TEMP_LIMIT2_DEFAULT 65000 + +#define HYSTERESIS_DEFAULT 3000 + +#define SPEED_ON_AC_DEFAULT 2 + +static int temp_limits[3] = { + TEMP_LIMIT0_DEFAULT, TEMP_LIMIT1_DEFAULT, TEMP_LIMIT2_DEFAULT, +}; module_param_array(temp_limits, int, NULL, 0444); MODULE_PARM_DESC(temp_limits, "Millicelsius values above which the fan speed increases"); -static int hysteresis = 3000; +static int hysteresis = HYSTERESIS_DEFAULT; module_param(hysteresis, int, 0444); MODULE_PARM_DESC(hysteresis, "Hysteresis in millicelsius before lowering the fan speed"); -static int speed_on_ac = 2; +static int speed_on_ac = SPEED_ON_AC_DEFAULT; module_param(speed_on_ac, int, 0444); MODULE_PARM_DESC(speed_on_ac, "minimum fan speed to allow when system is powered by AC"); @@ -117,21 +127,24 @@ static int gpd_pocket_fan_probe(struct platform_device *pdev) int i; for (i = 0; i < ARRAY_SIZE(temp_limits); i++) { - if (temp_limits[i] < 40000 || temp_limits[i] > 70000) { + if (temp_limits[i] < 20000 || temp_limits[i] > 90000) { dev_err(&pdev->dev, "Invalid temp-limit %d (must be between 20000 and 90000)\n", temp_limits[i]); - return -EINVAL; + temp_limits[0] = TEMP_LIMIT0_DEFAULT; + temp_limits[1] = TEMP_LIMIT1_DEFAULT; + temp_limits[2] = TEMP_LIMIT2_DEFAULT; + break; } } if (hysteresis < 1000 || hysteresis > 10000) { dev_err(&pdev->dev, "Invalid hysteresis %d (must be between 1000 and 10000)\n", hysteresis); - return -EINVAL; + hysteresis = HYSTERESIS_DEFAULT; } if (speed_on_ac < 0 || speed_on_ac > MAX_SPEED) { dev_err(&pdev->dev, "Invalid speed_on_ac %d (must be between 0 and 3)\n", speed_on_ac); - return -EINVAL; + speed_on_ac = SPEED_ON_AC_DEFAULT; } fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL); diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 9579a706fc08..a881b709af25 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -300,7 +300,7 @@ static int __init hp_wmi_bios_2008_later(void) static int __init hp_wmi_bios_2009_later(void) { - int state = 0; + u8 state[128]; int ret = hp_wmi_perform_query(HPWMI_FEATURE2_QUERY, HPWMI_READ, &state, sizeof(state),
sizeof(state)); if (!ret) diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c index ef6d4bd77b1a..43d590250228 100644 --- a/drivers/platform/x86/intel-hid.c +++ b/drivers/platform/x86/intel-hid.c @@ -19,6 +19,7 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Alex Hung"); static const struct acpi_device_id intel_hid_ids[] = { + {"INT1051", 0}, {"INT33D5", 0}, {"", 0}, }; diff --git a/drivers/platform/x86/intel-uncore-frequency.c b/drivers/platform/x86/intel-uncore-frequency.c new file mode 100644 index 000000000000..2b1a0734c3f8 --- /dev/null +++ b/drivers/platform/x86/intel-uncore-frequency.c @@ -0,0 +1,437 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Intel Uncore Frequency Setting + * Copyright (c) 2019, Intel Corporation. + * All rights reserved. + * + * Provide an interface to set MSR 0x620 at per-die granularity. On CPU online, + * one control CPU per die is identified to read/write the limits. The control + * CPU is reassigned if it goes offline, and when the last CPU in a die goes + * offline the sysfs object for that die is removed. The majority of the code + * is related to creating the sysfs attributes and their read/write handlers. + * + * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com> + */ + +#include <linux/cpu.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/suspend.h> +#include <asm/cpu_device_id.h> +#include <asm/intel-family.h> + +#define MSR_UNCORE_RATIO_LIMIT 0x620 +#define UNCORE_FREQ_KHZ_MULTIPLIER 100000 + +/** + * struct uncore_data - Encapsulate all uncore data + * @kobj: Embedded kobject backing the per-die sysfs directory + * @stored_uncore_data: Last user changed MSR 0x620 value, which will be restored + * on system resume. + * @initial_min_freq_khz: Sampled minimum uncore frequency at driver init + * @initial_max_freq_khz: Sampled maximum uncore frequency at driver init + * @control_cpu: Designated CPU for a die to read/write + * @valid: Mark the data valid/invalid + * + * This structure is used to encapsulate all data related to uncore sysfs + * settings for a die/package.
+ */ +struct uncore_data { + struct kobject kobj; + u64 stored_uncore_data; + u32 initial_min_freq_khz; + u32 initial_max_freq_khz; + int control_cpu; + bool valid; +}; + +#define to_uncore_data(a) container_of(a, struct uncore_data, kobj) + +/* Max instances for uncore data, one for each die */ +static int uncore_max_entries __read_mostly; +/* Storage for uncore data for all instances */ +static struct uncore_data *uncore_instances; +/* Root of the all uncore sysfs kobjs */ +struct kobject uncore_root_kobj; +/* Stores the CPU mask of the target CPUs to use during uncore read/write */ +static cpumask_t uncore_cpu_mask; +/* CPU online callback register instance */ +static enum cpuhp_state uncore_hp_state __read_mostly; +/* Mutex to control all mutual exclusions */ +static DEFINE_MUTEX(uncore_lock); + +struct uncore_attr { + struct attribute attr; + ssize_t (*show)(struct kobject *kobj, + struct attribute *attr, char *buf); + ssize_t (*store)(struct kobject *kobj, + struct attribute *attr, const char *c, ssize_t count); +}; + +#define define_one_uncore_ro(_name) \ +static struct uncore_attr _name = \ +__ATTR(_name, 0444, show_##_name, NULL) + +#define define_one_uncore_rw(_name) \ +static struct uncore_attr _name = \ +__ATTR(_name, 0644, show_##_name, store_##_name) + +#define show_uncore_data(member_name) \ + static ssize_t show_##member_name(struct kobject *kobj, \ + struct attribute *attr, \ + char *buf) \ + { \ + struct uncore_data *data = to_uncore_data(kobj); \ + return scnprintf(buf, PAGE_SIZE, "%u\n", \ + data->member_name); \ + } \ + define_one_uncore_ro(member_name) + +show_uncore_data(initial_min_freq_khz); +show_uncore_data(initial_max_freq_khz); + +/* Common function to read MSR 0x620 and read min/max */ +static int uncore_read_ratio(struct uncore_data *data, unsigned int *min, + unsigned int *max) +{ + u64 cap; + int ret; + + ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap); + if (ret) + return ret; + + *max = (cap & 0x7F) * UNCORE_FREQ_KHZ_MULTIPLIER; + *min = ((cap & GENMASK(14, 8)) >> 8) * UNCORE_FREQ_KHZ_MULTIPLIER; + + return 0; +} + +/* Common function to set min/max ratios to be used by sysfs callbacks */ +static int uncore_write_ratio(struct uncore_data *data, unsigned int input, + int set_max) +{ + int ret; + u64 cap; + + mutex_lock(&uncore_lock); + + input /= UNCORE_FREQ_KHZ_MULTIPLIER; + if (!input || input > 0x7F) { + ret = -EINVAL; + goto finish_write; + } + + ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap); + if (ret) + goto finish_write; + + if (set_max) { + cap &= ~0x7F; + cap |= input; + } else { + cap &= ~GENMASK(14, 8); + cap |= (input << 8); + } + + ret = wrmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, cap); + if (ret) + goto finish_write; + + data->stored_uncore_data = cap; + +finish_write: + mutex_unlock(&uncore_lock); + + return ret; +} + +static ssize_t store_min_max_freq_khz(struct kobject *kobj, + struct attribute *attr, + const char *buf, ssize_t count, + int min_max) +{ + struct uncore_data *data = to_uncore_data(kobj); + unsigned int input; + + if (kstrtouint(buf, 10, &input)) + return -EINVAL; + + uncore_write_ratio(data, input, min_max); + + return count; +} + +static ssize_t show_min_max_freq_khz(struct kobject *kobj, + struct attribute *attr, + char *buf, int min_max) +{ + struct uncore_data *data = to_uncore_data(kobj); + unsigned int min, max; + int ret; + + mutex_lock(&uncore_lock); + ret = uncore_read_ratio(data, &min, &max); + mutex_unlock(&uncore_lock); + if (ret) + return ret; + + if 
(min_max) + return sprintf(buf, "%u\n", max); + + return sprintf(buf, "%u\n", min); +} + +#define store_uncore_min_max(name, min_max) \ + static ssize_t store_##name(struct kobject *kobj, \ + struct attribute *attr, \ + const char *buf, ssize_t count) \ + { \ + \ + return store_min_max_freq_khz(kobj, attr, buf, count, \ + min_max); \ + } + +#define show_uncore_min_max(name, min_max) \ + static ssize_t show_##name(struct kobject *kobj, \ + struct attribute *attr, char *buf) \ + { \ + \ + return show_min_max_freq_khz(kobj, attr, buf, min_max); \ + } + +store_uncore_min_max(min_freq_khz, 0); +store_uncore_min_max(max_freq_khz, 1); + +show_uncore_min_max(min_freq_khz, 0); +show_uncore_min_max(max_freq_khz, 1); + +define_one_uncore_rw(min_freq_khz); +define_one_uncore_rw(max_freq_khz); + +static struct attribute *uncore_attrs[] = { + &initial_min_freq_khz.attr, + &initial_max_freq_khz.attr, + &max_freq_khz.attr, + &min_freq_khz.attr, + NULL +}; + +static struct kobj_type uncore_ktype = { + .sysfs_ops = &kobj_sysfs_ops, + .default_attrs = uncore_attrs, +}; + +static struct kobj_type uncore_root_ktype = { + .sysfs_ops = &kobj_sysfs_ops, +}; + +/* Caller provides protection */ +static struct uncore_data *uncore_get_instance(unsigned int cpu) +{ + int id = topology_logical_die_id(cpu); + + if (id >= 0 && id < uncore_max_entries) + return &uncore_instances[id]; + + return NULL; +} + +static void uncore_add_die_entry(int cpu) +{ + struct uncore_data *data; + + mutex_lock(&uncore_lock); + data = uncore_get_instance(cpu); + if (!data) { + mutex_unlock(&uncore_lock); + return; + } + + if (data->valid) { + /* control cpu changed */ + data->control_cpu = cpu; + } else { + char str[64]; + int ret; + + memset(data, 0, sizeof(*data)); + sprintf(str, "package_%02d_die_%02d", + topology_physical_package_id(cpu), + topology_die_id(cpu)); + + uncore_read_ratio(data, &data->initial_min_freq_khz, + &data->initial_max_freq_khz); + + ret = kobject_init_and_add(&data->kobj, &uncore_ktype, + &uncore_root_kobj, str); + if (!ret) { + data->control_cpu = cpu; + data->valid = true; + } + } + mutex_unlock(&uncore_lock); +} + +/* Last CPU in this die is offline, so remove sysfs entries */ +static void uncore_remove_die_entry(int cpu) +{ + struct uncore_data *data; + + mutex_lock(&uncore_lock); + data = uncore_get_instance(cpu); + if (data) { + kobject_put(&data->kobj); + data->control_cpu = -1; + data->valid = false; + } + mutex_unlock(&uncore_lock); +} + +static int uncore_event_cpu_online(unsigned int cpu) +{ + int target; + + /* Check if there is an online cpu in the package for uncore MSR */ + target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu)); + if (target < nr_cpu_ids) + return 0; + + /* Use this CPU on this die as a control CPU */ + cpumask_set_cpu(cpu, &uncore_cpu_mask); + uncore_add_die_entry(cpu); + + return 0; +} + +static int uncore_event_cpu_offline(unsigned int cpu) +{ + int target; + + /* Check if existing cpu is used for uncore MSRs */ + if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask)) + return 0; + + /* Find a new cpu to set uncore MSR */ + target = cpumask_any_but(topology_die_cpumask(cpu), cpu); + + if (target < nr_cpu_ids) { + cpumask_set_cpu(target, &uncore_cpu_mask); + uncore_add_die_entry(target); + } else { + uncore_remove_die_entry(cpu); + } + + return 0; +} + +static int uncore_pm_notify(struct notifier_block *nb, unsigned long mode, + void *_unused) +{ + int cpu; + + switch (mode) { + case PM_POST_HIBERNATION: + case PM_POST_RESTORE: + case PM_POST_SUSPEND: + 
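+ /* The ratio limits in MSR 0x620 do not survive suspend or hibernation, so replay the last user-written value on each control CPU. */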
for_each_cpu(cpu, &uncore_cpu_mask) { + struct uncore_data *data; + int ret; + + data = uncore_get_instance(cpu); + if (!data || !data->valid || !data->stored_uncore_data) + continue; + + ret = wrmsrl_on_cpu(cpu, MSR_UNCORE_RATIO_LIMIT, + data->stored_uncore_data); + if (ret) + return ret; + } + break; + default: + break; + } + return 0; +} + +static struct notifier_block uncore_pm_nb = { + .notifier_call = uncore_pm_notify, +}; + +#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, } + +static const struct x86_cpu_id intel_uncore_cpu_ids[] = { + ICPU(INTEL_FAM6_BROADWELL_G), + ICPU(INTEL_FAM6_BROADWELL_X), + ICPU(INTEL_FAM6_BROADWELL_D), + ICPU(INTEL_FAM6_SKYLAKE_X), + ICPU(INTEL_FAM6_ICELAKE_X), + ICPU(INTEL_FAM6_ICELAKE_D), + {} +}; + +static int __init intel_uncore_init(void) +{ + const struct x86_cpu_id *id; + int ret; + + id = x86_match_cpu(intel_uncore_cpu_ids); + if (!id) + return -ENODEV; + + uncore_max_entries = topology_max_packages() * + topology_max_die_per_package(); + uncore_instances = kcalloc(uncore_max_entries, + sizeof(*uncore_instances), GFP_KERNEL); + if (!uncore_instances) + return -ENOMEM; + + ret = kobject_init_and_add(&uncore_root_kobj, &uncore_root_ktype, + &cpu_subsys.dev_root->kobj, + "intel_uncore_frequency"); + if (ret) + goto err_free; + + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, + "platform/x86/uncore-freq:online", + uncore_event_cpu_online, + uncore_event_cpu_offline); + if (ret < 0) + goto err_rem_kobj; + + uncore_hp_state = ret; + + ret = register_pm_notifier(&uncore_pm_nb); + if (ret) + goto err_rem_state; + + return 0; + +err_rem_state: + cpuhp_remove_state(uncore_hp_state); +err_rem_kobj: + kobject_put(&uncore_root_kobj); +err_free: + kfree(uncore_instances); + + return ret; +} +module_init(intel_uncore_init) + +static void __exit intel_uncore_exit(void) +{ + int i; + + unregister_pm_notifier(&uncore_pm_nb); + cpuhp_remove_state(uncore_hp_state); + for (i = 0; i < uncore_max_entries; ++i) { + if (uncore_instances[i].valid) + kobject_put(&uncore_instances[i].kobj); + } + kobject_put(&uncore_root_kobj); + kfree(uncore_instances); +} +module_exit(intel_uncore_exit) + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Intel Uncore Frequency Limits Driver"); diff --git a/drivers/platform/x86/intel_atomisp2_pm.c b/drivers/platform/x86/intel_atomisp2_pm.c index b0f421fea2a5..805fc0d8515c 100644 --- a/drivers/platform/x86/intel_atomisp2_pm.c +++ b/drivers/platform/x86/intel_atomisp2_pm.c @@ -1,8 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Dummy driver for Intel's Image Signal Processor found on Bay and Cherry - * Trail devices. The sole purpose of this driver is to allow the ISP to - * be put in D3. + * Dummy driver for Intel's Image Signal Processor found on Bay Trail + * and Cherry Trail devices. The sole purpose of this driver is to allow + * the ISP to be put in D3. * * Copyright (C) 2018 Hans de Goede <hdegoede@redhat.com> * @@ -36,8 +36,7 @@ static int isp_set_power(struct pci_dev *dev, bool enable) { unsigned long timeout; - u32 val = enable ? ISPSSPM0_IUNIT_POWER_ON : - ISPSSPM0_IUNIT_POWER_OFF; + u32 val = enable ? ISPSSPM0_IUNIT_POWER_ON : ISPSSPM0_IUNIT_POWER_OFF; /* Write to ISPSSPM0 bit[1:0] to power on/off the IUNIT */ iosf_mbi_modify(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0, @@ -45,29 +44,25 @@ static int isp_set_power(struct pci_dev *dev, bool enable) /* * There should be no IUNIT access while power-down is - * in progress HW sighting: 4567865 + * in progress. HW sighting: 4567865. 
* Wait up to 50 ms for the IUNIT to shut down. * And we do the same for power on. */ timeout = jiffies + msecs_to_jiffies(50); - while (1) { + do { u32 tmp; /* Wait until ISPSSPM0 bit[25:24] shows the right value */ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0, &tmp); tmp = (tmp & ISPSSPM0_ISPSSS_MASK) >> ISPSSPM0_ISPSSS_OFFSET; if (tmp == val) - break; + return 0; - if (time_after(jiffies, timeout)) { - dev_err(&dev->dev, "IUNIT power-%s timeout.\n", - enable ? "on" : "off"); - return -EBUSY; - } usleep_range(1000, 2000); - } + } while (time_before(jiffies, timeout)); - return 0; + dev_err(&dev->dev, "IUNIT power-%s timeout.\n", enable ? "on" : "off"); + return -EBUSY; } static int isp_probe(struct pci_dev *dev, const struct pci_device_id *id) diff --git a/drivers/platform/x86/intel_cht_int33fe_typec.c b/drivers/platform/x86/intel_cht_int33fe_typec.c index 2d097fc2dd46..04138215956b 100644 --- a/drivers/platform/x86/intel_cht_int33fe_typec.c +++ b/drivers/platform/x86/intel_cht_int33fe_typec.c @@ -36,30 +36,6 @@ enum { INT33FE_NODE_MAX, }; -static const struct software_node nodes[]; - -static const struct software_node_ref_args pi3usb30532_ref = { - &nodes[INT33FE_NODE_PI3USB30532] -}; - -static const struct software_node_ref_args dp_ref = { - &nodes[INT33FE_NODE_DISPLAYPORT] -}; - -static struct software_node_ref_args mux_ref; - -static const struct software_node_reference usb_connector_refs[] = { - { "orientation-switch", 1, &pi3usb30532_ref}, - { "mode-switch", 1, &pi3usb30532_ref}, - { "displayport", 1, &dp_ref}, - { } -}; - -static const struct software_node_reference fusb302_refs[] = { - { "usb-role-switch", 1, &mux_ref}, - { } -}; - /* * Grrr I severely dislike buggy BIOS-es. At least one BIOS enumerates * the max17047 both through the INT33FE ACPI device (it is right there @@ -95,8 +71,18 @@ static const struct property_entry max17047_props[] = { { } }; +/* + * We are not using inline property here because those are constant, + * and we need to adjust this one at runtime to point to the real + * software node.
+ */
+static struct software_node_ref_args fusb302_mux_refs[] = {
+ { .node = NULL },
+};
+
 static const struct property_entry fusb302_props[] = {
 PROPERTY_ENTRY_STRING("linux,extcon-name", "cht_wcove_pwrsrc"),
+ PROPERTY_ENTRY_REF_ARRAY("usb-role-switch", fusb302_mux_refs),
 { }
 };

@@ -112,6 +98,8 @@ static const u32 snk_pdo[] = {
 PDO_VAR(5000, 12000, 3000),
 };

+static const struct software_node nodes[];
+
 static const struct property_entry usb_connector_props[] = {
 PROPERTY_ENTRY_STRING("data-role", "dual"),
 PROPERTY_ENTRY_STRING("power-role", "dual"),
@@ -119,15 +107,21 @@ static const struct property_entry usb_connector_props[] = {
 PROPERTY_ENTRY_U32_ARRAY("source-pdos", src_pdo),
 PROPERTY_ENTRY_U32_ARRAY("sink-pdos", snk_pdo),
 PROPERTY_ENTRY_U32("op-sink-microwatt", 2500000),
+ PROPERTY_ENTRY_REF("orientation-switch",
+ &nodes[INT33FE_NODE_PI3USB30532]),
+ PROPERTY_ENTRY_REF("mode-switch",
+ &nodes[INT33FE_NODE_PI3USB30532]),
+ PROPERTY_ENTRY_REF("displayport",
+ &nodes[INT33FE_NODE_DISPLAYPORT]),
 { }
 };

 static const struct software_node nodes[] = {
- { "fusb302", NULL, fusb302_props, fusb302_refs },
+ { "fusb302", NULL, fusb302_props },
 { "max17047", NULL, max17047_props },
 { "pi3usb30532" },
 { "displayport" },
- { "connector", &nodes[0], usb_connector_props, usb_connector_refs },
+ { "connector", &nodes[0], usb_connector_props },
 { }
 };

@@ -163,9 +157,10 @@ static void cht_int33fe_remove_nodes(struct cht_int33fe_data *data)
 {
 software_node_unregister_nodes(nodes);

- if (mux_ref.node) {
- fwnode_handle_put(software_node_fwnode(mux_ref.node));
- mux_ref.node = NULL;
+ if (fusb302_mux_refs[0].node) {
+ fwnode_handle_put(
+ software_node_fwnode(fusb302_mux_refs[0].node));
+ fusb302_mux_refs[0].node = NULL;
 }

 if (data->dp) {
@@ -177,25 +172,31 @@ static void cht_int33fe_remove_nodes(struct cht_int33fe_data *data)

 static int cht_int33fe_add_nodes(struct cht_int33fe_data *data)
 {
+ const struct software_node *mux_ref_node;
 int ret;

- ret = software_node_register_nodes(nodes);
- if (ret)
- return ret;
-
- /* The devices that are not created in this driver need extra steps. */
-
 /*
 * There is no ACPI device node for the USB role mux, so we need to wait
 * until the mux driver has created software node for the mux device.
 * It means we depend on the mux driver. This function will return
 * -EPROBE_DEFER until the mux device is registered.
 */
- mux_ref.node = software_node_find_by_name(NULL, "intel-xhci-usb-sw");
- if (!mux_ref.node) {
- ret = -EPROBE_DEFER;
- goto err_remove_nodes;
- }
+ mux_ref_node = software_node_find_by_name(NULL, "intel-xhci-usb-sw");
+ if (!mux_ref_node)
+ return -EPROBE_DEFER;
+
+ /*
+ * Update the node used in the "usb-role-switch" property. Note that
+ * we rely on software_node_register_nodes() to use the original
+ * instances of the properties instead of copying them.
+ */
+ fusb302_mux_refs[0].node = mux_ref_node;
+
+ ret = software_node_register_nodes(nodes);
+ if (ret)
+ return ret;
+
+ /* The devices that are not created in this driver need extra steps. */

 /*
 * The DP connector does have ACPI device node.
In this case we can just
diff --git a/drivers/platform/x86/intel_ips.h b/drivers/platform/x86/intel_ips.h
index 512ad234ad0d..35ed9711c7b9 100644
--- a/drivers/platform/x86/intel_ips.h
+++ b/drivers/platform/x86/intel_ips.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
 * Copyright (c) 2010 Intel Corporation
 */
diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
index 292bace83f1e..6f436836fe50 100644
--- a/drivers/platform/x86/intel_mid_powerbtn.c
+++ b/drivers/platform/x86/intel_mid_powerbtn.c
@@ -146,9 +146,10 @@ static int mid_pb_probe(struct platform_device *pdev)

 input_set_capability(input, EV_KEY, KEY_POWER);

- ddata = (struct mid_pb_ddata *)id->driver_data;
+ ddata = devm_kmemdup(&pdev->dev, (void *)id->driver_data,
+ sizeof(*ddata), GFP_KERNEL);
 if (!ddata)
- return -ENODATA;
+ return -ENOMEM;

 ddata->dev = &pdev->dev;
 ddata->irq = irq;
diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
index 571b4754477c..144faa8bad3d 100644
--- a/drivers/platform/x86/intel_pmc_core.c
+++ b/drivers/platform/x86/intel_pmc_core.c
@@ -49,7 +49,7 @@ static const struct pmc_bit_map spt_pll_map[] = {
 {"GEN2 USB2PCIE2 PLL", SPT_PMC_BIT_MPHY_CMN_LANE1},
 {"DMIPCIE3 PLL", SPT_PMC_BIT_MPHY_CMN_LANE2},
 {"SATA PLL", SPT_PMC_BIT_MPHY_CMN_LANE3},
- {},
+ {}
 };

 static const struct pmc_bit_map spt_mphy_map[] = {
@@ -69,7 +69,7 @@ static const struct pmc_bit_map spt_mphy_map[] = {
 {"MPHY CORE LANE 13", SPT_PMC_BIT_MPHY_LANE13},
 {"MPHY CORE LANE 14", SPT_PMC_BIT_MPHY_LANE14},
 {"MPHY CORE LANE 15", SPT_PMC_BIT_MPHY_LANE15},
- {},
+ {}
 };

 static const struct pmc_bit_map spt_pfear_map[] = {
@@ -113,7 +113,12 @@ static const struct pmc_bit_map spt_pfear_map[] = {
 {"CSME_SMS1", SPT_PMC_BIT_CSME_SMS1},
 {"CSME_RTC", SPT_PMC_BIT_CSME_RTC},
 {"CSME_PSF", SPT_PMC_BIT_CSME_PSF},
- {},
+ {}
+};
+
+static const struct pmc_bit_map *ext_spt_pfear_map[] = {
+ spt_pfear_map,
+ NULL
 };

 static const struct pmc_bit_map spt_ltr_show_map[] = {
@@ -142,7 +147,7 @@ static const struct pmc_bit_map spt_ltr_show_map[] = {
 };

 static const struct pmc_reg_map spt_reg_map = {
- .pfear_sts = spt_pfear_map,
+ .pfear_sts = ext_spt_pfear_map,
 .mphy_sts = spt_mphy_map,
 .pll_sts = spt_pll_map,
 .ltr_show_sts = spt_ltr_show_map,
@@ -186,7 +191,10 @@ static const struct pmc_bit_map cnp_pfear_map[] = {
 {"SDX", BIT(4)},
 {"SPE", BIT(5)},
 {"Fuse", BIT(6)},
- /* Reserved for Cannon Lake but valid for Ice Lake and Comet Lake */
+ /*
+ * Reserved for Cannon Lake but valid for Ice Lake, Comet Lake,
+ * Tiger Lake and Elkhart Lake.
+ */
 {"SBR8", BIT(7)},

 {"CSME_FSC", BIT(0)},
@@ -230,11 +238,22 @@ static const struct pmc_bit_map cnp_pfear_map[] = {
 {"HDA_PGD4", BIT(2)},
 {"HDA_PGD5", BIT(3)},
 {"HDA_PGD6", BIT(4)},
- /* Reserved for Cannon Lake but valid for Ice Lake and Comet Lake */
+ /*
+ * Reserved for Cannon Lake but valid for Ice Lake, Comet Lake,
+ * Tiger Lake and Elkhart Lake.
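+ *
+ * Each generation chains its extra bits after this common map via a
+ * NULL-terminated array of maps (the ext_*_pfear_map arrays below),
+ * which pmc_core_ppfear_show() walks roughly as:
+ *
+ *	for (idx = 0; maps[idx]; idx++)
+ *		for (index = 0; maps[idx][index].name; index++)
+ *			pmc_core_display_map(...);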
+ */ {"PSF6", BIT(5)}, {"PSF7", BIT(6)}, {"PSF8", BIT(7)}, + {} +}; + +static const struct pmc_bit_map *ext_cnp_pfear_map[] = { + cnp_pfear_map, + NULL +}; +static const struct pmc_bit_map icl_pfear_map[] = { /* Ice Lake generation onwards only */ {"RES_65", BIT(0)}, {"RES_66", BIT(1)}, @@ -247,6 +266,30 @@ static const struct pmc_bit_map cnp_pfear_map[] = { {} }; +static const struct pmc_bit_map *ext_icl_pfear_map[] = { + cnp_pfear_map, + icl_pfear_map, + NULL +}; + +static const struct pmc_bit_map tgl_pfear_map[] = { + /* Tiger Lake and Elkhart Lake generation onwards only */ + {"PSF9", BIT(0)}, + {"RES_66", BIT(1)}, + {"RES_67", BIT(2)}, + {"RES_68", BIT(3)}, + {"RES_69", BIT(4)}, + {"RES_70", BIT(5)}, + {"TBTLSX", BIT(6)}, + {} +}; + +static const struct pmc_bit_map *ext_tgl_pfear_map[] = { + cnp_pfear_map, + tgl_pfear_map, + NULL +}; + static const struct pmc_bit_map cnp_slps0_dbg0_map[] = { {"AUDIO_D3", BIT(0)}, {"OTG_D3", BIT(1)}, @@ -300,7 +343,7 @@ static const struct pmc_bit_map *cnp_slps0_dbg_maps[] = { cnp_slps0_dbg0_map, cnp_slps0_dbg1_map, cnp_slps0_dbg2_map, - NULL, + NULL }; static const struct pmc_bit_map cnp_ltr_show_map[] = { @@ -334,7 +377,7 @@ static const struct pmc_bit_map cnp_ltr_show_map[] = { }; static const struct pmc_reg_map cnp_reg_map = { - .pfear_sts = cnp_pfear_map, + .pfear_sts = ext_cnp_pfear_map, .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET, .slps0_dbg_maps = cnp_slps0_dbg_maps, .ltr_show_sts = cnp_ltr_show_map, @@ -350,7 +393,7 @@ static const struct pmc_reg_map cnp_reg_map = { }; static const struct pmc_reg_map icl_reg_map = { - .pfear_sts = cnp_pfear_map, + .pfear_sts = ext_icl_pfear_map, .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET, .slps0_dbg_maps = cnp_slps0_dbg_maps, .ltr_show_sts = cnp_ltr_show_map, @@ -365,18 +408,29 @@ static const struct pmc_reg_map icl_reg_map = { .ltr_ignore_max = ICL_NUM_IP_IGN_ALLOWED, }; -static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset) -{ - return readb(pmcdev->regbase + offset); -} +static const struct pmc_reg_map tgl_reg_map = { + .pfear_sts = ext_tgl_pfear_map, + .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET, + .slps0_dbg_maps = cnp_slps0_dbg_maps, + .ltr_show_sts = cnp_ltr_show_map, + .msr_sts = msr_map, + .slps0_dbg_offset = CNP_PMC_SLPS0_DBG_OFFSET, + .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET, + .regmap_length = CNP_PMC_MMIO_REG_LEN, + .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A, + .ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES, + .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET, + .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT, + .ltr_ignore_max = TGL_NUM_IP_IGN_ALLOWED, +}; static inline u32 pmc_core_reg_read(struct pmc_dev *pmcdev, int reg_offset) { return readl(pmcdev->regbase + reg_offset); } -static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int - reg_offset, u32 val) +static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int reg_offset, + u32 val) { writel(val, pmcdev->regbase + reg_offset); } @@ -412,20 +466,25 @@ static int pmc_core_check_read_lock_bit(void) #if IS_ENABLED(CONFIG_DEBUG_FS) static bool slps0_dbg_latch; -static void pmc_core_display_map(struct seq_file *s, int index, - u8 pf_reg, const struct pmc_bit_map *pf_map) +static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset) +{ + return readb(pmcdev->regbase + offset); +} + +static void pmc_core_display_map(struct seq_file *s, int index, int idx, int ip, + u8 pf_reg, const struct pmc_bit_map **pf_map) { seq_printf(s, "PCH IP: %-2d - %-32s\tState: %s\n", - index, 
pf_map[index].name,
- pf_map[index].bit_mask & pf_reg ? "Off" : "On");
+ ip, pf_map[idx][index].name,
+ pf_map[idx][index].bit_mask & pf_reg ? "Off" : "On");
 }

 static int pmc_core_ppfear_show(struct seq_file *s, void *unused)
 {
 struct pmc_dev *pmcdev = s->private;
- const struct pmc_bit_map *map = pmcdev->map->pfear_sts;
+ const struct pmc_bit_map **maps = pmcdev->map->pfear_sts;
 u8 pf_regs[PPFEAR_MAX_NUM_ENTRIES];
- int index, iter;
+ int index, iter, idx, ip = 0;

 iter = pmcdev->map->ppfear0_offset;

@@ -433,9 +492,12 @@ static int pmc_core_ppfear_show(struct seq_file *s, void *unused)
 index < PPFEAR_MAX_NUM_ENTRIES; index++, iter++)
 pf_regs[index] = pmc_core_reg_read_byte(pmcdev, iter);

- for (index = 0; map[index].name &&
- index < pmcdev->map->ppfear_buckets * 8; index++)
- pmc_core_display_map(s, index, pf_regs[index / 8], map);
+ for (idx = 0; maps[idx]; idx++) {
+ for (index = 0; maps[idx][index].name &&
+ index < pmcdev->map->ppfear_buckets * 8; ip++, index++)
+ pmc_core_display_map(s, index, idx, ip,
+ pf_regs[index / 8], maps);
+ }

 return 0;
 }
@@ -561,21 +623,22 @@ out_unlock:
 }
 DEFINE_SHOW_ATTRIBUTE(pmc_core_pll);

-static ssize_t pmc_core_ltr_ignore_write(struct file *file, const char __user
-*userbuf, size_t count, loff_t *ppos)
+static ssize_t pmc_core_ltr_ignore_write(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
 {
 struct pmc_dev *pmcdev = &pmc;
 const struct pmc_reg_map *map = pmcdev->map;
 u32 val, buf_size, fd;
- int err = 0;
+ int err;

 buf_size = count < 64 ? count : 64;
- mutex_lock(&pmcdev->lock);

- if (kstrtou32_from_user(userbuf, buf_size, 10, &val)) {
- err = -EFAULT;
- goto out_unlock;
- }
+ err = kstrtou32_from_user(userbuf, buf_size, 10, &val);
+ if (err)
+ return err;
+
+ mutex_lock(&pmcdev->lock);

 if (val > map->ltr_ignore_max) {
 err = -EINVAL;
@@ -767,8 +830,9 @@ static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
 debugfs_create_file("slp_s0_residency_usec", 0444, dir, pmcdev,
 &pmc_core_dev_state);

- debugfs_create_file("pch_ip_power_gating_status", 0444, dir, pmcdev,
- &pmc_core_ppfear_fops);
+ if (pmcdev->map->pfear_sts)
+ debugfs_create_file("pch_ip_power_gating_status", 0444, dir,
+ pmcdev, &pmc_core_ppfear_fops);

 debugfs_create_file("ltr_ignore", 0644, dir, pmcdev,
 &pmc_core_ltr_ignore_ops);
@@ -816,19 +880,22 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = {
 INTEL_CPU_FAM6(ICELAKE_NNPI, icl_reg_map),
 INTEL_CPU_FAM6(COMETLAKE, cnp_reg_map),
 INTEL_CPU_FAM6(COMETLAKE_L, cnp_reg_map),
+ INTEL_CPU_FAM6(TIGERLAKE_L, tgl_reg_map),
+ INTEL_CPU_FAM6(TIGERLAKE, tgl_reg_map),
+ INTEL_CPU_FAM6(ATOM_TREMONT, tgl_reg_map),
 {}
 };

 MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_ids);

 static const struct pci_device_id pmc_pci_ids[] = {
- { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID), 0},
- { 0, },
+ { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID) },
+ { }
 };

 /*
 * This quirk can be used on those platforms where
- * the platform BIOS enforces 24Mhx Crystal to shutdown
+ * the platform BIOS forces the 24 MHz crystal to shut down
 * before PMC can assert SLP_S0#.
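 *
 * Quirks of this kind are keyed off DMI data; a sketch of how such an
 * entry is wired up (table name and match strings illustrative only):
 *
 *	static const struct dmi_system_id pmc_core_dmi_table[] = {
 *		{
 *			.callback = quirk_xtal_ignore,
 *			.ident = "Some SLP_S0-affected platform",
 *			.matches = {
 *				DMI_MATCH(DMI_SYS_VENDOR, "Vendor"),
 *			},
 *		},
 *		{}
 *	};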
*/ static int quirk_xtal_ignore(const struct dmi_system_id *id) diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h index fdee5772e532..f1a0792b3f91 100644 --- a/drivers/platform/x86/intel_pmc_core.h +++ b/drivers/platform/x86/intel_pmc_core.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * Intel Core SoC Power Management Controller Header File * @@ -186,6 +186,8 @@ enum ppfear_regs { #define ICL_NUM_IP_IGN_ALLOWED 20 #define ICL_PMC_LTR_WIGIG 0x1BFC +#define TGL_NUM_IP_IGN_ALLOWED 22 + struct pmc_bit_map { const char *name; u32 bit_mask; @@ -213,7 +215,7 @@ struct pmc_bit_map { * captures them to have a common implementation. */ struct pmc_reg_map { - const struct pmc_bit_map *pfear_sts; + const struct pmc_bit_map **pfear_sts; const struct pmc_bit_map *mphy_sts; const struct pmc_bit_map *pll_sts; const struct pmc_bit_map **slps0_dbg_maps; diff --git a/drivers/platform/x86/intel_pmc_core_pltdrv.c b/drivers/platform/x86/intel_pmc_core_pltdrv.c index 6fe829f30997..e1266f5c6359 100644 --- a/drivers/platform/x86/intel_pmc_core_pltdrv.c +++ b/drivers/platform/x86/intel_pmc_core_pltdrv.c @@ -44,6 +44,8 @@ static const struct x86_cpu_id intel_pmc_core_platform_ids[] = { INTEL_CPU_FAM6(KABYLAKE, pmc_core_device), INTEL_CPU_FAM6(CANNONLAKE_L, pmc_core_device), INTEL_CPU_FAM6(ICELAKE_L, pmc_core_device), + INTEL_CPU_FAM6(COMETLAKE, pmc_core_device), + INTEL_CPU_FAM6(COMETLAKE_L, pmc_core_device), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_platform_ids); diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c index 5c1da2bb1435..2433bf73f1ed 100644 --- a/drivers/platform/x86/intel_pmc_ipc.c +++ b/drivers/platform/x86/intel_pmc_ipc.c @@ -12,23 +12,13 @@ */ #include <linux/acpi.h> -#include <linux/atomic.h> -#include <linux/bitops.h> #include <linux/delay.h> -#include <linux/device.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/io-64-nonatomic-lo-hi.h> -#include <linux/kernel.h> #include <linux/module.h> -#include <linux/notifier.h> #include <linux/pci.h> #include <linux/platform_device.h> -#include <linux/pm.h> -#include <linux/pm_qos.h> -#include <linux/sched.h> -#include <linux/spinlock.h> -#include <linux/suspend.h> #include <asm/intel_pmc_ipc.h> @@ -184,11 +174,6 @@ static inline void ipc_data_writel(u32 data, u32 offset) writel(data, ipcdev.ipc_base + IPC_WRITE_BUFFER + offset); } -static inline u8 __maybe_unused ipc_data_readb(u32 offset) -{ - return readb(ipcdev.ipc_base + IPC_READ_BUFFER + offset); -} - static inline u32 ipc_data_readl(u32 offset) { return readl(ipcdev.ipc_base + IPC_READ_BUFFER + offset); @@ -211,35 +196,6 @@ static inline int is_gcr_valid(u32 offset) } /** - * intel_pmc_gcr_read() - Read a 32-bit PMC GCR register - * @offset: offset of GCR register from GCR address base - * @data: data pointer for storing the register output - * - * Reads the 32-bit PMC GCR register at given offset. - * - * Return: negative value on error or 0 on success. 
- */ -int intel_pmc_gcr_read(u32 offset, u32 *data) -{ - int ret; - - spin_lock(&ipcdev.gcr_lock); - - ret = is_gcr_valid(offset); - if (ret < 0) { - spin_unlock(&ipcdev.gcr_lock); - return ret; - } - - *data = readl(ipcdev.gcr_mem_base + offset); - - spin_unlock(&ipcdev.gcr_lock); - - return 0; -} -EXPORT_SYMBOL_GPL(intel_pmc_gcr_read); - -/** * intel_pmc_gcr_read64() - Read a 64-bit PMC GCR register * @offset: offset of GCR register from GCR address base * @data: data pointer for storing the register output @@ -269,36 +225,6 @@ int intel_pmc_gcr_read64(u32 offset, u64 *data) EXPORT_SYMBOL_GPL(intel_pmc_gcr_read64); /** - * intel_pmc_gcr_write() - Write PMC GCR register - * @offset: offset of GCR register from GCR address base - * @data: register update value - * - * Writes the PMC GCR register of given offset with given - * value. - * - * Return: negative value on error or 0 on success. - */ -int intel_pmc_gcr_write(u32 offset, u32 data) -{ - int ret; - - spin_lock(&ipcdev.gcr_lock); - - ret = is_gcr_valid(offset); - if (ret < 0) { - spin_unlock(&ipcdev.gcr_lock); - return ret; - } - - writel(data, ipcdev.gcr_mem_base + offset); - - spin_unlock(&ipcdev.gcr_lock); - - return 0; -} -EXPORT_SYMBOL_GPL(intel_pmc_gcr_write); - -/** * intel_pmc_gcr_update() - Update PMC GCR register bits * @offset: offset of GCR register from GCR address base * @mask: bit mask for update operation @@ -309,7 +235,7 @@ EXPORT_SYMBOL_GPL(intel_pmc_gcr_write); * * Return: negative value on error or 0 on success. */ -int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val) +static int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val) { u32 new_val; int ret = 0; @@ -339,7 +265,6 @@ gcr_ipc_unlock: spin_unlock(&ipcdev.gcr_lock); return ret; } -EXPORT_SYMBOL_GPL(intel_pmc_gcr_update); static int update_no_reboot_bit(void *priv, bool set) { @@ -405,7 +330,7 @@ static int intel_pmc_ipc_check_status(void) * * Return: an IPC error code or 0 on success. */ -int intel_pmc_ipc_simple_command(int cmd, int sub) +static int intel_pmc_ipc_simple_command(int cmd, int sub) { int ret; @@ -420,7 +345,6 @@ int intel_pmc_ipc_simple_command(int cmd, int sub) return ret; } -EXPORT_SYMBOL_GPL(intel_pmc_ipc_simple_command); /** * intel_pmc_ipc_raw_cmd() - IPC command with data and pointers @@ -437,8 +361,8 @@ EXPORT_SYMBOL_GPL(intel_pmc_ipc_simple_command); * * Return: an IPC error code or 0 on success. 
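 *
 * A minimal sketch of an internal call, leaving the source/destination
 * pointer registers unused (argument values illustrative only):
 *
 *	ret = intel_pmc_ipc_raw_cmd(cmd, sub, in, inlen, out, outlen, 0, 0);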
*/ -int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out, - u32 outlen, u32 dptr, u32 sptr) +static int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out, + u32 outlen, u32 dptr, u32 sptr) { u32 wbuf[4] = { 0 }; int ret; @@ -470,7 +394,6 @@ int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out, return ret; } -EXPORT_SYMBOL_GPL(intel_pmc_ipc_raw_cmd); /** * intel_pmc_ipc_command() - IPC command with input/output data @@ -579,6 +502,7 @@ static ssize_t intel_pmc_ipc_simple_cmd_store(struct device *dev, } return (ssize_t)count; } +static DEVICE_ATTR(simplecmd, 0200, NULL, intel_pmc_ipc_simple_cmd_store); static ssize_t intel_pmc_ipc_northpeak_store(struct device *dev, struct device_attribute *attr, @@ -588,8 +512,9 @@ static ssize_t intel_pmc_ipc_northpeak_store(struct device *dev, int subcmd; int ret; - if (kstrtoul(buf, 0, &val)) - return -EINVAL; + ret = kstrtoul(buf, 0, &val); + if (ret) + return ret; if (val) subcmd = 1; @@ -602,11 +527,7 @@ static ssize_t intel_pmc_ipc_northpeak_store(struct device *dev, } return (ssize_t)count; } - -static DEVICE_ATTR(simplecmd, S_IWUSR, - NULL, intel_pmc_ipc_simple_cmd_store); -static DEVICE_ATTR(northpeak, S_IWUSR, - NULL, intel_pmc_ipc_northpeak_store); +static DEVICE_ATTR(northpeak, 0200, NULL, intel_pmc_ipc_northpeak_store); static struct attribute *intel_ipc_attrs[] = { &dev_attr_northpeak.attr, @@ -618,6 +539,11 @@ static const struct attribute_group intel_ipc_group = { .attrs = intel_ipc_attrs, }; +static const struct attribute_group *intel_ipc_groups[] = { + &intel_ipc_group, + NULL +}; + static struct resource punit_res_array[] = { /* Punit BIOS */ { @@ -958,18 +884,10 @@ static int ipc_plat_probe(struct platform_device *pdev) goto err_irq; } - ret = sysfs_create_group(&pdev->dev.kobj, &intel_ipc_group); - if (ret) { - dev_err(&pdev->dev, "Failed to create sysfs group %d\n", - ret); - goto err_sys; - } - ipcdev.has_gcr_regs = true; return 0; -err_sys: - devm_free_irq(&pdev->dev, ipcdev.irq, &ipcdev); + err_irq: platform_device_unregister(ipcdev.tco_dev); platform_device_unregister(ipcdev.punit_dev); @@ -980,7 +898,6 @@ err_irq: static int ipc_plat_remove(struct platform_device *pdev) { - sysfs_remove_group(&pdev->dev.kobj, &intel_ipc_group); devm_free_irq(&pdev->dev, ipcdev.irq, &ipcdev); platform_device_unregister(ipcdev.tco_dev); platform_device_unregister(ipcdev.punit_dev); @@ -995,6 +912,7 @@ static struct platform_driver ipc_plat_driver = { .driver = { .name = "pmc-ipc-plat", .acpi_match_table = ACPI_PTR(ipc_acpi_ids), + .dev_groups = intel_ipc_groups, }, }; diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c index cdab916fbf92..3d7da5266136 100644 --- a/drivers/platform/x86/intel_scu_ipc.c +++ b/drivers/platform/x86/intel_scu_ipc.c @@ -26,11 +26,7 @@ #include <asm/intel_scu_ipc.h> /* IPC defines the following message types */ -#define IPCMSG_WATCHDOG_TIMER 0xF8 /* Set Kernel Watchdog Threshold */ -#define IPCMSG_BATTERY 0xEF /* Coulomb Counter Accumulator */ -#define IPCMSG_FW_UPDATE 0xFE /* Firmware update */ -#define IPCMSG_PCNTRL 0xFF /* Power controller unit read/write */ -#define IPCMSG_FW_REVISION 0xF4 /* Get firmware revision */ +#define IPCMSG_PCNTRL 0xff /* Power controller unit read/write */ /* Command id associated with message IPCMSG_PCNTRL */ #define IPC_CMD_PCNTRL_W 0 /* Register write */ @@ -58,56 +54,29 @@ #define IPC_RWBUF_SIZE 20 /* IPC Read buffer Size */ #define IPC_IOC 0x100 /* IPC command register IOC bit */ -#define 
PCI_DEVICE_ID_LINCROFT 0x082a -#define PCI_DEVICE_ID_PENWELL 0x080e -#define PCI_DEVICE_ID_CLOVERVIEW 0x08ea -#define PCI_DEVICE_ID_TANGIER 0x11a0 - -/* intel scu ipc driver data */ -struct intel_scu_ipc_pdata_t { - u32 i2c_base; - u32 i2c_len; - u8 irq_mode; -}; - -static const struct intel_scu_ipc_pdata_t intel_scu_ipc_lincroft_pdata = { - .i2c_base = 0xff12b000, - .i2c_len = 0x10, - .irq_mode = 0, -}; - -/* Penwell and Cloverview */ -static const struct intel_scu_ipc_pdata_t intel_scu_ipc_penwell_pdata = { - .i2c_base = 0xff12b000, - .i2c_len = 0x10, - .irq_mode = 1, -}; - -static const struct intel_scu_ipc_pdata_t intel_scu_ipc_tangier_pdata = { - .i2c_base = 0xff00d000, - .i2c_len = 0x10, - .irq_mode = 0, -}; - struct intel_scu_ipc_dev { struct device *dev; void __iomem *ipc_base; - void __iomem *i2c_base; struct completion cmd_complete; u8 irq_mode; }; static struct intel_scu_ipc_dev ipcdev; /* Only one for now */ +#define IPC_STATUS 0x04 +#define IPC_STATUS_IRQ BIT(2) +#define IPC_STATUS_ERR BIT(1) +#define IPC_STATUS_BUSY BIT(0) + /* - * IPC Read Buffer (Read Only): - * 16 byte buffer for receiving data from SCU, if IPC command - * processing results in response data + * IPC Write/Read Buffers: + * 16 byte buffer for sending and receiving data to and from SCU. */ +#define IPC_WRITE_BUFFER 0x80 #define IPC_READ_BUFFER 0x90 -#define IPC_I2C_CNTRL_ADDR 0 -#define I2C_DATA_ADDR 0x04 +/* Timeout in jiffies */ +#define IPC_TIMEOUT (3 * HZ) static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */ @@ -120,11 +89,8 @@ static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */ */ static inline void ipc_command(struct intel_scu_ipc_dev *scu, u32 cmd) { - if (scu->irq_mode) { - reinit_completion(&scu->cmd_complete); - writel(cmd | IPC_IOC, scu->ipc_base); - } - writel(cmd, scu->ipc_base); + reinit_completion(&scu->cmd_complete); + writel(cmd | IPC_IOC, scu->ipc_base); } /* @@ -135,7 +101,7 @@ static inline void ipc_command(struct intel_scu_ipc_dev *scu, u32 cmd) */ static inline void ipc_data_writel(struct intel_scu_ipc_dev *scu, u32 data, u32 offset) { - writel(data, scu->ipc_base + 0x80 + offset); + writel(data, scu->ipc_base + IPC_WRITE_BUFFER + offset); } /* @@ -147,7 +113,7 @@ static inline void ipc_data_writel(struct intel_scu_ipc_dev *scu, u32 data, u32 */ static inline u8 ipc_read_status(struct intel_scu_ipc_dev *scu) { - return __raw_readl(scu->ipc_base + 0x04); + return __raw_readl(scu->ipc_base + IPC_STATUS); } /* Read ipc byte data */ @@ -165,24 +131,20 @@ static inline u32 ipc_data_readl(struct intel_scu_ipc_dev *scu, u32 offset) /* Wait till scu status is busy */ static inline int busy_loop(struct intel_scu_ipc_dev *scu) { - u32 status = ipc_read_status(scu); - u32 loop_count = 100000; + unsigned long end = jiffies + msecs_to_jiffies(IPC_TIMEOUT); - /* break if scu doesn't reset busy bit after huge retry */ - while ((status & BIT(0)) && --loop_count) { - udelay(1); /* scu processing time is in few u secods */ - status = ipc_read_status(scu); - } + do { + u32 status; - if (status & BIT(0)) { - dev_err(scu->dev, "IPC timed out"); - return -ETIMEDOUT; - } + status = ipc_read_status(scu); + if (!(status & IPC_STATUS_BUSY)) + return (status & IPC_STATUS_ERR) ? 
-EIO : 0; - if (status & BIT(1)) - return -EIO; + usleep_range(50, 100); + } while (time_before(jiffies, end)); - return 0; + dev_err(scu->dev, "IPC timed out"); + return -ETIMEDOUT; } /* Wait till ipc ioc interrupt is received or timeout in 3 HZ */ @@ -190,13 +152,13 @@ static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu) { int status; - if (!wait_for_completion_timeout(&scu->cmd_complete, 3 * HZ)) { + if (!wait_for_completion_timeout(&scu->cmd_complete, IPC_TIMEOUT)) { dev_err(scu->dev, "IPC timed out\n"); return -ETIMEDOUT; } status = ipc_read_status(scu); - if (status & BIT(1)) + if (status & IPC_STATUS_ERR) return -EIO; return 0; @@ -260,14 +222,14 @@ static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id) } /** - * intel_scu_ipc_ioread8 - read a word via the SCU - * @addr: register on SCU - * @data: return pointer for read byte + * intel_scu_ipc_ioread8 - read a word via the SCU + * @addr: Register on SCU + * @data: Return pointer for read byte * - * Read a single register. Returns 0 on success or an error code. All - * locking between SCU accesses is handled for the caller. + * Read a single register. Returns %0 on success or an error code. All + * locking between SCU accesses is handled for the caller. * - * This function may sleep. + * This function may sleep. */ int intel_scu_ipc_ioread8(u16 addr, u8 *data) { @@ -276,48 +238,14 @@ int intel_scu_ipc_ioread8(u16 addr, u8 *data) EXPORT_SYMBOL(intel_scu_ipc_ioread8); /** - * intel_scu_ipc_ioread16 - read a word via the SCU - * @addr: register on SCU - * @data: return pointer for read word - * - * Read a register pair. Returns 0 on success or an error code. All - * locking between SCU accesses is handled for the caller. - * - * This function may sleep. - */ -int intel_scu_ipc_ioread16(u16 addr, u16 *data) -{ - u16 x[2] = {addr, addr + 1}; - return pwr_reg_rdwr(x, (u8 *)data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R); -} -EXPORT_SYMBOL(intel_scu_ipc_ioread16); - -/** - * intel_scu_ipc_ioread32 - read a dword via the SCU - * @addr: register on SCU - * @data: return pointer for read dword - * - * Read four registers. Returns 0 on success or an error code. All - * locking between SCU accesses is handled for the caller. - * - * This function may sleep. - */ -int intel_scu_ipc_ioread32(u16 addr, u32 *data) -{ - u16 x[4] = {addr, addr + 1, addr + 2, addr + 3}; - return pwr_reg_rdwr(x, (u8 *)data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R); -} -EXPORT_SYMBOL(intel_scu_ipc_ioread32); - -/** - * intel_scu_ipc_iowrite8 - write a byte via the SCU - * @addr: register on SCU - * @data: byte to write + * intel_scu_ipc_iowrite8 - write a byte via the SCU + * @addr: Register on SCU + * @data: Byte to write * - * Write a single register. Returns 0 on success or an error code. All - * locking between SCU accesses is handled for the caller. + * Write a single register. Returns %0 on success or an error code. All + * locking between SCU accesses is handled for the caller. * - * This function may sleep. + * This function may sleep. */ int intel_scu_ipc_iowrite8(u16 addr, u8 data) { @@ -326,51 +254,17 @@ int intel_scu_ipc_iowrite8(u16 addr, u8 data) EXPORT_SYMBOL(intel_scu_ipc_iowrite8); /** - * intel_scu_ipc_iowrite16 - write a word via the SCU - * @addr: register on SCU - * @data: word to write - * - * Write two registers. Returns 0 on success or an error code. All - * locking between SCU accesses is handled for the caller. 
+ * intel_scu_ipc_readv - read a set of registers
+ * @addr: Register list
+ * @data: Bytes to return
+ * @len: Length of array
 *
- * This function may sleep.
- */
-int intel_scu_ipc_iowrite16(u16 addr, u16 data)
-{
- u16 x[2] = {addr, addr + 1};
- return pwr_reg_rdwr(x, (u8 *)&data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
-}
-EXPORT_SYMBOL(intel_scu_ipc_iowrite16);
-
-/**
- * intel_scu_ipc_iowrite32 - write a dword via the SCU
- * @addr: register on SCU
- * @data: dword to write
+ * Read registers. Returns %0 on success or an error code. All locking
+ * between SCU accesses is handled for the caller.
 *
- * Write four registers. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
+ * The largest array length permitted by the hardware is 5 items.
 *
- * This function may sleep.
- */
-int intel_scu_ipc_iowrite32(u16 addr, u32 data)
-{
- u16 x[4] = {addr, addr + 1, addr + 2, addr + 3};
- return pwr_reg_rdwr(x, (u8 *)&data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
-}
-EXPORT_SYMBOL(intel_scu_ipc_iowrite32);
-
-/**
- * intel_scu_ipc_readvv - read a set of registers
- * @addr: register list
- * @data: bytes to return
- * @len: length of array
- *
- * Read registers. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
- *
- * The largest array length permitted by the hardware is 5 items.
- *
- * This function may sleep.
+ * This function may sleep.
 */
 int intel_scu_ipc_readv(u16 *addr, u8 *data, int len)
 {
@@ -379,18 +273,17 @@ EXPORT_SYMBOL(intel_scu_ipc_readv);

 /**
- * intel_scu_ipc_writev - write a set of registers
- * @addr: register list
- * @data: bytes to write
- * @len: length of array
- *
- * Write registers. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
+ * intel_scu_ipc_writev - write a set of registers
+ * @addr: Register list
+ * @data: Bytes to write
+ * @len: Length of array
 *
- * The largest array length permitted by the hardware is 5 items.
+ * Write registers. Returns %0 on success or an error code. All locking
+ * between SCU accesses is handled for the caller.
 *
- * This function may sleep.
+ * The largest array length permitted by the hardware is 5 items.
 *
+ * This function may sleep.
 */
 int intel_scu_ipc_writev(u16 *addr, u8 *data, int len)
 {
@@ -399,19 +292,18 @@ EXPORT_SYMBOL(intel_scu_ipc_writev);

 /**
- * intel_scu_ipc_update_register - r/m/w a register
- * @addr: register address
- * @bits: bits to update
- * @mask: mask of bits to update
- *
- * Read-modify-write power control unit register. The first data argument
- * must be register value and second is mask value
- * mask is a bitmap that indicates which bits to update.
- * 0 = masked. Don't modify this bit, 1 = modify this bit.
- * returns 0 on success or an error code.
- *
- * This function may sleep. Locking between SCU accesses is handled
- * for the caller.
+ * intel_scu_ipc_update_register - r/m/w a register
+ * @addr: Register address
+ * @bits: Bits to update
+ * @mask: Mask of bits to update
+ *
+ * Read-modify-write power control unit register. The first data argument
+ * must be the register value and the second is the mask value. The mask
+ * is a bitmap that indicates which bits to update: %0 = masked (don't
+ * modify this bit), %1 = modify this bit. Returns %0 on success or an
+ * error code.
+ *
+ * This function may sleep.
Locking between SCU accesses is handled + * for the caller. */ int intel_scu_ipc_update_register(u16 addr, u8 bits, u8 mask) { @@ -421,16 +313,16 @@ int intel_scu_ipc_update_register(u16 addr, u8 bits, u8 mask) EXPORT_SYMBOL(intel_scu_ipc_update_register); /** - * intel_scu_ipc_simple_command - send a simple command - * @cmd: command - * @sub: sub type + * intel_scu_ipc_simple_command - send a simple command + * @cmd: Command + * @sub: Sub type * - * Issue a simple command to the SCU. Do not use this interface if - * you must then access data as any data values may be overwritten - * by another SCU access by the time this function returns. + * Issue a simple command to the SCU. Do not use this interface if you must + * then access data as any data values may be overwritten by another SCU + * access by the time this function returns. * - * This function may sleep. Locking for SCU accesses is handled for - * the caller. + * This function may sleep. Locking for SCU accesses is handled for the + * caller. */ int intel_scu_ipc_simple_command(int cmd, int sub) { @@ -450,16 +342,16 @@ int intel_scu_ipc_simple_command(int cmd, int sub) EXPORT_SYMBOL(intel_scu_ipc_simple_command); /** - * intel_scu_ipc_command - command with data - * @cmd: command - * @sub: sub type - * @in: input data - * @inlen: input length in dwords - * @out: output data - * @outlein: output length in dwords - * - * Issue a command to the SCU which involves data transfers. Do the - * data copies under the lock but leave it for the caller to interpret + * intel_scu_ipc_command - command with data + * @cmd: Command + * @sub: Sub type + * @in: Input data + * @inlen: Input length in dwords + * @out: Output data + * @outlen: Output length in dwords + * + * Issue a command to the SCU which involves data transfers. Do the + * data copies under the lock but leave it for the caller to interpret. */ int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen, u32 *out, int outlen) @@ -489,117 +381,6 @@ int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen, } EXPORT_SYMBOL(intel_scu_ipc_command); -#define IPC_SPTR 0x08 -#define IPC_DPTR 0x0C - -/** - * intel_scu_ipc_raw_command() - IPC command with data and pointers - * @cmd: IPC command code. - * @sub: IPC command sub type. - * @in: input data of this IPC command. - * @inlen: input data length in dwords. - * @out: output data of this IPC command. - * @outlen: output data length in dwords. - * @sptr: data writing to SPTR register. - * @dptr: data writing to DPTR register. - * - * Send an IPC command to SCU with input/output data and source/dest pointers. - * - * Return: an IPC error code or 0 on success. - */ -int intel_scu_ipc_raw_command(int cmd, int sub, u8 *in, int inlen, - u32 *out, int outlen, u32 dptr, u32 sptr) -{ - struct intel_scu_ipc_dev *scu = &ipcdev; - int inbuflen = DIV_ROUND_UP(inlen, 4); - u32 inbuf[4]; - int i, err; - - /* Up to 16 bytes */ - if (inbuflen > 4) - return -EINVAL; - - mutex_lock(&ipclock); - if (scu->dev == NULL) { - mutex_unlock(&ipclock); - return -ENODEV; - } - - writel(dptr, scu->ipc_base + IPC_DPTR); - writel(sptr, scu->ipc_base + IPC_SPTR); - - /* - * SRAM controller doesn't support 8-bit writes, it only - * supports 32-bit writes, so we have to copy input data into - * the temporary buffer, and SCU FW will use the inlen to - * determine the actual input data length in the temporary - * buffer. 
- */ - memcpy(inbuf, in, inlen); - - for (i = 0; i < inbuflen; i++) - ipc_data_writel(scu, inbuf[i], 4 * i); - - ipc_command(scu, (inlen << 16) | (sub << 12) | cmd); - err = intel_scu_ipc_check_status(scu); - if (!err) { - for (i = 0; i < outlen; i++) - *out++ = ipc_data_readl(scu, 4 * i); - } - - mutex_unlock(&ipclock); - return err; -} -EXPORT_SYMBOL_GPL(intel_scu_ipc_raw_command); - -/* I2C commands */ -#define IPC_I2C_WRITE 1 /* I2C Write command */ -#define IPC_I2C_READ 2 /* I2C Read command */ - -/** - * intel_scu_ipc_i2c_cntrl - I2C read/write operations - * @addr: I2C address + command bits - * @data: data to read/write - * - * Perform an an I2C read/write operation via the SCU. All locking is - * handled for the caller. This function may sleep. - * - * Returns an error code or 0 on success. - * - * This has to be in the IPC driver for the locking. - */ -int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data) -{ - struct intel_scu_ipc_dev *scu = &ipcdev; - u32 cmd = 0; - - mutex_lock(&ipclock); - if (scu->dev == NULL) { - mutex_unlock(&ipclock); - return -ENODEV; - } - cmd = (addr >> 24) & 0xFF; - if (cmd == IPC_I2C_READ) { - writel(addr, scu->i2c_base + IPC_I2C_CNTRL_ADDR); - /* Write not getting updated without delay */ - usleep_range(1000, 2000); - *data = readl(scu->i2c_base + I2C_DATA_ADDR); - } else if (cmd == IPC_I2C_WRITE) { - writel(*data, scu->i2c_base + I2C_DATA_ADDR); - usleep_range(1000, 2000); - writel(addr, scu->i2c_base + IPC_I2C_CNTRL_ADDR); - } else { - dev_err(scu->dev, - "intel_scu_ipc: I2C INVALID_CMD = 0x%x\n", cmd); - - mutex_unlock(&ipclock); - return -EIO; - } - mutex_unlock(&ipclock); - return 0; -} -EXPORT_SYMBOL(intel_scu_ipc_i2c_cntrl); - /* * Interrupt handler gets called when ioc bit of IPC_COMMAND_REG set to 1 * When ioc bit is set to 1, caller api must wait for interrupt handler called @@ -610,9 +391,10 @@ EXPORT_SYMBOL(intel_scu_ipc_i2c_cntrl); static irqreturn_t ioc(int irq, void *dev_id) { struct intel_scu_ipc_dev *scu = dev_id; + int status = ipc_read_status(scu); - if (scu->irq_mode) - complete(&scu->cmd_complete); + writel(status | IPC_STATUS_IRQ, scu->ipc_base + IPC_STATUS); + complete(&scu->cmd_complete); return IRQ_HANDLED; } @@ -629,17 +411,10 @@ static int ipc_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int err; struct intel_scu_ipc_dev *scu = &ipcdev; - struct intel_scu_ipc_pdata_t *pdata; if (scu->dev) /* We support only one SCU */ return -EBUSY; - pdata = (struct intel_scu_ipc_pdata_t *)id->driver_data; - if (!pdata) - return -ENODEV; - - scu->irq_mode = pdata->irq_mode; - err = pcim_enable_device(pdev); if (err) return err; @@ -652,10 +427,6 @@ static int ipc_probe(struct pci_dev *pdev, const struct pci_device_id *id) scu->ipc_base = pcim_iomap_table(pdev)[0]; - scu->i2c_base = ioremap_nocache(pdata->i2c_base, pdata->i2c_len); - if (!scu->i2c_base) - return -ENOMEM; - err = devm_request_irq(&pdev->dev, pdev->irq, ioc, 0, "intel_scu_ipc", scu); if (err) @@ -670,13 +441,10 @@ static int ipc_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; } -#define SCU_DEVICE(id, pdata) {PCI_VDEVICE(INTEL, id), (kernel_ulong_t)&pdata} - static const struct pci_device_id pci_ids[] = { - SCU_DEVICE(PCI_DEVICE_ID_LINCROFT, intel_scu_ipc_lincroft_pdata), - SCU_DEVICE(PCI_DEVICE_ID_PENWELL, intel_scu_ipc_penwell_pdata), - SCU_DEVICE(PCI_DEVICE_ID_CLOVERVIEW, intel_scu_ipc_penwell_pdata), - SCU_DEVICE(PCI_DEVICE_ID_TANGIER, intel_scu_ipc_tangier_pdata), + { PCI_VDEVICE(INTEL, 0x080e) }, + { PCI_VDEVICE(INTEL, 0x08ea) }, + { 
PCI_VDEVICE(INTEL, 0x11a0) }, {} }; diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_common.c b/drivers/platform/x86/intel_speed_select_if/isst_if_common.c index 3de5a3c66529..0c2aa22c7a12 100644 --- a/drivers/platform/x86/intel_speed_select_if/isst_if_common.c +++ b/drivers/platform/x86/intel_speed_select_if/isst_if_common.c @@ -50,6 +50,8 @@ static const struct isst_valid_cmd_ranges isst_valid_cmds[] = { {0x7F, 0x00, 0x0B}, {0x7F, 0x10, 0x12}, {0x7F, 0x20, 0x23}, + {0x94, 0x03, 0x03}, + {0x95, 0x03, 0x03}, }; static const struct isst_cmd_set_req_type isst_cmd_set_reqs[] = { @@ -59,6 +61,7 @@ static const struct isst_cmd_set_req_type isst_cmd_set_reqs[] = { {0xD0, 0x03, 0x08}, {0x7F, 0x02, 0x00}, {0x7F, 0x08, 0x00}, + {0x95, 0x03, 0x03}, }; struct isst_cmd { diff --git a/drivers/platform/x86/intel_telemetry_debugfs.c b/drivers/platform/x86/intel_telemetry_debugfs.c index e84d3e983e0c..8e3fb55ac1ae 100644 --- a/drivers/platform/x86/intel_telemetry_debugfs.c +++ b/drivers/platform/x86/intel_telemetry_debugfs.c @@ -686,13 +686,14 @@ static ssize_t telem_pss_trc_verb_write(struct file *file, u32 verbosity; int err; - if (kstrtou32_from_user(userbuf, count, 0, &verbosity)) - return -EFAULT; + err = kstrtou32_from_user(userbuf, count, 0, &verbosity); + if (err) + return err; err = telemetry_set_trace_verbosity(TELEM_PSS, verbosity); if (err) { pr_err("Changing PSS Trace Verbosity Failed. Error %d\n", err); - count = err; + return err; } return count; @@ -733,13 +734,14 @@ static ssize_t telem_ioss_trc_verb_write(struct file *file, u32 verbosity; int err; - if (kstrtou32_from_user(userbuf, count, 0, &verbosity)) - return -EFAULT; + err = kstrtou32_from_user(userbuf, count, 0, &verbosity); + if (err) + return err; err = telemetry_set_trace_verbosity(TELEM_IOSS, verbosity); if (err) { pr_err("Changing IOSS Trace Verbosity Failed. 
Error %d\n", err); - count = err; + return err; } return count; diff --git a/drivers/platform/x86/intel_telemetry_pltdrv.c b/drivers/platform/x86/intel_telemetry_pltdrv.c index df8565bad595..c4c742bb23cf 100644 --- a/drivers/platform/x86/intel_telemetry_pltdrv.c +++ b/drivers/platform/x86/intel_telemetry_pltdrv.c @@ -1117,9 +1117,9 @@ static const struct telemetry_core_ops telm_pltops = { static int telemetry_pltdrv_probe(struct platform_device *pdev) { - struct resource *res0 = NULL, *res1 = NULL; const struct x86_cpu_id *id; - int size, ret = -ENOMEM; + void __iomem *mem; + int ret; id = x86_match_cpu(telemetry_cpu_ids); if (!id) @@ -1127,50 +1127,17 @@ static int telemetry_pltdrv_probe(struct platform_device *pdev) telm_conf = (struct telemetry_plt_config *)id->driver_data; - res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res0) { - ret = -EINVAL; - goto out; - } - size = resource_size(res0); - if (!devm_request_mem_region(&pdev->dev, res0->start, size, - pdev->name)) { - ret = -EBUSY; - goto out; - } - telm_conf->pss_config.ssram_base_addr = res0->start; - telm_conf->pss_config.ssram_size = size; + mem = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(mem)) + return PTR_ERR(mem); - res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (!res1) { - ret = -EINVAL; - goto out; - } - size = resource_size(res1); - if (!devm_request_mem_region(&pdev->dev, res1->start, size, - pdev->name)) { - ret = -EBUSY; - goto out; - } + telm_conf->pss_config.regmap = mem; - telm_conf->ioss_config.ssram_base_addr = res1->start; - telm_conf->ioss_config.ssram_size = size; + mem = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(mem)) + return PTR_ERR(mem); - telm_conf->pss_config.regmap = ioremap_nocache( - telm_conf->pss_config.ssram_base_addr, - telm_conf->pss_config.ssram_size); - if (!telm_conf->pss_config.regmap) { - ret = -ENOMEM; - goto out; - } - - telm_conf->ioss_config.regmap = ioremap_nocache( - telm_conf->ioss_config.ssram_base_addr, - telm_conf->ioss_config.ssram_size); - if (!telm_conf->ioss_config.regmap) { - ret = -ENOMEM; - goto out; - } + telm_conf->ioss_config.regmap = mem; mutex_init(&telm_conf->telem_lock); mutex_init(&telm_conf->telem_trace_lock); @@ -1188,14 +1155,6 @@ static int telemetry_pltdrv_probe(struct platform_device *pdev) return 0; out: - if (res0) - release_mem_region(res0->start, resource_size(res0)); - if (res1) - release_mem_region(res1->start, resource_size(res1)); - if (telm_conf->pss_config.regmap) - iounmap(telm_conf->pss_config.regmap); - if (telm_conf->ioss_config.regmap) - iounmap(telm_conf->ioss_config.regmap); dev_err(&pdev->dev, "TELEMETRY Setup Failed.\n"); return ret; @@ -1204,9 +1163,6 @@ out: static int telemetry_pltdrv_remove(struct platform_device *pdev) { telemetry_clear_pltdata(); - iounmap(telm_conf->pss_config.regmap); - iounmap(telm_conf->ioss_config.regmap); - return 0; } diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c index 8fe51e43f1bc..c27548fd386a 100644 --- a/drivers/platform/x86/mlx-platform.c +++ b/drivers/platform/x86/mlx-platform.c @@ -35,6 +35,8 @@ #define MLXPLAT_CPLD_LPC_REG_LED4_OFFSET 0x23 #define MLXPLAT_CPLD_LPC_REG_LED5_OFFSET 0x24 #define MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION 0x2a +#define MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET 0x2b +#define MLXPLAT_CPLD_LPC_REG_GP0_OFFSET 0x2e #define MLXPLAT_CPLD_LPC_REG_GP1_OFFSET 0x30 #define MLXPLAT_CPLD_LPC_REG_WP1_OFFSET 0x31 #define MLXPLAT_CPLD_LPC_REG_GP2_OFFSET 0x32 @@ -46,6 +48,8 @@ #define 
MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET 0x41 #define MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET 0x42 #define MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET 0x43 +#define MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET 0x44 +#define MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET 0x45 #define MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET 0x50 #define MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET 0x51 #define MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET 0x52 @@ -68,6 +72,7 @@ #define MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET 0xd1 #define MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET 0xd2 #define MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET 0xd3 +#define MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET 0xe2 #define MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET 0xe3 #define MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET 0xe4 #define MLXPLAT_CPLD_LPC_REG_TACHO2_OFFSET 0xe5 @@ -85,9 +90,13 @@ #define MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET 0xf6 #define MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET 0xf7 #define MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET 0xf8 +#define MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET 0xf9 +#define MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET 0xfb +#define MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET 0xfc #define MLXPLAT_CPLD_LPC_IO_RANGE 0x100 #define MLXPLAT_CPLD_LPC_I2C_CH1_OFF 0xdb #define MLXPLAT_CPLD_LPC_I2C_CH2_OFF 0xda +#define MLXPLAT_CPLD_LPC_I2C_CH3_OFF 0xdc #define MLXPLAT_CPLD_LPC_PIO_OFFSET 0x10000UL #define MLXPLAT_CPLD_LPC_REG1 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \ @@ -96,6 +105,9 @@ #define MLXPLAT_CPLD_LPC_REG2 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \ MLXPLAT_CPLD_LPC_I2C_CH2_OFF) | \ MLXPLAT_CPLD_LPC_PIO_OFFSET) +#define MLXPLAT_CPLD_LPC_REG3 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \ + MLXPLAT_CPLD_LPC_I2C_CH3_OFF) | \ + MLXPLAT_CPLD_LPC_PIO_OFFSET) /* Masks for aggregation, psu, pwr and fan event in CPLD related registers. */ #define MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF 0x04 @@ -112,17 +124,29 @@ #define MLXPLAT_CPLD_LOW_AGGR_MASK_I2C BIT(6) #define MLXPLAT_CPLD_PSU_MASK GENMASK(1, 0) #define MLXPLAT_CPLD_PWR_MASK GENMASK(1, 0) +#define MLXPLAT_CPLD_PSU_EXT_MASK GENMASK(3, 0) +#define MLXPLAT_CPLD_PWR_EXT_MASK GENMASK(3, 0) #define MLXPLAT_CPLD_FAN_MASK GENMASK(3, 0) #define MLXPLAT_CPLD_ASIC_MASK GENMASK(1, 0) #define MLXPLAT_CPLD_FAN_NG_MASK GENMASK(5, 0) #define MLXPLAT_CPLD_LED_LO_NIBBLE_MASK GENMASK(7, 4) #define MLXPLAT_CPLD_LED_HI_NIBBLE_MASK GENMASK(3, 0) +#define MLXPLAT_CPLD_VOLTREG_UPD_MASK GENMASK(5, 4) +#define MLXPLAT_CPLD_I2C_CAP_BIT 0x04 +#define MLXPLAT_CPLD_I2C_CAP_MASK GENMASK(5, MLXPLAT_CPLD_I2C_CAP_BIT) + +/* Masks for aggregation for comex carriers */ +#define MLXPLAT_CPLD_AGGR_MASK_CARRIER BIT(1) +#define MLXPLAT_CPLD_AGGR_MASK_CARR_DEF (MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF | \ + MLXPLAT_CPLD_AGGR_MASK_CARRIER) +#define MLXPLAT_CPLD_LOW_AGGRCX_MASK 0xc1 /* Default I2C parent bus number */ #define MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR 1 /* Maximum number of possible physical buses equipped on system */ #define MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM 16 +#define MLXPLAT_CPLD_MAX_PHYS_EXT_ADAPTER_NUM 24 /* Number of channels in group */ #define MLXPLAT_CPLD_GRP_CHNL_NUM 8 @@ -130,14 +154,16 @@ /* Start channel numbers */ #define MLXPLAT_CPLD_CH1 2 #define MLXPLAT_CPLD_CH2 10 +#define MLXPLAT_CPLD_CH3 18 /* Number of LPC attached MUX platform devices */ -#define MLXPLAT_CPLD_LPC_MUX_DEVS 2 +#define MLXPLAT_CPLD_LPC_MUX_DEVS 3 /* Hotplug devices adapter numbers */ #define MLXPLAT_CPLD_NR_NONE -1 #define MLXPLAT_CPLD_PSU_DEFAULT_NR 10 #define MLXPLAT_CPLD_PSU_MSNXXXX_NR 4 +#define MLXPLAT_CPLD_PSU_MSNXXXX_NR2 3 #define MLXPLAT_CPLD_FAN1_DEFAULT_NR 11 #define MLXPLAT_CPLD_FAN2_DEFAULT_NR 12 #define 
MLXPLAT_CPLD_FAN3_DEFAULT_NR 13 @@ -187,8 +213,24 @@ static const struct resource mlxplat_lpc_resources[] = { IORESOURCE_IO), }; +/* Platform i2c next generation systems data */ +static struct mlxreg_core_data mlxplat_mlxcpld_i2c_ng_items_data[] = { + { + .reg = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET, + .mask = MLXPLAT_CPLD_I2C_CAP_MASK, + .bit = MLXPLAT_CPLD_I2C_CAP_BIT, + }, +}; + +static struct mlxreg_core_item mlxplat_mlxcpld_i2c_ng_items[] = { + { + .data = mlxplat_mlxcpld_i2c_ng_items_data, + }, +}; + /* Platform next generation systems i2c data */ static struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_i2c_ng_data = { + .items = mlxplat_mlxcpld_i2c_ng_items, .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET, .mask = MLXPLAT_CPLD_AGGR_MASK_COMEX, .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET, @@ -213,7 +255,7 @@ static const int mlxplat_default_channels[][MLXPLAT_CPLD_GRP_CHNL_NUM] = { static const int mlxplat_msn21xx_channels[] = { 1, 2, 3, 4, 5, 6, 7, 8 }; /* Platform mux data */ -static struct i2c_mux_reg_platform_data mlxplat_mux_data[] = { +static struct i2c_mux_reg_platform_data mlxplat_default_mux_data[] = { { .parent = 1, .base_nr = MLXPLAT_CPLD_CH1, @@ -233,6 +275,40 @@ static struct i2c_mux_reg_platform_data mlxplat_mux_data[] = { }; +/* Platform mux configuration variables */ +static int mlxplat_max_adap_num; +static int mlxplat_mux_num; +static struct i2c_mux_reg_platform_data *mlxplat_mux_data; + +/* Platform extended mux data */ +static struct i2c_mux_reg_platform_data mlxplat_extended_mux_data[] = { + { + .parent = 1, + .base_nr = MLXPLAT_CPLD_CH1, + .write_only = 1, + .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG1, + .reg_size = 1, + .idle_in_use = 1, + }, + { + .parent = 1, + .base_nr = MLXPLAT_CPLD_CH2, + .write_only = 1, + .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG3, + .reg_size = 1, + .idle_in_use = 1, + }, + { + .parent = 1, + .base_nr = MLXPLAT_CPLD_CH3, + .write_only = 1, + .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG2, + .reg_size = 1, + .idle_in_use = 1, + }, + +}; + /* Platform hotplug devices */ static struct i2c_board_info mlxplat_mlxcpld_psu[] = { { @@ -276,6 +352,22 @@ static struct i2c_board_info mlxplat_mlxcpld_fan[] = { }, }; +/* Platform hotplug comex carrier system family data */ +static struct mlxreg_core_data mlxplat_mlxcpld_comex_psu_items_data[] = { + { + .label = "psu1", + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = BIT(0), + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, + { + .label = "psu2", + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = BIT(1), + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, +}; + /* Platform hotplug default data */ static struct mlxreg_core_data mlxplat_mlxcpld_default_psu_items_data[] = { { @@ -390,6 +482,45 @@ static struct mlxreg_core_item mlxplat_mlxcpld_default_items[] = { }, }; +static struct mlxreg_core_item mlxplat_mlxcpld_comex_items[] = { + { + .data = mlxplat_mlxcpld_comex_psu_items_data, + .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER, + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = MLXPLAT_CPLD_PSU_MASK, + .count = ARRAY_SIZE(mlxplat_mlxcpld_psu), + .inversed = 1, + .health = false, + }, + { + .data = mlxplat_mlxcpld_default_pwr_items_data, + .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER, + .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET, + .mask = MLXPLAT_CPLD_PWR_MASK, + .count = ARRAY_SIZE(mlxplat_mlxcpld_pwr), + .inversed = 0, + .health = false, + }, + { + .data = mlxplat_mlxcpld_default_fan_items_data, + .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER, + .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET, + .mask = 
MLXPLAT_CPLD_FAN_MASK, + .count = ARRAY_SIZE(mlxplat_mlxcpld_fan), + .inversed = 1, + .health = false, + }, + { + .data = mlxplat_mlxcpld_default_asic_items_data, + .aggr_mask = MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF, + .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET, + .mask = MLXPLAT_CPLD_ASIC_MASK, + .count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data), + .inversed = 0, + .health = true, + }, +}; + static struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_data = { .items = mlxplat_mlxcpld_default_items, @@ -400,6 +531,16 @@ struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_data = { .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW, }; +static +struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_comex_data = { + .items = mlxplat_mlxcpld_comex_items, + .counter = ARRAY_SIZE(mlxplat_mlxcpld_comex_items), + .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET, + .mask = MLXPLAT_CPLD_AGGR_MASK_CARR_DEF, + .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET, + .mask_low = MLXPLAT_CPLD_LOW_AGGRCX_MASK, +}; + static struct mlxreg_core_data mlxplat_mlxcpld_msn21xx_pwr_items_data[] = { { .label = "pwr1", @@ -723,6 +864,116 @@ struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_ng_data = { .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW, }; +/* Platform hotplug extended system family data */ +static struct mlxreg_core_data mlxplat_mlxcpld_ext_psu_items_data[] = { + { + .label = "psu1", + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = BIT(0), + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, + { + .label = "psu2", + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = BIT(1), + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, + { + .label = "psu3", + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = BIT(2), + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, + { + .label = "psu4", + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = BIT(3), + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, +}; + +static struct mlxreg_core_data mlxplat_mlxcpld_ext_pwr_items_data[] = { + { + .label = "pwr1", + .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET, + .mask = BIT(0), + .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[0], + .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR, + }, + { + .label = "pwr2", + .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET, + .mask = BIT(1), + .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[1], + .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR, + }, + { + .label = "pwr3", + .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET, + .mask = BIT(2), + .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[0], + .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR2, + }, + { + .label = "pwr4", + .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET, + .mask = BIT(3), + .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[1], + .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR2, + }, +}; + +static struct mlxreg_core_item mlxplat_mlxcpld_ext_items[] = { + { + .data = mlxplat_mlxcpld_ext_psu_items_data, + .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF, + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = MLXPLAT_CPLD_PSU_EXT_MASK, + .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET, + .count = ARRAY_SIZE(mlxplat_mlxcpld_ext_psu_items_data), + .inversed = 1, + .health = false, + }, + { + .data = mlxplat_mlxcpld_ext_pwr_items_data, + .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF, + .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET, + .mask = MLXPLAT_CPLD_PWR_EXT_MASK, + .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET, + .count = ARRAY_SIZE(mlxplat_mlxcpld_ext_pwr_items_data), + .inversed = 0, + .health = false, + }, + { + .data = mlxplat_mlxcpld_default_ng_fan_items_data, + .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF, + .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET, + 
.mask = MLXPLAT_CPLD_FAN_NG_MASK, + .count = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_fan_items_data), + .inversed = 1, + .health = false, + }, + { + .data = mlxplat_mlxcpld_default_asic_items_data, + .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF, + .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET, + .mask = MLXPLAT_CPLD_ASIC_MASK, + .count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data), + .inversed = 0, + .health = true, + }, +}; + +static +struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_ext_data = { + .items = mlxplat_mlxcpld_ext_items, + .counter = ARRAY_SIZE(mlxplat_mlxcpld_ext_items), + .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET, + .mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF | MLXPLAT_CPLD_AGGR_MASK_COMEX, + .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET, + .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW, +}; + /* Platform led default data */ static struct mlxreg_core_data mlxplat_mlxcpld_default_led_data[] = { { @@ -964,6 +1215,80 @@ static struct mlxreg_core_platform_data mlxplat_default_ng_led_data = { .counter = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_led_data), }; +/* Platform led for Comex based 100GbE systems */ +static struct mlxreg_core_data mlxplat_mlxcpld_comex_100G_led_data[] = { + { + .label = "status:green", + .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET, + .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK, + }, + { + .label = "status:red", + .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET, + .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK + }, + { + .label = "psu:green", + .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET, + .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK, + }, + { + .label = "psu:red", + .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET, + .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK, + }, + { + .label = "fan1:green", + .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET, + .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK, + }, + { + .label = "fan1:red", + .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET, + .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK, + }, + { + .label = "fan2:green", + .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET, + .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK, + }, + { + .label = "fan2:red", + .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET, + .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK, + }, + { + .label = "fan3:green", + .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET, + .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK, + }, + { + .label = "fan3:red", + .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET, + .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK, + }, + { + .label = "fan4:green", + .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET, + .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK, + }, + { + .label = "fan4:red", + .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET, + .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK, + }, + { + .label = "uid:blue", + .reg = MLXPLAT_CPLD_LPC_REG_LED5_OFFSET, + .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK, + }, +}; + +static struct mlxreg_core_platform_data mlxplat_comex_100G_led_data = { + .data = mlxplat_mlxcpld_comex_100G_led_data, + .counter = ARRAY_SIZE(mlxplat_mlxcpld_comex_100G_led_data), +}; + /* Platform register access default */ static struct mlxreg_core_data mlxplat_mlxcpld_default_regs_io_data[] = { { @@ -1157,6 +1482,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_msn21xx_regs_io_data[] = { .mode = 0200, }, { + .label = "select_iio", + .reg = MLXPLAT_CPLD_LPC_REG_GP2_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(6), + .mode = 0644, + }, + { .label = "asic_health", .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET, .mask = MLXPLAT_CPLD_ASIC_MASK, @@ -1245,6 +1576,18 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = { .mode = 0444, }, { + .label = 
"reset_platform", + .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(4), + .mode = 0444, + }, + { + .label = "reset_soc", + .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(5), + .mode = 0444, + }, + { .label = "reset_comex_wd", .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET, .mask = GENMASK(7, 0) & ~BIT(6), @@ -1263,6 +1606,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = { .mode = 0444, }, { + .label = "reset_sw_pwr_off", + .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(2), + .mode = 0444, + }, + { .label = "reset_comex_thermal", .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET, .mask = GENMASK(7, 0) & ~BIT(3), @@ -1275,6 +1624,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = { .mode = 0444, }, { + .label = "reset_ac_pwr_fail", + .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(6), + .mode = 0444, + }, + { .label = "psu1_on", .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET, .mask = GENMASK(7, 0) & ~BIT(0), @@ -1317,6 +1672,43 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = { .bit = GENMASK(7, 0), .mode = 0444, }, + { + .label = "voltreg_update_status", + .reg = MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET, + .mask = MLXPLAT_CPLD_VOLTREG_UPD_MASK, + .bit = 5, + .mode = 0444, + }, + { + .label = "vpd_wp", + .reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(3), + .mode = 0644, + }, + { + .label = "pcie_asic_reset_dis", + .reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(4), + .mode = 0644, + }, + { + .label = "config1", + .reg = MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET, + .bit = GENMASK(7, 0), + .mode = 0444, + }, + { + .label = "config2", + .reg = MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET, + .bit = GENMASK(7, 0), + .mode = 0444, + }, + { + .label = "ufm_version", + .reg = MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET, + .bit = GENMASK(7, 0), + .mode = 0444, + }, }; static struct mlxreg_core_platform_data mlxplat_default_ng_regs_io_data = { @@ -1575,6 +1967,7 @@ static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg) case MLXPLAT_CPLD_LPC_REG_LED3_OFFSET: case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET: case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET: + case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET: case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET: case MLXPLAT_CPLD_LPC_REG_WP1_OFFSET: case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET: @@ -1582,6 +1975,7 @@ static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg) case MLXPLAT_CPLD_LPC_REG_AGGR_MASK_OFFSET: case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET: case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET: + case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET: case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET: case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET: case MLXPLAT_CPLD_LPC_REG_PSU_EVENT_OFFSET: @@ -1621,6 +2015,8 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg) case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET: case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET: case MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION: + case MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET: + case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET: case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET: case MLXPLAT_CPLD_LPC_REG_WP1_OFFSET: case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET: @@ -1631,6 +2027,8 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg) case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET: case MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET: case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET: + case 
MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET: + case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET: case MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET: case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET: case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET: @@ -1671,6 +2069,10 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg) case MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET: case MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET: case MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET: + case MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET: + case MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET: + case MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET: + case MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET: return true; } return false; @@ -1692,6 +2094,8 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg) case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET: case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET: case MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION: + case MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET: + case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET: case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET: case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET: case MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET: @@ -1700,6 +2104,8 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg) case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET: case MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET: case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET: + case MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET: + case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET: case MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET: case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET: case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET: @@ -1734,6 +2140,10 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg) case MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET: case MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET: case MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET: + case MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET: + case MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET: + case MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET: + case MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET: return true; } return false; @@ -1751,6 +2161,19 @@ static const struct reg_default mlxplat_mlxcpld_regmap_ng[] = { { MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET, 0x00 }, }; +static const struct reg_default mlxplat_mlxcpld_regmap_comex_default[] = { + { MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET, + MLXPLAT_CPLD_LOW_AGGRCX_MASK }, + { MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET, 0x00 }, +}; + +static const struct reg_default mlxplat_mlxcpld_regmap_ng400[] = { + { MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET, 0x00 }, + { MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET, 0x00 }, + { MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET, 0x00 }, + { MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET, 0x00 }, +}; + struct mlxplat_mlxcpld_regmap_context { void __iomem *base; }; @@ -1803,6 +2226,34 @@ static const struct regmap_config mlxplat_mlxcpld_regmap_config_ng = { .reg_write = mlxplat_mlxcpld_reg_write, }; +static const struct regmap_config mlxplat_mlxcpld_regmap_config_comex = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 255, + .cache_type = REGCACHE_FLAT, + .writeable_reg = mlxplat_mlxcpld_writeable_reg, + .readable_reg = mlxplat_mlxcpld_readable_reg, + .volatile_reg = mlxplat_mlxcpld_volatile_reg, + .reg_defaults = mlxplat_mlxcpld_regmap_comex_default, + .num_reg_defaults = ARRAY_SIZE(mlxplat_mlxcpld_regmap_comex_default), + .reg_read = mlxplat_mlxcpld_reg_read, + .reg_write = mlxplat_mlxcpld_reg_write, +}; + +static const struct regmap_config mlxplat_mlxcpld_regmap_config_ng400 = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 255, + .cache_type = REGCACHE_FLAT, + .writeable_reg = 
mlxplat_mlxcpld_writeable_reg, + .readable_reg = mlxplat_mlxcpld_readable_reg, + .volatile_reg = mlxplat_mlxcpld_volatile_reg, + .reg_defaults = mlxplat_mlxcpld_regmap_ng400, + .num_reg_defaults = ARRAY_SIZE(mlxplat_mlxcpld_regmap_ng400), + .reg_read = mlxplat_mlxcpld_reg_read, + .reg_write = mlxplat_mlxcpld_reg_write, +}; + static struct resource mlxplat_mlxcpld_resources[] = { [0] = DEFINE_RES_IRQ_NAMED(17, "mlxreg-hotplug"), }; @@ -1821,7 +2272,10 @@ static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi) { int i; - for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) { + mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM; + mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data); + mlxplat_mux_data = mlxplat_default_mux_data; + for (i = 0; i < mlxplat_mux_num; i++) { mlxplat_mux_data[i].values = mlxplat_default_channels[i]; mlxplat_mux_data[i].n_values = ARRAY_SIZE(mlxplat_default_channels[i]); @@ -1834,13 +2288,16 @@ static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi) mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0]; return 1; -}; +} static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi) { int i; - for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) { + mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM; + mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data); + mlxplat_mux_data = mlxplat_default_mux_data; + for (i = 0; i < mlxplat_mux_num; i++) { mlxplat_mux_data[i].values = mlxplat_msn21xx_channels; mlxplat_mux_data[i].n_values = ARRAY_SIZE(mlxplat_msn21xx_channels); @@ -1853,13 +2310,16 @@ static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi) mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0]; return 1; -}; +} static int __init mlxplat_dmi_msn274x_matched(const struct dmi_system_id *dmi) { int i; - for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) { + mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM; + mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data); + mlxplat_mux_data = mlxplat_default_mux_data; + for (i = 0; i < mlxplat_mux_num; i++) { mlxplat_mux_data[i].values = mlxplat_msn21xx_channels; mlxplat_mux_data[i].n_values = ARRAY_SIZE(mlxplat_msn21xx_channels); @@ -1872,13 +2332,16 @@ static int __init mlxplat_dmi_msn274x_matched(const struct dmi_system_id *dmi) mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0]; return 1; -}; +} static int __init mlxplat_dmi_msn201x_matched(const struct dmi_system_id *dmi) { int i; - for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) { + mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM; + mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data); + mlxplat_mux_data = mlxplat_default_mux_data; + for (i = 0; i < mlxplat_mux_num; i++) { mlxplat_mux_data[i].values = mlxplat_msn21xx_channels; mlxplat_mux_data[i].n_values = ARRAY_SIZE(mlxplat_msn21xx_channels); @@ -1891,13 +2354,16 @@ static int __init mlxplat_dmi_msn201x_matched(const struct dmi_system_id *dmi) mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0]; return 1; -}; +} static int __init mlxplat_dmi_qmb7xx_matched(const struct dmi_system_id *dmi) { int i; - for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) { + mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM; + mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data); + mlxplat_mux_data = mlxplat_default_mux_data; + for (i = 0; i < mlxplat_mux_num; i++) { mlxplat_mux_data[i].values = mlxplat_msn21xx_channels; mlxplat_mux_data[i].n_values = ARRAY_SIZE(mlxplat_msn21xx_channels); @@ -1914,7 +2380,57 
@@ static int __init mlxplat_dmi_qmb7xx_matched(const struct dmi_system_id *dmi) mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng; return 1; -}; +} + +static int __init mlxplat_dmi_comex_matched(const struct dmi_system_id *dmi) +{ + int i; + + mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_EXT_ADAPTER_NUM; + mlxplat_mux_num = ARRAY_SIZE(mlxplat_extended_mux_data); + mlxplat_mux_data = mlxplat_extended_mux_data; + for (i = 0; i < mlxplat_mux_num; i++) { + mlxplat_mux_data[i].values = mlxplat_msn21xx_channels; + mlxplat_mux_data[i].n_values = + ARRAY_SIZE(mlxplat_msn21xx_channels); + } + mlxplat_hotplug = &mlxplat_mlxcpld_comex_data; + mlxplat_hotplug->deferred_nr = MLXPLAT_CPLD_MAX_PHYS_EXT_ADAPTER_NUM; + mlxplat_led = &mlxplat_comex_100G_led_data; + mlxplat_regs_io = &mlxplat_default_ng_regs_io_data; + mlxplat_fan = &mlxplat_default_fan_data; + for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++) + mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i]; + mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_comex; + + return 1; +} + +static int __init mlxplat_dmi_ng400_matched(const struct dmi_system_id *dmi) +{ + int i; + + mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM; + mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data); + mlxplat_mux_data = mlxplat_default_mux_data; + for (i = 0; i < mlxplat_mux_num; i++) { + mlxplat_mux_data[i].values = mlxplat_msn21xx_channels; + mlxplat_mux_data[i].n_values = + ARRAY_SIZE(mlxplat_msn21xx_channels); + } + mlxplat_hotplug = &mlxplat_mlxcpld_ext_data; + mlxplat_hotplug->deferred_nr = + mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1]; + mlxplat_led = &mlxplat_default_ng_led_data; + mlxplat_regs_io = &mlxplat_default_ng_regs_io_data; + mlxplat_fan = &mlxplat_default_fan_data; + for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++) + mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i]; + mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data; + mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng400; + + return 1; +} static const struct dmi_system_id mlxplat_dmi_table[] __initconst = { { @@ -1954,6 +2470,18 @@ static const struct dmi_system_id mlxplat_dmi_table[] __initconst = { }, }, { + .callback = mlxplat_dmi_comex_matched, + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "VMOD0009"), + }, + }, + { + .callback = mlxplat_dmi_ng400_matched, + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "VMOD0010"), + }, + }, + { .callback = mlxplat_dmi_msn274x_matched, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"), @@ -2043,7 +2571,7 @@ static int mlxplat_mlxcpld_verify_bus_topology(int *nr) /* Scan adapters from expected id to verify it is free. */ *nr = MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR; for (i = MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR; i < - MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM; i++) { + mlxplat_max_adap_num; i++) { search_adap = i2c_get_adapter(i); if (search_adap) { i2c_put_adapter(search_adap); @@ -2057,12 +2585,12 @@ static int mlxplat_mlxcpld_verify_bus_topology(int *nr) } /* Return with error if free id for adapter is not found. */ - if (i == MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM) + if (i == mlxplat_max_adap_num) return -ENODEV; /* Shift adapter ids, since expected parent adapter is not free. 
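For example, with purely illustrative numbers: if the expected parent id 1 is busy and the first free adapter id found is 8, the shift is 7, so each mux parent becomes 8 and each mux base_nr moves up by 7.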
*/ *nr = i; - for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) { + for (i = 0; i < mlxplat_mux_num; i++) { shift = *nr - mlxplat_mux_data[i].parent; mlxplat_mux_data[i].parent = *nr; mlxplat_mux_data[i].base_nr += shift; @@ -2118,7 +2646,7 @@ static int __init mlxplat_init(void) if (nr < 0) goto fail_alloc; - nr = (nr == MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM) ? -1 : nr; + nr = (nr == mlxplat_max_adap_num) ? -1 : nr; if (mlxplat_i2c) mlxplat_i2c->regmap = priv->regmap; priv->pdev_i2c = platform_device_register_resndata( @@ -2131,7 +2659,7 @@ static int __init mlxplat_init(void) goto fail_alloc; } - for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) { + for (i = 0; i < mlxplat_mux_num; i++) { priv->pdev_mux[i] = platform_device_register_resndata( &priv->pdev_i2c->dev, "i2c-mux-reg", i, NULL, @@ -2265,7 +2793,7 @@ static void __exit mlxplat_exit(void) platform_device_unregister(priv->pdev_led); platform_device_unregister(priv->pdev_hotplug); - for (i = ARRAY_SIZE(mlxplat_mux_data) - 1; i >= 0 ; i--) + for (i = mlxplat_mux_num - 1; i >= 0 ; i--) platform_device_unregister(priv->pdev_mux[i]); platform_device_unregister(priv->pdev_i2c); diff --git a/drivers/platform/x86/pcengines-apuv2.c b/drivers/platform/x86/pcengines-apuv2.c index 48b112b4f0b0..9b11ef1a401f 100644 --- a/drivers/platform/x86/pcengines-apuv2.c +++ b/drivers/platform/x86/pcengines-apuv2.c @@ -2,7 +2,7 @@ /* * PC-Engines APUv2/APUv3 board platform driver - * for gpio buttons and LEDs + * for GPIO buttons and LEDs * * Copyright (C) 2018 metux IT consult * Author: Enrico Weigelt <info@metux.net> @@ -23,10 +23,10 @@ /* * NOTE: this driver only supports APUv2/3 - not APUv1, as this one - * has completely different register layouts + * has completely different register layouts. */ -/* register mappings */ +/* Register mappings */ #define APU2_GPIO_REG_LED1 AMD_FCH_GPIO_REG_GPIO57 #define APU2_GPIO_REG_LED2 AMD_FCH_GPIO_REG_GPIO58 #define APU2_GPIO_REG_LED3 AMD_FCH_GPIO_REG_GPIO59_DEVSLP1 @@ -35,7 +35,7 @@ #define APU2_GPIO_REG_MPCIE2 AMD_FCH_GPIO_REG_GPIO59_DEVSLP0 #define APU2_GPIO_REG_MPCIE3 AMD_FCH_GPIO_REG_GPIO51 -/* order in which the gpio lines are defined in the register list */ +/* Order in which the GPIO lines are defined in the register list */ #define APU2_GPIO_LINE_LED1 0 #define APU2_GPIO_LINE_LED2 1 #define APU2_GPIO_LINE_LED3 2 @@ -44,7 +44,7 @@ #define APU2_GPIO_LINE_MPCIE2 5 #define APU2_GPIO_LINE_MPCIE3 6 -/* gpio device */ +/* GPIO device */ static int apu2_gpio_regs[] = { [APU2_GPIO_LINE_LED1] = APU2_GPIO_REG_LED1, @@ -72,7 +72,7 @@ static const struct amd_fch_gpio_pdata board_apu2 = { .gpio_names = apu2_gpio_names, }; -/* gpio leds device */ +/* GPIO LEDs device */ static const struct gpio_led apu2_leds[] = { { .name = "apu:green:1" }, @@ -95,12 +95,12 @@ static struct gpiod_lookup_table gpios_led_table = { NULL, 1, GPIO_ACTIVE_LOW), GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_LED3, NULL, 2, GPIO_ACTIVE_LOW), - GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_REG_SIMSWAP, + GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_SIMSWAP, NULL, 3, GPIO_ACTIVE_LOW), } }; -/* gpio keyboard device */ +/* GPIO keyboard device */ static struct gpio_keys_button apu2_keys_buttons[] = { { @@ -129,12 +129,12 @@ static struct gpiod_lookup_table gpios_key_table = { } }; -/* board setup */ +/* Board setup */ -/* note: matching works on string prefix, so "apu2" must come before "apu" */ +/* Note: matching works on string prefix, so "apu2" must come before "apu" */ static const struct dmi_system_id 
apu_gpio_dmi_table[] __initconst = { - /* APU2 w/ legacy bios < 4.0.8 */ + /* APU2 w/ legacy BIOS < 4.0.8 */ { .ident = "apu2", .matches = { @@ -143,7 +143,7 @@ static const struct dmi_system_id apu_gpio_dmi_table[] __initconst = { }, .driver_data = (void *)&board_apu2, }, - /* APU2 w/ legacy bios >= 4.0.8 */ + /* APU2 w/ legacy BIOS >= 4.0.8 */ { .ident = "apu2", .matches = { @@ -152,7 +152,7 @@ static const struct dmi_system_id apu_gpio_dmi_table[] __initconst = { }, .driver_data = (void *)&board_apu2, }, - /* APU2 w/ maainline bios */ + /* APU2 w/ mainline BIOS */ { .ident = "apu2", .matches = { @@ -162,7 +162,7 @@ static const struct dmi_system_id apu_gpio_dmi_table[] __initconst = { .driver_data = (void *)&board_apu2, }, - /* APU3 w/ legacy bios < 4.0.8 */ + /* APU3 w/ legacy BIOS < 4.0.8 */ { .ident = "apu3", .matches = { @@ -171,7 +171,7 @@ static const struct dmi_system_id apu_gpio_dmi_table[] __initconst = { }, .driver_data = (void *)&board_apu2, }, - /* APU3 w/ legacy bios >= 4.0.8 */ + /* APU3 w/ legacy BIOS >= 4.0.8 */ { .ident = "apu3", .matches = { @@ -180,7 +180,7 @@ static const struct dmi_system_id apu_gpio_dmi_table[] __initconst = { }, .driver_data = (void *)&board_apu2, }, - /* APU3 w/ mainline bios */ + /* APU3 w/ mainline BIOS */ { .ident = "apu3", .matches = { @@ -189,6 +189,33 @@ static const struct dmi_system_id apu_gpio_dmi_table[] __initconst = { }, .driver_data = (void *)&board_apu2, }, + /* APU4 w/ legacy BIOS < 4.0.8 */ + { + .ident = "apu4", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"), + DMI_MATCH(DMI_BOARD_NAME, "APU4") + }, + .driver_data = (void *)&board_apu2, + }, + /* APU4 w/ legacy BIOS >= 4.0.8 */ + { + .ident = "apu4", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"), + DMI_MATCH(DMI_BOARD_NAME, "apu4") + }, + .driver_data = (void *)&board_apu2, + }, + /* APU4 w/ mainline BIOS */ + { + .ident = "apu4", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"), + DMI_MATCH(DMI_BOARD_NAME, "PC Engines apu4") + }, + .driver_data = (void *)&board_apu2, + }, {} }; @@ -223,7 +250,7 @@ static int __init apu_board_init(void) id = dmi_first_match(apu_gpio_dmi_table); if (!id) { - pr_err("failed to detect apu board via dmi\n"); + pr_err("failed to detect APU board via DMI\n"); return -ENODEV; } @@ -262,7 +289,7 @@ module_init(apu_board_init); module_exit(apu_board_exit); MODULE_AUTHOR("Enrico Weigelt, metux IT consult <info@metux.net>"); -MODULE_DESCRIPTION("PC Engines APUv2/APUv3 board GPIO/LED/keys driver"); +MODULE_DESCRIPTION("PC Engines APUv2/APUv3 board GPIO/LEDs/keys driver"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(dmi, apu_gpio_dmi_table); MODULE_ALIAS("platform:pcengines-apuv2"); diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c index 07d1b911e72f..3e3c66dfec2e 100644 --- a/drivers/platform/x86/pmc_atom.c +++ b/drivers/platform/x86/pmc_atom.c @@ -429,6 +429,14 @@ static const struct dmi_system_id critclk_systems[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "6AV7882-0"), }, }, + { + .ident = "CONNECT X300", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SIEMENS AG"), + DMI_MATCH(DMI_PRODUCT_VERSION, "A5E45074588"), + }, + }, + { /*sentinel*/ } }; @@ -481,7 +489,7 @@ static int pmc_setup_dev(struct pci_dev *pdev, const struct pci_device_id *ent) pci_read_config_dword(pdev, PMC_BASE_ADDR_OFFSET, &pmc->base_addr); pmc->base_addr &= PMC_BASE_ADDR_MASK; - pmc->regmap = ioremap_nocache(pmc->base_addr, PMC_MMIO_REG_LEN); + pmc->regmap = ioremap(pmc->base_addr, PMC_MMIO_REG_LEN); if (!pmc->regmap) { dev_err(&pdev->dev, 
"error: ioremap failed\n"); return -ENOMEM; diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c index 9b6a93ff41ff..23e40aa2176e 100644 --- a/drivers/platform/x86/samsung-laptop.c +++ b/drivers/platform/x86/samsung-laptop.c @@ -1394,7 +1394,7 @@ static int __init samsung_sabi_init(struct samsung_laptop *samsung) int ret = 0; int i; - samsung->f0000_segment = ioremap_nocache(0xf0000, 0xffff); + samsung->f0000_segment = ioremap(0xf0000, 0xffff); if (!samsung->f0000_segment) { if (debug || force) pr_err("Can't map the segment at 0xf0000\n"); @@ -1434,7 +1434,7 @@ static int __init samsung_sabi_init(struct samsung_laptop *samsung) if (debug) samsung_sabi_infos(samsung, loca, ifaceP); - samsung->sabi_iface = ioremap_nocache(ifaceP, 16); + samsung->sabi_iface = ioremap(ifaceP, 16); if (!samsung->sabi_iface) { pr_err("Can't remap %x\n", ifaceP); ret = -EINVAL; diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c index 72205771d03d..93177e6e5ecd 100644 --- a/drivers/platform/x86/touchscreen_dmi.c +++ b/drivers/platform/x86/touchscreen_dmi.c @@ -219,8 +219,7 @@ static const struct property_entry digma_citi_e200_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1980), PROPERTY_ENTRY_U32("touchscreen-size-y", 1500), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl1686-digma_citi_e200.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-digma_citi_e200.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -236,8 +235,7 @@ static const struct property_entry gp_electronic_t701_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 640), PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl1680-gp-electronic-t701.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-gp-electronic-t701.fw"), { } }; @@ -382,8 +380,7 @@ static const struct property_entry onda_v80_plus_v3_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1698), PROPERTY_ENTRY_U32("touchscreen-size-y", 1140), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl3676-onda-v80-plus-v3.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-v80-plus-v3.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -398,8 +395,7 @@ static const struct property_entry onda_v820w_32g_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1665), PROPERTY_ENTRY_U32("touchscreen-size-y", 1140), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl1680-onda-v820w-32g.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-onda-v820w-32g.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -415,8 +411,7 @@ static const struct property_entry onda_v891w_v1_props[] = { PROPERTY_ENTRY_U32("touchscreen-min-y", 8), PROPERTY_ENTRY_U32("touchscreen-size-x", 1676), PROPERTY_ENTRY_U32("touchscreen-size-y", 1130), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl3680-onda-v891w-v1.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-onda-v891w-v1.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -433,8 +428,7 @@ static const struct property_entry onda_v891w_v3_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1625), PROPERTY_ENTRY_U32("touchscreen-size-y", 1135), 
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl3676-onda-v891w-v3.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-v891w-v3.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -450,8 +444,7 @@ static const struct property_entry pipo_w2s_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 880), PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl1680-pipo-w2s.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-pipo-w2s.fw"), { } }; @@ -460,14 +453,29 @@ static const struct ts_dmi_data pipo_w2s_data = { .properties = pipo_w2s_props, }; +static const struct property_entry pipo_w11_props[] = { + PROPERTY_ENTRY_U32("touchscreen-min-x", 1), + PROPERTY_ENTRY_U32("touchscreen-min-y", 15), + PROPERTY_ENTRY_U32("touchscreen-size-x", 1984), + PROPERTY_ENTRY_U32("touchscreen-size-y", 1532), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-pipo-w11.fw"), + PROPERTY_ENTRY_U32("silead,max-fingers", 10), + PROPERTY_ENTRY_BOOL("silead,home-button"), + { } +}; + +static const struct ts_dmi_data pipo_w11_data = { + .acpi_name = "MSSL1680:00", + .properties = pipo_w11_props, +}; + static const struct property_entry pov_mobii_wintab_p800w_v20_props[] = { PROPERTY_ENTRY_U32("touchscreen-min-x", 32), PROPERTY_ENTRY_U32("touchscreen-min-y", 16), PROPERTY_ENTRY_U32("touchscreen-size-x", 1692), PROPERTY_ENTRY_U32("touchscreen-size-y", 1146), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl3680-pov-mobii-wintab-p800w-v20.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-pov-mobii-wintab-p800w-v20.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -484,8 +492,7 @@ static const struct property_entry pov_mobii_wintab_p800w_v21_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1794), PROPERTY_ENTRY_U32("touchscreen-size-y", 1148), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl3692-pov-mobii-wintab-p800w.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-pov-mobii-wintab-p800w.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -502,8 +509,7 @@ static const struct property_entry pov_mobii_wintab_p1006w_v10_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1984), PROPERTY_ENTRY_U32("touchscreen-size-y", 1520), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl3692-pov-mobii-wintab-p1006w-v10.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-pov-mobii-wintab-p1006w-v10.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -520,8 +526,7 @@ static const struct property_entry schneider_sct101ctm_props[] = { PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl1680-schneider-sct101ctm.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-schneider-sct101ctm.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -551,8 +556,7 @@ static const struct property_entry teclast_x98plus2_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1280), PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), - 
PROPERTY_ENTRY_STRING("firmware-name", - "gsl1686-teclast_x98plus2.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-teclast_x98plus2.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), { } }; @@ -566,8 +570,7 @@ static const struct property_entry trekstor_primebook_c11_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1970), PROPERTY_ENTRY_U32("touchscreen-size-y", 1530), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl1680-trekstor-primebook-c11.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primebook-c11.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -581,8 +584,7 @@ static const struct ts_dmi_data trekstor_primebook_c11_data = { static const struct property_entry trekstor_primebook_c13_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 2624), PROPERTY_ENTRY_U32("touchscreen-size-y", 1920), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl1680-trekstor-primebook-c13.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primebook-c13.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -596,8 +598,7 @@ static const struct ts_dmi_data trekstor_primebook_c13_data = { static const struct property_entry trekstor_primetab_t13b_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 2500), PROPERTY_ENTRY_U32("touchscreen-size-y", 1900), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl1680-trekstor-primetab-t13b.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primetab-t13b.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), @@ -613,8 +614,7 @@ static const struct property_entry trekstor_surftab_twin_10_1_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1900), PROPERTY_ENTRY_U32("touchscreen-size-y", 1280), PROPERTY_ENTRY_U32("touchscreen-inverted-y", 1), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl3670-surftab-twin-10-1-st10432-8.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-surftab-twin-10-1-st10432-8.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), { } }; @@ -629,8 +629,7 @@ static const struct property_entry trekstor_surftab_wintron70_props[] = { PROPERTY_ENTRY_U32("touchscreen-min-y", 8), PROPERTY_ENTRY_U32("touchscreen-size-x", 884), PROPERTY_ENTRY_U32("touchscreen-size-y", 632), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl1686-surftab-wintron70-st70416-6.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-surftab-wintron70-st70416-6.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -910,6 +909,16 @@ static const struct dmi_system_id touchscreen_dmi_table[] = { }, }, { + /* Pipo W11 */ + .driver_data = (void *)&pipo_w11_data, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "PIPO"), + DMI_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."), + /* Above matches are too generic, add bios-ver match */ + DMI_MATCH(DMI_BIOS_VERSION, "JS-BI-10.6-SF133GR300-GA55B-024-F"), + }, + }, + { /* Ployer Momo7w (same hardware as the Trekstor ST70416-6) */ .driver_data = (void *)&trekstor_surftab_wintron70_data, .matches = { @@ -1032,8 +1041,7 @@ static const struct dmi_system_id touchscreen_dmi_table[] = { .driver_data = (void *)&trekstor_surftab_wintron70_data, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "TrekStor"), - DMI_MATCH(DMI_PRODUCT_NAME, - "SurfTab wintron 7.0 ST70416-6"), + DMI_MATCH(DMI_PRODUCT_NAME, "SurfTab wintron 7.0 ST70416-6"), /* Exact 
match, different versions need different fw */ DMI_MATCH(DMI_BIOS_VERSION, "TREK.G.WI71C.JGBMRBA05"), }, @@ -1065,7 +1073,7 @@ static void ts_dmi_add_props(struct i2c_client *client) } static int ts_dmi_notifier_call(struct notifier_block *nb, - unsigned long action, void *data) + unsigned long action, void *data) { struct device *dev = data; struct i2c_client *client; diff --git a/drivers/pnp/isapnp/core.c b/drivers/pnp/isapnp/core.c index 179b737280e1..c43d8ad02529 100644 --- a/drivers/pnp/isapnp/core.c +++ b/drivers/pnp/isapnp/core.c @@ -747,34 +747,12 @@ __skip: } /* - * Compute ISA PnP checksum for first eight bytes. - */ -static unsigned char __init isapnp_checksum(unsigned char *data) -{ - int i, j; - unsigned char checksum = 0x6a, bit, b; - - for (i = 0; i < 8; i++) { - b = data[i]; - for (j = 0; j < 8; j++) { - bit = 0; - if (b & (1 << j)) - bit = 1; - checksum = - ((((checksum ^ (checksum >> 1)) & 0x01) ^ bit) << 7) - | (checksum >> 1); - } - } - return checksum; -} - -/* * Build device list for all present ISA PnP devices. */ static int __init isapnp_build_device_list(void) { int csn; - unsigned char header[9], checksum; + unsigned char header[9]; struct pnp_card *card; u32 eisa_id; char id[8]; @@ -784,7 +762,6 @@ static int __init isapnp_build_device_list(void) for (csn = 1; csn <= isapnp_csn_count; csn++) { isapnp_wake(csn); isapnp_peek(header, 9); - checksum = isapnp_checksum(header); eisa_id = header[0] | header[1] << 8 | header[2] << 16 | header[3] << 24; pnp_eisa_id_to_string(eisa_id, id); diff --git a/drivers/power/avs/Kconfig b/drivers/power/avs/Kconfig index 089b6244b716..b8fe166cd0d9 100644 --- a/drivers/power/avs/Kconfig +++ b/drivers/power/avs/Kconfig @@ -12,6 +12,22 @@ menuconfig POWER_AVS Say Y here to enable Adaptive Voltage Scaling class support. +config QCOM_CPR + tristate "QCOM Core Power Reduction (CPR) support" + depends on POWER_AVS + select PM_OPP + select REGMAP + help + Say Y here to enable support for the CPR hardware found on Qualcomm + SoCs like QCS404. + + This driver populates CPU OPP tables and makes adjustments to the + tables based on feedback from the CPR hardware. If you want to do + CPU frequency scaling, say Y here. + + To compile this driver as a module, choose M here: the module will + be called qcom-cpr. + config ROCKCHIP_IODOMAIN tristate "Rockchip IO domain support" depends on POWER_AVS && ARCH_ROCKCHIP && OF diff --git a/drivers/power/avs/Makefile b/drivers/power/avs/Makefile index a1b8cd453f19..9007d05853e2 100644 --- a/drivers/power/avs/Makefile +++ b/drivers/power/avs/Makefile @@ -1,3 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_POWER_AVS_OMAP) += smartreflex.o +obj-$(CONFIG_QCOM_CPR) += qcom-cpr.o obj-$(CONFIG_ROCKCHIP_IODOMAIN) += rockchip-io-domain.o diff --git a/drivers/power/avs/qcom-cpr.c b/drivers/power/avs/qcom-cpr.c new file mode 100644 index 000000000000..9192fb747653 --- /dev/null +++ b/drivers/power/avs/qcom-cpr.c @@ -0,0 +1,1793 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2019, Linaro Limited + */ + +#include <linux/module.h> +#include <linux/err.h> +#include <linux/debugfs.h> +#include <linux/string.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/bitops.h> +#include <linux/slab.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pm_domain.h> +#include <linux/pm_opp.h> +#include <linux/interrupt.h> +#include <linux/regmap.h> +#include <linux/mfd/syscon.h> +#include <linux/regulator/consumer.h> +#include <linux/clk.h> +#include <linux/nvmem-consumer.h> + +/* Register Offsets for RB-CPR and Bit Definitions */ + +/* RBCPR Version Register */ +#define REG_RBCPR_VERSION 0 +#define RBCPR_VER_2 0x02 +#define FLAGS_IGNORE_1ST_IRQ_STATUS BIT(0) + +/* RBCPR Gate Count and Target Registers */ +#define REG_RBCPR_GCNT_TARGET(n) (0x60 + 4 * (n)) + +#define RBCPR_GCNT_TARGET_TARGET_SHIFT 0 +#define RBCPR_GCNT_TARGET_TARGET_MASK GENMASK(11, 0) +#define RBCPR_GCNT_TARGET_GCNT_SHIFT 12 +#define RBCPR_GCNT_TARGET_GCNT_MASK GENMASK(9, 0) + +/* RBCPR Timer Control */ +#define REG_RBCPR_TIMER_INTERVAL 0x44 +#define REG_RBIF_TIMER_ADJUST 0x4c + +#define RBIF_TIMER_ADJ_CONS_UP_MASK GENMASK(3, 0) +#define RBIF_TIMER_ADJ_CONS_UP_SHIFT 0 +#define RBIF_TIMER_ADJ_CONS_DOWN_MASK GENMASK(3, 0) +#define RBIF_TIMER_ADJ_CONS_DOWN_SHIFT 4 +#define RBIF_TIMER_ADJ_CLAMP_INT_MASK GENMASK(7, 0) +#define RBIF_TIMER_ADJ_CLAMP_INT_SHIFT 8 + +/* RBCPR Config Register */ +#define REG_RBIF_LIMIT 0x48 +#define RBIF_LIMIT_CEILING_MASK GENMASK(5, 0) +#define RBIF_LIMIT_CEILING_SHIFT 6 +#define RBIF_LIMIT_FLOOR_BITS 6 +#define RBIF_LIMIT_FLOOR_MASK GENMASK(5, 0) + +#define RBIF_LIMIT_CEILING_DEFAULT RBIF_LIMIT_CEILING_MASK +#define RBIF_LIMIT_FLOOR_DEFAULT 0 + +#define REG_RBIF_SW_VLEVEL 0x94 +#define RBIF_SW_VLEVEL_DEFAULT 0x20 + +#define REG_RBCPR_STEP_QUOT 0x80 +#define RBCPR_STEP_QUOT_STEPQUOT_MASK GENMASK(7, 0) +#define RBCPR_STEP_QUOT_IDLE_CLK_MASK GENMASK(3, 0) +#define RBCPR_STEP_QUOT_IDLE_CLK_SHIFT 8 + +/* RBCPR Control Register */ +#define REG_RBCPR_CTL 0x90 + +#define RBCPR_CTL_LOOP_EN BIT(0) +#define RBCPR_CTL_TIMER_EN BIT(3) +#define RBCPR_CTL_SW_AUTO_CONT_ACK_EN BIT(5) +#define RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN BIT(6) +#define RBCPR_CTL_COUNT_MODE BIT(10) +#define RBCPR_CTL_UP_THRESHOLD_MASK GENMASK(3, 0) +#define RBCPR_CTL_UP_THRESHOLD_SHIFT 24 +#define RBCPR_CTL_DN_THRESHOLD_MASK GENMASK(3, 0) +#define RBCPR_CTL_DN_THRESHOLD_SHIFT 28 + +/* RBCPR Ack/Nack Response */ +#define REG_RBIF_CONT_ACK_CMD 0x98 +#define REG_RBIF_CONT_NACK_CMD 0x9c + +/* RBCPR Result status Register */ +#define REG_RBCPR_RESULT_0 0xa0 + +#define RBCPR_RESULT0_BUSY_SHIFT 19 +#define RBCPR_RESULT0_BUSY_MASK BIT(RBCPR_RESULT0_BUSY_SHIFT) +#define RBCPR_RESULT0_ERROR_LT0_SHIFT 18 +#define RBCPR_RESULT0_ERROR_SHIFT 6 +#define RBCPR_RESULT0_ERROR_MASK GENMASK(11, 0) +#define RBCPR_RESULT0_ERROR_STEPS_SHIFT 2 +#define RBCPR_RESULT0_ERROR_STEPS_MASK GENMASK(3, 0) +#define RBCPR_RESULT0_STEP_UP_SHIFT 1 + +/* RBCPR Interrupt Control Register */ +#define REG_RBIF_IRQ_EN(n) (0x100 + 4 * (n)) +#define REG_RBIF_IRQ_CLEAR 0x110 +#define REG_RBIF_IRQ_STATUS 0x114 + +#define CPR_INT_DONE BIT(0) +#define CPR_INT_MIN BIT(1) +#define CPR_INT_DOWN BIT(2) +#define CPR_INT_MID BIT(3) +#define CPR_INT_UP BIT(4) +#define CPR_INT_MAX BIT(5) +#define CPR_INT_CLAMP BIT(6) +#define CPR_INT_ALL (CPR_INT_DONE | CPR_INT_MIN | CPR_INT_DOWN | \ + CPR_INT_MID | CPR_INT_UP | CPR_INT_MAX | CPR_INT_CLAMP) 
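+/* Only the step-up and step-down interrupts drive the control loop by default; the remaining bits are handled directly in cpr_irq_handler(). */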
+#define CPR_INT_DEFAULT (CPR_INT_UP | CPR_INT_DOWN) + +#define CPR_NUM_RING_OSC 8 + +/* CPR eFuse parameters */ +#define CPR_FUSE_TARGET_QUOT_BITS_MASK GENMASK(11, 0) + +#define CPR_FUSE_MIN_QUOT_DIFF 50 + +#define FUSE_REVISION_UNKNOWN (-1) + +enum voltage_change_dir { + NO_CHANGE, + DOWN, + UP, +}; + +struct cpr_fuse { + char *ring_osc; + char *init_voltage; + char *quotient; + char *quotient_offset; +}; + +struct fuse_corner_data { + int ref_uV; + int max_uV; + int min_uV; + int max_volt_scale; + int max_quot_scale; + /* fuse quot */ + int quot_offset; + int quot_scale; + int quot_adjust; + /* fuse quot_offset */ + int quot_offset_scale; + int quot_offset_adjust; +}; + +struct cpr_fuses { + int init_voltage_step; + int init_voltage_width; + struct fuse_corner_data *fuse_corner_data; +}; + +struct corner_data { + unsigned int fuse_corner; + unsigned long freq; +}; + +struct cpr_desc { + unsigned int num_fuse_corners; + int min_diff_quot; + int *step_quot; + + unsigned int timer_delay_us; + unsigned int timer_cons_up; + unsigned int timer_cons_down; + unsigned int up_threshold; + unsigned int down_threshold; + unsigned int idle_clocks; + unsigned int gcnt_us; + unsigned int vdd_apc_step_up_limit; + unsigned int vdd_apc_step_down_limit; + unsigned int clamp_timer_interval; + + struct cpr_fuses cpr_fuses; + bool reduce_to_fuse_uV; + bool reduce_to_corner_uV; +}; + +struct acc_desc { + unsigned int enable_reg; + u32 enable_mask; + + struct reg_sequence *config; + struct reg_sequence *settings; + int num_regs_per_fuse; +}; + +struct cpr_acc_desc { + const struct cpr_desc *cpr_desc; + const struct acc_desc *acc_desc; +}; + +struct fuse_corner { + int min_uV; + int max_uV; + int uV; + int quot; + int step_quot; + const struct reg_sequence *accs; + int num_accs; + unsigned long max_freq; + u8 ring_osc_idx; +}; + +struct corner { + int min_uV; + int max_uV; + int uV; + int last_uV; + int quot_adjust; + u32 save_ctl; + u32 save_irq; + unsigned long freq; + struct fuse_corner *fuse_corner; +}; + +struct cpr_drv { + unsigned int num_corners; + unsigned int ref_clk_khz; + + struct generic_pm_domain pd; + struct device *dev; + struct device *attached_cpu_dev; + struct mutex lock; + void __iomem *base; + struct corner *corner; + struct regulator *vdd_apc; + struct clk *cpu_clk; + struct regmap *tcsr; + bool loop_disabled; + u32 gcnt; + unsigned long flags; + + struct fuse_corner *fuse_corners; + struct corner *corners; + + const struct cpr_desc *desc; + const struct acc_desc *acc_desc; + const struct cpr_fuse *cpr_fuses; + + struct dentry *debugfs; +}; + +static bool cpr_is_allowed(struct cpr_drv *drv) +{ + return !drv->loop_disabled; +} + +static void cpr_write(struct cpr_drv *drv, u32 offset, u32 value) +{ + writel_relaxed(value, drv->base + offset); +} + +static u32 cpr_read(struct cpr_drv *drv, u32 offset) +{ + return readl_relaxed(drv->base + offset); +} + +static void +cpr_masked_write(struct cpr_drv *drv, u32 offset, u32 mask, u32 value) +{ + u32 val; + + val = readl_relaxed(drv->base + offset); + val &= ~mask; + val |= value & mask; + writel_relaxed(val, drv->base + offset); +} + +static void cpr_irq_clr(struct cpr_drv *drv) +{ + cpr_write(drv, REG_RBIF_IRQ_CLEAR, CPR_INT_ALL); +} + +static void cpr_irq_clr_nack(struct cpr_drv *drv) +{ + cpr_irq_clr(drv); + cpr_write(drv, REG_RBIF_CONT_NACK_CMD, 1); +} + +static void cpr_irq_clr_ack(struct cpr_drv *drv) +{ + cpr_irq_clr(drv); + cpr_write(drv, REG_RBIF_CONT_ACK_CMD, 1); +} + +static void cpr_irq_set(struct cpr_drv *drv, u32 int_bits) +{ + 
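/* A write to REG_RBIF_IRQ_EN replaces the whole enable mask, so passing 0 (as cpr_ctl_disable() does) masks every CPR interrupt. */ +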
cpr_write(drv, REG_RBIF_IRQ_EN(0), int_bits); +} + +static void cpr_ctl_modify(struct cpr_drv *drv, u32 mask, u32 value) +{ + cpr_masked_write(drv, REG_RBCPR_CTL, mask, value); +} + +static void cpr_ctl_enable(struct cpr_drv *drv, struct corner *corner) +{ + u32 val, mask; + const struct cpr_desc *desc = drv->desc; + + /* Program Consecutive Up & Down */ + val = desc->timer_cons_down << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT; + val |= desc->timer_cons_up << RBIF_TIMER_ADJ_CONS_UP_SHIFT; + mask = RBIF_TIMER_ADJ_CONS_UP_MASK | RBIF_TIMER_ADJ_CONS_DOWN_MASK; + cpr_masked_write(drv, REG_RBIF_TIMER_ADJUST, mask, val); + cpr_masked_write(drv, REG_RBCPR_CTL, + RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN | + RBCPR_CTL_SW_AUTO_CONT_ACK_EN, + corner->save_ctl); + cpr_irq_set(drv, corner->save_irq); + + if (cpr_is_allowed(drv) && corner->max_uV > corner->min_uV) + val = RBCPR_CTL_LOOP_EN; + else + val = 0; + cpr_ctl_modify(drv, RBCPR_CTL_LOOP_EN, val); +} + +static void cpr_ctl_disable(struct cpr_drv *drv) +{ + cpr_irq_set(drv, 0); + cpr_ctl_modify(drv, RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN | + RBCPR_CTL_SW_AUTO_CONT_ACK_EN, 0); + cpr_masked_write(drv, REG_RBIF_TIMER_ADJUST, + RBIF_TIMER_ADJ_CONS_UP_MASK | + RBIF_TIMER_ADJ_CONS_DOWN_MASK, 0); + cpr_irq_clr(drv); + cpr_write(drv, REG_RBIF_CONT_ACK_CMD, 1); + cpr_write(drv, REG_RBIF_CONT_NACK_CMD, 1); + cpr_ctl_modify(drv, RBCPR_CTL_LOOP_EN, 0); +} + +static bool cpr_ctl_is_enabled(struct cpr_drv *drv) +{ + u32 reg_val; + + reg_val = cpr_read(drv, REG_RBCPR_CTL); + return reg_val & RBCPR_CTL_LOOP_EN; +} + +static bool cpr_ctl_is_busy(struct cpr_drv *drv) +{ + u32 reg_val; + + reg_val = cpr_read(drv, REG_RBCPR_RESULT_0); + return reg_val & RBCPR_RESULT0_BUSY_MASK; +} + +static void cpr_corner_save(struct cpr_drv *drv, struct corner *corner) +{ + corner->save_ctl = cpr_read(drv, REG_RBCPR_CTL); + corner->save_irq = cpr_read(drv, REG_RBIF_IRQ_EN(0)); +} + +static void cpr_corner_restore(struct cpr_drv *drv, struct corner *corner) +{ + u32 gcnt, ctl, irq, ro_sel, step_quot; + struct fuse_corner *fuse = corner->fuse_corner; + const struct cpr_desc *desc = drv->desc; + int i; + + ro_sel = fuse->ring_osc_idx; + gcnt = drv->gcnt; + gcnt |= fuse->quot - corner->quot_adjust; + + /* Program the step quotient and idle clocks */ + step_quot = desc->idle_clocks << RBCPR_STEP_QUOT_IDLE_CLK_SHIFT; + step_quot |= fuse->step_quot & RBCPR_STEP_QUOT_STEPQUOT_MASK; + cpr_write(drv, REG_RBCPR_STEP_QUOT, step_quot); + + /* Clear the target quotient value and gate count of all ROs */ + for (i = 0; i < CPR_NUM_RING_OSC; i++) + cpr_write(drv, REG_RBCPR_GCNT_TARGET(i), 0); + + cpr_write(drv, REG_RBCPR_GCNT_TARGET(ro_sel), gcnt); + ctl = corner->save_ctl; + cpr_write(drv, REG_RBCPR_CTL, ctl); + irq = corner->save_irq; + cpr_irq_set(drv, irq); + dev_dbg(drv->dev, "gcnt = %#08x, ctl = %#08x, irq = %#08x\n", gcnt, + ctl, irq); +} + +static void cpr_set_acc(struct regmap *tcsr, struct fuse_corner *f, + struct fuse_corner *end) +{ + if (f == end) + return; + + if (f < end) { + for (f += 1; f <= end; f++) + regmap_multi_reg_write(tcsr, f->accs, f->num_accs); + } else { + for (f -= 1; f >= end; f--) + regmap_multi_reg_write(tcsr, f->accs, f->num_accs); + } +} + +static int cpr_pre_voltage(struct cpr_drv *drv, + struct fuse_corner *fuse_corner, + enum voltage_change_dir dir) +{ + struct fuse_corner *prev_fuse_corner = drv->corner->fuse_corner; + + if (drv->tcsr && dir == DOWN) + cpr_set_acc(drv->tcsr, prev_fuse_corner, fuse_corner); + + return 0; +} + +static int cpr_post_voltage(struct cpr_drv *drv, + struct 
fuse_corner *fuse_corner, + enum voltage_change_dir dir) +{ + struct fuse_corner *prev_fuse_corner = drv->corner->fuse_corner; + + if (drv->tcsr && dir == UP) + cpr_set_acc(drv->tcsr, prev_fuse_corner, fuse_corner); + + return 0; +} + +static int cpr_scale_voltage(struct cpr_drv *drv, struct corner *corner, + int new_uV, enum voltage_change_dir dir) +{ + int ret; + struct fuse_corner *fuse_corner = corner->fuse_corner; + + ret = cpr_pre_voltage(drv, fuse_corner, dir); + if (ret) + return ret; + + ret = regulator_set_voltage(drv->vdd_apc, new_uV, new_uV); + if (ret) { + dev_err_ratelimited(drv->dev, "failed to set apc voltage %d\n", + new_uV); + return ret; + } + + ret = cpr_post_voltage(drv, fuse_corner, dir); + if (ret) + return ret; + + return 0; +} + +static unsigned int cpr_get_cur_perf_state(struct cpr_drv *drv) +{ + return drv->corner ? drv->corner - drv->corners + 1 : 0; +} + +static int cpr_scale(struct cpr_drv *drv, enum voltage_change_dir dir) +{ + u32 val, error_steps, reg_mask; + int last_uV, new_uV, step_uV, ret; + struct corner *corner; + const struct cpr_desc *desc = drv->desc; + + if (dir != UP && dir != DOWN) + return 0; + + step_uV = regulator_get_linear_step(drv->vdd_apc); + if (!step_uV) + return -EINVAL; + + corner = drv->corner; + + val = cpr_read(drv, REG_RBCPR_RESULT_0); + + error_steps = val >> RBCPR_RESULT0_ERROR_STEPS_SHIFT; + error_steps &= RBCPR_RESULT0_ERROR_STEPS_MASK; + last_uV = corner->last_uV; + + if (dir == UP) { + if (desc->clamp_timer_interval && + error_steps < desc->up_threshold) { + /* + * Handle the case where another measurement started + * after the interrupt was triggered due to a core + * exiting from power collapse. + */ + error_steps = max(desc->up_threshold, + desc->vdd_apc_step_up_limit); + } + + if (last_uV >= corner->max_uV) { + cpr_irq_clr_nack(drv); + + /* Maximize the UP threshold */ + reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK; + reg_mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT; + val = reg_mask; + cpr_ctl_modify(drv, reg_mask, val); + + /* Disable UP interrupt */ + cpr_irq_set(drv, CPR_INT_DEFAULT & ~CPR_INT_UP); + + return 0; + } + + if (error_steps > desc->vdd_apc_step_up_limit) + error_steps = desc->vdd_apc_step_up_limit; + + /* Calculate new voltage */ + new_uV = last_uV + error_steps * step_uV; + new_uV = min(new_uV, corner->max_uV); + + dev_dbg(drv->dev, + "UP: -> new_uV: %d last_uV: %d perf state: %u\n", + new_uV, last_uV, cpr_get_cur_perf_state(drv)); + } else if (dir == DOWN) { + if (desc->clamp_timer_interval && + error_steps < desc->down_threshold) { + /* + * Handle the case where another measurement started + * after the interrupt was triggered due to a core + * exiting from power collapse. 
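+ * In that case force a minimum correction: with, say, down_threshold = 2 and vdd_apc_step_down_limit = 1 (illustrative values), the max() below still yields two error steps.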
+ */ + error_steps = max(desc->down_threshold, + desc->vdd_apc_step_down_limit); + } + + if (last_uV <= corner->min_uV) { + cpr_irq_clr_nack(drv); + + /* Enable auto nack down */ + reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN; + val = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN; + + cpr_ctl_modify(drv, reg_mask, val); + + /* Disable DOWN interrupt */ + cpr_irq_set(drv, CPR_INT_DEFAULT & ~CPR_INT_DOWN); + + return 0; + } + + if (error_steps > desc->vdd_apc_step_down_limit) + error_steps = desc->vdd_apc_step_down_limit; + + /* Calculate new voltage */ + new_uV = last_uV - error_steps * step_uV; + new_uV = max(new_uV, corner->min_uV); + + dev_dbg(drv->dev, + "DOWN: -> new_uV: %d last_uV: %d perf state: %u\n", + new_uV, last_uV, cpr_get_cur_perf_state(drv)); + } + + ret = cpr_scale_voltage(drv, corner, new_uV, dir); + if (ret) { + cpr_irq_clr_nack(drv); + return ret; + } + drv->corner->last_uV = new_uV; + + if (dir == UP) { + /* Disable auto nack down */ + reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN; + val = 0; + } else if (dir == DOWN) { + /* Restore default threshold for UP */ + reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK; + reg_mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT; + val = desc->up_threshold; + val <<= RBCPR_CTL_UP_THRESHOLD_SHIFT; + } + + cpr_ctl_modify(drv, reg_mask, val); + + /* Re-enable default interrupts */ + cpr_irq_set(drv, CPR_INT_DEFAULT); + + /* Ack */ + cpr_irq_clr_ack(drv); + + return 0; +} + +static irqreturn_t cpr_irq_handler(int irq, void *dev) +{ + struct cpr_drv *drv = dev; + const struct cpr_desc *desc = drv->desc; + irqreturn_t ret = IRQ_HANDLED; + u32 val; + + mutex_lock(&drv->lock); + + val = cpr_read(drv, REG_RBIF_IRQ_STATUS); + if (drv->flags & FLAGS_IGNORE_1ST_IRQ_STATUS) + val = cpr_read(drv, REG_RBIF_IRQ_STATUS); + + dev_dbg(drv->dev, "IRQ_STATUS = %#02x\n", val); + + if (!cpr_ctl_is_enabled(drv)) { + dev_dbg(drv->dev, "CPR is disabled\n"); + ret = IRQ_NONE; + } else if (cpr_ctl_is_busy(drv) && !desc->clamp_timer_interval) { + dev_dbg(drv->dev, "CPR measurement is not ready\n"); + } else if (!cpr_is_allowed(drv)) { + val = cpr_read(drv, REG_RBCPR_CTL); + dev_err_ratelimited(drv->dev, + "Interrupt broken? 
RBCPR_CTL = %#02x\n", + val); + ret = IRQ_NONE; + } else { + /* + * Following sequence of handling is as per each IRQ's + * priority + */ + if (val & CPR_INT_UP) { + cpr_scale(drv, UP); + } else if (val & CPR_INT_DOWN) { + cpr_scale(drv, DOWN); + } else if (val & CPR_INT_MIN) { + cpr_irq_clr_nack(drv); + } else if (val & CPR_INT_MAX) { + cpr_irq_clr_nack(drv); + } else if (val & CPR_INT_MID) { + /* RBCPR_CTL_SW_AUTO_CONT_ACK_EN is enabled */ + dev_dbg(drv->dev, "IRQ occurred for Mid Flag\n"); + } else { + dev_dbg(drv->dev, + "IRQ occurred for unknown flag (%#08x)\n", val); + } + + /* Save register values for the corner */ + cpr_corner_save(drv, drv->corner); + } + + mutex_unlock(&drv->lock); + + return ret; +} + +static int cpr_enable(struct cpr_drv *drv) +{ + int ret; + + ret = regulator_enable(drv->vdd_apc); + if (ret) + return ret; + + mutex_lock(&drv->lock); + + if (cpr_is_allowed(drv) && drv->corner) { + cpr_irq_clr(drv); + cpr_corner_restore(drv, drv->corner); + cpr_ctl_enable(drv, drv->corner); + } + + mutex_unlock(&drv->lock); + + return 0; +} + +static int cpr_disable(struct cpr_drv *drv) +{ + int ret; + + mutex_lock(&drv->lock); + + if (cpr_is_allowed(drv)) { + cpr_ctl_disable(drv); + cpr_irq_clr(drv); + } + + mutex_unlock(&drv->lock); + + ret = regulator_disable(drv->vdd_apc); + if (ret) + return ret; + + return 0; +} + +static int cpr_config(struct cpr_drv *drv) +{ + int i; + u32 val, gcnt; + struct corner *corner; + const struct cpr_desc *desc = drv->desc; + + /* Disable interrupt and CPR */ + cpr_write(drv, REG_RBIF_IRQ_EN(0), 0); + cpr_write(drv, REG_RBCPR_CTL, 0); + + /* Program the default HW ceiling, floor and vlevel */ + val = (RBIF_LIMIT_CEILING_DEFAULT & RBIF_LIMIT_CEILING_MASK) + << RBIF_LIMIT_CEILING_SHIFT; + val |= RBIF_LIMIT_FLOOR_DEFAULT & RBIF_LIMIT_FLOOR_MASK; + cpr_write(drv, REG_RBIF_LIMIT, val); + cpr_write(drv, REG_RBIF_SW_VLEVEL, RBIF_SW_VLEVEL_DEFAULT); + + /* + * Clear the target quotient value and gate count of all + * ring oscillators + */ + for (i = 0; i < CPR_NUM_RING_OSC; i++) + cpr_write(drv, REG_RBCPR_GCNT_TARGET(i), 0); + + /* Init and save gcnt */ + gcnt = (drv->ref_clk_khz * desc->gcnt_us) / 1000; + gcnt = gcnt & RBCPR_GCNT_TARGET_GCNT_MASK; + gcnt <<= RBCPR_GCNT_TARGET_GCNT_SHIFT; + drv->gcnt = gcnt; + + /* Program the delay count for the timer */ + val = (drv->ref_clk_khz * desc->timer_delay_us) / 1000; + cpr_write(drv, REG_RBCPR_TIMER_INTERVAL, val); + dev_dbg(drv->dev, "Timer count: %#0x (for %d us)\n", val, + desc->timer_delay_us); + + /* Program Consecutive Up & Down */ + val = desc->timer_cons_down << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT; + val |= desc->timer_cons_up << RBIF_TIMER_ADJ_CONS_UP_SHIFT; + val |= desc->clamp_timer_interval << RBIF_TIMER_ADJ_CLAMP_INT_SHIFT; + cpr_write(drv, REG_RBIF_TIMER_ADJUST, val); + + /* Program the control register */ + val = desc->up_threshold << RBCPR_CTL_UP_THRESHOLD_SHIFT; + val |= desc->down_threshold << RBCPR_CTL_DN_THRESHOLD_SHIFT; + val |= RBCPR_CTL_TIMER_EN | RBCPR_CTL_COUNT_MODE; + val |= RBCPR_CTL_SW_AUTO_CONT_ACK_EN; + cpr_write(drv, REG_RBCPR_CTL, val); + + for (i = 0; i < drv->num_corners; i++) { + corner = &drv->corners[i]; + corner->save_ctl = val; + corner->save_irq = CPR_INT_DEFAULT; + } + + cpr_irq_set(drv, CPR_INT_DEFAULT); + + val = cpr_read(drv, REG_RBCPR_VERSION); + if (val <= RBCPR_VER_2) + drv->flags |= FLAGS_IGNORE_1ST_IRQ_STATUS; + + return 0; +} + +static int cpr_set_performance_state(struct generic_pm_domain *domain, + unsigned int state) +{ + struct cpr_drv *drv = 
container_of(domain, struct cpr_drv, pd); + struct corner *corner, *end; + enum voltage_change_dir dir; + int ret = 0, new_uV; + + mutex_lock(&drv->lock); + + dev_dbg(drv->dev, "%s: setting perf state: %u (prev state: %u)\n", + __func__, state, cpr_get_cur_perf_state(drv)); + + /* + * Determine new corner we're going to. + * Remove one since lowest performance state is 1. + */ + corner = drv->corners + state - 1; + end = &drv->corners[drv->num_corners - 1]; + if (corner > end || corner < drv->corners) { + ret = -EINVAL; + goto unlock; + } + + /* Determine direction */ + if (drv->corner > corner) + dir = DOWN; + else if (drv->corner < corner) + dir = UP; + else + dir = NO_CHANGE; + + if (cpr_is_allowed(drv)) + new_uV = corner->last_uV; + else + new_uV = corner->uV; + + if (cpr_is_allowed(drv)) + cpr_ctl_disable(drv); + + ret = cpr_scale_voltage(drv, corner, new_uV, dir); + if (ret) + goto unlock; + + if (cpr_is_allowed(drv)) { + cpr_irq_clr(drv); + if (drv->corner != corner) + cpr_corner_restore(drv, corner); + cpr_ctl_enable(drv, corner); + } + + drv->corner = corner; + +unlock: + mutex_unlock(&drv->lock); + + return ret; +} + +static int cpr_read_efuse(struct device *dev, const char *cname, u32 *data) +{ + struct nvmem_cell *cell; + ssize_t len; + char *ret; + int i; + + *data = 0; + + cell = nvmem_cell_get(dev, cname); + if (IS_ERR(cell)) { + if (PTR_ERR(cell) != -EPROBE_DEFER) + dev_err(dev, "undefined cell %s\n", cname); + return PTR_ERR(cell); + } + + ret = nvmem_cell_read(cell, &len); + nvmem_cell_put(cell); + if (IS_ERR(ret)) { + dev_err(dev, "can't read cell %s\n", cname); + return PTR_ERR(ret); + } + + for (i = 0; i < len; i++) + *data |= ret[i] << (8 * i); + + kfree(ret); + dev_dbg(dev, "efuse read(%s) = %x, bytes %zd\n", cname, *data, len); + + return 0; +} + +static int +cpr_populate_ring_osc_idx(struct cpr_drv *drv) +{ + struct fuse_corner *fuse = drv->fuse_corners; + struct fuse_corner *end = fuse + drv->desc->num_fuse_corners; + const struct cpr_fuse *fuses = drv->cpr_fuses; + u32 data; + int ret; + + for (; fuse < end; fuse++, fuses++) { + ret = cpr_read_efuse(drv->dev, fuses->ring_osc, + &data); + if (ret) + return ret; + fuse->ring_osc_idx = data; + } + + return 0; +} + +static int cpr_read_fuse_uV(const struct cpr_desc *desc, + const struct fuse_corner_data *fdata, + const char *init_v_efuse, + int step_volt, + struct cpr_drv *drv) +{ + int step_size_uV, steps, uV; + u32 bits = 0; + int ret; + + ret = cpr_read_efuse(drv->dev, init_v_efuse, &bits); + if (ret) + return ret; + + steps = bits & ~BIT(desc->cpr_fuses.init_voltage_width - 1); + /* Not two's complement.. 
instead the highest bit is the sign bit */ + if (bits & BIT(desc->cpr_fuses.init_voltage_width - 1)) + steps = -steps; + + step_size_uV = desc->cpr_fuses.init_voltage_step; + + uV = fdata->ref_uV + steps * step_size_uV; + return DIV_ROUND_UP(uV, step_volt) * step_volt; +} + +static int cpr_fuse_corner_init(struct cpr_drv *drv) +{ + const struct cpr_desc *desc = drv->desc; + const struct cpr_fuse *fuses = drv->cpr_fuses; + const struct acc_desc *acc_desc = drv->acc_desc; + int i; + unsigned int step_volt; + struct fuse_corner_data *fdata; + struct fuse_corner *fuse, *end; + int uV; + const struct reg_sequence *accs; + int ret; + + accs = acc_desc->settings; + + step_volt = regulator_get_linear_step(drv->vdd_apc); + if (!step_volt) + return -EINVAL; + + /* Populate fuse_corner members */ + fuse = drv->fuse_corners; + end = &fuse[desc->num_fuse_corners - 1]; + fdata = desc->cpr_fuses.fuse_corner_data; + + for (i = 0; fuse <= end; fuse++, fuses++, i++, fdata++) { + /* + * Update SoC voltages: platforms might choose a different + * regulator than the one used to characterize the algorithms + * (i.e., init_voltage_step). + */ + fdata->min_uV = roundup(fdata->min_uV, step_volt); + fdata->max_uV = roundup(fdata->max_uV, step_volt); + + /* Populate uV */ + uV = cpr_read_fuse_uV(desc, fdata, fuses->init_voltage, + step_volt, drv); + if (uV < 0) + return uV; + + fuse->min_uV = fdata->min_uV; + fuse->max_uV = fdata->max_uV; + fuse->uV = clamp(uV, fuse->min_uV, fuse->max_uV); + + if (fuse == end) { + /* + * Allow the highest fuse corner's PVS voltage to + * define the ceiling voltage for that corner in order + * to support SoCs in which variable ceiling values + * are required. + */ + end->max_uV = max(end->max_uV, end->uV); + } + + /* Populate target quotient by scaling */ + ret = cpr_read_efuse(drv->dev, fuses->quotient, &fuse->quot); + if (ret) + return ret; + + fuse->quot *= fdata->quot_scale; + fuse->quot += fdata->quot_offset; + fuse->quot += fdata->quot_adjust; + fuse->step_quot = desc->step_quot[fuse->ring_osc_idx]; + + /* Populate acc settings */ + fuse->accs = accs; + fuse->num_accs = acc_desc->num_regs_per_fuse; + accs += acc_desc->num_regs_per_fuse; + } + + /* + * Restrict all fuse corner PVS voltages based upon per corner + * ceiling and floor voltages.
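+ * Each bound is also validated against the vdd_apc regulator below, since a floor or ceiling the regulator cannot produce would make the corner unusable.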
+	 */
+	for (fuse = drv->fuse_corners, i = 0; fuse <= end; fuse++, i++) {
+		if (fuse->uV > fuse->max_uV)
+			fuse->uV = fuse->max_uV;
+		else if (fuse->uV < fuse->min_uV)
+			fuse->uV = fuse->min_uV;
+
+		ret = regulator_is_supported_voltage(drv->vdd_apc,
+						     fuse->min_uV,
+						     fuse->min_uV);
+		if (!ret) {
+			dev_err(drv->dev,
+				"min uV: %d (fuse corner: %d) not supported by regulator\n",
+				fuse->min_uV, i);
+			return -EINVAL;
+		}
+
+		ret = regulator_is_supported_voltage(drv->vdd_apc,
+						     fuse->max_uV,
+						     fuse->max_uV);
+		if (!ret) {
+			dev_err(drv->dev,
+				"max uV: %d (fuse corner: %d) not supported by regulator\n",
+				fuse->max_uV, i);
+			return -EINVAL;
+		}
+
+		dev_dbg(drv->dev,
+			"fuse corner %d: [%d %d %d] RO%hhu quot %d squot %d\n",
+			i, fuse->min_uV, fuse->uV, fuse->max_uV,
+			fuse->ring_osc_idx, fuse->quot, fuse->step_quot);
+	}
+
+	return 0;
+}
+
+static int cpr_calculate_scaling(const char *quot_offset,
+				 struct cpr_drv *drv,
+				 const struct fuse_corner_data *fdata,
+				 const struct corner *corner)
+{
+	u32 quot_diff = 0;
+	unsigned long freq_diff;
+	int scaling;
+	const struct fuse_corner *fuse, *prev_fuse;
+	int ret;
+
+	fuse = corner->fuse_corner;
+	prev_fuse = fuse - 1;
+
+	if (quot_offset) {
+		ret = cpr_read_efuse(drv->dev, quot_offset, &quot_diff);
+		if (ret)
+			return ret;
+
+		quot_diff *= fdata->quot_offset_scale;
+		quot_diff += fdata->quot_offset_adjust;
+	} else {
+		quot_diff = fuse->quot - prev_fuse->quot;
+	}
+
+	freq_diff = fuse->max_freq - prev_fuse->max_freq;
+	freq_diff /= 1000000; /* Convert to MHz */
+	scaling = 1000 * quot_diff / freq_diff;
+	return min(scaling, fdata->max_quot_scale);
+}
+
+static int cpr_interpolate(const struct corner *corner, int step_volt,
+			   const struct fuse_corner_data *fdata)
+{
+	unsigned long f_high, f_low, f_diff;
+	int uV_high, uV_low, uV;
+	u64 temp, temp_limit;
+	const struct fuse_corner *fuse, *prev_fuse;
+
+	fuse = corner->fuse_corner;
+	prev_fuse = fuse - 1;
+
+	f_high = fuse->max_freq;
+	f_low = prev_fuse->max_freq;
+	uV_high = fuse->uV;
+	uV_low = prev_fuse->uV;
+	f_diff = fuse->max_freq - corner->freq;
+
+	/*
+	 * Don't interpolate in the wrong direction. This could happen
+	 * if the adjusted fuse voltage overlaps with the previous fuse's
+	 * adjusted voltage.
+	 */
+	if (f_high <= f_low || uV_high <= uV_low || f_high <= corner->freq)
+		return corner->uV;
+
+	temp = f_diff * (uV_high - uV_low);
+	do_div(temp, f_high - f_low);
+
+	/*
+	 * max_volt_scale has units of uV/MHz while freq values
+	 * have units of Hz. Divide by 1000000 to convert the result to uV.
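+	 *
+	 * A worked example, with purely illustrative numbers (none of
+	 * these come from a real fuse map): take f_high = 1400 MHz,
+	 * f_low = 1100 MHz, uV_high = 1288000, uV_low = 1224000 and
+	 * corner->freq = 1200 MHz, so f_diff = 200000000 Hz. Then
+	 * temp = 200000000 * 64000 / 300000000 = 42666 uV, and with
+	 * max_volt_scale = 2000 uV/MHz the cap computed below is
+	 * temp_limit = 200000000 * 2000 / 1000000 = 400000 uV. The
+	 * interpolated voltage is 1288000 - min(42666, 400000) = 1245334,
+	 * which roundup() then aligns to the regulator's step size.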
+ */ + temp_limit = f_diff * fdata->max_volt_scale; + do_div(temp_limit, 1000000); + + uV = uV_high - min(temp, temp_limit); + return roundup(uV, step_volt); +} + +static unsigned int cpr_get_fuse_corner(struct dev_pm_opp *opp) +{ + struct device_node *np; + unsigned int fuse_corner = 0; + + np = dev_pm_opp_get_of_node(opp); + if (of_property_read_u32(np, "qcom,opp-fuse-level", &fuse_corner)) + pr_err("%s: missing 'qcom,opp-fuse-level' property\n", + __func__); + + of_node_put(np); + + return fuse_corner; +} + +static unsigned long cpr_get_opp_hz_for_req(struct dev_pm_opp *ref, + struct device *cpu_dev) +{ + u64 rate = 0; + struct device_node *ref_np; + struct device_node *desc_np; + struct device_node *child_np = NULL; + struct device_node *child_req_np = NULL; + + desc_np = dev_pm_opp_of_get_opp_desc_node(cpu_dev); + if (!desc_np) + return 0; + + ref_np = dev_pm_opp_get_of_node(ref); + if (!ref_np) + goto out_ref; + + do { + of_node_put(child_req_np); + child_np = of_get_next_available_child(desc_np, child_np); + child_req_np = of_parse_phandle(child_np, "required-opps", 0); + } while (child_np && child_req_np != ref_np); + + if (child_np && child_req_np == ref_np) + of_property_read_u64(child_np, "opp-hz", &rate); + + of_node_put(child_req_np); + of_node_put(child_np); + of_node_put(ref_np); +out_ref: + of_node_put(desc_np); + + return (unsigned long) rate; +} + +static int cpr_corner_init(struct cpr_drv *drv) +{ + const struct cpr_desc *desc = drv->desc; + const struct cpr_fuse *fuses = drv->cpr_fuses; + int i, level, scaling = 0; + unsigned int fnum, fc; + const char *quot_offset; + struct fuse_corner *fuse, *prev_fuse; + struct corner *corner, *end; + struct corner_data *cdata; + const struct fuse_corner_data *fdata; + bool apply_scaling; + unsigned long freq_diff, freq_diff_mhz; + unsigned long freq; + int step_volt = regulator_get_linear_step(drv->vdd_apc); + struct dev_pm_opp *opp; + + if (!step_volt) + return -EINVAL; + + corner = drv->corners; + end = &corner[drv->num_corners - 1]; + + cdata = devm_kcalloc(drv->dev, drv->num_corners, + sizeof(struct corner_data), + GFP_KERNEL); + if (!cdata) + return -ENOMEM; + + /* + * Store maximum frequency for each fuse corner based on the frequency + * plan + */ + for (level = 1; level <= drv->num_corners; level++) { + opp = dev_pm_opp_find_level_exact(&drv->pd.dev, level); + if (IS_ERR(opp)) + return -EINVAL; + fc = cpr_get_fuse_corner(opp); + if (!fc) { + dev_pm_opp_put(opp); + return -EINVAL; + } + fnum = fc - 1; + freq = cpr_get_opp_hz_for_req(opp, drv->attached_cpu_dev); + if (!freq) { + dev_pm_opp_put(opp); + return -EINVAL; + } + cdata[level - 1].fuse_corner = fnum; + cdata[level - 1].freq = freq; + + fuse = &drv->fuse_corners[fnum]; + dev_dbg(drv->dev, "freq: %lu level: %u fuse level: %u\n", + freq, dev_pm_opp_get_level(opp) - 1, fnum); + if (freq > fuse->max_freq) + fuse->max_freq = freq; + dev_pm_opp_put(opp); + } + + /* + * Get the quotient adjustment scaling factor, according to: + * + * scaling = min(1000 * (QUOT(corner_N) - QUOT(corner_N-1)) + * / (freq(corner_N) - freq(corner_N-1)), max_factor) + * + * QUOT(corner_N): quotient read from fuse for fuse corner N + * QUOT(corner_N-1): quotient read from fuse for fuse corner (N - 1) + * freq(corner_N): max frequency in MHz supported by fuse corner N + * freq(corner_N-1): max frequency in MHz supported by fuse corner + * (N - 1) + * + * Then walk through the corners mapped to each fuse corner + * and calculate the quotient adjustment for each one using the + * following formula: + 
*
+	 * quot_adjust = (freq_max - freq_corner) * scaling / 1000
+	 *
+	 * freq_max: max frequency in MHz supported by the fuse corner
+	 * freq_corner: frequency in MHz corresponding to the corner
+	 * scaling: calculated from above equation
+	 *
+	 *   +                             +
+	 *   |                          v  |
+	 * q |           f c            o  |           f c
+	 * u |         c                l  |         c
+	 * o |       f                  t  |       f
+	 * t |     c                    a  |     c
+	 *   | c f                      g  | c f
+	 *   |                          e  |
+	 *   +---------------              +----------------
+	 *     0 1 2 3 4 5 6                 0 1 2 3 4 5 6
+	 *        corner                        corner
+	 *
+	 *      c = corner
+	 *      f = fuse corner
+	 *
+	 */
+	for (apply_scaling = false, i = 0; corner <= end; corner++, i++) {
+		fnum = cdata[i].fuse_corner;
+		fdata = &desc->cpr_fuses.fuse_corner_data[fnum];
+		quot_offset = fuses[fnum].quotient_offset;
+		fuse = &drv->fuse_corners[fnum];
+		if (fnum)
+			prev_fuse = &drv->fuse_corners[fnum - 1];
+		else
+			prev_fuse = NULL;
+
+		corner->fuse_corner = fuse;
+		corner->freq = cdata[i].freq;
+		corner->uV = fuse->uV;
+
+		if (prev_fuse && cdata[i - 1].freq == prev_fuse->max_freq) {
+			scaling = cpr_calculate_scaling(quot_offset, drv,
+							fdata, corner);
+			if (scaling < 0)
+				return scaling;
+
+			apply_scaling = true;
+		} else if (corner->freq == fuse->max_freq) {
+			/* This is a fuse corner; don't scale anything */
+			apply_scaling = false;
+		}
+
+		if (apply_scaling) {
+			freq_diff = fuse->max_freq - corner->freq;
+			freq_diff_mhz = freq_diff / 1000000;
+			corner->quot_adjust = scaling * freq_diff_mhz / 1000;
+
+			corner->uV = cpr_interpolate(corner, step_volt, fdata);
+		}
+
+		corner->max_uV = fuse->max_uV;
+		corner->min_uV = fuse->min_uV;
+		corner->uV = clamp(corner->uV, corner->min_uV, corner->max_uV);
+		corner->last_uV = corner->uV;
+
+		/* Reduce the ceiling voltage if needed */
+		if (desc->reduce_to_corner_uV && corner->uV < corner->max_uV)
+			corner->max_uV = corner->uV;
+		else if (desc->reduce_to_fuse_uV && fuse->uV < corner->max_uV)
+			corner->max_uV = max(corner->min_uV, fuse->uV);
+
+		dev_dbg(drv->dev, "corner %d: [%d %d %d] quot %d\n", i,
+			corner->min_uV, corner->uV, corner->max_uV,
+			fuse->quot - corner->quot_adjust);
+	}
+
+	return 0;
+}
+
+static const struct cpr_fuse *cpr_get_fuses(struct cpr_drv *drv)
+{
+	const struct cpr_desc *desc = drv->desc;
+	struct cpr_fuse *fuses;
+	int i;
+
+	fuses = devm_kcalloc(drv->dev, desc->num_fuse_corners,
+			     sizeof(struct cpr_fuse),
+			     GFP_KERNEL);
+	if (!fuses)
+		return ERR_PTR(-ENOMEM);
+
+	for (i = 0; i < desc->num_fuse_corners; i++) {
+		char tbuf[32];
+
+		snprintf(tbuf, 32, "cpr_ring_osc%d", i + 1);
+		fuses[i].ring_osc = devm_kstrdup(drv->dev, tbuf, GFP_KERNEL);
+		if (!fuses[i].ring_osc)
+			return ERR_PTR(-ENOMEM);
+
+		snprintf(tbuf, 32, "cpr_init_voltage%d", i + 1);
+		fuses[i].init_voltage = devm_kstrdup(drv->dev, tbuf,
+						     GFP_KERNEL);
+		if (!fuses[i].init_voltage)
+			return ERR_PTR(-ENOMEM);
+
+		snprintf(tbuf, 32, "cpr_quotient%d", i + 1);
+		fuses[i].quotient = devm_kstrdup(drv->dev, tbuf, GFP_KERNEL);
+		if (!fuses[i].quotient)
+			return ERR_PTR(-ENOMEM);
+
+		snprintf(tbuf, 32, "cpr_quotient_offset%d", i + 1);
+		fuses[i].quotient_offset = devm_kstrdup(drv->dev, tbuf,
+							GFP_KERNEL);
+		if (!fuses[i].quotient_offset)
+			return ERR_PTR(-ENOMEM);
+	}
+
+	return fuses;
+}
+
+static void cpr_set_loop_allowed(struct cpr_drv *drv)
+{
+	drv->loop_disabled = false;
+}
+
+static int cpr_init_parameters(struct cpr_drv *drv)
+{
+	const struct cpr_desc *desc = drv->desc;
+	struct clk *clk;
+
+	clk = clk_get(drv->dev, "ref");
+	if (IS_ERR(clk))
+		return PTR_ERR(clk);
+
+	drv->ref_clk_khz = clk_get_rate(clk) / 1000;
+	clk_put(clk);
+
+	if (desc->timer_cons_up > RBIF_TIMER_ADJ_CONS_UP_MASK ||
desc->timer_cons_down > RBIF_TIMER_ADJ_CONS_DOWN_MASK || + desc->up_threshold > RBCPR_CTL_UP_THRESHOLD_MASK || + desc->down_threshold > RBCPR_CTL_DN_THRESHOLD_MASK || + desc->idle_clocks > RBCPR_STEP_QUOT_IDLE_CLK_MASK || + desc->clamp_timer_interval > RBIF_TIMER_ADJ_CLAMP_INT_MASK) + return -EINVAL; + + dev_dbg(drv->dev, "up threshold = %u, down threshold = %u\n", + desc->up_threshold, desc->down_threshold); + + return 0; +} + +static int cpr_find_initial_corner(struct cpr_drv *drv) +{ + unsigned long rate; + const struct corner *end; + struct corner *iter; + unsigned int i = 0; + + if (!drv->cpu_clk) { + dev_err(drv->dev, "cannot get rate from NULL clk\n"); + return -EINVAL; + } + + end = &drv->corners[drv->num_corners - 1]; + rate = clk_get_rate(drv->cpu_clk); + + /* + * Some bootloaders set a CPU clock frequency that is not defined + * in the OPP table. When running at an unlisted frequency, + * cpufreq_online() will change to the OPP which has the lowest + * frequency, at or above the unlisted frequency. + * Since cpufreq_online() always "rounds up" in the case of an + * unlisted frequency, this function always "rounds down" in case + * of an unlisted frequency. That way, when cpufreq_online() + * triggers the first ever call to cpr_set_performance_state(), + * it will correctly determine the direction as UP. + */ + for (iter = drv->corners; iter <= end; iter++) { + if (iter->freq > rate) + break; + i++; + if (iter->freq == rate) { + drv->corner = iter; + break; + } + if (iter->freq < rate) + drv->corner = iter; + } + + if (!drv->corner) { + dev_err(drv->dev, "boot up corner not found\n"); + return -EINVAL; + } + + dev_dbg(drv->dev, "boot up perf state: %u\n", i); + + return 0; +} + +static const struct cpr_desc qcs404_cpr_desc = { + .num_fuse_corners = 3, + .min_diff_quot = CPR_FUSE_MIN_QUOT_DIFF, + .step_quot = (int []){ 25, 25, 25, }, + .timer_delay_us = 5000, + .timer_cons_up = 0, + .timer_cons_down = 2, + .up_threshold = 1, + .down_threshold = 3, + .idle_clocks = 15, + .gcnt_us = 1, + .vdd_apc_step_up_limit = 1, + .vdd_apc_step_down_limit = 1, + .cpr_fuses = { + .init_voltage_step = 8000, + .init_voltage_width = 6, + .fuse_corner_data = (struct fuse_corner_data[]){ + /* fuse corner 0 */ + { + .ref_uV = 1224000, + .max_uV = 1224000, + .min_uV = 1048000, + .max_volt_scale = 0, + .max_quot_scale = 0, + .quot_offset = 0, + .quot_scale = 1, + .quot_adjust = 0, + .quot_offset_scale = 5, + .quot_offset_adjust = 0, + }, + /* fuse corner 1 */ + { + .ref_uV = 1288000, + .max_uV = 1288000, + .min_uV = 1048000, + .max_volt_scale = 2000, + .max_quot_scale = 1400, + .quot_offset = 0, + .quot_scale = 1, + .quot_adjust = -20, + .quot_offset_scale = 5, + .quot_offset_adjust = 0, + }, + /* fuse corner 2 */ + { + .ref_uV = 1352000, + .max_uV = 1384000, + .min_uV = 1088000, + .max_volt_scale = 2000, + .max_quot_scale = 1400, + .quot_offset = 0, + .quot_scale = 1, + .quot_adjust = 0, + .quot_offset_scale = 5, + .quot_offset_adjust = 0, + }, + }, + }, +}; + +static const struct acc_desc qcs404_acc_desc = { + .settings = (struct reg_sequence[]){ + { 0xb120, 0x1041040 }, + { 0xb124, 0x41 }, + { 0xb120, 0x0 }, + { 0xb124, 0x0 }, + { 0xb120, 0x0 }, + { 0xb124, 0x0 }, + }, + .config = (struct reg_sequence[]){ + { 0xb138, 0xff }, + { 0xb130, 0x5555 }, + }, + .num_regs_per_fuse = 2, +}; + +static const struct cpr_acc_desc qcs404_cpr_acc_desc = { + .cpr_desc = &qcs404_cpr_desc, + .acc_desc = &qcs404_acc_desc, +}; + +static unsigned int cpr_get_performance_state(struct generic_pm_domain *genpd, + struct 
dev_pm_opp *opp)
+{
+	return dev_pm_opp_get_level(opp);
+}
+
+static int cpr_power_off(struct generic_pm_domain *domain)
+{
+	struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
+
+	return cpr_disable(drv);
+}
+
+static int cpr_power_on(struct generic_pm_domain *domain)
+{
+	struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
+
+	return cpr_enable(drv);
+}
+
+static int cpr_pd_attach_dev(struct generic_pm_domain *domain,
+			     struct device *dev)
+{
+	struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
+	const struct acc_desc *acc_desc = drv->acc_desc;
+	int ret = 0;
+
+	mutex_lock(&drv->lock);
+
+	dev_dbg(drv->dev, "attach callback for: %s\n", dev_name(dev));
+
+	/*
+	 * This driver only supports scaling voltage for a CPU cluster
+	 * where all CPUs in the cluster share a single regulator.
+	 * Therefore, save the struct device pointer only for the first
+	 * CPU device that gets attached. There is no need to do any
+	 * additional initialization when further CPUs get attached.
+	 */
+	if (drv->attached_cpu_dev)
+		goto unlock;
+
+	/*
+	 * cpr_scale_voltage() requires the direction (i.e., whether we are
+	 * changing to a higher or lower OPP). The first time
+	 * cpr_set_performance_state() is called, there is no previous
+	 * performance state defined. Therefore, we call
+	 * cpr_find_initial_corner(), which reads the CPU clock frequency
+	 * set by the bootloader, so that we can determine the direction
+	 * the first time cpr_set_performance_state() is called.
+	 */
+	drv->cpu_clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(drv->cpu_clk)) {
+		ret = PTR_ERR(drv->cpu_clk);
+		if (ret != -EPROBE_DEFER)
+			dev_err(drv->dev, "could not get cpu clk: %d\n", ret);
+		goto unlock;
+	}
+	drv->attached_cpu_dev = dev;
+
+	dev_dbg(drv->dev, "using cpu clk from: %s\n",
+		dev_name(drv->attached_cpu_dev));
+
+	/*
+	 * Everything related to (virtual) corners has to be initialized
+	 * here, when attaching to the power domain, since the highest
+	 * frequency associated with each fuse corner is only known after
+	 * the cpufreq driver has attached to us.
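+	 *
+	 * For reference, a hypothetical (illustrative only) OPP wiring
+	 * between a CPU OPP and one of this domain's OPPs could look like:
+	 *
+	 *	opp-1094400000 {
+	 *		opp-hz = /bits/ 64 <1094400000>;
+	 *		required-opps = <&cpr_opp2>;
+	 *	};
+	 *	cpr_opp2: opp2 {
+	 *		opp-level = <2>;
+	 *		qcom,opp-fuse-level = <2>;
+	 *	};
+	 *
+	 * cpr_corner_init() walks exactly this linkage via
+	 * cpr_get_opp_hz_for_req() and cpr_get_fuse_corner().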
+ */ + ret = dev_pm_opp_get_opp_count(&drv->pd.dev); + if (ret < 0) { + dev_err(drv->dev, "could not get OPP count\n"); + goto unlock; + } + drv->num_corners = ret; + + if (drv->num_corners < 2) { + dev_err(drv->dev, "need at least 2 OPPs to use CPR\n"); + ret = -EINVAL; + goto unlock; + } + + dev_dbg(drv->dev, "number of OPPs: %d\n", drv->num_corners); + + drv->corners = devm_kcalloc(drv->dev, drv->num_corners, + sizeof(*drv->corners), + GFP_KERNEL); + if (!drv->corners) { + ret = -ENOMEM; + goto unlock; + } + + ret = cpr_corner_init(drv); + if (ret) + goto unlock; + + cpr_set_loop_allowed(drv); + + ret = cpr_init_parameters(drv); + if (ret) + goto unlock; + + /* Configure CPR HW but keep it disabled */ + ret = cpr_config(drv); + if (ret) + goto unlock; + + ret = cpr_find_initial_corner(drv); + if (ret) + goto unlock; + + if (acc_desc->config) + regmap_multi_reg_write(drv->tcsr, acc_desc->config, + acc_desc->num_regs_per_fuse); + + /* Enable ACC if required */ + if (acc_desc->enable_mask) + regmap_update_bits(drv->tcsr, acc_desc->enable_reg, + acc_desc->enable_mask, + acc_desc->enable_mask); + +unlock: + mutex_unlock(&drv->lock); + + return ret; +} + +static int cpr_debug_info_show(struct seq_file *s, void *unused) +{ + u32 gcnt, ro_sel, ctl, irq_status, reg, error_steps; + u32 step_dn, step_up, error, error_lt0, busy; + struct cpr_drv *drv = s->private; + struct fuse_corner *fuse_corner; + struct corner *corner; + + corner = drv->corner; + fuse_corner = corner->fuse_corner; + + seq_printf(s, "corner, current_volt = %d uV\n", + corner->last_uV); + + ro_sel = fuse_corner->ring_osc_idx; + gcnt = cpr_read(drv, REG_RBCPR_GCNT_TARGET(ro_sel)); + seq_printf(s, "rbcpr_gcnt_target (%u) = %#02X\n", ro_sel, gcnt); + + ctl = cpr_read(drv, REG_RBCPR_CTL); + seq_printf(s, "rbcpr_ctl = %#02X\n", ctl); + + irq_status = cpr_read(drv, REG_RBIF_IRQ_STATUS); + seq_printf(s, "rbcpr_irq_status = %#02X\n", irq_status); + + reg = cpr_read(drv, REG_RBCPR_RESULT_0); + seq_printf(s, "rbcpr_result_0 = %#02X\n", reg); + + step_dn = reg & 0x01; + step_up = (reg >> RBCPR_RESULT0_STEP_UP_SHIFT) & 0x01; + seq_printf(s, " [step_dn = %u", step_dn); + + seq_printf(s, ", step_up = %u", step_up); + + error_steps = (reg >> RBCPR_RESULT0_ERROR_STEPS_SHIFT) + & RBCPR_RESULT0_ERROR_STEPS_MASK; + seq_printf(s, ", error_steps = %u", error_steps); + + error = (reg >> RBCPR_RESULT0_ERROR_SHIFT) & RBCPR_RESULT0_ERROR_MASK; + seq_printf(s, ", error = %u", error); + + error_lt0 = (reg >> RBCPR_RESULT0_ERROR_LT0_SHIFT) & 0x01; + seq_printf(s, ", error_lt_0 = %u", error_lt0); + + busy = (reg >> RBCPR_RESULT0_BUSY_SHIFT) & 0x01; + seq_printf(s, ", busy = %u]\n", busy); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(cpr_debug_info); + +static void cpr_debugfs_init(struct cpr_drv *drv) +{ + drv->debugfs = debugfs_create_dir("qcom_cpr", NULL); + + debugfs_create_file("debug_info", 0444, drv->debugfs, + drv, &cpr_debug_info_fops); +} + +static int cpr_probe(struct platform_device *pdev) +{ + struct resource *res; + struct device *dev = &pdev->dev; + struct cpr_drv *drv; + int irq, ret; + const struct cpr_acc_desc *data; + struct device_node *np; + u32 cpr_rev = FUSE_REVISION_UNKNOWN; + + data = of_device_get_match_data(dev); + if (!data || !data->cpr_desc || !data->acc_desc) + return -EINVAL; + + drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL); + if (!drv) + return -ENOMEM; + drv->dev = dev; + drv->desc = data->cpr_desc; + drv->acc_desc = data->acc_desc; + + drv->fuse_corners = devm_kcalloc(dev, drv->desc->num_fuse_corners, + 
sizeof(*drv->fuse_corners), + GFP_KERNEL); + if (!drv->fuse_corners) + return -ENOMEM; + + np = of_parse_phandle(dev->of_node, "acc-syscon", 0); + if (!np) + return -ENODEV; + + drv->tcsr = syscon_node_to_regmap(np); + of_node_put(np); + if (IS_ERR(drv->tcsr)) + return PTR_ERR(drv->tcsr); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + drv->base = devm_ioremap_resource(dev, res); + if (IS_ERR(drv->base)) + return PTR_ERR(drv->base); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return -EINVAL; + + drv->vdd_apc = devm_regulator_get(dev, "vdd-apc"); + if (IS_ERR(drv->vdd_apc)) + return PTR_ERR(drv->vdd_apc); + + /* + * Initialize fuse corners, since it simply depends + * on data in efuses. + * Everything related to (virtual) corners has to be + * initialized after attaching to the power domain, + * since it depends on the CPU's OPP table. + */ + ret = cpr_read_efuse(dev, "cpr_fuse_revision", &cpr_rev); + if (ret) + return ret; + + drv->cpr_fuses = cpr_get_fuses(drv); + if (IS_ERR(drv->cpr_fuses)) + return PTR_ERR(drv->cpr_fuses); + + ret = cpr_populate_ring_osc_idx(drv); + if (ret) + return ret; + + ret = cpr_fuse_corner_init(drv); + if (ret) + return ret; + + mutex_init(&drv->lock); + + ret = devm_request_threaded_irq(dev, irq, NULL, + cpr_irq_handler, + IRQF_ONESHOT | IRQF_TRIGGER_RISING, + "cpr", drv); + if (ret) + return ret; + + drv->pd.name = devm_kstrdup_const(dev, dev->of_node->full_name, + GFP_KERNEL); + if (!drv->pd.name) + return -EINVAL; + + drv->pd.power_off = cpr_power_off; + drv->pd.power_on = cpr_power_on; + drv->pd.set_performance_state = cpr_set_performance_state; + drv->pd.opp_to_performance_state = cpr_get_performance_state; + drv->pd.attach_dev = cpr_pd_attach_dev; + + ret = pm_genpd_init(&drv->pd, NULL, true); + if (ret) + return ret; + + ret = of_genpd_add_provider_simple(dev->of_node, &drv->pd); + if (ret) + return ret; + + platform_set_drvdata(pdev, drv); + cpr_debugfs_init(drv); + + return 0; +} + +static int cpr_remove(struct platform_device *pdev) +{ + struct cpr_drv *drv = platform_get_drvdata(pdev); + + if (cpr_is_allowed(drv)) { + cpr_ctl_disable(drv); + cpr_irq_set(drv, 0); + } + + of_genpd_del_provider(pdev->dev.of_node); + pm_genpd_remove(&drv->pd); + + debugfs_remove_recursive(drv->debugfs); + + return 0; +} + +static const struct of_device_id cpr_match_table[] = { + { .compatible = "qcom,qcs404-cpr", .data = &qcs404_cpr_acc_desc }, + { } +}; +MODULE_DEVICE_TABLE(of, cpr_match_table); + +static struct platform_driver cpr_driver = { + .probe = cpr_probe, + .remove = cpr_remove, + .driver = { + .name = "qcom-cpr", + .of_match_table = cpr_match_table, + }, +}; +module_platform_driver(cpr_driver); + +MODULE_DESCRIPTION("Core Power Reduction (CPR) driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c index a67701ed93e8..73257cf107d9 100644 --- a/drivers/powercap/intel_rapl_common.c +++ b/drivers/powercap/intel_rapl_common.c @@ -980,6 +980,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = { INTEL_CPU_FAM6(ICELAKE_D, rapl_defaults_hsw_server), INTEL_CPU_FAM6(COMETLAKE_L, rapl_defaults_core), INTEL_CPU_FAM6(COMETLAKE, rapl_defaults_core), + INTEL_CPU_FAM6(TIGERLAKE_L, rapl_defaults_core), INTEL_CPU_FAM6(ATOM_SILVERMONT, rapl_defaults_byt), INTEL_CPU_FAM6(ATOM_AIRMONT, rapl_defaults_cht), @@ -989,6 +990,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = { INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS, rapl_defaults_core), INTEL_CPU_FAM6(ATOM_GOLDMONT_D, 
rapl_defaults_core), INTEL_CPU_FAM6(ATOM_TREMONT_D, rapl_defaults_core), + INTEL_CPU_FAM6(ATOM_TREMONT_L, rapl_defaults_core), INTEL_CPU_FAM6(XEON_PHI_KNL, rapl_defaults_hsw_server), INTEL_CPU_FAM6(XEON_PHI_KNM, rapl_defaults_hsw_server), @@ -1295,6 +1297,9 @@ struct rapl_package *rapl_add_package(int cpu, struct rapl_if_priv *priv) struct cpuinfo_x86 *c = &cpu_data(cpu); int ret; + if (!rapl_defaults) + return ERR_PTR(-ENODEV); + rp = kzalloc(sizeof(struct rapl_package), GFP_KERNEL); if (!rp) return ERR_PTR(-ENOMEM); diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig index b45d2b86d8ca..b0d1b8d264fa 100644 --- a/drivers/ptp/Kconfig +++ b/drivers/ptp/Kconfig @@ -121,7 +121,7 @@ config PTP_1588_CLOCK_KVM config PTP_1588_CLOCK_IDTCM tristate "IDT CLOCKMATRIX as PTP clock" - depends on PTP_1588_CLOCK + depends on PTP_1588_CLOCK && I2C default n help This driver adds support for using IDT CLOCKMATRIX(TM) as a PTP diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c index e60eab7f8a61..b84f16bbd6f2 100644 --- a/drivers/ptp/ptp_clock.c +++ b/drivers/ptp/ptp_clock.c @@ -166,10 +166,11 @@ static struct posix_clock_operations ptp_clock_ops = { .read = ptp_read, }; -static void delete_ptp_clock(struct posix_clock *pc) +static void ptp_clock_release(struct device *dev) { - struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock); + struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev); + ptp_cleanup_pin_groups(ptp); mutex_destroy(&ptp->tsevq_mux); mutex_destroy(&ptp->pincfg_mux); ida_simple_remove(&ptp_clocks_map, ptp->index); @@ -213,7 +214,6 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, } ptp->clock.ops = ptp_clock_ops; - ptp->clock.release = delete_ptp_clock; ptp->info = info; ptp->devid = MKDEV(major, index); ptp->index = index; @@ -236,15 +236,6 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, if (err) goto no_pin_groups; - /* Create a new device in our class. */ - ptp->dev = device_create_with_groups(ptp_class, parent, ptp->devid, - ptp, ptp->pin_attr_groups, - "ptp%d", ptp->index); - if (IS_ERR(ptp->dev)) { - err = PTR_ERR(ptp->dev); - goto no_device; - } - /* Register a new PPS source. */ if (info->pps) { struct pps_source_info pps; @@ -260,8 +251,18 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, } } - /* Create a posix clock. */ - err = posix_clock_register(&ptp->clock, ptp->devid); + /* Initialize a new device of our class in our clock structure. */ + device_initialize(&ptp->dev); + ptp->dev.devt = ptp->devid; + ptp->dev.class = ptp_class; + ptp->dev.parent = parent; + ptp->dev.groups = ptp->pin_attr_groups; + ptp->dev.release = ptp_clock_release; + dev_set_drvdata(&ptp->dev, ptp); + dev_set_name(&ptp->dev, "ptp%d", ptp->index); + + /* Create a posix clock and link it to the device. 
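+	 * ptp->dev was set up above with device_initialize() and given
+	 * ptp_clock_release() as its release callback, so the pin groups,
+	 * the two mutexes and the index are now freed only once the last
+	 * reference to the embedded device is dropped (presumably by the
+	 * posix clock code, which is handed the device below).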
 */
+	err = posix_clock_register(&ptp->clock, &ptp->dev);
+	if (err) {
+		pr_err("failed to create posix clock\n");
+		goto no_clock;
+	}
@@ -273,8 +274,6 @@ no_clock:
 	if (ptp->pps_source)
 		pps_unregister_source(ptp->pps_source);
 no_pps:
-	device_destroy(ptp_class, ptp->devid);
-no_device:
 	ptp_cleanup_pin_groups(ptp);
 no_pin_groups:
 	if (ptp->kworker)
@@ -304,10 +303,8 @@ int ptp_clock_unregister(struct ptp_clock *ptp)
 	if (ptp->pps_source)
 		pps_unregister_source(ptp->pps_source);
-	device_destroy(ptp_class, ptp->devid);
-
-	ptp_cleanup_pin_groups(ptp);
-
 	posix_clock_unregister(&ptp->clock);
+
 	return 0;
 }
 EXPORT_SYMBOL(ptp_clock_unregister);
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
index 9171d42468fd..6b97155148f1 100644
--- a/drivers/ptp/ptp_private.h
+++ b/drivers/ptp/ptp_private.h
@@ -28,7 +28,7 @@ struct timestamp_event_queue {
 struct ptp_clock {
 	struct posix_clock clock;
-	struct device *dev;
+	struct device dev;
 	struct ptp_clock_info *info;
 	dev_t devid;
 	int index; /* index into clocks.map */
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 74eb5af7295f..97bfdd47954f 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -194,6 +194,18 @@ config REGULATOR_BD70528
 	  This driver can also be built as a module. If so, the module
 	  will be called bd70528-regulator.
 
+config REGULATOR_BD71828
+	tristate "ROHM BD71828 Power Regulator"
+	depends on MFD_ROHM_BD71828
+	select REGULATOR_ROHM
+	help
+	  This driver supports voltage regulators on ROHM BD71828 PMIC.
+	  This will enable support for the software controllable buck
+	  and LDO regulators.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called bd71828-regulator.
+
 config REGULATOR_BD718XX
 	tristate "ROHM BD71837 Power Regulator"
 	depends on MFD_ROHM_BD718XX
@@ -600,6 +612,27 @@ config REGULATOR_MCP16502
 	  through the regulator interface. In addition it enables
 	  suspend-to-ram/standby transition.
 
+config REGULATOR_MP8859
+	tristate "MPS MP8859 regulator driver"
+	depends on I2C
+	select REGMAP_I2C
+	help
+	  Say y here to support the MP8859 voltage regulator. This driver
+	  supports basic operations (get/set voltage) through the regulator
+	  interface.
+	  Say M here if you want to include support for the regulator as a
+	  module. The module will be named "mp8859".
+
+config REGULATOR_MPQ7920
+	tristate "Monolithic MPQ7920 PMIC"
+	depends on I2C && OF
+	select REGMAP_I2C
+	help
+	  Say y here to support the MPQ7920 PMIC. This will enable support
+	  for the software controllable 4 buck and 5 LDO regulators.
+	  This driver supports the control of the device's different power
+	  rails through the regulator interface.
+
 config REGULATOR_MT6311
 	tristate "MediaTek MT6311 PMIC"
 	depends on I2C
@@ -1077,6 +1110,13 @@ config REGULATOR_VEXPRESS
 	  This driver provides support for voltage regulators available
 	  on the ARM Ltd's Versatile Express platform.
 
+config REGULATOR_VQMMC_IPQ4019
+	tristate "IPQ4019 VQMMC SD LDO regulator support"
+	depends on ARCH_QCOM
+	help
+	  This driver provides support for the VQMMC LDO I/O
+	  voltage regulator of the IPQ4019 SD/EMMC controller.
+ config REGULATOR_WM831X tristate "Wolfson Microelectronics WM831x PMIC regulators" depends on MFD_WM831X diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile index 2210ba56f9bd..07bc977c52b0 100644 --- a/drivers/regulator/Makefile +++ b/drivers/regulator/Makefile @@ -28,6 +28,7 @@ obj-$(CONFIG_REGULATOR_AS3722) += as3722-regulator.o obj-$(CONFIG_REGULATOR_AXP20X) += axp20x-regulator.o obj-$(CONFIG_REGULATOR_BCM590XX) += bcm590xx-regulator.o obj-$(CONFIG_REGULATOR_BD70528) += bd70528-regulator.o +obj-$(CONFIG_REGULATOR_BD71828) += bd71828-regulator.o obj-$(CONFIG_REGULATOR_BD718XX) += bd718x7-regulator.o obj-$(CONFIG_REGULATOR_BD9571MWV) += bd9571mwv-regulator.o obj-$(CONFIG_REGULATOR_DA903X) += da903x.o @@ -77,6 +78,8 @@ obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o obj-$(CONFIG_REGULATOR_MCP16502) += mcp16502.o +obj-$(CONFIG_REGULATOR_MP8859) += mp8859.o +obj-$(CONFIG_REGULATOR_MPQ7920) += mpq7920.o obj-$(CONFIG_REGULATOR_MT6311) += mt6311-regulator.o obj-$(CONFIG_REGULATOR_MT6323) += mt6323-regulator.o obj-$(CONFIG_REGULATOR_MT6358) += mt6358-regulator.o @@ -132,6 +135,7 @@ obj-$(CONFIG_REGULATOR_TWL4030) += twl-regulator.o twl6030-regulator.o obj-$(CONFIG_REGULATOR_UNIPHIER) += uniphier-regulator.o obj-$(CONFIG_REGULATOR_VCTRL) += vctrl-regulator.o obj-$(CONFIG_REGULATOR_VEXPRESS) += vexpress-regulator.o +obj-$(CONFIG_REGULATOR_VQMMC_IPQ4019) += vqmmc-ipq4019-regulator.o obj-$(CONFIG_REGULATOR_WM831X) += wm831x-dcdc.o obj-$(CONFIG_REGULATOR_WM831X) += wm831x-isink.o obj-$(CONFIG_REGULATOR_WM831X) += wm831x-ldo.o diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c index 989506bd90b1..16f0c8570036 100644 --- a/drivers/regulator/axp20x-regulator.c +++ b/drivers/regulator/axp20x-regulator.c @@ -413,10 +413,13 @@ static int axp20x_set_ramp_delay(struct regulator_dev *rdev, int ramp) int i; for (i = 0; i < rate_count; i++) { - if (ramp <= slew_rates[i]) - cfg = AXP20X_DCDC2_LDO3_V_RAMP_LDO3_RATE(i); - else + if (ramp > slew_rates[i]) break; + + if (id == AXP20X_DCDC2) + cfg = AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_RATE(i); + else + cfg = AXP20X_DCDC2_LDO3_V_RAMP_LDO3_RATE(i); } if (cfg == 0xff) { @@ -605,7 +608,7 @@ static const struct regulator_desc axp22x_regulators[] = { AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO1_MASK), AXP_DESC(AXP22X, ELDO2, "eldo2", "eldoin", 700, 3300, 100, AXP22X_ELDO2_V_OUT, AXP22X_ELDO2_V_OUT_MASK, - AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO1_MASK), + AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO2_MASK), AXP_DESC(AXP22X, ELDO3, "eldo3", "eldoin", 700, 3300, 100, AXP22X_ELDO3_V_OUT, AXP22X_ELDO3_V_OUT_MASK, AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO3_MASK), diff --git a/drivers/regulator/bd70528-regulator.c b/drivers/regulator/bd70528-regulator.c index ec764022621f..5bf8a2dc5fe7 100644 --- a/drivers/regulator/bd70528-regulator.c +++ b/drivers/regulator/bd70528-regulator.c @@ -101,7 +101,6 @@ static const struct regulator_ops bd70528_ldo_ops = { .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, .set_voltage_time_sel = regulator_set_voltage_time_sel, - .set_ramp_delay = bd70528_set_ramp_delay, }; static const struct regulator_ops bd70528_led_ops = { diff --git a/drivers/regulator/bd71828-regulator.c b/drivers/regulator/bd71828-regulator.c new file mode 100644 index 000000000000..b2fa17be4988 --- /dev/null +++ 
b/drivers/regulator/bd71828-regulator.c @@ -0,0 +1,807 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright (C) 2019 ROHM Semiconductors +// bd71828-regulator.c ROHM BD71828GW-DS1 regulator driver +// + +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/gpio.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/mfd/rohm-bd71828.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/regulator/driver.h> +#include <linux/regulator/machine.h> +#include <linux/regulator/of_regulator.h> + +struct reg_init { + unsigned int reg; + unsigned int mask; + unsigned int val; +}; +struct bd71828_regulator_data { + struct regulator_desc desc; + const struct rohm_dvs_config dvs; + const struct reg_init *reg_inits; + int reg_init_amnt; +}; + +static const struct reg_init buck1_inits[] = { + /* + * DVS Buck voltages can be changed by register values or via GPIO. + * Use register accesses by default. + */ + { + .reg = BD71828_REG_PS_CTRL_1, + .mask = BD71828_MASK_DVS_BUCK1_CTRL, + .val = BD71828_DVS_BUCK1_CTRL_I2C, + }, +}; + +static const struct reg_init buck2_inits[] = { + { + .reg = BD71828_REG_PS_CTRL_1, + .mask = BD71828_MASK_DVS_BUCK2_CTRL, + .val = BD71828_DVS_BUCK2_CTRL_I2C, + }, +}; + +static const struct reg_init buck6_inits[] = { + { + .reg = BD71828_REG_PS_CTRL_1, + .mask = BD71828_MASK_DVS_BUCK6_CTRL, + .val = BD71828_DVS_BUCK6_CTRL_I2C, + }, +}; + +static const struct reg_init buck7_inits[] = { + { + .reg = BD71828_REG_PS_CTRL_1, + .mask = BD71828_MASK_DVS_BUCK7_CTRL, + .val = BD71828_DVS_BUCK7_CTRL_I2C, + }, +}; + +static const struct regulator_linear_range bd71828_buck1267_volts[] = { + REGULATOR_LINEAR_RANGE(500000, 0x00, 0xef, 6250), + REGULATOR_LINEAR_RANGE(2000000, 0xf0, 0xff, 0), +}; + +static const struct regulator_linear_range bd71828_buck3_volts[] = { + REGULATOR_LINEAR_RANGE(1200000, 0x00, 0x0f, 50000), + REGULATOR_LINEAR_RANGE(2000000, 0x10, 0x1f, 0), +}; + +static const struct regulator_linear_range bd71828_buck4_volts[] = { + REGULATOR_LINEAR_RANGE(1000000, 0x00, 0x1f, 25000), + REGULATOR_LINEAR_RANGE(1800000, 0x20, 0x3f, 0), +}; + +static const struct regulator_linear_range bd71828_buck5_volts[] = { + REGULATOR_LINEAR_RANGE(2500000, 0x00, 0x0f, 50000), + REGULATOR_LINEAR_RANGE(3300000, 0x10, 0x1f, 0), +}; + +static const struct regulator_linear_range bd71828_ldo_volts[] = { + REGULATOR_LINEAR_RANGE(800000, 0x00, 0x31, 50000), + REGULATOR_LINEAR_RANGE(3300000, 0x32, 0x3f, 0), +}; + +static int bd71828_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay) +{ + unsigned int val; + + switch (ramp_delay) { + case 1 ... 2500: + val = 0; + break; + case 2501 ... 5000: + val = 1; + break; + case 5001 ... 10000: + val = 2; + break; + case 10001 ... 
20000:
+		val = 3;
+		break;
+	default:
+		val = 3;
+		dev_err(&rdev->dev,
+			"ramp_delay: %d not supported, setting 20mV/us",
+			ramp_delay);
+	}
+
+	/*
+	 * On BD71828 the ramp delay level control reg is at offset +2 from
+	 * the enable reg
+	 */
+	return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg + 2,
+				  BD71828_MASK_RAMP_DELAY,
+				  val << (ffs(BD71828_MASK_RAMP_DELAY) - 1));
+}
+
+static int buck_set_hw_dvs_levels(struct device_node *np,
+				  const struct regulator_desc *desc,
+				  struct regulator_config *cfg)
+{
+	struct bd71828_regulator_data *data;
+
+	data = container_of(desc, struct bd71828_regulator_data, desc);
+
+	return rohm_regulator_set_dvs_levels(&data->dvs, np, desc, cfg->regmap);
+}
+
+static int ldo6_parse_dt(struct device_node *np,
+			 const struct regulator_desc *desc,
+			 struct regulator_config *cfg)
+{
+	int ret, i;
+	uint32_t uv = 0;
+	unsigned int en;
+	struct regmap *regmap = cfg->regmap;
+	static const char * const props[] = { "rohm,dvs-run-voltage",
+					      "rohm,dvs-idle-voltage",
+					      "rohm,dvs-suspend-voltage",
+					      "rohm,dvs-lpsr-voltage" };
+	unsigned int mask[] = { BD71828_MASK_RUN_EN, BD71828_MASK_IDLE_EN,
+				BD71828_MASK_SUSP_EN, BD71828_MASK_LPSR_EN };
+
+	for (i = 0; i < ARRAY_SIZE(props); i++) {
+		ret = of_property_read_u32(np, props[i], &uv);
+		if (ret) {
+			if (ret != -EINVAL)
+				return ret;
+			continue;
+		}
+		if (uv)
+			en = 0xffffffff;
+		else
+			en = 0;
+
+		ret = regmap_update_bits(regmap, desc->enable_reg, mask[i], en);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+static const struct regulator_ops bd71828_buck_ops = {
+	.enable = regulator_enable_regmap,
+	.disable = regulator_disable_regmap,
+	.is_enabled = regulator_is_enabled_regmap,
+	.list_voltage = regulator_list_voltage_linear_range,
+	.set_voltage_sel = regulator_set_voltage_sel_regmap,
+	.get_voltage_sel = regulator_get_voltage_sel_regmap,
+};
+
+static const struct regulator_ops bd71828_dvs_buck_ops = {
+	.enable = regulator_enable_regmap,
+	.disable = regulator_disable_regmap,
+	.is_enabled = regulator_is_enabled_regmap,
+	.list_voltage = regulator_list_voltage_linear_range,
+	.set_voltage_sel = regulator_set_voltage_sel_regmap,
+	.get_voltage_sel = regulator_get_voltage_sel_regmap,
+	.set_voltage_time_sel = regulator_set_voltage_time_sel,
+	.set_ramp_delay = bd71828_set_ramp_delay,
+};
+
+static const struct regulator_ops bd71828_ldo_ops = {
+	.enable = regulator_enable_regmap,
+	.disable = regulator_disable_regmap,
+	.is_enabled = regulator_is_enabled_regmap,
+	.list_voltage = regulator_list_voltage_linear_range,
+	.set_voltage_sel = regulator_set_voltage_sel_regmap,
+	.get_voltage_sel = regulator_get_voltage_sel_regmap,
+};
+
+static const struct regulator_ops bd71828_ldo6_ops = {
+	.enable = regulator_enable_regmap,
+	.disable = regulator_disable_regmap,
+	.is_enabled = regulator_is_enabled_regmap,
+};
+
+static const struct bd71828_regulator_data bd71828_rdata[] = {
+	{
+		.desc = {
+			.name = "buck1",
+			.of_match = of_match_ptr("BUCK1"),
+			.regulators_node = of_match_ptr("regulators"),
+			.id = BD71828_BUCK1,
+			.ops = &bd71828_dvs_buck_ops,
+			.type = REGULATOR_VOLTAGE,
+			.linear_ranges = bd71828_buck1267_volts,
+			.n_linear_ranges = ARRAY_SIZE(bd71828_buck1267_volts),
+			.n_voltages = BD71828_BUCK1267_VOLTS,
+			.enable_reg = BD71828_REG_BUCK1_EN,
+			.enable_mask = BD71828_MASK_RUN_EN,
+			.vsel_reg = BD71828_REG_BUCK1_VOLT,
+			.vsel_mask = BD71828_MASK_BUCK1267_VOLT,
+			.owner = THIS_MODULE,
+			.of_parse_cb = buck_set_hw_dvs_levels,
+		},
+		.dvs = {
+			.level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_BUCK1_VOLT, + .run_mask = BD71828_MASK_BUCK1267_VOLT, + .idle_reg = BD71828_REG_BUCK1_IDLE_VOLT, + .idle_mask = BD71828_MASK_BUCK1267_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_reg = BD71828_REG_BUCK1_SUSP_VOLT, + .suspend_mask = BD71828_MASK_BUCK1267_VOLT, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + /* + * LPSR voltage is same as SUSPEND voltage. Allow + * setting it so that regulator can be set enabled at + * LPSR state + */ + .lpsr_reg = BD71828_REG_BUCK1_SUSP_VOLT, + .lpsr_mask = BD71828_MASK_BUCK1267_VOLT, + }, + .reg_inits = buck1_inits, + .reg_init_amnt = ARRAY_SIZE(buck1_inits), + }, + { + .desc = { + .name = "buck2", + .of_match = of_match_ptr("BUCK2"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_BUCK2, + .ops = &bd71828_dvs_buck_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_buck1267_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_buck1267_volts), + .n_voltages = BD71828_BUCK1267_VOLTS, + .enable_reg = BD71828_REG_BUCK2_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_BUCK2_VOLT, + .vsel_mask = BD71828_MASK_BUCK1267_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = buck_set_hw_dvs_levels, + }, + .dvs = { + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_BUCK2_VOLT, + .run_mask = BD71828_MASK_BUCK1267_VOLT, + .idle_reg = BD71828_REG_BUCK2_IDLE_VOLT, + .idle_mask = BD71828_MASK_BUCK1267_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_reg = BD71828_REG_BUCK2_SUSP_VOLT, + .suspend_mask = BD71828_MASK_BUCK1267_VOLT, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + .lpsr_reg = BD71828_REG_BUCK2_SUSP_VOLT, + .lpsr_mask = BD71828_MASK_BUCK1267_VOLT, + }, + .reg_inits = buck2_inits, + .reg_init_amnt = ARRAY_SIZE(buck2_inits), + }, + { + .desc = { + .name = "buck3", + .of_match = of_match_ptr("BUCK3"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_BUCK3, + .ops = &bd71828_buck_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_buck3_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_buck3_volts), + .n_voltages = BD71828_BUCK3_VOLTS, + .enable_reg = BD71828_REG_BUCK3_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_BUCK3_VOLT, + .vsel_mask = BD71828_MASK_BUCK3_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = buck_set_hw_dvs_levels, + }, + .dvs = { + /* + * BUCK3 only supports single voltage for all states. + * voltage can be individually enabled for each state + * though => allow setting all states to support + * enabling power rail on different states. 
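+			 *
+			 * A hypothetical (illustrative only) device tree
+			 * fragment using the rohm,dvs-*-voltage properties
+			 * consumed on behalf of the of_parse_cb above could
+			 * look like:
+			 *
+			 *	BUCK3 {
+			 *		regulator-name = "buck3";
+			 *		rohm,dvs-run-voltage = <1800000>;
+			 *		rohm,dvs-suspend-voltage = <1800000>;
+			 *	};
+			 *
+			 * The exact per-state semantics are implemented by
+			 * rohm_regulator_set_dvs_levels().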
+ */ + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_BUCK3_VOLT, + .idle_reg = BD71828_REG_BUCK3_VOLT, + .suspend_reg = BD71828_REG_BUCK3_VOLT, + .lpsr_reg = BD71828_REG_BUCK3_VOLT, + .run_mask = BD71828_MASK_BUCK3_VOLT, + .idle_mask = BD71828_MASK_BUCK3_VOLT, + .suspend_mask = BD71828_MASK_BUCK3_VOLT, + .lpsr_mask = BD71828_MASK_BUCK3_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + }, + }, + { + .desc = { + .name = "buck4", + .of_match = of_match_ptr("BUCK4"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_BUCK4, + .ops = &bd71828_buck_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_buck4_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_buck4_volts), + .n_voltages = BD71828_BUCK4_VOLTS, + .enable_reg = BD71828_REG_BUCK4_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_BUCK4_VOLT, + .vsel_mask = BD71828_MASK_BUCK4_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = buck_set_hw_dvs_levels, + }, + .dvs = { + /* + * BUCK4 only supports single voltage for all states. + * voltage can be individually enabled for each state + * though => allow setting all states to support + * enabling power rail on different states. + */ + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_BUCK4_VOLT, + .idle_reg = BD71828_REG_BUCK4_VOLT, + .suspend_reg = BD71828_REG_BUCK4_VOLT, + .lpsr_reg = BD71828_REG_BUCK4_VOLT, + .run_mask = BD71828_MASK_BUCK4_VOLT, + .idle_mask = BD71828_MASK_BUCK4_VOLT, + .suspend_mask = BD71828_MASK_BUCK4_VOLT, + .lpsr_mask = BD71828_MASK_BUCK4_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + }, + }, + { + .desc = { + .name = "buck5", + .of_match = of_match_ptr("BUCK5"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_BUCK5, + .ops = &bd71828_buck_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_buck5_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_buck5_volts), + .n_voltages = BD71828_BUCK5_VOLTS, + .enable_reg = BD71828_REG_BUCK5_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_BUCK5_VOLT, + .vsel_mask = BD71828_MASK_BUCK5_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = buck_set_hw_dvs_levels, + }, + .dvs = { + /* + * BUCK5 only supports single voltage for all states. + * voltage can be individually enabled for each state + * though => allow setting all states to support + * enabling power rail on different states. 
+ */ + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_BUCK5_VOLT, + .idle_reg = BD71828_REG_BUCK5_VOLT, + .suspend_reg = BD71828_REG_BUCK5_VOLT, + .lpsr_reg = BD71828_REG_BUCK5_VOLT, + .run_mask = BD71828_MASK_BUCK5_VOLT, + .idle_mask = BD71828_MASK_BUCK5_VOLT, + .suspend_mask = BD71828_MASK_BUCK5_VOLT, + .lpsr_mask = BD71828_MASK_BUCK5_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + }, + }, + { + .desc = { + .name = "buck6", + .of_match = of_match_ptr("BUCK6"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_BUCK6, + .ops = &bd71828_dvs_buck_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_buck1267_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_buck1267_volts), + .n_voltages = BD71828_BUCK1267_VOLTS, + .enable_reg = BD71828_REG_BUCK6_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_BUCK6_VOLT, + .vsel_mask = BD71828_MASK_BUCK1267_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = buck_set_hw_dvs_levels, + }, + .dvs = { + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_BUCK6_VOLT, + .run_mask = BD71828_MASK_BUCK1267_VOLT, + .idle_reg = BD71828_REG_BUCK6_IDLE_VOLT, + .idle_mask = BD71828_MASK_BUCK1267_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_reg = BD71828_REG_BUCK6_SUSP_VOLT, + .suspend_mask = BD71828_MASK_BUCK1267_VOLT, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + .lpsr_reg = BD71828_REG_BUCK6_SUSP_VOLT, + .lpsr_mask = BD71828_MASK_BUCK1267_VOLT, + }, + .reg_inits = buck6_inits, + .reg_init_amnt = ARRAY_SIZE(buck6_inits), + }, + { + .desc = { + .name = "buck7", + .of_match = of_match_ptr("BUCK7"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_BUCK7, + .ops = &bd71828_dvs_buck_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_buck1267_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_buck1267_volts), + .n_voltages = BD71828_BUCK1267_VOLTS, + .enable_reg = BD71828_REG_BUCK7_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_BUCK7_VOLT, + .vsel_mask = BD71828_MASK_BUCK1267_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = buck_set_hw_dvs_levels, + }, + .dvs = { + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_BUCK7_VOLT, + .run_mask = BD71828_MASK_BUCK1267_VOLT, + .idle_reg = BD71828_REG_BUCK7_IDLE_VOLT, + .idle_mask = BD71828_MASK_BUCK1267_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_reg = BD71828_REG_BUCK7_SUSP_VOLT, + .suspend_mask = BD71828_MASK_BUCK1267_VOLT, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + .lpsr_reg = BD71828_REG_BUCK7_SUSP_VOLT, + .lpsr_mask = BD71828_MASK_BUCK1267_VOLT, + }, + .reg_inits = buck7_inits, + .reg_init_amnt = ARRAY_SIZE(buck7_inits), + }, + { + .desc = { + .name = "ldo1", + .of_match = of_match_ptr("LDO1"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_LDO1, + .ops = &bd71828_ldo_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_ldo_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts), + .n_voltages = BD71828_LDO_VOLTS, + .enable_reg = BD71828_REG_LDO1_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_LDO1_VOLT, + .vsel_mask = BD71828_MASK_LDO_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = 
buck_set_hw_dvs_levels, + }, + .dvs = { + /* + * LDO1 only supports single voltage for all states. + * voltage can be individually enabled for each state + * though => allow setting all states to support + * enabling power rail on different states. + */ + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_LDO1_VOLT, + .idle_reg = BD71828_REG_LDO1_VOLT, + .suspend_reg = BD71828_REG_LDO1_VOLT, + .lpsr_reg = BD71828_REG_LDO1_VOLT, + .run_mask = BD71828_MASK_LDO_VOLT, + .idle_mask = BD71828_MASK_LDO_VOLT, + .suspend_mask = BD71828_MASK_LDO_VOLT, + .lpsr_mask = BD71828_MASK_LDO_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + }, + }, { + .desc = { + .name = "ldo2", + .of_match = of_match_ptr("LDO2"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_LDO2, + .ops = &bd71828_ldo_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_ldo_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts), + .n_voltages = BD71828_LDO_VOLTS, + .enable_reg = BD71828_REG_LDO2_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_LDO2_VOLT, + .vsel_mask = BD71828_MASK_LDO_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = buck_set_hw_dvs_levels, + }, + .dvs = { + /* + * LDO2 only supports single voltage for all states. + * voltage can be individually enabled for each state + * though => allow setting all states to support + * enabling power rail on different states. + */ + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_LDO2_VOLT, + .idle_reg = BD71828_REG_LDO2_VOLT, + .suspend_reg = BD71828_REG_LDO2_VOLT, + .lpsr_reg = BD71828_REG_LDO2_VOLT, + .run_mask = BD71828_MASK_LDO_VOLT, + .idle_mask = BD71828_MASK_LDO_VOLT, + .suspend_mask = BD71828_MASK_LDO_VOLT, + .lpsr_mask = BD71828_MASK_LDO_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + }, + }, { + .desc = { + .name = "ldo3", + .of_match = of_match_ptr("LDO3"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_LDO3, + .ops = &bd71828_ldo_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_ldo_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts), + .n_voltages = BD71828_LDO_VOLTS, + .enable_reg = BD71828_REG_LDO3_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_LDO3_VOLT, + .vsel_mask = BD71828_MASK_LDO_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = buck_set_hw_dvs_levels, + }, + .dvs = { + /* + * LDO3 only supports single voltage for all states. + * voltage can be individually enabled for each state + * though => allow setting all states to support + * enabling power rail on different states. 
+			 */
+			.level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+				     ROHM_DVS_LEVEL_SUSPEND |
+				     ROHM_DVS_LEVEL_LPSR,
+			.run_reg = BD71828_REG_LDO3_VOLT,
+			.idle_reg = BD71828_REG_LDO3_VOLT,
+			.suspend_reg = BD71828_REG_LDO3_VOLT,
+			.lpsr_reg = BD71828_REG_LDO3_VOLT,
+			.run_mask = BD71828_MASK_LDO_VOLT,
+			.idle_mask = BD71828_MASK_LDO_VOLT,
+			.suspend_mask = BD71828_MASK_LDO_VOLT,
+			.lpsr_mask = BD71828_MASK_LDO_VOLT,
+			.idle_on_mask = BD71828_MASK_IDLE_EN,
+			.suspend_on_mask = BD71828_MASK_SUSP_EN,
+			.lpsr_on_mask = BD71828_MASK_LPSR_EN,
+		},
+
+	}, {
+		.desc = {
+			.name = "ldo4",
+			.of_match = of_match_ptr("LDO4"),
+			.regulators_node = of_match_ptr("regulators"),
+			.id = BD71828_LDO4,
+			.ops = &bd71828_ldo_ops,
+			.type = REGULATOR_VOLTAGE,
+			.linear_ranges = bd71828_ldo_volts,
+			.n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts),
+			.n_voltages = BD71828_LDO_VOLTS,
+			.enable_reg = BD71828_REG_LDO4_EN,
+			.enable_mask = BD71828_MASK_RUN_EN,
+			.vsel_reg = BD71828_REG_LDO4_VOLT,
+			.vsel_mask = BD71828_MASK_LDO_VOLT,
+			.owner = THIS_MODULE,
+			.of_parse_cb = buck_set_hw_dvs_levels,
+		},
+		.dvs = {
+			/*
+			 * LDO4 only supports single voltage for all states.
+			 * voltage can be individually enabled for each state
+			 * though => allow setting all states to support
+			 * enabling power rail on different states.
+			 */
+			.level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+				     ROHM_DVS_LEVEL_SUSPEND |
+				     ROHM_DVS_LEVEL_LPSR,
+			.run_reg = BD71828_REG_LDO4_VOLT,
+			.idle_reg = BD71828_REG_LDO4_VOLT,
+			.suspend_reg = BD71828_REG_LDO4_VOLT,
+			.lpsr_reg = BD71828_REG_LDO4_VOLT,
+			.run_mask = BD71828_MASK_LDO_VOLT,
+			.idle_mask = BD71828_MASK_LDO_VOLT,
+			.suspend_mask = BD71828_MASK_LDO_VOLT,
+			.lpsr_mask = BD71828_MASK_LDO_VOLT,
+			.idle_on_mask = BD71828_MASK_IDLE_EN,
+			.suspend_on_mask = BD71828_MASK_SUSP_EN,
+			.lpsr_on_mask = BD71828_MASK_LPSR_EN,
+		},
+	}, {
+		.desc = {
+			.name = "ldo5",
+			.of_match = of_match_ptr("LDO5"),
+			.regulators_node = of_match_ptr("regulators"),
+			.id = BD71828_LDO5,
+			.ops = &bd71828_ldo_ops,
+			.type = REGULATOR_VOLTAGE,
+			.linear_ranges = bd71828_ldo_volts,
+			.n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts),
+			.n_voltages = BD71828_LDO_VOLTS,
+			.enable_reg = BD71828_REG_LDO5_EN,
+			.enable_mask = BD71828_MASK_RUN_EN,
+			.vsel_reg = BD71828_REG_LDO5_VOLT,
+			.vsel_mask = BD71828_MASK_LDO_VOLT,
+			.of_parse_cb = buck_set_hw_dvs_levels,
+			.owner = THIS_MODULE,
+		},
+		/*
+		 * LDO5 is special: its vsel settings can be taken from one of
+		 * 2 different registers (selected by GPIO).
+		 *
+		 * This driver supports only the configuration where
+		 * BD71828_REG_LDO5_VOLT_L is used.
+ */ + .dvs = { + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_LDO5_VOLT, + .idle_reg = BD71828_REG_LDO5_VOLT, + .suspend_reg = BD71828_REG_LDO5_VOLT, + .lpsr_reg = BD71828_REG_LDO5_VOLT, + .run_mask = BD71828_MASK_LDO_VOLT, + .idle_mask = BD71828_MASK_LDO_VOLT, + .suspend_mask = BD71828_MASK_LDO_VOLT, + .lpsr_mask = BD71828_MASK_LDO_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + }, + + }, { + .desc = { + .name = "ldo6", + .of_match = of_match_ptr("LDO6"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_LDO6, + .ops = &bd71828_ldo6_ops, + .type = REGULATOR_VOLTAGE, + .fixed_uV = BD71828_LDO_6_VOLTAGE, + .n_voltages = 1, + .enable_reg = BD71828_REG_LDO6_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .owner = THIS_MODULE, + /* + * LDO6 only supports enable/disable for all states. + * Voltage for LDO6 is fixed. + */ + .of_parse_cb = ldo6_parse_dt, + }, + }, { + .desc = { + /* SNVS LDO in data-sheet */ + .name = "ldo7", + .of_match = of_match_ptr("LDO7"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_LDO_SNVS, + .ops = &bd71828_ldo_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_ldo_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts), + .n_voltages = BD71828_LDO_VOLTS, + .enable_reg = BD71828_REG_LDO7_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_LDO7_VOLT, + .vsel_mask = BD71828_MASK_LDO_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = buck_set_hw_dvs_levels, + }, + .dvs = { + /* + * LDO7 only supports single voltage for all states. + * voltage can be individually enabled for each state + * though => allow setting all states to support + * enabling power rail on different states. 
+ */ + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_LDO7_VOLT, + .idle_reg = BD71828_REG_LDO7_VOLT, + .suspend_reg = BD71828_REG_LDO7_VOLT, + .lpsr_reg = BD71828_REG_LDO7_VOLT, + .run_mask = BD71828_MASK_LDO_VOLT, + .idle_mask = BD71828_MASK_LDO_VOLT, + .suspend_mask = BD71828_MASK_LDO_VOLT, + .lpsr_mask = BD71828_MASK_LDO_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + }, + + }, +}; + +static int bd71828_probe(struct platform_device *pdev) +{ + struct rohm_regmap_dev *bd71828; + int i, j, ret; + struct regulator_config config = { + .dev = pdev->dev.parent, + }; + + bd71828 = dev_get_drvdata(pdev->dev.parent); + if (!bd71828) { + dev_err(&pdev->dev, "No MFD driver data\n"); + return -EINVAL; + } + + config.regmap = bd71828->regmap; + + for (i = 0; i < ARRAY_SIZE(bd71828_rdata); i++) { + struct regulator_dev *rdev; + const struct bd71828_regulator_data *rd; + + rd = &bd71828_rdata[i]; + rdev = devm_regulator_register(&pdev->dev, + &rd->desc, &config); + if (IS_ERR(rdev)) { + dev_err(&pdev->dev, + "failed to register %s regulator\n", + rd->desc.name); + return PTR_ERR(rdev); + } + for (j = 0; j < rd->reg_init_amnt; j++) { + ret = regmap_update_bits(bd71828->regmap, + rd->reg_inits[j].reg, + rd->reg_inits[j].mask, + rd->reg_inits[j].val); + if (ret) { + dev_err(&pdev->dev, + "regulator %s init failed\n", + rd->desc.name); + return ret; + } + } + } + return 0; +} + +static struct platform_driver bd71828_regulator = { + .driver = { + .name = "bd71828-pmic" + }, + .probe = bd71828_probe, +}; + +module_platform_driver(bd71828_regulator); + +MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>"); +MODULE_DESCRIPTION("BD71828 voltage regulator driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:bd71828-pmic"); diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c index 13a43eee2e46..8f9b2d8eaf10 100644 --- a/drivers/regulator/bd718x7-regulator.c +++ b/drivers/regulator/bd718x7-regulator.c @@ -1142,28 +1142,14 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = { }, }; -struct bd718xx_pmic_inits { - const struct bd718xx_regulator_data *r_datas; - unsigned int r_amount; -}; - static int bd718xx_probe(struct platform_device *pdev) { struct bd718xx *mfd; struct regulator_config config = { 0 }; - struct bd718xx_pmic_inits pmic_regulators[ROHM_CHIP_TYPE_AMOUNT] = { - [ROHM_CHIP_TYPE_BD71837] = { - .r_datas = bd71837_regulators, - .r_amount = ARRAY_SIZE(bd71837_regulators), - }, - [ROHM_CHIP_TYPE_BD71847] = { - .r_datas = bd71847_regulators, - .r_amount = ARRAY_SIZE(bd71847_regulators), - }, - }; - int i, j, err; bool use_snvs; + const struct bd718xx_regulator_data *reg_data; + unsigned int num_reg_data; mfd = dev_get_drvdata(pdev->dev.parent); if (!mfd) { @@ -1172,8 +1158,16 @@ static int bd718xx_probe(struct platform_device *pdev) goto err; } - if (mfd->chip.chip_type >= ROHM_CHIP_TYPE_AMOUNT || - !pmic_regulators[mfd->chip.chip_type].r_datas) { + switch (mfd->chip.chip_type) { + case ROHM_CHIP_TYPE_BD71837: + reg_data = bd71837_regulators; + num_reg_data = ARRAY_SIZE(bd71837_regulators); + break; + case ROHM_CHIP_TYPE_BD71847: + reg_data = bd71847_regulators; + num_reg_data = ARRAY_SIZE(bd71847_regulators); + break; + default: dev_err(&pdev->dev, "Unsupported chip type\n"); err = -EINVAL; goto err; @@ -1215,13 +1209,13 @@ static int bd718xx_probe(struct 
platform_device *pdev) } } - for (i = 0; i < pmic_regulators[mfd->chip.chip_type].r_amount; i++) { + for (i = 0; i < num_reg_data; i++) { const struct regulator_desc *desc; struct regulator_dev *rdev; const struct bd718xx_regulator_data *r; - r = &pmic_regulators[mfd->chip.chip_type].r_datas[i]; + r = &reg_data[i]; desc = &r->desc; config.dev = pdev->dev.parent; diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 679ad3d2ed23..d015d99cb59d 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -1198,6 +1198,10 @@ static int machine_constraints_voltage(struct regulator_dev *rdev, return -EINVAL; } + /* no need to loop voltages if range is continuous */ + if (rdev->desc->continuous_voltage_range) + return 0; + /* initial: [cmin..cmax] valid, [min_uV..max_uV] not */ for (i = 0; i < count; i++) { int value; @@ -1938,8 +1942,8 @@ struct regulator *_regulator_get(struct device *dev, const char *id, regulator = create_regulator(rdev, dev, id); if (regulator == NULL) { regulator = ERR_PTR(-ENOMEM); - put_device(&rdev->dev); module_put(rdev->owner); + put_device(&rdev->dev); return regulator; } @@ -2063,13 +2067,13 @@ static void _regulator_put(struct regulator *regulator) rdev->open_count--; rdev->exclusive = 0; - put_device(&rdev->dev); regulator_unlock(rdev); kfree_const(regulator->supply_name); kfree(regulator); module_put(rdev->owner); + put_device(&rdev->dev); } /** @@ -3466,6 +3470,7 @@ int regulator_set_voltage_rdev(struct regulator_dev *rdev, int min_uV, out: return ret; } +EXPORT_SYMBOL_GPL(regulator_set_voltage_rdev); static int regulator_limit_voltage_step(struct regulator_dev *rdev, int *current_uV, int *min_uV) @@ -4030,6 +4035,7 @@ int regulator_get_voltage_rdev(struct regulator_dev *rdev) return ret; return ret - rdev->constraints->uV_offset; } +EXPORT_SYMBOL_GPL(regulator_get_voltage_rdev); /** * regulator_get_voltage - get regulator output voltage @@ -5002,6 +5008,7 @@ regulator_register(const struct regulator_desc *regulator_desc, struct regulator_dev *rdev; bool dangling_cfg_gpiod = false; bool dangling_of_gpiod = false; + bool reg_device_fail = false; struct device *dev; int ret, i; @@ -5187,7 +5194,7 @@ regulator_register(const struct regulator_desc *regulator_desc, dev_set_drvdata(&rdev->dev, rdev); ret = device_register(&rdev->dev); if (ret != 0) { - put_device(&rdev->dev); + reg_device_fail = true; goto unset_supplies; } @@ -5218,7 +5225,10 @@ wash: clean: if (dangling_of_gpiod) gpiod_put(config->ena_gpiod); - kfree(rdev); + if (reg_device_fail) + put_device(&rdev->dev); + else + kfree(rdev); kfree(config); rinse: if (dangling_cfg_gpiod) diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c index f9448ed50e05..0cdeb6186529 100644 --- a/drivers/regulator/da9210-regulator.c +++ b/drivers/regulator/da9210-regulator.c @@ -131,8 +131,7 @@ static const struct of_device_id da9210_dt_ids[] = { }; MODULE_DEVICE_TABLE(of, da9210_dt_ids); -static int da9210_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int da9210_i2c_probe(struct i2c_client *i2c) { struct da9210 *chip; struct device *dev = &i2c->dev; @@ -228,7 +227,7 @@ static struct i2c_driver da9210_regulator_driver = { .name = "da9210", .of_match_table = of_match_ptr(da9210_dt_ids), }, - .probe = da9210_i2c_probe, + .probe_new = da9210_i2c_probe, .id_table = da9210_i2c_id, }; diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c index 523dc1b95826..2ea4362ffa5c 100644 ---
a/drivers/regulator/da9211-regulator.c +++ b/drivers/regulator/da9211-regulator.c @@ -416,8 +416,7 @@ static int da9211_regulator_init(struct da9211 *chip) /* * I2C driver interface functions */ -static int da9211_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int da9211_i2c_probe(struct i2c_client *i2c) { struct da9211 *chip; int error, ret; @@ -526,7 +525,7 @@ static struct i2c_driver da9211_regulator_driver = { .name = "da9211", .of_match_table = of_match_ptr(da9211_dt_ids), }, - .probe = da9211_i2c_probe, + .probe_new = da9211_i2c_probe, .id_table = da9211_i2c_id, }; diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c index ca3dc3f3bb29..bb16c465426e 100644 --- a/drivers/regulator/helpers.c +++ b/drivers/regulator/helpers.c @@ -13,6 +13,8 @@ #include <linux/regulator/driver.h> #include <linux/module.h> +#include "internal.h" + /** * regulator_is_enabled_regmap - standard is_enabled() for regmap users * @@ -881,3 +883,15 @@ void regulator_bulk_set_supply_names(struct regulator_bulk_data *consumers, consumers[i].supply = supply_names[i]; } EXPORT_SYMBOL_GPL(regulator_bulk_set_supply_names); + +/** + * regulator_is_equal - test whether two regulators are the same + * + * @reg1: first regulator to operate on + * @reg2: second regulator to operate on + */ +bool regulator_is_equal(struct regulator *reg1, struct regulator *reg2) +{ + return reg1->rdev == reg2->rdev; +} +EXPORT_SYMBOL_GPL(regulator_is_equal); diff --git a/drivers/regulator/isl9305.c b/drivers/regulator/isl9305.c index 978f5e903cae..cfb765986d0d 100644 --- a/drivers/regulator/isl9305.c +++ b/drivers/regulator/isl9305.c @@ -137,8 +137,7 @@ static const struct regmap_config isl9305_regmap = { .cache_type = REGCACHE_RBTREE, }; -static int isl9305_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int isl9305_i2c_probe(struct i2c_client *i2c) { struct regulator_config config = { }; struct isl9305_pdata *pdata = i2c->dev.platform_data; @@ -198,7 +197,7 @@ static struct i2c_driver isl9305_regulator_driver = { .name = "isl9305", .of_match_table = of_match_ptr(isl9305_dt_ids), }, - .probe = isl9305_i2c_probe, + .probe_new = isl9305_i2c_probe, .id_table = isl9305_i2c_id, }; diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c index bc96e65ef7c0..8be252f81b09 100644 --- a/drivers/regulator/lp3971.c +++ b/drivers/regulator/lp3971.c @@ -400,8 +400,7 @@ static int setup_regulators(struct lp3971 *lp3971, return 0; } -static int lp3971_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int lp3971_i2c_probe(struct i2c_client *i2c) { struct lp3971 *lp3971; struct lp3971_platform_data *pdata = dev_get_platdata(&i2c->dev); @@ -449,7 +448,7 @@ static struct i2c_driver lp3971_i2c_driver = { .driver = { .name = "LP3971", }, - .probe = lp3971_i2c_probe, + .probe_new = lp3971_i2c_probe, .id_table = lp3971_i2c_id, }; diff --git a/drivers/regulator/ltc3676.c b/drivers/regulator/ltc3676.c index d934540eb8c4..e12e52c69e52 100644 --- a/drivers/regulator/ltc3676.c +++ b/drivers/regulator/ltc3676.c @@ -301,8 +301,7 @@ static irqreturn_t ltc3676_isr(int irq, void *dev_id) return IRQ_HANDLED; } -static int ltc3676_regulator_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int ltc3676_regulator_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct regulator_init_data *init_data = dev_get_platdata(dev); @@ -380,7 +379,7 @@ static struct i2c_driver ltc3676_driver = { .name = DRIVER_NAME, .of_match_table 
= of_match_ptr(ltc3676_of_match), }, - .probe = ltc3676_regulator_probe, + .probe_new = ltc3676_regulator_probe, .id_table = ltc3676_i2c_id, }; module_i2c_driver(ltc3676_driver); diff --git a/drivers/regulator/max77650-regulator.c b/drivers/regulator/max77650-regulator.c index e57fc9197d62..ac89a412f665 100644 --- a/drivers/regulator/max77650-regulator.c +++ b/drivers/regulator/max77650-regulator.c @@ -386,9 +386,16 @@ static int max77650_regulator_probe(struct platform_device *pdev) return 0; } +static const struct of_device_id max77650_regulator_of_match[] = { + { .compatible = "maxim,max77650-regulator" }, + { } +}; +MODULE_DEVICE_TABLE(of, max77650_regulator_of_match); + static struct platform_driver max77650_regulator_driver = { .driver = { .name = "max77650-regulator", + .of_match_table = max77650_regulator_of_match, }, .probe = max77650_regulator_probe, }; diff --git a/drivers/regulator/mp8859.c b/drivers/regulator/mp8859.c new file mode 100644 index 000000000000..1d26b506ee5b --- /dev/null +++ b/drivers/regulator/mp8859.c @@ -0,0 +1,156 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (c) 2019 five technologies GmbH +// Author: Markus Reichl <m.reichl@fivetechno.de> + +#include <linux/module.h> +#include <linux/i2c.h> +#include <linux/of.h> +#include <linux/regulator/driver.h> +#include <linux/regmap.h> + + +#define VOL_MIN_IDX 0x00 +#define VOL_MAX_IDX 0x7ff + +/* Register definitions */ +#define MP8859_VOUT_L_REG 0 //3 lo Bits +#define MP8859_VOUT_H_REG 1 //8 hi Bits +#define MP8859_VOUT_GO_REG 2 +#define MP8859_IOUT_LIM_REG 3 +#define MP8859_CTL1_REG 4 +#define MP8859_CTL2_REG 5 +#define MP8859_RESERVED1_REG 6 +#define MP8859_RESERVED2_REG 7 +#define MP8859_RESERVED3_REG 8 +#define MP8859_STATUS_REG 9 +#define MP8859_INTERRUPT_REG 0x0A +#define MP8859_MASK_REG 0x0B +#define MP8859_ID1_REG 0x0C +#define MP8859_MFR_ID_REG 0x27 +#define MP8859_DEV_ID_REG 0x28 +#define MP8859_IC_REV_REG 0x29 + +#define MP8859_MAX_REG 0x29 + +#define MP8859_GO_BIT 0x01 + + +static int mp8859_set_voltage_sel(struct regulator_dev *rdev, unsigned int sel) +{ + int ret; + + ret = regmap_write(rdev->regmap, MP8859_VOUT_L_REG, sel & 0x7); + + if (ret) + return ret; + ret = regmap_write(rdev->regmap, MP8859_VOUT_H_REG, sel >> 3); + + if (ret) + return ret; + ret = regmap_update_bits(rdev->regmap, MP8859_VOUT_GO_REG, + MP8859_GO_BIT, 1); + return ret; +} + +static int mp8859_get_voltage_sel(struct regulator_dev *rdev) +{ + unsigned int val_tmp; + unsigned int val; + int ret; + + ret = regmap_read(rdev->regmap, MP8859_VOUT_H_REG, &val_tmp); + + if (ret) + return ret; + val = val_tmp << 3; + + ret = regmap_read(rdev->regmap, MP8859_VOUT_L_REG, &val_tmp); + + if (ret) + return ret; + val |= val_tmp & 0x07; + return val; +} + +static const struct regulator_linear_range mp8859_dcdc_ranges[] = { + REGULATOR_LINEAR_RANGE(0, VOL_MIN_IDX, VOL_MAX_IDX, 10000), +}; + +static const struct regmap_config mp8859_regmap = { + .reg_bits = 8, + .val_bits = 8, + .max_register = MP8859_MAX_REG, + .cache_type = REGCACHE_RBTREE, +}; + +static const struct regulator_ops mp8859_ops = { + .set_voltage_sel = mp8859_set_voltage_sel, + .get_voltage_sel = mp8859_get_voltage_sel, + .list_voltage = regulator_list_voltage_linear_range, +}; + +static const struct regulator_desc mp8859_regulators[] = { + { + .id = 0, + .type = REGULATOR_VOLTAGE, + .name = "mp8859_dcdc", + .of_match = of_match_ptr("mp8859_dcdc"), + .n_voltages = VOL_MAX_IDX + 1, + .linear_ranges = mp8859_dcdc_ranges, + .n_linear_ranges = 1, + .ops = &mp8859_ops, + 
.owner = THIS_MODULE, + }, +}; + +static int mp8859_i2c_probe(struct i2c_client *i2c) +{ + int ret; + struct regulator_config config = {.dev = &i2c->dev}; + struct regmap *regmap = devm_regmap_init_i2c(i2c, &mp8859_regmap); + struct regulator_dev *rdev; + + if (IS_ERR(regmap)) { + ret = PTR_ERR(regmap); + dev_err(&i2c->dev, "regmap init failed: %d\n", ret); + return ret; + } + rdev = devm_regulator_register(&i2c->dev, &mp8859_regulators[0], + &config); + + if (IS_ERR(rdev)) { + ret = PTR_ERR(rdev); + dev_err(&i2c->dev, "failed to register %s: %d\n", + mp8859_regulators[0].name, ret); + return ret; + } + return 0; +} + +static const struct of_device_id mp8859_dt_id[] = { + {.compatible = "mps,mp8859"}, + {}, +}; +MODULE_DEVICE_TABLE(of, mp8859_dt_id); + +static const struct i2c_device_id mp8859_i2c_id[] = { + { "mp8859", }, + { }, +}; +MODULE_DEVICE_TABLE(i2c, mp8859_i2c_id); + +static struct i2c_driver mp8859_regulator_driver = { + .driver = { + .name = "mp8859", + .of_match_table = of_match_ptr(mp8859_dt_id), + }, + .probe_new = mp8859_i2c_probe, + .id_table = mp8859_i2c_id, +}; + +module_i2c_driver(mp8859_regulator_driver); + +MODULE_DESCRIPTION("Monolithic Power Systems MP8859 voltage regulator driver"); +MODULE_AUTHOR("Markus Reichl <m.reichl@fivetechno.de>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/regulator/mpq7920.c b/drivers/regulator/mpq7920.c new file mode 100644 index 000000000000..54c862edf571 --- /dev/null +++ b/drivers/regulator/mpq7920.c @@ -0,0 +1,330 @@ +// SPDX-License-Identifier: GPL-2.0+ +// +// mpq7920.c - regulator driver for mps mpq7920 +// +// Copyright 2019 Monolithic Power Systems, Inc +// +// Author: Saravanan Sekar <sravanhome@gmail.com> + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/err.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/regulator/driver.h> +#include <linux/regulator/of_regulator.h> +#include <linux/i2c.h> +#include <linux/regmap.h> +#include "mpq7920.h" + +#define MPQ7920_BUCK_VOLT_RANGE \ + ((MPQ7920_VOLT_MAX - MPQ7920_BUCK_VOLT_MIN)/MPQ7920_VOLT_STEP + 1) +#define MPQ7920_LDO_VOLT_RANGE \ + ((MPQ7920_VOLT_MAX - MPQ7920_LDO_VOLT_MIN)/MPQ7920_VOLT_STEP + 1) + +#define MPQ7920BUCK(_name, _id, _ilim) \ + [MPQ7920_BUCK ## _id] = { \ + .id = MPQ7920_BUCK ## _id, \ + .name = _name, \ + .of_match = _name, \ + .regulators_node = "regulators", \ + .of_parse_cb = mpq7920_parse_cb, \ + .ops = &mpq7920_buck_ops, \ + .min_uV = MPQ7920_BUCK_VOLT_MIN, \ + .uV_step = MPQ7920_VOLT_STEP, \ + .n_voltages = MPQ7920_BUCK_VOLT_RANGE, \ + .curr_table = _ilim, \ + .n_current_limits = ARRAY_SIZE(_ilim), \ + .csel_reg = MPQ7920_BUCK ##_id## _REG_C, \ + .csel_mask = MPQ7920_MASK_BUCK_ILIM, \ + .enable_reg = MPQ7920_REG_REGULATOR_EN, \ + .enable_mask = BIT(MPQ7920_REGULATOR_EN_OFFSET - \ + MPQ7920_BUCK ## _id), \ + .vsel_reg = MPQ7920_BUCK ##_id## _REG_A, \ + .vsel_mask = MPQ7920_MASK_VREF, \ + .active_discharge_on = MPQ7920_DISCHARGE_ON, \ + .active_discharge_reg = MPQ7920_BUCK ##_id## _REG_B, \ + .active_discharge_mask = MPQ7920_MASK_DISCHARGE, \ + .soft_start_reg = MPQ7920_BUCK ##_id## _REG_C, \ + .soft_start_mask = MPQ7920_MASK_SOFTSTART, \ + .owner = THIS_MODULE, \ + } + +#define MPQ7920LDO(_name, _id, _ops, _ilim, _ilim_sz, _creg, _cmask) \ + [MPQ7920_LDO ## _id] = { \ + .id = MPQ7920_LDO ## _id, \ + .name = _name, \ + .of_match = _name, \ + .regulators_node = "regulators", \ + .ops = _ops, \ + .min_uV = MPQ7920_LDO_VOLT_MIN, \ + .uV_step = 
MPQ7920_VOLT_STEP, \ + .n_voltages = MPQ7920_LDO_VOLT_RANGE, \ + .vsel_reg = MPQ7920_LDO ##_id## _REG_A, \ + .vsel_mask = MPQ7920_MASK_VREF, \ + .curr_table = _ilim, \ + .n_current_limits = _ilim_sz, \ + .csel_reg = _creg, \ + .csel_mask = _cmask, \ + .enable_reg = (_id == 1) ? 0 : MPQ7920_REG_REGULATOR_EN,\ + .enable_mask = BIT(MPQ7920_REGULATOR_EN_OFFSET - \ + MPQ7920_LDO ##_id + 1), \ + .active_discharge_on = MPQ7920_DISCHARGE_ON, \ + .active_discharge_mask = MPQ7920_MASK_DISCHARGE, \ + .active_discharge_reg = MPQ7920_LDO ##_id## _REG_B, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + } + +enum mpq7920_regulators { + MPQ7920_BUCK1, + MPQ7920_BUCK2, + MPQ7920_BUCK3, + MPQ7920_BUCK4, + MPQ7920_LDO1, /* LDORTC */ + MPQ7920_LDO2, + MPQ7920_LDO3, + MPQ7920_LDO4, + MPQ7920_LDO5, + MPQ7920_MAX_REGULATORS, +}; + +struct mpq7920_regulator_info { + struct regmap *regmap; + struct regulator_desc *rdesc; +}; + +static const struct regmap_config mpq7920_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 0x25, +}; + +/* Current limits array (in uA) + * ILIM1 & ILIM3 + */ +static const unsigned int mpq7920_I_limits1[] = { + 4600000, 6600000, 7600000, 9300000 +}; + +/* ILIM2 & ILIM4 */ +static const unsigned int mpq7920_I_limits2[] = { + 2700000, 3900000, 5100000, 6100000 +}; + +/* LDO4 & LDO5 */ +static const unsigned int mpq7920_I_limits3[] = { + 300000, 700000 +}; + +static int mpq7920_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay); +static int mpq7920_parse_cb(struct device_node *np, + const struct regulator_desc *rdesc, + struct regulator_config *config); + +/* RTCLDO not controllable, always ON */ +static const struct regulator_ops mpq7920_ldortc_ops = { + .list_voltage = regulator_list_voltage_linear, + .map_voltage = regulator_map_voltage_linear, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, +}; + +static const struct regulator_ops mpq7920_ldo_wo_current_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .list_voltage = regulator_list_voltage_linear, + .map_voltage = regulator_map_voltage_linear, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .set_active_discharge = regulator_set_active_discharge_regmap, +}; + +static const struct regulator_ops mpq7920_ldo_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .list_voltage = regulator_list_voltage_linear, + .map_voltage = regulator_map_voltage_linear, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .set_active_discharge = regulator_set_active_discharge_regmap, + .get_current_limit = regulator_get_current_limit_regmap, + .set_current_limit = regulator_set_current_limit_regmap, +}; + +static const struct regulator_ops mpq7920_buck_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .list_voltage = regulator_list_voltage_linear, + .map_voltage = regulator_map_voltage_linear, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .set_active_discharge = regulator_set_active_discharge_regmap, + .set_soft_start = regulator_set_soft_start_regmap, + .set_ramp_delay = mpq7920_set_ramp_delay, +}; + +static struct regulator_desc 
mpq7920_regulators_desc[MPQ7920_MAX_REGULATORS] = { + MPQ7920BUCK("buck1", 1, mpq7920_I_limits1), + MPQ7920BUCK("buck2", 2, mpq7920_I_limits2), + MPQ7920BUCK("buck3", 3, mpq7920_I_limits1), + MPQ7920BUCK("buck4", 4, mpq7920_I_limits2), + MPQ7920LDO("ldortc", 1, &mpq7920_ldortc_ops, NULL, 0, 0, 0), + MPQ7920LDO("ldo2", 2, &mpq7920_ldo_wo_current_ops, NULL, 0, 0, 0), + MPQ7920LDO("ldo3", 3, &mpq7920_ldo_wo_current_ops, NULL, 0, 0, 0), + MPQ7920LDO("ldo4", 4, &mpq7920_ldo_ops, mpq7920_I_limits3, + ARRAY_SIZE(mpq7920_I_limits3), MPQ7920_LDO4_REG_B, + MPQ7920_MASK_LDO_ILIM), + MPQ7920LDO("ldo5", 5, &mpq7920_ldo_ops, mpq7920_I_limits3, + ARRAY_SIZE(mpq7920_I_limits3), MPQ7920_LDO5_REG_B, + MPQ7920_MASK_LDO_ILIM), +}; + +/* + * DVS ramp rate BUCK1 to BUCK4 + * 00-01: Reserved + * 10: 8mV/us + * 11: 4mV/us + */ +static int mpq7920_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay) +{ + unsigned int ramp_val; + + if (ramp_delay > 8000 || ramp_delay < 0) + return -EINVAL; + + if (ramp_delay <= 4000) + ramp_val = 3; + else + ramp_val = 2; + + return regmap_update_bits(rdev->regmap, MPQ7920_REG_CTL0, + MPQ7920_MASK_DVS_SLEWRATE, ramp_val << 6); +} + +static int mpq7920_parse_cb(struct device_node *np, + const struct regulator_desc *desc, + struct regulator_config *config) +{ + uint8_t val; + int ret; + struct mpq7920_regulator_info *info = config->driver_data; + struct regulator_desc *rdesc = &info->rdesc[desc->id]; + + if (of_property_read_bool(np, "mps,buck-ovp-disable")) { + regmap_update_bits(config->regmap, + MPQ7920_BUCK1_REG_B + (rdesc->id * 4), + MPQ7920_MASK_OVP, MPQ7920_OVP_DISABLE); + } + + ret = of_property_read_u8(np, "mps,buck-phase-delay", &val); + if (!ret) { + regmap_update_bits(config->regmap, + MPQ7920_BUCK1_REG_C + (rdesc->id * 4), + MPQ7920_MASK_BUCK_PHASE_DEALY, + (val & 3) << 4); + } + + ret = of_property_read_u8(np, "mps,buck-softstart", &val); + if (!ret) + rdesc->soft_start_val_on = (val & 3) << 2; + + return 0; +} + +static void mpq7920_parse_dt(struct device *dev, + struct mpq7920_regulator_info *info) +{ + int ret; + struct device_node *np = dev->of_node; + uint8_t freq; + + np = of_get_child_by_name(np, "regulators"); + if (!np) { + dev_err(dev, "missing 'regulators' subnode in DT\n"); + return; + } + + ret = of_property_read_u8(np, "mps,switch-freq", &freq); + if (!ret) { + regmap_update_bits(info->regmap, MPQ7920_REG_CTL0, + MPQ7920_MASK_SWITCH_FREQ, + (freq & 3) << 4); + } + + of_node_put(np); +} + +static int mpq7920_i2c_probe(struct i2c_client *client) +{ + struct device *dev = &client->dev; + struct mpq7920_regulator_info *info; + struct regulator_config config = { NULL, }; + struct regulator_dev *rdev; + struct regmap *regmap; + int i; + + info = devm_kzalloc(dev, sizeof(struct mpq7920_regulator_info), + GFP_KERNEL); + if (!info) + return -ENOMEM; + + info->rdesc = mpq7920_regulators_desc; + regmap = devm_regmap_init_i2c(client, &mpq7920_regmap_config); + if (IS_ERR(regmap)) { + dev_err(dev, "Failed to allocate regmap!\n"); + return PTR_ERR(regmap); + } + + i2c_set_clientdata(client, info); + info->regmap = regmap; + if (client->dev.of_node) + mpq7920_parse_dt(&client->dev, info); + + config.dev = dev; + config.regmap = regmap; + config.driver_data = info; + + for (i = 0; i < MPQ7920_MAX_REGULATORS; i++) { + rdev = devm_regulator_register(dev, + &mpq7920_regulators_desc[i], + &config); + if (IS_ERR(rdev)) { + dev_err(dev, "Failed to register regulator!\n"); + return PTR_ERR(rdev); + } + } + + return 0; +} + +static const struct of_device_id 
mpq7920_of_match[] = { + { .compatible = "mps,mpq7920"}, + {}, +}; +MODULE_DEVICE_TABLE(of, mpq7920_of_match); + +static const struct i2c_device_id mpq7920_id[] = { + { "mpq7920", }, + { }, +}; +MODULE_DEVICE_TABLE(i2c, mpq7920_id); + +static struct i2c_driver mpq7920_regulator_driver = { + .driver = { + .name = "mpq7920", + .of_match_table = of_match_ptr(mpq7920_of_match), + }, + .probe_new = mpq7920_i2c_probe, + .id_table = mpq7920_id, +}; +module_i2c_driver(mpq7920_regulator_driver); + +MODULE_AUTHOR("Saravanan Sekar <sravanhome@gmail.com>"); +MODULE_DESCRIPTION("MPQ7920 PMIC regulator driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/regulator/mpq7920.h b/drivers/regulator/mpq7920.h new file mode 100644 index 000000000000..489924655a96 --- /dev/null +++ b/drivers/regulator/mpq7920.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * mpq7920.h - Regulator definitions for mpq7920 + * + * Copyright 2019 Monolithic Power Systems, Inc + * + */ + +#ifndef __MPQ7920_H__ +#define __MPQ7920_H__ + +#define MPQ7920_REG_CTL0 0x00 +#define MPQ7920_REG_CTL1 0x01 +#define MPQ7920_REG_CTL2 0x02 +#define MPQ7920_BUCK1_REG_A 0x03 +#define MPQ7920_BUCK1_REG_B 0x04 +#define MPQ7920_BUCK1_REG_C 0x05 +#define MPQ7920_BUCK1_REG_D 0x06 +#define MPQ7920_BUCK2_REG_A 0x07 +#define MPQ7920_BUCK2_REG_B 0x08 +#define MPQ7920_BUCK2_REG_C 0x09 +#define MPQ7920_BUCK2_REG_D 0x0a +#define MPQ7920_BUCK3_REG_A 0x0b +#define MPQ7920_BUCK3_REG_B 0x0c +#define MPQ7920_BUCK3_REG_C 0x0d +#define MPQ7920_BUCK3_REG_D 0x0e +#define MPQ7920_BUCK4_REG_A 0x0f +#define MPQ7920_BUCK4_REG_B 0x10 +#define MPQ7920_BUCK4_REG_C 0x11 +#define MPQ7920_BUCK4_REG_D 0x12 +#define MPQ7920_LDO1_REG_A 0x13 +#define MPQ7920_LDO1_REG_B 0x0 +#define MPQ7920_LDO2_REG_A 0x14 +#define MPQ7920_LDO2_REG_B 0x15 +#define MPQ7920_LDO2_REG_C 0x16 +#define MPQ7920_LDO3_REG_A 0x17 +#define MPQ7920_LDO3_REG_B 0x18 +#define MPQ7920_LDO3_REG_C 0x19 +#define MPQ7920_LDO4_REG_A 0x1a +#define MPQ7920_LDO4_REG_B 0x1b +#define MPQ7920_LDO4_REG_C 0x1c +#define MPQ7920_LDO5_REG_A 0x1d +#define MPQ7920_LDO5_REG_B 0x1e +#define MPQ7920_LDO5_REG_C 0x1f +#define MPQ7920_REG_MODE 0x20 +#define MPQ7920_REG_REGULATOR_EN 0x22 + +#define MPQ7920_MASK_VREF 0x7f +#define MPQ7920_MASK_BUCK_ILIM 0xc0 +#define MPQ7920_MASK_LDO_ILIM BIT(6) +#define MPQ7920_MASK_DISCHARGE BIT(5) +#define MPQ7920_MASK_MODE 0xc0 +#define MPQ7920_MASK_SOFTSTART 0x0c +#define MPQ7920_MASK_SWITCH_FREQ 0x30 +#define MPQ7920_MASK_BUCK_PHASE_DEALY 0x30 +#define MPQ7920_MASK_DVS_SLEWRATE 0xc0 +#define MPQ7920_MASK_OVP 0x40 +#define MPQ7920_OVP_DISABLE ~(0x40) +#define MPQ7920_DISCHARGE_ON BIT(5) + +#define MPQ7920_REGULATOR_EN_OFFSET 7 + +/* values in uV */ +#define MPQ7920_BUCK_VOLT_MIN 400000 +#define MPQ7920_LDO_VOLT_MIN 650000 +#define MPQ7920_VOLT_MAX 3587500 +#define MPQ7920_VOLT_STEP 12500 + +#endif /* __MPQ7920_H__ */ diff --git a/drivers/regulator/mt6311-regulator.c b/drivers/regulator/mt6311-regulator.c index af95449d3590..69e6af3cd505 100644 --- a/drivers/regulator/mt6311-regulator.c +++ b/drivers/regulator/mt6311-regulator.c @@ -85,8 +85,7 @@ static const struct regulator_desc mt6311_regulators[] = { /* * I2C driver interface functions */ -static int mt6311_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int mt6311_i2c_probe(struct i2c_client *i2c) { struct regulator_config config = { }; struct regulator_dev *rdev; @@ -154,7 +153,7 @@ static struct i2c_driver mt6311_regulator_driver = { .name = "mt6311", .of_match_table = of_match_ptr(mt6311_dt_ids), },
- .probe = mt6311_i2c_probe, + .probe_new = mt6311_i2c_probe, .id_table = mt6311_i2c_id, }; diff --git a/drivers/regulator/pv88060-regulator.c b/drivers/regulator/pv88060-regulator.c index 3d3415839ba2..787ced918372 100644 --- a/drivers/regulator/pv88060-regulator.c +++ b/drivers/regulator/pv88060-regulator.c @@ -279,8 +279,7 @@ error_i2c: /* * I2C driver interface functions */ -static int pv88060_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int pv88060_i2c_probe(struct i2c_client *i2c) { struct regulator_init_data *init_data = dev_get_platdata(&i2c->dev); struct pv88060 *chip; @@ -385,7 +384,7 @@ static struct i2c_driver pv88060_regulator_driver = { .name = "pv88060", .of_match_table = of_match_ptr(pv88060_dt_ids), }, - .probe = pv88060_i2c_probe, + .probe_new = pv88060_i2c_probe, .id_table = pv88060_i2c_id, }; diff --git a/drivers/regulator/pv88090-regulator.c b/drivers/regulator/pv88090-regulator.c index b1d0d97ae935..784729ec2182 100644 --- a/drivers/regulator/pv88090-regulator.c +++ b/drivers/regulator/pv88090-regulator.c @@ -272,8 +272,7 @@ error_i2c: /* * I2C driver interface functions */ -static int pv88090_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int pv88090_i2c_probe(struct i2c_client *i2c) { struct regulator_init_data *init_data = dev_get_platdata(&i2c->dev); struct pv88090 *chip; @@ -406,7 +405,7 @@ static struct i2c_driver pv88090_regulator_driver = { .name = "pv88090", .of_match_table = of_match_ptr(pv88090_dt_ids), }, - .probe = pv88090_i2c_probe, + .probe_new = pv88090_i2c_probe, .id_table = pv88090_i2c_id, }; diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c index 5b4003226484..31f79fda3238 100644 --- a/drivers/regulator/rk808-regulator.c +++ b/drivers/regulator/rk808-regulator.c @@ -1282,7 +1282,7 @@ static int rk808_regulator_dt_parse_pdata(struct device *dev, } if (!pdata->dvs_gpio[i]) { - dev_warn(dev, "there is no dvs%d gpio\n", i); + dev_info(dev, "there is no dvs%d gpio\n", i); continue; } diff --git a/drivers/regulator/rn5t618-regulator.c b/drivers/regulator/rn5t618-regulator.c index 4a91be0ad5ae..5c12d57be040 100644 --- a/drivers/regulator/rn5t618-regulator.c +++ b/drivers/regulator/rn5t618-regulator.c @@ -148,6 +148,7 @@ static struct platform_driver rn5t618_regulator_driver = { module_platform_driver(rn5t618_regulator_driver); +MODULE_ALIAS("platform:rn5t618-regulator"); MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>"); MODULE_DESCRIPTION("RN5T618 regulator driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/regulator/s2mpa01.c b/drivers/regulator/s2mpa01.c index 51f7e8b74d8c..115f59530852 100644 --- a/drivers/regulator/s2mpa01.c +++ b/drivers/regulator/s2mpa01.c @@ -390,5 +390,5 @@ module_platform_driver(s2mpa01_pmic_driver); /* Module information */ MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>"); MODULE_AUTHOR("Sachin Kamat <sachin.kamat@samsung.com>"); -MODULE_DESCRIPTION("SAMSUNG S2MPA01 Regulator Driver"); +MODULE_DESCRIPTION("Samsung S2MPA01 Regulator Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c index 4f2dc5ebffdc..23d288278957 100644 --- a/drivers/regulator/s2mps11.c +++ b/drivers/regulator/s2mps11.c @@ -1265,5 +1265,5 @@ module_platform_driver(s2mps11_pmic_driver); /* Module information */ MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>"); -MODULE_DESCRIPTION("SAMSUNG S2MPS11/S2MPS14/S2MPS15/S2MPU02 Regulator Driver"); +MODULE_DESCRIPTION("Samsung 
S2MPS11/S2MPS14/S2MPS15/S2MPU02 Regulator Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c index bdc07739e9a2..4abd3ed31f60 100644 --- a/drivers/regulator/s5m8767.c +++ b/drivers/regulator/s5m8767.c @@ -588,7 +588,7 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev, if (of_property_read_u32(reg_np, "op_mode", &rmode->mode)) { dev_warn(iodev->dev, - "no op_mode property property at %pOF\n", + "no op_mode property at %pOF\n", reg_np); rmode->mode = S5M8767_OPMODE_NORMAL_MODE; @@ -1015,5 +1015,5 @@ module_exit(s5m8767_pmic_exit); /* Module information */ MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>"); -MODULE_DESCRIPTION("SAMSUNG S5M8767 Regulator Driver"); +MODULE_DESCRIPTION("Samsung S5M8767 Regulator Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/regulator/slg51000-regulator.c b/drivers/regulator/slg51000-regulator.c index bf1a3508ebc4..44e4cecbf6de 100644 --- a/drivers/regulator/slg51000-regulator.c +++ b/drivers/regulator/slg51000-regulator.c @@ -439,8 +439,7 @@ static void slg51000_clear_fault_log(struct slg51000 *chip) dev_dbg(chip->dev, "Fault log: FLT_POR\n"); } -static int slg51000_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int slg51000_i2c_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct slg51000 *chip; @@ -509,7 +508,7 @@ static struct i2c_driver slg51000_regulator_driver = { .driver = { .name = "slg51000-regulator", }, - .probe = slg51000_i2c_probe, + .probe_new = slg51000_i2c_probe, .id_table = slg51000_i2c_id, }; diff --git a/drivers/regulator/sy8106a-regulator.c b/drivers/regulator/sy8106a-regulator.c index 42e03b2c10a0..2222e739e62b 100644 --- a/drivers/regulator/sy8106a-regulator.c +++ b/drivers/regulator/sy8106a-regulator.c @@ -61,8 +61,7 @@ static const struct regulator_desc sy8106a_reg = { /* * I2C driver interface functions */ -static int sy8106a_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int sy8106a_i2c_probe(struct i2c_client *i2c) { struct device *dev = &i2c->dev; struct regulator_dev *rdev; @@ -141,7 +140,7 @@ static struct i2c_driver sy8106a_regulator_driver = { .name = "sy8106a", .of_match_table = of_match_ptr(sy8106a_i2c_of_match), }, - .probe = sy8106a_i2c_probe, + .probe_new = sy8106a_i2c_probe, .id_table = sy8106a_i2c_id, }; diff --git a/drivers/regulator/sy8824x.c b/drivers/regulator/sy8824x.c index 92adb4f3ee19..62d243f3b904 100644 --- a/drivers/regulator/sy8824x.c +++ b/drivers/regulator/sy8824x.c @@ -112,8 +112,7 @@ static const struct regmap_config sy8824_regmap_config = { .val_bits = 8, }; -static int sy8824_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int sy8824_i2c_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct device_node *np = dev->of_node; @@ -222,7 +221,7 @@ static struct i2c_driver sy8824_regulator_driver = { .name = "sy8824-regulator", .of_match_table = of_match_ptr(sy8824_dt_ids), }, - .probe = sy8824_i2c_probe, + .probe_new = sy8824_i2c_probe, .id_table = sy8824_id, }; module_i2c_driver(sy8824_regulator_driver); diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c index 89b9314d64c9..af9abcd9c166 100644 --- a/drivers/regulator/ti-abb-regulator.c +++ b/drivers/regulator/ti-abb-regulator.c @@ -748,7 +748,7 @@ static int ti_abb_probe(struct platform_device *pdev) * We may have shared interrupt register offsets which are * write-1-to-clear between domains ensuring 
exclusivity. */ - abb->int_base = devm_ioremap_nocache(dev, res->start, + abb->int_base = devm_ioremap(dev, res->start, resource_size(res)); if (!abb->int_base) { dev_err(dev, "Unable to map '%s'\n", pname); @@ -768,7 +768,7 @@ static int ti_abb_probe(struct platform_device *pdev) * We may have shared efuse register offsets which are read-only * between domains */ - abb->efuse_base = devm_ioremap_nocache(dev, res->start, + abb->efuse_base = devm_ioremap(dev, res->start, resource_size(res)); if (!abb->efuse_base) { dev_err(dev, "Unable to map '%s'\n", pname); diff --git a/drivers/regulator/tps65132-regulator.c b/drivers/regulator/tps65132-regulator.c index 7b0e38f8d627..0edc83089ba2 100644 --- a/drivers/regulator/tps65132-regulator.c +++ b/drivers/regulator/tps65132-regulator.c @@ -220,8 +220,7 @@ static const struct regmap_config tps65132_regmap_config = { .wr_table = &tps65132_no_reg_table, }; -static int tps65132_probe(struct i2c_client *client, - const struct i2c_device_id *client_id) +static int tps65132_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct tps65132_regulator *tps; @@ -272,7 +271,7 @@ static struct i2c_driver tps65132_i2c_driver = { .driver = { .name = "tps65132", }, - .probe = tps65132_probe, + .probe_new = tps65132_probe, .id_table = tps65132_id, }; diff --git a/drivers/regulator/vctrl-regulator.c b/drivers/regulator/vctrl-regulator.c index 9a9ee8188109..cbadb1c99679 100644 --- a/drivers/regulator/vctrl-regulator.c +++ b/drivers/regulator/vctrl-regulator.c @@ -11,10 +11,13 @@ #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> +#include <linux/regulator/coupler.h> #include <linux/regulator/driver.h> #include <linux/regulator/of_regulator.h> #include <linux/sort.h> +#include "internal.h" + struct vctrl_voltage_range { int min_uV; int max_uV; @@ -79,7 +82,7 @@ static int vctrl_calc_output_voltage(struct vctrl_data *vctrl, int ctrl_uV) static int vctrl_get_voltage(struct regulator_dev *rdev) { struct vctrl_data *vctrl = rdev_get_drvdata(rdev); - int ctrl_uV = regulator_get_voltage(vctrl->ctrl_reg); + int ctrl_uV = regulator_get_voltage_rdev(vctrl->ctrl_reg->rdev); return vctrl_calc_output_voltage(vctrl, ctrl_uV); } @@ -90,16 +93,16 @@ static int vctrl_set_voltage(struct regulator_dev *rdev, { struct vctrl_data *vctrl = rdev_get_drvdata(rdev); struct regulator *ctrl_reg = vctrl->ctrl_reg; - int orig_ctrl_uV = regulator_get_voltage(ctrl_reg); + int orig_ctrl_uV = regulator_get_voltage_rdev(ctrl_reg->rdev); int uV = vctrl_calc_output_voltage(vctrl, orig_ctrl_uV); int ret; if (req_min_uV >= uV || !vctrl->ovp_threshold) /* voltage rising or no OVP */ - return regulator_set_voltage( - ctrl_reg, + return regulator_set_voltage_rdev(ctrl_reg->rdev, vctrl_calc_ctrl_voltage(vctrl, req_min_uV), - vctrl_calc_ctrl_voltage(vctrl, req_max_uV)); + vctrl_calc_ctrl_voltage(vctrl, req_max_uV), + PM_SUSPEND_ON); while (uV > req_min_uV) { int max_drop_uV = (uV * vctrl->ovp_threshold) / 100; @@ -114,9 +117,10 @@ static int vctrl_set_voltage(struct regulator_dev *rdev, next_uV = max_t(int, req_min_uV, uV - max_drop_uV); next_ctrl_uV = vctrl_calc_ctrl_voltage(vctrl, next_uV); - ret = regulator_set_voltage(ctrl_reg, + ret = regulator_set_voltage_rdev(ctrl_reg->rdev, + next_ctrl_uV, next_ctrl_uV, - next_ctrl_uV); + PM_SUSPEND_ON); if (ret) goto err; @@ -130,7 +134,8 @@ static int vctrl_set_voltage(struct regulator_dev *rdev, err: /* Try to go back to original voltage */ - regulator_set_voltage(ctrl_reg, orig_ctrl_uV, orig_ctrl_uV); + 
regulator_set_voltage_rdev(ctrl_reg->rdev, orig_ctrl_uV, orig_ctrl_uV, + PM_SUSPEND_ON); return ret; } @@ -155,9 +160,10 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev, if (selector >= vctrl->sel || !vctrl->ovp_threshold) { /* voltage rising or no OVP */ - ret = regulator_set_voltage(ctrl_reg, + ret = regulator_set_voltage_rdev(ctrl_reg->rdev, + vctrl->vtable[selector].ctrl, vctrl->vtable[selector].ctrl, - vctrl->vtable[selector].ctrl); + PM_SUSPEND_ON); if (!ret) vctrl->sel = selector; @@ -173,9 +179,10 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev, else next_sel = vctrl->vtable[vctrl->sel].ovp_min_sel; - ret = regulator_set_voltage(ctrl_reg, + ret = regulator_set_voltage_rdev(ctrl_reg->rdev, vctrl->vtable[next_sel].ctrl, - vctrl->vtable[next_sel].ctrl); + vctrl->vtable[next_sel].ctrl, + PM_SUSPEND_ON); if (ret) { dev_err(&rdev->dev, "failed to set control voltage to %duV\n", @@ -195,9 +202,10 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev, err: if (vctrl->sel != orig_sel) { /* Try to go back to original voltage */ - if (!regulator_set_voltage(ctrl_reg, + if (!regulator_set_voltage_rdev(ctrl_reg->rdev, + vctrl->vtable[orig_sel].ctrl, vctrl->vtable[orig_sel].ctrl, - vctrl->vtable[orig_sel].ctrl)) + PM_SUSPEND_ON)) vctrl->sel = orig_sel; else dev_warn(&rdev->dev, @@ -482,7 +490,7 @@ static int vctrl_probe(struct platform_device *pdev) if (ret) return ret; - ctrl_uV = regulator_get_voltage(vctrl->ctrl_reg); + ctrl_uV = regulator_get_voltage_rdev(vctrl->ctrl_reg->rdev); if (ctrl_uV < 0) { dev_err(&pdev->dev, "failed to get control voltage\n"); return ctrl_uV; diff --git a/drivers/regulator/vqmmc-ipq4019-regulator.c b/drivers/regulator/vqmmc-ipq4019-regulator.c new file mode 100644 index 000000000000..6d5ae25d08d1 --- /dev/null +++ b/drivers/regulator/vqmmc-ipq4019-regulator.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-2.0+ +// +// Copyright (c) 2019 Mantas Pucka <mantas@8devices.com> +// Copyright (c) 2019 Robert Marko <robert.marko@sartura.hr> +// +// Driver for IPQ4019 SD/MMC controller's I/O LDO voltage regulator + +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/regulator/driver.h> +#include <linux/regulator/machine.h> +#include <linux/regulator/of_regulator.h> + +static const unsigned int ipq4019_vmmc_voltages[] = { + 1500000, 1800000, 2500000, 3000000, +}; + +static const struct regulator_ops ipq4019_regulator_voltage_ops = { + .list_voltage = regulator_list_voltage_table, + .map_voltage = regulator_map_voltage_ascend, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, +}; + +static const struct regulator_desc vmmc_regulator = { + .name = "vmmcq", + .ops = &ipq4019_regulator_voltage_ops, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .volt_table = ipq4019_vmmc_voltages, + .n_voltages = ARRAY_SIZE(ipq4019_vmmc_voltages), + .vsel_reg = 0, + .vsel_mask = 0x3, +}; + +static const struct regmap_config ipq4019_vmmcq_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, +}; + +static int ipq4019_regulator_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct regulator_init_data *init_data; + struct regulator_config cfg = {}; + struct regulator_dev *rdev; + struct resource *res; + struct regmap *rmap; + void __iomem *base; + + init_data = of_get_regulator_init_data(dev, dev->of_node, + &vmmc_regulator); + if (!init_data) + 
return -EINVAL; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + rmap = devm_regmap_init_mmio(dev, base, &ipq4019_vmmcq_regmap_config); + if (IS_ERR(rmap)) + return PTR_ERR(rmap); + + cfg.dev = dev; + cfg.init_data = init_data; + cfg.of_node = dev->of_node; + cfg.regmap = rmap; + + rdev = devm_regulator_register(dev, &vmmc_regulator, &cfg); + if (IS_ERR(rdev)) { + dev_err(dev, "Failed to register regulator: %ld\n", + PTR_ERR(rdev)); + return PTR_ERR(rdev); + } + platform_set_drvdata(pdev, rdev); + + return 0; +} + +static const struct of_device_id regulator_ipq4019_of_match[] = { + { .compatible = "qcom,vqmmc-ipq4019-regulator", }, + {}, +}; + +static struct platform_driver ipq4019_regulator_driver = { + .probe = ipq4019_regulator_probe, + .driver = { + .name = "vqmmc-ipq4019-regulator", + .of_match_table = of_match_ptr(regulator_ipq4019_of_match), + }, +}; +module_platform_driver(ipq4019_regulator_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Mantas Pucka <mantas@8devices.com>"); +MODULE_DESCRIPTION("IPQ4019 VQMMC voltage regulator"); diff --git a/drivers/reset/core.c b/drivers/reset/core.c index ca1d49146f61..7597c70e04d5 100644 --- a/drivers/reset/core.c +++ b/drivers/reset/core.c @@ -787,7 +787,7 @@ struct reset_control *__devm_reset_control_get(struct device *dev, return ERR_PTR(-ENOMEM); rstc = __reset_control_get(dev, id, index, shared, optional, acquired); - if (!IS_ERR(rstc)) { + if (!IS_ERR_OR_NULL(rstc)) { *ptr = rstc; devres_add(dev, ptr); } else { @@ -861,8 +861,7 @@ static int of_reset_control_get_count(struct device_node *node) * @acquired: only one reset control may be acquired for a given controller * and ID * - * Returns pointer to allocated reset_control_array on success or - * error on failure + * Returns pointer to allocated reset_control on success or error on failure */ struct reset_control * of_reset_control_array_get(struct device_node *np, bool shared, bool optional, @@ -915,8 +914,7 @@ EXPORT_SYMBOL_GPL(of_reset_control_array_get); * that just have to be asserted or deasserted, without any * requirements on the order. 
* - * Returns pointer to allocated reset_control_array on success or - * error on failure + * Returns pointer to allocated reset_control on success or error on failure */ struct reset_control * devm_reset_control_array_get(struct device *dev, bool shared, bool optional) @@ -930,7 +928,7 @@ devm_reset_control_array_get(struct device *dev, bool shared, bool optional) return ERR_PTR(-ENOMEM); rstc = of_reset_control_array_get(dev->of_node, shared, optional, true); - if (IS_ERR(rstc)) { + if (IS_ERR_OR_NULL(rstc)) { devres_free(devres); return rstc; } diff --git a/drivers/reset/reset-brcmstb.c b/drivers/reset/reset-brcmstb.c index a608f445dad6..f213264c8567 100644 --- a/drivers/reset/reset-brcmstb.c +++ b/drivers/reset/reset-brcmstb.c @@ -91,12 +91,6 @@ static int brcmstb_reset_probe(struct platform_device *pdev) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!IS_ALIGNED(res->start, SW_INIT_BANK_SIZE) || - !IS_ALIGNED(resource_size(res), SW_INIT_BANK_SIZE)) { - dev_err(kdev, "incorrect register range\n"); - return -EINVAL; - } - priv->base = devm_ioremap_resource(kdev, res); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c index df2829dd55ad..2ecd8752b088 100644 --- a/drivers/rtc/rtc-mc146818-lib.c +++ b/drivers/rtc/rtc-mc146818-lib.c @@ -172,20 +172,7 @@ int mc146818_set_time(struct rtc_time *time) save_control = CMOS_READ(RTC_CONTROL); CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); save_freq_select = CMOS_READ(RTC_FREQ_SELECT); - -#ifdef CONFIG_X86 - if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD && - boot_cpu_data.x86 == 0x17) || - boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { - CMOS_WRITE((save_freq_select & (~RTC_DIV_RESET2)), - RTC_FREQ_SELECT); - save_freq_select &= ~RTC_DIV_RESET2; - } else - CMOS_WRITE((save_freq_select | RTC_DIV_RESET2), - RTC_FREQ_SELECT); -#else - CMOS_WRITE((save_freq_select | RTC_DIV_RESET2), RTC_FREQ_SELECT); -#endif + CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); #ifdef CONFIG_MACH_DECSTATION CMOS_WRITE(real_yrs, RTC_DEC_YEAR); diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c index 5249fc99fd5f..9135e2101752 100644 --- a/drivers/rtc/rtc-mt6397.c +++ b/drivers/rtc/rtc-mt6397.c @@ -47,7 +47,7 @@ static irqreturn_t mtk_rtc_irq_handler_thread(int irq, void *data) irqen = irqsta & ~RTC_IRQ_EN_AL; mutex_lock(&rtc->lock); if (regmap_write(rtc->regmap, rtc->addr_base + RTC_IRQ_EN, - irqen) < 0) + irqen) == 0) mtk_rtc_write_trigger(rtc); mutex_unlock(&rtc->lock); @@ -169,12 +169,12 @@ static int mtk_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm) alm->pending = !!(pdn2 & RTC_PDN2_PWRON_ALARM); mutex_unlock(&rtc->lock); - tm->tm_sec = data[RTC_OFFSET_SEC]; - tm->tm_min = data[RTC_OFFSET_MIN]; - tm->tm_hour = data[RTC_OFFSET_HOUR]; - tm->tm_mday = data[RTC_OFFSET_DOM]; - tm->tm_mon = data[RTC_OFFSET_MTH]; - tm->tm_year = data[RTC_OFFSET_YEAR]; + tm->tm_sec = data[RTC_OFFSET_SEC] & RTC_AL_SEC_MASK; + tm->tm_min = data[RTC_OFFSET_MIN] & RTC_AL_MIN_MASK; + tm->tm_hour = data[RTC_OFFSET_HOUR] & RTC_AL_HOU_MASK; + tm->tm_mday = data[RTC_OFFSET_DOM] & RTC_AL_DOM_MASK; + tm->tm_mon = data[RTC_OFFSET_MTH] & RTC_AL_MTH_MASK; + tm->tm_year = data[RTC_OFFSET_YEAR] & RTC_AL_YEA_MASK; tm->tm_year += RTC_MIN_YEAR_OFFSET; tm->tm_mon--; @@ -195,14 +195,25 @@ static int mtk_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm) tm->tm_year -= RTC_MIN_YEAR_OFFSET; tm->tm_mon++; - data[RTC_OFFSET_SEC] = tm->tm_sec; - 
data[RTC_OFFSET_MIN] = tm->tm_min; - data[RTC_OFFSET_HOUR] = tm->tm_hour; - data[RTC_OFFSET_DOM] = tm->tm_mday; - data[RTC_OFFSET_MTH] = tm->tm_mon; - data[RTC_OFFSET_YEAR] = tm->tm_year; - mutex_lock(&rtc->lock); + ret = regmap_bulk_read(rtc->regmap, rtc->addr_base + RTC_AL_SEC, + data, RTC_OFFSET_COUNT); + if (ret < 0) + goto exit; + + data[RTC_OFFSET_SEC] = ((data[RTC_OFFSET_SEC] & ~(RTC_AL_SEC_MASK)) | + (tm->tm_sec & RTC_AL_SEC_MASK)); + data[RTC_OFFSET_MIN] = ((data[RTC_OFFSET_MIN] & ~(RTC_AL_MIN_MASK)) | + (tm->tm_min & RTC_AL_MIN_MASK)); + data[RTC_OFFSET_HOUR] = ((data[RTC_OFFSET_HOUR] & ~(RTC_AL_HOU_MASK)) | + (tm->tm_hour & RTC_AL_HOU_MASK)); + data[RTC_OFFSET_DOM] = ((data[RTC_OFFSET_DOM] & ~(RTC_AL_DOM_MASK)) | + (tm->tm_mday & RTC_AL_DOM_MASK)); + data[RTC_OFFSET_MTH] = ((data[RTC_OFFSET_MTH] & ~(RTC_AL_MTH_MASK)) | + (tm->tm_mon & RTC_AL_MTH_MASK)); + data[RTC_OFFSET_YEAR] = ((data[RTC_OFFSET_YEAR] & ~(RTC_AL_YEA_MASK)) | + (tm->tm_year & RTC_AL_YEA_MASK)); + if (alm->enabled) { ret = regmap_bulk_write(rtc->regmap, rtc->addr_base + RTC_AL_SEC, diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c index 579b3ff5c644..feb1f8e52c00 100644 --- a/drivers/rtc/rtc-sh.c +++ b/drivers/rtc/rtc-sh.c @@ -504,7 +504,7 @@ static int __init sh_rtc_probe(struct platform_device *pdev) if (unlikely(!rtc->res)) return -EBUSY; - rtc->regbase = devm_ioremap_nocache(&pdev->dev, rtc->res->start, + rtc->regbase = devm_ioremap(&pdev->dev, rtc->res->start, rtc->regsize); if (unlikely(!rtc->regbase)) return -EINVAL; diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c index 8dcd20b34dde..852f5f3b3592 100644 --- a/drivers/rtc/rtc-sun6i.c +++ b/drivers/rtc/rtc-sun6i.c @@ -379,6 +379,22 @@ static void __init sun50i_h6_rtc_clk_init(struct device_node *node) CLK_OF_DECLARE_DRIVER(sun50i_h6_rtc_clk, "allwinner,sun50i-h6-rtc", sun50i_h6_rtc_clk_init); +/* + * The R40 user manual is self-conflicting on whether the prescaler is + * fixed or configurable. The clock diagram shows it as fixed, but there + * is also a configurable divider in the RTC block. 
+ */ +static const struct sun6i_rtc_clk_data sun8i_r40_rtc_data = { + .rc_osc_rate = 16000000, + .fixed_prescaler = 512, +}; +static void __init sun8i_r40_rtc_clk_init(struct device_node *node) +{ + sun6i_rtc_clk_init(node, &sun8i_r40_rtc_data); +} +CLK_OF_DECLARE_DRIVER(sun8i_r40_rtc_clk, "allwinner,sun8i-r40-rtc", + sun8i_r40_rtc_clk_init); + static const struct sun6i_rtc_clk_data sun8i_v3_rtc_data = { .rc_osc_rate = 32000, .has_out_clk = 1, diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index c94184d080f8..a28b9ff82378 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -1128,7 +1128,8 @@ static u32 get_fcx_max_data(struct dasd_device *device) { struct dasd_eckd_private *private = device->private; int fcx_in_css, fcx_in_gneq, fcx_in_features; - int tpm, mdc; + unsigned int mdc; + int tpm; if (dasd_nofcx) return 0; @@ -1142,7 +1143,7 @@ static u32 get_fcx_max_data(struct dasd_device *device) return 0; mdc = ccw_device_get_mdc(device->cdev, 0); - if (mdc < 0) { + if (mdc == 0) { dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n"); return 0; } else { @@ -1153,12 +1154,12 @@ static u32 get_fcx_max_data(struct dasd_device *device) static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm) { struct dasd_eckd_private *private = device->private; - int mdc; + unsigned int mdc; u32 fcx_max_data; if (private->fcx_max_data) { mdc = ccw_device_get_mdc(device->cdev, lpm); - if ((mdc < 0)) { + if (mdc == 0) { dev_warn(&device->cdev->dev, "Detecting the maximum data size for zHPF " "requests failed (rc=%d) for a new path %x\n", @@ -2073,7 +2074,7 @@ out_err2: dasd_free_block(device->block); device->block = NULL; out_err1: - kfree(private->conf_data); + dasd_eckd_clear_conf_data(device); kfree(device->private); device->private = NULL; return rc; @@ -2082,7 +2083,6 @@ out_err1: static void dasd_eckd_uncheck_device(struct dasd_device *device) { struct dasd_eckd_private *private = device->private; - int i; if (!private) return; @@ -2092,21 +2092,7 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device) private->sneq = NULL; private->vdsneq = NULL; private->gneq = NULL; - private->conf_len = 0; - for (i = 0; i < 8; i++) { - kfree(device->path[i].conf_data); - if ((__u8 *)device->path[i].conf_data == - private->conf_data) { - private->conf_data = NULL; - private->conf_len = 0; - } - device->path[i].conf_data = NULL; - device->path[i].cssid = 0; - device->path[i].ssid = 0; - device->path[i].chpid = 0; - } - kfree(private->conf_data); - private->conf_data = NULL; + dasd_eckd_clear_conf_data(device); } static struct dasd_ccw_req * diff --git a/drivers/s390/block/dasd_fba.h b/drivers/s390/block/dasd_fba.h index 8f75df06e893..45ddabec4017 100644 --- a/drivers/s390/block/dasd_fba.h +++ b/drivers/s390/block/dasd_fba.h @@ -2,7 +2,7 @@ /* * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> * Bugreports.to..: <Linux390@de.ibm.com> - * Coypright IBM Corp. 1999, 2000 + * Copyright IBM Corp. 1999, 2000 * */ diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c index 1770b99f607e..8d4d69ea5baf 100644 --- a/drivers/s390/block/dasd_proc.c +++ b/drivers/s390/block/dasd_proc.c @@ -5,7 +5,7 @@ * Carsten Otte <Cotte@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> * Bugreports.to..: <Linux390@de.ibm.com> - * Coypright IBM Corp. 1999, 2002 + * Copyright IBM Corp. 1999, 2002 * * /proc interface for the dasd driver. 
* diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index 65841af15748..ccecf6b9504e 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c @@ -635,7 +635,7 @@ EXPORT_SYMBOL(ccw_device_tm_start_timeout); * @mask: mask of paths to use * * Return the number of 64K-bytes blocks all paths at least support - * for a transport command. Return values <= 0 indicate failures. + * for a transport command. Return value 0 indicates failure. */ int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask) { diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index a1915061932e..5256e3ce84e5 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -793,8 +793,6 @@ static int ap_device_probe(struct device *dev) drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT; if (!!devres != !!drvres) return -ENODEV; - /* (re-)init queue's state machine */ - ap_queue_reinit_state(to_ap_queue(dev)); } /* Add queue/card to list of active queues/cards */ diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index 433b7b64368d..bb35ba4a8d24 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h @@ -261,7 +261,7 @@ void ap_queue_prepare_remove(struct ap_queue *aq); void ap_queue_remove(struct ap_queue *aq); void ap_queue_suspend(struct ap_device *ap_dev); void ap_queue_resume(struct ap_device *ap_dev); -void ap_queue_reinit_state(struct ap_queue *aq); +void ap_queue_init_state(struct ap_queue *aq); struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type, int comp_device_type, unsigned int functions); diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c index dad2be333d82..37c3bdc3642d 100644 --- a/drivers/s390/crypto/ap_queue.c +++ b/drivers/s390/crypto/ap_queue.c @@ -638,7 +638,7 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type) aq->ap_dev.device.type = &ap_queue_type; aq->ap_dev.device_type = device_type; aq->qid = qid; - aq->state = AP_STATE_RESET_START; + aq->state = AP_STATE_UNBOUND; aq->interrupt = AP_INTR_DISABLED; spin_lock_init(&aq->lock); INIT_LIST_HEAD(&aq->list); @@ -771,10 +771,11 @@ void ap_queue_remove(struct ap_queue *aq) spin_unlock_bh(&aq->lock); } -void ap_queue_reinit_state(struct ap_queue *aq) +void ap_queue_init_state(struct ap_queue *aq) { spin_lock_bh(&aq->lock); aq->state = AP_STATE_RESET_START; ap_wait(ap_sm_event(aq, AP_EVENT_POLL)); spin_unlock_bh(&aq->lock); } +EXPORT_SYMBOL(ap_queue_init_state); diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c index c1db64a2db21..110fe9d0cb91 100644 --- a/drivers/s390/crypto/zcrypt_ccamisc.c +++ b/drivers/s390/crypto/zcrypt_ccamisc.c @@ -1037,8 +1037,8 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain, prepparm = (struct iprepparm *) prepcblk->rpl_parmb; /* do some plausibility checks on the key block */ - if (prepparm->kb.len < 120 + 5 * sizeof(uint16_t) || - prepparm->kb.len > 136 + 5 * sizeof(uint16_t)) { + if (prepparm->kb.len < 120 + 3 * sizeof(uint16_t) || + prepparm->kb.len > 136 + 3 * sizeof(uint16_t)) { DEBUG_ERR("%s reply with invalid or unknown key block\n", __func__); rc = -EIO; diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c index c50f3e86cc74..7cbb384ec535 100644 --- a/drivers/s390/crypto/zcrypt_cex2a.c +++ b/drivers/s390/crypto/zcrypt_cex2a.c @@ -175,6 +175,7 @@ static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev) zq->queue = aq; zq->online = 1; atomic_set(&zq->load, 0); + 
ap_queue_init_state(aq); ap_queue_init_reply(aq, &zq->reply); aq->request_timeout = CEX2A_CLEANUP_TIME, aq->private = zq; diff --git a/drivers/s390/crypto/zcrypt_cex2c.c b/drivers/s390/crypto/zcrypt_cex2c.c index 35c7c6672713..c78c0d119806 100644 --- a/drivers/s390/crypto/zcrypt_cex2c.c +++ b/drivers/s390/crypto/zcrypt_cex2c.c @@ -220,6 +220,7 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev) zq->queue = aq; zq->online = 1; atomic_set(&zq->load, 0); + ap_rapq(aq->qid); rc = zcrypt_cex2c_rng_supported(aq); if (rc < 0) { zcrypt_queue_free(zq); @@ -231,6 +232,7 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev) else zq->ops = zcrypt_msgtype(MSGTYPE06_NAME, MSGTYPE06_VARIANT_NORNG); + ap_queue_init_state(aq); ap_queue_init_reply(aq, &zq->reply); aq->request_timeout = CEX2C_CLEANUP_TIME; aq->private = zq; diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c index 442e3d6162f7..6fabc906114c 100644 --- a/drivers/s390/crypto/zcrypt_cex4.c +++ b/drivers/s390/crypto/zcrypt_cex4.c @@ -381,6 +381,7 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev) zq->queue = aq; zq->online = 1; atomic_set(&zq->load, 0); + ap_queue_init_state(aq); ap_queue_init_reply(aq, &zq->reply); aq->request_timeout = CEX4_CLEANUP_TIME, aq->private = zq; diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index b9a2349e4b90..29facb913671 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -655,17 +655,17 @@ static int qeth_check_idx_response(struct qeth_card *card, unsigned char *buffer) { QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN); - if ((buffer[2] & 0xc0) == 0xc0) { + if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) { QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n", buffer[4]); QETH_CARD_TEXT(card, 2, "ckidxres"); QETH_CARD_TEXT(card, 2, " idxterm"); - QETH_CARD_TEXT_(card, 2, " rc%d", -EIO); - if (buffer[4] == 0xf6) { + QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]); + if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT || + buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) { dev_err(&card->gdev->dev, - "The qeth device is not configured " - "for the OSI layer required by z/VM\n"); - return -EPERM; + "The device does not support the configured transport mode\n"); + return -EPROTONOSUPPORT; } return -EIO; } @@ -742,10 +742,10 @@ static void qeth_issue_next_read_cb(struct qeth_card *card, case 0: break; case -EIO: - qeth_clear_ipacmd_list(card); qeth_schedule_recovery(card); /* fall through */ default: + qeth_clear_ipacmd_list(card); goto out; } @@ -2482,50 +2482,46 @@ static int qeth_mpc_initialize(struct qeth_card *card) rc = qeth_cm_enable(card); if (rc) { QETH_CARD_TEXT_(card, 2, "2err%d", rc); - goto out_qdio; + return rc; } rc = qeth_cm_setup(card); if (rc) { QETH_CARD_TEXT_(card, 2, "3err%d", rc); - goto out_qdio; + return rc; } rc = qeth_ulp_enable(card); if (rc) { QETH_CARD_TEXT_(card, 2, "4err%d", rc); - goto out_qdio; + return rc; } rc = qeth_ulp_setup(card); if (rc) { QETH_CARD_TEXT_(card, 2, "5err%d", rc); - goto out_qdio; + return rc; } rc = qeth_alloc_qdio_queues(card); if (rc) { QETH_CARD_TEXT_(card, 2, "5err%d", rc); - goto out_qdio; + return rc; } rc = qeth_qdio_establish(card); if (rc) { QETH_CARD_TEXT_(card, 2, "6err%d", rc); qeth_free_qdio_queues(card); - goto out_qdio; + return rc; } rc = qeth_qdio_activate(card); if (rc) { QETH_CARD_TEXT_(card, 2, "7err%d", rc); - goto out_qdio; + return rc; } rc = qeth_dm_act(card); if (rc) { 
QETH_CARD_TEXT_(card, 2, "8err%d", rc); - goto out_qdio; + return rc; } return 0; -out_qdio: - qeth_qdio_clear_card(card, !IS_IQD(card)); - qdio_free(CARD_DDEV(card)); - return rc; } void qeth_print_status_message(struct qeth_card *card) @@ -3429,11 +3425,6 @@ int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq) goto out; } - if (card->state != CARD_STATE_DOWN) { - rc = -1; - goto out; - } - qeth_free_qdio_queues(card); card->options.cq = cq; rc = 0; @@ -4779,7 +4770,7 @@ static int qeth_qdio_establish(struct qeth_card *card) QETH_CARD_TEXT(card, 2, "qdioest"); - qib_param_field = kzalloc(FIELD_SIZEOF(struct qib, parm), GFP_KERNEL); + qib_param_field = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL); if (!qib_param_field) { rc = -ENOMEM; goto out_free_nothing; @@ -5035,10 +5026,8 @@ retriable: } if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) { rc = qeth_query_setdiagass(card); - if (rc < 0) { + if (rc) QETH_CARD_TEXT_(card, 2, "8err%d", rc); - goto out; - } } if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) || diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h index 88f4dc140751..458db34239a7 100644 --- a/drivers/s390/net/qeth_core_mpc.h +++ b/drivers/s390/net/qeth_core_mpc.h @@ -421,7 +421,7 @@ struct qeth_ipacmd_setassparms { } data; } __attribute__ ((packed)); -#define SETASS_DATA_SIZEOF(field) FIELD_SIZEOF(struct qeth_ipacmd_setassparms,\ +#define SETASS_DATA_SIZEOF(field) sizeof_field(struct qeth_ipacmd_setassparms,\ data.field) /* SETRTG IPA Command: ****************************************************/ @@ -535,7 +535,7 @@ struct qeth_ipacmd_setadpparms { } data; } __attribute__ ((packed)); -#define SETADP_DATA_SIZEOF(field) FIELD_SIZEOF(struct qeth_ipacmd_setadpparms,\ +#define SETADP_DATA_SIZEOF(field) sizeof_field(struct qeth_ipacmd_setadpparms,\ data.field) /* CREATE_ADDR IPA Command: ***********************************************/ @@ -648,7 +648,7 @@ struct qeth_ipacmd_vnicc { } data; }; -#define VNICC_DATA_SIZEOF(field) FIELD_SIZEOF(struct qeth_ipacmd_vnicc,\ +#define VNICC_DATA_SIZEOF(field) sizeof_field(struct qeth_ipacmd_vnicc,\ data.field) /* SETBRIDGEPORT IPA Command: *********************************************/ @@ -729,7 +729,7 @@ struct qeth_ipacmd_setbridgeport { } data; } __packed; -#define SBP_DATA_SIZEOF(field) FIELD_SIZEOF(struct qeth_ipacmd_setbridgeport,\ +#define SBP_DATA_SIZEOF(field) sizeof_field(struct qeth_ipacmd_setbridgeport,\ data.field) /* ADDRESS_CHANGE_NOTIFICATION adapter-initiated "command" *******************/ @@ -790,7 +790,7 @@ struct qeth_ipa_cmd { } data; } __attribute__ ((packed)); -#define IPA_DATA_SIZEOF(field) FIELD_SIZEOF(struct qeth_ipa_cmd, data.field) +#define IPA_DATA_SIZEOF(field) sizeof_field(struct qeth_ipa_cmd, data.field) /* * special command for ARP processing. 
@@ -899,6 +899,11 @@ extern unsigned char IDX_ACTIVATE_WRITE[]; #define QETH_IDX_ACT_ERR_AUTH 0x1E #define QETH_IDX_ACT_ERR_AUTH_USER 0x20 +#define QETH_IDX_TERMINATE 0xc0 +#define QETH_IDX_TERMINATE_MASK 0xc0 +#define QETH_IDX_TERM_BAD_TRANSPORT 0x41 +#define QETH_IDX_TERM_BAD_TRANSPORT_VM 0xf6 + #define PDU_ENCAPSULATION(buffer) \ (buffer + *(buffer + (*(buffer + 0x0b)) + \ *(buffer + *(buffer + 0x0b) + 0x11) + 0x07)) diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c index e81170ab6d9a..7bd86027f559 100644 --- a/drivers/s390/net/qeth_core_sys.c +++ b/drivers/s390/net/qeth_core_sys.c @@ -207,7 +207,7 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev, card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; } else if (sysfs_streq(buf, "prio_queueing_vlan")) { if (IS_LAYER3(card)) { - rc = -ENOTSUPP; + rc = -EOPNOTSUPP; goto out; } card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_VLAN; diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 9086bc04fa6b..47d37e75dda6 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -287,14 +287,15 @@ static void qeth_l2_stop_card(struct qeth_card *card) card->state = CARD_STATE_HARDSETUP; } if (card->state == CARD_STATE_HARDSETUP) { - qeth_qdio_clear_card(card, 0); qeth_drain_output_queues(card); qeth_clear_working_pool_list(card); card->state = CARD_STATE_DOWN; } + qeth_qdio_clear_card(card, 0); flush_workqueue(card->event_wq); card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; + card->info.promisc_mode = 0; } static int qeth_l2_process_inbound_buffer(struct qeth_card *card, @@ -1951,8 +1952,7 @@ int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout) /* check if VNICC is currently enabled */ bool qeth_l2_vnicc_is_in_use(struct qeth_card *card) { - /* if everything is turned off, VNICC is not active */ - if (!card->options.vnicc.cur_chars) + if (!card->options.vnicc.sup_chars) return false; /* default values are only OK if rx_bcast was not enabled by user * or the card is offline. @@ -2039,8 +2039,9 @@ static void qeth_l2_vnicc_init(struct qeth_card *card) /* enforce assumed default values and recover settings, if changed */ error |= qeth_l2_vnicc_recover_timeout(card, QETH_VNICC_LEARNING, timeout); - chars_tmp = card->options.vnicc.wanted_chars ^ QETH_VNICC_DEFAULT; - chars_tmp |= QETH_VNICC_BRIDGE_INVISIBLE; + /* Change chars, if necessary */ + chars_tmp = card->options.vnicc.wanted_chars ^ + card->options.vnicc.cur_chars; chars_len = sizeof(card->options.vnicc.wanted_chars) * BITS_PER_BYTE; for_each_set_bit(i, &chars_tmp, chars_len) { vnicc = BIT(i); diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c index f70c7aac2dcc..7fa325cf6f8d 100644 --- a/drivers/s390/net/qeth_l2_sys.c +++ b/drivers/s390/net/qeth_l2_sys.c @@ -262,7 +262,8 @@ void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card) return; mutex_lock(&card->sbp_lock); - if (card->options.sbp.role != QETH_SBP_ROLE_NONE) { + if (!card->options.sbp.reflect_promisc && + card->options.sbp.role != QETH_SBP_ROLE_NONE) { /* Conditional to avoid spurious error messages */ qeth_bridgeport_setrole(card, card->options.sbp.role); /* Let the callback function refresh the stored role value. 
*/ diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 27126330a4b0..5508ab89b518 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -1307,13 +1307,14 @@ static void qeth_l3_stop_card(struct qeth_card *card) card->state = CARD_STATE_HARDSETUP; } if (card->state == CARD_STATE_HARDSETUP) { - qeth_qdio_clear_card(card, 0); qeth_drain_output_queues(card); qeth_clear_working_pool_list(card); card->state = CARD_STATE_DOWN; } + qeth_qdio_clear_card(card, 0); flush_workqueue(card->event_wq); + card->info.promisc_mode = 0; } static void qeth_l3_set_promisc_mode(struct qeth_card *card) diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index f9067ed6c7d3..e8c848f72c6d 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c @@ -242,21 +242,33 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); + int rc = 0; char *tmp; - int rc; if (!IS_IQD(card)) return -EPERM; - if (card->state != CARD_STATE_DOWN) - return -EPERM; - if (card->options.sniffer) - return -EPERM; - if (card->options.cq == QETH_CQ_NOTAVAILABLE) - return -EPERM; + + mutex_lock(&card->conf_mutex); + if (card->state != CARD_STATE_DOWN) { + rc = -EPERM; + goto out; + } + + if (card->options.sniffer) { + rc = -EPERM; + goto out; + } + + if (card->options.cq == QETH_CQ_NOTAVAILABLE) { + rc = -EPERM; + goto out; + } tmp = strsep((char **)&buf, "\n"); - if (strlen(tmp) > 8) - return -EINVAL; + if (strlen(tmp) > 8) { + rc = -EINVAL; + goto out; + } if (card->options.hsuid[0]) /* delete old ip address */ @@ -267,11 +279,13 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev, card->options.hsuid[0] = '\0'; memcpy(card->dev->perm_addr, card->options.hsuid, 9); qeth_configure_cq(card, QETH_CQ_DISABLED); - return count; + goto out; } - if (qeth_configure_cq(card, QETH_CQ_ENABLED)) - return -EPERM; + if (qeth_configure_cq(card, QETH_CQ_ENABLED)) { + rc = -EPERM; + goto out; + } snprintf(card->options.hsuid, sizeof(card->options.hsuid), "%-8s", tmp); @@ -280,6 +294,8 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev, rc = qeth_l3_modify_hsuid(card, true); +out: + mutex_unlock(&card->conf_mutex); return rc ? 
rc : count; } diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 90cf4691b8c3..a7881f8eb05e 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -68,6 +68,7 @@ comment "SCSI support type (disk, tape, CD-ROM)" config BLK_DEV_SD tristate "SCSI disk support" depends on SCSI + select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY ---help--- If you want to use SCSI hard disks, Fibre Channel disks, Serial ATA (SATA) or Parallel ATA (PATA) hard disks, diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index e36608ce937a..33dbc051bff9 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -535,7 +535,7 @@ static void get_container_name_callback(void *context, struct fib * fibptr) if ((le32_to_cpu(get_name_reply->status) == CT_OK) && (get_name_reply->data[0] != '\0')) { char *sp = get_name_reply->data; - int data_size = FIELD_SIZEOF(struct aac_get_name_resp, data); + int data_size = sizeof_field(struct aac_get_name_resp, data); sp[data_size - 1] = '\0'; while (*sp == ' ') @@ -574,7 +574,7 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd) dev = (struct aac_dev *)scsicmd->device->host->hostdata; - data_size = FIELD_SIZEOF(struct aac_get_name_resp, data); + data_size = sizeof_field(struct aac_get_name_resp, data); cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); diff --git a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c index 8466aa784ec1..8b891a05d9e7 100644 --- a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c +++ b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c @@ -293,7 +293,7 @@ ahd_linux_pci_reserve_mem_region(struct ahd_softc *ahd, if (!request_mem_region(start, 0x1000, "aic79xx")) error = ENOMEM; if (!error) { - *maddr = ioremap_nocache(base_page, base_offset + 512); + *maddr = ioremap(base_page, base_offset + 512); if (*maddr == NULL) { error = ENOMEM; release_mem_region(start, 0x1000); diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c index 717d8d1082ce..9b293b1f0b71 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c +++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c @@ -372,7 +372,7 @@ ahc_linux_pci_reserve_mem_region(struct ahc_softc *ahc, if (!request_mem_region(start, 0x1000, "aic7xxx")) error = ENOMEM; if (error == 0) { - *maddr = ioremap_nocache(start, 256); + *maddr = ioremap(start, 256); if (*maddr == NULL) { error = ENOMEM; release_mem_region(start, 0x1000); diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index db687ef8a99e..40dc8eac0e3a 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -270,7 +270,7 @@ static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb) break; } case ACB_ADAPTER_TYPE_C:{ - acb->pmuC = ioremap_nocache(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1)); + acb->pmuC = ioremap(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1)); if (!acb->pmuC) { printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no); return false; diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h index 063dccc18f70..5f9f0b18ddf3 100644 --- a/drivers/scsi/be2iscsi/be_cmds.h +++ b/drivers/scsi/be2iscsi/be_cmds.h @@ -1300,7 +1300,7 @@ struct be_cmd_get_port_name { /* Returns the number of items in the field array. 
*/ #define BE_NUMBER_OF_FIELD(_type_, _field_) \ - (FIELD_SIZEOF(_type_, _field_)/sizeof((((_type_ *)0)->_field_[0])))\ + (sizeof_field(_type_, _field_)/sizeof((((_type_ *)0)->_field_[0])))\ /** * Different types of iSCSI completions to host driver for both initiator diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 0760d0bd8a10..9b81cfbbc5c5 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -453,14 +453,14 @@ static int beiscsi_map_pci_bars(struct beiscsi_hba *phba, u8 __iomem *addr; int pcicfg_reg; - addr = ioremap_nocache(pci_resource_start(pcidev, 2), + addr = ioremap(pci_resource_start(pcidev, 2), pci_resource_len(pcidev, 2)); if (addr == NULL) return -ENOMEM; phba->ctrl.csr = addr; phba->csr_va = addr; - addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024); + addr = ioremap(pci_resource_start(pcidev, 4), 128 * 1024); if (addr == NULL) goto pci_map_err; phba->ctrl.db = addr; @@ -471,7 +471,7 @@ static int beiscsi_map_pci_bars(struct beiscsi_hba *phba, else pcicfg_reg = 0; - addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg), + addr = ioremap(pci_resource_start(pcidev, pcicfg_reg), pci_resource_len(pcidev, pcicfg_reg)); if (addr == NULL) diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c index f069e09beb10..6f8335ddb1f2 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c +++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c @@ -1414,7 +1414,7 @@ int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt) reg_base = pci_resource_start(hba->pcidev, BNX2X_DOORBELL_PCI_BAR); reg_off = (1 << BNX2X_DB_SHIFT) * (context_id & 0x1FFFF); - tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4); + tgt->ctx_base = ioremap(reg_base + reg_off, 4); if (!tgt->ctx_base) return -ENOMEM; return 0; diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c index 12666313b937..e53ebc5eff85 100644 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c @@ -2715,7 +2715,7 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep) reg_base = pci_resource_start(ep->hba->pcidev, BNX2X_DOORBELL_PCI_BAR); reg_off = (1 << BNX2X_DB_SHIFT) * (cid_num & 0x1FFFF); - ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4); + ep->qp.ctx_base = ioremap(reg_base + reg_off, 4); if (!ep->qp.ctx_base) return -ENOMEM; goto arm_cq; @@ -2736,7 +2736,7 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep) /* 5709 device in normal node and 5706/5708 devices */ reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num); - ep->qp.ctx_base = ioremap_nocache(ep->hba->reg_base + reg_off, + ep->qp.ctx_base = ioremap(ep->hba->reg_base + reg_off, MB_KERNEL_CTX_SIZE); if (!ep->qp.ctx_base) return -ENOMEM; diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c index 2e8a3ac575cb..8dea7d53788a 100644 --- a/drivers/scsi/csiostor/csio_init.c +++ b/drivers/scsi/csiostor/csio_init.c @@ -529,7 +529,7 @@ static struct csio_hw *csio_hw_alloc(struct pci_dev *pdev) goto err_free_hw; /* Get the start address of registers from BAR 0 */ - hw->regstart = ioremap_nocache(pci_resource_start(pdev, 0), + hw->regstart = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); if (!hw->regstart) { csio_err(hw, "Could not map BAR 0, regstart = %p\n", diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index 0d044c165960..4bc794d2f51c 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -121,7 +121,8 @@ static inline void 
cxgbi_device_destroy(struct cxgbi_device *cdev) "cdev 0x%p, p# %u.\n", cdev, cdev->nports); cxgbi_hbas_remove(cdev); cxgbi_device_portmap_cleanup(cdev); - cxgbi_ppm_release(cdev->cdev2ppm(cdev)); + if (cdev->cdev2ppm) + cxgbi_ppm_release(cdev->cdev2ppm(cdev)); if (cdev->pmap.max_connect) cxgbi_free_big_mem(cdev->pmap.port_csk); kfree(cdev); @@ -2746,7 +2747,7 @@ static int __init libcxgbi_init_module(void) { pr_info("%s", version); - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, cb) < + BUILD_BUG_ON(sizeof_field(struct sk_buff, cb) < sizeof(struct cxgbi_skb_cb)); return 0; } diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index 8ef150dfb6f7..b60795893994 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -439,6 +439,9 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_ if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) return SCSI_MLQUEUE_HOST_BUSY; + if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET))) + return SCSI_MLQUEUE_HOST_BUSY; + rport = starget_to_rport(scsi_target(sc->device)); if (!rport) { FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c index 1f55b9e4e74a..1b88a3b53eee 100644 --- a/drivers/scsi/fnic/vnic_dev.c +++ b/drivers/scsi/fnic/vnic_dev.c @@ -688,26 +688,26 @@ int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done) int vnic_dev_hang_notify(struct vnic_dev *vdev) { - u64 a0, a1; + u64 a0 = 0, a1 = 0; int wait = 1000; return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait); } int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr) { - u64 a0, a1; + u64 a[2] = {}; int wait = 1000; int err, i; for (i = 0; i < ETH_ALEN; i++) mac_addr[i] = 0; - err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait); + err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a[0], &a[1], wait); if (err) return err; for (i = 0; i < ETH_ALEN; i++) - mac_addr[i] = ((u8 *)&a0)[i]; + mac_addr[i] = ((u8 *)&a)[i]; return 0; } @@ -732,30 +732,30 @@ void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr) { - u64 a0 = 0, a1 = 0; + u64 a[2] = {}; int wait = 1000; int err; int i; for (i = 0; i < ETH_ALEN; i++) - ((u8 *)&a0)[i] = addr[i]; + ((u8 *)&a)[i] = addr[i]; - err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait); + err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a[0], &a[1], wait); if (err) pr_err("Can't add addr [%pM], %d\n", addr, err); } void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr) { - u64 a0 = 0, a1 = 0; + u64 a[2] = {}; int wait = 1000; int err; int i; for (i = 0; i < ETH_ALEN; i++) - ((u8 *)&a0)[i] = addr[i]; + ((u8 *)&a)[i] = addr[i]; - err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait); + err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a[0], &a[1], wait); if (err) pr_err("Can't del addr [%pM], %d\n", addr, err); } diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 216e557f703e..1a4ddfacb458 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -6876,7 +6876,7 @@ static void __iomem *remap_pci_mem(ulong base, ulong size) { ulong page_base = ((ulong) base) & PAGE_MASK; ulong page_offs = ((ulong) base) - page_base; - void __iomem *page_remapped = ioremap_nocache(page_base, + void __iomem *page_remapped = ioremap(page_base, page_offs + size); return page_remapped ? 
(page_remapped + page_offs) : NULL; diff --git a/drivers/scsi/lasi700.c b/drivers/scsi/lasi700.c index abac2f350aee..c48a73a0f517 100644 --- a/drivers/scsi/lasi700.c +++ b/drivers/scsi/lasi700.c @@ -98,7 +98,7 @@ lasi700_probe(struct parisc_device *dev) hostdata->dev = &dev->dev; dma_set_mask(&dev->dev, DMA_BIT_MASK(32)); - hostdata->base = ioremap_nocache(base, 0x100); + hostdata->base = ioremap(base, 0x100); hostdata->differential = 0; if (dev->id.sversion == LASI_700_SVERSION) { diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index ebd47c0cf9e9..70b99c0e2e67 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -1945,7 +1945,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc); - spin_lock(&session->frwd_lock); + spin_lock_bh(&session->frwd_lock); task = (struct iscsi_task *)sc->SCp.ptr; if (!task) { /* @@ -2072,7 +2072,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) done: if (task) task->last_timeout = jiffies; - spin_unlock(&session->frwd_lock); + spin_unlock_bh(&session->frwd_lock); ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ? "timer reset" : "shutdown or nh"); return rc; diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c index f47b4b281b14..d7302c2052f9 100644 --- a/drivers/scsi/libsas/sas_discover.c +++ b/drivers/scsi/libsas/sas_discover.c @@ -81,12 +81,21 @@ static int sas_get_port_device(struct asd_sas_port *port) else dev->dev_type = SAS_SATA_DEV; dev->tproto = SAS_PROTOCOL_SATA; - } else { + } else if (port->oob_mode == SAS_OOB_MODE) { struct sas_identify_frame *id = (struct sas_identify_frame *) dev->frame_rcvd; dev->dev_type = id->dev_type; dev->iproto = id->initiator_bits; dev->tproto = id->target_bits; + } else { + /* If the oob mode is OOB_NOT_CONNECTED, the port is + * disconnected due to race with PHY down. 
We cannot + * continue to discover this port + */ + sas_put_device(dev); + pr_warn("Port %016llx is disconnected when discovering\n", + SAS_ADDR(port->attached_sas_addr)); + return -ENODEV; } sas_init_dev(dev); diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index d4e1b120cc9e..0ea03ae93d91 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -4489,12 +4489,6 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job, phba->mbox_ext_buf_ctx.seqNum++; nemb_tp = phba->mbox_ext_buf_ctx.nembType; - dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); - if (!dd_data) { - rc = -ENOMEM; - goto job_error; - } - pbuf = (uint8_t *)dmabuf->virt; size = job->request_payload.payload_len; sg_copy_to_buffer(job->request_payload.sg_list, @@ -4531,6 +4525,13 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job, "2968 SLI_CONFIG ext-buffer wr all %d " "ebuffers received\n", phba->mbox_ext_buf_ctx.numBuf); + + dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); + if (!dd_data) { + rc = -ENOMEM; + goto job_error; + } + /* mailbox command structure for base driver */ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmboxq) { @@ -4579,6 +4580,8 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job, return SLI_CONFIG_HANDLED; job_error: + if (pmboxq) + mempool_free(pmboxq, phba->mbox_mem_pool); lpfc_bsg_dma_page_free(phba, dmabuf); kfree(dd_data); diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 2e6a68d9ea4f..a5ecbce4eda2 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -5385,7 +5385,6 @@ static const struct file_operations lpfc_debugfs_ras_log = { .read = lpfc_debugfs_read, .release = lpfc_debugfs_ras_log_release, }; -#endif #undef lpfc_debugfs_op_dumpHBASlim static const struct file_operations lpfc_debugfs_op_dumpHBASlim = { @@ -5557,7 +5556,7 @@ static const struct file_operations lpfc_idiag_op_extAcc = { .write = lpfc_idiag_extacc_write, .release = lpfc_idiag_cmd_release, }; - +#endif /* lpfc_idiag_mbxacc_dump_bsg_mbox - idiag debugfs dump bsg mailbox command * @phba: Pointer to HBA context object. diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 6298b1729098..6a04fdb3fbf2 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -5883,7 +5883,7 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) break; default: lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "1804 Invalid asynchrous event code: " + "1804 Invalid asynchronous event code: " "x%x\n", bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)); break; diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index db4a04a207ec..f6c8963c915d 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -1985,6 +1985,8 @@ out_unlock: /* Declare and initialization an instance of the FC NVME template. 
*/ static struct nvme_fc_port_template lpfc_nvme_template = { + .module = THIS_MODULE, + /* initiator-based functions */ .localport_delete = lpfc_nvme_localport_delete, .remoteport_delete = lpfc_nvme_remoteport_delete, diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index c82b5792da98..625c046ac4ef 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -8555,7 +8555,7 @@ lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba) psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; spin_unlock_irq(&phba->hbalock); - /* wake up worker thread to post asynchronlous mailbox command */ + /* wake up worker thread to post asynchronous mailbox command */ lpfc_worker_wake_up(phba); } @@ -8823,7 +8823,7 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, return rc; } - /* Now, interrupt mode asynchrous mailbox command */ + /* Now, interrupt mode asynchronous mailbox command */ rc = lpfc_mbox_cmd_check(phba, mboxq); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, @@ -13112,11 +13112,11 @@ lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size) } /** - * lpfc_sli4_sp_handle_async_event - Handle an asynchroous event + * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event * @phba: Pointer to HBA context object. * @cqe: Pointer to mailbox completion queue entry. * - * This routine process a mailbox completion queue entry with asynchrous + * This routine process a mailbox completion queue entry with asynchronous * event. * * Return: true if work posted to worker thread, otherwise false. @@ -13270,7 +13270,7 @@ out_no_mqe_complete: * @cqe: Pointer to mailbox completion queue entry. * * This routine process a mailbox completion queue entry, it invokes the - * proper mailbox complete handling or asynchrous event handling routine + * proper mailbox complete handling or asynchronous event handling routine * according to the MCQE's async bit. * * Return: true if work posted to worker thread, otherwise false. 
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c index f6ac819e6e96..8443f2f35be2 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.c +++ b/drivers/scsi/megaraid/megaraid_mbox.c @@ -731,7 +731,7 @@ megaraid_init_mbox(adapter_t *adapter) goto out_free_raid_dev; } - raid_dev->baseaddr = ioremap_nocache(raid_dev->baseport, 128); + raid_dev->baseaddr = ioremap(raid_dev->baseport, 128); if (!raid_dev->baseaddr) { diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index a4bc81479284..c60cd9fc4240 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -5875,7 +5875,7 @@ static int megasas_init_fw(struct megasas_instance *instance) } base_addr = pci_resource_start(instance->pdev, instance->bar); - instance->reg_set = ioremap_nocache(base_addr, 8192); + instance->reg_set = ioremap(base_addr, 8192); if (!instance->reg_set) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n"); diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 848fbec7bda6..45fd8dfb7c40 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -5248,7 +5248,6 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) &ct->chain_buffer_dma); if (!ct->chain_buffer) { ioc_err(ioc, "chain_lookup: pci_pool_alloc failed\n"); - _base_release_memory_pools(ioc); goto out; } } diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c index 539ac8ce4fcd..d4bd31a75b9d 100644 --- a/drivers/scsi/myrb.c +++ b/drivers/scsi/myrb.c @@ -3531,7 +3531,7 @@ static struct myrb_hba *myrb_detect(struct pci_dev *pdev, spin_lock_init(&cb->queue_lock); if (mmio_size < PAGE_SIZE) mmio_size = PAGE_SIZE; - cb->mmio_base = ioremap_nocache(cb->pci_addr & PAGE_MASK, mmio_size); + cb->mmio_base = ioremap(cb->pci_addr & PAGE_MASK, mmio_size); if (cb->mmio_base == NULL) { dev_err(&pdev->dev, "Unable to map Controller Register Window\n"); diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c index eb0dd566330a..5c5666491c2e 100644 --- a/drivers/scsi/myrs.c +++ b/drivers/scsi/myrs.c @@ -2311,7 +2311,7 @@ static struct myrs_hba *myrs_detect(struct pci_dev *pdev, /* Map the Controller Register Window. 
*/ if (mmio_size < PAGE_SIZE) mmio_size = PAGE_SIZE; - cs->mmio_base = ioremap_nocache(cs->pci_addr & PAGE_MASK, mmio_size); + cs->mmio_base = ioremap(cs->pci_addr & PAGE_MASK, mmio_size); if (cs->mmio_base == NULL) { dev_err(&pdev->dev, "Unable to map Controller Register Window\n"); diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c index 93616f9fd6d7..d79ce97a04bd 100644 --- a/drivers/scsi/pcmcia/nsp_cs.c +++ b/drivers/scsi/pcmcia/nsp_cs.c @@ -1560,7 +1560,7 @@ static int nsp_cs_config_check(struct pcmcia_device *p_dev, void *priv_data) goto next_entry; data->MmioAddress = (unsigned long) - ioremap_nocache(p_dev->resource[2]->start, + ioremap(p_dev->resource[2]->start, resource_size(p_dev->resource[2])); data->MmioLength = resource_size(p_dev->resource[2]); } diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index ae97e2f310a3..d7e7043f9eab 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -178,6 +178,7 @@ qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj, faddr = ha->flt_region_nvram; if (IS_QLA28XX(ha)) { + qla28xx_get_aux_images(vha, &active_regions); if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE) faddr = ha->flt_region_nvram_sec; } diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index 99f0a1a08143..cbaf178fc979 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -2399,7 +2399,7 @@ qla2x00_get_flash_image_status(struct bsg_job *bsg_job) struct qla_active_regions regions = { }; struct active_regions active_regions = { }; - qla28xx_get_aux_images(vha, &active_regions); + qla27xx_get_active_image(vha, &active_regions); regions.global_image = active_regions.global; if (IS_QLA28XX(ha)) { diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 460f443f6471..2edd9f7b3074 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2401,6 +2401,7 @@ typedef struct fc_port { unsigned int id_changed:1; unsigned int scan_needed:1; unsigned int n2n_flag:1; + unsigned int explicit_logout:1; struct completion nvme_del_done; uint32_t nvme_prli_service_param; diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index 59f6903e5abe..9dc09c117416 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h @@ -1523,6 +1523,10 @@ struct qla_flt_header { #define FLT_REG_NVRAM_SEC_28XX_1 0x10F #define FLT_REG_NVRAM_SEC_28XX_2 0x111 #define FLT_REG_NVRAM_SEC_28XX_3 0x113 +#define FLT_REG_MPI_PRI_28XX 0xD3 +#define FLT_REG_MPI_SEC_28XX 0xF0 +#define FLT_REG_PEP_PRI_28XX 0xD1 +#define FLT_REG_PEP_SEC_28XX 0xF1 struct qla_flt_region { uint16_t code; diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 6c28f38f8021..aa5204163bec 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -533,6 +533,7 @@ static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport) e->u.fcport.fcport = fcport; fcport->flags |= FCF_ASYNC_ACTIVE; + fcport->disc_state = DSC_LOGIN_PEND; return qla2x00_post_work(vha, e); } @@ -1526,8 +1527,8 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) } } - /* for pure Target Mode. 
Login will not be initiated */ - if (vha->host->active_mode == MODE_TARGET) + /* Target won't initiate port login if fabric is present */ + if (vha->host->active_mode == MODE_TARGET && !N2N_TOPO(vha->hw)) return 0; if (fcport->flags & FCF_ASYNC_SENT) { @@ -1719,6 +1720,10 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha, void qla_handle_els_plogi_done(scsi_qla_host_t *vha, struct event_arg *ea) { + /* for pure Target Mode, PRLI will not be initiated */ + if (vha->host->active_mode == MODE_TARGET) + return; + ql_dbg(ql_dbg_disc, vha, 0x2118, "%s %d %8phC post PRLI\n", __func__, __LINE__, ea->fcport->port_name); @@ -4852,6 +4857,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) } INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn); + INIT_WORK(&fcport->free_work, qlt_free_session_done); INIT_WORK(&fcport->reg_work, qla_register_fcport_fn); INIT_LIST_HEAD(&fcport->gnl_entry); INIT_LIST_HEAD(&fcport->list); @@ -4930,14 +4936,8 @@ qla2x00_configure_loop(scsi_qla_host_t *vha) set_bit(RSCN_UPDATE, &flags); clear_bit(LOCAL_LOOP_UPDATE, &flags); - } else if (ha->current_topology == ISP_CFG_N) { - clear_bit(RSCN_UPDATE, &flags); - if (qla_tgt_mode_enabled(vha)) { - /* allow the other side to start the login */ - clear_bit(LOCAL_LOOP_UPDATE, &flags); - set_bit(RELOGIN_NEEDED, &vha->dpc_flags); - } - } else if (ha->current_topology == ISP_CFG_NL) { + } else if (ha->current_topology == ISP_CFG_NL || + ha->current_topology == ISP_CFG_N) { clear_bit(RSCN_UPDATE, &flags); set_bit(LOCAL_LOOP_UPDATE, &flags); } else if (!vha->flags.online || @@ -5054,7 +5054,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) memcpy(&ha->plogi_els_payld.data, (void *)ha->init_cb, sizeof(ha->plogi_els_payld.data)); - set_bit(RELOGIN_NEEDED, &vha->dpc_flags); } else { ql_dbg(ql_dbg_init, vha, 0x00d1, "PLOGI ELS param read fail.\n"); diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index b25f87ff8cde..8b050f0b4333 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -2405,11 +2405,19 @@ qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx) static void qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio) { + u16 control_flags = LCF_COMMAND_LOGO; logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; - logio->control_flags = - cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO); - if (!sp->fcport->keep_nport_handle) - logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT); + + if (sp->fcport->explicit_logout) { + control_flags |= LCF_EXPL_LOGO|LCF_FREE_NPORT; + } else { + control_flags |= LCF_IMPL_LOGO; + + if (!sp->fcport->keep_nport_handle) + control_flags |= LCF_FREE_NPORT; + } + + logio->control_flags = cpu_to_le16(control_flags); logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); logio->port_id[0] = sp->fcport->d_id.b.al_pa; logio->port_id[1] = sp->fcport->d_id.b.area; @@ -2617,6 +2625,10 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode, memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld, sizeof(struct els_logo_payload)); + ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3075, "LOGO buffer:"); + ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x010a, + elsio->u.els_logo.els_logo_pyld, + sizeof(*elsio->u.els_logo.els_logo_pyld)); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { @@ -2676,7 +2688,8 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073, "PLOGI ELS IOCB:\n"); ql_dump_buffer(ql_log_info, vha, 0x0109, - (uint8_t *)els_iocb, 0x70); + (uint8_t *)els_iocb, + 
sizeof(*els_iocb)); } else { els_iocb->control_flags = 1 << 13; els_iocb->tx_byte_count = @@ -2688,6 +2701,11 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) els_iocb->rx_byte_count = 0; els_iocb->rx_address = 0; els_iocb->rx_len = 0; + ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3076, + "LOGO ELS IOCB:"); + ql_dump_buffer(ql_log_info, vha, 0x010b, + els_iocb, + sizeof(*els_iocb)); } sp->vha->qla_stats.control_requests++; @@ -2934,7 +2952,8 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode, ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n"); ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109, - (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70); + (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, + sizeof(*elsio->u.els_plogi.els_plogi_pyld)); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 2601d7673c37..7b8a6bfcf08d 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -1061,8 +1061,6 @@ global_port_update: ql_dbg(ql_dbg_async, vha, 0x5011, "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n", mb[1], mb[2], mb[3]); - - qlt_async_event(mb[0], vha, mb); break; } @@ -1079,8 +1077,6 @@ global_port_update: set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); set_bit(VP_CONFIG_OK, &vha->vp_flags); - - qlt_async_event(mb[0], vha, mb); break; case MBA_RSCN_UPDATE: /* State Change Registration */ diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 0cf94f05f008..b7c1108c48e2 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -3921,6 +3921,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, vha->d_id.b24 = 0; vha->d_id.b.al_pa = 1; ha->flags.n2n_bigger = 1; + ha->flags.n2n_ae = 0; id.b.al_pa = 2; ql_dbg(ql_dbg_async, vha, 0x5075, @@ -3931,6 +3932,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, "Format 1: Remote login - Waiting for WWPN %8phC.\n", rptid_entry->u.f1.port_name); ha->flags.n2n_bigger = 0; + ha->flags.n2n_ae = 1; } qla24xx_post_newsess_work(vha, &id, rptid_entry->u.f1.port_name, @@ -3942,7 +3944,6 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, /* if our portname is higher then initiate N2N login */ set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags); - ha->flags.n2n_ae = 1; return; break; case TOPO_FL: diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c index 605b59c76c90..a3a44d4ace1e 100644 --- a/drivers/scsi/qla2xxx/qla_mr.c +++ b/drivers/scsi/qla2xxx/qla_mr.c @@ -789,7 +789,7 @@ qlafx00_iospace_config(struct qla_hw_data *ha) } ha->cregbase = - ioremap_nocache(pci_resource_start(ha->pdev, 0), BAR0_LEN_FX00); + ioremap(pci_resource_start(ha->pdev, 0), BAR0_LEN_FX00); if (!ha->cregbase) { ql_log_pci(ql_log_fatal, ha->pdev, 0x0128, "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev)); @@ -810,7 +810,7 @@ qlafx00_iospace_config(struct qla_hw_data *ha) } ha->iobase = - ioremap_nocache(pci_resource_start(ha->pdev, 2), BAR2_LEN_FX00); + ioremap(pci_resource_start(ha->pdev, 2), BAR2_LEN_FX00); if (!ha->iobase) { ql_log_pci(ql_log_fatal, ha->pdev, 0x012b, "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev)); diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index 941aa53363f5..bfcd02fdf2b8 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -610,6 +610,7 @@ static void qla_nvme_remoteport_delete(struct 
nvme_fc_remote_port *rport) } static struct nvme_fc_port_template qla_nvme_fc_transport = { + .module = THIS_MODULE, .localport_delete = qla_nvme_localport_delete, .remoteport_delete = qla_nvme_remoteport_delete, .create_queue = qla_nvme_alloc_queue, diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index f2d5115b2d8d..bbe90354f49b 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c @@ -847,15 +847,15 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) ha->flt_region_img_status_pri = start; break; case FLT_REG_IMG_SEC_27XX: - if (IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ha->flt_region_img_status_sec = start; break; case FLT_REG_FW_SEC_27XX: - if (IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ha->flt_region_fw_sec = start; break; case FLT_REG_BOOTLOAD_SEC_27XX: - if (IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ha->flt_region_boot_sec = start; break; case FLT_REG_AUX_IMG_PRI_28XX: @@ -2725,8 +2725,11 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff, "Region %x is secure\n", region.code); - if (region.code == FLT_REG_FW || - region.code == FLT_REG_FW_SEC_27XX) { + switch (region.code) { + case FLT_REG_FW: + case FLT_REG_FW_SEC_27XX: + case FLT_REG_MPI_PRI_28XX: + case FLT_REG_MPI_SEC_28XX: fw_array = dwptr; /* 1st fw array */ @@ -2757,9 +2760,23 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, buf_size_without_sfub += risc_size; fw_array += risc_size; } - } else { - ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff, - "Secure region %x not supported\n", + break; + + case FLT_REG_PEP_PRI_28XX: + case FLT_REG_PEP_SEC_28XX: + fw_array = dwptr; + + /* 1st fw array */ + risc_size = be32_to_cpu(fw_array[3]); + risc_attr = be32_to_cpu(fw_array[9]); + + buf_size_without_sfub = risc_size; + fw_array += risc_size; + break; + + default: + ql_log(ql_log_warn + ql_dbg_verbose, vha, + 0xffff, "Secure region %x not supported\n", region.code); rval = QLA_COMMAND_ERROR; goto done; @@ -2880,7 +2897,7 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, "Sending Secure Flash MB Cmd\n"); rval = qla28xx_secure_flash_update(vha, 0, region.code, buf_size_without_sfub, sfub_dma, - sizeof(struct secure_flash_update_block)); + sizeof(struct secure_flash_update_block) >> 2); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0xffff, "Secure Flash MB Cmd failed %x.", rval); diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 51b275a575a5..68c14143e50e 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -1104,6 +1104,7 @@ void qlt_free_session_done(struct work_struct *work) } } + sess->explicit_logout = 0; spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); sess->free_pending = 0; @@ -1160,7 +1161,6 @@ void qlt_unreg_sess(struct fc_port *sess) sess->last_rscn_gen = sess->rscn_gen; sess->last_login_gen = sess->login_gen; - INIT_WORK(&sess->free_work, qlt_free_session_done); queue_work(sess->vha->hw->wq, &sess->free_work); } EXPORT_SYMBOL(qlt_unreg_sess); @@ -1265,7 +1265,6 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess) "Scheduling sess %p for deletion %8phC\n", sess, sess->port_name); - INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn); WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work)); } @@ -4804,6 +4803,7 @@ static int 
qlt_handle_login(struct scsi_qla_host *vha, switch (sess->disc_state) { case DSC_DELETED: + case DSC_LOGIN_PEND: qlt_plogi_ack_unref(vha, pla); break; diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 042a24314edc..abe7f79bb789 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -246,6 +246,8 @@ static void tcm_qla2xxx_complete_mcmd(struct work_struct *work) */ static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd) { + if (!mcmd) + return; INIT_WORK(&mcmd->free_work, tcm_qla2xxx_complete_mcmd); queue_work(tcm_qla2xxx_free_wq, &mcmd->free_work); } @@ -348,6 +350,7 @@ static void tcm_qla2xxx_close_session(struct se_session *se_sess) target_sess_cmd_list_set_waiting(se_sess); spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + sess->explicit_logout = 1; tcm_qla2xxx_put_sess(sess); } diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 8c674eca09f1..2323432a0edb 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -4275,7 +4275,6 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha) return QLA_SUCCESS; mem_alloc_error_exit: - qla4xxx_mem_free(ha); return QLA_ERROR; } diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 417b868d8735..ed8d9709b9b9 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -24,6 +24,8 @@ #define ISCSI_TRANSPORT_VERSION "2.0-870" +#define ISCSI_SEND_MAX_ALLOWED 10 + #define CREATE_TRACE_POINTS #include <trace/events/iscsi.h> @@ -3682,6 +3684,7 @@ iscsi_if_rx(struct sk_buff *skb) struct nlmsghdr *nlh; struct iscsi_uevent *ev; uint32_t group; + int retries = ISCSI_SEND_MAX_ALLOWED; nlh = nlmsg_hdr(skb); if (nlh->nlmsg_len < sizeof(*nlh) + sizeof(*ev) || @@ -3712,6 +3715,10 @@ iscsi_if_rx(struct sk_buff *skb) break; err = iscsi_if_send_reply(portid, nlh->nlmsg_type, ev, sizeof(*ev)); + if (err == -EAGAIN && --retries < 0) { + printk(KERN_WARNING "Send reply failed, error %d\n", err); + break; + } } while (err < 0 && err != -ECONNREFUSED && err != -ESRCH); skb_pull(skb, rlen); } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index cea625906440..902b649fc8ef 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2211,8 +2211,10 @@ static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer u8 type; int ret = 0; - if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) + if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) { + sdkp->protection_type = 0; return ret; + } type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */ @@ -2956,15 +2958,16 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp) q->limits.zoned = BLK_ZONED_HM; } else { sdkp->zoned = (buffer[8] >> 4) & 3; - if (sdkp->zoned == 1) + if (sdkp->zoned == 1 && !disk_has_partitions(sdkp->disk)) { /* Host-aware */ q->limits.zoned = BLK_ZONED_HA; - else + } else { /* - * Treat drive-managed devices as - * regular block devices. + * Treat drive-managed devices and host-aware devices + * with partitions as regular block devices. 
*/ q->limits.zoned = BLK_ZONED_NONE; + } } if (blk_queue_is_zoned(q) && sdkp->first_scan) sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n", diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 7b7ef3acb504..b7492568e02f 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -7457,7 +7457,7 @@ static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info) goto disable_device; } - ctrl_info->iomem_base = ioremap_nocache(pci_resource_start( + ctrl_info->iomem_base = ioremap(pci_resource_start( ctrl_info->pci_dev, 0), sizeof(struct pqi_ctrl_registers)); if (!ctrl_info->iomem_base) { @@ -8689,11 +8689,11 @@ static void __attribute__((unused)) verify_structures(void) BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.delete_operational_queue.queue_id) != 12); BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64); - BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request, + BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request, data.create_operational_iq) != 64 - 11); - BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request, + BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request, data.create_operational_oq) != 64 - 11); - BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request, + BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request, data.delete_operational_queue) != 64 - 11); BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c index a85d52b5dc32..f8397978f8ab 100644 --- a/drivers/scsi/sni_53c710.c +++ b/drivers/scsi/sni_53c710.c @@ -71,7 +71,7 @@ static int snirm710_probe(struct platform_device *dev) hostdata->dev = &dev->dev; dma_set_mask(&dev->dev, DMA_BIT_MASK(32)); - hostdata->base = ioremap_nocache(base, 0x100); + hostdata->base = ioremap(base, 0x100); hostdata->differential = 0; hostdata->clock = SNIRM710_CLOCK; diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index f8faf8b3d965..fb41636519ee 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1842,9 +1842,11 @@ static int storvsc_probe(struct hv_device *device, */ host->sg_tablesize = (stor_device->max_transfer_bytes >> PAGE_SHIFT); /* + * For non-IDE disks, the host supports multiple channels. * Set the number of HW queues we are supporting. */ - host->nr_hw_queues = num_present_cpus(); + if (!dev_is_ide) + host->nr_hw_queues = num_present_cpus(); /* * Set the error handler work queue. 
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c index 440a73eae647..f37df79e37e1 100644 --- a/drivers/scsi/sun3x_esp.c +++ b/drivers/scsi/sun3x_esp.c @@ -190,7 +190,7 @@ static int esp_sun3x_probe(struct platform_device *dev) if (!res || !res->start) goto fail_unlink; - esp->regs = ioremap_nocache(res->start, 0x20); + esp->regs = ioremap(res->start, 0x20); if (!esp->regs) goto fail_unmap_regs; @@ -198,7 +198,7 @@ static int esp_sun3x_probe(struct platform_device *dev) if (!res || !res->start) goto fail_unmap_regs; - esp->dma_regs = ioremap_nocache(res->start, 0x10); + esp->dma_regs = ioremap(res->start, 0x10); esp->command_block = dma_alloc_coherent(esp->dev, 16, &esp->command_block_dma, diff --git a/drivers/scsi/ufs/cdns-pltfrm.c b/drivers/scsi/ufs/cdns-pltfrm.c index b2af04c57a39..6feeb0faf123 100644 --- a/drivers/scsi/ufs/cdns-pltfrm.c +++ b/drivers/scsi/ufs/cdns-pltfrm.c @@ -99,6 +99,12 @@ static int cdns_ufs_link_startup_notify(struct ufs_hba *hba, */ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0); + /* + * Disabling Autohibern8 feature in cadence UFS + * to mask unexpected interrupt trigger. + */ + hba->ahit = 0; + return 0; } diff --git a/drivers/scsi/ufs/ufs_bsg.c b/drivers/scsi/ufs/ufs_bsg.c index baeecee35d1e..53dd87628cbe 100644 --- a/drivers/scsi/ufs/ufs_bsg.c +++ b/drivers/scsi/ufs/ufs_bsg.c @@ -203,7 +203,7 @@ int ufs_bsg_probe(struct ufs_hba *hba) bsg_dev->parent = get_device(parent); bsg_dev->release = ufs_bsg_node_release; - dev_set_name(bsg_dev, "ufs-bsg"); + dev_set_name(bsg_dev, "ufs-bsg%u", shost->host_no); ret = device_add(bsg_dev); if (ret) diff --git a/drivers/scsi/zalon.c b/drivers/scsi/zalon.c index 77bce208210e..7eac76cccc4c 100644 --- a/drivers/scsi/zalon.c +++ b/drivers/scsi/zalon.c @@ -89,7 +89,7 @@ zalon_probe(struct parisc_device *dev) struct gsc_irq gsc_irq; u32 zalon_vers; int error = -ENODEV; - void __iomem *zalon = ioremap_nocache(dev->hpa.start, 4096); + void __iomem *zalon = ioremap(dev->hpa.start, 4096); void __iomem *io_port = zalon + GSC_SCSI_ZALON_OFFSET; static int unit = 0; struct Scsi_Host *host; diff --git a/drivers/scsi/zorro_esp.c b/drivers/scsi/zorro_esp.c index a23a8e5794f5..bdd82e497d5f 100644 --- a/drivers/scsi/zorro_esp.c +++ b/drivers/scsi/zorro_esp.c @@ -801,7 +801,7 @@ static int zorro_esp_probe(struct zorro_dev *z, /* additional setup required for Fastlane */ if (zep->zorro3 && ent->driver_data == ZORRO_BLZ1230II) { /* map full address space up to ESP base for DMA */ - zep->board_base = ioremap_nocache(board, + zep->board_base = ioremap(board, FASTLANE_ESP_ADDR-1); if (!zep->board_base) { pr_err("Cannot allocate board address space\n"); @@ -816,7 +816,7 @@ static int zorro_esp_probe(struct zorro_dev *z, esp->ops = zdd->esp_ops; if (ioaddr > 0xffffff) - esp->regs = ioremap_nocache(ioaddr, 0x20); + esp->regs = ioremap(ioaddr, 0x20); else /* ZorroII address space remapped nocache by early startup */ esp->regs = ZTWO_VADDR(ioaddr); @@ -842,7 +842,7 @@ static int zorro_esp_probe(struct zorro_dev *z, * Only Fastlane Z3 for now - add switch for correct struct * dma_registers size if adding any more */ - esp->dma_regs = ioremap_nocache(dmaaddr, + esp->dma_regs = ioremap(dmaaddr, sizeof(struct fastlane_dma_registers)); } else /* ZorroII address space remapped nocache by early startup */ diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c index 9475353f49d6..d996782a7106 100644 --- a/drivers/sh/clk/core.c +++ b/drivers/sh/clk/core.c @@ -368,7 +368,7 @@ static int clk_establish_mapping(struct clk *clk) if 
(!mapping->base && mapping->phys) { kref_init(&mapping->ref); - mapping->base = ioremap_nocache(mapping->phys, mapping->len); + mapping->base = ioremap(mapping->phys, mapping->len); if (unlikely(!mapping->base)) return -ENXIO; } else if (mapping->base) { diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c index 8485e812d9b2..f8e070d67fa3 100644 --- a/drivers/sh/intc/core.c +++ b/drivers/sh/intc/core.c @@ -213,7 +213,7 @@ int __init register_intc_controller(struct intc_desc *desc) WARN_ON(resource_type(res) != IORESOURCE_MEM); d->window[k].phys = res->start; d->window[k].size = resource_size(res); - d->window[k].virt = ioremap_nocache(res->start, + d->window[k].virt = ioremap(res->start, resource_size(res)); if (!d->window[k].virt) goto err2; diff --git a/drivers/sh/intc/userimask.c b/drivers/sh/intc/userimask.c index 87d69e7471f9..f9f043a3d90a 100644 --- a/drivers/sh/intc/userimask.c +++ b/drivers/sh/intc/userimask.c @@ -73,7 +73,7 @@ int register_intc_userimask(unsigned long addr) if (unlikely(uimask)) return -EBUSY; - uimask = ioremap_nocache(addr, SZ_4K); + uimask = ioremap(addr, SZ_4K); if (unlikely(!uimask)) return -ENOMEM; diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig index 833e04a7835c..1778f8c62861 100644 --- a/drivers/soc/Kconfig +++ b/drivers/soc/Kconfig @@ -14,6 +14,7 @@ source "drivers/soc/qcom/Kconfig" source "drivers/soc/renesas/Kconfig" source "drivers/soc/rockchip/Kconfig" source "drivers/soc/samsung/Kconfig" +source "drivers/soc/sifive/Kconfig" source "drivers/soc/sunxi/Kconfig" source "drivers/soc/tegra/Kconfig" source "drivers/soc/ti/Kconfig" diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile index 2ec355003524..8b49d782a1ab 100644 --- a/drivers/soc/Makefile +++ b/drivers/soc/Makefile @@ -20,6 +20,7 @@ obj-y += qcom/ obj-y += renesas/ obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/ obj-$(CONFIG_SOC_SAMSUNG) += samsung/ +obj-$(CONFIG_SOC_SIFIVE) += sifive/ obj-y += sunxi/ obj-$(CONFIG_ARCH_TEGRA) += tegra/ obj-y += ti/ diff --git a/drivers/soc/amlogic/meson-ee-pwrc.c b/drivers/soc/amlogic/meson-ee-pwrc.c index 5823f5b67d16..3f0261d53ad9 100644 --- a/drivers/soc/amlogic/meson-ee-pwrc.c +++ b/drivers/soc/amlogic/meson-ee-pwrc.c @@ -323,6 +323,8 @@ static int meson_ee_pwrc_init_domain(struct platform_device *pdev, struct meson_ee_pwrc *pwrc, struct meson_ee_pwrc_domain *dom) { + int ret; + dom->pwrc = pwrc; dom->num_rstc = dom->desc.reset_names_count; dom->num_clks = dom->desc.clk_names_count; @@ -368,15 +370,21 @@ static int meson_ee_pwrc_init_domain(struct platform_device *pdev, * prepare/enable counters won't be in sync. */ if (dom->num_clks && dom->desc.get_power && !dom->desc.get_power(dom)) { - int ret = clk_bulk_prepare_enable(dom->num_clks, dom->clks); + ret = clk_bulk_prepare_enable(dom->num_clks, dom->clks); if (ret) return ret; - pm_genpd_init(&dom->base, &pm_domain_always_on_gov, false); - } else - pm_genpd_init(&dom->base, NULL, - (dom->desc.get_power ? - dom->desc.get_power(dom) : true)); + ret = pm_genpd_init(&dom->base, &pm_domain_always_on_gov, + false); + if (ret) + return ret; + } else { + ret = pm_genpd_init(&dom->base, NULL, + (dom->desc.get_power ? 
+ dom->desc.get_power(dom) : true)); + if (ret) + return ret; + } return 0; } @@ -441,9 +449,7 @@ static int meson_ee_pwrc_probe(struct platform_device *pdev) pwrc->xlate.domains[i] = &dom->base; } - of_genpd_add_provider_onecell(pdev->dev.of_node, &pwrc->xlate); - - return 0; + return of_genpd_add_provider_onecell(pdev->dev.of_node, &pwrc->xlate); } static void meson_ee_pwrc_shutdown(struct platform_device *pdev) diff --git a/drivers/soc/sifive/Kconfig b/drivers/soc/sifive/Kconfig new file mode 100644 index 000000000000..58cf8c40d08d --- /dev/null +++ b/drivers/soc/sifive/Kconfig @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0 + +if SOC_SIFIVE + +config SIFIVE_L2 + bool "Sifive L2 Cache controller" + help + Support for the L2 cache controller on SiFive platforms. + +endif diff --git a/drivers/soc/sifive/Makefile b/drivers/soc/sifive/Makefile new file mode 100644 index 000000000000..b5caff77938f --- /dev/null +++ b/drivers/soc/sifive/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_SIFIVE_L2) += sifive_l2_cache.o diff --git a/drivers/soc/sifive/sifive_l2_cache.c b/drivers/soc/sifive/sifive_l2_cache.c new file mode 100644 index 000000000000..a5069394cd61 --- /dev/null +++ b/drivers/soc/sifive/sifive_l2_cache.c @@ -0,0 +1,178 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SiFive L2 cache controller Driver + * + * Copyright (C) 2018-2019 SiFive, Inc. + * + */ +#include <linux/debugfs.h> +#include <linux/interrupt.h> +#include <linux/of_irq.h> +#include <linux/of_address.h> +#include <soc/sifive/sifive_l2_cache.h> + +#define SIFIVE_L2_DIRECCFIX_LOW 0x100 +#define SIFIVE_L2_DIRECCFIX_HIGH 0x104 +#define SIFIVE_L2_DIRECCFIX_COUNT 0x108 + +#define SIFIVE_L2_DATECCFIX_LOW 0x140 +#define SIFIVE_L2_DATECCFIX_HIGH 0x144 +#define SIFIVE_L2_DATECCFIX_COUNT 0x148 + +#define SIFIVE_L2_DATECCFAIL_LOW 0x160 +#define SIFIVE_L2_DATECCFAIL_HIGH 0x164 +#define SIFIVE_L2_DATECCFAIL_COUNT 0x168 + +#define SIFIVE_L2_CONFIG 0x00 +#define SIFIVE_L2_WAYENABLE 0x08 +#define SIFIVE_L2_ECCINJECTERR 0x40 + +#define SIFIVE_L2_MAX_ECCINTR 3 + +static void __iomem *l2_base; +static int g_irq[SIFIVE_L2_MAX_ECCINTR]; + +enum { + DIR_CORR = 0, + DATA_CORR, + DATA_UNCORR, +}; + +#ifdef CONFIG_DEBUG_FS +static struct dentry *sifive_test; + +static ssize_t l2_write(struct file *file, const char __user *data, + size_t count, loff_t *ppos) +{ + unsigned int val; + + if (kstrtouint_from_user(data, count, 0, &val)) + return -EINVAL; + if ((val >= 0 && val < 0xFF) || (val >= 0x10000 && val < 0x100FF)) + writel(val, l2_base + SIFIVE_L2_ECCINJECTERR); + else + return -EINVAL; + return count; +} + +static const struct file_operations l2_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = l2_write +}; + +static void setup_sifive_debug(void) +{ + sifive_test = debugfs_create_dir("sifive_l2_cache", NULL); + + debugfs_create_file("sifive_debug_inject_error", 0200, + sifive_test, NULL, &l2_fops); +} +#endif + +static void l2_config_read(void) +{ + u32 regval, val; + + regval = readl(l2_base + SIFIVE_L2_CONFIG); + val = regval & 0xFF; + pr_info("L2CACHE: No. of Banks in the cache: %d\n", val); + val = (regval & 0xFF00) >> 8; + pr_info("L2CACHE: No. 
of ways per bank: %d\n", val); + val = (regval & 0xFF0000) >> 16; + pr_info("L2CACHE: Sets per bank: %llu\n", (uint64_t)1 << val); + val = (regval & 0xFF000000) >> 24; + pr_info("L2CACHE: Bytes per cache block: %llu\n", (uint64_t)1 << val); + + regval = readl(l2_base + SIFIVE_L2_WAYENABLE); + pr_info("L2CACHE: Index of the largest way enabled: %d\n", regval); +} + +static const struct of_device_id sifive_l2_ids[] = { + { .compatible = "sifive,fu540-c000-ccache" }, + { /* end of table */ }, +}; + +static ATOMIC_NOTIFIER_HEAD(l2_err_chain); + +int register_sifive_l2_error_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&l2_err_chain, nb); +} +EXPORT_SYMBOL_GPL(register_sifive_l2_error_notifier); + +int unregister_sifive_l2_error_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&l2_err_chain, nb); +} +EXPORT_SYMBOL_GPL(unregister_sifive_l2_error_notifier); + +static irqreturn_t l2_int_handler(int irq, void *device) +{ + unsigned int add_h, add_l; + + if (irq == g_irq[DIR_CORR]) { + add_h = readl(l2_base + SIFIVE_L2_DIRECCFIX_HIGH); + add_l = readl(l2_base + SIFIVE_L2_DIRECCFIX_LOW); + pr_err("L2CACHE: DirError @ 0x%08X.%08X\n", add_h, add_l); + /* Reading this register clears the DirError interrupt sig */ + readl(l2_base + SIFIVE_L2_DIRECCFIX_COUNT); + atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_CE, + "DirECCFix"); + } + if (irq == g_irq[DATA_CORR]) { + add_h = readl(l2_base + SIFIVE_L2_DATECCFIX_HIGH); + add_l = readl(l2_base + SIFIVE_L2_DATECCFIX_LOW); + pr_err("L2CACHE: DataError @ 0x%08X.%08X\n", add_h, add_l); + /* Reading this register clears the DataError interrupt sig */ + readl(l2_base + SIFIVE_L2_DATECCFIX_COUNT); + atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_CE, + "DatECCFix"); + } + if (irq == g_irq[DATA_UNCORR]) { + add_h = readl(l2_base + SIFIVE_L2_DATECCFAIL_HIGH); + add_l = readl(l2_base + SIFIVE_L2_DATECCFAIL_LOW); + pr_err("L2CACHE: DataFail @ 0x%08X.%08X\n", add_h, add_l); + /* Reading this register clears the DataFail interrupt sig */ + readl(l2_base + SIFIVE_L2_DATECCFAIL_COUNT); + atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_UE, + "DatECCFail"); + } + + return IRQ_HANDLED; +} + +static int __init sifive_l2_init(void) +{ + struct device_node *np; + struct resource res; + int i, rc; + + np = of_find_matching_node(NULL, sifive_l2_ids); + if (!np) + return -ENODEV; + + if (of_address_to_resource(np, 0, &res)) + return -ENODEV; + + l2_base = ioremap(res.start, resource_size(&res)); + if (!l2_base) + return -ENOMEM; + + for (i = 0; i < SIFIVE_L2_MAX_ECCINTR; i++) { + g_irq[i] = irq_of_parse_and_map(np, i); + rc = request_irq(g_irq[i], l2_int_handler, 0, "l2_ecc", NULL); + if (rc) { + pr_err("L2CACHE: Could not request IRQ %d\n", g_irq[i]); + return rc; + } + } + + l2_config_read(); + +#ifdef CONFIG_DEBUG_FS + setup_sifive_debug(); +#endif + return 0; +} +device_initcall(sifive_l2_init); diff --git a/drivers/soc/tegra/flowctrl.c b/drivers/soc/tegra/flowctrl.c index eb96a3086d6d..5db919d96aba 100644 --- a/drivers/soc/tegra/flowctrl.c +++ b/drivers/soc/tegra/flowctrl.c @@ -219,7 +219,7 @@ static int __init tegra_flowctrl_init(void) return 0; } - tegra_flowctrl_base = ioremap_nocache(res.start, resource_size(&res)); + tegra_flowctrl_base = ioremap(res.start, resource_size(&res)); if (!tegra_flowctrl_base) return -ENXIO; diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c index 4d719d4b8d5a..606abbe55bba 100644 --- 
a/drivers/soc/tegra/fuse/fuse-tegra.c +++ b/drivers/soc/tegra/fuse/fuse-tegra.c @@ -408,7 +408,7 @@ static int __init tegra_init_fuse(void) } } - fuse->base = ioremap_nocache(regs.start, resource_size(&regs)); + fuse->base = ioremap(regs.start, resource_size(&regs)); if (!fuse->base) { pr_err("failed to map FUSE registers\n"); return -ENXIO; } diff --git a/drivers/soc/tegra/fuse/tegra-apbmisc.c b/drivers/soc/tegra/fuse/tegra-apbmisc.c index df76778af601..a2fd6ccd48f9 100644 --- a/drivers/soc/tegra/fuse/tegra-apbmisc.c +++ b/drivers/soc/tegra/fuse/tegra-apbmisc.c @@ -159,11 +159,11 @@ void __init tegra_init_apbmisc(void) } } - apbmisc_base = ioremap_nocache(apbmisc.start, resource_size(&apbmisc)); + apbmisc_base = ioremap(apbmisc.start, resource_size(&apbmisc)); if (!apbmisc_base) pr_err("failed to map APBMISC registers\n"); - strapping_base = ioremap_nocache(straps.start, resource_size(&straps)); + strapping_base = ioremap(straps.start, resource_size(&straps)); if (!strapping_base) pr_err("failed to map strapping options registers\n"); diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c index ea0e11a09c12..1699dda6b393 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -2826,7 +2826,7 @@ static void tegra186_pmc_setup_irq_polarity(struct tegra_pmc *pmc, of_address_to_resource(np, index, &regs); - wake = ioremap_nocache(regs.start, resource_size(&regs)); + wake = ioremap(regs.start, resource_size(&regs)); if (!wake) { dev_err(pmc->dev, "failed to map PMC wake registers\n"); return; @@ -3097,7 +3097,7 @@ static int __init tegra_pmc_early_init(void) } } - pmc->base = ioremap_nocache(regs.start, resource_size(&regs)); + pmc->base = ioremap(regs.start, resource_size(&regs)); if (!pmc->base) { pr_err("failed to map PMC registers\n"); of_node_put(np); diff --git a/drivers/soc/ti/Kconfig b/drivers/soc/ti/Kconfig index cf545f428d03..4486e055794c 100644 --- a/drivers/soc/ti/Kconfig +++ b/drivers/soc/ti/Kconfig @@ -80,6 +80,17 @@ config TI_SCI_PM_DOMAINS called ti_sci_pm_domains. Note this is needed early in boot before rootfs may be available. +config TI_K3_RINGACC + bool "K3 Ring accelerator Sub System" + depends on ARCH_K3 || COMPILE_TEST + depends on TI_SCI_INTA_IRQCHIP + help + Say y here to support the K3 Ring accelerator module. + The Ring Accelerator (RINGACC or RA) provides hardware acceleration + to enable straightforward passing of work between a producer + and a consumer. There is one RINGACC module per NAVSS on TI AM65x SoCs. If unsure, say N.
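To make the producer/consumer hand-off in the help text above concrete, here is a rough sketch of a client of the API this patch adds (k3_ringacc_request_ring(), k3_ringacc_ring_cfg(), k3_ringacc_ring_push()/k3_ringacc_ring_pop(), k3_ringacc_ring_free()). This example is illustrative and not part of the patch; the ring size, element size and payload are arbitrary:

#include <linux/soc/ti/k3-ringacc.h>

static int example_pass_work(struct k3_ringacc *ringacc)
{
	struct k3_ring_cfg cfg = {
		.size = 128,                          /* elements */
		.elm_size = K3_RINGACC_RING_ELSIZE_8, /* 8 bytes each */
		.mode = K3_RINGACC_RING_MODE_RING,
	};
	u64 elem = 0x1234;	/* illustrative work descriptor */
	struct k3_ring *ring;
	int ret;

	/* Producer and consumer share one general purpose ring. */
	ring = k3_ringacc_request_ring(ringacc, K3_RINGACC_RING_ID_ANY, 0);
	if (!ring)
		return -ENODEV;

	ret = k3_ringacc_ring_cfg(ring, &cfg);
	if (ret)
		goto out_free;

	/* Producer: queue work at the ring tail... */
	ret = k3_ringacc_ring_push(ring, &elem);
	if (ret)
		goto out_free;

	/* ...consumer: pick it up from the head. */
	ret = k3_ringacc_ring_pop(ring, &elem);

out_free:
	k3_ringacc_ring_free(ring);
	return ret;
}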
+ endif # SOC_TI config TI_SCI_INTA_MSI_DOMAIN diff --git a/drivers/soc/ti/Makefile b/drivers/soc/ti/Makefile index 788b5cd1e180..bec827937a5f 100644 --- a/drivers/soc/ti/Makefile +++ b/drivers/soc/ti/Makefile @@ -10,3 +10,4 @@ obj-$(CONFIG_ARCH_OMAP2PLUS) += omap_prm.o obj-$(CONFIG_WKUP_M3_IPC) += wkup_m3_ipc.o obj-$(CONFIG_TI_SCI_PM_DOMAINS) += ti_sci_pm_domains.o obj-$(CONFIG_TI_SCI_INTA_MSI_DOMAIN) += ti_sci_inta_msi.o +obj-$(CONFIG_TI_K3_RINGACC) += k3-ringacc.o diff --git a/drivers/soc/ti/k3-ringacc.c b/drivers/soc/ti/k3-ringacc.c new file mode 100644 index 000000000000..5fb2ee2ac978 --- /dev/null +++ b/drivers/soc/ti/k3-ringacc.c @@ -0,0 +1,1157 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * TI K3 NAVSS Ring Accelerator subsystem driver + * + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + */ + +#include <linux/dma-mapping.h> +#include <linux/io.h> +#include <linux/init.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/soc/ti/k3-ringacc.h> +#include <linux/soc/ti/ti_sci_protocol.h> +#include <linux/soc/ti/ti_sci_inta_msi.h> +#include <linux/of_irq.h> +#include <linux/irqdomain.h> + +static LIST_HEAD(k3_ringacc_list); +static DEFINE_MUTEX(k3_ringacc_list_lock); + +#define K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK GENMASK(19, 0) + +/** + * struct k3_ring_rt_regs - The RA realtime Control/Status Registers region + * + * @resv_16: Reserved + * @db: Ring Doorbell Register + * @resv_4: Reserved + * @occ: Ring Occupancy Register + * @indx: Ring Current Index Register + * @hwocc: Ring Hardware Occupancy Register + * @hwindx: Ring Hardware Current Index Register + */ +struct k3_ring_rt_regs { + u32 resv_16[4]; + u32 db; + u32 resv_4[1]; + u32 occ; + u32 indx; + u32 hwocc; + u32 hwindx; +}; + +#define K3_RINGACC_RT_REGS_STEP 0x1000 + +/** + * struct k3_ring_fifo_regs - The Ring Accelerator Queues Registers region + * + * @head_data: Ring Head Entry Data Registers + * @tail_data: Ring Tail Entry Data Registers + * @peek_head_data: Ring Peek Head Entry Data Regs + * @peek_tail_data: Ring Peek Tail Entry Data Regs + */ +struct k3_ring_fifo_regs { + u32 head_data[128]; + u32 tail_data[128]; + u32 peek_head_data[128]; + u32 peek_tail_data[128]; +}; + +/** + * struct k3_ringacc_proxy_gcfg_regs - RA Proxy Global Config MMIO Region + * + * @revision: Revision Register + * @config: Config Register + */ +struct k3_ringacc_proxy_gcfg_regs { + u32 revision; + u32 config; +}; + +#define K3_RINGACC_PROXY_CFG_THREADS_MASK GENMASK(15, 0) + +/** + * struct k3_ringacc_proxy_target_regs - Proxy Datapath MMIO Region + * + * @control: Proxy Control Register + * @status: Proxy Status Register + * @resv_512: Reserved + * @data: Proxy Data Register + */ +struct k3_ringacc_proxy_target_regs { + u32 control; + u32 status; + u8 resv_512[504]; + u32 data[128]; +}; + +#define K3_RINGACC_PROXY_TARGET_STEP 0x1000 +#define K3_RINGACC_PROXY_NOT_USED (-1) + +enum k3_ringacc_proxy_access_mode { + PROXY_ACCESS_MODE_HEAD = 0, + PROXY_ACCESS_MODE_TAIL = 1, + PROXY_ACCESS_MODE_PEEK_HEAD = 2, + PROXY_ACCESS_MODE_PEEK_TAIL = 3, +}; + +#define K3_RINGACC_FIFO_WINDOW_SIZE_BYTES (512U) +#define K3_RINGACC_FIFO_REGS_STEP 0x1000 +#define K3_RINGACC_MAX_DB_RING_CNT (127U) + +struct k3_ring_ops { + int (*push_tail)(struct k3_ring *ring, void *elm); + int (*push_head)(struct k3_ring *ring, void *elm); + int (*pop_tail)(struct k3_ring *ring, void *elm); + int (*pop_head)(struct k3_ring *ring, void *elm); +}; + +/** + * struct k3_ring - RA Ring descriptor + * + * @rt: Ring control/status 
registers + * @fifos: Ring queues registers + * @proxy: Ring Proxy Datapath registers + * @ring_mem_dma: Ring buffer dma address + * @ring_mem_virt: Ring buffer virt address + * @ops: Ring operations + * @size: Ring size in elements + * @elm_size: Size of the ring element + * @mode: Ring mode + * @flags: flags + * @free: Number of free elements + * @occ: Ring occupancy + * @windex: Write index (only for @K3_RINGACC_RING_MODE_RING) + * @rindex: Read index (only for @K3_RINGACC_RING_MODE_RING) + * @ring_id: Ring Id + * @parent: Pointer on struct @k3_ringacc + * @use_count: Use count for shared rings + * @proxy_id: RA Ring Proxy Id (only if @K3_RINGACC_RING_USE_PROXY) + */ +struct k3_ring { + struct k3_ring_rt_regs __iomem *rt; + struct k3_ring_fifo_regs __iomem *fifos; + struct k3_ringacc_proxy_target_regs __iomem *proxy; + dma_addr_t ring_mem_dma; + void *ring_mem_virt; + struct k3_ring_ops *ops; + u32 size; + enum k3_ring_size elm_size; + enum k3_ring_mode mode; + u32 flags; +#define K3_RING_FLAG_BUSY BIT(1) +#define K3_RING_FLAG_SHARED BIT(2) + u32 free; + u32 occ; + u32 windex; + u32 rindex; + u32 ring_id; + struct k3_ringacc *parent; + u32 use_count; + int proxy_id; +}; + +/** + * struct k3_ringacc - Rings accelerator descriptor + * + * @dev: pointer on RA device + * @proxy_gcfg: RA proxy global config registers + * @proxy_target_base: RA proxy datapath region + * @num_rings: number of ring in RA + * @rings_inuse: bitfield for ring usage tracking + * @rm_gp_range: general purpose rings range from tisci + * @dma_ring_reset_quirk: DMA reset w/a enable + * @num_proxies: number of RA proxies + * @proxy_inuse: bitfield for proxy usage tracking + * @rings: array of rings descriptors (struct @k3_ring) + * @list: list of RAs in the system + * @req_lock: protect rings allocation + * @tisci: pointer ti-sci handle + * @tisci_ring_ops: ti-sci rings ops + * @tisci_dev_id: ti-sci device id + */ +struct k3_ringacc { + struct device *dev; + struct k3_ringacc_proxy_gcfg_regs __iomem *proxy_gcfg; + void __iomem *proxy_target_base; + u32 num_rings; /* number of rings in Ringacc module */ + unsigned long *rings_inuse; + struct ti_sci_resource *rm_gp_range; + + bool dma_ring_reset_quirk; + u32 num_proxies; + unsigned long *proxy_inuse; + + struct k3_ring *rings; + struct list_head list; + struct mutex req_lock; /* protect rings allocation */ + + const struct ti_sci_handle *tisci; + const struct ti_sci_rm_ringacc_ops *tisci_ring_ops; + u32 tisci_dev_id; +}; + +static long k3_ringacc_ring_get_fifo_pos(struct k3_ring *ring) +{ + return K3_RINGACC_FIFO_WINDOW_SIZE_BYTES - + (4 << ring->elm_size); +} + +static void *k3_ringacc_get_elm_addr(struct k3_ring *ring, u32 idx) +{ + return (ring->ring_mem_virt + idx * (4 << ring->elm_size)); +} + +static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem); +static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem); + +static struct k3_ring_ops k3_ring_mode_ring_ops = { + .push_tail = k3_ringacc_ring_push_mem, + .pop_head = k3_ringacc_ring_pop_mem, +}; + +static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem); +static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem); +static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem); +static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem); + +static struct k3_ring_ops k3_ring_mode_msg_ops = { + .push_tail = k3_ringacc_ring_push_io, + .push_head = k3_ringacc_ring_push_head_io, + .pop_tail = k3_ringacc_ring_pop_tail_io, + .pop_head = 
k3_ringacc_ring_pop_io, +}; + +static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem); +static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem); +static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem); +static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem); + +static struct k3_ring_ops k3_ring_mode_proxy_ops = { + .push_tail = k3_ringacc_ring_push_tail_proxy, + .push_head = k3_ringacc_ring_push_head_proxy, + .pop_tail = k3_ringacc_ring_pop_tail_proxy, + .pop_head = k3_ringacc_ring_pop_head_proxy, +}; + +static void k3_ringacc_ring_dump(struct k3_ring *ring) +{ + struct device *dev = ring->parent->dev; + + dev_dbg(dev, "dump ring: %d\n", ring->ring_id); + dev_dbg(dev, "dump mem virt %p, dma %pad\n", ring->ring_mem_virt, + &ring->ring_mem_dma); + dev_dbg(dev, "dump elmsize %d, size %d, mode %d, proxy_id %d\n", + ring->elm_size, ring->size, ring->mode, ring->proxy_id); + + dev_dbg(dev, "dump ring_rt_regs: db%08x\n", readl(&ring->rt->db)); + dev_dbg(dev, "dump occ%08x\n", readl(&ring->rt->occ)); + dev_dbg(dev, "dump indx%08x\n", readl(&ring->rt->indx)); + dev_dbg(dev, "dump hwocc%08x\n", readl(&ring->rt->hwocc)); + dev_dbg(dev, "dump hwindx%08x\n", readl(&ring->rt->hwindx)); + + if (ring->ring_mem_virt) + print_hex_dump_debug("dump ring_mem_virt ", DUMP_PREFIX_NONE, + 16, 1, ring->ring_mem_virt, 16 * 8, false); +} + +struct k3_ring *k3_ringacc_request_ring(struct k3_ringacc *ringacc, + int id, u32 flags) +{ + int proxy_id = K3_RINGACC_PROXY_NOT_USED; + + mutex_lock(&ringacc->req_lock); + + if (id == K3_RINGACC_RING_ID_ANY) { + /* Request for any general purpose ring */ + struct ti_sci_resource_desc *gp_rings = + &ringacc->rm_gp_range->desc[0]; + unsigned long size; + + size = gp_rings->start + gp_rings->num; + id = find_next_zero_bit(ringacc->rings_inuse, size, + gp_rings->start); + if (id == size) + goto error; + } else if (id < 0) { + goto error; + } + + if (test_bit(id, ringacc->rings_inuse) && + !(ringacc->rings[id].flags & K3_RING_FLAG_SHARED)) + goto error; + else if (ringacc->rings[id].flags & K3_RING_FLAG_SHARED) + goto out; + + if (flags & K3_RINGACC_RING_USE_PROXY) { + proxy_id = find_next_zero_bit(ringacc->proxy_inuse, + ringacc->num_proxies, 0); + if (proxy_id == ringacc->num_proxies) + goto error; + } + + if (proxy_id != K3_RINGACC_PROXY_NOT_USED) { + set_bit(proxy_id, ringacc->proxy_inuse); + ringacc->rings[id].proxy_id = proxy_id; + dev_dbg(ringacc->dev, "Giving ring#%d proxy#%d\n", id, + proxy_id); + } else { + dev_dbg(ringacc->dev, "Giving ring#%d\n", id); + } + + set_bit(id, ringacc->rings_inuse); +out: + ringacc->rings[id].use_count++; + mutex_unlock(&ringacc->req_lock); + return &ringacc->rings[id]; + +error: + mutex_unlock(&ringacc->req_lock); + return NULL; +} +EXPORT_SYMBOL_GPL(k3_ringacc_request_ring); + +static void k3_ringacc_ring_reset_sci(struct k3_ring *ring) +{ + struct k3_ringacc *ringacc = ring->parent; + int ret; + + ret = ringacc->tisci_ring_ops->config( + ringacc->tisci, + TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID, + ringacc->tisci_dev_id, + ring->ring_id, + 0, + 0, + ring->size, + 0, + 0, + 0); + if (ret) + dev_err(ringacc->dev, "TISCI reset ring fail (%d) ring_idx %d\n", + ret, ring->ring_id); +} + +void k3_ringacc_ring_reset(struct k3_ring *ring) +{ + if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) + return; + + ring->occ = 0; + ring->free = 0; + ring->rindex = 0; + ring->windex = 0; + + k3_ringacc_ring_reset_sci(ring); +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset); + +static 
void k3_ringacc_ring_reconfig_qmode_sci(struct k3_ring *ring, + enum k3_ring_mode mode) +{ + struct k3_ringacc *ringacc = ring->parent; + int ret; + + ret = ringacc->tisci_ring_ops->config( + ringacc->tisci, + TI_SCI_MSG_VALUE_RM_RING_MODE_VALID, + ringacc->tisci_dev_id, + ring->ring_id, + 0, + 0, + 0, + mode, + 0, + 0); + if (ret) + dev_err(ringacc->dev, "TISCI reconf qmode fail (%d) ring_idx %d\n", + ret, ring->ring_id); +} + +void k3_ringacc_ring_reset_dma(struct k3_ring *ring, u32 occ) +{ + if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) + return; + + if (!ring->parent->dma_ring_reset_quirk) + goto reset; + + if (!occ) + occ = readl(&ring->rt->occ); + + if (occ) { + u32 db_ring_cnt, db_ring_cnt_cur; + + dev_dbg(ring->parent->dev, "%s %u occ: %u\n", __func__, + ring->ring_id, occ); + /* TI-SCI ring reset */ + k3_ringacc_ring_reset_sci(ring); + + /* + * Setup the ring in ring/doorbell mode (if not already in this + * mode) + */ + if (ring->mode != K3_RINGACC_RING_MODE_RING) + k3_ringacc_ring_reconfig_qmode_sci( + ring, K3_RINGACC_RING_MODE_RING); + /* + * Ring the doorbell 2**22 – ringOcc times. + * This will wrap the internal UDMAP ring state occupancy + * counter (which is 21-bits wide) to 0. + */ + db_ring_cnt = (1U << 22) - occ; + + while (db_ring_cnt != 0) { + /* + * Ring the doorbell with the maximum count each + * iteration if possible to minimize the total + * of writes + */ + if (db_ring_cnt > K3_RINGACC_MAX_DB_RING_CNT) + db_ring_cnt_cur = K3_RINGACC_MAX_DB_RING_CNT; + else + db_ring_cnt_cur = db_ring_cnt; + + writel(db_ring_cnt_cur, &ring->rt->db); + db_ring_cnt -= db_ring_cnt_cur; + } + + /* Restore the original ring mode (if not ring mode) */ + if (ring->mode != K3_RINGACC_RING_MODE_RING) + k3_ringacc_ring_reconfig_qmode_sci(ring, ring->mode); + } + +reset: + /* Reset the ring */ + k3_ringacc_ring_reset(ring); +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset_dma); + +static void k3_ringacc_ring_free_sci(struct k3_ring *ring) +{ + struct k3_ringacc *ringacc = ring->parent; + int ret; + + ret = ringacc->tisci_ring_ops->config( + ringacc->tisci, + TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER, + ringacc->tisci_dev_id, + ring->ring_id, + 0, + 0, + 0, + 0, + 0, + 0); + if (ret) + dev_err(ringacc->dev, "TISCI ring free fail (%d) ring_idx %d\n", + ret, ring->ring_id); +} + +int k3_ringacc_ring_free(struct k3_ring *ring) +{ + struct k3_ringacc *ringacc; + + if (!ring) + return -EINVAL; + + ringacc = ring->parent; + + dev_dbg(ring->parent->dev, "flags: 0x%08x\n", ring->flags); + + if (!test_bit(ring->ring_id, ringacc->rings_inuse)) + return -EINVAL; + + mutex_lock(&ringacc->req_lock); + + if (--ring->use_count) + goto out; + + if (!(ring->flags & K3_RING_FLAG_BUSY)) + goto no_init; + + k3_ringacc_ring_free_sci(ring); + + dma_free_coherent(ringacc->dev, + ring->size * (4 << ring->elm_size), + ring->ring_mem_virt, ring->ring_mem_dma); + ring->flags = 0; + ring->ops = NULL; + if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED) { + clear_bit(ring->proxy_id, ringacc->proxy_inuse); + ring->proxy = NULL; + ring->proxy_id = K3_RINGACC_PROXY_NOT_USED; + } + +no_init: + clear_bit(ring->ring_id, ringacc->rings_inuse); + +out: + mutex_unlock(&ringacc->req_lock); + return 0; +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_free); + +u32 k3_ringacc_get_ring_id(struct k3_ring *ring) +{ + if (!ring) + return -EINVAL; + + return ring->ring_id; +} +EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_id); + +u32 k3_ringacc_get_tisci_dev_id(struct k3_ring *ring) +{ + if (!ring) + return -EINVAL; + + return ring->parent->tisci_dev_id; +} 
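A quick worked example of the doorbell-wrap arithmetic in k3_ringacc_ring_reset_dma() above (illustrative numbers, not from the patch): for a stale occupancy occ = 100, db_ring_cnt = 2**22 - 100 = 4194204. The loop issues this as 33025 doorbell writes of K3_RINGACC_MAX_DB_RING_CNT (127) plus one final write of 29, since 33025 * 127 + 29 = 4194204. The hardware has then counted occ + db_ring_cnt = 2**22 increments in total, which wraps the 21-bit UDMAP occupancy counter to 0 before the final k3_ringacc_ring_reset().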
+EXPORT_SYMBOL_GPL(k3_ringacc_get_tisci_dev_id); + +int k3_ringacc_get_ring_irq_num(struct k3_ring *ring) +{ + int irq_num; + + if (!ring) + return -EINVAL; + + irq_num = ti_sci_inta_msi_get_virq(ring->parent->dev, ring->ring_id); + if (irq_num <= 0) + irq_num = -EINVAL; + return irq_num; +} +EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_irq_num); + +static int k3_ringacc_ring_cfg_sci(struct k3_ring *ring) +{ + struct k3_ringacc *ringacc = ring->parent; + u32 ring_idx; + int ret; + + if (!ringacc->tisci) + return -EINVAL; + + ring_idx = ring->ring_id; + ret = ringacc->tisci_ring_ops->config( + ringacc->tisci, + TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER, + ringacc->tisci_dev_id, + ring_idx, + lower_32_bits(ring->ring_mem_dma), + upper_32_bits(ring->ring_mem_dma), + ring->size, + ring->mode, + ring->elm_size, + 0); + if (ret) + dev_err(ringacc->dev, "TISCI config ring fail (%d) ring_idx %d\n", + ret, ring_idx); + + return ret; +} + +int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg) +{ + struct k3_ringacc *ringacc = ring->parent; + int ret = 0; + + if (!ring || !cfg) + return -EINVAL; + if (cfg->elm_size > K3_RINGACC_RING_ELSIZE_256 || + cfg->mode >= K3_RINGACC_RING_MODE_INVALID || + cfg->size & ~K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK || + !test_bit(ring->ring_id, ringacc->rings_inuse)) + return -EINVAL; + + if (cfg->mode == K3_RINGACC_RING_MODE_MESSAGE && + ring->proxy_id == K3_RINGACC_PROXY_NOT_USED && + cfg->elm_size > K3_RINGACC_RING_ELSIZE_8) { + dev_err(ringacc->dev, + "Message mode must use proxy for %u element size\n", + 4 << ring->elm_size); + return -EINVAL; + } + + /* + * In case of shared ring only the first user (master user) can + * configure the ring. The sequence should be by the client: + * ring = k3_ringacc_request_ring(ringacc, ring_id, 0); # master user + * k3_ringacc_ring_cfg(ring, cfg); # master configuration + * k3_ringacc_request_ring(ringacc, ring_id, K3_RING_FLAG_SHARED); + * k3_ringacc_request_ring(ringacc, ring_id, K3_RING_FLAG_SHARED); + */ + if (ring->use_count != 1) + return 0; + + ring->size = cfg->size; + ring->elm_size = cfg->elm_size; + ring->mode = cfg->mode; + ring->occ = 0; + ring->free = 0; + ring->rindex = 0; + ring->windex = 0; + + if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED) + ring->proxy = ringacc->proxy_target_base + + ring->proxy_id * K3_RINGACC_PROXY_TARGET_STEP; + + switch (ring->mode) { + case K3_RINGACC_RING_MODE_RING: + ring->ops = &k3_ring_mode_ring_ops; + break; + case K3_RINGACC_RING_MODE_MESSAGE: + if (ring->proxy) + ring->ops = &k3_ring_mode_proxy_ops; + else + ring->ops = &k3_ring_mode_msg_ops; + break; + default: + ring->ops = NULL; + ret = -EINVAL; + goto err_free_proxy; + }; + + ring->ring_mem_virt = dma_alloc_coherent(ringacc->dev, + ring->size * (4 << ring->elm_size), + &ring->ring_mem_dma, GFP_KERNEL); + if (!ring->ring_mem_virt) { + dev_err(ringacc->dev, "Failed to alloc ring mem\n"); + ret = -ENOMEM; + goto err_free_ops; + } + + ret = k3_ringacc_ring_cfg_sci(ring); + + if (ret) + goto err_free_mem; + + ring->flags |= K3_RING_FLAG_BUSY; + ring->flags |= (cfg->flags & K3_RINGACC_RING_SHARED) ? 
+ K3_RING_FLAG_SHARED : 0; + + k3_ringacc_ring_dump(ring); + + return 0; + +err_free_mem: + dma_free_coherent(ringacc->dev, + ring->size * (4 << ring->elm_size), + ring->ring_mem_virt, + ring->ring_mem_dma); +err_free_ops: + ring->ops = NULL; +err_free_proxy: + ring->proxy = NULL; + return ret; +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_cfg); + +u32 k3_ringacc_ring_get_size(struct k3_ring *ring) +{ + if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) + return -EINVAL; + + return ring->size; +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_size); + +u32 k3_ringacc_ring_get_free(struct k3_ring *ring) +{ + if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) + return -EINVAL; + + if (!ring->free) + ring->free = ring->size - readl(&ring->rt->occ); + + return ring->free; +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_free); + +u32 k3_ringacc_ring_get_occ(struct k3_ring *ring) +{ + if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) + return -EINVAL; + + return readl(&ring->rt->occ); +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_occ); + +u32 k3_ringacc_ring_is_full(struct k3_ring *ring) +{ + return !k3_ringacc_ring_get_free(ring); +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_is_full); + +enum k3_ringacc_access_mode { + K3_RINGACC_ACCESS_MODE_PUSH_HEAD, + K3_RINGACC_ACCESS_MODE_POP_HEAD, + K3_RINGACC_ACCESS_MODE_PUSH_TAIL, + K3_RINGACC_ACCESS_MODE_POP_TAIL, + K3_RINGACC_ACCESS_MODE_PEEK_HEAD, + K3_RINGACC_ACCESS_MODE_PEEK_TAIL, +}; + +#define K3_RINGACC_PROXY_MODE(x) (((x) & 0x3) << 16) +#define K3_RINGACC_PROXY_ELSIZE(x) (((x) & 0x7) << 24) +static int k3_ringacc_ring_cfg_proxy(struct k3_ring *ring, + enum k3_ringacc_proxy_access_mode mode) +{ + u32 val; + + val = ring->ring_id; + val |= K3_RINGACC_PROXY_MODE(mode); + val |= K3_RINGACC_PROXY_ELSIZE(ring->elm_size); + writel(val, &ring->proxy->control); + return 0; +} + +static int k3_ringacc_ring_access_proxy(struct k3_ring *ring, void *elem, + enum k3_ringacc_access_mode access_mode) +{ + void __iomem *ptr; + + ptr = (void __iomem *)&ring->proxy->data; + + switch (access_mode) { + case K3_RINGACC_ACCESS_MODE_PUSH_HEAD: + case K3_RINGACC_ACCESS_MODE_POP_HEAD: + k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_HEAD); + break; + case K3_RINGACC_ACCESS_MODE_PUSH_TAIL: + case K3_RINGACC_ACCESS_MODE_POP_TAIL: + k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_TAIL); + break; + default: + return -EINVAL; + } + + ptr += k3_ringacc_ring_get_fifo_pos(ring); + + switch (access_mode) { + case K3_RINGACC_ACCESS_MODE_POP_HEAD: + case K3_RINGACC_ACCESS_MODE_POP_TAIL: + dev_dbg(ring->parent->dev, + "proxy:memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr, + access_mode); + memcpy_fromio(elem, ptr, (4 << ring->elm_size)); + ring->occ--; + break; + case K3_RINGACC_ACCESS_MODE_PUSH_TAIL: + case K3_RINGACC_ACCESS_MODE_PUSH_HEAD: + dev_dbg(ring->parent->dev, + "proxy:memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr, + access_mode); + memcpy_toio(ptr, elem, (4 << ring->elm_size)); + ring->free--; + break; + default: + return -EINVAL; + } + + dev_dbg(ring->parent->dev, "proxy: free%d occ%d\n", ring->free, + ring->occ); + return 0; +} + +static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem) +{ + return k3_ringacc_ring_access_proxy(ring, elem, + K3_RINGACC_ACCESS_MODE_PUSH_HEAD); +} + +static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem) +{ + return k3_ringacc_ring_access_proxy(ring, elem, + K3_RINGACC_ACCESS_MODE_PUSH_TAIL); +} + +static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem) +{ + return k3_ringacc_ring_access_proxy(ring, elem, + 
K3_RINGACC_ACCESS_MODE_POP_HEAD); +} + +static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem) +{ + return k3_ringacc_ring_access_proxy(ring, elem, + K3_RINGACC_ACCESS_MODE_POP_HEAD); +} + +static int k3_ringacc_ring_access_io(struct k3_ring *ring, void *elem, + enum k3_ringacc_access_mode access_mode) +{ + void __iomem *ptr; + + switch (access_mode) { + case K3_RINGACC_ACCESS_MODE_PUSH_HEAD: + case K3_RINGACC_ACCESS_MODE_POP_HEAD: + ptr = (void __iomem *)&ring->fifos->head_data; + break; + case K3_RINGACC_ACCESS_MODE_PUSH_TAIL: + case K3_RINGACC_ACCESS_MODE_POP_TAIL: + ptr = (void __iomem *)&ring->fifos->tail_data; + break; + default: + return -EINVAL; + } + + ptr += k3_ringacc_ring_get_fifo_pos(ring); + + switch (access_mode) { + case K3_RINGACC_ACCESS_MODE_POP_HEAD: + case K3_RINGACC_ACCESS_MODE_POP_TAIL: + dev_dbg(ring->parent->dev, + "memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr, + access_mode); + memcpy_fromio(elem, ptr, (4 << ring->elm_size)); + ring->occ--; + break; + case K3_RINGACC_ACCESS_MODE_PUSH_TAIL: + case K3_RINGACC_ACCESS_MODE_PUSH_HEAD: + dev_dbg(ring->parent->dev, + "memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr, + access_mode); + memcpy_toio(ptr, elem, (4 << ring->elm_size)); + ring->free--; + break; + default: + return -EINVAL; + } + + dev_dbg(ring->parent->dev, "free%d index%d occ%d index%d\n", ring->free, + ring->windex, ring->occ, ring->rindex); + return 0; +} + +static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem) +{ + return k3_ringacc_ring_access_io(ring, elem, + K3_RINGACC_ACCESS_MODE_PUSH_HEAD); +} + +static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem) +{ + return k3_ringacc_ring_access_io(ring, elem, + K3_RINGACC_ACCESS_MODE_PUSH_TAIL); +} + +static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem) +{ + return k3_ringacc_ring_access_io(ring, elem, + K3_RINGACC_ACCESS_MODE_POP_HEAD); +} + +static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem) +{ + return k3_ringacc_ring_access_io(ring, elem, + K3_RINGACC_ACCESS_MODE_POP_HEAD); +} + +static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem) +{ + void *elem_ptr; + + elem_ptr = k3_ringacc_get_elm_addr(ring, ring->windex); + + memcpy(elem_ptr, elem, (4 << ring->elm_size)); + + ring->windex = (ring->windex + 1) % ring->size; + ring->free--; + writel(1, &ring->rt->db); + + dev_dbg(ring->parent->dev, "ring_push_mem: free%d index%d\n", + ring->free, ring->windex); + + return 0; +} + +static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem) +{ + void *elem_ptr; + + elem_ptr = k3_ringacc_get_elm_addr(ring, ring->rindex); + + memcpy(elem, elem_ptr, (4 << ring->elm_size)); + + ring->rindex = (ring->rindex + 1) % ring->size; + ring->occ--; + writel(-1, &ring->rt->db); + + dev_dbg(ring->parent->dev, "ring_pop_mem: occ%d index%d pos_ptr%p\n", + ring->occ, ring->rindex, elem_ptr); + return 0; +} + +int k3_ringacc_ring_push(struct k3_ring *ring, void *elem) +{ + int ret = -EOPNOTSUPP; + + if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) + return -EINVAL; + + dev_dbg(ring->parent->dev, "ring_push: free%d index%d\n", ring->free, + ring->windex); + + if (k3_ringacc_ring_is_full(ring)) + return -ENOMEM; + + if (ring->ops && ring->ops->push_tail) + ret = ring->ops->push_tail(ring, elem); + + return ret; +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_push); + +int k3_ringacc_ring_push_head(struct k3_ring *ring, void *elem) +{ + int ret = -EOPNOTSUPP; + + if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) + return -EINVAL; + + 
dev_dbg(ring->parent->dev, "ring_push_head: free%d index%d\n", + ring->free, ring->windex); + + if (k3_ringacc_ring_is_full(ring)) + return -ENOMEM; + + if (ring->ops && ring->ops->push_head) + ret = ring->ops->push_head(ring, elem); + + return ret; +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_push_head); + +int k3_ringacc_ring_pop(struct k3_ring *ring, void *elem) +{ + int ret = -EOPNOTSUPP; + + if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) + return -EINVAL; + + if (!ring->occ) + ring->occ = k3_ringacc_ring_get_occ(ring); + + dev_dbg(ring->parent->dev, "ring_pop: occ%d index%d\n", ring->occ, + ring->rindex); + + if (!ring->occ) + return -ENODATA; + + if (ring->ops && ring->ops->pop_head) + ret = ring->ops->pop_head(ring, elem); + + return ret; +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_pop); + +int k3_ringacc_ring_pop_tail(struct k3_ring *ring, void *elem) +{ + int ret = -EOPNOTSUPP; + + if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) + return -EINVAL; + + if (!ring->occ) + ring->occ = k3_ringacc_ring_get_occ(ring); + + dev_dbg(ring->parent->dev, "ring_pop_tail: occ%d index%d\n", ring->occ, + ring->rindex); + + if (!ring->occ) + return -ENODATA; + + if (ring->ops && ring->ops->pop_tail) + ret = ring->ops->pop_tail(ring, elem); + + return ret; +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_pop_tail); + +struct k3_ringacc *of_k3_ringacc_get_by_phandle(struct device_node *np, + const char *property) +{ + struct device_node *ringacc_np; + struct k3_ringacc *ringacc = ERR_PTR(-EPROBE_DEFER); + struct k3_ringacc *entry; + + ringacc_np = of_parse_phandle(np, property, 0); + if (!ringacc_np) + return ERR_PTR(-ENODEV); + + mutex_lock(&k3_ringacc_list_lock); + list_for_each_entry(entry, &k3_ringacc_list, list) + if (entry->dev->of_node == ringacc_np) { + ringacc = entry; + break; + } + mutex_unlock(&k3_ringacc_list_lock); + of_node_put(ringacc_np); + + return ringacc; +} +EXPORT_SYMBOL_GPL(of_k3_ringacc_get_by_phandle); + +static int k3_ringacc_probe_dt(struct k3_ringacc *ringacc) +{ + struct device_node *node = ringacc->dev->of_node; + struct device *dev = ringacc->dev; + struct platform_device *pdev = to_platform_device(dev); + int ret; + + if (!node) { + dev_err(dev, "device tree info unavailable\n"); + return -ENODEV; + } + + ret = of_property_read_u32(node, "ti,num-rings", &ringacc->num_rings); + if (ret) { + dev_err(dev, "ti,num-rings read failure %d\n", ret); + return ret; + } + + ringacc->dma_ring_reset_quirk = + of_property_read_bool(node, "ti,dma-ring-reset-quirk"); + + ringacc->tisci = ti_sci_get_by_phandle(node, "ti,sci"); + if (IS_ERR(ringacc->tisci)) { + ret = PTR_ERR(ringacc->tisci); + if (ret != -EPROBE_DEFER) + dev_err(dev, "ti,sci read fail %d\n", ret); + ringacc->tisci = NULL; + return ret; + } + + ret = of_property_read_u32(node, "ti,sci-dev-id", + &ringacc->tisci_dev_id); + if (ret) { + dev_err(dev, "ti,sci-dev-id read fail %d\n", ret); + return ret; + } + + pdev->id = ringacc->tisci_dev_id; + + ringacc->rm_gp_range = devm_ti_sci_get_of_resource(ringacc->tisci, dev, + ringacc->tisci_dev_id, + "ti,sci-rm-range-gp-rings"); + if (IS_ERR(ringacc->rm_gp_range)) { + dev_err(dev, "Failed to allocate MSI interrupts\n"); + return PTR_ERR(ringacc->rm_gp_range); + } + + return ti_sci_inta_msi_domain_alloc_irqs(ringacc->dev, + ringacc->rm_gp_range); +} + +static int k3_ringacc_probe(struct platform_device *pdev) +{ + struct k3_ringacc *ringacc; + void __iomem *base_fifo, *base_rt; + struct device *dev = &pdev->dev; + struct resource *res; + int ret, i; + + ringacc = devm_kzalloc(dev, sizeof(*ringacc), 
GFP_KERNEL); + if (!ringacc) + return -ENOMEM; + + ringacc->dev = dev; + mutex_init(&ringacc->req_lock); + + dev->msi_domain = of_msi_get_domain(dev, dev->of_node, + DOMAIN_BUS_TI_SCI_INTA_MSI); + if (!dev->msi_domain) { + dev_err(dev, "Failed to get MSI domain\n"); + return -EPROBE_DEFER; + } + + ret = k3_ringacc_probe_dt(ringacc); + if (ret) + return ret; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rt"); + base_rt = devm_ioremap_resource(dev, res); + if (IS_ERR(base_rt)) + return PTR_ERR(base_rt); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fifos"); + base_fifo = devm_ioremap_resource(dev, res); + if (IS_ERR(base_fifo)) + return PTR_ERR(base_fifo); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "proxy_gcfg"); + ringacc->proxy_gcfg = devm_ioremap_resource(dev, res); + if (IS_ERR(ringacc->proxy_gcfg)) + return PTR_ERR(ringacc->proxy_gcfg); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "proxy_target"); + ringacc->proxy_target_base = devm_ioremap_resource(dev, res); + if (IS_ERR(ringacc->proxy_target_base)) + return PTR_ERR(ringacc->proxy_target_base); + + ringacc->num_proxies = readl(&ringacc->proxy_gcfg->config) & + K3_RINGACC_PROXY_CFG_THREADS_MASK; + + ringacc->rings = devm_kzalloc(dev, + sizeof(*ringacc->rings) * + ringacc->num_rings, + GFP_KERNEL); + ringacc->rings_inuse = devm_kcalloc(dev, + BITS_TO_LONGS(ringacc->num_rings), + sizeof(unsigned long), GFP_KERNEL); + ringacc->proxy_inuse = devm_kcalloc(dev, + BITS_TO_LONGS(ringacc->num_proxies), + sizeof(unsigned long), GFP_KERNEL); + + if (!ringacc->rings || !ringacc->rings_inuse || !ringacc->proxy_inuse) + return -ENOMEM; + + for (i = 0; i < ringacc->num_rings; i++) { + ringacc->rings[i].rt = base_rt + + K3_RINGACC_RT_REGS_STEP * i; + ringacc->rings[i].fifos = base_fifo + + K3_RINGACC_FIFO_REGS_STEP * i; + ringacc->rings[i].parent = ringacc; + ringacc->rings[i].ring_id = i; + ringacc->rings[i].proxy_id = K3_RINGACC_PROXY_NOT_USED; + } + dev_set_drvdata(dev, ringacc); + + ringacc->tisci_ring_ops = &ringacc->tisci->ops.rm_ring_ops; + + mutex_lock(&k3_ringacc_list_lock); + list_add_tail(&ringacc->list, &k3_ringacc_list); + mutex_unlock(&k3_ringacc_list_lock); + + dev_info(dev, "Ring Accelerator probed rings:%u, gp-rings[%u,%u] sci-dev-id:%u\n", + ringacc->num_rings, + ringacc->rm_gp_range->desc[0].start, + ringacc->rm_gp_range->desc[0].num, + ringacc->tisci_dev_id); + dev_info(dev, "dma-ring-reset-quirk: %s\n", + ringacc->dma_ring_reset_quirk ? "enabled" : "disabled"); + dev_info(dev, "RA Proxy rev. 
%08x, num_proxies:%u\n", + readl(&ringacc->proxy_gcfg->revision), ringacc->num_proxies); + return 0; +} + +/* Match table for of_platform binding */ +static const struct of_device_id k3_ringacc_of_match[] = { + { .compatible = "ti,am654-navss-ringacc", }, + {}, +}; + +static struct platform_driver k3_ringacc_driver = { + .probe = k3_ringacc_probe, + .driver = { + .name = "k3-ringacc", + .of_match_table = k3_ringacc_of_match, + .suppress_bind_attrs = true, + }, +}; +builtin_platform_driver(k3_ringacc_driver); diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c index 378369d9364a..e9ece45d7a33 100644 --- a/drivers/soc/ti/wkup_m3_ipc.c +++ b/drivers/soc/ti/wkup_m3_ipc.c @@ -419,6 +419,8 @@ static void wkup_m3_rproc_boot_thread(struct wkup_m3_ipc *m3_ipc) ret = rproc_boot(m3_ipc->rproc); if (ret) dev_err(dev, "rproc_boot failed\n"); + else + m3_ipc_state = m3_ipc; do_exit(0); } @@ -505,8 +507,6 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev) goto err_put_rproc; } - m3_ipc_state = m3_ipc; - return 0; err_put_rproc: diff --git a/drivers/soc/xilinx/xlnx_vcu.c b/drivers/soc/xilinx/xlnx_vcu.c index a840c0272135..a3aa40996f13 100644 --- a/drivers/soc/xilinx/xlnx_vcu.c +++ b/drivers/soc/xilinx/xlnx_vcu.c @@ -511,7 +511,7 @@ static int xvcu_probe(struct platform_device *pdev) return -ENODEV; } - xvcu->vcu_slcr_ba = devm_ioremap_nocache(&pdev->dev, res->start, + xvcu->vcu_slcr_ba = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!xvcu->vcu_slcr_ba) { dev_err(&pdev->dev, "vcu_slcr register mapping failed.\n"); @@ -524,7 +524,7 @@ static int xvcu_probe(struct platform_device *pdev) return -ENODEV; } - xvcu->logicore_reg_ba = devm_ioremap_nocache(&pdev->dev, res->start, + xvcu->logicore_reg_ba = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!xvcu->logicore_reg_ba) { dev_err(&pdev->dev, "logicore register mapping failed.\n"); diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 870f7797b56b..d6ed0c355954 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -281,6 +281,15 @@ config SPI_FSL_QUADSPI This controller does not support generic SPI messages. It only supports the high-level SPI memory interface. +config SPI_HISI_SFC_V3XX + tristate "HiSilicon SPI-NOR Flash Controller for Hi16XX chipsets" + depends on (ARM64 && ACPI) || COMPILE_TEST + depends on HAS_IOMEM + select MTD_SPI_NOR + help + This enables support for HiSilicon v3xx SPI-NOR flash controller + found in hi16xx chipsets.
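The "high-level SPI memory interface" referenced in the SPI_FSL_QUADSPI help above, and implemented by the new HiSilicon driver below, is the kernel's spi-mem API: a flash driver encodes an entire command/address/dummy/data cycle in one spi_mem_op and hands it to the controller. A hedged sketch of a caller follows; the opcode and layout are a typical JEDEC ID read, not something specific to these controllers:

#include <linux/spi/spi-mem.h>

/* Read a 3-byte JEDEC ID (opcode 0x9f) over plain single-wire SPI. */
static int example_read_jedec_id(struct spi_mem *mem, u8 *id)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
			   SPI_MEM_OP_NO_ADDR,
			   SPI_MEM_OP_NO_DUMMY,
			   SPI_MEM_OP_DATA_IN(3, id, 1));
	int ret;

	/* Let the controller trim the transfer, e.g. via .adjust_op_size(). */
	ret = spi_mem_adjust_op_size(mem, &op);
	if (ret)
		return ret;

	/* Dispatched to the controller's .exec_op() after .supports_op(). */
	return spi_mem_exec_op(mem, &op);
}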
+ config SPI_NXP_FLEXSPI tristate "NXP Flex SPI controller" depends on ARCH_LAYERSCAPE || HAS_IOMEM diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index bb49c9e6d0a0..9b65ec5afc5e 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -48,6 +48,7 @@ obj-$(CONFIG_SPI_FSL_LPSPI) += spi-fsl-lpspi.o obj-$(CONFIG_SPI_FSL_QUADSPI) += spi-fsl-qspi.o obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o obj-$(CONFIG_SPI_GPIO) += spi-gpio.o +obj-$(CONFIG_SPI_HISI_SFC_V3XX) += spi-hisi-sfc-v3xx.o obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o obj-$(CONFIG_SPI_IMX) += spi-imx.o obj-$(CONFIG_SPI_LANTIQ_SSC) += spi-lantiq-ssc.o diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index 56f0ca361deb..013458cabe3c 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c @@ -514,26 +514,19 @@ static int atmel_spi_configure_dma(struct spi_master *master, master->dma_tx = dma_request_chan(dev, "tx"); if (IS_ERR(master->dma_tx)) { err = PTR_ERR(master->dma_tx); - if (err == -EPROBE_DEFER) { - dev_warn(dev, "no DMA channel available at the moment\n"); - goto error_clear; - } - dev_err(dev, - "DMA TX channel not available, SPI unable to use DMA\n"); - err = -EBUSY; + if (err != -EPROBE_DEFER) + dev_err(dev, "No TX DMA channel, DMA is disabled\n"); goto error_clear; } - /* - * No reason to check EPROBE_DEFER here since we have already requested - * tx channel. If it fails here, it's for another reason. - */ - master->dma_rx = dma_request_slave_channel(dev, "rx"); - - if (!master->dma_rx) { - dev_err(dev, - "DMA RX channel not available, SPI unable to use DMA\n"); - err = -EBUSY; + master->dma_rx = dma_request_chan(dev, "rx"); + if (IS_ERR(master->dma_rx)) { + err = PTR_ERR(master->dma_rx); + /* + * No reason to check EPROBE_DEFER here since we have already + * requested tx channel. 
+ */ + dev_err(dev, "No RX DMA channel, DMA is disabled\n"); goto error; } @@ -548,7 +541,7 @@ static int atmel_spi_configure_dma(struct spi_master *master, return 0; error: - if (master->dma_rx) + if (!IS_ERR(master->dma_rx)) dma_release_channel(master->dma_rx); if (!IS_ERR(master->dma_tx)) dma_release_channel(master->dma_tx); diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c index 85bad70f59e3..23d295f36c80 100644 --- a/drivers/spi/spi-bcm-qspi.c +++ b/drivers/spi/spi-bcm-qspi.c @@ -1293,7 +1293,7 @@ int bcm_qspi_probe(struct platform_device *pdev, name = qspi_irq_tab[val].irq_name; if (qspi_irq_tab[val].irq_source == SINGLE_L2) { /* get the l2 interrupts */ - irq = platform_get_irq_byname(pdev, name); + irq = platform_get_irq_byname_optional(pdev, name); } else if (!num_ints && soc_intc) { /* all mspi, bspi intrs muxed to one L1 intr */ irq = platform_get_irq(pdev, 0); diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c index fb61a620effc..11c235879bb7 100644 --- a/drivers/spi/spi-bcm2835.c +++ b/drivers/spi/spi-bcm2835.c @@ -68,7 +68,7 @@ #define BCM2835_SPI_FIFO_SIZE 64 #define BCM2835_SPI_FIFO_SIZE_3_4 48 #define BCM2835_SPI_DMA_MIN_LENGTH 96 -#define BCM2835_SPI_NUM_CS 3 /* raise as necessary */ +#define BCM2835_SPI_NUM_CS 4 /* raise as necessary */ #define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \ | SPI_NO_CS | SPI_3WIRE) @@ -888,8 +888,8 @@ static void bcm2835_dma_release(struct spi_controller *ctlr, } } -static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev, - struct bcm2835_spi *bs) +static int bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev, + struct bcm2835_spi *bs) { struct dma_slave_config slave_config; const __be32 *addr; @@ -900,19 +900,24 @@ static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev, addr = of_get_address(ctlr->dev.of_node, 0, NULL, NULL); if (!addr) { dev_err(dev, "could not get DMA-register address - not using dma mode\n"); - goto err; + /* Fall back to interrupt mode */ + return 0; } dma_reg_base = be32_to_cpup(addr); /* get tx/rx dma */ - ctlr->dma_tx = dma_request_slave_channel(dev, "tx"); - if (!ctlr->dma_tx) { + ctlr->dma_tx = dma_request_chan(dev, "tx"); + if (IS_ERR(ctlr->dma_tx)) { dev_err(dev, "no tx-dma configuration found - not using dma mode\n"); + ret = PTR_ERR(ctlr->dma_tx); + ctlr->dma_tx = NULL; goto err; } - ctlr->dma_rx = dma_request_slave_channel(dev, "rx"); - if (!ctlr->dma_rx) { + ctlr->dma_rx = dma_request_chan(dev, "rx"); + if (IS_ERR(ctlr->dma_rx)) { dev_err(dev, "no rx-dma configuration found - not using dma mode\n"); + ret = PTR_ERR(ctlr->dma_rx); + ctlr->dma_rx = NULL; goto err_release; } @@ -997,7 +1002,7 @@ static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev, /* all went well, so set can_dma */ ctlr->can_dma = bcm2835_spi_can_dma; - return; + return 0; err_config: dev_err(dev, "issue configuring dma: %d - not using DMA mode\n", @@ -1005,7 +1010,14 @@ err_config: err_release: bcm2835_dma_release(ctlr, bs); err: - return; + /* + * Only report error for deferred probing, otherwise fall back to + * interrupt mode + */ + if (ret != -EPROBE_DEFER) + ret = 0; + + return ret; } static int bcm2835_spi_transfer_one_poll(struct spi_controller *ctlr, @@ -1305,7 +1317,10 @@ static int bcm2835_spi_probe(struct platform_device *pdev) bs->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(bs->clk)) { err = PTR_ERR(bs->clk); - dev_err(&pdev->dev, "could not get clk: %d\n", err); + if (err == -EPROBE_DEFER) + 
dev_dbg(&pdev->dev, "could not get clk: %d\n", err); + else + dev_err(&pdev->dev, "could not get clk: %d\n", err); goto out_controller_put; } @@ -1317,7 +1332,9 @@ static int bcm2835_spi_probe(struct platform_device *pdev) clk_prepare_enable(bs->clk); - bcm2835_dma_init(ctlr, &pdev->dev, bs); + err = bcm2835_dma_init(ctlr, &pdev->dev, bs); + if (err) + goto out_clk_disable; /* initialise the hardware with the default polarities */ bcm2835_wr(bs, BCM2835_SPI_CS, @@ -1327,20 +1344,22 @@ static int bcm2835_spi_probe(struct platform_device *pdev) dev_name(&pdev->dev), ctlr); if (err) { dev_err(&pdev->dev, "could not request IRQ: %d\n", err); - goto out_clk_disable; + goto out_dma_release; } err = devm_spi_register_controller(&pdev->dev, ctlr); if (err) { dev_err(&pdev->dev, "could not register SPI controller: %d\n", err); - goto out_clk_disable; + goto out_dma_release; } bcm2835_debugfs_create(bs, dev_name(&pdev->dev)); return 0; +out_dma_release: + bcm2835_dma_release(ctlr, bs); out_clk_disable: clk_disable_unprepare(bs->clk); out_controller_put: diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c index d84e22dd6f9f..68491a8bf7b5 100644 --- a/drivers/spi/spi-bitbang.c +++ b/drivers/spi/spi-bitbang.c @@ -329,8 +329,20 @@ static void spi_bitbang_set_cs(struct spi_device *spi, bool enable) int spi_bitbang_init(struct spi_bitbang *bitbang) { struct spi_master *master = bitbang->master; + bool custom_cs; - if (!master || !bitbang->chipselect) + if (!master) + return -EINVAL; + /* + * We only need the chipselect callback if we are actually using it. + * If we just use GPIO descriptors, it is surplus. If the + * SPI_MASTER_GPIO_SS flag is set, we always need to call the + * driver-specific chipselect routine. + */ + custom_cs = (!master->use_gpio_descriptors || + (master->flags & SPI_MASTER_GPIO_SS)); + + if (custom_cs && !bitbang->chipselect) return -EINVAL; mutex_init(&bitbang->lock); @@ -344,7 +356,12 @@ int spi_bitbang_init(struct spi_bitbang *bitbang) master->prepare_transfer_hardware = spi_bitbang_prepare_hardware; master->unprepare_transfer_hardware = spi_bitbang_unprepare_hardware; master->transfer_one = spi_bitbang_transfer_one; - master->set_cs = spi_bitbang_set_cs; + /* + * When using GPIO descriptors, the ->set_cs() callback doesn't even + * get called unless SPI_MASTER_GPIO_SS is set. 
+ */ + if (custom_cs) + master->set_cs = spi_bitbang_set_cs; if (!bitbang->txrx_bufs) { bitbang->use_dma = 0; diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c index c36587b42e95..82a0ee09cbe1 100644 --- a/drivers/spi/spi-cadence.c +++ b/drivers/spi/spi-cadence.c @@ -168,16 +168,16 @@ static void cdns_spi_init_hw(struct cdns_spi *xspi) /** * cdns_spi_chipselect - Select or deselect the chip select line * @spi: Pointer to the spi_device structure - * @enable: Select (1) or deselect (0) the chip select line + * @is_high: Select(0) or deselect (1) the chip select line */ -static void cdns_spi_chipselect(struct spi_device *spi, bool enable) +static void cdns_spi_chipselect(struct spi_device *spi, bool is_high) { struct cdns_spi *xspi = spi_master_get_devdata(spi->master); u32 ctrl_reg; ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR); - if (!enable) { + if (is_high) { /* Deselect the slave */ ctrl_reg |= CDNS_SPI_CR_SSCTRL; } else { diff --git a/drivers/spi/spi-cavium-thunderx.c b/drivers/spi/spi-cavium-thunderx.c index d12e149f1a41..fd6b9caffaf0 100644 --- a/drivers/spi/spi-cavium-thunderx.c +++ b/drivers/spi/spi-cavium-thunderx.c @@ -82,6 +82,7 @@ static int thunderx_spi_probe(struct pci_dev *pdev, error: clk_disable_unprepare(p->clk); + pci_release_regions(pdev); spi_master_put(master); return ret; } @@ -96,6 +97,7 @@ static void thunderx_spi_remove(struct pci_dev *pdev) return; clk_disable_unprepare(p->clk); + pci_release_regions(pdev); /* Put everything in a known state. */ writeq(0, p->register_base + OCTEON_SPI_CFG(p)); } diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c index 2663bb12d9ce..0d86c37e0aeb 100644 --- a/drivers/spi/spi-dw-mid.c +++ b/drivers/spi/spi-dw-mid.c @@ -301,7 +301,7 @@ int dw_spi_mid_init(struct dw_spi *dws) void __iomem *clk_reg; u32 clk_cdiv; - clk_reg = ioremap_nocache(MRST_CLK_SPI_REG, 16); + clk_reg = ioremap(MRST_CLK_SPI_REG, 16); if (!clk_reg) return -ENOMEM; diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c index a92aa5cd4fbe..31e3f866d11a 100644 --- a/drivers/spi/spi-dw.c +++ b/drivers/spi/spi-dw.c @@ -129,10 +129,11 @@ void dw_spi_set_cs(struct spi_device *spi, bool enable) struct dw_spi *dws = spi_controller_get_devdata(spi->controller); struct chip_data *chip = spi_get_ctldata(spi); + /* Chip select logic is inverted from spi_set_cs() */ if (chip && chip->cs_control) - chip->cs_control(enable); + chip->cs_control(!enable); - if (enable) + if (!enable) dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select)); else if (dws->cs_override) dw_writel(dws, DW_SPI_SER, 0); @@ -171,9 +172,11 @@ static inline u32 rx_max(struct dw_spi *dws) static void dw_writer(struct dw_spi *dws) { - u32 max = tx_max(dws); + u32 max; u16 txw = 0; + spin_lock(&dws->buf_lock); + max = tx_max(dws); while (max--) { /* Set the tx word if the transfer's original "tx" is not null */ if (dws->tx_end - dws->len) { @@ -185,13 +188,16 @@ static void dw_writer(struct dw_spi *dws) dw_write_io_reg(dws, DW_SPI_DR, txw); dws->tx += dws->n_bytes; } + spin_unlock(&dws->buf_lock); } static void dw_reader(struct dw_spi *dws) { - u32 max = rx_max(dws); + u32 max; u16 rxw; + spin_lock(&dws->buf_lock); + max = rx_max(dws); while (max--) { rxw = dw_read_io_reg(dws, DW_SPI_DR); /* Care rx only if the transfer's original "rx" is not null */ @@ -203,6 +209,7 @@ static void dw_reader(struct dw_spi *dws) } dws->rx += dws->n_bytes; } + spin_unlock(&dws->buf_lock); } static void int_error_stop(struct dw_spi *dws, const char *msg) @@ -275,18 +282,23 @@ static int 
dw_spi_transfer_one(struct spi_controller *master, { struct dw_spi *dws = spi_controller_get_devdata(master); struct chip_data *chip = spi_get_ctldata(spi); + unsigned long flags; u8 imask = 0; u16 txlevel = 0; u32 cr0; int ret; dws->dma_mapped = 0; - + spin_lock_irqsave(&dws->buf_lock, flags); dws->tx = (void *)transfer->tx_buf; dws->tx_end = dws->tx + transfer->len; dws->rx = transfer->rx_buf; dws->rx_end = dws->rx + transfer->len; dws->len = transfer->len; + spin_unlock_irqrestore(&dws->buf_lock, flags); + + /* Ensure dw->rx and dw->rx_end are visible */ + smp_mb(); spi_enable_chip(dws, 0); @@ -460,7 +472,8 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) struct spi_controller *master; int ret; - BUG_ON(dws == NULL); + if (!dws) + return -EINVAL; master = spi_alloc_master(dev, 0); if (!master) @@ -470,6 +483,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) dws->type = SSI_MOTO_SPI; dws->dma_inited = 0; dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR); + spin_lock_init(&dws->buf_lock); spi_controller_set_devdata(master, dws); diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h index 38c7de1f0aa9..1bf5713e047d 100644 --- a/drivers/spi/spi-dw.h +++ b/drivers/spi/spi-dw.h @@ -119,6 +119,7 @@ struct dw_spi { size_t len; void *tx; void *tx_end; + spinlock_t buf_lock; void *rx; void *rx_end; int dma_mapped; diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 442cff71a0d2..6ec2dcb8c57a 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -185,6 +185,7 @@ struct fsl_dspi { struct spi_transfer *cur_transfer; struct spi_message *cur_msg; struct chip_data *cur_chip; + size_t progress; size_t len; const void *tx; void *rx; @@ -395,17 +396,17 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr) if (!dma) return -ENOMEM; - dma->chan_rx = dma_request_slave_channel(dev, "rx"); - if (!dma->chan_rx) { + dma->chan_rx = dma_request_chan(dev, "rx"); + if (IS_ERR(dma->chan_rx)) { dev_err(dev, "rx dma channel not available\n"); - ret = -ENODEV; + ret = PTR_ERR(dma->chan_rx); return ret; } - dma->chan_tx = dma_request_slave_channel(dev, "tx"); - if (!dma->chan_tx) { + dma->chan_tx = dma_request_chan(dev, "tx"); + if (IS_ERR(dma->chan_tx)) { dev_err(dev, "tx dma channel not available\n"); - ret = -ENODEV; + ret = PTR_ERR(dma->chan_tx); goto err_tx_channel; } @@ -586,21 +587,14 @@ static void dspi_tcfq_write(struct fsl_dspi *dspi) dspi->tx_cmd |= SPI_PUSHR_CMD_CTCNT; if (dspi->devtype_data->xspi_mode && dspi->bits_per_word > 16) { - /* Write two TX FIFO entries first, and then the corresponding - * CMD FIFO entry. + /* Write the CMD FIFO entry first, and then the two + * corresponding TX FIFO entries. */ u32 data = dspi_pop_tx(dspi); - if (dspi->cur_chip->ctar_val & SPI_CTAR_LSBFE) { - /* LSB */ - tx_fifo_write(dspi, data & 0xFFFF); - tx_fifo_write(dspi, data >> 16); - } else { - /* MSB */ - tx_fifo_write(dspi, data >> 16); - tx_fifo_write(dspi, data & 0xFFFF); - } cmd_fifo_write(dspi); + tx_fifo_write(dspi, data & 0xFFFF); + tx_fifo_write(dspi, data >> 16); } else { /* Write one entry to both TX FIFO and CMD FIFO * simultaneously. @@ -658,7 +652,7 @@ static int dspi_rxtx(struct fsl_dspi *dspi) u32 spi_tcr; spi_take_timestamp_post(dspi->ctlr, dspi->cur_transfer, - dspi->tx - dspi->bytes_per_word, !dspi->irq); + dspi->progress, !dspi->irq); /* Get transfer counter (in number of SPI transfers). It was * reset to 0 when transfer(s) were started. 
@@ -667,6 +661,7 @@ static int dspi_rxtx(struct fsl_dspi *dspi) spi_tcnt = SPI_TCR_GET_TCNT(spi_tcr); /* Update total number of bytes that were transferred */ msg->actual_length += spi_tcnt * dspi->bytes_per_word; + dspi->progress += spi_tcnt; trans_mode = dspi->devtype_data->trans_mode; if (trans_mode == DSPI_EOQ_MODE) @@ -679,7 +674,7 @@ static int dspi_rxtx(struct fsl_dspi *dspi) return 0; spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer, - dspi->tx, !dspi->irq); + dspi->progress, !dspi->irq); if (trans_mode == DSPI_EOQ_MODE) dspi_eoq_write(dspi); @@ -768,6 +763,7 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, dspi->rx = transfer->rx_buf; dspi->rx_end = dspi->rx + transfer->len; dspi->len = transfer->len; + dspi->progress = 0; /* Validated transfer specific frame size (defaults applied) */ dspi->bits_per_word = transfer->bits_per_word; if (transfer->bits_per_word <= 8) @@ -789,7 +785,7 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, SPI_CTARE_DTCP(1)); spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer, - dspi->tx, !dspi->irq); + dspi->progress, !dspi->irq); trans_mode = dspi->devtype_data->trans_mode; switch (trans_mode) { diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c index 2cc0ddb4a988..d0b8cc741a24 100644 --- a/drivers/spi/spi-fsl-lpspi.c +++ b/drivers/spi/spi-fsl-lpspi.c @@ -469,9 +469,9 @@ static int fsl_lpspi_setup_transfer(struct spi_controller *controller, fsl_lpspi->watermark = fsl_lpspi->txfifosize; if (fsl_lpspi_can_dma(controller, spi, t)) - fsl_lpspi->usedma = 1; + fsl_lpspi->usedma = true; else - fsl_lpspi->usedma = 0; + fsl_lpspi->usedma = false; return fsl_lpspi_config(fsl_lpspi); } @@ -862,6 +862,22 @@ static int fsl_lpspi_probe(struct platform_device *pdev) fsl_lpspi->dev = &pdev->dev; fsl_lpspi->is_slave = is_slave; + controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32); + controller->transfer_one = fsl_lpspi_transfer_one; + controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware; + controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware; + controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX; + controller->dev.of_node = pdev->dev.of_node; + controller->bus_num = pdev->id; + controller->slave_abort = fsl_lpspi_slave_abort; + + ret = devm_spi_register_controller(&pdev->dev, controller); + if (ret < 0) { + dev_err(&pdev->dev, "spi_register_controller error.\n"); + goto out_controller_put; + } + if (!fsl_lpspi->is_slave) { for (i = 0; i < controller->num_chipselect; i++) { int cs_gpio = of_get_named_gpio(np, "cs-gpios", i); @@ -885,16 +901,6 @@ static int fsl_lpspi_probe(struct platform_device *pdev) controller->prepare_message = fsl_lpspi_prepare_message; } - controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32); - controller->transfer_one = fsl_lpspi_transfer_one; - controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware; - controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware; - controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; - controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX; - controller->dev.of_node = pdev->dev.of_node; - controller->bus_num = pdev->id; - controller->slave_abort = fsl_lpspi_slave_abort; - init_completion(&fsl_lpspi->xfer_done); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -952,12 +958,6 @@ static int fsl_lpspi_probe(struct platform_device *pdev) if (ret < 0) dev_err(&pdev->dev, "dma setup error %d, use 
pio\n", ret); - ret = devm_spi_register_controller(&pdev->dev, controller); - if (ret < 0) { - dev_err(&pdev->dev, "spi_register_controller error.\n"); - goto out_controller_put; - } - return 0; out_controller_put: diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c index 79b1558b74b8..e8a499cd1f13 100644 --- a/drivers/spi/spi-fsl-qspi.c +++ b/drivers/spi/spi-fsl-qspi.c @@ -410,7 +410,7 @@ static bool fsl_qspi_supports_op(struct spi_mem *mem, op->data.nbytes > q->devtype_data->txfifo) return false; - return true; + return spi_mem_default_supports_op(mem, op); } static void fsl_qspi_prepare_lut(struct fsl_qspi *q, diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c index 114801a32371..3b81772fea0d 100644 --- a/drivers/spi/spi-fsl-spi.c +++ b/drivers/spi/spi-fsl-spi.c @@ -611,6 +611,7 @@ static struct spi_master * fsl_spi_probe(struct device *dev, master->setup = fsl_spi_setup; master->cleanup = fsl_spi_cleanup; master->transfer_one_message = fsl_spi_do_one_msg; + master->use_gpio_descriptors = true; mpc8xxx_spi = spi_master_get_devdata(master); mpc8xxx_spi->max_bits_per_word = 32; @@ -705,8 +706,8 @@ static int of_fsl_spi_probe(struct platform_device *ofdev) struct device_node *np = ofdev->dev.of_node; struct spi_master *master; struct resource mem; - int irq = 0, type; - int ret = -ENOMEM; + int irq, type; + int ret; ret = of_mpc8xxx_spi_probe(ofdev); if (ret) @@ -721,37 +722,35 @@ static int of_fsl_spi_probe(struct platform_device *ofdev) if (spisel_boot) { pinfo->immr_spi_cs = ioremap(get_immrbase() + IMMR_SPI_CS_OFFSET, 4); - if (!pinfo->immr_spi_cs) { - ret = -ENOMEM; - goto err; - } + if (!pinfo->immr_spi_cs) + return -ENOMEM; } #endif - - pdata->cs_control = fsl_spi_cs_control; + /* + * Handle the case where we have one hardwired (always selected) + * device on the first "chipselect". Else we let the core code + * handle any GPIOs or native chip selects and assign the + * appropriate callback for dealing with the CS lines. This isn't + * supported on the GRLIB variant. + */ + ret = gpiod_count(dev, "cs"); + if (ret <= 0) + pdata->max_chipselect = 1; + else + pdata->cs_control = fsl_spi_cs_control; } ret = of_address_to_resource(np, 0, &mem); if (ret) - goto err; + return ret; - irq = irq_of_parse_and_map(np, 0); - if (!irq) { - ret = -EINVAL; - goto err; - } + irq = platform_get_irq(ofdev, 0); + if (irq < 0) + return irq; master = fsl_spi_probe(dev, &mem, irq); - if (IS_ERR(master)) { - ret = PTR_ERR(master); - goto err; - } - - return 0; -err: - irq_dispose_mapping(irq); - return ret; + return PTR_ERR_OR_ZERO(master); } static int of_fsl_spi_remove(struct platform_device *ofdev) diff --git a/drivers/spi/spi-hisi-sfc-v3xx.c b/drivers/spi/spi-hisi-sfc-v3xx.c new file mode 100644 index 000000000000..4cf8fc80a7b7 --- /dev/null +++ b/drivers/spi/spi-hisi-sfc-v3xx.c @@ -0,0 +1,284 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// HiSilicon SPI NOR V3XX Flash Controller Driver for hi16xx chipsets +// +// Copyright (c) 2019 HiSilicon Technologies Co., Ltd. 
+// Author: John Garry <john.garry@huawei.com> + +#include <linux/acpi.h> +#include <linux/bitops.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/spi/spi.h> +#include <linux/spi/spi-mem.h> + +#define HISI_SFC_V3XX_VERSION (0x1f8) + +#define HISI_SFC_V3XX_CMD_CFG (0x300) +#define HISI_SFC_V3XX_CMD_CFG_DATA_CNT_OFF 9 +#define HISI_SFC_V3XX_CMD_CFG_RW_MSK BIT(8) +#define HISI_SFC_V3XX_CMD_CFG_DATA_EN_MSK BIT(7) +#define HISI_SFC_V3XX_CMD_CFG_DUMMY_CNT_OFF 4 +#define HISI_SFC_V3XX_CMD_CFG_ADDR_EN_MSK BIT(3) +#define HISI_SFC_V3XX_CMD_CFG_CS_SEL_OFF 1 +#define HISI_SFC_V3XX_CMD_CFG_START_MSK BIT(0) +#define HISI_SFC_V3XX_CMD_INS (0x308) +#define HISI_SFC_V3XX_CMD_ADDR (0x30c) +#define HISI_SFC_V3XX_CMD_DATABUF0 (0x400) + +struct hisi_sfc_v3xx_host { + struct device *dev; + void __iomem *regbase; + int max_cmd_dword; +}; + +#define HISI_SFC_V3XX_WAIT_TIMEOUT_US 1000000 +#define HISI_SFC_V3XX_WAIT_POLL_INTERVAL_US 10 + +static int hisi_sfc_v3xx_wait_cmd_idle(struct hisi_sfc_v3xx_host *host) +{ + u32 reg; + + return readl_poll_timeout(host->regbase + HISI_SFC_V3XX_CMD_CFG, reg, + !(reg & HISI_SFC_V3XX_CMD_CFG_START_MSK), + HISI_SFC_V3XX_WAIT_POLL_INTERVAL_US, + HISI_SFC_V3XX_WAIT_TIMEOUT_US); +} + +static int hisi_sfc_v3xx_adjust_op_size(struct spi_mem *mem, + struct spi_mem_op *op) +{ + struct spi_device *spi = mem->spi; + struct hisi_sfc_v3xx_host *host; + uintptr_t addr = (uintptr_t)op->data.buf.in; + int max_byte_count; + + host = spi_controller_get_devdata(spi->master); + + max_byte_count = host->max_cmd_dword * 4; + + if (!IS_ALIGNED(addr, 4) && op->data.nbytes >= 4) + op->data.nbytes = 4 - (addr % 4); + else if (op->data.nbytes > max_byte_count) + op->data.nbytes = max_byte_count; + + return 0; +} + +/* + * memcpy_{to,from}io doesn't guarantee 32-bit accesses - which we require for + * the DATABUF registers - so use __io{read,write}32_copy when possible. For + * trailing bytes, copy them byte-by-byte from the DATABUF register, as we + * must not clobber memory outside the source/dest buffer. + * + * For efficient data read/write, we try to put any leading 32-bit-unaligned + * data into a separate transaction in hisi_sfc_v3xx_adjust_op_size().
+ */ +static void hisi_sfc_v3xx_read_databuf(struct hisi_sfc_v3xx_host *host, + u8 *to, unsigned int len) +{ + void __iomem *from; + int i; + + from = host->regbase + HISI_SFC_V3XX_CMD_DATABUF0; + + if (IS_ALIGNED((uintptr_t)to, 4)) { + int words = len / 4; + + __ioread32_copy(to, from, words); + + len -= words * 4; + if (len) { + u32 val; + + to += words * 4; + from += words * 4; + + val = __raw_readl(from); + + for (i = 0; i < len; i++, val >>= 8, to++) + *to = (u8)val; + } + } else { + for (i = 0; i < DIV_ROUND_UP(len, 4); i++, from += 4) { + u32 val = __raw_readl(from); + int j; + + for (j = 0; j < 4 && (j + (i * 4) < len); + to++, val >>= 8, j++) + *to = (u8)val; + } + } +} + +static void hisi_sfc_v3xx_write_databuf(struct hisi_sfc_v3xx_host *host, + const u8 *from, unsigned int len) +{ + void __iomem *to; + int i; + + to = host->regbase + HISI_SFC_V3XX_CMD_DATABUF0; + + if (IS_ALIGNED((uintptr_t)from, 4)) { + int words = len / 4; + + __iowrite32_copy(to, from, words); + + len -= words * 4; + if (len) { + u32 val = 0; + + to += words * 4; + from += words * 4; + + for (i = 0; i < len; i++, from++) + val |= *from << i * 8; + __raw_writel(val, to); + } + + } else { + for (i = 0; i < DIV_ROUND_UP(len, 4); i++, to += 4) { + u32 val = 0; + int j; + + for (j = 0; j < 4 && (j + (i * 4) < len); + from++, j++) + val |= *from << j * 8; + __raw_writel(val, to); + } + } +} + +static int hisi_sfc_v3xx_generic_exec_op(struct hisi_sfc_v3xx_host *host, + const struct spi_mem_op *op, + u8 chip_select) +{ + int ret, len = op->data.nbytes; + u32 config = 0; + + if (op->addr.nbytes) + config |= HISI_SFC_V3XX_CMD_CFG_ADDR_EN_MSK; + + if (op->data.dir != SPI_MEM_NO_DATA) { + config |= (len - 1) << HISI_SFC_V3XX_CMD_CFG_DATA_CNT_OFF; + config |= HISI_SFC_V3XX_CMD_CFG_DATA_EN_MSK; + } + + if (op->data.dir == SPI_MEM_DATA_OUT) + hisi_sfc_v3xx_write_databuf(host, op->data.buf.out, len); + else if (op->data.dir == SPI_MEM_DATA_IN) + config |= HISI_SFC_V3XX_CMD_CFG_RW_MSK; + + config |= op->dummy.nbytes << HISI_SFC_V3XX_CMD_CFG_DUMMY_CNT_OFF | + chip_select << HISI_SFC_V3XX_CMD_CFG_CS_SEL_OFF | + HISI_SFC_V3XX_CMD_CFG_START_MSK; + + writel(op->addr.val, host->regbase + HISI_SFC_V3XX_CMD_ADDR); + writel(op->cmd.opcode, host->regbase + HISI_SFC_V3XX_CMD_INS); + + writel(config, host->regbase + HISI_SFC_V3XX_CMD_CFG); + + ret = hisi_sfc_v3xx_wait_cmd_idle(host); + if (ret) + return ret; + + if (op->data.dir == SPI_MEM_DATA_IN) + hisi_sfc_v3xx_read_databuf(host, op->data.buf.in, len); + + return 0; +} + +static int hisi_sfc_v3xx_exec_op(struct spi_mem *mem, + const struct spi_mem_op *op) +{ + struct hisi_sfc_v3xx_host *host; + struct spi_device *spi = mem->spi; + u8 chip_select = spi->chip_select; + + host = spi_controller_get_devdata(spi->master); + + return hisi_sfc_v3xx_generic_exec_op(host, op, chip_select); +} + +static const struct spi_controller_mem_ops hisi_sfc_v3xx_mem_ops = { + .adjust_op_size = hisi_sfc_v3xx_adjust_op_size, + .exec_op = hisi_sfc_v3xx_exec_op, +}; + +static int hisi_sfc_v3xx_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct hisi_sfc_v3xx_host *host; + struct spi_controller *ctlr; + u32 version; + int ret; + + ctlr = spi_alloc_master(&pdev->dev, sizeof(*host)); + if (!ctlr) + return -ENOMEM; + + ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | + SPI_TX_DUAL | SPI_TX_QUAD; + + host = spi_controller_get_devdata(ctlr); + host->dev = dev; + + platform_set_drvdata(pdev, host); + + host->regbase = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(host->regbase)) { 
+ ret = PTR_ERR(host->regbase); + goto err_put_master; + } + + ctlr->bus_num = -1; + ctlr->num_chipselect = 1; + ctlr->mem_ops = &hisi_sfc_v3xx_mem_ops; + + version = readl(host->regbase + HISI_SFC_V3XX_VERSION); + + switch (version) { + case 0x351: + host->max_cmd_dword = 64; + break; + default: + host->max_cmd_dword = 16; + break; + } + + ret = devm_spi_register_controller(dev, ctlr); + if (ret) + goto err_put_master; + + dev_info(&pdev->dev, "hw version 0x%x\n", version); + + return 0; + +err_put_master: + spi_master_put(ctlr); + return ret; +} + +#if IS_ENABLED(CONFIG_ACPI) +static const struct acpi_device_id hisi_sfc_v3xx_acpi_ids[] = { + {"HISI0341", 0}, + {} +}; +MODULE_DEVICE_TABLE(acpi, hisi_sfc_v3xx_acpi_ids); +#endif + +static struct platform_driver hisi_sfc_v3xx_spi_driver = { + .driver = { + .name = "hisi-sfc-v3xx", + .acpi_match_table = ACPI_PTR(hisi_sfc_v3xx_acpi_ids), + }, + .probe = hisi_sfc_v3xx_probe, +}; + +module_platform_driver(hisi_sfc_v3xx_spi_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("John Garry <john.garry@huawei.com>"); +MODULE_DESCRIPTION("HiSilicon SPI NOR V3XX Flash Controller Driver for hi16xx chipsets"); diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c index f4a8f470aecc..8543f5ed1099 100644 --- a/drivers/spi/spi-img-spfi.c +++ b/drivers/spi/spi-img-spfi.c @@ -666,8 +666,22 @@ static int img_spfi_probe(struct platform_device *pdev) master->unprepare_message = img_spfi_unprepare; master->handle_err = img_spfi_handle_err; - spfi->tx_ch = dma_request_slave_channel(spfi->dev, "tx"); - spfi->rx_ch = dma_request_slave_channel(spfi->dev, "rx"); + spfi->tx_ch = dma_request_chan(spfi->dev, "tx"); + if (IS_ERR(spfi->tx_ch)) { + ret = PTR_ERR(spfi->tx_ch); + spfi->tx_ch = NULL; + if (ret == -EPROBE_DEFER) + goto disable_pm; + } + + spfi->rx_ch = dma_request_chan(spfi->dev, "rx"); + if (IS_ERR(spfi->rx_ch)) { + ret = PTR_ERR(spfi->rx_ch); + spfi->rx_ch = NULL; + if (ret == -EPROBE_DEFER) + goto disable_pm; + } + if (!spfi->tx_ch || !spfi->rx_ch) { if (spfi->tx_ch) dma_release_channel(spfi->tx_ch); diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index 49f0099db0cb..f4f28a400a96 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -1230,9 +1230,9 @@ static int spi_imx_setupxfer(struct spi_device *spi, } if (spi_imx_can_dma(spi_imx->bitbang.master, spi, t)) - spi_imx->usedma = 1; + spi_imx->usedma = true; else - spi_imx->usedma = 0; + spi_imx->usedma = false; if (is_imx53_ecspi(spi_imx) && spi_imx->slave_mode) { spi_imx->rx = mx53_ecspi_rx_slave; diff --git a/drivers/spi/spi-jcore.c b/drivers/spi/spi-jcore.c index cc49fa41fbab..bba10f030e33 100644 --- a/drivers/spi/spi-jcore.c +++ b/drivers/spi/spi-jcore.c @@ -170,7 +170,7 @@ static int jcore_spi_probe(struct platform_device *pdev) if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res), pdev->name)) goto exit_busy; - hw->base = devm_ioremap_nocache(&pdev->dev, res->start, + hw->base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!hw->base) goto exit_busy; diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c index f3f10443f9e2..7f5680fe2568 100644 --- a/drivers/spi/spi-meson-spicc.c +++ b/drivers/spi/spi-meson-spicc.c @@ -19,7 +19,6 @@ #include <linux/types.h> #include <linux/interrupt.h> #include <linux/reset.h> -#include <linux/gpio.h> /* * The Meson SPICC controller could support DMA based transfers, but is not @@ -467,35 +466,14 @@ static int meson_spicc_unprepare_transfer(struct spi_master *master) static int 
meson_spicc_setup(struct spi_device *spi) { - int ret = 0; - if (!spi->controller_state) spi->controller_state = spi_master_get_devdata(spi->master); - else if (gpio_is_valid(spi->cs_gpio)) - goto out_gpio; - else if (spi->cs_gpio == -ENOENT) - return 0; - - if (gpio_is_valid(spi->cs_gpio)) { - ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev)); - if (ret) { - dev_err(&spi->dev, "failed to request cs gpio\n"); - return ret; - } - } - -out_gpio: - ret = gpio_direction_output(spi->cs_gpio, - !(spi->mode & SPI_CS_HIGH)); - return ret; + return 0; } static void meson_spicc_cleanup(struct spi_device *spi) { - if (gpio_is_valid(spi->cs_gpio)) - gpio_free(spi->cs_gpio); - spi->controller_state = NULL; } @@ -564,6 +542,7 @@ static int meson_spicc_probe(struct platform_device *pdev) master->prepare_message = meson_spicc_prepare_message; master->unprepare_transfer_hardware = meson_spicc_unprepare_transfer; master->transfer_one = meson_spicc_transfer_one; + master->use_gpio_descriptors = true; /* Setup max rate according to the Meson GX datasheet */ if ((rate >> 2) > SPICC_MAX_FREQ) diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c index 996c1c8a9c71..dce85ee07cd0 100644 --- a/drivers/spi/spi-mxs.c +++ b/drivers/spi/spi-mxs.c @@ -590,10 +590,10 @@ static int mxs_spi_probe(struct platform_device *pdev) if (ret) goto out_master_free; - ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx"); - if (!ssp->dmach) { + ssp->dmach = dma_request_chan(&pdev->dev, "rx-tx"); + if (IS_ERR(ssp->dmach)) { dev_err(ssp->dev, "Failed to request DMA\n"); - ret = -ENODEV; + ret = PTR_ERR(ssp->dmach); goto out_master_free; } diff --git a/drivers/spi/spi-npcm-fiu.c b/drivers/spi/spi-npcm-fiu.c index cb52fd8008d0..d25ee32862e0 100644 --- a/drivers/spi/spi-npcm-fiu.c +++ b/drivers/spi/spi-npcm-fiu.c @@ -603,7 +603,7 @@ static int npcm_fiu_dirmap_create(struct spi_mem_dirmap_desc *desc) if (!chip->flash_region_mapped_ptr) { chip->flash_region_mapped_ptr = - devm_ioremap_nocache(fiu->dev, (fiu->res_mem->start + + devm_ioremap(fiu->dev, (fiu->res_mem->start + (fiu->info->max_map_size * desc->mem->spi->chip_select)), (u32)desc->info.length); diff --git a/drivers/spi/spi-npcm-pspi.c b/drivers/spi/spi-npcm-pspi.c index fe624731c74c..87cd0233c60b 100644 --- a/drivers/spi/spi-npcm-pspi.c +++ b/drivers/spi/spi-npcm-pspi.c @@ -12,6 +12,7 @@ #include <linux/spi/spi.h> #include <linux/gpio.h> #include <linux/of_gpio.h> +#include <linux/reset.h> #include <asm/unaligned.h> @@ -20,7 +21,7 @@ struct npcm_pspi { struct completion xfer_done; - struct regmap *rst_regmap; + struct reset_control *reset; struct spi_master *master; unsigned int tx_bytes; unsigned int rx_bytes; @@ -59,12 +60,6 @@ struct npcm_pspi { #define NPCM_PSPI_MIN_CLK_DIVIDER 4 #define NPCM_PSPI_DEFAULT_CLK 25000000 -/* reset register */ -#define NPCM7XX_IPSRST2_OFFSET 0x24 - -#define NPCM7XX_PSPI1_RESET BIT(22) -#define NPCM7XX_PSPI2_RESET BIT(23) - static inline unsigned int bytes_per_word(unsigned int bits) { return bits <= 8 ? 1 : 2; } @@ -178,6 +173,13 @@ static void npcm_pspi_setup_transfer(struct spi_device *spi, priv->mode = spi->mode; } + /* + * If the transfer length is even and 8 bits per word are requested, + * promote it to a 16 bits-per-word transfer.
+ */ + if (priv->bits_per_word == 8 && !(t->len & 0x1)) + t->bits_per_word = 16; + if (!priv->is_save_param || priv->bits_per_word != t->bits_per_word) { npcm_pspi_set_transfer_size(priv, t->bits_per_word); priv->bits_per_word = t->bits_per_word; @@ -195,6 +197,7 @@ static void npcm_pspi_setup_transfer(struct spi_device *spi, static void npcm_pspi_send(struct npcm_pspi *priv) { int wsize; + u16 val; wsize = min(bytes_per_word(priv->bits_per_word), priv->tx_bytes); priv->tx_bytes -= wsize; @@ -204,17 +207,18 @@ static void npcm_pspi_send(struct npcm_pspi *priv) switch (wsize) { case 1: - iowrite8(*priv->tx_buf, NPCM_PSPI_DATA + priv->base); + val = *priv->tx_buf++; + iowrite8(val, NPCM_PSPI_DATA + priv->base); break; case 2: - iowrite16(*priv->tx_buf, NPCM_PSPI_DATA + priv->base); + val = *priv->tx_buf++; + val = *priv->tx_buf++ | (val << 8); + iowrite16(val, NPCM_PSPI_DATA + priv->base); break; default: WARN_ON_ONCE(1); return; } - - priv->tx_buf += wsize; } static void npcm_pspi_recv(struct npcm_pspi *priv) @@ -230,18 +234,17 @@ static void npcm_pspi_recv(struct npcm_pspi *priv) switch (rsize) { case 1: - val = ioread8(priv->base + NPCM_PSPI_DATA); + *priv->rx_buf++ = ioread8(priv->base + NPCM_PSPI_DATA); break; case 2: val = ioread16(priv->base + NPCM_PSPI_DATA); + *priv->rx_buf++ = (val >> 8); + *priv->rx_buf++ = val & 0xff; break; default: WARN_ON_ONCE(1); return; } - - *priv->rx_buf = val; - priv->rx_buf += rsize; } static int npcm_pspi_transfer_one(struct spi_master *master, @@ -285,9 +288,9 @@ static int npcm_pspi_unprepare_transfer_hardware(struct spi_master *master) static void npcm_pspi_reset_hw(struct npcm_pspi *priv) { - regmap_write(priv->rst_regmap, NPCM7XX_IPSRST2_OFFSET, - NPCM7XX_PSPI1_RESET << priv->id); - regmap_write(priv->rst_regmap, NPCM7XX_IPSRST2_OFFSET, 0x0); + reset_control_assert(priv->reset); + udelay(5); + reset_control_deassert(priv->reset); } static irqreturn_t npcm_pspi_handler(int irq, void *dev_id) @@ -351,10 +354,6 @@ static int npcm_pspi_probe(struct platform_device *pdev) if (num_cs < 0) return num_cs; - pdev->id = of_alias_get_id(np, "spi"); - if (pdev->id < 0) - pdev->id = 0; - master = spi_alloc_master(&pdev->dev, sizeof(*priv)); if (!master) return -ENOMEM; @@ -364,7 +363,6 @@ static int npcm_pspi_probe(struct platform_device *pdev) priv = spi_master_get_devdata(master); priv->master = master; priv->is_save_param = false; - priv->id = pdev->id; priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) { @@ -389,11 +387,10 @@ static int npcm_pspi_probe(struct platform_device *pdev) goto out_disable_clk; } - priv->rst_regmap = - syscon_regmap_lookup_by_compatible("nuvoton,npcm750-rst"); - if (IS_ERR(priv->rst_regmap)) { - dev_err(&pdev->dev, "failed to find nuvoton,npcm750-rst\n"); - return PTR_ERR(priv->rst_regmap); + priv->reset = devm_reset_control_get(&pdev->dev, NULL); + if (IS_ERR(priv->reset)) { + ret = PTR_ERR(priv->reset); + goto out_disable_clk; } /* reset SPI-HW block */ @@ -414,7 +411,7 @@ static int npcm_pspi_probe(struct platform_device *pdev) master->min_speed_hz = DIV_ROUND_UP(clk_hz, NPCM_PSPI_MAX_CLK_DIVIDER); master->mode_bits = SPI_CPHA | SPI_CPOL; master->dev.of_node = pdev->dev.of_node; - master->bus_num = pdev->id; + master->bus_num = -1; master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16); master->transfer_one = npcm_pspi_transfer_one; master->prepare_transfer_hardware = @@ -447,7 +444,7 @@ static int npcm_pspi_probe(struct platform_device *pdev) if (ret) goto out_disable_clk; - pr_info("NPCM 
Peripheral SPI %d probed\n", pdev->id); + pr_info("NPCM Peripheral SPI %d probed\n", master->bus_num); return 0; diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c index c36bb1bb464e..8c5084a3a617 100644 --- a/drivers/spi/spi-nxp-fspi.c +++ b/drivers/spi/spi-nxp-fspi.c @@ -439,7 +439,7 @@ static bool nxp_fspi_supports_op(struct spi_mem *mem, op->data.nbytes > f->devtype_data->txfifo) return false; - return true; + return spi_mem_default_supports_op(mem, op); } /* Instead of busy looping invoke readl_poll_timeout functionality. */ diff --git a/drivers/spi/spi-oc-tiny.c b/drivers/spi/spi-oc-tiny.c index e2331eb7b47a..9df7c5979c29 100644 --- a/drivers/spi/spi-oc-tiny.c +++ b/drivers/spi/spi-oc-tiny.c @@ -20,7 +20,6 @@ #include <linux/spi/spi_bitbang.h> #include <linux/spi/spi_oc_tiny.h> #include <linux/io.h> -#include <linux/gpio.h> #include <linux/of.h> #define DRV_NAME "spi_oc_tiny" @@ -50,8 +49,6 @@ struct tiny_spi { unsigned int txc, rxc; const u8 *txp; u8 *rxp; - int gpio_cs_count; - int *gpio_cs; }; static inline struct tiny_spi *tiny_spi_to_hw(struct spi_device *sdev) @@ -66,16 +63,6 @@ static unsigned int tiny_spi_baud(struct spi_device *spi, unsigned int hz) return min(DIV_ROUND_UP(hw->freq, hz * 2), (1U << hw->baudwidth)) - 1; } -static void tiny_spi_chipselect(struct spi_device *spi, int is_active) -{ - struct tiny_spi *hw = tiny_spi_to_hw(spi); - - if (hw->gpio_cs_count > 0) { - gpio_set_value(hw->gpio_cs[spi->chip_select], - (spi->mode & SPI_CS_HIGH) ? is_active : !is_active); - } -} - static int tiny_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) { @@ -203,24 +190,10 @@ static int tiny_spi_of_probe(struct platform_device *pdev) { struct tiny_spi *hw = platform_get_drvdata(pdev); struct device_node *np = pdev->dev.of_node; - unsigned int i; u32 val; if (!np) return 0; - hw->gpio_cs_count = of_gpio_count(np); - if (hw->gpio_cs_count > 0) { - hw->gpio_cs = devm_kcalloc(&pdev->dev, - hw->gpio_cs_count, sizeof(unsigned int), - GFP_KERNEL); - if (!hw->gpio_cs) - return -ENOMEM; - } - for (i = 0; i < hw->gpio_cs_count; i++) { - hw->gpio_cs[i] = of_get_gpio_flags(np, i, NULL); - if (hw->gpio_cs[i] < 0) - return -ENODEV; - } hw->bitbang.master->dev.of_node = pdev->dev.of_node; if (!of_property_read_u32(np, "clock-frequency", &val)) hw->freq = val; @@ -240,7 +213,6 @@ static int tiny_spi_probe(struct platform_device *pdev) struct tiny_spi_platform_data *platp = dev_get_platdata(&pdev->dev); struct tiny_spi *hw; struct spi_master *master; - unsigned int i; int err = -ENODEV; master = spi_alloc_master(&pdev->dev, sizeof(struct tiny_spi)); @@ -249,9 +221,9 @@ static int tiny_spi_probe(struct platform_device *pdev) /* setup the master state. 
*/ master->bus_num = pdev->id; - master->num_chipselect = 255; master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; master->setup = tiny_spi_setup; + master->use_gpio_descriptors = true; hw = spi_master_get_devdata(master); platform_set_drvdata(pdev, hw); @@ -259,7 +231,6 @@ static int tiny_spi_probe(struct platform_device *pdev) /* setup the state for the bitbang driver */ hw->bitbang.master = master; hw->bitbang.setup_transfer = tiny_spi_setup_transfer; - hw->bitbang.chipselect = tiny_spi_chipselect; hw->bitbang.txrx_bufs = tiny_spi_txrx_bufs; /* find and map our resources */ @@ -279,12 +250,6 @@ static int tiny_spi_probe(struct platform_device *pdev) } /* find platform data */ if (platp) { - hw->gpio_cs_count = platp->gpio_cs_count; - hw->gpio_cs = platp->gpio_cs; - if (platp->gpio_cs_count && !platp->gpio_cs) { - err = -EBUSY; - goto exit; - } hw->freq = platp->freq; hw->baudwidth = platp->baudwidth; } else { @@ -292,13 +257,6 @@ static int tiny_spi_probe(struct platform_device *pdev) if (err) goto exit; } - for (i = 0; i < hw->gpio_cs_count; i++) { - err = gpio_request(hw->gpio_cs[i], dev_name(&pdev->dev)); - if (err) - goto exit_gpio; - gpio_direction_output(hw->gpio_cs[i], 1); - } - hw->bitbang.master->num_chipselect = max(1, hw->gpio_cs_count); /* register our spi controller */ err = spi_bitbang_start(&hw->bitbang); @@ -308,9 +266,6 @@ static int tiny_spi_probe(struct platform_device *pdev) return 0; -exit_gpio: - while (i-- > 0) - gpio_free(hw->gpio_cs[i]); exit: spi_master_put(master); return err; @@ -320,11 +275,8 @@ static int tiny_spi_remove(struct platform_device *pdev) { struct tiny_spi *hw = platform_get_drvdata(pdev); struct spi_master *master = hw->bitbang.master; - unsigned int i; spi_bitbang_stop(&hw->bitbang); - for (i = 0; i < hw->gpio_cs_count; i++) - gpio_free(hw->gpio_cs[i]); spi_master_put(master); return 0; } diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 16b6b2ad4e7c..4c7a71f0fb3e 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -461,6 +461,16 @@ int pxa2xx_spi_flush(struct driver_data *drv_data) return limit; } +static void pxa2xx_spi_off(struct driver_data *drv_data) +{ + /* On MMP, disabling SSE seems to corrupt the rx fifo */ + if (drv_data->ssp_type == MMP2_SSP) + return; + + pxa2xx_spi_write(drv_data, SSCR0, + pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE); +} + static int null_writer(struct driver_data *drv_data) { u8 n_bytes = drv_data->n_bytes; @@ -587,8 +597,7 @@ static void int_error_stop(struct driver_data *drv_data, const char* msg) if (!pxa25x_ssp_comp(drv_data)) pxa2xx_spi_write(drv_data, SSTO, 0); pxa2xx_spi_flush(drv_data); - pxa2xx_spi_write(drv_data, SSCR0, - pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE); + pxa2xx_spi_off(drv_data); dev_err(&drv_data->pdev->dev, "%s\n", msg); @@ -686,8 +695,7 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data) static void handle_bad_msg(struct driver_data *drv_data) { - pxa2xx_spi_write(drv_data, SSCR0, - pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE); + pxa2xx_spi_off(drv_data); pxa2xx_spi_write(drv_data, SSCR1, pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1); if (!pxa25x_ssp_comp(drv_data)) @@ -1062,7 +1070,8 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller, || (pxa2xx_spi_read(drv_data, SSCR1) & change_mask) != (cr1 & change_mask)) { /* stop the SSP, and update the other bits */ - pxa2xx_spi_write(drv_data, SSCR0, cr0 & ~SSCR0_SSE); + if (drv_data->ssp_type != MMP2_SSP) + pxa2xx_spi_write(drv_data, SSCR0, cr0 
& ~SSCR0_SSE); if (!pxa25x_ssp_comp(drv_data)) pxa2xx_spi_write(drv_data, SSTO, chip->timeout); /* first set CR1 without interrupt and service enables */ @@ -1118,8 +1127,7 @@ static int pxa2xx_spi_slave_abort(struct spi_controller *controller) if (!pxa25x_ssp_comp(drv_data)) pxa2xx_spi_write(drv_data, SSTO, 0); pxa2xx_spi_flush(drv_data); - pxa2xx_spi_write(drv_data, SSCR0, - pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE); + pxa2xx_spi_off(drv_data); dev_dbg(&drv_data->pdev->dev, "transfer aborted\n"); @@ -1135,8 +1143,7 @@ static void pxa2xx_spi_handle_err(struct spi_controller *controller, struct driver_data *drv_data = spi_controller_get_devdata(controller); /* Disable the SSP */ - pxa2xx_spi_write(drv_data, SSCR0, - pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE); + pxa2xx_spi_off(drv_data); /* Clear and disable interrupts and service requests */ write_SSSR_CS(drv_data, drv_data->clear_sr); pxa2xx_spi_write(drv_data, SSCR1, @@ -1161,8 +1168,7 @@ static int pxa2xx_spi_unprepare_transfer(struct spi_controller *controller) struct driver_data *drv_data = spi_controller_get_devdata(controller); /* Disable the SSP now */ - pxa2xx_spi_write(drv_data, SSCR0, - pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE); + pxa2xx_spi_off(drv_data); return 0; } @@ -1423,6 +1429,9 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = { /* KBL-H */ { PCI_VDEVICE(INTEL, 0xa2a9), LPSS_SPT_SSP }, { PCI_VDEVICE(INTEL, 0xa2aa), LPSS_SPT_SSP }, + /* CML-V */ + { PCI_VDEVICE(INTEL, 0xa3a9), LPSS_SPT_SSP }, + { PCI_VDEVICE(INTEL, 0xa3aa), LPSS_SPT_SSP }, /* BXT A-Step */ { PCI_VDEVICE(INTEL, 0x0ac2), LPSS_BXT_SSP }, { PCI_VDEVICE(INTEL, 0x0ac4), LPSS_BXT_SSP }, @@ -1443,6 +1452,10 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = { { PCI_VDEVICE(INTEL, 0x4b2a), LPSS_BXT_SSP }, { PCI_VDEVICE(INTEL, 0x4b2b), LPSS_BXT_SSP }, { PCI_VDEVICE(INTEL, 0x4b37), LPSS_BXT_SSP }, + /* JSL */ + { PCI_VDEVICE(INTEL, 0x4daa), LPSS_CNL_SSP }, + { PCI_VDEVICE(INTEL, 0x4dab), LPSS_CNL_SSP }, + { PCI_VDEVICE(INTEL, 0x4dfb), LPSS_CNL_SSP }, /* APL */ { PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP }, { PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP }, diff --git a/drivers/spi/spi-qcom-qspi.c b/drivers/spi/spi-qcom-qspi.c index 250fd60e1678..3c4f83bf7084 100644 --- a/drivers/spi/spi-qcom-qspi.c +++ b/drivers/spi/spi-qcom-qspi.c @@ -137,7 +137,7 @@ enum qspi_clocks { struct qcom_qspi { void __iomem *base; struct device *dev; - struct clk_bulk_data clks[QSPI_NUM_CLKS]; + struct clk_bulk_data *clks; struct qspi_xfer xfer; /* Lock to protect xfer and IRQ accessed registers */ spinlock_t lock; @@ -445,6 +445,13 @@ static int qcom_qspi_probe(struct platform_device *pdev) goto exit_probe_master_put; } + ctrl->clks = devm_kcalloc(dev, QSPI_NUM_CLKS, + sizeof(*ctrl->clks), GFP_KERNEL); + if (!ctrl->clks) { + ret = -ENOMEM; + goto exit_probe_master_put; + } + ctrl->clks[QSPI_CLK_CORE].id = "core"; ctrl->clks[QSPI_CLK_IFACE].id = "iface"; ret = devm_clk_bulk_get(dev, QSPI_NUM_CLKS, ctrl->clks); diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c index 7222c7689c3c..85575d45901c 100644 --- a/drivers/spi/spi-rspi.c +++ b/drivers/spi/spi-rspi.c @@ -159,7 +159,7 @@ #define SPCMD_SPIMOD_DUAL SPCMD_SPIMOD0 #define SPCMD_SPIMOD_QUAD SPCMD_SPIMOD1 #define SPCMD_SPRW 0x0010 /* SPI Read/Write Access (Dual/Quad) */ -#define SPCMD_SSLA_MASK 0x0030 /* SSL Assert Signal Setting (RSPI) */ +#define SPCMD_SSLA(i) ((i) << 4) /* SSL Assert Signal Setting */ #define SPCMD_BRDV_MASK 0x000c /* Bit Rate Division Setting */ #define 
SPCMD_CPOL 0x0002 /* Clock Polarity Setting */ #define SPCMD_CPHA 0x0001 /* Clock Phase Setting */ @@ -242,6 +242,7 @@ struct spi_ops { u16 mode_bits; u16 flags; u16 fifo_size; + u8 num_hw_ss; }; /* @@ -426,8 +427,6 @@ static int qspi_set_receive_trigger(struct rspi_data *rspi, unsigned int len) return n; } -#define set_config_register(spi, n) spi->ops->set_config_register(spi, n) - static void rspi_enable_irq(const struct rspi_data *rspi, u8 enable) { rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | enable, RSPI_SPCR); @@ -620,9 +619,8 @@ no_dma_tx: dmaengine_terminate_all(rspi->ctlr->dma_rx); no_dma_rx: if (ret == -EAGAIN) { - pr_warn_once("%s %s: DMA not available, falling back to PIO\n", - dev_driver_string(&rspi->ctlr->dev), - dev_name(&rspi->ctlr->dev)); + dev_warn_once(&rspi->ctlr->dev, + "DMA not available, falling back to PIO\n"); } return ret; } @@ -936,12 +934,16 @@ static int rspi_prepare_message(struct spi_controller *ctlr, if (spi->mode & SPI_CPHA) rspi->spcmd |= SPCMD_CPHA; + /* Configure slave signal to assert */ + rspi->spcmd |= SPCMD_SSLA(spi->cs_gpiod ? rspi->ctlr->unused_native_cs + : spi->chip_select); + /* CMOS output mode and MOSI signal from previous transfer */ rspi->sppcr = 0; if (spi->mode & SPI_LOOP) rspi->sppcr |= SPPCR_SPLP; - set_config_register(rspi, 8); + rspi->ops->set_config_register(rspi, 8); if (msg->spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)) { @@ -1123,6 +1125,7 @@ static const struct spi_ops rspi_ops = { .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP, .flags = SPI_CONTROLLER_MUST_TX, .fifo_size = 8, + .num_hw_ss = 2, }; static const struct spi_ops rspi_rz_ops = { @@ -1131,6 +1134,7 @@ static const struct spi_ops rspi_rz_ops = { .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP, .flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX, .fifo_size = 8, /* 8 for TX, 32 for RX */ + .num_hw_ss = 1, }; static const struct spi_ops qspi_ops = { @@ -1141,6 +1145,7 @@ static const struct spi_ops qspi_ops = { SPI_RX_DUAL | SPI_RX_QUAD, .flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX, .fifo_size = 32, + .num_hw_ss = 1, }; #ifdef CONFIG_OF @@ -1256,6 +1261,8 @@ static int rspi_probe(struct platform_device *pdev) ctlr->mode_bits = ops->mode_bits; ctlr->flags = ops->flags; ctlr->dev.of_node = pdev->dev.of_node; + ctlr->use_gpio_descriptors = true; + ctlr->max_native_cs = rspi->ops->num_hw_ss; ret = platform_get_irq_byname_optional(pdev, "rx"); if (ret < 0) { @@ -1314,8 +1321,6 @@ error1: static const struct platform_device_id spi_driver_ids[] = { { "rspi", (kernel_ulong_t)&rspi_ops }, - { "rspi-rz", (kernel_ulong_t)&rspi_rz_ops }, - { "qspi", (kernel_ulong_t)&qspi_ops }, {}, }; diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index 8f134735291f..1c11a00a2c36 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c @@ -14,8 +14,6 @@ #include <linux/dma-mapping.h> #include <linux/dmaengine.h> #include <linux/err.h> -#include <linux/gpio.h> -#include <linux/gpio/consumer.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iopoll.h> @@ -55,7 +53,6 @@ struct sh_msiof_spi_priv { void *rx_dma_page; dma_addr_t tx_dma_addr; dma_addr_t rx_dma_addr; - unsigned short unused_ss; bool native_cs_inited; bool native_cs_high; bool slave_aborted; @@ -63,140 +60,140 @@ struct sh_msiof_spi_priv { #define MAX_SS 3 /* Maximum number of native chip selects */ -#define TMDR1 0x00 /* Transmit Mode Register 1 */ -#define TMDR2 0x04 /* Transmit Mode Register 2 */ -#define TMDR3 0x08 /* Transmit Mode Register 3 */ 
-#define RMDR1 0x10 /* Receive Mode Register 1 */ -#define RMDR2 0x14 /* Receive Mode Register 2 */ -#define RMDR3 0x18 /* Receive Mode Register 3 */ -#define TSCR 0x20 /* Transmit Clock Select Register */ -#define RSCR 0x22 /* Receive Clock Select Register (SH, A1, APE6) */ -#define CTR 0x28 /* Control Register */ -#define FCTR 0x30 /* FIFO Control Register */ -#define STR 0x40 /* Status Register */ -#define IER 0x44 /* Interrupt Enable Register */ -#define TDR1 0x48 /* Transmit Control Data Register 1 (SH, A1) */ -#define TDR2 0x4c /* Transmit Control Data Register 2 (SH, A1) */ -#define TFDR 0x50 /* Transmit FIFO Data Register */ -#define RDR1 0x58 /* Receive Control Data Register 1 (SH, A1) */ -#define RDR2 0x5c /* Receive Control Data Register 2 (SH, A1) */ -#define RFDR 0x60 /* Receive FIFO Data Register */ - -/* TMDR1 and RMDR1 */ -#define MDR1_TRMD BIT(31) /* Transfer Mode (1 = Master mode) */ -#define MDR1_SYNCMD_MASK GENMASK(29, 28) /* SYNC Mode */ -#define MDR1_SYNCMD_SPI (2 << 28)/* Level mode/SPI */ -#define MDR1_SYNCMD_LR (3 << 28)/* L/R mode */ -#define MDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */ -#define MDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */ -#define MDR1_DTDL_SHIFT 20 /* Data Pin Bit Delay for MSIOF_SYNC */ -#define MDR1_SYNCDL_SHIFT 16 /* Frame Sync Signal Timing Delay */ -#define MDR1_FLD_MASK GENMASK(3, 2) /* Frame Sync Signal Interval (0-3) */ -#define MDR1_FLD_SHIFT 2 -#define MDR1_XXSTP BIT(0) /* Transmission/Reception Stop on FIFO */ -/* TMDR1 */ -#define TMDR1_PCON BIT(30) /* Transfer Signal Connection */ -#define TMDR1_SYNCCH_MASK GENMASK(27, 26) /* Sync Signal Channel Select */ -#define TMDR1_SYNCCH_SHIFT 26 /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */ - -/* TMDR2 and RMDR2 */ -#define MDR2_BITLEN1(i) (((i) - 1) << 24) /* Data Size (8-32 bits) */ -#define MDR2_WDLEN1(i) (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1))) */ -#define MDR2_GRPMASK1 BIT(0) /* Group Output Mask 1 (SH, A1) */ - -/* TSCR and RSCR */ -#define SCR_BRPS_MASK GENMASK(12, 8) /* Prescaler Setting (1-32) */ -#define SCR_BRPS(i) (((i) - 1) << 8) -#define SCR_BRDV_MASK GENMASK(2, 0) /* Baud Rate Generator's Division Ratio */ -#define SCR_BRDV_DIV_2 0 -#define SCR_BRDV_DIV_4 1 -#define SCR_BRDV_DIV_8 2 -#define SCR_BRDV_DIV_16 3 -#define SCR_BRDV_DIV_32 4 -#define SCR_BRDV_DIV_1 7 - -/* CTR */ -#define CTR_TSCKIZ_MASK GENMASK(31, 30) /* Transmit Clock I/O Polarity Select */ -#define CTR_TSCKIZ_SCK BIT(31) /* Disable SCK when TX disabled */ -#define CTR_TSCKIZ_POL_SHIFT 30 /* Transmit Clock Polarity */ -#define CTR_RSCKIZ_MASK GENMASK(29, 28) /* Receive Clock Polarity Select */ -#define CTR_RSCKIZ_SCK BIT(29) /* Must match CTR_TSCKIZ_SCK */ -#define CTR_RSCKIZ_POL_SHIFT 28 /* Receive Clock Polarity */ -#define CTR_TEDG_SHIFT 27 /* Transmit Timing (1 = falling edge) */ -#define CTR_REDG_SHIFT 26 /* Receive Timing (1 = falling edge) */ -#define CTR_TXDIZ_MASK GENMASK(23, 22) /* Pin Output When TX is Disabled */ -#define CTR_TXDIZ_LOW (0 << 22) /* 0 */ -#define CTR_TXDIZ_HIGH (1 << 22) /* 1 */ -#define CTR_TXDIZ_HIZ (2 << 22) /* High-impedance */ -#define CTR_TSCKE BIT(15) /* Transmit Serial Clock Output Enable */ -#define CTR_TFSE BIT(14) /* Transmit Frame Sync Signal Output Enable */ -#define CTR_TXE BIT(9) /* Transmit Enable */ -#define CTR_RXE BIT(8) /* Receive Enable */ -#define CTR_TXRST BIT(1) /* Transmit Reset */ -#define CTR_RXRST BIT(0) /* Receive Reset */ - -/* FCTR */ -#define FCTR_TFWM_MASK GENMASK(31, 29) /* Transmit FIFO Watermark */ -#define 
FCTR_TFWM_64 (0 << 29) /* Transfer Request when 64 empty stages */ -#define FCTR_TFWM_32 (1 << 29) /* Transfer Request when 32 empty stages */ -#define FCTR_TFWM_24 (2 << 29) /* Transfer Request when 24 empty stages */ -#define FCTR_TFWM_16 (3 << 29) /* Transfer Request when 16 empty stages */ -#define FCTR_TFWM_12 (4 << 29) /* Transfer Request when 12 empty stages */ -#define FCTR_TFWM_8 (5 << 29) /* Transfer Request when 8 empty stages */ -#define FCTR_TFWM_4 (6 << 29) /* Transfer Request when 4 empty stages */ -#define FCTR_TFWM_1 (7 << 29) /* Transfer Request when 1 empty stage */ -#define FCTR_TFUA_MASK GENMASK(26, 20) /* Transmit FIFO Usable Area */ -#define FCTR_TFUA_SHIFT 20 -#define FCTR_TFUA(i) ((i) << FCTR_TFUA_SHIFT) -#define FCTR_RFWM_MASK GENMASK(15, 13) /* Receive FIFO Watermark */ -#define FCTR_RFWM_1 (0 << 13) /* Transfer Request when 1 valid stages */ -#define FCTR_RFWM_4 (1 << 13) /* Transfer Request when 4 valid stages */ -#define FCTR_RFWM_8 (2 << 13) /* Transfer Request when 8 valid stages */ -#define FCTR_RFWM_16 (3 << 13) /* Transfer Request when 16 valid stages */ -#define FCTR_RFWM_32 (4 << 13) /* Transfer Request when 32 valid stages */ -#define FCTR_RFWM_64 (5 << 13) /* Transfer Request when 64 valid stages */ -#define FCTR_RFWM_128 (6 << 13) /* Transfer Request when 128 valid stages */ -#define FCTR_RFWM_256 (7 << 13) /* Transfer Request when 256 valid stages */ -#define FCTR_RFUA_MASK GENMASK(12, 4) /* Receive FIFO Usable Area (0x40 = full) */ -#define FCTR_RFUA_SHIFT 4 -#define FCTR_RFUA(i) ((i) << FCTR_RFUA_SHIFT) - -/* STR */ -#define STR_TFEMP BIT(29) /* Transmit FIFO Empty */ -#define STR_TDREQ BIT(28) /* Transmit Data Transfer Request */ -#define STR_TEOF BIT(23) /* Frame Transmission End */ -#define STR_TFSERR BIT(21) /* Transmit Frame Synchronization Error */ -#define STR_TFOVF BIT(20) /* Transmit FIFO Overflow */ -#define STR_TFUDF BIT(19) /* Transmit FIFO Underflow */ -#define STR_RFFUL BIT(13) /* Receive FIFO Full */ -#define STR_RDREQ BIT(12) /* Receive Data Transfer Request */ -#define STR_REOF BIT(7) /* Frame Reception End */ -#define STR_RFSERR BIT(5) /* Receive Frame Synchronization Error */ -#define STR_RFUDF BIT(4) /* Receive FIFO Underflow */ -#define STR_RFOVF BIT(3) /* Receive FIFO Overflow */ - -/* IER */ -#define IER_TDMAE BIT(31) /* Transmit Data DMA Transfer Req. Enable */ -#define IER_TFEMPE BIT(29) /* Transmit FIFO Empty Enable */ -#define IER_TDREQE BIT(28) /* Transmit Data Transfer Request Enable */ -#define IER_TEOFE BIT(23) /* Frame Transmission End Enable */ -#define IER_TFSERRE BIT(21) /* Transmit Frame Sync Error Enable */ -#define IER_TFOVFE BIT(20) /* Transmit FIFO Overflow Enable */ -#define IER_TFUDFE BIT(19) /* Transmit FIFO Underflow Enable */ -#define IER_RDMAE BIT(15) /* Receive Data DMA Transfer Req. 
Enable */ -#define IER_RFFULE BIT(13) /* Receive FIFO Full Enable */ -#define IER_RDREQE BIT(12) /* Receive Data Transfer Request Enable */ -#define IER_REOFE BIT(7) /* Frame Reception End Enable */ -#define IER_RFSERRE BIT(5) /* Receive Frame Sync Error Enable */ -#define IER_RFUDFE BIT(4) /* Receive FIFO Underflow Enable */ -#define IER_RFOVFE BIT(3) /* Receive FIFO Overflow Enable */ +#define SITMDR1 0x00 /* Transmit Mode Register 1 */ +#define SITMDR2 0x04 /* Transmit Mode Register 2 */ +#define SITMDR3 0x08 /* Transmit Mode Register 3 */ +#define SIRMDR1 0x10 /* Receive Mode Register 1 */ +#define SIRMDR2 0x14 /* Receive Mode Register 2 */ +#define SIRMDR3 0x18 /* Receive Mode Register 3 */ +#define SITSCR 0x20 /* Transmit Clock Select Register */ +#define SIRSCR 0x22 /* Receive Clock Select Register (SH, A1, APE6) */ +#define SICTR 0x28 /* Control Register */ +#define SIFCTR 0x30 /* FIFO Control Register */ +#define SISTR 0x40 /* Status Register */ +#define SIIER 0x44 /* Interrupt Enable Register */ +#define SITDR1 0x48 /* Transmit Control Data Register 1 (SH, A1) */ +#define SITDR2 0x4c /* Transmit Control Data Register 2 (SH, A1) */ +#define SITFDR 0x50 /* Transmit FIFO Data Register */ +#define SIRDR1 0x58 /* Receive Control Data Register 1 (SH, A1) */ +#define SIRDR2 0x5c /* Receive Control Data Register 2 (SH, A1) */ +#define SIRFDR 0x60 /* Receive FIFO Data Register */ + +/* SITMDR1 and SIRMDR1 */ +#define SIMDR1_TRMD BIT(31) /* Transfer Mode (1 = Master mode) */ +#define SIMDR1_SYNCMD_MASK GENMASK(29, 28) /* SYNC Mode */ +#define SIMDR1_SYNCMD_SPI (2 << 28) /* Level mode/SPI */ +#define SIMDR1_SYNCMD_LR (3 << 28) /* L/R mode */ +#define SIMDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */ +#define SIMDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */ +#define SIMDR1_DTDL_SHIFT 20 /* Data Pin Bit Delay for MSIOF_SYNC */ +#define SIMDR1_SYNCDL_SHIFT 16 /* Frame Sync Signal Timing Delay */ +#define SIMDR1_FLD_MASK GENMASK(3, 2) /* Frame Sync Signal Interval (0-3) */ +#define SIMDR1_FLD_SHIFT 2 +#define SIMDR1_XXSTP BIT(0) /* Transmission/Reception Stop on FIFO */ +/* SITMDR1 */ +#define SITMDR1_PCON BIT(30) /* Transfer Signal Connection */ +#define SITMDR1_SYNCCH_MASK GENMASK(27, 26) /* Sync Signal Channel Select */ +#define SITMDR1_SYNCCH_SHIFT 26 /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */ + +/* SITMDR2 and SIRMDR2 */ +#define SIMDR2_BITLEN1(i) (((i) - 1) << 24) /* Data Size (8-32 bits) */ +#define SIMDR2_WDLEN1(i) (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1))) */ +#define SIMDR2_GRPMASK1 BIT(0) /* Group Output Mask 1 (SH, A1) */ + +/* SITSCR and SIRSCR */ +#define SISCR_BRPS_MASK GENMASK(12, 8) /* Prescaler Setting (1-32) */ +#define SISCR_BRPS(i) (((i) - 1) << 8) +#define SISCR_BRDV_MASK GENMASK(2, 0) /* Baud Rate Generator's Division Ratio */ +#define SISCR_BRDV_DIV_2 0 +#define SISCR_BRDV_DIV_4 1 +#define SISCR_BRDV_DIV_8 2 +#define SISCR_BRDV_DIV_16 3 +#define SISCR_BRDV_DIV_32 4 +#define SISCR_BRDV_DIV_1 7 + +/* SICTR */ +#define SICTR_TSCKIZ_MASK GENMASK(31, 30) /* Transmit Clock I/O Polarity Select */ +#define SICTR_TSCKIZ_SCK BIT(31) /* Disable SCK when TX disabled */ +#define SICTR_TSCKIZ_POL_SHIFT 30 /* Transmit Clock Polarity */ +#define SICTR_RSCKIZ_MASK GENMASK(29, 28) /* Receive Clock Polarity Select */ +#define SICTR_RSCKIZ_SCK BIT(29) /* Must match CTR_TSCKIZ_SCK */ +#define SICTR_RSCKIZ_POL_SHIFT 28 /* Receive Clock Polarity */ +#define SICTR_TEDG_SHIFT 27 /* Transmit Timing (1 = falling edge) */ +#define SICTR_REDG_SHIFT 26 /* Receive 
Timing (1 = falling edge) */ +#define SICTR_TXDIZ_MASK GENMASK(23, 22) /* Pin Output When TX is Disabled */ +#define SICTR_TXDIZ_LOW (0 << 22) /* 0 */ +#define SICTR_TXDIZ_HIGH (1 << 22) /* 1 */ +#define SICTR_TXDIZ_HIZ (2 << 22) /* High-impedance */ +#define SICTR_TSCKE BIT(15) /* Transmit Serial Clock Output Enable */ +#define SICTR_TFSE BIT(14) /* Transmit Frame Sync Signal Output Enable */ +#define SICTR_TXE BIT(9) /* Transmit Enable */ +#define SICTR_RXE BIT(8) /* Receive Enable */ +#define SICTR_TXRST BIT(1) /* Transmit Reset */ +#define SICTR_RXRST BIT(0) /* Receive Reset */ + +/* SIFCTR */ +#define SIFCTR_TFWM_MASK GENMASK(31, 29) /* Transmit FIFO Watermark */ +#define SIFCTR_TFWM_64 (0 << 29) /* Transfer Request when 64 empty stages */ +#define SIFCTR_TFWM_32 (1 << 29) /* Transfer Request when 32 empty stages */ +#define SIFCTR_TFWM_24 (2 << 29) /* Transfer Request when 24 empty stages */ +#define SIFCTR_TFWM_16 (3 << 29) /* Transfer Request when 16 empty stages */ +#define SIFCTR_TFWM_12 (4 << 29) /* Transfer Request when 12 empty stages */ +#define SIFCTR_TFWM_8 (5 << 29) /* Transfer Request when 8 empty stages */ +#define SIFCTR_TFWM_4 (6 << 29) /* Transfer Request when 4 empty stages */ +#define SIFCTR_TFWM_1 (7 << 29) /* Transfer Request when 1 empty stage */ +#define SIFCTR_TFUA_MASK GENMASK(26, 20) /* Transmit FIFO Usable Area */ +#define SIFCTR_TFUA_SHIFT 20 +#define SIFCTR_TFUA(i) ((i) << SIFCTR_TFUA_SHIFT) +#define SIFCTR_RFWM_MASK GENMASK(15, 13) /* Receive FIFO Watermark */ +#define SIFCTR_RFWM_1 (0 << 13) /* Transfer Request when 1 valid stages */ +#define SIFCTR_RFWM_4 (1 << 13) /* Transfer Request when 4 valid stages */ +#define SIFCTR_RFWM_8 (2 << 13) /* Transfer Request when 8 valid stages */ +#define SIFCTR_RFWM_16 (3 << 13) /* Transfer Request when 16 valid stages */ +#define SIFCTR_RFWM_32 (4 << 13) /* Transfer Request when 32 valid stages */ +#define SIFCTR_RFWM_64 (5 << 13) /* Transfer Request when 64 valid stages */ +#define SIFCTR_RFWM_128 (6 << 13) /* Transfer Request when 128 valid stages */ +#define SIFCTR_RFWM_256 (7 << 13) /* Transfer Request when 256 valid stages */ +#define SIFCTR_RFUA_MASK GENMASK(12, 4) /* Receive FIFO Usable Area (0x40 = full) */ +#define SIFCTR_RFUA_SHIFT 4 +#define SIFCTR_RFUA(i) ((i) << SIFCTR_RFUA_SHIFT) + +/* SISTR */ +#define SISTR_TFEMP BIT(29) /* Transmit FIFO Empty */ +#define SISTR_TDREQ BIT(28) /* Transmit Data Transfer Request */ +#define SISTR_TEOF BIT(23) /* Frame Transmission End */ +#define SISTR_TFSERR BIT(21) /* Transmit Frame Synchronization Error */ +#define SISTR_TFOVF BIT(20) /* Transmit FIFO Overflow */ +#define SISTR_TFUDF BIT(19) /* Transmit FIFO Underflow */ +#define SISTR_RFFUL BIT(13) /* Receive FIFO Full */ +#define SISTR_RDREQ BIT(12) /* Receive Data Transfer Request */ +#define SISTR_REOF BIT(7) /* Frame Reception End */ +#define SISTR_RFSERR BIT(5) /* Receive Frame Synchronization Error */ +#define SISTR_RFUDF BIT(4) /* Receive FIFO Underflow */ +#define SISTR_RFOVF BIT(3) /* Receive FIFO Overflow */ + +/* SIIER */ +#define SIIER_TDMAE BIT(31) /* Transmit Data DMA Transfer Req. 
Enable */ +#define SIIER_TFEMPE BIT(29) /* Transmit FIFO Empty Enable */ +#define SIIER_TDREQE BIT(28) /* Transmit Data Transfer Request Enable */ +#define SIIER_TEOFE BIT(23) /* Frame Transmission End Enable */ +#define SIIER_TFSERRE BIT(21) /* Transmit Frame Sync Error Enable */ +#define SIIER_TFOVFE BIT(20) /* Transmit FIFO Overflow Enable */ +#define SIIER_TFUDFE BIT(19) /* Transmit FIFO Underflow Enable */ +#define SIIER_RDMAE BIT(15) /* Receive Data DMA Transfer Req. Enable */ +#define SIIER_RFFULE BIT(13) /* Receive FIFO Full Enable */ +#define SIIER_RDREQE BIT(12) /* Receive Data Transfer Request Enable */ +#define SIIER_REOFE BIT(7) /* Frame Reception End Enable */ +#define SIIER_RFSERRE BIT(5) /* Receive Frame Sync Error Enable */ +#define SIIER_RFUDFE BIT(4) /* Receive FIFO Underflow Enable */ +#define SIIER_RFOVFE BIT(3) /* Receive FIFO Overflow Enable */ static u32 sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs) { switch (reg_offs) { - case TSCR: - case RSCR: + case SITSCR: + case SIRSCR: return ioread16(p->mapbase + reg_offs); default: return ioread32(p->mapbase + reg_offs); @@ -207,8 +204,8 @@ static void sh_msiof_write(struct sh_msiof_spi_priv *p, int reg_offs, u32 value) { switch (reg_offs) { - case TSCR: - case RSCR: + case SITSCR: + case SIRSCR: iowrite16(value, p->mapbase + reg_offs); break; default: @@ -223,12 +220,12 @@ static int sh_msiof_modify_ctr_wait(struct sh_msiof_spi_priv *p, u32 mask = clr | set; u32 data; - data = sh_msiof_read(p, CTR); + data = sh_msiof_read(p, SICTR); data &= ~clr; data |= set; - sh_msiof_write(p, CTR, data); + sh_msiof_write(p, SICTR, data); - return readl_poll_timeout_atomic(p->mapbase + CTR, data, + return readl_poll_timeout_atomic(p->mapbase + SICTR, data, (data & mask) == set, 1, 100); } @@ -237,7 +234,7 @@ static irqreturn_t sh_msiof_spi_irq(int irq, void *data) struct sh_msiof_spi_priv *p = data; /* just disable the interrupt and wake up */ - sh_msiof_write(p, IER, 0); + sh_msiof_write(p, SIIER, 0); complete(&p->done); return IRQ_HANDLED; @@ -245,20 +242,20 @@ static irqreturn_t sh_msiof_spi_irq(int irq, void *data) static void sh_msiof_spi_reset_regs(struct sh_msiof_spi_priv *p) { - u32 mask = CTR_TXRST | CTR_RXRST; + u32 mask = SICTR_TXRST | SICTR_RXRST; u32 data; - data = sh_msiof_read(p, CTR); + data = sh_msiof_read(p, SICTR); data |= mask; - sh_msiof_write(p, CTR, data); + sh_msiof_write(p, SICTR, data); - readl_poll_timeout_atomic(p->mapbase + CTR, data, !(data & mask), 1, + readl_poll_timeout_atomic(p->mapbase + SICTR, data, !(data & mask), 1, 100); } static const u32 sh_msiof_spi_div_array[] = { - SCR_BRDV_DIV_1, SCR_BRDV_DIV_2, SCR_BRDV_DIV_4, - SCR_BRDV_DIV_8, SCR_BRDV_DIV_16, SCR_BRDV_DIV_32, + SISCR_BRDV_DIV_1, SISCR_BRDV_DIV_2, SISCR_BRDV_DIV_4, + SISCR_BRDV_DIV_8, SISCR_BRDV_DIV_16, SISCR_BRDV_DIV_32, }; static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p, @@ -276,7 +273,7 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p, div = DIV_ROUND_UP(parent_rate, spi_hz); if (div <= 1024) { - /* SCR_BRDV_DIV_1 is valid only if BRPS is x 1/1 or x 1/2 */ + /* SISCR_BRDV_DIV_1 is valid only if BRPS is x 1/1 or x 1/2 */ if (!div_pow && div <= 32 && div > 2) div_pow = 1; @@ -295,10 +292,10 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p, brps = 32; } - scr = sh_msiof_spi_div_array[div_pow] | SCR_BRPS(brps); - sh_msiof_write(p, TSCR, scr); + scr = sh_msiof_spi_div_array[div_pow] | SISCR_BRPS(brps); + sh_msiof_write(p, SITSCR, scr); if (!(p->ctlr->flags & 
SPI_CONTROLLER_MUST_TX)) - sh_msiof_write(p, RSCR, scr); + sh_msiof_write(p, SIRSCR, scr); } static u32 sh_msiof_get_delay_bit(u32 dtdl_or_syncdl) @@ -337,8 +334,8 @@ static u32 sh_msiof_spi_get_dtdl_and_syncdl(struct sh_msiof_spi_priv *p) return 0; } - val = sh_msiof_get_delay_bit(p->info->dtdl) << MDR1_DTDL_SHIFT; - val |= sh_msiof_get_delay_bit(p->info->syncdl) << MDR1_SYNCDL_SHIFT; + val = sh_msiof_get_delay_bit(p->info->dtdl) << SIMDR1_DTDL_SHIFT; + val |= sh_msiof_get_delay_bit(p->info->syncdl) << SIMDR1_SYNCDL_SHIFT; return val; } @@ -357,54 +354,54 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss, * 1 0 11 11 0 0 * 1 1 11 11 1 1 */ - tmp = MDR1_SYNCMD_SPI | 1 << MDR1_FLD_SHIFT | MDR1_XXSTP; - tmp |= !cs_high << MDR1_SYNCAC_SHIFT; - tmp |= lsb_first << MDR1_BITLSB_SHIFT; + tmp = SIMDR1_SYNCMD_SPI | 1 << SIMDR1_FLD_SHIFT | SIMDR1_XXSTP; + tmp |= !cs_high << SIMDR1_SYNCAC_SHIFT; + tmp |= lsb_first << SIMDR1_BITLSB_SHIFT; tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p); if (spi_controller_is_slave(p->ctlr)) { - sh_msiof_write(p, TMDR1, tmp | TMDR1_PCON); + sh_msiof_write(p, SITMDR1, tmp | SITMDR1_PCON); } else { - sh_msiof_write(p, TMDR1, - tmp | MDR1_TRMD | TMDR1_PCON | - (ss < MAX_SS ? ss : 0) << TMDR1_SYNCCH_SHIFT); + sh_msiof_write(p, SITMDR1, + tmp | SIMDR1_TRMD | SITMDR1_PCON | + (ss < MAX_SS ? ss : 0) << SITMDR1_SYNCCH_SHIFT); } if (p->ctlr->flags & SPI_CONTROLLER_MUST_TX) { /* These bits are reserved if RX needs TX */ tmp &= ~0x0000ffff; } - sh_msiof_write(p, RMDR1, tmp); + sh_msiof_write(p, SIRMDR1, tmp); tmp = 0; - tmp |= CTR_TSCKIZ_SCK | cpol << CTR_TSCKIZ_POL_SHIFT; - tmp |= CTR_RSCKIZ_SCK | cpol << CTR_RSCKIZ_POL_SHIFT; + tmp |= SICTR_TSCKIZ_SCK | cpol << SICTR_TSCKIZ_POL_SHIFT; + tmp |= SICTR_RSCKIZ_SCK | cpol << SICTR_RSCKIZ_POL_SHIFT; edge = cpol ^ !cpha; - tmp |= edge << CTR_TEDG_SHIFT; - tmp |= edge << CTR_REDG_SHIFT; - tmp |= tx_hi_z ? CTR_TXDIZ_HIZ : CTR_TXDIZ_LOW; - sh_msiof_write(p, CTR, tmp); + tmp |= edge << SICTR_TEDG_SHIFT; + tmp |= edge << SICTR_REDG_SHIFT; + tmp |= tx_hi_z ? 
SICTR_TXDIZ_HIZ : SICTR_TXDIZ_LOW; + sh_msiof_write(p, SICTR, tmp); } static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p, const void *tx_buf, void *rx_buf, u32 bits, u32 words) { - u32 dr2 = MDR2_BITLEN1(bits) | MDR2_WDLEN1(words); + u32 dr2 = SIMDR2_BITLEN1(bits) | SIMDR2_WDLEN1(words); if (tx_buf || (p->ctlr->flags & SPI_CONTROLLER_MUST_TX)) - sh_msiof_write(p, TMDR2, dr2); + sh_msiof_write(p, SITMDR2, dr2); else - sh_msiof_write(p, TMDR2, dr2 | MDR2_GRPMASK1); + sh_msiof_write(p, SITMDR2, dr2 | SIMDR2_GRPMASK1); if (rx_buf) - sh_msiof_write(p, RMDR2, dr2); + sh_msiof_write(p, SIRMDR2, dr2); } static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p) { - sh_msiof_write(p, STR, - sh_msiof_read(p, STR) & ~(STR_TDREQ | STR_RDREQ)); + sh_msiof_write(p, SISTR, + sh_msiof_read(p, SISTR) & ~(SISTR_TDREQ | SISTR_RDREQ)); } static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p, @@ -414,7 +411,7 @@ static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - sh_msiof_write(p, TFDR, buf_8[k] << fs); + sh_msiof_write(p, SITFDR, buf_8[k] << fs); } static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p, @@ -424,7 +421,7 @@ static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - sh_msiof_write(p, TFDR, buf_16[k] << fs); + sh_msiof_write(p, SITFDR, buf_16[k] << fs); } static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p, @@ -434,7 +431,7 @@ static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - sh_msiof_write(p, TFDR, get_unaligned(&buf_16[k]) << fs); + sh_msiof_write(p, SITFDR, get_unaligned(&buf_16[k]) << fs); } static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p, @@ -444,7 +441,7 @@ static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - sh_msiof_write(p, TFDR, buf_32[k] << fs); + sh_msiof_write(p, SITFDR, buf_32[k] << fs); } static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p, @@ -454,7 +451,7 @@ static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - sh_msiof_write(p, TFDR, get_unaligned(&buf_32[k]) << fs); + sh_msiof_write(p, SITFDR, get_unaligned(&buf_32[k]) << fs); } static void sh_msiof_spi_write_fifo_s32(struct sh_msiof_spi_priv *p, @@ -464,7 +461,7 @@ static void sh_msiof_spi_write_fifo_s32(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - sh_msiof_write(p, TFDR, swab32(buf_32[k] << fs)); + sh_msiof_write(p, SITFDR, swab32(buf_32[k] << fs)); } static void sh_msiof_spi_write_fifo_s32u(struct sh_msiof_spi_priv *p, @@ -474,7 +471,7 @@ static void sh_msiof_spi_write_fifo_s32u(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - sh_msiof_write(p, TFDR, swab32(get_unaligned(&buf_32[k]) << fs)); + sh_msiof_write(p, SITFDR, swab32(get_unaligned(&buf_32[k]) << fs)); } static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p, @@ -484,7 +481,7 @@ static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - buf_8[k] = sh_msiof_read(p, RFDR) >> fs; + buf_8[k] = sh_msiof_read(p, SIRFDR) >> fs; } static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p, @@ -494,7 +491,7 @@ static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - buf_16[k] = sh_msiof_read(p, RFDR) >> fs; + buf_16[k] = sh_msiof_read(p, SIRFDR) >> fs; } static void 
sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p, @@ -504,7 +501,7 @@ static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_16[k]); + put_unaligned(sh_msiof_read(p, SIRFDR) >> fs, &buf_16[k]); } static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p, @@ -514,7 +511,7 @@ static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - buf_32[k] = sh_msiof_read(p, RFDR) >> fs; + buf_32[k] = sh_msiof_read(p, SIRFDR) >> fs; } static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p, @@ -524,7 +521,7 @@ static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_32[k]); + put_unaligned(sh_msiof_read(p, SIRFDR) >> fs, &buf_32[k]); } static void sh_msiof_spi_read_fifo_s32(struct sh_msiof_spi_priv *p, @@ -534,7 +531,7 @@ static void sh_msiof_spi_read_fifo_s32(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - buf_32[k] = swab32(sh_msiof_read(p, RFDR) >> fs); + buf_32[k] = swab32(sh_msiof_read(p, SIRFDR) >> fs); } static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p, @@ -544,7 +541,7 @@ static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - put_unaligned(swab32(sh_msiof_read(p, RFDR) >> fs), &buf_32[k]); + put_unaligned(swab32(sh_msiof_read(p, SIRFDR) >> fs), &buf_32[k]); } static int sh_msiof_spi_setup(struct spi_device *spi) @@ -561,17 +558,17 @@ static int sh_msiof_spi_setup(struct spi_device *spi) return 0; /* Configure native chip select mode/polarity early */ - clr = MDR1_SYNCMD_MASK; - set = MDR1_SYNCMD_SPI; + clr = SIMDR1_SYNCMD_MASK; + set = SIMDR1_SYNCMD_SPI; if (spi->mode & SPI_CS_HIGH) - clr |= BIT(MDR1_SYNCAC_SHIFT); + clr |= BIT(SIMDR1_SYNCAC_SHIFT); else - set |= BIT(MDR1_SYNCAC_SHIFT); + set |= BIT(SIMDR1_SYNCAC_SHIFT); pm_runtime_get_sync(&p->pdev->dev); - tmp = sh_msiof_read(p, TMDR1) & ~clr; - sh_msiof_write(p, TMDR1, tmp | set | MDR1_TRMD | TMDR1_PCON); - tmp = sh_msiof_read(p, RMDR1) & ~clr; - sh_msiof_write(p, RMDR1, tmp | set); + tmp = sh_msiof_read(p, SITMDR1) & ~clr; + sh_msiof_write(p, SITMDR1, tmp | set | SIMDR1_TRMD | SITMDR1_PCON); + tmp = sh_msiof_read(p, SIRMDR1) & ~clr; + sh_msiof_write(p, SIRMDR1, tmp | set); pm_runtime_put(&p->pdev->dev); p->native_cs_high = spi->mode & SPI_CS_HIGH; p->native_cs_inited = true; @@ -587,7 +584,7 @@ static int sh_msiof_prepare_message(struct spi_controller *ctlr, /* Configure pins before asserting CS */ if (spi->cs_gpiod) { - ss = p->unused_ss; + ss = ctlr->unused_native_cs; cs_high = p->native_cs_high; } else { ss = spi->chip_select; @@ -607,15 +604,15 @@ static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf) /* setup clock and rx/tx signals */ if (!slave) - ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TSCKE); + ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TSCKE); if (rx_buf && !ret) - ret = sh_msiof_modify_ctr_wait(p, 0, CTR_RXE); + ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_RXE); if (!ret) - ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TXE); + ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TXE); /* start by setting frame bit */ if (!ret && !slave) - ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TFSE); + ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TFSE); return ret; } @@ -627,13 +624,13 @@ static int sh_msiof_spi_stop(struct sh_msiof_spi_priv *p, void *rx_buf) /* shut down frame, rx/tx and clock 
signals */ if (!slave) - ret = sh_msiof_modify_ctr_wait(p, CTR_TFSE, 0); + ret = sh_msiof_modify_ctr_wait(p, SICTR_TFSE, 0); if (!ret) - ret = sh_msiof_modify_ctr_wait(p, CTR_TXE, 0); + ret = sh_msiof_modify_ctr_wait(p, SICTR_TXE, 0); if (rx_buf && !ret) - ret = sh_msiof_modify_ctr_wait(p, CTR_RXE, 0); + ret = sh_msiof_modify_ctr_wait(p, SICTR_RXE, 0); if (!ret && !slave) - ret = sh_msiof_modify_ctr_wait(p, CTR_TSCKE, 0); + ret = sh_msiof_modify_ctr_wait(p, SICTR_TSCKE, 0); return ret; } @@ -688,11 +685,11 @@ static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p, fifo_shift = 32 - bits; /* default FIFO watermarks for PIO */ - sh_msiof_write(p, FCTR, 0); + sh_msiof_write(p, SIFCTR, 0); /* setup msiof transfer mode registers */ sh_msiof_spi_set_mode_regs(p, tx_buf, rx_buf, bits, words); - sh_msiof_write(p, IER, IER_TEOFE | IER_REOFE); + sh_msiof_write(p, SIIER, SIIER_TEOFE | SIIER_REOFE); /* write tx fifo */ if (tx_buf) @@ -731,7 +728,7 @@ stop_reset: sh_msiof_reset_str(p); sh_msiof_spi_stop(p, rx_buf); stop_ier: - sh_msiof_write(p, IER, 0); + sh_msiof_write(p, SIIER, 0); return ret; } @@ -750,7 +747,7 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx, /* First prepare and submit the DMA request(s), as this may fail */ if (rx) { - ier_bits |= IER_RDREQE | IER_RDMAE; + ier_bits |= SIIER_RDREQE | SIIER_RDMAE; desc_rx = dmaengine_prep_slave_single(p->ctlr->dma_rx, p->rx_dma_addr, len, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); @@ -765,7 +762,7 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx, } if (tx) { - ier_bits |= IER_TDREQE | IER_TDMAE; + ier_bits |= SIIER_TDREQE | SIIER_TDMAE; dma_sync_single_for_device(p->ctlr->dma_tx->device->dev, p->tx_dma_addr, len, DMA_TO_DEVICE); desc_tx = dmaengine_prep_slave_single(p->ctlr->dma_tx, @@ -786,12 +783,12 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx, } /* 1 stage FIFO watermarks for DMA */ - sh_msiof_write(p, FCTR, FCTR_TFWM_1 | FCTR_RFWM_1); + sh_msiof_write(p, SIFCTR, SIFCTR_TFWM_1 | SIFCTR_RFWM_1); /* setup msiof transfer mode registers (32-bit words) */ sh_msiof_spi_set_mode_regs(p, tx, rx, 32, len / 4); - sh_msiof_write(p, IER, ier_bits); + sh_msiof_write(p, SIIER, ier_bits); reinit_completion(&p->done); if (tx) @@ -823,10 +820,10 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx, if (ret) goto stop_reset; - sh_msiof_write(p, IER, 0); + sh_msiof_write(p, SIIER, 0); } else { /* wait for tx fifo to be emptied */ - sh_msiof_write(p, IER, IER_TEOFE); + sh_msiof_write(p, SIIER, SIIER_TEOFE); ret = sh_msiof_wait_for_completion(p, &p->done); if (ret) goto stop_reset; @@ -856,7 +853,7 @@ stop_dma: no_dma_tx: if (rx) dmaengine_terminate_all(p->ctlr->dma_rx); - sh_msiof_write(p, IER, 0); + sh_msiof_write(p, SIIER, 0); return ret; } @@ -1124,46 +1121,6 @@ static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev) } #endif -static int sh_msiof_get_cs_gpios(struct sh_msiof_spi_priv *p) -{ - struct device *dev = &p->pdev->dev; - unsigned int used_ss_mask = 0; - unsigned int cs_gpios = 0; - unsigned int num_cs, i; - int ret; - - ret = gpiod_count(dev, "cs"); - if (ret <= 0) - return 0; - - num_cs = max_t(unsigned int, ret, p->ctlr->num_chipselect); - for (i = 0; i < num_cs; i++) { - struct gpio_desc *gpiod; - - gpiod = devm_gpiod_get_index(dev, "cs", i, GPIOD_ASIS); - if (!IS_ERR(gpiod)) { - devm_gpiod_put(dev, gpiod); - cs_gpios++; - continue; - } - - if (PTR_ERR(gpiod) != -ENOENT) - return PTR_ERR(gpiod); - - if (i >= 
MAX_SS) { - dev_err(dev, "Invalid native chip select %d\n", i); - return -EINVAL; - } - used_ss_mask |= BIT(i); - } - p->unused_ss = ffz(used_ss_mask); - if (cs_gpios && p->unused_ss >= MAX_SS) { - dev_err(dev, "No unused native chip select available\n"); - return -EINVAL; - } - return 0; -} - static struct dma_chan *sh_msiof_request_dma_chan(struct device *dev, enum dma_transfer_direction dir, unsigned int id, dma_addr_t port_addr) { @@ -1232,12 +1189,12 @@ static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p) ctlr = p->ctlr; ctlr->dma_tx = sh_msiof_request_dma_chan(dev, DMA_MEM_TO_DEV, - dma_tx_id, res->start + TFDR); + dma_tx_id, res->start + SITFDR); if (!ctlr->dma_tx) return -ENODEV; ctlr->dma_rx = sh_msiof_request_dma_chan(dev, DMA_DEV_TO_MEM, - dma_rx_id, res->start + RFDR); + dma_rx_id, res->start + SIRFDR); if (!ctlr->dma_rx) goto free_tx_chan; @@ -1373,17 +1330,12 @@ static int sh_msiof_spi_probe(struct platform_device *pdev) if (p->info->rx_fifo_override) p->rx_fifo_size = p->info->rx_fifo_override; - /* Setup GPIO chip selects */ - ctlr->num_chipselect = p->info->num_chipselect; - ret = sh_msiof_get_cs_gpios(p); - if (ret) - goto err1; - /* init controller code */ ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; ctlr->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE; ctlr->flags = chipdata->ctlr_flags; ctlr->bus_num = pdev->id; + ctlr->num_chipselect = p->info->num_chipselect; ctlr->dev.of_node = pdev->dev.of_node; ctlr->setup = sh_msiof_spi_setup; ctlr->prepare_message = sh_msiof_prepare_message; @@ -1392,6 +1344,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev) ctlr->auto_runtime_pm = true; ctlr->transfer_one = sh_msiof_transfer_one; ctlr->use_gpio_descriptors = true; + ctlr->max_native_cs = MAX_SS; ret = sh_msiof_request_dma(p); if (ret < 0) diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c index e1e639191557..8419e6722e17 100644 --- a/drivers/spi/spi-sirf.c +++ b/drivers/spi/spi-sirf.c @@ -1126,16 +1126,16 @@ static int spi_sirfsoc_probe(struct platform_device *pdev) sspi->bitbang.master->dev.of_node = pdev->dev.of_node; /* request DMA channels */ - sspi->rx_chan = dma_request_slave_channel(&pdev->dev, "rx"); - if (!sspi->rx_chan) { + sspi->rx_chan = dma_request_chan(&pdev->dev, "rx"); + if (IS_ERR(sspi->rx_chan)) { dev_err(&pdev->dev, "can not allocate rx dma channel\n"); - ret = -ENODEV; + ret = PTR_ERR(sspi->rx_chan); goto free_master; } - sspi->tx_chan = dma_request_slave_channel(&pdev->dev, "tx"); - if (!sspi->tx_chan) { + sspi->tx_chan = dma_request_chan(&pdev->dev, "tx"); + if (IS_ERR(sspi->tx_chan)) { dev_err(&pdev->dev, "can not allocate tx dma channel\n"); - ret = -ENODEV; + ret = PTR_ERR(sspi->tx_chan); goto free_rx_dma; } diff --git a/drivers/spi/spi-sprd.c b/drivers/spi/spi-sprd.c index 2ee1feb41681..6678f1cbc566 100644 --- a/drivers/spi/spi-sprd.c +++ b/drivers/spi/spi-sprd.c @@ -678,7 +678,7 @@ static int sprd_spi_init_hw(struct sprd_spi *ss, struct spi_transfer *t) if (d->unit != SPI_DELAY_UNIT_SCK) return -EINVAL; - val = readl_relaxed(ss->base + SPRD_SPI_CTL7); + val = readl_relaxed(ss->base + SPRD_SPI_CTL0); val &= ~(SPRD_SPI_SCK_REV | SPRD_SPI_NG_TX | SPRD_SPI_NG_RX); /* Set default chip selection, clock phase and clock polarity */ val |= ss->hw_mode & SPI_CPHA ? 
SPRD_SPI_NG_RX : SPRD_SPI_NG_TX; diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c index 4e726929bb4f..4ef569b47aa6 100644 --- a/drivers/spi/spi-stm32-qspi.c +++ b/drivers/spi/spi-stm32-qspi.c @@ -470,10 +470,11 @@ static int stm32_qspi_setup(struct spi_device *spi) return 0; } -static void stm32_qspi_dma_setup(struct stm32_qspi *qspi) +static int stm32_qspi_dma_setup(struct stm32_qspi *qspi) { struct dma_slave_config dma_cfg; struct device *dev = qspi->dev; + int ret = 0; memset(&dma_cfg, 0, sizeof(dma_cfg)); @@ -484,8 +485,13 @@ static void stm32_qspi_dma_setup(struct stm32_qspi *qspi) dma_cfg.src_maxburst = 4; dma_cfg.dst_maxburst = 4; - qspi->dma_chrx = dma_request_slave_channel(dev, "rx"); - if (qspi->dma_chrx) { + qspi->dma_chrx = dma_request_chan(dev, "rx"); + if (IS_ERR(qspi->dma_chrx)) { + ret = PTR_ERR(qspi->dma_chrx); + qspi->dma_chrx = NULL; + if (ret == -EPROBE_DEFER) + goto out; + } else { if (dmaengine_slave_config(qspi->dma_chrx, &dma_cfg)) { dev_err(dev, "dma rx config failed\n"); dma_release_channel(qspi->dma_chrx); @@ -493,8 +499,11 @@ static void stm32_qspi_dma_setup(struct stm32_qspi *qspi) } } - qspi->dma_chtx = dma_request_slave_channel(dev, "tx"); - if (qspi->dma_chtx) { + qspi->dma_chtx = dma_request_chan(dev, "tx"); + if (IS_ERR(qspi->dma_chtx)) { + ret = PTR_ERR(qspi->dma_chtx); + qspi->dma_chtx = NULL; + } else { if (dmaengine_slave_config(qspi->dma_chtx, &dma_cfg)) { dev_err(dev, "dma tx config failed\n"); dma_release_channel(qspi->dma_chtx); @@ -502,7 +511,13 @@ static void stm32_qspi_dma_setup(struct stm32_qspi *qspi) } } +out: init_completion(&qspi->dma_completion); + + if (ret != -EPROBE_DEFER) + ret = 0; + + return ret; } static void stm32_qspi_dma_free(struct stm32_qspi *qspi) @@ -608,7 +623,10 @@ static int stm32_qspi_probe(struct platform_device *pdev) qspi->dev = dev; platform_set_drvdata(pdev, qspi); - stm32_qspi_dma_setup(qspi); + ret = stm32_qspi_dma_setup(qspi); + if (ret) + goto err; + mutex_init(&qspi->lock); ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c index b222ce8d083e..e041f9c4ec47 100644 --- a/drivers/spi/spi-stm32.c +++ b/drivers/spi/spi-stm32.c @@ -9,7 +9,6 @@ #include <linux/clk.h> #include <linux/delay.h> #include <linux/dmaengine.h> -#include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/iopoll.h> #include <linux/module.h> @@ -974,29 +973,6 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id) } /** - * stm32_spi_setup - setup device chip select - */ -static int stm32_spi_setup(struct spi_device *spi_dev) -{ - int ret = 0; - - if (!gpio_is_valid(spi_dev->cs_gpio)) { - dev_err(&spi_dev->dev, "%d is not a valid gpio\n", - spi_dev->cs_gpio); - return -EINVAL; - } - - dev_dbg(&spi_dev->dev, "%s: set gpio%d output %s\n", __func__, - spi_dev->cs_gpio, - (spi_dev->mode & SPI_CS_HIGH) ? 
"low" : "high"); - - ret = gpio_direction_output(spi_dev->cs_gpio, - !(spi_dev->mode & SPI_CS_HIGH)); - - return ret; -} - -/** * stm32_spi_prepare_msg - set up the controller to transfer a single message */ static int stm32_spi_prepare_msg(struct spi_master *master, @@ -1810,7 +1786,7 @@ static int stm32_spi_probe(struct platform_device *pdev) struct spi_master *master; struct stm32_spi *spi; struct resource *res; - int i, ret; + int ret; master = spi_alloc_master(&pdev->dev, sizeof(struct stm32_spi)); if (!master) { @@ -1898,22 +1874,34 @@ static int stm32_spi_probe(struct platform_device *pdev) master->bits_per_word_mask = spi->cfg->get_bpw_mask(spi); master->max_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_min; master->min_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_max; - master->setup = stm32_spi_setup; + master->use_gpio_descriptors = true; master->prepare_message = stm32_spi_prepare_msg; master->transfer_one = stm32_spi_transfer_one; master->unprepare_message = stm32_spi_unprepare_msg; - spi->dma_tx = dma_request_slave_channel(spi->dev, "tx"); - if (!spi->dma_tx) + spi->dma_tx = dma_request_chan(spi->dev, "tx"); + if (IS_ERR(spi->dma_tx)) { + ret = PTR_ERR(spi->dma_tx); + spi->dma_tx = NULL; + if (ret == -EPROBE_DEFER) + goto err_clk_disable; + dev_warn(&pdev->dev, "failed to request tx dma channel\n"); - else + } else { master->dma_tx = spi->dma_tx; + } + + spi->dma_rx = dma_request_chan(spi->dev, "rx"); + if (IS_ERR(spi->dma_rx)) { + ret = PTR_ERR(spi->dma_rx); + spi->dma_rx = NULL; + if (ret == -EPROBE_DEFER) + goto err_dma_release; - spi->dma_rx = dma_request_slave_channel(spi->dev, "rx"); - if (!spi->dma_rx) dev_warn(&pdev->dev, "failed to request rx dma channel\n"); - else + } else { master->dma_rx = spi->dma_rx; + } if (spi->dma_tx || spi->dma_rx) master->can_dma = stm32_spi_can_dma; @@ -1925,43 +1913,26 @@ static int stm32_spi_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "spi master registration failed: %d\n", ret); - goto err_dma_release; + goto err_pm_disable; } - if (!master->cs_gpios) { + if (!master->cs_gpiods) { dev_err(&pdev->dev, "no CS gpios available\n"); ret = -EINVAL; - goto err_dma_release; - } - - for (i = 0; i < master->num_chipselect; i++) { - if (!gpio_is_valid(master->cs_gpios[i])) { - dev_err(&pdev->dev, "%i is not a valid gpio\n", - master->cs_gpios[i]); - ret = -EINVAL; - goto err_dma_release; - } - - ret = devm_gpio_request(&pdev->dev, master->cs_gpios[i], - DRIVER_NAME); - if (ret) { - dev_err(&pdev->dev, "can't get CS gpio %i\n", - master->cs_gpios[i]); - goto err_dma_release; - } + goto err_pm_disable; } dev_info(&pdev->dev, "driver initialized\n"); return 0; +err_pm_disable: + pm_runtime_disable(&pdev->dev); err_dma_release: if (spi->dma_tx) dma_release_channel(spi->dma_tx); if (spi->dma_rx) dma_release_channel(spi->dma_rx); - - pm_runtime_disable(&pdev->dev); err_clk_disable: clk_disable_unprepare(spi->clk); err_master_put: diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c index fc40ab146c86..83edabdb41ad 100644 --- a/drivers/spi/spi-tegra114.c +++ b/drivers/spi/spi-tegra114.c @@ -269,10 +269,10 @@ static unsigned tegra_spi_calculate_curr_xfer_param( if ((bits_per_word == 8 || bits_per_word == 16 || bits_per_word == 32) && t->len > 3) { - tspi->is_packed = 1; + tspi->is_packed = true; tspi->words_per_32bit = 32/bits_per_word; } else { - tspi->is_packed = 0; + tspi->is_packed = false; tspi->words_per_32bit = 1; } diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c index 
3cb65371ae3b..366a3e5cca6b 100644 --- a/drivers/spi/spi-ti-qspi.c +++ b/drivers/spi/spi-ti-qspi.c @@ -62,6 +62,7 @@ struct ti_qspi { u32 dc; bool mmap_enabled; + int current_cs; }; #define QSPI_PID (0x0) @@ -79,8 +80,6 @@ struct ti_qspi { #define QSPI_COMPLETION_TIMEOUT msecs_to_jiffies(2000) -#define QSPI_FCLK 192000000 - /* Clock Control */ #define QSPI_CLK_EN (1 << 31) #define QSPI_CLK_DIV_MAX 0xffff @@ -315,6 +314,8 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t, { int wlen; unsigned int cmd; + u32 rx; + u8 rxlen, rx_wlen; u8 *rxbuf; rxbuf = t->rx_buf; @@ -331,20 +332,67 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t, break; } wlen = t->bits_per_word >> 3; /* in bytes */ + rx_wlen = wlen; while (count) { dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc); if (qspi_is_busy(qspi)) return -EBUSY; + switch (wlen) { + case 1: + /* + * Optimize the 8-bit words transfers, as used by + * the SPI flash devices. + */ + if (count >= QSPI_WLEN_MAX_BYTES) { + rxlen = QSPI_WLEN_MAX_BYTES; + } else { + rxlen = min(count, 4); + } + rx_wlen = rxlen << 3; + cmd &= ~QSPI_WLEN_MASK; + cmd |= QSPI_WLEN(rx_wlen); + break; + default: + rxlen = wlen; + break; + } + ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG); if (ti_qspi_poll_wc(qspi)) { dev_err(qspi->dev, "read timed out\n"); return -ETIMEDOUT; } + switch (wlen) { case 1: - *rxbuf = readb(qspi->base + QSPI_SPI_DATA_REG); + /* + * Optimize the 8-bit words transfers, as used by + * the SPI flash devices. + */ + if (count >= QSPI_WLEN_MAX_BYTES) { + u32 *rxp = (u32 *) rxbuf; + rx = readl(qspi->base + QSPI_SPI_DATA_REG_3); + *rxp++ = be32_to_cpu(rx); + rx = readl(qspi->base + QSPI_SPI_DATA_REG_2); + *rxp++ = be32_to_cpu(rx); + rx = readl(qspi->base + QSPI_SPI_DATA_REG_1); + *rxp++ = be32_to_cpu(rx); + rx = readl(qspi->base + QSPI_SPI_DATA_REG); + *rxp++ = be32_to_cpu(rx); + } else { + u8 *rxp = rxbuf; + rx = readl(qspi->base + QSPI_SPI_DATA_REG); + if (rx_wlen >= 8) + *rxp++ = rx >> (rx_wlen - 8); + if (rx_wlen >= 16) + *rxp++ = rx >> (rx_wlen - 16); + if (rx_wlen >= 24) + *rxp++ = rx >> (rx_wlen - 24); + if (rx_wlen >= 32) + *rxp++ = rx; + } break; case 2: *((u16 *)rxbuf) = readw(qspi->base + QSPI_SPI_DATA_REG); @@ -353,8 +401,8 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t, *((u32 *)rxbuf) = readl(qspi->base + QSPI_SPI_DATA_REG); break; } - rxbuf += wlen; - count -= wlen; + rxbuf += rxlen; + count -= rxlen; } return 0; @@ -487,6 +535,7 @@ static void ti_qspi_enable_memory_map(struct spi_device *spi) MEM_CS_EN(spi->chip_select)); } qspi->mmap_enabled = true; + qspi->current_cs = spi->chip_select; } static void ti_qspi_disable_memory_map(struct spi_device *spi) @@ -498,6 +547,7 @@ static void ti_qspi_disable_memory_map(struct spi_device *spi) regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg, MEM_CS_MASK, 0); qspi->mmap_enabled = false; + qspi->current_cs = -1; } static void ti_qspi_setup_mmap_read(struct spi_device *spi, u8 opcode, @@ -524,6 +574,35 @@ static void ti_qspi_setup_mmap_read(struct spi_device *spi, u8 opcode, QSPI_SPI_SETUP_REG(spi->chip_select)); } +static int ti_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) +{ + struct ti_qspi *qspi = spi_controller_get_devdata(mem->spi->master); + size_t max_len; + + if (op->data.dir == SPI_MEM_DATA_IN) { + if (op->addr.val < qspi->mmap_size) { + /* Limit MMIO to the mmaped region */ + if (op->addr.val + op->data.nbytes > qspi->mmap_size) { + max_len = qspi->mmap_size - op->addr.val; + op->data.nbytes = 
min((size_t) op->data.nbytes, + max_len); + } + } else { + /* + * Use fallback mode (SW generated transfers) above the + * mmaped region. + * Adjust size to comply with the QSPI max frame length. + */ + max_len = QSPI_FRAME; + max_len -= 1 + op->addr.nbytes + op->dummy.nbytes; + op->data.nbytes = min((size_t) op->data.nbytes, + max_len); + } + } + + return 0; +} + static int ti_qspi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op) { @@ -543,7 +622,7 @@ static int ti_qspi_exec_mem_op(struct spi_mem *mem, mutex_lock(&qspi->list_lock); - if (!qspi->mmap_enabled) + if (!qspi->mmap_enabled || qspi->current_cs != mem->spi->chip_select) ti_qspi_enable_memory_map(mem->spi); ti_qspi_setup_mmap_read(mem->spi, op->cmd.opcode, op->data.buswidth, op->addr.nbytes, op->dummy.nbytes); @@ -574,6 +653,7 @@ static int ti_qspi_exec_mem_op(struct spi_mem *mem, static const struct spi_controller_mem_ops ti_qspi_mem_ops = { .exec_op = ti_qspi_exec_mem_op, + .adjust_op_size = ti_qspi_adjust_op_size, }; static int ti_qspi_start_transfer_one(struct spi_master *master, @@ -799,6 +879,7 @@ no_dma: } } qspi->mmap_enabled = false; + qspi->current_cs = -1; ret = devm_spi_register_master(&pdev->dev, master); if (!ret) diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c index 223353fa2d8a..d7ea6af74743 100644 --- a/drivers/spi/spi-topcliff-pch.c +++ b/drivers/spi/spi-topcliff-pch.c @@ -863,7 +863,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw) /* Set Tx DMA */ param = &dma->param_tx; param->dma_dev = &dma_dev->dev; - param->chan_id = data->ch * 2; /* Tx = 0, 2 */; + param->chan_id = data->ch * 2; /* Tx = 0, 2 */ param->tx_reg = data->io_base_addr + PCH_SPDWR; param->width = width; chan = dma_request_channel(mask, pch_spi_filter, param); @@ -878,7 +878,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw) /* Set Rx DMA */ param = &dma->param_rx; param->dma_dev = &dma_dev->dev; - param->chan_id = data->ch * 2 + 1; /* Rx = Tx + 1 */; + param->chan_id = data->ch * 2 + 1; /* Rx = Tx + 1 */ param->rx_reg = data->io_base_addr + PCH_SPDRR; param->width = width; chan = dma_request_channel(mask, pch_spi_filter, param); diff --git a/drivers/spi/spi-uniphier.c b/drivers/spi/spi-uniphier.c index 47cde1864630..0fa50979644d 100644 --- a/drivers/spi/spi-uniphier.c +++ b/drivers/spi/spi-uniphier.c @@ -8,6 +8,7 @@ #include <linux/bitops.h> #include <linux/clk.h> #include <linux/delay.h> +#include <linux/dmaengine.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> @@ -23,6 +24,7 @@ struct uniphier_spi_priv { void __iomem *base; + dma_addr_t base_dma_addr; struct clk *clk; struct spi_master *master; struct completion xfer_done; @@ -32,6 +34,7 @@ struct uniphier_spi_priv { unsigned int rx_bytes; const u8 *tx_buf; u8 *rx_buf; + atomic_t dma_busy; bool is_save_param; u8 bits_per_word; @@ -61,11 +64,16 @@ struct uniphier_spi_priv { #define SSI_FPS_FSTRT BIT(14) #define SSI_SR 0x14 +#define SSI_SR_BUSY BIT(7) #define SSI_SR_RNE BIT(0) #define SSI_IE 0x18 +#define SSI_IE_TCIE BIT(4) #define SSI_IE_RCIE BIT(3) +#define SSI_IE_TXRE BIT(2) +#define SSI_IE_RXRE BIT(1) #define SSI_IE_RORIE BIT(0) +#define SSI_IE_ALL_MASK GENMASK(4, 0) #define SSI_IS 0x1c #define SSI_IS_RXRS BIT(9) @@ -87,15 +95,19 @@ struct uniphier_spi_priv { #define SSI_RXDR 0x24 #define SSI_FIFO_DEPTH 8U +#define SSI_FIFO_BURST_NUM 1 + +#define SSI_DMA_RX_BUSY BIT(1) +#define SSI_DMA_TX_BUSY BIT(0) static inline unsigned int bytes_per_word(unsigned int bits) { return bits 
<= 8 ? 1 : (bits <= 16 ? 2 : 4); } -static inline void uniphier_spi_irq_enable(struct spi_device *spi, u32 mask) +static inline void uniphier_spi_irq_enable(struct uniphier_spi_priv *priv, + u32 mask) { - struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master); u32 val; val = readl(priv->base + SSI_IE); @@ -103,9 +115,9 @@ static inline void uniphier_spi_irq_enable(struct spi_device *spi, u32 mask) writel(val, priv->base + SSI_IE); } -static inline void uniphier_spi_irq_disable(struct spi_device *spi, u32 mask) +static inline void uniphier_spi_irq_disable(struct uniphier_spi_priv *priv, + u32 mask) { - struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master); u32 val; val = readl(priv->base + SSI_IE); @@ -290,25 +302,32 @@ static void uniphier_spi_recv(struct uniphier_spi_priv *priv) } } -static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv) +static void uniphier_spi_set_fifo_threshold(struct uniphier_spi_priv *priv, + unsigned int threshold) { - unsigned int fifo_threshold, fill_bytes; u32 val; - fifo_threshold = DIV_ROUND_UP(priv->rx_bytes, - bytes_per_word(priv->bits_per_word)); - fifo_threshold = min(fifo_threshold, SSI_FIFO_DEPTH); - - fill_bytes = fifo_threshold - (priv->rx_bytes - priv->tx_bytes); - - /* set fifo threshold */ val = readl(priv->base + SSI_FC); val &= ~(SSI_FC_TXFTH_MASK | SSI_FC_RXFTH_MASK); - val |= FIELD_PREP(SSI_FC_TXFTH_MASK, fifo_threshold); - val |= FIELD_PREP(SSI_FC_RXFTH_MASK, fifo_threshold); + val |= FIELD_PREP(SSI_FC_TXFTH_MASK, SSI_FIFO_DEPTH - threshold); + val |= FIELD_PREP(SSI_FC_RXFTH_MASK, threshold); writel(val, priv->base + SSI_FC); +} + +static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv) +{ + unsigned int fifo_threshold, fill_words; + unsigned int bpw = bytes_per_word(priv->bits_per_word); + + fifo_threshold = DIV_ROUND_UP(priv->rx_bytes, bpw); + fifo_threshold = min(fifo_threshold, SSI_FIFO_DEPTH); + + uniphier_spi_set_fifo_threshold(priv, fifo_threshold); + + fill_words = fifo_threshold - + DIV_ROUND_UP(priv->rx_bytes - priv->tx_bytes, bpw); - while (fill_bytes--) + while (fill_words--) uniphier_spi_send(priv); } @@ -327,6 +346,128 @@ static void uniphier_spi_set_cs(struct spi_device *spi, bool enable) writel(val, priv->base + SSI_FPS); } +static bool uniphier_spi_can_dma(struct spi_master *master, + struct spi_device *spi, + struct spi_transfer *t) +{ + struct uniphier_spi_priv *priv = spi_master_get_devdata(master); + unsigned int bpw = bytes_per_word(priv->bits_per_word); + + if ((!master->dma_tx && !master->dma_rx) + || (!master->dma_tx && t->tx_buf) + || (!master->dma_rx && t->rx_buf)) + return false; + + return DIV_ROUND_UP(t->len, bpw) > SSI_FIFO_DEPTH; +} + +static void uniphier_spi_dma_rxcb(void *data) +{ + struct spi_master *master = data; + struct uniphier_spi_priv *priv = spi_master_get_devdata(master); + int state = atomic_fetch_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy); + + uniphier_spi_irq_disable(priv, SSI_IE_RXRE); + + if (!(state & SSI_DMA_TX_BUSY)) + spi_finalize_current_transfer(master); +} + +static void uniphier_spi_dma_txcb(void *data) +{ + struct spi_master *master = data; + struct uniphier_spi_priv *priv = spi_master_get_devdata(master); + int state = atomic_fetch_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy); + + uniphier_spi_irq_disable(priv, SSI_IE_TXRE); + + if (!(state & SSI_DMA_RX_BUSY)) + spi_finalize_current_transfer(master); +} + +static int uniphier_spi_transfer_one_dma(struct spi_master *master, + struct spi_device *spi, + struct spi_transfer *t) +{ + 
struct uniphier_spi_priv *priv = spi_master_get_devdata(master); + struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL; + int buswidth; + + atomic_set(&priv->dma_busy, 0); + + uniphier_spi_set_fifo_threshold(priv, SSI_FIFO_BURST_NUM); + + if (priv->bits_per_word <= 8) + buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE; + else if (priv->bits_per_word <= 16) + buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES; + else + buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES; + + if (priv->rx_buf) { + struct dma_slave_config rxconf = { + .direction = DMA_DEV_TO_MEM, + .src_addr = priv->base_dma_addr + SSI_RXDR, + .src_addr_width = buswidth, + .src_maxburst = SSI_FIFO_BURST_NUM, + }; + + dmaengine_slave_config(master->dma_rx, &rxconf); + + rxdesc = dmaengine_prep_slave_sg( + master->dma_rx, + t->rx_sg.sgl, t->rx_sg.nents, + DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!rxdesc) + goto out_err_prep; + + rxdesc->callback = uniphier_spi_dma_rxcb; + rxdesc->callback_param = master; + + uniphier_spi_irq_enable(priv, SSI_IE_RXRE); + atomic_or(SSI_DMA_RX_BUSY, &priv->dma_busy); + + dmaengine_submit(rxdesc); + dma_async_issue_pending(master->dma_rx); + } + + if (priv->tx_buf) { + struct dma_slave_config txconf = { + .direction = DMA_MEM_TO_DEV, + .dst_addr = priv->base_dma_addr + SSI_TXDR, + .dst_addr_width = buswidth, + .dst_maxburst = SSI_FIFO_BURST_NUM, + }; + + dmaengine_slave_config(master->dma_tx, &txconf); + + txdesc = dmaengine_prep_slave_sg( + master->dma_tx, + t->tx_sg.sgl, t->tx_sg.nents, + DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!txdesc) + goto out_err_prep; + + txdesc->callback = uniphier_spi_dma_txcb; + txdesc->callback_param = master; + + uniphier_spi_irq_enable(priv, SSI_IE_TXRE); + atomic_or(SSI_DMA_TX_BUSY, &priv->dma_busy); + + dmaengine_submit(txdesc); + dma_async_issue_pending(master->dma_tx); + } + + /* signal that we need to wait for completion */ + return (priv->tx_buf || priv->rx_buf); + +out_err_prep: + if (rxdesc) + dmaengine_terminate_sync(master->dma_rx); + + return -EINVAL; +} + static int uniphier_spi_transfer_one_irq(struct spi_master *master, struct spi_device *spi, struct spi_transfer *t) @@ -339,12 +480,12 @@ static int uniphier_spi_transfer_one_irq(struct spi_master *master, uniphier_spi_fill_tx_fifo(priv); - uniphier_spi_irq_enable(spi, SSI_IE_RCIE | SSI_IE_RORIE); + uniphier_spi_irq_enable(priv, SSI_IE_RCIE | SSI_IE_RORIE); time_left = wait_for_completion_timeout(&priv->xfer_done, msecs_to_jiffies(SSI_TIMEOUT_MS)); - uniphier_spi_irq_disable(spi, SSI_IE_RCIE | SSI_IE_RORIE); + uniphier_spi_irq_disable(priv, SSI_IE_RCIE | SSI_IE_RORIE); if (!time_left) { dev_err(dev, "transfer timeout.\n"); @@ -388,6 +529,7 @@ static int uniphier_spi_transfer_one(struct spi_master *master, { struct uniphier_spi_priv *priv = spi_master_get_devdata(master); unsigned long threshold; + bool use_dma; /* Terminate and return success for 0 byte length transfer */ if (!t->len) @@ -395,6 +537,10 @@ static int uniphier_spi_transfer_one(struct spi_master *master, uniphier_spi_setup_transfer(spi, t); + use_dma = master->can_dma ? master->can_dma(master, spi, t) : false; + if (use_dma) + return uniphier_spi_transfer_one_dma(master, spi, t); + /* * If the transfer operation will take longer than * SSI_POLL_TIMEOUT_US, it should use irq. 
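[Editor's note] The uniphier DMA path above finishes a transfer from whichever of the two DMA callbacks fires last: each callback atomically clears its own SSI_DMA_*_BUSY bit in priv->dma_busy, and spi_finalize_current_transfer() runs only if the other bit was already gone in the value returned by atomic_fetch_andnot(). A minimal userspace C sketch of that rendezvous pattern (the names and the printf stand in for the driver's types; this is not the driver code itself):

#include <stdatomic.h>
#include <stdio.h>

#define RX_BUSY (1 << 1)
#define TX_BUSY (1 << 0)

static atomic_int dma_busy;

/* One call per DMA completion; 'mine' is RX_BUSY or TX_BUSY. */
static void dma_callback(int mine)
{
	/* Fetch the value *before* clearing our own busy bit. */
	int prev = atomic_fetch_and(&dma_busy, ~mine);

	if (!(prev & ~mine))	/* the other direction already completed */
		printf("finalize transfer (last bit: 0x%x)\n", mine);
}

int main(void)
{
	atomic_store(&dma_busy, RX_BUSY | TX_BUSY);	/* both directions queued */
	dma_callback(TX_BUSY);	/* TX done first: RX still busy, no finalize */
	dma_callback(RX_BUSY);	/* RX done last: finalizes exactly once */
	return 0;
}

Whichever callback runs second sees the other bit already clear and finalizes; the atomic read-modify-write guarantees exactly one of them does so even if both completions race.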
@@ -425,6 +571,32 @@ static int uniphier_spi_unprepare_transfer_hardware(struct spi_master *master) return 0; } +static void uniphier_spi_handle_err(struct spi_master *master, + struct spi_message *msg) +{ + struct uniphier_spi_priv *priv = spi_master_get_devdata(master); + u32 val; + + /* stop running spi transfer */ + writel(0, priv->base + SSI_CTL); + + /* reset FIFOs */ + val = SSI_FC_TXFFL | SSI_FC_RXFFL; + writel(val, priv->base + SSI_FC); + + uniphier_spi_irq_disable(priv, SSI_IE_ALL_MASK); + + if (atomic_read(&priv->dma_busy) & SSI_DMA_TX_BUSY) { + dmaengine_terminate_async(master->dma_tx); + atomic_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy); + } + + if (atomic_read(&priv->dma_busy) & SSI_DMA_RX_BUSY) { + dmaengine_terminate_async(master->dma_rx); + atomic_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy); + } +} + static irqreturn_t uniphier_spi_handler(int irq, void *dev_id) { struct uniphier_spi_priv *priv = dev_id; @@ -470,6 +642,9 @@ static int uniphier_spi_probe(struct platform_device *pdev) { struct uniphier_spi_priv *priv; struct spi_master *master; + struct resource *res; + struct dma_slave_caps caps; + u32 dma_tx_burst = 0, dma_rx_burst = 0; unsigned long clk_rate; int irq; int ret; @@ -484,11 +659,13 @@ static int uniphier_spi_probe(struct platform_device *pdev) priv->master = master; priv->is_save_param = false; - priv->base = devm_platform_ioremap_resource(pdev, 0); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(priv->base)) { ret = PTR_ERR(priv->base); goto out_master_put; } + priv->base_dma_addr = res->start; priv->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(priv->clk)) { @@ -531,7 +708,45 @@ static int uniphier_spi_probe(struct platform_device *pdev) = uniphier_spi_prepare_transfer_hardware; master->unprepare_transfer_hardware = uniphier_spi_unprepare_transfer_hardware; + master->handle_err = uniphier_spi_handle_err; + master->can_dma = uniphier_spi_can_dma; + master->num_chipselect = 1; + master->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX; + + master->dma_tx = dma_request_chan(&pdev->dev, "tx"); + if (IS_ERR_OR_NULL(master->dma_tx)) { + if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER) + goto out_disable_clk; + master->dma_tx = NULL; + dma_tx_burst = INT_MAX; + } else { + ret = dma_get_slave_caps(master->dma_tx, &caps); + if (ret) { + dev_err(&pdev->dev, "failed to get TX DMA capacities: %d\n", + ret); + goto out_disable_clk; + } + dma_tx_burst = caps.max_burst; + } + + master->dma_rx = dma_request_chan(&pdev->dev, "rx"); + if (IS_ERR_OR_NULL(master->dma_rx)) { + if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER) + goto out_disable_clk; + master->dma_rx = NULL; + dma_rx_burst = INT_MAX; + } else { + ret = dma_get_slave_caps(master->dma_rx, &caps); + if (ret) { + dev_err(&pdev->dev, "failed to get RX DMA capacities: %d\n", + ret); + goto out_disable_clk; + } + dma_rx_burst = caps.max_burst; + } + + master->max_dma_len = min(dma_tx_burst, dma_rx_burst); ret = devm_spi_register_master(&pdev->dev, master); if (ret) @@ -551,6 +766,11 @@ static int uniphier_spi_remove(struct platform_device *pdev) { struct uniphier_spi_priv *priv = platform_get_drvdata(pdev); + if (priv->master->dma_tx) + dma_release_channel(priv->master->dma_tx); + if (priv->master->dma_rx) + dma_release_channel(priv->master->dma_rx); + clk_disable_unprepare(priv->clk); return 0; diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 5e4c4532f7f3..38b4c78df506 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -1499,8 
+1499,7 @@ static void spi_pump_messages(struct kthread_work *work) * advances its @tx buffer pointer monotonically. * @ctlr: Pointer to the spi_controller structure of the driver * @xfer: Pointer to the transfer being timestamped - * @tx: Pointer to the current word within the xfer->tx_buf that the driver is - * preparing to transmit right now. + * @progress: How many words (not bytes) have been transferred so far * @irqs_off: If true, will disable IRQs and preemption for the duration of the * transfer, for less jitter in time measurement. Only compatible * with PIO drivers. If true, must follow up with @@ -1510,21 +1509,19 @@ static void spi_pump_messages(struct kthread_work *work) */ void spi_take_timestamp_pre(struct spi_controller *ctlr, struct spi_transfer *xfer, - const void *tx, bool irqs_off) + size_t progress, bool irqs_off) { - u8 bytes_per_word = DIV_ROUND_UP(xfer->bits_per_word, 8); - if (!xfer->ptp_sts) return; if (xfer->timestamped_pre) return; - if (tx < (xfer->tx_buf + xfer->ptp_sts_word_pre * bytes_per_word)) + if (progress < xfer->ptp_sts_word_pre) return; /* Capture the resolution of the timestamp */ - xfer->ptp_sts_word_pre = (tx - xfer->tx_buf) / bytes_per_word; + xfer->ptp_sts_word_pre = progress; xfer->timestamped_pre = true; @@ -1546,23 +1543,20 @@ EXPORT_SYMBOL_GPL(spi_take_timestamp_pre); * timestamped. * @ctlr: Pointer to the spi_controller structure of the driver * @xfer: Pointer to the transfer being timestamped - * @tx: Pointer to the current word within the xfer->tx_buf that the driver has - * just transmitted. + * @progress: How many words (not bytes) have been transferred so far * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU. */ void spi_take_timestamp_post(struct spi_controller *ctlr, struct spi_transfer *xfer, - const void *tx, bool irqs_off) + size_t progress, bool irqs_off) { - u8 bytes_per_word = DIV_ROUND_UP(xfer->bits_per_word, 8); - if (!xfer->ptp_sts) return; if (xfer->timestamped_post) return; - if (tx < (xfer->tx_buf + xfer->ptp_sts_word_post * bytes_per_word)) + if (progress < xfer->ptp_sts_word_post) return; ptp_read_system_postts(xfer->ptp_sts); @@ -1573,7 +1567,7 @@ void spi_take_timestamp_post(struct spi_controller *ctlr, } /* Capture the resolution of the timestamp */ - xfer->ptp_sts_word_post = (tx - xfer->tx_buf) / bytes_per_word; + xfer->ptp_sts_word_post = progress; xfer->timestamped_post = true; } @@ -1680,6 +1674,13 @@ void spi_finalize_current_message(struct spi_controller *ctlr) } } + if (unlikely(ctlr->ptp_sts_supported)) { + list_for_each_entry(xfer, &mesg->transfers, transfer_list) { + WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped_pre); + WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped_post); + } + } + spi_unmap_msg(ctlr, mesg); if (ctlr->cur_msg_prepared && ctlr->unprepare_message) { @@ -2457,6 +2458,8 @@ static int spi_get_gpio_descs(struct spi_controller *ctlr) int nb, i; struct gpio_desc **cs; struct device *dev = &ctlr->dev; + unsigned long native_cs_mask = 0; + unsigned int num_cs_gpios = 0; nb = gpiod_count(dev, "cs"); ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect); @@ -2498,7 +2501,22 @@ static int spi_get_gpio_descs(struct spi_controller *ctlr) if (!gpioname) return -ENOMEM; gpiod_set_consumer_name(cs[i], gpioname); + num_cs_gpios++; + continue; } + + if (ctlr->max_native_cs && i >= ctlr->max_native_cs) { + dev_err(dev, "Invalid native chip select %d\n", i); + return -EINVAL; + } + native_cs_mask |= BIT(i); + } + + ctlr->unused_native_cs = ffz(native_cs_mask); + if 
(num_cs_gpios && ctlr->max_native_cs && + ctlr->unused_native_cs >= ctlr->max_native_cs) { + dev_err(dev, "No unused native chip select available\n"); + return -EINVAL; } return 0; diff --git a/drivers/ssb/driver_extif.c b/drivers/ssb/driver_extif.c index 06b68dd6e022..bc275968fcc6 100644 --- a/drivers/ssb/driver_extif.c +++ b/drivers/ssb/driver_extif.c @@ -63,7 +63,7 @@ int ssb_extif_serial_init(struct ssb_extif *extif, struct ssb_serial_port *ports for (i = 0; i < 2; i++) { void __iomem *uart_regs; - uart_regs = ioremap_nocache(SSB_EUART, 16); + uart_regs = ioremap(SSB_EUART, 16); if (uart_regs) { uart_regs += (i * 8); diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c index 6a5622e0ded5..c1186415896b 100644 --- a/drivers/ssb/driver_pcicore.c +++ b/drivers/ssb/driver_pcicore.c @@ -122,7 +122,7 @@ static int ssb_extpci_read_config(struct ssb_pcicore *pc, if (unlikely(!addr)) goto out; err = -ENOMEM; - mmio = ioremap_nocache(addr, len); + mmio = ioremap(addr, len); if (!mmio) goto out; @@ -168,7 +168,7 @@ static int ssb_extpci_write_config(struct ssb_pcicore *pc, if (unlikely(!addr)) goto out; err = -ENOMEM; - mmio = ioremap_nocache(addr, len); + mmio = ioremap(addr, len); if (!mmio) goto out; @@ -382,7 +382,7 @@ static void ssb_pcicore_init_hostmode(struct ssb_pcicore *pc) /* Ok, ready to run, register it to the system. * The following needs change, if we want to port hostmode * to non-MIPS platform. */ - ssb_pcicore_controller.io_map_base = (unsigned long)ioremap_nocache(SSB_PCI_MEM, 0x04000000); + ssb_pcicore_controller.io_map_base = (unsigned long)ioremap(SSB_PCI_MEM, 0x04000000); set_io_port_base(ssb_pcicore_controller.io_map_base); /* Give some time to the PCI controller to configure itself with the new * values. Not waiting at this point causes crashes of the machine. */ diff --git a/drivers/staging/axis-fifo/Kconfig b/drivers/staging/axis-fifo/Kconfig index 3fffe4d6f327..f180a8e9f58a 100644 --- a/drivers/staging/axis-fifo/Kconfig +++ b/drivers/staging/axis-fifo/Kconfig @@ -4,7 +4,7 @@ # config XIL_AXIS_FIFO tristate "Xilinx AXI-Stream FIFO IP core driver" - depends on OF + depends on OF && HAS_IOMEM help This adds support for the Xilinx AXI-Stream FIFO IP core driver. 
The AXI Streaming FIFO allows memory mapped access to a AXI Streaming diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c index dbff0f7e7cf5..ddc0dc93d08b 100644 --- a/drivers/staging/comedi/drivers/adv_pci1710.c +++ b/drivers/staging/comedi/drivers/adv_pci1710.c @@ -46,8 +46,8 @@ #define PCI171X_RANGE_UNI BIT(4) #define PCI171X_RANGE_GAIN(x) (((x) & 0x7) << 0) #define PCI171X_MUX_REG 0x04 /* W: A/D multiplexor control */ -#define PCI171X_MUX_CHANH(x) (((x) & 0xf) << 8) -#define PCI171X_MUX_CHANL(x) (((x) & 0xf) << 0) +#define PCI171X_MUX_CHANH(x) (((x) & 0xff) << 8) +#define PCI171X_MUX_CHANL(x) (((x) & 0xff) << 0) #define PCI171X_MUX_CHAN(x) (PCI171X_MUX_CHANH(x) | PCI171X_MUX_CHANL(x)) #define PCI171X_STATUS_REG 0x06 /* R: status register */ #define PCI171X_STATUS_IRQ BIT(11) /* 1=IRQ occurred */ diff --git a/drivers/staging/comedi/drivers/gsc_hpdi.c b/drivers/staging/comedi/drivers/gsc_hpdi.c index 4bdf44d82879..dc62db1ee1dd 100644 --- a/drivers/staging/comedi/drivers/gsc_hpdi.c +++ b/drivers/staging/comedi/drivers/gsc_hpdi.c @@ -623,6 +623,11 @@ static int gsc_hpdi_auto_attach(struct comedi_device *dev, dma_alloc_coherent(&pcidev->dev, DMA_BUFFER_SIZE, &devpriv->dio_buffer_phys_addr[i], GFP_KERNEL); + if (!devpriv->dio_buffer[i]) { + dev_warn(dev->class_dev, + "failed to allocate DMA buffer\n"); + return -ENOMEM; + } } /* allocate dma descriptors */ devpriv->dma_desc = dma_alloc_coherent(&pcidev->dev, @@ -630,6 +635,11 @@ static int gsc_hpdi_auto_attach(struct comedi_device *dev, NUM_DMA_DESCRIPTORS, &devpriv->dma_desc_phys_addr, GFP_KERNEL); + if (!devpriv->dma_desc) { + dev_warn(dev->class_dev, + "failed to allocate DMA descriptors\n"); + return -ENOMEM; + } if (devpriv->dma_desc_phys_addr & 0xf) { dev_warn(dev->class_dev, " dma descriptors not quad-word aligned (bug)\n"); diff --git a/drivers/staging/comedi/drivers/ni_routes.c b/drivers/staging/comedi/drivers/ni_routes.c index 673d732dcb8f..8f398b30f5bf 100644 --- a/drivers/staging/comedi/drivers/ni_routes.c +++ b/drivers/staging/comedi/drivers/ni_routes.c @@ -72,9 +72,6 @@ static int ni_find_device_routes(const char *device_family, } } - if (!rv) - return -ENODATA; - /* Second, find the set of routes valid for this device. 
*/ for (i = 0; ni_device_routes_list[i]; ++i) { if (memcmp(ni_device_routes_list[i]->device, board_name, @@ -84,12 +81,12 @@ static int ni_find_device_routes(const char *device_family, } } - if (!dr) - return -ENODATA; - tables->route_values = rv; tables->valid_routes = dr; + if (!rv || !dr) + return -ENODATA; + return 0; } @@ -487,6 +484,9 @@ int ni_find_route_source(const u8 src_sel_reg_value, int dest, { int src; + if (!tables->route_values) + return -EINVAL; + dest = B(dest); /* subtract NI names offset */ /* ensure we are not going to under/over run the route value table */ if (dest < 0 || dest >= NI_NUM_NAMES) diff --git a/drivers/staging/exfat/exfat.h b/drivers/staging/exfat/exfat.h index 2aac1e000977..51c665a924b7 100644 --- a/drivers/staging/exfat/exfat.h +++ b/drivers/staging/exfat/exfat.h @@ -805,8 +805,8 @@ s32 create_dir(struct inode *inode, struct chain_t *p_dir, s32 create_file(struct inode *inode, struct chain_t *p_dir, struct uni_name_t *p_uniname, u8 mode, struct file_id_t *fid); void remove_file(struct inode *inode, struct chain_t *p_dir, s32 entry); -s32 rename_file(struct inode *inode, struct chain_t *p_dir, s32 old_entry, - struct uni_name_t *p_uniname, struct file_id_t *fid); +s32 exfat_rename_file(struct inode *inode, struct chain_t *p_dir, s32 old_entry, + struct uni_name_t *p_uniname, struct file_id_t *fid); s32 move_file(struct inode *inode, struct chain_t *p_olddir, s32 oldentry, struct chain_t *p_newdir, struct uni_name_t *p_uniname, struct file_id_t *fid); diff --git a/drivers/staging/exfat/exfat_core.c b/drivers/staging/exfat/exfat_core.c index d2d3447083c7..794000e7bc6f 100644 --- a/drivers/staging/exfat/exfat_core.c +++ b/drivers/staging/exfat/exfat_core.c @@ -192,8 +192,6 @@ static s32 clr_alloc_bitmap(struct super_block *sb, u32 clu) exfat_bitmap_clear((u8 *)p_fs->vol_amap[i]->b_data, b); - return sector_write(sb, sector, p_fs->vol_amap[i], 0); - #ifdef CONFIG_EXFAT_DISCARD if (opts->discard) { ret = sb_issue_discard(sb, START_SECTOR(clu), @@ -202,9 +200,13 @@ static s32 clr_alloc_bitmap(struct super_block *sb, u32 clu) if (ret == -EOPNOTSUPP) { pr_warn("discard not supported by device, disabling"); opts->discard = 0; + } else { + return ret; } } #endif /* CONFIG_EXFAT_DISCARD */ + + return sector_write(sb, sector, p_fs->vol_amap[i], 0); } static u32 test_alloc_bitmap(struct super_block *sb, u32 clu) @@ -2322,8 +2324,8 @@ void remove_file(struct inode *inode, struct chain_t *p_dir, s32 entry) fs_func->delete_dir_entry(sb, p_dir, entry, 0, num_entries); } -s32 rename_file(struct inode *inode, struct chain_t *p_dir, s32 oldentry, - struct uni_name_t *p_uniname, struct file_id_t *fid) +s32 exfat_rename_file(struct inode *inode, struct chain_t *p_dir, s32 oldentry, + struct uni_name_t *p_uniname, struct file_id_t *fid) { s32 ret, newentry = -1, num_old_entries, num_new_entries; sector_t sector_old, sector_new; diff --git a/drivers/staging/exfat/exfat_super.c b/drivers/staging/exfat/exfat_super.c index 6e481908c59f..9f91853b189b 100644 --- a/drivers/staging/exfat/exfat_super.c +++ b/drivers/staging/exfat/exfat_super.c @@ -1262,8 +1262,8 @@ static int ffsMoveFile(struct inode *old_parent_inode, struct file_id_t *fid, fs_set_vol_flags(sb, VOL_DIRTY); if (olddir.dir == newdir.dir) - ret = rename_file(new_parent_inode, &olddir, dentry, &uni_name, - fid); + ret = exfat_rename_file(new_parent_inode, &olddir, dentry, + &uni_name, fid); else ret = move_file(new_parent_inode, &olddir, dentry, &newdir, &uni_name, fid); diff --git a/drivers/staging/fbtft/fb_uc1611.c 
b/drivers/staging/fbtft/fb_uc1611.c index e763205e9e4f..f61e373c75e9 100644 --- a/drivers/staging/fbtft/fb_uc1611.c +++ b/drivers/staging/fbtft/fb_uc1611.c @@ -63,11 +63,17 @@ static int init_display(struct fbtft_par *par) { int ret; - /* Set CS active high */ - par->spi->mode |= SPI_CS_HIGH; + /* + * Set CS active inverse polarity: just setting SPI_CS_HIGH does not + * work with GPIO based chip selects that are logically active high + * but inverted inside the GPIO library, so enforce inverted + * semantics. + */ + par->spi->mode ^= SPI_CS_HIGH; ret = spi_setup(par->spi); if (ret) { - dev_err(par->info->device, "Could not set SPI_CS_HIGH\n"); + dev_err(par->info->device, + "Could not set inverse CS polarity\n"); return ret; } diff --git a/drivers/staging/fbtft/fb_watterott.c b/drivers/staging/fbtft/fb_watterott.c index 27cc8eabcbe9..76b25df376b8 100644 --- a/drivers/staging/fbtft/fb_watterott.c +++ b/drivers/staging/fbtft/fb_watterott.c @@ -150,10 +150,17 @@ static int init_display(struct fbtft_par *par) /* enable SPI interface by having CS and MOSI low during reset */ save_mode = par->spi->mode; - par->spi->mode |= SPI_CS_HIGH; - ret = spi_setup(par->spi); /* set CS inactive low */ + /* + * Set CS active inverse polarity: just setting SPI_CS_HIGH does not + * work with GPIO based chip selects that are logically active high + * but inverted inside the GPIO library, so enforce inverted + * semantics. + */ + par->spi->mode ^= SPI_CS_HIGH; + ret = spi_setup(par->spi); if (ret) { - dev_err(par->info->device, "Could not set SPI_CS_HIGH\n"); + dev_err(par->info->device, + "Could not set inverse CS polarity\n"); return ret; } write_reg(par, 0x00); /* make sure mode is set */ diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c index ffb84987dd86..d3e098b41b1a 100644 --- a/drivers/staging/fbtft/fbtft-core.c +++ b/drivers/staging/fbtft/fbtft-core.c @@ -913,7 +913,7 @@ static int fbtft_init_display_from_property(struct fbtft_par *par) if (count == 0) return -EINVAL; - values = kmalloc_array(count, sizeof(*values), GFP_KERNEL); + values = kmalloc_array(count + 1, sizeof(*values), GFP_KERNEL); if (!values) return -ENOMEM; @@ -926,9 +926,9 @@ static int fbtft_init_display_from_property(struct fbtft_par *par) gpiod_set_value(par->gpio.cs, 0); /* Activate chip */ index = -1; - while (index < count) { - val = values[++index]; + val = values[++index]; + while (index < count) { if (val & FBTFT_OF_INIT_CMD) { val &= 0xFFFF; i = 0; diff --git a/drivers/staging/gasket/gasket_core.c b/drivers/staging/gasket/gasket_core.c index cd8be80d2076..be6b50f454b4 100644 --- a/drivers/staging/gasket/gasket_core.c +++ b/drivers/staging/gasket/gasket_core.c @@ -303,7 +303,7 @@ static int gasket_map_pci_bar(struct gasket_dev *gasket_dev, int bar_num) } gasket_dev->bar_data[bar_num].virt_base = - ioremap_nocache(gasket_dev->bar_data[bar_num].phys_base, + ioremap(gasket_dev->bar_data[bar_num].phys_base, gasket_dev->bar_data[bar_num].length_bytes); if (!gasket_dev->bar_data[bar_num].virt_base) { dev_err(gasket_dev->dev, diff --git a/drivers/staging/hp/Kconfig b/drivers/staging/hp/Kconfig index fb395cfe6b92..f20ab21a6b2a 100644 --- a/drivers/staging/hp/Kconfig +++ b/drivers/staging/hp/Kconfig @@ -6,6 +6,7 @@ config NET_VENDOR_HP bool "HP devices" default y + depends on ETHERNET depends on ISA || EISA || PCI ---help--- If you have a network (Ethernet) card belonging to this class, say Y. 
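[Editor's note] The fbtft-core hunk above cures an off-by-one: with index starting at -1 and the increment inside the loop body, the final pass of "while (index < count)" executed "values[++index]" with index == count - 1 and so read values[count], one element past the kmalloc_array() allocation. The patch both allocates count + 1 elements and hoists the first read so the bound is tested after each increment. A condensed, self-contained C illustration of the reshaped loop (the array contents here are made up for the demo):

#include <stdio.h>

static void walk(const int *values, int count)
{
	int index = -1;
	int val;

	/* Buggy shape: while (index < count) { val = values[++index]; ... }
	 * lets ++index reach 'count' before the condition is re-tested. */

	/* Fixed shape: prime 'val' once, then test before every use. */
	val = values[++index];
	while (index < count) {
		printf("values[%d] = %d\n", index, val);
		val = values[++index];	/* may touch values[count]: hence count + 1 slots */
	}
}

int main(void)
{
	int values[4 + 1] = { 10, 20, 30, 40, 0 };	/* count + 1 allocated */

	walk(values, 4);
	return 0;
}

Note that even the fixed loop still fetches values[count] on its way out, which is exactly why the hunk grows the allocation to count + 1 rather than only reordering the reads.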
diff --git a/drivers/staging/isdn/gigaset/usb-gigaset.c b/drivers/staging/isdn/gigaset/usb-gigaset.c index 1b9b43659bdf..a20c0bfa68f3 100644 --- a/drivers/staging/isdn/gigaset/usb-gigaset.c +++ b/drivers/staging/isdn/gigaset/usb-gigaset.c @@ -571,8 +571,7 @@ static int gigaset_initcshw(struct cardstate *cs) { struct usb_cardstate *ucs; - cs->hw.usb = ucs = - kmalloc(sizeof(struct usb_cardstate), GFP_KERNEL); + cs->hw.usb = ucs = kzalloc(sizeof(struct usb_cardstate), GFP_KERNEL); if (!ucs) { pr_err("out of memory\n"); return -ENOMEM; @@ -584,9 +583,6 @@ static int gigaset_initcshw(struct cardstate *cs) ucs->bchars[3] = 0; ucs->bchars[4] = 0x11; ucs->bchars[5] = 0x13; - ucs->bulk_out_buffer = NULL; - ucs->bulk_out_urb = NULL; - ucs->read_urb = NULL; tasklet_init(&cs->write_tasklet, gigaset_modem_fill, (unsigned long) cs); @@ -685,6 +681,11 @@ static int gigaset_probe(struct usb_interface *interface, return -ENODEV; } + if (hostif->desc.bNumEndpoints < 2) { + dev_err(&interface->dev, "missing endpoints\n"); + return -ENODEV; + } + dev_info(&udev->dev, "%s: Device matched ... !\n", __func__); /* allocate memory for our device state and initialize it */ @@ -704,6 +705,12 @@ static int gigaset_probe(struct usb_interface *interface, endpoint = &hostif->endpoint[0].desc; + if (!usb_endpoint_is_bulk_out(endpoint)) { + dev_err(&interface->dev, "missing bulk-out endpoint\n"); + retval = -ENODEV; + goto error; + } + buffer_size = le16_to_cpu(endpoint->wMaxPacketSize); ucs->bulk_out_size = buffer_size; ucs->bulk_out_epnum = usb_endpoint_num(endpoint); @@ -723,6 +730,12 @@ static int gigaset_probe(struct usb_interface *interface, endpoint = &hostif->endpoint[1].desc; + if (!usb_endpoint_is_int_in(endpoint)) { + dev_err(&interface->dev, "missing int-in endpoint\n"); + retval = -ENODEV; + goto error; + } + ucs->busy = 0; ucs->read_urb = usb_alloc_urb(0, GFP_KERNEL); diff --git a/drivers/staging/kpc2000/kpc2000/core.c b/drivers/staging/kpc2000/kpc2000/core.c index 0a23727d0dc3..93cf28febdf6 100644 --- a/drivers/staging/kpc2000/kpc2000/core.c +++ b/drivers/staging/kpc2000/kpc2000/core.c @@ -338,7 +338,7 @@ static int kp2000_pcie_probe(struct pci_dev *pdev, reg_bar_phys_addr = pci_resource_start(pcard->pdev, REG_BAR); reg_bar_phys_len = pci_resource_len(pcard->pdev, REG_BAR); - pcard->regs_bar_base = ioremap_nocache(reg_bar_phys_addr, PAGE_SIZE); + pcard->regs_bar_base = ioremap(reg_bar_phys_addr, PAGE_SIZE); if (!pcard->regs_bar_base) { dev_err(&pcard->pdev->dev, "probe: REG_BAR could not remap memory to virtual space\n"); @@ -367,7 +367,7 @@ static int kp2000_pcie_probe(struct pci_dev *pdev, dma_bar_phys_addr = pci_resource_start(pcard->pdev, DMA_BAR); dma_bar_phys_len = pci_resource_len(pcard->pdev, DMA_BAR); - pcard->dma_bar_base = ioremap_nocache(dma_bar_phys_addr, + pcard->dma_bar_base = ioremap(dma_bar_phys_addr, dma_bar_phys_len); if (!pcard->dma_bar_base) { dev_err(&pcard->pdev->dev, diff --git a/drivers/staging/kpc2000/kpc2000_i2c.c b/drivers/staging/kpc2000/kpc2000_i2c.c index 5460bf973c9c..592099a1fca5 100644 --- a/drivers/staging/kpc2000/kpc2000_i2c.c +++ b/drivers/staging/kpc2000/kpc2000_i2c.c @@ -659,7 +659,7 @@ static int pi2c_probe(struct platform_device *pldev) if (!res) return -ENXIO; - priv->smba = (unsigned long)devm_ioremap_nocache(&pldev->dev, + priv->smba = (unsigned long)devm_ioremap(&pldev->dev, res->start, resource_size(res)); if (!priv->smba) diff --git a/drivers/staging/kpc2000/kpc2000_spi.c b/drivers/staging/kpc2000/kpc2000_spi.c index 8becf972af9c..1c360daa703d 100644 --- 
a/drivers/staging/kpc2000/kpc2000_spi.c +++ b/drivers/staging/kpc2000/kpc2000_spi.c @@ -464,7 +464,7 @@ kp_spi_probe(struct platform_device *pldev) goto free_master; } - kpspi->base = devm_ioremap_nocache(&pldev->dev, r->start, + kpspi->base = devm_ioremap(&pldev->dev, r->start, resource_size(r)); status = spi_register_master(master); diff --git a/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c b/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c index a05ae6d40db9..ec79a8500caf 100644 --- a/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c +++ b/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c @@ -122,7 +122,7 @@ int kpc_dma_probe(struct platform_device *pldev) rv = -ENXIO; goto err_kfree; } - ldev->eng_regs = ioremap_nocache(r->start, resource_size(r)); + ldev->eng_regs = ioremap(r->start, resource_size(r)); if (!ldev->eng_regs) { dev_err(&ldev->pldev->dev, "%s: failed to ioremap engine regs!\n", __func__); rv = -ENXIO; diff --git a/drivers/staging/media/allegro-dvt/allegro-core.c b/drivers/staging/media/allegro-dvt/allegro-core.c index 6f0cd0784786..3be41698df4c 100644 --- a/drivers/staging/media/allegro-dvt/allegro-core.c +++ b/drivers/staging/media/allegro-dvt/allegro-core.c @@ -2914,7 +2914,7 @@ static int allegro_probe(struct platform_device *pdev) "regs resource missing from device tree\n"); return -EINVAL; } - regs = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res)); + regs = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (IS_ERR(regs)) { dev_err(&pdev->dev, "failed to map registers\n"); return PTR_ERR(regs); @@ -2932,7 +2932,7 @@ static int allegro_probe(struct platform_device *pdev) "sram resource missing from device tree\n"); return -EINVAL; } - sram_regs = devm_ioremap_nocache(&pdev->dev, + sram_regs = devm_ioremap(&pdev->dev, sram_res->start, resource_size(sram_res)); if (IS_ERR(sram_regs)) { diff --git a/drivers/staging/media/ipu3/include/intel-ipu3.h b/drivers/staging/media/ipu3/include/intel-ipu3.h index 08eaa0bad0de..1c9c3ba4d518 100644 --- a/drivers/staging/media/ipu3/include/intel-ipu3.h +++ b/drivers/staging/media/ipu3/include/intel-ipu3.h @@ -449,7 +449,7 @@ struct ipu3_uapi_awb_fr_config_s { __u16 reserved1; __u32 bayer_sign; __u8 bayer_nf; - __u8 reserved2[3]; + __u8 reserved2[7]; } __attribute__((aligned(32))) __packed; /** diff --git a/drivers/staging/octeon/Kconfig b/drivers/staging/octeon/Kconfig index 5319909eb2f6..e7f4ddcc1361 100644 --- a/drivers/staging/octeon/Kconfig +++ b/drivers/staging/octeon/Kconfig @@ -3,6 +3,7 @@ config OCTEON_ETHERNET tristate "Cavium Networks Octeon Ethernet support" depends on CAVIUM_OCTEON_SOC || COMPILE_TEST depends on NETDEVICES + depends on BROKEN select PHYLIB select MDIO_OCTEON help diff --git a/drivers/staging/qlge/qlge_ethtool.c b/drivers/staging/qlge/qlge_ethtool.c index a6886cc5654c..56d116d79e56 100644 --- a/drivers/staging/qlge/qlge_ethtool.c +++ b/drivers/staging/qlge/qlge_ethtool.c @@ -41,7 +41,7 @@ struct ql_stats { int stat_offset; }; -#define QL_SIZEOF(m) FIELD_SIZEOF(struct ql_adapter, m) +#define QL_SIZEOF(m) sizeof_field(struct ql_adapter, m) #define QL_OFF(m) offsetof(struct ql_adapter, m) static const struct ql_stats ql_gstrings_stats[] = { diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c index 6ad4515311f7..d890d38a1d29 100644 --- a/drivers/staging/qlge/qlge_main.c +++ b/drivers/staging/qlge/qlge_main.c @@ -4455,7 +4455,7 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev, pdev->needs_freset = 1; pci_save_state(pdev); 
qdev->reg_base = - ioremap_nocache(pci_resource_start(pdev, 1), + ioremap(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1)); if (!qdev->reg_base) { dev_err(&pdev->dev, "Register mapping failed.\n"); @@ -4465,7 +4465,7 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev, qdev->doorbell_area_size = pci_resource_len(pdev, 3); qdev->doorbell_area = - ioremap_nocache(pci_resource_start(pdev, 3), + ioremap(pci_resource_start(pdev, 3), pci_resource_len(pdev, 3)); if (!qdev->doorbell_area) { dev_err(&pdev->dev, "Doorbell register mapping failed.\n"); diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c index 4fac9dca798e..b5d42f411dd8 100644 --- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c +++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c @@ -37,6 +37,7 @@ static const struct usb_device_id rtw_usb_id_tbl[] = { {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */ {USB_DEVICE(0x2001, 0x331B)}, /* D-Link DWA-121 rev B1 */ {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */ + {USB_DEVICE(0x2357, 0x0111)}, /* TP-Link TL-WN727N v5.21 */ {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */ {} /* Terminating entry */ @@ -70,7 +71,7 @@ static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf) phost_conf = pusbd->actconfig; pconf_desc = &phost_conf->desc; - phost_iface = &usb_intf->altsetting[0]; + phost_iface = usb_intf->cur_altsetting; piface_desc = &phost_iface->desc; pdvobjpriv->NumInterfaces = pconf_desc->bNumInterfaces; diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c index dace81a7d1ba..e895473fcfd7 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c @@ -2463,7 +2463,7 @@ static int _rtl92e_pci_probe(struct pci_dev *pdev, } - ioaddr = (unsigned long)ioremap_nocache(pmem_start, pmem_len); + ioaddr = (unsigned long)ioremap(pmem_start, pmem_len); if (ioaddr == (unsigned long)NULL) { netdev_err(dev, "ioremap failed!"); goto err_rel_mem; diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c index ba1288297ee4..a87562f632a7 100644 --- a/drivers/staging/rtl8712/usb_intf.c +++ b/drivers/staging/rtl8712/usb_intf.c @@ -247,7 +247,7 @@ static uint r8712_usb_dvobj_init(struct _adapter *padapter) pdvobjpriv->padapter = padapter; padapter->eeprom_address_size = 6; - phost_iface = &pintf->altsetting[0]; + phost_iface = pintf->cur_altsetting; piface_desc = &phost_iface->desc; pdvobjpriv->nr_endpoint = piface_desc->bNumEndpoints; if (pusbd->speed == USB_SPEED_HIGH) { diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c index cb95ad6fa4f9..fbb42e5258fd 100644 --- a/drivers/staging/rts5208/rtsx.c +++ b/drivers/staging/rts5208/rtsx.c @@ -858,7 +858,7 @@ static int rtsx_probe(struct pci_dev *pci, dev_info(&pci->dev, "Resource length: 0x%x\n", (unsigned int)pci_resource_len(pci, 0)); dev->addr = pci_resource_start(pci, 0); - dev->remap_addr = ioremap_nocache(dev->addr, pci_resource_len(pci, 0)); + dev->remap_addr = ioremap(dev->addr, pci_resource_len(pci, 0)); if (!dev->remap_addr) { dev_err(&pci->dev, "ioremap error\n"); err = -ENXIO; diff --git a/drivers/staging/sm750fb/sm750_hw.c b/drivers/staging/sm750fb/sm750_hw.c index ea1d3d4efbc2..b8d60701f898 100644 --- a/drivers/staging/sm750fb/sm750_hw.c +++ b/drivers/staging/sm750fb/sm750_hw.c @@ -50,7 +50,7 @@ int hw_sm750_map(struct 
sm750_dev *sm750_dev, struct pci_dev *pdev) } /* now map mmio and vidmem */ - sm750_dev->pvReg = ioremap_nocache(sm750_dev->vidreg_start, + sm750_dev->pvReg = ioremap(sm750_dev->vidreg_start, sm750_dev->vidreg_size); if (!sm750_dev->pvReg) { pr_err("mmio failed\n"); diff --git a/drivers/staging/uwb/whc-rc.c b/drivers/staging/uwb/whc-rc.c index 34020ed351ab..a5ab255d7d36 100644 --- a/drivers/staging/uwb/whc-rc.c +++ b/drivers/staging/uwb/whc-rc.c @@ -216,11 +216,11 @@ int whcrc_setup_rc_umc(struct whcrc *whcrc) goto error_request_region; } - whcrc->rc_base = ioremap_nocache(whcrc->area, whcrc->rc_len); + whcrc->rc_base = ioremap(whcrc->area, whcrc->rc_len); if (whcrc->rc_base == NULL) { dev_err(dev, "can't ioremap registers (%zu bytes @ 0x%lx): %d\n", whcrc->rc_len, whcrc->area, result); - goto error_ioremap_nocache; + goto error_ioremap; } result = request_irq(umc_dev->irq, whcrc_irq_cb, IRQF_SHARED, @@ -254,7 +254,7 @@ error_cmd_buffer: free_irq(umc_dev->irq, whcrc); error_request_irq: iounmap(whcrc->rc_base); -error_ioremap_nocache: +error_ioremap: release_mem_region(whcrc->area, whcrc->rc_len); error_request_region: return result; diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c index 02148a24818a..4458c1e60fa3 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c @@ -3309,7 +3309,7 @@ static int __init vchiq_driver_init(void) return 0; region_unregister: - platform_driver_unregister(&vchiq_driver); + unregister_chrdev_region(vchiq_devid, 1); class_destroy: class_destroy(vchiq_class); diff --git a/drivers/staging/vt6656/baseband.c b/drivers/staging/vt6656/baseband.c index 8d19ae71e7cc..4e651b698617 100644 --- a/drivers/staging/vt6656/baseband.c +++ b/drivers/staging/vt6656/baseband.c @@ -449,8 +449,8 @@ int vnt_vt3184_init(struct vnt_private *priv) memcpy(array, addr, length); - ret = vnt_control_out(priv, MESSAGE_TYPE_WRITE, 0, - MESSAGE_REQUEST_BBREG, length, array); + ret = vnt_control_out_blocks(priv, VNT_REG_BLOCK_SIZE, + MESSAGE_REQUEST_BBREG, length, array); if (ret) goto end; diff --git a/drivers/staging/vt6656/card.c b/drivers/staging/vt6656/card.c index 56cd77fd9ea0..7958fc165462 100644 --- a/drivers/staging/vt6656/card.c +++ b/drivers/staging/vt6656/card.c @@ -719,7 +719,7 @@ end: */ int vnt_radio_power_on(struct vnt_private *priv) { - int ret = true; + int ret = 0; vnt_exit_deep_sleep(priv); diff --git a/drivers/staging/vt6656/device.h b/drivers/staging/vt6656/device.h index 6074ceda78bf..50e1c8918040 100644 --- a/drivers/staging/vt6656/device.h +++ b/drivers/staging/vt6656/device.h @@ -259,6 +259,7 @@ struct vnt_private { u8 mac_hw; /* netdev */ struct usb_device *usb; + struct usb_interface *intf; u64 tsf_time; u8 rx_rate; diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c index 4ac85ecb0921..9cb924c54571 100644 --- a/drivers/staging/vt6656/main_usb.c +++ b/drivers/staging/vt6656/main_usb.c @@ -949,7 +949,7 @@ static const struct ieee80211_ops vnt_mac_ops = { int vnt_init(struct vnt_private *priv) { - if (!(vnt_init_registers(priv))) + if (vnt_init_registers(priv)) return -EAGAIN; SET_IEEE80211_PERM_ADDR(priv->hw, priv->permanent_net_addr); @@ -992,6 +992,7 @@ vt6656_probe(struct usb_interface *intf, const struct usb_device_id *id) priv = hw->priv; priv->hw = hw; priv->usb = udev; + priv->intf = intf; vnt_set_options(priv); diff --git 
a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c index d3304df6bd53..d977d4777e4f 100644 --- a/drivers/staging/vt6656/usbpipe.c +++ b/drivers/staging/vt6656/usbpipe.c @@ -59,7 +59,9 @@ int vnt_control_out(struct vnt_private *priv, u8 request, u16 value, kfree(usb_buffer); - if (ret >= 0 && ret < (int)length) + if (ret == (int)length) + ret = 0; + else ret = -EIO; end_unlock: @@ -74,6 +76,23 @@ int vnt_control_out_u8(struct vnt_private *priv, u8 reg, u8 reg_off, u8 data) reg_off, reg, sizeof(u8), &data); } +int vnt_control_out_blocks(struct vnt_private *priv, + u16 block, u8 reg, u16 length, u8 *data) +{ + int ret = 0, i; + + for (i = 0; i < length; i += block) { + u16 len = min_t(int, length - i, block); + + ret = vnt_control_out(priv, MESSAGE_TYPE_WRITE, + i, reg, len, data + i); + if (ret) + goto end; + } +end: + return ret; +} + int vnt_control_in(struct vnt_private *priv, u8 request, u16 value, u16 index, u16 length, u8 *buffer) { @@ -103,7 +122,9 @@ int vnt_control_in(struct vnt_private *priv, u8 request, u16 value, kfree(usb_buffer); - if (ret >= 0 && ret < (int)length) + if (ret == (int)length) + ret = 0; + else ret = -EIO; end_unlock: diff --git a/drivers/staging/vt6656/usbpipe.h b/drivers/staging/vt6656/usbpipe.h index 95147ec7b96a..b65d9c01a211 100644 --- a/drivers/staging/vt6656/usbpipe.h +++ b/drivers/staging/vt6656/usbpipe.h @@ -18,6 +18,8 @@ #include "device.h" +#define VNT_REG_BLOCK_SIZE 64 + int vnt_control_out(struct vnt_private *priv, u8 request, u16 value, u16 index, u16 length, u8 *buffer); int vnt_control_in(struct vnt_private *priv, u8 request, u16 value, @@ -26,6 +28,9 @@ int vnt_control_in(struct vnt_private *priv, u8 request, u16 value, int vnt_control_out_u8(struct vnt_private *priv, u8 reg, u8 ref_off, u8 data); int vnt_control_in_u8(struct vnt_private *priv, u8 reg, u8 reg_off, u8 *data); +int vnt_control_out_blocks(struct vnt_private *priv, + u16 block, u8 reg, u16 len, u8 *data); + int vnt_start_interrupt_urb(struct vnt_private *priv); int vnt_submit_rx_urb(struct vnt_private *priv, struct vnt_rcb *rcb); int vnt_tx_context(struct vnt_private *priv, diff --git a/drivers/staging/vt6656/wcmd.c b/drivers/staging/vt6656/wcmd.c index 3eb2f11a5de1..2c5250ca2801 100644 --- a/drivers/staging/vt6656/wcmd.c +++ b/drivers/staging/vt6656/wcmd.c @@ -99,6 +99,7 @@ void vnt_run_command(struct work_struct *work) if (vnt_init(priv)) { /* If fail all ends TODO retry */ dev_err(&priv->usb->dev, "failed to start\n"); + usb_set_intfdata(priv->intf, NULL); ieee80211_free_hw(priv->hw); return; } diff --git a/drivers/staging/wfx/data_tx.c b/drivers/staging/wfx/data_tx.c index b722e9773232..b13d7341f8bb 100644 --- a/drivers/staging/wfx/data_tx.c +++ b/drivers/staging/wfx/data_tx.c @@ -16,7 +16,7 @@ #include "traces.h" #include "hif_tx_mib.h" -#define WFX_INVALID_RATE_ID (0xFF) +#define WFX_INVALID_RATE_ID 15 #define WFX_LINK_ID_NO_ASSOC 15 #define WFX_LINK_ID_GC_TIMEOUT ((unsigned long)(10 * HZ)) @@ -184,7 +184,7 @@ static int wfx_tx_policy_get(struct wfx_vif *wvif, */ entry = list_entry(cache->free.prev, struct tx_policy, link); memcpy(entry->rates, wanted.rates, sizeof(entry->rates)); - entry->uploaded = 0; + entry->uploaded = false; entry->usage_count = 0; idx = entry - cache->cache; } @@ -202,6 +202,8 @@ static void wfx_tx_policy_put(struct wfx_vif *wvif, int idx) int usage, locked; struct tx_policy_cache *cache = &wvif->tx_policy_cache; + if (idx == WFX_INVALID_RATE_ID) + return; spin_lock_bh(&cache->lock); locked = list_empty(&cache->free); usage = 
wfx_tx_policy_release(cache, &cache->cache[idx]); @@ -239,7 +241,7 @@ static int wfx_tx_policy_upload(struct wfx_vif *wvif) dst->terminate = 1; dst->count_init = 1; memcpy(&dst->rates, src->rates, sizeof(src->rates)); - src->uploaded = 1; + src->uploaded = true; arg->num_tx_rate_policies++; } } @@ -249,7 +251,7 @@ static int wfx_tx_policy_upload(struct wfx_vif *wvif) return 0; } -static void wfx_tx_policy_upload_work(struct work_struct *work) +void wfx_tx_policy_upload_work(struct work_struct *work) { struct wfx_vif *wvif = container_of(work, struct wfx_vif, tx_policy_upload_work); @@ -270,7 +272,6 @@ void wfx_tx_policy_init(struct wfx_vif *wvif) spin_lock_init(&cache->lock); INIT_LIST_HEAD(&cache->used); INIT_LIST_HEAD(&cache->free); - INIT_WORK(&wvif->tx_policy_upload_work, wfx_tx_policy_upload_work); for (i = 0; i < HIF_MIB_NUM_TX_RATE_RETRY_POLICIES; ++i) list_add(&cache->cache[i].link, &cache->free); @@ -523,9 +524,9 @@ static void wfx_tx_fixup_rates(struct ieee80211_tx_rate *rates) for (i = 0; i < IEEE80211_TX_MAX_RATES - 1; i++) { if (rates[i + 1].idx == rates[i].idx && rates[i].idx != -1) { - rates[i].count = - max_t(int, rates[i].count, - rates[i + 1].count); + rates[i].count += rates[i + 1].count; + if (rates[i].count > 15) + rates[i].count = 15; rates[i + 1].idx = -1; rates[i + 1].count = 0; @@ -537,6 +538,17 @@ static void wfx_tx_fixup_rates(struct ieee80211_tx_rate *rates) } } } while (!finished); + // Ensure that MCS0 or 1Mbps is present at the end of the retry list + for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { + if (rates[i].idx == 0) + break; + if (rates[i].idx == -1) { + rates[i].idx = 0; + rates[i].count = 8; // == hw->max_rate_tries + rates[i].flags = rates[i - 1].flags & IEEE80211_TX_RC_MCS; + break; + } + } // All retries use long GI for (i = 1; i < IEEE80211_TX_MAX_RATES; i++) rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI; @@ -550,7 +562,8 @@ static u8 wfx_tx_get_rate_id(struct wfx_vif *wvif, rate_id = wfx_tx_policy_get(wvif, tx_info->driver_rates, &tx_policy_renew); - WARN(rate_id == WFX_INVALID_RATE_ID, "unable to get a valid Tx policy"); + if (rate_id == WFX_INVALID_RATE_ID) + dev_warn(wvif->wdev->dev, "unable to get a valid Tx policy"); if (tx_policy_renew) { /* FIXME: It's not so optimal to stop TX queues every now and @@ -679,7 +692,7 @@ void wfx_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct ieee80211_sta *sta = control ? 
control->sta : NULL; struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - size_t driver_data_room = FIELD_SIZEOF(struct ieee80211_tx_info, + size_t driver_data_room = sizeof_field(struct ieee80211_tx_info, rate_driver_data); compiletime_assert(sizeof(struct wfx_tx_priv) <= driver_data_room, @@ -735,7 +748,9 @@ void wfx_tx_confirm_cb(struct wfx_vif *wvif, struct hif_cnf_tx *arg) rate = &tx_info->status.rates[i]; if (rate->idx < 0) break; - if (tx_count < rate->count && arg->status && arg->ack_failures) + if (tx_count < rate->count && + arg->status == HIF_STATUS_RETRY_EXCEEDED && + arg->ack_failures) dev_dbg(wvif->wdev->dev, "all retries were not consumed: %d != %d\n", rate->count, tx_count); if (tx_count <= rate->count && tx_count && diff --git a/drivers/staging/wfx/data_tx.h b/drivers/staging/wfx/data_tx.h index 29faa5640516..0fc388db62e0 100644 --- a/drivers/staging/wfx/data_tx.h +++ b/drivers/staging/wfx/data_tx.h @@ -39,9 +39,9 @@ struct wfx_link_entry { struct tx_policy { struct list_head link; + int usage_count; u8 rates[12]; - u8 usage_count; - u8 uploaded; + bool uploaded; }; struct tx_policy_cache { @@ -61,6 +61,7 @@ struct wfx_tx_priv { } __packed; void wfx_tx_policy_init(struct wfx_vif *wvif); +void wfx_tx_policy_upload_work(struct work_struct *work); void wfx_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb); diff --git a/drivers/staging/wfx/hif_tx_mib.h b/drivers/staging/wfx/hif_tx_mib.h index bb091e395ff5..9be74881c56c 100644 --- a/drivers/staging/wfx/hif_tx_mib.h +++ b/drivers/staging/wfx/hif_tx_mib.h @@ -147,7 +147,6 @@ static inline int hif_set_mfp(struct wfx_vif *wvif, bool capable, bool required) } if (!required) val.unpmf_allowed = 1; - cpu_to_le32s((u32 *) &val); return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_PROTECTED_MGMT_POLICY, &val, sizeof(val)); diff --git a/drivers/staging/wfx/main.c b/drivers/staging/wfx/main.c index 986a2ef678b9..3b47b6c21ea1 100644 --- a/drivers/staging/wfx/main.c +++ b/drivers/staging/wfx/main.c @@ -289,7 +289,7 @@ struct wfx_dev *wfx_init_common(struct device *dev, hw->sta_data_size = sizeof(struct wfx_sta_priv); hw->queues = 4; hw->max_rates = 8; - hw->max_rate_tries = 15; + hw->max_rate_tries = 8; hw->extra_tx_headroom = sizeof(struct hif_sl_msg_hdr) + sizeof(struct hif_msg) + sizeof(struct hif_req_tx) diff --git a/drivers/staging/wfx/queue.c b/drivers/staging/wfx/queue.c index c7ee90888f69..680fed31cefb 100644 --- a/drivers/staging/wfx/queue.c +++ b/drivers/staging/wfx/queue.c @@ -422,6 +422,7 @@ static bool hif_handle_tx_data(struct wfx_vif *wvif, struct sk_buff *skb, break; case do_wep: wfx_tx_lock(wvif->wdev); + WARN_ON(wvif->wep_pending_skb); wvif->wep_default_key_id = tx_priv->hw_key->keyidx; wvif->wep_pending_skb = skb; if (!schedule_work(&wvif->wep_key_work)) diff --git a/drivers/staging/wfx/sta.c b/drivers/staging/wfx/sta.c index 29848a202ab4..471dd15b227f 100644 --- a/drivers/staging/wfx/sta.c +++ b/drivers/staging/wfx/sta.c @@ -592,6 +592,7 @@ static void wfx_do_unjoin(struct wfx_vif *wvif) wfx_tx_flush(wvif->wdev); hif_keep_alive_period(wvif, 0); hif_reset(wvif, false); + wfx_tx_policy_init(wvif); hif_set_output_power(wvif, wvif->wdev->output_power * 10); wvif->dtim_period = 0; hif_set_macaddr(wvif, wvif->vif->addr); @@ -880,8 +881,10 @@ static int wfx_update_beaconing(struct wfx_vif *wvif) if (wvif->state != WFX_STATE_AP || wvif->beacon_int != conf->beacon_int) { wfx_tx_lock_flush(wvif->wdev); - if (wvif->state != 
WFX_STATE_PASSIVE) + if (wvif->state != WFX_STATE_PASSIVE) { hif_reset(wvif, false); + wfx_tx_policy_init(wvif); + } wvif->state = WFX_STATE_PASSIVE; wfx_start_ap(wvif); wfx_tx_unlock(wvif->wdev); @@ -1567,6 +1570,7 @@ int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) INIT_WORK(&wvif->set_cts_work, wfx_set_cts_work); INIT_WORK(&wvif->unjoin_work, wfx_unjoin_work); + INIT_WORK(&wvif->tx_policy_upload_work, wfx_tx_policy_upload_work); mutex_unlock(&wdev->conf_mutex); hif_set_macaddr(wvif, vif->addr); diff --git a/drivers/staging/wlan-ng/Kconfig b/drivers/staging/wlan-ng/Kconfig index ac136663fa8e..082c16a31616 100644 --- a/drivers/staging/wlan-ng/Kconfig +++ b/drivers/staging/wlan-ng/Kconfig @@ -4,6 +4,7 @@ config PRISM2_USB depends on WLAN && USB && CFG80211 select WIRELESS_EXT select WEXT_PRIV + select CRC32 help This is the wlan-ng prism 2.5/3 USB driver for a wide range of old USB wireless devices. diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c index e877b917c15f..30ea37e1a3f5 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_main.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c @@ -708,7 +708,7 @@ static int __init cxgbit_init(void) pr_info("%s dcb enabled.\n", DRV_NAME); register_dcbevent_notifier(&cxgbit_dcbevent_nb); #endif - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, cb) < + BUILD_BUG_ON(sizeof_field(struct sk_buff, cb) < sizeof(union cxgbit_skb_cb)); return 0; } diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 7251a87bb576..b94ed4e30770 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -4149,9 +4149,6 @@ int iscsit_close_connection( iscsit_stop_nopin_response_timer(conn); iscsit_stop_nopin_timer(conn); - if (conn->conn_transport->iscsit_wait_conn) - conn->conn_transport->iscsit_wait_conn(conn); - /* * During Connection recovery drop unacknowledged out of order * commands for this connection, and prepare the other commands @@ -4237,6 +4234,9 @@ int iscsit_close_connection( target_sess_cmd_list_set_waiting(sess->se_sess); target_wait_for_sess_cmds(sess->se_sess); + if (conn->conn_transport->iscsit_wait_conn) + conn->conn_transport->iscsit_wait_conn(conn); + ahash_request_free(conn->conn_tx_hash); if (conn->conn_rx_hash) { struct crypto_ahash *tfm; diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 6949ea8bc387..51ffd5c002de 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -646,7 +646,9 @@ iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio, } bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio)); - bip_set_seed(bip, bio->bi_iter.bi_sector); + /* virtual start sector must be in integrity interval units */ + bip_set_seed(bip, bio->bi_iter.bi_sector >> + (bi->interval_exp - SECTOR_SHIFT)); pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size, (unsigned long long)bip->bip_iter.bi_sector); diff --git a/drivers/tc/tc.c b/drivers/tc/tc.c index cf3fad2cb871..c5b17dd8f587 100644 --- a/drivers/tc/tc.c +++ b/drivers/tc/tc.c @@ -47,7 +47,7 @@ static void __init tc_bus_add_devices(struct tc_bus *tbus) for (slot = 0; slot < tbus->num_tcslots; slot++) { slotaddr = tbus->slot_base + slot * slotsize; extslotaddr = tbus->ext_slot_base + slot * extslotsize; - module = ioremap_nocache(slotaddr, slotsize); + module = ioremap(slotaddr, slotsize); BUG_ON(!module); offset = TC_OLDCARD; diff --git a/drivers/tee/optee/Kconfig 
b/drivers/tee/optee/Kconfig index d1ad512e1708..3ca71e3812ed 100644 --- a/drivers/tee/optee/Kconfig +++ b/drivers/tee/optee/Kconfig @@ -3,6 +3,7 @@ config OPTEE tristate "OP-TEE" depends on HAVE_ARM_SMCCC + depends on MMU help This implements the OP-TEE Trusted Execution Environment (TEE) driver. diff --git a/drivers/tee/optee/shm_pool.c b/drivers/tee/optee/shm_pool.c index 0332a5301d61..d767eebf30bd 100644 --- a/drivers/tee/optee/shm_pool.c +++ b/drivers/tee/optee/shm_pool.c @@ -28,9 +28,22 @@ static int pool_op_alloc(struct tee_shm_pool_mgr *poolm, shm->size = PAGE_SIZE << order; if (shm->flags & TEE_SHM_DMA_BUF) { + unsigned int nr_pages = 1 << order, i; + struct page **pages; + + pages = kcalloc(nr_pages, sizeof(pages), GFP_KERNEL); + if (!pages) + return -ENOMEM; + + for (i = 0; i < nr_pages; i++) { + pages[i] = page; + page++; + } + shm->flags |= TEE_SHM_REGISTER; - rc = optee_shm_register(shm->ctx, shm, &page, 1 << order, + rc = optee_shm_register(shm->ctx, shm, pages, nr_pages, (unsigned long)shm->kaddr); + kfree(pages); } return rc; diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index 59b79fc48266..79b27865c6f4 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig @@ -108,7 +108,7 @@ config THERMAL_DEFAULT_GOV_USER_SPACE config THERMAL_DEFAULT_GOV_POWER_ALLOCATOR bool "power_allocator" - select THERMAL_GOV_POWER_ALLOCATOR + depends on THERMAL_GOV_POWER_ALLOCATOR help Select this if you want to control temperature based on system and device power allocation. This governor can only diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c index 3517883b5cdb..efae0c02d898 100644 --- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c @@ -369,6 +369,7 @@ static int int3400_thermal_remove(struct platform_device *pdev) } static const struct acpi_device_id int3400_thermal_match[] = { + {"INT1040", 0}, {"INT3400", 0}, {} }; diff --git a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c index a7bbd8584ae2..aeece1e136a5 100644 --- a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c @@ -282,6 +282,7 @@ static int int3403_remove(struct platform_device *pdev) } static const struct acpi_device_id int3403_device_ids[] = { + {"INT1043", 0}, {"INT3403", 0}, {"", 0}, }; diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c index 015e7d201598..0e7cf5236932 100644 --- a/drivers/thermal/qcom/tsens.c +++ b/drivers/thermal/qcom/tsens.c @@ -110,6 +110,9 @@ static int tsens_register(struct tsens_priv *priv) irq = platform_get_irq_byname(pdev, "uplow"); if (irq < 0) { ret = irq; + /* For old DTs with no IRQ defined */ + if (irq == -ENXIO) + ret = 0; goto err_put_device; } diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c index 4562c8060d09..a6aabfd6e2da 100644 --- a/drivers/tty/cyclades.c +++ b/drivers/tty/cyclades.c @@ -3256,7 +3256,7 @@ static int __init cy_detect_isa(void) return nboard; /* probe for CD1400... 
*/ - cy_isa_address = ioremap_nocache(isa_address, CyISA_Ywin); + cy_isa_address = ioremap(isa_address, CyISA_Ywin); if (cy_isa_address == NULL) { printk(KERN_ERR "Cyclom-Y/ISA: can't remap base " "address\n"); @@ -3690,13 +3690,13 @@ static int cy_pci_probe(struct pci_dev *pdev, device_id == PCI_DEVICE_ID_CYCLOM_Y_Hi) { card_name = "Cyclom-Y"; - addr0 = ioremap_nocache(pci_resource_start(pdev, 0), + addr0 = ioremap(pci_resource_start(pdev, 0), CyPCI_Yctl); if (addr0 == NULL) { dev_err(&pdev->dev, "can't remap ctl region\n"); goto err_reg; } - addr2 = ioremap_nocache(pci_resource_start(pdev, 2), + addr2 = ioremap(pci_resource_start(pdev, 2), CyPCI_Ywin); if (addr2 == NULL) { dev_err(&pdev->dev, "can't remap base region\n"); @@ -3712,7 +3712,7 @@ static int cy_pci_probe(struct pci_dev *pdev, } else if (device_id == PCI_DEVICE_ID_CYCLOM_Z_Hi) { struct RUNTIME_9060 __iomem *ctl_addr; - ctl_addr = addr0 = ioremap_nocache(pci_resource_start(pdev, 0), + ctl_addr = addr0 = ioremap(pci_resource_start(pdev, 0), CyPCI_Zctl); if (addr0 == NULL) { dev_err(&pdev->dev, "can't remap ctl region\n"); @@ -3727,7 +3727,7 @@ static int cy_pci_probe(struct pci_dev *pdev, mailbox = readl(&ctl_addr->mail_box_0); - addr2 = ioremap_nocache(pci_resource_start(pdev, 2), + addr2 = ioremap(pci_resource_start(pdev, 2), mailbox == ZE_V1 ? CyPCI_Ze_win : CyPCI_Zwin); if (addr2 == NULL) { dev_err(&pdev->dev, "can't remap base region\n"); diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c index 4c1cd49ae95b..620d8488b83e 100644 --- a/drivers/tty/mips_ejtag_fdc.c +++ b/drivers/tty/mips_ejtag_fdc.c @@ -898,7 +898,7 @@ static int mips_ejtag_fdc_tty_probe(struct mips_cdmm_device *dev) atomic_set(&priv->xmit_total, 0); raw_spin_lock_init(&priv->lock); - priv->reg = devm_ioremap_nocache(priv->dev, dev->res.start, + priv->reg = devm_ioremap(priv->dev, dev->res.start, resource_size(&dev->res)); if (!priv->reg) { dev_err(priv->dev, "ioremap failed for resource %pR\n", diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c index 3a1a5e0ee93f..9f13f7d49dd7 100644 --- a/drivers/tty/moxa.c +++ b/drivers/tty/moxa.c @@ -961,7 +961,7 @@ static int moxa_pci_probe(struct pci_dev *pdev, goto err; } - board->basemem = ioremap_nocache(pci_resource_start(pdev, 2), 0x4000); + board->basemem = ioremap(pci_resource_start(pdev, 2), 0x4000); if (board->basemem == NULL) { dev_err(&pdev->dev, "can't remap io space 2\n"); retval = -ENOMEM; @@ -1071,7 +1071,7 @@ static int __init moxa_init(void) brd->numPorts = type[i] == MOXA_BOARD_C218_ISA ? 
8 : numports[i]; brd->busType = MOXA_BUS_TYPE_ISA; - brd->basemem = ioremap_nocache(baseaddr[i], 0x4000); + brd->basemem = ioremap(baseaddr[i], 0x4000); if (!brd->basemem) { printk(KERN_ERR "MOXA: can't remap %lx\n", baseaddr[i]); diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c index 226adeec2aed..ce5309d00280 100644 --- a/drivers/tty/serdev/core.c +++ b/drivers/tty/serdev/core.c @@ -663,6 +663,12 @@ static acpi_status acpi_serdev_register_device(struct serdev_controller *ctrl, return AE_OK; } +static const struct acpi_device_id serdev_acpi_devices_blacklist[] = { + { "INT3511", 0 }, + { "INT3512", 0 }, + { }, +}; + static acpi_status acpi_serdev_add_device(acpi_handle handle, u32 level, void *data, void **return_value) { @@ -675,6 +681,10 @@ static acpi_status acpi_serdev_add_device(acpi_handle handle, u32 level, if (acpi_device_enumerated(adev)) return AE_OK; + /* Skip if black listed */ + if (!acpi_match_device_ids(adev, serdev_acpi_devices_blacklist)) + return AE_OK; + if (acpi_serdev_check_resources(ctrl, adev)) return AE_OK; diff --git a/drivers/tty/serial/8250/8250_gsc.c b/drivers/tty/serial/8250/8250_gsc.c index 0809ae2aa9b1..673cda3d011d 100644 --- a/drivers/tty/serial/8250/8250_gsc.c +++ b/drivers/tty/serial/8250/8250_gsc.c @@ -55,7 +55,7 @@ static int __init serial_init_chip(struct parisc_device *dev) uart.port.uartclk = (dev->id.sversion != 0xad) ? 7272727 : 1843200; uart.port.mapbase = address; - uart.port.membase = ioremap_nocache(address, 16); + uart.port.membase = ioremap(address, 16); if (!uart.port.membase) { dev_warn(&dev->dev, "Failed to map memory\n"); return -ENOMEM; diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c index 836e736ae188..e603c66d6cc4 100644 --- a/drivers/tty/serial/8250/8250_omap.c +++ b/drivers/tty/serial/8250/8250_omap.c @@ -1147,7 +1147,7 @@ static int omap8250_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; - membase = devm_ioremap_nocache(&pdev->dev, regs->start, + membase = devm_ioremap(&pdev->dev, regs->start, resource_size(regs)); if (!membase) return -ENODEV; diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 022924d5ad54..939685fed396 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -275,7 +275,7 @@ static int pci_plx9050_init(struct pci_dev *dev) /* * enable/disable interrupts */ - p = ioremap_nocache(pci_resource_start(dev, 0), 0x80); + p = ioremap(pci_resource_start(dev, 0), 0x80); if (p == NULL) return -ENOMEM; writel(irq_config, p + 0x4c); @@ -299,7 +299,7 @@ static void pci_plx9050_exit(struct pci_dev *dev) /* * disable interrupts */ - p = ioremap_nocache(pci_resource_start(dev, 0), 0x80); + p = ioremap(pci_resource_start(dev, 0), 0x80); if (p != NULL) { writel(0, p + 0x4c); @@ -475,7 +475,7 @@ static int pci_siig10x_init(struct pci_dev *dev) break; } - p = ioremap_nocache(pci_resource_start(dev, 0), 0x80); + p = ioremap(pci_resource_start(dev, 0), 0x80); if (p == NULL) return -ENOMEM; diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 90655910b0c7..9ff5dfad590a 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -2766,7 +2766,7 @@ static int serial8250_request_std_resource(struct uart_8250_port *up) } if (port->flags & UPF_IOREMAP) { - port->membase = ioremap_nocache(port->mapbase, size); + port->membase = ioremap(port->mapbase, size); if (!port->membase) { release_mem_region(port->mapbase, size); ret = 
-ENOMEM; diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index a8dc8af83f39..1ba9bc667e13 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -2270,27 +2270,6 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios, mode |= ATMEL_US_USMODE_NORMAL; } - /* set the mode, clock divisor, parity, stop bits and data size */ - atmel_uart_writel(port, ATMEL_US_MR, mode); - - /* - * when switching the mode, set the RTS line state according to the - * new mode, otherwise keep the former state - */ - if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) { - unsigned int rts_state; - - if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) { - /* let the hardware control the RTS line */ - rts_state = ATMEL_US_RTSDIS; - } else { - /* force RTS line to low level */ - rts_state = ATMEL_US_RTSEN; - } - - atmel_uart_writel(port, ATMEL_US_CR, rts_state); - } - /* * Set the baud rate: * Fractional baudrate allows to setup output frequency more @@ -2317,6 +2296,28 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios, if (!(port->iso7816.flags & SER_ISO7816_ENABLED)) atmel_uart_writel(port, ATMEL_US_BRGR, quot); + + /* set the mode, clock divisor, parity, stop bits and data size */ + atmel_uart_writel(port, ATMEL_US_MR, mode); + + /* + * when switching the mode, set the RTS line state according to the + * new mode, otherwise keep the former state + */ + if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) { + unsigned int rts_state; + + if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) { + /* let the hardware control the RTS line */ + rts_state = ATMEL_US_RTSDIS; + } else { + /* force RTS line to low level */ + rts_state = ATMEL_US_RTSEN; + } + + atmel_uart_writel(port, ATMEL_US_CR, rts_state); + } + atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX); atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN); atmel_port->tx_stopped = false; diff --git a/drivers/tty/serial/dz.c b/drivers/tty/serial/dz.c index 7b57e840e255..730da413d8ed 100644 --- a/drivers/tty/serial/dz.c +++ b/drivers/tty/serial/dz.c @@ -677,7 +677,7 @@ static void dz_release_port(struct uart_port *uport) static int dz_map_port(struct uart_port *uport) { if (!uport->membase) - uport->membase = ioremap_nocache(uport->mapbase, + uport->membase = ioremap(uport->mapbase, dec_kn_slot_size); if (!uport->membase) { printk(KERN_ERR "dz: Cannot map MMIO\n"); diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c index fcbea43dc334..f67226df30d4 100644 --- a/drivers/tty/serial/lantiq.c +++ b/drivers/tty/serial/lantiq.c @@ -549,7 +549,7 @@ lqasc_request_port(struct uart_port *port) } if (port->flags & UPF_IOREMAP) { - port->membase = devm_ioremap_nocache(&pdev->dev, + port->membase = devm_ioremap(&pdev->dev, port->mapbase, size); if (port->membase == NULL) return -ENOMEM; diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c index fbc5bc022a39..164b18372c02 100644 --- a/drivers/tty/serial/meson_uart.c +++ b/drivers/tty/serial/meson_uart.c @@ -411,7 +411,7 @@ static int meson_uart_request_port(struct uart_port *port) return -EBUSY; } - port->membase = devm_ioremap_nocache(port->dev, port->mapbase, + port->membase = devm_ioremap(port->dev, port->mapbase, port->mapsize); if (!port->membase) return -ENOMEM; diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c index 1cbae0768b1f..f6c45a796433 100644 --- 
a/drivers/tty/serial/msm_serial.c +++ b/drivers/tty/serial/msm_serial.c @@ -1580,6 +1580,7 @@ static void __msm_console_write(struct uart_port *port, const char *s, int num_newlines = 0; bool replaced = false; void __iomem *tf; + int locked = 1; if (is_uartdm) tf = port->membase + UARTDM_TF; @@ -1592,7 +1593,13 @@ static void __msm_console_write(struct uart_port *port, const char *s, num_newlines++; count += num_newlines; - spin_lock(&port->lock); + if (port->sysrq) + locked = 0; + else if (oops_in_progress) + locked = spin_trylock(&port->lock); + else + spin_lock(&port->lock); + if (is_uartdm) msm_reset_dm_count(port, count); @@ -1628,7 +1635,9 @@ static void __msm_console_write(struct uart_port *port, const char *s, iowrite32_rep(tf, buf, 1); i += num_chars; } - spin_unlock(&port->lock); + + if (locked) + spin_unlock(&port->lock); } static void msm_console_write(struct console *co, const char *s, diff --git a/drivers/tty/serial/mux.c b/drivers/tty/serial/mux.c index 00ce31e8d19a..fc58a004bef4 100644 --- a/drivers/tty/serial/mux.c +++ b/drivers/tty/serial/mux.c @@ -474,7 +474,7 @@ static int __init mux_probe(struct parisc_device *dev) port->iobase = 0; port->mapbase = dev->hpa.start + MUX_OFFSET + (i * MUX_LINE_OFFSET); - port->membase = ioremap_nocache(port->mapbase, MUX_LINE_OFFSET); + port->membase = ioremap(port->mapbase, MUX_LINE_OFFSET); port->iotype = UPIO_MEM; port->type = PORT_MUX; port->irq = 0; diff --git a/drivers/tty/serial/owl-uart.c b/drivers/tty/serial/owl-uart.c index d2d8b3494685..42c8cc93b603 100644 --- a/drivers/tty/serial/owl-uart.c +++ b/drivers/tty/serial/owl-uart.c @@ -427,7 +427,7 @@ static int owl_uart_request_port(struct uart_port *port) return -EBUSY; if (port->flags & UPF_IOREMAP) { - port->membase = devm_ioremap_nocache(port->dev, port->mapbase, + port->membase = devm_ioremap(port->dev, port->mapbase, resource_size(res)); if (!port->membase) return -EBUSY; diff --git a/drivers/tty/serial/pic32_uart.c b/drivers/tty/serial/pic32_uart.c index 0bdf1687983f..484b7e8d5381 100644 --- a/drivers/tty/serial/pic32_uart.c +++ b/drivers/tty/serial/pic32_uart.c @@ -618,7 +618,7 @@ static int pic32_uart_request_port(struct uart_port *port) "pic32_uart_mem")) return -EBUSY; - port->membase = devm_ioremap_nocache(port->dev, port->mapbase, + port->membase = devm_ioremap(port->dev, port->mapbase, resource_size(res_mem)); if (!port->membase) { dev_err(port->dev, "Unable to map registers\n"); diff --git a/drivers/tty/serial/rda-uart.c b/drivers/tty/serial/rda-uart.c index ff9a27d48bca..b5ef86ae2746 100644 --- a/drivers/tty/serial/rda-uart.c +++ b/drivers/tty/serial/rda-uart.c @@ -498,7 +498,7 @@ static int rda_uart_request_port(struct uart_port *port) return -EBUSY; if (port->flags & UPF_IOREMAP) { - port->membase = devm_ioremap_nocache(port->dev, port->mapbase, + port->membase = devm_ioremap(port->dev, port->mapbase, resource_size(res)); if (!port->membase) return -EBUSY; diff --git a/drivers/tty/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c index 329aced26bd8..7c99340a3d66 100644 --- a/drivers/tty/serial/sb1250-duart.c +++ b/drivers/tty/serial/sb1250-duart.c @@ -668,7 +668,7 @@ static int sbd_map_port(struct uart_port *uport) struct sbd_duart *duart = sport->duart; if (!uport->membase) - uport->membase = ioremap_nocache(uport->mapbase, + uport->membase = ioremap(uport->mapbase, DUART_CHANREG_SPACING); if (!uport->membase) { printk(err); @@ -676,7 +676,7 @@ static int sbd_map_port(struct uart_port *uport) } if (!sport->memctrl) - sport->memctrl = 
ioremap_nocache(duart->mapctrl, + sport->memctrl = ioremap(duart->mapctrl, DUART_CHANREG_SPACING); if (!sport->memctrl) { printk(err); diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index b0a6eb106edb..7c2782785736 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -2834,6 +2834,7 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport) if (uport->cons && uport->dev) of_console_check(uport->dev->of_node, uport->cons->name, uport->line); + tty_port_link_device(port, drv->tty_driver, uport->line); uart_configure_port(drv, state, uport); port->console = uart_console(uport); diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 58bf9d496ba5..87ca6294de0e 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -2680,7 +2680,7 @@ static int sci_remap_port(struct uart_port *port) return 0; if (port->dev->of_node || (port->flags & UPF_IOREMAP)) { - port->membase = ioremap_nocache(port->mapbase, sport->reg_size); + port->membase = ioremap(port->mapbase, sport->reg_size); if (unlikely(!port->membase)) { dev_err(port->dev, "can't remap port#%d\n", port->line); return -ENXIO; diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c index 31df23502562..f60a59d9bf27 100644 --- a/drivers/tty/serial/sprd_serial.c +++ b/drivers/tty/serial/sprd_serial.c @@ -679,6 +679,9 @@ static irqreturn_t sprd_handle_irq(int irq, void *dev_id) if (ims & SPRD_IMSR_TIMEOUT) serial_out(port, SPRD_ICLR, SPRD_ICLR_TIMEOUT); + if (ims & SPRD_IMSR_BREAK_DETECT) + serial_out(port, SPRD_ICLR, SPRD_IMSR_BREAK_DETECT); + if (ims & (SPRD_IMSR_RX_FIFO_FULL | SPRD_IMSR_BREAK_DETECT | SPRD_IMSR_TIMEOUT)) sprd_rx(port); diff --git a/drivers/tty/serial/zs.c b/drivers/tty/serial/zs.c index b03d3e458ea2..89154ac4c577 100644 --- a/drivers/tty/serial/zs.c +++ b/drivers/tty/serial/zs.c @@ -992,7 +992,7 @@ static void zs_release_port(struct uart_port *uport) static int zs_map_port(struct uart_port *uport) { if (!uport->membase) - uport->membase = ioremap_nocache(uport->mapbase, + uport->membase = ioremap(uport->mapbase, ZS_CHAN_IO_SIZE); if (!uport->membase) { printk(KERN_ERR "zs: Cannot map MMIO\n"); diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c index 84f26e43b229..0ca13f889d84 100644 --- a/drivers/tty/synclink.c +++ b/drivers/tty/synclink.c @@ -4054,7 +4054,7 @@ static int mgsl_claim_resources(struct mgsl_struct *info) } info->lcr_mem_requested = true; - info->memory_base = ioremap_nocache(info->phys_memory_base, + info->memory_base = ioremap(info->phys_memory_base, 0x40000); if (!info->memory_base) { printk( "%s(%d):Can't map shared memory on device %s MemAddr=%08X\n", @@ -4068,7 +4068,7 @@ static int mgsl_claim_resources(struct mgsl_struct *info) goto errout; } - info->lcr_base = ioremap_nocache(info->phys_lcr_base, + info->lcr_base = ioremap(info->phys_lcr_base, PAGE_SIZE); if (!info->lcr_base) { printk( "%s(%d):Can't map LCR memory on device %s MemAddr=%08X\n", diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c index e8a9047de451..e506fc489d48 100644 --- a/drivers/tty/synclink_gt.c +++ b/drivers/tty/synclink_gt.c @@ -3450,7 +3450,7 @@ static int claim_resources(struct slgt_info *info) else info->reg_addr_requested = true; - info->reg_addr = ioremap_nocache(info->phys_reg_addr, SLGT_REG_SIZE); + info->reg_addr = ioremap(info->phys_reg_addr, SLGT_REG_SIZE); if (!info->reg_addr) { DBGERR(("%s can't map device registers, addr=%08X\n", info->device_name, 
info->phys_reg_addr)); diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c index fcb91bf7a15b..b9d974474b64 100644 --- a/drivers/tty/synclinkmp.c +++ b/drivers/tty/synclinkmp.c @@ -3559,7 +3559,7 @@ static int claim_resources(SLMP_INFO *info) else info->sca_statctrl_requested = true; - info->memory_base = ioremap_nocache(info->phys_memory_base, + info->memory_base = ioremap(info->phys_memory_base, SCA_MEM_SIZE); if (!info->memory_base) { printk( "%s(%d):%s Can't map shared memory, MemAddr=%08X\n", @@ -3568,7 +3568,7 @@ static int claim_resources(SLMP_INFO *info) goto errout; } - info->lcr_base = ioremap_nocache(info->phys_lcr_base, PAGE_SIZE); + info->lcr_base = ioremap(info->phys_lcr_base, PAGE_SIZE); if (!info->lcr_base) { printk( "%s(%d):%s Can't map LCR memory, MemAddr=%08X\n", __FILE__,__LINE__,info->device_name, info->phys_lcr_base ); @@ -3577,7 +3577,7 @@ static int claim_resources(SLMP_INFO *info) } info->lcr_base += info->lcr_offset; - info->sca_base = ioremap_nocache(info->phys_sca_base, PAGE_SIZE); + info->sca_base = ioremap(info->phys_sca_base, PAGE_SIZE); if (!info->sca_base) { printk( "%s(%d):%s Can't map SCA memory, MemAddr=%08X\n", __FILE__,__LINE__,info->device_name, info->phys_sca_base ); @@ -3586,7 +3586,7 @@ static int claim_resources(SLMP_INFO *info) } info->sca_base += info->sca_offset; - info->statctrl_base = ioremap_nocache(info->phys_statctrl_base, + info->statctrl_base = ioremap(info->phys_statctrl_base, PAGE_SIZE); if (!info->statctrl_base) { printk( "%s(%d):%s Can't map SCA Status/Control memory, MemAddr=%08X\n", diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c index 8b0ea8c70d73..635cf0466b59 100644 --- a/drivers/usb/atm/ueagle-atm.c +++ b/drivers/usb/atm/ueagle-atm.c @@ -2124,10 +2124,11 @@ resubmit: /* * Start the modem : init the data and start kernel thread */ -static int uea_boot(struct uea_softc *sc) +static int uea_boot(struct uea_softc *sc, struct usb_interface *intf) { - int ret, size; struct intr_pkt *intr; + int ret = -ENOMEM; + int size; uea_enters(INS_TO_USBDEV(sc)); @@ -2152,6 +2153,11 @@ static int uea_boot(struct uea_softc *sc) if (UEA_CHIP_VERSION(sc) == ADI930) load_XILINX_firmware(sc); + if (intf->cur_altsetting->desc.bNumEndpoints < 1) { + ret = -ENODEV; + goto err0; + } + intr = kmalloc(size, GFP_KERNEL); if (!intr) goto err0; @@ -2163,8 +2169,7 @@ static int uea_boot(struct uea_softc *sc) usb_fill_int_urb(sc->urb_int, sc->usb_dev, usb_rcvintpipe(sc->usb_dev, UEA_INTR_PIPE), intr, size, uea_intr, sc, - sc->usb_dev->actconfig->interface[0]->altsetting[0]. 
- endpoint[0].desc.bInterval); + intf->cur_altsetting->endpoint[0].desc.bInterval); ret = usb_submit_urb(sc->urb_int, GFP_KERNEL); if (ret < 0) { @@ -2179,6 +2184,7 @@ static int uea_boot(struct uea_softc *sc) sc->kthread = kthread_create(uea_kthread, sc, "ueagle-atm"); if (IS_ERR(sc->kthread)) { uea_err(INS_TO_USBDEV(sc), "failed to create thread\n"); + ret = PTR_ERR(sc->kthread); goto err2; } @@ -2193,7 +2199,7 @@ err1: kfree(intr); err0: uea_leaves(INS_TO_USBDEV(sc)); - return -ENOMEM; + return ret; } /* @@ -2548,7 +2554,7 @@ static int uea_bind(struct usbatm_data *usbatm, struct usb_interface *intf, } } - ret = uea_boot(sc); + ret = uea_boot(sc, intf); if (ret < 0) goto error; diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c index dbea28495e1d..4e12a32ca392 100644 --- a/drivers/usb/atm/usbatm.c +++ b/drivers/usb/atm/usbatm.c @@ -1275,7 +1275,7 @@ EXPORT_SYMBOL_GPL(usbatm_usb_disconnect); static int __init usbatm_usb_init(void) { - if (sizeof(struct usbatm_control) > FIELD_SIZEOF(struct sk_buff, cb)) { + if (sizeof(struct usbatm_control) > sizeof_field(struct sk_buff, cb)) { printk(KERN_ERR "%s unusable with this kernel!\n", usbatm_driver_name); return -EIO; } diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c index 4c1e75509303..02f6ca2cb1ba 100644 --- a/drivers/usb/cdns3/gadget.c +++ b/drivers/usb/cdns3/gadget.c @@ -1375,13 +1375,10 @@ static void cdns3_check_usb_interrupt_proceed(struct cdns3_device *priv_dev, */ static irqreturn_t cdns3_device_irq_handler(int irq, void *data) { - struct cdns3_device *priv_dev; - struct cdns3 *cdns = data; + struct cdns3_device *priv_dev = data; irqreturn_t ret = IRQ_NONE; u32 reg; - priv_dev = cdns->gadget_dev; - /* check USB device interrupt */ reg = readl(&priv_dev->regs->usb_ists); if (reg) { @@ -1419,14 +1416,12 @@ static irqreturn_t cdns3_device_irq_handler(int irq, void *data) */ static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data) { - struct cdns3_device *priv_dev; - struct cdns3 *cdns = data; + struct cdns3_device *priv_dev = data; irqreturn_t ret = IRQ_NONE; unsigned long flags; int bit; u32 reg; - priv_dev = cdns->gadget_dev; spin_lock_irqsave(&priv_dev->lock, flags); reg = readl(&priv_dev->regs->usb_ists); @@ -2539,7 +2534,7 @@ void cdns3_gadget_exit(struct cdns3 *cdns) priv_dev = cdns->gadget_dev; - devm_free_irq(cdns->dev, cdns->dev_irq, cdns); + devm_free_irq(cdns->dev, cdns->dev_irq, priv_dev); pm_runtime_mark_last_busy(cdns->dev); pm_runtime_put_autosuspend(cdns->dev); @@ -2710,7 +2705,8 @@ static int __cdns3_gadget_init(struct cdns3 *cdns) ret = devm_request_threaded_irq(cdns->dev, cdns->dev_irq, cdns3_device_irq_handler, cdns3_device_thread_irq_handler, - IRQF_SHARED, dev_name(cdns->dev), cdns); + IRQF_SHARED, dev_name(cdns->dev), + cdns->gadget_dev); if (ret) goto err0; diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c index b45ceb91c735..48e4a5ca1835 100644 --- a/drivers/usb/chipidea/host.c +++ b/drivers/usb/chipidea/host.c @@ -26,6 +26,7 @@ static int (*orig_bus_suspend)(struct usb_hcd *hcd); struct ehci_ci_priv { struct regulator *reg_vbus; + bool enabled; }; static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable) @@ -37,7 +38,7 @@ static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable) int ret = 0; int port = HCS_N_PORTS(ehci->hcs_params); - if (priv->reg_vbus) { + if (priv->reg_vbus && enable != priv->enabled) { if (port > 1) { dev_warn(dev, "Not support multi-port regulator control\n"); @@ -53,6 +54,7 @@ static 
int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable) enable ? "enable" : "disable", ret); return ret; } + priv->enabled = enable; } if (enable && (ci->platdata->phy_mode == USBPHY_INTERFACE_MODE_HSIC)) { diff --git a/drivers/usb/common/usb-conn-gpio.c b/drivers/usb/common/usb-conn-gpio.c index 87338f9eb5be..ed204cbb63ea 100644 --- a/drivers/usb/common/usb-conn-gpio.c +++ b/drivers/usb/common/usb-conn-gpio.c @@ -156,7 +156,8 @@ static int usb_conn_probe(struct platform_device *pdev) info->vbus = devm_regulator_get(dev, "vbus"); if (IS_ERR(info->vbus)) { - dev_err(dev, "failed to get vbus\n"); + if (PTR_ERR(info->vbus) != -EPROBE_DEFER) + dev_err(dev, "failed to get vbus\n"); return PTR_ERR(info->vbus); } diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 5f40117e68e7..26bc05e48d8a 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -203,9 +203,58 @@ static const unsigned short super_speed_maxpacket_maxes[4] = { [USB_ENDPOINT_XFER_INT] = 1024, }; -static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, - int asnum, struct usb_host_interface *ifp, int num_ep, - unsigned char *buffer, int size) +static bool endpoint_is_duplicate(struct usb_endpoint_descriptor *e1, + struct usb_endpoint_descriptor *e2) +{ + if (e1->bEndpointAddress == e2->bEndpointAddress) + return true; + + if (usb_endpoint_xfer_control(e1) || usb_endpoint_xfer_control(e2)) { + if (usb_endpoint_num(e1) == usb_endpoint_num(e2)) + return true; + } + + return false; +} + +/* + * Check for duplicate endpoint addresses in other interfaces and in the + * altsetting currently being parsed. + */ +static bool config_endpoint_is_duplicate(struct usb_host_config *config, + int inum, int asnum, struct usb_endpoint_descriptor *d) +{ + struct usb_endpoint_descriptor *epd; + struct usb_interface_cache *intfc; + struct usb_host_interface *alt; + int i, j, k; + + for (i = 0; i < config->desc.bNumInterfaces; ++i) { + intfc = config->intf_cache[i]; + + for (j = 0; j < intfc->num_altsetting; ++j) { + alt = &intfc->altsetting[j]; + + if (alt->desc.bInterfaceNumber == inum && + alt->desc.bAlternateSetting != asnum) + continue; + + for (k = 0; k < alt->desc.bNumEndpoints; ++k) { + epd = &alt->endpoint[k].desc; + + if (endpoint_is_duplicate(epd, d)) + return true; + } + } + } + + return false; +} + +static int usb_parse_endpoint(struct device *ddev, int cfgno, + struct usb_host_config *config, int inum, int asnum, + struct usb_host_interface *ifp, int num_ep, + unsigned char *buffer, int size) { unsigned char *buffer0 = buffer; struct usb_endpoint_descriptor *d; @@ -242,13 +291,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, goto skip_to_next_endpoint_or_interface_descriptor; /* Check for duplicate endpoint addresses */ - for (i = 0; i < ifp->desc.bNumEndpoints; ++i) { - if (ifp->endpoint[i].desc.bEndpointAddress == - d->bEndpointAddress) { - dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n", - cfgno, inum, asnum, d->bEndpointAddress); - goto skip_to_next_endpoint_or_interface_descriptor; - } + if (config_endpoint_is_duplicate(config, inum, asnum, d)) { + dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n", + cfgno, inum, asnum, d->bEndpointAddress); + goto skip_to_next_endpoint_or_interface_descriptor; } endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints]; @@ -346,12 +392,16 @@ static int usb_parse_endpoint(struct device *ddev, int 
cfgno, int inum, endpoint->desc.wMaxPacketSize = cpu_to_le16(8); } - /* Validate the wMaxPacketSize field */ + /* + * Validate the wMaxPacketSize field. + * Some devices have isochronous endpoints in altsetting 0; + * the USB-2 spec requires such endpoints to have wMaxPacketSize = 0 + * (see the end of section 5.6.3), so don't warn about them. + */ maxp = usb_endpoint_maxp(&endpoint->desc); - if (maxp == 0) { - dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has wMaxPacketSize 0, skipping\n", + if (maxp == 0 && !(usb_endpoint_xfer_isoc(d) && asnum == 0)) { + dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid wMaxPacketSize 0\n", cfgno, inum, asnum, d->bEndpointAddress); - goto skip_to_next_endpoint_or_interface_descriptor; } /* Find the highest legal maxpacket size for this endpoint */ @@ -522,8 +572,8 @@ static int usb_parse_interface(struct device *ddev, int cfgno, if (((struct usb_descriptor_header *) buffer)->bDescriptorType == USB_DT_INTERFACE) break; - retval = usb_parse_endpoint(ddev, cfgno, inum, asnum, alt, - num_ep, buffer, size); + retval = usb_parse_endpoint(ddev, cfgno, config, inum, asnum, + alt, num_ep, buffer, size); if (retval < 0) return retval; ++n; diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c index 9ae2a7a93df2..f0a259937da8 100644 --- a/drivers/usb/core/hcd-pci.c +++ b/drivers/usb/core/hcd-pci.c @@ -222,7 +222,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) retval = -EBUSY; goto put_hcd; } - hcd->regs = devm_ioremap_nocache(&dev->dev, hcd->rsrc_start, + hcd->regs = devm_ioremap(&dev->dev, hcd->rsrc_start, hcd->rsrc_len); if (hcd->regs == NULL) { dev_dbg(&dev->dev, "error mapping memory\n"); diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 281568d464f9..aa45840d8273 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -1409,7 +1409,17 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, if (usb_endpoint_xfer_control(&urb->ep->desc)) { if (hcd->self.uses_pio_for_control) return ret; - if (hcd_uses_dma(hcd)) { + if (hcd->localmem_pool) { + ret = hcd_alloc_coherent( + urb->dev->bus, mem_flags, + &urb->setup_dma, + (void **)&urb->setup_packet, + sizeof(struct usb_ctrlrequest), + DMA_TO_DEVICE); + if (ret) + return ret; + urb->transfer_flags |= URB_SETUP_MAP_LOCAL; + } else if (hcd_uses_dma(hcd)) { if (object_is_on_stack(urb->setup_packet)) { WARN_ONCE(1, "setup packet is on stack\n"); return -EAGAIN; @@ -1424,23 +1434,22 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, urb->setup_dma)) return -EAGAIN; urb->transfer_flags |= URB_SETUP_MAP_SINGLE; - } else if (hcd->localmem_pool) { - ret = hcd_alloc_coherent( - urb->dev->bus, mem_flags, - &urb->setup_dma, - (void **)&urb->setup_packet, - sizeof(struct usb_ctrlrequest), - DMA_TO_DEVICE); - if (ret) - return ret; - urb->transfer_flags |= URB_SETUP_MAP_LOCAL; } } dir = usb_urb_dir_in(urb) ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE; if (urb->transfer_buffer_length != 0 && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) { - if (hcd_uses_dma(hcd)) { + if (hcd->localmem_pool) { + ret = hcd_alloc_coherent( + urb->dev->bus, mem_flags, + &urb->transfer_dma, + &urb->transfer_buffer, + urb->transfer_buffer_length, + dir); + if (ret == 0) + urb->transfer_flags |= URB_MAP_LOCAL; + } else if (hcd_uses_dma(hcd)) { if (urb->num_sgs) { int n; @@ -1491,15 +1500,6 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, else urb->transfer_flags |= URB_DMA_MAP_SINGLE; } - } else if (hcd->localmem_pool) { - ret = hcd_alloc_coherent( - urb->dev->bus, mem_flags, - &urb->transfer_dma, - &urb->transfer_buffer, - urb->transfer_buffer_length, - dir); - if (ret == 0) - urb->transfer_flags |= URB_MAP_LOCAL; } if (ret && (urb->transfer_flags & (URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL))) diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index f229ad6952c0..3405b146edc9 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -1192,6 +1192,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) * PORT_OVER_CURRENT is not. So check for any of them. */ if (udev || (portstatus & USB_PORT_STAT_CONNECTION) || + (portchange & USB_PORT_STAT_C_CONNECTION) || (portstatus & USB_PORT_STAT_OVERCURRENT) || (portchange & USB_PORT_STAT_C_OVERCURRENT)) set_bit(port1, hub->change_bits); @@ -2692,7 +2693,7 @@ static unsigned hub_is_wusb(struct usb_hub *hub) #define SET_ADDRESS_TRIES 2 #define GET_DESCRIPTOR_TRIES 2 #define SET_CONFIG_TRIES (2 * (use_both_schemes + 1)) -#define USE_NEW_SCHEME(i, scheme) ((i) / 2 == (int)scheme) +#define USE_NEW_SCHEME(i, scheme) ((i) / 2 == (int)(scheme)) #define HUB_ROOT_RESET_TIME 60 /* times are in msec */ #define HUB_SHORT_RESET_TIME 10 diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c index 0eab79f82ce4..da923ec17612 100644 --- a/drivers/usb/core/urb.c +++ b/drivers/usb/core/urb.c @@ -45,6 +45,7 @@ void usb_init_urb(struct urb *urb) if (urb) { memset(urb, 0, sizeof(*urb)); kref_init(&urb->kref); + INIT_LIST_HEAD(&urb->urb_list); INIT_LIST_HEAD(&urb->anchor_list); } } diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 023f0357efd7..294276f7deb9 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -29,7 +29,8 @@ #define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa #define PCI_DEVICE_ID_INTEL_APL 0x5aaa #define PCI_DEVICE_ID_INTEL_KBP 0xa2b0 -#define PCI_DEVICE_ID_INTEL_CMLH 0x02ee +#define PCI_DEVICE_ID_INTEL_CMLLP 0x02ee +#define PCI_DEVICE_ID_INTEL_CMLH 0x06ee #define PCI_DEVICE_ID_INTEL_GLK 0x31aa #define PCI_DEVICE_ID_INTEL_CNPLP 0x9dee #define PCI_DEVICE_ID_INTEL_CNPH 0xa36e @@ -308,6 +309,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD), (kernel_ulong_t) &dwc3_pci_mrfld_properties, }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CMLLP), + (kernel_ulong_t) &dwc3_pci_intel_properties, }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CMLH), (kernel_ulong_t) &dwc3_pci_intel_properties, }, diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c index 3996b9c4ff8d..fd1b100d2927 100644 --- a/drivers/usb/dwc3/ep0.c +++ b/drivers/usb/dwc3/ep0.c @@ -1117,6 +1117,9 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc, void dwc3_ep0_interrupt(struct dwc3 *dwc, const struct dwc3_event_depevt *event) { + struct dwc3_ep *dep = dwc->eps[event->endpoint_number]; + u8 cmd; + switch (event->endpoint_event) { case 
DWC3_DEPEVT_XFERCOMPLETE: dwc3_ep0_xfer_complete(dwc, event); @@ -1129,7 +1132,12 @@ void dwc3_ep0_interrupt(struct dwc3 *dwc, case DWC3_DEPEVT_XFERINPROGRESS: case DWC3_DEPEVT_RXTXFIFOEVT: case DWC3_DEPEVT_STREAMEVT: + break; case DWC3_DEPEVT_EPCMDCMPLT: + cmd = DEPEVT_PARAMETER_CMD(event->parameters); + + if (cmd == DWC3_DEPCMD_ENDTRANSFER) + dep->flags &= ~DWC3_EP_TRANSFER_STARTED; break; } } diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index a9aba716bf80..154f3f3e8cff 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -2467,6 +2467,13 @@ static int dwc3_gadget_ep_reclaim_trb_linear(struct dwc3_ep *dep, static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req) { + /* + * For OUT direction, host may send less than the setup + * length. Return true for all OUT requests. + */ + if (!req->direction) + return true; + return req->request.actual == req->request.length; } @@ -2491,7 +2498,7 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep, req->request.actual = req->request.length - req->remaining; - if (!dwc3_gadget_ep_request_completed(req) && + if (!dwc3_gadget_ep_request_completed(req) || req->num_pending_sgs) { __dwc3_gadget_kick_transfer(dep); goto out; @@ -2719,6 +2726,9 @@ static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, WARN_ON_ONCE(ret); dep->resource_index = 0; + if (!interrupt) + dep->flags &= ~DWC3_EP_TRANSFER_STARTED; + if (dwc3_is_usb31(dwc) || dwc->revision < DWC3_REVISION_310A) udelay(100); } diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c index 5567ed2cddbe..fa252870c926 100644 --- a/drivers/usb/dwc3/host.c +++ b/drivers/usb/dwc3/host.c @@ -88,10 +88,10 @@ int dwc3_host_init(struct dwc3 *dwc) memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props)); if (dwc->usb3_lpm_capable) - props[prop_idx++].name = "usb3-lpm-capable"; + props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb3-lpm-capable"); if (dwc->usb2_lpm_disable) - props[prop_idx++].name = "usb2-lpm-disable"; + props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb2-lpm-disable"); /** * WORKAROUND: dwc3 revisions <=3.00a have a limitation @@ -103,7 +103,7 @@ int dwc3_host_init(struct dwc3 *dwc) * This following flag tells XHCI to do just that. 
*/ if (dwc->revision <= DWC3_REVISION_300A) - props[prop_idx++].name = "quirk-broken-port-ped"; + props[prop_idx++] = PROPERTY_ENTRY_BOOL("quirk-broken-port-ped"); if (prop_idx) { ret = platform_device_add_properties(xhci, props); diff --git a/drivers/usb/early/xhci-dbc.c b/drivers/usb/early/xhci-dbc.c index cac991173ac0..971c6b92484a 100644 --- a/drivers/usb/early/xhci-dbc.c +++ b/drivers/usb/early/xhci-dbc.c @@ -971,7 +971,7 @@ static int __init xdbc_init(void) goto free_and_quit; } - base = ioremap_nocache(xdbc.xhci_start, xdbc.xhci_length); + base = ioremap(xdbc.xhci_start, xdbc.xhci_length); if (!base) { xdbc_trace("failed to remap the io address\n"); ret = -ENOMEM; diff --git a/drivers/usb/gadget/function/f_ecm.c b/drivers/usb/gadget/function/f_ecm.c index 6ce044008cf6..460d5d7c984f 100644 --- a/drivers/usb/gadget/function/f_ecm.c +++ b/drivers/usb/gadget/function/f_ecm.c @@ -621,8 +621,12 @@ static void ecm_disable(struct usb_function *f) DBG(cdev, "ecm deactivated\n"); - if (ecm->port.in_ep->enabled) + if (ecm->port.in_ep->enabled) { gether_disconnect(&ecm->port); + } else { + ecm->port.in_ep->desc = NULL; + ecm->port.out_ep->desc = NULL; + } usb_ep_disable(ecm->notify); ecm->notify->desc = NULL; diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index ce1d0235969c..0bbccac94d6c 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -3509,7 +3509,7 @@ static void ffs_free_inst(struct usb_function_instance *f) static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name) { - if (strlen(name) >= FIELD_SIZEOF(struct ffs_dev, name)) + if (strlen(name) >= sizeof_field(struct ffs_dev, name)) return -ENAMETOOLONG; return ffs_name_dev(to_f_fs_opts(fi)->dev, name); } diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c index d48df36622b7..0d8e4a364ca6 100644 --- a/drivers/usb/gadget/function/f_rndis.c +++ b/drivers/usb/gadget/function/f_rndis.c @@ -618,6 +618,7 @@ static void rndis_disable(struct usb_function *f) gether_disconnect(&rndis->port); usb_ep_disable(rndis->notify); + rndis->notify->desc = NULL; } /*-------------------------------------------------------------------------*/ diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig index ae70ce29d5e4..797d6ace8994 100644 --- a/drivers/usb/gadget/udc/Kconfig +++ b/drivers/usb/gadget/udc/Kconfig @@ -445,6 +445,7 @@ config USB_TEGRA_XUDC tristate "NVIDIA Tegra Superspeed USB 3.0 Device Controller" depends on ARCH_TEGRA || COMPILE_TEST depends on PHY_TEGRA_XUSB + select USB_ROLE_SWITCH help Enables NVIDIA Tegra USB 3.0 device mode controller driver. 
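[Editorial aside, not part of any commit above.] Several hunks in this section (cxgbit_main.c, usbatm.c and f_fs.c) belong to the tree-wide FIELD_SIZEOF() -> sizeof_field() rename. The macro measures a struct member through a NULL-based member access inside sizeof(), so no instance is needed and the result is a compile-time constant usable in BUILD_BUG_ON() and bound checks. A minimal userspace sketch of the same pattern follows; struct demo_dev, its 41-byte name field and the test string are invented for illustration — only the sizeof_field() definition mirrors include/linux/stddef.h:

#include <stdio.h>
#include <string.h>

/* Same definition as the kernel's include/linux/stddef.h. */
#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))

struct demo_dev {		/* hypothetical stand-in for struct ffs_dev */
	char name[41];
	int mounted;
};

int main(void)
{
	const char *name = "an-instance-name-that-is-much-too-long-for-the-field";

	/* Mirrors the ffs_set_inst_name() length check in the f_fs.c hunk:
	 * strlen() must leave room for the terminating NUL. */
	if (strlen(name) >= sizeof_field(struct demo_dev, name))
		printf("reject with -ENAMETOOLONG (%zu >= %zu)\n",
		       strlen(name), sizeof_field(struct demo_dev, name));
	return 0;
}

In the real hunk the rejected case returns -ENAMETOOLONG; the sketch just prints the comparison.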
diff --git a/drivers/usb/gadget/udc/amd5536udc_pci.c b/drivers/usb/gadget/udc/amd5536udc_pci.c index 57b6f66331cf..bfd1c9e80a1f 100644 --- a/drivers/usb/gadget/udc/amd5536udc_pci.c +++ b/drivers/usb/gadget/udc/amd5536udc_pci.c @@ -116,7 +116,7 @@ static int udc_pci_probe( goto err_memreg; } - dev->virt_addr = ioremap_nocache(resource, len); + dev->virt_addr = ioremap(resource, len); if (!dev->virt_addr) { dev_dbg(&pdev->dev, "start address cannot be mapped\n"); retval = -EFAULT; diff --git a/drivers/usb/gadget/udc/goku_udc.c b/drivers/usb/gadget/udc/goku_udc.c index c3721225b61e..4a46f661d0e4 100644 --- a/drivers/usb/gadget/udc/goku_udc.c +++ b/drivers/usb/gadget/udc/goku_udc.c @@ -1782,7 +1782,7 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id) } dev->got_region = 1; - base = ioremap_nocache(resource, len); + base = ioremap(resource, len); if (base == NULL) { DBG(dev, "can't map memory\n"); retval = -EFAULT; diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c index 247de0faaeb7..a8273b589456 100644 --- a/drivers/usb/gadget/udc/net2272.c +++ b/drivers/usb/gadget/udc/net2272.c @@ -2323,7 +2323,7 @@ net2272_rdk1_probe(struct pci_dev *pdev, struct net2272 *dev) goto err; } - mem_mapped_addr[i] = ioremap_nocache(resource, len); + mem_mapped_addr[i] = ioremap(resource, len); if (mem_mapped_addr[i] == NULL) { release_mem_region(resource, len); dev_dbg(dev->dev, "can't map memory\n"); @@ -2401,7 +2401,7 @@ net2272_rdk2_probe(struct pci_dev *pdev, struct net2272 *dev) goto err; } - mem_mapped_addr[i] = ioremap_nocache(resource, len); + mem_mapped_addr[i] = ioremap(resource, len); if (mem_mapped_addr[i] == NULL) { release_mem_region(resource, len); dev_dbg(dev->dev, "can't map memory\n"); @@ -2625,7 +2625,7 @@ net2272_plat_probe(struct platform_device *pdev) ret = -EBUSY; goto err; } - dev->base_addr = ioremap_nocache(base, len); + dev->base_addr = ioremap(base, len); if (!dev->base_addr) { dev_dbg(dev->dev, "can't map memory\n"); ret = -EFAULT; diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c index 51efee21915f..1fd1b9186e46 100644 --- a/drivers/usb/gadget/udc/net2280.c +++ b/drivers/usb/gadget/udc/net2280.c @@ -3659,7 +3659,7 @@ static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id) * 8051 code into the chip, e.g. to turn on PCI PM. 
*/ - base = ioremap_nocache(resource, len); + base = ioremap(resource, len); if (base == NULL) { ep_dbg(dev, "can't map memory\n"); retval = -EFAULT; diff --git a/drivers/usb/host/ehci-pmcmsp.c b/drivers/usb/host/ehci-pmcmsp.c index a2b610dbedfc..2d462fbbe0a6 100644 --- a/drivers/usb/host/ehci-pmcmsp.c +++ b/drivers/usb/host/ehci-pmcmsp.c @@ -107,7 +107,7 @@ static int usb_hcd_msp_map_regs(struct mspusb_device *dev) if (!request_mem_region(res->start, res_len, "mab regs")) return -EBUSY; - dev->mab_regs = ioremap_nocache(res->start, res_len); + dev->mab_regs = ioremap(res->start, res_len); if (dev->mab_regs == NULL) { retval = -ENOMEM; goto err1; @@ -124,7 +124,7 @@ static int usb_hcd_msp_map_regs(struct mspusb_device *dev) retval = -EBUSY; goto err2; } - dev->usbid_regs = ioremap_nocache(res->start, res_len); + dev->usbid_regs = ioremap(res->start, res_len); if (dev->usbid_regs == NULL) { retval = -ENOMEM; goto err3; @@ -178,7 +178,7 @@ int usb_hcd_msp_probe(const struct hc_driver *driver, retval = -EBUSY; goto err1; } - hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len); + hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); if (!hcd->regs) { pr_debug("ioremap failed"); retval = -ENOMEM; diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index aa2f77f1506d..8a5c9b3ebe1e 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c @@ -27,6 +27,10 @@ /*-------------------------------------------------------------------------*/ +/* PID Codes that are used here, from EHCI specification, Table 3-16. */ +#define PID_CODE_IN 1 +#define PID_CODE_SETUP 2 + /* fill a qtd, returning how much of the buffer we were able to queue up */ static int @@ -190,7 +194,7 @@ static int qtd_copy_status ( int status = -EINPROGRESS; /* count IN/OUT bytes, not SETUP (even short packets) */ - if (likely (QTD_PID (token) != 2)) + if (likely(QTD_PID(token) != PID_CODE_SETUP)) urb->actual_length += length - QTD_LENGTH (token); /* don't modify error codes */ @@ -206,6 +210,13 @@ static int qtd_copy_status ( if (token & QTD_STS_BABBLE) { /* FIXME "must" disable babbling device's port too */ status = -EOVERFLOW; + /* + * When MMF is active and PID Code is IN, queue is halted. + * EHCI Specification, Table 4-13. 
+ */ + } else if ((token & QTD_STS_MMF) && + (QTD_PID(token) == PID_CODE_IN)) { + status = -EPROTO; /* CERR nonzero + halt --> stall */ } else if (QTD_CERR(token)) { status = -EPIPE; diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c index 38183ac438c6..1371b0c249ec 100644 --- a/drivers/usb/host/ohci-da8xx.c +++ b/drivers/usb/host/ohci-da8xx.c @@ -415,13 +415,17 @@ static int ohci_da8xx_probe(struct platform_device *pdev) } da8xx_ohci->oc_gpio = devm_gpiod_get_optional(dev, "oc", GPIOD_IN); - if (IS_ERR(da8xx_ohci->oc_gpio)) + if (IS_ERR(da8xx_ohci->oc_gpio)) { + error = PTR_ERR(da8xx_ohci->oc_gpio); goto err; + } if (da8xx_ohci->oc_gpio) { oc_irq = gpiod_to_irq(da8xx_ohci->oc_gpio); - if (oc_irq < 0) + if (oc_irq < 0) { + error = oc_irq; goto err; + } error = devm_request_threaded_irq(dev, oc_irq, NULL, ohci_da8xx_oc_thread, IRQF_TRIGGER_RISING | diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index 6c7f0a876b96..beb2efa71341 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -1150,7 +1150,7 @@ static void quirk_usb_handoff_xhci(struct pci_dev *pdev) if (!mmio_resource_enabled(pdev, 0)) return; - base = ioremap_nocache(pci_resource_start(pdev, 0), len); + base = ioremap(pci_resource_start(pdev, 0), len); if (base == NULL) return; diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index b7d23c438756..7a3a29e5e9d2 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -806,7 +806,7 @@ static void xhci_del_comp_mod_timer(struct xhci_hcd *xhci, u32 status, static int xhci_handle_usb2_port_link_resume(struct xhci_port *port, u32 *status, u32 portsc, - unsigned long flags) + unsigned long *flags) { struct xhci_bus_state *bus_state; struct xhci_hcd *xhci; @@ -860,11 +860,11 @@ static int xhci_handle_usb2_port_link_resume(struct xhci_port *port, xhci_test_and_clear_bit(xhci, port, PORT_PLC); xhci_set_link_state(xhci, port, XDEV_U0); - spin_unlock_irqrestore(&xhci->lock, flags); + spin_unlock_irqrestore(&xhci->lock, *flags); time_left = wait_for_completion_timeout( &bus_state->rexit_done[wIndex], msecs_to_jiffies(XHCI_MAX_REXIT_TIMEOUT_MS)); - spin_lock_irqsave(&xhci->lock, flags); + spin_lock_irqsave(&xhci->lock, *flags); if (time_left) { slot_id = xhci_find_slot_id_by_port(hcd, xhci, @@ -920,11 +920,13 @@ static void xhci_get_usb3_port_status(struct xhci_port *port, u32 *status, { struct xhci_bus_state *bus_state; struct xhci_hcd *xhci; + struct usb_hcd *hcd; u32 link_state; u32 portnum; bus_state = &port->rhub->bus_state; xhci = hcd_to_xhci(port->rhub->hcd); + hcd = port->rhub->hcd; link_state = portsc & PORT_PLS_MASK; portnum = port->hcd_portnum; @@ -952,12 +954,20 @@ static void xhci_get_usb3_port_status(struct xhci_port *port, u32 *status, bus_state->suspended_ports &= ~(1 << portnum); } + /* remote wake resume signaling complete */ + if (bus_state->port_remote_wakeup & (1 << portnum) && + link_state != XDEV_RESUME && + link_state != XDEV_RECOVERY) { + bus_state->port_remote_wakeup &= ~(1 << portnum); + usb_hcd_end_port_resume(&hcd->self, portnum); + } + xhci_hub_report_usb3_link_state(xhci, status, portsc); xhci_del_comp_mod_timer(xhci, portsc, portnum); } static void xhci_get_usb2_port_status(struct xhci_port *port, u32 *status, - u32 portsc, unsigned long flags) + u32 portsc, unsigned long *flags) { struct xhci_bus_state *bus_state; u32 link_state; @@ -1007,7 +1017,7 @@ static void xhci_get_usb2_port_status(struct xhci_port *port, u32 *status, static u32 
xhci_get_port_status(struct usb_hcd *hcd, struct xhci_bus_state *bus_state, u16 wIndex, u32 raw_port_status, - unsigned long flags) + unsigned long *flags) __releases(&xhci->lock) __acquires(&xhci->lock) { @@ -1130,7 +1140,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, } trace_xhci_get_port_status(wIndex, temp); status = xhci_get_port_status(hcd, bus_state, wIndex, temp, - flags); + &flags); if (status == 0xffffffff) goto error; diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index e16eda6e2b8b..3b1388fa2f36 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -1909,13 +1909,17 @@ no_bw: xhci->usb3_rhub.num_ports = 0; xhci->num_active_eps = 0; kfree(xhci->usb2_rhub.ports); + kfree(xhci->usb2_rhub.psi); kfree(xhci->usb3_rhub.ports); + kfree(xhci->usb3_rhub.psi); kfree(xhci->hw_ports); kfree(xhci->rh_bw); kfree(xhci->ext_caps); xhci->usb2_rhub.ports = NULL; + xhci->usb2_rhub.psi = NULL; xhci->usb3_rhub.ports = NULL; + xhci->usb3_rhub.psi = NULL; xhci->hw_ports = NULL; xhci->rh_bw = NULL; xhci->ext_caps = NULL; diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index a0025d23b257..4917c5b033fa 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -519,6 +519,18 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated) retval = xhci_resume(xhci, hibernated); return retval; } + +static void xhci_pci_shutdown(struct usb_hcd *hcd) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct pci_dev *pdev = to_pci_dev(hcd->self.controller); + + xhci_shutdown(hcd); + + /* Yet another workaround for spurious wakeups at shutdown with HSW */ + if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) + pci_set_power_state(pdev, PCI_D3hot); +} #endif /* CONFIG_PM */ /*-------------------------------------------------------------------------*/ @@ -556,6 +568,7 @@ static int __init xhci_pci_init(void) #ifdef CONFIG_PM xhci_pci_hc_driver.pci_suspend = xhci_pci_suspend; xhci_pci_hc_driver.pci_resume = xhci_pci_resume; + xhci_pci_hc_driver.shutdown = xhci_pci_shutdown; #endif return pci_register_driver(&xhci_pci_driver); } diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 6475c3d3b43b..d23f7408c81f 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -1628,7 +1628,6 @@ static void handle_port_status(struct xhci_hcd *xhci, slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1); if (slot_id && xhci->devs[slot_id]) xhci->devs[slot_id]->flags |= VDEV_PORT_ERROR; - bus_state->port_remote_wakeup &= ~(1 << hcd_portnum); } if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_RESUME) { @@ -1648,6 +1647,7 @@ static void handle_port_status(struct xhci_hcd *xhci, */ bus_state->port_remote_wakeup |= 1 << hcd_portnum; xhci_test_and_clear_bit(xhci, port, PORT_PLC); + usb_hcd_start_port_resume(&hcd->self, hcd_portnum); xhci_set_link_state(xhci, port, XDEV_U0); /* Need to wait until the next link state change * indicates the device is actually in U0. 
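Worth noting on the xhci-hub.c hunks above: xhci_handle_usb2_port_link_resume() drops xhci->lock around wait_for_completion_timeout() and retakes it, and spin_lock_irqsave() writes the saved interrupt state into its flags argument. Passing flags by value meant the caller's copy went stale after the relock, so the helpers now take unsigned long *flags. A minimal sketch of the pattern, with a hypothetical helper name:

    static void step_that_sleeps(struct xhci_hcd *xhci, unsigned long *flags)
    {
            spin_unlock_irqrestore(&xhci->lock, *flags);
            msleep(20);                             /* sleepable work */
            spin_lock_irqsave(&xhci->lock, *flags); /* refreshes caller's flags */
    }

The caller's eventual spin_unlock_irqrestore() then restores the state saved by this innermost spin_lock_irqsave() rather than a stale value.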
@@ -1688,7 +1688,6 @@ static void handle_port_status(struct xhci_hcd *xhci, if (slot_id && xhci->devs[slot_id]) xhci_ring_device(xhci, slot_id); if (bus_state->port_remote_wakeup & (1 << hcd_portnum)) { - bus_state->port_remote_wakeup &= ~(1 << hcd_portnum); xhci_test_and_clear_bit(xhci, port, PORT_PLC); usb_wakeup_notification(hcd->self.root_hub, hcd_portnum + 1); @@ -2382,7 +2381,8 @@ static int handle_tx_event(struct xhci_hcd *xhci, case COMP_SUCCESS: if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) break; - if (xhci->quirks & XHCI_TRUST_TX_LENGTH) + if (xhci->quirks & XHCI_TRUST_TX_LENGTH || + ep_ring->last_td_was_short) trb_comp_code = COMP_SHORT_PACKET; else xhci_warn_ratelimited(xhci, diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 6721d059f58a..dbac0fa9748d 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -770,7 +770,7 @@ static void xhci_stop(struct usb_hcd *hcd) * * This will only ever be called with the main usb_hcd (the USB3 roothub). */ -static void xhci_shutdown(struct usb_hcd *hcd) +void xhci_shutdown(struct usb_hcd *hcd) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); @@ -789,11 +789,8 @@ static void xhci_shutdown(struct usb_hcd *hcd) xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_shutdown completed - status = %x", readl(&xhci->op_regs->status)); - - /* Yet another workaround for spurious wakeups at shutdown with HSW */ - if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) - pci_set_power_state(to_pci_dev(hcd->self.sysdev), PCI_D3hot); } +EXPORT_SYMBOL_GPL(xhci_shutdown); #ifdef CONFIG_PM static void xhci_save_registers(struct xhci_hcd *xhci) @@ -973,7 +970,7 @@ static bool xhci_pending_portevent(struct xhci_hcd *xhci) int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup) { int rc = 0; - unsigned int delay = XHCI_MAX_HALT_USEC; + unsigned int delay = XHCI_MAX_HALT_USEC * 2; struct usb_hcd *hcd = xhci_to_hcd(xhci); u32 command; u32 res; diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index dc6f62a4b197..13d8838cd552 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -2050,6 +2050,7 @@ int xhci_start(struct xhci_hcd *xhci); int xhci_reset(struct xhci_hcd *xhci); int xhci_run(struct usb_hcd *hcd); int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks); +void xhci_shutdown(struct usb_hcd *hcd); void xhci_init_driver(struct hc_driver *drv, const struct xhci_driver_overrides *over); int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id); diff --git a/drivers/usb/isp1760/isp1760-if.c b/drivers/usb/isp1760/isp1760-if.c index 07cc82ff327c..ccd30f835888 100644 --- a/drivers/usb/isp1760/isp1760-if.c +++ b/drivers/usb/isp1760/isp1760-if.c @@ -50,7 +50,7 @@ static int isp1761_pci_init(struct pci_dev *dev) } /* map available memory */ - iobase = ioremap_nocache(mem_start, mem_length); + iobase = ioremap(mem_start, mem_length); if (!iobase) { printk(KERN_ERR "Error ioremap failed\n"); release_mem_region(mem_start, mem_length); @@ -101,7 +101,7 @@ static int isp1761_pci_init(struct pci_dev *dev) return -EBUSY; } - iobase = ioremap_nocache(mem_start, mem_length); + iobase = ioremap(mem_start, mem_length); if (!iobase) { printk(KERN_ERR "ioremap #1\n"); release_mem_region(mem_start, mem_length); diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c index 6f5edb9fc61e..d8d157c4c271 100644 --- a/drivers/usb/misc/adutux.c +++ b/drivers/usb/misc/adutux.c @@ -669,7 +669,7 @@ static int adu_probe(struct usb_interface *interface, init_waitqueue_head(&dev->read_wait); 
init_waitqueue_head(&dev->write_wait); - res = usb_find_common_endpoints_reverse(&interface->altsetting[0], + res = usb_find_common_endpoints_reverse(interface->cur_altsetting, NULL, NULL, &dev->interrupt_in_endpoint, &dev->interrupt_out_endpoint); diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c index 4afb5ddfd361..e9437a176518 100644 --- a/drivers/usb/misc/idmouse.c +++ b/drivers/usb/misc/idmouse.c @@ -322,7 +322,7 @@ static int idmouse_probe(struct usb_interface *interface, int result; /* check if we have gotten the data or the hid interface */ - iface_desc = &interface->altsetting[0]; + iface_desc = interface->cur_altsetting; if (iface_desc->desc.bInterfaceClass != 0x0A) return -ENODEV; diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c index ac2b4fcc265f..f48a23adbc35 100644 --- a/drivers/usb/mon/mon_bin.c +++ b/drivers/usb/mon/mon_bin.c @@ -1039,12 +1039,18 @@ static long mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg mutex_lock(&rp->fetch_lock); spin_lock_irqsave(&rp->b_lock, flags); - mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE); - kfree(rp->b_vec); - rp->b_vec = vec; - rp->b_size = size; - rp->b_read = rp->b_in = rp->b_out = rp->b_cnt = 0; - rp->cnt_lost = 0; + if (rp->mmap_active) { + mon_free_buff(vec, size/CHUNK_SIZE); + kfree(vec); + ret = -EBUSY; + } else { + mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE); + kfree(rp->b_vec); + rp->b_vec = vec; + rp->b_size = size; + rp->b_read = rp->b_in = rp->b_out = rp->b_cnt = 0; + rp->cnt_lost = 0; + } spin_unlock_irqrestore(&rp->b_lock, flags); mutex_unlock(&rp->fetch_lock); } @@ -1216,13 +1222,21 @@ mon_bin_poll(struct file *file, struct poll_table_struct *wait) static void mon_bin_vma_open(struct vm_area_struct *vma) { struct mon_reader_bin *rp = vma->vm_private_data; + unsigned long flags; + + spin_lock_irqsave(&rp->b_lock, flags); rp->mmap_active++; + spin_unlock_irqrestore(&rp->b_lock, flags); } static void mon_bin_vma_close(struct vm_area_struct *vma) { + unsigned long flags; + struct mon_reader_bin *rp = vma->vm_private_data; + spin_lock_irqsave(&rp->b_lock, flags); rp->mmap_active--; + spin_unlock_irqrestore(&rp->b_lock, flags); } /* @@ -1234,16 +1248,12 @@ static vm_fault_t mon_bin_vma_fault(struct vm_fault *vmf) unsigned long offset, chunk_idx; struct page *pageptr; - mutex_lock(&rp->fetch_lock); offset = vmf->pgoff << PAGE_SHIFT; - if (offset >= rp->b_size) { - mutex_unlock(&rp->fetch_lock); + if (offset >= rp->b_size) return VM_FAULT_SIGBUS; - } chunk_idx = offset / CHUNK_SIZE; pageptr = rp->b_vec[chunk_idx].pg; get_page(pageptr); - mutex_unlock(&rp->fetch_lock); vmf->page = pageptr; return 0; } diff --git a/drivers/usb/musb/jz4740.c b/drivers/usb/musb/jz4740.c index 5261f8dfedec..e3b8c84ccdb8 100644 --- a/drivers/usb/musb/jz4740.c +++ b/drivers/usb/musb/jz4740.c @@ -75,14 +75,17 @@ static struct musb_hdrc_platform_data jz4740_musb_platform_data = { static int jz4740_musb_init(struct musb *musb) { struct device *dev = musb->controller->parent; + int err; if (dev->of_node) musb->xceiv = devm_usb_get_phy_by_phandle(dev, "phys", 0); else musb->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2); if (IS_ERR(musb->xceiv)) { - dev_err(dev, "No transceiver configured\n"); - return PTR_ERR(musb->xceiv); + err = PTR_ERR(musb->xceiv); + if (err != -EPROBE_DEFER) + dev_err(dev, "No transceiver configured: %d", err); + return err; } /* Silicon does not implement ConfigData register. 
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 15cca912c53e..5ebf30bd61bd 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -1840,6 +1840,9 @@ ATTRIBUTE_GROUPS(musb); #define MUSB_QUIRK_B_INVALID_VBUS_91 (MUSB_DEVCTL_BDEVICE | \ (2 << MUSB_DEVCTL_VBUS_SHIFT) | \ MUSB_DEVCTL_SESSION) +#define MUSB_QUIRK_B_DISCONNECT_99 (MUSB_DEVCTL_BDEVICE | \ + (3 << MUSB_DEVCTL_VBUS_SHIFT) | \ + MUSB_DEVCTL_SESSION) #define MUSB_QUIRK_A_DISCONNECT_19 ((3 << MUSB_DEVCTL_VBUS_SHIFT) | \ MUSB_DEVCTL_SESSION) @@ -1862,6 +1865,11 @@ static void musb_pm_runtime_check_session(struct musb *musb) s = MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV | MUSB_DEVCTL_HR; switch (devctl & ~s) { + case MUSB_QUIRK_B_DISCONNECT_99: + musb_dbg(musb, "Poll devctl in case of suspend after disconnect\n"); + schedule_delayed_work(&musb->irq_work, + msecs_to_jiffies(1000)); + break; case MUSB_QUIRK_B_INVALID_VBUS_91: if (musb->quirk_retries && !musb->flush_irq_work) { musb_dbg(musb, @@ -2310,6 +2318,9 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) musb_disable_interrupts(musb); musb_writeb(musb->mregs, MUSB_DEVCTL, 0); + /* MUSB_POWER_SOFTCONN might be already set, JZ4740 does this. */ + musb_writeb(musb->mregs, MUSB_POWER, 0); + /* Init IRQ workqueue before request_irq */ INIT_DELAYED_WORK(&musb->irq_work, musb_irq_work); INIT_DELAYED_WORK(&musb->deassert_reset_work, musb_deassert_reset); diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c index 5fc6825745f2..2d3751d885b4 100644 --- a/drivers/usb/musb/musbhsdma.c +++ b/drivers/usb/musb/musbhsdma.c @@ -425,7 +425,7 @@ struct dma_controller *musbhs_dma_controller_create(struct musb *musb, controller->controller.channel_abort = dma_channel_abort; if (request_irq(irq, dma_controller_irq, 0, - dev_name(musb->controller), &controller->controller)) { + dev_name(musb->controller), controller)) { dev_err(dev, "request_irq %d failed!\n", irq); musb_dma_controller_destroy(&controller->controller); diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c index 8273126ffdf4..63a00ff26655 100644 --- a/drivers/usb/roles/class.c +++ b/drivers/usb/roles/class.c @@ -169,8 +169,8 @@ EXPORT_SYMBOL_GPL(fwnode_usb_role_switch_get); void usb_role_switch_put(struct usb_role_switch *sw) { if (!IS_ERR_OR_NULL(sw)) { - put_device(&sw->dev); module_put(sw->dev.parent->driver->owner); + put_device(&sw->dev); } } EXPORT_SYMBOL_GPL(usb_role_switch_put); diff --git a/drivers/usb/roles/intel-xhci-usb-role-switch.c b/drivers/usb/roles/intel-xhci-usb-role-switch.c index 409851306e99..80d6559bbcb2 100644 --- a/drivers/usb/roles/intel-xhci-usb-role-switch.c +++ b/drivers/usb/roles/intel-xhci-usb-role-switch.c @@ -161,7 +161,7 @@ static int intel_xhci_usb_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -EINVAL; - data->base = devm_ioremap_nocache(dev, res->start, resource_size(res)); + data->base = devm_ioremap(dev, res->start, resource_size(res)); if (!data->base) return -ENOMEM; diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index df582fe855f0..d3f420f3a083 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c @@ -642,9 +642,13 @@ static int ch341_tiocmget(struct tty_struct *tty) static int ch341_reset_resume(struct usb_serial *serial) { struct usb_serial_port *port = serial->port[0]; - struct ch341_private *priv = usb_get_serial_port_data(port); + struct ch341_private *priv; int ret; + priv = 
usb_get_serial_port_data(port); + if (!priv) + return 0; + /* reconfigure ch341 serial port after bus-reset */ ch341_configure(serial->dev, priv); diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c index 48a439298a68..5737add6a2a4 100644 --- a/drivers/usb/serial/io_edgeport.c +++ b/drivers/usb/serial/io_edgeport.c @@ -716,7 +716,7 @@ static void edge_interrupt_callback(struct urb *urb) if (txCredits) { port = edge_serial->serial->port[portNumber]; edge_port = usb_get_serial_port_data(port); - if (edge_port->open) { + if (edge_port && edge_port->open) { spin_lock_irqsave(&edge_port->ep_lock, flags); edge_port->txCredits += txCredits; @@ -1725,7 +1725,8 @@ static void edge_break(struct tty_struct *tty, int break_state) static void process_rcvd_data(struct edgeport_serial *edge_serial, unsigned char *buffer, __u16 bufferLength) { - struct device *dev = &edge_serial->serial->dev->dev; + struct usb_serial *serial = edge_serial->serial; + struct device *dev = &serial->dev->dev; struct usb_serial_port *port; struct edgeport_port *edge_port; __u16 lastBufferLength; @@ -1821,11 +1822,10 @@ static void process_rcvd_data(struct edgeport_serial *edge_serial, /* spit this data back into the tty driver if this port is open */ - if (rxLen) { - port = edge_serial->serial->port[ - edge_serial->rxPort]; + if (rxLen && edge_serial->rxPort < serial->num_ports) { + port = serial->port[edge_serial->rxPort]; edge_port = usb_get_serial_port_data(port); - if (edge_port->open) { + if (edge_port && edge_port->open) { dev_dbg(dev, "%s - Sending %d bytes to TTY for port %d\n", __func__, rxLen, edge_serial->rxPort); @@ -1833,8 +1833,8 @@ static void process_rcvd_data(struct edgeport_serial *edge_serial, rxLen); edge_port->port->icount.rx += rxLen; } - buffer += rxLen; } + buffer += rxLen; break; case EXPECT_HDR3: /* Expect 3rd byte of status header */ @@ -1869,6 +1869,8 @@ static void process_rcvd_status(struct edgeport_serial *edge_serial, __u8 code = edge_serial->rxStatusCode; /* switch the port pointer to the one being currently talked about */ + if (edge_serial->rxPort >= edge_serial->serial->num_ports) + return; port = edge_serial->serial->port[edge_serial->rxPort]; edge_port = usb_get_serial_port_data(port); if (edge_port == NULL) { @@ -2901,16 +2903,18 @@ static int edge_startup(struct usb_serial *serial) response = 0; if (edge_serial->is_epic) { + struct usb_host_interface *alt; + + alt = serial->interface->cur_altsetting; + /* EPIC thing, set up our interrupt polling now and our read * urb, so that the device knows it really is connected. */ interrupt_in_found = bulk_in_found = bulk_out_found = false; - for (i = 0; i < serial->interface->altsetting[0] - .desc.bNumEndpoints; ++i) { + for (i = 0; i < alt->desc.bNumEndpoints; ++i) { struct usb_endpoint_descriptor *endpoint; int buffer_size; - endpoint = &serial->interface->altsetting[0]. 
- endpoint[i].desc; + endpoint = &alt->endpoint[i].desc; buffer_size = usb_endpoint_maxp(endpoint); if (!interrupt_in_found && (usb_endpoint_is_int_in(endpoint))) { diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c index e66a59ef43a1..aa3dbce22cfb 100644 --- a/drivers/usb/serial/keyspan.c +++ b/drivers/usb/serial/keyspan.c @@ -1058,6 +1058,8 @@ static void usa49_glocont_callback(struct urb *urb) for (i = 0; i < serial->num_ports; ++i) { port = serial->port[i]; p_priv = usb_get_serial_port_data(port); + if (!p_priv) + continue; if (p_priv->resend_cont) { dev_dbg(&port->dev, "%s - sending setup\n", __func__); @@ -1459,6 +1461,8 @@ static void usa67_glocont_callback(struct urb *urb) for (i = 0; i < serial->num_ports; ++i) { port = serial->port[i]; p_priv = usb_get_serial_port_data(port); + if (!p_priv) + continue; if (p_priv->resend_cont) { dev_dbg(&port->dev, "%s - sending setup\n", __func__); diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c index cb7aac9cd9e7..ed2b4e6dca38 100644 --- a/drivers/usb/serial/opticon.c +++ b/drivers/usb/serial/opticon.c @@ -113,7 +113,7 @@ static int send_control_msg(struct usb_serial_port *port, u8 requesttype, retval = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), requesttype, USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, - 0, 0, buffer, 1, 0); + 0, 0, buffer, 1, USB_CTRL_SET_TIMEOUT); kfree(buffer); if (retval < 0) diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index e9491d400a24..084cc2fff3ae 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -248,6 +248,7 @@ static void option_instat_callback(struct urb *urb); #define QUECTEL_PRODUCT_BG96 0x0296 #define QUECTEL_PRODUCT_EP06 0x0306 #define QUECTEL_PRODUCT_EM12 0x0512 +#define QUECTEL_PRODUCT_RM500Q 0x0800 #define CMOTECH_VENDOR_ID 0x16d8 #define CMOTECH_PRODUCT_6001 0x6001 @@ -567,6 +568,9 @@ static void option_instat_callback(struct urb *urb); /* Interface must have two endpoints */ #define NUMEP2 BIT(16) +/* Device needs ZLP */ +#define ZLP BIT(17) + static const struct usb_device_id option_ids[] = { { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, @@ -1101,6 +1105,11 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff), .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x30) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10), + .driver_info = ZLP }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), @@ -1172,6 +1181,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(0) | RSVD(3) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1102, 0xff), /* Telit ME910 (ECM) */ .driver_info = NCTRL(0) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x110a, 0xff), /* Telit ME910G1 */ + .driver_info = NCTRL(0) | RSVD(3) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4), @@ -1196,6 +1207,8 @@ static const struct usb_device_id 
option_ids[] = { .driver_info = NCTRL(0) | RSVD(1) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff), /* Telit LN940 (MBIM) */ .driver_info = NCTRL(0) }, + { USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */ + .driver_info = NCTRL(0) | ZLP }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff), .driver_info = RSVD(1) }, @@ -2097,6 +2110,9 @@ static int option_attach(struct usb_serial *serial) if (!(device_flags & NCTRL(iface_desc->bInterfaceNumber))) data->use_send_setup = 1; + if (device_flags & ZLP) + data->use_zlp = 1; + spin_lock_init(&data->susp_lock); usb_set_serial_data(serial, data); diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c index a62981ca7a73..f93b81a297d6 100644 --- a/drivers/usb/serial/quatech2.c +++ b/drivers/usb/serial/quatech2.c @@ -841,7 +841,10 @@ static void qt2_update_msr(struct usb_serial_port *port, unsigned char *ch) u8 newMSR = (u8) *ch; unsigned long flags; + /* May be called from qt2_process_read_urb() for an unbound port. */ port_priv = usb_get_serial_port_data(port); + if (!port_priv) + return; spin_lock_irqsave(&port_priv->lock, flags); port_priv->shadowMSR = newMSR; @@ -869,7 +872,10 @@ static void qt2_update_lsr(struct usb_serial_port *port, unsigned char *ch) unsigned long flags; u8 newLSR = (u8) *ch; + /* May be called from qt2_process_read_urb() for an unbound port. */ port_priv = usb_get_serial_port_data(port); + if (!port_priv) + return; if (newLSR & UART_LSR_BI) newLSR &= (u8) (UART_LSR_OE | UART_LSR_BI); diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c index edbbb13d6de6..bd23a7cb1be2 100644 --- a/drivers/usb/serial/usb-serial-simple.c +++ b/drivers/usb/serial/usb-serial-simple.c @@ -86,6 +86,8 @@ DEVICE(moto_modem, MOTO_IDS); #define MOTOROLA_TETRA_IDS() \ { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \ { USB_DEVICE(0x0cad, 0x9012) }, /* MTP6550 */ \ + { USB_DEVICE(0x0cad, 0x9013) }, /* MTP3xxx */ \ + { USB_DEVICE(0x0cad, 0x9015) }, /* MTP85xx */ \ { USB_DEVICE(0x0cad, 0x9016) } /* TPG2200 */ DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS); diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index 8f066bb55d7d..dc7a65b9ec98 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -1317,6 +1317,9 @@ static int usb_serial_register(struct usb_serial_driver *driver) return -EINVAL; } + /* Prevent individual ports from being unbound. 
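(The keyspan and quatech2 hunks above NULL-check usb_get_serial_port_data() because URB completion handlers can run for ports that never bound or were already unbound; the usb-serial core change here attacks the same class of problem from the other side. A usb-serial port device shares state with its sibling ports and the parent interface, so unbinding a single port through the driver's sysfs unbind attribute could leave the others using freed data. Setting driver.suppress_bind_attrs, a struct device_driver flag, simply stops the driver core from creating the bind/unbind attributes for these drivers.)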
*/ + driver->driver.suppress_bind_attrs = true; + usb_serial_operations_init(driver); /* Add this device to our list of devices */ diff --git a/drivers/usb/serial/usb-wwan.h b/drivers/usb/serial/usb-wwan.h index 1c120eaf4091..934e9361cf6b 100644 --- a/drivers/usb/serial/usb-wwan.h +++ b/drivers/usb/serial/usb-wwan.h @@ -38,6 +38,7 @@ struct usb_wwan_intf_private { spinlock_t susp_lock; unsigned int suspended:1; unsigned int use_send_setup:1; + unsigned int use_zlp:1; int in_flight; unsigned int open_ports; void *private; diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c index 7e855c87e4f7..13be21aad2f4 100644 --- a/drivers/usb/serial/usb_wwan.c +++ b/drivers/usb/serial/usb_wwan.c @@ -461,6 +461,7 @@ static struct urb *usb_wwan_setup_urb(struct usb_serial_port *port, void (*callback) (struct urb *)) { struct usb_serial *serial = port->serial; + struct usb_wwan_intf_private *intfdata = usb_get_serial_data(serial); struct urb *urb; urb = usb_alloc_urb(0, GFP_KERNEL); /* No ISO */ @@ -471,6 +472,9 @@ static struct urb *usb_wwan_setup_urb(struct usb_serial_port *port, usb_sndbulkpipe(serial->dev, endpoint) | dir, buf, len, callback, ctx); + if (intfdata->use_zlp && dir == USB_DIR_OUT) + urb->transfer_flags |= URB_ZERO_PACKET; + return urb; } diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c index 66a4dcbbb1fc..f4c2359abb1b 100644 --- a/drivers/usb/storage/scsiglue.c +++ b/drivers/usb/storage/scsiglue.c @@ -135,7 +135,8 @@ static int slave_configure(struct scsi_device *sdev) * For such controllers we need to make sure the block layer sets * up bounce buffers in addressable memory. */ - if (!hcd_uses_dma(bus_to_hcd(us->pusb_dev->bus))) + if (!hcd_uses_dma(bus_to_hcd(us->pusb_dev->bus)) || + (bus_to_hcd(us->pusb_dev->bus)->localmem_pool != NULL)) blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_HIGH); /* diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c index 7ece6ca6e690..91d62276b56f 100644 --- a/drivers/usb/typec/class.c +++ b/drivers/usb/typec/class.c @@ -1612,14 +1612,16 @@ struct typec_port *typec_register_port(struct device *parent, port->sw = typec_switch_get(&port->dev); if (IS_ERR(port->sw)) { + ret = PTR_ERR(port->sw); put_device(&port->dev); - return ERR_CAST(port->sw); + return ERR_PTR(ret); } port->mux = typec_mux_get(&port->dev, NULL); if (IS_ERR(port->mux)) { + ret = PTR_ERR(port->mux); put_device(&port->dev); - return ERR_CAST(port->mux); + return ERR_PTR(ret); } ret = device_add(&port->dev); diff --git a/drivers/usb/typec/tcpm/Kconfig b/drivers/usb/typec/tcpm/Kconfig index 72481bbb2af3..5b986d6c801d 100644 --- a/drivers/usb/typec/tcpm/Kconfig +++ b/drivers/usb/typec/tcpm/Kconfig @@ -32,6 +32,7 @@ endif # TYPEC_TCPCI config TYPEC_FUSB302 tristate "Fairchild FUSB302 Type-C chip driver" depends on I2C + depends on EXTCON || !EXTCON help The Fairchild FUSB302 Type-C chip driver that works with Type-C Port Controller Manager to provide USB PD and USB diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c index c1f7073a56de..8b4ff9fff340 100644 --- a/drivers/usb/typec/tcpm/tcpci.c +++ b/drivers/usb/typec/tcpm/tcpci.c @@ -432,20 +432,30 @@ irqreturn_t tcpci_irq(struct tcpci *tcpci) if (status & TCPC_ALERT_RX_STATUS) { struct pd_message msg; - unsigned int cnt; + unsigned int cnt, payload_cnt; u16 header; regmap_read(tcpci->regmap, TCPC_RX_BYTE_CNT, &cnt); + /* + * 'cnt' corresponds to READABLE_BYTE_COUNT in section 4.4.14 + * of the TCPCI spec [Rev 2.0 Ver 1.0 October 2017] and is + * 
defined in table 4-36 as one greater than the number of + * bytes received. And that number includes the header. So: + */ + if (cnt > 3) + payload_cnt = cnt - (1 + sizeof(msg.header)); + else + payload_cnt = 0; tcpci_read16(tcpci, TCPC_RX_HDR, &header); msg.header = cpu_to_le16(header); - if (WARN_ON(cnt > sizeof(msg.payload))) - cnt = sizeof(msg.payload); + if (WARN_ON(payload_cnt > sizeof(msg.payload))) + payload_cnt = sizeof(msg.payload); - if (cnt > 0) + if (payload_cnt > 0) regmap_raw_read(tcpci->regmap, TCPC_RX_DATA, - &msg.payload, cnt); + &msg.payload, payload_cnt); /* Read complete, clear RX status alert bit */ tcpci_write16(tcpci, TCPC_ALERT, TCPC_ALERT_RX_STATUS); diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h index 8569bbd3762f..831c9470bdc1 100644 --- a/drivers/usb/typec/ucsi/ucsi.h +++ b/drivers/usb/typec/ucsi/ucsi.h @@ -94,15 +94,15 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num); #define UCSI_ENABLE_NTFY_CMD_COMPLETE BIT(16) #define UCSI_ENABLE_NTFY_EXT_PWR_SRC_CHANGE BIT(17) #define UCSI_ENABLE_NTFY_PWR_OPMODE_CHANGE BIT(18) -#define UCSI_ENABLE_NTFY_CAP_CHANGE BIT(19) -#define UCSI_ENABLE_NTFY_PWR_LEVEL_CHANGE BIT(20) -#define UCSI_ENABLE_NTFY_PD_RESET_COMPLETE BIT(21) -#define UCSI_ENABLE_NTFY_CAM_CHANGE BIT(22) -#define UCSI_ENABLE_NTFY_BAT_STATUS_CHANGE BIT(23) -#define UCSI_ENABLE_NTFY_PARTNER_CHANGE BIT(24) -#define UCSI_ENABLE_NTFY_PWR_DIR_CHANGE BIT(25) -#define UCSI_ENABLE_NTFY_CONNECTOR_CHANGE BIT(26) -#define UCSI_ENABLE_NTFY_ERROR BIT(27) +#define UCSI_ENABLE_NTFY_CAP_CHANGE BIT(21) +#define UCSI_ENABLE_NTFY_PWR_LEVEL_CHANGE BIT(22) +#define UCSI_ENABLE_NTFY_PD_RESET_COMPLETE BIT(23) +#define UCSI_ENABLE_NTFY_CAM_CHANGE BIT(24) +#define UCSI_ENABLE_NTFY_BAT_STATUS_CHANGE BIT(25) +#define UCSI_ENABLE_NTFY_PARTNER_CHANGE BIT(27) +#define UCSI_ENABLE_NTFY_PWR_DIR_CHANGE BIT(28) +#define UCSI_ENABLE_NTFY_CONNECTOR_CHANGE BIT(30) +#define UCSI_ENABLE_NTFY_ERROR BIT(31) #define UCSI_ENABLE_NTFY_ALL 0xdbe70000 /* SET_UOR command bits */ diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c index 3f1786170098..9fc4f338e870 100644 --- a/drivers/usb/typec/ucsi/ucsi_acpi.c +++ b/drivers/usb/typec/ucsi/ucsi_acpi.c @@ -127,7 +127,7 @@ static int ucsi_acpi_probe(struct platform_device *pdev) return -ENODEV; } - /* This will make sure we can use ioremap_nocache() */ + /* This will make sure we can use ioremap() */ status = acpi_release_memory(ACPI_HANDLE(&pdev->dev), res, 1); if (ACPI_FAILURE(status)) return -ENOMEM; diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c index 6532d68e8808..e4b96674c405 100644 --- a/drivers/usb/usbip/usbip_common.c +++ b/drivers/usb/usbip/usbip_common.c @@ -727,6 +727,9 @@ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb) copy -= recv; ret += recv; + + if (!copy) + break; } if (ret != size) diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c index 33f8972ba842..00fc98741c5d 100644 --- a/drivers/usb/usbip/vhci_rx.c +++ b/drivers/usb/usbip/vhci_rx.c @@ -77,16 +77,21 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev, usbip_pack_pdu(pdu, urb, USBIP_RET_SUBMIT, 0); /* recv transfer buffer */ - if (usbip_recv_xbuff(ud, urb) < 0) - return; + if (usbip_recv_xbuff(ud, urb) < 0) { + urb->status = -EPROTO; + goto error; + } /* recv iso_packet_descriptor */ - if (usbip_recv_iso(ud, urb) < 0) - return; + if (usbip_recv_iso(ud, urb) < 0) { + urb->status = -EPROTO; + goto error; + } /* restore the padding in iso packets */ 
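Returning to the tcpci.c hunk above, the byte-count arithmetic deserves a worked example: READABLE_BYTE_COUNT is one greater than the number of bytes received, and that number includes the two-byte PD header, so a message with n payload bytes arrives with cnt = 1 + 2 + n. A header-only message (a control message such as GoodCRC) therefore shows cnt == 3, which the cnt > 3 guard maps to payload_cnt = 0, while e.g. cnt == 11 gives payload_cnt = 11 - (1 + sizeof(msg.header)) = 8 payload bytes to read from TCPC_RX_DATA.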
usbip_pad_iso(ud, urb); +error: if (usbip_dbg_flag_vhci_rx) usbip_dump_urb(urb); diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c index 0120d8324a40..a87992892a9f 100644 --- a/drivers/vfio/pci/vfio_pci_rdwr.c +++ b/drivers/vfio/pci/vfio_pci_rdwr.c @@ -230,7 +230,7 @@ ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf, switch ((u32)pos) { case 0xa0000 ... 0xbffff: count = min(count, (size_t)(0xc0000 - pos)); - iomem = ioremap_nocache(0xa0000, 0xbffff - 0xa0000 + 1); + iomem = ioremap(0xa0000, 0xbffff - 0xa0000 + 1); off = pos - 0xa0000; rsrc = VGA_RSRC_LEGACY_MEM; is_ioport = false; diff --git a/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c b/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c index 2d2babe21b2f..40d4fb9276ba 100644 --- a/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c +++ b/drivers/vfio/platform/reset/vfio_platform_amdxgbe.c @@ -54,13 +54,13 @@ static int vfio_platform_amdxgbe_reset(struct vfio_platform_device *vdev) if (!xgmac_regs->ioaddr) { xgmac_regs->ioaddr = - ioremap_nocache(xgmac_regs->addr, xgmac_regs->size); + ioremap(xgmac_regs->addr, xgmac_regs->size); if (!xgmac_regs->ioaddr) return -ENOMEM; } if (!xpcs_regs->ioaddr) { xpcs_regs->ioaddr = - ioremap_nocache(xpcs_regs->addr, xpcs_regs->size); + ioremap(xpcs_regs->addr, xpcs_regs->size); if (!xpcs_regs->ioaddr) return -ENOMEM; } diff --git a/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c b/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c index 16165a62b86d..96064ef8f629 100644 --- a/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c +++ b/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c @@ -82,7 +82,7 @@ static int vfio_platform_bcmflexrm_reset(struct vfio_platform_device *vdev) /* Map FlexRM ring registers if not mapped */ if (!reg->ioaddr) { - reg->ioaddr = ioremap_nocache(reg->addr, reg->size); + reg->ioaddr = ioremap(reg->addr, reg->size); if (!reg->ioaddr) return -ENOMEM; } diff --git a/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c b/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c index f67bab547501..09a9453b75c5 100644 --- a/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c +++ b/drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c @@ -52,7 +52,7 @@ static int vfio_platform_calxedaxgmac_reset(struct vfio_platform_device *vdev) if (!reg->ioaddr) { reg->ioaddr = - ioremap_nocache(reg->addr, reg->size); + ioremap(reg->addr, reg->size); if (!reg->ioaddr) return -ENOMEM; } diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c index e8f2bdbe0542..c0771a9567fb 100644 --- a/drivers/vfio/platform/vfio_platform_common.c +++ b/drivers/vfio/platform/vfio_platform_common.c @@ -409,7 +409,7 @@ static ssize_t vfio_platform_read_mmio(struct vfio_platform_region *reg, if (!reg->ioaddr) { reg->ioaddr = - ioremap_nocache(reg->addr, reg->size); + ioremap(reg->addr, reg->size); if (!reg->ioaddr) return -ENOMEM; @@ -486,7 +486,7 @@ static ssize_t vfio_platform_write_mmio(struct vfio_platform_region *reg, if (!reg->ioaddr) { reg->ioaddr = - ioremap_nocache(reg->addr, reg->size); + ioremap(reg->addr, reg->size); if (!reg->ioaddr) return -ENOMEM; diff --git a/drivers/video/fbdev/carminefb.c b/drivers/video/fbdev/carminefb.c index 9f3be0258623..27ba2ed4138a 100644 --- a/drivers/video/fbdev/carminefb.c +++ b/drivers/video/fbdev/carminefb.c @@ -633,7 +633,7 @@ static int carminefb_probe(struct pci_dev *dev, const struct pci_device_id *ent) ret = -EBUSY; goto err_free_hw; } 
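From here on, most of the video/fbdev, VFIO, VME, watchdog and w1 hunks are the same mechanical conversion already seen in the USB drivers above: ioremap_nocache() had long been a plain alias for ioremap(), which already returns an uncached mapping on the architectures involved, so the alias was removed tree-wide. The call pattern is unchanged; a minimal sketch, with an illustrative register offset:

    void __iomem *regs;

    regs = ioremap(res->start, resource_size(res)); /* uncached MMIO map */
    if (!regs)
            return -ENOMEM;
    writel(0x1, regs + 0x04);                       /* illustrative access */
    iounmap(regs);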
- hw->v_regs = ioremap_nocache(carminefb_fix.mmio_start, + hw->v_regs = ioremap(carminefb_fix.mmio_start, carminefb_fix.mmio_len); if (!hw->v_regs) { printk(KERN_ERR "carminefb: Can't remap %s register.\n", @@ -664,7 +664,7 @@ static int carminefb_probe(struct pci_dev *dev, const struct pci_device_id *ent) goto err_unmap_vregs; } - hw->screen_mem = ioremap_nocache(carminefb_fix.smem_start, + hw->screen_mem = ioremap(carminefb_fix.smem_start, carminefb_fix.smem_len); if (!hw->screen_mem) { printk(KERN_ERR "carmine: Can't ioremap smem area.\n"); diff --git a/drivers/video/fbdev/i810/i810_main.c b/drivers/video/fbdev/i810/i810_main.c index d18f7b31932c..aa7583d963ac 100644 --- a/drivers/video/fbdev/i810/i810_main.c +++ b/drivers/video/fbdev/i810/i810_main.c @@ -1883,7 +1883,7 @@ static int i810_allocate_pci_resource(struct i810fb_par *par, } par->res_flags |= MMIO_REQ; - par->mmio_start_virtual = ioremap_nocache(par->mmio_start_phys, + par->mmio_start_virtual = ioremap(par->mmio_start_phys, MMIO_SIZE); if (!par->mmio_start_virtual) { printk("i810fb_init: cannot remap mmio region\n"); diff --git a/drivers/video/fbdev/intelfb/intelfbdrv.c b/drivers/video/fbdev/intelfb/intelfbdrv.c index a76c61512c60..a09fc2eaa40d 100644 --- a/drivers/video/fbdev/intelfb/intelfbdrv.c +++ b/drivers/video/fbdev/intelfb/intelfbdrv.c @@ -654,7 +654,7 @@ static int intelfb_pci_register(struct pci_dev *pdev, } dinfo->mmio_base = - (u8 __iomem *)ioremap_nocache(dinfo->mmio_base_phys, + (u8 __iomem *)ioremap(dinfo->mmio_base_phys, INTEL_REG_SIZE); if (!dinfo->mmio_base) { ERR_MSG("Cannot remap MMIO region.\n"); diff --git a/drivers/video/fbdev/kyro/fbdev.c b/drivers/video/fbdev/kyro/fbdev.c index a7bd9f25911b..a8660926924b 100644 --- a/drivers/video/fbdev/kyro/fbdev.c +++ b/drivers/video/fbdev/kyro/fbdev.c @@ -683,7 +683,7 @@ static int kyrofb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) kyro_fix.mmio_len = pci_resource_len(pdev, 1); currentpar->regbase = deviceInfo.pSTGReg = - ioremap_nocache(kyro_fix.mmio_start, kyro_fix.mmio_len); + ioremap(kyro_fix.mmio_start, kyro_fix.mmio_len); if (!currentpar->regbase) goto out_free_fb; diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c index 1a555f70923a..36cc718b96ae 100644 --- a/drivers/video/fbdev/matrox/matroxfb_base.c +++ b/drivers/video/fbdev/matrox/matroxfb_base.c @@ -1710,7 +1710,7 @@ static int initMatrox2(struct matrox_fb_info *minfo, struct board *b) memsize = mem; err = -ENOMEM; - minfo->mmio.vbase.vaddr = ioremap_nocache(ctrlptr_phys, 16384); + minfo->mmio.vbase.vaddr = ioremap(ctrlptr_phys, 16384); if (!minfo->mmio.vbase.vaddr) { printk(KERN_ERR "matroxfb: cannot ioremap(%lX, 16384), matroxfb disabled\n", ctrlptr_phys); goto failVideoMR; diff --git a/drivers/video/fbdev/mbx/mbxfb.c b/drivers/video/fbdev/mbx/mbxfb.c index 50935252b50b..3de4b3ed990a 100644 --- a/drivers/video/fbdev/mbx/mbxfb.c +++ b/drivers/video/fbdev/mbx/mbxfb.c @@ -938,7 +938,7 @@ static int mbxfb_probe(struct platform_device *dev) } mfbi->reg_phys_addr = mfbi->reg_res->start; - mfbi->reg_virt_addr = devm_ioremap_nocache(&dev->dev, + mfbi->reg_virt_addr = devm_ioremap(&dev->dev, mfbi->reg_phys_addr, res_size(mfbi->reg_req)); if (!mfbi->reg_virt_addr) { @@ -948,7 +948,7 @@ static int mbxfb_probe(struct platform_device *dev) } virt_base_2700 = mfbi->reg_virt_addr; - mfbi->fb_virt_addr = devm_ioremap_nocache(&dev->dev, mfbi->fb_phys_addr, + mfbi->fb_virt_addr = devm_ioremap(&dev->dev, mfbi->fb_phys_addr, res_size(mfbi->fb_req)); if 
(!mfbi->fb_virt_addr) { dev_err(&dev->dev, "failed to ioremap frame buffer\n"); diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c index 17174cd7a5bb..974e4c28b08b 100644 --- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c +++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c @@ -485,7 +485,7 @@ static int mmphw_probe(struct platform_device *pdev) goto failed; } - ctrl->reg_base = devm_ioremap_nocache(ctrl->dev, + ctrl->reg_base = devm_ioremap(ctrl->dev, res->start, resource_size(res)); if (ctrl->reg_base == NULL) { dev_err(ctrl->dev, "%s: res %pR map failed\n", __func__, res); diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c index 1dcf02e12af4..7cc1216b1389 100644 --- a/drivers/video/fbdev/pm2fb.c +++ b/drivers/video/fbdev/pm2fb.c @@ -1563,7 +1563,7 @@ static int pm2fb_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_exit_neither; } default_par->v_regs = - ioremap_nocache(pm2fb_fix.mmio_start, pm2fb_fix.mmio_len); + ioremap(pm2fb_fix.mmio_start, pm2fb_fix.mmio_len); if (!default_par->v_regs) { printk(KERN_WARNING "pm2fb: Can't remap %s register area.\n", pm2fb_fix.id); diff --git a/drivers/video/fbdev/pm3fb.c b/drivers/video/fbdev/pm3fb.c index 6130aa56a1e9..2fa46607e0fc 100644 --- a/drivers/video/fbdev/pm3fb.c +++ b/drivers/video/fbdev/pm3fb.c @@ -1236,7 +1236,7 @@ static unsigned long pm3fb_size_memory(struct pm3_par *par) return 0; } screen_mem = - ioremap_nocache(pm3fb_fix.smem_start, pm3fb_fix.smem_len); + ioremap(pm3fb_fix.smem_start, pm3fb_fix.smem_len); if (!screen_mem) { printk(KERN_WARNING "pm3fb: Can't ioremap smem area.\n"); release_mem_region(pm3fb_fix.smem_start, pm3fb_fix.smem_len); @@ -1347,7 +1347,7 @@ static int pm3fb_probe(struct pci_dev *dev, const struct pci_device_id *ent) goto err_exit_neither; } par->v_regs = - ioremap_nocache(pm3fb_fix.mmio_start, pm3fb_fix.mmio_len); + ioremap(pm3fb_fix.mmio_start, pm3fb_fix.mmio_len); if (!par->v_regs) { printk(KERN_WARNING "pm3fb: Can't remap %s register area.\n", pm3fb_fix.id); diff --git a/drivers/video/fbdev/pmag-aa-fb.c b/drivers/video/fbdev/pmag-aa-fb.c index d1e78ce3a9c2..d5bf185fc376 100644 --- a/drivers/video/fbdev/pmag-aa-fb.c +++ b/drivers/video/fbdev/pmag-aa-fb.c @@ -188,7 +188,7 @@ static int pmagaafb_probe(struct device *dev) /* MMIO mapping setup. */ info->fix.mmio_start = start + PMAG_AA_BT455_OFFSET; - par->mmio = ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len); + par->mmio = ioremap(info->fix.mmio_start, info->fix.mmio_len); if (!par->mmio) { printk(KERN_ERR "%s: Cannot map MMIO\n", dev_name(dev)); err = -ENOMEM; @@ -199,7 +199,7 @@ static int pmagaafb_probe(struct device *dev) /* Frame buffer mapping setup. */ info->fix.smem_start = start + PMAG_AA_ONBOARD_FBMEM_OFFSET; - info->screen_base = ioremap_nocache(info->fix.smem_start, + info->screen_base = ioremap(info->fix.smem_start, info->fix.smem_len); if (!info->screen_base) { printk(KERN_ERR "%s: Cannot map FB\n", dev_name(dev)); diff --git a/drivers/video/fbdev/pmag-ba-fb.c b/drivers/video/fbdev/pmag-ba-fb.c index 56b912bb28de..2ddcdf7919a2 100644 --- a/drivers/video/fbdev/pmag-ba-fb.c +++ b/drivers/video/fbdev/pmag-ba-fb.c @@ -180,7 +180,7 @@ static int pmagbafb_probe(struct device *dev) /* MMIO mapping setup. 
*/ info->fix.mmio_start = start; - par->mmio = ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len); + par->mmio = ioremap(info->fix.mmio_start, info->fix.mmio_len); if (!par->mmio) { printk(KERN_ERR "%s: Cannot map MMIO\n", dev_name(dev)); err = -ENOMEM; @@ -190,7 +190,7 @@ static int pmagbafb_probe(struct device *dev) /* Frame buffer mapping setup. */ info->fix.smem_start = start + PMAG_BA_FBMEM; - info->screen_base = ioremap_nocache(info->fix.smem_start, + info->screen_base = ioremap(info->fix.smem_start, info->fix.smem_len); if (!info->screen_base) { printk(KERN_ERR "%s: Cannot map FB\n", dev_name(dev)); diff --git a/drivers/video/fbdev/pmagb-b-fb.c b/drivers/video/fbdev/pmagb-b-fb.c index 2822b2225924..90d2b04feb42 100644 --- a/drivers/video/fbdev/pmagb-b-fb.c +++ b/drivers/video/fbdev/pmagb-b-fb.c @@ -287,7 +287,7 @@ static int pmagbbfb_probe(struct device *dev) /* MMIO mapping setup. */ info->fix.mmio_start = start; - par->mmio = ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len); + par->mmio = ioremap(info->fix.mmio_start, info->fix.mmio_len); if (!par->mmio) { printk(KERN_ERR "%s: Cannot map MMIO\n", dev_name(dev)); err = -ENOMEM; @@ -298,7 +298,7 @@ static int pmagbbfb_probe(struct device *dev) /* Frame buffer mapping setup. */ info->fix.smem_start = start + PMAGB_B_FBMEM; - par->smem = ioremap_nocache(info->fix.smem_start, info->fix.smem_len); + par->smem = ioremap(info->fix.smem_start, info->fix.smem_len); if (!par->smem) { printk(KERN_ERR "%s: Cannot map FB\n", dev_name(dev)); err = -ENOMEM; diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c index 0a3b2b7c7891..c680b3e651cb 100644 --- a/drivers/video/fbdev/pvr2fb.c +++ b/drivers/video/fbdev/pvr2fb.c @@ -770,7 +770,7 @@ static int __maybe_unused pvr2fb_common_init(void) struct pvr2fb_par *par = currentpar; unsigned long modememused, rev; - fb_info->screen_base = ioremap_nocache(pvr2_fix.smem_start, + fb_info->screen_base = ioremap(pvr2_fix.smem_start, pvr2_fix.smem_len); if (!fb_info->screen_base) { @@ -778,7 +778,7 @@ static int __maybe_unused pvr2fb_common_init(void) goto out_err; } - par->mmio_base = ioremap_nocache(pvr2_fix.mmio_start, + par->mmio_base = ioremap(pvr2_fix.mmio_start, pvr2_fix.mmio_len); if (!par->mmio_base) { printk(KERN_ERR "pvr2fb: Failed to remap mmio space\n"); diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c index 1410f476e135..5615054a0cad 100644 --- a/drivers/video/fbdev/pxa168fb.c +++ b/drivers/video/fbdev/pxa168fb.c @@ -665,7 +665,7 @@ static int pxa168fb_probe(struct platform_device *pdev) /* * Map LCD controller registers. 
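(A side note on the devm_ioremap() conversions, mbxfb and mmp above and pxa168fb just below: like the devm_ioremap_nocache() calls they replace, these mappings are device-managed, so the probe error paths and remove routines need no explicit iounmap(); the mapping is released automatically when the device is detached.)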
*/ - fbi->reg_base = devm_ioremap_nocache(&pdev->dev, res->start, + fbi->reg_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (fbi->reg_base == NULL) { ret = -ENOMEM; diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c index e04efb567b5c..8048499e398d 100644 --- a/drivers/video/fbdev/s1d13xxxfb.c +++ b/drivers/video/fbdev/s1d13xxxfb.c @@ -809,7 +809,7 @@ static int s1d13xxxfb_probe(struct platform_device *pdev) platform_set_drvdata(pdev, info); default_par = info->par; - default_par->regs = ioremap_nocache(pdev->resource[1].start, + default_par->regs = ioremap(pdev->resource[1].start, pdev->resource[1].end - pdev->resource[1].start +1); if (!default_par->regs) { printk(KERN_ERR PFX "unable to map registers\n"); @@ -818,7 +818,7 @@ static int s1d13xxxfb_probe(struct platform_device *pdev) } info->pseudo_palette = default_par->pseudo_palette; - info->screen_base = ioremap_nocache(pdev->resource[0].start, + info->screen_base = ioremap(pdev->resource[0].start, pdev->resource[0].end - pdev->resource[0].start +1); if (!info->screen_base) { diff --git a/drivers/video/fbdev/sh7760fb.c b/drivers/video/fbdev/sh7760fb.c index ab8fe838c776..f72b03594719 100644 --- a/drivers/video/fbdev/sh7760fb.c +++ b/drivers/video/fbdev/sh7760fb.c @@ -463,7 +463,7 @@ static int sh7760fb_probe(struct platform_device *pdev) goto out_fb; } - par->base = ioremap_nocache(res->start, resource_size(res)); + par->base = ioremap(res->start, resource_size(res)); if (!par->base) { dev_err(&pdev->dev, "cannot remap\n"); ret = -ENODEV; diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c index c249763dbf0b..54ee7e02a244 100644 --- a/drivers/video/fbdev/sh_mobile_lcdcfb.c +++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c @@ -2588,7 +2588,7 @@ static int sh_mobile_lcdc_probe(struct platform_device *pdev) if (num_channels == 2) priv->forced_fourcc = pdata->ch[0].fourcc; - priv->base = ioremap_nocache(res->start, resource_size(res)); + priv->base = ioremap(res->start, resource_size(res)); if (!priv->base) { error = -ENOMEM; goto err1; diff --git a/drivers/video/fbdev/sstfb.c b/drivers/video/fbdev/sstfb.c index 4e22ae383c87..1f171a527174 100644 --- a/drivers/video/fbdev/sstfb.c +++ b/drivers/video/fbdev/sstfb.c @@ -1363,14 +1363,14 @@ static int sstfb_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto fail_fb_mem; } - par->mmio_vbase = ioremap_nocache(fix->mmio_start, + par->mmio_vbase = ioremap(fix->mmio_start, fix->mmio_len); if (!par->mmio_vbase) { printk(KERN_ERR "sstfb: cannot remap register area %#lx\n", fix->mmio_start); goto fail_mmio_remap; } - info->screen_base = ioremap_nocache(fix->smem_start, 0x400000); + info->screen_base = ioremap(fix->smem_start, 0x400000); if (!info->screen_base) { printk(KERN_ERR "sstfb: cannot remap framebuffer %#lx\n", fix->smem_start); diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c index 9e88e3f594c2..46709443a82f 100644 --- a/drivers/video/fbdev/stifb.c +++ b/drivers/video/fbdev/stifb.c @@ -1198,7 +1198,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref) case S9000_ID_TOMCAT: /* Dual CRX, behaves else like a CRX */ /* FIXME: TomCat supports two heads: * fb.iobase = REGION_BASE(fb_info,3); - * fb.screen_base = ioremap_nocache(REGION_BASE(fb_info,2),xxx); + * fb.screen_base = ioremap(REGION_BASE(fb_info,2),xxx); * for now we only support the left one ! 
*/ xres = fb->ngle_rom.x_size_visible; yres = fb->ngle_rom.y_size_visible; @@ -1291,7 +1291,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref) strcpy(fix->id, "stifb"); info->fbops = &stifb_ops; - info->screen_base = ioremap_nocache(REGION_BASE(fb,1), fix->smem_len); + info->screen_base = ioremap(REGION_BASE(fb,1), fix->smem_len); if (!info->screen_base) { printk(KERN_ERR "stifb: failed to map memory\n"); goto out_err0; diff --git a/drivers/video/fbdev/tdfxfb.c b/drivers/video/fbdev/tdfxfb.c index fdbb1ea66e6c..0337d1a1a70b 100644 --- a/drivers/video/fbdev/tdfxfb.c +++ b/drivers/video/fbdev/tdfxfb.c @@ -1417,7 +1417,7 @@ static int tdfxfb_probe(struct pci_dev *pdev, const struct pci_device_id *id) } default_par->regbase_virt = - ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len); + ioremap(info->fix.mmio_start, info->fix.mmio_len); if (!default_par->regbase_virt) { printk(KERN_ERR "fb: Can't remap %s register area.\n", info->fix.id); diff --git a/drivers/video/fbdev/tgafb.c b/drivers/video/fbdev/tgafb.c index 286b2371c7dd..1966f1d70899 100644 --- a/drivers/video/fbdev/tgafb.c +++ b/drivers/video/fbdev/tgafb.c @@ -1438,7 +1438,7 @@ static int tgafb_register(struct device *dev) } /* Map the framebuffer. */ - mem_base = ioremap_nocache(bar0_start, bar0_len); + mem_base = ioremap(bar0_start, bar0_len); if (!mem_base) { printk(KERN_ERR "tgafb: Cannot map MMIO\n"); goto err1; diff --git a/drivers/video/fbdev/tridentfb.c b/drivers/video/fbdev/tridentfb.c index da74bf6c5996..91b2f6ca2607 100644 --- a/drivers/video/fbdev/tridentfb.c +++ b/drivers/video/fbdev/tridentfb.c @@ -1556,7 +1556,7 @@ static int trident_pci_probe(struct pci_dev *dev, return -1; } - default_par->io_virt = ioremap_nocache(tridentfb_fix.mmio_start, + default_par->io_virt = ioremap(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len); if (!default_par->io_virt) { @@ -1579,7 +1579,7 @@ static int trident_pci_probe(struct pci_dev *dev, goto out_unmap1; } - info->screen_base = ioremap_nocache(tridentfb_fix.smem_start, + info->screen_base = ioremap(tridentfb_fix.smem_start, tridentfb_fix.smem_len); if (!info->screen_base) { diff --git a/drivers/video/fbdev/valkyriefb.c b/drivers/video/fbdev/valkyriefb.c index e04fde9c1fcd..97a59b5a4570 100644 --- a/drivers/video/fbdev/valkyriefb.c +++ b/drivers/video/fbdev/valkyriefb.c @@ -356,7 +356,7 @@ int __init valkyriefb_init(void) p->total_vram = 0x100000; p->frame_buffer_phys = frame_buffer_phys; #ifdef CONFIG_MAC - p->frame_buffer = ioremap_nocache(frame_buffer_phys, p->total_vram); + p->frame_buffer = ioremap(frame_buffer_phys, p->total_vram); #else p->frame_buffer = ioremap_wt(frame_buffer_phys, p->total_vram); #endif diff --git a/drivers/video/fbdev/vermilion/cr_pll.c b/drivers/video/fbdev/vermilion/cr_pll.c index c1e3738e6789..79d42b23d850 100644 --- a/drivers/video/fbdev/vermilion/cr_pll.c +++ b/drivers/video/fbdev/vermilion/cr_pll.c @@ -159,7 +159,7 @@ static int __init cr_pll_init(void) pci_read_config_dword(mch_dev, CRVML_REG_MCHBAR, &mch_bar); mch_regs_base = - ioremap_nocache(mch_bar, CRVML_MCHMAP_SIZE); + ioremap(mch_bar, CRVML_MCHMAP_SIZE); if (!mch_regs_base) { printk(KERN_ERR "Carillo Ranch MCH device was not enabled.\n"); diff --git a/drivers/video/fbdev/vermilion/vermilion.c b/drivers/video/fbdev/vermilion/vermilion.c index 498038a964ee..ff61605b8764 100644 --- a/drivers/video/fbdev/vermilion/vermilion.c +++ b/drivers/video/fbdev/vermilion/vermilion.c @@ -317,7 +317,7 @@ static int vmlfb_enable_mmio(struct vml_par *par) ": Could not claim display 
controller MMIO.\n"); return -EBUSY; } - par->vdc_mem = ioremap_nocache(par->vdc_mem_base, par->vdc_mem_size); + par->vdc_mem = ioremap(par->vdc_mem_base, par->vdc_mem_size); if (par->vdc_mem == NULL) { printk(KERN_ERR MODULE_NAME ": Could not map display controller MMIO.\n"); @@ -332,7 +332,7 @@ static int vmlfb_enable_mmio(struct vml_par *par) err = -EBUSY; goto out_err_1; } - par->gpu_mem = ioremap_nocache(par->gpu_mem_base, par->gpu_mem_size); + par->gpu_mem = ioremap(par->gpu_mem_base, par->gpu_mem_size); if (par->gpu_mem == NULL) { printk(KERN_ERR MODULE_NAME ": Could not map GPU MMIO.\n"); err = -ENOMEM; diff --git a/drivers/video/fbdev/via/via-core.c b/drivers/video/fbdev/via/via-core.c index ffa2ca2d3f5e..703ddee9a244 100644 --- a/drivers/video/fbdev/via/via-core.c +++ b/drivers/video/fbdev/via/via-core.c @@ -442,7 +442,7 @@ static int via_pci_setup_mmio(struct viafb_dev *vdev) */ vdev->engine_start = pci_resource_start(vdev->pdev, 1); vdev->engine_len = pci_resource_len(vdev->pdev, 1); - vdev->engine_mmio = ioremap_nocache(vdev->engine_start, + vdev->engine_mmio = ioremap(vdev->engine_start, vdev->engine_len); if (vdev->engine_mmio == NULL) dev_err(&vdev->pdev->dev, diff --git a/drivers/video/fbdev/w100fb.c b/drivers/video/fbdev/w100fb.c index 3be07807edcd..0796b1d90981 100644 --- a/drivers/video/fbdev/w100fb.c +++ b/drivers/video/fbdev/w100fb.c @@ -648,12 +648,12 @@ int w100fb_probe(struct platform_device *pdev) return -EINVAL; /* Remap the chip base address */ - remapped_base = ioremap_nocache(mem->start+W100_CFG_BASE, W100_CFG_LEN); + remapped_base = ioremap(mem->start+W100_CFG_BASE, W100_CFG_LEN); if (remapped_base == NULL) goto out; /* Map the register space */ - remapped_regs = ioremap_nocache(mem->start+W100_REG_BASE, W100_REG_LEN); + remapped_regs = ioremap(mem->start+W100_REG_BASE, W100_REG_LEN); if (remapped_regs == NULL) goto out; @@ -672,7 +672,7 @@ int w100fb_probe(struct platform_device *pdev) printk(" at 0x%08lx.\n", (unsigned long) mem->start+W100_CFG_BASE); /* Remap the framebuffer */ - remapped_fbuf = ioremap_nocache(mem->start+MEM_WINDOW_BASE, MEM_WINDOW_SIZE); + remapped_fbuf = ioremap(mem->start+MEM_WINDOW_BASE, MEM_WINDOW_SIZE); if (remapped_fbuf == NULL) goto out; diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c index 2307b0329aec..d823d558c0c4 100644 --- a/drivers/virt/vboxguest/vboxguest_core.c +++ b/drivers/virt/vboxguest/vboxguest_core.c @@ -6,6 +6,7 @@ */ #include <linux/device.h> +#include <linux/io.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/sizes.h> diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c index 43c391626a00..50920b6fc319 100644 --- a/drivers/virt/vboxguest/vboxguest_utils.c +++ b/drivers/virt/vboxguest/vboxguest_utils.c @@ -7,6 +7,7 @@ */ #include <linux/errno.h> +#include <linux/io.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/module.h> diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index e05679c478e2..93f995f6cf36 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -32,10 +32,11 @@ #define VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG (__GFP_NORETRY | __GFP_NOWARN | \ __GFP_NOMEMALLOC) /* The order of free page blocks to report to host */ -#define VIRTIO_BALLOON_FREE_PAGE_ORDER (MAX_ORDER - 1) +#define VIRTIO_BALLOON_HINT_BLOCK_ORDER (MAX_ORDER - 1) /* The size of a free page block in bytes */ -#define VIRTIO_BALLOON_FREE_PAGE_SIZE \ - (1 << 
(VIRTIO_BALLOON_FREE_PAGE_ORDER + PAGE_SHIFT)) +#define VIRTIO_BALLOON_HINT_BLOCK_BYTES \ + (1 << (VIRTIO_BALLOON_HINT_BLOCK_ORDER + PAGE_SHIFT)) +#define VIRTIO_BALLOON_HINT_BLOCK_PAGES (1 << VIRTIO_BALLOON_HINT_BLOCK_ORDER) #ifdef CONFIG_BALLOON_COMPACTION static struct vfsmount *balloon_mnt; @@ -380,7 +381,7 @@ static unsigned long return_free_pages_to_mm(struct virtio_balloon *vb, if (!page) break; free_pages((unsigned long)page_address(page), - VIRTIO_BALLOON_FREE_PAGE_ORDER); + VIRTIO_BALLOON_HINT_BLOCK_ORDER); } vb->num_free_page_blocks -= num_returned; spin_unlock_irq(&vb->free_page_list_lock); @@ -582,7 +583,7 @@ static int get_free_page_and_send(struct virtio_balloon *vb) ; page = alloc_pages(VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG, - VIRTIO_BALLOON_FREE_PAGE_ORDER); + VIRTIO_BALLOON_HINT_BLOCK_ORDER); /* * When the allocation returns NULL, it indicates that we have got all * the possible free pages, so return -EINTR to stop. @@ -591,13 +592,13 @@ static int get_free_page_and_send(struct virtio_balloon *vb) return -EINTR; p = page_address(page); - sg_init_one(&sg, p, VIRTIO_BALLOON_FREE_PAGE_SIZE); + sg_init_one(&sg, p, VIRTIO_BALLOON_HINT_BLOCK_BYTES); /* There is always 1 entry reserved for the cmd id to use. */ if (vq->num_free > 1) { err = virtqueue_add_inbuf(vq, &sg, 1, p, GFP_KERNEL); if (unlikely(err)) { free_pages((unsigned long)p, - VIRTIO_BALLOON_FREE_PAGE_ORDER); + VIRTIO_BALLOON_HINT_BLOCK_ORDER); return err; } virtqueue_kick(vq); @@ -610,7 +611,7 @@ static int get_free_page_and_send(struct virtio_balloon *vb) * The vq has no available entry to add this page block, so * just free it. */ - free_pages((unsigned long)p, VIRTIO_BALLOON_FREE_PAGE_ORDER); + free_pages((unsigned long)p, VIRTIO_BALLOON_HINT_BLOCK_ORDER); } return 0; @@ -721,6 +722,17 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info, get_page(newpage); /* balloon reference */ + /* + * When we migrate a page to a different zone and adjusted the + * managed page count when inflating, we have to fixup the count of + * both involved zones. 
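(To make the zone fixup in this comment concrete: when VIRTIO_BALLOON_F_DEFLATE_ON_OOM is not negotiated, each inflated page was subtracted from its zone's managed-page count. If compaction then migrates that balloon page from, say, ZONE_NORMAL to ZONE_MOVABLE, the source zone must get its page back and the target zone must give one up; hence adjust_managed_page_count(page, 1) on the old page and adjust_managed_page_count(newpage, -1) on the new one.)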
+ */ + if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM) && + page_zone(page) != page_zone(newpage)) { + adjust_managed_page_count(page, 1); + adjust_managed_page_count(newpage, -1); + } + /* balloon's page migration 1st step -- inflate "newpage" */ spin_lock_irqsave(&vb_dev_info->pages_lock, flags); balloon_page_insert(vb_dev_info, newpage); @@ -765,11 +777,11 @@ static unsigned long shrink_free_pages(struct virtio_balloon *vb, unsigned long blocks_to_free, blocks_freed; pages_to_free = round_up(pages_to_free, - 1 << VIRTIO_BALLOON_FREE_PAGE_ORDER); - blocks_to_free = pages_to_free >> VIRTIO_BALLOON_FREE_PAGE_ORDER; + VIRTIO_BALLOON_HINT_BLOCK_PAGES); + blocks_to_free = pages_to_free / VIRTIO_BALLOON_HINT_BLOCK_PAGES; blocks_freed = return_free_pages_to_mm(vb, blocks_to_free); - return blocks_freed << VIRTIO_BALLOON_FREE_PAGE_ORDER; + return blocks_freed * VIRTIO_BALLOON_HINT_BLOCK_PAGES; } static unsigned long leak_balloon_pages(struct virtio_balloon *vb, @@ -826,7 +838,7 @@ static unsigned long virtio_balloon_shrinker_count(struct shrinker *shrinker, unsigned long count; count = vb->num_pages / VIRTIO_BALLOON_PAGES_PER_PAGE; - count += vb->num_free_page_blocks << VIRTIO_BALLOON_FREE_PAGE_ORDER; + count += vb->num_free_page_blocks * VIRTIO_BALLOON_HINT_BLOCK_PAGES; return count; } diff --git a/drivers/vme/boards/vme_vmivme7805.c b/drivers/vme/boards/vme_vmivme7805.c index 1b6e42e5e8fd..51e056bae943 100644 --- a/drivers/vme/boards/vme_vmivme7805.c +++ b/drivers/vme/boards/vme_vmivme7805.c @@ -55,7 +55,7 @@ static int vmic_probe(struct pci_dev *pdev, const struct pci_device_id *id) } /* Map registers in BAR 0 */ - vmic_base = ioremap_nocache(pci_resource_start(pdev, 0), 16); + vmic_base = ioremap(pci_resource_start(pdev, 0), 16); if (!vmic_base) { dev_err(&pdev->dev, "Unable to remap CRG region\n"); retval = -EIO; diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c index 1edb8a5de873..ea938dc29c5e 100644 --- a/drivers/vme/bridges/vme_ca91cx42.c +++ b/drivers/vme/bridges/vme_ca91cx42.c @@ -554,7 +554,7 @@ static int ca91cx42_alloc_resource(struct vme_master_resource *image, goto err_resource; } - image->kern_base = ioremap_nocache( + image->kern_base = ioremap( image->bus_resource.start, size); if (!image->kern_base) { dev_err(ca91cx42_bridge->parent, "Failed to remap resource\n"); @@ -1638,7 +1638,7 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id) } /* map registers in BAR 0 */ - ca91cx42_device->base = ioremap_nocache(pci_resource_start(pdev, 0), + ca91cx42_device->base = ioremap(pci_resource_start(pdev, 0), 4096); if (!ca91cx42_device->base) { dev_err(&pdev->dev, "Unable to remap CRG region\n"); diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/vme/bridges/vme_tsi148.c index 7e079d39bd76..50ae26977a02 100644 --- a/drivers/vme/bridges/vme_tsi148.c +++ b/drivers/vme/bridges/vme_tsi148.c @@ -770,7 +770,7 @@ static int tsi148_alloc_resource(struct vme_master_resource *image, goto err_resource; } - image->kern_base = ioremap_nocache( + image->kern_base = ioremap( image->bus_resource.start, size); if (!image->kern_base) { dev_err(tsi148_bridge->parent, "Failed to remap resource\n"); @@ -2317,7 +2317,7 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id) } /* map registers in BAR 0 */ - tsi148_device->base = ioremap_nocache(pci_resource_start(pdev, 0), + tsi148_device->base = ioremap(pci_resource_start(pdev, 0), 4096); if (!tsi148_device->base) { dev_err(&pdev->dev, "Unable to
remap CRG region\n"); diff --git a/drivers/w1/masters/matrox_w1.c b/drivers/w1/masters/matrox_w1.c index 3110791a2f1c..ee716c715710 100644 --- a/drivers/w1/masters/matrox_w1.c +++ b/drivers/w1/masters/matrox_w1.c @@ -139,7 +139,7 @@ static int matrox_w1_probe(struct pci_dev *pdev, const struct pci_device_id *ent dev->phys_addr = pci_resource_start(pdev, 1); - dev->virt_addr = ioremap_nocache(dev->phys_addr, 16384); + dev->virt_addr = ioremap(dev->phys_addr, 16384); if (!dev->virt_addr) { dev_err(&pdev->dev, "%s: failed to ioremap(0x%lx, %d).\n", __func__, dev->phys_addr, 16384); diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 1679e0dc869b..cec868f8db3f 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -687,6 +687,7 @@ config MAX63XX_WATCHDOG config MAX77620_WATCHDOG tristate "Maxim Max77620 Watchdog Timer" depends on MFD_MAX77620 || COMPILE_TEST + select WATCHDOG_CORE help This is the driver for the Max77620 watchdog timer. Say 'Y' here to enable the watchdog timer support for @@ -1444,6 +1445,7 @@ config SMSC37B787_WDT config TQMX86_WDT tristate "TQ-Systems TQMX86 Watchdog Timer" depends on X86 + select WATCHDOG_CORE help This is the driver for the hardware watchdog timer in the TQMX86 IO controller found on some of their ComExpress Modules. diff --git a/drivers/watchdog/bcm63xx_wdt.c b/drivers/watchdog/bcm63xx_wdt.c index 8a043b52aa2f..7cdb25363ea0 100644 --- a/drivers/watchdog/bcm63xx_wdt.c +++ b/drivers/watchdog/bcm63xx_wdt.c @@ -246,7 +246,7 @@ static int bcm63xx_wdt_probe(struct platform_device *pdev) return -ENODEV; } - bcm63xx_wdt_device.regs = devm_ioremap_nocache(&pdev->dev, r->start, + bcm63xx_wdt_device.regs = devm_ioremap(&pdev->dev, r->start, resource_size(r)); if (!bcm63xx_wdt_device.regs) { dev_err(&pdev->dev, "failed to remap I/O resources\n"); diff --git a/drivers/watchdog/imx7ulp_wdt.c b/drivers/watchdog/imx7ulp_wdt.c index 0a87c6f4bab2..11b9e7c6b7f5 100644 --- a/drivers/watchdog/imx7ulp_wdt.c +++ b/drivers/watchdog/imx7ulp_wdt.c @@ -112,7 +112,7 @@ static int imx7ulp_wdt_restart(struct watchdog_device *wdog, { struct imx7ulp_wdt_device *wdt = watchdog_get_drvdata(wdog); - imx7ulp_wdt_enable(wdt->base, true); + imx7ulp_wdt_enable(wdog, true); imx7ulp_wdt_set_timeout(&wdt->wdd, 1); /* wait for wdog to fire */ diff --git a/drivers/watchdog/intel_scu_watchdog.c b/drivers/watchdog/intel_scu_watchdog.c index 6ad5bf3451ec..804e35940983 100644 --- a/drivers/watchdog/intel_scu_watchdog.c +++ b/drivers/watchdog/intel_scu_watchdog.c @@ -463,7 +463,7 @@ static int __init intel_scu_watchdog_init(void) return -ENODEV; } - tmp_addr = ioremap_nocache(watchdog_device.timer_tbl_ptr->phys_addr, + tmp_addr = ioremap(watchdog_device.timer_tbl_ptr->phys_addr, 20); if (tmp_addr == NULL) { diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c index 1cccf8eb1c5d..8e6dfe76f9c9 100644 --- a/drivers/watchdog/orion_wdt.c +++ b/drivers/watchdog/orion_wdt.c @@ -602,7 +602,7 @@ static int orion_wdt_probe(struct platform_device *pdev) set_bit(WDOG_HW_RUNNING, &dev->wdt.status); /* Request the IRQ only after the watchdog is disabled */ - irq = platform_get_irq(pdev, 0); + irq = platform_get_irq_optional(pdev, 0); if (irq > 0) { /* * Not all supported platforms specify an interrupt for the @@ -617,7 +617,7 @@ static int orion_wdt_probe(struct platform_device *pdev) } /* Optional 2nd interrupt for pretimeout */ - irq = platform_get_irq(pdev, 1); + irq = platform_get_irq_optional(pdev, 1); if (irq > 0) { orion_wdt_info.options |=
WDIOF_PRETIMEOUT; ret = devm_request_irq(&pdev->dev, irq, orion_wdt_pre_irq, diff --git a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c index 1dfede0abf18..aee3c2efd565 100644 --- a/drivers/watchdog/rc32434_wdt.c +++ b/drivers/watchdog/rc32434_wdt.c @@ -26,7 +26,7 @@ #include <linux/platform_device.h> /* For platform_driver framework */ #include <linux/spinlock.h> /* For spin_lock/spin_unlock/... */ #include <linux/uaccess.h> /* For copy_to_user/put_user/... */ -#include <linux/io.h> /* For devm_ioremap_nocache */ +#include <linux/io.h> /* For devm_ioremap */ #include <asm/mach-rc32434/integ.h> /* For the Watchdog registers */ @@ -267,7 +267,7 @@ static int rc32434_wdt_probe(struct platform_device *pdev) return -ENODEV; } - wdt_reg = devm_ioremap_nocache(&pdev->dev, r->start, resource_size(r)); + wdt_reg = devm_ioremap(&pdev->dev, r->start, resource_size(r)); if (!wdt_reg) { pr_err("failed to remap I/O resources\n"); return -ENXIO; diff --git a/drivers/watchdog/rn5t618_wdt.c b/drivers/watchdog/rn5t618_wdt.c index 234876047431..6e524c8e26a8 100644 --- a/drivers/watchdog/rn5t618_wdt.c +++ b/drivers/watchdog/rn5t618_wdt.c @@ -188,6 +188,7 @@ static struct platform_driver rn5t618_wdt_driver = { module_platform_driver(rn5t618_wdt_driver); +MODULE_ALIAS("platform:rn5t618-wdt"); MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>"); MODULE_DESCRIPTION("RN5T618 watchdog driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c index fdf533fe0bb2..56a4a4030ca9 100644 --- a/drivers/watchdog/w83627hf_wdt.c +++ b/drivers/watchdog/w83627hf_wdt.c @@ -420,7 +420,7 @@ static int wdt_find(int addr) cr_wdt_csr = NCT6102D_WDT_CSR; break; case NCT6116_ID: - ret = nct6102; + ret = nct6116; cr_wdt_timeout = NCT6102D_WDT_TIMEOUT; cr_wdt_control = NCT6102D_WDT_CONTROL; cr_wdt_csr = NCT6102D_WDT_CSR; diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 4f2e78a5e4db..0c142bcab79d 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -394,7 +394,8 @@ static struct notifier_block xen_memory_nb = { #else static enum bp_state reserve_additional_memory(void) { - balloon_stats.target_pages = balloon_stats.current_pages; + balloon_stats.target_pages = balloon_stats.current_pages + + balloon_stats.target_unpopulated; return BP_ECANCELED; } #endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */ diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 49b381e104ef..7b36b51cdb9f 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -664,7 +664,6 @@ static int grow_gnttab_list(unsigned int more_frames) unsigned int nr_glist_frames, new_nr_glist_frames; unsigned int grefs_per_frame; - BUG_ON(gnttab_interface == NULL); grefs_per_frame = gnttab_interface->grefs_per_grant_frame; new_nr_grant_frames = nr_grant_frames + more_frames; @@ -1160,7 +1159,6 @@ EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync); static unsigned int nr_status_frames(unsigned int nr_grant_frames) { - BUG_ON(gnttab_interface == NULL); return gnttab_frames(nr_grant_frames, SPP); } @@ -1388,7 +1386,6 @@ static int gnttab_expand(unsigned int req_entries) int rc; unsigned int cur, extra; - BUG_ON(gnttab_interface == NULL); cur = nr_grant_frames; extra = ((req_entries + gnttab_interface->grefs_per_grant_frame - 1) / gnttab_interface->grefs_per_grant_frame); @@ -1423,7 +1420,6 @@ int gnttab_init(void) /* Determine the maximum number of frames required for the * grant reference free list on the current hypervisor.
*/ - BUG_ON(gnttab_interface == NULL); max_nr_glist_frames = (max_nr_grant_frames * gnttab_interface->grefs_per_grant_frame / RPP); diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c index 8b9919c26095..70650b248de5 100644 --- a/drivers/xen/preempt.c +++ b/drivers/xen/preempt.c @@ -8,7 +8,7 @@ #include <linux/sched.h> #include <xen/xen-ops.h> -#ifndef CONFIG_PREEMPT +#ifndef CONFIG_PREEMPTION /* * Some hypercalls issued by the toolstack can take many 10s of @@ -37,4 +37,4 @@ asmlinkage __visible void xen_maybe_preempt_hcall(void) __this_cpu_write(xen_in_preemptible_hcall, true); } } -#endif /* CONFIG_PREEMPT */ +#endif /* CONFIG_PREEMPTION */ diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h index d75a2385b37c..5f5b8a7d5b80 100644 --- a/drivers/xen/xenbus/xenbus.h +++ b/drivers/xen/xenbus/xenbus.h @@ -116,8 +116,6 @@ int xenbus_probe_devices(struct xen_bus_type *bus); void xenbus_dev_changed(const char *node, struct xen_bus_type *bus); -void xenbus_dev_shutdown(struct device *_dev); - int xenbus_dev_suspend(struct device *dev); int xenbus_dev_resume(struct device *dev); int xenbus_dev_cancel(struct device *dev); diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index c21be6e9d38a..378486b79f96 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c @@ -255,7 +255,6 @@ fail_put: module_put(drv->driver.owner); fail: xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename); - xenbus_switch_state(dev, XenbusStateClosed); return err; } EXPORT_SYMBOL_GPL(xenbus_dev_probe); @@ -276,34 +275,20 @@ int xenbus_dev_remove(struct device *_dev) free_otherend_details(dev); - xenbus_switch_state(dev, XenbusStateClosed); + /* + * If the toolstack has forced the device state to closing then set + * the state to closed now to allow it to be cleaned up. + * Similarly, if the driver does not support re-bind, set it to + * closed.
+ */ + if (!drv->allow_rebind || + xenbus_read_driver_state(dev->nodename) == XenbusStateClosing) + xenbus_switch_state(dev, XenbusStateClosed); + return 0; } EXPORT_SYMBOL_GPL(xenbus_dev_remove); -void xenbus_dev_shutdown(struct device *_dev) -{ - struct xenbus_device *dev = to_xenbus_device(_dev); - unsigned long timeout = 5*HZ; - - DPRINTK("%s", dev->nodename); - - get_device(&dev->dev); - if (dev->state != XenbusStateConnected) { - pr_info("%s: %s: %s != Connected, skipping\n", - __func__, dev->nodename, xenbus_strstate(dev->state)); - goto out; - } - xenbus_switch_state(dev, XenbusStateClosing); - timeout = wait_for_completion_timeout(&dev->down, timeout); - if (!timeout) - pr_info("%s: %s timeout closing device\n", - __func__, dev->nodename); - out: - put_device(&dev->dev); -} -EXPORT_SYMBOL_GPL(xenbus_dev_shutdown); - int xenbus_register_driver_common(struct xenbus_driver *drv, struct xen_bus_type *bus, struct module *owner, const char *mod_name) diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c index b0bed4faf44c..14876faff3b0 100644 --- a/drivers/xen/xenbus/xenbus_probe_backend.c +++ b/drivers/xen/xenbus/xenbus_probe_backend.c @@ -198,7 +198,6 @@ static struct xen_bus_type xenbus_backend = { .uevent = xenbus_uevent_backend, .probe = xenbus_dev_probe, .remove = xenbus_dev_remove, - .shutdown = xenbus_dev_shutdown, .dev_groups = xenbus_dev_groups, }, }; diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c index a7d90a719cea..8a1650bbe18f 100644 --- a/drivers/xen/xenbus/xenbus_probe_frontend.c +++ b/drivers/xen/xenbus/xenbus_probe_frontend.c @@ -126,6 +126,28 @@ static int xenbus_frontend_dev_probe(struct device *dev) return xenbus_dev_probe(dev); } +static void xenbus_frontend_dev_shutdown(struct device *_dev) +{ + struct xenbus_device *dev = to_xenbus_device(_dev); + unsigned long timeout = 5*HZ; + + DPRINTK("%s", dev->nodename); + + get_device(&dev->dev); + if (dev->state != XenbusStateConnected) { + pr_info("%s: %s: %s != Connected, skipping\n", + __func__, dev->nodename, xenbus_strstate(dev->state)); + goto out; + } + xenbus_switch_state(dev, XenbusStateClosing); + timeout = wait_for_completion_timeout(&dev->down, timeout); + if (!timeout) + pr_info("%s: %s timeout closing device\n", + __func__, dev->nodename); + out: + put_device(&dev->dev); +} + static const struct dev_pm_ops xenbus_pm_ops = { .suspend = xenbus_dev_suspend, .resume = xenbus_frontend_dev_resume, @@ -146,7 +168,7 @@ static struct xen_bus_type xenbus_frontend = { .uevent = xenbus_uevent_frontend, .probe = xenbus_frontend_dev_probe, .remove = xenbus_dev_remove, - .shutdown = xenbus_dev_shutdown, + .shutdown = xenbus_frontend_dev_shutdown, .dev_groups = xenbus_dev_groups, .pm = &xenbus_pm_ops,
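A note on the ioremap_nocache()/devm_ioremap_nocache() conversions in the hunks above (fbdev, vme, w1, watchdog, intel_scu): ioremap() already returns an uncached mapping suitable for MMIO on every architecture, so the _nocache variants are plain aliases being removed tree-wide. Below is a minimal sketch of the resulting probe idiom; the demo_* names and the register offset are hypothetical and not taken from any driver in this diff.

#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>

/* Hypothetical register offset, for illustration only. */
#define DEMO_REG_CTRL 0x04

static int demo_probe(struct platform_device *pdev)
{
    struct resource *r;
    void __iomem *regs;

    r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (!r)
        return -ENODEV;

    /*
     * ioremap() maps MMIO uncached, which is what ioremap_nocache()
     * used to spell out; the devm_ variant additionally unmaps the
     * region automatically when the driver detaches.
     */
    regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
    if (!regs)
        return -ENOMEM;

    /* Always go through the MMIO accessors, never raw dereferences. */
    writel(0x1, regs + DEMO_REG_CTRL);
    (void)readl(regs + DEMO_REG_CTRL);

    return 0;
}

Because the devm_ mapping is tied to the device's lifetime, no iounmap() is needed in the error paths or in a remove callback.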
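The virtio_balloon hunks replace the VIRTIO_BALLOON_FREE_PAGE_ORDER/_SIZE pair with three HINT_BLOCK_* constants so the shrinker arithmetic can multiply and divide by an explicit page count instead of shifting by an order. A small userspace sketch of the relationship, assuming 4 KiB pages and order 10 (the usual MAX_ORDER - 1) purely to get concrete numbers:

#include <stdio.h>

/* Stand-ins for the kernel values; PAGE_SHIFT 12 and order 10 are
 * assumptions made for this demo only. */
#define DEMO_PAGE_SHIFT  12
#define HINT_BLOCK_ORDER 10
#define HINT_BLOCK_PAGES (1UL << HINT_BLOCK_ORDER)
#define HINT_BLOCK_BYTES (1UL << (HINT_BLOCK_ORDER + DEMO_PAGE_SHIFT))

int main(void)
{
    unsigned long blocks_freed = 3;

    /* 1024 pages per block, 4 MiB per block with these assumptions */
    printf("pages=%lu bytes=%lu\n", HINT_BLOCK_PAGES, HINT_BLOCK_BYTES);

    /* The multiply used in shrink_free_pages() equals the old shift. */
    printf("%lu == %lu\n",
           blocks_freed * HINT_BLOCK_PAGES,
           blocks_freed << HINT_BLOCK_ORDER);
    return 0;
}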
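The orion_wdt switch to platform_get_irq_optional() matters because plain platform_get_irq() now logs an error when no IRQ is described, while on these boards both interrupts are legitimately optional. A minimal sketch of the pattern, again with hypothetical demo_* names:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

/* Hypothetical handler for the sketch. */
static irqreturn_t demo_irq(int irq, void *data)
{
    return IRQ_HANDLED;
}

static int demo_request_optional_irq(struct platform_device *pdev)
{
    int irq;

    /*
     * platform_get_irq_optional() returns a negative errno but prints
     * nothing when the IRQ is simply absent, so a missing interrupt
     * is not treated as a probe failure.
     */
    irq = platform_get_irq_optional(pdev, 0);
    if (irq > 0)
        return devm_request_irq(&pdev->dev, irq, demo_irq, 0,
                                "demo-wdt", pdev);

    return 0; /* no IRQ described: continue without it */
}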
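On the xen/balloon.c hunk: with memory hotplug compiled out, reserve_additional_memory() cancels ballooning by resetting the target, and as I read the fix the target must also budget for pages that are currently handed out unpopulated (balloon_stats.target_unpopulated), otherwise the balloon would later reclaim pages still in use. A toy calculation with invented numbers:

#include <stdio.h>

int main(void)
{
    /* Invented numbers, for illustration only. */
    unsigned long current_pages = 1000;    /* populated balloon pages */
    unsigned long target_unpopulated = 50; /* pages lent out unpopulated */

    /*
     * Old: target = current_pages (1000), forgetting the 50 pages
     * still outstanding. New: keep room for them.
     */
    unsigned long target_pages = current_pages + target_unpopulated;

    printf("target_pages = %lu\n", target_pages); /* 1050 */
    return 0;
}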
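The one-line rn5t618_wdt addition enables module autoloading: when the MFD parent registers its "rn5t618-wdt" child device, the uevent carries MODALIAS=platform:rn5t618-wdt, and modprobe can only resolve that through a matching MODULE_ALIAS. A minimal sketch of the mechanism, using a hypothetical "demo-wdt" device name:

#include <linux/module.h>
#include <linux/platform_device.h>

/* "demo-wdt" is a placeholder device name for this sketch. */
static int demo_wdt_probe(struct platform_device *pdev)
{
    return 0;
}

static struct platform_driver demo_wdt_driver = {
    .probe = demo_wdt_probe,
    .driver = {
        .name = "demo-wdt",
    },
};
module_platform_driver(demo_wdt_driver);

/*
 * When a parent (e.g. an MFD core) registers a "demo-wdt" platform
 * device, the uevent carries MODALIAS=platform:demo-wdt. modprobe
 * matches that string against this alias; the driver name alone is
 * not consulted for autoloading.
 */
MODULE_ALIAS("platform:demo-wdt");
MODULE_DESCRIPTION("Sketch of platform modalias autoloading");
MODULE_LICENSE("GPL");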